diff --git a/deploy_metadata.json b/deploy_metadata.json index d5560af2..d946667d 100644 --- a/deploy_metadata.json +++ b/deploy_metadata.json @@ -1,159 +1,57 @@ { "resources": { - "azure": { - "az-genesis-200": { - "artifact": "seismic-dev-azure-20251121185658.vhd", - "public_ip": "51.8.245.138", + "gcp": { + "gcp-genesis-13": { + "artifact": "seismic-dev-gcp-20260105213931.tar.gz", + "public_ip": "34.63.184.53", "domain": { - "url": "https://az-200.seismictest.net", - "record": "az-200", + "url": "https://gcp-13.seismictest.net", + "record": "gcp-13", "name": "seismictest.net", "resource_group": "yocto-testnet" }, "vm": { - "resourceGroup": "tdx-testnet", - "name": "az-genesis-200", - "nsgName": "az-genesis-200", - "cloud": "azure", - "region": "eastus", - "size": "Standard_DC4es_v6" + "resourceGroup": "testnet-477314", + "name": "gcp-genesis-13", + "nsgName": "gcp-genesis-13", + "cloud": "gcp", + "region": "us-central1-a", + "size": "c3-standard-4" }, - "data_disk": "az-genesis-200-persistent" - }, - "az-genesis-201": { - "artifact": "seismic-dev-azure-20251121185658.vhd", - "public_ip": "135.237.40.94", - "domain": { - "url": "https://az-201.seismictest.net", - "record": "az-201", - "name": "seismictest.net", - "resource_group": "yocto-testnet" - }, - "vm": { - "resourceGroup": "tdx-testnet", - "name": "az-genesis-201", - "nsgName": "az-genesis-201", - "cloud": "azure", - "region": "eastus", - "size": "Standard_DC4es_v6" - }, - "data_disk": "az-genesis-201-persistent" - }, - "az-genesis-202": { - "artifact": "seismic-dev-azure-20251121185658.vhd", - "public_ip": "48.194.106.11", - "domain": { - "url": "https://az-202.seismictest.net", - "record": "az-202", - "name": "seismictest.net", - "resource_group": "yocto-testnet" - }, - "vm": { - "resourceGroup": "tdx-testnet", - "name": "az-genesis-202", - "nsgName": "az-genesis-202", - "cloud": "azure", - "region": "eastus", - "size": "Standard_DC4es_v6" - }, - "data_disk": "az-genesis-202-persistent" - }, - "az-genesis-203": { - "artifact": "seismic-dev-azure-20251121185658.vhd", - "public_ip": "51.8.237.70", - "domain": { - "url": "https://az-203.seismictest.net", - "record": "az-203", - "name": "seismictest.net", - "resource_group": "yocto-testnet" - }, - "vm": { - "resourceGroup": "tdx-testnet", - "name": "az-genesis-203", - "nsgName": "az-genesis-203", - "cloud": "azure", - "region": "eastus", - "size": "Standard_DC4es_v6" - }, - "data_disk": "az-genesis-203-persistent" - }, - "az-genesis-1": { - "artifact": "seismic-dev-azure-20251124143117.vhd", - "public_ip": "48.223.236.240", - "domain": { - "url": "https://az-1.seismictest.net", - "record": "az-1", - "name": "seismictest.net", - "resource_group": "yocto-testnet" - }, - "vm": { - "resourceGroup": "tdx-testnet", - "name": "az-genesis-1", - "nsgName": "az-genesis-1", - "cloud": "azure", - "region": "eastus", - "size": "Standard_DC4es_v6" - }, - "data_disk": "az-genesis-1-persistent" - }, - "az-genesis-2": { - "artifact": "seismic-dev-azure-20251124143117.vhd", - "public_ip": "48.223.215.252", - "domain": { - "url": "https://az-2.seismictest.net", - "record": "az-2", - "name": "seismictest.net", - "resource_group": "yocto-testnet" - }, - "vm": { - "resourceGroup": "tdx-testnet", - "name": "az-genesis-2", - "nsgName": "az-genesis-2", - "cloud": "azure", - "region": "eastus", - "size": "Standard_DC4es_v6" - }, - "data_disk": "az-genesis-2-persistent" + "data_disk": "gcp-genesis-13-persistent" + } + } + }, + "artifacts": { + "seismic-dev-gcp-20260105211714.tar.gz": { + "repos": { + 
"enclave": "d6c4badd0ee5639a432e96d3cced228cbf5fa3b3", + "sreth": "4141f746b6169f5afeb7a94b9bf087cc2fe35221", + "summit": "2435d1b5c762c170cf68a67ef5300052d2d66265" }, - "az-genesis-3": { - "artifact": "seismic-dev-azure-20251124143117.vhd", - "public_ip": "134.33.152.130", - "domain": { - "url": "https://az-3.seismictest.net", - "record": "az-3", - "name": "seismictest.net", - "resource_group": "yocto-testnet" - }, - "vm": { - "resourceGroup": "tdx-testnet", - "name": "az-genesis-3", - "nsgName": "az-genesis-3", - "cloud": "azure", - "region": "eastus", - "size": "Standard_DC4es_v6" - }, - "data_disk": "az-genesis-3-persistent" + "image": { + "measurement_id": "seismic-dev-gcp-20260105211714.tar.gz", + "attestation_type": "CloudProvider.GCP-tdx", + "measurements": { + "rtmr1": "791f162f5ddf2e5fd3411e3c1c1b2696905172af02e3a28d665794a9571b4e0ce867e4695680076eb09e4d6bd2076cc0", + "rtmr2": "569879bf0d25b1f7fe45eaf9bcb9222138ebe1b3f482c62ba1afee6eb34bfe7fa3e40da798c54bb990b0487850cca247" + } + } + }, + "seismic-dev-gcp-20260105213931.tar.gz": { + "repos": { + "enclave": "d6c4badd0ee5639a432e96d3cced228cbf5fa3b3", + "sreth": "4141f746b6169f5afeb7a94b9bf087cc2fe35221", + "summit": "2435d1b5c762c170cf68a67ef5300052d2d66265" }, - "az-genesis-4": { - "artifact": "seismic-dev-azure-20251124143117.vhd", - "public_ip": "128.203.77.183", - "domain": { - "url": "https://az-4.seismictest.net", - "record": "az-4", - "name": "seismictest.net", - "resource_group": "yocto-testnet" - }, - "vm": { - "resourceGroup": "tdx-testnet", - "name": "az-genesis-4", - "nsgName": "az-genesis-4", - "cloud": "azure", - "region": "eastus", - "size": "Standard_DC4es_v6" - }, - "data_disk": "az-genesis-4-persistent" + "image": { + "measurement_id": "seismic-dev-gcp-20260105213931.tar.gz", + "attestation_type": "CloudProvider.GCP-tdx", + "measurements": { + "rtmr1": "f913c622cb1e665d44a4bf13a29d97506200ca1dce73e3baca7adba8f3de436e10efcfc0cb2d1ffabc362ebca41a631a", + "rtmr2": "fcacd15646e24245dff786003c104c100fed55628ec4b70a3be0f120fd52e6d71d7de3971ddb086e4a2aef4594732179" + } } } - }, - "artifacts": {} + } } \ No newline at end of file diff --git a/yocto/README.md b/yocto/README.md index 3f20271f..24d647e7 100644 --- a/yocto/README.md +++ b/yocto/README.md @@ -98,10 +98,9 @@ Upon successful deployment, the script will: - `--logs` If flagged, print build and/or deploy logs as they run ### Build arguments -- `--enclave-branch` Seismic Enclave git branch name. Defaults to 'main' - `--enclave-commit` Seismic Enclave git gommit hash. If not provided, does not change image -- `--sreth-branch` Seismic Reth git branch name. Defaults to 'seismic' - `--sreth-commit` Seismic Reth git commit hash. If not provided, does not change image +- `--summit-commit` Summit git commit hash. If not provided, does not change image ### Deploy arguments - `--artifact` Required when running --deploy without --build (e.g. 
'20241203182636') diff --git a/yocto/cloud/azure/api.py b/yocto/cloud/azure/api.py index c18df551..58b60161 100644 --- a/yocto/cloud/azure/api.py +++ b/yocto/cloud/azure/api.py @@ -18,12 +18,12 @@ from yocto.cloud.cloud_api import CloudApi from yocto.cloud.cloud_config import CloudProvider from yocto.cloud.cloud_parser import confirm -from yocto.config import DeployConfigs, VmConfigs +from yocto.config import DeployConfigs logger = logging.getLogger(__name__) OPEN_PORTS = [ - 22, # ssh + 22, # ssh 80, # http 443, # https 7878, # enclave @@ -464,7 +464,7 @@ def add_nsg_rule( source, ] cls.run_command(cmd, show_logs=config.show_logs) - + @staticmethod def get_nsg_rules(config: DeployConfigs) -> list[str]: tcp_rules = [ @@ -645,43 +645,35 @@ def create_vm( disk_name: str, ) -> None: """Create the virtual machine with user-data.""" - user_data_file = cls.create_user_data_file(config) - - try: - logger.info("Booting VM...") - cmd = [ - "az", - "vm", - "create", - "--name", - config.vm.name, - "--size", - config.vm.size, - "--resource-group", - config.vm.resource_group, - "--attach-os-disk", - disk_name, - "--security-type", - "ConfidentialVM", - "--enable-vtpm", - "true", - "--enable-secure-boot", - "false", - "--os-disk-security-encryption-type", - "NonPersistedTPM", - "--os-type", - "Linux", - "--nsg", - config.vm.nsg_name, - "--public-ip-address", - ip_name, - "--user-data", - user_data_file, - ] - cls.run_command(cmd, show_logs=False) - finally: - os.unlink(user_data_file) - logger.info(f"Deleted temporary user-data file: {user_data_file}") + logger.info("Booting VM...") + cmd = [ + "az", + "vm", + "create", + "--name", + config.vm.name, + "--size", + config.vm.size, + "--resource-group", + config.vm.resource_group, + "--attach-os-disk", + disk_name, + "--security-type", + "ConfidentialVM", + "--enable-vtpm", + "true", + "--enable-secure-boot", + "false", + "--os-disk-security-encryption-type", + "NonPersistedTPM", + "--os-type", + "Linux", + "--nsg", + config.vm.nsg_name, + "--public-ip-address", + ip_name, + ] + cls.run_command(cmd, show_logs=False) @classmethod def get_vm_ip(cls, vm_name: str, resource_group: str, location: str) -> str: diff --git a/yocto/cloud/cloud_api.py b/yocto/cloud/cloud_api.py index c2b41d2c..744951e7 100644 --- a/yocto/cloud/cloud_api.py +++ b/yocto/cloud/cloud_api.py @@ -239,11 +239,7 @@ def attach_data_disk( """Attach a data disk to a VM.""" raise NotImplementedError - @classmethod - @abstractmethod - def create_user_data_file(cls, config: "DeployConfigs") -> str: - """Create temporary user data file.""" - raise NotImplementedError + @classmethod @abstractmethod diff --git a/yocto/cloud/cloud_config.py b/yocto/cloud/cloud_config.py index 5d842648..c48a0324 100644 --- a/yocto/cloud/cloud_config.py +++ b/yocto/cloud/cloud_config.py @@ -40,6 +40,21 @@ class CloudProvider(str, Enum): GCP = "gcp" OVH = "ovh" + @staticmethod + def from_string(s: str) -> "CloudProvider": + if "-azure-" in s.lower(): + return CloudProvider.AZURE + elif "-gcp-" in s.lower(): + return CloudProvider.GCP + else: + return CloudProvider.OVH + + def is_gcp(self) -> bool: + return self == CloudProvider.GCP + + def is_azure(self) -> bool: + return self == CloudProvider.AZURE + # Re-export for convenience __all__ = [ diff --git a/yocto/cloud/cloud_parser.py b/yocto/cloud/cloud_parser.py index 61784616..8279f682 100644 --- a/yocto/cloud/cloud_parser.py +++ b/yocto/cloud/cloud_parser.py @@ -49,7 +49,10 @@ def create_cloud_parser(description: str) -> argparse.ArgumentParser: type=str, 
choices=["azure", "gcp", "ovh"], required=False, - help="Cloud provider to use (azure, gcp, or ovh). Required for deployment, optional for build.", + help=( + "Cloud provider to use (azure, gcp, or ovh). " + "Required for deployment, optional for build." + ), ) # Region/Zone (optional, defaults based on cloud) diff --git a/yocto/cloud/gcp/api.py b/yocto/cloud/gcp/api.py index 93a28c07..95fd4e9a 100644 --- a/yocto/cloud/gcp/api.py +++ b/yocto/cloud/gcp/api.py @@ -12,14 +12,14 @@ import time from pathlib import Path +from google.api_core import exceptions as gcp_exceptions from google.cloud import compute_v1, resourcemanager_v3, storage -from yocto.cloud.azure.api import AzureApi, OPEN_PORTS +from yocto.cloud.azure.api import AzureApi from yocto.cloud.cloud_api import CloudApi from yocto.cloud.cloud_config import CloudProvider from yocto.cloud.cloud_parser import confirm from yocto.cloud.gcp.defaults import ( - CONSENSUS_PORT, DEFAULT_DISK_TYPE, DEFAULT_NETWORK_TIER, DEFAULT_NIC_TYPE, @@ -345,12 +345,15 @@ def _create_image_from_gcs( # Set sourceType to RAW (required by gcloud flow) image.source_type = "RAW" + # Set architecture to X86_64 (required for TDX C3 VMs) + image.architecture = "X86_64" + logger.info(f"Using Storage API URL: {storage_api_url}") logger.info("Source type: RAW") + logger.info("Architecture: X86_64") # Add all required guest OS features for TDX - # These match: --guest-os-features=UEFI_COMPATIBLE, - # VIRTIO_SCSI_MULTIQUEUE,GVNIC,TDX_CAPABLE + # These match the features from a working GCP TDX instance guest_os_features = [ "UEFI_COMPATIBLE", "VIRTIO_SCSI_MULTIQUEUE", @@ -403,6 +406,7 @@ def _create_disk_from_image( disk.name = disk_name disk.source_image = f"projects/{project}/global/images/{image_name}" disk.type_ = f"projects/{project}/zones/{zone}/diskTypes/{disk_type}" + disk.architecture = "X86_64" operation = disk_client.insert( project=project, @@ -647,14 +651,19 @@ def delete_disk( ) disk_client = compute_v1.DisksClient() - operation = disk_client.delete( - project=resource_group, - zone=zone, - disk=disk_name, - ) + try: + operation = disk_client.delete( + project=resource_group, + zone=zone, + disk=disk_name, + ) - wait_for_extended_operation(operation, f"disk deletion for {disk_name}") - logger.info(f"Disk {disk_name} deleted successfully") + wait_for_extended_operation(operation, f"disk deletion for {disk_name}") + logger.info(f"Disk {disk_name} deleted successfully") + except gcp_exceptions.NotFound: + logger.info( + f"Disk {disk_name} not found - likely already deleted by Google automatically" + ) @classmethod def delete_disk_by_name( @@ -675,14 +684,19 @@ def delete_disk_by_name( ) disk_client = compute_v1.DisksClient() - operation = disk_client.delete( - project=resource_group, - zone=zone, - disk=disk_name, - ) + try: + operation = disk_client.delete( + project=resource_group, + zone=zone, + disk=disk_name, + ) - wait_for_extended_operation(operation, f"disk deletion for {disk_name}") - logger.info(f"Disk {disk_name} deleted successfully") + wait_for_extended_operation(operation, f"disk deletion for {disk_name}") + logger.info(f"Disk {disk_name} deleted successfully") + except gcp_exceptions.NotFound: + logger.info( + f"Disk {disk_name} not found - likely already deleted by Google automatically" + ) @classmethod def upload_disk(cls, config: DeployConfigs, image_path: Path) -> None: @@ -761,7 +775,7 @@ def create_data_disk( disk_name: str, location: str, size_gb: int, - sku: str = "pd-ssd", + sku: str = DEFAULT_DISK_TYPE, show_logs: bool = False, ) -> 
None: """Create a data disk for persistent storage. @@ -792,85 +806,30 @@ def create_data_disk( logger.info(f"Data disk {disk_name} created successfully") @classmethod - def attach_data_disk( - cls, - resource_group: str, - vm_name: str, - disk_name: str, - zone: str, - lun: int = 10, - show_logs: bool = False, - ) -> None: - """Attach a data disk to a VM. - - Args: - zone: For GCP, the zone where the VM and disk are located. - """ - logger.info(f"Attaching data disk {disk_name} to {vm_name}") - - instance_client = compute_v1.InstancesClient() - - attached_disk = compute_v1.AttachedDisk() - disk_path = f"projects/{resource_group}/zones/{zone}/disks/" - attached_disk.source = f"{disk_path}{disk_name}" - attached_disk.auto_delete = False - - operation = instance_client.attach_disk( - project=resource_group, - zone=zone, - instance=vm_name, - attached_disk_resource=attached_disk, - ) - - wait_for_extended_operation( - operation, f"disk attachment for {disk_name}" - ) - logger.info(f"Disk {disk_name} attached to {vm_name} successfully") - - @classmethod - def create_user_data_file(cls, config: DeployConfigs) -> str: - """Create temporary user data file.""" - fd, temp_file = tempfile.mkstemp(suffix=".yaml") - try: - with os.fdopen(fd, "w") as f: - f.write(f'CERTBOT_EMAIL="{config.email}"\n') - f.write(f'RECORD_NAME="{config.domain.record}"\n') - f.write(f'DOMAIN="{config.domain.name}"\n') - - logger.info(f"Created temporary user-data file: {temp_file}") - with open(temp_file) as f: - logger.info(f.read()) - - return temp_file - except: - os.close(fd) - raise - - @classmethod - def create_vm_simple( + def create_vm( cls, - vm_name: str, - vm_size: str, - resource_group: str, - location: str, - os_disk_name: str, - nsg_name: str, + config: DeployConfigs, + image_path: Path, ip_name: str, - show_logs: bool = False, + disk_name: str, + data_disk_name: str | None = None, ) -> None: - """Create a confidential VM without user-data. - + """Create the virtual machine with user-data. 
Args: - location: For GCP, this should be the zone (e.g., 'us-central1-a') + config: Deployment configuration + image_path: Path to the image file + ip_name: Name of the IP address + disk_name: Sanitized disk name to use for the VM + data_disk_name: Optional name of the persistent data disk """ - logger.info("Creating TDX-enabled confidential VM...") + logger.info("Booting VM...") instance_client = compute_v1.InstancesClient() # Configure network interface with external IP network_interface = compute_v1.NetworkInterface() network_interface.network = ( - f"projects/{resource_group}/global/networks/default" + f"projects/{config.vm.resource_group}/global/networks/default" ) network_interface.stack_type = "IPV4_ONLY" network_interface.nic_type = DEFAULT_NIC_TYPE @@ -882,27 +841,48 @@ def create_vm_simple( # Get the reserved IP address if ip_name is provided if ip_name: - reserved_ip = cls.get_existing_public_ip(ip_name, resource_group) + reserved_ip = cls.get_existing_public_ip( + ip_name, config.vm.resource_group + ) if reserved_ip: access_config.nat_i_p = reserved_ip logger.info(f"Using reserved IP: {reserved_ip}") else: logger.warning( - f"Reserved IP {ip_name} not found, using ephemeral IP" + f"Reserved IP {ip_name} not found, " + "using ephemeral IP" ) network_interface.access_configs = [access_config] - # Configure attached disk - attached_disk = compute_v1.AttachedDisk() - attached_disk.boot = True - attached_disk.auto_delete = True - attached_disk.mode = "READ_WRITE" - attached_disk.device_name = vm_name - attached_disk.source = ( - f"projects/{resource_group}/zones/{location}/disks/{os_disk_name}" + # Configure boot disk + boot_disk = compute_v1.AttachedDisk() + boot_disk.boot = True + boot_disk.auto_delete = True + boot_disk.mode = "READ_WRITE" + boot_disk.interface = "NVME" + boot_disk.device_name = config.vm.name + boot_disk.source = ( + f"projects/{config.vm.resource_group}/zones/" + f"{config.vm.location}/disks/{disk_name}" ) + disks = [boot_disk] + + # Configure and add data disk if provided + if data_disk_name: + logger.info(f"Attaching data disk {data_disk_name} at creation") + data_disk = compute_v1.AttachedDisk() + data_disk.source = ( + f"projects/{config.vm.resource_group}/zones/" + f"{config.vm.location}/disks/{data_disk_name}" + ) + data_disk.auto_delete = False + data_disk.device_name = data_disk_name + # Use NVME for data disks on GCP + data_disk.interface = "NVME" + disks.append(data_disk) + # Configure shielded instance config shielded_config = compute_v1.ShieldedInstanceConfig() shielded_config.enable_secure_boot = False @@ -911,6 +891,7 @@ def create_vm_simple( # Configure confidential instance config confidential_config = compute_v1.ConfidentialInstanceConfig() + confidential_config.enable_confidential_compute = True confidential_config.confidential_instance_type = "TDX" # Configure scheduling @@ -920,146 +901,42 @@ def create_vm_simple( # Configure network tags for firewall rules tags = compute_v1.Tags() - tags.items = [vm_name] + tags.items = [config.vm.name] + + # Configure metadata for serial port + metadata = compute_v1.Metadata() + metadata_items = [] + + # Enable serial port + serial_port_item = compute_v1.Items() + serial_port_item.key = "serial-port-enable" + serial_port_item.value = "TRUE" + metadata_items.append(serial_port_item) + + metadata.items = metadata_items # Create instance instance = compute_v1.Instance() - instance.name = vm_name - instance.machine_type = f"zones/{location}/machineTypes/{vm_size}" + instance.name = config.vm.name + 
instance.machine_type = ( + f"zones/{config.vm.location}/machineTypes/{config.vm.size}" + ) instance.network_interfaces = [network_interface] - instance.disks = [attached_disk] + instance.disks = disks instance.shielded_instance_config = shielded_config instance.confidential_instance_config = confidential_config instance.scheduling = scheduling instance.tags = tags + instance.metadata = metadata operation = instance_client.insert( - project=resource_group, - zone=location, + project=config.vm.resource_group, + zone=config.vm.location, instance_resource=instance, ) wait_for_extended_operation(operation, "VM creation") - logger.info(f"VM {vm_name} created successfully") - - @classmethod - def create_vm( - cls, - config: DeployConfigs, - image_path: Path, - ip_name: str, - disk_name: str, - ) -> None: - """Create the virtual machine with user-data. - - Args: - config: Deployment configuration - image_path: Path to the image file - ip_name: Name of the IP address - disk_name: Sanitized disk name to use for the VM - """ - user_data_file = cls.create_user_data_file(config) - - try: - logger.info("Booting VM...") - - instance_client = compute_v1.InstancesClient() - - # Read user data content - with open(user_data_file) as f: - user_data_content = f.read() - - # Configure network interface with external IP - network_interface = compute_v1.NetworkInterface() - network_interface.network = ( - f"projects/{config.vm.resource_group}/global/networks/default" - ) - network_interface.stack_type = "IPV4_ONLY" - network_interface.nic_type = DEFAULT_NIC_TYPE - - # Add access config for external IP - access_config = compute_v1.AccessConfig() - access_config.name = "External NAT" - access_config.type_ = "ONE_TO_ONE_NAT" - - # Get the reserved IP address if ip_name is provided - if ip_name: - reserved_ip = cls.get_existing_public_ip( - ip_name, config.vm.resource_group - ) - if reserved_ip: - access_config.nat_i_p = reserved_ip - logger.info(f"Using reserved IP: {reserved_ip}") - else: - logger.warning( - f"Reserved IP {ip_name} not found, " - "using ephemeral IP" - ) - - network_interface.access_configs = [access_config] - - # Configure attached disk - attached_disk = compute_v1.AttachedDisk() - attached_disk.boot = True - attached_disk.auto_delete = True - attached_disk.mode = "READ_WRITE" - attached_disk.device_name = config.vm.name - attached_disk.source = ( - f"projects/{config.vm.resource_group}/zones/" - f"{config.vm.location}/disks/{disk_name}" - ) - - # Configure shielded instance config - shielded_config = compute_v1.ShieldedInstanceConfig() - shielded_config.enable_secure_boot = False - shielded_config.enable_vtpm = True - shielded_config.enable_integrity_monitoring = True - - # Configure confidential instance config - confidential_config = compute_v1.ConfidentialInstanceConfig() - confidential_config.confidential_instance_type = "TDX" - - # Configure scheduling - scheduling = compute_v1.Scheduling() - scheduling.on_host_maintenance = "TERMINATE" - scheduling.provisioning_model = DEFAULT_PROVISIONING_MODEL - - # Configure metadata with user-data - metadata = compute_v1.Metadata() - metadata_item = compute_v1.Items() - metadata_item.key = "user-data" - metadata_item.value = user_data_content - metadata.items = [metadata_item] - - # Configure network tags for firewall rules - tags = compute_v1.Tags() - tags.items = [config.vm.name] - - # Create instance - instance = compute_v1.Instance() - instance.name = config.vm.name - instance.machine_type = ( - 
f"zones/{config.vm.location}/machineTypes/{config.vm.size}" - ) - instance.network_interfaces = [network_interface] - instance.disks = [attached_disk] - instance.shielded_instance_config = shielded_config - instance.confidential_instance_config = confidential_config - instance.scheduling = scheduling - instance.metadata = metadata - instance.tags = tags - - operation = instance_client.insert( - project=config.vm.resource_group, - zone=config.vm.location, - instance_resource=instance, - ) - - wait_for_extended_operation(operation, "VM creation") - logger.info(f"VM {config.vm.name} created successfully") - finally: - os.unlink(user_data_file) - logger.info(f"Deleted temporary user-data file: {user_data_file}") + logger.info(f"VM {config.vm.name} created successfully") @classmethod def get_vm_ip(cls, vm_name: str, resource_group: str, location: str) -> str: diff --git a/yocto/config/mode.py b/yocto/config/mode.py index 083f8ad3..85c8219b 100644 --- a/yocto/config/mode.py +++ b/yocto/config/mode.py @@ -15,7 +15,8 @@ class Mode: @staticmethod def from_args(args: argparse.Namespace, home: str) -> "Mode": - # For delete_artifact, use dev flag if available (e.g., when deleting by timestamp) + # For delete_artifact, use dev flag if available + # (e.g., when deleting by timestamp) dev = getattr(args, "dev", False) mode = Mode( build=args.build, diff --git a/yocto/deployment/deploy.py b/yocto/deployment/deploy.py index 77215a27..71c267ee 100644 --- a/yocto/deployment/deploy.py +++ b/yocto/deployment/deploy.py @@ -26,11 +26,9 @@ def delete_vm(vm_name: str, home: str) -> bool: # Search for VM in all clouds meta = None - cloud_str = None - for cloud_key, cloud_resources in resources.items(): + for _, cloud_resources in resources.items(): if vm_name in cloud_resources: meta = cloud_resources[vm_name] - cloud_str = cloud_key break if not meta: @@ -77,9 +75,6 @@ def deploy_image( cloud_api.create_nsg(configs) cloud_api.create_standard_nsg_rules(configs) - # Actually create the VM - cloud_api.create_vm(configs, image_path, ip_name, disk_name) - # Create and attach persistent data disk at LUN 10 (required by tdx-init) data_disk_name = f"{configs.vm.name}-persistent" logger.info(f"Creating persistent data disk: {data_disk_name}") @@ -88,18 +83,26 @@ def deploy_image( disk_name=data_disk_name, location=configs.vm.location, size_gb=1024, # 1TB default - sku="Premium_LRS", - show_logs=configs.show_logs, - ) - cloud_api.attach_data_disk( - resource_group=configs.vm.resource_group, - vm_name=configs.vm.name, - disk_name=data_disk_name, - zone=configs.vm.location, # Not used by Azure but required by API - lun=10, # MUST be LUN 10 for tdx-init + sku="pd-balanced", # Required for GCP TDX show_logs=configs.show_logs, ) + # Actually create the VM + if configs.vm.cloud == CloudProvider.GCP: + # For GCP, attach the data disk at creation to avoid hot-plug issues + cloud_api.create_vm(configs, image_path, ip_name, disk_name, data_disk_name) + else: + # For Azure, create the VM and then attach the disk + cloud_api.create_vm(configs, image_path, ip_name, disk_name) + cloud_api.attach_data_disk( + resource_group=configs.vm.resource_group, + vm_name=configs.vm.name, + disk_name=data_disk_name, + zone=configs.vm.location, # Not used by Azure but required by API + lun=10, # MUST be LUN 10 for tdx-init + show_logs=configs.show_logs, + ) + # Get the VM's IP address public_ip = cloud_api.get_vm_ip( vm_name=configs.vm.name, diff --git a/yocto/deployment/deploy_bob.py b/yocto/deployment/deploy_bob.py index f39a8380..219f4e9c 100755 
--- a/yocto/deployment/deploy_bob.py +++ b/yocto/deployment/deploy_bob.py @@ -250,7 +250,10 @@ def print_next_steps( logger.info("\n⚠️ DEV MODE - SSH Access Enabled:") logger.info(f" ssh root@{ip_address}") logger.info(" Password: dqSPjo4p") - logger.info("\n Note: This is a development image with debugging tools enabled.") + logger.info( + "\n Note: This is a development image with debugging " + "tools enabled." + ) logger.info("\nNext Steps:") logger.info( @@ -386,10 +389,13 @@ def main(): ip_address = deploy_bob_vm(config, vhd_path, args.data_disk_size) - # Check if this is a dev build by looking for "-dev-" in the artifact name + # Check if this is a dev build by looking for "-dev-" in the + # artifact name is_dev = "-dev-" in args.artifact - print_next_steps(config.vm_name, ip_address, config.resource_group, is_dev) + print_next_steps( + config.vm_name, ip_address, config.resource_group, is_dev + ) except Exception as e: logger.error(f"Deployment failed: {e}") diff --git a/yocto/deployment/validators.py b/yocto/deployment/validators.py index d2f4df98..3ed4bdeb 100644 --- a/yocto/deployment/validators.py +++ b/yocto/deployment/validators.py @@ -10,7 +10,6 @@ from yocto.utils.metadata import load_metadata from yocto.utils.summit_client import SummitClient - _ANVIL_ADDRESSES = [ "0xf39Fd6e51aad88F6F4ce6aB8827279cffFb92266", "0x70997970C51812dc3A010C7d01b50e0d17dc79C8", @@ -46,7 +45,10 @@ def _parse_args() -> argparse.Namespace: "--node", nargs="*", type=int, - help="Specific node numbers (e.g., --node 23 24 25). Overrides -n/--nodes if provided.", + help=( + "Specific node numbers (e.g., --node 23 24 25). " + "Overrides -n/--nodes if provided." + ), ) parser.add_argument( "--code-path", @@ -95,7 +97,9 @@ def _get_pubkeys( "node_public_key": pubkeys.node, "consensus_public_key": pubkeys.consensus, "ip_address": f"{ip_address}:{CONSENSUS_PORT}", - "withdrawal_credentials": _ANVIL_ADDRESSES[i % len(_ANVIL_ADDRESSES)], + "withdrawal_credentials": _ANVIL_ADDRESSES[ + i % len(_ANVIL_ADDRESSES) + ], } ) node_to_pubkey[node] = pubkeys.node @@ -113,7 +117,7 @@ def main(): if args.node: node_numbers = args.node elif args.nodes == 0: - raise ValueError(f'Must provide --node or --nodes ') + raise ValueError('Must provide --node or --nodes ') else: node_numbers = list(range(1, args.nodes + 1)) diff --git a/yocto/genesis_deploy.py b/yocto/genesis_deploy.py index 4f7004e6..d00509a4 100755 --- a/yocto/genesis_deploy.py +++ b/yocto/genesis_deploy.py @@ -164,15 +164,18 @@ def deploy_genesis_vm(args: DeploymentConfig) -> None: print(f"IP Address: {ip_address}") print(f"Domain: {deploy_cfg.domain.record}.{deploy_cfg.domain.name}") print("\nNext steps:") - print(f"1. Register SSH key and domain config (port 8080):") - print(f" curl -X POST http://{ip_address}:8080 -H 'Content-Type: application/json' -d '{payload_json}'") - print(f"\n2. Nginx with SSL will automatically set up after initialization") - print(f" Endpoints will be available at:") + print("1. Register SSH key and domain config (port 8080):") + print( + f" curl -X POST http://{ip_address}:8080 " + f"-H 'Content-Type: application/json' -d '{payload_json}'" + ) + print("\n2. Nginx with SSL will automatically set up after initialization") + print(" Endpoints will be available at:") print(f" https://{deploy_cfg.domain.record}.{deploy_cfg.domain.name}/rpc") print(f" https://{deploy_cfg.domain.record}.{deploy_cfg.domain.name}/ws") print(f" https://{deploy_cfg.domain.record}.{deploy_cfg.domain.name}/summit") if args.dev: - print(f"\n3. 
SSH access uses dropbear (from bob-common) on port 22") + print("\n3. SSH access uses dropbear (from bob-common) on port 22") print(" NOTE: Seismic uses bob-common's SSH setup:") print(" - Production: key-only auth, no root, no password") print("\n ⚠️ DEV MODE - SSH Root Access Enabled:") @@ -202,7 +205,10 @@ def parse_genesis_args(): parser.add_argument( "--name", type=str, - help="Manual VM name override (default: cloud-specific prefix + node number)", + help=( + "Manual VM name override " + "(default: cloud-specific prefix + node number)" + ), ) parser.add_argument( "--peers", diff --git a/yocto/image/build.py b/yocto/image/build.py index 20eebfc7..bba48f05 100644 --- a/yocto/image/build.py +++ b/yocto/image/build.py @@ -4,6 +4,7 @@ from dataclasses import dataclass from pathlib import Path +from yocto.cloud.cloud_config import CloudProvider from yocto.config import BuildConfigs, Configs from yocto.image.git import GitConfigs, update_git_mkosi_batch from yocto.image.measurements import Measurements, generate_measurements @@ -34,14 +35,15 @@ def build_image( Args: home: Home directory path image_name: Image name (default: "seismic") - profile: Build profile - "azure", "gcp", or None for baremetal/no profile + profile: Build profile - "azure", "gcp", or None for + baremetal/no profile dev: Whether to build dev version capture_output: Whether to capture build output Returns: Path to the built image """ - flashbots_images_path = BuildPaths(home).flashbots_images + flashbots_images_path = BuildPaths(home).seismic_images if not flashbots_images_path.exists(): raise FileNotFoundError( f"flashbots-images path not found: {flashbots_images_path}" @@ -60,6 +62,7 @@ def build_image( build_cmd = " && ".join( [f"cd {flashbots_images_path}", f"{env_vars} make {make_target}"] ) + print(build_cmd) build_result = subprocess.run( build_cmd, shell=True, @@ -75,8 +78,6 @@ def build_image( raise RuntimeError(f"Image build failed: {err}") # Find the latest built image based on profile - from yocto.cloud.cloud_config import CloudProvider - if profile == "azure": cloud = CloudProvider.AZURE elif profile == "gcp": @@ -86,10 +87,14 @@ def build_image( else: cloud = None # Bare metal - artifact_pattern = BuildPaths.artifact_pattern(cloud, dev) if cloud else f"{image_name}-*.efi" + artifact_pattern = ( + BuildPaths.artifact_pattern(cloud, dev) + if cloud + else f"{image_name}-*.efi" + ) find_cmd = f""" - find {BuildPaths(home).artifacts} \ + find {BuildPaths(home).artifacts / cloud} \ -name '{artifact_pattern}' \ -type f -printf '%T@ %p\n' | sort -n | tail -1 | cut -f2- -d" " """ @@ -104,7 +109,9 @@ def build_image( image_path_str = find_result.stdout.strip() if not image_path_str: - raise FileNotFoundError(f"No image file found matching: {artifact_pattern}") + raise FileNotFoundError( + f"No image file found matching: {artifact_pattern}" + ) ts = artifact_timestamp(image_path_str) if ( diff --git a/yocto/image/git.py b/yocto/image/git.py index 8e9830f9..dca4afcb 100644 --- a/yocto/image/git.py +++ b/yocto/image/git.py @@ -8,63 +8,35 @@ logger = logging.getLogger(__name__) +GitCommit = str | None -@dataclass -class GitConfig: - commit: str | None - branch: str - @staticmethod - def from_args(args: Namespace, repo: str) -> "GitConfig": - values = vars(args) - return GitConfig( - commit=values[f"{repo}_commit"], branch=values[f"{repo}_branch"] - ) - - def to_dict(self) -> dict[str, str]: - # if not self.commit: - # raise ValueError( - # "Cannot call to_dict() on GitConfig without commit" - # ) - return { - "branch": 
self.branch, - "commit": self.commit, - } - - @staticmethod - def branch_only(branch: str) -> "GitConfig": - return GitConfig(commit=None, branch=branch) +def commit_from_args(args: Namespace, repo: str) -> GitCommit: + values = vars(args) + return values[f"{repo}_commit"] @dataclass class GitConfigs: - enclave: GitConfig - sreth: GitConfig - summit: GitConfig + enclave: GitCommit + sreth: GitCommit + summit: GitCommit @staticmethod def from_args(args: Namespace) -> "GitConfigs": return GitConfigs( - enclave=GitConfig.from_args(args, "enclave"), - sreth=GitConfig.from_args(args, "sreth"), - summit=GitConfig.from_args(args, "summit"), + enclave=commit_from_args(args, "enclave"), + sreth=commit_from_args(args, "sreth"), + summit=commit_from_args(args, "summit"), ) def to_dict(self): return { - "enclave": self.enclave.to_dict(), - "sreth": self.sreth.to_dict(), - "summit": self.summit.to_dict(), + "enclave": self.enclave, + "sreth": self.sreth, + "summit": self.summit, } - @staticmethod - def default() -> "GitConfigs": - return GitConfigs( - enclave=GitConfig.branch_only("seismic"), - sreth=GitConfig.branch_only("seismic"), - summit=GitConfig.branch_only("main"), - ) - def run_command( cmd: str, cwd: Path | None = None @@ -100,7 +72,8 @@ def _extract_commit_from_mkosi(build_file: Path, package_name: str) -> str: Args: build_file: Path to mkosi.build file - package_name: Package name (e.g., "summit", "seismic-reth", "seismic-enclave-server") + package_name: Package name (e.g., "summit", "seismic-reth", + "seismic-enclave-server") Returns: The commit hash as a string @@ -130,72 +103,32 @@ def _extract_commit_from_mkosi(build_file: Path, package_name: str) -> str: return result -def _extract_branch_from_mkosi(build_file: Path, package_name: str) -> str: - """Extract branch name from mkosi.build file for a given package. - - Args: - build_file: Path to mkosi.build file - package_name: Package name (e.g., "summit", "seismic-reth", "seismic-enclave-server") - - Returns: - The branch name as a string - - The format in mkosi.build is: - RETH_BRANCH="seismic" - """ - # Map package names to variable prefixes - package_var_map = { - "seismic-reth": "RETH", - "seismic-enclave-server": "ENCLAVE", - "summit": "SUMMIT", - } - - var_prefix = package_var_map.get(package_name) - if not var_prefix: - raise ValueError(f"Unknown package name: {package_name}") - - branch_var = f"{var_prefix}_BRANCH" - cmd = f"""grep '^{branch_var}=' {build_file} | cut -d'"' -f2""" - result = _extract(cmd, f"{package_name} branch") - if not result: - raise ValueError( - f"Failed to extract branch for {package_name}. " - f"Got: '{result}'" - ) - return result - - -def _extract_branch(bb_path: Path) -> str: - """Legacy function - branches are not stored in mkosi.build.""" - # For mkosi builds, we don't track branches separately - # The commit hash is what matters - return "main" - - def update_git_mkosi_batch( - updates: dict[str, GitConfig], + updates: dict[str, GitCommit], home: str, commit_message: str | None = None, -) -> dict[str, GitConfig]: +) -> dict[str, GitCommit]: """ - Update git commits for multiple packages in seismic/mkosi.build in a single commit. + Update git commits for multiple packages in seismic/mkosi.build in a + single commit. 
Args: - updates: Dict mapping package name to GitConfig - (e.g., {"summit": GitConfig(...), "seismic-reth": GitConfig(...)}) + updates: Dict mapping package name to commit hash + (e.g., {"summit": "3720ab4...", + "seismic-reth": "3720ab4..."}) home: Home directory path commit_message: Optional custom commit message Returns: - Dict mapping package names to their final GitConfig + Dict mapping package names to their final commit hash """ paths = BuildPaths(home) - build_file = paths.flashbots_images / "seismic" / "mkosi.build" + build_file = paths.seismic_images / "seismic" / "mkosi.build" - if not paths.flashbots_images.exists(): + if not paths.seismic_images.exists(): raise FileNotFoundError( - f"flashbots-images path not found: {paths.flashbots_images}" + f"flashbots-images path not found: {paths.seismic_images}" ) if not build_file.exists(): @@ -212,72 +145,71 @@ def update_git_mkosi_batch( results = {} packages_to_update = [] - for package_name, git_config in updates.items(): - if git_config.commit is None: + for package_name, git_commit in updates.items(): + if git_commit is None: # No commit specified, use current - current_commit = _extract_commit_from_mkosi(build_file, package_name) - current_branch = _extract_branch_from_mkosi(build_file, package_name) - current_git = GitConfig( - commit=current_commit, - branch=git_config.branch or current_branch, + current_commit = _extract_commit_from_mkosi( + build_file=build_file, + package_name=package_name, ) logger.info( f"No git commit provided for {package_name}. " - f"Using current git state {current_git.branch}#{current_git.commit}" + f"Using current git commit " + f"{current_commit}" ) - results[package_name] = current_git + results[package_name] = current_commit else: # Mark for update - packages_to_update.append((package_name, git_config)) - results[package_name] = git_config + packages_to_update.append((package_name, git_commit)) + results[package_name] = git_commit # If nothing to update, return early if not packages_to_update: logger.info("No packages to update") return results - logger.info(f"Updating {len(packages_to_update)} packages in {build_file.name}...") + logger.info( + f"Updating {len(packages_to_update)} packages in " + f"{build_file.name}..." 
+ ) # Update all packages in one pass - for package_name, git_config in packages_to_update: - logger.info(f" - {package_name} → {git_config.branch}#{git_config.commit[:8]}") + for package_name, git_commit in packages_to_update: + logger.info(f" - {package_name} @ {git_commit[:8]}") var_prefix = package_var_map.get(package_name) if not var_prefix: raise ValueError(f"Unknown package name: {package_name}") - # Update branch variable (e.g., RETH_BRANCH="seismic") - branch_var = f"{var_prefix}_BRANCH" - branch_update_cmd = f""" - sed -i 's/^{branch_var}=.*$/{branch_var}="{git_config.branch}"/' {build_file} - """ - run_command(branch_update_cmd, cwd=paths.flashbots_images) - # Update commit variable (e.g., RETH_COMMIT="abc123...") commit_var = f"{var_prefix}_COMMIT" - commit_update_cmd = f""" - sed -i 's/^{commit_var}=.*$/{commit_var}="{git_config.commit}"/' {build_file} - """ - run_command(commit_update_cmd, cwd=paths.flashbots_images) + commit_update_cmd = ( + f"sed -i 's|^{commit_var}=.*$|{commit_var}=" + f'"{git_commit}"|\' {build_file}' + ) + run_command(commit_update_cmd, cwd=paths.seismic_images) logger.info("All packages updated in file") # Stage the file - run_command(f"git add seismic/mkosi.build", cwd=paths.flashbots_images) + run_command("git add seismic/mkosi.build", cwd=paths.seismic_images) - # Check if there are changes to commit + # Check if there are staged changes to commit status_result = run_command( - "git status --porcelain", cwd=paths.flashbots_images + cmd="git diff --cached --name-only", + cwd=paths.seismic_images, ) if status_result.stdout.strip(): logger.info("Changes detected, committing...") if not commit_message: package_names = ", ".join([name for name, _ in packages_to_update]) commit_message = f"Update commit hashes for {package_names}" - run_command(f'git commit -m "{commit_message}"', cwd=paths.flashbots_images) + run_command( + f'git commit -m "{commit_message}"', cwd=paths.seismic_images + ) logger.info("Committed changes") - run_command("git push", cwd=paths.flashbots_images) + run_command("git push", cwd=paths.seismic_images) logger.info("Successfully pushed changes") else: logger.info("No changes to commit") @@ -288,14 +220,15 @@ def update_git_mkosi_batch( def update_git_mkosi( package_name: str, - git_config: GitConfig, + git_config: GitCommit, home: str, commit_message: str | None = None, -) -> GitConfig: +) -> GitCommit: """ Update the git commit for a single package in seismic/mkosi.build. - For batch updates of multiple packages, use update_git_mkosi_batch() instead. + For batch updates of multiple packages, use update_git_mkosi_batch() + instead. """ results = update_git_mkosi_batch( {package_name: git_config}, @@ -303,30 +236,3 @@ def update_git_mkosi( commit_message, ) return results[package_name] - - -# Keep old function name for backwards compatibility, but delegate to new one -def update_git_bb( - bb_pathname: str, - git_config: GitConfig, - home: str, - commit_message: str | None = None, -) -> GitConfig: - """Legacy wrapper for update_git_mkosi. 
- - Maps old bb_pathname to package names: - - recipes-nodes/enclave/enclave.bb -> seismic-enclave-server - - recipes-nodes/reth/reth.bb -> seismic-reth - - recipes-nodes/summit/summit.bb -> summit - """ - package_map = { - "recipes-nodes/enclave/enclave.bb": "seismic-enclave-server", - "recipes-nodes/reth/reth.bb": "seismic-reth", - "recipes-nodes/summit/summit.bb": "summit", - } - - package_name = package_map.get(bb_pathname) - if not package_name: - raise ValueError(f"Unknown bb_pathname: {bb_pathname}") - - return update_git_mkosi(package_name, git_config, home, commit_message) diff --git a/yocto/image/measurements.py b/yocto/image/measurements.py old mode 100644 new mode 100755 index 7b4865af..6007a4a2 --- a/yocto/image/measurements.py +++ b/yocto/image/measurements.py @@ -1,12 +1,16 @@ +#!/usr/bin/env python3 +import argparse import json import logging -import os +import re import subprocess +import sys import tempfile from pathlib import Path from typing import Any from yocto.utils.paths import BuildPaths +from yocto.cloud.cloud_config import CloudProvider logger = logging.getLogger(__name__) @@ -20,28 +24,88 @@ def write_measurements_tmpfile(measurements: Measurements) -> Path: return measurements_tmpfile +def parse_gcp_measurements(measurements_output: Path, result: subprocess.CompletedProcess): + # Parse stdout and extract only the JSON part (first valid JSON object) + stdout = result.stdout.strip() + + # Try to parse as JSON directly first (in case output is clean) + try: + measurements_data = json.loads(stdout) + json_str = json.dumps(measurements_data, indent=2) + except json.JSONDecodeError: + # If that fails, extract JSON by tracking brace balance + # This handles cases where Lima VM messages are mixed with output + json_lines = [] + brace_count = 0 + in_json = False + + for line in stdout.split('\n'): + stripped = line.strip() + # Look for the start of a JSON object + if not in_json and '{' in stripped: + in_json = True + # Find where the brace starts and slice from there + brace_pos = line.index('{') + line = line[brace_pos:] + + if in_json: + json_lines.append(line) + # Count braces on this line + brace_count += line.count('{') - line.count('}') + + # If brace count is 0, we've completed the JSON object + if brace_count == 0: + break + + if not json_lines: + raise RuntimeError( + f"Could not find JSON in dstack-mr output:\n{stdout}" + ) + + json_str = '\n'.join(json_lines) + # Validate it's valid JSON + try: + json.loads(json_str) + except json.JSONDecodeError as e: + raise RuntimeError( + f"Extracted invalid JSON from dstack-mr output:\n{json_str}\n\nError: {e}" + ) + + measurements_output.parent.mkdir(parents=True, exist_ok=True) + measurements_output.write_text(json_str) + + def generate_measurements(image_path: Path, home: str) -> Measurements: """Generate measurements for TDX boot process using make measure""" paths = BuildPaths(home) + + # Resolve to absolute path + image_path = image_path.resolve() + if not image_path.exists(): raise FileNotFoundError(f"Image path not found: {image_path}") - # For mkosi builds, we need the .efi file for measurements, not .vhd + # For mkosi builds, we need the .efi file for measurements, not .vhd/.tar.gz efi_path = image_path - if image_path.suffix in [".vhd", ".tar.gz"]: + if image_path.suffix == ".vhd" or image_path.name.endswith(".tar.gz"): # Look for the corresponding .efi file # Pattern: seismic-dev-azure-TIMESTAMP.vhd -> seismic-dev-azure-TIMESTAMP.efi - efi_path = image_path.with_suffix(".efi") + # Pattern: 
seismic-dev-gcp-TIMESTAMP.tar.gz -> seismic-dev-gcp-TIMESTAMP.efi + if image_path.name.endswith(".tar.gz"): + efi_path = Path(str(image_path)[:-7] + ".efi") # Remove .tar.gz, add .efi + else: + efi_path = image_path.with_suffix(".efi") + if not efi_path.exists(): raise FileNotFoundError( f"EFI file not found for {image_path.name}. " f"Expected: {efi_path}" ) - if not paths.flashbots_images.exists(): + if not paths.seismic_images.exists(): raise FileNotFoundError( - f"flashbots-images path not found: {paths.flashbots_images}" + f"flashbots-images path not found: {paths.seismic_images}" ) logger.info(f"Generating measurements for: {efi_path.name}") @@ -50,48 +114,172 @@ def generate_measurements(image_path: Path, home: str) -> Measurements: # Extract image name from path (e.g., seismic from seismic-dev-azure-*.efi) image_name = efi_path.name.split("-")[0] - # Use the same command as make measure, but with our specific EFI file - # This is what make measure does internally: - # $(WRAPPER) measured-boot "$$EFI_FILE" build/measurements.json --direct-uki - # - # Important: env_wrapper.sh runs in Lima VM where flashbots-images is mounted at ~/mnt - # So we need to use relative paths from flashbots-images root - wrapper_script = paths.flashbots_images / "scripts" / "env_wrapper.sh" + # Detect cloud provider from filename (e.g., seismic-dev-gcp-*.efi) + cloud_provider = CloudProvider.from_string(efi_path.name) - # Get relative path from flashbots-images root (e.g., "build/seismic-dev-azure-*.efi") - efi_relative = efi_path.relative_to(paths.flashbots_images) - measurements_relative = "build/measurements.json" + # Important: env_wrapper.sh runs in Lima VM where flashbots-images is + # mounted at ~/mnt. So we need to use relative paths from + # flashbots-images root + wrapper_script = paths.seismic_images / "scripts" / "env_wrapper.sh" - measure_cmd = f""" - cd {paths.flashbots_images} && - IMAGE={image_name} {wrapper_script} measured-boot "{efi_relative}" {measurements_relative} --direct-uki - """ + # Get relative path from flashbots-images root + # (e.g., "build/gcp/seismic-dev-gcp-*.efi" or "build/seismic-dev-azure-*.efi") + efi_relative = efi_path.relative_to(paths.seismic_images) + + # Generate timestamped output filename + # Extract timestamp from filename (e.g., 20251204212823 from seismic-dev-gcp-20251204212823.efi) + timestamp_match = re.search(r'-(\d{14})\.', efi_path.name) + timestamp = timestamp_match.group(1) if timestamp_match else "latest" + + if cloud_provider.is_gcp(): + measurements_relative = f"build/gcp/measurements-{timestamp}.json" + # GCP uses dstack-mr which outputs to stdout + # We need to capture only stdout (not the Lima message), so we'll handle this differently + measure_cmd = ( + f"cd {paths.seismic_images} && " + f"IMAGE={image_name} {wrapper_script} dstack-mr " + f'-uki "{efi_relative}" -json' + ) + else: + measurements_relative = f"build/{cloud_provider.value}/measurements-{timestamp}.json" + # Azure uses measured-boot which writes to a file + measure_cmd = ( + f"cd {paths.seismic_images} && " + f"IMAGE={image_name} {wrapper_script} measured-boot " + f'"{efi_relative}" {measurements_relative} --direct-uki' + ) + + logger.info(f"Running measurement tool for {cloud_provider.value.upper()}") + logger.info(f"Output: {measurements_relative}") result = subprocess.run( measure_cmd, shell=True, capture_output=True, text=True ) if result.returncode != 0: + tool_name = "dstack-mr" if cloud_provider.is_gcp() else "measured-boot" raise RuntimeError( - f"measured-boot failed: 
{result.stderr.strip()}" + f"{tool_name} failed:\n" + f"{result.stderr.strip()}\n" + f"Command:\n{measure_cmd}" ) - # Read the generated measurements.json - measurements_output = paths.flashbots_images / measurements_relative - if not measurements_output.exists(): - raise FileNotFoundError( - f"Measurements file not generated: {measurements_output}" - ) + # For GCP, we need to manually write the stdout to file (filtering out non-JSON) + measurements_output = paths.seismic_images / measurements_relative + if cloud_provider.is_gcp(): + parse_gcp_measurements(measurements_output, result) + else: + # measured-boot writes directly to file + if not measurements_output.exists(): + raise FileNotFoundError( + f"Measurements file not generated: {measurements_output}" + ) with open(measurements_output) as f: raw_measurements = json.load(f) # Format to match expected structure + attestation_type = f"{cloud_provider}-tdx" measurements = { "measurement_id": image_path.name, - "attestation_type": "azure-tdx", + "attestation_type": attestation_type, "measurements": raw_measurements.get("measurements", raw_measurements), } - logger.info(f"Measurements generated successfully") + logger.info("Measurements generated successfully") return measurements + + +def main(): + """CLI entry point for standalone measurement generation.""" + parser = argparse.ArgumentParser( + description="Generate TDX measurements from UKI EFI files", + formatter_class=argparse.RawDescriptionHelpFormatter, + epilog=""" +Examples: + # Generate measurements for a GCP image + %(prog)s build/gcp/seismic-dev-gcp-20251204212823.tar.gz --home /home/azureuser + + # Generate measurements for an Azure image with explicit EFI path + %(prog)s build/azure/seismic-dev-azure-20251204212823.efi --home /home/azureuser + + # Override cloud provider detection + %(prog)s build/seismic.efi --home /home/azureuser --cloud gcp + + # Specify custom output path + %(prog)s build/seismic.efi --home /home/azureuser --output custom-measurements.json + """, + ) + parser.add_argument( + "image_path", + type=Path, + help="Path to image file (.efi, .vhd, or .tar.gz)", + ) + parser.add_argument( + "--code", + type=str, + default=str(Path.home()), + help=( + f"Code directory path (required for BuildPaths). 
" + f"Defaults to $HOME: {Path.home()}" + ), + ) + parser.add_argument( + "--cloud", + choices=["auto", "gcp", "azure"], + default="auto", + help="Cloud provider (default: auto-detect from filename)", + ) + parser.add_argument( + "--output", + type=Path, + help="Custom output path (default: auto-generated with timestamp)", + ) + parser.add_argument( + "-v", + "--verbose", + action="store_true", + help="Enable verbose logging", + ) + + args = parser.parse_args() + + # Setup logging + logging.basicConfig( + level=logging.DEBUG if args.verbose else logging.INFO, + format="%(levelname)s: %(message)s", + ) + + try: + # Generate measurements + measurements = generate_measurements(args.image_path, args.code) + + # If custom output specified, also write there + if args.output: + logger.info(f"Writing measurements to custom path: {args.output}") + args.output.parent.mkdir(parents=True, exist_ok=True) + with open(args.output, "w") as f: + json.dump(measurements, f, indent=2) + + logger.info(f"✓ Measurements generated successfully") + logger.info(f" Measurement ID: {measurements['measurement_id']}") + logger.info(f" Attestation Type: {measurements['attestation_type']}") + + return 0 + + except FileNotFoundError as e: + logger.error(f"File not found: {e}") + return 1 + except RuntimeError as e: + logger.error(f"Measurement generation failed: {e}") + return 1 + except Exception as e: + logger.error(f"Unexpected error: {e}") + if args.verbose: + import traceback + traceback.print_exc() + return 1 + + +if __name__ == "__main__": + sys.exit(main()) diff --git a/yocto/utils/artifact.py b/yocto/utils/artifact.py index 9150e87d..4fcd1da0 100644 --- a/yocto/utils/artifact.py +++ b/yocto/utils/artifact.py @@ -3,6 +3,7 @@ import logging import os import re +from pathlib import Path from yocto.utils.metadata import load_metadata, remove_artifact_from_metadata from yocto.utils.paths import BuildPaths @@ -49,21 +50,25 @@ def _artifact_from_timestamp( ) -> str | None: """Find artifact file by timestamp. - Searches the artifacts directory for files matching the timestamp. - Returns the filename if found, or constructs a legacy name as fallback. + Searches the artifacts directory (including subdirectories) for files + matching the timestamp. Returns the filename if found, or constructs a + legacy name as fallback. 
Args: timestamp: 14-digit timestamp string home: Home directory path - dev: If True, prefer dev artifacts (seismic-dev-*), else prefer non-dev + dev: If True, prefer dev artifacts (seismic-dev-*), else prefer + non-dev Returns: Artifact filename """ artifacts_path = BuildPaths(home).artifacts - # Search for any file with this timestamp - matches = list(glob.glob(f"{artifacts_path}/*{timestamp}*")) + # Search for any file with this timestamp in all subdirectories + matches = list( + glob.glob(f"{artifacts_path}/**/*{timestamp}*", recursive=True) + ) if matches: # Filter by dev preference if dev: @@ -77,7 +82,8 @@ def _artifact_from_timestamp( if non_dev_matches: matches = non_dev_matches - # Return the basename of the first match (preferring .vhd, .tar.gz, or .efi) + # Return the basename of the first match + # (preferring .vhd, .tar.gz, or .efi) for ext in [".vhd", ".tar.gz", ".efi"]: for match in matches: if match.endswith(ext): @@ -98,11 +104,12 @@ def parse_artifact( if len(artifact_arg) == 14: if all(a.isdigit() for a in artifact_arg): if home is None: - raise ValueError("home parameter required when parsing timestamp") + msg = "home parameter required when parsing timestamp" + raise ValueError(msg) return _artifact_from_timestamp(artifact_arg, home, dev) # Validate that it's correctly named - timestamp = _extract_timestamp(artifact_arg) + _extract_timestamp(artifact_arg) # If full artifact name provided, just return it # (it already has the correct format - either old or new) @@ -116,6 +123,42 @@ def expect_artifact(artifact_arg: str, home: str, dev: bool = False) -> str: return artifact +def get_artifact_path(artifact: str, home: str) -> Path: + """Get the full path to an artifact file. + + Args: + artifact: Artifact filename (e.g., "seismic-azure-20251121.vhd") + home: Home directory path + + Returns: + Full path to the artifact file + + Raises: + FileNotFoundError: If the artifact file doesn't exist + """ + artifacts_base = BuildPaths(home).artifacts + + # Determine subdirectory from artifact filename + if "-azure-" in artifact: + artifact_path = artifacts_base / "azure" / artifact + elif "-gcp-" in artifact: + artifact_path = artifacts_base / "gcp" / artifact + elif "-baremetal-" in artifact: + artifact_path = artifacts_base / "baremetal" / artifact + else: + msg = ( + f"Cannot determine cloud provider from artifact name: " + f"{artifact}. Expected format: " + f"seismic-[azure|gcp|baremetal]-YYYYMMDDHHMMSS." 
+ ) + raise ValueError(msg) + + if not artifact_path.exists(): + raise FileNotFoundError(f"Artifact not found: {artifact_path}") + + return artifact_path + + def delete_artifact(artifact: str, home: str): resources = load_metadata(home).get("resources", {}) @@ -141,7 +184,10 @@ def delete_artifact(artifact: str, home: str): timestamp = _extract_timestamp(artifact) artifacts_path = BuildPaths(home).artifacts files_deleted = 0 - for filepath in glob.glob(f"{artifacts_path}/*{timestamp}*"): + # Search in all subdirectories + for filepath in glob.glob( + f"{artifacts_path}/**/*{timestamp}*", recursive=True + ): os.remove(filepath) files_deleted += 1 diff --git a/yocto/utils/metadata.py b/yocto/utils/metadata.py index 67874b06..2c9ac867 100644 --- a/yocto/utils/metadata.py +++ b/yocto/utils/metadata.py @@ -51,18 +51,24 @@ def remove_artifact_from_metadata(name: str, home: str): def load_artifact_measurements( artifact: str, home: str ) -> tuple[Path, "Measurements"]: + from yocto.utils.artifact import get_artifact_path + artifacts = load_metadata(home).get("artifacts", {}) if artifact not in artifacts: metadata_path = BuildPaths(home).deploy_metadata msg = f"Could not find artifact {artifact} in {metadata_path}" raise ValueError(msg) - image_path = BuildPaths(home).artifacts / artifact - artifact = artifacts[artifact] - if not image_path.exists(): + + # Use get_artifact_path to handle subdirectory structure + try: + image_path = get_artifact_path(artifact, home) + except FileNotFoundError as e: raise FileNotFoundError( f"Artifact {artifact} is defined in the deploy metadata, " "but the corresponding file was not found on the machine" - ) + ) from e + + artifact = artifacts[artifact] return image_path, artifact["image"] diff --git a/yocto/utils/parser.py b/yocto/utils/parser.py index 85f69d67..6a1da4ae 100644 --- a/yocto/utils/parser.py +++ b/yocto/utils/parser.py @@ -30,7 +30,10 @@ def parse_args() -> argparse.Namespace: "--cloud", type=str, choices=["azure", "gcp", "ovh"], - help="Cloud provider (azure, gcp, ovh). Required for deployment, optional for build.", + help=( + "Cloud provider (azure, gcp, ovh). " + "Required for deployment, optional for build." + ), ) parser.add_argument( "--resource-group", @@ -52,14 +55,6 @@ def parse_args() -> argparse.Namespace: ) # Git args - parser.add_argument( - "--enclave-branch", - default="seismic", - help=( - "Seismic Enclave git branch name. Defaults to 'main'. " - "Only used if --enclave-commit is provided too" - ), - ) parser.add_argument( "--enclave-commit", help=( @@ -67,15 +62,6 @@ def parse_args() -> argparse.Namespace: "If not provided, does not change image" ), ) - - parser.add_argument( - "--sreth-branch", - default="seismic", - help=( - "Seismic Reth git branch name. Defaults to 'seismic'. " - "Only used if --sreth-commit is provided too" - ), - ) parser.add_argument( "--sreth-commit", help=( @@ -83,15 +69,6 @@ def parse_args() -> argparse.Namespace: "If not provided, does not change image" ), ) - - parser.add_argument( - "--summit-branch", - default="main", - help=( - "Summit git branch name. Defaults to 'main'. " - "Only used if --summit-commit is provided too" - ), - ) parser.add_argument( "--summit-commit", help=("Summit git commit hash. 
If not provided, does not change image"), diff --git a/yocto/utils/paths.py b/yocto/utils/paths.py index c5f762ca..4acb3af9 100644 --- a/yocto/utils/paths.py +++ b/yocto/utils/paths.py @@ -11,16 +11,12 @@ def __init__(self, home: str): self.home = Path(home) @property - def yocto_manifests(self) -> Path: - return self.home / "yocto-manifests" - - @property - def flashbots_images(self) -> Path: - return self.home / "flashbots-images" + def seismic_images(self) -> Path: + return self.home / "seismic-images" @property def artifacts(self) -> Path: - return self.flashbots_images / "build" + return self.seismic_images / "build" @staticmethod def artifact_pattern(cloud: "CloudProvider", dev: bool = False) -> str: @@ -31,21 +27,20 @@ def artifact_pattern(cloud: "CloudProvider", dev: bool = False) -> str: dev: Whether this is a dev build Returns: - Glob pattern like "seismic-dev-azure-*.vhd" or "seismic-gcp-*.tar.gz" + Glob pattern like "seismic-dev-azure-*.vhd" or + "seismic-gcp-*.tar.gz" (basename only; artifacts live in + per-cloud subdirectories of build/) """ prefix = "seismic-dev" if dev else "seismic" if cloud == CloudProvider.AZURE: - # Dev builds include devtools profile, resulting in comma-separated profiles - # e.g., seismic-dev-azure,devtools-timestamp.vhd or seismic-azure-timestamp.vhd - return f"{prefix}-azure*-*.vhd" + # Dev builds use "seismic-dev" prefix + # e.g., azure/seismic-dev-azure-timestamp.vhd or + # azure/seismic-azure-timestamp.vhd + return f"{prefix}-azure-*.vhd" elif cloud == CloudProvider.GCP: - return f"{prefix}-gcp*-*.tar.gz" - elif cloud == CloudProvider.OVH: - # OVH uses baremetal profile (no PROFILE in build) - return f"{prefix}-baremetal-*.efi" + return f"{prefix}-gcp-*.tar.gz" else: - # Bare metal or unknown + # OVH uses baremetal profile (no PROFILE in build) return f"{prefix}-baremetal-*.efi" @staticmethod @@ -53,34 +48,10 @@ def artifact_prefix() -> str: """Legacy method for backward compatibility.""" return "cvm-image-azure-tdx.rootfs" - @property - def meta_seismic(self) -> Path: - return self.home / "meta-seismic" - - @property - def measured_boot(self) -> Path: - return self.home / "measured-boot" - - @property - def enclave_bb(self) -> str: - return "recipes-nodes/enclave/enclave.bb" - - @property - def sreth_bb(self) -> str: - return "recipes-nodes/reth/reth.bb" - - @property - def summit_bb(self) -> str: - return "recipes-nodes/summit/summit.bb" @property def repo_root(self) -> Path: return self.home / "deploy" - @property - def deploy_script(self) -> Path: - return self.repo_root / "deploy.sh" @property def deploy_metadata(self) -> Path: return self.repo_root / "deploy_metadata.json" @@ -88,7 +59,3 @@ def deploy_metadata(self) -> Path: @property def proxy_client(self) -> Path: return self.home / "cvm-reverse-proxy/build/proxy-client" - - @property - def source_env(self) -> Path: - return self.home / "yocto-manifests/build/srcs/poky" diff --git a/yocto/utils/summit_client.py b/yocto/utils/summit_client.py index 269290f5..8f1e481e 100644 --- a/yocto/utils/summit_client.py +++ b/yocto/utils/summit_client.py @@ -1,10 +1,10 @@ import logging import tomllib +from dataclasses import dataclass from pathlib import Path from typing import Any import requests -from dataclasses import dataclass logger = logging.getLogger(__name__) @@ -25,7 +25,7 @@ def _get(self, path: str) -> str: response = requests.get(f"{self.url}/{path}") response.raise_for_status() return response.text - + def _get_json(self, path: str) -> str: response = requests.get(f"{self.url}/{path}") response.raise_for_status()
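Build artifacts now live in per-cloud subdirectories of `build/`, and several helpers infer the provider from the artifact filename (`CloudProvider.from_string`, `get_artifact_path`, and the recursive timestamp glob in `_artifact_from_timestamp`). A condensed sketch of that routing under the naming convention used in this diff; `cloud_subdir` and `artifact_path` are illustrative names, not repo functions:

```python
from pathlib import Path

def cloud_subdir(artifact: str) -> str:
    """Map an artifact filename to its build/ subdirectory."""
    for cloud in ("azure", "gcp", "baremetal"):
        if f"-{cloud}-" in artifact:
            return cloud
    raise ValueError(f"cannot determine cloud provider from: {artifact}")

def artifact_path(artifacts_base: Path, artifact: str) -> Path:
    """Resolve build/<cloud>/<artifact>, mirroring get_artifact_path."""
    path = artifacts_base / cloud_subdir(artifact) / artifact
    if not path.exists():
        raise FileNotFoundError(f"Artifact not found: {path}")
    return path

print(cloud_subdir("seismic-dev-gcp-20260105213931.tar.gz"))   # gcp
print(cloud_subdir("seismic-dev-azure-20251121185658.vhd"))    # azure
```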
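`parse_gcp_measurements` has to recover a JSON document from `dstack-mr` stdout that may be interleaved with Lima VM banner lines: it slices from the first `{` and tracks brace balance until the count returns to zero. The same technique in isolation (the function name and sample output below are illustrative, not from the repo):

```python
import json

def extract_first_json_object(stdout: str) -> dict:
    """Return the first balanced {...} object found in mixed tool output."""
    try:
        return json.loads(stdout)  # fast path: output is already clean JSON
    except json.JSONDecodeError:
        pass

    lines: list[str] = []
    depth, in_json = 0, False
    for line in stdout.splitlines():
        if not in_json and "{" in line:
            in_json = True
            line = line[line.index("{"):]  # drop banner text before the brace
        if in_json:
            lines.append(line)
            # Like the original, this counts braces inside string literals
            # too; that is safe here because the measurement values are hex.
            depth += line.count("{") - line.count("}")
            if depth == 0:
                break  # braces balanced: the object is complete
    if not lines:
        raise RuntimeError("no JSON object found in tool output")
    return json.loads("\n".join(lines))

# Example: a Lima banner precedes the JSON, as in the case handled above.
mixed = 'INFO lima: starting VM\n{\n  "measurements": {"rtmr1": "ab12"}\n}'
print(extract_first_json_object(mixed)["measurements"]["rtmr1"])  # ab12
```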
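A note on the `attestation_type` strings recorded in `deploy_metadata.json` above: `generate_measurements` builds the value with `f"{cloud_provider}-tdx"`, and on Python 3.11+ formatting an enum with a mixed-in `str` base includes the class name, which matches the `CloudProvider.GCP-tdx` entries (versus the old hard-coded `azure-tdx` style). A minimal sketch of the behavior, with `CloudProvider` redefined here for illustration:

```python
from enum import Enum

class CloudProvider(str, Enum):
    AZURE = "azure"
    GCP = "gcp"

# On Python 3.11+, Enum.__format__ for mixed-in enums includes the
# class name, so an f-string renders the member, not its value:
print(f"{CloudProvider.GCP}-tdx")        # CloudProvider.GCP-tdx
# .value always yields the bare string, on every Python version:
print(f"{CloudProvider.GCP.value}-tdx")  # gcp-tdx
```

On Python 3.10 and earlier the first line prints `gcp-tdx`, so consumers of this field should not assume one form or the other unless the interpreter version is pinned.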