diff --git a/src/promptflow/promptflow/azure/_pf_client.py b/src/promptflow/promptflow/azure/_pf_client.py
index f82d8a2eac92..b2487033b10b 100644
--- a/src/promptflow/promptflow/azure/_pf_client.py
+++ b/src/promptflow/promptflow/azure/_pf_client.py
@@ -4,7 +4,7 @@
 import os
 from os import PathLike
 from pathlib import Path
-from typing import IO, Any, AnyStr, Dict, List, Optional, Union
+from typing import Dict, List, Optional, Union

 from azure.ai.ml import MLClient
 from azure.core.credentials import TokenCredential
@@ -14,7 +14,6 @@
 from promptflow._sdk._errors import RunOperationParameterError
 from promptflow._sdk._user_agent import USER_AGENT
 from promptflow._sdk.entities import Run
-from promptflow.azure._load_functions import load_flow
 from promptflow.azure._restclient.service_caller_factory import _FlowServiceCallerFactory
 from promptflow.azure._utils.gerneral import is_remote_uri
 from promptflow.azure.operations import RunOperations
@@ -300,63 +299,6 @@ def visualize(self, runs: Union[List[str], List[Run]]) -> None:
         """
         self.runs.visualize(runs)

-    def load_as_component(
-        self,
-        source: Union[str, PathLike, IO[AnyStr]],
-        *,
-        component_type: str,
-        columns_mapping: Dict[str, Union[str, float, int, bool]] = None,
-        variant: str = None,
-        environment_variables: Dict[str, Any] = None,
-        is_deterministic: bool = True,
-        **kwargs,
-    ) -> "Component":
-        """
-        Load a flow as a component.
-
-        :param source: Source of the flow. Should be a path to a flow dag yaml file or a flow directory.
-        :type source: Union[str, PathLike, IO[AnyStr]]
-        :param component_type: Type of the loaded component, support parallel only for now.
-        :type component_type: str
-        :param variant: Node variant used for the flow.
-        :type variant: str
-        :param environment_variables: Environment variables to set for the flow.
-        :type environment_variables: dict
-        :param columns_mapping: Inputs mapping for the flow.
-        :type columns_mapping: dict
-        :param is_deterministic: Whether the loaded component is deterministic.
-        :type is_deterministic: bool
-        """
-        name = kwargs.pop("name", None)
-        version = kwargs.pop("version", None)
-        description = kwargs.pop("description", None)
-        display_name = kwargs.pop("display_name", None)
-        tags = kwargs.pop("tags", None)
-
-        flow = load_flow(
-            source=source,
-            relative_origin=kwargs.pop("relative_origin", None),
-            **kwargs,
-        )
-
-        if component_type != "parallel":
-            raise NotImplementedError(f"Component type {component_type} is not supported yet.")
-
-        # TODO: confirm if we should keep flow operations
-        component = self._flows.load_as_component(
-            flow=flow,
-            columns_mapping=columns_mapping,
-            variant=variant,
-            environment_variables=environment_variables,
-            name=name,
-            version=version,
-            description=description,
-            is_deterministic=is_deterministic,
-            display_name=display_name,
-            tags=tags,
-        )
-        return component
-
     def _add_user_agent(self, kwargs) -> None:
         user_agent = kwargs.pop("user_agent", None)
         user_agent = f"{user_agent} {USER_AGENT}" if user_agent else USER_AGENT
diff --git a/src/promptflow/promptflow/azure/operations/_artifact_utilities.py b/src/promptflow/promptflow/azure/operations/_artifact_utilities.py
index 3edbbf481770..0a1cf2ec51a4 100644
--- a/src/promptflow/promptflow/azure/operations/_artifact_utilities.py
+++ b/src/promptflow/promptflow/azure/operations/_artifact_utilities.py
@@ -9,7 +9,7 @@
 import uuid
 from datetime import datetime, timedelta
 from pathlib import Path
-from typing import Dict, Optional, Tuple, TypeVar, Union
+from typing import Dict, Optional, TypeVar, Union

 from azure.ai.ml._artifacts._blob_storage_helper import BlobStorageClient
 from azure.ai.ml._artifacts._gen2_storage_helper import Gen2StorageClient
@@ -34,7 +34,6 @@
     get_artifact_path_from_storage_url,
     get_storage_client,
 )
-from azure.ai.ml._utils.utils import is_mlflow_uri, is_url
 from azure.ai.ml.constants._common import SHORT_URI_FORMAT, STORAGE_ACCOUNT_URLS
 from azure.ai.ml.entities import Environment
 from azure.ai.ml.entities._assets._artifacts.artifact import Artifact, ArtifactStorageInfo
@@ -357,56 +356,6 @@ def _update_gen2_metadata(name, version, indicator_file, storage_client) -> None:
 T = TypeVar("T", bound=Artifact)


-def _check_and_upload_path(
-    artifact: T,
-    asset_operations: Union["DataOperations", "ModelOperations", "CodeOperations", "FeatureSetOperations"],
-    artifact_type: str,
-    datastore_name: Optional[str] = None,
-    sas_uri: Optional[str] = None,
-    show_progress: bool = True,
-) -> Tuple[T, str]:
-    """Checks whether `artifact` is a path or a uri and uploads it to the datastore if necessary.
-
-    param T artifact: artifact to check and upload param
-        Union["DataOperations", "ModelOperations", "CodeOperations"]
-        asset_operations: the asset operations to use for uploading
-    param str datastore_name: the name of the datastore to upload to
-    param str sas_uri: the sas uri to use for uploading
-    """
-
-    datastore_name = artifact.datastore
-    if (
-        hasattr(artifact, "local_path")
-        and artifact.local_path is not None
-        or (
-            hasattr(artifact, "path")
-            and artifact.path is not None
-            and not (is_url(artifact.path) or is_mlflow_uri(artifact.path))
-        )
-    ):
-        path = (
-            Path(artifact.path)
-            if hasattr(artifact, "path") and artifact.path is not None
-            else Path(artifact.local_path)
-        )
-        if not path.is_absolute():
-            path = Path(artifact.base_path, path).resolve()
-        uploaded_artifact = _upload_to_datastore(
-            asset_operations._operation_scope,
-            asset_operations._datastore_operation,
-            path,
-            datastore_name=datastore_name,
-            asset_name=artifact.name,
-            asset_version=str(artifact.version),
-            asset_hash=artifact._upload_hash if hasattr(artifact, "_upload_hash") else None,
-            sas_uri=sas_uri,
-            artifact_type=artifact_type,
-            show_progress=show_progress,
-            ignore_file=getattr(artifact, "_ignore_file", None),
-        )
-    return uploaded_artifact
-
-
 def _check_and_upload_env_build_context(
     environment: Environment,
     operations: "EnvironmentOperations",
diff --git a/src/promptflow/promptflow/azure/operations/_flow_operations.py b/src/promptflow/promptflow/azure/operations/_flow_operations.py
index cef2ef52a84c..debb642fffb4 100644
--- a/src/promptflow/promptflow/azure/operations/_flow_operations.py
+++ b/src/promptflow/promptflow/azure/operations/_flow_operations.py
@@ -5,9 +5,8 @@
 import logging
 import os
-import re
 from pathlib import Path
-from typing import Any, Dict
+from typing import Dict

 from azure.ai.ml._artifacts._artifact_utilities import _check_and_upload_path
 from azure.ai.ml._scope_dependent_operations import (
@@ -16,11 +15,7 @@
     OperationScope,
     _ScopeDependentOperations,
 )
-from azure.ai.ml._utils._storage_utils import AzureMLDatastorePathUri
-from azure.ai.ml._utils.utils import hash_dict
-from azure.ai.ml.constants._common import SHORT_URI_FORMAT, AzureMLResourceType
-from azure.ai.ml.operations import ComponentOperations
-from azure.ai.ml.operations._code_operations import CodeOperations
+from azure.ai.ml.constants._common import SHORT_URI_FORMAT
 from azure.ai.ml.operations._operation_orchestrator import OperationOrchestrator
 from azure.core.exceptions import HttpResponseError

@@ -32,12 +27,8 @@
 )
 from promptflow._sdk._utils import PromptflowIgnoreFile, generate_flow_tools_json
 from promptflow._sdk._vendor._asset_utils import traverse_directory
-from promptflow.azure._constants._flow import DEFAULT_STORAGE
 from promptflow.azure._entities._flow import Flow
-from promptflow.azure._ml import Component
-from promptflow.azure._restclient.flow.models import FlowRunMode, LoadFlowAsComponentRequest
 from promptflow.azure._restclient.flow_service_caller import FlowServiceCaller
-from promptflow.azure._utils import is_arm_id
 from promptflow.exceptions import SystemErrorException

@@ -63,10 +54,6 @@ def __init__(
         self._service_caller = service_caller
         self._credential = credential

-    @property
-    def _code_operations(self) -> CodeOperations:
-        return self._all_operations.get_operation(AzureMLResourceType.CODE, lambda x: isinstance(x, CodeOperations))
-
     def _create_or_update(self, flow, **kwargs):
         # upload to file share
         self._resolve_arm_id_or_upload_dependencies(flow)
@@ -102,140 +89,6 @@ def _download(self, source, dest):
         # TODO: support download flow
         raise NotImplementedError("Not implemented yet")

-    @classmethod
-    def _clear_empty_item(cls, obj):
-        if not isinstance(obj, dict):
-            return obj
-        return {k: cls._clear_empty_item(v) for k, v in obj.items() if v is not None}
-
-    @classmethod
-    def _get_component_hash(cls, rest_object):
-        """this hash should include all the burn-in information:
-        - code
-        - keys of inputs_mapping
-        - environment_variables, it will be burned into something like component.task.environment_variables?
-        some other fields will be burned into component but will impact default value of inputs:
-        - variant
-        - connections
-        - values of inputs_mapping
-        Now we use all of them as hash key.
-        """
-        obj = rest_object.as_dict()
-
-        return hash_dict(cls._clear_empty_item(obj))
-
-    @classmethod
-    def _get_name_and_version(cls, *, rest_object, name: str = None, version: str = None):
-        if name and version:
-            return name, version
-        if name or version:
-            raise ValueError("name and version of the component must be provided together")
-        # the hash will be impacted by all editable fields, including default value of inputs_mapping
-        # so components with different default value of columns_mapping can't be reused from each other
-        return "azureml_anonymous_flow", cls._get_component_hash(rest_object)
-
-    def load_as_component(
-        self,
-        flow,
-        name: str = None,
-        version: str = None,
-        display_name: str = None,
-        description: str = None,
-        tags: Dict[str, str] = None,
-        variant: str = None,
-        columns_mapping: Dict[str, str] = None,
-        environment_variables: Dict[str, Any] = None,
-        connections: Dict[str, Dict[str, str]] = None,
-        is_deterministic: bool = True,
-        **kwargs,
-    ) -> Component:
-        """Load a flow as a component."""
-        rest_object = LoadFlowAsComponentRequest(
-            node_variant=variant,
-            inputs_mapping=columns_mapping,
-            environment_variables=environment_variables,
-            connections=connections,
-            display_name=display_name,
-            description=description,
-            tags=tags,
-            is_deterministic=is_deterministic,
-            # hack: MT support this only for now, will remove after MT release new version
-            run_mode=FlowRunMode.BULK_TEST,
-        )
-
-        if is_arm_id(flow):
-            rest_object.flow_definition_resource_id = flow.id
-        else:
-            # upload to file share
-            self._resolve_arm_id_or_upload_dependencies(flow)
-            if flow.path.startswith("azureml://"):
-                # upload via _check_and_upload_path
-                # submit with params FlowDefinitionDataStoreName and FlowDefinitionBlobPath
-                path_uri = AzureMLDatastorePathUri(flow.path)
-                rest_object.flow_definition_data_store_name = path_uri.datastore
-                rest_object.flow_definition_blob_path = path_uri.path
-            else:
-                # upload via CodeOperations.create_or_update
-                # submit with param FlowDefinitionDataUri
-                rest_object.flow_definition_data_uri = flow.path
-
-        rest_object.component_name, rest_object.component_version = self._get_name_and_version(
-            rest_object=rest_object, name=name, version=version
-        )
-
-        component_id = self._service_caller.create_component_from_flow(
-            subscription_id=self._operation_scope.subscription_id,
-            resource_group_name=self._operation_scope.resource_group_name,
-            workspace_name=self._operation_scope.workspace_name,
-            body=rest_object,
-        )
-        name, version = re.match(r".*/components/(.*)/versions/(.*)", component_id).groups()
-        return self._all_operations.get_operation(
-            AzureMLResourceType.COMPONENT,
-            lambda x: isinstance(x, ComponentOperations),
-        ).get(name, version)
-
-    def _resolve_arm_id_or_upload_dependencies_to_file_share(self, flow: Flow) -> None:
-        ops = OperationOrchestrator(self._all_operations, self._operation_scope, self._operation_config)
-        # resolve flow's code
-        self._try_resolve_code_for_flow_to_file_share(flow=flow, ops=ops)
-
-    @classmethod
-    def _try_resolve_code_for_flow_to_file_share(cls, flow: Flow, ops: OperationOrchestrator) -> None:
-        from ._artifact_utilities import _check_and_upload_path
-
-        if flow.path:
-            if flow.path.startswith("azureml://datastores"):
-                # remote path
-                path_uri = AzureMLDatastorePathUri(flow.path)
-                if path_uri.datastore != DEFAULT_STORAGE:
-                    raise ValueError(f"Only {DEFAULT_STORAGE} is supported as remote storage for now.")
-                flow.path = path_uri.path
-                flow._code_uploaded = True
-                return
-        else:
-            raise ValueError("Path is required for flow.")
-
-        with flow._build_code() as code:
-            if code is None:
-                return
-            if flow._code_uploaded:
-                return
-            code.datastore = DEFAULT_STORAGE
-            uploaded_code_asset = _check_and_upload_path(
-                artifact=code,
-                asset_operations=ops._code_assets,
-                artifact_type="Code",
-                show_progress=False,
-            )
-            if "remote_path" in uploaded_code_asset:
-                path = uploaded_code_asset["remote_path"]
-            elif "remote path" in uploaded_code_asset:
-                path = uploaded_code_asset["remote path"]
-            flow.code = path
-            flow.path = (Path(path) / flow.path).as_posix()
-            flow._code_uploaded = True
-
     def _resolve_arm_id_or_upload_dependencies(self, flow: Flow, ignore_tools_json=False) -> None:
         ops = OperationOrchestrator(self._all_operations, self._operation_scope, self._operation_config)
         # resolve flow's code
diff --git a/src/promptflow/setup.py b/src/promptflow/setup.py
index 11a86b99e629..6aa5a0f2c881 100644
--- a/src/promptflow/setup.py
+++ b/src/promptflow/setup.py
@@ -75,7 +75,7 @@
             "azure-core>=1.26.4,<2.0.0",
             "azure-storage-blob>=12.13.0,<13.0.0",
             "azure-identity>=1.12.0,<2.0.0",
-            "azure-ai-ml>=1.9.0,<2.0.0",
+            "azure-ai-ml>=1.11.0,<2.0.0",
             "pyjwt>=2.4.0,<3.0.0",  # requirement of control plane SDK
         ],
     },
diff --git a/src/promptflow/tests/sdk_cli_azure_test/e2etests/test_flow_in_azure_ml.py b/src/promptflow/tests/sdk_cli_azure_test/e2etests/test_flow_in_azure_ml.py
index a5bbfa2820d1..16e423034cbc 100644
--- a/src/promptflow/tests/sdk_cli_azure_test/e2etests/test_flow_in_azure_ml.py
+++ b/src/promptflow/tests/sdk_cli_azure_test/e2etests/test_flow_in_azure_ml.py
@@ -4,9 +4,8 @@
 import pydash
 import pytest
 import yaml
-from azure.ai.ml import Input, dsl
-from azure.ai.ml.constants import AssetTypes
-from azure.ai.ml.entities import Component, PipelineJob
+from azure.ai.ml import load_component
+from azure.ai.ml.entities import Component

 from promptflow.connections import AzureOpenAIConnection

@@ -59,6 +58,7 @@ def update_saved_spec(component: Component, saved_spec_path: str):
     current_spec_text = saved_spec_path.read_text()
     if current_spec_text == yaml_text:
         return
+    saved_spec_path.parent.mkdir(parents=True, exist_ok=True)
     saved_spec_path.write_text(yaml_text)


@@ -70,42 +70,19 @@ class TestFlowInAzureML:
         [
             pytest.param(
                 {
-                    "component_type": "parallel",
-                    "columns_mapping": {
-                        "groundtruth": "1",
-                        "prediction": "${{batch_run.outputs.category}}",
-                    },
-                    "environment_variables": {
-                        "verbose": "true",
-                    },
-                },
-                {
-                    "type": "parallel",
-                },
-                id="parallel_anonymous",
-            ),
-            pytest.param(
-                {
-                    "name": "web_classification_0",
+                    "name": "web_classification_3",
                     "version": "1.0.0",
-                    "component_type": "parallel",
                     "description": "Create flows that use large language models to "
                     "classify URLs into multiple categories.",
-                    "columns_mapping": {
-                        "groundtruth": "1",
-                        "prediction": "${{batch_run.outputs.category}}",
-                    },
                     "environment_variables": {
                         "verbose": "true",
                     },
                 },
                 {
-                    "name": "web_classification_0",
+                    "name": "web_classification_3",
                     "version": "1.0.0",
                     "description": "Create flows that use large language models to "
                     "classify URLs into multiple categories.",
-                    "inputs.groundtruth.default": "1",
-                    "inputs.prediction.default": "${{batch_run.outputs.category}}",
                     "type": "parallel",
                 },
                 id="parallel",
@@ -116,62 +93,25 @@ def test_flow_as_component(
         self,
         azure_open_ai_connection: AzureOpenAIConnection,
         temp_output_dir,
-        pf,
+        ml_client,
         load_params: dict,
         expected_spec_attrs: dict,
         request,
     ) -> None:
+        # keep the simplest test here, more tests are in azure-ai-ml
         flows_dir = "./tests/test_configs/flows"

-        flow_func: Component = pf.load_as_component(
-            f"{flows_dir}/web_classification",
-            **load_params,
-        )
-
-        update_saved_spec(flow_func, f"./tests/test_configs/flows/saved_component_spec/{request.node.callspec.id}.yaml")
-
-        component_dict = flow_func._to_dict()
-        slimmed_created_component_attrs = {key: pydash.get(component_dict, key) for key in expected_spec_attrs.keys()}
-        assert slimmed_created_component_attrs == expected_spec_attrs
-
-    def test_flow_as_component_in_dsl_pipeline(
-        self, azure_open_ai_connection: AzureOpenAIConnection, temp_output_dir, pf
-    ) -> None:
-
-        flows_dir = "./tests/test_configs/flows"
-
-        flow_func: Component = pf.load_as_component(
-            f"{flows_dir}/web_classification",
-            component_type="parallel",
-            columns_mapping={
-                "groundtruth": "${data.answer}",
-                "url": "${data.url}",
-            },
-            environment_variables={
-                "verbose": "true",
-            },
+        flow_func: Component = load_component(
+            f"{flows_dir}/web_classification/flow.dag.yaml", params_override=[load_params]
         )

-        @dsl.pipeline
-        def pipeline_with_flow(input_data):
-            flow_node = flow_func(
-                data=input_data,
-                connections={
-                    "summarize_text_content": {
-                        "deployment_name": "test_deployment_name",
-                    }
-                },
-            )
-            flow_node.logging_level = "DEBUG"
-            return flow_node.outputs
+        created_component = ml_client.components.create_or_update(flow_func)

-        pipeline: PipelineJob = pipeline_with_flow(
-            input_data=Input(path=f"{flows_dir}/web_classification_input_dir", type=AssetTypes.URI_FOLDER),
+        update_saved_spec(
+            created_component, f"./tests/test_configs/flows/saved_component_spec/{request.node.callspec.id}.yaml"
         )

-        # compute cluster doesn't have access to azurecr for now, so the submitted job will fail in building image stage
-        pipeline.settings.default_compute = "cpu-cluster"
-        created_job: PipelineJob = pf.ml_client.jobs.create_or_update(pipeline)
-        assert created_job.id
-        assert created_job.jobs["flow_node"].logging_level == "DEBUG"
+        component_dict = created_component._to_dict()
+        slimmed_created_component_attrs = {key: pydash.get(component_dict, key) for key in expected_spec_attrs.keys()}
+        assert slimmed_created_component_attrs == expected_spec_attrs
diff --git a/src/promptflow/tests/test_configs/flows/saved_component_spec/command.yaml b/src/promptflow/tests/test_configs/flows/saved_component_spec/command.yaml
deleted file mode 100644
index fd9dbf0e52a9..000000000000
--- a/src/promptflow/tests/test_configs/flows/saved_component_spec/command.yaml
+++ /dev/null
@@ -1,49 +0,0 @@
-$schema: https://azuremlschemas.azureedge.net/latest/commandComponent.schema.json
-name: web_classification_updated
-version: version_updated
-display_name: web_classification_display_name_updated
-description: Create flows that use large language models to classify URLs into multiple
-  categories.
-type: command
-inputs:
-  data:
-    type: uri_folder
-  variant:
-    type: string
-    optional: true
-    default: variant_1
-  groundtruth:
-    type: string
-    optional: true
-    default: '1'
-  prediction:
-    type: string
-    optional: true
-    default: ${{variant.outputs.category}}
-  connections.summarize_text_content.connection:
-    type: string
-    optional: true
-    default: azure_open_ai_connection
-  connections.summarize_text_content.deployment_name:
-    type: string
-    optional: true
-  connections.classify_with_llm.connection:
-    type: string
-    optional: true
-    default: azure_open_ai_connection
-  connections.classify_with_llm.deployment_name:
-    type: string
-    optional: true
-outputs:
-  output:
-    type: uri_folder
-command: pf run_bulk --input ${{inputs.data}} --output ${{outputs.output}} $[[--inputs-mapping
-  groundtruth=${{inputs.groundtruth}},prediction=${{inputs.prediction}}]] --connections
-  "$[[summarize_text_content.deployment_name=${{inputs.connections.summarize_text_content.deployment_name}},]]$[[summarize_text_content.connection=${{inputs.connections.summarize_text_content.connection}},]]$[[classify_with_llm.deployment_name=${{inputs.connections.classify_with_llm.deployment_name}},]]$[[classify_with_llm.connection=${{inputs.connections.classify_with_llm.connection}},]]"
-  $[[--variant ${{inputs.variant}}]]
-environment:
-  name: CliV2AnonymousEnvironment
-  version: e2ebc71877ca60434ac0cb69936b523f
-  image: promptflow.azurecr.io/cli_test:latest
-code: D:/PycharmProjects/PromptFlow/src/promptflow-sdk/tests/test_configs/flows/web_classification
-is_deterministic: true
diff --git a/src/promptflow/tests/test_configs/flows/saved_component_spec/parallel.yaml b/src/promptflow/tests/test_configs/flows/saved_component_spec/parallel.yaml
index 74a0dff470b8..babdcb6f7d2e 100644
--- a/src/promptflow/tests/test_configs/flows/saved_component_spec/parallel.yaml
+++ b/src/promptflow/tests/test_configs/flows/saved_component_spec/parallel.yaml
@@ -7,7 +7,7 @@ creation_context:
   last_modified_by_type: xxx
 description: Create flows that use large language models to classify URLs into multiple
   categories.
-display_name: web_classification_0
+display_name: web_classification_3
 error_threshold: -1
 id: azureml:/subscriptions/xxx/resourceGroups/xxx/providers/Microsoft.MachineLearningServices/workspaces/xxx/components/xxx/versions/xxx
 input_data: ${{inputs.data}}
@@ -20,6 +20,18 @@ inputs:
     default: text-davinci-003
     optional: true
     type: string
+  connections.classify_with_llm.model:
+    enum:
+    - text-davinci-001
+    - text-davinci-002
+    - text-davinci-003
+    - text-curie-001
+    - text-babbage-001
+    - text-ada-001
+    - code-cushman-001
+    - code-davinci-002
+    optional: true
+    type: string
   connections.summarize_text_content.connection:
     default: azure_open_ai_connection
     optional: true
@@ -28,16 +40,26 @@ inputs:
     default: text-davinci-003
     optional: true
     type: string
+  connections.summarize_text_content.model:
+    enum:
+    - text-davinci-001
+    - text-davinci-002
+    - text-davinci-003
+    - text-curie-001
+    - text-babbage-001
+    - text-ada-001
+    - code-cushman-001
+    - code-davinci-002
+    optional: true
+    type: string
   data:
-    description: Flow data input
     optional: false
     type: uri_folder
-  groundtruth:
-    default: '1'
-    optional: false
-    type: string
-  prediction:
-    default: ${{batch_run.outputs.category}}
+  run_outputs:
+    optional: true
+    type: uri_folder
+  url:
+    default: https://www.microsoft.com/en-us/d/xbox-wireless-controller-stellar-shift-special-edition/94fbjc7h0h6h
     optional: false
     type: string
 is_deterministic: true
@@ -45,8 +67,10 @@ logging_level: INFO
 max_concurrency_per_instance: 1
 mini_batch_error_threshold: 0
 mini_batch_size: '1'
-name: web_classification_0
+name: web_classification_3
 outputs:
+  debug_info:
+    type: uri_folder
   flow_outputs:
     type: uri_folder
 retry_settings:
@@ -54,13 +78,15 @@ retry_settings:
   timeout: 3600
 task:
   append_row_to: ${{outputs.flow_outputs}}
-  code: azureml:/subscriptions/xxx/resourceGroups/xxx/providers/Microsoft.MachineLearningServices/workspaces/xxx/codes/xxx/versions/xxx
+  code: /subscriptions/xxx/resourceGroups/xxx/providers/Microsoft.MachineLearningServices/workspaces/xxx/codes/xxx/versions/xxx
   entry_script: driver/azureml_user/parallel_run/prompt_flow_entry.py
   environment: azureml:/subscriptions/xxx/resourceGroups/xxx/providers/Microsoft.MachineLearningServices/workspaces/xxx/environments/xxx/versions/xxx
-  program_arguments: --amlbi_pf_enabled True --amlbi_pf_run_mode component --amlbi_mini_batch_rows
-    1 --amlbi_file_format jsonl --amlbi_pf_connections "$[[classify_with_llm.connection=${{inputs.connections.classify_with_llm.connection}},]]$[[classify_with_llm.deployment_name=${{inputs.connections.classify_with_llm.deployment_name}},]]$[[summarize_text_content.connection=${{inputs.connections.summarize_text_content.connection}},]]$[[summarize_text_content.deployment_name=${{inputs.connections.summarize_text_content.deployment_name}},]]"
-    --amlbi_pf_input_groundtruth ${{inputs.groundtruth}} --amlbi_pf_input_prediction
-    ${{inputs.prediction}}
+  program_arguments: --amlbi_pf_enabled True --amlbi_pf_run_mode component --amlbi_mini_batch_rows
+    1 --amlbi_file_format jsonl $[[--amlbi_pf_run_outputs ${{inputs.run_outputs}}]]
+    --amlbi_pf_debug_info ${{outputs.debug_info}} --amlbi_pf_connections "$[[classify_with_llm.connection=${{inputs.connections.classify_with_llm.connection}},]]$[[summarize_text_content.connection=${{inputs.connections.summarize_text_content.connection}},]]"
+    --amlbi_pf_deployment_names "$[[classify_with_llm.deployment_name=${{inputs.connections.classify_with_llm.deployment_name}},]]$[[summarize_text_content.deployment_name=${{inputs.connections.summarize_text_content.deployment_name}},]]"
+    --amlbi_pf_model_names "$[[classify_with_llm.model=${{inputs.connections.classify_with_llm.model}},]]$[[summarize_text_content.model=${{inputs.connections.summarize_text_content.model}},]]"
+    --amlbi_pf_input_url ${{inputs.url}}
   type: run_function
 type: parallel
 version: 1.0.0
diff --git a/src/promptflow/tests/test_configs/flows/saved_component_spec/parallel_anonymous.yaml b/src/promptflow/tests/test_configs/flows/saved_component_spec/parallel_anonymous.yaml
deleted file mode 100644
index f47c9c4d9411..000000000000
--- a/src/promptflow/tests/test_configs/flows/saved_component_spec/parallel_anonymous.yaml
+++ /dev/null
@@ -1,73 +0,0 @@
-creation_context:
-  created_at: xxx
-  created_by: xxx
-  created_by_type: xxx
-  last_modified_at: xxx
-  last_modified_by: xxx
-  last_modified_by_type: xxx
-display_name: azureml_anonymous_flow
-error_threshold: -1
-id: azureml:/subscriptions/xxx/resourceGroups/xxx/providers/Microsoft.MachineLearningServices/workspaces/xxx/components/xxx/versions/xxx
-input_data: ${{inputs.data}}
-inputs:
-  connections.classify_with_llm.connection:
-    default: azure_open_ai_connection
-    optional: true
-    type: string
-  connections.classify_with_llm.deployment_name:
-    default: text-davinci-003
-    optional: true
-    type: string
-  connections.summarize_text_content.connection:
-    default: azure_open_ai_connection
-    optional: true
-    type: string
-  connections.summarize_text_content.deployment_name:
-    default: text-davinci-003
-    optional: true
-    type: string
-  data:
-    optional: false
-    type: uri_folder
-  groundtruth:
-    default: '1'
-    optional: false
-    type: string
-  prediction:
-    default: ${{batch_run.outputs.category}}
-    optional: false
-    type: string
-  run_outputs:
-    optional: true
-    type: uri_folder
-  url:
-    default: https://www.microsoft.com/en-us/d/xbox-wireless-controller-stellar-shift-special-edition/94fbjc7h0h6h
-    optional: false
-    type: string
-is_deterministic: true
-logging_level: INFO
-max_concurrency_per_instance: 1
-mini_batch_error_threshold: 0
-mini_batch_size: '1'
-name: azureml_anonymous_flow
-outputs:
-  debug_info:
-    type: uri_folder
-  flow_outputs:
-    type: uri_folder
-retry_settings:
-  max_retries: 2
-  timeout: 3600
-task:
-  append_row_to: ${{outputs.flow_outputs}}
-  code: azureml:/subscriptions/xxx/resourceGroups/xxx/providers/Microsoft.MachineLearningServices/workspaces/xxx/codes/xxx/versions/xxx
-  entry_script: driver/azureml_user/parallel_run/prompt_flow_entry.py
-  environment: azureml:/subscriptions/xxx/resourceGroups/xxx/providers/Microsoft.MachineLearningServices/workspaces/xxx/environments/xxx/versions/xxx
-  program_arguments: --amlbi_pf_enabled True --amlbi_pf_run_mode component --amlbi_mini_batch_rows
-    1 --amlbi_file_format jsonl $[[--amlbi_pf_run_outputs ${{inputs.run_outputs}}]]
-    --amlbi_pf_debug_info ${{outputs.debug_info}} --amlbi_pf_connections "$[[classify_with_llm.connection=${{inputs.connections.classify_with_llm.connection}},]]$[[classify_with_llm.deployment_name=${{inputs.connections.classify_with_llm.deployment_name}},]]$[[summarize_text_content.connection=${{inputs.connections.summarize_text_content.connection}},]]$[[summarize_text_content.deployment_name=${{inputs.connections.summarize_text_content.deployment_name}},]]"
-    --amlbi_pf_input_url ${{inputs.url}} --amlbi_pf_input_groundtruth ${{inputs.groundtruth}}
-    --amlbi_pf_input_prediction ${{inputs.prediction}}
-  type: run_function
-type: parallel
-version: c8160529-52a3-0626-39da-3040f0a43655
diff --git a/src/promptflow/tests/test_configs/flows/web_classification/flow.dag.yaml b/src/promptflow/tests/test_configs/flows/web_classification/flow.dag.yaml
index ebcb28362558..e5c71f474bef 100644
--- a/src/promptflow/tests/test_configs/flows/web_classification/flow.dag.yaml
+++ b/src/promptflow/tests/test_configs/flows/web_classification/flow.dag.yaml
@@ -1,3 +1,4 @@
+$schema: https://azuremlschemas.azureedge.net/promptflow/latest/Flow.schema.json
 inputs:
   url:
     type: string
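For reference, the `load_as_component` path removed above is superseded by plain `azure-ai-ml` (hence the `>=1.11.0` floor in setup.py). A minimal sketch of the replacement workflow, mirroring `test_flow_as_component`; the workspace identifiers below are placeholders, not values from this PR:

```python
from azure.ai.ml import MLClient, load_component
from azure.identity import DefaultAzureCredential

# Placeholder workspace coordinates; substitute your own.
ml_client = MLClient(
    credential=DefaultAzureCredential(),
    subscription_id="<subscription-id>",
    resource_group_name="<resource-group>",
    workspace_name="<workspace>",
)

# load_component reads the flow DAG directly; name/version overrides are optional.
flow_component = load_component(
    "./tests/test_configs/flows/web_classification/flow.dag.yaml",
    params_override=[{"name": "web_classification_3", "version": "1.0.0"}],
)

# Registration returns the resolved component spec.
created = ml_client.components.create_or_update(flow_component)
print(created.type)  # "parallel", per the saved spec in parallel.yaml
```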