# .env.example
# Azure Subscription Variables
SUBSCRIPTION_ID = ''
LOCATION = ''
TENANT_ID = ''
BASE_NAME = ''
SP_APP_ID = ''
SP_APP_SECRET = ''
RESOURCE_GROUP = 'mlops-RG'
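#
# A minimal sketch (not part of the upstream file) of loading these values locally
# with python-dotenv; only the variable names come from this file, the rest is an
# assumption about local usage:
#
#   import os
#   from dotenv import load_dotenv
#
#   load_dotenv()  # reads a local .env copied from this .env.example
#   subscription_id = os.environ.get("SUBSCRIPTION_ID")
#   resource_group = os.environ.get("RESOURCE_GROUP", "mlops-RG")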
# Mock build/release ID for local testing
BUILD_BUILDID = '001'
# Azure ML Workspace Variables
WORKSPACE_NAME = 'mlops-aml-ws'
EXPERIMENT_NAME = 'mlopspython'
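#
# Hedged sketch of fetching the workspace with the Azure ML SDK v1 using the
# service principal values above; the exact wiring in the repo may differ:
#
#   import os
#   from azureml.core import Workspace
#   from azureml.core.authentication import ServicePrincipalAuthentication
#
#   auth = ServicePrincipalAuthentication(
#       tenant_id=os.environ["TENANT_ID"],
#       service_principal_id=os.environ["SP_APP_ID"],
#       service_principal_password=os.environ["SP_APP_SECRET"],
#   )
#   ws = Workspace.get(
#       name=os.environ["WORKSPACE_NAME"],
#       subscription_id=os.environ["SUBSCRIPTION_ID"],
#       resource_group=os.environ["RESOURCE_GROUP"],
#       auth=auth,
#   )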
# AML Compute Cluster Config
AML_ENV_NAME = 'diabetes_regression_training_env'
AML_ENV_TRAIN_CONDA_DEP_FILE = 'conda_dependencies.yml'
AML_COMPUTE_CLUSTER_NAME = 'train-cluster'
AML_COMPUTE_CLUSTER_CPU_SKU = 'STANDARD_DS2_V2'
AML_CLUSTER_MAX_NODES = '4'
AML_CLUSTER_MIN_NODES = '0'
AML_CLUSTER_PRIORITY = 'lowpriority'
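#
# Hedged sketch of provisioning the training cluster from the values above with
# the Azure ML SDK v1 (assumes `ws` is the Workspace from the earlier sketch):
#
#   import os
#   from azureml.core.compute import AmlCompute, ComputeTarget
#
#   config = AmlCompute.provisioning_configuration(
#       vm_size=os.environ["AML_COMPUTE_CLUSTER_CPU_SKU"],
#       vm_priority=os.environ["AML_CLUSTER_PRIORITY"],
#       min_nodes=int(os.environ["AML_CLUSTER_MIN_NODES"]),
#       max_nodes=int(os.environ["AML_CLUSTER_MAX_NODES"]),
#   )
#   cluster = ComputeTarget.create(ws, os.environ["AML_COMPUTE_CLUSTER_NAME"], config)
#   cluster.wait_for_completion(show_output=True)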
# Training Config
MODEL_NAME = 'diabetes_regression_model.pkl'
MODEL_VERSION = '1'
TRAIN_SCRIPT_PATH = 'training/train_aml.py'
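#
# Hedged sketch of registering the trained model under MODEL_NAME (the local
# model path is an assumption; `ws` as above):
#
#   import os
#   from azureml.core.model import Model
#
#   model = Model.register(
#       workspace=ws,
#       model_name=os.environ["MODEL_NAME"],
#       model_path="outputs/" + os.environ["MODEL_NAME"],  # assumed output location
#   )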
# AML Pipeline Config
TRAINING_PIPELINE_NAME = 'Training Pipeline'
MODEL_PATH = ''
EVALUATE_SCRIPT_PATH = 'evaluate/evaluate_model.py'
REGISTER_SCRIPT_PATH = 'register/register_model.py'
SOURCES_DIR_TRAIN = 'diabetes_regression'
DATASET_NAME = 'diabetes_ds'
DATASET_VERSION = 'latest'
# Optional. Set this if you have configured a non-default datastore to point to your data.
DATASTORE_NAME = ''
SCORE_SCRIPT = 'scoring/score.py'
# Optional. Used by a training pipeline with R on Databricks
DB_CLUSTER_ID = ''
# Optional. Name of the container image to create.
IMAGE_NAME = 'mltrained'
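#
# Hedged sketch of resolving DATASTORE_NAME and DATASET_NAME above; a
# DATASET_VERSION of 'latest' matches the SDK default:
#
#   import os
#   from azureml.core import Dataset, Datastore
#
#   datastore = (Datastore.get(ws, os.environ["DATASTORE_NAME"])
#                if os.environ.get("DATASTORE_NAME")
#                else ws.get_default_datastore())
#   dataset = Dataset.get_by_name(
#       ws,
#       name=os.environ["DATASET_NAME"],
#       version=os.environ.get("DATASET_VERSION", "latest"),
#   )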
# Whether to run the evaluation step in the AML pipeline
RUN_EVALUATION = 'true'
# Set to 'true' to cancel the Azure ML pipeline run when the evaluation criteria are not met.
ALLOW_RUN_CANCEL = 'true'
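#
# Hedged sketch of how ALLOW_RUN_CANCEL might gate cancellation inside the
# evaluation step (the metric comparison is a hypothetical placeholder):
#
#   import os
#   from azureml.core import Run
#
#   run = Run.get_context()
#   metrics_regressed = True  # placeholder for the real evaluation criteria check
#   if metrics_regressed and os.environ.get("ALLOW_RUN_CANCEL", "true") == "true":
#       run.parent.cancel()  # cancel the whole Azure ML pipeline run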
# Flag to allow rebuilding the AML environment after its first build. This enables dependency updates from conda_dependencies.yml.
AML_REBUILD_ENVIRONMENT = 'false'
USE_GPU_FOR_SCORING = "false"
AML_ENV_SCORE_CONDA_DEP_FILE="conda_dependencies_scoring.yml"
AML_ENV_SCORECOPY_CONDA_DEP_FILE="conda_dependencies_scorecopy.yml"
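#
# Hedged sketch of building the training environment from AML_ENV_NAME and
# AML_ENV_TRAIN_CONDA_DEP_FILE, honoring AML_REBUILD_ENVIRONMENT (the file
# location under SOURCES_DIR_TRAIN is an assumption):
#
#   import os
#   from azureml.core import Environment
#
#   conda_file = os.path.join(os.environ["SOURCES_DIR_TRAIN"],
#                             os.environ["AML_ENV_TRAIN_CONDA_DEP_FILE"])
#   env = Environment.from_conda_specification(
#       name=os.environ["AML_ENV_NAME"], file_path=conda_file)
#   registered = env.register(ws)
#   if os.environ.get("AML_REBUILD_ENVIRONMENT", "false") == "true":
#       registered.build(ws)  # rebuild so updated conda dependencies take effect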
# AML Compute Cluster Config for parallel batch scoring
AML_ENV_NAME_SCORING = 'diabetes_regression_scoring_env'
AML_ENV_NAME_SCORE_COPY = 'diabetes_regression_score_copy_env'
AML_COMPUTE_CLUSTER_NAME_SCORING = 'score-cluster'
AML_COMPUTE_CLUSTER_CPU_SKU_SCORING = 'STANDARD_DS2_V2'
AML_CLUSTER_MAX_NODES_SCORING = '4'
AML_CLUSTER_MIN_NODES_SCORING = '0'
AML_CLUSTER_PRIORITY_SCORING = 'lowpriority'
AML_REBUILD_ENVIRONMENT_SCORING = 'true'
BATCHSCORE_SCRIPT_PATH = 'scoring/parallel_batchscore.py'
BATCHSCORE_COPY_SCRIPT_PATH = 'scoring/parallel_batchscore_copyoutput.py'
SCORING_DATASTORE_INPUT_CONTAINER = 'input'
SCORING_DATASTORE_INPUT_FILENAME = 'diabetes_scoring_input.csv'
SCORING_DATASTORE_OUTPUT_CONTAINER = 'output'
SCORING_DATASTORE_OUTPUT_FILENAME = 'diabetes_scoring_output.csv'
SCORING_DATASET_NAME = 'diabetes_scoring_ds'
SCORING_PIPELINE_NAME = 'diabetes-scoring-pipeline'
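#
# Hedged sketch of registering the scoring input as a dataset; the storage
# account name/key are NOT defined in this file, so the variables below are
# hypothetical placeholders:
#
#   import os
#   from azureml.core import Dataset, Datastore
#
#   scoring_datastore = Datastore.register_azure_blob_container(
#       workspace=ws,
#       datastore_name="scoring_datastore",  # assumed name
#       container_name=os.environ["SCORING_DATASTORE_INPUT_CONTAINER"],
#       account_name=os.environ.get("SCORING_DATASTORE_STORAGE_NAME", ""),  # hypothetical
#       account_key=os.environ.get("SCORING_DATASTORE_ACCESS_KEY", ""),     # hypothetical
#   )
#   scoring_input = Dataset.Tabular.from_delimited_files(
#       path=[(scoring_datastore, os.environ["SCORING_DATASTORE_INPUT_FILENAME"])]
#   )
#   scoring_input.register(ws, name=os.environ["SCORING_DATASET_NAME"], create_new_version=True)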