From 8571a0527b9426c0b427bded7124443e4eae3524 Mon Sep 17 00:00:00 2001 From: Angel Antonio Avalos Cisneros Date: Thu, 11 Jan 2024 14:18:10 -0800 Subject: [PATCH] Project import generated by Copybara. (#82) GitOrigin-RevId: e2b4905f145b3b67e5f8bb1501343c2f993b9ee0 Co-authored-by: Snowflake Authors --- .pre-commit-config.yaml | 2 +- CHANGELOG.md | 27 ++ CONTRIBUTING.md | 3 + README.md | 17 +- bazel/BUILD.bazel | 10 + ci/conda_recipe/meta.yaml | 2 +- ci/type_check.sh | 92 +--- ci/type_check/BUILD.bazel | 0 ci/type_check/runner/.gitignore | 1 + ci/type_check/type_check.sh | 111 +++++ codegen/sklearn_wrapper_generator.py | 54 +++ codegen/sklearn_wrapper_template.py_template | 48 +- docs/source/_templates/autosummary/class.rst | 17 +- docs/source/index.rst | 2 + docs/source/model.rst | 64 +++ docs/source/registry.rst | 5 +- docs/sphinxconf/conf.py | 18 +- snowflake/cortex/BUILD.bazel | 6 +- .../image_registry/BUILD.bazel | 56 +++ .../image_registry/credential.py} | 0 .../image_registry/http_client.py} | 0 .../image_registry/http_client_test.py} | 4 +- .../image_registry}/imagelib.py | 4 +- .../image_registry/registry_client.py} | 6 +- .../image_registry/registry_client_test.py} | 4 +- snowflake/ml/_internal/env_utils.py | 83 ++-- snowflake/ml/_internal/env_utils_test.py | 220 ++++----- snowflake/ml/_internal/file_utils.py | 17 + snowflake/ml/_internal/utils/BUILD.bazel | 33 +- .../_internal/utils/query_result_checker.py | 13 +- .../utils/query_result_checker_test.py | 20 +- snowflake/ml/_internal/utils/snowflake_env.py | 95 ++++ .../ml/_internal/utils/snowflake_env_test.py | 93 ++++ snowflake/ml/feature_store/BUILD.bazel | 5 +- .../ml/feature_store/_internal/BUILD.bazel | 5 +- .../_internal/scripts/BUILD.bazel | 4 + .../_internal/scripts/upload_test_datasets.py | 12 +- snowflake/ml/feature_store/entity.py | 13 +- snowflake/ml/feature_store/feature_store.py | 100 +--- .../customer_demo/Basic_Feature_Demo.ipynb | 6 +- .../customer_demo/Basic_Feature_Demo.pdf | Bin 221129 
-> 222468 bytes .../Time_Series_Feature_Demo.ipynb | 6 +- .../Time_Series_Feature_Demo.pdf | Bin 257355 -> 257608 bytes snowflake/ml/feature_store/tests/BUILD.bazel | 5 +- .../tests/feature_store_object_test.py | 6 +- .../feature_store/tests/feature_store_test.py | 12 +- snowflake/ml/fileset/parquet_parser.py | 32 +- snowflake/ml/model/BUILD.bazel | 24 + snowflake/ml/model/__init__.py | 6 + snowflake/ml/model/_client/model/BUILD.bazel | 12 +- .../ml/model/_client/model/model_impl.py | 185 +++++++- .../ml/model/_client/model/model_impl_test.py | 224 +++++++-- .../model/_client/model/model_method_info.py | 19 - .../model/_client/model/model_version_impl.py | 148 ++++-- .../_client/model/model_version_impl_test.py | 99 +++- snowflake/ml/model/_client/ops/BUILD.bazel | 7 +- .../ml/model/_client/ops/metadata_ops.py | 4 +- snowflake/ml/model/_client/ops/model_ops.py | 186 +++++++- .../ml/model/_client/ops/model_ops_test.py | 354 +++++++++++++- snowflake/ml/model/_client/sql/BUILD.bazel | 33 +- snowflake/ml/model/_client/sql/model.py | 71 ++- snowflake/ml/model/_client/sql/model_test.py | 16 +- .../ml/model/_client/sql/model_version.py | 98 ++-- .../model/_client/sql/model_version_test.py | 22 +- snowflake/ml/model/_client/sql/stage.py | 14 +- snowflake/ml/model/_client/sql/tag.py | 118 +++++ snowflake/ml/model/_client/sql/tag_test.py | 104 ++++ .../_deploy_client/image_builds/BUILD.bazel | 4 +- .../image_builds/client_image_builder.py | 4 +- .../image_builds/docker_context.py | 16 +- .../image_builds/inference_server/main.py | 10 +- .../image_builds/server_image_builder.py | 13 +- .../_deploy_client/snowservice/BUILD.bazel | 2 +- .../_deploy_client/snowservice/deploy.py | 9 +- .../snowservice/instance_types.py | 10 +- .../ml/model/_deploy_client/utils/BUILD.bazel | 28 -- .../model/_deploy_client/warehouse/deploy.py | 31 +- .../_deploy_client/warehouse/deploy_test.py | 16 +- .../_model_composer/model_composer_test.py | 12 +- .../model_manifest/BUILD.bazel | 20 +- 
.../model_manifest/fixtures/MANIFEST_0.yml | 41 ++ .../model_manifest/fixtures/MANIFEST_1.yml | 65 +++ .../model_manifest/fixtures/MANIFEST_2.yml | 66 +++ .../model_manifest/fixtures/MANIFEST_3.yml | 117 +++++ .../model_manifest/model_manifest.py | 51 +- .../model_manifest/model_manifest_schema.py | 32 ++ .../model_manifest/model_manifest_test.py | 285 +++++++---- .../_model_composer/model_method/BUILD.bazel | 4 +- ...ion_fixture_1.py_fixture => function_1.py} | 0 ...ion_fixture_2.py_fixture => function_2.py} | 0 .../model_method/function_generator.py | 3 +- .../model_method/function_generator_test.py | 4 +- .../model_method/model_method_test.py | 6 +- .../model_runtime/model_runtime.py | 15 +- .../model_runtime/model_runtime_test.py | 48 +- .../model_handlers/huggingface_pipeline.py | 2 +- .../model/_packager/model_handlers/xgboost.py | 15 +- snowflake/ml/model/_signatures/core.py | 37 +- snowflake/ml/model/custom_model.py | 57 +-- snowflake/ml/model/model_signature.py | 33 +- snowflake/ml/model/package_visibility_test.py | 34 ++ snowflake/ml/modeling/_internal/BUILD.bazel | 46 ++ .../_internal/distributed_hpo_trainer.py | 309 ++++++++---- .../ml/modeling/_internal/estimator_utils.py | 21 + .../_internal/model_specifications.py | 13 +- .../_internal/model_specifications_test.py | 249 +++++++++- .../_internal/model_trainer_builder.py | 66 ++- .../_internal/model_trainer_builder_test.py | 84 ++++ .../ml/modeling/_internal/snowpark_trainer.py | 12 +- .../xgboost_external_memory_trainer.py | 444 ++++++++++++++++++ .../xgboost_external_memory_trainer_test.py | 100 ++++ .../modeling/preprocessing/min_max_scaler.py | 16 +- snowflake/ml/monitoring/tests/BUILD.bazel | 5 +- snowflake/ml/registry/BUILD.bazel | 38 +- snowflake/ml/registry/__init__.py | 3 + snowflake/ml/registry/_manager/BUILD.bazel | 40 ++ .../ml/registry/_manager/model_manager.py | 163 +++++++ .../registry/_manager/model_manager_test.py | 351 ++++++++++++++ snowflake/ml/registry/model_registry.py | 12 + 
...sing MODEL via Registry in Snowflake.ipynb | 36 +- .../ml/registry/package_visibility_test.py | 21 + snowflake/ml/registry/registry.py | 188 ++++---- snowflake/ml/registry/registry_test.py | 214 ++------- snowflake/ml/version.bzl | 2 +- .../ml/_internal/env_utils_integ_test.py | 33 +- .../snowflake/ml/extra_tests/BUILD.bazel | 22 + .../ml/extra_tests/fit_transform_test.py | 73 +++ .../xgboost_external_memory_training_test.py | 81 ++++ .../snowflake/ml/image_builds/BUILD.bazel | 2 +- .../image_registry_client_integ_test.py | 8 +- .../ml/model/_client/model/BUILD.bazel | 5 +- .../_client/model/model_impl_integ_test.py | 59 ++- .../model/model_version_impl_integ_test.py | 18 +- .../ml/modeling/model_selection/BUILD.bazel | 12 + .../check_output_hpo_integ_test.py | 243 ++++++++++ .../model_selection/grid_search_integ_test.py | 24 + .../preprocessing/min_max_scaler_test.py | 17 + .../snowflake/ml/registry/model/BUILD.bazel | 1 + .../model/registry_model_test_base.py | 20 +- .../model/registry_tensorflow_model_test.py | 2 + .../ml/registry/model_registry_compat_test.py | 3 + .../integ/snowflake/ml/test_utils/BUILD.bazel | 7 +- .../ml/test_utils/common_test_base.py | 21 +- .../ml/test_utils/spcs_integ_test_base.py | 18 +- .../snowflake/ml/test_utils/test_env_utils.py | 70 +-- third_party/rules_mypy/BUILD.bazel | 3 +- third_party/rules_mypy/main.py | 68 +-- third_party/rules_mypy/mypy.bzl | 166 +++++-- third_party/rules_mypy/templates/BUILD.bazel | 1 + third_party/rules_mypy/templates/mypy.sh.tpl | 44 ++ 150 files changed, 6079 insertions(+), 1542 deletions(-) create mode 100644 ci/type_check/BUILD.bazel create mode 100644 ci/type_check/runner/.gitignore create mode 100755 ci/type_check/type_check.sh create mode 100644 docs/source/model.rst create mode 100644 snowflake/ml/_internal/container_services/image_registry/BUILD.bazel rename snowflake/ml/_internal/{utils/spcs_image_registry.py => container_services/image_registry/credential.py} (100%) rename 
snowflake/ml/_internal/{utils/image_registry_http_client.py => container_services/image_registry/http_client.py} (100%) rename snowflake/ml/_internal/{utils/image_registry_http_client_test.py => container_services/image_registry/http_client_test.py} (98%) rename snowflake/ml/{model/_deploy_client/utils => _internal/container_services/image_registry}/imagelib.py (99%) rename snowflake/ml/{model/_deploy_client/utils/image_registry_client.py => _internal/container_services/image_registry/registry_client.py} (98%) rename snowflake/ml/{model/_deploy_client/utils/image_registry_client_test.py => _internal/container_services/image_registry/registry_client_test.py} (97%) create mode 100644 snowflake/ml/_internal/utils/snowflake_env.py create mode 100644 snowflake/ml/_internal/utils/snowflake_env_test.py create mode 100644 snowflake/ml/model/__init__.py delete mode 100644 snowflake/ml/model/_client/model/model_method_info.py create mode 100644 snowflake/ml/model/_client/sql/tag.py create mode 100644 snowflake/ml/model/_client/sql/tag_test.py create mode 100644 snowflake/ml/model/_model_composer/model_manifest/fixtures/MANIFEST_0.yml create mode 100644 snowflake/ml/model/_model_composer/model_manifest/fixtures/MANIFEST_1.yml create mode 100644 snowflake/ml/model/_model_composer/model_manifest/fixtures/MANIFEST_2.yml create mode 100644 snowflake/ml/model/_model_composer/model_manifest/fixtures/MANIFEST_3.yml rename snowflake/ml/model/_model_composer/model_method/fixtures/{function_fixture_1.py_fixture => function_1.py} (100%) rename snowflake/ml/model/_model_composer/model_method/fixtures/{function_fixture_2.py_fixture => function_2.py} (100%) create mode 100644 snowflake/ml/model/package_visibility_test.py create mode 100644 snowflake/ml/modeling/_internal/model_trainer_builder_test.py create mode 100644 snowflake/ml/modeling/_internal/xgboost_external_memory_trainer.py create mode 100644 snowflake/ml/modeling/_internal/xgboost_external_memory_trainer_test.py create mode 
100644 snowflake/ml/registry/__init__.py create mode 100644 snowflake/ml/registry/_manager/BUILD.bazel create mode 100644 snowflake/ml/registry/_manager/model_manager.py create mode 100644 snowflake/ml/registry/_manager/model_manager_test.py create mode 100644 snowflake/ml/registry/package_visibility_test.py create mode 100644 tests/integ/snowflake/ml/extra_tests/fit_transform_test.py create mode 100644 tests/integ/snowflake/ml/extra_tests/xgboost_external_memory_training_test.py create mode 100644 tests/integ/snowflake/ml/modeling/model_selection/check_output_hpo_integ_test.py create mode 100644 third_party/rules_mypy/templates/BUILD.bazel create mode 100644 third_party/rules_mypy/templates/mypy.sh.tpl diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml index 5e487a89..ff975b24 100644 --- a/.pre-commit-config.yaml +++ b/.pre-commit-config.yaml @@ -1,5 +1,5 @@ --- -exclude: ^(.*egg.info.*|.*/parameters.py$|.*\.py_template|.*/experimental/.*|docs/source/_themes/.*) +exclude: ^(.*egg.info.*|.*/parameters.py$|.*\.py_template|.*/experimental/.*|.*/fixtures/.*|docs/source/_themes/.*) minimum_pre_commit_version: 3.4.0 repos: - repo: https://github.com/asottile/pyupgrade diff --git a/CHANGELOG.md b/CHANGELOG.md index d70fb5ba..da6550e4 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -1,5 +1,32 @@ # Release History +## 1.2.0 + +### Bug Fixes + +- Model Registry: Fix "XGBoost version not compiled with GPU support" error when running CPU inference against open-source + XGBoost models deployed to SPCS. +- Model Registry: Fix model deployment to SPCS on Windows machines. + +### Behavior Changes + +### New Features + +- Model Development: Introduced XGBoost external memory training feature. This feature enables training XGBoost models + on large datasets that don't fit into memory. +- Registry: New Registry class named `snowflake.ml.registry.Registry` providing similar APIs as the old one but works + with new MODEL object in Snowflake SQL. 
Also, we are providing`snowflake.ml.model.Model` and + `snowflake.ml.model.ModelVersion` to represent a model and a specific version of a model. +- Model Development: Add support for `fit_predict` method in `AgglomerativeClustering`, `DBSCAN`, and `OPTICS` classes; +- Model Development: Add support for `fit_transform` method in `MDS`, `SpectralEmbedding` and `TSNE` class. + +### Additional Notes + +- Model Registry: The `snowflake.ml.registry.model_registry.ModelRegistry` has been deprecated starting from version +1.2.0. It will stay in the Private Preview phase. For future implementations, kindly utilize +`snowflake.ml.registry.Registry`, except when specifically required. The old model registry will be removed once all +its primary functionalities are fully integrated into the new registry. + ## 1.1.2 ### Bug Fixes diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md index 2a4074d0..7df6ff5a 100644 --- a/CONTRIBUTING.md +++ b/CONTRIBUTING.md @@ -117,6 +117,9 @@ You can build an entire sub-tree as: ) ``` ++ If the visibility of the target is not `//visibility:public`, you need to make sure your target is visible to + `//bazel:snowml_public_common` to make sure CI type checking work. + ### Type-check #### mypy diff --git a/README.md b/README.md index d4acf9c2..1610d8b8 100644 --- a/README.md +++ b/README.md @@ -23,20 +23,17 @@ model development classes based on sklearn, xgboost, and lightgbm. 1. Framework Connectors: Optimized, secure and performant data provisioning for Pytorch and Tensorflow frameworks in their native data loader formats. -### Snowpark ML Ops [Private Preview] - -[Snowpark MLOps](https://docs.snowflake.com/en/developer-guide/snowpark-ml/index#snowpark-ml-ops) complements the -Snowpark ML Development API, and provides model management capabilities along with integrated deployment into Snowflake. -Currently, the API consists of: - 1. 
FileSet API: FileSet provides a Python fsspec-compliant API for materializing data into a Snowflake internal stage from a query or Snowpark Dataframe along with a number of convenience APIs. -1. Model Registry: A python API for managing models within Snowflake which also supports deployment of ML models into -Snowflake Warehouses as vectorized UDFs. +### Snowpark Model Management [Public Preview] + +[Snowpark Model Management](https://docs.snowflake.com/en/developer-guide/snowpark-ml/index#snowpark-ml-ops) complements +the Snowpark ML Development API, and provides model management capabilities along with integrated deployment into Snowflake. +Currently, the API consists of: -During PrPr, we are iterating on API without backward compatibility guarantees. It is better to recreate your registry -everytime you update the package. This means, at this time, you cannot use the registry for production use. +1. Registry: A python API for managing models within Snowflake which also supports deployment of ML models into Snowflake +as native MODEL object running with Snowflake Warehouse. ## Getting started diff --git a/bazel/BUILD.bazel b/bazel/BUILD.bazel index 7f299669..5b29739a 100644 --- a/bazel/BUILD.bazel +++ b/bazel/BUILD.bazel @@ -14,3 +14,13 @@ sh_binary( name = "test_wrapper", srcs = ["test_wrapper.sh"], ) + +# Package group for common targets in the repo. 
+package_group( + name = "snowml_public_common", + packages = [ + "//bazel/...", + "//ci/...", + "//docs/...", + ], +) diff --git a/ci/conda_recipe/meta.yaml b/ci/conda_recipe/meta.yaml index 00b04f5c..3c244f9d 100644 --- a/ci/conda_recipe/meta.yaml +++ b/ci/conda_recipe/meta.yaml @@ -17,7 +17,7 @@ build: noarch: python package: name: snowflake-ml-python - version: 1.1.2 + version: 1.2.0 requirements: build: - python diff --git a/ci/type_check.sh b/ci/type_check.sh index 5617aee6..927db9bb 100755 --- a/ci/type_check.sh +++ b/ci/type_check.sh @@ -1,92 +1,10 @@ #!/bin/bash -# Usage -# type_check.sh [-a] [-b ] -# -# Flags -# -a: check all targets (excluding the exempted ones). -# -b: specify path to bazel. -# -# Inputs -# - ci/skip_type_checking_targets : a list of target patterns against which -# typechecking should be enforced. -# -# Action -# - Performs typechecking against the intersection of -# type checked targets and affected targets. -# Exit code: -# 0 if succeeds. No target to check means success. -# 1 if there is an error in parsing commandline flag. -# Otherwise exits with bazel's exit code. -# -# NOTE: -# 1. Ignores all targets that depends on targets in `skip_type_checking_targets`. -# 2. Affected targets also include raw python files on top of bazel build targets whereas ignored_targets don't. Hence -# we used `kind('py_.* rule')` filter. - -set -o pipefail -set -u -set -e - -bazel="bazel" -affected_targets="" +# Just an alias to avoid break Jenkins PROG=$0 -help() { - local exit_code=$1 - echo "Usage: ${PROG} [-a] [-b ]" - exit "${exit_code}" -} - -while getopts "ab:h" opt; do - case "${opt}" in - a) - affected_targets="//..." - ;; - b) - bazel="${OPTARG}" - ;; - h) - help 0 - ;; - :) - help 1 - ;; - ?) 
- help 1 - ;; - esac -done - -echo "Using bazel: " "${bazel}" -working_dir=$(mktemp -d "/tmp/tmp_XXXXX") -trap 'rm -rf "${working_dir}"' EXIT - -if [[ -z "${affected_targets}" ]]; then - affected_targets_file="${working_dir}/affected_targets" - ./bazel/get_affected_targets.sh -b "${bazel}" -f "${affected_targets_file}" - - affected_targets="$(<"${affected_targets_file}")" -fi - -printf \ - "let skip_type_checking_targets = set(%s) + set(%s) + set(%s) in \ - let affected_targets = kind('py_.* rule', set(%s)) in \ - let rdeps_targets = rdeps(//..., \$skip_type_checking_targets) in \ - \$affected_targets except \$rdeps_targets" \ - "$("${working_dir}/type_checked_targets_query" -"${bazel}" query --query_file="${working_dir}/type_checked_targets_query" >"${working_dir}/type_checked_targets" -echo "Type checking the following targets:" "$(<"${working_dir}/type_checked_targets")" - -set +e -"${bazel}" build \ - --keep_going \ - --config=typecheck \ - --color=yes \ - --target_pattern_file="${working_dir}/type_checked_targets" -bazel_exit_code=$? +SCRIPT=$(readlink -f "$PROG") +# Absolute path this script is in, thus /home/user/bin +SCRIPTPATH=$(dirname "$SCRIPT") -if [[ $bazel_exit_code -eq 0 || $bazel_exit_code -eq 4 ]]; then - exit 0 -fi -exit ${bazel_exit_code} +"${SCRIPTPATH}/type_check/type_check.sh" "$@" diff --git a/ci/type_check/BUILD.bazel b/ci/type_check/BUILD.bazel new file mode 100644 index 00000000..e69de29b diff --git a/ci/type_check/runner/.gitignore b/ci/type_check/runner/.gitignore new file mode 100644 index 00000000..819ae9cc --- /dev/null +++ b/ci/type_check/runner/.gitignore @@ -0,0 +1 @@ +BUILD.bazel diff --git a/ci/type_check/type_check.sh b/ci/type_check/type_check.sh new file mode 100755 index 00000000..1c8b2d1f --- /dev/null +++ b/ci/type_check/type_check.sh @@ -0,0 +1,111 @@ +#!/bin/bash + +# Usage +# type_check.sh [-a] [-b ] +# +# Flags +# -a: check all targets (excluding the exempted ones). +# -b: specify path to bazel. 
+# +# Inputs +# - ci/skip_type_checking_targets : a list of target patterns against which +# typechecking should be enforced. +# +# Action +# - Create a mypy_test targets to type check all affected targets +# Exit code: +# 0 if succeeds. No target to check means success. +# 1 if there is an error in parsing commandline flag. +# Otherwise exits with bazel's exit code. +# +# NOTE: +# 1. Ignores all targets that depends on targets in `skip_type_checking_targets`. +# 2. Affected targets also include raw python files on top of bazel build targets whereas ignored_targets don't. Hence +# we used `kind('py_.* rule')` filter. + +set -o pipefail +set -u +set -e + +bazel="bazel" +affected_targets="" +PROG=$0 + +SCRIPT=$(readlink -f "$PROG") +# Absolute path this script is in, thus /home/user/bin +SCRIPTPATH=$(dirname "$SCRIPT") + +help() { + local exit_code=$1 + echo "Usage: ${PROG} [-a] [-b ]" + exit "${exit_code}" +} + +while getopts "ab:h" opt; do + case "${opt}" in + a) + affected_targets="//..." + ;; + b) + bazel="${OPTARG}" + ;; + h) + help 0 + ;; + :) + help 1 + ;; + ?) 
+ help 1 + ;; + esac +done + +echo "Using bazel: " "${bazel}" +working_dir=$(mktemp -d "/tmp/tmp_XXXXX") +trap 'rm -rf "${working_dir}"' EXIT +trap 'rm -rf "${SCRIPTPATH}/runner/BUILD.bazel"' EXIT + +if [[ -z "${affected_targets}" ]]; then + affected_targets_file="${working_dir}/affected_targets" + ./bazel/get_affected_targets.sh -b "${bazel}" -f "${affected_targets_file}" + + affected_targets="$(<"${affected_targets_file}")" +fi + +printf \ + "let skip_type_checking_targets = set(%s) + set(%s) + set(%s) in \ + let affected_targets = kind('py_.* rule', set(%s)) in \ + let rdeps_targets = rdeps(//..., \$skip_type_checking_targets) in \ + \$affected_targets except \$rdeps_targets" \ + "$(<"${SCRIPTPATH}/../skip_type_checking_targets")" "$(<"${SCRIPTPATH}/../skip_merge_gate_targets")" "$(<"${SCRIPTPATH}/../skip_continuous_run_targets")" "${affected_targets}" >"${working_dir}/type_checked_targets_query" +type_check_targets=$("${bazel}" query --query_file="${working_dir}/type_checked_targets_query" | awk 'NF { print "\""$0"\","}') + +echo "${type_check_targets}" + +if [[ -z "${type_check_targets}" ]]; then + echo "No target to do the type checking. Bye!" + exit 0 +fi + +cat >"${SCRIPTPATH}/runner/BUILD.bazel" < bool: """ return class_object[1].__module__ == "sklearn.preprocessing._data" + @staticmethod + def _is_manifold_module_obj(class_object: Tuple[str, type]) -> bool: + """Check if the given class belongs to the SKLearn manifold module. + + Args: + class_object: Meta class object which needs to be checked. + + Returns: + True if the class belongs to `sklearn.manifold` module, otherwise False. 
+ """ + return class_object[1].__module__.startswith("sklearn.manifold") + @staticmethod def _is_multioutput_obj(class_object: Tuple[str, type]) -> bool: """Check if the given estimator can learn and predict multiple labels (multi-label not multi-class) @@ -548,6 +581,7 @@ def __init__(self, module_name: str, class_object: Tuple[str, type]) -> None: self.original_predict_docstring = "" self.predict_docstring = "" self.fit_predict_docstring = "" + self.fit_transform_docstring = "" self.predict_proba_docstring = "" self.score_docstring = "" self.predict_log_proba_docstring = "" @@ -570,6 +604,7 @@ def __init__(self, module_name: str, class_object: Tuple[str, type]) -> None: # Optional function support self.fit_predict_cluster_function_support = False + self.fit_transform_manifold_function_support = False # Dependencies self.predict_udf_deps = "" @@ -608,6 +643,7 @@ def _format_default_type(self, default_value: Any) -> str: def _populate_flags(self) -> None: self._from_data_py = WrapperGeneratorFactory._is_data_module_obj(self.class_object) + self._is_manifold = WrapperGeneratorFactory._is_manifold_module_obj(self.class_object) self._is_regressor = WrapperGeneratorFactory._is_regressor_obj(self.class_object) self._is_classifier = WrapperGeneratorFactory._is_classifier_obj(self.class_object) self._is_meta_estimator = WrapperGeneratorFactory._is_meta_estimator_obj(self.class_object) @@ -630,6 +666,7 @@ def _populate_flags(self) -> None: self._is_grid_search_cv = WrapperGeneratorFactory._is_grid_search_cv(self.class_object) self._is_randomized_search_cv = WrapperGeneratorFactory._is_randomized_search_cv(self.class_object) self._is_iterative_imputer = WrapperGeneratorFactory._is_iterative_imputer(self.class_object) + self._is_xgboost = WrapperGeneratorFactory._is_xgboost(self.module_name) def _populate_import_statements(self) -> None: self.estimator_imports_list.append("import numpy") @@ -786,6 +823,18 @@ def _populate_function_names_and_signatures(self) -> None: 
signature_lines.append("sample_weight_col: Optional[str] = None") init_member_args.append("self.set_sample_weight_col(sample_weight_col)") + if self._is_xgboost: + signature_lines.append("use_external_memory_version: bool = False") + signature_lines.append("batch_size: int = 10000") + + init_member_args.append("self._use_external_memory_version = use_external_memory_version") + init_member_args.append("self._batch_size = batch_size") + ADDITIONAL_PARAM_DESCRIPTIONS["use_external_memory_version"] = PARAM_DESC_USE_EXTERNAL_MEMORY_VERSION + ADDITIONAL_PARAM_DESCRIPTIONS["batch_size"] = PARAM_DESC_BATCH_SIZE + else: + init_member_args.append("self._use_external_memory_version = False") + init_member_args.append("self._batch_size = -1") + sklearn_init_lines.append("**cleaned_up_init_args") if has_kwargs: signature_lines.append("**kwargs") @@ -934,6 +983,11 @@ def generate(self) -> "SklearnWrapperGenerator": if self._is_cluster: self.fit_predict_cluster_function_support = True + if self._is_manifold: + self.fit_transform_manifold_function_support = True + + if self._is_manifold: + self.fit_transform_manifold_function_support = True if WrapperGeneratorFactory._is_class_of_type(self.class_object[1], "SelectKBest"): # Set the k of SelectKBest features transformer to half the number of columns in the dataset. 
diff --git a/codegen/sklearn_wrapper_template.py_template b/codegen/sklearn_wrapper_template.py_template index f1270070..35e09c17 100644 --- a/codegen/sklearn_wrapper_template.py_template +++ b/codegen/sklearn_wrapper_template.py_template @@ -48,6 +48,18 @@ _PROJECT = "ModelDevelopment" _SUBPROJECT = "".join([s.capitalize() for s in "{transform.root_module_name}".replace("sklearn.", "").split("_")]) +def _is_fit_predict_method_enabled() -> Callable[[Any], bool]: + def check(self: BaseTransformer) -> TypeGuard[Callable[..., object]]: + return {transform.fit_predict_cluster_function_support} and callable(getattr(self._sklearn_object, "fit_predict", None)) + return check + + +def _is_fit_transform_method_enabled() -> Callable[[Any], bool]: + def check(self: BaseTransformer) -> TypeGuard[Callable[..., object]]: + return {transform.fit_transform_manifold_function_support} and callable(getattr(self._sklearn_object, "fit_transform", None)) + return check + + class {transform.original_class_name}(BaseTransformer): r"""{transform.estimator_class_docstring} """ @@ -123,11 +135,6 @@ class {transform.original_class_name}(BaseTransformer): if isinstance(dataset, DataFrame): session = dataset._session assert session is not None # keep mypy happy - # Validate that key package version in user workspace are supported in snowflake conda channel - # If customer doesn't have package in conda channel, replace the ones have the closest versions - self._deps = pkg_version_utils.get_valid_pkg_versions_supported_in_snowflake_conda_channel( - pkg_versions=self._get_dependencies(), session=session, subproject=_SUBPROJECT) - # Specify input columns so column pruning will be enforced selected_cols = self._get_active_columns() if len(selected_cols) > 0: @@ -155,7 +162,9 @@ class {transform.original_class_name}(BaseTransformer): label_cols=self.label_cols, sample_weight_col=self.sample_weight_col, autogenerated=self._autogenerated, - subproject=_SUBPROJECT + subproject=_SUBPROJECT, + 
use_external_memory_version=self._use_external_memory_version, + batch_size=self._batch_size, ) self._sklearn_object = model_trainer.train() self._is_fitted = True @@ -424,20 +433,27 @@ class {transform.original_class_name}(BaseTransformer): return output_df - @available_if(original_estimator_has_callable("fit_predict")) # type: ignore[misc] - def fit_predict(self, dataset: Union[DataFrame, pd.DataFrame]) -> npt.NDArray[Any]: + @available_if(_is_fit_predict_method_enabled()) # type: ignore[misc] + def fit_predict(self, dataset: Union[DataFrame, pd.DataFrame]) -> Union[Any, npt.NDArray[Any]]: """ {transform.fit_predict_docstring} Returns: Predicted dataset. """ - if {transform.fit_predict_cluster_function_support}: - self.fit(dataset) - assert self._sklearn_object is not None - labels : npt.NDArray[Any] = self._sklearn_object.labels_ - return labels - else: - # TODO(xinyi): support fit_predict for mixture classes - raise NotImplementedError + self.fit(dataset) + assert self._sklearn_object is not None + return self._sklearn_object.labels_ + + + @available_if(_is_fit_transform_method_enabled()) # type: ignore[misc] + def fit_transform(self, dataset: Union[DataFrame, pd.DataFrame]) -> Union[Any, npt.NDArray[Any]]: + """ {transform.fit_transform_docstring} + Returns: + Transformed dataset. + """ + self.fit(dataset) + assert self._sklearn_object is not None + return self._sklearn_object.embedding_ + def _get_output_column_names(self, output_cols_prefix: str, output_cols: Optional[List[str]] = None) -> List[str]: """ Returns the list of output columns for predict_proba(), decision_function(), etc.. functions. diff --git a/docs/source/_templates/autosummary/class.rst b/docs/source/_templates/autosummary/class.rst index 714c1ce2..9332d590 100644 --- a/docs/source/_templates/autosummary/class.rst +++ b/docs/source/_templates/autosummary/class.rst @@ -1,16 +1,15 @@ {% extends "!autosummary/class.rst" %} +.. 
autoclass:: {{ objname }} {% set methods =(methods| reject("equalto", "__init__") |list) %} {% block methods %} {% if methods %} .. rubric:: Methods - - .. autosummary:: - {% for item in methods %} - ~{{ name }}.{{ item }} - {%- endfor %} +{% for item in methods %} + .. automethod:: {{ name }}.{{ item }} +{%- endfor %} {% endif %} {% endblock %} @@ -18,10 +17,8 @@ {% if attributes %} .. rubric:: Attributes - - .. autosummary:: - {% for item in attributes %} - ~{{ name }}.{{ item }} - {%- endfor %} +{% for item in attributes %} + .. autoattribute:: {{ name }}.{{ item }} +{%- endfor %} {% endif %} {% endblock %} diff --git a/docs/source/index.rst b/docs/source/index.rst index 18365333..5477edb9 100644 --- a/docs/source/index.rst +++ b/docs/source/index.rst @@ -24,3 +24,5 @@ Table of Contents modeling fileset + model + registry diff --git a/docs/source/model.rst b/docs/source/model.rst new file mode 100644 index 00000000..8cecd338 --- /dev/null +++ b/docs/source/model.rst @@ -0,0 +1,64 @@ +=========================== +snowflake.ml.model +=========================== + +.. automodule:: snowflake.ml.model + :noindex: + +snowflake.ml.model +--------------------------------- + +.. currentmodule:: snowflake.ml.model + +.. rubric:: Classes + +.. autosummary:: + :toctree: api/model + + Model + ModelVersion + HuggingFacePipelineModel + LLM + LLMOptions + +snowflake.ml.model.custom_model +--------------------------------- + +.. currentmodule:: snowflake.ml.model.custom_model + +.. rubric:: Classes + +.. autosummary:: + :toctree: api/model + + MethodRef + ModelRef + ModelContext + CustomModel + +snowflake.ml.model.model_signature +--------------------------------- + +.. currentmodule:: snowflake.ml.model.model_signature + +.. rubric:: Classes + +.. autosummary:: + :toctree: api/model + + DataType + BaseFeatureSpec + FeatureSpec + ModelSignature + +.. rubric:: Methods + +.. autosummary:: + :toctree: api/model + + infer_signature + + +.. .. rubric:: Attributes + +.. 
None diff --git a/docs/source/registry.rst b/docs/source/registry.rst index ede781ca..832328ce 100644 --- a/docs/source/registry.rst +++ b/docs/source/registry.rst @@ -1,5 +1,3 @@ -:orphan: - =========================== snowflake.ml.registry =========================== @@ -14,8 +12,7 @@ snowflake.ml.registry .. autosummary:: :toctree: api/registry - model_registry.ModelRegistry - model_registry.ModelReference + Registry .. .. rubric:: Methods diff --git a/docs/sphinxconf/conf.py b/docs/sphinxconf/conf.py index b1423dbb..af73cda3 100644 --- a/docs/sphinxconf/conf.py +++ b/docs/sphinxconf/conf.py @@ -29,7 +29,7 @@ extensions = [ "sphinx.ext.autodoc", "sphinx.ext.autosummary", - # "sphinx.ext.napoleon", + "sphinx.ext.napoleon", # "sphinx.ext.coverage", # "sphinx.ext.linkcode" ] @@ -56,6 +56,8 @@ autosummary_generate = True autosummary_generate_overwrite = True +autoclass_content = "both" + # -- Options for HTML output ------------------------------------------------- # The theme to use for HTML and HTML Help pages. 
See the documentation for @@ -85,6 +87,18 @@ suppress_warnings = ["ref"] +# Napoleon settings +napoleon_google_docstring = True +napoleon_numpy_docstring = True +napoleon_use_admonition_for_examples = False +napoleon_use_admonition_for_notes = False +napoleon_use_admonition_for_references = False +napoleon_use_ivar = False +napoleon_use_param = True +napoleon_use_rtype = True +napoleon_preprocess_types = False +napoleon_attr_annotations = True + def setup(app: Any) -> None: app.connect( @@ -166,6 +180,8 @@ def __init__(self, csv_filename: str) -> None: # sphinx expects a function for this, so make instance callable def __call__(self, app: Any, what: str, name: str, obj: ModuleType, skip: bool, options: Dict[str, Any]) -> bool: + if name == "__init__": + return False if name.startswith("_"): return True if what == "method": diff --git a/snowflake/cortex/BUILD.bazel b/snowflake/cortex/BUILD.bazel index da7c8ef7..310c4bb6 100644 --- a/snowflake/cortex/BUILD.bazel +++ b/snowflake/cortex/BUILD.bazel @@ -3,13 +3,15 @@ load("//bazel:py_rules.bzl", "py_library", "py_package", "py_test") package_group( name = "cortex", packages = [ - "//docs/...", "//snowflake/cortex/...", "//snowflake/ml/...", ], ) -package(default_visibility = [":cortex"]) +package(default_visibility = [ + ":cortex", + "//bazel:snowml_public_common", +]) py_library( name = "util", diff --git a/snowflake/ml/_internal/container_services/image_registry/BUILD.bazel b/snowflake/ml/_internal/container_services/image_registry/BUILD.bazel new file mode 100644 index 00000000..2e875839 --- /dev/null +++ b/snowflake/ml/_internal/container_services/image_registry/BUILD.bazel @@ -0,0 +1,56 @@ +load("//bazel:py_rules.bzl", "py_library", "py_test") + +package(default_visibility = ["//visibility:public"]) + +py_library( + name = "credential", + srcs = ["credential.py"], + deps = ["//snowflake/ml/_internal/utils:query_result_checker"], +) + +py_library( + name = "http_client", + srcs = ["http_client.py"], + deps = [ + 
"//snowflake/ml/_internal/exceptions", + "//snowflake/ml/_internal/utils:retryable_http", + "//snowflake/ml/_internal/utils:session_token_manager", + ], +) + +py_test( + name = "http_client_test", + srcs = ["http_client_test.py"], + deps = [ + ":http_client", + "//snowflake/ml/test_utils:mock_session", + ], +) + +py_library( + name = "registry_client", + srcs = ["registry_client.py"], + deps = [ + ":http_client", + ":imagelib", + "//snowflake/ml/_internal/exceptions", + ], +) + +py_library( + name = "imagelib", + srcs = ["imagelib.py"], + deps = [ + ":http_client", + ], +) + +py_test( + name = "registry_client_test", + srcs = ["registry_client_test.py"], + deps = [ + ":registry_client", + "//snowflake/ml/test_utils:exception_utils", + "//snowflake/ml/test_utils:mock_session", + ], +) diff --git a/snowflake/ml/_internal/utils/spcs_image_registry.py b/snowflake/ml/_internal/container_services/image_registry/credential.py similarity index 100% rename from snowflake/ml/_internal/utils/spcs_image_registry.py rename to snowflake/ml/_internal/container_services/image_registry/credential.py diff --git a/snowflake/ml/_internal/utils/image_registry_http_client.py b/snowflake/ml/_internal/container_services/image_registry/http_client.py similarity index 100% rename from snowflake/ml/_internal/utils/image_registry_http_client.py rename to snowflake/ml/_internal/container_services/image_registry/http_client.py diff --git a/snowflake/ml/_internal/utils/image_registry_http_client_test.py b/snowflake/ml/_internal/container_services/image_registry/http_client_test.py similarity index 98% rename from snowflake/ml/_internal/utils/image_registry_http_client_test.py rename to snowflake/ml/_internal/container_services/image_registry/http_client_test.py index d6bf0f69..b5a55c8e 100644 --- a/snowflake/ml/_internal/utils/image_registry_http_client_test.py +++ b/snowflake/ml/_internal/container_services/image_registry/http_client_test.py @@ -5,8 +5,10 @@ from absl.testing import absltest, 
parameterized from absl.testing.absltest import mock +from snowflake.ml._internal.container_services.image_registry import ( + http_client as image_registry_http_client, +) from snowflake.ml._internal.exceptions import exceptions as snowml_exceptions -from snowflake.ml._internal.utils import image_registry_http_client from snowflake.ml.test_utils import mock_session from snowflake.snowpark import session diff --git a/snowflake/ml/model/_deploy_client/utils/imagelib.py b/snowflake/ml/_internal/container_services/image_registry/imagelib.py similarity index 99% rename from snowflake/ml/model/_deploy_client/utils/imagelib.py rename to snowflake/ml/_internal/container_services/image_registry/imagelib.py index 8d0d9115..be366779 100644 --- a/snowflake/ml/model/_deploy_client/utils/imagelib.py +++ b/snowflake/ml/_internal/container_services/image_registry/imagelib.py @@ -23,7 +23,9 @@ import requests -from snowflake.ml._internal.utils import image_registry_http_client +from snowflake.ml._internal.container_services.image_registry import ( + http_client as image_registry_http_client, +) # Common HTTP headers _CONTENT_LENGTH_HEADER = "content-length" diff --git a/snowflake/ml/model/_deploy_client/utils/image_registry_client.py b/snowflake/ml/_internal/container_services/image_registry/registry_client.py similarity index 98% rename from snowflake/ml/model/_deploy_client/utils/image_registry_client.py rename to snowflake/ml/_internal/container_services/image_registry/registry_client.py index df8775b6..d07e6cec 100644 --- a/snowflake/ml/model/_deploy_client/utils/image_registry_client.py +++ b/snowflake/ml/_internal/container_services/image_registry/registry_client.py @@ -3,12 +3,14 @@ from typing import Dict, Optional, cast from urllib.parse import urlunparse +from snowflake.ml._internal.container_services.image_registry import ( + http_client as image_registry_http_client, + imagelib, +) from snowflake.ml._internal.exceptions import ( error_codes, exceptions as 
snowml_exceptions, ) -from snowflake.ml._internal.utils import image_registry_http_client -from snowflake.ml.model._deploy_client.utils import imagelib from snowflake.snowpark import Session from snowflake.snowpark._internal import utils as snowpark_utils diff --git a/snowflake/ml/model/_deploy_client/utils/image_registry_client_test.py b/snowflake/ml/_internal/container_services/image_registry/registry_client_test.py similarity index 97% rename from snowflake/ml/model/_deploy_client/utils/image_registry_client_test.py rename to snowflake/ml/_internal/container_services/image_registry/registry_client_test.py index 4a1f3430..7117e1eb 100644 --- a/snowflake/ml/model/_deploy_client/utils/image_registry_client_test.py +++ b/snowflake/ml/_internal/container_services/image_registry/registry_client_test.py @@ -3,7 +3,9 @@ from absl.testing import absltest from absl.testing.absltest import mock -from snowflake.ml.model._deploy_client.utils import image_registry_client +from snowflake.ml._internal.container_services.image_registry import ( + registry_client as image_registry_client, +) from snowflake.ml.test_utils import mock_session from snowflake.snowpark import session diff --git a/snowflake/ml/_internal/env_utils.py b/snowflake/ml/_internal/env_utils.py index 2ba8a16d..1fd225c3 100644 --- a/snowflake/ml/_internal/env_utils.py +++ b/snowflake/ml/_internal/env_utils.py @@ -33,7 +33,6 @@ class CONDA_OS(Enum): _SNOWFLAKE_CONDA_CHANNEL_URL = "https://repo.anaconda.com/pkgs/snowflake" _NODEFAULTS = "nodefaults" -_INFO_SCHEMA_PACKAGES_HAS_RUNTIME_VERSION: Optional[bool] = None _SNOWFLAKE_INFO_SCHEMA_PACKAGE_CACHE: Dict[str, List[version.Version]] = {} _SNOWFLAKE_CONDA_PACKAGE_CACHE: Dict[str, List[version.Version]] = {} @@ -267,18 +266,6 @@ def relax_requirement_version(req: requirements.Requirement) -> requirements.Req return new_req -def _check_runtime_version_column_existence(session: session.Session) -> bool: - sql = textwrap.dedent( - """ - SHOW COLUMNS - LIKE 
'runtime_version' - IN TABLE information_schema.packages; - """ - ) - result = session.sql(sql).count() - return result == 1 - - def get_matched_package_versions_in_snowflake_conda_channel( req: requirements.Requirement, python_version: str = snowml_env.PYTHON_VERSION, @@ -325,9 +312,9 @@ def get_matched_package_versions_in_snowflake_conda_channel( return matched_versions -def validate_requirements_in_information_schema( +def get_matched_package_versions_in_information_schema( session: session.Session, reqs: List[requirements.Requirement], python_version: str -) -> Optional[List[str]]: +) -> Dict[str, List[version.Version]]: """Look up the information_schema table to check if a package with the specified specifier exists in the Snowflake Conda channel. Note that this is not the source of truth due to the potential delay caused by a package that might exist in the information_schema table but has not yet become available in the Snowflake Conda channel. @@ -338,42 +325,35 @@ def validate_requirements_in_information_schema( python_version: A string of python version where model is run. Returns: - A list of pinned latest version that available in Snowflake anaconda channel and meet the version specifier. + A Dict, whose key is the package name, and value is a list of versions match the requirements. 
""" - global _INFO_SCHEMA_PACKAGES_HAS_RUNTIME_VERSION - - if _INFO_SCHEMA_PACKAGES_HAS_RUNTIME_VERSION is None: - _INFO_SCHEMA_PACKAGES_HAS_RUNTIME_VERSION = _check_runtime_version_column_existence(session) - ret_list = [] - reqs_to_request = [] + ret_dict: Dict[str, List[version.Version]] = {} + reqs_to_request: List[requirements.Requirement] = [] for req in reqs: - if req.name not in _SNOWFLAKE_INFO_SCHEMA_PACKAGE_CACHE: + if req.name in _SNOWFLAKE_INFO_SCHEMA_PACKAGE_CACHE: + available_versions = list( + sorted(req.specifier.filter(set(_SNOWFLAKE_INFO_SCHEMA_PACKAGE_CACHE.get(req.name, [])))) + ) + ret_dict[req.name] = available_versions + else: reqs_to_request.append(req) + if reqs_to_request: pkg_names_str = " OR ".join( f"package_name = '{req_name}'" for req_name in sorted(req.name for req in reqs_to_request) ) - if _INFO_SCHEMA_PACKAGES_HAS_RUNTIME_VERSION: - parsed_python_version = version.Version(python_version) - sql = textwrap.dedent( - f""" - SELECT PACKAGE_NAME, VERSION - FROM information_schema.packages - WHERE ({pkg_names_str}) - AND language = 'python' - AND (runtime_version = '{parsed_python_version.major}.{parsed_python_version.minor}' - OR runtime_version is null); - """ - ) - else: - sql = textwrap.dedent( - f""" - SELECT PACKAGE_NAME, VERSION - FROM information_schema.packages - WHERE ({pkg_names_str}) - AND language = 'python'; - """ - ) + + parsed_python_version = version.Version(python_version) + sql = textwrap.dedent( + f""" + SELECT PACKAGE_NAME, VERSION + FROM information_schema.packages + WHERE ({pkg_names_str}) + AND language = 'python' + AND (runtime_version = '{parsed_python_version.major}.{parsed_python_version.minor}' + OR runtime_version is null); + """ + ) try: result = ( @@ -392,14 +372,13 @@ def validate_requirements_in_information_schema( cached_req_ver_list.append(req_ver) _SNOWFLAKE_INFO_SCHEMA_PACKAGE_CACHE[req_name] = cached_req_ver_list except snowflake.connector.DataError: - return None - for req in reqs: - 
available_versions = list(req.specifier.filter(set(_SNOWFLAKE_INFO_SCHEMA_PACKAGE_CACHE.get(req.name, [])))) - if not available_versions: - return None - else: - ret_list.append(str(req)) - return sorted(ret_list) + return ret_dict + for req in reqs_to_request: + available_versions = list( + sorted(req.specifier.filter(set(_SNOWFLAKE_INFO_SCHEMA_PACKAGE_CACHE.get(req.name, [])))) + ) + ret_dict[req.name] = available_versions + return ret_dict def save_conda_env_file( diff --git a/snowflake/ml/_internal/env_utils_test.py b/snowflake/ml/_internal/env_utils_test.py index 176ddb0d..07068605 100644 --- a/snowflake/ml/_internal/env_utils_test.py +++ b/snowflake/ml/_internal/env_utils_test.py @@ -9,7 +9,7 @@ import yaml from absl.testing import absltest -from packaging import requirements, specifiers +from packaging import requirements, specifiers, version from snowflake.ml._internal import env as snowml_env, env_utils from snowflake.ml.test_utils import mock_data_frame, mock_session @@ -294,25 +294,17 @@ def test_relax_requirement_version(self) -> None: self.assertEqual(env_utils.relax_requirement_version(r), requirements.Requirement("python-package")) self.assertIsNot(env_utils.relax_requirement_version(r), r) - def test_validate_requirements_in_information_schema(self) -> None: + def test_get_matched_package_versions_in_information_schema(self) -> None: m_session = mock_session.MockSession(conn=None, test_case=self) - m_session.add_mock_sql( - query=textwrap.dedent( - """ - SHOW COLUMNS - LIKE 'runtime_version' - IN TABLE information_schema.packages; - """ - ), - result=mock_data_frame.MockDataFrame(count_result=0), - ) query = textwrap.dedent( - """ + f""" SELECT PACKAGE_NAME, VERSION FROM information_schema.packages WHERE (package_name = 'pytorch' OR package_name = 'xgboost') - AND language = 'python'; + AND language = 'python' + AND (runtime_version = '{platform.python_version_tuple()[0]}.{platform.python_version_tuple()[1]}' + OR runtime_version is null); """ ) 
sql_result = [ @@ -325,34 +317,42 @@ def test_validate_requirements_in_information_schema(self) -> None: m_session.add_mock_sql(query=query, result=mock_data_frame.MockDataFrame(sql_result)) c_session = cast(session.Session, m_session) - self.assertListEqual( - env_utils.validate_requirements_in_information_schema( + self.assertDictEqual( + env_utils.get_matched_package_versions_in_information_schema( session=c_session, reqs=[requirements.Requirement("xgboost"), requirements.Requirement("pytorch")], python_version=snowml_env.PYTHON_VERSION, ), - sorted(["xgboost", "pytorch"]), + { + "xgboost": list(map(version.parse, ["1.3.3", "1.5.1", "1.7.3"])), + "pytorch": list(map(version.parse, ["1.12.1"])), + }, ) # Test cache - self.assertListEqual( - env_utils.validate_requirements_in_information_schema( + self.assertDictEqual( + env_utils.get_matched_package_versions_in_information_schema( session=c_session, reqs=[requirements.Requirement("xgboost"), requirements.Requirement("pytorch")], python_version=snowml_env.PYTHON_VERSION, ), - sorted(["xgboost", "pytorch"]), + { + "xgboost": list(map(version.parse, ["1.3.3", "1.5.1", "1.7.3"])), + "pytorch": list(map(version.parse, ["1.12.1"])), + }, ) # clear cache env_utils._SNOWFLAKE_INFO_SCHEMA_PACKAGE_CACHE = {} query = textwrap.dedent( - """ + f""" SELECT PACKAGE_NAME, VERSION FROM information_schema.packages WHERE (package_name = 'xgboost') - AND language = 'python'; + AND language = 'python' + AND (runtime_version = '{platform.python_version_tuple()[0]}.{platform.python_version_tuple()[1]}' + OR runtime_version is null); """ ) sql_result = [ @@ -365,31 +365,37 @@ def test_validate_requirements_in_information_schema(self) -> None: m_session.add_mock_sql(query=query, result=mock_data_frame.MockDataFrame(sql_result)) c_session = cast(session.Session, m_session) - self.assertListEqual( - env_utils.validate_requirements_in_information_schema( + self.assertDictEqual( + env_utils.get_matched_package_versions_in_information_schema( 
session=c_session, reqs=[requirements.Requirement("xgboost")], python_version=snowml_env.PYTHON_VERSION, ), - ["xgboost"], + { + "xgboost": list(map(version.parse, ["1.3.3", "1.5.1", "1.7.3"])), + }, ) # Test cache - self.assertListEqual( - env_utils.validate_requirements_in_information_schema( + self.assertDictEqual( + env_utils.get_matched_package_versions_in_information_schema( session=c_session, reqs=[requirements.Requirement("xgboost")], python_version=snowml_env.PYTHON_VERSION, ), - ["xgboost"], + { + "xgboost": list(map(version.parse, ["1.3.3", "1.5.1", "1.7.3"])), + }, ) query = textwrap.dedent( - """ + f""" SELECT PACKAGE_NAME, VERSION FROM information_schema.packages WHERE (package_name = 'pytorch') - AND language = 'python'; + AND language = 'python' + AND (runtime_version = '{platform.python_version_tuple()[0]}.{platform.python_version_tuple()[1]}' + OR runtime_version is null); """ ) sql_result = [ @@ -400,34 +406,42 @@ def test_validate_requirements_in_information_schema(self) -> None: m_session.add_mock_sql(query=query, result=mock_data_frame.MockDataFrame(sql_result)) c_session = cast(session.Session, m_session) - self.assertListEqual( - env_utils.validate_requirements_in_information_schema( + self.assertDictEqual( + env_utils.get_matched_package_versions_in_information_schema( session=c_session, reqs=[requirements.Requirement("xgboost"), requirements.Requirement("pytorch")], python_version=snowml_env.PYTHON_VERSION, ), - sorted(["xgboost", "pytorch"]), + { + "xgboost": list(map(version.parse, ["1.3.3", "1.5.1", "1.7.3"])), + "pytorch": list(map(version.parse, ["1.12.1"])), + }, ) # Test cache - self.assertListEqual( - env_utils.validate_requirements_in_information_schema( + self.assertDictEqual( + env_utils.get_matched_package_versions_in_information_schema( session=c_session, reqs=[requirements.Requirement("xgboost"), requirements.Requirement("pytorch")], python_version=snowml_env.PYTHON_VERSION, ), - sorted(["xgboost", "pytorch"]), + { + 
"xgboost": list(map(version.parse, ["1.3.3", "1.5.1", "1.7.3"])), + "pytorch": list(map(version.parse, ["1.12.1"])), + }, ) # clear cache env_utils._SNOWFLAKE_INFO_SCHEMA_PACKAGE_CACHE = {} query = textwrap.dedent( - """ + f""" SELECT PACKAGE_NAME, VERSION FROM information_schema.packages WHERE (package_name = 'xgboost') - AND language = 'python'; + AND language = 'python' + AND (runtime_version = '{platform.python_version_tuple()[0]}.{platform.python_version_tuple()[1]}' + OR runtime_version is null); """ ) sql_result = [ @@ -439,40 +453,49 @@ def test_validate_requirements_in_information_schema(self) -> None: m_session.add_mock_sql(query=query, result=mock_data_frame.MockDataFrame(sql_result)) c_session = cast(session.Session, m_session) - self.assertListEqual( - env_utils.validate_requirements_in_information_schema( + self.assertDictEqual( + env_utils.get_matched_package_versions_in_information_schema( session=c_session, reqs=[requirements.Requirement("xgboost==1.7.3")], python_version=snowml_env.PYTHON_VERSION, ), - ["xgboost==1.7.3"], + { + "xgboost": list(map(version.parse, ["1.7.3"])), + }, ) # Test cache - self.assertListEqual( - env_utils.validate_requirements_in_information_schema( + self.assertDictEqual( + env_utils.get_matched_package_versions_in_information_schema( session=c_session, reqs=[requirements.Requirement("xgboost==1.7.3")], python_version=snowml_env.PYTHON_VERSION, ), - ["xgboost==1.7.3"], + { + "xgboost": list(map(version.parse, ["1.7.3"])), + }, ) - self.assertListEqual( - env_utils.validate_requirements_in_information_schema( + self.assertDictEqual( + env_utils.get_matched_package_versions_in_information_schema( session=c_session, reqs=[requirements.Requirement("xgboost>=1.7,<1.8")], python_version=snowml_env.PYTHON_VERSION, ), - ["xgboost<1.8,>=1.7"], + { + "xgboost": list(map(version.parse, ["1.7.0", "1.7.1", "1.7.3"])), + }, ) - self.assertIsNone( - env_utils.validate_requirements_in_information_schema( + self.assertDictEqual( + 
env_utils.get_matched_package_versions_in_information_schema( session=c_session, reqs=[requirements.Requirement("xgboost==1.7.1, ==1.7.3")], python_version=snowml_env.PYTHON_VERSION, - ) + ), + { + "xgboost": list(map(version.parse, [])), + }, ) # clear cache @@ -481,23 +504,27 @@ def test_validate_requirements_in_information_schema(self) -> None: m_session.add_mock_sql(query=query, result=mock_data_frame.MockDataFrame(sql_result)) c_session = cast(session.Session, m_session) - self.assertListEqual( - env_utils.validate_requirements_in_information_schema( + self.assertDictEqual( + env_utils.get_matched_package_versions_in_information_schema( session=c_session, reqs=[requirements.Requirement("xgboost==1.7.*")], python_version=snowml_env.PYTHON_VERSION, ), - ["xgboost==1.7.*"], + { + "xgboost": list(map(version.parse, ["1.7.0", "1.7.1", "1.7.3"])), + }, ) # Test cache - self.assertListEqual( - env_utils.validate_requirements_in_information_schema( + self.assertDictEqual( + env_utils.get_matched_package_versions_in_information_schema( session=c_session, reqs=[requirements.Requirement("xgboost==1.7.*")], python_version=snowml_env.PYTHON_VERSION, ), - ["xgboost==1.7.*"], + { + "xgboost": list(map(version.parse, ["1.7.0", "1.7.1", "1.7.3"])), + }, ) # clear cache @@ -506,98 +533,55 @@ def test_validate_requirements_in_information_schema(self) -> None: m_session.add_mock_sql(query=query, result=mock_data_frame.MockDataFrame(sql_result)) c_session = cast(session.Session, m_session) - self.assertIsNone( - env_utils.validate_requirements_in_information_schema( + self.assertDictEqual( + env_utils.get_matched_package_versions_in_information_schema( session=c_session, reqs=[requirements.Requirement("xgboost==1.3.*")], python_version=snowml_env.PYTHON_VERSION, - ) + ), + { + "xgboost": list(map(version.parse, [])), + }, ) # Test cache - self.assertIsNone( - env_utils.validate_requirements_in_information_schema( + self.assertDictEqual( + 
env_utils.get_matched_package_versions_in_information_schema( session=c_session, reqs=[requirements.Requirement("xgboost==1.3.*")], python_version=snowml_env.PYTHON_VERSION, - ) + ), + { + "xgboost": list(map(version.parse, [])), + }, ) # clear cache env_utils._SNOWFLAKE_INFO_SCHEMA_PACKAGE_CACHE = {} - query = textwrap.dedent( - """ - SELECT PACKAGE_NAME, VERSION - FROM information_schema.packages - WHERE (package_name = 'python-package') - AND language = 'python'; - """ - ) - sql_result = [row.Row()] - - m_session = mock_session.MockSession(conn=None, test_case=self) - m_session.add_mock_sql(query=query, result=mock_data_frame.MockDataFrame(sql_result)) - c_session = cast(session.Session, m_session) - - self.assertIsNone( - env_utils.validate_requirements_in_information_schema( - session=c_session, - reqs=[requirements.Requirement("python-package")], - python_version=snowml_env.PYTHON_VERSION, - ) - ) - - env_utils._INFO_SCHEMA_PACKAGES_HAS_RUNTIME_VERSION = None - m_session = mock_session.MockSession(conn=None, test_case=self) - m_session.add_mock_sql( - query=textwrap.dedent( - """ - SHOW COLUMNS - LIKE 'runtime_version' - IN TABLE information_schema.packages; - """ - ), - result=mock_data_frame.MockDataFrame(count_result=1), - ) - query = textwrap.dedent( f""" SELECT PACKAGE_NAME, VERSION FROM information_schema.packages - WHERE (package_name = 'pytorch' OR package_name = 'xgboost') + WHERE (package_name = 'python-package') AND language = 'python' AND (runtime_version = '{platform.python_version_tuple()[0]}.{platform.python_version_tuple()[1]}' OR runtime_version is null); """ ) - sql_result = [ - row.Row(PACKAGE_NAME="xgboost", VERSION="1.3.3"), - row.Row(PACKAGE_NAME="xgboost", VERSION="1.5.1"), - row.Row(PACKAGE_NAME="xgboost", VERSION="1.7.3"), - row.Row(PACKAGE_NAME="pytorch", VERSION="1.12.1"), - ] + sql_result = [row.Row()] + m_session = mock_session.MockSession(conn=None, test_case=self) m_session.add_mock_sql(query=query, 
result=mock_data_frame.MockDataFrame(sql_result)) c_session = cast(session.Session, m_session) - self.assertListEqual( - env_utils.validate_requirements_in_information_schema( - session=c_session, - reqs=[requirements.Requirement("xgboost"), requirements.Requirement("pytorch")], - python_version=snowml_env.PYTHON_VERSION, - ), - sorted(["xgboost", "pytorch"]), - ) - - # Test cache - self.assertListEqual( - env_utils.validate_requirements_in_information_schema( + self.assertDictEqual( + env_utils.get_matched_package_versions_in_information_schema( session=c_session, - reqs=[requirements.Requirement("xgboost"), requirements.Requirement("pytorch")], + reqs=[requirements.Requirement("python-package")], python_version=snowml_env.PYTHON_VERSION, ), - sorted(["xgboost", "pytorch"]), + {}, ) def test_parse_python_version_string(self) -> None: diff --git a/snowflake/ml/_internal/file_utils.py b/snowflake/ml/_internal/file_utils.py index fb7387e9..7c05065b 100644 --- a/snowflake/ml/_internal/file_utils.py +++ b/snowflake/ml/_internal/file_utils.py @@ -362,3 +362,20 @@ def download_directory_from_stage( wait_exponential_multiplier=100, wait_exponential_max=10000, )(file_operation.get)(str(stage_file_path), str(local_file_dir), statement_params=statement_params) + + +def open_file(path: str, *args: Any, **kwargs: Any) -> Any: + """This function is a wrapper on top of the Python built-in "open" function, with a few added default values + to ensure successful execution across different platforms. + + Args: + path: file path + *args: arguments. + **kwargs: key arguments. + + Returns: + Open file and return a stream. 
+ """ + kwargs.setdefault("newline", "\n") + kwargs.setdefault("encoding", "utf-8") + return open(path, *args, **kwargs) diff --git a/snowflake/ml/_internal/utils/BUILD.bazel b/snowflake/ml/_internal/utils/BUILD.bazel index 6aca28de..423ee9ff 100644 --- a/snowflake/ml/_internal/utils/BUILD.bazel +++ b/snowflake/ml/_internal/utils/BUILD.bazel @@ -151,12 +151,6 @@ py_library( srcs = ["result.py"], ) -py_library( - name = "spcs_image_registry", - srcs = ["spcs_image_registry.py"], - deps = [":query_result_checker"], -) - py_library( name = "table_manager", srcs = [ @@ -218,38 +212,37 @@ py_library( ) py_library( - name = "image_registry_http_client", - srcs = ["image_registry_http_client.py"], + name = "spcs_attribution_utils", + srcs = ["spcs_attribution_utils.py"], deps = [ - ":session_token_manager", - "//snowflake/ml/_internal/exceptions", - "//snowflake/ml/_internal/utils:retryable_http", + ":query_result_checker", + "//snowflake/ml/_internal:telemetry", ], ) py_test( - name = "image_registry_http_client_test", - srcs = ["image_registry_http_client_test.py"], + name = "spcs_attribution_utils_test", + srcs = ["spcs_attribution_utils_test.py"], deps = [ - ":image_registry_http_client", + ":spcs_attribution_utils", + "//snowflake/ml/test_utils:mock_data_frame", "//snowflake/ml/test_utils:mock_session", ], ) py_library( - name = "spcs_attribution_utils", - srcs = ["spcs_attribution_utils.py"], + name = "snowflake_env", + srcs = ["snowflake_env.py"], deps = [ ":query_result_checker", - "//snowflake/ml/_internal:telemetry", ], ) py_test( - name = "spcs_attribution_utils_test", - srcs = ["spcs_attribution_utils_test.py"], + name = "snowflake_env_test", + srcs = ["snowflake_env_test.py"], deps = [ - ":spcs_attribution_utils", + ":snowflake_env", "//snowflake/ml/test_utils:mock_data_frame", "//snowflake/ml/test_utils:mock_session", ], diff --git a/snowflake/ml/_internal/utils/query_result_checker.py b/snowflake/ml/_internal/utils/query_result_checker.py index 
f5244ecf..a51068e2 100644 --- a/snowflake/ml/_internal/utils/query_result_checker.py +++ b/snowflake/ml/_internal/utils/query_result_checker.py @@ -60,9 +60,13 @@ def result_dimension_matcher( return True -def column_name_matcher(expected_col_name: str, result: list[snowpark.Row], sql: str | None = None) -> bool: +def column_name_matcher( + expected_col_name: str, allow_empty: bool, result: list[snowpark.Row], sql: str | None = None +) -> bool: """Returns true if `expected_col_name` is found. Raise exception otherwise.""" if not result: + if allow_empty: + return True raise connector.DataError(f"Query Result is empty.{_query_log(sql)}") if expected_col_name not in result[0]: raise connector.DataError( @@ -159,16 +163,17 @@ def has_dimensions(self, expected_rows: int | None = None, expected_cols: int | self._success_matchers.append(partial(result_dimension_matcher, expected_rows, expected_cols)) return self - def has_column(self, expected_col_name: str) -> ResultValidator: + def has_column(self, expected_col_name: str, allow_empty: bool = False) -> ResultValidator: """Validate that the a column with the name `expected_column_name` exists in the result. Args: expected_col_name: Name of the column that is expected to be present in the result (case sensitive). + allow_empty: If the check will fail if the result is empty. Returns: ResultValidator object (self) """ - self._success_matchers.append(partial(column_name_matcher, expected_col_name)) + self._success_matchers.append(partial(column_name_matcher, expected_col_name, allow_empty)) return self def has_named_value_match(self, row_idx: int, col_name: str, expected_value: Any) -> ResultValidator: @@ -224,8 +229,6 @@ def validate(self) -> list[snowpark.Row]: Returns: Query result. 
""" - if len(self._success_matchers) == 0: - self._success_matchers = _DEFAULT_MATCHERS result = self._get_result() for matcher in self._success_matchers: assert matcher(result, self._query) diff --git a/snowflake/ml/_internal/utils/query_result_checker_test.py b/snowflake/ml/_internal/utils/query_result_checker_test.py index caca3989..d3319038 100644 --- a/snowflake/ml/_internal/utils/query_result_checker_test.py +++ b/snowflake/ml/_internal/utils/query_result_checker_test.py @@ -25,9 +25,10 @@ def test_column_name_matcher(self) -> None: """Test column_name_matcher().""" row1 = Row(name1=1, name2=2) row2 = Row(name1=3, name2=4) - self.assertTrue(query_result_checker.column_name_matcher("name1", [row1, row2])) - self.assertRaises(DataError, query_result_checker.column_name_matcher, "name1", []) - self.assertRaises(DataError, query_result_checker.column_name_matcher, "name3", [row1, row2]) + self.assertTrue(query_result_checker.column_name_matcher("name1", False, [row1, row2])) + self.assertTrue(query_result_checker.column_name_matcher("name1", True, [])) + self.assertRaises(DataError, query_result_checker.column_name_matcher, "name1", False, []) + self.assertRaises(DataError, query_result_checker.column_name_matcher, "name3", False, [row1, row2]) def test_result_validator_dimensions_partial_ok(self) -> None: """Use the base ResultValidator to verify the dimensions of an operation result.""" @@ -112,6 +113,19 @@ def test_sql_result_validator_column_ok(self) -> None: ) self.assertEqual(actual_result, sql_result) + def test_sql_result_validator_column_empty(self) -> None: + """Use SqlResultValidator to check that a specific column exists in the result.""" + session = mock_session.MockSession(conn=None, test_case=self) + query = "CREATE TABLE TEMP" + sql_result: List[Row] = [] + session.add_mock_sql(query=query, result=mock_data_frame.MockDataFrame(sql_result)) + actual_result = ( + query_result_checker.SqlResultValidator(session=cast(snowpark.Session, session), 
query=query) + .has_column(expected_col_name="status", allow_empty=True) + .validate() + ) + self.assertEqual(actual_result, sql_result) + def test_sql_result_validator_column_fail(self) -> None: """Use SqlResultValidator to check that a specific column exists in the result but we the column is missing.""" session = mock_session.MockSession(conn=None, test_case=self) diff --git a/snowflake/ml/_internal/utils/snowflake_env.py b/snowflake/ml/_internal/utils/snowflake_env.py new file mode 100644 index 00000000..1dc41abe --- /dev/null +++ b/snowflake/ml/_internal/utils/snowflake_env.py @@ -0,0 +1,95 @@ +import enum +from typing import Any, Dict, Optional, TypedDict, cast + +from packaging import version +from typing_extensions import Required + +from snowflake.ml._internal.utils import query_result_checker +from snowflake.snowpark import session + + +def get_current_snowflake_version( + sess: session.Session, *, statement_params: Optional[Dict[str, Any]] = None +) -> version.Version: + """Get Snowflake Version as a version.Version object follow PEP way of versioning, that is to say: + "7.44.2 b202312132139364eb71238" to + + Args: + sess: Snowpark Session. + statement_params: Statement params. Defaults to None. + + Returns: + The version of Snowflake Version. 
+ """ + res = ( + query_result_checker.SqlResultValidator( + sess, "SELECT CURRENT_VERSION() AS CURRENT_VERSION", statement_params=statement_params + ) + .has_dimensions(expected_rows=1, expected_cols=1) + .validate()[0] + ) + + version_str = res.CURRENT_VERSION + assert isinstance(version_str, str) + + version_str = "+".join(version_str.split()) + return version.parse(version_str) + + +class SnowflakeCloudType(enum.Enum): + AWS = "aws" + AZURE = "azure" + GCP = "gcp" + + @classmethod + def from_value(cls, value: str) -> "SnowflakeCloudType": + assert value + for k in cls: + if k.value == value.lower(): + return k + else: + raise ValueError(f"'{cls.__name__}' enum not found for '{value}'") + + +class SnowflakeRegion(TypedDict): + region_group: Required[str] + snowflake_region: Required[str] + cloud: Required[SnowflakeCloudType] + region: Required[str] + display_name: Required[str] + + +def get_regions( + sess: session.Session, *, statement_params: Optional[Dict[str, Any]] = None +) -> Dict[str, SnowflakeRegion]: + res = ( + query_result_checker.SqlResultValidator(sess, "SHOW REGIONS", statement_params=statement_params) + .has_column("region_group") + .has_column("snowflake_region") + .has_column("cloud") + .has_column("region") + .has_column("display_name") + .validate() + ) + return { + f"{r.region_group}.{r.snowflake_region}": SnowflakeRegion( + region_group=r.region_group, + snowflake_region=r.snowflake_region, + cloud=SnowflakeCloudType.from_value(r.cloud), + region=r.region, + display_name=r.display_name, + ) + for r in res + } + + +def get_current_region_id(sess: session.Session, *, statement_params: Optional[Dict[str, Any]] = None) -> str: + res = ( + query_result_checker.SqlResultValidator( + sess, "SELECT CURRENT_REGION() AS CURRENT_REGION", statement_params=statement_params + ) + .has_dimensions(expected_rows=1, expected_cols=1) + .validate()[0] + ) + + return cast(str, res.CURRENT_REGION) diff --git a/snowflake/ml/_internal/utils/snowflake_env_test.py 
b/snowflake/ml/_internal/utils/snowflake_env_test.py new file mode 100644 index 00000000..850cc21f --- /dev/null +++ b/snowflake/ml/_internal/utils/snowflake_env_test.py @@ -0,0 +1,93 @@ +from typing import cast + +from absl.testing import absltest +from packaging import version + +from snowflake.ml._internal.utils import snowflake_env +from snowflake.ml.test_utils import mock_data_frame, mock_session +from snowflake.snowpark import Row, Session + + +class SnowflakeEnvTest(absltest.TestCase): + def test_current_snowflake_version_1(self) -> None: + session = mock_session.MockSession(conn=None, test_case=self) + query = "SELECT CURRENT_VERSION() AS CURRENT_VERSION" + sql_result = [Row(CURRENT_VERSION="8.0.0")] + session.add_mock_sql(query=query, result=mock_data_frame.MockDataFrame(sql_result)) + actual_result = snowflake_env.get_current_snowflake_version(cast(Session, session)) + self.assertEqual(actual_result, version.parse("8.0.0")) + + def test_current_snowflake_version_2(self) -> None: + session = mock_session.MockSession(conn=None, test_case=self) + query = "SELECT CURRENT_VERSION() AS CURRENT_VERSION" + sql_result = [Row(CURRENT_VERSION="8.0.0 1234567890ab")] + session.add_mock_sql(query=query, result=mock_data_frame.MockDataFrame(sql_result)) + actual_result = snowflake_env.get_current_snowflake_version(cast(Session, session)) + self.assertEqual(actual_result, version.parse("8.0.0+1234567890ab")) + + def test_get_regions(self) -> None: + session = mock_session.MockSession(conn=None, test_case=self) + query = "SHOW REGIONS" + sql_result = [ + Row( + region_group="PUBLIC", + snowflake_region="AWS_US_WEST_2", + cloud="aws", + region="us-west-2", + display_name="US West (Oregon)", + ), + Row( + region_group="PUBLIC", + snowflake_region="AZURE_EASTUS2", + cloud="azure", + region="eastus2", + display_name="East US 2 (Virginia)", + ), + Row( + region_group="PUBLIC", + snowflake_region="GCP_EUROPE_WEST2", + cloud="gcp", + region="europe-west2", + display_name="Europe 
West 2 (London)", + ), + ] + session.add_mock_sql(query=query, result=mock_data_frame.MockDataFrame(sql_result)) + actual_result = snowflake_env.get_regions(cast(Session, session)) + self.assertDictEqual( + { + "PUBLIC.AWS_US_WEST_2": snowflake_env.SnowflakeRegion( + region_group="PUBLIC", + snowflake_region="AWS_US_WEST_2", + cloud=snowflake_env.SnowflakeCloudType.AWS, + region="us-west-2", + display_name="US West (Oregon)", + ), + "PUBLIC.AZURE_EASTUS2": snowflake_env.SnowflakeRegion( + region_group="PUBLIC", + snowflake_region="AZURE_EASTUS2", + cloud=snowflake_env.SnowflakeCloudType.AZURE, + region="eastus2", + display_name="East US 2 (Virginia)", + ), + "PUBLIC.GCP_EUROPE_WEST2": snowflake_env.SnowflakeRegion( + region_group="PUBLIC", + snowflake_region="GCP_EUROPE_WEST2", + cloud=snowflake_env.SnowflakeCloudType.GCP, + region="europe-west2", + display_name="Europe West 2 (London)", + ), + }, + actual_result, + ) + + def test_get_current_region_id(self) -> None: + session = mock_session.MockSession(conn=None, test_case=self) + query = "SELECT CURRENT_REGION() AS CURRENT_REGION" + sql_result = [Row(CURRENT_REGION="PUBLIC.AWS_US_WEST_2")] + session.add_mock_sql(query=query, result=mock_data_frame.MockDataFrame(sql_result)) + actual_result = snowflake_env.get_current_region_id(cast(Session, session)) + self.assertEqual(actual_result, "PUBLIC.AWS_US_WEST_2") + + +if __name__ == "__main__": + absltest.main() diff --git a/snowflake/ml/feature_store/BUILD.bazel b/snowflake/ml/feature_store/BUILD.bazel index 88cf42bd..bbd441d6 100644 --- a/snowflake/ml/feature_store/BUILD.bazel +++ b/snowflake/ml/feature_store/BUILD.bazel @@ -7,7 +7,10 @@ package_group( ], ) -package(default_visibility = [":feature_store"]) +package(default_visibility = [ + ":feature_store", + "//bazel:snowml_public_common", +]) py_library( name = "init", diff --git a/snowflake/ml/feature_store/_internal/BUILD.bazel b/snowflake/ml/feature_store/_internal/BUILD.bazel index 41437b73..cfd68961 100644 --- 
a/snowflake/ml/feature_store/_internal/BUILD.bazel +++ b/snowflake/ml/feature_store/_internal/BUILD.bazel @@ -1,6 +1,9 @@ load("//bazel:py_rules.bzl", "py_library") -package(default_visibility = ["//snowflake/ml/feature_store"]) +package(default_visibility = [ + "//bazel:snowml_public_common", + "//snowflake/ml/feature_store", +]) py_library( name = "synthetic_data_generator", diff --git a/snowflake/ml/feature_store/_internal/scripts/BUILD.bazel b/snowflake/ml/feature_store/_internal/scripts/BUILD.bazel index 26a39beb..2ccfc0d7 100644 --- a/snowflake/ml/feature_store/_internal/scripts/BUILD.bazel +++ b/snowflake/ml/feature_store/_internal/scripts/BUILD.bazel @@ -1,5 +1,9 @@ load("//bazel:py_rules.bzl", "py_binary") +package(default_visibility = [ + "//bazel:snowml_public_common", +]) + py_binary( name = "run_synthetic_data_generator", srcs = [ diff --git a/snowflake/ml/feature_store/_internal/scripts/upload_test_datasets.py b/snowflake/ml/feature_store/_internal/scripts/upload_test_datasets.py index 4f815d96..cf4d4ddb 100644 --- a/snowflake/ml/feature_store/_internal/scripts/upload_test_datasets.py +++ b/snowflake/ml/feature_store/_internal/scripts/upload_test_datasets.py @@ -1,5 +1,11 @@ -# A helper script cleans open taxi data (https://www.nyc.gov/site/tlc/about/tlc-trip-record-data.page) -# and store into snowflake database. +""" +A helper script cleans open taxi data (https://www.nyc.gov/site/tlc/about/tlc-trip-record-data.page) +and store into snowflake database. + +Download yellow trip data(2016 Jan): https://www.nyc.gov/site/tlc/about/tlc-trip-record-data.page. 
+Download wine data: +https://www.google.com/url?q=https://github.com/snowflakedb/snowml/blob/main/snowflake/ml/feature_store/notebooks/customer_demo/winequality-red.csv&sa=D&source=docs&ust=1702084016573738&usg=AOvVaw3r_muH0_LKBDr45C1Gj3cb +""" from absl.logging import logging @@ -56,7 +62,7 @@ def create_winedata(sess: Session, overwrite_mode: str) -> None: full_table_name = f"{FS_INTEG_TEST_DB}.{FS_INTEG_TEST_DATASET_SCHEMA}.{FS_INTEG_TEST_WINE_QUALITY_DATA}" df = ( - sess.read.options({"field_delimiter": ",", "skip_header": 1}) + sess.read.options({"field_delimiter": ";", "skip_header": 1}) .schema(input_schema) .csv(f"{sess.get_session_stage()}/{WINEDATA_NAME}") ) diff --git a/snowflake/ml/feature_store/entity.py b/snowflake/ml/feature_store/entity.py index 44b0ece0..d88546a6 100644 --- a/snowflake/ml/feature_store/entity.py +++ b/snowflake/ml/feature_store/entity.py @@ -10,6 +10,9 @@ _ENTITY_JOIN_KEY_DELIMITER = "," # join key length limit is the length limit of TAG value _ENTITY_JOIN_KEY_LENGTH_LIMIT = 256 +# The maximum number of join keys: +# https://docs.snowflake.com/en/user-guide/object-tagging#specify-tag-values +_ENTITY_MAX_NUM_JOIN_KEYS = 300 class Entity: @@ -39,14 +42,18 @@ def _validate(self, name: str, join_keys: List[str]) -> None: raise ValueError(f"Entity name `{name}` exceeds maximum length: {_ENTITY_NAME_LENGTH_LIMIT}") if _FEATURE_VIEW_ENTITY_TAG_DELIMITER in name: raise ValueError(f"Entity name contains invalid char: `{_FEATURE_VIEW_ENTITY_TAG_DELIMITER}`") + if len(join_keys) > _ENTITY_MAX_NUM_JOIN_KEYS: + raise ValueError( + f"Maximum number of join keys are {_ENTITY_MAX_NUM_JOIN_KEYS}, " "but {len(join_keys)} is provided." 
+ ) if len(set(join_keys)) != len(join_keys): raise ValueError(f"Duplicate join keys detected in: {join_keys}") - if len(_FEATURE_VIEW_ENTITY_TAG_DELIMITER.join(join_keys)) > _ENTITY_JOIN_KEY_LENGTH_LIMIT: - raise ValueError(f"Total length of join keys exceeded maximum length: {_ENTITY_JOIN_KEY_LENGTH_LIMIT}") - for k in join_keys: + # TODO(wezhou) move this logic into SqlIdentifier. if _ENTITY_JOIN_KEY_DELIMITER in k: raise ValueError(f"Invalid char `{_ENTITY_JOIN_KEY_DELIMITER}` detected in join key {k}") + if len(k) > _ENTITY_JOIN_KEY_LENGTH_LIMIT: + raise ValueError(f"Join key: {k} exceeds length limit {_ENTITY_JOIN_KEY_LENGTH_LIMIT}.") def _to_dict(self) -> Dict[str, str]: entity_dict = self.__dict__.copy() diff --git a/snowflake/ml/feature_store/feature_store.py b/snowflake/ml/feature_store/feature_store.py index 24cb1b17..34376081 100644 --- a/snowflake/ml/feature_store/feature_store.py +++ b/snowflake/ml/feature_store/feature_store.py @@ -12,20 +12,18 @@ from pytimeparse.timeparse import timeparse -from snowflake import connector from snowflake.ml._internal import telemetry from snowflake.ml._internal.exceptions import ( error_codes, exceptions as snowml_exceptions, ) -from snowflake.ml._internal.utils import identifier, query_result_checker as qrc +from snowflake.ml._internal.utils import identifier from snowflake.ml._internal.utils.sql_identifier import ( SqlIdentifier, to_sql_identifiers, ) from snowflake.ml.dataset.dataset import Dataset, FeatureStoreMetadata from snowflake.ml.feature_store.entity import ( - _ENTITY_JOIN_KEY_DELIMITER, _ENTITY_NAME_LENGTH_LIMIT, _FEATURE_VIEW_ENTITY_TAG_DELIMITER, Entity, @@ -240,13 +238,15 @@ def register_entity(self, entity: Entity) -> None: suppress_source_trace=True, ) - join_keys_str = _ENTITY_JOIN_KEY_DELIMITER.join(entity.join_keys) + # allowed_values will add double-quotes around each value, thus use resolved str here. 
+ join_keys = [f"'{key.resolved()}'" for key in entity.join_keys] + join_keys_str = ",".join(join_keys) full_tag_name = self._get_fully_qualified_name(tag_name) - self._session.sql(f"CREATE TAG IF NOT EXISTS {full_tag_name} COMMENT = '{entity.desc}'").collect( - statement_params=self._telemetry_stmp - ) self._session.sql( - f"ALTER SCHEMA {self._config.full_schema_path} SET TAG {full_tag_name} = '{join_keys_str}'" + f"""CREATE TAG IF NOT EXISTS {full_tag_name} + ALLOWED_VALUES {join_keys_str} + COMMENT = '{entity.desc}' + """ ).collect(statement_params=self._telemetry_stmp) logger.info(f"Registered Entity {entity}.") @@ -681,30 +681,14 @@ def list_entities(self) -> DataFrame: Snowpark DataFrame containing the results. """ prefix_len = len(_ENTITY_TAG_PREFIX) + 1 - tag_values_df = self._session.sql( - f""" - SELECT SUBSTR(TAG_NAME,{prefix_len},{_ENTITY_NAME_LENGTH_LIMIT}) AS NAME, - TAG_VALUE AS JOIN_KEYS - FROM TABLE( - {self._config.database}.INFORMATION_SCHEMA.TAG_REFERENCES( - '{self._config.full_schema_path}', - 'SCHEMA' - ) - ) - WHERE TAG_NAME LIKE '{_ENTITY_TAG_PREFIX}%' - """ - ) - tag_metadata_df = self._session.sql( - f"SHOW TAGS LIKE '{_ENTITY_TAG_PREFIX}%' IN SCHEMA {self._config.full_schema_path}" - ) return cast( DataFrame, - tag_values_df.join( - right=tag_metadata_df.with_column("NAME", F.substr('"name"', prefix_len, _ENTITY_NAME_LENGTH_LIMIT)) - .with_column_renamed('"comment"', "DESC") - .select("NAME", "DESC"), - on=["NAME"], - how="left", + self._session.sql( + f"SHOW TAGS LIKE '{_ENTITY_TAG_PREFIX}%' IN SCHEMA {self._config.full_schema_path}" + ).select( + F.col('"name"').substr(prefix_len, _ENTITY_NAME_LENGTH_LIMIT).alias("NAME"), + F.col('"allowed_values"').alias("JOIN_KEYS"), + F.col('"comment"').alias("DESC"), ), ) @@ -725,54 +709,25 @@ def get_entity(self, name: str) -> Entity: SnowflakeMLException: [RuntimeError] Failed to find resources. 
""" name = SqlIdentifier(name) - - full_entity_tag_name = self._get_entity_name(name) - prefix_len = len(_ENTITY_TAG_PREFIX) + 1 - - found_tags = self._find_object("TAGS", full_entity_tag_name) - if len(found_tags) == 0: - raise snowml_exceptions.SnowflakeMLException( - error_code=error_codes.NOT_FOUND, - original_exception=ValueError(f"Cannot find Entity with name {name}."), - ) - try: - physical_name = self._get_entity_name(name) - tag_values = ( - qrc.SqlResultValidator( - self._session, - f""" - SELECT SUBSTR(TAG_NAME,{prefix_len},{_ENTITY_NAME_LENGTH_LIMIT}) AS NAME, - TAG_VALUE AS JOIN_KEYS - FROM TABLE( - {self._config.database}.INFORMATION_SCHEMA.TAG_REFERENCES( - '{self._config.full_schema_path}', - 'SCHEMA' - ) - ) - WHERE TAG_NAME LIKE '{physical_name.resolved()}' - AND TAG_DATABASE = '{self._config.database.resolved()}' - """, - self._telemetry_stmp, - ) - .has_dimensions(expected_rows=1) - .validate() - ) - except connector.DataError as e: # raised by SqlResultValidator - raise snowml_exceptions.SnowflakeMLException( - error_code=error_codes.NOT_FOUND, - original_exception=ValueError(f"Cannot find Entity with name {name}."), - ) from e + result = self.list_entities().filter(F.col("NAME") == name.resolved()).collect() except Exception as e: raise snowml_exceptions.SnowflakeMLException( error_code=error_codes.INTERNAL_SNOWPARK_ERROR, - original_exception=RuntimeError(f"Failed to retrieve tag reference information: {e}"), + original_exception=RuntimeError(f"Failed to list entities: {e}"), ) from e + if len(result) == 0: + raise snowml_exceptions.SnowflakeMLException( + error_code=error_codes.NOT_FOUND, + original_exception=ValueError(f"Cannot find Entity with name: {name}."), + ) + raw_join_keys = result[0]["JOIN_KEYS"] + join_keys = raw_join_keys.strip("[]").split(",") return Entity( - name=tag_values[0]["NAME"], - join_keys=tag_values[0]["JOIN_KEYS"].split(_ENTITY_JOIN_KEY_DELIMITER), - desc=found_tags[0]["comment"], + name=result[0]["NAME"], + 
join_keys=join_keys, + desc=result[0]["DESC"], ) @dispatch_decorator(prpr_version="1.0.8") @@ -807,9 +762,6 @@ def delete_entity(self, name: str) -> None: tag_name = self._get_fully_qualified_name(self._get_entity_name(name)) try: - self._session.sql(f"ALTER SCHEMA {self._config.full_schema_path} UNSET TAG {tag_name}").collect( - statement_params=self._telemetry_stmp - ) self._session.sql(f"DROP TAG IF EXISTS {tag_name}").collect(statement_params=self._telemetry_stmp) except Exception as e: raise snowml_exceptions.SnowflakeMLException( diff --git a/snowflake/ml/feature_store/notebooks/customer_demo/Basic_Feature_Demo.ipynb b/snowflake/ml/feature_store/notebooks/customer_demo/Basic_Feature_Demo.ipynb index 6098577e..8c4aec7b 100644 --- a/snowflake/ml/feature_store/notebooks/customer_demo/Basic_Feature_Demo.ipynb +++ b/snowflake/ml/feature_store/notebooks/customer_demo/Basic_Feature_Demo.ipynb @@ -5,9 +5,9 @@ "id": "0bb54abc", "metadata": {}, "source": [ - "- snowflake-ml-python version: 1.1.0\n", - "- Feature Store PrPr Version: 0.3.1\n", - "- Updated date: 12/11/2023" + "- snowflake-ml-python version: 1.2.0\n", + "- Feature Store PrPr Version: 0.4.0\n", + "- Updated date: 1/3/2024" ] }, { diff --git a/snowflake/ml/feature_store/notebooks/customer_demo/Basic_Feature_Demo.pdf b/snowflake/ml/feature_store/notebooks/customer_demo/Basic_Feature_Demo.pdf index c101abb3db248a70a91bfa3af764f2401c5cf07d..af5b0a63c7e639a9b0aab48167e2d5560d1086fb 100644 GIT binary patch delta 53691 zcmY&;V{9%CtoL|k+qP}n-kELN+cVp?ZQHi(o!PeT|K|Pj-lS>zp-G!IP4%1TgA1O7 zi`SxOWn^MxVd7+FWoD&eWTg0ysGu3->`g=sT}%lnMY)+7ng2hPgm5kZ8w)EtKR=N)eY6dpGd5*pVQ*z2w)kkxF9+h=?5ia$NCFzL*TuYc^RVWnJr9Ss{Y%gXyA&qi=lk{)0^9deii{IJ7)@E@$nSG4NX}T#8`yuFD))UM{ptyTb5Wk zfZ;@jhPsKAiKzN1EK=Rrt0-CGUm@`RC)qI6@ozO+llX^ufJG%>WdGwd8%BRLc{Q?= z`By6R;|6$Ex1q|!0LaBk8t3m>)>`kpdzDTBg;>WQ3&Mq0wDD3_*wA<3SA}Dm!BT|n0n=7x^pFHc+`A7w(wLZ9~mmuA~k8poEWi~OqNkmemhEZM!`W8bRn#= zOzDINWatVqSbj5rPnc0*yZ^cLN%&;ShD%&~LYMUUYb>ytBu?gNo$cy5tKXDJ8B2iO 
zS0>Q=gyb;m&%708OFF+JHZ~+KazQFSvR-6`FPTa&i~U~?H$?*z zgPL^Uea{7WqHXb4GZDVO)RB!i_VRmkyom`JWgPgxN&rQn)B$QnNHxf!13pB+!9g<1#Hm&5&s6E<{bJ7&@3FG{Ep2G0l zs7c;#$Zu6t5TVJ9pNC}G47XDR^zX{d?o96wrpAWN?=DuhiTP9J0) zlc6^HWr3sumR}s43T_5n#TX*L1eqDE2(!e`QSHm*J}F2`VrrsiP_aEb2W?p$5mbuO zrQhq7y$W{7nhaKxS`JkYALt~b3XS^bIQ4)P72-LSl6uDMKh8!8o5!3V`VYChfU!A` zQVl=}+iW5TH$tY^HE0=}1i!`$aq;vC32G^W#Q2~`(q4=zjwwBcOhkk83StixhfXNl z-r;XH`VMZ8q^T9pnW69sIUor~762+GGdYVZ=@l~A9{^iffVodTVrfdQihQ15-~d-v zu)b1AB`cBW}}IPGI6??ebT5;d>DN$mZh;x*N)qH&bVykT zkrab2q%D@X1tk^I<^x)As}EX1LA$60jXEMW^co^|q)3gZh_GSAykdLkym~cZGmR|L z<|J;x(rwX?f_9`v!Yj5TSqUSEA~zrvQhlQx!$8KPd0&5U2AqavmZ3d!dNHtn!lzRL z^Z!{`L<83Z74`CS*meC1OPl)1so8fEE=wz!nv8gjz_QZjyBhf6=E66jmqn zx`23Jgc6I$r*EbZmQzRKK9Fiur4&Dl9-H_mqJhkr-tJCB2}ZYuv~2-MSUg0k5^xSM zau+3hrdgTV22pupaN`Ui>(BuWwVsYy|; zV_xtJZrm%@r#W*m9-sqgebSx+vBMsa#NpBS zPH~|x1C`4Ws%Td0Vie=l8I3lk^UDPoDS+LuoHNNWfCY*F1UI5n>ehGT z0Tnn9mPjF>o@fDpUC?_)q#-ah)dQmf&~cZ^;0I1ZFnxq6Pbc5az*j8i45EQLHC3u> z-Nv{`E>$(G`YSSM`ZDvQ4J#;T67*r?f<%Y}M^0>L$-y#@3K7?fNTRHn2sCHM_^}G| z{$_&E#4%Svs|A}7nG2C_tDo0GX%kc|=r(VNs7flo9gqP^5})eR5u3_5oB|jO8wbA} zlsx*L;S24=g9Fb+M3FYyB+E%-_E%RFqlZqxpkQc5%kfAJsRvhwt083`*DQtj#mXb` za6uRQk(1~sDZ6DFIFKJ%Uooz2**_FTSPKieH7O^3e^vW?7&uxBsjf5 za2x%v7eD~z>*sTiy-N5|7Nc6JJw40K)nH#n!xXXq&P&qMeZnfw@martLW?pvua+(g z_j3gXOYBkT`3PYs78SSJy1T1lGLToE{u!Ml?YhNTGMSlrxU-@zdGdaEDUrB%9I}OI z5Vhw8wP0DBqqB}}_r>L2g_%RHWS>q=g28m=BvAsKG|woCMypeCIWQH$Pe#dw#>B;9 z9A_qYa;qFXp1N9QGOwVEkP)>OBK$T+!VLt#^ljf@EV)&%$62F|r)5RBLd0p1_kx2< z2YKL-FVH2EBqJSROyStllyJ%Fq8mG+)f=l&7o=LHU|!rA_EA9lJ2cc?%$u9oU)C6; zW;6jtE$HjcMW( zgfg33LZ3*imTGEg7ZLNF5fxf&?Br8gn+z*l#1SOSyJ&4QmV1~rx44u`vk9`uz4%j^ zsWK2lAW^fl)W-gO7em)>COQ?)@=mqZe6?(E0Z?yDN=j0giGvu4ehsN z-CKm*N|V;QnAuk`p00>&nSKThSEWWg%tITA<4?@m7M_V2qrjxuK(0Q>b{O{?xOku9 zdiLwr&ptGlfTEEX{6Z)=UzK|wZe!WH^~P=7#alRVH-C;Q%2|Va!zS5-XaXBowk3d3 zU605LYeYQJ)9R;+tCu(1qwlq=uF9HbQdLUVBGi-c=1h~>@MM5k9!W_@?_(gxWG25i`iSQ*b?jC7{%@Nlc|P{ zM?_DPYM7Z%w;9>COzF^oE&wjKvk5R^{PE<;pzVBdhM4YX?ydhCq?1M`%#Ih}py{t{ 
z&XiZzN?u-_90|VZ#a9g2%O~G*w^3B{P=i`_vAz%WIZFs<1-@;+8XLlW@M2soNe{-ZfCfPRw^D}u^MTrvj=cZaBU|@ zAW&Bp2=w+cWn2&A4Sz<7`r=4Q5s*LNrH^B%^tGq&La%qlZ4!v{SJ%~IaCZ@iyk&Y< zjCH+@C(#2Pbe?Q{;trqR=-I$1p&)z7kNm(-Vn45HVnGaoF2bCes&(Ev5i{cz4-hi#k{y|%1YCqxtr4LQt?ae#^$33Q8&uLN?Fegg9bHm5mIHh7Yhun?qsHLjZs zID`~waN|+4w$Rntc>926oA8;f`Howj4z`^9HZdd-nW_aU!EjiYIM1J?TYK&XLWB-h zG>PU__^^(gp$cB3zPx^cTZjV2!*MS^mr%N2KGB3pQ zaPSYD_%H+dHd^W$3h7;{iHo|xUJ13Rf`4%i{3wGrU88kBB!2cEq7vJS&c zs!tqI*tspW9xeEj8^~9b-qaeWUVRnU=7Q6=7Oq>|tlBn(296IWh=#k1W3Mn2ICyT7 zbhI(0x74lMS46d*Ts~*d0RqWCoYyO2-NIX1cYfD%rGV*b?YNZd-NVm00>`+N_A%|A zPOtkInu!Pwbb;2q|LE57vm<=dM`r=~?CZxZAd2&FOjjO`?=jT7UYtiyCevm>np(mD z$y9>fP{MuW^x<-h0hIdva4k$hKYXja#VtM&VKFm+y6^FG3N+q4^f;|5H|_1hrjlQJ z&HvLDz{X0Xg94xlog8-Qu`cMt zj){J6%HPXpF+}^R>=Z~*^qeUxAxQ(e;xV-L|tk|q>45919O=h_M4{sbtjdX zWG)ZiR5+e5YPG1RJC-ASfeBfR;PtTe<-$C)`;cckS zgn?a&{M8&BU+37i^wtg{?v1iHVC)g%_IGd#^wIO5HbfU)InFleOTdqN+z$r4z{qK~ z`;7@+pO(q3q3+lmNr%wIooAV6PgoiaLspQitUcCsM`x*3{nd*a{n#@!K3-a@cVdb- zHsILg+IpASlZ&p|$U&91xYBd69omD;ZSLF!TGEek+}8LNJ^je&HTzl?AQkjS_%64J zmdrLsaP&ly-$3uJ8P-i}2{pGcNI+*7;=JSXYDr-H?!B+TwjiBH862JTE`xivq2(ocAkkLIej@vK_zE5zz z#U_t4PEkKhrz*A#hkmg*l^W8ye4$rvLhYXS%|bri2ro=2f(2uHVLAc1M6w-TzterH z9K9shIsvO=P2~LyxUoHjKF)qLRF#JKcbJ!!*<92$L1F!H4y#oGec==}&nX4(zS%jp zl&8KTDs2pN6?k=5xqW3sYV78sjl+xAe+d7uxsSW zJ1%-LCnhT?5E8=3!AhV=08zp}Ij{}_vySDs-t_9v%iaRa+V$U~i0&7QS5wGYx^r`aIq{QSV(c3t~gV^;h) zfFq-#3JZP{@X@{)dX6j}N1R8y%cOb5Ry{q&h})(GI>gZhD?O{Xh<$K6oRMzknH^C( zl97Jm5;;%jNKO))d(g!+Zj$~WL|!UVebbNZwoxNYTIKM!J3#u5wr5ofTLyU`2N{p5 ze96*gA`inQEwkpaM9M94b+M_WSNxJJ{JC^UxdJZ-u)*d~|ClIG$v1R^H1Jn%6U|9E zzv&?xak(PXc(}`eF|$P_v_FF0j85&zh@GE6#kUb=nTytjb*0QhSOknNA zB)uuOmHCp1+YEk2{9;ezu@NrCRIsX(s^2f{1XwF(oTp1r_{U{QOPurK<_uy&(DK6q+aq-IJ&GMDLQ7PPV*7T6|L63 z%m=uS+F#@#LfV<55Np?hU>@`U7FVgh>BD2RPfoWuhHU_%|DKKv;Q(wzU$uEeM}`)= z*bTokLUzts8A15LznA0dC5TSMRs&nRE}wU#)Zq&-S_mTy*@jM^_xEdW>R*rd2ZnZ> zzwT4uO49rEF7FR!DpIza1OfmwMx@_La#Ev%FhZ(Rl8h-5^(Iyd6Kg-*N=KSAMiwK| zz(7j~N*~eKfTGg`a$PV}T2CRFoqW?&6j4|T_2 
zqo|gKQfJF_bHnt`gc!a>P}p}BWxS{2T!wRxH2(E+U_EoHyW$fM=`hZtce(m=phrDu zHTlU)5jJyuye?e~P#UFT3^L5_m6h=&aRfxOm+1!(2~2;|mOq40S&};%39`GkoT~CL zq4a)XH?RMKF5|91QZ*VbhC!c?rtyP&OCr1)rSr1yNS`N6>y7Gl(jJ;5NUwep6}l*; zSo!$07$jQXwM1{^_qd0_Qb|2Wdu85b?fYzva#qtPcXqK>zniIhQZhO@V5@2ah@H^U zORCRiv9CQb@yEbL5WWDW8sWti%H#kl*NNe3hN`313b80TvMESC_8p=2w@XrM8{q>?wfm*fp zPc2Pa7|jOXf=AS-$3kYHfDW9hqu$?alEW%w%Zc_ee3E;cFy?~5x&aw$Wumu3kid8w zFW`eqxl-Ki-(PlQ*-z)tvW_F7NlKPeJsZ%C{ev76lJEVZCZEY@W7_+l%9kY2DlE$x zyT`%v%>P~nh4L71fYW*N)BYFq{6AK{a4G9J9cuSlqT8r-mi}932(u?xXA&dugwVvC zGPU!F>2~Vy>Ojks2KFNNlX1(~i<9Efrw(I@Wh7_qIcevG#@zvJCqA`gL}qtO(Ut09 z_WSOLUz6~(aZawv?Ew+(qr=dUonJm+NkpT3;=0QXIZm_zOb_Ee)Jwp$nb0H+RaB#Gx+wM+2i3Yk zPM$SQRZE4M?($ae_8kCeQVeFYg`#bP6GmHmTB=if*-_3{gvRe&N8#&S3^D=3cly=Z= zW=$b;sb3L`Q7)$M_2`fbiVqk!E!U-pKY*T>+XH>4usj6!I$N0hQn-Er08PH(3Vc82 zS#gRE_M5gqe}tbuIu1pXllhz*Y+FMv-(f2cCH~9>94_}pooiu%>vbaE0d}=NDw+4J zzHVb$e+AWsv$%ZfRX`xahI8m+;&+D16TDe<6%#_oATL{x?*~q&&rcl1447s#E6G2F9DND7-L2-xSpdd*oz4no+Fd3rEmK27|mlBgti)3cSKK18R1-#aI16# zHXBsb`Iq7Fr-zXZ;Epi%gFRWRd+NyA8i)sS{oFI4r8a$Usu_0FaJH7jJ17bi@b`24ZI}SQreeA!uY?rF5CJJS8@M zH0&dC3@QfUr{yLa)N}zN!kh*h1S*r+*y3dCCWZn!K=#L~vk087|3kY{K2oKFl*$8z zGK>*nF~GEhH-Y5rfd4n~kY8Dein+i{(*wQbjdF}u#cwCQ)mQvYQovhiyBEa(#ojk9e?F1V)?OGFyLH32P6YZ4BNuE_jq*zS>-DtRAP#cW}Q_^FEj2`DbB?s-FfY>2W6y3Jnl{WH8GFW@<=0sFx-6$=`(2|c_{ z9yzO-XGA6XDgP#?4gni78EEEUur@6lo>nG3OItev6=r^u^RF8dbY^ho8K;?gf~Ja@ znGt=cTZ9>YOe?01(CrzXQUgQh^!1VH$>cQUq-pNuD^0|M=Wxkr84&%@OvqNm#U;IS z;wGM+sl<<1cJk#@5^zD{n}O0S9Usc^RkMP{;*+l_QS;Y6ePG^x*R>KaYyT>@J)x(| z+t|fhrXT4{;{({D9bgZ8bCd0NKGR7WXA+~j*#IcS% zHX=f|oAXm`+9$7nlSFF`$pnc%SY35n+>+RYb0d*1U9b&w(r%*Cyz%B^gjC`)xfbH~ zE$Q_$Nv_`=LhHP3Sl{%2sf)SU-EOfEFbaEN1ViGlD*5Gy0wgfi1p{b>IO_#vClqyL zieQ$LLQfWqn%J-xg<{L+q>fo{MOGo{T^-T#H&(|Rw1TUT%$#&qwE=Gg$8{4-6N3AP zk&+>v=!;nC5sN>vn)1E`nbPmvtTD!N5=%zt&I_!GxzVKwH%P2<(>`(*nZ*J)awNyb z+8a_9*b)N4fYJ?YIguq%XZ+0BmQh@}Br5>|IhIEskjzPASJ3K>%X!vVHaxi~&1tbE zNiXJ1@lJ_(R;HmHe448g;=oGh^32TKq%7Lmt=E=23L 
zbibq=x^2w(bmGdXSyNUBqSPSy&c$^Tlw%OqG&Lu00P-+&U?I-5joW_$^{Rh8j3Lx0 zgUCATs!cGRITM;8%6SK5))~IwdNdZkpV`W?2j31>25*@$=}Pfro&>*9S3K7c2dreI z6FPB;kNOhn$XjW#SE?+L)~cQuaRikg91C_YFf++F^BQ{9xaC!?u?*V3Cq^O^vweErZF&B5ovb%x^j3z zewRF=i>FbIC5v9CMifwG)~eRr z=zma?8I7K20dKc59U2)$p;0-bxPtxFT$8C~)s@qt5M(i#HqCOefJu(G5if~q&VF~2 z0T=Nt6ystNMC46*7$U*WSe?e#WO!^gX9L^c@olG4K}?O!c-z5Pjq&@9w^3BYhan0G zB`LyM;F7~2Uw&n89k~r+#P_7gbbQL^Nv-8J4vUUdH0(>0L9^K-7u(=B;Uz|_qVe+R zN|EHH`H=eNv8>+s=8-USw0q}*T8WlDBVb9@&wp##@^U}2(rSqKoB6GvS#ppbO)W=! z-;U#^=y>!m*2(N}#&UrDTC+WKq9Ga3&qb!pY9QGPsgf*t=uhM2Wm#jiu z7?whrNZCaQqh&t#2Fcw+0Bms0LK#> z)2I*1qR99OcQ=DiMa*f@(73r76OVEwVqPl=srRFQAn&n(X9`h=F24>(V&@x$Frq4_xwu zKsF`Lqe!dzm8)SDlc)b>@?9cC7ryE0P!n2l3~F4Xp#074{Zto_$-?npE5zzI^EaXs za|tPGANS2|bzX1LWs(HHw;q%--+sxNoH5_}?Y(MqksmFsAT+{sGLvzOW-E*EXE$9Uyu#91rQd16$F;{kEumIcnx zZ7Sp6n8=@JPzeReg;0XHFjKuUN6G0JL~V80mQo7!-LOm z&IgHfl{aGn6`o3Ue`%SSdCP{d9dq*cuD$jp)nBPtH!#e_wLVq=rU0;UMFdq5*HDOg zjJ(A1h}FFZi9RusjN`V%><=H^vKcLWq&`rRBR?4xHul@{Pix_FzxzHBg&UJjmK3R$ z5e3oFJ(e;%cHkeVhz_fkW%=xu%l z40d6am6fH%E6qo@Vz`-ApvA!sV-c+lw?hFO|9*bmfEij$+XQ~DuW#*D6GPeos=NJl z(Ia~sFpBMECO$*Vpa) z9Zo>g)5R*j70Fj)FzpMzCxN65@;g)k2x7&_N9X6g#oV zou}ockNrFR+)7h$GBzq$)pz^+F>hlMV%{!AbTrQ1*m~{j`HsEZ+w1*Z(Ut3}dKTt1 zUvBsA{XKu-d1uod+q`@-YF}a3f1w}xZxs~Pu56LJjjH3BsvluAyW|eNsw>J!Y1Fll zGwowO*aA8pWMxslY{F!2fSa!Y(9)))Mz|DwuL~*B4uAOGAQs0VWp4*`P26hBq%dP$ zaWu4bCkvyb9#qbG2)MpF4@`ZRaa5n$$5|9K8E0u`^LAoIEC|S6zF)e36m}*ax6h@vc;Ae&)S7N3~n2baKl5Gy2+W!hXtqaj8rJUA3N>j)n#ZC13=2)#MIj9Ld`isr<~SDea4aGwQn6(g~^*Tz4Z z$t8iPh#~)=15rq9g#ybR0=&17?Ps#wX{^3ayS3ZLrWfsWdnktnI>ls|!5TCacrKr6 zPNfz?p*5@yk_}A#X$*z{x+eK^O}=sMMDSum56EUj%O2j#^bH_3-2L((F=&ZP=ne>W zL1t4ul3PVa@q|yWWD#NA$cf5we|IejA-`V`G&$pC8tBq^gjIMocMynZbz(R&YrJR7 z_-Uc}>(1IHknLVvZgVrrXd1OPI6D~^q6jDk45h#XhV{Nr)hW&|7v6zrFkg7I17{Knfv|Dic*|_J<4m%LhMlu z(5pCg1)%N;=ki=Uc=s01fWio$w(#nQvnt-Ld+#)+eU)*L%6nf`KGev<=6gddO#~7` zw7!IOgMA!2Ey9|9c)=I{AAjA(@9m}^UhTC+={bg|LD4qAihZ(`Cq0d|Vv|PwqIJ^ib-70+#( 
ztWSCE(~7a}0@^}8t&~cz<{?ja?Z+UnV*bZr+pqHT0J!)j%!p?OfyUOtRh63!>K4*l z>QlrCiTD`sic3{j^|Q&zHrc6InQ)m|XQGTWphZznO;5rB>cp1Il|!a_MqjUAkm564 zDe7Ba5kJVTfYz|O@v5iP(8(AfhxZq;Q{sQ&GnII*?G$%SxA=I*SJ(sSIth=@q1*w9 zW-WPTN2+KSFGtBY<0K?eGWsP4V_*smYT)OXg_i)|b0&dJP^sjna2XY_Y)BGKsKDi2 zerZ$8wK)`i#At7Lvx?%y_-l3}a2`aLU2p_nnDQ2p&h?rxw{gau!jvh(Z?hJ5y-bV4 zI5&KR%M%-hXB!B|kn?x5i^`@oc9jJt1zInix(6a*%FfqM_U-|aGUtHU7n50K&hG~lJ2Ju_7u}@rQ@;B0-KS6CT zmp1znspO*--bt^#^htYExF}w{1`_bGMXh7{OMn0VMTGzBk(UHdO$3+{{aRCg-7dQI z*siBIo^x9a^LO{~=}zH4Q3?wUTf z#W%o#XXSfpp00VtXv?3yvs%8^rrnSfwWblur}=r7L}@;enK==EJ9i?9A*=R<%lp$* zqyxp);c#KUoioSHA>P80m@P69EsG&74V6d6sezWUc1ECwz6p4}9lG}R?d-;4#sMEi z;fRM#q8#x`?)b>GY<7OGSlGjPe|00=t}PD5reI6`Q&F(@2ZKe4!EcSGfJn5~c;I_G zjHTXXPNYk*AQ)+f|1tzpx?EMF4InWQTCw8@!OLnn!Fnd+xG!8yG9utC znqu|3h7b^;F?m(v_XxDl0v?4+-%4Du*=Q62)AN@JmRahpA>)a zLH#AYlaKKADP#b_@Obcu%B=m(P-^$!O-|`GT5zognisB3##EZg`=!s~T1B*9!2=GMn?#Y;2teqls~iV9hfEc}}p0GMU{) z!@51V1XPX$MKc<_rFTn^k@R-vv79BQJB3Zl=P??jt~jInO(-d$e75L_^9fzR^_U=B$kD*;<9%-H15&5 z1?49xt!(CSQd=IRQ+R0r?AZgaZpG+YFFezGTQsS459qu&sqTeJQFsHjE;`FXEpXZv z#{b~G4HEH#yzy%3b!@9B97EkXeNP&vhXas`thM|XHr@XXn^s6>CGE!P@pSE5+uT!! 
z)EZ;_d4p;OuoqX>Gn3N4-bj*2STCuy<>vS>Val(s42yfij(<58@JxdL5DM)=b!b9; z1R?8n3zl(U1xhU@3u(0ebFNU$iV>|wI3>WOVu%OYq(zVj?x{CZH3wyiXOjaZBg<^MTABbYEVXNNuYWZghFOc zB-s|1t>mQBo&?zr@?M5bz+b~v6f$f094l5iy%ykB+)4J8H{u676Y_*4pvVo&x_ra6r9V~o3G z)HM<1Dm-$iK|%&-{1Pp!&!8DhIEC<6S;~T6($c+1%*#M(S`y*r#cNE-oN)F#1KAId!d0%9DC0qe@%K6ik_X((NUXske4B6AfB(jV3uoUknh zMF!8)8MaBrM+{+?gFE~m=0_kA^>ZGLa+1^yG4}0hoV*u-J-67;xrYwbPx(qGQn2?+ z`gFXF%)H84DN?W}&p{zXOi=9R4DyUOH!J^?tY~OdC){)ffG$@YTym=zpH+>sZc0YP zfAJZ*|L0r(N=^#OA6`7YiU7jZ7j&DX!jAbfYU869A$gPUGx7)%fUgmH>Up-5CqK@s zeb(kFkC`K(31m}EWY%f#PTo!grA?zN|31=2O2;d;1flB23Z*PsoZxChq?}yR@FLgC z^+fs__b#S-c#DufQnbv&JA!*XO?%I!2(M7Lpx6x>Jw<&shLFmKQKgwEhrN~FmY(1L`Gx*{nw#d`tDGBLy%<8& zyC2uuD$Ituw@-Ok%+&oD|MqYl0Lpt$2eTh#4@{@5N_Cw*Ua65{3g?iVoO_|13T}a* z@<(N|5r}}RbIMxp0rZ$Y;-NVAZ_g15_0d}JOF<{Hv#ALd;NdvM=K50aC=-MgFH1Gx zJ(1V>fq&1Z(Fdt#ba)d~9Q73_vO{f2{aBrxs(#vGUbQ9 zGclCjrlbM$L<7648g@pgh+1voZc~~%NlMd-)Dr&-dK+~@Dr12AAmG$Q;iz-sCwr7h zyIL+F;!%+Uur@+$YsjNS?K)xCD(Qboax3ZX{Cg0=LkZd>j8F-Y8a_S(U7JcX`9H(;7&PxY(k zPOf-P^ov#}i65a(4Yz8X*Bm5Z?&rQ=N11!pT%a)%V75Y-ho^Vjz&vX?>>Slfz0~As z+GYv?S1{WIZ5#5`QI5Q-0Ar9{j;yW(V{v=(z=GTm%o-*TO`mWNv0y5eSL{}t8!Io& z#9FBi9Jh4-Q0B|3XbRnAL)TvY3T(JLaqfG(Mri-l=w3{-@0O*a!A@8`F{!5qH1}N8FKFc*l3O#Qb-#t3nya2E2Hq z?&2q377^#BLOyZ{Qnf_&%f3vOsUmXMg@EzQ=S?N|`-vhIX}0{qBTU`deGMs!CGA82 z(!Wpd==*ywcZ0w6AB+W4=q#x$t5ldyBL;|!6cCJt<^BsOq0pVn9ZtTo38k;NM_=GC zt80RbcGS!2GUZ9*u>p1Y`TV3nl>PBycZ9(HKC(YAT_8udnr;-+wWyz19H z;5085{P46z|BzuRV*$FcB*&jyLW>rCzqC4OKEC(w@0^vcIWB=6%%41f@hXvSvbGAZ?PC)W?}sT?xMcMOiGzmkkm};yUwVo zQr=f@=^7#&4hH*RLh=Y8y{vKjzMj+4^!3vVg9iml>Cebc@7ekw-+bS%Zy5kTJI8qr zU4*dw4+I{G&v*c@7YOSioD;12sNeU)HW{D$tvLq$-i4{TU%&e}9C?ORt;_S@rw53) z_va92@oFVi)_;tZP!1sc!dAcU{*uF%ZWVoktvAwW$g(3nCCN<~BRa__>svD4$9eX3 z!Ka!&x#fbuKa}SF;`7f6G@VYD;1ZeArQs(ek$&%^J&1s|TL%4`r$I8q**(|yACz`7 zLJ%F{V=+im`aOW>yHfg|e(x{njoHpls3H+3qyod?4GaXXB`LFELLSURrL+R={lU6) z3ws~8wRc-=44*iXNlLSHY>ypPZCFvNV-loi!i2JB-ny@a4qe-b z(+dau#v^Y>5iK|24{oDg6`(%YkWRm*Vu)YF2u+?(_7v$gb5-q)L57P# 
zE1t-$lXuq;&UkJ-z8Xi~Y(@im5&M|~@!u<1kdDHi$zU4}VT(>3R2-ysP926Kc&vq^ zM1+)PNQ&~M6a;Hj7W>8RY9g764T8mPH2(@2WlMeW7yp@X2fww(#-vo^8q= zj5QZOhbninfJ?N8Ow(yZaS4I7SktTEgwm8~5jXoEpD$K;4OKwVrAtKN3^=6*r1t8` zGhW4%+;XiPNOv3FnP`fo!~L=Tu8+DKrQgle!4tEBdWd{peTr+T{c|g})$&SzY61|# zrHdblj(@0^vB!gvDRzPU^`5ld=-FSe+rO{m+3;;u&)*S#slio2tf46+W0Bu_^t6F^ z;G1_}8g)Fiw;cOW1tG&be6ctg>~9qogz(@rrESzC@Wtt{_}i?kU$x=8Lfv@{()Sd_ zw$ntO_T3lwt?;)Vuk0m(aGh9d%LVYdYq4SZW^$E%YnMeEs2b-2-K&{<=Mqil2Zf_Kn5-?3-ChfZicvaNROvEnI! z<)-quz%=HD;mHeb`)*X{hVN>|@L`usQ0L>sXJFO>&bL=&$CH0QL2iVBfC5nb)+<{7 zhV8IkJG$GQDPr{Cbi?j9$1;ODvF)I=cMG~(IpS{BIClI~cUOLZmg_8|DQtz6+L*3B zM`!8@ZAk@#o%38mTl-I@xLiI;o1nBDot|>CK-)uJDL)?dIMGO~S6Zlpk;$T%{gSTBXB((ErfsE8al5gZAU} zhM+7R21^*+j#wktDCF5ig@N%0xkaHZJjUbkcPu3|oTPz5aQN6daSJe2Cxr3)np-WG z6wCXaa-Z2u!VMh`ESXnRM25GffSGymQK=uK8XVxmq&2@~s{wsTlW!4Hf|5;Ct;KDO z5IhJ){E4~okhw^rm$>ML#5^RXd@hE1EuK5a4zIJc=f^$-#=ULgDhs=dT@QL5oOP8p zY4DdJ@7~6rCh;OuSsEZuM;)S@np#(km&TuDTZjvz!pL=AQq%^e2?g{fT(J*ex0V2lZ^i;DKcf>x)*rX!X=86lqiSIz-RS;Cj0qAW-mW{;l0QB03Vs&GHyz zn2nTar!0fD0J0H9XWa&OXjFHfkH9~WcCN7$rL#nJD@&`ej99{`GUZJ)G|6t=w#z=|MDh|7`4pA2`o;7SJ@JBz5c4r(!A!07)sbBK zG4WZppHM(lQ9V`PX0tFWil2tpjfdRl0KVUatwI$UZO+N->&->W7t8Yn_lBT+yE-h~ zwB^`rLr!NjneKIUwCU{01PjWFD55(BhKnHidRN~~em=Z_9G~JL5noxYKlLad0+(*60ASlw zxP~r9BK=}UQ#jedW2M965?Lh6sZiT%)X5i5hnHiTToy9HmttwiXyPZ#Y|W?0kfRD| z@58@qU1@ZPM55TXu1HB`BQ9L7GDsxgc?)HIjU7wmeL0u;_u;Aq2C$Q@V?%Y=Iu+E& zNn~B;^Y*?3FgF}?4Bh3VWFF(E0Q5g_nHn|aOeaL6rg=c$IRXmS0>rdOe|uG!CBCug zkp09+xku!)d+(?Gsy0^lCGZY6&AG~)x=QAIRl4*hqzNyJtR`rV;V9n%7KDFD<@aR) zfk{X!wBw3ZDTz|HHuwreDb6FPtAP@549gM~H#WE9OA;aB#YU5@e$(U(fC8g?*6529 z)BP;&=4IrAiWw|?(i)BTf77KUhm2xwrxIN4J&5h)d&p-kUoxLt|2y`?r0!5;9)fk1 zr7nYsbyAZ+jI>>%lh|Ty{qdVM%i_BAKAU+Uf+C)F@81T6%tVl}(x8^+sQBQSZS280 zf{SdY2Lpi}*PE$Bf(OzD!2IB`*tnli?)4ti>+QYvC>`2-Un$b=eC)NlA-Q?Z{v zeWt5^ZedtsvZOQYl5OUB(xn-3g!6Y(#|-08D6S52zca*|P;>WcW2zX30&dhm#wER< zo{wpvYyju_ug!5zL+))HK>`W!baRWjo3iZ<{4Uu3k9nx6hsS!MOatifwJyPP67T`|^*S_UCsd8!gCR8nk}EtpE0UUJmh-r{xmsSze*xCd$Pe9Yv7r 
zDzF%nm$%3So4owKh!xjN?)$TiA5#~98imHnw*S`@RLNAS zg~Fo_S}6ciPmsW7aE;U?BK|xS0-MbqN0?+9%doD_5(+t+vwinFGwJR$usVjJ)k++L z(5+b@nx?VH5TM!@lIe(`V_`byua&)O`#U$Qb)yv}yKBQkZu4tvSTY9^KTEQS!45yG zaRUpQ&%NaiS_UjQ?MbaCTf#Ar9&I@;KrjqP7Xy^W7cArCgfC_?vnb(QM<}004M0hxN_DvBZ3zJv$_f^k4nb*|%Eo7HgPxKqXgyVZl%J``6CnAg_^!1(Q z9vTiE1^_eTa>TmtX%yTx7ks9Z4SXM%&2adZRq%h=uYf^Byf7(b=w z4WuC(xYP-$#qkLtYn*rk6;X_9Y^0+NvA0}tR9^yxh)omR+F0#O!LBO)yh5Iwo%I^= zZ=w}WWdcK4qsA)cfOWojYTHM-b$<9Yu@82TGT<{kU)R@Zf3^lCo4-*9)g555l5Wmq z@%gEF`9QNpy&xrk;|-d!p8zVcvRQ$)#odk}sIf3yB>-BYA~lN2iZK2fV^Z#b@9yZ| z?fp5$4XJFMf15=dM(esKSbQat+^!@W!|rxOyH__vd6k^pMd~&Jv7XYsdn*FWki!2> zA(H3YstWVgk=XCkRbr9u;b5cP9IUFRQsKm}533mek^5`1iOF9zg0Wfp1{ntAdf6b1pHUmjpdfN3`q(LyQUa{UG=@fnqcG%xDy>jDw; z<+vp`SKteXQSj$x>^CD*rzBM|_HJou&*_eSE|d5#7zn8V#6Y~${KsEuAgqvMW_2Ku zg);A@bCt1VeC%ydl;{0_It?Q6DsV(RrG6%#j*z|iomN22h)*#~fUpcDQ8VduI~=en}}uj7zcjLXW64d_ID5P<8tFzCXgS`vNN;H%_eN zHa=>Ntt3L8g-}H>>H|FAbH)YryT5gEV=5Y;H_yPBCb^2Z{NFACcfI5UAd&^aX2HBy>2C|?W5_@u z!opyUL~CIqKp2`V#Gjsz&S@!wnucf)%8ZWMkrrdTJntdb3yL$@O2Cf()O!N*C`930 ze-6nyG3c4M2qkhQ(T!jhobGjwR~Bz4H{-jGuc?-5V`D&wO78Z`4zEMg2RyuBR=|>1 zfLgKuKU(=4=iz-S3I2kBCQ84zb2P#9T>#a+TMWEJVtl1;3_VD$mJPlGJq^t2dhrAlM#V&06V?Jc1fuWx$1i>7eC* zzleug)%7&}O@kcpQ9Xfd676E*YsAug}{G8)1DRKPiCD3Y!L zG^1?h&MA?|t=-}oL(W*&4799lQdtxHi-dYTx4jBW*szC_)M2Pl$s6T&T%v(Gi-uIVmuYBH0eD0hN{Jq zLnpA%M;rD=u~C%9SSh!PLo^niE~DBN+tF(miJ79qQv--ysn=pb{;Yne%0~GTV+o)p zt&1v)D<+4lr8F4!3^z64{&n?{<)^+evKF((%W+Rxz{p{z^U-)Uu+6b5eUYdK>=YUH zNcbLlop@JuL-V_$VS8FMg}pg2_OQqG0GoTt$|^K$~JG(l49^wALq$ znl>W`@fqo0thXFFk*>iJSvFJ{|8Uma#!xj+tX{}~=W%=`Sx!h`d_rhBq+T4j-5(Px zR{0&bHv<|m@0;;#2X@>=F8ekC_PtgS5T317;s(7X(4x!l3sRu#81CKuu9t~UOEoCx z1iMsvRm6G^-^Vzl%Fqpu)_W%9*msbap`cXju^_8BJ(uGBko|P%1MAwA}1b{uJEm!v;Uegg4!4^ zCohTnyS3auH>SS3t-x%v+2WuFgC|@{B;Gna6;tW(EA=q0A*SJO4 zuxmrhJkH1-Xhdr750xaDN zjY)mk(|l~7$W^9jqGqfzaFZ7V5E)A}?ws^~*1>4C*J=d-U_XpudUp&Cyh$lvRM%#W z8j(Jf4m;{cTh4^CZk#fzY&g%&w_NN0U^xF`o)~)9tzLIghdXx-yh~JUS$1+doMBh9 z@}8$6<3Mm{B}?o@C=5U~jqbA9d=b~I}`#WsR&xtB?F 
z-Q)GJ+k2=Az|>f?Vd_RB!x{7nMGs!To2u399LFcCek7C-_dBdOGV>!HFqpBD8<&Ti z1+}@D0=u^B#L?KxBn>ycP>*t_skyGQv{AboZsI79A#Ot%};6zFT;B?@#tGGtFTeU=i_)QTTWBhnwGr2JgppL_g=MtlPyG)HS*W zz~)9Do$r{8ZeX;g#D&;JZuT*y+q0A<*SUL37a?})l45qq0AA?An&3{N0@zF7Dk!`(ddjJ4WTrGOQE$G% z)M2%$;<+RgaN~wUG6@WOc*>rhe_Yk&d>#mX(|*0d;d~xxz$KKewR$%jxXw|kf=T+PW zzx)<18yNreq@BlSM`UO2U_Cn^urhkSLAjb_l)Zzc7{B2V)6|_8 z1kK|s0A2-B({02p2UR7(t_Q9djyVK~*>rUMVe(Jjl1vuL^Qw7N^R}$gZ~Oj0$pe}W z84g8y(MKL*nfLh;` zUbJcxUtgh*#37P?Dz$OOTXiskkO~IZTl1hvQbfK&oW7f={Hh>eG}^Ze);r3FI&#HC zA#Nr31*#|T<6fu?M$_TQyFkg(R{T~)BZW_kr8dtnWr<=oI+pNdUc(qCx+mNLkX2R$ zH*zW!*Nj5#XGYv%q?gNqpEN&5iVSt*}RgQR7d z$HI3rXc{gNP08+x-OxeoR0O$H2)8raOaQNLTrlr-`B#~7Qg}TGPdG9ifRKs#mV=+T z-y92j5)E7K6ng0_K{tO(tl1HHpcj0dTMBl5lS78VHlEtKRi-Lnx7XAI+rS#W2-e_= zcIN9;P8}E(i1u$)-AUhimO(<2djx{WS7yP&A1IUCu~hKu^u%~<%(OUJ)v>LJW(tHs z2r1@pvJ;-QC&>~eR}b+=0O2w-un+?r+i}0oaKDe>kw?tEN-2ir=!)3*OqR9@8XghX z^9z(0U0;I|d|y*HikwB&(+ZkLUGqs=JK79WuaBUVhw+IC`#ntiMqVw^KGYPnA8bac zQZBwPd{=!wzZ+XiZ7pmFAFfk%{0SN!F_)SP6sH_aF{>;*&8m0Y$i|7U7VJ;TD=>qG~PDRGbu*|0<5B>jzmk+^##Fp)pcE7 zsv$HU5foF5f`XRbe0Hi+4n8(qWrYjbWTbr8A4(kRB$BKJpaufxolQ1unEHp838W}v zDp7e>?l^{GA!oIrzq8(Cea(WzcH*bZ=qb(S?(6a+*ydlPwV$R7(R2f=GvvTUepoge z0p$X0|CN@nQqDBqSto)G$5Ql*MzNX2WN5thP}WOk(CBnPBL&(@uMjOfVz)Hte6lnTjV|oQV#`iB}yroex^@C&po;7-%n^V5+mYJ36N*8SU z({Y0i=uJpKots#3f8>Td#0Qnc3dm7}MaNoVZPGH@lS=ojvn))m%sC^)gB@Cq2W+Cj zr^2uSjUViU7{S?yqeZO>zbINv8_xVNqM>QtwQltUVTYU7n5@m`FnR~o5N*s271!M` zVpAFkl~dM(d_MB2=Z2&Gy-TA$Pj-X5(3>ippRmP7(S%Eo@&1nQD(%qRoeShz>YDJG zQZIWofGu=~O44b1$FXkHpH@S|r;7^htjXsBmIAz(q%lIf%eR<8J6pGyCEl7~yc8fi z-u~EEmPiQ5!^x~ zAcOvI6_gq4?&JiXt7=+mR}~~Pu9=u><~y1*21D6x$R8$pq^mu@S;dEj_c z@LyWwnm050{L74T>+W(W*E$Cq%#;QS20l5iaaXe|UtiGyh66cmuKZv0`Z91?X#SV0 zG2~qra8kQDe5&cu=zRjLapt1*F|0~&5I?}Um_Y_fnHoF*E+(e`k|K1q?8#b?{Z{_r zgn^Fl1%OEX&Hi7Dkou1n@$<`rUCV?sj9+eA@?!OGQM!_Qy{1~#fpI;ilG!H!wpo9N z_*Nae{qyT2RbMZ>7i!}tT(eh3dceA!>4UZN{VA6b@HKF6?~N!j_`L=2Jw82l_wRKN zw*HB2H5}dV{{9?5(ffQ~m?$N@yiqXc+I^KOHUL>OuXJ2Q09k6938?bvwu%-annB-aI|UO{@UAEC$>1 
z2c05QYkxGm0qEZc3ibf9?dterX_&EnaGVcVE*4oR2Dvv!f@IREs;ni${(a|8|6y1# zk`%32(@vmk1QRh& zF-=n0#^lIIOrJg6*mPXtdbciBG&GXXRq6(1tEQ^S;zYITVKM;iX8h9Y;jpKLY|TW5 z&xW)d^$-{1k)F&4G)0=agyrWPOMaC!?iI@Q^c!oGcyv=Eg=j2HLkCadqNa#d?YRE! z(Z%qL(vdzer9|{&E?g^@37OVC;)Q>u!e7c}r2uO)48upGwWsb?gxb{4fh|41ef!cq zH!?op4;ic`NkRbQ_uupqQ!Lp8$CJ@gPseUWyX?RYn~O>KmF;vHbXT&9%Iaq5eCg%( zKTED~hNe|Q>0VDOVA%ViP$F+#Xi3>>Xjjs`vfS`$5IU$uk}Bt)GWd=(fy;d91>!cB zEh#2bhT1_Dor**iWf)ejKAIfdCi8wuUUuV|*d@2b)}eqb++ml43h(g@c_#5`{xuwt zqQ#0ow)E1^%UGjc&|_&i7HFX=Hf7Y%+PO!V*jsYR(Fo-z<)G>HDTPwggy*eBx@Ec> zlkpjqajAI4Y;|zgBO}(}FH~r;WNvrEe^|SCk6Y%a5k9u^#Bo7x{!x#tpfUw;7_bbV zjI7B4x-bCUO3555+5qe<^EK1_%tKc#Nl$3oL-K?4Q&Cg5=%-gSHT)zo3`0=R?$0NU$6WqBk*$N|UW6 zUAUE54L(L_iPkmoB+$*4O!#Y1icw&^85SK~#Gn9!;h8A4&>~%_0=DT@o)-**=1~)h=;Fa8!=}?^C3|@mLH%KYx!T8b6FC$aw;tL%aCWIy z;o1PWgIQFmBkD)52nM@ZTg{!jqLk~(BMUP<^96~8QHW*jnZf{OoWI!wAx;+zH8n{V zs!oJ0v6^9*Enmfl#OV#Rwi>O7| za+u2*S#NFYHss$ORq&ujaoh1*Sjx5#DRpGgRxB3pas4u`DNzd=?f!n{R9K?y&gnC@ z$!@mfB%%nI^C(cq(7gu&vF;pkHiM;hAb?GsDTrPiCY5&&l19&l;WE}Ogeo_juX8{O z+sgXeGSgk5P({FfN9bHgZKmvYj?Z!l4MX_$FXHhdx21l8M5`Ag4^G3*W&%B)xWLaB{GwHAI+)pm z$!U!?!wu^S-Y()Tf6EeVKig|5NIJlbs&a39yE>`NAr^0a=DM)KJk&pa>`Qy>#uy){ zfUe-Bxfnd_nbm`>^{eUm?Q7SV{F0|e8Xast7=&4>hMMy^RZ@H3p?I(QPNc(>f@*YW z(9)Idz?tW|5|Pb98P~e($79XRhN7FU2h(4{tM%G7@|2~y{(Ad@mu$X83`;=wkdKSR zb7S0iiiQc+0YC2u^XkJD%uTLnLyD&L+ghIeHB17pP^-y3x(1q69GBipE`69i#|Im1 zR2X-*{o1fSp}^Ycc~SqgTSN3mu6ZN7Z*X_p!4c9i7%K6Nx{x_*Rr;Yj%4x&s{h@r% zf|IT64_)21reVzMXXfeidkWydd8AsHZmsle>?Ka;m^Gl%-L9}8?Tzgj_soRwX11F$ zW6Vh7(qnr?s|H+Ki*EDI_tfs1I;JO2-l`{tfZ!iM4Dcd(;$8S~;Umh?LH`1w(@FQpO z!Z9j+b;Q8SA977_)efv%-IVF0$82A=(@XG(SLM95WR3d>5Z+7f)sP`Vmb;>O7$DbMH4B6>#x zyi>aQTyoX7Lk&Dz1?<38qSL-K?KxIk!al&8#HUvxMnn@$3SWA|$PBvETNYJk zH)_eQ*2;nTipg(x991& zxV{o0Z8OzQX|u(@&fiMm{k7$5Y?@?3?fSKE@?oyfd$F4A=Vb*s$;1*2LTRMzWX?%6 zf|8qqm(KFZQ~T$xa~l-7F1hveD$9&PF^|&hU?u~?-J3@)J9Ap@|=BcX1^YO{D3^r#&#N?ZtUqVCmTC=slFYVl22n(fkKup zBnPQQ@z&9~8jA5yIKqOTIGZ9`^YdeUmBDLlmT#n`4FL^1&giYhNVE>f1mqp+yXFLu 
zxK1{-=+eIv2w-fFAwC{CJAJ+t$fk;?Zm+7Fa#n4 zZ|Zk`Lb1zy?_Dzr*1tghnVL?rJ%^cyjD1DqWDpgD8}(gyO{bn~r^nA%I+cZ31ZpNP z{#(U_kprNB+o9MV4K4X2`6>x&p`2xbFsbc`CcMbdQr*EO=2o8a>2Wq%?}>9E@Pz{D z0-esJ5DA_*?uq$p4V$!u2jdwW0AH)Y|FN?9EeFnfyJEp5(zzO#7Lx8+$J}H4&=*c0 zgw1Fk&WLI@N+psu;S}mC=T)geqKnw${ldbV>Hx?Aj_0E2TsSr?s4uy@XW=SFGRKNk zbvDp9OUnvc#mfJ84KbZ!jje2$|9OkF*R~KNvLuB;jvz^=X~0K#A$k%HsYFT#wca%k zo1O4N$>n=esOFk)e=@h&9eYyhC%7&m9+q0*(s>V1*k$p{r+zQZwZX(%Rk~~{R}!1C zivxE0TNZE{)(bU0HsmA4Pflx`Ys<6ARQ3oc`pso|v_-U=??nt*A@1F?^Gt-;VBUWC zr4bP3XVWc(3OJbQCD}?#!Fe|cBw?i^ret7Iw$A>YcZ$>IqWm6E&x!T)lqHCvBbO>6GrrT&3Yrbe{%9i7mOC-LMn%r3DCCn|_5Ue<{oo z3}>LGf{V@D5BF=8Atkql3;d2-K_z$%x$ly7FkCcru1J(w;d4d?c|-2%P(}tJXD{@7 z@&_qDJg5Z2rUgcC@H0A%dZ1Fk%`b4};7ue@+|j9_vA3ZW_d_od4RFfYULnF7 zu5>2QA1m9Pyou(Cv9M(>VaS{iB0W<8<%i zd=BNs)j^=Y7yOg?eZWqx$5>yqI^B^o6x@*`4n;8&Eacsh|ILO2vU}v!Z=8!nIplr- zj=ek%Z8f&Eu9daium}t;fGU@rB zJVv50ZD9hAEAp63VdyXxlvN+l1LAyw-6g3R4MOD-BKu+5uGh4 zMJ&|=-kNae=XJ7xfvl4LWPfq9;#+0jHM2u%1XGk>k0`las5mFHm0nWE(mXRV{NN{u zWi20?==N2q1x0z&l7Bgvs`uB#OB#Q#Yftmd*wVbuC&q(2-cD z0CC7x9Vo2}$PB?iu#AaVpndhHjV|^hFr6j%lod(HwUi&b6GCqvv2CeE-(=7CrlX8X zzwDGdPrP0i%URb4SNwb3cHPHO-;@pu<13w+2CH0GfEMB?rn#iX%q@5hy~UcSm=s4FvMalzQf6oACr-Mj1874`&>F+jd4~bEZ3e)C&z% za>M?xOrUM9N5g`bJ%>}@zBVrAQ1gMfOiaK#AS*Dvt}_JExLj55A=^Z3sGznO0LjC6 zr2ka|Bs(foT`-8_5cPjt!{8LZl3K7iBYXSlCg0G$qpP(a!~D#xn?er&`^e2PC}1J{ z@mALxn0XqFWn8PVPthn7WTwT%w63PwHUkjZ3+uw zOfZ}4l&Q4-ZU?6{eKfm?-j*Jgok=@7e-E?(kbivZLQzM#nNbhMFY&g!1DS8x%OEA0 z9m+RRWO*lh!l@5V(+ZzqPLDyZ*~0c^Fn3D#D;s3hS3z)pjV8TR1IcG!mm+A`aj01m zu53zxyyYKIY(uurWo0WQ2#s7@Kk*sS-PQyr;J<{IY=qIWkJrdjSe>vb>P|=+0uvMh z1RFzCMn#}ErQT+P3oYzD!SjTQmQkTPFJn9hg*JG`F96faj!`;oLq2(%Nbu)Hx^JRa zg(HCSri-E?c!Jv5(!;}6Vnafbi<96vjebTiU$(3V3(U|6p+tdJUZD@L4ZW7nE8sVllWEq8m>S>qA}NBX`Z8(O+KF3W|V>|`oA957@H+!69Z{GxmTfnHq9{azh zhMjf5%$_CKKR|zKQ+5qcHM4sHeo*>LFK8aQ$8`L%8)*G)MHxkw>0LmYNAC!R9gRi! 
z_G7(KJ5>nnQ2FdefvbNzb6=b=>~efV(e$3KW4eVoeU`WQ*Bo}Nt_S`KtP-2F*|HlO zXQi9N_IQ`OYkB%*Tr0QsWHclm%)h-A`9(@8<^UeVOa~^+7>FP>@*+I^-wKE02*oPX8gyzlZQ&-sCK<$MX}E05iBR z?h}!3^c~;pg%@CE^U6@wOYr@(^K%=n|9Rh1svtN!vh)3PxWkzxK-aX)czdvLv-^{q z%Vj=@BPJGvSeVhPTo-Scg2f();+VmIuJ5n&A|;06Q4E)gcyuYEg?|V3cv<*;v|}#_ zebewNpadNFGlK1p%=9yms19jUqw0j}$0U4IO4vHGdjyzRD0K&-kI)bNJCaH$L?J}@ zi$GPNQStrmsI>Iv>A{{*{~JM};n`BAakkeL_QvOJ;^3nt^%3tTPr9I(S^nZN?>&nE z?J6i#b1!=0Hb>1P8d^BU7>LOCdY=a$Qgi}W}ao!p)&ek?M+3D>8`IlzHz#Xjzj4@4a57<(D!}B_z(~OTA=ARbq!lG zpYQb1JI`&Ni!8U|*=ycs=M?8OzWF}{)(SQxmgNNc3jF~F&x&(mm)In>2YGSuWbQ?( zv$p1P!p8+v^EC8yM8hXd&&>EugJvnnt9`3de?1^4dZ?csJK`&0V|^9$90Mldav&;M z7>wyX>$6#_N=QoynPM+G$ZKrAOTpL%Pw$paEC?s^BLXC$wWsteg zSdP-k2-RMq6R~L&hX;pc<9vDsW{#fflh{>Skyl?091hwbj(GOvo834l$Y4JvnmG_Z zf*8Qm*6SAvBnG9cTf|Ib&~7#cSVdr<-%S}4*k)t-A|Xn`Wp^+oCi0rkc*W6LH{AJF;r0s(UF_-Q4D#tqJ45j}EYug5EE@gBoX ztz4$On3>uwl--#|(RW8lW#u~gY8%9s(kn$rN{{u0OWttIu2u|xbVH6h+ntY^suRGS z81aWB&WF%$a0vb>H-b+*?m|Wud4@LzMbDBq?Q@$*nX~)PgIj7N?qSrC{%oVL;<|Y= zf6`2$CFiUU2lvB;wF81yKy9TGKEpgE;j-;)V*0GD!?9jNAE@LMMCpn*{BP(VsPl3z z{&m|{$zo}K^!Rap8a2yui*6xusBFNgdWKWl^nxxr{p(K$%}=#%vZ>mRo7yyo5Z4r# z<5W{TIbzx-SpE$z97nLztC`Z-(-NmBi$O8lB?+a%OTOL}g>|;EwoXH>FrybfF+xZh zkV2u(oL}*GV2y2^^;Kd{Rtvolm2eYE|39o-?2WWG_}Hf)zkZPay>Z z4mPoKnLNM^hlxX_V!x$iWS@Wtu0-QEdHj{`QO5*d;i-~5S^QDTFoIn^U$Jim@SH({ zC1+mRj3-%m!xJUF6_Q|ZvsJR?j@Q@5BGz&PQPQYR3Bz&JLu6GeLfWlF_G5B zQT$gXqxGV40T8GuX13xa7ZE^1Sy08Wi`b3NFr&>h36pV^xQUpOGRvj8(vTkmfy7KJ zZ*=A={00uA?ZE)beDln@b5sDR3dqJHVzb5x9dVb*DHOWyQEURDa860_{!_j+6c(Dq zQsV-1G@+`rO8jq%UpRxRl7JMZRcS5TzpU%84s2Ajy-c2esIRi$B{%@sCXf!>r3#77 zi!`RFn(f3l11C<0`R-b(rg2$@xkdIsV}tpq42L*Ra6Y7R%?L$uY=hShEKB#-yW$<842vm0ha3}!?cs-AoFQR&U)lF;ngj8>m_ zUcVWoYzJVN9L?E2I7GB0`Zg-woePM(7aJ#*F)ohDnqPoQ2mh#OQf>L>My>0~+EOHT zmJ=%nzR*!E;x*qeLXF;hivL;HyZ!MoKpZl^=nm@#>sS-L)3F2?p@u+5c2LDTqQ+GX zkFwwj!h1UySsxjPoBny{XCqMgQdAeWItvor2-28Xxm9L!1tz1v#5<=&zQm)^EAv6i zE6e*-W1GO5b&GpXm8keC>P>}x1+n5P_4S6PPpT^L$UCym6D`M&)C&^+Lz7bH9lJX2 z{z|$xlWFelm 
zFwFR}20VZ)CVr4Sp&Wz&tAQnEwtxs$kI?R)IHu@fyaQzf8?A$k8NvMI*^u(U#WDYo z4IBIG@9w6|b04OvX5gJ$E=!C-dX)R@aJW30-?FTC6%qoBUQE-i-SqW%ZD06z#O4vv zb022Fq;3BK@Q;ve;!EA*4v{Xcf+{Kz`fxMt0({v4yO2ak_JL1Xm{85Tk%cE6JjOrU zCBGjj4leJTF~aol8JsV7n4FY!iUWM|lk{hWkZpa(DD#w|?ufMiA++Aya9M|%X5e_o zx)k!Xpw9p=>Gjq;`DB!IZ+p`2_truLHqm;d;g`RA@to*4*tVuLHs?iMrKz_O^_B&S zF)#NM``C4?N?JW8Oow?Wf7yk4pO7E&;%$J3`X0N+3|7P$0W5rz6Crm z_PzK@ayNWPnDk7JE=yB!?aE5H?#9X*$|5LX;28j1mczL;<9u`W^s1%AeZ>p|VPw>E zMgY@PI^(}+!pRlFJcgsm9G#)XokMGlnu~7IfF(pb61!X2CQR7blUMzBX+0Cbn*Q4D zLK^m_Yi1SLx9}sM#MWU^(aH*tP8|v-g+wE^4HlD8yyfFhaar58cdFuCo>IaS`-T~XA3#;yh6(IIeofr zE-^?9NYsfgP3d^=<@8H?0w4ev#@g(1iQO-4OC?tbe4D$ zD<6atUfY8!@Q0d$)H(PyDxvlseQ-|n6`f;4=P)GIvud%s^oQ;C7);kziX{$R)V>^^ zGs6X`@&R9H2_~_z9_H5O4WqqB>R=hVYxB#Lb{wsPkF~$7ju)o!DRD;Rx*qZfv;lyI zVGe>`NTEPz7>`O$mVFXmRAsV%RE;=|A=H-AnUm-p;$qZycV~kf_A`bB+xBO~wiXwM zj~bDX)9I)sx^%F1ZiiYo6{(R?r}eneMYl$$E%sp3lcEM)MotAxSsq5tANVc|i?c{- z667y8>{=N*KQKG0e@7iF&3AkSP%S`0B^jh(QqsxmzB{!5j|1YjA-%8@oglI)g_7a) zn374lPL+|jKC*B|%Ka@&8x_dzO)@suMBgPim-5{?K!DW8u_xOzsYJKQ>2v-ewq1$6 ziby!iMWU7_gN42GIo^#Ky+%|BsI&}Yfrq<}UDh2Z1mD%Du&+->K77ON%oBjS5Or+c zK0|!N-c0Px##}|xUh>%N8;PBnsKJHt7=NM0-&v7L#F9Ne55sa|ZMc{reh>RnHiDPs zE^<}jys1-;j{J`pdB)#$mlSoH9N%d6Xz0Tn-_ECr7(0$q86M#Xb9XFDhKJg;NK$%g z-QO*k7*Vbh!d=XSPn0mGA$eG~uERID>?SO=JthY-j+HJlV< zcyWy-sF&l*r8)cz$AUwi+=N&pbkno-OyNXTo~B( zVdHKp#?$u5$FGmP*1Ig2`$ZcUiV(i(j24TWG}*Sx{I8};#W^2%(Nbs&A=b$XTL;S2 z+|G@x%}cqFKor~Bq90|5UEOoafo4xrWM(3%q~xule9In#C?0jVSx6T*&)H_A_-YF3 z_Ni*`dl?0Lsu{onkOu9uXOMy^QJB%FBhCTP(!^=yjz?1|0GI0}B~$bi-ZwYZ08=Gt z27;UGvtMWK*CFh&Qp$kt6eZol9wOQ}zuSdKN64XX!yP@~!zZBNI(wCoP!|p4XSy9# zm-*N|Q$YmKjjwzJ$EgXluoPjZp^+X{PCg_V5l{{p@&M@0S9039llEDXEC)F{*(cg_ zGO!!MdFr)>+-4fntnS`^Eg(Q@d$1KpK1EONaKauyl+u1eiI1-qcV|Ci;=!4Hql+4J z%1=%Yj69}$@q?Vnu5WJ;v74=3C$9jM{FDo!THF% z7!o*t830N`MvpHUN{UW~+R9}g#%mRgz!BG&J*VHWD!tTemo%PQ9ZxRrp~uk-oU_Rn z_JVs$*e;;FHJq0@TL>OLkdg*O6SI}#-b5hB4E1KyPl1ZlvzJukQ7`IYT%7pTfdM}J zp=p9`1OrC=#SmV;;;aP_;D;?rT53LK8tUlxwEz_@;yt;Vsygi~Wzr4-HEg9qko%3Y 
z1R8+`YqG&k#F~VdbJ+`*c0;#fGcMBZr75_Q*4*j753+m^Ei`g;^ z?Ab_p%>Yo)0ABgZiwHy*wo>hci$lQ?W2TP9`=68~3{Tlu-5F{P@|U>C-(5{m9ZYL@ z#AE2*3^JvcTGCJGCHv7mb3PF}3576MWdN*!S~|?Ms@f;ue6;pJY-46vSkYt+35mP! z*s?{k%3mjcYW1Y%mjl>++)23PGC`5Tz6TicSlBn!=1(?-Oe0ZOPo`HogK2VQdKcv! z9XWE3dx}{0FmsgOla;Bl0dW}GhSSZ%okOUF+gyR(rv+2&PzTgwu3Nu@h%nCvKLDwW z-^$NWe2pt#jLe01m}<}f_xn=8xsek9Lyc=_X}|5#lf3zNeh=uR{#li2Yp=Fomh_7T zqQjDPEgH_6ld~xC#`WEvZ00mEu?4G}l^A(7Q>pEubV_&Vl^z9Bm{_>+TS58d-6hV- zH6eU7e{%bF7QEI`FiYU?n(+dcVL*yaV-)xu){c#VyRMx$wWU?u(aWpE@gk)@AG9d8dJ1; z^xW1>zIj)MzW(ga?W?*yMc1Ekj3Xn=(Sm|nzg_-@8gM0T-ylr*v{LiP0id1aE)3Sj ztTycUx9N_d!hJgIy>-+I5y(5jn0kN%<)pG^*7k9OAISkp*)h=Y*UfGt_J@DMh_-cl zjc>yA4)vrMpMIzY(VEdkj5hqhP%K>3G%0EKQ0_1Q2a{FFepS<_w~=HRLCB-uD|VS)aL1_4ENmCWM=}uFB9G zPg9)B!+BPFozV4)yF{6Rbx`#PAKI{rFGchH!WiS+9Gq>il%l>=@2nTyB5_o}{LvD*8QTE^6wOPN<=4`7YdkyucX7INpt zlbwxTo!0zYqQkrVQft7yHX^Axz2X@qxR2lJ%#Ie!b=uQuLQ3ICgU3Ho*?Djd`^>J( znn36~&ms_^M;=42)rupBROqjiyTu>hl2f~w=at9Jhq3R=e^Z{GX$_sTp{Q|5)0$(Q zDrsSgvB5wYhjtE=#32DgTgIvP{Ed$2_&XLUmqn{=Lu7G$3g6cRO?EgI5EGq{Pl$yH z?@Y`9(eGpMDEW=+EUjCWgzLuxU}gXB9WuPJ*h9%DJ^cjpBFB+#Nz$T<%W-Z{`aXOt zvLfKA+aDCh=b>1*4diqc_Sx$3Ah$U|99sS=GEx?=PoSj<90$& zulv)V;pcTGs!b2YMM;vnp*M~Z;Qu4ogA|BK&Rqa5fE59rLV_83SwKof|F6Bt3wX1F z6j~}TIbKt@RsCP<(g5nLO6sH0zlG`5i~0_~Jq(aeM|$8;^5a}}@1sb1)ni<8?SF&? 
zj`t%-BS5^z$MgMBC_|cV_ZQS>-Cv;2Uza44AUl3uZ{J_f9}_Kre#_O|5{z=@UuX{Z zEj_y3fTL%yHYy5m%tgX(8gET{wAE4ehp?S=NC`f!cEtx?wQn%Thh3(5vyJOAVRQ@| zB{VR_3X~|YmfhIOF|jsip3;9KDt+^EAvLzpi0Ns%$H_i@DpQ9&Di z1p{tn(QDRj5)NbrkFR`-8UMFDN7_gm*N^mn>bmkksJ^$~y<;borBF<=XPJFL_MMPD zNp=z?v?`M|B+1}fi|}S8`s>RvuEk$6JWso-rYTezoyTA74ZKUC=4_YWLKp zmZvrur+$frtS4P5_uk)ZUEMwL-^ibaJ=VYfZL4EYUbv$x#`JA+!=XR~ z>S{R8t8QZnzQwm$u8yzEy+2l+?^o)d5Q{t<GGUw(@&4ksx(jLdgkG#vLBkrolW4%67vuaoV`W-X1$-HBO&(?J6l@Avm zIbkksWvq~y2z&ccw@0mNxVgst)ie3@;^IqMci!gu3M2{c{?hlVG*CB&G^u;^=){%y zM-4mIz8L-`)1*_xFS82C8+VBcfy#4(4sTaDJs3ZhU$O<)O?kX}?7q7VO=`!nvqSOY z7drC?pLj}NXlbiScR>=aW{8#$^L?|-9n!Wd%Ra7VD2pB5cV?nOBl_Cm8!V{N{@JFG~{(n%E>!G zc`~>^wIQsre4oie?}ZDK{n}2;D(0pY-V+_~3Vv4^d|n_gxOmg`2M$qpTgiy(#;vAr z{(RA*%6@ZYcs8nq$5_fMl!yvy$7eYouMRzM*^{=?-GtIBS`OW*w|es+`%lPbkHsVP zBGl}bO$;}y>r9QNp_E5o`Hys-F#IsRrUTiYzEaZoV`KM@^W?tU_1koLzfO=}GT%qv zU#vOx?&tZt&cpn=SA86`^Y%{grVWn?tc|!JEI8(xN{VM->wO{}M8DO?2frJ&N=3%4 z&X#3qxk_!YumQ`D#35ul%KQt#SVrabAwO1b;-T0RaucSkClY{ zYYZQ}D2enQ8e#mrckbK0^0$}%h=*1gF@N0qIFb?Ba7Ci_&FGyogCdf(SQ+dpw6@0r}%I(;V#U0>W#uij0;zNjdHW0!v3m*x_! zbcZvY1~<8G5&)h#Y~)1a!Flgk=P zqWB_S;Wnl0y|Dk|o8(28xZd{3?=`(|I>lp@G%nGTHqP%EnrTJW#uXfx>N9#6d&6CQ zY0J`@BnhM3LQ6@D@_40FNhT+m@oN>Hy{i14jcHX{7SNj)i{pN)l)@C3NU16~Z99mo z4qIlFCAy;|6c^85&q(p__VWBHEwpiIx@(1+_UW^!_tc#256QHd9#H8!v*1~n-zOfY z=Sbciz4u%=A@5{~v*+u2(*vyR&j%+b>r9!PYg2k|^hfZQR~0i*w;tmC4bh_V4aNN7A49tod;L>`Nhu(ml3!?lC&KGA%XHyLPckumvJYUIv?$Ng?iDJpA}FTY7j z*)6o(ipKrug&F_2x4}*ABvtx#FJGs#M(48H6;ABSQ89q8s4srBVp>z`pql5Shpxny zI{r&{G$zZZgqdQkJeR;PX5l`ly*j17q`ly}P0iPrJ=bbWwb>H%Yt=+Ps)^}lO?05O?k>MM_uHWD<*BYLHH=XC9_DzYvf{E zY2WCS(8Qljt@js%=neR0{UF@z*PjdK%k}n&;9JBR_x-*3bzC-XPfx6#DpFPMT55Y`U~gCA$$&yFh9NjaK50Kvpyr{<2rqKG zP%^M-t+7azw!vzlRs4>OKIyO(+Pa;-+g@qt$c*V(u6EdDY8Ur0BcYO!Glb`aAt|LZcMXItYTVI$JD@86%=g=eW18Wg?UwF7 zpwBH%zPxhh%jkj8#(hR@Pp+j$BVEGM4Dq&v^GEb8HmAf$-k~)X^tm3;DvREz%W$}} z;GL?4LUHM>i1=}*SQUa;Pk*HOA62bKk3S6@og|p7S0Yad>=V^jt}0lrv$3z5xhS

RKkDr)ABv<=1@pCnpzl zoyk*?c<^9)PuGBwUM%i9;d-r!*wOuFe&ab!ODF2qZLNR0Yv%j_SS4s};eHe49z-tjf-EC3)zCRwgLBlI81~&#fkEl0g{-Kuq+?w@$NnF~( z1MAl^#G)V}2HwhW9FXe(b? zbnndx`;{9M5_YXp?G){oxqQ?={^h_kI~#%Xy_%|it%IEy()pddKjjZMtZh%u*|(%8 zZFfTPGdZe`!z(vO&9jmjXK+jHz;CLJpFX#reyh}%(bV|nTWH8Tfpaefb9)oCw;nMZ z=;_@+9D7)#+ege?_F~7i0q_gt2i8Sbg#}F){(OD*{`Zr&MqaK=KXmWh6Ep6F_X*E# z9U9QdkylYVeWfLNWPedkb?c9`WrUrXH$pP7qN*Q%3+6w2oyE0R5#g6=l0y$tZ?^2NSkoEhB`ur_ZEqDt@-q^YR~!s` zu-azIqafst$Ja}x{jZ&@Y+x_<~#M_1$8k4Ta z79@V_(#XdvNtfzgFHBTSw6QEsjr%&8ET?wQLcz?MD3z4Vkg8KnZW`R?KR%da?zrTu zQ8xBiir#~)po}+S5vxcY_2Z{?7%;Nrwsw zR2^-&iUmLYIPvsr#^c=j>nZ~yj>M5=<1IEaacfi^LeD*CaHj?ArZD<2@iuu2(^4*w z))%X8s#iFYeJ_!5w{&Rq)-}I4>k@&ZUiV$b*M#t%D-|aOrl0zvE$(CNo&7Sv-!bJ! zV*9|Q(K{GV*P79zb~kzw3rDXKN4vU}g-pGr#olw0X78jJ^-27biM?o5oIX~k%emKfj!3N)98S1ga zR>0rNf8`7M5{+9Fll#f4i`H7*3YQ(r;7?BUmyZovJb{mi3|!`;N4qLu|e;6%6-0f$MM(p=DFRMD$X$5EqHAxXL?zmrJau1{Y=t< zP4~Dq1RIJ6W%T8(v)!rnNa?~@u%?ve9%a#Q2YcR@$S56&oFtv0mox5BL?iKAV=Z3? zn#NALWD9+&F?;ddEj@E@u42$S{?$tgHpH48uzt@UPu@@BmlmltjvJMCc75btGAjSX zneZ#H@!;vR8H>8gIw%!MUiobGOjJ2j zF{H`4HL>>P+um9NCCprRyQfXKLg$#Z;`*FsS_c#_J!kQ z^w_;y0y#2G5ueu%~ zD==|JZJP=tF*^G3eu$^v$?Phb1%vXgOc7`7V5ZE9<-yqQ(9_>4w2LQ?4m<^Z_R>7_ zLoffiSBAGU<#euH4{F~={A73GfC@Js@6utCX-bERPRy=>M)N$Ld-b@J<8-o*#-t0;LaSS;ufd@y+MrTe3dx}JiY!D?8g+bgK&i{ETFD(2D} zH58^Fdi`nY@FVf*=MTQ!)x5-mbQlSF2jy*z9@V=R_Haf1;2fK!Q z?_IWADl4gN6B#lQT6Eke@jw@)HRq}8R`(iW!(?LmDdK~Y1n<(P+B!F$c3Uk7b#G~& z%w9G;%_n)_c9z{(qex@CR&!U(?k>+VcaK;--PID{$mT#x+omdKnP)w#^r8+~ig&(E z2w%L)jem4mvyExzYM0m3qA%23HKsQN_9*S0(;?3C~ z*fk9PBcQgZ#WJvM_2=moCrh?}saZ{YB>dbz&Bgkp_N5{rEFD)FoGXxk3kL*zjwi);SMn-`5Pw>NjfbSf%Od z&t^prIqv@a_C+?%^ZJBP9M=znKntE-QkPG+CwTez zs|onI_dK@6=sr^=^#mzfS}=A9oT(qFTPj{9l%@1?82{xB^NUr%Mq7~wd*xI42AkJt z9pjIIe#h&b<@=T*efell-1lvJz8~Ck&~)oH+J5oun=QZa7YDV!GDG$K%FmcoI9?W6 z`h(x=CCxk4dgQ~VK-sp_nP+Vjo~RqpV{9usx5zcT=uo<99U<}EWL+k1{Q|}dX)M#s zoitzoiALyHb$Ya|!(SSax~9H#NK{0ZDYQN+QL~Zc(Tw4C_-Pg6d~0xN++nRna@W;^ zF-uRlVT7`p=&hx_!Y)ZyI-#YsQL*zi$jhQsvxU~!_V9(qB`SXe7r5O_Fm)>3{`egI 
z0Hgk9hNOVu>DII7@|2clKws(@)p1ErR<)2$JZ&Uf>ML8TR`PVhxa@QFlP`|0Fb}33 z3FwflPHJ{)?khfd^to~4+ZD<#j&<4|A6ieJac`+-G%I=^L`WE=bK6R%%~_-8EHrO zMLsvyU;CW8s&ILcJ>SZJ2hlj9*x3|~n1i2;X|I~qHo8W;>EA)*q_>>XYuWF#FKFEo zIV(pU)$+~G35U*HD*fVV|Lv42_mj^4?Z%bCpBilj1cpjK4RF_KC}7pSoII}XfgTID z!e&i|zk{D%`f6XKePxr}dn;I)vC;TpneA)Ei@(b9PfjF5SFYDlJRkoE-?+y7J|WwE zv;VQcBg-dstnlt}>#y==Tc@>$1)VocO*|#?WzXqlUZA;8Z`Wu>0U{jq0P(i-mxwuFm!D^-3YUU-vhhrn*ZTJ0TT;MCl)U|g?YA6|5$BLz{!RS z_FErF-E|8)jw?b=&=%Y<>3+{(tYN%Z)$}^+juG*I^?EY}{Kz|>nmYcQjYS)G2TLS~ zg`GHDlg7)_>)LnV_0*Ry4=QaIjON^OtyqLR|FXsahc$}9hOh2)#o8wjtHO0_qTQeW zQtK@~;@Y?}oS0IP!&n!f+_0|AZ{=358t%nU!_6PK1}8tQGv29@l5j=x@CMWU%8cU) zaR)>Tt|a7pU!&Z{1qsSM_!!d>x+KCrc6|-so!twz?f2`tseK~hY2o&WBjY=g#;YUw z@9u8$K6tsYEp7dA@B?{R-wjU7^u0u|3!h}GxtKefh@>nP+%3_(%iiSB$g5BZ{^nou z4vX#&7c^J+e7fPzWp{Kt->#rRq188Ea;;#fO-H6gCbur&<9@m(sJ!*(9t+IL(mn6` z3?ZZb0$V;X+p|hV_oJPh%5a!ruN5ZlN~QBDEzZ7?9T2qtxGq{ED_O^7D#VhUp=ZAl;(OHj(VAQrA;~nAKG^{v@|K_e% z)Tkuk)sZcXdl)h?ARKFdfBi4Q?!f4vqxm$zRq^nR^T!VO z*(LP*yw)T%QH<^+X1%2Rk*g8=bnEn+8hgblhMsvud4GG8@iilNDrAYxe@qe2jzv~} zKkFN;@x8s>+2dxR<7V#9o}-_xAB-;@aek$tx#(Ap{lf&0;y$}wmHcMCPmdMc@O@=9 z@#gxg3-8KpWEUg^-(AZuyR%!B%P8WdYJHkcMc1d6$8v?Q#%=}vX<^3lzG->fWoadakc>-lgK3@l@CS=!;x{4;C+6npUIFuFzE26EZy#gy`mZD* z>sRmUs!-AJSN{VCg$W<8w#iR;(3&UzSZ|zuhWg4MD{WzIz|8c0x}+{`LJ{$B&~X!< z-0~&2OnS|+Lko?Eh}N>!$JTw6__EIFyIe=$#4oQO?Xhv&c5VF8&fI8x%z3X_N8loQ zs@{PCuQiNg+>f@}9&>g`E3<1181~;}98Uew?s%y5!Og_tFUc}LZ$e*kZ+IFWGtFB5 zLrDMl+L~9)RNb{lgVaaac|fk2)8OCrwxC6i|VQYf>3sd(lSq!Z!kgGDuGNCH}Bcr1yGehMxL zSPJg%B@s)*|Ggw(@i-E5A}0c&Dqv)phjRrWd~IH?GKBD{U`Ik#r# zlIwBqQ7)2u_Xio4Km5T6JChW^$K+o@6uYoA)!8?8=W(S<_-jS`8UY->>rz8bRix(| zLotn=?WcQVk5$6WPHNI>{K1W>{d=!B*scEaPS^ewF;O{`_=GocSHDc#r}K-a-aA>* z-0b_0JiXz2E2Qb^2>dwk#-=RAa_07OS-=xMzfj=wS+AItf{E z3btnCofzqkohrJ&dKI_6K_u4Z*)Imp#v~&C<<)ISvWM*v=ZKh{$JZK*mTTQKwp97( zekf;arXQCO=(Q)-+OcC1C+mqO@TsM-OlcEQhjU36s&_+ zjz6z>7I=F_QCeM1+Uf}L0hio;-!5EjdRJ|_kKf*P$Zr=$gp1y_JM$2qX&2Hry42^tl-X`m8bni8vyb_*Y4RM7$xE_*YGV 
zL;@a5oUsD|E%3$^OPnzR^bMJeCC*sF%o`e(_*dcJ6L5pSh7d@k7-NYu280fynqY}E z)}n4SI+i%cf*~DCnq$Gx3`?40!H9+>&9Pu)gq^iR^mfMJlNlEDzzaQ4zttyLfIw|{ zvj_@>U;+&};s_N$1ZK_G2QU$bToQ-&GKU9M8hj+6MKCk^y=p_j5=aVSz8z9&c(oMj zgP2htR2l}QA$gdoF|5*nUk)k4%=^Pah{pi5tRY$sS_CoAe^zN|z(RLnh=|naK*|l3 zE1|^@Qv3y0Y3NsgtRUw8Nfl%@1rkR5lpt>=_jeU!Li$$gij|>wrq6GchBYdX zEW|9BRzaAm&>mzV3<)7o!WduVVJ0NPwE0MZNfe+l37AAhRkl344{~8%`ADlZ8@bQT zAhCoWJsVHV#zES5@=>vGE0U8Ivq=9$&g@3#FANRBp8D?EHx60DOjM^8NUrA znBcKwmKq7B1T2}QMuM~%7&pg)nGu#e!vd-WLJTXvLsz*6T}Fa-CWW5Jk?rOmJ~qfJ{m2-o0@2L`Soued0)Uv$?opOkCCYkc-l z&J5t~cJI_oC5&p_PcJQ7sOJ)*Y_0ht8Y(r+VIupmkSvn#2xTC#D}gHh>Apn)=tGwT zJ%sINjxtCT4@-3Q%`AenQl`zC*s-va&lVL*~4eb~qnA<||L>9@Dmy;x;@! z?WVEy4zGzBBz&B=+0HBZeVTb*?KklwK78AFGlR0b7gA4mcpgyQ`N=vtc>N}o!;Gz; z&QvrhFI=gT*-}UhT^q%-mEs-m{obJ*|5#mby4h{&^b`=#-ttzR$eUhKkNI;?MXIr=iPP8HH_c%}m>VGv<8 zNRXL2xvW7^A4-8?D!!q{5IO+CR00AnlpDN_As6tDh+Hs%;$SKX(KCe-U@95?yBz5= zg@TaN6yP{6W>7Rtr6NzkU!+=xrQU<)z_owtfwwjm(08QT5E4Y}Euk=kW(+2s4uhqT zUIM%h(X)ccFpYplH)L7_mTD-mg1mSbG?onml)$oJJh)~VFi1l*mi>Z^LSvaP(F_X` z$*f(2xJYOD5AYZ|%YA_F(OKRD9E{HL9boqKzsoWLfg8|S3mY0?>8yo~C|KZCvkMp* zV(F}fjmcOLR%hv^MgY&SRyH*SZ&*v4nt?a0waw_@&CKGccd;@S4gDCYGlUcyv>YHW z2+=h~0~>H2kgtzC1_IsE3$n>f=(I9}#1K(uXgT<3?Eork9t=n{&QLt!WDYz>)CCfN z5d{lyY2*fC8W`3g2r^TQJ7fVdVF<}K&mXQ;Blz* zS$e?}Ot5%7OEGu=a`1STX7EHqWMVBOjs$u`s{mO$uAhB%DzOxi-KeIT{}NZ!L2 zJUrbC5@O1|--zt-htz7z-s?ihqX0;iDfl4_5!eJSNO*43~u(|36Df!t82 zAvYA3!XW7{U_4@73Jc1T37X(+D;=O85`m&gCF6im;3*_cI(QN3$j}RT6;fCVhd>16 zvOK0>De!|CJ9EQh3HZMT>qfv5nD{pm;A?Ui1+)JE>pA32#uEP8Fv!<)Ae;)s^Y<2J zG;llC5IR8m$WK0uE&${FFoA;XEQfI@#yJ2f!z5rv<#0ZR4x$0Tt8?S(Xa8;h96YW`n9Uogr^zbwdt({FhKH@u` zs*4zm8?4&DsvHj@AwO%;Rqe zfmh4??C;uk4kkUUseK>kb>Y$O1m1goUnO(aU3JzEmT&e@<|bWTf8Ad`kpK1Sg;Tfu zPwenYwdNL=*%Qm2zxXiA`O#V#EI_~`)iRh>$op3?7HND9tIiae zNSYA_l$)<%VMg=FKUa&jFaA31Rf$%?ZCBb&G|wM#c1+wUU96|mk-pwyi=@cstoy5U zgXFi``UP;yL{|ns_s=GuG&@OMY%2f9eV{RsG~{bFYD-E zs_mf^uJ^XN44RIVs#w1CEwetW8)K~d;ZT2JH{aeJYLil=8%f$7-Bvr=*XHQ>PyQa{ 
zS%v)O`D(lGWXT)ub+^^D=6|8OXBg&1WZuFvfCt%M2_FTq^y)Go&U76N7elo9!K!e{ zro!Bj9Xkzl%g+b;U*%1b6p(PA7CD=uvO;KucSf#9N{lnJcw2i&O=xjXL6Gkv={O?M z1?4}iEO+mOQLom{J**Dc$jfP$mv_^8^aeiUt?CtemDE{$DMy~ zxSIbmQZm-APE%250NZ{>obYorBHsbH=_iW~rM_3idcxH1prRE=io;+96en3B$!}o` zP`sjLJ26`nmTtiG!)q~Ge`7Z&+JTY&E4rzs;N6Vf{TEsQ=jjEtRKux3m>&Yc9C#8H z`S>2*j4WCTjBNi9{1ACJ3@bP6`~Yu(klc^JaY3!S99j4gPDLOXBQe`Z0A-RWlJ^#- zX1rYjgABzgNs%i=_+i7TQTQ_kH&?zB09+!>)$asjWAJXq-Pt%pt7oKg5MG4ve1^Y6 z^f~T=k-V?43KBg8cOc?Z0KK$+g&kp_Kr|O2tEb>@@F-|vTsT|)3uvJZOG2PNAVAbd zwFxAa3zGgh=Dd73mnG-tLkgl$4lBSce~kM9{L%Rr5S5oMrjY5GKi_`twU5>}V`V$L zYaPa+bT&~#BDE?(4vaL_vR#JW3txMPLxw>6HNb-7&+&-yGNl6zx`at5gySWdPk zV!bGfkgM&mE%*BO__1Sqe%m+Re!~Kcf`l!~ zmJP4CCyJgP%>qrTI;>OBZ$_9a$cKcnjNDw0_qtBTeyg&d92X1qxphWza5qRqH$?K< z1TgkyPY+iLYYy-qUA1U)941~%j4t@=)#EGEjj7=O0GhCTVe%DHzd2;~DS89{`Y^SV z>F4#r@`4JPJN7Rra=!B8L1;MU`NFNVR~2@V?R%hdSS=ZWrhtwB$eJ;%bx6b?*c76o zMh-AW1Aup@Agej>27n!5uAzYfJHlK?0|j=3xt0bB>#DxKU0t_YwQQ*NSHw1HGnjoZ?2M__S&IDHWh6ghU8W^at zNwZobsIjp+BWQLYQ~zZLo&Nx_ximojmnSIyL0_PC@E4h)$$?Bm6wBe2bEap&kHLa| zhc`c92J}P=VbV;;Q6E*RFyq{-9e>-RzZP`NB!sJ`K%t4|SYmDCGw!?_T!GL4({Zx{ zZ)nRfoPDamb=Ph6OUqzO%)w}0Sqv$rb6lnT^kQ0Fz^#aC=$b z_TXYGpG$5231v3t3M{4FzF0e3BX_Rvx6Po?C!i>5@PlEs1SDr{?SgkD!~}p`r9J>L+1)odL*f;4`x{m6UGS6 zddN)T0;$#bUoIF>kIc_~9=g+TE*HcLoSEirNO>Q>BHkb)*&_tw7&#*ai0V?9 z7+|_+KM(1Y!o(pzWx$iP$z%MOn{zaj39MEa>PW0k80tu@MwlBBOQ6m|F2-D(M#s)p zL10T^&dZ4Zdk z9P&a2gGDNq0~-gu|3a8fLovJovnXGXQRvrIKRR|^{I-=V(XJW1}6>IEU##s>;wA~4>bO~d)9VNu?Y#O zRX-fRr@#6AuO^47_cqEuwG)$G-fg0d+%D?BW5JjT+pze3blfk2bmMHtt1>a^YU^jA znTs4oZ5BLkkp*byH60^@C;{)3sy483rE86+uiblrCxb`wx5tx5Cmrxbj~*gHLYS2+ za;~(d^RCOXTTf8AKzcN$U#rU1w0`Al|6}jmzQDg;s8-M3USI{dJsOTMco4AAN`{eU zEzoKpYVvJgio2lHPv-ZSMtk3ioIf_wY z<^Iau1AL3Ne*MDQ4ry-}IQwI**zSPBSk{u5M0iwM|1o#TMI5hx{b=orZp{N4cjB6i$||XMbJZGV`MoHQ5kh-j zvk(xE$en^s83sa8W3K({o;~**YmrS7`S7xqFpe?dZH>P-&3jP3+un#zAy>t%ZCQzP z&~0_{DQxh<#f}=H9qE?aXfp zYTB&A&F~AFa%iq8htaKmxgr z7%Ku>sFuZW&mrIELw}LWxn7KkU;-NLZb^3V8(RolDuvhZf81wdudH`vGvTZQ=G~Lr 
zvB+1wV9zQqclEPzN7HMX&u{E+CGzf&`$oxloHVvk``6tIW#@dqXmtmw*$J)f=WScO zfsdrERljiiN%AL)^>=mM8gX4N$4BmnW@)6q72aJ$dHj5wC;fw(->jN?lrcAFtDQN> z(#DxQT!3G-_Fq*HyYP&s{O0um_>`jZ2gkb^dE66wPkv63BOQ@gyg;So{a~e}W>!kf zssz)q>=A#3z$2IUefa+RKIX~lUArDp9QQ3axb4!BmXcN82W|c7uemnfwiwR1fQemj z2!6V+R7>vm!su(?Z2>Ot5Ok!eVP`mOIIN1XSC&)eXf3rrwx&4m%r*JovSv_C~c z2sDf?qFW9MRe}O$CE8^{N@MLk0>HEznMFg`<+_I?R|<4{X$oYE)3zyN4fbh|>msj~UfZEDX= zFQ)qKyNQ;1L%Q~l(GAa4WZuNj<<#)4n@8T%N|fDrsC3NU-2K#G8od&H?eWs$pD&BH zZMkfC(b09Nd>6FHUG?s)ZVqVwjVNGC3c-T7hXxfCR+fGr+A)_gMK#3L>MrRomFN&! zu~%$q0FM>EI`~eIxV;85h`Ew-MqA63Ry8h6%=sCUEAjkmi5kKGn0U2A@`}fIFWMMn zk2jaePH&Sc8kIW08zR`rYlc^+rtgeY3`%xtSyE5`9vFK*a5N_l*KjA~)gYzEU{Xj% zePtwf+F9kF6{;<>j8b%GV^ab0OBZ9zxC^RMAX3+l>nxfMpRk-M^AvkQnTMu!mPli~ zmB#6&i`%%j?_OKCE8BkYO-hv5z^Ve*sr~$&sTL-&ep7hl_7YH|CdU^=uhm+)>)zBW z$HUW+vPDDu2b4ork$8<nC~KC8_S`ORt5yE??bq z;B0HhIK2Oi+S*xG->v@3DuxU;_|P!Q!s?~Z{oano?y}$f;11|rxr?gR{h1M}cMew~ z6?*3fw6B(298`?0QK(z0Ds_JeO=DbGtXw}em;3ow>oq=Vde5sD9V-3ObkrORSoJ+T zy-l`g1eK~){rHUjCCj}b1Jr}TVg%m2R_6{mJwsuf4XwR2@MYl1Z`g3F83 zgVg!gq-yUR5G;_~b$~xs`DS~{$EgY>yu{^@XF=k_@82CYFG`>Ic}o_+FoPFFVG%di zpl_**t33^So*eSJeWLF!?xb6FTJ@Lv3v2l!%G$lZy?HMv?AP47CvHPR+*^K|6>7_8 zlm*ei2C@o*>w%9U=|&hs6gBc8`pTF^jG6NYxKp4L;~n6oM7;2f+vN~{&!+u9`c3jX z4IS3&vbr5~VDuqVHI=?dh>wA@!dS|l+}kLs<@xp4|HK}&sTWWwA~bQEngMKnyrUxX z$8ELSVrMiWZY_(y;5__ZzoACN@3P?~$74f5yP%!-)J)KJ2l8YX76GR@;+X zij77?bJRQ<2{%&}uxRM|IMBl(qMKFdYmON}L(dzq(MSaLCnn)Y91Q758S$g?pL=2oj>@qx z1xLrTKQRS73A@cu@PNv`FrJPcqM6SOfrOsBn4`_ep8|}5V=|F~wxZ_-q8Ah*okN5a z5(V9+oIj3?L$|xwXhbwPv(d;5P7Z-XaBxkgq5HJ+CsPQVPfQ`A#}(#}11jN=AdQOF zX5hv6kN9bH4kM({XSVU?-w^CZvEPu2BcfXi^TtteG!AB{peCF*4wXNZfI~~u`IAA{ zo`Xijqey%HIN&7gUP1+a%x=_F5*}^Z&Yw#n5*Qqd083)mJC#D_ydi~(?&rM`7}J9!#(K$9nG!5_?z?T1dw6aClhfTPXZ!3 zhb_>Fq`%SZ--FPJWR64x_W04Z_B;j|bkfYxkap5o2)P1KgJ59v}g8NSqG*bpHAPPoeb#`tO{80FBPB zL2!Bo4-TH9!~UfOKqRu;CpcLHVg~ySL7S3|K_FQ?y0?qISaA;IZX!Eg!Q&{T`OXK- z5)i5Ehzii?Xis(i4MG0kScHa0Pa(}8M+eJr(12&K(ZCTG3WLoS0e}R0WgiGOvpFUM 
zV973OJV<36a{)x-7)K(aM|;uNTv`C@64-44(8%aHMD#T$RuHh!Q@w07I*nr-Ko=b2 zK>nB?s~Er(Xe4$}gvW#2Gk+isPe6yVElj|1q!eITRL*e(JUfua696Iw&p-c32m~5C z7zU?AaqO`L(9omPY;%cJ^uX9W$$%n(!)8E>dQO6@w15Zah0yKY`IE^rN6Xk~M0SV( zsw*;j+H?LmDuu%%2vj!E=Zo`!1RQES60MDVLoxS-3 z0Z?K$LL!aGUTNWhtl04dpQ^gPS_MMxZZ2C&DGf$^X?WKVj4M&ytx=rjEX zYBFXjaWD|nU+fE0sT}c}gaT0ZaWpcAE0X9`jt~qETj|{{9yQ0vh-w0B}E4{&V7|fJnnGevo+Cu_lPrG)`uK-?7^f zXdZ9`Gk_M+ldmil&*&dd1+@)(#sa4n*;@d3DtTtVo;4Z$3uqv6{k!1|jRs&n`-9Lp zVI?qW29;xB6fgdV!`U0sDeM3lbe%v!$F?vX$I(&%W=>&8a)3sk4SL}D=fqD3buRmp zpfwQ3A^<|NM>skNj~p}-h)C=Vl4$ch43$6NI76eOJJ;xoWgno5 z0eOdmMr2RDU>upfUqt|5l)c;pG!hLwKieXpC`$0iwod}w$!{D;Cs_TB@afp#tXWFkj>06;qE3A2v_hR!jU ejGs&8NX$lz1ZZs|8!;*Z6f%V-pr~kSXZC;e^Sjmn delta 52379 zcmY&fQ*bT}jIC|kwr$(CacjF@-QL=^ZQHuFZQI8F`?foihs-1|IWw8)BxDkPq7Ob^ zhk=!ug^7ikiq`GMM-SyC+)4S_e z0RQ(jV%l#%Yj@Edz{6v@h4-g*3&T~E@#fdC*C33s@Au*9n6~5Vp^x&l+aeavMh{X@ z@EiN*_hlICIyyDPkYblg8UJaRAsWu_0VA{5wrcc;i4C-I` zbJ{ZGA1x0xn`v>YH~suDud%-gski{pvTt|@2-ykGjL=ih;&dg_lDR;WO9!MM2w>lY zIp?zs$0d;4=6P2lhIz!g^vnmW1<5)jxv>~3HY%5FmX$+vq+8#H6!i2@0jW_ ze#wWPBk%k+f?QyL*8+Vz=SYk?bo&pZzTqA!1vyiVuT2%>?v$92VIWOBySfxwDS1+4#9XbfE(MTSl&+m+VK`*y`CpWJl037~X}@Sh;kUc!PNS&=jcW+nb`u^n99mR~xx9)}fGTIrdy4rbrB|DKVaW za)c66Kr~+FbGGxi4MfQupOvYv0H?^d_CSPrpz!>TkwW7P(+Qm_4G07SXhHR`G$nSX zHZ^SG`Lxj;3WSib=XzA3VBkNRM|epA8ZQWg+ym}g^h8& z%VpqP@Nfk$&lh?e51#YDK|R_T5*rCjrUFq{z@&^Qx(b(PDpwpP+I)A|rJgm(MD&IA zY-Q6Pd`*V6X2)T2f)@=PdqB$I5aDQ+sScXx@~UbDpK}BrK~NnP_jn33Nm&&is7sCnCiO` zfMRMMA%jIe&#rLubc9lXjM$nwJ!C;58&6whSgs)^GztdWXgNNaF^$2_KgSHex7bYj!hePVJ{cm9?P?`-xs@c=3+w%(q$IRH7}8n_yP-4 zdlf$myEta>*BCj6NM0=UO^PJxU3==M>H)Y|k6v#NsNHc%i?C`&4*|4remP%h;6~8JR1NgW_3f z#LIw_LTj$)zuYLNBps_x#KXjjcbf{`I+SX-0xG2yJ{gb*J2*HJhh#REAu$mzWsCD~ zYtc^BH(nZsre7O2xHf@k*a6Q2aC{}OLvVri0#hMN zH0e8bq$+c1OHnQ2l1aceSB^1o!eoGQ7YmbS==k1-@iiLcLjMLl#pOtu!P-A7QO`inSZZO((6v^v5D?T*kpI!6AV)Q! 
z0BI7hFErcBy-z%(C#p@>{j30R1Lj~|VqLydVapeCi#?W`y~Q5IWT zF2JRSk%j`wy$tIFGzzvmi9;XHQ^NWDMQ#Va3_hyzGqNk4I7rgMc|>*9lATLX=rBR}{KU<$SLFqK;U_Bb_f*k<&CF zf|kq86>6%zsHkoeaIaSt5#Lp4BouN9!Um7A_)eo66kTeDWOeSA_~90uZM(~qKY2%! zR9?|~0RBFQkGu%d?Y{n60Oik+2IreIu{O$f1bpu~q32~mieV|?3a`u!yRHn!ld#+o z+UzowYb(!d%E(f+Omo~cE2+Rz%)4+UJj%9oy2^>sgx?+n@bt+jzb&FV2TpJRBB@LD zcjD+qulK-j5s2Bct(%d}*+xJTaG`JMiIU8Dnkv|G%6W?G#L4n$*)}m7;mx!qb4-st zpx*BCS3_CM4lx^#P~gl7o!W*>&1qwBsF|yWQGbN!D-XRcX77rAUKecGVF*vPJa@!O zxM}-LpP14I^yMDWXNexUc1nuTD-3xfU8}>)ZeHFRU``^T@|VW|u|&A?+fjz4!O&Et z)v2pAah7r;8C+KiwD*C%zkjzm3_)9Z)c+ti*S6S1eXt zQC8h`Hk%|??j%8*T!JxYShajgbMpjnzc@3LZJ+4@hW-g)_pwpB!mh$d}_p+WAx85eR*o^52xouGsI>Un<7Y5jt32Nypi) zNzx4iIx11+6kuR{J57z7bSKdRxTUGGuH0!OuJN!n*Df*6wpv&1o@YvsoewaCp{NT) zioS?;w%8A_XaN~~MOZR}mNJPou9sCEQVZxeL*7bfi67WHk1IC4E%H4gVPB5`@XE_@b8R~YTS2^yn7{}ZZR12&*-i_m`1@QcH1hQy|xTa_2q?CD*sTa0hbWm7-?;)}}e{MTbwTOAHO z4mlgYCl@lkisItc~UGUsvY(h(J z^s6b#+JQ{to)m>tBk~md`UnpEt*s7Hfw8zg-=2KhL*rsg3KlfEa=elfpXzvodE5r_r3g_85-UA|&+3`{() z&I&d>v?}kvvZ+|#lu4!ur1Y9mh3%~ZwEPiU%q3bho zLkCX)`(#L#4VWCu*k)&uNjx<`k7q{%`wH2aRR4fK8Y*ZF8;tg@SLs|Rx%#jqmK}dy zp5bhDxKk5$Y!1)6GxO?M)vLuoBgj+7f{Emg3b<3toElD2*`O9BO!a6|ll5dHekX+H zN|Cv}`{gfJOFKVdtM{$7^5#Gt2L>>}!zJbO8oW1# zu3E}W!%h@wvCnh?uP0$8o6Vt$aSacX@C`-X*vYE%QDWJ>@1OMaa?EMtW{_B4fULUx zFS@;>-QFOvFR~DuRrOg>=F=n=8hsxLqTIQ+J-B8&J^n&^@;Ke8N!XG_M<8)AA=aJo z+X5m0u8fPIH^j)^iYgi)NVv{I{yFZBbEAC#vUwTW0&(VOOyBfbsN=aK*)nAoq!aOA zDNKJ!T2K_UAx>pcENxR39KBXM8?=1tHQNXZj+m^ku_0mU*72)fEtRJM-8n$zP;9K~ zv}+)$*#(#?IIRKz${cdr&96R2{&3=drh?yx;Gk8zfP9V zd$gVda&9(v;klui(qt%KR>N9rnbRiSImyDSrVM<(_pS*&H<|Jvd)?}Bh4uHz8p2=u zyl(p%->G*_yPW~#Ro_+#Jy~uox024G3;T!ekDcLZw3_?*NW;QHySvm|bGJ)~1Ly|)&LyH11onNyf1XKm5|17@52)B~o(kJ;n z5ctmZUh%W-HNE<30YROAnuB2D_OfCL-0zMKAjrt?Gh6^7UzXxaPHUSO0hDQNzaOok zCOM|TL0wkA%iDr_W|-$y-?i!+lF%Je2;VySkYq)tq#dq}w>5^Z_q+QU1t0i4C#j)P_F5Y%^OSDv<@YGM7UAQcAU4hM7Zg5&>|7XP(=g^vyj z>m=T73)B=zw_iK3@{L$u5LIy-f_VU@J4auRu%Y^MWu8cKETNL4mg9o0=Y7;ZmKApkXm2zR?06l3$pxZ*=bDzuZ8EzMV 
z$3lf&SWG?`H^It%mV1WoQBFdSS@SeeZgk%lcGxPf%0|gpQ*2;O4A$cq zw>)ILu}#O+G)czMQj8bo#1(7vXcc|41cZk$%Pd@r9zXsArN)M#H5}nk^!RG3HV||s z%T?qYxn1&2%ga`Cd>{bgklRzq^3Mh!!ehdS%_bZ!5S2fxrJ?{6*jGUDdV-;BMz~>K zjSvOeNk0B5+f>X0UdMS~bn?6$zf{_Bnjj;7p{MEC84vPbh+3oSHwaE5bTrl`3D0pV zM!XziHMbF5WJau0!o#=G>L_%2a)phlY;6oGkcAFKt({hUXQ54#+;eRV;%$IgfP-lw z0#W6!<7}BGiOoo&Llemf-V##8Qy2qO7#7@qJQrTb==LHmS{%e7ds#5!fRsWl~7G3Hg@`p~3C-+AZ@2S^vnhfyrRA7|JiA)KZgoQUf|K2sd}q z4LTdF6&u0C4aJ)N+tqy%Jq9^IQ)KJ*NMyh4^LiD~`~8fV1{h*>9k}c0usAFAzz)2P zfI&0D@?{46ehrO$KU^w0>BPs4MZj4%?D9O4}*v% zM7a~j)i+xOGM+=;Y3NJK1@*J-Vk~`r&nrVcG+r=)yv}l?vhi z_ElR(CLHr-QF;)faIh>Y3L*`SUjN%HLv$s!9oX1%e}5pYj#7uyLLFvE&x{i6^HV$~ zWHkH%_c*+;?;w(a*0`HW1L*ah=6(AWcPlbP{;3$U)3kROaurrFrEck)5#S6Xjv^5& zb3*$QNBZ}q8_$aWpSKx6>^jDm7=?_yq>paxg)5A>Afw;&A$lnY8AEE`@txp|C8i_H zT#)qa=S$EvSh3Yg0o$VUs0o8@Bv4VIT_~FG@9ZcK{<0!j>pHM3TBIY%m*4Fa;t+RI zMD<#AxRdQ4oTb2e-}U0*lBbA2A^J7;5n9x<8b)tzUsiHf7J z4J+~wgqA#vDvEHnrkb)*0J5GF{*#>V8?F@|h=!=j=p?wb+=4jGU*%#)54pi{AW5}D zl|gH>P9-qVMHrd4AS_Vcd2-Qyhy}F=@4{y0d4?>UIx}iC{l_G#ek6&N;=xRBcoGn@ zK|L0w3G`I*L6~f<$x)WYQ5FwMOs{-+g4nu3p%Y<#%b8~yuVz))!AY&1;6gzn;+^!} zTAgLsg8_v4%=Asd<@mtRt3TFY?P7s{lC|zEG;wt?rTOxgX%``}w1INBAo!5llghD3 zQ4n?I)l`OwWcCWX`2yzknFIuqwz2Rq_u756O&>g35)ss?T$cPs`n+IUZ`AOUkg%md z{PmJ_u*E3N%gamP(fh6?dn3QcJ&hJi9NF3{^Db-O2WnKZnm&25i*@?lj~$anu_*w& zmFqwpL{8q)g0?Gt?TL*)6w7fHg_v4|S5c_rgQ$GR{tcXEN1c_z(JB`I(2&QGNKg!5 zh*dQTz=$QjT2K%*J47ng#sBOORGZgG<}8^yn+w;jE%D7jrz-ch%Lz+1tM#)#c9zV3 zb!QcxB#WT+P+wAx>Pg~T?^nU5ZJYug_}eo`j%@Q~X7O`(oWFKVp1-lejMH`jIo$4t z(J#?BKwX4GE%SYCPO&V=@EOG+2bEedLu)wH|0YY|&O>_}>7uboTN$FM1vD`^(0q-t zx;7p@R=x7F9!kDe@)X5 z%+NnxVd@nt)oLMXWz!JT8hr}yDPrym*}=oxamo+68Mq}!HQ5&uoe%{i4%RWuMW79X zvsQ|O?!IAy_ME-J_b;bQ`O<;j?99?*Dd81eCPdSfOJ{o4AQlFObSET!2Er}hld;FN z4i3$)DJ3;nA2PRZLKnrqefR-lIgB_8g|iAl_%%ZCXrDMVPHe7KAgvMY)JBUyZS#cL z@@#W)k@!MrVvec$1;i}dbokXk<;nv)VFxLM73{?+u~>=+vF3`Bb56X>i{fLRpbpdT zdWs@)`(@bbJqWw~x59U{y7hdM%i8-86bCq{v_xkQkH~UiO!IBUK1+bnLCrB9PnFh6 
zgdOEpQC?T#ZG@hJjMt5oKNA9!w=8ow$W9gJMW0?r7Mruy$F})vVaDqsl43Ai`p&8Bw=|IZE5?b>UCrd=UehTSOXZ|#{H;Qpc&Ia$(kw{ z=9^4^+1?zS>j!uQS2c_^Y*ec%+x$BAe@T*~Fp_N)?VDUMT3geTT-(YIGe5#Kz7{%i z-xi~gNb1JNQ&y6*{nh)s7gC$gkzI<}&qxHO6Q&{WLt~`(|>&5?+L<~x{ z^j3{+w>KncCk^9q0J;gGWdJ}IbgjdxaA;YZ9`0T-xxcB~J2ow-M4Acj7TR@kE%}xR zaZtjIRHdAQ_azQ%)r9#ZvV+ppDnu9v6N7jUIT z$i2Ki^tt*ANFzAc1sMkTuNvY#F;StYm?Z6vrf}Y{Ppw%3XI?zCIE!Zpg&e$9p82^> z_vu_mn4yddmr7Gu+sWNJ+BMlV-^?I>%OkS^0;;$F6fVX!R^h2Dq1#8+=Sh8X%oyN% zLw5D(IdN0~ux%tPIf0`wn4h%Z)Z;O_=Z*-`_}|(>QhIBv<^=>J(_5XsJUe;igix(z z1y|NpkRMzv5W<7fj2?JClV*|%VKok) z4ahDhIGQGhJ7X0`i{;uVx$nHUgwrU;t1*-7mLacz1d_)LpQrk=KR}Q-moo_beM>F0G(I^v|I=`Z}@Wfh?R_Wg_bM<1AdK^wsI73@vHW(J#x@D;|&E zjmrf?)Ef_%80lBMv~w?reJIcbmza`ph0gTgev_nv4-yVMDc# zFQGiZu}&KI{YM_WKm>4qa9dpT2^0HAT=&vv(;k7RanObC(JbaXQFQw*m#O|Krr=- zw~tG1Ldi`!7zM3TR04{?)hRS^LSno@C!{*W0!0h3!U=dw zdssb+*CU#8N(mjLfnCqe3t#xjR#x~z3V?ay0g5r5hL|%!Q3YmG$%V~NxB>;r)OwEu zVw3h~36L0!yH-C1vcmg92AW>6#0a2Bd<1f>L_Pt^bp1F7%G5sg1wuz$2Ch%81D)eq zu?3>1U`7`rJ?;R)nGgWV|BDW|X;m_=LK9Za0bTITVh5{ce$SvEhtoSYsTl92p*v1q zFNM!U+!0mp@_YI`wVe-}8$uJ&pQI<{j;O?Io?mW1(V@W1Z>mPwa8Xi6LhCWGsQgb+ zY6uGy&97X)6k?BM$5o4xmIGgw@?t2LXhaP7V-gy^T0DqVcgjP|Y6)nG+Ede}G4xR| z?n2`p!p=@ZJ*FELdoQ+__**W|?FN@FB2}H^P6oreg=K3u#Q9^MYd-i?N`U6ddY|!WtYZ z=JlsOew+XigGGafmr2*gZ2gq;R&5#9-I&ik-Mos$6qlm>{Wr)__^qMxvR`8As=~1a zZaXG@oqb;BQIgYyWy6bvyB(SJlbqoMw|ts=x%0C+CSB)`OAeqJHn@Hwt%LPo9!ih( zC*yZzCdP=A^pV_j$xoVq{rf5JCyXKQx5@qqbITu|&{(@j@bNch^OEPA*>tJ(L$@QQ z?ZcG95ll3A55F$-`JgYA*vGk9HfY=tg^84>M1BzLX{Wn&%?|Q%+-EvEFXuffjr5uD~ibNHt(yZPZIjAu`HaD5cOhIYWZTj!9b%Yf7G!3L;bAG1|q2w zh<#Dy1cuP_GE@`py2eQW7_USeGgI9!6w; ztAM*E(q4Y^F){ZnCMgU@XC^6qqb51L*BBkvtpI0ykggVnY6;mcOPKWZ0<)T%VpgFO zX9j-ddx%?WHKtIMSTdBu@+iPR?3T_XD>9&fosRT0X&gl^VB1R``=$=^0Dd=b>yZ-> zkpLm>Eeb@BgX3c#?Wcp3PEea~Y=hzMkCc)-bWK&dMc)QF>|E*pCPG}UyClsXHexWp>k)y-_>55AMj+M;(y z-N^?o74{E#nmrrj%B+V5d%=$RgV3#g1HiAJ$ImN%n(-Nxo|^F>^QJaA5~AmyqWMZh z&vWXo>V2uZ<>1|L!xIn008hWXdsSxXhnw5`gxGJnXIe!daoOov>x}SkbBdR)-Hb)G 
z5ovojAf$!WYPL6jyl$0QWeMk?zL}3if;qIR-l1#vzarHoPJ{%Xugyy71p$=){{S+h zuhDFlR!yqmT>&at+xM2)!|PU|Da|XrKuE&V4OT)?l|aiyEkAA z5;iD*LGHWy-&$Y>dLAnN5P&n3QamR0kL>I9qQC8L=JX$1VF)S{?I^E5k!=noTIdl6PuL#um>-gp!X~rBDCEkMHZW4Ez4~c4=KZ7+`FD-=zbsHItW6)E_(iw zfP%dWTDJrX^GEO2Ser-E$1B(m4?d&i01W+&2*TTL8UN#oOz#WT^j*DtGumqgX|>}M zAB=LDx84313i@hauitN0SEjq>WvJs!xznrf@8Y%QgH2~_%j)@vW0hU+m3r7eD#M6Y zHJdB}Ohb=U<0#{)6*stbLrGRz^RBtfNq_UfHsHxnYm4F)OJ*|@+mUJQ8M{NQOg^xq|Y}(rQH$joOpj z!=k@0reH;me#@y=0{d1NQ#u@(dutBgNF@tSO$0N74n`@l5d@~Z5AxbT9y#%{7^(Bi ze~WR$+|;bA$vEj)Z$F<1Crr1pIK$(2>7~+K0FtW1MXrUt2eamgiE;J_u-zrKj}V=& z?+($1V&5TXk*x;GLa0v?G7UXy2Hy(a%wKnATzDzRB7^$&lRhA>3n^AxO!2^h66yOE z#XvYxvHf=(JBOyI@hS=ht9ck>acR($ffyMyOZ#Eh7?Q=m?K3;Ih^}2@yDd+`gJf@r z>s)O&m&mV^&E4Ew&|Me`pz1b}^o( zrG~-JKwUVeRabzBtsGMxsg`}B@eWPG+DNPOB}W;46F*a?i({Av&22lmWyui0RN~^V ze-*zg|2?1Qyv<7eET@nIgZ2pR!+b|;;p!XuI=1Y2PDAVpKB*D_pzva&o0he;98gbY z6ui#M$q{_$`+B%79nNkJ``I*;deQdxB*_J6yx8% zdXd($M>|g5aHS*_VBzQjT<4FG27!@nspVVY8wbq~GGU!u^TOL_ZC-x$H)n@h{46s4 zg~2!`Yb=UvyMny{(n*y)s>6;p8s_Evw0Eejy6$846;@R_RWI)*>!9YV(2_0#vFBy? 
zrd>=!JzbM$edMFo137UMgf^STWBL!YZasAnE>*b!3kCz3H{t3(|1LWT<&bbY31hbr z*#-FmmcEp1%U+E^y#nN}Z=6&ZugI?vrQITu4PA=6Yd>rNhf7qKA{7#~Hob|`;$Sv8 z{gv&}TX5sco+qxUN(Ej0{-GKV%vC7Q1EnG`n|$iSI+nXW;v*M>WE^gK!e<2k%M7Y= zU)s;@kL&eujc%|3({&V|S;f5olgV86&rMWSFW*d3?Iewhrep9+2_r=18__^7HjSu# zEgAc=LL>tyFOV|GBRi5rn~_7Q_3*r-a$IA=9n}=07rcp6LqJ z;4h5WZ?}ntonzGKL7a0|wcO7TL*fFOA3e1=Eqy91c_V1$W;sZauf^DUaja7s3NPJbk-U1kP0C^Iv*z0L@E?RnGdT zF6?p0K_cR&sH(3FgW}?HND%!w7L)8P=aPp*;ruh;G>`KRy_kfRB?KW@!G$k#kXPRQ z2}L*%6%jDR9%u(vSj{-NQ6eoUB_c^%1?PZS^>w`<2ajJs0`u535L)4i{924BKMdaq z=)}Iowe7#y+E{m++zQbUNvBJuxHg|42cD&mnOUZ$HRBCmuC5y8S%)?gO3c{?IG@Is z6-woqM0WNh;$6b&1cvm=H)hW-Ny!eBKhFD0yY0MrZuZfCEXlaT6R|TGl3kVho4_Ee zNFH&%&&>+}e!l`+y{BdWvWT0Mjrg2ECz$@KR7+-5)n!QVYUbg5_ap%9AP28y{**hZ z6p6>p&82@g`O zwUl-2T=RU#`fa`}TDf@hx|pI$|0~N{RIF*-W!9L~hQqBQkH%xxI5#0PP07FCIjcVU z3A#W(V0!Kb-$X5YpKE+m(Dtfn9oorjj#(G=CzXX2FLyIz@;$iG<8c=>zsX?9flRBe z*bZ(3JXc%FZqIg>yA@{lCDDQ$I@b32zd^SreMYzA|5i=K9Lf>1?c*-pUDh3EVs$)xSe@ zW)6KZi1B{R?kUzaZD)zD)u^%0AYLibw~N>Xfp+FQ zB|i!1O0o;&snFu}I_Q^gBtN*akP>!SFAe%WrIlVE_x+#HY%9d_(Izo^OsWL{ z<65QT3nFeI5h2~avS3_6U@n3rC|FR7=QxUAU40IbLbW+Q^*autdICsd;kX@daJ`{? z8+P{k7mDUlifd|Z87%jMi!-R-i9QDM_d-;?TI;44F9F?UgmC^)wBL`|mg)O&zbJg? 
z8Ciul)G@U_;78A$niT^}Juu6tCP}^kKQ(OFVPCSK+^X=Pxe`;>*d^pklwX89t4)0# z;5tC-2X>mvbF-;UUKBgVwc5SYc1FEW z#KuaucHL=^Olon#LBPQ?+r4B$2FAnOr57U}3Z3I>)3QQ35_@ZUp$~t~EHw{cD9#8a z@s!tJ^Gqphj8f?tA)s65mWbWc8?s=s*apD~`iT~a5`~%0A5*|bzLDzGZEh^QxeP@d z(hm+h5+;T%+HA(hXx{Fv$xUfdJc~fpb@y~=+>>e*+FwdW#lrEVwmf(#|I!H9s|P{j z50hKH$aL>*&7{sfp!4FSx)+)`CC&oWrsymSt>9=?gy4hkHdxdj>c+dZ*Qu?da13qx z^xd~HX`|u&zI1PkUU{`$EJ>LK0}%JrBfZKLf8L;$0qo74^~{`bAP_+s3Fj@n+B-2P zH7*4eN{EL-FEF*Jpem7V8Zb;Kwhh;jYLbIw)Fo2EiRCXhPbR9_4D#SDUbSRIqZ3Ml zDoh7Gqrv3W&>9dbi0}nZz8f8ARb_6HOI2sWPhdT!{3!3YZ7y9FQfV3$y^5 z%{k`rW!$rsf>r7Mjd3@W_Q$bHvgg{%3o=K|JPk_jf!J~`IG<*)|3YwDLzR#vQiFbA zObI!ONp|PdDTYiPj(*y+Cyaf%kk=IRc7)BtJkK!#i+EDlLIzjC>0SUB{Q+Ozg{2hwDDR7sbIwOY2JWJ)MsUCobUg;u7D+`YoxgT{`Zg%N~UT9#K6G8^^YABK}=~Q0)MZ znxnhM0|4sg0$MX2J*BB+Nf263VZwp0@AMxelu&%^-fngvq)F&B$I9bC`dA0BKN}Xb zYNGe>u`F^poE`)yhOPXL0hGPwz`pnN`Qni@B{`_t=v9r?z_E!7s``_SK-b2TMl#wM zd!&*cUei(j`8(vwxl50gn0_DuU)gg4YMLgs(EvKp-tNR|{0raaED~;NR;@9?=A3J2L(X-=MJ|h|7HTGC z831(jrAb0-eGXMhrx~X%S^qQgt69Ypu&;zW`x^ZEJ>*C&i+So>I4jYqUA39dx>2vU zU7~}<9_!!!AtmP+^5V{qIi3&+)2ZJJ{bhk&YqruYgp6>#_3TacalS|Sk=0NK5R4Ct7>06(m%L)|@8`JYQUCuPbNB>eb1% zlCSz~>0jCqtGQqJg{LvL4nduhe}v=Lmx8NhJ5+>BoRT~3w#xe62k@w|;y=TB zoO1OG5&v99rw5=(HIJpFPp3hYngHxb)~T*$7(kJW>7cP@u)7UOKv3J}Yo@I!DVBP` z?pURRlvK@@=dIwLbikFsKPRcva<0ihgm+@K!R_nP2)EP+k26O2$;t?$i^P8{&1Mu+ z(+x0&SJBdsP(Wm-F_R>?mzEelEUUHa@ZB&8`M68aKxyP+=a1n0Q(Rek$N|{I-GDe^ z0fS3~D;N!23bu8VLFZ8(a8U-bXyB70pc7bhChuM);WQRahB)el=G@?H!GWC*6sZmQ zzzu6?bCh-FJ8ZNiOE4gCtVXC@QlR*_i9!>_MDfr~jo0`N2$}%&Mu1$+V0=LD4D;|&!=rwf9Fbv@CJ;V%o{`KQMP9z{dkE|w z6#^g@nA$YiN9bu20b(!(fdXzkUD$jj{^*81KTcJRM3N^_H;=^)KOPD&??ZSV zS4iV8uH5~8>8(kshyXyg2MP-?Q%;Ed^G_EPZY26+8 z5a@~yP~fOy0fq{s$Dft4e)sS1Y?W>~u0b6vp8($RD$#E969$Ld^^B=D zowkjA>1nH5@NVWP7Ati?mLd-%laqv%l)$+!>ZZ>QKf(@B?DQBhB1DSw^v5epyJDDTbjM4l0eHaz@b?J?-=WA%-sZ71|vylS!6QbV$n)gSwM zI%-wk+umtADymJeTQ8eq+Z~zaSZ*M<-;RhTXfKDuAv zuN$7)vBo?eXuZ|_-As!MGZ->n8UfP0`?2#m`!;2QDn!X&O26H;tuctzuK`VIL|VDz z481(S^QY6<>|=z1<|y&r(qs4drdr6E-kvk_;~;fP<6m|84*D 
zbn9?C;Qa;r24}%F=D$JcVAd3}Pg{XEUR%3_0__?$e-V>ARWq^UMvIbE^X21F34Owf z$Kx=S@lYA`-dqaqULk}0*<%fVKK^aiImjuC>?5jTJ|#ZBWg3Egb$`9Qwh8{;JSev7 zAwdwnBk}!Oim1Y055V2mZBVk6Irjq($i#G{sX5w zYBqDzN?bsOMy3YIJk3EjLXGUG-|#de5`qnQaF-nk5C!{#4w24<7wy29=UXIgnn6bkyqDA(~W7Tx!aYU1knI!6GCGZ znKVw?;lW6iI!F3>#b_&e4U+5%9ILu9d!5%AxWH(17)cMZ(nqGxaM~b(&`;aLeskvhj}D@uR`-Gl)~-*X(-5^LJ|J%@INf_)LCz%ysAG2oqi zKwD78K;ylX)YkhWo9T9O>#0Y{c4{1xeU7;bg|>jht?C)0G6YFfFxtq1y9-a5^z^x& zq!43uUn=Ty%PRsy!4yT;!1qBlcJ!X)jqoK3UfkMZKpkRqV4=t|;BGNFhWQnd_ zaJus^CXKn}9~4YG&p}GSMd><@5Jq=Bq$tS0dHD-0>lULzo%#dWBnACzY0dp{g#$=q z@Jp>#eHVygBM)gus*zXGcz1J=Cqg?fDQbJ{JP0t~rr$7P?Irl~w+b%Ph zSWY92pu+v*uek((Q4+&#k%w75UA{DOKZnl*LCdAdYY|%p36U4z4iZ~!s|s}itK=14 zEcctM994GapmnHGoy20%1EHfF;V5w)25FC5K`587&$B^mcT^7V5wL~4%i?&ZkkX84+M+M$&w(y0g>C& znzC;sB}@Qz_#o%iNUYw?Qe^_Cy)knwMyvk=_=?Jax0Rj1V0hm#IXr+b>dfv_lnyg2l3(?SZn`eh${|bIz_+}cesk!RI=sh&!M{|=zLWu3Uc#4%MQBVX9a;FLN;xl5j)<`Oh&O1~#<1MB-6PH!r!! zItM&G9`dsn)k*@`no*@+yJQbcJi}sLCxdi|Z zd1+Z!sZ$-lkQv(5hIfPO3;e++agva^2YmZOi34DH?DKjVL>I;Xfm{_hWGZ?7P+iWM7GlGiD_K~Wnp~46&w_Mnm2qhVntl0>>l;7e z{a9Cg;%I_EvSATVXlfHJ@26FC$NmF<%EaO3&X^9*46bwb+L3NMq|oz>=&@HOz0`6^ zQT0k*;wtF6?uUqrpkMXOaJmXAY<=xit<;Tbl5JiItA7k4omox+5PFWWNy3HzdV@<5 zMd}vIN>XveCW;qZ`vWRO<0%_9}`~^z@NWH|I*(SBBNdWBGxZS{nU^x{>|u7eDlUzC4hUL4JOm=l3_no zT{1!fB$O72(V)rx%8!H-8dG_B;@lq z;1Y35cue7OONdb+;nzfU@H)mEda3-0OuF5>-^mIzp}$@T+$YO`fl)q#26$WcxOJ#Q zbQzB!ob{%_Y5gEFme+G65)f|M@2h_AQ*BN;;(EQnmOUkFXeMb9OkeNFnQR?Xej~%( z9Zdu05{F5D09+=(k!jIy8M_xCpq_ICc-pnv-;)hO7b-@XAD-z(aqiqm@JiC2e{lWv zm+_as(?m2DrOKaC3~QuY$+mcKDelK_rxKZ|%3UA~$n&v7{A_yGa@dLx<8mqJb_ERd zCT)x=vucRhCb-cUr%gHkZNi!ydVPX(K_cL^`^WRe0r@h*yI~ssa(s^#o{!yp;S^;w zo>`zZZx*I93s;imp!r%K86b_J8qMzDiUNfLx2Xn(&1^eAa)#{Wy1+|svo5~cSy@ka zftR4%7;w*c(qJJ9=6%|lYA=cKbd9cRyfX6p@6tb1i6jL06rIp)}#DlWT01 z%xre)^4_uaTF#v{a}oR*&DrqWXoc!5dI-$?njJKre;SL3$x{xc;|#lldT$(Ba2c)R(U%-N>iD1JRBE&J`_JrUQ;fv`^y z2wEz|ic}|#uWFQ5h6-S1=K7y%lwj1Jyyc;~=EMcWrIjfFMQAR@SAkW?*~1tdG^=;& z6WmKTN(Upel5%lwB_nWQv$%X@q{TrkmYVHy8*y%g 
zeV?BJq<`Mx43rYK(4ZAAtFs&3>Y z(Co$;XVD-Vt@kIO>MsfYtK;Kjm(F`klKr3oSVjw>yq%xT>m+F0>-o&s(%+kg6a|%l zj*t1}a78AN3^L<$tJE4nP_is!5H#i<=7EXBGbW^Mm-plGWnf^n=zAuKdj@G94yOx0*6&2W=@b~U@Iwg)k!?@WLQ08*r4%o18Wc%;FF)Q~ z0u<}b>L+QIxzox zI}GJ8jde=)1B_EwnxtiuZ`P4S7E4ByJI(@vGoWiB%CMnNl@cFQf*QXrP7V!J-*3|b zvn;AfFML6Fl`{{S8@_lnY|1{ zt9e`}d)8h(;aF2NhI;Xe+^--nk5D&y$+T^_+3z~NDKW5mD9~dsychsC%@W3UZg>SW}sz37~^urBt*n++2^V-o{if_CBWmsz3^wiOL8{+kPr z(2kVxxNd{Q@085u6i7JzfAi5Y&5AV1Z%2h2$}lDoW@Mo4=yTH;h)vyGoLGlZ90%5c z+1QW6-`1Chs>GAS#<2m6KMlKoa8R8X*r+y(#g*nC&ZFCv+LQAZbPTakrolz+(d_WB z38{r5{iKG3&f)uD($k>OMj^@6NY%Bb!rpP0r|kbM-LO2A1VvY2R5^KWX({M<&emS* z?G7|~m1l1e4f!{Oy%RqCI?KLtd)TBISLaoB>QZ?qIynx;XFCNJK8DUAWNGM8G4&!Z zqDod#$5_dAO6I|xOsIa1+meZU4K~v^I*cDG)I*Z4nW-(mZ@2by6|69t3qMmq8bSC{4ZL*~{LB*Z9h*uIpj zfIHLNdWTVc#z+HbT+rrUg2wGp;O=o;Ipmm#l{=;Q$D*1Y|KlIdvra>;_PTo7$D~@v zMB9=C&3>Y_>x^sk?qiF-{qa3s+1x{x&+V4$s;|&J6>JM`=_hz(>V<7Wi)8z7BT)-q zY>{IB@?wV!p7ns6xTiqv)e!SH3lnR#>e25^oHh2rYQasw9CTA2jD%?tcFjx{Ef}&{ zkpR(RxAA1N>e9Jc0K-Ik{Tj@SepM&`MRE70eqUyZd|^;6al*UQ7ZE4@L>5L$1lj)+ zY}a&;cMSv^k!L?f0>|02w{0f6X$wu}UWw|g~(@l5I zGTT_!cqu9{y5V`ymM*3M0?V8D2cpZ4dR7ERG*T{p|F@X+blt(9W~-w0r+4?UVn<%c z>dj_l5M)kTE0f$9lpnTewg|>9>-gEd2O=yMd}(lVnu@73$TBJB>r>w<;0~V>R#lEQm2S}{Vtx>{GIW^|U;`qT{9BPF{ zIU0IHToQ_;gu9Co8rRi`TQXJd`=a5&ok@1bLxGk2(CBJ=hNh)t0W5d|@zk~m+()dO zo3T)dVJAv4#4c#Ov%xORkjLc;DrJS^G6ZLFF^Pk9oKWPD^%)q>k$pmKiQg<&L^L70%x?=M z{*znYZGQ~?viojtu=6Amq?5rukC#5|aKpbvR$6Z?UGt{xZY$Rxq|vkG?&m||gkkCf z5$^&NI}VQbUB8l{%g-%Yw4>gs?>04$*5yfnsc9bhrDi;gMtci-ieXeITIV`{4Zlo< z>AXwy7_89t#gs%kp!&DUdZGkPom1<28vv~eP4u2MFm}oN_jp4UVs(QJ|3_IBK{bMp4JWMpMT%5U z-`&9gaM`JxaVQCKd#%?uUS34ZNy~jAjgwHZ)tl~ba zE~aGsF0{3^I*w;oNixTMu|}D!FY5$Uf57iU5K*M^lJjIcHd?{|kcIF@*{gqbICcQB zvDoHUeu5eqwf7_Kvl&_>;(DrqBXmJcF*4OqZAWgWw0tS)_9|t~b?M%8L9EVP)ys3^ zmJl>R@V+A*_35^|%$zDT82!t2ChBB*E#qqe9cSx{_08AcAd=&HHrxFyPc$94It?k+ zF`}&+r=vN%>JVrD{;v0{sW13BY{G|*Hz#JigX^N_yn+?};O2sG)9tbRb@CDEk(Z{6 zxUzyy@A50X^u)sD^w}0^FB(BrM;)a+qR9Q^j@Fl`{LeI{$L9CT0B_q0+(i^yKs|xF 
zdARE{NbO&m@)0|QA>Yh~T`V1d*VdIF4#~iS2aLryIQ;G*TfxTrybW)}sNa|2?L~F4 zWIhUst!+cDnK1at`f;nri{Z+3MF7`|z5ZgJD^&q2TzhuI)!7ZY^zK4&4cGP-(_?AQJah5gOx@yJmQ|^Z+II!#f@4Um~ z?Tbb1=+UzRsCxFewzd&L`rj|24j)@`-$bY!LSeH&!Zbs5R?_v3<^``0?*Q;0j5PHS z0r!$tlC%ZyB*d`bN1H&T>pv%WITvueRIKjVI5_Uv`q}sT*nfY8M0X#=?{0pMub*NB+*c>Wf?~a5bgDnH@y5`;%qArsZm=P%wAk zdysb;V#?N*11Y>1Tk8F|{cpZ{yT~1HMTDolk%HUox_;=Znp+@ghA(Rx)P{tN6BUG{ zKkbvY=FMa<_?#FuSR^V7l+v^h6SgVxCp;wE4Bij}R%gs!@P{7y#;Vc))}Wz> zeQtopM_nv>GB(E3PAQ~LG3p!8q&$Mp7EZ5;w2!)jzk5s*rYaf_7u1rny^z7T{4uf0 z(1rS4Ji~;5TO2q>f;+;vPg9a){6&nRmhfEx&yLK^sB!eyEN40M%Fgc9qn`JYsf&$% z9eK4?qP~QNZ(!#W>c7*uA z*&&3}k71Oe`7pVYWt52ZdfgQ(5GN(5x=t>~Ex{AA2X;OVr#QK)9!}#(v{n%x?qhdB z@S7mR8eN{|*T#(8xJc{pPPj-wZv+y2)nRS!?|LBju5Ss^v4+TMGF8o8$n*VA1h?4z zwPv{w8b1SI6Ny+p93)crfPP6aZc9qs=qfVPSlm_N&X}! z#qXZInY}dSV_|-?tD7F zTh0Ci=+C;4M3>|6*KE}w`^0mG545hQDX7+b{)|8%1K~PihI*l#HBXk5ucBi&QvvD5 zPOqf(OsF0#f46+dRevJ*J9&)8;qW~lL)Hq&_hoiKtc47Q%1h_B)O2Lrp@Y&jj9_9G zYy}eOP6f;Jk+@@-;#M2$Zg#+8?!cdsr^igTW`&H%37TjcWh1$8IFY!0II7)xM-5?W z&21xq3r*T?xaJnj{G>4;opfl^I&}JUks!Bn4kuI&8NNYL!eQQ%KAv=uFr^4>L4cY; z^Q>Y4W%`a2p4CdPGFU^Lp3{%vaYk{@t}pJl3l@*G>67;uH*qBFU|YeMof-JJ301t+ zD#s#<%*MRm-39$`f=%-?&&GS4@RXJBC=7nU)N4*vgB`d^osUxgiBo~UL_cYSlzHNo zL-7{&lHJI2R?+GnVYB$P?K{QPk{NIHNhTU}EhV38e0_BSTJ;QMhFhMPEsApUC2D?s zaV>GRYip6=nQmW4R_ho%h9IL@7D_oxP=#DPC4qU=4C>n=igvS?lWlPpGXJGc;xavu zN=@4M`_?RG-kEcLHWi{h&5g12y72Bvih|?n*DrhiXyv+(`Zg_X zUO=F_hcauz*Ehs>Q+s2pyoRQg5~&XO_L_HZ-YR#TanlDHXK)x2fyPQF1{=aGEt+ir zY=v1L+5N0RFKSwP^04vgY?^=}@vWPL40Gwr#c)$Wv5L)NR=cgHB55YQ8vHvbt}j%U zgBcGK1iy{YEMk<|_coqoxN*G zNq3P`UHIk=w|Qm@F@Sg4ON6cAB>75(5q=vDv6}H!g0c-QAQ;NT5q^VPN=UmEChPHjRDz>*aB8Ddmnv#lX3qTCSWY<-JJ$S|8}pmg zw6S(ucLmYuZ%eo0&-0i0)d1}5Qi9ZYZ=c89Ye&0YX_r!j@=)8@rg^wJaQJ$2Il=(a zPc!Z08_fi8nAgM1qGB%sw_1A+%IJlmuI|u9$WxhDD|d{qJJLsh?)BS1Vzc&lfQR#c z(=vKm4&*H;{>wFK#~_!7ejpMcla`0a=8RMvsRN$#uFsi zHpQ1l%YP1Sg(_WqbP(S?Y=V6xW$1o<+>J9ar4>S^e=_$3q=q~#ABapUe*XP3aJ>Cz z;K=kj37S0q2k^^seL}+chNn9NM!X2!D>Z_jjyFvH@27|JNGG@RfWi5D!pf21*es&U6 
zG_3O1k;Ld*kgE}{i~Ndm)SF#qK^>eagk|GFjfumzAXDWK%b@*GI6JhY=z;{xsWY6V zgi637!m>3!1?x6cs-QLL=KE|G3C2-$ybFB|_uE7>^d9`N9BDxut=tA^@(8y~Z zotPx##nv1QMJ#ITg2o?Bu@Z2@5$WioU$5v2fQsZ z=hYl}t&S(2ky-4{wPz=bJ~EsRL_&%qE0q&%@hCAZ*dK)xW*%^(vv=q+!tSNSTN*$R zJBgag@IPD`jR&;kRhA`Q!cIF2VbJ82mhCQo{mF#%A+5c|mwDDUa+|GqHw_)=E^6E| zxg}w41mBth%VbX?b!L%^AzDDNqeuTY(G%P)&phdLF-%UilL#8Op%)1<7a7Zm zYyHx`2W`uGM6m{}^K7lIlW+{2dfn>D_u67G$>Oqh{Mhh!HM#1$;4b)=xU7?V)XfNY zpJV-L&y7E&ZPF7ustfdY>bn&pe&3Gz(x7~^l;z2U>Yzez#Czx>d-3mc5+;FaulxGa zNJ_q(B`&NM8xApsuy^GW3aYFzRqO{;y{)|U+U|F0hIRGvt%=U5lE;!?55Bm*5C+x! zn)@6+5Mxzmx|_taWd{i(f5JvO$FO;=k!4kES$PAD(uov9P8(4XOH@zD!qCG} zv4aY$?M^+c1h(4mu^Ah*CWji@xL5c{b6nY%z@Pn^^~bM-7~2PQnpT z&Lnb`U7T2@6;;u@RUyDlzxRq8Vd$d+v{bjUCVIq3x?PQPOEXIFh({qF_9Xj2%_RS+ zkhuaRC|W{`wQ1c-WcG@TT_2Mb#4ia)JiXTo(;U#;YNF#w_(%L zzkH2yDWOUtU@m_u%6Zy663SUdlLKPADa@m?%di!h)E^%aN{<06pTOFT*}k=lzz&~d z$Mg2M3xE62~218t~!<)gD^zZz0oi~I6Pz?JI} zeN9h^s)?vj3h<4-JMe3i_;~S=wLPS=c(v)?dRep@!4?EMnF9#?Vl+9sw->J&)l7sd zAiT3hlH6|@sGasC>?{lpT6#`$^%X{@hkd*7KSV8C!0Txb>gQEYF1M=jW)`)>M~^Yc zBoHNP?-PJl`@OyF4S8E(sJPH)VJ2?AY!IH&^o}=p4=4MLMY?CW&Xt1+DVE4pQ(rKW zfA-xDN9#a0qYaFfDv4CvzrD)e=66lMY~Eo&W7^IH{f`DvxrINBV1Jyr*sY|9SPqnk z*gYEI<;47UMDUp`esxr*;_SNXgkY?_hkG~wd0m4vmr8Y+cw7aOLUlHv&MXYi;ZU&% z_4;BeNHzRN%ao5iY9)zR=YpuKl%E{55Mx^eML`{i1tV}*kt#o~!Zbb#9}@Oz$w4!G zh=P>4m08`;W=fC}ZHS6^=-hS_HKaUu8Dp*>Y^|H~P)hULrVe*jgDW}`v;$dit~aZ2 z!l~C@@>cv|=>t+c`1Xt2&qgYWKFgXEvYY95MFbR`9BjoLnryXr-ll+s(GuhF((qZo{PrK15)! 
zQLh{f)3it0QxVH-42qx{b2}fQ3^Ya-#;Qc*H*iGx#y0t_#uBd1v1Jb=1A-z0ToVist_U1Hue_|fy-S`*UPf<^d$#d zuCxR6B`ze77?vuw>U26d!Cmf!ml9w*W;7;)IO_%$o-_l=qU^k~T-4wlHTQ7c;laew z`Q-8~U{GK;n-hs3h9fq0b03Mn?QjA^e}pmyPt9a#{e+gWh{-rVQ%-A;!oa%SM+Oj!gGuieph!SlE){cm zd1+)>Cpc4W9~XB!?WuZb-R`eZ=A7&J4$=4US9dU#l$v|mLxMpZUC+AdhWY)6NMs5} z79B#H;vF%a>I`!tN0?`mSs?gKq8Oz7Rs;qY5fhY22n+Q#*|+d46f*1v#e5#~8%8lD zett3X1T;~x0v>>0%yz1B*~O|-p0my1W8TwcPSa>YsZ2$O1?Q2b@+X3BoH%&m z_iA=-isVTj_4r$p?9_6Yi9VK6F)n#5fYN2%2WnnTZ?!TJ4Swc3{Phci&~eKRu?0zE zvVDmdPjtZ|VJq8ADC1_hw=D|Pe1NJhCe%O>hWj`87i^#)^AR7^&o#SYmzYYL)#B&}r7{dJYE_Q5JkE1((-iUX|KVI9NW*X|y_1t?MoPhiJJVFr){1$it z?v>d^CsQ)Lj&SOP=Xw{KE#Xmqgb63{B~a;h`QBvmmPFpv#_RY$gVTFg%XURnTgR8X zB%mrYE70Uu%w@>Mh?F!%x~s=z`(7hNF&tEa(yzb=2G+*ds~0bLCLMvNwTf$c4S@g`EqrQRcwXRx_Oeir7$px~=J+M`2>oqiTvO z3M@cLXBEIh0!zsr$qxQ>AyzgJirOO!b--TbFXNXPis^18UpP))?ZC{4tqO7DE1T4nlX7vX$GEA)nF z^cwEXW7HB(f^!CFaza?D;w@+9EqsGNmeYW;!c@Mj$f~69iaiMN8_oN!`_+VpsZi6u z&b7;!HgIpr%iCemmD(F}R7IXNkl{>sX3&`|U*>$|YsOIK&foEca9}HTl&e>>d|2-n z+6N7xd`nr^4oej?9*Hz9M8xdplUh&9S|x^lY}{~47mTVf)AAST{bWL1JN90fy9hvV z#RXWIh_S^tcthEhtc+Gra7Pv9R2n=r9IDXzOXDI!Qa<<8<+6kAAc;GAk>#4ITRm|i z3}$@h8k#jT*nvRjE34EI7o4xxU0XcRnW+xjBvQ74)#!YFY;ODx$<(Nq zo?nVSX7ck5du>b1a`a5s3?6fsFAv~7<}7S<>%#6lFvGeM+n5}fx`C^2T(ou0a6h#Mu?Mos?VO>t;b``}$49r^{fH3FnYO&hiEfrd zL$?LrWfNr<0{wZx;_|z^vK2`8_@_T8IuI&-!79v;fMkYRZQs%-9ZVb+lszVsYicCu%_;vmTJhhb!QN}l$f<3^rw#ey73iX_| z*nEJ63^e5N8Im^)8QTcqN4{WfMF%)NB3qRA;PUK4pQt~euQRbJ__{dHiP4^Pz~3`9iV?sm z65rM^?#7|>PeC1qPxILI>qmmM^+jh;I+AJfHXwjPD?f=u~m^BD*ne8cgN59c|6er|sD zpJ{MPGW=aNO4Q_(l}Al{Be~jI@%B~^2wh(~N%>1dJO3DYuKGyhF9R~%0)6o}5N!Wa3ghRyXEJQLBY<0s7 z+CB2AH5Nq`G5(?*;4$7daRYp|Hj4H~{uHUO<0!bIrDPmif+M@4psXo*fJ zOm;bZ9G~8w4r>&Fx5<4XM;-&}x6${$K>DkhE?`^P?#tu`=GO`Up95r_PlpbQx&FjI zd*07RH*3-d260<@e=7C_1AxDA45Nf>KbOs2BdNJL-5S$9e!kDVy7FfglLf7F3)rp4 z;|7G=Jzaxmn&4{mGM0A9QCMTsHzN85Wmp4M&9GYy_o>R=$aXX!5xMKNhyfH!()`t5 zbeZ!n70Z59WLd6w<7kwub%=#{22z@QAPSqXhK!GKfSpa{?CGXrLlf%KVo)VRiVMf% 
zOR~|amvhBL^jo)TK4H>oFWXcFrEl+8?tVECKD2m_6@>{sx8->_&5XTD>_@Isz%N!g zRy8*JViyLgQ=Q(dAe)4Q==(Nrp3+b+g1R2WQa=UeX3c@>+tBvPn^=nqgoK2S{JqX4R(#e*z0(|&~xjHEjq!~XRSKW2DE zm>P1{PQ3d^!7UXLP_T~!`IvJ<;U$sGNCuA<;zVNI*w+ACRV1^}S?gIy&qXT>sZ8fi zF_T9up=vBX?vc&3LyXy6zzu2%IQ^lu*@HB$GjtN6NPUvSg_*LFCS!y9+m$cEv{T}i z{}CT?g7__uk-QYP37^5KiCLFbIknR`+y z#m9X8NlKflugz6mH6l#qKaqn)VuA~mn=E@&0O4fLl_qy-*{Ssu;scSSgN>Q$^wbA3 zL5=fuEZxh0;^KMefIW26M2n>CT^OXa{zZZiF9&w!l-iBXqDdXHAxV=?hfH{rOY#-x zO-!EbD(Mbb2dBFu{4?#L$~PFO3b_t2alI)rp{yps-^#()eaXKnD#PlXQJzfU5d~Oy z|FP0>yXOZrB-I> zzi1zg#Vn&2zA$Kg$%R|#&i`$*lsvTxK4e{q=-kU;42WU79xt5u(a z(n~0)BBpd(uNt!B!a3(`!)UNE3XwlpUx3jhG$kcVsm~+LhD!BQo{F5_A3|w05n6H; z>(=VWE|1uyw%Vy zS(t*MNhu~#XsLp*0qW`<^+L@memXxM@#UNpY8vyvz31|1xRwN+-PN$u9JZ2g1Z>C# zNd>7wW&~v?)yX?qf}>Kac3s=br3G`1rChVZGVVyF09p)OE&jGPaY)AT< z`=EypC%EtJbixShDQ%dIe0>L36X*%5*1lf?n8I8=KJj#hjWy}M;tOi*c6CVhTO>fI z5&qK&q}f7C=@p%NHgVF%(9GDjSDg|jvj+P!4iV=3&AULsu_S-ZMKZ)D6uSW7@2EF>_T(cDrn%6xE>nRy z(2FvQxgb@BON7JlM<+rcV24lOXVBluv*$Xc9l2}6lZ$35;|Y-*!baYKn8{uiMi#`t zs^lAz)U*Ic!uOdt^Q@+39X_W?FDZER5pJdL1IbdmVKJ7Ft0wj%%oRHk9 z4%)?XuwzkqD}Nv3$auHKO^G8+i7}?UBsSdYQizWV^0bd^r^fiApsBl{*doA0fEDT~ zY-nEr+IO5^hXl6(y_`U~Vh zjD8%Rk|EUdg%-i%qlT2ob#6TL0js3c%Nfh6&P=%qmD~b339&&irP*v zO#LwHQwaD$Iram1x!7p^bIX}ovdT@7yZM|EX^W|@d}zjuOnS|iNXbWT0kmp1RkjHp z9=N2ZQDmQ$Ee(j=l2AT;;i)K)CtfqCDOP45#JHrM_MT}NgNfKK+quRH^&tHY{R|_H z31Z)@M#&bL1Xhm5fAQ|hRw=kU=!*sR2khLnh$XzS{bAp`XLk&Fs0QsiJiA~|#UHp9 zkK!M3*W>yBVynaJsd}$70z(M$qjdOp-eqPQNHu>cBIJhZ8w2E?GtS7pxXO4hm1B9h zkHh||9k;fq&|x}bW)r^ydjkDAn2>SM;&ZCQxf=BlcBWj4)xHhb-Q}!yBtODw49?F=qI{qzQ zmEltEMLnx&+;AHrJs9T^mLjBfoSsko&Gpv7&bv5R&tGY-u`hLbAPSm8DsTyeoeYHk)3avYYhLD#dEO3RV-v-W354G^bA= zmARpL->6EGDE;+G&&`Mlr?XLn=aFDuakOJfh-wqERb)8$fFG(tPp0IDF)a76_)x2> z49r} z^6KGGN>z|;IO56*3?lF(ibs@^3FTBcz%X7$t(bePxISA}6!H*<{tU$931qFWa|1|V zR;}IHViVZ+5qDdB2Z?|hi*JvTvc77;>4L?9J7Kb&K#-a-zYIh@iGCX$HPYGDSPZoy z6pcY)vP2Kr{EGHG2)Z);R#0LM?xVa>1L7_ll`QynQNcCa0g|mtyS@0zOK*Q>zHfD5 
zpP#Yn?;#^1h&iKe`3`3dB>@&6I;g;jvE`SLpK1cLt3@~~G!kPPX~*tSS}QdFItDsB z-4`ZT0h%cv*5s8i$%cM=gfJcxrh@Yo*SbwM23i|mftkj4a0dD`l|FBYqpOTiqux_E z!JuE|vVOec_x{_L+;@`1eEVvq?rXj=xueTu@#-TtTX4Tw<|*?u8~dCUbe*8fp1PH> z)+=5qpVRRmzB#8bGfrFVWVR!rrJr${TSvG~0j?}s+WJ}R$-nKNF=XhkNbo49y#$8; zq%3y5(DDeZe($)4Fhi*QJ%xI+3o%{Ny1Z+lp}NG?Lg?ZWF=~W4I#o8ZAqqWel(U?^ z15%ZdvuGO6c2dv16`U&@-Y4maKJ)yr=sGRig(GqGQFW`-nwjyZye5_pP!;O}X z9OxWP#!!z|wH|!!pe3_T686XAQ0cR9j}1wC?BhViT(881?aR_7d#L5F{xznjTH3FS z;R;$~NN@c4yK^=jw6pghJrM3uwXxjB&e;0JZV%o~3-lL(M1}{!M(nf&p&`2gC3mp% z5onMvA5YFSU+>eOKP*bt<{LPe7UeM2bU?Sf7Z{6%KFqaswC!MU`&_A>@GmkI5_s8m z-L!v%_c0fkw^Zq{A8y~-WJ5#m_kS!K1@!q7{a$<&g?i;LP){A*iA(uGAfG$utiorX zs8M~KR8jx>GfJ1g38sTthhHRt_TP^bg>y-@*zI41w-;sD15EVCX%oOn zlK}DlIh=V?82to_nmxg0l!t}b5TzFA_flUs8B_`A_NPlRN7OKI=J1aJ#JoA44U)Oz zEaBorH13_LWxfzv{?vfqnR|P3-Wi_>W1cps|LBTUNPTy~w2h`TO>l1p%QWJ42;0qC zp-lKF+NK6Lu$@yBxC^iLxICeDy1RhO?(?7!Cib1nfG$OV^yl=aF2VcpRKvrxO>*$( zO1Qs92=CK$w$~NXM*}ONGk350XP3PMsY!K?vedB8n`=oV5-Eu#n?zGk3TE0oWmCcL z$J$L<$bNg2JCY%;^T&Tm~@uJ*kMiNpEtm)da8Tx zEdtKb&`xlq7*%QzvoAqHy5Jy1kz=CSk))y3z_hRLg3sG$OyDBB`dae3Q1*{0`@VQR zf)9M6u+n@?kAM4HZ2bBtZL}3lt30Pc6-`eiBkOl9LAtU`5R<$oR$Pd7GEu1|i#}rp z>l+*NGm`I3larHRE>rp5LbhI#jQ4)~WOyv(x)Yx;ey@{@0EsthEBjK_^yMCTWS1kCWAKC0 zN3J?2wXfRNsFa-WBU&j9jYE&Gy4M>`Y5@6V#n(J75{SSaKL~vaP6Uo23iyWo#sBu{ zl7D6dQi$eaYqCz91{McjME0`w_GR)`wS6it<(SYuvx~~9;Ywe>j+sOj^9q^|ozX;^ z3DDw*`U^#HZMaJGXouK6OVS9S zKK$yLG^(8?87q8Wi~$Cd@AK|PfGzb*s|03DOhXGD0dUL106eS^}l zsOj-R+>NKrx2nA_Ku*VI0}CnN@l4;>v{r3Q5F82Q;-Ur(47+Wrp4ug=xfIXOQR`?S z$3Cjzu-TnyNh84O@`Ay#@|7^?{s&00RAO!+OSuXakwEue!buhUwP7e<-ki>|R|x=)04u(nYNY$s$ifYz0ywz;mo?M}7s@`ajmJk7$BvRZ?Wb!a4u?^Dwoi=k3`e73jP8TJ}VD2M>s{`i{(khm+V$uWghL}0vjdr3HZSaiW${w92|O{Z#{->vW?RpP>32iw51S5kjAX3&gS z|9Ym6#5~d<1Mc2#!CCk^{e7Nq;~3NQy5C`WS|Hf-@ql>_ zo$|Dr_J>m+rnvBH5pA24tBwDY1|XWoXi(c9l+TO=h-qX=Qv|{<9SN&WrO$kX)hsCD z(B!iWaM+afY2)K?Ni5amFq>o;VjN9$S07DKjN6|$Zxj>%T?}{orSb)7jq%?G;PSaz zY^&k)2W`}Wjn(6~eQ`f@Vd!bm%A8FaD7A^Wl<)O!zI#$E{*FePB)I}%vwNPI16dM(jraCa(1l8opg 
z8-#u9>=(RC?DL2~(d|FRdz-(+MLW_q+?cZfIQ-&PNQLjLaCc}mBoJL$vv2w}wN$zi zyh)-^iS=!uhoWxZt5<3qy2`mO-IPeHAj>xlFW%Vobtp&WKVZ8Vz8fw$_HL4B5vHeQbX39lnH2QNK<*S*VS`RXuO#Vt@bRTAPDBr3VQecA_(k~_(=so+Q1lMbI zW4l+^=!2Ql1r`{-2&-0MGnmaTW-*4o|N5-Fy=>`a|mJo1h<;-^7R=WAWf?48eiMm*{%iU)rY<@bN?- zWbr(=nN+;E!E-v-^o(8_*IugNZ*x|Acd_mJS|;>3hO@vWhPCEQ^^PtvE%u_CfDt-; z@b?(^GGtF){DKOEGe{Vcjd!J@&cYWdHF`s`5qYULo;qdXOgUk7Wt9NzLpxU2tV%ua zw|WMYf6AHjNg-V+-H~96R|R23L?{wUUdl!qypzYHE?e&AY#yg|Zch>q6lN z;MUUrFb$RgTHF|=-Pcp`M7wPOEt$bYChz6sqBFdLGHKC;&&+Zrn|LxzvUBb=_hydU zKh0izk`FgL8;R%sQ|OF?hp%npYNxQIea6kM>_dA*^z#XgC@oeF6^kX`XHL5$(!|5w*_2U7L^ z|IRttJEO?BB0KILDVuDSElQb%60)L;q-df#WRw<)k{M}GDkJnsqOFVuQc9YB&$+tF zz2D#c$2o7$InVof&)56?JeP7&`)xn3-agIr+TqgXoKvBtUoJhU-rY+%Ro#AJpqZ#I zKIRv>r272ug5VeQGO}IU7ZW8BTl-yOD|xH`k5=57cMJ}9(a1}ZFT0mq0B zo_NqEHtsZWVujO)7*AS2rJ6$SooMU0EzW%p9=&ikonzu`nvni3?)Af(hLl*EYUiH# zANR%_qvrkOmFped+@ZSK9R3!^?0n<$(c`UD!PTatu}^P*&9e4{;&g-WynQfsmHf=U z`k+gC_?#Cj`Qs}EY2w$`+8ws0)MYj+U%M<`>3zus93XMto27ENH_vx{(v90Yc3C>r zSuHS4>Md>A!cY!=iQm~Nby$;5TlMsG(aJb!-)(if6L-I!;|vd5Jn`k14Bq9(eC&H6 z)-FX)_||!+U83jJeQK?xSHCVZmV9%_tbYC+r2}o(Am`s_j+XP~8Ko%eTxzU3BBOme zd!AEi+J;x6hO2_hHLDX|44B~WpWIM-*y>(kR86Rt541GW%t_+z-dswmX1cHRVW-Sr zZ?eWObuL=(vzJm#iyZJR!hZ{2ZGTpV8Azy5`1IMLZzQ3`IrhT!v)b;I{$W!he$H0@ zOU{~wQmOwXrBz9a^XNN;w^>G2Yix?VrB@r+FpR$(@BYdCfo9h8Nb4J72%#Hy)>D~R z@>Cd4v&+)VtO@+81m1)-s;5_5YPLg_~hsrc4 ziL8A&sOq0qs{d8-TF|>i1s1X=o|W__+*dY z*8Pq>$8~>*e30Z_np?-?aZ$diZ^C2F$}lZ+%`c7v?)h7H^si?${E{8EAN?6z7`^;C zV-=cT^hA3yIb7;s1y$|iXBfD&9 z$CQI~C+s60uDa}X(crPI`1!QO!*ex)4sQ7}r~A1N_2Ih%Q4$|Ms1H}(7|aV$kS^8l zQ2a1_P%2@;QTO9dR`iHQY`E*M@F6(7tuT2~scP;I=Fe|#KV{>Nx-IBieI|Teg?;Ne zbE`C_%g7MoWqb2uon(mn&Pze16?U}W!t1_$lyj9+=jYYFQx#OZt|B+_g<;hBn=4jS zd=1JJ_SrjOz!R##UvpgP-e#wb0;NwRkFP!LcUVMbW1`7LOPkNtux67zMuTUMG*G75VNR7m~G`7eBJ6NDBPF0lz%E&1)Psx&^3N@7QP3<9fwX?^@K? 
zG&KGd*e03}Z_?Fi?3la}o#d&J5tRC1zwCcr6{hH}W9V>}doM}jZBPzh%IBnKLJ{Rv zF?o3X6rP?Fo8G?hB7KxL>Axj0^(FsI89~6*VxEPYMYKy`qflp&up(*xsr<;}nfobs zJjuzcLvFk*aL@JF(b&A`v}1$n=dS`4IJjE1MK->gnVYv*Reb8{{o5)Ij5Di(-8L=~ z2#RmJPHd^yO8fC)hipkl!*4m_quX)9M+|HZeZ-q*^CMmN0vY<+^GE%Sy;IBpl(=$x zx76Nwb>1H+4;tH6>hzfCJl=jUxLLo|R&$QF(#{FPf%ayVW#`;dljDcnLWhhS>P?!W zGGhkHCzddOeb_g0-DPy0FfUd9ppwP!jQ<*zUfH2qGdLQqZu@viu*Loq+!;YhSL%af)?>6~a?*E#V1y6cIB zph%_1G#+>FbFVD@CSO*{)4b~Cy@eJ7#XctvFaNPgz{HwQS>fqTjYb z+?PN%Yt;#X52x+!Hu)Vb|2XKq@YJ)*$E-SiTYudD;1B z-QCW+E!$LUqHrwK?buLZvBj&6)th{6u9{L*8zWXyi1R#uxV0)(R<++gc1n$qHkgFp z1d|$qqnicp3tdPi2hi;NY9ntRn(qz8^7r2p=-635_|hdYhWSxuu<3S=c-RdT5 zQwduI{CA!8UYoj+HeUNS|4J6^)H0EAf3mvI^N`IQKN6Gol3HxPc9PEy%DXfL48gg?=@*vRUWVBz6*qCM<}cB z(aozN_b>~YM}=h@)1#Kl>K|@D;#QVFB=n;v|CvXq<;c8L@Tb{wACL}I;^@t<6ZND| zH@sa=b8jClSo8DHqmJ7-6?YESPVEc3+PY5r<^qd@8~Z$8IYv$hGi_1lir(Vyo5+2+ z>GYI^?ZwHdd+mcBE!Up6DUpDwkRV|@FU?z zt<$rX=&bVNHV;cZjn*69x$`w#ZRCx!^8MrG1&$H-*Ghk`GYfT)<_~t{byiLf{PCbc zEhBhevz;pA>oT#ap@~LiW~~2)yz(1mO+~w|hR18lu6<#-NT%%K%`c6IueQFK|D{p# zzk@MXa))XJE;PIox^OztM0QUtWt7D=GWYce-zdmXr-pIm3dv2wDuQG zQ~ZNNZ=FtfJFl>KL>BG-Y0`gUG6Ii(f8F=#5^*d>$FgNx_L)GaGH1^q&xp@KX7@oD%wIVabxcEBxRP{5N2wkZ%sm4XWVlE}@ z<$1fkobDb6O-^OKvayI>`>`qajz*fE@A*kMs`kL|b(iMc)X)>c^F4f+r|`-+MQzZ- zyeeh6PXSRZH|4GVW{aD$MZ<)gheGj5t_Q`RU)7Hd9~FGvpV2lHnLGUJ+9PkKrl&1IPZ7Vd4IW6~dyLyB<2n=!kSU z4Qp)a-(w0F@ka&(_(z$$0E_v>6s~sqh_~_)j!|9~L;viAQUghs*A(j4lpK4MEbDzewyxP3=Nk^yuJf@`R z>n7ook24-KwmHZ?CO?;t2S3ah!M{rn*4y1RYvRqey?s&E&VFN3&&2qz`lAkE{^jDP z^2gq(Jv<`Si5ny^i0IMmdjG+MV|RX?nP^HXI+het)Or74aLE&+mX=%vw;%kDKj-#a zY`(^G@5#Y$_P4hmlXVQr$$PaWGX6r%ep3D0JEDygh4nMeY*TtZ&?Z+=% zw{LDYi@$t{sHQJ@FNROO-Z>=n$rfgtYSA5S3DvXwlFt}BveN>$mmB<+dhbG;@(Q@G z7B_KX@6h&Fof>_a*K|{}ZTfQk`b5?a4JY#N-LYY6lfKQWn>?jvC&g0iyXR)SEiP6M zWriV6SA@v5ThG)w*j48WojI#qdh|i=O7NdDex`?&94rg=k80c06?a)5mv!;*NMoLK zzO8f5&Y@p*-uo?aA8&+a7Ig^GQU`}aE1E(KpYF39`>en2=cd9-Ls!R}LXMQX@0wds zdg}nK@`CSjp3THdYsDf)Zrpnxy|VvH@Y$XU0cP6C{Cl@DvM*TsM^yNpbzMx-xjaIT 
z-f-Vm{B4^5>UKtzs@s*T;&~&_9L&_ejH)MPZvR?CAK9j)DBg=`3EkFoHpzcIg1=}M z|MiQD`vE*X_Wet~HFIrCMO}qRJZ?GVP4{_fCX`e3mo<^r4jno$8t~fAppF@>HdG#P z)@dkAj+s4H=p?&(?`8Vfh67!{uYNh4DmlfobCsMqn&=HlWqN0*F$q*6;os9v_dK3# zeoYmR|LXdt_69LmdG~zpOGdf3Vm2>45x?b%kWSf>$RATiyXU8#_H8=aG0a zp6&AO8WKfuKV?;~8|-=49V`EPGIDaqoD7psVx23!7M%@}m%1)y_4+k?wp zTr3xFB-gO!lEGTRos}yV_lXD(?BABNUuto$YxfD+J=^BK&&)5$U)Zw7?ZtsxUk-H( zrOlh%S??!%zkA*M>WuxaM+&l5L@tQ^y3aWEb!*?KDqj~Im&K&k#|@U5S%zAkRxD9b z{24h}G+fv>pxu$u8?1eLHUEJ}^*MP(%xyKx zi!ysoEHf5p?>bbp|LQPrh=?qOuTkTXVEM7vhDd_sJo)tM(wxDYj*}~b#vQF1Z@DY{%-ur#2nQhi-4}?%uJ6)Sn@z$o!;xTT|=Rg@+fS z11}8qD|>uAw7W^BRP<3@C`sn*Q>~=DSL4lfk9dB(hNV4zSdW?K9RCbct?OF+yY?=5 zoUnH8bJ^GR8z9FM?Hu_yp9Sih$WKZ9J znV5-?A-4s~w{KaYmV3NqgqbS*e0X^}?Wm$6{;1;c=_guZGKaLqc5dq{5II(odw7R=rol@0VXaccMT|??$h*xofw^lgTYhiY=7BFOOVNoh>Wcnb-s? zQ&#VMeqS`+?&+3>>!0=s?ooA%`O%{;S=n!S!DqC}317BYx+{2J?dqX#HqSko#^MEK zL`7x4@w{*A`}?0*`J%#vr}Zb-JorM4**rd~^>VYd;tCtXsOEgZ=B8`TzQa~>rC!UL zy`qobuxi*rtASu}Xyoqq_dW|xP1f%1vV48F&g*8Q2Mub*Eqp+gIeJKh|B=6g zw{S~(_W8?miSJDVEws}Wy+SPO%>tQE6GO5`Hy(JodtZ{*T^UN~T5 zN1xXe@4ST(9R~a&v6E*_nWGPHNuM=YStG-LS0Lxzc)7FKS;S}V8J)lo+|?Rup;dOw zN=H@KnC;<@c%R?C<8q`YEUiTKufiHXfEWndFqnfsb3BE56G$+ z^p{X+pOUnfeH9-TdUQKLY|>Jzq2%eiRjb}Gmo)KOrYo97wHw3g`|`f&wO-%Kd;x@+EI8jK1G{TVdVu`*Sgkvi5oBmHR`L z&Rf0*ZwY>5eSFaNQBR_Q`lW?$DvHWYj=wW_uy}DsgU|2hS9x|39}L{meEz|>z&klA z^^jTvs?`}7vwMy?H8?Il)ljAPUP1Xm1LV;?_*&+&XRwLaWzui;b^8Zu`t11Np}GI< zr!7Av5`f>VwCN|G-J+yxSmKe)%s8e8(;1mJk0of*aCqEm9Fa(!{z)cI|D=$ne^M#a zKWX^P;G8Z#K|MDjjz~olH4v#KtVad)EC@If>uGRF#8C*?1K^T`qv8Ktl5q@t=8oKO z0c5=icm?T9IAV1e*62X!KHd~@OxqE`lWv?M9v zJjAsse;Z!bI4NtAC7ac3t84D_@Ni$=LA7)6m)_J!M&m8d&T|`T|BKnhcd!3^KwHVn zG^Lr7qEWq+NSipc+MmSdk!N*sU7b zq_%>rw(YWQKl$~QLTD;Nwpd}o>&)sSkCB_&@Am)Hulm?IeCtWT-9<%`rKLW1@TVF57(l#fj&|7bD2^b2P7MjtKf~_6?5+^IIQ+U7_nBgeOzBUkF`BhUHPUSU4Xc z5J(uKMkwEQm=Iumb$*eb`n&LCe*L`$DWZSaI)CRzo8X1-t0a2|mTI23e|V9uB*SKM zbmI*}U6m@tC+@%MmD|1d6g;(jzHVXS;=zC;r~iU%UhM z@dZ8)g}R5d?xM;RL<-HYf@L!C{jO+B0Avag(eTbp#Ob 
z6ddV~)qopw9EoLvf_ec2q8X0FGDPeKJU}8(TNJo4#*tX2X#hUh`nPP@BuGS8iNNYL z@tdIe5SUZq8%#%WpCKVM&kpi{Na%e!q>kQr4T+(;_E2`_)=>>~NeC2_d2URzrZEIs z24xO^)Iim@K%tov;~MCVEl^sf(S!!NCk+xs6GOqs{gcAz`D>6bT8ThrFo}kW2SU=| z&3y=>h#ILtI;hcHXhTi?T&SIgNn-rrmPj_ik=e^7(v5Ls_Hv1I3*5BJffW)N3>=xg zT%s`vM`kaVXbNsv%f*J7QgP%z4i`W)BjCt?95H}sPQ;Pv>`CP;z_=L}EKG0|77Lib zGOvBZ*EkA66ezRyr!cBt0MS`8${ha%mOUdNzbTCxMN!BL&ipy0f!2FNVuj`65PYNz z(x`b1{wXnpdP0y!wKOyjMo+?kxO6^b1w-Ua;|cYe^RkdA1d&k!8K|UB4(fr?J;IPM zDz5~|qrU5)c{Sq-kOG9Ni9nhtLka3dCq%GU;Gne-8Yd1kbzK?Cf++;F;2IR4iT0CW z3Xvsmm_lM1QUx?%#KWYpg+#)Wa1^$XNCaaXg)Jl!k$|JHg+wBn<0xz)k-!5Kwv0%C zOl7NyL@~xu*=izDO>tDVRg!3CI4WCBBzrmoN1b887~IUTU~GY-&ahwt-aW&Di3yH6 z!-6UJ4T}X#Giaw2gs(7ozXHNmNhp9!Sc*Y+%0mBW2CbC?lALr0HH75oTC`RH(x}KD zm4H!kB{0lJ3>K^C8xw-ic{uP3sZXyU6jcUSik}z35S8Ty6@e3rVKh|}44>jb#42u0 z!Vp9~atTte82#1?qb~;`&5Bn)B_K3Z2Rym(w>5-r)rF{;IaBLVemzLzh$^%Xg$UrH z2TDeT$v}0<5Tpp9J1O9uEie>|cG3Zpz+3##g`Z$C^wA=S1k(u9E<-~Tvmk3USOvH$ zzGk-y_!WAZ50bCxSqwG8Od4Ch6ao!LV~dwUVBlzM@lpsTI2v2L6oM&^#uhJyNXF6F z;-vtM(%2fN0KL)K8m0iX(b*cNP>pePwuUKdz`N)(EHJ>$3=0es9DRlbV*-vo!-6pp zN1tKA7`%Ii1ycqNSP4jrEDgu%Lxh@gO-KS-5@Wqcqgi>(ph;@?!(+ET2YcM}$^SI! 
zxxnw~x?E*DhlOz^J@!XEKmUTyJkQENzb%6l|N1K0@N`iU4Urp@yIbbz;AlP=eHJGzrja7*#h0syk1C)DU#i7*HqaP#O&4 z0TwK4&gnzzA(&1;@7sVo@^Z)p+!4`z2GCB_-448Up&_>G&8tW_C8f#@B3ND>8K zIE#YmRJ7O#+JTlku=!kT3}vJJtH7<23G@x516flj7G~fv58sULpu(bPPXI)L8Gnp~ zff>nynn*Lqo0rL8>w|=2u(d$|*KB2gtifRGf<(gsrLk~G0LcQ!U~2*dM-XGD=|KAg zJlht4YZCBmTL8F)fM?qRupI&jEO;gcY65^-33&GG#wOr~J-rDP+_2|2F$Onm223g7 zhRuOF_6(Z^b8{Slz-GbR0^G1!umH0XSS(;B!^-a~H$ihzf(sCy)(cpohTj%i1)=RO zKnFA78%VT38|pxZ-GL6`j9>+HlLrW{I0q;L)ma17{MZ2!gwa~;Qg}5uh=GQCf=g>B z$O=N0yg{f2jsghDkVq3{9(#Zh6&%;DpbsMmQI zFE=eCFF`%0EzX-a`fTy_1L%^a67*dJSU(ueo4M(PI7C37oQE&N01$1phVf`q30yW0 zKv9-Y&cFvkrn0(v&4p6fiGco&fi-G?qUAF(o`(A zSh<+v!Io%g39U%MSncd`_JMQ08>4tvpXI_YJ zDpJ4y^^yJh-k6YwC)Tg*I^VYD?t-*^)}r~9A72;jdLNq@n|`UK`Yli57A+SRnntB} zz?!Iy8j^=l3Dm!Enz`|D70*ZGJQkmCjIzqUO?j_03r~K1c^Y0{?cjO0DEzK_Ulsjh zfojA_-NW;E*Z{jjw|A$A;lpWwy(;{M-=ad<7t2K^(N@cm8w-wCmUbyFI9bHP{hh=0 zJ>iF>+Oh*f7V8gcwB-eK|GwGK;&)xx_^RW&w}H{nlQUY)EKXq}D?X_Jqcf8SKmwS| zYz7Zal1rmY;`Z&@vJtgI+HIhUi;RHjl^(WB*`QBIdIi zH3mCiUu|@EDF92&#jqCyQP6`gVLN#SO&6FHurmsg$g(pkumPGb5wkta&a_dLKG*?d z^$t@cSB`Bdn7I+zrUxoE;y9JutW_vSb9!SG@v=oOZuqrz6 z{%Ld6MkimvI8;IzX+ns2{Iph;`axL#xdIVp7OX7zGUe?daZg+=GdVleGZ;R9d+rA_ zq)Yv^7UhtW-io%Uy3J#!Q@h3dUCR0j(^9<3(;9A{Xy~QBq-Kmuyto!IiH1A)-FdUF zvcoBV!K-v7)$`kw$L<&FeDv68rnoW4if>$c&?7tkM_)^M|7BCfqI(h4dZ^GtD}@z7 zogqXFo&Orvns$=?1F$LT90Q9ng^`g>i3k;?kY#xqO?p-yS=$ z?M!5cxcSwL`s-BqUi;prM2ASP6&}Z$Qy7BbL)qKD_IVGMdlftnbme{P9SsM*LlZ3# zY4lD%;18=Z5fX_wEy3j$hybf3wtLK35--pOC9(e?J~P!bZOPu#!toE}O%nnm*B+BE zD96jRxh}mpb|SSa_h|1j>Q5)>Uca2Klh(?ro)%%p#-2GIWHQ8@zV7R@dSdb=u}^xy zVuw}fGBV@igoNrKuixpaBg0v1I^>hxuXKczh(7&5(};Q}s34*@@LB8Gv=7G&!YqL2 zgXX+}EkTTBRYOEHAq!eqBmEX$h3L*CZIE4wGvynJW)ALHahjdqSoIrLlGW4?!-5cs z$7+(_BXAJQJv~2e+x4F zTliIG!oY%>tK;xEqV~5a0|eo*Z&(N|(S{Vzv3@uYG*MQdrBN^+(>`M~a{jHI9@|}H;t~Rl zi#oqEL(lK==WhnNO0u>mw&?b)>iUwSxq+D;6R*5hc8r5Q#g0xh7b!6#Ssp`|s?cKF z&z=I0#T9UIvd!-OAM#^=7mgck6I0Buyyv8JV?*v1)h+L>drmsK3%X0fL+Ra%H}XY8 zNpN@kW=FZZJA`k3v-djd(;!qt(AtC9ixgTt1!BJV3M3aO&1eWVMvZh3K_)}!nLYIx 
zo7JCl?k8b!qQM+EVPnosU;nL_ggzUnWxa>%7Z+Z$Qh03A>I@nxzB1aIac>(=Dmg27 zX-Ohl&JSK%Hu=p9kia7T9lyh|xZ8!n6iA5+IbXKKW`eBSj<=>$T(~;0{Sz5kS)L^GuZ#iRqb=j=iU3uI_#S^^*&g9L9%qoMm{rWDNo!~Z3D|Pzu3p0In~G0O4E!+ zm4$$1hKT~`gjpu~MjSBy-T)Cm|5HJ9WmhgHcn#FwFtzwNAa5z>_Bq_y2DNTVuwH`7 zsv;`N$1Lt0;&VUZu!bmg`b_$cscbvt&S!GpbbsjFIXPdcT(f8T^(x}iug{03lOof; zRj|=W{7Q#wKj?J!uDAj+kn6vl&eA<<$>RQ5Z5nFTiQTv726XYdp=m$5OdLsKglGNb zjgA)XR7;PtG4$L~HR)Rk)?R|-)3289*($LvPygqgVzf@>I|{6Sg^9u^uLy>hrYG5D zDE7mt6^g!I$0B`K%9LL^tsnOk6^poTyyLENYw?)KnR8mZrkMriH=otI8<#{ZSdBZF z!s^`F)f|^Xs$pw(#e$XN#F-umR*w^Bnj{p^1|iOLNhqKXLY(fB%!>DD-wROB0IQd% z>6wo}80h)iv;)TPgFaq>2%%5upk?%e4{3v`1oYAyP^WlNkb`Kr9HLPpMMaumRC^&X zcOyk436)g@SMR_Tco!Cf)z~c<%<9=47*-RT4Ub5G{udsJc+DUlB96fBz=3Y|{}ZzR z@r)UNne7>D@D79ppd@e%Ush#=5GZ(5Vg-@|@+Js|Cd^h0uGbG~hsjjyTh#!x{xYb; zxPR10VvVrEjrt_zwG<4*%f*B4bR{X*QI)s#**J!AdQ=2K6c5i zE}B^;%Na(K#EkLO>+$ct9dAdfmPbebcXs9WIeUk~RAOw_h#t>P<9V(7u)|V2{4s2t za0ZqH)|=Pcy!qaM{ST8BH-+XWYIaPgd92IB<~A4It%4Y5DjgSQs`cJ}21<(V{3ASn zI`m4yX@M#DY> zR*SYJwOT6s$9?U3y{;Wu0$|hWkTU~`$`W5JK2>j?OW>2f$a4_imEN6SIld;#wMG7l zSj}^ziG^MI+xU+hI57X-9nGj|CT}c4v`||$Bp>K~l@?+Q#5v<(c`FLpoEjQc;+z71 zF++>@*e~$UFwICc3C?r&N#Y4o*rd6qyLi{0H8IQ0+h}zq0Eo)$%9i#V^KoufK9YLC zTJDP)Ug&}f&)&(=+B=4Md>#NgnP0JeuVO0v&A!~t72a!nrfcVUtxuLY8I=JGBG+#| z+*{XCYJ)I>)0MjXmP*${i$=7irr8Zuon|)|tKI;==1ib^>2R4kJL;8ZUgBb#M%>-U zpp!fWChw;jPJpaU%DidoKaEF=nlG41M=!CD(-1J=b!=g8FjU$2`ioJ2dS>=W=g8l; zv^L(FeRt#SCOX^*%O(Rd9+7N8xxV~nJtS=ycyci^Cj3CpSwi>8=^ID-5BEKP(Q0Wg$^6AiGvZ-3;xx<9@8 zXpFaLqo;Po7DLZozObjtVqN}A7d}6EUn_xC&@84P;wVEK1VB(i8v{V~uhHut92L5? 
z^CIxh8Jl-DQx{eiNVe@?{kh~AvCaMS%2xZzR*8Awn>K`XZz^x(sg)m#_IC+wa-gJp zT|k$~4fwuVI~t=JAn?L)ViFxU3h7v&SgG4JD!W0$H6v=9N?E_tJ8JSFxW%&daqaqv zMj83Ty^%_z@8I4GD>qkaM-)ZFr;cHH1#Ah`ywgDfMVHRti~n!e5Cq9-AO)cD48Q=% zgof_dN0yzE{@}>+U%eo1t^=u5jJ0Wa&G2$5aGvbu zJ|By%1*IEh7Wwmy-!FE)w5zb=!tIDm@Xrrq{hC!p+P`m zvkYsAMPe3ctw#Gy0IGPs99YhaDL4-X`|!@XlURHUZ@Yg5v5~lx)~4H5DoK1Ydectl z(1_rVg~PG*x%sNQHBpK4uoP-)3_>P`(f*gUFFnW#8q?3CtU@`urMMEuUzXT{HG7# zxeX?1Uw&0+>znib^qf-$=XwTWWt;23-gxZK)|eA#1B9`?aZ%x|*xq=h#ARRSqhAU7 z#5E&jC9W=Jk zr&fp>$S8m!%}XI*-Bom>qOC$MH9Q0Sn;hVt7YDt|~|5JeaCS#1#p z$bHx#fPSdxFA0p20edF^dLpC=q1b*)%!v&J~lvQ~ylDdI=d%z_!U}y^oA%k}y!i zL8M}v3bSaSe8Ki0XVYkSF7Z>qjsWMx6dD6N!Z7<;Iu+YCWXXSK#S{h}+f150kU_v| zC=MDo(-Z~;+i2hzN9U3Z6_3Zt_nC1l`BU*ku8FC55>_|O48$&|L=v`($w8x$I47nO z>DWfx>~SR4wiySF#>AQ?vx#I9f$L#1otqg78H*>gpQYllYLtV$lYql>PE4j^C&p$o zLk51#NuvP8bJ56b2AMO~58@!!eVzS63LR|la?q%3-Tkv->U1;%Bmce+sEN~?Xmkp; zt^3a$K>jpfTU;E{frHK(2*xo;Tw#>PpkQZ}{&^t?JRqL^>tfh1>39&UImZ#GR05Ne z2%6%YQlJxQT(&?b5oY-@YZ4NXYaw(J3ELmXJ~NV`lPFk+gGFPqlMWrYJLih&BpR0= z(?M+IG)y{~$l#I!nasUnDupu=GVydOgL@^w{JFRW=E`XcbQ+1vW1fH=4=AWF;0D*yX z5(WrnoGWHv=EboR2A+XcI5K8$EcwrfAN*@3aN37sVi1uy^~s~fCLBXos z*#p6eC2ksn6TN`LCg6A{=d)B2Cxip%P)M9%8LXDbdKQyETm0Zy3RoWJ3mHIO95nDW zXVL+w4nSJYaiC-RSJcyz2E#zpcxJ&Y{exuyEBs5uehX-1&WuF>$&S+&0FB0pi~)^~ zoxQ?7Gm9etXX2WJ$^@p)Igkc?hl@txL_P!pohu&@!1uK{4Gz$_yo5jmVUJ}CVEr>3 z5(z|3074)VK7&Nj_j9g#@nOgaSMbX*1u&H-TOLua1Fy zA_SYIoMD*&6vrhk;GCRs0gR(@hIs;z6_d*)h%~?tCo?pFD7k1{$ppv`REwPBK-%Vf zmV{?lSby;!0pJwQI1ela&t?6?4zVKFgc?X5Ehr4kU)On90v+EE(XcqOxUc| zfyIHcm3ti6CHlkQ4E`ek*v}O#$RzH_MFt^&Q!->SSJEMnDGbhhKmeE(JN-9f{TTiS zL^79H$y6Gb+W{I^@F4^6!o>`TADrGqrhymEni$MWXAnpn_z$23Jhv18K;qOVa4;^z z1T`R+5mLZ0Gfr;;*<~hsvhg1Q)PUF-;MGE5qo zwT1TY983ZY$dz+q8il}#UI+lzadt`ojjLn>G#cBuW+E#9^VoUfev6ph!+M;02drS5__J2R zz-*n1M&(M;z_GdP101U54x^w&!-@L|4A7(DVulQQ8=Mn^tj%ftz%Ds)KLKDzECaIS zKcgoG70=i~&Tjevpf1KMYb1&kQGAoI(^bfjEphwO%a5ho_ zj1obkgtG((G_D>2kO9ciobRJkIT18KVN3?6>i~!4Y8(I>a0$*AGALYX0tt{yodADx 
z<_K^;20&EoePI1F;s-QN2n`@1o-=O)8W*g_pdk2wT4~*e^_Q*)l!Hjj`Z7HRx6U=Zk0vg0EFD1Q#zhcd+ZFirxZQHhOpMKl6?bEhx+qP|UzKcm_{$y9BE-KlTbyZpW*^?WH zV_S&v+Vrf9OpGi{oXl)YoV1LLl>Z%S7zQ~9Q&A&VGa@QcZe~X2|F0z>oD0Cl&cVgc zPvqk2Y-VH!T3#3g^sJ`Lf9N#v7=`}-juYLjVe)oiazt`uJ9=`9_1`02K#qNh0 zXRbhc`M)8*erGAAp%5!{L8C3t6KpK8L{*v;iffkxCF+OR$m# zjz!^`DobLqLY3@^whP6?u-eDNQU{dvwm}z{WHR$qk+EsFSpZuWz5#}7X0Zd^YjNeYH`2z}!dTe{(^)7>Izk|+R?;B-u{uu`p-M(uHNia@U)<1wB#6udQBSns8 zDh%J_`ePK2CA)7TZ$x+;aT;OZmw46afRHjDT~9Z%I~{S^ptqTo-}@ zlGxEelxuu9hB>7cLyB-szPSVK&rWtv@u|1F@QJQW_x^MKegz4hwI zAom9oZG1AWl)>Q*gzD!F9W8#*Ki?M%in{ab?Q=2jXfuyN;OC_nNlBV%W0P_W zTtjItX$7uAw{ZWZ9RTr^##=PxGJTT?Jbn*r>95NJN34F$zNp+U2HMRiE!;D!*R2#0X5=5mS*)RVZImmvwC5o@ zGi(kP0wGvba529-E`Y*otCboin=|v=SOIfI%`d(zF-$5juvIvmY(bW?@K4{Urh?{>(1aCX z+!o^kc%9}s#w~5jK@B=qxEd`>ECdHL925v0Sok0tjEy^VSUX4;SUXXxg2&sUpIM8@ zmO@>&jTF2AOMhqG!g`uJ`OrT_N(}&PHb7D}9aY|y^~gWd02aRu2}`>IqvGbd&?-&K z|HPYP)YY;K`hRyE7Z1@=;6SX(vck1eZ0v4vT)eRcLYtQK2q@`A{r* zE(=?*j<>*+|8W&MwEay$hs|-aUmwp1Ir(ooDvN@9oqr!hP!Ly56@Y=>VqT$kELD4ZQ77#;^y|xCDmN4gncSVAL$6}1Cp5hBq*)tp zZ&S4Ap!3y;$BD{BSv&#VSBfNxrS*?# z?{A-;anBU)HaQ0{5dfau}` zM$z#vqXYA22?~u+p;J~S)yO-!;8tJkyfdUE8(INtJ=MUqtIK*| zU_~XG4+|5+r1$)8$=E-fLR9o+slc-6Z=Rb=MjjL;GwR6%%s}t zMT4{sr3^q6)`_;V9fQXo9_Zfm`v^_2lhbHwTGhcjLoCQS?nB%iVsn=VVPV;h9^K58VLP-ksid{iA&Vrc67T?sP;+ckK_va61%(oA#E-GBG$-hd<5_wFF89t%1tM13ok$Dh2rRW&z(Yq z!HXFRcxY~$g9wzt1Jz3&n@kytXh&5gGwjLY%#?BcW66tD0SbpFUwboU#Ti^=%SdgJ z@iL7Xneezhrd7@=_#nBAM-sstrt2G7ZE+dbn$HuGQ z7=lfSF$%aRIm(BODW>k_+~bqvA6pv70nf$Q12_cj>(ud%!_t=ylB+Jz&uUs#ku*S= zbMQ_l|8kgzI*Wk%zl(~?L0ed;HvW4g^>fbI%|fFKbqTs#n8d( zPi=;IPq4g!Linh} z1r1Usi4$azPxwSNFOM=$&hs}RV!S*1_4$5#lZ%~Lrhc^&|L03?lI8-w*$W`Q2FHI_S1oWH8pK8@Q4PvR}LdFfEb_eF{W z;u!Q*>Q||wsE`R;?m!;Pw44j7E za?b$a&A?$kK0EemJ7CbSGZ)ahRXhkxD%N&VJ>`3LC?(XW6_p`9X?StQ3cIo zd+`YcujsQG`8sk3nUm9_Du;V;gD=5RUXosES(l@pG6ywdjt{F%8 z4d*-z3AmikF_`TvQv}-8hZ2IX8{Kkd;YOM~4v{nP3NP%a1bo8J=OnwMVn0y$?2y@K zj};ga*nVI6wg7<1t=*sZ!K;<5SAGY2xgCaE_Yb9TqjHpKVNl(2Sa7ut~}=i^;O?^ez5wpAVGoYsM|L&&04b$5Try_D_a 
zrPbm|9^kmbub;$vcGZ4*r(e}zEXpayqpxg~ljG93 z=H{#Tc&GN&`g5JEYgxf~xV@3_>JD5hfw$^$=|}ix;b76_^?Zu#(DPna@m+vEW|-%| zf9CQzXAQXVd^o9|`HL>s+4o#|z2cyA=`+^t1K6wOo|)bcfAE+!Vbywz*jQZ;O|!L? z!5W%Spu15^$s6B*=!>nJ#~cEtEvD}MI{y5J^Zwmh%CPHX4P-ZW1h&3FOxI)h*}OvE z>~~Rq5&KzR6E*U&8hl<+dA~crNKE2#f8ZJ+b_~ilj~7k(2A8OO;*?;mPDgyb<-~2k z3n;=j!`7H-VJOOgkH!8S>Sva|wB7amwuY%Gp7G);ez-3xe9V<$pymf_AT1-^WIvJ# z^6kERCj8j+n{wRye2(4f#SKu6DHEl4@!%xqC+shtFe;B#7o_#3kmn6*J*L`AIUG)k zA={>p-}F&!Vi{C)L14MaiE;hrSXPs{0N^{C zo|ZZ&)vVjB!2%KIfsmR)@kg_nksD>3M6lOTgG=~6Hi)CuL_jn*hkP2i4oKO?gw*n_ zW21r*eBNmDF*o1curf32vr zd}Q6`A9e(a_;e5nKJc@2>L`4Wd7y(qOZ^pSLELIzcJV{Dt8n*@KIcM109%6^CcOG0 z!$K(XtYdQ9dLJDq$|?4*`{q;f7nY}?-KWzsL4{HTAF|)#Yu+VK);~=?X^u^>ZWEXH zw|}=1?rk$S{i(*3Y?wkeX%rx{^e7Y@rri~8BCMbvQ2)HWnrK4=04dXtMeXNOoUQ!@ z+bq-`Y>Ts=?#aj4F$mPo0gN_X?kQaes0iYRDL|_!h=3>{GMlTEb=JD8^SqtcT0fy14}uw4q=jQS<71vSa^TkTd$I6=dTdhyFVa}`zU~~@0?vt~9fXpDh`pcd z_?9hbmavl#k@7l~ts!t`(*y*m(xJR4 zJwdBESI0QAi_^-+>rCLfD5%v08GjyO@IbaGR~<6?@7*B+6N&BWwnFnkw1&ym=w63y z)`YwRKZ!_cAbNTp#Icb?NylYacsKQsqKRl@q~z z*pXSxA+`_ofOOBucyU5<)9ig3;uuUX3%FcFlmsGmI$s3n0FGi&%gDgU@E|l z;j}EXhEWMz1(dFdDpp_G*!M_4Na!;?@^V0GRwMSU{bLJ3>P+g^Z+d;9XTP z?Tppfpa8+tGh=l9eHcrxjim#;IhNQ0_D-}4hYMb^(`&Z9A?7N~eWI3apw`o$tEp4} z*zM|oy{toV>Qu#du)t2af^|N2F07G50KH*VeDY$I~!o@0JC;9s5b^VGi zP4SgTGq_Lrg)Yq!U6BL)D6>##13Hz09Z&z>mTl=Hb{CE9A*L;^9jk^aJo`f+Q6iGN zHnZO@l09cB#(&i3)HK?E#!J{VYsDhyKjauN;25w- zR?S(Q5M6&PHFSBv!Ph+DS06OVtq)GOGOr_Ls;etC|4K6>(ixERwMKxt6WcYu@Y80R z*bP3Auvkj+J8|K-{C9Kf@47#OyvpuIyw>heV-pM zBAz`g0t}`e6Z4}=qBDX0eBV*O-j73&x~RsHr_%={=@`-4&qAb2(341I|7h>Yd_ioX zYmj6SG4I_CGkd=K*tD=dOuz^2hl?LGhl!w6kvm}6EWkPzl< zmZjXCvWH+fnVc`W9~A0@)y<#Ql_I0cbeuobPWR!4rl#K2Zdm(orcR8A6!vePiGGDN zG{1!)E5}=8YHvNfQKYIdKH%`}CZ$O?cH_X!JILAYtFUw;(Z<`1HY8bv0b`-PubC%I1vC9H7_jx^fN@@2Yp<+%YfHM3s93VGD9e0*fx z&~3~4wN4K)*Lk0N)I31whhw+;eQSW97@2 zltT z;V|1_(J`yhlHBq8>`@SM!)86kGhP`N@uSR2dB~Ytr`=$E6Us_=yNAN89J@#(L>YJ_ zN5tg;UACLhNmaPm8sIBDxHs(Lyu8~`G0iDWvyuL$aOIWa6Wvz#&!=LjeWSZzFX~nG 
zUiw*|YL0!-FuOYAvv`BA7pcY2&VCaFRApiLPJ@9WvXk7BE998Q&fw&=zd_}XTLD}; zy`*&?j$Wn~7n~LvF?TGgp@RjU6zguf9@nI^sRMbukaT=996*e?MIhEl(vpHPmzT6p zGZl3EhmAB#aZzZ6_$-^)5R?RYd@_RFWiGC3+@0GUYu5&< zG99xD@>I8GVI619lgdw{dIJ2s+9cQFu_?6$MlOY_fg^`TqXeFj*tgMt^aWU<&H0I9 z6M})77{W-0UxNHi=cvgV)#CK{e#cJr&I+y&jQ(K~6o9o2cKEX%=b)ZTU@Q^wNNleY z78&<%6aH33X5jN!XR=>^S)5z7pqqK&wp3$}bxnY28=xyNc&C5wS;wfYRMwe=EvLKt z^!?hU^tv4Oc>DWvhJsMId~-0FK&IVLdznstvKD=odK z`#7Uo4wzZLWX~?v7VL2Lz5H_BTWc`ZM)v8hlCM*YI`V4A&ggk7aK8^qaSPBj!&mWb zXh1;o{jX>;(4rierp=)_aW%L2+P>&`GJI*FS!j8qNl(@eUnu0MEdMl{(3FFxw$j>R zpQlpEwgqZZ8}U=@_F!?;0^;u?opas*u}<|O$&mbn(gy5_LQ@pM*FlnkmUk> z2ge{yLoF0TX}@0SS!-|b%ny9M6pV^6$_}ah-GPk`IzvOw-aBjm-jmyNzD1Mj9xOt+ zc@49f?uk9)f9Q#=f23Gcm|Qxkzb|qo9+ejOcEeV63i_6%c=t~gy@PLB$yD6L#o!?a zWYQhfuzvKUKohkY(MW`i`V1; z7l`ya5zwZ{&L7aXMv6-*!$XNZwgr04cO9VB)(($>h|8}wP#>Es-O6t0tfSWeQqr?z z)zO+$FgaGuSl*rYKGBy3x%Xf^hf?(Q< zVq_mWtk!r}NO%=P_n^b)(f2n4ylY3YGHFs?@V8rX!5#1n4|?FHcpII0`9F7H9D#R( zctnnIeg~7M!WJ$aQ|-091qbRf1Or`JyD;(lU!jqE4sAyB{7xwv^~4n0C1EK zZ!Cw9;{315Rqqy91i2G{SdRJ}fePhoP5-e4zbSXduw4g_IwK(yTU-oTAmn)bfv48H?fSSDr%HP-A%*o5;=^x3Za`F_gK&btt%{g&8r z?9;{Fx#LFwJF{Vr5Gf20s=|wwgg^`! 
zSNswGwqQInQ9L@!Ie0P5*6(!NuU7gAoCo6Q4~}$=-u~nC_mr2j-iOV=-yPgReUsc9 z?>P6_>eu_kn$l4zznqL@$OV#`!#1q2DFoEN5xdcC544qSj5No?*jihmIyB8+`JXS; zlxA{Y-xO9w1C%y^1xnX3a=U#Thk4b$2(kKL=frLpVk74G67CRbKe)z0+)SM01&Dm= zR?EPxx>WYgYA0HbWHU;$z&8@)X)p5*Vm{;8r~_s1Ey^VtiV2E+_VL$D-7L4pRVgRE z4$-YD6K@8sJiv3_s+&Vh&^MO809@!=-gKkGXGhQ*9DEocMHqK{C~YPjNbf>cN-ISd(U2(v+J+g*>jyTme=7Cc!j zKNMQukTrk830gi82(xEl#TEik2wId3x$?y7A#Fen3#;UPXCqkkYuQIO?TwU`{B|iVZ?+@Z2rmUQ-h}QOxhTDE3+-uyn#a}0{#Vml zjMGJnj<;K=Howb@tMZ^wzTZh=Pp^ z$}tuIyQ?Ty?m1^!9h2&t0cEN2zv?vEo!Id%vB5mkI1&T&2?%#-D9 zT2qjXa;J>4XG^V|&AkvYti zwuOiiS^gw_VoYGvBzIyO{PlV}A)2+vIo4=4e7Q!gDX~RCZ|2O%pY4J05|X5DVjU24 zza$(Ia_CAsk^XSkYU#@)NfLUb$_p++$VA1ypc+g-V_a5thqwHE=Zxx3bn5En~6ejIFgtiJ^VF)-J zX7&+|GdU|Htb7UG&@d?7^2$0u2HsDzU+T}l?U_I=NsGTCio$$YgW0dt>3|7Rp8}Ab z|Du1Pm!GspIiO%40DgG(i3a9;t5WCOc&mct?{XLm7GYEl{Fn+m=zV|7DNBHVO-1Vb zchgeXRbB?$?e7ko-|do8yhkClga<;&!gRqBaS;icvDl}AcZuyTg(P4onag*^Pn1Yq zW9@t9HP%~$?7r-b(w%ao-rR$dAY-`wn4|mHUV$C?<1_l0WBXXYehovpQh~ZNcr@2p zdhoe!0Sx7JHVc6!R`-kN&!MNwOj-Jgitf~rrrb+53N;QR8T|kn`2wdQ)UCZ^i-nu6 z*=?VSd_*ypdZ8f6EjfTtB{Lma?89YFyn#(ko(i#BVy_D&1tq&wbZ2YgQrlK$PzvHLet^N zrS?*-o9|Ah#!~!kF2c z{$Epzx}| zEezG$)Cw#<^ffyu7=? 
zwcbhz1dJFc$%@aQCrzZi=i_>5a>B+GKZ097tXMKevvdU#(Y)!uz;**6bZkHX77sCK z%7EROJwaBUQB%JVC$9#Pu%eNrpA8%*UnD$Jn1 z|JLqes3>Q;Ok!HQ+DPB*#NZW=T3@!2;gVF=ohnfk zP~Nl!Al>Jy;;YGPBAi%Tr>&`VPg82FCU0q~e6~nh^o0hD+{Jl$ES;X6ro~t@m`I47 z`CD(1Y>AYw6Imz>9a?6>vo?}I5gd~EA^0(iPB*@-hiR2D4nb1DR@_*+57K|p|`tz3d7p}ismI=&+s6;qq7uW>4SMLo^e}TC~ zn3Y<%iR5PgIkzY5mTdO5k8GRHR0|HW7^dGybJTLKt8G9oiHV&EfcKMF=Fgvs{RS?H zo~of52vux|eq^=)^@qaboubAac#QzecC9!&)1>MP;TTSIlJ6KK%hfOpM7^st4S{an z0M*9TFbiR|%rOSxOQ}yhr6+9HQ7&G3WN_E>4X&Tc;*4eXwH!=h&>?LE6iYTPNh_)@ zUud*i`8{DN6x~M;V7$S>%_teal8hpH%*F9BZ|U=;JrB2 zuMAah%0(mU_-d-@%;%Qw@AyHMujCKv@uuS$@x;vgjnM+#aw0cZgfzGdr~B&ZYUahb zfgkeR>I6fJOP6$ODO4&|`#2U1g@h*?fy{Enlo1WJAtgf(IQ7M$OhOh460JEcg%SnZ!C7v`Mys%lUB0>O{ zLArU{=7a4O&J+8QC#tU5G*$-L22CLjgQ5x&kL;kHuk+i|e_|dA$q2ElsXY`;rojqG z{}T2;+-i6RL^cUMYG~=bP6WC4Osscc&}ehbGM6xS)!6LU&4CH{8N-rf7@tT`;KVq- zOycl+fLziqlQnv@FwfJ5D$_FEU!}2?_)i>xy;<`-Upl8Th+DwQOrxK=oJVPX3Ewe# zna{|vq~aP^lNiJrU@kM42@$4H2I8&yt_1DbiO(hh{9Cu69R3Rujau5tp&Q zbaO&}gAep(Nw05yxcQ1nF+zCeId}s~5QGdA2}qK|;zzWS=gEn7Lm(xpcR&SLJslfS z+5OxA=Au-d4QT|3RLzY6=G<%`(yb_5Ty&1Ql)^NG7y0~~e&ZKrVPr7tW6E6_>FDzz zMy9IlVheoGTannH!z4rJ8E%;NSoaQ0_8c?riDq0ka3arx3d&L1m?ZP$sf2s%O{{I; z8+ko?w}P2+&Eo(*j)*JtyL1O^_#U~+&cI^8*nlIUtpUaKtMX$ zoCW)cT`iDE$tg-`!Az6>oN{cpvbbuKj+%rbT+mf{C5)WBn*LAxuDaQPAvTsa-i8{R zQdr?!-d1|Hposq||I=Eby-ggNLl-~pwb{zINKXS2KV&-;>o5rlbFd%%yCM4BBkCC- z?Fq_2C}n_fz}^$It8u8V==&%H!iw@8E+EFLRNv4isDZEa<<(lWCl)6v9eu@xT0saS z!lLNiwDAx3)OoRNjDTA^3ukMW3e^7A=#X!=XSd6o8#1*9SD4g6=U8&p)`0sVeM&>h zV`r{uhHqnr8x;+_Lj#Vz%NVi^4gWO&y(u!CZ?l2>yQ;v=}5NFDV4fI z224Dn10rwe1(Io;j|J+%ecnrSl}G{zIf97VA1?sHv$b&cxL9Q=yl$R{1Dbz4*-vnZ zgvwYvOvJ6pYsMN~KD7$CnobCMyW|VSc(63H^~{=#JY3A)e49QBUwVmc&*vGSmmDmz zv|E{Dqf&^kU9TdJF})4+%dj+~6LAD)2d^@`t*Nt+_C-oxV!Mni39~9$CsiST8@sd$ zj#-Udyd*_+d1>i?`0VDYVvFT9OOPq4b@H)2zNMm?mL;jT5VTKh-WWPRRo~4jW>aE(L8}Qs8Aik)XIKekFm`|&(=x7SFZzSj&8nPiY63Y#jT|Q< zzY*566SS<<)ef_-xK}81jvU9Et(w_S3auq;Poq2Ra!61~@g7c=hB~o6UTG-xWqxPY 
zNU8~ZsT>EQ|tz33vVl!2zCkGGGrbiaZLJ|^&%Z!pyEr@#w@@Hp29^B7D;kfcf9S7o|8A@kvk*kvV zkC0EY_}4ZGfBGmSWs8M%fpA0OPHn?B%1|#~qa9NpZrGbUDw}KERZNO}N(I9`mF8*+ z`sFkU?G5Ivg*P2l7(DnN)aZC!?oG4RxTx>Oe;H8yH%NhTR_kUYrvePty`l;IXRrW# zB1s z@ZPoOYG?EtV z21*Az4JSRIsd}n%?!YO9DlmB2JkN6rL15eGOim8S9+}(CZqd!jXSx_I{P$lH_E(^; z50E2{f2}ViV`V3FBN1vK70q;me;Gp=Q%)m=no-2A=_ikvQDmwSitS9BaOdUOS}|P6 zji59{OmJ)ofd_b{elzl1cnlY6qm;tr6vKdmr(i4QEXMb|ecSM2P0tNLbT)Z_Bl0bk zLuFlJz!6#9pELslIkku)_gLk;t=qP14C>)KCw;C4L6;pPiBina)jsJ6{ZjgX~ z8+j#3;VH<*?hRAE*Rc;z=WbERExT)sQ$!|?Kn>2uV8L5$3mU%bN~RGGi`@6h zAmWE#=K$DMxa*F6dnYU2X_tx4_N7_@ldG-He>;27{bI3$dK58R>N^p4}(|oTA zG~bv^CSK8xnJWS;>#CX#BXFl>8CSYcH;^I5x+a26Cr=p%abby`Tz|t8$bo6a0%;CD3N3NvZ6pmyP<5Xn@MPX4 zs9i6g2xdY?T5rFi2+os(P^vuBeP-(-ZCD1MNaml;nHQrPV{6oDaeEl8-z^9vT-5Y{a)+L9fJ();LjDKoO|1BOBE4Y=p@l4`&5tgbs&sj6vsJKl& zmsQ~(DHfn?`kAheym#=+_gsXhpNcLZozRe(r+@Y}y^O z-5*d6kwnx)QrI6^Hlya299dyaAEIeZbTmxnsoROxBQ7<$A3ES^TmaXirdl&LfM zKg=zx*LbBe4UuPt-z>HDCww~W2Ce(!u^KXePvP&F`#Fn~j0R=YCt_UDq}$cZpRTMh zN15kDtSC4~xGIO(S(W(N?A6c5+$0&NKg6GbJ*tI^T~^`F5$uj?)Bt_6POEUo^fp&D z$n#j8HpQ$XHyhC-#Wx!`UZdO15N?BdxYeW49k#H%)L(3lHQYj2uB&~OnT-)cTA;rb zK|b3WQE@dQQ2e6`pdJ&?I00v0cOfg=uZzBJuorTKSEW|^?j;9ovS(#ldftEIP$||$ zJgx*IEx4>>o4iBungK7YMWtNUHVOnJPD?m@D$5td7IG;g3as2IwdSJhSN~@Dj)$)V zj?K#GEa#GF)(l;IZRXgD!#>g|^n+=+6FO}1@pzUAuUVOG6w2FrNH*6KPqrd{LIL6h zXfYL=mv<;&qtJ=lbSAHEr}e34e1j*icHew`A}j z_m3NX+K6hn33Q^X-@JwfLKhx{v9l^T?E$6U#xgv93su=F@{1?**{O}}9`!3H1T_!w z$OGLsLJG3tWdKB;AIAxH> z!djd6y)HK#oSS?sUvrJcT-fWwQc)O^16GU>>q0W=d~B4o@>3&fy0=&@-Nr9PBi=ix z-tM2(gR_zx%!7&C`U_P=DnuCtbQS@G%_ZhGs3 z0SMh_ynqY(9hf5sbU&XOVBsL)V;QBE5PZy$HT?yZ&vt78>!pydxbwf9>+&9rMDss$ zeMai0CnlXxMX@)}a2ddJ&4edyTV$Y&0*uy@G$6s2_gB&$@I8ylUxVGQMbEkL`+n|m zcWR&zJwI$I;yT=<#9=i(Z9Pj_XJP-& z;M>)6Kv4Q~FUAdW7Z-e5EsO%tjItgt)Fh`Ak~#*b2K4?W5W_O$<@Eq4IVdo&zR!!cG6QHyyv1 z4LHWI$BYJ!IgrO1GOKip(K72i2~l(Q-%03FQrQk`|5;;`_+}OArkjgf>#@{e(iq{1 zK{`h&05(CCTCgBCij_!23WcH6hVDGy*%qpg6Z2Zum!tO8bzHQ+QVwkH|D99m`_n>` 
zVhdP~Z-XUKRa0mbmgSQFW0qxpHZ<`0ygEgmQmtnSNPa;Lho~Ug8NrYXLH0G{!M*|7 zgsCn*sGe4B3t-r=lwLUUF}lV=;-W(rt*%Oz0yr8M%gKV74VcL=p0U!TzSm#)?7(d~ zn_IEe=qK+Fw+ZoK5$@HyW_Y(k>LJT7I`)UbF%pxNPH!>67C^{p&o)hE`*VTPrt#Oj z<$7AJSimU5reB#egdmv@6+-b_>a3`SHJduGv%JsQus@k_ohht$%Zmt9|LtsAb(>Yc z1x!OB#WkZQ2WUG^Z=I~x3lBE?*DS-cV?(RV^T-pm?k{R?C$mZyiCSazp9tu+Eyv42GA8(GBQC%{64??U8av)rbsBA9k| z_KLe>E=C9J{sNl}m=m3Ql*g7qUy0p_hI6D;KW{0+Z#4f03@w7TZ1*~7}NG3b>|LRh)tyds}DxjbwW6!E%nXTQxPn^>`t zp>mu$n6Gdn*r}H6i?nTxz^a@+zBWp47e^zNfjRsN>qyq0yX8_V*_g)n z8F-tkU!Y^?d{3x>=d;nruVfQ)s@2gYMy_Q=X~AnJ}7a3 z(WtOs9>x}V`vrb63%-hCq`ziRBkm$sFg{-73&c9=E*fH$*8ZYE+CMo0$}oTl{<2m~ zyL%-8Y0(iF_zwq{z>5qXSJYpG&n6In@44_-;W4}wdw*EqO5gy>U4%iERsJF{;~Bsg z1_*)_cNrK*2^5MVX<&ZtQmRzLZOp9h*xiGG<*`?$W z9#RTeucQ%+=8Q4X2rd?3wIRGxk^t9|=)XPv-04N4ir{U&@9w3|*bqZwDAw{~jJv+&kbGQR6<7E>Q&ipXfEC1jI|UX z&{%`vW1728+(z$8v5p76&G2YL^Q>Z3O4o^##MV~4gRL=W3!gp*(+se6xcMly7AtBF zTL)^xottAB%Ka)u=@|8Mouv4+osaDe|8J06sWs$QY6E|AD~KP$+8Pchy$QLM!zQwZ z+{$5-8jmRVSv81Txua4WL`xRt2^x3VqZ&UX|*F4HPNUpa085HpVj zluW`DsIwHiK&VMW1(eOIEMRY`_5vbj6Y(u$ifHZzZY#9`x0Ta@XvTFqXwkNEDlj$L zR$c{--2Q)ai?)^6K-)@epl!Igg|?O0K-)@WTZ&^!dqt5a1dr%QizstObSoRvAJs!)2vVvSmFaWUq5 zj`Fz`thki{GqAXDUF(TbSYKd?Tc2hDE}fnRPzpvQq7ff%62%bkK}fRNj4IgxLRR9! 
z^aTb>{@u{g44}#S9lcV~4JES45v<+-au8?*NAq{VbqedVAgM_~M9mWjK8COALw96I zO*4O`!cYbX?U*f9E{?V(uokBf5Q{W=Vbr~eS_L(@-2hUM?U6AQwfj%kmW*5Hf7ze%$M_dpkWk8QJ-+ z=c~<&eTaH;N_{~0?yKTg&qn$JU4y@pI`7ptU$zt6PDnU=BF$F*1qgq9 zp;ZC~zg<1o^4RH}cnF6(y%TsjcYG&O2*-B{U~BXIP6pZBq8mYHWtpOvW$KV+=7T#~ zl>^nePCU-Cg=}FG0QLr}lXQ6H#H`^c`0nRiiC3;H!?Npr(Q=x}@1&VwKD5b4FXxh5 z`35{JjN-6K!>A|XCXfhm6EbmI22P>a zg50W-TWb7Ob_02Z{kSupDM-j*;RIII&C2pH>Vt=4`|wQR25ggpuUshSILLqZexBt@#+5$_gO4nA z3@=J)Ab|Uyj%e{~qNr9Bsa?XZMnRa;MDzl)40i$@crHjw_$a`75|kr!_^jYxhAk7} zT9S#}77h|PSzzq)*Z#naNC#vp>VXuh}JJyvF=} z@j8*$+yVk;%gy@)r{DzqyiwpzpWx!MPb3mfgHfYAXif$ni4!hpQ|f<9KEs$@FfHcg z_}OQ`93EVF)A-MKsBs;R&?~5M9k@58ho7^%GE11VS>H1Z&$L=vy8G-d>1Deo5@>T7 z0;zZoGYe_9P0k;Rv+OVvK( z0y%mznK0|~ZH8+YzJ!0EniT|LkB14$z!=N~von7UzeBf&tdg*2|Gm2xpM%fr-T%0I zxxdfd|1v@GLaiBAWS~8v=~6#Jo-a*Rp#j^f+8$G;sxW$Kq}w@FmAO1s6N!j@+?0H) zr>s=hmT{{W8y4F%UKNtB;iyEDzs;ED1&+!JmSJ%+e$hg0$=84D%YF|3d8oPJzwnV6 z%E_^#)=dXFU(^rB7C)g}$epVoF2%ybGBH<09P&9DmWh>)&}Y(ON5brix|0DUnag@} z5wo3fg3Jn77ztNa*%z{myd1I;2~=28;Cm6iE@mAT@@aS6M={oE1c|}VxZbIP+Drrq zc$YScnG-^|uI+!d$G%u6zi8h~B2ET}BGoj6SlBm@RUpI*RfTNpBT}5Rdq$&$(x(_c zBi7QKm$>!LL2ab=Iz!v88jCTmAk5EBQ^z1zF;?o3tk@ZhVzDVWcN$^eoCxxzr_Poq zL`b&W_YuFCuohhlK(55#nr}<#W>mB|1r4}e&qI21mXv?|avVw|5s=8Rw`F=g=*&i*HmY$D4+3nOKg_Cl_)w zxg2s5i5P#=llEe18X@lcwQ!WP@jTv?XjJ6nr7}ikuAFb@r!K}K+)0h`O;6o8a&TKV z+WWvz#2H^&<4uV~Aj8s_E>>^l#Uvh6?baC6rYl!=u>q}aINb%bYV?E9s#)*EoZla< zDN@`Xp0ea}kG*;f-@!V#p4E?CMHYZ^Obl6B)G2>rA`&9J<@W=j1*UOfEz9Fiffks? 
zF`pnN%8!H=7@1m4&cPNKnnD0MD9O2iWqHbI@_1y@Dv9VNW&;N%7KS8A?(fMS&3Oyx zkx54-x)hNG=%HuKHG{9e)l!V4#!`){A6l2;M*t&L$hJm4z9C#GOH{if7iKHQ0r%F) z5&eI-Iqp5RQP7^v#o&cU?BTqS-QOX;;)RLy$~R@E9pQcN1(4f7BIm zr<=LlvC!KzheX$Et+K-*O)Oe9y3(U3ac9XdS4T|a9FpJP2fgBuiL|i630(zkh9lwUj!-i{ipzdt9!^d59nQxlu;IX zzp6*qq+pKn;+_Y!~a$$Guba6a#^S)H=6Q$UCf zR_|?~^>x;Fmh)x!M)rAC_=@ZUi&I?d*u9m<%jpmX6ZdL=2rTaN9co6#J+e%VUaNl% zxa7ds8w!n(NmT+@+U!kd=p}T zW|j7)L$eKzkHW|TvL$Mq#!c$$4GQ>9h;DQ-C6mUG=Z~d1pE5+QxS3gc;WZhpin(+_a|VAtQAC`G zHa_IWld;YxAeqFymETX-Q2;E$X2ICCRlf+TiRPsIgT@AkEpE>@4u;8*ZzVY0ewHA28jv#lnYe9G z%CdXgAU`x?uH(w(UvMnsoHE;S{qmV`!Ew9N#?IJxl3wkbFy&pA*jj&&yqpA%4%xT5 zr{Y>r!aA5Q0u~%jX<^Ab=UeV|CEVqm^MdlT$4VE&lxyp0nVu1&+zT|VGIm6$GPWO> zr7l9r?HAx57opOC^COhp-&BwzRC;JmO%2~`^YyPScFx5;e5K9rkQX$k^ey7nV}Kj` zm510Uy4zgs0mex2V3vROTwz@_zDK4s4q}mAs)MfQ%IE=F&efjVQwtdTz*KNyKtHU% zCX-HYt?AIm;$AJC836Q&{+fcnYPbF}L`ig~LzK*Mc=5%(E8t2c z)xHwP`JFb=N;3t{6aitsE0zHv>ma_u^jdBHpYci8)~_1RnR0(Fgy1Z0a1`>+0a-$- zg@lq)rafh+QSnr%-LTT-Q?T3-X5}eZbn6r>xXYh;m5;%Ki0Nt>A9o6t7Sf%91qw6% z7_2id4{DVvB-2}kiiIQ(v#S|9zIc`&b6x#$SL4}po8F#&E1HCw`^udU@iqcS2&sGt zaEsxiTa*a3d})8M(DK6?gt*^lKA>sW!Q>)deu!_rnz zUm7yiFa14E(ztfSGlD`;wFIA9Q58$-K7!DDz` zSd8*+f&W*mFgp)8f%8qfk=`&#y;j{{D1Ch;Nv}6)8k%EPGZiWaU~xRih=+rXuvY8# za?vYZNP0Uz?&Y?~%U$cdXzliXRo>@yS~J;m*Pa)ii5_c;-^2cLnxiiTA0?(OX~gsA zHfXkc*G|^5XZgL5>3K#xaQf%#?p<8={|irPwY!r}PvL)*R$+kemtXwH?-wb>U}rO` z4L@R>20v6kKZBdZl|FgKig$=ZFDNx@eKpQrg-XMIzj)^Cl|BUm+jk-Xkq8VXlJH-Q zo3BE8lAg#vmZ|DV4uRc?vnyv=+BDbkI+!?~e>Q*bpZfgm^5fUh z3+4LlyVrkkTwPwfqrNI@{F}dB{yL6&_Al#S?r+y`|G9kmcJVB`D}4$KZQof*iyP8% z9`xjw1s`BkE8#m2DKkjx#l#;nUaH0{oJs%E{`D!n^826C`(|dp=lsuN9ncFyDRmvD zXE{ilw*4VU|qX5EtAEhvY zfO-U?CpB0%7RvI&h@bj}qGwl;_Ol%w5oR9quk%*}7`O#OGKK9{x$~q)RHwGj(P&YG zgYbXCObE+(7-v5cbK~4c-9}$5Ofk;PcI#<{gMAie(uXR(@X6&rA8US^BvtncfTY zla}5eO&91{GnBJJK%P^BzPo4cAi^kyovVL0!hxNuaPaK)gtT93tG@F9vTIw&(PT|$ zl89{~h00Kx35P8ZZJV#lYdJCi#=$%dLZ@SaWRKVi%<3S*;b?P<9k!hJdCQA5AG5wE z)9*72Qbv^Pe}l@fNQ$AM8S 
z2W;PNGuETODmO(C%m+X)ZW;>UZ8}?=|I}w{5`}%HCLbALY7#xHsafOmiK%%{B%P)v zkp-KYjNgauS8r@Umx>342exWD?m~aw^Hbe=n4#9Jn&aCzC(? z#uDrRd(&t)*n3$2=und4T!)7SfH^!w67290;b5mFU%v~8)x41~!mZJIumyj!VIxcL z)kwcWl}Q^nZeYDAD{SgzGpl|lKSVZl0#st5Ya_>)rb$o=)oqM>Ca;SKXGiv93C#D! zH3PJ|QXBwHM~`tVgmO>SYFbo0rk{f;9hbo8^n0D=w48?RibMGr!4+5z1_neexaH+? zE^g%kaJZF8qH!w|UX3lY1TKHf+=X98uKp#HHv+|ig*p{NiGJTty=yhbeN;iY3!P$6 z6|cUKhblyiMJA=}a3f=Y^5OwdDlVKW5s-p0xp)C`x;=aF;1qK|%Xw7O`0*f)o%5;0 z(53~PjA;g^o~Z{fcyaV-M8M32mC4a#^Jtm&EFT6qb`}8ez(=OMX4QXVFwHLBC&EQv zr_AQ??5r49OFlo-5|)VvLW&LU7%a-yx|*B0v#3Ic5|J~DD%b$+MHTF!7)1D30`y;s zz``25*V`o!m&7!piYr1=B7!rY3nGFN4yRH`fFz1POHwF;^9#zYD`5t?Ppfg2N%as6 zo<=(vgD{jdoV~d>rj$kG!$+C>Sm`k_K!JHF^XY3(@p%4kgtvM*D#C zi%3hWaj+)sy4sREZeRt&Eqbk-ut|nhFtCDQCBH0&)q;%C0GqmvsgM{nXi0_K;Qcj! zh)+G1I|33X)pigC_^g5t!j@l?MH>~MlhC8ZE0<8|2O?Z*%v^scp@@|p_qS&?sF?8F zgFX??b!VN_hC<7Lre2A$W57(|UW#An1`{vV3U1ix%Q4MM%9)WwoGWBjR(X98Lx>qK z_K>PdtILDV?mim^;zWmx)M7u9w$*a%ndRvFU|;1jFG!*-h8K%zK|Jhv+>m@Z0!5#C z+KLtCQWx?H6Z?Pqc$u0}Es5D3YdfLHT+g}RLzP2Y+FNwN`i}FOp?Txr-^*kOWWsR? z8*<*__n5q~nuk%P-84fJ$02E<3jtwDU~Z%HRSPWM5ZDIgb@Z^rcwl`Bnvx z6iY~hFIX{eYB5%qyf3{H%G*pj)|i4vSTzromla^~43vMgnTD~LcN-?Twv;OjlN>Uo z#xW@q&NU#PSQv&!>OL(VsO`V^;`h5uCYh_G+m2&YE<|O7@upDpM$gbo@J8LT9y4LN zrUeb93wb0yz8@j1dymGd9j+6E-A4#;gHl3=%~#_LnJi5@!DZK7eoZWGh? 
z8{8%)a+KS|NWRf+Vq75IrtR+D;Wo9e?Svw-ErE$LlOA3=f38;-oQ6qxJTxY0Ud-@v zw~5IS>oyVLQ{5)4UYOfNgkSD9F`Z!DCcbI9-6kgaa8s-6(2O3=M(7Y!B5{B-+aBj^^r$i5a17`;fIyuDWlT`QUn z`T^-is^p6?WNhiKnYh~*e(auj9uI6%@QF0cTZy(1 z$4<9{b{P$Ty41!o+ZWs+!c z%7l^NL=wh=6Vd(Cm{7zL%!GpKnwwCh>r+gqn${sE6!CoyPGu8{DHCr(G1zHADchFV ze?mpyXgrNbK~waNPo!nu?(vP&L-S-Qe@4G+!wQ`u{ac!l_W|$<|?-Pwn06V5cEN zfC+kMf;?9ujDnVo0X3G7V@q&c8Oiq)aTf$ug-{}HjtL(lR*F|)XWXVGH>hJ>>I_MCSH51mzI=V} zvF?y*O@6Fr!ti6g#&YMe?%dvEe|>G#_&MDV!yW6cl5&R}>+Uu~+k32MlIUZ-!t3*6 z9lYZBu?}5w{8-;Cy!X`8@?+h(6OZ*~rAA;h;#i5mNyj>|13uP?@M*_76D9UoXN310 z>(EktI@TFC_+yO~ax zT{q_=cjc^?>*hocd)>Uof9L%ZsF>QHqL)6K0k)U^J=GshM5SBej9+Xo{pylK^wMX9 z&62HK`*^7IHQJPpxkmjdpOBSV_B>Qzu>pQ^>}vu$+>#?5sYDXeRX=p)NRE9)2l8cY z^(S+y3s`6U@E50ry@#2Pz+KhTZ#%#}Qvfu&=j?PLK6Pv)lHiVwe>Gko)SWI%Fh6u^ z$3_|k+_8~qTa7R8*vLeV>e$Fgz8n*%VijW0mFpj*X1$J32P9;ZYqMv1^nS2Ql5A(Xo*$7#A>S9UHe`(cZC< zKi7guQ(FcShDj6he+DSWq)hlN9UGY(u^k(U@Tnaev3g;Zi$wV49UGZWupJxurs3g9 zb);h>lRT=?J>w7C=)N}bN8n<6TQCv@K%;xk4$5t^$kvS9ZL0A4+-=Gao$EGPXf~9& zO{DF2xJ?$BC{1pY-Q1Ucm)m43SET`N(`I+?ahsagc0>`seB6S+k{=C%xz-2J;QC{3O>bc+JZ&9+r*!1!6dow{%toV zdczf8Fk75rut~2Yh5ituKrsE5 zGF~Q01YVnbA99C^;+miL{e8~f3QHOf+zo(Fq-Ear|f9ae_I|3hEON+ zE+PGj1m;&N;KVA#MX1TOE$1_$Y{^}2J1Fgc=OAc&IVQsswjgm*2+2byge)#Mx~KTr zl+&{KKDy-e`kqhHrc<=|o|l{_xjNg?kmw#$&R$Axl-znbl}qlI+{#8{$sa+i@hKauDRhdqyjOIG%&5pDQ1KE~k+yl66ku;f@nz?Xthj6 zE@-Auc&vjcnHBC1rJph^bDpEegOQFBwOsI)WuH>ml+rqi_|8)^S`0F6wp}4(R!~y@ zHmZifXd^#(x_D&Pf20!W?@sqv2H(zN(F7zZd^>N(tS?;5snhbf|4^|~il%6L$p|*3 z8t~Hl`+JUbP=Ym)T-~X?25iN9{gP}nL~680M5#9!-h|mEL#%Exyp4}3ShUNqsqy_I zw-`!?w8fA@Q?z{>W$XFD&VjKRJzz&PX%5&CLDB>ERYvb0f3Ra#?&No>M8dT0&+Xr1 zZ8kL$V{Qq~kYo@nKl^k<4GA)Sy-8_jD0aV)Se%k6gw*!nXJ+9vu#Ar!&V|~8NqP{+9vhQUDb`y=4xqA9xOap=bgrE& zp5?65QUuw0?%c?ZS1cdHk0|G8$N~h=Nx1CD?qP?!f07A}kIC3k_uFg+uv00_cTQy! 
z?pn01nqNGO_ZW_9{~O|gBKoEd+oH{;9#X7YtezC^G;MJY>)JL8SO{=md3ZCjV94Hv=OA;hORNX zvkh%Sl1|QLs3`|13#`&5X^M_XKt5bW2A*8f)Glw^iZzDs_Gjqn@c@$jb$Ya+Qz=P{ zMo4E?2TU86LHss_bFRNpcua?9U4NUOB#UgWf4>nP_Ye;OA|`L9lZ9*#Vmn!ewjsG5 zG(IRpNv=QYeE>?8LVFsgWDm6tPSr@(<{~+C^lcAy3pOifSjY=Wa|4P9%FUa$4r|A&|G{@|)e=v1V3Ak!xiXXnn}$T76|Zgq+^nsLnD}C6=~R!NA!+)hdBwd{73b zK3jxt6h5@f(xxRXvmj{>Jq+=1sjm1TO<6rgdv9#t9SpU7_4uHSA$zuf+$emze{xIX zfw~~dp;n!cAt=6_iPx&rxP|kwN|51XC}m^K%22?nX*2_+arovH+b?KWD1SsN2izW)n|vQs~o;cNjDlMQag z0ywji)NV0Rgs6YyHOhrWKgGV_Dt}tCJwA2gC zC6zpX3iL`nK^jrQIZuqfxyv74y)N<#V7633&7BekrTB6a@ivMS;hM7rIx^e=eBnw8 z$!d9L5DY1;2sV)+#;>7})J&KJILT8bz+k!{#&`qe&B_{KqAjyVR}Q$h7@PA!Afg?NmMLn3DZvm5CB{e^u*tN>I}2>X+I~Pr6(P8i zj2lEm1{x@b1=d|6kuUNRmOV=bb`5ZgKZy#lLB$4p&aX^b;=sLNMiOn~0?4Rq28oh? zTDYgKdWjA73bxZ)Oc%9a;-%;-WAEDJ+jVI=mDX;!FLQ0g(t3j+mR1-k0_XL#B#dgZ zzuu^jTCEBJxAofN3ePuON{X-%f3W~Ka(yp$=h>8){vS3W|p4kv;?|I%0&{jAt z+eW4SE!{vT1Ly`9NZBAK&zFO$2*ikgy?v3fbv0;9&_STB7(+&_!H}UMAZ#DIQIXvp zth<2Ubd};I=USzB;EP;}_a=gMN^CXG)I9n$#@-RtfbAVm54sW=HJvI~t&AvO8AYZS zA~OUStwHCK;I1mbG%0dX12v6^k&8#P2~NCGSP5bxcgbtGLnpiyk;){zKVE}>r!<&Z z4;mHhqgfhm;GBUC)=PsOE5=wVk_2rr6398yByVOKj3kt4);?l68mJal0tAcroH5BX zm4VwM9;`tW_%5l@sp<&f;V5dvLdgebSc{e0q1MKz=}rO{ z^rQf`SiT+=4U1+vz&E|gNn?nFqM+p@-RES;PV6w#Sd*Ei0Z?pULz^MuZhay#6h_vR z79C!V6s3hvd|2OnBldNB*@3*)`JeANP3u{eSt8RG#BjpA(M~&mGDE1@%G=auQK@x{ zmQ*8EBz1#omuu(88|T^6mHo-O;Gvzn^|CNow#<)JpZ73Z2fM+DMW3U+;kJ}fU3hFI zy(>0V7wL7^5w5o#97&PqXYzWf-Z$8KJJ}LJcv?(Q&a>OgL53x+X)ms}psTxXNdrk% zVy+DpsY+LG?c&mZ!YE8_c=2MD7LU9NORI3(m`gA+raWN^;)hB@4&#wB@dU$0GuHrI z1Q3!1OC_NUFBCKZfe0EgWub zbIJx5qOwcsI`z?%DXP6{)!yZz5C!}&=o1aGKI2Z4{fTZVr8+{MPi!a5C?1^Hw$Ht_ zCfqjL&nK0`?WUre)NCwM4O{ItSFx-YHXMIpPhcpPxP<)1K;!Xw@Zsj&ZW6mSfk~W& zB!>FYa^P%#oBVEYb|)|YST(i{`cp_bJWBh0h__Qw*7#i8O=S}7%E&wR;vCEiHk{bE z+dQ;OM~gj4ow(9VZ498!XE@_g=Wy-(?RIh6v#NtkiD+~XTrXJ^OT zEIz*@I(d>}?5oQ7bbbuGwUgmJ7I9TEHomi+=g#zh#xU^FTmHK3Q_w&V2)pEOZZcL~ z$&)e4F{mYQf-r!q{b&q3rG>}*FYfLsIBYuYCbw+E6Sr?|pT312sxU%*f0 
zuXF0J3z|Q(V=zJBOIC)o=mjt(k=+8vip3tk2mfukRiWnoepop^b?t3|NJ(C6hTFqf zTQk;wr{BQ#(7D5J_YGquk<=gZ6ZqIP)k$HH&g&X_ZuRJqS)jbqz`V3n`u zLe(>*iFX*IjPao;;j({{K&G=yU*{f$z(e-PR*BprCbzM0JF)zTD)&&gJIwq1mlD^^ZbRp z^Z8H?!eK(!3~A|4ot2hP?QzuM8Sb_P*saNsgGEZ;H#h|@ff=eK+2_<%MunA7M(DI5 zqE6u!k;j|03460&8^=vBf?!G)wcZ0Z1N9CbjpuON(E~Ri+7`k2UHQL{xc4%Z?j*W@ z(+53qbalNH-roh>*Tut)-Saiw__QErcXj%{(P1F1v>Kj`4yj5rtH)Kny_ypaK$Ssv zytwNe_-q)_ot5;OM_Xj%?22ya-*B(b6qN_KKu5W zI}30@j4Qw)r#)JM1Bg(86Z=YuM; z1XHPP3^?yqv9`dO+5V-a-=m%W>eGp2WTI$Dx@4v)bD4eAAbQO>D>N0)Eo> znV41GFMq?A_x~*HN*Q;0qOd7{20rYW=47OF=ENsilL_x50%bB~6Zn|=gKG-ePXU;a z8PcbA7~$2lz!^-JzgfKDON9ov3C(Wquem0-$PI8|{4=gm{#w#}NY$y-w*ThNk0-qF zb%tRo8McU)6zkBgSAlX%;ai}L1gdf68jeWPZchb2rYde{M~2jbDm6%d75J}EqLlm+ z-mx)jR6vbQR4qmJr~R9PeCY@Lt^GADPRWcbf_(m0;xNNWab?om1Ttj6nB6+@i5s!x zct5JSV-vOww-XVM*>aAT#R79|nX_2_7ND&SI~%=)pU$Swg&~8H^F}3$uE3d(35ray zeXksIn`1L{tH@x6lzSO}P<>iiA#X*#hAG#%&2TUhBi>55P@5CH0jKES`q1*mc%E28 zXEGQ&^*lStC&n}s=Pfle_{6vj8PaS%F~JezjUiY*IUW)36djMkPj_9OcYq9Z$`zKg zc?Iz&WWcj|#kmgIaLcQpQ_kj82oiyZHi%G_`9{z44k{X?z%f>TcyL?=kRuLPD27^9 znBs1QpE9?u_Ufx1@p_6I(hh%k!wu_Wr-V~ipl>>;(>Xt6SFB3<$y(A+;%VZX_N%8I zp|(lCBra2d8k*}VS-&K@R0Y98E=h`QA~wG?z3f9u=SJKP6un~Y=W6HzW|4ZHAU)O7 ztJv-(5njU^w$Y-0xR6_{JH!JQnRSEd6ldTfvss~La_Ys1a89RSbe(ve2*(T~ZQRkh zd&KDU-st^I`FMCL^W|b0;R_ZUljzOoIe7?vz^1b8|k16@0%|w;A z%z}{}#+SN0{*)6JCE^+4DwYvm1Wrs(7gvGDWO0>@92eJr9Zz|2G5Qp7Wni@>!H z)Rb)5&Jb9$@)QA46SA9nnA z$;sy!Qo+a}#!DGvZ0OtD23<w$*Y{qz1%Fo=QNZ@>WYC@yhIbE+V3GAHo%Bo5Y*9lkW>^dreP)*911fLVKvq_k;lWy<2 zGSg@DrkNJ8DRqZ9y-Db^^Sc7XSow0S4lxS(Eg8jnb8Nv{E;&TW$q_}#{3#JEjVLOy zj;H~UdR7fGh`pK5*_l>WgS=if(mf!MtNO_omY{wbX+l zy%GE4?-9Kb-G2D3aAVOKw8trBFEUAtgg--nSt5P1Rh*K$40BGJnv=UVY>(#(g4Nj0 z8ONKrr=!I7xowCTqeUJI&KRj;D4o5%&k}>T8=1PXQ}^eUfX572-`8j4ICj5|H&u?= zT5h>y9Om>5a^odKo-s&DEymdh+csn&hWx6(h zQKo{H(`5OyE$D8V3^H=lWCE@)zIfoieaY4c1&c=RN~c&_O_|dckvT8Z_YK9iviz6} z=%mQnO%cpLd@v^@B!sv8yNIqM2K&OTxO-xfz*g?j9aL=|6`Bfb1l}nRmphS$T`|dx z#jcZ(nh}mz>W1GT9JepXS6BHdc7}(40yDIb_F*1+ofhILIxWWIU8&RQu}A52&HoTK 
zMJSvW&-Wybp~mzX{Tjh{%7+sbUq4bv>0vr9Q%ma(VYzL^J_X88(G8xmSV+82-8UgE-GWmsFi!&`3$?y%iLx6`LSWUZ~4xOB<&-F zF%#S^34D(o?{Gq_D~A};)~Grh(h5mDA*9_Qkivos92edHW5+CbYGo7{O*v*EB=Tgygq4;t(=69?;& z_ZhaT!wXFBQW&Sg;`H!7gBa#%@6<=y4qKu(sY{(f&KY6ZF7+Ckdz-PnmEO;{IWq=# z*wE*v8HXt?dw1B-?@!5pkcT7=CVCeez4EI4aX$DG;$6A>W+|z!L`o>$Rq(uw^h>zJ zB6d_*PW$2+o!Dh@_geC={yJXl-rU3l6VLk1*XqG*XCws zzIZ9{n6~%GHb0lr%=dr0!gk@8ac=<=lTU#u0XCC)ftvz^6tfnB9|I6B z)CV{<_sRnRA(aQXw@g&CLWP_R5Cxy=%BhMU>Izu?2hCnl0+(_30TYsvD1SCEGd?~b zb98cLVQmU{oaG(aZXLJvU0-pZ3xwvr06_s=S;});1MPd;1SpUeN&g?{;l2&;ETm*7 zwPERM#5p|s${8cdjQexL@b^1Z{HP4Q{r%^sU!I7-2*V#DZpBAR{bzb(@b&iVKcD{k zhugnlIYR_~Wq)Bo=AVjtzkj1lyPw<+YXP3qMeNhTH18t(b>kK6aJ_ppvOP&txZ zDFYny3&rQKkktzn(#$9m;XMntj4*@&7XT2h8@~^L#l~_c?9Sa)frQ`+(+I?n0a6rc z&X|cnH69U3Fp*fZuzwD-unkpI3#G#47ctld?x)xg5=qwycg(e*tlgeH-ab&-?bDLY zNaUTMLSjzCb;`HNgoWG?VJNTZ>{O&BoZ?6jl~h`*_1ES7n9`(u_~U&T*=7)>$Y^?& zJxAI7nGM;cr~;TfYH^p__)u)w;EnbXb)EdSb^m#!ecp5oi+}2eGkJLJ5+ z4bT(_OTm~Xo2Cnj5zR!$>jo}Mtq#eJM6pS~KOt~Cs9L%`h|@_=m`Fr$GoWz_B&woj z-(TJ3*Du}$f_KG;$PM==nR9<{Cg3KEl@xKH%JlZD@*;^8xN4KQm!wnzA_x=BL6)_H zsPTe$z@!$ewSO6%*MN^TLX*g7^4fzjh;T+b!y5y6Ha!r5IK&sNMGdCC^S(hUMqn>g z=+2~7MJmSxBu;9L>G(k;F-ffwEpkZ9wtx&X+-OrA&Eki zP$N(AAsRlYJ{V4q+9RDxRxe7_6k%Z9q(xs1(dVBxg@39j;~FxB_{F2;8Y9t#zhTlOseu&|kc00_C@Xi{oz`Zp_1tK`C z(+G+e0NH4;LYgZnB&(H2Bc?$EOc$A;`1kM_Vs-&*${Db+vY5-Ah=>P=^q*m@bZbco za(|0b3eAyZfxtc`$DPkEkONNwi0}t!7zPS1n{-)&-b+I`X|Xc{k#E+2&F>UEvWb!M%}7G zKu@5A&i%Es9xe!k*Isi*G^Z*j(G0Q4S}2xXpKM6RVe>B)s&)hcQwV8rC%fe@F5FaC za=3w52`(9wnOM}c4>#C3Y`D#TaI3RhC$#nJ%xL69oSZwvpEryJh40Vs&wl6dGk*jR zq2Tii{4@RB?OacbHnC1FG} zal%0dDYv2xy=qH=f!wI^jIBFN4fN7VY3Tqy#MvdUYz9twe-qvY#g^245CgBU@5yrC zJv+(TZJ!A02}T>$_k&w*0OM_!x_>W;mwOAs773+y8MxFC29j0b)~b9u7K)=Sy^~h| zSrgIPXtU4i0J?>DKmtkphKsI0a>M{eK6^iizO-o-Yyw#?GmX3LqJd07+e$Id4DNvm z${46-x;ExGr0O2knjiZ*v=8G%a&TRhPgv!CGP@(%R-XFharV|E3IomC0e_;5)>L=x zz=Xq_ zncL?Mqc#`}g50^-m#vwNtl0#3wQR$U91$XDUfp|#c17~6Ll1MH7qV+F2k25Qi~))5 zjROzE$fnL7=p$?eiY6=F%hCpA=Uco?HV?WKylFzH>i1KO7B+XeuzyH1tlST%J7HaZ 
z)hf^+=T;^-8)1aXb^j=HSMgW|Y!XSV@9g?!;H4-nxWuDL5$$X$67C5wNN{Rok_Cw= z9pEf=!h@->+IgGIz3t0`EyL*=K+sONl5Vz+(O zfj+nHwyp#0H{c_FUo-N(0|R@aIzJ7Y7tnk5OTMxB`8TTQ-&o#U;c@x6)LB{<^x6;4 zEPfC3e>Px()PL&-QhY24k!sSSjW*>dSz1LZT8yPlQM8as z(ymQuBW>DzpF3klGxL4@Ua#-#=O1UDd+u4D_5D2O81&l;gN~nBHcQoKoWhPdXAFy; z?>*YD&!yR{iyqvYv$7-O){eAN+_dIpBkf~bZhU8E+u#)~oZwrb@%1{7>_c4r&J zJ9jGIsG2--b-re3MAv3v;p@p;>|WuLWuAt*bz_uY*Ylzi`^{N*YU{U({Hzdl_l1i3 z_xyBjZDM5UhPdc=j*Pb8k1wqqaCk*)lGhXE3}#t}L4l6@qg`V%?{E3Yzpd@)Q_xXt z?qqzm)WN{YqvKE6Gm3hidzZHNkp(rSWebuBQ1xryK2|L zWxmM_ZHv|O@{B9aSN#mAIdSLb=cn3-)+SBW=Ooy$=Pnwsc8Yt!s5Ull^Q)MRqvLu0 zPR{GLSbC%+T|MwER%h-y)!+SBbTm;CmgMB0{64umcGN>9{Gn@X7%OY zI52srgL!0pNLNezH`>`?Pgk$$H%8YiVjn!Ydg+ZKwS0whOO_YgC4L>X@I>>Gu&$6f zne^I`$5Z%r2k-BBaqZAVt~zn0ZLnJNyk_TCO}DMIqWTC z+hwZ{Mc%hU((H-0MgQd-UR7rHc1;Y=F8S2P5BxW_zor=v30?mFg@aSwu;{y#fnTgD z1JY_}iusk@<~6i@@3(DUKbjTF?zH=))#VqgFmuo$n1!!eS}cq|q9s>c*--&n>6Tmj z)+#MZ^cuG{DSZ8_p-G&iqOz3h&cPFM1*-2)=C6HZ`AJYW#lx#MB0SzE-D|Xh?e6o# zw}e%B@Ne!};qNyv>)h3_l-qX<)5hA-FOSRf4ITd=vh0dkjmfM*6`psRt{AM3-F5gx zstT_<#3dn6ef_+p4O0s|8$Po8&X{Mj%J%uO!5<4owvL*%$Nb~xe!*3zE6p^9>J2iv zcjj21k*kg@a7|p5{Q34ue&*&kp=TypK|DZFuY_z9QmsYEgv|Z`((ban2ryeT*kj1{w_o;ljX$fbM z4R3v3-w(=fmTo_t`fJ~@?WUu@Pgr9fSQ)krzIdf<$*+jn4v)gBE*v~=Ju|)U7w6+M ze3wM5$jB&BdamS`?RbN)yVZ4#6%oLoTYQKmp1Lp5yfP@IV4?OG#iu??_c^ z+oyItqK|%b{y2kCYhq3xHct1rHRAxMVDQ&#e`Yy9ocJ@tuQTO`vT0d5-(YFLGdz0W z?cW-iioO$yYhP<;dhlnP)faWGKd=3?uK3uwGG57x4E_@J&!4PS)0p48k_3YOanuQ? zJbleVyk@N&FsYjTOLx~ApT(I=zMIYcZe&$0@ELP{*|9TPWA&7uTxvaXv%Zot|9F1& z^#O**4F?b9?`S%X$JP4PcA!}v{H~&@qf%jM!J?Mgm+s#q=vIf`nD=kGZIhUC$@tEQxyNfuVy3N9`)_Gh zSZ#@A-_msc?!v)JTUn_VVRvn=HuqciUL#gdUB9Z<)iQI~m|sja?y%U9;a1;&oM7>H zDr6WQ1rLi^c5f8pBvaZ`;Pt1#`;IEc3rtK zV41Q;?9f4fH{Lh57`4M}sY&O)gJJf``R%uSk}mI=X5adaUy+}`WVvRxa-w2VTU4rU zQ`OEOT3hGv0Hp=Bl?t!Jz8ijy7*)Ca?aGg~-TaxO>_)3wjV+x#s=V{a+{K&ppSq6Q z{GP6R^xn5mslRfJWwtBN? 
z=wD&SDa8i{EIe|&&8d8bp5g0m_2T`u+TU{jxTmk1qA^D|y6k2Asl)U*W)|Ayezf#c z+-Psz@mGxbr=CBVp8w<~`vvvP{XqJGy5PG5-wm7nI6JtbBS`DvMgyH;HA}f(HA70z zq9OLG+^yBh4D*l2^fIf)Oc?-Q9<*M`VRL&$3_Eh9bFu2@<{3}Ir%gJd)c>lda=OK) z3D%F2^1}Z!*RCF}+p=qj!O8Z$!w8_+GN1ZrF|NUHZw$YOA)m z+T*>o6y1Bby0B9?|L^nuH#}?5$@d4958R@Ceznh86773Xil^*(hnR)$QoK<+mf603{^Yhg1Q1@@E+EHkM<{e5=f4g?uol)Dzs_y<_UmHWq zy}>O^uo$9lq0%q>o92o9)yEdEwwm|Dt03>ow=JHh*H3XQUNqH%a^-K#HzS`X!C!f~ zs$Dcx}wK4a~h8ZHcuT4%rRNH{1p5f%s*OwCqsv6>>E4d*^i5Y3nLv24fPyn9NgeWE4;V( zCjY=AN|oY#x9Qos!4`-SlVbva-`emovl-#OY%iKI2CmaRk-n;HqisCgU!`kxO{OeBB^s7@p zbnW}pp4NCJIW#zMT6bhq0jqoU!Yhu%yXN(#zn;$z z9_h)9s%rUcp;WKVr;bDcdk zsPK8H8h$!(y??F3drz-ZAr6(vp|tn->_g!j{lDXl@*W0@1q8dJ_t}Z`0o?p~FI%nE~7>@}Dbyo%6 zy*i`$L}T2Q%Al1o!yhfv$w!uUnG-FOqh5Gym~9!IyHhbPSut=#OSFcr_o72jSubq+ zHoEu!x#!*W|)HiZAvG4fA9|IO^NH=8WlSLS-`C`(utPKE?W9m{T>N_x=v@#+=#q0Zfp8m2$$ z+eQm3%)3?H`o}0ur7NiP-lFVZHp?uR3Th55rx^*RZaN=Cxm=8zKJ8RG#y5Nt+wm(Z zrLS46(b8Qr>Z>O?CbEN~zM95rC;EOsap7SOA8dm@2>i!=h&K7~XvR`KgDr-iu4fK@ z6>#q4&_mHjk5JtMv{mM;+O+TlSVE9me-QFWPeZ`FGo&1EL@BkB>R&y!nQ=)~@c6Oo#ki<1-Fz=##JJwcNP6 zaJG8E$dQxgR&!tOd-!MYhatBj=REdv`IPV6{#O-nXxT5PU-6tMgYNlpb4QrfhgKM> z6Jrf-%o#rILr7t3E?SVjbj(kS0?xFB?SCpx1p20;UH9+B&+0P#RAJ#4Y1aJdLbB^< z{x|dW^yLLx)OMYZo##;)-ID@$7+j`*9P zSg-Zg@|nZ!(1R_!?#`d_+81}Jp4qvhwqpGO^I>P)mW|uJLvNU?@}$XMDv}PC7Y}u2#Ba26vtleIOr)KvpyvNkvy>*|P(Lvwo6U{ta zU6<}ZHEqBZ`=+^v9!;9)wrf{(yGq{)jQv}t*L-lUzZClFp;y{)_SPipsD_{JP7E)* z8=rU0Q9aqx$X`p>_t`S=+LakjzC%}pK7E+mm8(*|$Dn^>Ye(VGeluGC9_b1-9(8Q4 zp>`?f%Dbzhk6d^C)5xo8DEYdo+PzRMDC%W{-m#s<-fD%+`;klMeop^gpgr@0Qhn(y z{AKx3M{4ksn{}nkt0En6J^r0LJm9h(pA`7n7U*Z8#G$pP^v2veQFy6<7G z?JgPpbSAsg`E^xQB~HguVvIVs%oxedx@i4k`pp?S_isMbj&t9tlwJO|jTY&tTb7to zuu`Q!_k?5hjmLQ{<-ZiFo&egi4n?{q2$7N{l$+a+`j#`eZB%t-`3mbpE>$IsY zo@Ki4^O0}C|E+$rH+bO7)tsH@6g1)A+o^BRj-}D=Z(tUDBUB@|M8|J*I^BBWQx(NM z^QU9zvA=l+4K@}^S@#SsCwpjGojB3`ag4v|a?_Sv%x8w(pQBFcuP2r5t$ug>9#ybY zY3{~BD>~iQ3Jf$aF{a$R#=|kp%G*w7{3`#)EXU!+Vfdg6*W=Y!>`lD$h~Zv&bO`@u zr_0%bm>7?|+IjvdxO%Vw7v 
zk9jrRpEhk%zi{8q*`DwZYHxVkSe5(k=o95S`PRH^Ws{qBJ%0RsO5^2UI_E1!`tFZc z`TpQq_smOk2c4}Or$7DvzOa&?V{|Rq=W}BS{xM~^8P-1CxyZA5!m5A)yQm7szGdoJ z6{3TYR(_LbU2g2d-*r3hVCk&ejrSgHjy+b>9J2aqRNTX0`o=Y>-vYMQ{+jSTV|+qH zNc6FLx|?clMe3b@efPm+jfQu(or4RDPt9BZK0(d?agEvChr3kDlWuk%&8X9_cXj5YDM_=7f2bFT|hyJId@qNUd!~3$weEZgrnt0+c zuOiStgrmwGdb2C_V0wG~n2W`3U4&8k*7Ff#S0`p+H*;pKb<^9}xFzSc^VF~JOYRvS zRmt1B>CNN12R|xTx<>ImU$w69qug)r0F&D*_bzvvZ$jO0XxX8cyfdb0Sp{9mF?OS! z{Rbvmcf9&^?)fd_@S-}Z@60gHt1)3wCMHpW2?ZX@bLZ@2=Rb&y&WxG4WAjZ`N#)7~ z{v-IFYmWK8YsiQ_ahbh$y2BNR&M8rYR|;rHJX9-_cMZ>7`}EBIDzyP?`05?0*SeEm zCuy#9-y4=aFTC(^sMn2lv#pmJ%zu<`k5>=+^J`2&WlY6Gnf#vfemRL9XGX0}#^1jE5zD|31^TrjAc;D00exx`xzq-+$ zxnAd0XWogX!-{S@+NZ_XH11Sd{mLbUXnU-m>>KjvBl^7T;ni^;CuIKW^R2vg^!VaZ z{?;cNYs2mhDlNZK64sKSxcSl#)dN3mXItEvoXye2H+kk?bt+=%;-Tfm+g`luTxoti zb)B`xi>po_>r(PH((M`-yVyr$_-QSBuRhGHIc#F>{@ZV7n6J~;Qch!XtPUR4KTCTy z@zf!wrAr^Z{u$jk=HS+wa=3Ud6^wyvDwLKfZ9X zufvpqt6t@-=O0cu@pj<@51QTt#@pJ3d){U{-nn$6d*B}35A|+$#y4*^cQBaH`7!U- z>5~mirVX=o-`htE3mZADknlfLdtTfz{er~~mMI)-GFl`RD?q9So z5uY$(LBEC9OolGIwu;-X{V7x3@vhz9d~0goNizm_YhO$JnvYiSlDFHj*=<^obB^dagT_OZg~Z@A;!G`HNKzi@MyIfa zy`jmVaK%j~g+^tNg8-RDp@}AfHk(4D^LpqU3XLi5!==#J!pWfz589lbJ~S$YCK;4Q zqtJLgPqf`&Ogfb-?#H0eX{@w4B^+gX&nD=!Q@JG!9IxA-)>vwW^PZ*sDy5jLrd{*6 z{xkUU`*l2vQy=UFiH#4B`$U``5pyIS&(^R)4RZ*Ch6RdgfpdpzmEColHo@SDjh`a^wRvOmqapeJTz1BZ znAt6kS_z#)7$fet`%b-X`w&VHr(9;aMccPjTE zM_YX}ie?5MJQMpq;{cU&?R|xM|99zsRQtV}7_jd^OW&e-Rv9R80j`DeF)R|PnPJYz zAIGSIRc4r5Up}LUgCPvyJ)=hyAq<)$g&`EA^2iVdokn5wFdaU)QW#7T2SXSv7KOo* zeBe?QQdQy4v54`FazD2yI)hA`Z?Jc>wz$d?@9gJcPgZWN|u2~J#! 
zSneSVCnpM1vIJ*%!a^67Fs|UT73Pi!{8X?c3_aDx)KG>EHXD6a#ViDaZ81$86{%qs zNTU%`7c8J+Dj0gM4v%)zu;&<}420$Z1~vpo+q9thfrSmj(MWA*j^bjP7`ma2siH?p zn2}%!54(;dPaVus5MoC@D%OQwxnnR-3@y=v83u)80}#al3ryF?EKz|2wi%V{V^(O+ zSZp<#ZvdT@9I?R|T0ICldplvvP_q#Xt`a~5que4)9l1MWY@EqK+k-G2oXJIwA-FLr z+DU8?IJ#hUefdl=|L9C73QH_OI@giH60?uab)&Gv?4$E|6qZ<4^lM2Xo6->jESLm3T$f7 zd4h{3T4OUYCMx#C%+W1tEFYP+VdjDZ6R-drDO*D?V=KZ)VBv$Q@N&p z1CP$CVS@$hr(ha5`ep-ecB964MU4*KwU5syN{zp1~9Z>5H?T{AAlKS zsG0?z&&|djpn+WS)j3!IhP-${bP2>VkiDIl>o*02I+_`Tv2ZqB#C0}9!gXuZ5rpX} z(CF~L-;{!}*BHT~xd__lVeUAeEoLN(#-XsqjAYSx6tZ9#(ARN)_jH@cYyDl^umSt~S9s6e_-2dyo>Skow+AUM z?!R+k`U5piVZ-e8mP7I6 za8W<3b=EM7%0;g$Z364*f|RQ#U$aezUf638!=C?cL(2`WV8}DCz#p;N<%g$T{q=W|Yupw;Twqo_H$!7RqxDT=(mk%-yV=z_3(QI^19mVL7!+?`8j>&Y zw)d@S)wop{H_FCuLEg+)xyq&E0#9scovC;|sBhCH3#);%-_6^8t{?-xOuJuX?D_WW zOJ6V8ook|;-|FCbVdVw%%L`;;-3D%~BpS2FF%EJZhnowGVlguUB~AeDFO0_mv2-73 zbtGVZ7|vlL&qQnu3YY@&Ximfu;hnlf7-h+FY#~yd0o2yz*h*wS6IzQ`U@^!(0K^G) zu8xdXV!P3<+3@k{O6&*Dr6DYU2uIBu;n{E;%#ABFH(g9zOtp z#L#F%a{>(3(i#c&Qe)6m5xy7SD>%Kq1=Le4Ix|@a2@t(b#zC z8?A#mfSDygk7iwL5AsL^0a_2lj8LW?HXq$2KiUn2$YZIGZO3>>(*)f2bA3>Shs)uS z&@@vGu&)?A1J`aTC>(^@VCdBfSSRVr^E8Yx9F?rZtOPt0a;hdi#AH={(7jv=_6Nsl zR8+Z?&_SyDqXIOfiltAZlH3-H zfkt>~SWQbcmPe7Gvbg^xL{@T&#;uU}f)9B(QkXlO@FM-rrx^M|n ze50XaHQ+?ecFY9>u?acXUWNr>G-Nsu_(BQ8uM@Bh`E!x-{A3eAgi@cS5|7J=dk^>yV@?6g9901-g~ zEmv?mlguz(g2tdpqWu;@WIy~6hA!fmvS7w3Y^DNV%uEt((IiNigj+NT6ejT&O@f6< zz(tdQVG?oCBxsm~Tr>$BCNURHf`>`aMU&uR5_Qo45ci0W4oHh8g2YmyV2I#ULMebQ zV82)t!N|R$M-a&90`Nbg0xKfW`b$7eTl%NUO#(B?X5w%@?^g2vy04%3I?K?>;2U7(J+gB4=P`W{dLRU2W*xCzuBEZi7P zxrfzCta~PzcBrGwCX6jMalq^(sueu6!I6;Js)B-M>?I-25rz2(jVabbG9jUXPwz1c zPtdM(9EK7h5TVK#L7=3(`uB2-IK7hPA z6QeY!7J!9HV4Bb-MLTT?tKaG6xr$U?@5}3K#?| zcOuXdJ7Am01UESV6-=-lPlxP-w7`i3Ds+J+Sm0U=jU5L~u)zNm&{I!nf(0(c(8ux6 z1Pg4%P{9OT2-^mrVH5CVlsOSMM}^<98WisXQrN2nQUF^l!Ej!WiUT-y#d$&<*9A2J zE!Rf?7Z#8a$x!fk65iZ5k}g4*P{Wb{OaKUU5x^8-0=l@IMgm$oRV+n-0dzo7B3BBr zo=z19eu(vS5w?=zAy1^Zk_^!gFd?`UKy*H-KtP2~q>$i(29Rk28>jM65`5DH 
z+P;%85zG$3*I;ABaEEk5baAaPgoJ=}$Q4AJBi#_4CfxVG8-le1UbR3^{(|ERT7JH z7g1FNu7`XgaBl-4I|GzQPKlfvu_!ol-h5bu&^aI!iJPW=MdGK?sR&#XH6zf$@+jQI9C`1CiBz2-T4EXa1Pl$|gF9OQ60l`asZ<)2 z;AnJP4wDC|36lXWph78+&awp(DAl$-_z3h-8IbMpOne}kyBnvOFnA1Gco+I{sKRc5 zK$vuP&%0byp-eDP#UA`W@CP(+V($PkhMZ2L(CJJRIEl~->#ea;m^~TPn4>^I_-0M*IntOd2^dmTGF@$(fO%96Lu3v>nAc_rn@$9 zSwq{S^WG)rFw-~;+$XsuuRzz<|4_}Yrbnf*iJ3KBRZq`<`8^UPwGFmAyKv4C`=sh6 z3EK)Ab`^O~tezd5G->z3u-*MB%C{_@Rn)7-d8us~Z96n}U0pIK%EQWDRDQL{1szjO zC5%ySHlBkqcU59z`3=gKsH)5B{T`hvvK{x3)wjbjD{-3g%2jX1W)yXG&zwo$9ma?S;>z4!9kshA z;qp>%;_oZ^O^S6-wk&LG6WkdAICfm=ScAAf9(3>%<-Kq(j_)E?9YSL$(J3>|KO*}It5&^Up zMIw#_Xg<4fC$vYIP&VXIh2})s1eIw^7upjXO@N`G3(7<^I;TpgpwZsM4nr}hC(S|( z>8FDYFu-43A+(2y{gNArS9`n?*hsIYlgA&()Q8Z~7h4d!*KEKfs0=hwmCzj^wg$*& zNpk=@7=f}>fx~1{E_kU*Y{Za>24RUl?ZvYYL!Ee!f-VC3bkiUpv4vSM*@om?)H zE?gr2-z7>6kjy*8%Y~3Hjw5c&UQ1{79^61Y>A~q>#S)_jBbJN-Ml1%}ZsZok#Zfn! zHE>39iUVu;=NN~?8a-a#4V*X};~+aNLcf<6oL*j}Y5^~VK0Dzgk&U9YfdQ&|ASN;y z5hUvn$-?NbgPtuW{Lykf!VMA;GNj<5M8rVwMUQ}g&nJ$A3<#?9-s}TXIeKsQ0c@oA zW*tqELb<}ogm{IaZ$;$7{)MRT^$5ZPv|q>z7F&`AFB3emCjJ9Z-4jabz$G!p6ix4533DqKma4gzg04nJs1k9r7qTl(R+QneOZipTw|)?n*urDtd3Q zRtQz5v&f2qh2Sflu)vT8d7dET$!As%PtaN(FvXBfgb`?s9W;;eh(;VOb%17p9Z`p) zA5OrD90y_sfnr^tY2r*w!;p&yH21m?M+oFI4pb%80YdqI9>hy@gZ$_}j#x|QT~D};`e=}JsZ+Pn0g zc0y@iY)l{#vAwc;4O-}JM3R93EEXUcf5q>SmJ4u0C_FbBO`+HP@_oSkNYxT__z)== zvYkQ*UD4jO;8G`aYZ9S`w)+xU=-E`*8ChZ!Oq~Lb3D^thBFms>HNtK7W3hAO4 zV)X4sqB3zM(hX{R3=s;2(V(^m;0CRL|C}Q6WKhVHc=b9zu~*lb4lF`G{zNsJKa+R| zRjZy{UFhO;W)W>nhVLZ-zQ0MH*7xx-bF*^8=vVvnKxt zSPAJCk$x1oy)_X;I)<(z;eML?M~Ar3wZ&sKTSgs=>&OVD}3I;@aSwt^SY z#7H8fN3n%x-*idIpFI2~J(|d4k)I%g$P9K1TdMXY~lzyk_da5lR$XksB1a6sD?z+@lE7|L`_!^P%mpr0%|Ou zn2RIBHH4L*JBfh9PGR(0UGxzdX(jMWCblc|RxP21 z$&e(2WHp2#skD<5(;Q;4zY|cF5i>jP=dl_ke39JfwhXyIpA`P#Fa}yFG&{me|6s*atRaA z7pVJ1BE5Y=L5LVa?{z0slu%0K3&QpjYPe^wPvJqPDURUeNjK!3J@&wYSk91~DX;+Y zF+_e>N_faL2V#5MK|-s?-8iAKhv6Os$^*GURqPs?a0CqFd>*kK*&PKH+IW$u#?i~8 zK%94!NJ3i+fxSX*s}>S#P)i}`<-tNi8%if-g5GQ?yKXNWJlK#*aG@3}ybVTiA} 
zOb2aepoUCbo6q?Fkc{NAk+29Tj+_u%#a93zwlc)01`u2&l7x`cBR?Am5@f&^2U-aD zVy3!*`u9d3VOlN3#Gt`Fsj(uuaGWsf-=nYy6+t5ZS{3qRHdUB$l9!Wu(&NV;|F`Q& zwd3c~waT=KDZZ~gr>}_HKVi$G{lzxz%MW#KGGT6?!|gL)e`VpOsAj9a$4}F0ZQACW zwQBKy-R*S9>R*u9-hMEA-_Jg}^{?DMmy{UU^K?|M6yJ<9SQMT;nxc5i3bNx_Pij}H zUhcV^q+Mir8)vzT;}xQgs^a5uh2MZMpR8E=@Q@0_Wi3|{^fu1{h6`;zJV8ei9# zOBy@I)_ClApxC&x+3e)Z^xO#%WsbUGL%sb*jr`#_bho+YD!P%w)HB=Oqal8$cFm6) zNo*YU_iz3SKJV+4rO*D3%XIpR8KqoLy;M=I{NFg{5BfwO?|w5ATH!Qrx)ppgm?q~u z<^8=`A5V{ri8)Cj6tzYOO_6tCpHXzE64> z>Eous%$U(mxTrX-yVu=c zu<6ZOQ?qT`HWrp_cV%9FXr}r6RP4a$&D*NnZonGbb2l#kI+USO`aEy^b#~Jo#i5JF z*?gRD_D<)i!`C0EbL@nA{p`~=ca8Mx4m_$kcQkd4--zS3Yx+&yx&7XmC0C7%mkeC+ zbOrZr--rvUj%B`nZ>D$P-dw98!ZiwJ3y5cG+6>4Mm>ia{Yz5ZFA!}8l@)WWIcM@$t7yd{*??+!{SJ!O!m!IDpJ61`A75PM6dfYbfX})M@ihMQ%y^G zIa#%E5B;o`nomw@rjcn`7Rrc-nb9(CsLg-{(~6*gR|5sSRo8YfOB0qE*L1Co%o+Xr zdVXGc@RyMfbPKksc33RkF|2&xkC#(kcg_gjOuzMjzVwD;hX4F;>owIm-;$spranr0 zl);mN*H$}3OWb{fa8O{dZJFrN4T7h@0!>DGHwiZd8poD{0&fzY3N#+jj@={-QD6fB z2g@uLdUg{;Kz{1S;J~{)G`xX^pNSb z#^sZlr!wAH%@!6fq~GOI$$}32@O$OX<}yhqE7Orh zW|1;94tcLihQ^cA4>pfU9uZ3S<8i?`|3f4B&!Gxonp8&)=#1>k9JmG`yFd<&OTszn zcj+whm{*3zB~K`&X@F+=@GB0z;x|cMKNtN3%hbb%{O7{bi^l*?b zF*^mh>dxKjb*W=92Y6>3$q0mx*3B5ySW#IWNTJ(d67Nmq#bh z8>9!}0gRHP$y))J2kuXz8R#b4={8S-9@2d-UKQFu&wM*_ZuyuKqn9*aCKm8H>u|8hk6K*NKC z_TN>L!|=fQ$?cfMlZyd7HjTV3LjFk1na5`I1e$+bERW5Vb0j`HnPppn_V)b4=BleK5K2}>n!(2zf3E&vT& ztlZ0R>rHMFPz9+~lZ$3>xiZcL%!VozzhN?<(PaY?&=}-hI`T*2VgcCD>NM>{>w}ZcNJwV0^a432b|Iaf|O(wjs{onWX%|Efr6l9-vy5-UJSp-#e!Gh z$O22a{tAFh_GKQ-BSV9mFLJDg%Pi#Kn$)7{q@bi}aKHdJMPye<2Z$z5Bmd4rW_FX0qFr?7`^~w{@2+1w@@LSaFgUh}Qa7b=} zJP?#jKNgiG*AK)_9`%VgCzL-pE~YF-6FM{5Ngza!CqUwt$=`6p7;<}A8Uz^GfW>06 z<$M5($(PH?pl(2wV>OFKlhbq-MBZLS66q0(#g!8Y=&vkzh1<=-I}K7PHV`>vfmwQY zT!{K|G`4Ks2ksJ*0@;2%;q?yb@j%i4?MLz-Zcwx2f)0zvkp-F1PYzze3~+;AW@3mr zaz2GkWyviNoG72X+99<48{6|~Kh0-_-hHPDc22z(zOyROFxILK#lD>QBcf&R(ZHV~PzUJ7Vj@=CY#fWTH6vlGGq8Ak*6VC8@; zT!)3rlA`e>_z#G35esNM*?0gMD9kLo7^o@8=m*@a75)i>^rAUbnMLyfXLDGxejd*I 
zAoG@;ge#W=LO-T#UI-vZc;i57VgNp}5Fh$+=^PpS2WhVGPm82q1}4hx7?1@xZnB%i zeFKn!%Aza{^i4*A;Z|;Mq!i;nsILPImVFr_uiSuaPzSkwa(O(U1*llb_Tx}x3sHPJ z*po04N$mt8rQ9SSVtE=*7IM+S7Ri%OY0hxLo@8l|9y4U{AKVq@%C3+OYAwe?&@NdQ zOebY0%Pm-qtna6j9VI@X2mb+4c$-CP)o}DEKM7nMk<|q_G$Aj)NWaVE$a+T@jw;73 zN4O&=i^-uQ#4OnX*;Mj+g7hS8Ce+X0zah-{9G+|%N+*G!+_FIPWQ9nFJXwx!JmGyF z>DhP?<78F<&{r5h{)I8X4z6sZhlE1-ClOKuGC%=jw+Vn&7C8XOWy)=x0SYP+B1KsU zW5_@w8wG&Il}%yk3_ufdJ7EitxTF`zX31#}l<(whlELP4WkVA9D$rY5c7a{VO$@b7 zS&d?FxbmFgQso!T?Ww84{>303BDK6v1bi)97lk|xik7kqrbJ=!6z^BkUtVGK)F*+ ztC>(Ml~p()#CzBH7QCmnRbzhyKXJcyEIDGw*K?>C?jK>gPq@cFqUkQd$$IYdBgw;(Qc zHupTb|Mqq9_UrraPGIMUU=zS7vj2O-vUT@Z_tI;C>C^=Hef(wzynnZqG3-6uv;p`w zULy~)4um&>{e1tie0`sxlz>^1F?NaOL8d~A6>>zLF>OG~>i42Z4?M+{G1$M0z2A)0 z37!p8<|94fu(S~X0RK)tt^m7!Uj*GEU(%S0u~bL6yWbB2ZTgrQyMTE{@_|sxHx1*p z_|8yVONeWgpO88uFCn6t6x75IH--&=Y)3oF1ut*jHxDcZz`;$H_kg@Q5j~{ha61O$ zl$e=>CAjW;hM(_5s6x0zEE(EGaw`7gL_`Kez&$I4K%);0kD3%6uipu{C9JjLG=Cn; z%Z-KsG`SXJE$;}qF#!4x7ScS+T*e@^3=u;W2|#tpI5Zdun&l7*+Qn%m1Nmc1$D+t{ z_3bK7Q|==(TbMvi?m?TXtiF{SmdJ&O0QMD+#LsrRiG&V~N!w^78x{hC%Ake(<;#pAr=xT0dNSFNKwMfJZ+~>k<#|| za#M_b%(}Lhzh1LA%DlLzh4OYy`V9vGQ)+tsi?$WS$bW+%wi7shZEAQ^sE}D|Esr@k z>5u-%eK|4}EX4z)N*2zz_%vuqY{5_-pZ@7L(&h>x*qp9dirC|$#Kk8x*!W|>A8A$4 z9qPT1i*FpB4A43Otr!*DtmBek7>}LiP-O4*jgzSUR)eb4K5@}vi?%GNd;$k4vG5;A zE<@2eQs5ryFEBbz{%dEB2y$b{5&GJKMQy@466ys3?wjPCNGw~iB2xir;Q<@Gh)z*s zLTMFjAyKLvv@2x+Bw>&?l>>BXy9tF)nSL&6`+`v(0uT;4JSU4r?G}cefr|4=RGWiB zNw?m1^KUhnhkCN@1FshLTM9J0Ec8AlMRB?1KSBz^z+e>x2E1L5!MtY|P6R>i*Io3h?%OS0Kr4?# zJ(&>K)d)XW-zEcx!LGmh15vl{ZkRhTu*M=fE`>NhR?^ODv>&Es4k>}^#p{6tl3t_0xWVplfrsVgsZkV~&l_fdLd%NT%5)fV7N81EWFx50Wxnep)dAQ!pp6d1a4il|C~ZC|S`VcoWU?4NH*!fvvr0|3gwk?n z9t!5lBrYCzY#|y%?Rh~hXx^n*mIk}k5B-hzp;pd%8M;y;K(kS?l5{H0Ic+JD^yhNa zPMYTwMWfX}?HAObm7yI--a3@%kOrMG0R30y+76EaK@k~Ak?M)$gAIaJcI(EemgwmD zNV)7*-EhL!to^CdzW7q)k_4e4#xXGLMy_P7g!fWO3+nDA8DYEt)(Fy$okq^&gdaq| zbh6s%#*P@hN1|#Jn~3MAZRXD~j{6AUBNbYjPL}=ZoTC?MCFc~4ny_uEaK@(1&DkZV{O(@JyN6rLA?k5$|5bGbofGo%$QF)tEHM+8zqeV zCh}a$1sSuHb@r*e 
zV8@naDFLW8+lwOaw!L~hH&2KaK;$12BI;Bi=7PoO5o7y^!j*;Men=SkVs%bV@V*tD{>ATD!CUX+jtc=o%s9E*?>*NBpWU5|jf zR(enVgwJ8UzQa}jkqBDk9xG$Je)&6Lf(NWO?)E7UFIN`>%9iAI@{)~C7A8E7VfXVw zx^3f;?sY&4FZ1C6TgoFDAQd)H-3KDSWAv!N@4JhIBu0zJYreg)yLO~eD=RW?qsT}& zXbT*fGvd;-nVH|HObyxWnq4eluaI=}2SNo+PZfl1N2{BN0NM~AV?So9*ID$@t;Jwp zn#h=ED4yc|h|cvSJ*T0axyU+(;P%EP^f9G( z^$DNjz8K8++2_6xuwrj@Yu-5?k0eUeA$8cqpGkYR_$enVWrM1SM~b%VgidLbr{y7# zwYQzGhf=+Q?(2X7j7j3KV{KId4109p=r9f((i1XkKPgR4{}RYMWzG;gA@3nAL@hn~ zB0j4^&y8K(>8VX3Ci%`^0jnThxUI*|g+X&yrQNA2IWL!SA?e{&^1pikKj!_pZHO#? zuAI(pm?`XyQvh;pu6{2^5P)>MR9dkOuW~2WO%#{4oys5rgqbX{6)2Rbl|Qx%6!!Io z#ibe7jX8~`%<@%Vn}b}&xgy?ElzYFe+kZn#a^>lJv}wH=d$f{-@n-H-otH(b^)fR1 zvY%I>GT_!I+|YSpx4#s)qqq^q=6EbIk(rCeG@V_2ZiC_p{NeSHzbG%i zzx^9P0K5zUoF3tKn%p4CY4&xQe|T~qNqCry@fq#TrGzUZp`o@rj7^%h1xo@sgP>sV zxY59hxz$qaxLSYK>bb!Lm!@rU(%k4@ITx-f>6!@%;C3^4F=IRay{EY=<~Ql^99;T zKi~?ufOp#F#oj(f-=6F;EQZ(5|bE25!N{`crnH<91@fXmEix-@p9>O_0g0RJ)paNpCwUQ#ED%x4t(_?2KX+X;k%mdy_(Kr zcr%cA^y%Z9Cw5jqFS74&UyS?ZDXMdqWn1=sl76TatBTTtPYn>U4~FJG}gIomPqZcDI?+Z*|N zc_$YULhj76Ta*1^fp*_H*3hFJXS|=+ll|~`Jx8+Z^Q5Ty`Ijbslz-26{>*Fb(&M`Q z>9l6j7frOI|8>Rfs;y=P$>|3FU3OmwfEz2(_r~jy1SOa3oLl2LG1}`dH2xKQte5(6 zF>MQU1Bv7N7uPI=^`x!Or_WU0@Au|n${iP4aLUpt(B={m;;rdB)jCZ_U`53h+^_no zu=R(nw(HvJi{mXF5lN_fey3670)}nDhFAZbMM4VeQ?wG3sifIy%o6Yzt(t5Kz~tKO zjTU3)6!}U+lj&)me1%)9qn}M#e$Zm?;Pq2!Cg@lWJd`D-3EXV7opGfi&qL_`76p99 zVX@ci-;95EF-e@Dq*z35!-Wlh@}-kGr&kuCEhc70pv>*xa6z{_cDFah7i)n#@f8}S z;AdtwVJX(x6yH2O8_++Py!J~0W(mlT+OuORt!%+o_IK7rA6gxc-a|9utm4m~LsbUx zhY4MdzaW?sw^Q|3hs)&AQS7H>E#85_1S6V*e*S?S1LH&X**rZ@EN9QoB-G=r?}-Uv zEOjLPQkP1VB)l5?D1=mA0Oq*M6Vu+t%4J}9YeqiC_+Iht?u{~)txqxl*7N05dR4jV z6Bm;d`|%<|{!4hTT&Kwqh}MN@v;DJ#E*ljLXrmgYqz0BSb@J$ce2Ys1%`7Vq=fkUp zD8N|3{fXqBLN0jkn}h`?7xHb@V;c;N1%uGDQB)*wDJvbh->taBpD?Wd*tAj}xz#i^ zuT3V9&?LWVm$D?axxG^Zm@8s3Y9H{;NE@iT=OONJ#KY@ReY;f-VC5MdX+Ec_!R)mM zncUM_Zpph^8KJS{0(7sk{7nncFmnv@qZWG~G!vL8<2*K!t8%B%urjriX7@;Jom32>;b%p zZ?sa|{!|u+sT?7nkMcoe3_0fQUe^DO)Cx9(_wljDr=7lkTsm^Q``!Zfxvlm0{CYLh 
zSdXsxO|K?4XVVq%NQq`QI<=ivF)ITmkT=+fS!1@BveD4{g|^rGJ@hg|77b-v5(8I% zVHJk6@buR!>7X_2lQF@-#y-iP04Sai4h81{F2b@|(qY@&g5?g&n?e*AucQhF907x| z90&wFgaFr#Jb+EhPcw0mqQG0vWe%uu?x5c^*>rAA%kdOo9g(WGe_?QMV7}Qq+x*D! zesKR})#GeHM74)v<2N=N)yFPYnn}d6Gu_9S`VKYnA<8Zj=H|{enc8@7 z9dZnKeYzyTw~n3(vo3(|DNdWeQiGn(Ub|6p87uo}zerwkC7cx>L7<=V9CAj`<4+yu zngudzRA}>n(;gVwIJI1dk-C0&J6b{SNF0q%m#U?wkecA*Y{hH1sdWd2$syFW=%+t~}j7Ef%W zq~>pxoo&q7+3VADtXoxeHaYSwmaA{2sJ+up(E;-y{8KJe?CG6SjM_f6E&s;*L`sE% zpWvQB(6##nsV;w>5KY3a*)1YA&BS2v+EJd`l7!S>1ed>V8me~iOo>WC=hf6{P}v3T zRLbK32*RY;F3gH!N|L__S<$Rb}>7uZ?L7WfQJn1En_$I51peYHlXJm)Xcw2KiuIHbo=A)#sj=QsI z)O#CKST+sXON1SUQfu_lP{sxu+S1?4G&M~N9U$albUA6~$D96F3!&;eq zBDddmmNi+9(SHO`LK#%e?ZzGLYT;#3Bi!gwy&X9pE3#v^ zYGe01enqIes{8>5OQrG0_*ul!jn>M+YG=c&+dSSqY@>{m=UniyXXk&1U#1RmwK&BA zu792~3?*d7tt^L^g-yeUDkLQ9KXO9KYJ5#fgHkCRoRJM&;{+d=C+UIJx|mByx9I;JdaJ<;<4ji-tG;bL__yaXs@3&D9!@=apHTp!F<(K3&w zCUp8{Xlb&33zYh$A0nIIiKYP>4B-ie4KuvS92w|7AiL7$ic;JEfW!U8 z8RzK!orU^K6BuZ97MGP`M+e>f0=!n0tPTxTA?fmC4hzHfAV0OakyFo06Ijv>w0E~QRb3jiYQC=Z>spX2uYE9q0{^|~>XeeE z;`+M3{snSrA%2B@kbOvmf6CCK_IAnleLtPp13JHOatmNW`#1n=usiSC!uvdhYS4_Z zz3Bo1KAr6decxY?2PmCiUwar&-K6J*SQuvhd3rw}06$+MFa{|n5tdR$WEface?IUa z<{Gi|fA>&0=yrMf{M|(`W@&Q*z(s0G{iH@1bR#EE3=Dh9k7rY7fC1h0y%LP!1eNr` zp1fbjkDtkJxM9E^A;w7YzEKY_lsBkuxmchBAjQbLrTe259wS{iYCgR{A{HHXL6kZo z!R!a3M+rUBNp=M@@5jod{OeQBZPC$r^nEN4M7#Nb2KIo9;lq@GBe=r^tSY5m7-ujt zP-@C9A^gqg(P=qLs()vKx)Q%q! 
z{K*tssTY7+AWuG>av*XD0`{ok^FNJ8HG0L7XSg}fr}7IDIj65!kD%>dS-2cSrPzC_?L{y>rBh)N+U)}W`VpV;+R6k z#OQ{G>$byNz5qgV*7-xee+P1DG^hvJ?xifO(%?pPCfMwtUSE&^o(b40aqmg(iQ%lJ z4_}rOKp1T0nX&GKRfsn}MLC9mZr^_Q*K+rFh^Oje%(kisgaY{0H!RW0jQ*&}H2$Z# zhzsy|lx@G{kkw>yV*h>qB=~33dc%E4C>pZyAwMpYT$HvfDdjVyCnkP$5is=6fC{%qNmsKj6>nGMOLC+?o3I(=dRq&p{g4&w(xgf`j#^v*r+nd|EtRGn>9x z&5XUt>YY%s)xg$nix>=4XiMwoa#qB#4a~Ky-i{1%{EJ@$P+3CX46+0+3720a?M)L# z6h$mD<_Pk06HO9vit|Zht{Hnh1Fj?qm6%w-Is7`t1Pe6-HkxD(W)M{`SQ&2MPY(e7 zTs1kO>H^!z{q3r~hiV_B8j?BlNez5mhjZaN0AYG0uF4}KqFFzH!H9Hd{6`(v z>40^Qm;je`!Wb)yXT^%I^W7*J;e1hJ=jr|n>nu~VpsOAqogz89lE5s>h@u6BQd^#{ z5L)q_;8*mn`LE0@93_%x+zTRy1q@&#C_(JWwP42lkIlX{2cCHJ4R?_GG~{Z4E3L|< z6_Xu5X-~DvNyrN(^K65+rlclPnJoKyrfM?95>zJqh<3lhw;%M*s3@BG27-Z#N!e!V ztQ-swcdTX$a@#Gp+m!>&A0w39FY4zBRFq|~E%5muJGJ!pn$nSf*!D`{p#jj{b>M5Y z;nC+~J+WT3DG^59tSD&sRTA12ac*Oo=93vqv`Vae&HeO0{X~$_!g;Gw_ znk}bfi@(dsJO00m{sz;LW@5mrmv4?iq;Ho8E!St}=hNgBIU@6ktHG&>Cm!YdBX_Aq zU$-2Ptc@q%j?Df!Rf;QB0(fJaYD8;@`atoraPsQ5rsNKT;;59Bg!t5AYlKL~p$TYi z4f1P{RZmvH)ZyL1jDM;H@4HdAg)Ek8Rsi(uRH3hdkFx13)*Jnv1TO&i11gI!6RB7d zss47O)5Tk|0vOS@EgRoU1dQgGhW`${SI8t1QjYO?yY!yiqQgD0c=uEh>gDrqi5fuo9@@H4M@#{1CaE)){^So)9!S+r z?yW^C7fynvxM?{%OrsqcN{vb|YBxEHu%HH#v}WyxIPLyLAOi9B9t}8JLzV1#sLw2J z$oLz0PiU%_=7Kq{4o)6kM9C#F3&~lor*%qn^L4z1hOqg9W7ec3 zk?P;}iP{3CBsFk>0lqpTL%MqQ;Y$mqX8-tw7JJDy?Rk!*Ck@YF%cE1$L}X#B)8T2S z4Y@wNPoI}pyE33nslf79`o|6=@k9jN;!poW>!`BecsC}QS1xX>n}#t}dmdc|KfgwW zD^e)d!8aBkKWYIQwCwTq4jnyIAB1{Rfi7(=@zs65KqtZ~-LB+Xy4e0;%y^*l*SD9N zJR5bK%V1mK!O}^a+TM!CU*(d?-lDD5ru`UDneDWI40C{5%$WI}_BY+Ww}X1Q7G?Q` zJY=v@^8lp0I>}vLg^F^!HRkmUd0*%e3VHDws?pp1Olou!4O8(L2$s(xhSAA^)aYA+ zoUI5I;k45vB57oNL>FXG9&YcnJb2#_MhYYoNFJL{&)`~DmtEa}xs@OETlL3|4np(3 zrc+m6y$Zlzsw~?_s7#7kWM=+^36lS}d}6|Df8(^_!1u99wt44|g1o&-tb5CgHC~Oy z&PLIz{tZrKX2CNanjNm`kHBp^rhR@dAY1NZh&c#PT8d_?svl6#zflE*>Qe{eJaDyH<#5Ut1fR^f z9U^-ax)onyR*wJ>PmB9~D9#{^N(l02iZC72tv{o}TQJWl8-~OJW5Xjj6h?beBE`LB zg|X_x0{K8AG+4k)^|xEVbXW+xha7YOOEe-k3A@kw%!~fq3$gTYcyqX}D>&Xn^| 
zDVHCwG4ga5R0vVoDx^6@k&nY!Te6H|w*j2~YPub|XVuE!m`3>k!f@9=)M*71bT%{ypN zH7CgER0x$X6#8SsP!R{fp&l4u%He~9CP$2@2_$~F1OSZ|Xb~3)2SZ25RN~Vr5&|+P zlRr8~Lf%}du1hI`rr=J|rx% z?w?XZTHl^6pa~%ZWyt%wjDigI_3^U8+6U6>m{4`UA1|F}?%bASxQS*TS^&$qfgG~Y zROY&7vWx=z)@uq88UhPSN=2Ke{es++!(XhLUb7C*ER#nk)ioRU7Ru$BqMcD>(LFOTY#q(% zX-NVa^4L;vN1Xtz$=UVF+=GJig|7la-cV+>5`@&+xDxUZ2HSvRW)Q~J z9v~mlgB0P{JA3@(6_!d_UR+FRH8W^Vn9f#P(aA9{WikKNCM5Yx107RZ2zX`OPvcbzif z`ZE7aA2pJGMgRPL--ge#FSL&HCqFn1$0vUXFek^YgaNj~g1_ffh+)tB8w7b=tXnct zNACa&5qDWhbSM8EcAxttDf z8t*^3PEyI)k9<`CkpPihY5r1HCX&eKv-}tn>&k2ee6P5EPcmvUPWg!Ls2hP{+hhxp z2RSk+`1xGbLXTEYKEeDR=8vSO?K_ZW1a?@eTwl5nY4PE0mc!Q_5`F`v9y|!oL3gPI zN^@&he$8^MNHHS4H#$9|GuRj16vi=YDZ^lx?&HWe>0dy+a;9>^j@(mNF`z|(!{prlY~|1ILKl|Ni9ENZm7{d=l|93D`CXL1mXyvvRa;1R|1Gv+Cv7EN~shol{$9{SBz_ z`%nbJuG`b^?QY6aKzVc6xcQV9f<5AfBpz46q#mDp)7SH3xFN9H`^Bm~-C1!nQi|*c z(71j6ylvgRX!puZ*EJ<2?f!GxP{(69axz+Vw)Xwe>#>C;A*TqoKjLiVi$&v~w`QMr zO8p*}sRdnD&JS2x%^guFcl9?kIq7&6K7skZf{3&-($m@@87Cm&91rNC+4G)@aRk5S zXkqJKon%(r&wO(cKzNfTyZSD}T3u=bh&RtRkz{2B@`fV3lJ3AiT_J@|LKeyN-mBU$ zuF4WKre<-4LJ_HsDcU*XjtyC{`^9>SEo=4ub>eUwjfwvUX&qCA?18KY_=GnHJvk+Q zzVTj^&wo%yO-gVcDSkBNKNrJCpA{y#@CnH5X2XU(f~_MAhwe@L7H04ds%;$rJhz8H zTlwd}=K8yS2b+d45)?+71rSo;BuRS(%NcZ1oz=blu?(q=5hSYZhR z_O9ASl9NxjA49NrG84PpN&Z|kY;x02gpO0wQiA+(RuVt^df5erC*k<+^7d9G?3}fk zP3gKaTQz?FrA4qZmg-X3(hchd=PTNO^)8VuZQ34HmRJ zJ%T4PIP8Pa3?6~da+o#59c!zV;Cj>3C|R3hm{2t)DL@cq-}I-sE8oGq?n$Yp1>GRR zz;ss;zqml3jfyuG54G?g zl_T^6e5n}-E^lId@gtr{Y2gqeenw4$oQR1)#4G(NG%z48 z^?f)wG#yD=h9i z3NZk+IPA!zugvUD$LLL$339Rqw2Y<@G#rw^o2T;(3BeOLUI_r76 zDz!l3X1!6JG+j3!>`THxtQ&)0+>*Y2u;`)$^6l$rM^3NTY0BTMX8C?^Zij>+Dt(&=>RL)PBs0;p(eSm+L>n$JfEvX6 zPv1UJKW`e}gDPHe)s~UU_s(ZNMg%sUmr&#&!(NTsiqZ>hXKbpUd2!7SR{|$NOwxyR z$kuMvOJE0B{7_e2>3Po2s&i?t35(L4X^yIduDR0TqG#@p$=e8#Do=lnixeTv`e_cG z$r=tq{;))-AfF$nf{G%h-XXjOV%9N2|g=F^zg6Qc|itob_jmW*J^!6Xk& zNqVj}-uE4Mbnroznbj00T3e8N8!T;-5$|v-ss#d**LgYiC~;hjPUK?dH^wRA>EEfC zDxqKILaXAfKO;}*0l|NwD;j9CbXAW>)x-qc2Hi{@b_ZUGU4~vvm<7~ueBX&jEZC3- 
zZI?nzI0_o+*2wrMJf$RGxs7gr-lOKIt95%$$g1`jYZHgh&tSrx*I z76^1OaRr0|&H0`;i0-W7Q|)^AvTsgRe}?&(68XS8Az4IpTun@1If3Gh6_W zdIPCL1cG<&Aw0|@48=buVBi+z9Qkj!+tZ!8=sd~g*8OjnU6cT*u<|~2 zp0!dDo@S%6Xxhv!;2-_+oHpbMs7>tB$gYOgVulZK9hr?vl6chG1cM}%oL#(%Iv9E_ z5{Zf=h1InMps}l~^U`(3$6P+vgvRNYuB48#a(a%0&SJEZoP>Kh<>yF0k=3iP2@IZP zmqv5Aq6}ki%g9xkk!AII(ntUSt(0Choc{1GW_;7Ik*%0Ep_gSb<-{m_xH2(*T6Qyn zZ_l5qA`fS@!GeC#v_%qpPtFR~AaUfz^lg>)$g@#CHQ7fbNqUOprX=-&#P^x~DKqhU zkkt}=IFCO17F<1u%H;;#x(^2xXUhnGO7o})$wsBl#*#A z$dc7~wT=`tM2ik_C_7PLFeWQTTDdU6%^zS}J>t+I4l=v-7xF}4JC$%IQ=2S}G-L@` zia9CAG@RM9pu}}X#$~*1(QBnut5|)b9GwnBQhvY9Gm$f4ApKNiNsXDbipSvmamY#DV=^h8E_j99YUHdG<@zBWtXKBDkjOc%TuECg_21e_I=Eq zD7CvV7mFW4$zj_zBAGL3(UYr99d4l15YZ|V0g5lCuSiZ z>_+=d6~I|<0It)Ye2LsANjOqTf&3acl<)}712s%DF>m`r_@(S73zLZ&KlL|d(WLb! z9~%L*X8kQaVpN>|g-J=*D-%S~^)Y6Bcd$}sJ0m9N_OawT0o6v>UOKDC%34kWwEA*q z;BR&yQH2HST)11m6xJ11ByygMdbK){W1{)R7C>o4!Y?mgj80hSd~U3m9WXJa3a_4a zHb|Z;7isbI>9@ac@J!KMe)n@@a}8mC+4dc<_WJWbNA59W8Kt{-6mBOL3a3ZO!B zD#>V-AO_wrko5C%Ml`?uros11O+%?^IVfX?&CH+nqK$)MMx=0Dd1x4Vk`YNm;Yv+f zMZmDynUaYlKJEo6R3=r%2!u0Z`1IB%4sRx8BQ9`#=vh#?Gk*(3NSuP#0-hV|0ZG+n z)l2{@0_sZV9hrZjG?-lJrRFPF2VwI92+I!;@97>Y zFQ<0cMdcq2nTJ|82{SO^?e&|h#k=u7Y+wNZqL1+cUgmcI{I--MscD6RcrMq>j8_V#%KfhG4kcQ?;;6Exu z>4%Zq^K9v?p|t}=Y%bOwx0szY$DOGRuF92_El85rc+ZUyBO;d_nWR+l>CDmrumEDF zkddTnlMeZN`pAxLzFEuiz4C!H)CF3pDx>K*8I>9iH9jKa=uPB>a_%dR99MF7i?>xx zq!*ew2&-<^i&Nz?3pBd1qD!0nN4?_C7u}ZE0?kIhXwCdB zl{gUaDZ!2sN?_$@NYLzeScqoDD8N+eXOvYOQgwXYhcCaKMT-}+&idQ}ns9_a5tHx) z^kUAXpW-e6;&1mICh^WAQ1~T$>W;@-@h(IL&|ubP4v-@I0hge7j9KKY7_w6TlcQ1)mAF}K;?#c3f>ebYck35uc6wCZG>3(91ts_FBMCHh#? 
z{IEqChiGTD2wRInAFJ)^#e}PPgUpwhOVC@b5Q(!|tVO)daosBaJit*c)}-$0x-Ll} zliQkvW%N!9R*d9M3)^FCmo?mFXcxOy9G2?_x`)!c^}dd4Ame#$fI6EwN<c>XHBKFwFJTogKRgXx(l)E1~;c$rNE}8$!e~WqpxUd6WL4$$~W9^v@jL6 zYTMcY8DuO9X_w~c?bWO?-GX=M%-z;MZ~rhe-eYAGGBY*&YyhL+4sL+>Jn2y_yy4P~?zF$dYtuKJ42#_FCX8Q#Fn4h1zso zVo-HpL-Q+hU);@gAc4QEI2XxDA98{l?iS`X_0j)m0w7I5w|(Bb<7Dnh20+U}+!Z%C zz8rSAU}9co<9b`^&gCQAom7Y;5bZKz2U!$Ti5KEwq!gcS;Fr%9?RK{^{)1 zt{qXtqgZRCjeX_pP?Oa7580&r>pcS->u{ct>`*6nS8e}EfSl(pY_b#8Fkf{xhJoUF zQF$!u0=m)0;i$h}bwGn5LMGD+ZD4rlqZ@jP%U+#V{FcjMJ_wdR9U60P&4dcS3j&6! z$EU~Kki_tIuCSOv^Q{G@9Xli;4Ezk1<5l)iPPUiilC9&fioz!c_DXV<1|0^_DTq)g zPg0}Zi_JX>On5VoF5a!{tp^kEx6oL=s1WvN0eD*{&7QFaiMgz7C^9`BVyk6PA{JVxjhdaF+Z|7>+h4;J+!LgHI~L>_a7S-)#2lmz_~-&^ZPgLnYdK!5 zsL;*MnU<2FvrcyYP|k((w+sYIN8Fc`J&jKONXK-)Fv_19rvINwCXtU0!2UnXF?buX zq~Z_0y@h!RI1zq(T0_Y2o8MP1HPBCi49FMi7S{cFnr@ejNFTDHGsa*@EaWmTe^qIh(pGWE~|0G5s-O6U@Rozdg(6m21 z3VmM$Q0j`o3317M@W^NSIqe*VVMTMbCO}p-3h`hI@nn#dMd=FLu}1;wijq5D*=LhH zyb&lK`i=0N|j=mSn=m-bIZIh&Y&zZ2CleyPmom*?SZ}3f;xJiW@EzP&$yZe8JfLm z=^&iXf|zn5`yPz-8qkzs1)&vL32Yz=i7h~wm=T3uJ5a35uwgn*b?@oF?>;QjS*|aI zunenk*Fv8_aV1mBRebl64wrIFG!KOLX&y zBo@`11i) zwVWFoKl#Ka(8W6x{-QBfmNu5yLUI<6^OWl*`*!IqvaKw%5YDMZ=Ik^a!tjRuN1?rO zLcpTYW*oPRNRPOZ{SK3O!`>2b%ep2Fp+LQD+Q3y12xCT4ZrsrZO#0nn_@6`Wcf#R8 z8N7|&QrF)KdaT0TVn#|3_9^b88}9--wYw0Z>`GFkyLPRmmAamIW*r1dlu00kT2#$) zo30_fhHAMeaymXbUx6+95RwQ$*_R+}pc}F7+LYEy_dsN_46IBeBvv_*iqhGrq5&amL_2>Xtk{)D}T~PxoKm@K%V0`W6BezG@31-8%hDO7Lqm9 z4T3?ST)CzXPR*ssCM)JT21=V2LT3K)5Kd3co;FR*!6J!{k@z%y9F#5Tm zkEuvHouw6my<`kT_B`Y6?>U57@CFxcX<#B56AuXmGhWH`jm-0WT+!&TypdX@+H^+9 zjZ~35q!){0o>%Di-lYGrnBssVFTW}S0~#nNGP}356y3E|>_lIstQME+e`D!GbsUyg zqcU4aM?V^0ppO;6>gtc1S6W-w`@S&=@yS-5)a9_2mm@d8VXGDHjJJ^HnB^O3E2pS~ z_=dKV441UrXq?RZ*hfVBirAi92sgzZts~uZaz5Z2Bl-1g$>4Pw~z%(6<{<9L_h(GN+}ZTIw%c{Ds%(*ItDr8qovb_4xCPwAvAD=5tj^- zBV7(htmB97GV+!n=k-{x&giYtgHbKjj<}G5A90OZOC{wChltJUQ>NPzdyzof@@bxHZSi?KWNYyEWT9}i6OVBN(;pFE+z(#xB9q=*|x<5-}s)mNIyMwRcEpry`% zel8_jNi%MVj(mVh&eYQ46O)le^~mqugkM7ViA90mIa*O;74oHCJ4I=MXv 
z4weM_U7n&P`8Up~`PW$!S8h?;hzs8uG1roN{w(ujE`2~-CxR3%AuBa)cvyqzu3v!u zbYNW}=0zRWad_7_T6G-82S{sdXH=+0NlPCk;;THoKQrtg7(kPzbf+f~sHGed_Bkv_ zYF`v0j^>hK&>RJ*|4rw;>K=19vPpo}fXo5-gnqDak{W;%{{XoU0}2*|>W+md4vm^f zY$*st72cMM%a+I-fWam!Rqni2mJ`g*H+rf2I?) z9ccW&A{Mn{Kb5kaXdHY~L2jSr8V7;UIbz@W5}@{!)DJz$-c7My(-iQgDF?=6-lkPZ z8kGg+XdXh?re%#BtTYt4B4sO+j%Jl5LCMxl*tQK9!FJLsDMS0FF>voBKz@);$Fxa% z-pBX7ogrEnxo_)tds{NI&KN4DTzw*nMqgE?%$>Ef;p&Q|8V8gaOSUMxj6tcmycC#79b&<0PV@p3;?i(#>BcBXHpg;E zQx=-2`Zg19Bw}-<2Rg$@%3JbnH7k;Bwb(y}nrjdFF|8@a0To5ICz|u~HvoaQg;r#o z{~hc)+mWp)>*1{_?Cep5XIRG zJ++OTzqn|e>Qa+&0`-s(5l;0fSLDo+G{^V zsNOLg9IuvW1xuEe5R(HDBP_GZo3OfySj=Wb_Rhiud4fX9!s}`-;?= zI|D@6nLzH~gy12xgciPZ9dZHz=%jE|;p3hlqht4~zUT08mH>;={{?A4mcL{e(g3C% zv!zPKQMLqpaWVn1NTU}<-J7VDQG?qJL<+J!GKKGmjIbMGUm;FNv`(p|T&*V; z0p=#1sdJgCwr^&QCpG9bW8*s3*77 z2TVUs>mU6Oj_P(_6~B5m(ifN-{FT&sufF-Ro#1v%!r2pPw(>83h`<+GC1CK|)pIS6 zo!*IuaJbVu0he>fcOryve5U}kHqY;5kj*W+A#_%jDSBC^4q0YCxRX^mP@U_<<1E{d zEo=gay+P_E9bP#xYd9Kw_j9g4T$oj!m;}Nx4;e@B$meSc zfSpbkzZUTG^b2_NmRWkQ{Iyu3W@X|5WoZ#>_=#eFhJRnPO2~MP`TOE^BCn|h7|s@( z_X$eD3Fvvlz@0k5#bupHARGrHM|t3!3_21wT;QgE)R%OIF}q+|%**w&&VV?4aN$Mc zKi`4IbvQz=fW~#e-jo`CPVUMqVb5lM&k#J*YH8u_le?ss<(@#G%w;g7;yLUr+&S-l zC%IiF!ac8HchPI|huFi{GzvVX*bFAwqBJJ`1!Q3djA4o_zGPjp_8A@I=*c9)%+I$O zs$uwl5{znQ5STq4HYfvOFcZYi{5AXz)gH1+!k+#2?p}NjKC^fK4DK6n4i1jY-s zMp%&n_k^ZP{Rnx!v{i+M*jCl@n6g!c(Mv538u=UD8 zZG`qZgWIkei#DzR%+GF9hagwcR_c(f*cpgou_-8bT4CRw2=t|=&Xy-cNVeSfF@7<@ z7hN+zuEgMmZ%gWCRJ1q+4!Ex8A-*|(NlJd%4kZxs<(lpb9^0@5&@jAqQL`jjaPp!C zJtk9{Mf?AuL-82DR(Ns6I??iOPPJC2V68y66LRusZ>8TFbpmrbOpl6~*PoJ$L!2Ic zQG%SP2}Y<67%vG49QyJQ5pn-A&QT)HMYqH*eo&!YZ(4NA#By{$Ig_KwWs{SCKt!9K zxED*)7~;NPGeY^>ePHMDodTQgyh1;^>-Upf@&iK*_ zZ;B-X8kRsc1MaPVlOz0bd)#Yk z!=OFQ#h`^pDmxU?#G+NhD?NG=J4@bO9TAOFNPfQ$dPN}^LX^Wai` z>PTma(mnSLu)s2ce;;9g6ZeA*ceMjw1S$CaC;7Q_%2AwC=0${<)a%?Fh_Cm zN-vLfW~9E{naLnp!d)p^g)ZD{+l9NA_o&vryuU_uN@J%8AvRFG*Ffv@tgkHR z%kT~D^Q!O_*asG;xYlucE3cQ+Av7lL)&5{u+~+&cjEp_942@oYs|~p1z}FiJj*v-J 
z0$1AXZHc?v+t`Qszy#WD?I|5G>|5{}_`a@1^~fAER0L%k$ebTS453-Yz3HLZ2F6Fj z$RcD*&^V1v>gxjv_)ds!crhiDMw91{r8%TVxLp)co7-j9_JP<=h4d;f<;q5a&Ye9fiQ40Td;JJceu^g>9w$@$pr^C%G4#DU=N_w|1+p_{MAHC$hC3Y1%`US* z24BUAQ^_>^FmGy5B{qk(_{b;j2{6qXnJMGG!g~K*Fy?XoTgv3W3%xq)zokn4J43tZ zzokz8yWq%-`EY5d^O+Uo;hZv9uGq{hweT8^R>fSpfH@<7K2b!Rur}W0#*?wmCm&O5WX0t%-n%6IkYJxc_|DbUIVhh{zgM(pnWtdamMB^&9PxEWdb+-> zy{VcPhO@PQj)kiN@-tuz6j7jp;%Dv~sqSAkKQv>nqv!H3 zI2LqH+3o1Rd?s9A+>5-RzlYyg)>|5Sbam^@U9n2R23k;`> zu;kA9mU>+=ce!(3Kz{bI(nT}n#(G+$XV@tB0!_<|9Tuvz?FV+LvruyT1^CBVsI=gG z3nljt6=VyQ9-32A!}r>J{c8)Ib9N73Y4aWO0_K#tMQlBWaAQC7FgA+nHkW%4Vi7M;){F9<#A(SWr&hlJf+1}gew(Q`$`<=J8hz+W=c3y z7=-<*SQ>=PgZK*5Yt{Teqmyo|Up1ODF%2_>ZrdrGHK@l>hZ zkdvQL6@LPY>1r7tcM6sk(w%|@3^V>1tTQhUYLzM^(?^90N0OJ>HH;l!Jj;){ss7m2 zc=p()x2NBVA))5JvhyL{M!^w=RK5hbh49f9C5BqQG+1!?VFNu!6zAP2ii?OG7?HFU|gxtJF(Frs}1?$A3{8eMdZlD7|-0#*>TS{MgHFBQJNY^P;uu{i@vObzC!9bJv;| zo-&CVYl`1Pe>v^ZmjaJ6rY&K_^XE2Tw!LddYdN$0UeNSB!yY*P^Y!#Dy8ZtGwKcWC zmoZ8K6O;H-&VRI-!T{edzxa>eFH(xZ&Sq2_e#AHpeyDza1~-W-ee#SI?+}MxP-@ot zYMi|am4^L(@yyvPeF_A&??eJ35g1G);lCI+UxoA}J&}JbQ`M6k0=pAuSJ?FKOQ$OQ zNS@Ka<`mepX|CgSFmXKpZ2sOq_4(W7$FHLo%JtiKuYcjVy1aHreO1=@H-EeQbsY8V zU)I0e->%>ObNTY^;#qcA`V<)2zO#}RH>Bk}=*ce&KES9}!gn50W{}p4i9ck#RE=3U zlm4as>r;B=_dlig&CGt!`Jcr)pcjTx>N-r*yvQd1cy|tR8iy_apZf(PCp)NOic#>c zi&d)zXMdD`m$Um7z^)U+Jef)D(W4CeXtrB!f^N*T=*E9F;p^9)Nh>`Krj#XO(#q&T zX|-?hDvtufOoe8bMH(h+Y+B4w0O7rlQW!x%Jp$2_8mt=&W%*&mPyIsCv#Ut^*^Z6~ zGmrV#`Ktj8+yWt)!gj0NdD0`QQ`_fgv?#(scz8`Fmm$iH!YY7 zpLd>_{nC6Sl0(yT{s#nXn09{jq@c=gJ-k`(*3}?|_J7DP02a z`DRY@4n!2ivJ4C>KQHO$rTp0}{oIsH?}hnEOYe`S3-qiR%2^>GJ*-7|L(VHCs8 z)qfk|z|K`Tc=mci+ApKcC_ty9OwuT&|(^#)oM%l z+3t{fnEm->aSKu_IZUJ5@_|^8e%%dP7Jrh%7-`%lsV%+^d&J}LSS&ugk!ge-CR5-_ zu&EFW%P1XfoI4Ba_|7!i8>8h)2|Lr{z$lgjwr{r?>(O78n<5D210WbT4F&Kvoh{CP z>N7Qo!ah@zkBl%ii5}L}tnvB8)VwE>PE(V}f=x}v??d;iH#VS4#e>2FTQwbbA%E}r zsct>YP-|Ar^Lo@Vl3xH;X)tyZjMaEOWIqQ(Hyw3EQhn)%+-MD}Qm!xa3$!zURluwIlEHubWZRlk!TBAYq^ 
zDzVVDkz-8LBq)XIHpV@Z*F}W0Bm1!g=KJEB0a{%t4uGbk$2b;3xuZ$1ELn(@^U#BxAFiu+{z@;xRnX7#+F$E7k_5%!Y?CN z|B}fYfnvczoeH5uzwf8swHo6-s-WD3PBEy8S6|3O6{5u=lTvoLkugAd@c<|l7tWOk zNWqv~yZ|}fo;`SQin*WVJgRB@co4_V`BY+P(*jP$G=o#m)PoniIC?Z9VCKTgVGkqW*6@h;i9ioW^;IUR*b79pPy+7%ftgA#Rhi_7UgSQ z&CT3dRG~wO$eBeIY=HKn3ieP8B77_X`Y%ObVGZ8v?GlJfVj5A!6(K1R!I{qm5kU!u zQz;}s62+e-DHOr^1?AS2FoWEu)ws%}dI$zjqn(Vxu4w0GWI(i&(SL(RJF9#?j&`y~ z-dK4Q43`^812%^mJ%WsdX!22h%dg3zjSA37 z=+WYpOQ`e%5iT`mE`O9z#7d9*+p`)}OnB}=p9tr=vrcM5p=Cf*uf*6fV5V>{#V>S& zi5F`HH|+G~nC2zr%t#{66*4QUygrB_#EchvNL8iPYB~1I za`b($uX33eB+(Ybi^a4c9`-zLNWL6_qE9_-#R_w&3wec!eSdwtOwFj4#O#i>ols=1 z=iKk1%AqaoExKTR$9c`rym9dFWikXZ;kbkiIdAcMOx{?{!>H13nxTo~khIW+fG{O6 zxn#Z0^6>~|Z~+UludAb+$BH8QQY`a)s{%-hC8WU@te7{o7^_R(mtG0wZ6+OSOu-|p znup5E3b1$vN`Kl+!&uC_4U=43$`yu54w+Kpn3M_U8jw#c48tRJpB4|)_TPK)`&}lJ z%vI8D$1y4wqB6pGQz&|)XXqt(qi$J`nXp{bf`-zCJQ5$@j}X?qM`P6v*9pRJ%w?*Z zO~YfKp1B|mE|6~1c6aY^o7&fQ zLJ`@Pz(koSG#=PQA;K?lo0#0NZW9qkyG>P=``so+_8o2$8y@90Vb>^go0x9TaGSV- zPjQ>JV1Lo>Hu2|LFiF>|3r@qNJRTa8G%sd&x!c6#h;^HY@TqPSRxiwLBEm0so0v|p zZWG@$-EI?;e7LFAb!bKpXCrh7DiOF;CjJOq%x!7SqhFt=Di-bVvgUgE$Ft%FJ~E=X zf#_k28)|$$DQ>tYlD?~4j4bvl7bD!>HK4bqGk?~iroaG`FapQjT1B}>b#X7adZEv~ zy>Y0}k)O+XJO#ML#m{9nk%oCoQO}4s+b4&OY^qY{OoTJ3xCKzVDIg58_%;@YrK5(i zf+()>2Y$N!un}~NUSwYj2aH}MPu|`ugsv4$2mOF_BUSRn7&D-nO96EcgzHq5jumM`bFJ5H#RmBx$p zW`Lo+ZWs+!c%7l^NL=wh=6VZL^P0hIRm$gSM z^?_7Kqok`6SY5|fEGan4bd3Y45Td=&4g#jaRf`3ui0^Z7YRyV>Wnxf@Wv)5ww11$K zu198ZD3u4mp_E2OC^<@H!bp_TmAE4UrHJm^O(mg2dA7jYD zls}{2wPA%$k^U`BNOC)r8AwrZ8!MA+%4DyNv-|o=#Uy$m5=0K$pISGfZqd9J`)&#L zu6P;`9B<|mX_>b=xA`L*093XrMZ!zQIpUm`ZYfA-bYEUQ9@;(>Jb=!E*neL!-Jako zZ&@Mw+%|h`0Q3sc(1vxiLiFZ-!D|Yesrr!(-)Hqv-6N626ja*Pbzr*1HnyXpY^;`9 zQFftbOmh_~%I0bWd8K5v@=e>a$EWsgb+FTrA;1K^GeMp!5k^5v#()~j$FU_iu8ib+ z3c1)3F(G>ivalLoXrQnfk$)5t8dj_E`T#U^Avw_BC&?{U0=Sw_gHtxv?f2+Ghz6#UVme`^H_Io@3Fo% zYW$q;hvANOS4p`;j&*mNq3u1^GfDKZUg7omu?}8w{8)!BIex5f7T$a6Y5B44+=<6} zvr;248gZ;d;G|=n*a08wMEJC0orw~AtTV!Uj&*3MJ{{|f8~m}(mt)K7Rc$$BRIHlD 
z17~OWL|W!;9ycKE&411?`iKttEJwpRZ<*-yG!1pEi$0@#au@y09G-=OmoED3nSEXK z_Y4;0YpZ{(ZEqL-X%w7Yd>4HhU`H4IEmyg}fLhEJx4d50Nk2e(ssUj0f!tRW>mBz( z6MfCXLl1IQh#%ERKSc3UTdWvbwk-Ku%%YSuqqKCb1UgPur2~dg=|DNg(C!*3VamFvUmwt80A$sXE!e+_Vt$jRH`WkIY$6TZSluyXYEPEa* zu-E`UIrcSy9d5~yj#MHE>8c;PawNyTq67J|w)&H~)dj4xe)x;i!rsHoN8qmN>9-x= zo+$tt-E(%j5PzRKHWEp2$Hp4359&^rC72&NwPPa<1Mb+!w5`UMcWh)LM|EsuBwvmR z)UlCqf$Z40-Q7DnHny+rgd*m(-O*?~aCbB!{F081Om5hYjYJsTv9Zc>f5%2f_8lD? z+3={2jo3BHii4PL&*<356^sj*vyP2huxRht$e(M$q<^U`0|~>V33&sQV^SvkmX3`~ zj@XWkMEKN>jaa>~%0(jl@{Wy6C)kdSeADpoq&m{Ekx3ra=$`S1ZFFCo_#<$!y)77t z0-(`7X9wjrS!8QQ?lx6;eeO2pht74IEHoR++$PfYJKQFVOq3?K$!_jTzsqg1m8;SK zw`sGx_Y1g9&1*ZNh~E(}HIoHfI)B`E|F#>G@_1-W%7owIHZeJ3-6kS@s@sIs3v-)@ z@XOsMrW35Ya1+r%UfYjn>Tgf_abP5cqKs9nV}v#H;VZuhc?!oJD_J~E>6faqZ> z4{CfqsXVwRl4Jg2E}AB?;70dE7~cvYn0`wcFOwt!uT8!WxkE*9&CmP(J~U@B{{w6_DDjt(T>%r5 zwPMTy+Ss#$W6%eG&`IvekVfQPLi!a6%&%0yiB*V;P?Kw0&Syl~lDph?P}={_LD2Ye zOok_HLE@wkl7~(RSzK;(Pw}%Ur)BYdbjj)UJ)fjar)cp#FF8+gb+)4+(LJV|y_DQ2 zx%F}?m+qbDsNy1T=;W@@OHC?f9wyQ~(>%ACjviLN0pw0a7S_bF!#P4Cgo zR2^=FkTlLAJ?ObDHudF^&P|#7wYWBiY~@S1m5s)dKZ01}UF`2dN}*Gvujme$QIScY;w7pgZSyiIz}|}D z%g$-33~;Z1@L4E?^6u~0Ls1n(OwI<7pmfS}u8$`Zy6{roax>I`%*oyc1CD~|*)M$^0Qg1T6 z3A0UxSlwiJ8y{1!XqRDAa9r9IzvTqzCM) zjNU(gV8^W7$?sH&glXNM+rP)!Y-%LN+!CB2$skyM_UVWk5@h^(lhV#m?0zeS6yi0F zf48>uWFYzoFAtZ;6R-0G{-()si^SMKF`@1CyN^7ufZlirsT$cV7` zS?V&^bYHpWWQkpKHW!Jaw5~lXiH0BT4$;PcvV%hs4mDqIC>h6LqcEKi)O^FC1#HjZ zvUcC0`Q*7qw%UA$NUC?r<~u~#xVC$r0>XN*I3-gEsqMkf%))7486P>E3$+K6^dOEs zHYTG|thb09KxL(H?+AhDTsv7j%UP$T2(tCuxse^OSU!dyQO?nj1qh&%aM_XF!wz?U zB@-GSld+@jx7iF}r&5^joXRHLwP;&4zjzq$F&x$YH^c))^i3VMMVn1MWDjbkrn>a} zHYQJjo0hm?vyPy^-0wjeK!NF7jZy-S3!GP!*iYB>IRwPk%3O$T^OGQ;)tQiLOm3XU zC!r~T`*!qvvXMH*jN(z+v=qUpKI1umGxd&7sSk0k4tnTFA%V@-q60gRBS^aqU1M}-8`_2>ot(>1Qw~rTSfxwS6djX*e7K4XJh`N)UEa18 zYYgA*&(PE30VMnD^k_q;Qj!*pkj|_Qm^Lnh_-zX3Tz{kRm=4dn{x&~J7TH{Xez&s*$YCMRH=(8p)6v zOg@)OS;M)UacTv`)G1O%<>q$V>3o}>Jv%TaLqn}UDSH4Kl|rijm4a(tkmuV@(<2IK zj@ebE~7|e&8&o;tK=^A$%P2$GgHD-4kcM&%q 
zQ&fqykFpvXza&MKFx4Q&6_LsuWm%)KnY^-)CXP zPrDu@8bxu%)UzrKm!0a0t>;kVgEA!L*)*Y1_?Y@in~$u%(nu25R~q59`pR|)IlZ@0 zon@#>EN!QPfwOW_S0h zNOc62ml(dYw{3K3DlegYR}3QjomE~!tvVq?P<%NPuT`gU3+H7@A;ZZ~&?tPH@=Kea zxcq_yJLEA=h5}>}+E9Q^Jsb+g2W2>EdlpWW!t+ZoZ7$*x3{t}rN-&5zRS5>$ZL|cl zHYT2Y{}(^UDv^;&A=?^5upvz&c{=ZI`bRUJ4k&OfC=1uIbk_wEQd0qZ^naHb@wYQ1 zsF0Y`2Iu?_63%&I^vzxV`090$UjVbE5^C<0Fet^Bn~1kj zqzKoXEzptS4&Vz{Qb<bfVjl}$a(_XHB4(JE*Pz>+ zn*!l3K3$3oS@5J57bB~@e0^I2mb{=j>5tmbpJacLDFPAgV6;q88%zm?P$)4*(tu5- zHQrfZ8`ky%GO7r{m1Nu?A~MiGIV`a55{Z0~m$2+vGO%laTl`5>hz%+>*mHhm(h>*m z1v8Rp8y7%ET{B3O)PKS~b=6C3s8_I^)?&J-1rsktUm1JXCf}}0)2Xy}!+n`+BbL@1 z1hKTjND(-%rzK%jll}EZh16670IvGGWxIoGVIeESuR7D_0?0@ZxjIFCdTY?S(ZN(Te zY7K@A6#-%U*o}(p?qJ;o{HCiEFFDsL#RFgDQoJ`2tW#pEai-?cr!n@9s0M8BczV#4 z$f)U5xoTxZ0m~>dy%3opz-SFRmjriJ0j5cjiyEkDM2uWKqD^q(jlxP06S+%X!yP)| zt%y`6;r;O%Jb$IZ%zDtMU?0uWa0BNIY_MJ$>{v0zQjsKRi;+OiktTUF(_kc_Otbb8 z%h5o!uo56x#OI7jrl}0v9`RreqQG}ajZRfxz^Fw-d<1ZkVWv{;xxh{!ttq3aST?35 z5Ec}YSjFv zteuY4fPY(X-C#T#&fDk4m3NCn-OQ~k-Co6fz9!$#wYzPY->b@lRb}SvBLDnMdezOt zQyQ@FJ1O?J^A=@*;%eYRIFR_vxss8>&6W7ku@V3XVXIrMtA=*`02)CERk9WM%9x>=yU?w{O*=>#hJUyIg?sBNj?N zIKx`3+zz!iMoo7TxS%Hmu*LHAsAyO;(*eHeO->p^BoqZLC+R*XLv~_^nZ}yTG!1}a z0~^{55qIkoiJ>sErnKnrYNRMFeB#6U<{PoE+sh8*wa)*1$7x#6s>~9ZwjhQR=8bmR zk$)LN%~sx~K8s4NTePGasUoQxRJ&X|Ki)Xcp04aq)&&pk+^v^|$+BgBtopo%**e$_ zMlAXq?G3l3jOxN;E9qUask%t7yN+bxRsZvJ!J`s7O`1dTSS#7Jo)zYQu{ctF(CJRajbu+s0gikul{7QxHE? 
z8gdwql!+%8Hk!Ex;39yKELbWDWq6^W2?#`Jp^aV+^b;bWJ<7H+X{)WZ7=uTVkby!T zY?{jz=}MYH*#g!rs%wbAY7#Q0nyXPuWviqDgV&?BH=aZDa2x4!PdyH22;F@b2Y=g- zx`lgpMEd_4mTuv2YnxLxun?79QrD@Erc6=oRjc+c7lkO`he4lci1itFn(R+>ODWY6 z@_b@DVMg)b#I}9ztu^7c*?vB$9Bwxi-K1t?nQGW-x4DXCy|Cf<3wr`XvBV|hHwGGy z&w~#)?{<^etqDxxEF>}1kCp>x+kfPDgR?t%`NyiUZP1@W%HdJk??b$uin7M%+HNY7 zU{^-ou@~oHUa;ZBzTM`bT{>FqN$SLvUTR|ibw0xxk2;5I=Wn;$>o?X6S)CVe3|Dt| zWJ$t|%jF*5$T~Ya-e&Rn9nr~?6k}gi#;5aR*sYxm@3Dxhim~yX?L2p;H-CnKkKXdv zZJ&Y$fPnuBQI0_^ffIxQT(K7aiNwujCge!FiND~Y83ke|TErm+@6JmkmC`ZyMT({{$3 z>7>eiHftQ)9t5j=Jr}B;AuT6ZOu~QKJBIhLnc;GBSC9BcXP`mHDMkz~rx|FW z`uySg~qEPv&qr~;Tx{?+1r zGaC#8&mb5lyqV`OL~h8!$X`o6&_a0$#% zCCNUgt}-gDgfc>>4H0z;w}?F6tWDUP{n|Kgf)NB$x~TOYuoBgr8IlHUV_l*t%X{FWhY;;If znpr)r>h0B>Z~&?dy5q%N=fG#fi0-VU*F4%HBWG81L;r?*ou7FawZaEg3GSOs11BEf z{Xab3yx(W&n&(CnX`^|tXf-VRqMDXg_X#%kbd0K1mW@7tw|@gXI))$2CA#Yk%vd(d zZzxH86`Jc(N%h&c&)ivn3u0UW4ms`73LHR$3Y^$i(i9~)x3xJSDNA&T_E+s|g)II2 zd9M{B7~8i3xjoz$3iXaBm{4=c-z=u+Zwlto*8qh?QrVJ;mt{ z&1Dc=gicuKjDPWu)XbAo0X~fRS38w|CIro55yg>L=wc0;cG=(y5{}cZ#}|Izfu<>f z!iH7Qm;!7?=44Bo{ ze&?IMJZxe+Ru}M-zR$$0>VEkfzP$fuVOPqy(-VbFF@Nx3&on0^r86f!$(l@f9}y^% zDVxB@)E`_^$bJgIgv^jWwZjOnrUlMmy8O-J4PPoWxJ_twbAQb>xkYY(3*(=0jq=x$ z=0mDZrMCSycYZwKg|9OVQ^~MJw4_*vcD)LeTMFL-Wh79IE7x#Dl6HG4_%T&+J3BI@ z7F4N0s(-+Lg%YLYm++2_S)&4KY@%u@vOn$L6y!@k;BW1(VR1@kToL5+zY>QTPKqm& z-X@SC1IFyuiBH^!CCB?w%^jPtZMdC?c+8e_yet-&W6PYy^0xqOZP?l9E&OygeJ%_c zjGQ+rS#$-?d`wVeitT&lnA;qip<6`;Go;+hfPd=K$_jZa@-Ap*U};nZYNr$UejJhVZCs?0Zfo_A2uAO()G z!heI~Du5huxI!`1s=^d^EButXb+uPt^@!I~+>mzo!y9f`A3G(Sx&nREL7mR|A-iH# z(ofcseiBa;=d@ov?Fh9^`XzCh3e?bCPs#cv(WNQ~7IH~aY!k8hrRilKQaU%{cA)4L zYd=>*7ch&|^91Rso?gXvFNyFP-mr}p#eap|V%;GgxX7#Muc-Z z1*7Z4>qIza7-{2<&fOzMr}swhXUfOJQ<*Op%Lrev*qB6bKG$)@3@>7P$bVdXSLgFZ z&XMmWmv~Ic7i}i0yk!=Q>@dF6o^&RGdD6`D!~4oyxz$B+s}4l!QJ7-K`<-Ztn`vK4>67MVk6TXNo01lxb`*-T#L zWbWl=0Y2BsFsI~R&W3`UTfuhELVp71(^C@~)z0a9bxC07q*qo|YPe3gI%n5W3504= z#w7ThkeyA!l$~^Y*Oi$*qc_d8h)tEKkeCN%hhUEhI8Wy$nH0H`MflhTSLOn-YbF#IZT`w2L??G}HLtysZy$#kzP_ 
z6WTCzA0ND#h1VFIF|!214Hk>gVF~^oJlo-+}=ara{`}%qr@brTl~9%F`OBm?_p~bhK(hy zGt^X=l!=$BH>9eJpH|fbVk?}cxjBx}p~$*$;TO{}xpV5OE5ha|stFrmN{&6J5_T@m zx891ir?@wrrm3YK4C#&7AAgVNjp+8nZ-pC+&Y(R`DSMGgVkG<-%6}5+lda;E++~<^ z($t*XtzmmSR}iemcFs87#62A)w$E)t#278|Sa8Nj6+`Ll?R}OQyxqvujh(tbuLL}1 z!1}&EBge7(b-bx^%+_+tCF3xsZ;%@=8S;!lQfe{IM%cC?3qj1AC>Il|41d~0**PFq zGi2gKnF?64oG8<^iGMN`w45f(r)@!Z(`1m5n4&k_cLB6`mPq8yR6n~hZg|rXz(Cf4iPtj>H z9`8z>PLDlGr)&O)uqi^}w0OQJaSS!4&*;|(zEeJ&sQCJkLP`(QahY0LcL>XEEA}Z+ zeu{4Jl*K~gwQN;}NctA&Aw3%~Y6^F~_AB}n?N{To%ToIleYEzM+N5KbtLMq>M)?h@ z?selT?p7`&@qaK4FYIYtAe{2}Lg4%qlQo`k7ZF|Mr=_8%)l?!OCvnxQwl+^uZ9P66 zE>$}{Hea=S>Wfv}0=|a)y&7yceFX=YIsPaWcBbd6u{#*=tjt$h1>a*guxRJH&Q7nR zQyRp1{<+l3cU%ho@e+?SCeQU!I(?HD5_R=7>SIh44uA1d$TU-)z)NTM63b`UeO~4+ zyU&je+kMM-RwQX3A&i;eZb{&K?0AP0VqH1JkhVtE;gD8H;t3(`4uKRFT;RCq{vSJL z!BZ=vz-Y=b3n7tzGr(Q6+O*bhtuAyg>WY=i6%S5HdbVs2Gx&1v9^-H}OP3o74%t30 zXTL5ontvH~PgBIqD~t^ryzk9Vt#&V0ORaES7v(|G;p};Z?vD`vX9v3 z%urwV_<7J^rP z(A?XM?XC2FzRj62xWk4%Kg~EyY1zBOhJJrahJQRHaWK)l*yxp4?T_=pmk{sD-8V}~ zeI-&t@vegBWu#xiB^I%x!gAUd&*;Q1i@Vp7fA!b#V*kdJElegRwIxG_7-;tA+V=*< zl4%LF%NA)_l%I)>LL(KaHRDHfq;!Yo>&ME;`u5um_BIYTq@HzB&X;8LoC}z>W!M*e zYg@P;oS|XlUyN12oHJ&((>Xt8FR*?7u^G1$C8X^ck6AlWcI2@$`d@H-%+3{FKa@7( zYm&r6*eEc4{<$_cGxNnufycDHN4}9%%3?s`AyBV9WoN$s1E0Hg=9ANeJOMJ35QUop zq|&pkg&zYAZm18qo88;ex!`GUnM!G& zq|#O@rP-#E@rFhrwSQxV%3prv9)tV$mtQ{UjY|Sjg4=H2{|H(aG#)gL%A2K{-oF3& z<@fsaUvJ<47w%^5-Ii#f4YL-W`Tjq*-@o6}eY}GziI&DVU{}0ReM>ir_C`ajaK>eL zuPWTe1fj+;0FbV`xDJ4&P5>`|(&{o?RYWOiD9=C~+d+yGD}`_wsHJ;EGRicyBHc$t zT80~Ll`-k^o7!0hp6AptmPxk-cPg#oqF-J-UOrJdtkaV%SmvGLN>jD6IuPJ-lhlmqk)U&fSmV_9W)i8S_xsF#|uC zX0E7Jp+)b~vMnGdoOIT;*6wj|TJQqC2u?i^+zA|5R7hqBXWZ$3OMJ+N52_D_GotoL zr;61pjGAErtef@dn_cwfw@slME+ojJU8@J5<55pmiAOE%5F)55-f>a~1j~kV=Nqem zMl&YN;EGx}5V5Enl7&H;z)t(n(IV|NOO#Y*pX*>_*Aar;kW;Bu=C1zOzyV%>39Aci zE6{;L#Y!wHX&gd--Vg>)TFqOX8PwzqQ4e%OE4c=cC^ubGvO!*zTRWC|%6hgH%R&s% znq2`@ByV;lf68?fgw(#K@2gN%Vz!|x$qy?26hg-wicAr<1f0u~LC;=TH)j@@)Hbu= 
zF|)X!yMs?GHreJFC6;tvr*XR5F}vidx)Bj`MzfzWRCVZozi(+gCkhfmO}i9qST^US zy}h52P)LL zdmEKb?v{2t94tOsqhKytV^$zW(|wje@d6+_4K~C|qm>q|@>n7)Xn-3c6BPduo*=^C zAi8n}Yz!BFbF~wZ@sN=I3rdacEvZ26Q7WN1ldK5Xr=OtKz3w}V_=o9?WgJctDwalG~lilat!Dy0>XgA|t@rHtMS0!?P{lWk=?YAcf@(j#u5!& zWr3rb56SB;%Eb_{Lii=P`VmImt3g6fk%G?seXt&>7=+hRbM|O1RZgK9Vw1N}s)s(= zE*YoIA1hSr2ok0c(%?yPtKUMnX|B|81F;fZG9;EL8;>K^A*1QPYJ(6@K6R`zrmM_*QEB>nnVs#`)&b6At|9g=PxU~n!)4& z5K^7Qj=`t~c{;+a;IuUDijoLsg}UINlazbWre3wDz(H=*dB)xymIgYuQjQ(KmpHrP zmEDe0K3+ujL9r+G7{rcOIQC?$Wd{1oZ-=kLt(4tu}z^wo5&h#M`|G;fn<9 zU3Oe*3IoNeXm3?99RvU2?P*KzwAX*pMYKNJ9J4xs?%|z~KoP%_YUq!gF@QaveH=vJ z`m_obfvi`!&fN~tK$oEHrC9e2?uiM;IjClaHs*dvwIix6KaO?i7{-O<;CWTPV3o&z z$?Sn>dwJTo$Jtw#DC}t70TAP?<#uQX9{bScRl$Ya{rNC*yvXT%a7N;%)v5>m*m3UU z&pUVHysUlhM3@_lPZCW%Bi&=AUQl<|{JBxhr0EZMTJK;xgs-UVRF*}X0UV=uI!D+s z<{7)#TD!H2`w-MMVCS<#d7y2FVfwLuk2+3YANcRX3_$D&(%1+Ks zL%^43)Y%)6uqI^oGL&cSm0F%X%-rC1pSgV-FzSQBPLR76`>HjIJ!^IeUMt)5pd^eb zUhf{gLw`r{th*lOL@(sfUQW|xb@K-&o$@~d8fc5-gxlCx1xnOgVnW$q@AWxyht#P-Rd zZw6k9bHpW?O^VrIQsCTxe>|GY+EhEzy@6VLOq-gYOqqA9k~d=lh34Ofw9O`a44okNY_9t)>zpYL3qltkdQC;5-n-|b$ zHcWo94fLN>(SNeUxxwRq>3ONEv@B@0pWd|iEersN9VTeoF9K%kaGiF-D0g&yhYLHg z@q$(UqfayZR>tfODmtZ_zVt*$Io>qfZOLetY`E3x2i1U66pzBrohQFo2`H0}HY@dH zDF6`iz^{_H7PxqFK8ljT4b0ykvwh*$`Q^Xp?b}}y)G!eZPihQ*l-YQd-_-oE=?ffW zK!MTP0QrEA%`?+n!JS@~;hP80UI746jRA8psIk#5^=yDj_yRupAm>Neq=lcNj_D}_$o(0OlLqA^;2aA48@q}AJT-f+SI54(= zC)_a-oiu{n%Kj;g1vMkDmtZWI+K~s3d@p{0AsOhrGQAOh;b(3=6`^lje|@l~RU`&G ztT}i1dYj^sxAwTrw3Mk-APle#02O8EF6;lPY1;mN8QnZl80_JgTi!*O{=lKzM_BFf{1}ZOq<-~g4g)lfsWd-kq6B|-VdKdWO zdG;Q4&|mW&((})Mgv=zrf-8?9tSs%*dATCzln#uQM(44?vOV`o1@RMg*Pend}tjShb(?1OlQ?N5k6+Cm)^+_s&gT*_)E}tKU18YVcS((9;*!{W zU%~I~UVADUhEto#_+D~8lShNH>zqf|Wq&|_9&I#XDPya6EXkvNODbh-9b*rK;RPS9 z6+L%GfS~R_LV#%5uTSQY01+OG1=ut8tN`(Irv%8|B>|ohP65g1d~}E+JTkh#T;i#v zy$M)M-yc8j-dm)!Z)h<|J2i7>SC)25BoUQLNE<5ID zqByjDQfl$eYomtEXf794?;Pgmm8sRbGYtI<-}*(kGP5CXZYtNjSEbP5)fgku&OuMw z23fxQ<{K? 
zNYb{v*5FBDyyfOyZDyF~<-FE5t$DSGIc6*S{nNb@GoD5tb$wj$T6cj}=8NF8!LgsU zrug*#U|(Z32p>KEXT^?DGx|gXlxwaQygC@W!0}$wnuGUeABev<&DkyY9a{tb0R+a> zoJYSuYJKhY4TNGOu4)2bAPn~n#--{YuwmumSvI35THL6Oc3t^ki|^Ws?Di7FF&3h4f|5H` zQy*rChW9cHT9t6Ei|de`h} zc>j1o!9)I@ZoGGP?;$3j}hK5X-*!RrjtjBwfjEQ@6bJ`Ol%ad-yD&tmL zl)a53Z#C>XbA|7_@^$?758gb%{DOk3pcg`!8T9DCSG>r_|U zikk94cXgJVYuw1UdM66?-}Yl|%%$&p4rdk*ZCx808M1iP%RRB4`yXrFQmbqo{MmS+ z*_$Uhi^A8Ox=Y%39q`W4Iu}h#G49#tqIOY)#mTz$=U=ccXRp6?y|rFj<8Azb>8f!> z=E2_{Owx2@PtS|0O6=~_W8$mwBNc-WkJ?^#z~ufj$E^8*H-0Yu?vHz&6IrP>&zKdo zVx&$DwGy!BT|3tOl_+cr`HEfb&Oc~NzX7hauc)eUVeDY`Gpnd{BGEUEVQ@J z%4>W+l(C{nt*xh9e(31PcbAH~-_#O??JnxBwvyX!zE~wVV`KGcW5bfxF9nZw#hQlr z`)61k9Tu5XK6#^Qt6q6+t=r(5lOFF9Cnee2Z_IDh7|Jlu7hMrONiUk_GU40fZy&d{ z7d&eEexm({YmwopZx!#;IIQKv;a?_9wVs`GC~K9EdRRsOkX!G&@n+0vj|}=!(vppd z${cq5ZkfjSthjH#qsh@^i%DfhXe;Vnwy`#6@^Z4F#A>QrK>f1UgZ&phY$*0@`h4*E z+r+wC=iPI5doLdm!!Q;N95zbx$iYFT{m(vXKc6YQ$=m*7^wvy2eFU_0EKmSz~+yr#u_K!#Pd;)L~Ptp~b{ltETJSc20W~p*6gGh$(lo zn&sU+g8czma8A76)xXg%~ z=M&`buOX_tI&1K!4PTR=H=Lg^%QP>~H+kEgQO_Ls`eD00(2ql9zR4Sg6Q4(vUP<2I z{BG&2fv=y}Oqh8&rhd9Z&E3r(t&iwi&fq#9$kEO=x>KH%>1!M_+t=#*#3@IGOW$2m z-=S%-s_aKp2{S;4a9Vo2%K!7DWCw$mWtsJJuBLq7g=%@Cmp6BP%QAh)+Px~f$LDhf zhn}bJ>egGQtJOH~nOVd2lY11r5XXJVKAwGY-kRPnWzL@is_;QB7oXQvZ$D!As_oeIRoPANh)wKA4fUR;VPkXK+q@t6 z)$JnxYENqHtxElEiAZ%IuATdMx2=dr$`#_?hXk(fSEal7W7yi8EA9>}G%-3i$M?I- zL-T`AW`0~6K6Bphzh?^DFW1~~e{-mt%cuzxBJA7~7OQ*iiBXGx`*(nWXffmN$eH17 zZ=HNk4Q37RenI5pc-MbPtEthN>}zo%(a$d*H;SIckM4Q$#pC#X*Y=6tJnl2J*War* zPaA#za;a?Og3k|c`WBqL{h~!Qq^g~J>w9~VS zbeG2p&+3(D40wSpe0#va+HZnaOy#iHoctcieUql!X@B_hE#p){TKlU13|g}*dPl2y zKlgPAOk3ySI>7ec#Z_eASG&ueZW1l{^XKcA>`@_#gg$X+AT{EKApNTQMl)TYodC$TcwQF^Z=x22QC!+XFOSsytnIvQVn3gNaM`Fo!YU9dShpg%v+kPGiP>UHwMvDwj z?9?^1WmV*_9XqCO@9}$NWVpa`DS5re$c*2IA}ky-pI!X+>(=}qS7N3fi}CuqRhg+e&sX#tuERhsAmi{nRj%9&*{Pn zPSQ8_j6Fu@*0Ay$e~q7UJ9^XOu2#+7+j~FX%yG*;+>uILb(~tBp-ZuW#Ucpd( z%Zhb-zlh*hoPAH*JzSluHgYMmQ00B{2K_acqpBVh__AX{X6tl2K4&@i(=^v_2IsXd 
zJ+QcK@TibEy6>WUBmMt7>v@W;_U0;iX8O(KK=^l-xod;Etnlj-8F4b;VXtnh+iEw* zzZt*t@vhb>1B-i}&9byuRD9X2*yAAELoK*FT6S>32#4Q=qF&l|dj=1@b+4j+Qu^vz zznvc!3fBKAF2L<3&$>Jl!CwZ|nS06|&FA%)h zdC%j&WsiTZ)H0g6WJ{Uf)89ka_DlTot2$%2MX#6rcWcJKvRY}8qrXZI-|x_8WtZSj zOG-S7i_K!a>ldx~@VX*RWLo#Ub;KJk{LRl3OtQ1Gy;l4=dANLm+MV{&@_aYrg6FY5 zqnlSZ1?^t=diSehm%P5aKVOJ{6fp8XGosfGySpEEcHiIo@!U@df{QBa2Cg|)Q`+*Z zbo(F^*3q9i16QB0`#$pUysAD?1?^_j$F6+j?L5DJkjIL{7u)~a^}Rmi#39RK(QJnj zW@Lzge%O+Xl|`m;+24=%PB(k4U1aldJE=0}(ctKPHCNl?mYojtt?o5B<>-h0zAaTh zv=ZCKc}IV0*GZXQU*VoKZKcuX0&Dxf1yO@-Ykf8qr*|pQ9GSfBLTRY>USB7_$rl98 zJ0AWpe{X#&E^JVM$H$UmPk*Z|cNtdI?jdUbHR~zw&$V}_1BT__oyRwy*Ck^2ACeV& zYRaH)Cpd?bU)4tXohWx(_VG!I*4Ej_OV9Hz>DRX`c;Z<-@Zjaee}(-U%HvF)O}$Hw z(|*6Yx8_MFb)Uv}tNW(iH&(S4x9{-j?X~6Po@s^tKbfurJS|d)iH$S5IIEXnrSp29 zKF#<~l(a8E{4MUrn=TJkRl@5vgA13u;B4>WThgtlF+NM!>r&J!*X;6n+xv~}m6PI~ zo#0mKc`9_nxCw0RfaTSyoI=o1oP3|dOzs$)tYKsV)o1Jz%=~dvwtmO9FrFvUK z7}wHf{kBZqY-JwNcGYQ~(Ya%9uZ&Ka9XIGfloiMBw!8R*=BAG`mVR)|biUHk=IQ;e z|G?)PULK2nRgoNRbINSbfr2MP8*jY+xY6RqhalgH$=Q|LdOrH{YHPsN`@83rRw?dkDuths6GH+8+%)yD3QKNe*d<>UmP$PtGA-{+qi zMX$`gTam+0BpTw)uX~>_)M&bV+?cRA5O$L7@U!6a)^81Zar(_ocP<fGsT+6=AE&bgh(K6P=W>lb6a(7;2sWA4X~@+cm` zHEJ8DXOuWw!F`C+j4_*0$>M4I-~`ybdG*VH!nwAl&K>AsiJmztG0_CE7oBPH#?5syjj zCsw-!iEd9nn(fe!tzBT;qi>hGvGG|=Zyb;Ab1qC+*qW9)a&%LQVRPjteBJk*Lw#Mh z;wRL{eAL{%@xu4p^FLNDe0;X$#PV~Hyk8Lp`@Pt!yEXH}@v(nJUou}Aao%5Y>dEuSs3;0t zV39E3l4)t}`sm#Yybsild*)MmVZGVJ;!jtc4>oMHS=#$oLY~8X)$H1s`%_8yJ)=+l zyhE1^60|gYzhC2@=;43F;bd~1$%~zR?vLCsWMrVd_Nr~?Q!0%fT2>qR+lu1%?-+da zGr6z(OPjrg!pPTiJw+;4zaLF0^Lfe^2Q8a-d?&{HInHzb=t=R0DSLP1@!hKr9kbJY zKJ(8FYkM@;^YZ7$r1vl1Jl;~nTd?i0_0$igDOOK2D&2RUjtZ~n+vLwSFH9eQbr$|^ z-k#;|UGFA;aZCHU?!WeL8w|dmz8iVI@^(xAYpmE=r3($-YPC2$)_3s|W!8^%(6fo{ zRb*~iKljdX^IW@|jlUWn-ca-0=@dOK`TBz!!(2VeMrvmznB-bHN8C(mwet@vnddq| zKWVdpcX8A$L7QL6nOFBuR?VKEW_4kOyIt;m;>oU?{@)jSl>A*#cY%C4nXlnvc|UJs zaaP)gps770Ka_Ug9;LR=AoNpZchS8(fvIQnfko{LSNwguZcoYeb+1mWSu)oUi~#t)3~yPJhqEONK)ZCf$N#o2wU 
zeiGB~oTu-T!Be({?|QdC#On9nGbzcfO(|n7K8aEn`@PS-ntb^C@CjB`RmU!-9E`K5 zzglqU+>OYF-iOU+mVa4T2?p1)_MG!Fw`XfF1@4R9Z60}Iz>TK~^$mK7*@t=X&o8Uz z`1T8_a#*mAJeR+I_^*3fX98S&2CD=+M19eD*YEzAtt$nyW|?fh5@|4frisRts)rZd z=I;xw5Uu|vf`3WTy?AVG>HFo~gpZ@&PBOqx+_4vbsCy8+;gyD8@UYBXw_ofT=Q(He zz>t}L#XlNP49GhW73X|Pv+But^{rRPmgCkNZt=j~i*IDoKn6#V2);DhH zb@D;cj^q=+7nNmB?$^DKb7}FGv2F+R(tj^m#9z@?Y%NavoN6Wt<-DxqPfzS~I+Xu{ z;IR9DDB`O$ZTZ^uz=!f}Lr#og4}S5q;^Bq%=dIiq3+GyV4?MfeQ6+e;h3VaUX{J-N z+g}o4#?Q92Rp%r{dIeriaZQLG*1Y1}@twDpi7p3Tos>A+XhVDCeuJ?-mwm=I?a4W? zG=BT}Z5boeK7^mRKC@>0d{J)j;KqdD3A>wgt~UK?b`B+UuRi-w_GQT5pfvwaoDH`J zT(^1AmU#5##pc;R!~0}C8}0jFL&2wBhOS>e<+?ri^!CSWha6OUK1+Apy=E0+{O>d5 zu^rwG2g%g1nMD?T)~~m@zb|UdOvcXk4-5Xb6+Ec5-Ms2j&)9o6MqT|8yK9=KXzZ5? zhq7~tw`W^sjmp}P@v|iFbV}g6qFWU;T$543cDr^&TMRNDrO~Jn6~y{7WcjWQ-)xGa z3*TMH&GxJ#+YLGUXL2+~Ic%xBHD@*_AS$Tp+sdM7v-~f$6+0GdjY=J&HR{Wxy4s4h z@I%(=w{vSbbtjhYO51G4c~M}w=;{(dtH0?JexId#s{Z%~PFCM>XwqN1_1T~Oo!Wa? zE}SvrPKw%Y^Ph`W_VVywnY`_;cln)C(`c>rOWRL9jw$%V=sEYv)w*R@TzXd8FaEZF z*W)`w{}A1--+dvy?0f5-QH>Gy1w04*sa z%yWfDu81T<)t$UZ22VhZh9;XKV0Sb*3>K5@Xmc4X=|nJu$6%54a6W^@p~naqEH3>< zA%n%^bv(;rGFSr1YT#jb3rpDXjhPpVWROfgJ&w&FSseNU2Nn7eRw$&LP}3k;9h)MV zhwH^`9CzH4t5Mw#@Bb{9Rn1`FHtW}J{Ca7{1C7Kn(;ueH`o32jQurup!mBZ#RPyiJ zryNeGUha(xxk$HyfLn_!ontpAH20suXV07;tfPJMTdBdXE`F|K?tlGzW^elFo-1;4 zYHS_9uAOdYDOhK;^lb9H7>nxfnqOcNBR*hk zJK=%BgYhm{-*UT_+=Oa<^aRJ^kY+gMj@A+wQ@kV`o77dr?hsrgo57Z7Qr#(%1?>)@ zM?!<$p|D6cpTX{sUnJX$A+-n?&k-=#9U_n9aTqX6qU%V$kiqUyWTeo8!R}CAq$`KP z>6kVs($$T@p(l3ZF=%Z^y16nq^u+F528W*56UIwS4JP*VWN_$-J-rwldSWjjgCjsS z-7$fHv?Oa;Z z51K<0uwfYLY6#;G&c@~=e`9F*CSr3@W>2gKy3hkNK+TC*C=!`qcH-DM*kBA*nZlUb zx!4xuU=FR^c~}HGYmV6=i}~1EG_Mb4j~>m(Qc+(9Opvw!`--6`3mCZD1=AB(EW*q% zq}~@^o|i+Iihm|y+Bo`T2XALD#)e>Mv;%zgZVP5A-YUX=<2Xl{`{|egV)Vs=&}ag4 z$GJ?@l!+N)T;zucbMYkt8>%Ye%5=$OGq|)&*(_HEmli6UWHGq3RN15_gG-B*4c%N? 
zu57M=0sl$0%jUuhXqB>g?hKyHl=vPD9<5XMM1hdOlP$p&KFF5f>cs$i>3E%+fWeb3 z!Oe|9`*$SU9lnt)VKDrm>R)kFV$@-)sV10>SXCYK!eGNqTZyqKUjrM77O+5|PKAUH z+BY2Ag!0)^Wz9zSxghi~O-vU@!v!GJ&DvNm42gu$+n|FzgQg2KoAt21II156O-Dn_ z3P<+`Lo>-3)5TE8P-s^4#IE7U%oXb)UTuOoVi*tA_9tw_O{Ulg4EYhbEt=tjX`yg) zEF2{c$L!Esb8Izo9|6nC?*lgH;RA0O!NANh^bndT#{ye~^8~am^KqU~szyG-VsP_~ z>BJiGQcLWHs)$cZmE^cF__R<-fh&Vg3zZajG5EAlNuiJ-1!nLT(v{8N(|RS{;RCH# z(w)cP(|RR6SPbx7sa8l&l0jo@B-szJSs+`2mm5PMS;D_+fu*NXQZ%v0*ci@dBEJ|c z59hPcnQVe5Rft+3eBaz33&GLaAlN=-=DQa-VK~y61gaWjhMS7Fv#>w#I5$_<*hYMb zO{vT@MCuo2s82GXBi7|%0&os1+*X_*z_c(lAe45F-9qd>j`oMbG~icG7)l9)Y5okt z8ql`MFzA{k_y#@%3&zmpDEKCAD42PRg8t}G7s(aE2iX#YZVaJp39c*#n1NKETvrmJ zu51l}#4M&{5&!B?klS?JM!eD+>xOabehlKxM)=g4M%^SAlpAiwvgUfqi? z*fz2BLU~$9+QPQscADSkZn?5x!;m3i+Upm?kC^q*$yiJS1;=2yXl)K*^N&OBnSt4f zXNWc|6vSKyGuO zml=qKW2j^KJ2^fl<2dXulge^orsFp4K9UZ7*5*CFE`N%v3OG6NW{jFv5`DzWmSIOR zUs?|=CXg2yV z!Z;Gw!C2^2HX)FDPQpr9ZP|1}3oT5?!jMNg;NRbLDl*)MCW^?w640Ow%GNWmOmr^; z`Ysj&5}jI&%|sciK_Bn766(@;0YpFxm1JUkRO^Zv5iAy)vUFpyP8P&G5p8j7SR%bq zpaLuwy-x`8EU@|x?Dm2W^e%x7u%vtnFbAxc1-3061|ZIYKqLJCai2xfCJ%w1Mbah@ z(cXnc(kp;?&m!q1K#*sV^b#P-vq;Gj{xx0}ONz#;)?*$R8WRI_M`t5873swTE{}SS zT?2`NX*%IBkTOj1DTMVU&?W@J>i!<0aPO_yAObaQ0og9j$KGKmARi>UZaZK^>~@g9 z^JIJtvMmG~R^NebAkgzdshE>cK{2$Vc3~GK{xg0z_7z8tyWu?vd$C;@8nG9egG;dn z3>lX~bI1X#LIsn-eiVv7jPY<%g8fDqyA%7F-SJ{8=Zb)8OP2MMHh5;|SrOiNsCgp&k% zdfz?oQ`R1HWIOUl*cl*2&@6yqJ* z1gIf}m=4`=xH8?;AgkvfuA$9XEb7$^mI|tICD7hxc+mM3wgyA`ub>$`1aHOBmDkV& z$MFSRc7m~@RxAxe6I-F_@EMz_f>b_3^VBzN0fshxgJ$Rt>=lOiKVXKLKd~#w>nCQ1 z!rHOZh}RBZd;P-180zy2zLwxJ`wx)mU%#;)w9*2R%U{eB>}Pq*Sq4H^fH5xwHodP@JcNc*qs;*YYZ;FtB!l9hdDta^X&nN4an($)jAjljTt^+)48&7w+VFlnZwfJ<5eUnI7fBd<)u0Nh;Zv zl07*&@KE_FfX1iVI2&>rZCnz_1rljsLYbXhLq~NXD1#4|5vWuT@RjyqHi4EKfq=k= z%?R|a7j8%Su#iBc8T5h=&m~YUdaOR?q|=-iM(idw3)5!#yDT z=?{}t4Zzy~R3x!k!qzO9xF<^e3(hMbWzOrQDx#x_#A5|?G?93$K<3Sq$I4h7mG`h@ zG*05NG9D-KSQ(L%c&v=cu_=$0Q8_l{u|jf?%wwsKvc*vTE2DF4%44N`PEPs)mNd0F zz{8y}jFbW>SmG4)ijVg|$=aBy#2g`Z08yj-GzKjdf^b0mZUizM2qKjFY04nz1wZ|S 
zBhC=$1wS24ppkCS3x2vBLuWjp34S_51 zlCe@K%*Z$?6c$J+C6&?#lCBX^l>!prR|>Dd7)U0qRiFhVurTR2KnqAI9ixPbnEtpS zI(`b$Y0Y5V8Utc4iAR(;abc$|Uq$yO;hN}5EY=TQ3jsGaF*Xy6^07A<`W1rPA>m|v z9nzZ&o(_y^A-Xad`XJPwN3r2x+z{1)%}$&GO^E7;R8ZiwP9%^3mKHJz$G8N^6i_lT zgoaRRt$2NvxDL+Zq3uPu5jvlMZ%1hfAYR5P!U$#0#w&C?=op=(lPoEaBqic~Y|yww zFu^lh@X;9Byaf!|BoB8tC54V8M+m74hfO;2xlGV7o5OMx!XG~J+lD)!{hEXu(uT3f zejBddOTd$S39N)e@;aUn3LW_zv{;jHLdo0k8R%6WuDfw85t>_a#8Hc8nG;zIl4PT% z!-PRh2Xpmebyg-x1|6;c4~vv^smhC@wH=7+C4f0P%#t!h;6l0ucu$aQq1XXubI^nW zoGA?`w~i3`DnO3;=u9QyA?k6w#W2yRckT>vqw1&a_4ylA`!IrZ>?{8C>0wyg!=k1~ z&9uIwXBr${f2J_&U{2uv8w2j_3{Nb(m|S#C-FJ)a(O#bC8Eg9N3u=0o+js9|)|VZj z-p_Jc8xyld1GOtYsGeIrFRk7?wd;E4*Lop#Zd3gKYT0KMW>$QvIWcTpf@?p$IR(K< z$oz<{_8awWrg0yV>~BkohARqi``q70^N>~r;f-2r2o=$6qw4EZ#amTBJ>3@>aYkEb zfSO^@-#(ccQ7f1|+^3VS2lIxVSC7f@sP*xyP}w+<^s;wNJllrvR(%|f&$ZEA%MdQ! zd&M9l+iJ$I+pR8cW6pgvPx^(vbeZWZaG4xZ*re5c6e#b`#l zp8j07oyWAjowQostH0HpXQ;KtCjFy*1hq<>l($@c#3;f^6A2=Os5^~|8$T<0YTUF4 z(1%1%01Hoc;1-lQ>jHxSYpz1`i~cd>t|((CUal&!5;STjSUw<#nRrey{;0py+aUea z7O%LC`(w!Wb|?I4qd#@Hhl#|UMbC~&79n8G=4$RC#Id|}*@M{sdnHS#0%XK9_ z-&yPevz3&Xrqts-xyZR5{HRcan1`W78iWSrM*`$@kx(~*)ZP)CNk};dljBHA+zC=U za52g|W+P)wf^v+^-ZcHEoPs9*6QNqp27Ee9=!-hb@f4};fFLO3cUn4tHtj`F&+Q;I zUffQHf?81rT2mfE!#L$39n6<L9@Vv8mh z;@i+CUE)3JuS?j8yXh0FFx03=^g!*J#5ygyuuScX!}=e4k$EbW%h3A*`XpN)r*j8@ z#q<`TGEHw0YCZJ6U@x+Fz-DOL?geQ*Nf$G{{HZeux>8CVN&Itk0Ta{ZH7^i_?C8P^ zvTm7}AnE=`PJ9WfSQrr_fdfN21=Z;SJRh-*i8o+TREOBBCjlJXtS2GObbu!nQu&P$ zK}r*xUgE>Oi5Mc54w-BSxMXKm1GYhSW;KxEk)2r$2SP0^iAXjVLULzT!>2yUvKqc8 zeCn`cF#9vT2t$gxn2D>+iGCfq2Ol*~B^Kb2dr-Apw0Iiv3z8B^TK>?2c!8k`3!tGb zmP9CqPFq5Ah7}P^AP*~G=^t&0(@?;(h2}kb;wOg6?4isA^?4nnHh_qfg6zftu=ThB z9fFV)%6B;ekU??J80Sigdq}*^iI|}(qRR?kHn8vxy#pnuKFRp9fZCLdyhGF?WmRv| z4OAv8dy|lxk+QNk>Fy4nXzoIKQr}6UL}wsZiBTuHlET4ZE@4Miv>}E|TB35K)?!0n~ki^NS+eL$ zGzCq)>6A>euMYc??Cbvqa_Q2dm;C9e?0gkmmxe5$RP^q_-elMW`;=i8g;R8`e;jpo zO47Lzg;O%o0O=&@1c!nt3NIv_l!7TXUElNqoMKD)DRmG_;S>kC1Q9*8XhH;)K$$4B zfE0mCB7lCNNWWrmT`y1jUaCt;{ 
ziEmFP=3tH;SRToxpp#yDBo}I6q%{5O^fhobv~~b)E6$rjbjQ#kCusn-q|j)g6ga?a z|Db?r;3qpGh?giS0$Nacor!U2jvRzmN5WcYXAwx8WuI6litxb14bgY;2|j!0EN*|M5FRo7%o z9qV6(u%$#@tg(jB?$}Tf>X}Vw=Ds^_M(t`3&ZG{gti?xji2iVxl>@8qkxRtlsCgZj zOa2C;rK7wn(ahJ4ggcd+QQ(TPH^$(7;AHQeICv8=2)CfaCKpOIZ16pWBoEfI!~{aU z$1FJ3{IrE=MO9ls3ab|pn+Y~lvf*^?CR&jPf)Z^brscLCGeiL8(@|(X!Izw5!6-?z zGe#Hki3#R(u%k}>C3qpXaimKX%K_2DsLJgH)0E)(y+ZgFz->z>*r_*fd zNrar_AQ=ZH0~HfvOjCSxH=)u|M}+csI1V#YES{>}Ttkyf2z#`6FOh=+_JRX&_YoIy zq_q#e?t6e(irV*sE^D9#qW+{w806lp?2cxX5gJgySWpIjBS3{^M4qFRGOIg5P+`+| zIUuO8>B}4wLBsSB!9)%UJ4p2We^FB>L50GhjJZNYrBCaD-m~dr2a4a*YK2HcD|0Lr zXk?)YY`rsMhUh^fw}cMMNI8Y+^j4`j(;2EE%&@5J+8#xe1Fz(xoHD`$LEX#@u1BEq z0(>jMg}VY&j*XmgtUD?@L>SEqw`Q{Ss|<*JyD~>?AM9Lf?Y*3_sccSYldJwhe}e6B zzjf;PX=vuyp~YFlXZbl<3JY2r{i#%X-1p3;;Sb%urdpW4@cMk@h}K}Cp+nmJ>Ft^D zk20Gj>bwltobtVDWZpZz?ff_DW|~fO*4t^KwWkRKw543a6Y_8l59w48Ln*$XF3L;W zHGEzjT{tty-s{$aiF-C*;S)ESx+CdM7y7auC0*$7!^A~&s*)Hj+T1vEzN^cg zO33rOr5i0*-~YGUy^JU~wauwHs{}_}Yo;unTG=JBjW$Ji* zY=^qZx(~C~S!0sA$&!w`$&j87JDmF;j?+HpGtIw8=@!!EPK3jo#N|^blr3o+(PC)M zsXbd$`gCuf&6j!ihGSmASyP@oxi+lcD*5}J=djW(`7D>{;5|a-`NBuTHbz;-U)g&n zbXch6)nRY9R87@7YxKze@Y}&3Vu=TFV?Nw592jt-%YPiZQt3)dju5uA{XqqH@*%hw zHd6iUP=kl3{HG0=v*r7)x*JQ5w%)5GpJE;wZfW)H%ZmDL(Q!MZw)y-8$6NiaIn08* zpxl8y4ZnDf)_4@57uGQJ_Gysnz-mgWvB&OIgT61crI)Q4^yj(DutxoJ?)8Ch1{IpO z+5gNxU@&-K-RJJCol9J<9-1-h^7d)5JD=#K9JkY+bIk4J6~}9Zt=$7HZNEy^Dh@hI zJkd0Qw8s&yS@F4!P&9xd9Lc5X2hs}Mf{R4;Mnhr%3cExwtvX@{1@bZk_&2Gg5Qjst zZvzqS@^H)`b9lM*xijsCGC9CP2djj0@=lHl^T>h*g%+|=u*go94&kJ;e4y?$Dq>o6 zI2sE7_#2vf8Bz^CntPcL&?-SAu7E;DUsM0*vR>n?|8-f<%}uS>h2ox38%eb<4@%|= zcVuIA!?oCtA`HSBhu)4^@uS50_KQV!+sk_nIEs>vulmyc$rJ4#sLrz?^{C6Vz@3eH zZ~SiT`1r=FDVM!}YtGn!^;>IVTQYi#_~xbm^ZLqT)kE)Ttlu{yO1S4fs?rb*WIv=l@yDCMz*a(HFn_ywd(Jtlvu@~RkGyKz>2}KooOPRC zX+1x^ch%g;leeN%oktnCtm->O=aTREZSmhm>TW2uwmtRe%P>>DlcIN@oTRQ86|(2C z250-E;ZN>v=-ZF?HRAGKv#Y~KeteYfVHiEC-@Cz&e&CG<#%Ro{aF`SCf{n_n0!+P`c&nGfO&~r4#+Vs*?Xy^LN*4X4Sa&C5;JP?46`*%)L$w zQQ?w~LR5d95aN6i>E9r{RM=qHDC7n)0%yaGq#HzU*oWUu!Wh!g$2SOj__X#0Vc3-o 
zg=R=#`rRb@Q_1*EVi1tp@LH&J7gJr5wlR8rlVH>R>{|rc@ia=h1@SxQ28^z{1&NNN z_3;+bSK6|vB?eL_5jWxc1+{Sdq@k7=3mw$NDjY7$5%sSndZX~$z<=pp0pAgAxlJ$$ z5^hy3swJG!;oF3lacr5CMr~I=# z$*G7!ADgyOM^#D&m-3p`h z@{eYC0)f(oppPmK$d3~;samQ+A5W_4f8NJuO3s?)#_^e4>b`DApGazpe5SJ9@tFdp z-SL@1>PS}pg>aQcQ44&yC7~!SK8r_%B>87WaKx+hLa=y674o6#rzloFDWu|!{0rF} z>Rec%PoSg{K8GifDkXoJNAh_b>RPk>!#n}dW5qrp6+Gp~@&AXY`D|sC@CDQ*0{U5r z{+WD%|Z8+0q8{076d?elwQbXQRQ0siMed$ zNq9`^N}T*S9#;`hKY z5)&121XRIX{$Vah2~~wW&i&?;iG;3}&k1%rcICiTMyxfjBPPemYsaUAM~m;5*ma4W?=E?fCo z9*fWPQ5*=sq+r!>+gFJl!KGi0V&sBiZDKMg%o{M>;ni=8pi`rQyj!V z3zU)`P^wUQpg_UKc>;AFB$2;N{9v-m69ZpWS~Z8SNYuf!0VNf8$Ccc{m9qsl z4+PdZPNILH*Z_Z>ozw%+Cr}bATfkQYSdcxBDm%zc!hu-(&m@xfaln!k?SsP-0ku_p z7|dU3AaKr3qmk+k2%-|SfHNyuI)@`r0$2{0DUI^5f0_6>T#&!wVt4>+N_||#&;!XL zTQTOr9cQ>lrTDA>GMaz-DEY(nXMxf{E)&wge+E+HxPY*VQs6R$N*KU}9~LN13`9-I zJt0ttl<*6(1tkpNa=^Y67X!(i(gHb3yasM%E8`(V&<>S!Ms&EZEjenEGbApTtC$AB zO>U+lH--D$k|Q_yXCcE?^c=20a=zF(4&)Elzd6)h@y+|77?fN&{b$h-(IM9WCzRibfWub;3js%{n4hu) z@QW}-X@LNhP*ezPUC|>zsC<@4VPYXrLj|!ygjO;x=mUqBe-^IeLRziV$5T`t+|&g> z?ieTPC_zCdS21;`~%eC0kN4+`0m`18*TDWxbp48OdiP8a1ShG?v4{ZROl zoaV`o1M^UNA6(v$y$=m~O>`4Muv4T4kUc|ySDcnB`30N&6p-%9UMH<50xbYYS9(?{ z43R9Jgt|zCAQLM1p<=EK19^&ePW35P5ui_?2+EMVD!~!-@&Ac9QW8e8AYq~HlGY}8 zm?HgvJW2BVHhC!s;q*{($KXr~tbzo)RLq7+5^hl{vH$=XAPfpG1i30=8H^JsPXYrK zRR?TW5zv7?GpXx3awH5of#oPp%%Of?N^Q8)RN=CpB4s53WE4p{^nsHoKFbsS8+XmA z7n6|KD5k#9$ALRlXI>NiM35LOEdi9HfPSz`$=S2~QiTxV6qW$hE9&<&a^u)gFI5@` zf}@sC*{IA!;ecOhPi&}tC?X;oR->pyHXCS+;=~-u!MOZlfZzRNjS?S$zg>f?{6elk z$$HpO(N&ZdFkiW13d}}APOW$t0DbUBae+J$z?EVr#8IW4Kv+D8MB3 zOC7nza9B!&7Sc#k30;5_DHhwn{eYb)zK_ivteAem9bMS6;y|wCGO7GxxE#0v$9hjC z+e*U)%4-6sRdfP43i<~wCFuuLQb8Pw@8k27(2^sN{LGhHl7&oz;AhmGU-@+k;0HiI R-V@&4!9~=qtUP_a{y$Q^6W{;< diff --git a/snowflake/ml/feature_store/tests/BUILD.bazel b/snowflake/ml/feature_store/tests/BUILD.bazel index aa0f3eba..c1c43f45 100644 --- a/snowflake/ml/feature_store/tests/BUILD.bazel +++ b/snowflake/ml/feature_store/tests/BUILD.bazel @@ -1,6 +1,9 @@ 
load("//bazel:py_rules.bzl", "py_library", "py_test") -package(default_visibility = ["//snowflake/ml/feature_store"]) +package(default_visibility = [ + "//bazel:snowml_public_common", + "//snowflake/ml/feature_store", +]) py_library( name = "common_utils", diff --git a/snowflake/ml/feature_store/tests/feature_store_object_test.py b/snowflake/ml/feature_store/tests/feature_store_object_test.py index 425f3c54..63b617be 100644 --- a/snowflake/ml/feature_store/tests/feature_store_object_test.py +++ b/snowflake/ml/feature_store/tests/feature_store_object_test.py @@ -126,10 +126,10 @@ def test_invalid_entity_name(self) -> None: Entity(name="my_entity", join_keys=["foo", "foo"]) def test_join_keys_exceed_limit(self) -> None: - with self.assertRaisesRegex(ValueError, "Total length of join keys exceeded maximum length.*"): + with self.assertRaisesRegex(ValueError, "Join key: .* exceeds length limit 256."): Entity(name="foo", join_keys=["f" * 257]) - with self.assertRaisesRegex(ValueError, "Total length of join keys exceeded maximum length.*"): - Entity(name="foo", join_keys=["foo" * 50] + ["bar" * 50]) + with self.assertRaisesRegex(ValueError, "Maximum number of join keys are 300, but .* is provided."): + Entity(name="foo", join_keys=["foo"] * 301) def test_equality_check(self) -> None: self.assertTrue(Entity(name="foo", join_keys=["a"]) == Entity(name="foo", join_keys=["a"])) diff --git a/snowflake/ml/feature_store/tests/feature_store_test.py b/snowflake/ml/feature_store/tests/feature_store_test.py index 63859b01..65246002 100644 --- a/snowflake/ml/feature_store/tests/feature_store_test.py +++ b/snowflake/ml/feature_store/tests/feature_store_test.py @@ -180,7 +180,7 @@ def test_create_and_delete_entities(self) -> None: fs = self._create_feature_store() entities = { - "User": Entity("USER", ["uid"]), + "User": Entity("USER", ['"uid"']), "Ad": Entity('"aD"', ["aid"]), "Product": Entity("Product", ["pid", "cid"]), } @@ -193,7 +193,7 @@ def 
test_create_and_delete_entities(self) -> None: actual_df=fs.list_entities().to_pandas(), target_data={ "NAME": ["aD", "PRODUCT", "USER"], - "JOIN_KEYS": ["AID", "PID,CID", "UID"], + "JOIN_KEYS": ['["AID"]', '["CID","PID"]', '["uid"]'], "DESC": ["", "", ""], }, sort_cols=["NAME"], @@ -214,7 +214,7 @@ def test_create_and_delete_entities(self) -> None: actual_df=fs.list_entities().to_pandas(), target_data={ "NAME": ["PRODUCT", "USER"], - "JOIN_KEYS": ["PID,CID", "UID"], + "JOIN_KEYS": ['["CID","PID"]', '["uid"]'], "DESC": ["", ""], }, sort_cols=["NAME"], @@ -229,7 +229,7 @@ def test_create_and_delete_entities(self) -> None: # test delete entity failure with active feature views # create a new feature view - sql = f"SELECT name, id AS uid FROM {self._mock_table}" + sql = f'SELECT name, id AS "uid" FROM {self._mock_table}' fv = FeatureView(name="fv", entities=[entities["User"]], feature_df=self._session.sql(sql), refresh_freq="1m") fs.register_feature_view(feature_view=fv, version="FIRST") with self.assertRaisesRegex(ValueError, "Cannot delete Entity .* due to active FeatureViews.*"): @@ -251,7 +251,7 @@ def test_retrieve_entity(self) -> None: actual_df=fs.list_entities().to_pandas(), target_data={ "NAME": ["FOO", "BAR"], - "JOIN_KEYS": ["A,B", "C"], + "JOIN_KEYS": ['["A","B"]', '["C"]'], "DESC": ["my foo", ""], }, sort_cols=["NAME"], @@ -264,7 +264,7 @@ def test_get_entity_system_error(self) -> None: snowpark_exceptions.SnowparkClientException("Intentional Integ Test Error"), ) - with self.assertRaisesRegex(RuntimeError, "Failed to find object .*"): + with self.assertRaisesRegex(RuntimeError, "Failed to list entities: .*"): fs.get_entity("foo") def test_register_entity_system_error(self) -> None: diff --git a/snowflake/ml/fileset/parquet_parser.py b/snowflake/ml/fileset/parquet_parser.py index b3f7d0ca..c851a159 100644 --- a/snowflake/ml/fileset/parquet_parser.py +++ b/snowflake/ml/fileset/parquet_parser.py @@ -1,4 +1,6 @@ import collections +import logging +import 
time from typing import Any, Deque, Dict, Iterator, List import fsspec @@ -83,7 +85,7 @@ def __iter__(self) -> Iterator[Dict[str, npt.NDArray[Any]]]: np.random.shuffle(files) pa_dataset: ds.Dataset = ds.dataset(files, format="parquet", filesystem=self._fs) - for rb in pa_dataset.to_batches(batch_size=self._dataset_batch_size): + for rb in _retryable_batches(pa_dataset, batch_size=self._dataset_batch_size): if self._shuffle: rb = rb.take(np.random.permutation(rb.num_rows)) self._rb_buffer.append(rb) @@ -138,3 +140,31 @@ def _record_batch_to_arrays(rb: pa.RecordBatch) -> Dict[str, npt.NDArray[Any]]: array = column.to_numpy(zero_copy_only=False) batch_dict[column_schema.name] = array return batch_dict + + +def _retryable_batches( + dataset: ds.Dataset, batch_size: int, max_retries: int = 3, delay: int = 0 +) -> Iterator[pa.RecordBatch]: + """Make the Dataset to_batches retryable.""" + retries = 0 + current_batch_index = 0 + + while True: + try: + for batch_index, batch in enumerate(dataset.to_batches(batch_size=batch_size)): + if batch_index < current_batch_index: + # Skip batches that have already been processed + continue + + yield batch + current_batch_index = batch_index + 1 + # Exit the loop once all batches are processed + break + + except Exception as e: + if retries < max_retries: + retries += 1 + logging.info(f"Error encountered: {e}. 
Retrying {retries}/{max_retries}...") + time.sleep(delay) + else: + raise e diff --git a/snowflake/ml/model/BUILD.bazel b/snowflake/ml/model/BUILD.bazel index 1ce48b8f..5b0847cb 100644 --- a/snowflake/ml/model/BUILD.bazel +++ b/snowflake/ml/model/BUILD.bazel @@ -64,6 +64,30 @@ py_library( ], ) +py_library( + name = "model", + srcs = ["__init__.py"], + deps = [ + "//snowflake/ml/model/_client/model:model_impl", + "//snowflake/ml/model/_client/model:model_version_impl", + "//snowflake/ml/model/models:huggingface_pipeline", + "//snowflake/ml/model/models:llm_model", + ], +) + +py_test( + name = "package_visibility_test", + srcs = ["package_visibility_test.py"], + deps = [ + ":_api", + ":custom_model", + ":deploy_platforms", + ":model", + ":model_signature", + ":type_hints", + ], +) + py_test( name = "custom_model_test", srcs = ["custom_model_test.py"], diff --git a/snowflake/ml/model/__init__.py b/snowflake/ml/model/__init__.py new file mode 100644 index 00000000..bcebb67d --- /dev/null +++ b/snowflake/ml/model/__init__.py @@ -0,0 +1,6 @@ +from snowflake.ml.model._client.model.model_impl import Model +from snowflake.ml.model._client.model.model_version_impl import ModelVersion +from snowflake.ml.model.models.huggingface_pipeline import HuggingFacePipelineModel +from snowflake.ml.model.models.llm import LLM, LLMOptions + +__all__ = ["Model", "ModelVersion", "HuggingFacePipelineModel", "LLM", "LLMOptions"] diff --git a/snowflake/ml/model/_client/model/BUILD.bazel b/snowflake/ml/model/_client/model/BUILD.bazel index 1fc4f5a7..6fb55009 100644 --- a/snowflake/ml/model/_client/model/BUILD.bazel +++ b/snowflake/ml/model/_client/model/BUILD.bazel @@ -8,6 +8,7 @@ py_library( deps = [ ":model_version_impl", "//snowflake/ml/_internal:telemetry", + "//snowflake/ml/_internal/utils:identifier", "//snowflake/ml/_internal/utils:sql_identifier", "//snowflake/ml/model/_client/ops:model_ops", ], @@ -28,11 +29,11 @@ py_library( name = "model_version_impl", srcs = 
["model_version_impl.py"], deps = [ - ":model_method_info", "//snowflake/ml/_internal:telemetry", "//snowflake/ml/_internal/utils:sql_identifier", "//snowflake/ml/model:model_signature", "//snowflake/ml/model/_client/ops:model_ops", + "//snowflake/ml/model/_model_composer/model_manifest:model_manifest_schema", ], ) @@ -45,15 +46,8 @@ py_test( "//snowflake/ml/model:model_signature", "//snowflake/ml/model/_client/ops:metadata_ops", "//snowflake/ml/model/_client/ops:model_ops", + "//snowflake/ml/model/_model_composer/model_manifest:model_manifest_schema", "//snowflake/ml/test_utils:mock_data_frame", "//snowflake/ml/test_utils:mock_session", ], ) - -py_library( - name = "model_method_info", - srcs = ["model_method_info.py"], - deps = [ - "//snowflake/ml/model:model_signature", - ], -) diff --git a/snowflake/ml/model/_client/model/model_impl.py b/snowflake/ml/model/_client/model/model_impl.py index cf781d26..f1591305 100644 --- a/snowflake/ml/model/_client/model/model_impl.py +++ b/snowflake/ml/model/_client/model/model_impl.py @@ -1,7 +1,9 @@ -from typing import List, Union +from typing import Dict, List, Optional, Tuple, Union + +import pandas as pd from snowflake.ml._internal import telemetry -from snowflake.ml._internal.utils import sql_identifier +from snowflake.ml._internal.utils import identifier, sql_identifier from snowflake.ml.model._client.model import model_version_impl from snowflake.ml.model._client.ops import model_ops @@ -37,10 +39,12 @@ def __eq__(self, __value: object) -> bool: @property def name(self) -> str: + """Return the name of the model that can be used to refer to it in SQL.""" return self._model_name.identifier() @property def fully_qualified_name(self) -> str: + """Return the fully qualified name of the model that can be used to refer to it in SQL.""" return self._model_ops._model_version_client.fully_qualified_model_name(self._model_name) @property @@ -49,6 +53,24 @@ def fully_qualified_name(self) -> str: subproject=_TELEMETRY_SUBPROJECT, ) 
def description(self) -> str: + """The description for the model. This is an alias of `comment`.""" + return self.comment + + @description.setter + @telemetry.send_api_usage_telemetry( + project=_TELEMETRY_PROJECT, + subproject=_TELEMETRY_SUBPROJECT, + ) + def description(self, description: str) -> None: + self.comment = description + + @property + @telemetry.send_api_usage_telemetry( + project=_TELEMETRY_PROJECT, + subproject=_TELEMETRY_SUBPROJECT, + ) + def comment(self) -> str: + """The comment to the model.""" statement_params = telemetry.get_statement_params( project=_TELEMETRY_PROJECT, subproject=_TELEMETRY_SUBPROJECT, @@ -58,18 +80,18 @@ def description(self) -> str: statement_params=statement_params, ) - @description.setter + @comment.setter @telemetry.send_api_usage_telemetry( project=_TELEMETRY_PROJECT, subproject=_TELEMETRY_SUBPROJECT, ) - def description(self, description: str) -> None: + def comment(self, comment: str) -> None: statement_params = telemetry.get_statement_params( project=_TELEMETRY_PROJECT, subproject=_TELEMETRY_SUBPROJECT, ) return self._model_ops.set_comment( - comment=description, + comment=comment, model_name=self._model_name, statement_params=statement_params, ) @@ -80,12 +102,13 @@ def description(self, description: str) -> None: subproject=_TELEMETRY_SUBPROJECT, ) def default(self) -> model_version_impl.ModelVersion: + """The default version of the model.""" statement_params = telemetry.get_statement_params( project=_TELEMETRY_PROJECT, subproject=_TELEMETRY_SUBPROJECT, class_name=self.__class__.__name__, ) - default_version_name = self._model_ops._model_version_client.get_default_version( + default_version_name = self._model_ops.get_default_version( model_name=self._model_name, statement_params=statement_params ) return self.version(default_version_name) @@ -105,7 +128,7 @@ def default(self, version: Union[str, model_version_impl.ModelVersion]) -> None: version_name = sql_identifier.SqlIdentifier(version) else: version_name = 
version._version_name - self._model_ops._model_version_client.set_default_version( + self._model_ops.set_default_version( model_name=self._model_name, version_name=version_name, statement_params=statement_params ) @@ -114,13 +137,14 @@ def default(self, version: Union[str, model_version_impl.ModelVersion]) -> None: subproject=_TELEMETRY_SUBPROJECT, ) def version(self, version_name: str) -> model_version_impl.ModelVersion: - """Get a model version object given a version name in the model. + """ + Get a model version object given a version name in the model. Args: - version_name: The name of version + version_name: The name of the version. Raises: - ValueError: Raised when the version requested does not exist. + ValueError: When the requested version does not exist. Returns: The model version object. @@ -149,11 +173,11 @@ def version(self, version_name: str) -> model_version_impl.ModelVersion: project=_TELEMETRY_PROJECT, subproject=_TELEMETRY_SUBPROJECT, ) - def list_versions(self) -> List[model_version_impl.ModelVersion]: - """List all versions in the model. + def versions(self) -> List[model_version_impl.ModelVersion]: + """Get all versions in the model. Returns: - A List of ModelVersion object representing all versions in the model. + A list of ModelVersion objects representing all versions in the model. """ statement_params = telemetry.get_statement_params( project=_TELEMETRY_PROJECT, @@ -172,5 +196,140 @@ def list_versions(self) -> List[model_version_impl.ModelVersion]: for version_name in version_names ] + @telemetry.send_api_usage_telemetry( + project=_TELEMETRY_PROJECT, + subproject=_TELEMETRY_SUBPROJECT, + ) + def show_versions(self) -> pd.DataFrame: + """Show information about all versions in the model. + + Returns: + A Pandas DataFrame showing information about all versions in the model. 
+ """ + statement_params = telemetry.get_statement_params( + project=_TELEMETRY_PROJECT, + subproject=_TELEMETRY_SUBPROJECT, + ) + rows = self._model_ops.show_models_or_versions( + model_name=self._model_name, + statement_params=statement_params, + ) + return pd.DataFrame([row.as_dict() for row in rows]) + def delete_version(self, version_name: str) -> None: raise NotImplementedError("Deleting version has not been supported yet.") + + @telemetry.send_api_usage_telemetry( + project=_TELEMETRY_PROJECT, + subproject=_TELEMETRY_SUBPROJECT, + ) + def show_tags(self) -> Dict[str, str]: + """Get a dictionary showing the tag and its value attached to the model. + + Returns: + The model version object. + """ + statement_params = telemetry.get_statement_params( + project=_TELEMETRY_PROJECT, + subproject=_TELEMETRY_SUBPROJECT, + ) + return self._model_ops.show_tags(model_name=self._model_name, statement_params=statement_params) + + def _parse_tag_name( + self, + tag_name: str, + ) -> Tuple[sql_identifier.SqlIdentifier, sql_identifier.SqlIdentifier, sql_identifier.SqlIdentifier]: + _tag_db, _tag_schema, _tag_name, _ = identifier.parse_schema_level_object_identifier(tag_name) + if _tag_db is None: + tag_db_id = self._model_ops._model_client._database_name + else: + tag_db_id = sql_identifier.SqlIdentifier(_tag_db) + + if _tag_schema is None: + tag_schema_id = self._model_ops._model_client._schema_name + else: + tag_schema_id = sql_identifier.SqlIdentifier(_tag_schema) + + if _tag_name is None: + raise ValueError(f"Unable parse the tag name `{tag_name}` you input.") + + tag_name_id = sql_identifier.SqlIdentifier(_tag_name) + + return tag_db_id, tag_schema_id, tag_name_id + + @telemetry.send_api_usage_telemetry( + project=_TELEMETRY_PROJECT, + subproject=_TELEMETRY_SUBPROJECT, + ) + def get_tag(self, tag_name: str) -> Optional[str]: + """Get the value of a tag attached to the model. + + Args: + tag_name: The name of the tag, can be fully qualified. 
If not fully qualified, the database or schema of + the model will be used. + + Returns: + The tag value as a string if the tag is attached, otherwise None. + """ + statement_params = telemetry.get_statement_params( + project=_TELEMETRY_PROJECT, + subproject=_TELEMETRY_SUBPROJECT, + ) + tag_db_id, tag_schema_id, tag_name_id = self._parse_tag_name(tag_name) + return self._model_ops.get_tag_value( + model_name=self._model_name, + tag_database_name=tag_db_id, + tag_schema_name=tag_schema_id, + tag_name=tag_name_id, + statement_params=statement_params, + ) + + @telemetry.send_api_usage_telemetry( + project=_TELEMETRY_PROJECT, + subproject=_TELEMETRY_SUBPROJECT, + ) + def set_tag(self, tag_name: str, tag_value: str) -> None: + """Set the value of a tag, attaching it to the model if not. + + Args: + tag_name: The name of the tag, can be fully qualified. If not fully qualified, the database or schema of + the model will be used. + tag_value: The value of the tag + """ + statement_params = telemetry.get_statement_params( + project=_TELEMETRY_PROJECT, + subproject=_TELEMETRY_SUBPROJECT, + ) + tag_db_id, tag_schema_id, tag_name_id = self._parse_tag_name(tag_name) + self._model_ops.set_tag( + model_name=self._model_name, + tag_database_name=tag_db_id, + tag_schema_name=tag_schema_id, + tag_name=tag_name_id, + tag_value=tag_value, + statement_params=statement_params, + ) + + @telemetry.send_api_usage_telemetry( + project=_TELEMETRY_PROJECT, + subproject=_TELEMETRY_SUBPROJECT, + ) + def unset_tag(self, tag_name: str) -> None: + """Unset a tag attached to a model. + + Args: + tag_name: The name of the tag, can be fully qualified. If not fully qualified, the database or schema of + the model will be used. 
+ """ + statement_params = telemetry.get_statement_params( + project=_TELEMETRY_PROJECT, + subproject=_TELEMETRY_SUBPROJECT, + ) + tag_db_id, tag_schema_id, tag_name_id = self._parse_tag_name(tag_name) + self._model_ops.unset_tag( + model_name=self._model_name, + tag_database_name=tag_db_id, + tag_schema_name=tag_schema_id, + tag_name=tag_name_id, + statement_params=statement_params, + ) diff --git a/snowflake/ml/model/_client/model/model_impl_test.py b/snowflake/ml/model/_client/model/model_impl_test.py index 1fca7576..c097e09d 100644 --- a/snowflake/ml/model/_client/model/model_impl_test.py +++ b/snowflake/ml/model/_client/model/model_impl_test.py @@ -1,14 +1,14 @@ from typing import cast from unittest import mock +import pandas as pd from absl.testing import absltest from snowflake.ml._internal.utils import sql_identifier from snowflake.ml.model._client.model import model_impl, model_version_impl from snowflake.ml.model._client.ops import model_ops -from snowflake.ml.model._client.sql import model_version from snowflake.ml.test_utils import mock_session -from snowflake.snowpark import Session +from snowflake.snowpark import Row, Session class ModelImplTest(absltest.TestCase): @@ -57,7 +57,7 @@ def test_version_2(self) -> None: statement_params=mock.ANY, ) - def test_list_versions(self) -> None: + def test_versions(self) -> None: m_mv_1 = model_version_impl.ModelVersion._ref( self.m_model._model_ops, model_name=sql_identifier.SqlIdentifier("MODEL"), @@ -73,13 +73,42 @@ def test_list_versions(self) -> None: "list_models_or_versions", return_value=[sql_identifier.SqlIdentifier("V1"), sql_identifier.SqlIdentifier("v1", case_sensitive=True)], ) as mock_list_models_or_versions: - mv_list = self.m_model.list_versions() + mv_list = self.m_model.versions() self.assertListEqual(mv_list, [m_mv_1, m_mv_2]) mock_list_models_or_versions.assert_called_once_with( model_name=sql_identifier.SqlIdentifier("MODEL"), statement_params=mock.ANY, ) + def test_show_versions(self) -> 
None: + m_list_res = [ + Row( + create_on="06/01", + name="v1", + comment="This is a comment", + model_name="MODEL", + is_default_version=True, + ), + Row( + create_on="06/01", + name="V1", + comment="This is a comment", + model_name="MODEL", + is_default_version=False, + ), + ] + with mock.patch.object( + self.m_model._model_ops, + "show_models_or_versions", + return_value=m_list_res, + ) as mock_show_models_or_versions: + mv_info = self.m_model.show_versions() + pd.testing.assert_frame_equal(mv_info, pd.DataFrame([row.as_dict() for row in m_list_res])) + mock_show_models_or_versions.assert_called_once_with( + model_name=sql_identifier.SqlIdentifier("MODEL"), + statement_params=mock.ANY, + ) + def test_description_getter(self) -> None: with mock.patch.object( self.m_model._model_ops, "get_comment", return_value="this is a comment" @@ -99,34 +128,175 @@ def test_description_setter(self) -> None: statement_params=mock.ANY, ) - def test_default_getter(self) -> None: - mock_model_ops = absltest.mock.MagicMock(spec=model_ops.ModelOperator) - mock_model_version_client = absltest.mock.MagicMock(spec=model_version.ModelVersionSQLClient) - self.m_model._model_ops = mock_model_ops - mock_model_ops._session = self.m_session - mock_model_ops._model_version_client = mock_model_version_client - mock_model_version_client.get_default_version.return_value = "V1" + def test_comment_getter(self) -> None: + with mock.patch.object( + self.m_model._model_ops, "get_comment", return_value="this is a comment" + ) as mock_get_comment: + self.assertEqual("this is a comment", self.m_model.comment) + mock_get_comment.assert_called_once_with( + model_name=sql_identifier.SqlIdentifier("MODEL"), + statement_params=mock.ANY, + ) - default_model_version = self.m_model.default - self.assertEqual(default_model_version.version_name, "V1") - mock_model_version_client.get_default_version.assert_called() + def test_comment_setter(self) -> None: + with mock.patch.object(self.m_model._model_ops, 
"set_comment") as mock_set_comment: + self.m_model.comment = "this is a comment" + mock_set_comment.assert_called_once_with( + comment="this is a comment", + model_name=sql_identifier.SqlIdentifier("MODEL"), + statement_params=mock.ANY, + ) + + def test_default_getter(self) -> None: + with mock.patch.object( + self.m_model._model_ops, + "get_default_version", + return_value=sql_identifier.SqlIdentifier("V1", case_sensitive=True), + ) as mock_get_default_version, mock.patch.object( + self.m_model._model_ops, "validate_existence", return_value=True + ): + self.assertEqual("V1", self.m_model.default.version_name) + mock_get_default_version.assert_called_once_with( + model_name=sql_identifier.SqlIdentifier("MODEL"), + statement_params=mock.ANY, + ) def test_default_setter(self) -> None: - mock_model_version_client = absltest.mock.MagicMock(spec=model_version.ModelVersionSQLClient) - self.m_model._model_ops._model_version_client = mock_model_version_client + with mock.patch.object(self.m_model._model_ops, "set_default_version") as mock_set_default_version: + self.m_model.default = "V1" # type: ignore[assignment] + mock_set_default_version.assert_called_once_with( + model_name=sql_identifier.SqlIdentifier("MODEL"), + version_name=sql_identifier.SqlIdentifier("V1"), + statement_params=mock.ANY, + ) - # str - self.m_model.default = "V1" # type: ignore[assignment] - mock_model_version_client.set_default_version.assert_called() + with mock.patch.object(self.m_model._model_ops, "set_default_version") as mock_set_default_version: + mv = model_version_impl.ModelVersion._ref( + self.m_model._model_ops, + model_name=sql_identifier.SqlIdentifier("MODEL"), + version_name=sql_identifier.SqlIdentifier("V2"), + ) + self.m_model.default = mv + mock_set_default_version.assert_called_once_with( + model_name=sql_identifier.SqlIdentifier("MODEL"), + version_name=sql_identifier.SqlIdentifier("V2"), + statement_params=mock.ANY, + ) - # ModelVersion - mv = 
model_version_impl.ModelVersion._ref( - self.m_model._model_ops, - model_name=sql_identifier.SqlIdentifier("MODEL"), - version_name=sql_identifier.SqlIdentifier("V2"), - ) - self.m_model.default = mv - mock_model_version_client.set_default_version.assert_called() + def test_show_tags(self) -> None: + m_res = {'DB."schema".MYTAG': "tag content", 'MYDB.SCHEMA."my_another_tag"': "1"} + with mock.patch.object(self.m_model._model_ops, "show_tags", return_value=m_res) as mock_show_tags: + res = self.m_model.show_tags() + self.assertDictEqual(res, m_res) + mock_show_tags.assert_called_once_with( + model_name=sql_identifier.SqlIdentifier("MODEL"), + statement_params=mock.ANY, + ) + + def test_get_tag_1(self) -> None: + with mock.patch.object(self.m_model._model_ops, "get_tag_value", return_value="tag content") as mock_get_tag: + res = self.m_model.get_tag(tag_name="MYTAG") + self.assertEqual(res, "tag content") + mock_get_tag.assert_called_once_with( + model_name=sql_identifier.SqlIdentifier("MODEL"), + tag_database_name=sql_identifier.SqlIdentifier("TEMP"), + tag_schema_name=sql_identifier.SqlIdentifier("test", case_sensitive=True), + tag_name=sql_identifier.SqlIdentifier("MYTAG"), + statement_params=mock.ANY, + ) + + def test_get_tag_2(self) -> None: + with mock.patch.object(self.m_model._model_ops, "get_tag_value", return_value="tag content") as mock_get_tag: + res = self.m_model.get_tag(tag_name='"schema".MYTAG') + self.assertEqual(res, "tag content") + mock_get_tag.assert_called_once_with( + model_name=sql_identifier.SqlIdentifier("MODEL"), + tag_database_name=sql_identifier.SqlIdentifier("TEMP"), + tag_schema_name=sql_identifier.SqlIdentifier("schema", case_sensitive=True), + tag_name=sql_identifier.SqlIdentifier("MYTAG"), + statement_params=mock.ANY, + ) + + def test_get_tag_3(self) -> None: + with mock.patch.object(self.m_model._model_ops, "get_tag_value", return_value="tag content") as mock_get_tag: + res = self.m_model.get_tag(tag_name='DB."schema".MYTAG') + 
self.assertEqual(res, "tag content") + mock_get_tag.assert_called_once_with( + model_name=sql_identifier.SqlIdentifier("MODEL"), + tag_database_name=sql_identifier.SqlIdentifier("DB"), + tag_schema_name=sql_identifier.SqlIdentifier("schema", case_sensitive=True), + tag_name=sql_identifier.SqlIdentifier("MYTAG"), + statement_params=mock.ANY, + ) + + def test_set_tag_1(self) -> None: + with mock.patch.object(self.m_model._model_ops, "set_tag") as mock_set_tag: + self.m_model.set_tag(tag_name="MYTAG", tag_value="tag content") + mock_set_tag.assert_called_once_with( + model_name=sql_identifier.SqlIdentifier("MODEL"), + tag_database_name=sql_identifier.SqlIdentifier("TEMP"), + tag_schema_name=sql_identifier.SqlIdentifier("test", case_sensitive=True), + tag_name=sql_identifier.SqlIdentifier("MYTAG"), + tag_value="tag content", + statement_params=mock.ANY, + ) + + def test_set_tag_2(self) -> None: + with mock.patch.object(self.m_model._model_ops, "set_tag") as mock_set_tag: + self.m_model.set_tag(tag_name='"schema".MYTAG', tag_value="tag content") + mock_set_tag.assert_called_once_with( + model_name=sql_identifier.SqlIdentifier("MODEL"), + tag_database_name=sql_identifier.SqlIdentifier("TEMP"), + tag_schema_name=sql_identifier.SqlIdentifier("schema", case_sensitive=True), + tag_name=sql_identifier.SqlIdentifier("MYTAG"), + tag_value="tag content", + statement_params=mock.ANY, + ) + + def test_set_tag_3(self) -> None: + with mock.patch.object(self.m_model._model_ops, "set_tag") as mock_set_tag: + self.m_model.set_tag(tag_name='DB."schema".MYTAG', tag_value="tag content") + mock_set_tag.assert_called_once_with( + model_name=sql_identifier.SqlIdentifier("MODEL"), + tag_database_name=sql_identifier.SqlIdentifier("DB"), + tag_schema_name=sql_identifier.SqlIdentifier("schema", case_sensitive=True), + tag_name=sql_identifier.SqlIdentifier("MYTAG"), + tag_value="tag content", + statement_params=mock.ANY, + ) + + def test_unset_tag_1(self) -> None: + with 
mock.patch.object(self.m_model._model_ops, "unset_tag") as mock_unset_tag: + self.m_model.unset_tag(tag_name="MYTAG") + mock_unset_tag.assert_called_once_with( + model_name=sql_identifier.SqlIdentifier("MODEL"), + tag_database_name=sql_identifier.SqlIdentifier("TEMP"), + tag_schema_name=sql_identifier.SqlIdentifier("test", case_sensitive=True), + tag_name=sql_identifier.SqlIdentifier("MYTAG"), + statement_params=mock.ANY, + ) + + def test_unset_tag_2(self) -> None: + with mock.patch.object(self.m_model._model_ops, "unset_tag") as mock_unset_tag: + self.m_model.unset_tag(tag_name='"schema".MYTAG') + mock_unset_tag.assert_called_once_with( + model_name=sql_identifier.SqlIdentifier("MODEL"), + tag_database_name=sql_identifier.SqlIdentifier("TEMP"), + tag_schema_name=sql_identifier.SqlIdentifier("schema", case_sensitive=True), + tag_name=sql_identifier.SqlIdentifier("MYTAG"), + statement_params=mock.ANY, + ) + + def test_unset_tag_3(self) -> None: + with mock.patch.object(self.m_model._model_ops, "unset_tag") as mock_unset_tag: + self.m_model.unset_tag(tag_name='DB."schema".MYTAG') + mock_unset_tag.assert_called_once_with( + model_name=sql_identifier.SqlIdentifier("MODEL"), + tag_database_name=sql_identifier.SqlIdentifier("DB"), + tag_schema_name=sql_identifier.SqlIdentifier("schema", case_sensitive=True), + tag_name=sql_identifier.SqlIdentifier("MYTAG"), + statement_params=mock.ANY, + ) if __name__ == "__main__": diff --git a/snowflake/ml/model/_client/model/model_method_info.py b/snowflake/ml/model/_client/model/model_method_info.py deleted file mode 100644 index 013eace5..00000000 --- a/snowflake/ml/model/_client/model/model_method_info.py +++ /dev/null @@ -1,19 +0,0 @@ -from typing import TypedDict - -from typing_extensions import Required - -from snowflake.ml.model import model_signature - - -class ModelMethodInfo(TypedDict): - """Method information. - - Attributes: - name: Name of the method to be called via SQL. 
- target_method: actual target method name to be called. - signature: The signature of the model method. - """ - - name: Required[str] - target_method: Required[str] - signature: Required[model_signature.ModelSignature] diff --git a/snowflake/ml/model/_client/model/model_version_impl.py b/snowflake/ml/model/_client/model/model_version_impl.py index df353f60..9e38a900 100644 --- a/snowflake/ml/model/_client/model/model_version_impl.py +++ b/snowflake/ml/model/_client/model/model_version_impl.py @@ -3,11 +3,12 @@ import pandas as pd +from snowflake import connector from snowflake.ml._internal import telemetry from snowflake.ml._internal.utils import sql_identifier from snowflake.ml.model import model_signature -from snowflake.ml.model._client.model import model_method_info from snowflake.ml.model._client.ops import metadata_ops, model_ops +from snowflake.ml.model._model_composer.model_manifest import model_manifest_schema from snowflake.snowpark import dataframe _TELEMETRY_PROJECT = "MLOps" @@ -49,14 +50,17 @@ def __eq__(self, __value: object) -> bool: @property def model_name(self) -> str: + """Return the name of the model to which the model version belongs, usable as a reference in SQL.""" return self._model_name.identifier() @property def version_name(self) -> str: + """Return the name of the version to which the model version belongs, usable as a reference in SQL.""" return self._version_name.identifier() @property def fully_qualified_model_name(self) -> str: + """Return the fully qualified name of the model to which the model version belongs.""" return self._model_ops._model_version_client.fully_qualified_model_name(self._model_name) @property @@ -65,6 +69,24 @@ def fully_qualified_model_name(self) -> str: subproject=_TELEMETRY_SUBPROJECT, ) def description(self) -> str: + """The description for the model version. 
This is an alias of `comment`.""" + return self.comment + + @description.setter + @telemetry.send_api_usage_telemetry( + project=_TELEMETRY_PROJECT, + subproject=_TELEMETRY_SUBPROJECT, + ) + def description(self, description: str) -> None: + self.comment = description + + @property + @telemetry.send_api_usage_telemetry( + project=_TELEMETRY_PROJECT, + subproject=_TELEMETRY_SUBPROJECT, + ) + def comment(self) -> str: + """The comment to the model version.""" statement_params = telemetry.get_statement_params( project=_TELEMETRY_PROJECT, subproject=_TELEMETRY_SUBPROJECT, @@ -75,18 +97,18 @@ def description(self) -> str: statement_params=statement_params, ) - @description.setter + @comment.setter @telemetry.send_api_usage_telemetry( project=_TELEMETRY_PROJECT, subproject=_TELEMETRY_SUBPROJECT, ) - def description(self, description: str) -> None: + def comment(self, comment: str) -> None: statement_params = telemetry.get_statement_params( project=_TELEMETRY_PROJECT, subproject=_TELEMETRY_SUBPROJECT, ) return self._model_ops.set_comment( - comment=description, + comment=comment, model_name=self._model_name, version_name=self._version_name, statement_params=statement_params, @@ -96,11 +118,11 @@ def description(self, description: str) -> None: project=_TELEMETRY_PROJECT, subproject=_TELEMETRY_SUBPROJECT, ) - def list_metrics(self) -> Dict[str, Any]: + def show_metrics(self) -> Dict[str, Any]: """Show all metrics logged with the model version. Returns: - A dictionary showing the metrics + A dictionary showing the metrics. """ statement_params = telemetry.get_statement_params( project=_TELEMETRY_PROJECT, @@ -118,15 +140,15 @@ def get_metric(self, metric_name: str) -> Any: """Get the value of a specific metric. Args: - metric_name: The name of the metric + metric_name: The name of the metric. Raises: - KeyError: Raised when the requested metric name does not exist. + KeyError: When the requested metric name does not exist. Returns: The value of the metric. 
""" - metrics = self.list_metrics() + metrics = self.show_metrics() if metric_name not in metrics: raise KeyError(f"Cannot find metric with name {metric_name}.") return metrics[metric_name] @@ -136,17 +158,17 @@ def get_metric(self, metric_name: str) -> Any: subproject=_TELEMETRY_SUBPROJECT, ) def set_metric(self, metric_name: str, value: Any) -> None: - """Set the value of a specific metric name + """Set the value of a specific metric. Args: - metric_name: The name of the metric + metric_name: The name of the metric. value: The value of the metric. """ statement_params = telemetry.get_statement_params( project=_TELEMETRY_PROJECT, subproject=_TELEMETRY_SUBPROJECT, ) - metrics = self.list_metrics() + metrics = self.show_metrics() metrics[metric_name] = value self._model_ops._metadata_ops.save( metadata_ops.ModelVersionMetadataSchema(metrics=metrics), @@ -166,13 +188,13 @@ def delete_metric(self, metric_name: str) -> None: metric_name: The name of the metric to be deleted. Raises: - KeyError: Raised when the requested metric name does not exist. + KeyError: When the requested metric name does not exist. """ statement_params = telemetry.get_statement_params( project=_TELEMETRY_PROJECT, subproject=_TELEMETRY_SUBPROJECT, ) - metrics = self.list_metrics() + metrics = self.show_metrics() if metric_name not in metrics: raise KeyError(f"Cannot find metric with name {metric_name}.") del metrics[metric_name] @@ -183,24 +205,12 @@ def delete_metric(self, metric_name: str) -> None: statement_params=statement_params, ) - @telemetry.send_api_usage_telemetry( - project=_TELEMETRY_PROJECT, - subproject=_TELEMETRY_SUBPROJECT, - ) - def list_methods(self) -> List[model_method_info.ModelMethodInfo]: - """List all method information in a model version that is callable. - - Returns: - A list of ModelMethodInfo object containing the following information: - - name: The name of the method to be called (both in SQL and in Python SDK). 
- - target_method: The original method name in the logged Python object. - - Signature: Python signature of the original method. - """ + # Only used when the model does not contains user_data with client SDK information. + def _legacy_show_functions(self) -> List[model_manifest_schema.ModelFunctionInfo]: statement_params = telemetry.get_statement_params( project=_TELEMETRY_PROJECT, subproject=_TELEMETRY_SUBPROJECT, ) - # TODO(SNOW-986673, SNOW-986675): Avoid parsing manifest and meta file and put Python signature into user_data. manifest = self._model_ops.get_model_version_manifest( model_name=self._model_name, version_name=self._version_name, @@ -211,7 +221,7 @@ def list_methods(self) -> List[model_method_info.ModelMethodInfo]: version_name=self._version_name, statement_params=statement_params, ) - return_methods_info: List[model_method_info.ModelMethodInfo] = [] + return_functions_info: List[model_manifest_schema.ModelFunctionInfo] = [] for method in manifest["methods"]: # Method's name is resolved so we need to use case_sensitive as True to get the user-facing identifier. 
method_name = sql_identifier.SqlIdentifier(method["name"], case_sensitive=True).identifier() @@ -221,14 +231,48 @@ def list_methods(self) -> List[model_method_info.ModelMethodInfo]: ), f"Get unexpected handler name {method['handler']}" target_method = method["handler"].split(".")[1] signature_dict = model_meta["signatures"][target_method] - method_info = model_method_info.ModelMethodInfo( + fi = model_manifest_schema.ModelFunctionInfo( name=method_name, target_method=target_method, signature=model_signature.ModelSignature.from_dict(signature_dict), ) - return_methods_info.append(method_info) + return_functions_info.append(fi) + return return_functions_info + + @telemetry.send_api_usage_telemetry( + project=_TELEMETRY_PROJECT, + subproject=_TELEMETRY_SUBPROJECT, + ) + def show_functions(self) -> List[model_manifest_schema.ModelFunctionInfo]: + """Show all functions information in a model version that is callable. - return return_methods_info + Returns: + A list of ModelFunctionInfo objects containing the following information: + + - name: The name of the function to be called (both in SQL and in Python SDK). + - target_method: The original method name in the logged Python object. + - signature: Python signature of the original method. 
+ """ + statement_params = telemetry.get_statement_params( + project=_TELEMETRY_PROJECT, + subproject=_TELEMETRY_SUBPROJECT, + ) + try: + client_data = self._model_ops.get_client_data_in_user_data( + model_name=self._model_name, + version_name=self._version_name, + statement_params=statement_params, + ) + return [ + model_manifest_schema.ModelFunctionInfo( + name=fi["name"], + target_method=fi["target_method"], + signature=model_signature.ModelSignature.from_dict(fi["signature"]), + ) + for fi in client_data["functions"] + ] + except (NotImplementedError, ValueError, connector.DataError): + return self._legacy_show_functions() @telemetry.send_api_usage_telemetry( project=_TELEMETRY_PROJECT, @@ -238,52 +282,52 @@ def run( self, X: Union[pd.DataFrame, dataframe.DataFrame], *, - method_name: Optional[str] = None, + function_name: Optional[str] = None, ) -> Union[pd.DataFrame, dataframe.DataFrame]: - """Invoke a method in a model version object + """Invoke a method in a model version object. Args: - X: The input data. Could be pandas DataFrame or Snowpark DataFrame - method_name: The method name to run. It is the name you will use to call a method in SQL. Defaults to None. - It can only be None if there is only 1 method. + X: The input data, which could be a pandas DataFrame or Snowpark DataFrame. + function_name: The function name to run. It is the name used to call a function in SQL. + Defaults to None. It can only be None if there is only 1 method. Raises: - ValueError: No method with the corresponding name is available. - ValueError: There are more than 1 target methods available in the model but no method name specified. + ValueError: When no method with the corresponding name is available. + ValueError: When there are more than 1 target methods available in the model but no function name specified. Returns: - The prediction data. + The prediction data. It would be the same type dataframe as your input. 
""" statement_params = telemetry.get_statement_params( project=_TELEMETRY_PROJECT, subproject=_TELEMETRY_SUBPROJECT, ) - methods: List[model_method_info.ModelMethodInfo] = self.list_methods() - if method_name: - req_method_name = sql_identifier.SqlIdentifier(method_name).identifier() - find_method: Callable[[model_method_info.ModelMethodInfo], bool] = ( + functions: List[model_manifest_schema.ModelFunctionInfo] = self.show_functions() + if function_name: + req_method_name = sql_identifier.SqlIdentifier(function_name).identifier() + find_method: Callable[[model_manifest_schema.ModelFunctionInfo], bool] = ( lambda method: method["name"] == req_method_name ) - target_method_info = next( - filter(find_method, methods), + target_function_info = next( + filter(find_method, functions), None, ) - if target_method_info is None: + if target_function_info is None: raise ValueError( - f"There is no method with name {method_name} available in the model" + f"There is no method with name {function_name} available in the model" f" {self.fully_qualified_model_name} version {self.version_name}" ) - elif len(methods) != 1: + elif len(functions) != 1: raise ValueError( f"There are more than 1 target methods available in the model {self.fully_qualified_model_name}" f" version {self.version_name}. Please specify a `method_name` when calling the `run` method." 
) else: - target_method_info = methods[0] + target_function_info = functions[0] return self._model_ops.invoke_method( - method_name=sql_identifier.SqlIdentifier(target_method_info["name"]), - signature=target_method_info["signature"], + method_name=sql_identifier.SqlIdentifier(target_function_info["name"]), + signature=target_function_info["signature"], X=X, model_name=self._model_name, version_name=self._version_name, diff --git a/snowflake/ml/model/_client/model/model_version_impl_test.py b/snowflake/ml/model/_client/model/model_version_impl_test.py index 84f30fcf..100d8921 100644 --- a/snowflake/ml/model/_client/model/model_version_impl_test.py +++ b/snowflake/ml/model/_client/model/model_version_impl_test.py @@ -9,6 +9,7 @@ from snowflake.ml.model import model_signature from snowflake.ml.model._client.model import model_version_impl from snowflake.ml.model._client.ops import metadata_ops, model_ops +from snowflake.ml.model._model_composer.model_manifest import model_manifest_schema from snowflake.ml.test_utils import mock_data_frame, mock_session from snowflake.snowpark import Session @@ -41,10 +42,10 @@ def test_property(self) -> None: self.assertEqual(self.m_mv.fully_qualified_model_name, 'TEMP."test".MODEL') self.assertEqual(self.m_mv.version_name, '"v1"') - def test_list_metrics(self) -> None: + def test_show_metrics(self) -> None: m_metadata = metadata_ops.ModelVersionMetadataSchema(metrics={}) with mock.patch.object(self.m_mv._model_ops._metadata_ops, "load", return_value=m_metadata) as mock_load: - self.assertDictEqual({}, self.m_mv.list_metrics()) + self.assertDictEqual({}, self.m_mv.show_metrics()) mock_load.assert_called_once_with( model_name=sql_identifier.SqlIdentifier("MODEL"), version_name=sql_identifier.SqlIdentifier("v1", case_sensitive=True), @@ -140,7 +141,7 @@ def test_delete_metric_2(self) -> None: ) mock_save.assert_not_called() - def test_list_methods(self) -> None: + def test_show_functions_1(self) -> None: m_manifest = { 
"manifest_version": "1.0", "runtimes": { @@ -212,11 +213,13 @@ def test_list_methods(self) -> None: ) ) with mock.patch.object( + self.m_mv._model_ops, "get_client_data_in_user_data", side_effect=NotImplementedError() + ), mock.patch.object( self.m_mv._model_ops, "get_model_version_manifest", return_value=m_manifest ) as mock_get_model_version_manifest, mock.patch.object( self.m_mv._model_ops, "get_model_version_native_packing_meta", return_value=m_meta_yaml ) as mock_get_model_version_native_packing_meta: - methods = self.m_mv.list_methods() + methods = self.m_mv.show_functions() mock_get_model_version_manifest.assert_called_once_with( model_name=sql_identifier.SqlIdentifier("MODEL"), version_name=sql_identifier.SqlIdentifier("v1", case_sensitive=True), @@ -243,6 +246,57 @@ def test_list_methods(self) -> None: ], ) + def test_show_functions_2(self) -> None: + m_function_info = [ + model_manifest_schema.ModelFunctionInfoDict( + { + "name": '"predict"', + "target_method": "predict", + "signature": _DUMMY_SIG["predict"].to_dict(), + } + ), + model_manifest_schema.ModelFunctionInfoDict( + { + "name": "__CALL__", + "target_method": "__call__", + "signature": _DUMMY_SIG["predict"].to_dict(), + } + ), + ] + m_user_data = model_manifest_schema.SnowparkMLDataDict( + schema_version=model_manifest_schema.MANIFEST_CLIENT_DATA_SCHEMA_VERSION, functions=m_function_info + ) + with mock.patch.object( + self.m_mv._model_ops, "get_client_data_in_user_data", return_value=m_user_data + ) as mock_get_client_data_in_user_data, mock.patch.object( + self.m_mv._model_ops, "get_model_version_manifest" + ) as mock_get_model_version_manifest, mock.patch.object( + self.m_mv._model_ops, "get_model_version_native_packing_meta" + ) as mock_get_model_version_native_packing_meta: + methods = self.m_mv.show_functions() + mock_get_client_data_in_user_data.assert_called_once_with( + model_name=sql_identifier.SqlIdentifier("MODEL"), + version_name=sql_identifier.SqlIdentifier("v1", 
case_sensitive=True), + statement_params=mock.ANY, + ) + self.assertEqual( + methods, + [ + { + "name": '"predict"', + "target_method": "predict", + "signature": _DUMMY_SIG["predict"], + }, + { + "name": "__CALL__", + "target_method": "__call__", + "signature": _DUMMY_SIG["predict"], + }, + ], + ) + mock_get_model_version_manifest.assert_not_called() + mock_get_model_version_native_packing_meta.assert_not_called() + def test_run(self) -> None: m_df = mock_data_frame.MockDataFrame() m_methods = [ @@ -257,22 +311,22 @@ def test_run(self) -> None: "signature": _DUMMY_SIG["predict"], }, ] - with mock.patch.object(self.m_mv, "list_methods", return_value=m_methods) as mock_list_methods: + with mock.patch.object(self.m_mv, "show_functions", return_value=m_methods) as mock_list_methods: with self.assertRaisesRegex(ValueError, "There is no method with name PREDICT available in the model"): - self.m_mv.run(m_df, method_name="PREDICT") + self.m_mv.run(m_df, function_name="PREDICT") mock_list_methods.assert_called_once_with() - with mock.patch.object(self.m_mv, "list_methods", return_value=m_methods) as mock_list_methods: + with mock.patch.object(self.m_mv, "show_functions", return_value=m_methods) as mock_list_methods: with self.assertRaisesRegex(ValueError, "There are more than 1 target methods available in the model"): self.m_mv.run(m_df) mock_list_methods.assert_called_once_with() with mock.patch.object( - self.m_mv, "list_methods", return_value=m_methods + self.m_mv, "show_functions", return_value=m_methods ) as mock_list_methods, mock.patch.object( self.m_mv._model_ops, "invoke_method", return_value=m_df ) as mock_invoke_method: - self.m_mv.run(m_df, method_name='"predict"') + self.m_mv.run(m_df, function_name='"predict"') mock_list_methods.assert_called_once_with() mock_invoke_method.assert_called_once_with( method_name='"predict"', @@ -284,11 +338,11 @@ def test_run(self) -> None: ) with mock.patch.object( - self.m_mv, "list_methods", return_value=m_methods + 
self.m_mv, "show_functions", return_value=m_methods ) as mock_list_methods, mock.patch.object( self.m_mv._model_ops, "invoke_method", return_value=m_df ) as mock_invoke_method: - self.m_mv.run(m_df, method_name="__call__") + self.m_mv.run(m_df, function_name="__call__") mock_list_methods.assert_called_once_with() mock_invoke_method.assert_called_once_with( method_name="__CALL__", @@ -310,7 +364,7 @@ def test_run_without_method_name(self) -> None: ] with mock.patch.object( - self.m_mv, "list_methods", return_value=m_methods + self.m_mv, "show_functions", return_value=m_methods ) as mock_list_methods, mock.patch.object( self.m_mv._model_ops, "invoke_method", return_value=m_df ) as mock_invoke_method: @@ -346,6 +400,27 @@ def test_description_setter(self) -> None: statement_params=mock.ANY, ) + def test_comment_getter(self) -> None: + with mock.patch.object( + self.m_mv._model_ops, "get_comment", return_value="this is a comment" + ) as mock_get_comment: + self.assertEqual("this is a comment", self.m_mv.comment) + mock_get_comment.assert_called_once_with( + model_name=sql_identifier.SqlIdentifier("MODEL"), + version_name=sql_identifier.SqlIdentifier("v1", case_sensitive=True), + statement_params=mock.ANY, + ) + + def test_comment_setter(self) -> None: + with mock.patch.object(self.m_mv._model_ops, "set_comment") as mock_set_comment: + self.m_mv.comment = "this is a comment" + mock_set_comment.assert_called_once_with( + comment="this is a comment", + model_name=sql_identifier.SqlIdentifier("MODEL"), + version_name=sql_identifier.SqlIdentifier("v1", case_sensitive=True), + statement_params=mock.ANY, + ) + if __name__ == "__main__": absltest.main() diff --git a/snowflake/ml/model/_client/ops/BUILD.bazel b/snowflake/ml/model/_client/ops/BUILD.bazel index 4775fa4d..1e914e93 100644 --- a/snowflake/ml/model/_client/ops/BUILD.bazel +++ b/snowflake/ml/model/_client/ops/BUILD.bazel @@ -1,8 +1,9 @@ load("//bazel:py_rules.bzl", "py_library", "py_test") package(default_visibility = 
[ + "//bazel:snowml_public_common", "//snowflake/ml/model/_client/model:__pkg__", - "//snowflake/ml/registry:__pkg__", + "//snowflake/ml/registry/_manager:__pkg__", ]) py_library( @@ -10,12 +11,15 @@ py_library( srcs = ["model_ops.py"], deps = [ ":metadata_ops", + "//snowflake/ml/_internal/utils:identifier", + "//snowflake/ml/_internal/utils:snowflake_env", "//snowflake/ml/_internal/utils:sql_identifier", "//snowflake/ml/model:model_signature", "//snowflake/ml/model:type_hints", "//snowflake/ml/model/_client/sql:model", "//snowflake/ml/model/_client/sql:model_version", "//snowflake/ml/model/_client/sql:stage", + "//snowflake/ml/model/_client/sql:tag", "//snowflake/ml/model/_model_composer:model_composer", "//snowflake/ml/model/_model_composer/model_manifest", "//snowflake/ml/model/_model_composer/model_manifest:model_manifest_schema", @@ -30,6 +34,7 @@ py_test( srcs = ["model_ops_test.py"], deps = [ ":model_ops", + "//snowflake/ml/_internal/utils:snowflake_env", "//snowflake/ml/_internal/utils:sql_identifier", "//snowflake/ml/model:model_signature", "//snowflake/ml/model/_signatures:snowpark_handler", diff --git a/snowflake/ml/model/_client/ops/metadata_ops.py b/snowflake/ml/model/_client/ops/metadata_ops.py index 4ba7c11d..5f0a5818 100644 --- a/snowflake/ml/model/_client/ops/metadata_ops.py +++ b/snowflake/ml/model/_client/ops/metadata_ops.py @@ -68,9 +68,7 @@ def _get_current_metadata_dict( version_info_list = self._model_client.show_versions( model_name=model_name, version_name=version_name, statement_params=statement_params ) - assert len(version_info_list) == 1 - version_info = version_info_list[0] - metadata_str = version_info.metadata + metadata_str = version_info_list[0][self._model_client.MODEL_VERSION_METADATA_COL_NAME] if not metadata_str: return {} res = json.loads(metadata_str) diff --git a/snowflake/ml/model/_client/ops/model_ops.py b/snowflake/ml/model/_client/ops/model_ops.py index 40ce8914..9794807e 100644 --- 
a/snowflake/ml/model/_client/ops/model_ops.py +++ b/snowflake/ml/model/_client/ops/model_ops.py @@ -1,16 +1,19 @@ +import json import pathlib import tempfile from typing import Any, Dict, List, Optional, Union, cast import yaml +from packaging import version -from snowflake.ml._internal.utils import sql_identifier +from snowflake.ml._internal.utils import identifier, snowflake_env, sql_identifier from snowflake.ml.model import model_signature, type_hints from snowflake.ml.model._client.ops import metadata_ops from snowflake.ml.model._client.sql import ( model as model_sql, model_version as model_version_sql, stage as stage_sql, + tag as tag_sql, ) from snowflake.ml.model._model_composer import model_composer from snowflake.ml.model._model_composer.model_manifest import ( @@ -19,9 +22,11 @@ ) from snowflake.ml.model._packager.model_meta import model_meta, model_meta_schema from snowflake.ml.model._signatures import snowpark_handler -from snowflake.snowpark import dataframe, session +from snowflake.snowpark import dataframe, row, session from snowflake.snowpark._internal import utils as snowpark_utils +_TAG_ON_MODEL_AVAILABLE_VERSION = version.parse("8.2.0") + class ModelOperator: def __init__( @@ -50,6 +55,11 @@ def __init__( database_name=database_name, schema_name=schema_name, ) + self._tag_client = tag_sql.ModuleTagSQLClient( + session, + database_name=database_name, + schema_name=schema_name, + ) self._metadata_ops = metadata_ops.MetadataOperator( session, database_name=database_name, @@ -109,22 +119,39 @@ def create_from_stage( statement_params=statement_params, ) - def list_models_or_versions( + def show_models_or_versions( self, *, model_name: Optional[sql_identifier.SqlIdentifier] = None, statement_params: Optional[Dict[str, Any]] = None, - ) -> List[sql_identifier.SqlIdentifier]: + ) -> List[row.Row]: if model_name: - res = self._model_client.show_versions( + return self._model_client.show_versions( model_name=model_name, + validate_result=False, 
statement_params=statement_params, ) else: - res = self._model_client.show_models( + return self._model_client.show_models( + validate_result=False, statement_params=statement_params, ) - return [sql_identifier.SqlIdentifier(row.name, case_sensitive=True) for row in res] + + def list_models_or_versions( + self, + *, + model_name: Optional[sql_identifier.SqlIdentifier] = None, + statement_params: Optional[Dict[str, Any]] = None, + ) -> List[sql_identifier.SqlIdentifier]: + res = self.show_models_or_versions( + model_name=model_name, + statement_params=statement_params, + ) + if model_name: + col_name = self._model_client.MODEL_VERSION_NAME_COL_NAME + else: + col_name = self._model_client.MODEL_NAME_COL_NAME + return [sql_identifier.SqlIdentifier(row[col_name], case_sensitive=True) for row in res] def validate_existence( self, @@ -137,11 +164,13 @@ def validate_existence( res = self._model_client.show_versions( model_name=model_name, version_name=version_name, + validate_result=False, statement_params=statement_params, ) else: res = self._model_client.show_models( model_name=model_name, + validate_result=False, statement_params=statement_params, ) return len(res) == 1 @@ -159,13 +188,14 @@ def get_comment( version_name=version_name, statement_params=statement_params, ) + col_name = self._model_client.MODEL_VERSION_COMMENT_COL_NAME else: res = self._model_client.show_models( model_name=model_name, statement_params=statement_params, ) - assert len(res) == 1 - return cast(str, res[0].comment) + col_name = self._model_client.MODEL_COMMENT_COL_NAME + return cast(str, res[0][col_name]) def set_comment( self, @@ -189,6 +219,123 @@ def set_comment( statement_params=statement_params, ) + def set_default_version( + self, + *, + model_name: sql_identifier.SqlIdentifier, + version_name: sql_identifier.SqlIdentifier, + statement_params: Optional[Dict[str, Any]] = None, + ) -> None: + if not self.validate_existence( + model_name=model_name, version_name=version_name, 
statement_params=statement_params + ): + raise ValueError(f"You cannot set version {version_name} as default version as it does not exist.") + self._model_version_client.set_default_version( + model_name=model_name, version_name=version_name, statement_params=statement_params + ) + + def get_default_version( + self, + *, + model_name: sql_identifier.SqlIdentifier, + statement_params: Optional[Dict[str, Any]] = None, + ) -> sql_identifier.SqlIdentifier: + res = self._model_client.show_models(model_name=model_name, statement_params=statement_params)[0] + return sql_identifier.SqlIdentifier( + res[self._model_client.MODEL_DEFAULT_VERSION_NAME_COL_NAME], case_sensitive=True + ) + + def get_tag_value( + self, + *, + model_name: sql_identifier.SqlIdentifier, + tag_database_name: sql_identifier.SqlIdentifier, + tag_schema_name: sql_identifier.SqlIdentifier, + tag_name: sql_identifier.SqlIdentifier, + statement_params: Optional[Dict[str, Any]] = None, + ) -> Optional[str]: + r = self._tag_client.get_tag_value( + module_name=model_name, + tag_database_name=tag_database_name, + tag_schema_name=tag_schema_name, + tag_name=tag_name, + statement_params=statement_params, + ) + value = r.TAG_VALUE + if value is None: + return value + return str(value) + + def show_tags( + self, + *, + model_name: sql_identifier.SqlIdentifier, + statement_params: Optional[Dict[str, Any]] = None, + ) -> Dict[str, str]: + tags_info = self._tag_client.get_tag_list( + module_name=model_name, + statement_params=statement_params, + ) + res: Dict[str, str] = { + identifier.get_schema_level_object_identifier( + sql_identifier.SqlIdentifier(r.TAG_DATABASE, case_sensitive=True), + sql_identifier.SqlIdentifier(r.TAG_SCHEMA, case_sensitive=True), + sql_identifier.SqlIdentifier(r.TAG_NAME, case_sensitive=True), + ): str(r.TAG_VALUE) + for r in tags_info + } + return res + + def set_tag( + self, + *, + model_name: sql_identifier.SqlIdentifier, + tag_database_name: sql_identifier.SqlIdentifier, + 
tag_schema_name: sql_identifier.SqlIdentifier, + tag_name: sql_identifier.SqlIdentifier, + tag_value: str, + statement_params: Optional[Dict[str, Any]] = None, + ) -> None: + sf_version = snowflake_env.get_current_snowflake_version(self._session, statement_params=statement_params) + if sf_version >= _TAG_ON_MODEL_AVAILABLE_VERSION: + self._tag_client.set_tag_on_model( + model_name=model_name, + tag_database_name=tag_database_name, + tag_schema_name=tag_schema_name, + tag_name=tag_name, + tag_value=tag_value, + statement_params=statement_params, + ) + else: + raise NotImplementedError( + f"`set_tag` won't work before Snowflake version {_TAG_ON_MODEL_AVAILABLE_VERSION}," + f" currently is {sf_version}" + ) + + def unset_tag( + self, + *, + model_name: sql_identifier.SqlIdentifier, + tag_database_name: sql_identifier.SqlIdentifier, + tag_schema_name: sql_identifier.SqlIdentifier, + tag_name: sql_identifier.SqlIdentifier, + statement_params: Optional[Dict[str, Any]] = None, + ) -> None: + sf_version = snowflake_env.get_current_snowflake_version(self._session, statement_params=statement_params) + if sf_version >= _TAG_ON_MODEL_AVAILABLE_VERSION: + self._tag_client.unset_tag_on_model( + model_name=model_name, + tag_database_name=tag_database_name, + tag_schema_name=tag_schema_name, + tag_name=tag_name, + statement_params=statement_params, + ) + else: + raise NotImplementedError( + f"`unset_tag` won't work before Snowflake version {_TAG_ON_MODEL_AVAILABLE_VERSION}," + f" currently is {sf_version}" + ) + def get_model_version_manifest( self, *, @@ -228,6 +375,27 @@ def get_model_version_native_packing_meta( raw_model_meta = yaml.safe_load(f) return model_meta.ModelMetadata._validate_model_metadata(raw_model_meta) + def get_client_data_in_user_data( + self, + *, + model_name: sql_identifier.SqlIdentifier, + version_name: sql_identifier.SqlIdentifier, + statement_params: Optional[Dict[str, Any]] = None, + ) -> model_manifest_schema.SnowparkMLDataDict: + if ( + 
snowflake_env.get_current_snowflake_version(self._session) + < model_manifest_schema.MANIFEST_USER_DATA_ENABLE_VERSION + ): + raise NotImplementedError("User_data has not been supported yet.") + raw_user_data_json_string = self._model_client.show_versions( + model_name=model_name, + version_name=version_name, + statement_params=statement_params, + )[0][self._model_client.MODEL_VERSION_USER_DATA_COL_NAME] + raw_user_data = json.loads(raw_user_data_json_string) + assert isinstance(raw_user_data, dict), "user data should be a dictionary" + return model_manifest.ModelManifest.parse_client_data_from_user_data(raw_user_data) + def invoke_method( self, *, diff --git a/snowflake/ml/model/_client/ops/model_ops_test.py b/snowflake/ml/model/_client/ops/model_ops_test.py index 317f39fd..dd041e3c 100644 --- a/snowflake/ml/model/_client/ops/model_ops_test.py +++ b/snowflake/ml/model/_client/ops/model_ops_test.py @@ -1,3 +1,4 @@ +import json import os import pathlib import tempfile @@ -9,10 +10,12 @@ import pandas as pd import yaml from absl.testing import absltest +from packaging import version -from snowflake.ml._internal.utils import sql_identifier +from snowflake.ml._internal.utils import snowflake_env, sql_identifier from snowflake.ml.model import model_signature from snowflake.ml.model._client.ops import model_ops +from snowflake.ml.model._model_composer.model_manifest import model_manifest_schema from snowflake.ml.model._signatures import snowpark_handler from snowflake.ml.test_utils import mock_data_frame, mock_session from snowflake.snowpark import DataFrame, Row, Session, types as spt @@ -38,6 +41,9 @@ def setUp(self) -> None: database_name=sql_identifier.SqlIdentifier("TEMP"), schema_name=sql_identifier.SqlIdentifier("test", case_sensitive=True), ) + snowflake_env.get_current_snowflake_version = mock.MagicMock( + return_value=model_manifest_schema.MANIFEST_USER_DATA_ENABLE_VERSION + ) def test_prepare_model_stage_path(self) -> None: with 
mock.patch.object(self.m_ops._stage_client, "create_tmp_stage",) as mock_create_stage, mock.patch.object( @@ -53,6 +59,74 @@ def test_prepare_model_stage_path(self) -> None: statement_params=self.m_statement_params, ) + def test_show_models_or_versions_1(self) -> None: + m_list_res = [ + Row( + create_on="06/01", + name="MODEL", + comment="This is a comment", + model_name="MODEL", + database_name="TEMP", + schema_name="test", + default_version_name="V1", + ), + Row( + create_on="06/01", + name="Model", + comment="This is a comment", + model_name="MODEL", + database_name="TEMP", + schema_name="test", + default_version_name="v1", + ), + ] + with mock.patch.object(self.m_ops._model_client, "show_models", return_value=m_list_res) as mock_show_models: + res = self.m_ops.show_models_or_versions( + statement_params=self.m_statement_params, + ) + self.assertListEqual( + res, + m_list_res, + ) + mock_show_models.assert_called_once_with( + validate_result=False, + statement_params=self.m_statement_params, + ) + + def test_show_models_or_versions_2(self) -> None: + m_list_res = [ + Row( + create_on="06/01", + name="v1", + comment="This is a comment", + model_name="MODEL", + is_default_version=True, + ), + Row( + create_on="06/01", + name="V1", + comment="This is a comment", + model_name="MODEL", + is_default_version=False, + ), + ] + with mock.patch.object( + self.m_ops._model_client, "show_versions", return_value=m_list_res + ) as mock_show_versions: + res = self.m_ops.show_models_or_versions( + model_name=sql_identifier.SqlIdentifier("MODEL"), + statement_params=self.m_statement_params, + ) + self.assertListEqual( + res, + m_list_res, + ) + mock_show_versions.assert_called_once_with( + model_name=sql_identifier.SqlIdentifier("MODEL"), + validate_result=False, + statement_params=self.m_statement_params, + ) + def test_list_models_or_versions_1(self) -> None: m_list_res = [ Row( @@ -62,6 +136,7 @@ def test_list_models_or_versions_1(self) -> None: model_name="MODEL", 
database_name="TEMP", schema_name="test", + default_version_name="V1", ), Row( create_on="06/01", @@ -70,6 +145,7 @@ def test_list_models_or_versions_1(self) -> None: model_name="MODEL", database_name="TEMP", schema_name="test", + default_version_name="v1", ), ] with mock.patch.object(self.m_ops._model_client, "show_models", return_value=m_list_res) as mock_show_models: @@ -84,6 +160,7 @@ def test_list_models_or_versions_1(self) -> None: ], ) mock_show_models.assert_called_once_with( + validate_result=False, statement_params=self.m_statement_params, ) @@ -120,6 +197,7 @@ def test_list_models_or_versions_2(self) -> None: ) mock_show_versions.assert_called_once_with( model_name=sql_identifier.SqlIdentifier("MODEL"), + validate_result=False, statement_params=self.m_statement_params, ) @@ -132,6 +210,7 @@ def test_validate_existence_1(self) -> None: model_name="MODEL", database_name="TEMP", schema_name="test", + default_version_name="V1", ), ] with mock.patch.object(self.m_ops._model_client, "show_models", return_value=m_list_res) as mock_show_models: @@ -142,6 +221,7 @@ def test_validate_existence_1(self) -> None: self.assertTrue(res) mock_show_models.assert_called_once_with( model_name=sql_identifier.SqlIdentifier("Model", case_sensitive=True), + validate_result=False, statement_params=self.m_statement_params, ) @@ -155,6 +235,7 @@ def test_validate_existence_2(self) -> None: self.assertFalse(res) mock_show_models.assert_called_once_with( model_name=sql_identifier.SqlIdentifier("Model", case_sensitive=True), + validate_result=False, statement_params=self.m_statement_params, ) @@ -180,6 +261,7 @@ def test_validate_existence_3(self) -> None: mock_show_versions.assert_called_once_with( model_name=sql_identifier.SqlIdentifier("MODEL"), version_name=sql_identifier.SqlIdentifier("v1", case_sensitive=True), + validate_result=False, statement_params=self.m_statement_params, ) @@ -197,6 +279,157 @@ def test_validate_existence_4(self) -> None: 
mock_show_versions.assert_called_once_with( model_name=sql_identifier.SqlIdentifier("MODEL"), version_name=sql_identifier.SqlIdentifier("v1", case_sensitive=True), + validate_result=False, + statement_params=self.m_statement_params, + ) + + def test_get_tag_value_1(self) -> None: + m_list_res: Row = Row(TAG_VALUE="a") + with mock.patch.object(self.m_ops._tag_client, "get_tag_value", return_value=m_list_res) as mock_get_tag_value: + res = self.m_ops.get_tag_value( + model_name=sql_identifier.SqlIdentifier("MODEL"), + tag_database_name=sql_identifier.SqlIdentifier("DB"), + tag_schema_name=sql_identifier.SqlIdentifier("schema", case_sensitive=True), + tag_name=sql_identifier.SqlIdentifier("MYTAG"), + statement_params=self.m_statement_params, + ) + self.assertEqual(res, "a") + mock_get_tag_value.assert_called_once_with( + module_name=sql_identifier.SqlIdentifier("MODEL"), + tag_database_name=sql_identifier.SqlIdentifier("DB"), + tag_schema_name=sql_identifier.SqlIdentifier("schema", case_sensitive=True), + tag_name=sql_identifier.SqlIdentifier("MYTAG"), + statement_params=self.m_statement_params, + ) + + def test_get_tag_value_2(self) -> None: + m_list_res: Row = Row(TAG_VALUE=1) + with mock.patch.object(self.m_ops._tag_client, "get_tag_value", return_value=m_list_res) as mock_get_tag_value: + res = self.m_ops.get_tag_value( + model_name=sql_identifier.SqlIdentifier("MODEL"), + tag_database_name=sql_identifier.SqlIdentifier("DB"), + tag_schema_name=sql_identifier.SqlIdentifier("schema", case_sensitive=True), + tag_name=sql_identifier.SqlIdentifier("MYTAG"), + statement_params=self.m_statement_params, + ) + self.assertEqual(res, "1") + mock_get_tag_value.assert_called_once_with( + module_name=sql_identifier.SqlIdentifier("MODEL"), + tag_database_name=sql_identifier.SqlIdentifier("DB"), + tag_schema_name=sql_identifier.SqlIdentifier("schema", case_sensitive=True), + tag_name=sql_identifier.SqlIdentifier("MYTAG"), + statement_params=self.m_statement_params, + ) + + def 
test_get_tag_value_3(self) -> None: + m_list_res: Row = Row(TAG_VALUE=None) + with mock.patch.object(self.m_ops._tag_client, "get_tag_value", return_value=m_list_res) as mock_get_tag_value: + res = self.m_ops.get_tag_value( + model_name=sql_identifier.SqlIdentifier("MODEL"), + tag_database_name=sql_identifier.SqlIdentifier("DB"), + tag_schema_name=sql_identifier.SqlIdentifier("schema", case_sensitive=True), + tag_name=sql_identifier.SqlIdentifier("MYTAG"), + statement_params=self.m_statement_params, + ) + self.assertIsNone(res) + mock_get_tag_value.assert_called_once_with( + module_name=sql_identifier.SqlIdentifier("MODEL"), + tag_database_name=sql_identifier.SqlIdentifier("DB"), + tag_schema_name=sql_identifier.SqlIdentifier("schema", case_sensitive=True), + tag_name=sql_identifier.SqlIdentifier("MYTAG"), + statement_params=self.m_statement_params, + ) + + def test_show_tags(self) -> None: + m_list_res: List[Row] = [ + Row(TAG_DATABASE="DB", TAG_SCHEMA="schema", TAG_NAME="MYTAG", TAG_VALUE="tag content"), + Row(TAG_DATABASE="MYDB", TAG_SCHEMA="SCHEMA", TAG_NAME="my_another_tag", TAG_VALUE=1), + ] + with mock.patch.object(self.m_ops._tag_client, "get_tag_list", return_value=m_list_res) as mock_get_tag_list: + res = self.m_ops.show_tags( + model_name=sql_identifier.SqlIdentifier("MODEL"), + statement_params=self.m_statement_params, + ) + self.assertDictEqual(res, {'DB."schema".MYTAG': "tag content", 'MYDB.SCHEMA."my_another_tag"': "1"}) + mock_get_tag_list.assert_called_once_with( + module_name=sql_identifier.SqlIdentifier("MODEL"), + statement_params=self.m_statement_params, + ) + + def test_set_tag_fail(self) -> None: + with mock.patch.object( + snowflake_env, + "get_current_snowflake_version", + return_value=version.parse("8.1.0+23d9c914e5"), + ), mock.patch.object(self.m_ops._tag_client, "set_tag_on_model") as mock_set_tag: + with self.assertRaisesRegex(NotImplementedError, "`set_tag` won't work before Snowflake version"): + self.m_ops.set_tag( + 
model_name=sql_identifier.SqlIdentifier("MODEL"), + tag_database_name=sql_identifier.SqlIdentifier("DB"), + tag_schema_name=sql_identifier.SqlIdentifier("schema", case_sensitive=True), + tag_name=sql_identifier.SqlIdentifier("MYTAG"), + tag_value="tag content", + statement_params=self.m_statement_params, + ) + mock_set_tag.assert_not_called() + + def test_set_tag(self) -> None: + with mock.patch.object( + snowflake_env, + "get_current_snowflake_version", + return_value=version.parse("8.2.0+23d9c914e5"), + ), mock.patch.object(self.m_ops._tag_client, "set_tag_on_model") as mock_set_tag: + self.m_ops.set_tag( + model_name=sql_identifier.SqlIdentifier("MODEL"), + tag_database_name=sql_identifier.SqlIdentifier("DB"), + tag_schema_name=sql_identifier.SqlIdentifier("schema", case_sensitive=True), + tag_name=sql_identifier.SqlIdentifier("MYTAG"), + tag_value="tag content", + statement_params=self.m_statement_params, + ) + mock_set_tag.assert_called_once_with( + model_name=sql_identifier.SqlIdentifier("MODEL"), + tag_database_name=sql_identifier.SqlIdentifier("DB"), + tag_schema_name=sql_identifier.SqlIdentifier("schema", case_sensitive=True), + tag_name=sql_identifier.SqlIdentifier("MYTAG"), + tag_value="tag content", + statement_params=self.m_statement_params, + ) + + def test_unset_tag_fail(self) -> None: + with mock.patch.object( + snowflake_env, + "get_current_snowflake_version", + return_value=version.parse("8.1.0+23d9c914e5"), + ), mock.patch.object(self.m_ops._tag_client, "unset_tag_on_model") as mock_unset_tag: + with self.assertRaisesRegex(NotImplementedError, "`unset_tag` won't work before Snowflake version"): + self.m_ops.unset_tag( + model_name=sql_identifier.SqlIdentifier("MODEL"), + tag_database_name=sql_identifier.SqlIdentifier("DB"), + tag_schema_name=sql_identifier.SqlIdentifier("schema", case_sensitive=True), + tag_name=sql_identifier.SqlIdentifier("MYTAG"), + statement_params=self.m_statement_params, + ) + mock_unset_tag.assert_not_called() + + def 
test_unset_tag(self) -> None: + with mock.patch.object( + snowflake_env, + "get_current_snowflake_version", + return_value=version.parse("8.2.0+23d9c914e5"), + ), mock.patch.object(self.m_ops._tag_client, "unset_tag_on_model") as mock_unset_tag: + self.m_ops.unset_tag( + model_name=sql_identifier.SqlIdentifier("MODEL"), + tag_database_name=sql_identifier.SqlIdentifier("DB"), + tag_schema_name=sql_identifier.SqlIdentifier("schema", case_sensitive=True), + tag_name=sql_identifier.SqlIdentifier("MYTAG"), + statement_params=self.m_statement_params, + ) + mock_unset_tag.assert_called_once_with( + model_name=sql_identifier.SqlIdentifier("MODEL"), + tag_database_name=sql_identifier.SqlIdentifier("DB"), + tag_schema_name=sql_identifier.SqlIdentifier("schema", case_sensitive=True), + tag_name=sql_identifier.SqlIdentifier("MYTAG"), statement_params=self.m_statement_params, ) @@ -341,6 +574,7 @@ def test_create_from_stage_2(self) -> None: model_name="MODEL", database_name="TEMP", schema_name="test", + default_version_name="V1", ), ] with mock.patch.object( @@ -377,6 +611,7 @@ def test_create_from_stage_3(self) -> None: model_name="MODEL", database_name="TEMP", schema_name="test", + default_version_name="V1", ), ) m_list_res_versions = [ @@ -407,6 +642,45 @@ def test_create_from_stage_3(self) -> None: mock_create_from_stage.assert_not_called() mock_add_version_from_stagel.assert_not_called() + def test_get_client_data_in_user_data_1(self) -> None: + m_client_data = { + "schema_version": model_manifest_schema.MANIFEST_CLIENT_DATA_SCHEMA_VERSION, + "functions": [ + model_manifest_schema.ModelFunctionInfoDict( + name="PREDICT", + target_method="predict", + signature=_DUMMY_SIG["predict"].to_dict(), + ) + ], + } + m_list_res = [ + Row( + create_on="06/01", + name="v1", + comment="This is a comment", + model_name="MODEL", + user_data=json.dumps({model_manifest_schema.MANIFEST_CLIENT_DATA_KEY_NAME: m_client_data}), + is_default_version=True, + ), + ] + with mock.patch.object( + 
self.m_ops._model_client, "show_versions", return_value=m_list_res + ) as mock_show_versions: + res = self.m_ops.get_client_data_in_user_data( + model_name=sql_identifier.SqlIdentifier("MODEL"), + version_name=sql_identifier.SqlIdentifier('"v1"'), + statement_params=self.m_statement_params, + ) + self.assertDictEqual( + res, + m_client_data, + ) + mock_show_versions.assert_called_once_with( + model_name=sql_identifier.SqlIdentifier("MODEL"), + version_name=sql_identifier.SqlIdentifier('"v1"'), + statement_params=self.m_statement_params, + ) + def test_invoke_method_1(self) -> None: pd_df = pd.DataFrame([["1.0"]], columns=["input"], dtype=np.float32) m_sig = _DUMMY_SIG["predict"] @@ -540,6 +814,7 @@ def test_get_comment_2(self) -> None: model_name="MODEL", database_name="TEMP", schema_name="test", + default_version_name="V1", ), ] with mock.patch.object( @@ -585,6 +860,83 @@ def test_set_comment_2(self) -> None: statement_params=self.m_statement_params, ) + def test_get_default_version(self) -> None: + m_list_res = [ + Row( + create_on="06/01", + name="MODEL", + comment="This is a comment", + model_name="MODEL", + database_name="TEMP", + schema_name="test", + default_version_name="v1", + ), + ] + with mock.patch.object(self.m_ops._model_client, "show_models", return_value=m_list_res) as mock_show_models: + res = self.m_ops.get_default_version( + model_name=sql_identifier.SqlIdentifier("MODEL"), + statement_params=self.m_statement_params, + ) + self.assertEqual(res, sql_identifier.SqlIdentifier("v1", case_sensitive=True)) + mock_show_models.assert_called_once_with( + model_name=sql_identifier.SqlIdentifier("MODEL"), + statement_params=self.m_statement_params, + ) + + def test_set_default_version_1(self) -> None: + m_list_res = [ + Row( + create_on="06/01", + name="v1", + comment="This is a comment", + model_name="MODEL", + is_default_version=True, + ), + ] + with mock.patch.object( + self.m_ops._model_client, "show_versions", return_value=m_list_res + ) as 
mock_show_versions, mock.patch.object( + self.m_ops._model_version_client, "set_default_version" + ) as mock_set_default_version: + self.m_ops.set_default_version( + model_name=sql_identifier.SqlIdentifier("MODEL"), + version_name=sql_identifier.SqlIdentifier('"v1"'), + statement_params=self.m_statement_params, + ) + mock_show_versions.assert_called_once_with( + model_name=sql_identifier.SqlIdentifier("MODEL"), + version_name=sql_identifier.SqlIdentifier('"v1"'), + validate_result=False, + statement_params=self.m_statement_params, + ) + mock_set_default_version.assert_called_once_with( + model_name=sql_identifier.SqlIdentifier("MODEL"), + version_name=sql_identifier.SqlIdentifier('"v1"'), + statement_params=self.m_statement_params, + ) + + def test_set_default_version_2(self) -> None: + with mock.patch.object( + self.m_ops._model_client, "show_versions", return_value=[] + ) as mock_show_versions, mock.patch.object( + self.m_ops._model_version_client, "set_default_version" + ) as mock_set_default_version: + with self.assertRaisesRegex( + ValueError, "You cannot set version V1 as default version as it does not exist." 
+ ): + self.m_ops.set_default_version( + model_name=sql_identifier.SqlIdentifier("MODEL"), + version_name=sql_identifier.SqlIdentifier("V1"), + statement_params=self.m_statement_params, + ) + mock_show_versions.assert_called_once_with( + model_name=sql_identifier.SqlIdentifier("MODEL"), + version_name=sql_identifier.SqlIdentifier("V1"), + validate_result=False, + statement_params=self.m_statement_params, + ) + mock_set_default_version.assert_not_called() + def test_delete_model_or_version(self) -> None: with mock.patch.object( self.m_ops._model_client, diff --git a/snowflake/ml/model/_client/sql/BUILD.bazel b/snowflake/ml/model/_client/sql/BUILD.bazel index 465e2f24..8857ea8b 100644 --- a/snowflake/ml/model/_client/sql/BUILD.bazel +++ b/snowflake/ml/model/_client/sql/BUILD.bazel @@ -1,13 +1,19 @@ load("//bazel:py_rules.bzl", "py_library", "py_test") -package(default_visibility = ["//snowflake/ml/model/_client/ops:__pkg__"]) +package(default_visibility = [ + "//bazel:snowml_public_common", + "//snowflake/ml/model/_client/ops:__pkg__", +]) py_library( name = "model", srcs = ["model.py"], deps = [ "//snowflake/ml/_internal/utils:identifier", + "//snowflake/ml/_internal/utils:query_result_checker", + "//snowflake/ml/_internal/utils:snowflake_env", "//snowflake/ml/_internal/utils:sql_identifier", + "//snowflake/ml/model/_model_composer/model_manifest:model_manifest_schema", ], ) @@ -16,7 +22,9 @@ py_test( srcs = ["model_test.py"], deps = [ ":model", + "//snowflake/ml/_internal/utils:snowflake_env", "//snowflake/ml/_internal/utils:sql_identifier", + "//snowflake/ml/model/_model_composer/model_manifest:model_manifest_schema", "//snowflake/ml/test_utils:mock_data_frame", "//snowflake/ml/test_utils:mock_session", ], @@ -27,6 +35,7 @@ py_library( srcs = ["model_version.py"], deps = [ "//snowflake/ml/_internal/utils:identifier", + "//snowflake/ml/_internal/utils:query_result_checker", "//snowflake/ml/_internal/utils:sql_identifier", ], ) @@ -47,6 +56,7 @@ py_library( srcs = 
["stage.py"], deps = [ "//snowflake/ml/_internal/utils:identifier", + "//snowflake/ml/_internal/utils:query_result_checker", "//snowflake/ml/_internal/utils:sql_identifier", ], ) @@ -61,3 +71,24 @@ py_test( "//snowflake/ml/test_utils:mock_session", ], ) + +py_library( + name = "tag", + srcs = ["tag.py"], + deps = [ + "//snowflake/ml/_internal/utils:identifier", + "//snowflake/ml/_internal/utils:query_result_checker", + "//snowflake/ml/_internal/utils:sql_identifier", + ], +) + +py_test( + name = "tag_test", + srcs = ["tag_test.py"], + deps = [ + ":tag", + "//snowflake/ml/_internal/utils:sql_identifier", + "//snowflake/ml/test_utils:mock_data_frame", + "//snowflake/ml/test_utils:mock_session", + ], +) diff --git a/snowflake/ml/model/_client/sql/model.py b/snowflake/ml/model/_client/sql/model.py index 040b5dea..f07c02dd 100644 --- a/snowflake/ml/model/_client/sql/model.py +++ b/snowflake/ml/model/_client/sql/model.py @@ -1,10 +1,25 @@ from typing import Any, Dict, List, Optional -from snowflake.ml._internal.utils import identifier, sql_identifier +from snowflake.ml._internal.utils import ( + identifier, + query_result_checker, + snowflake_env, + sql_identifier, +) +from snowflake.ml.model._model_composer.model_manifest import model_manifest_schema from snowflake.snowpark import row, session class ModelSQLClient: + MODEL_NAME_COL_NAME = "name" + MODEL_COMMENT_COL_NAME = "comment" + MODEL_DEFAULT_VERSION_NAME_COL_NAME = "default_version_name" + + MODEL_VERSION_NAME_COL_NAME = "name" + MODEL_VERSION_COMMENT_COL_NAME = "comment" + MODEL_VERSION_METADATA_COL_NAME = "metadata" + MODEL_VERSION_USER_DATA_COL_NAME = "user_data" + def __init__( self, session: session.Session, @@ -30,29 +45,60 @@ def show_models( self, *, model_name: Optional[sql_identifier.SqlIdentifier] = None, + validate_result: bool = True, statement_params: Optional[Dict[str, Any]] = None, ) -> List[row.Row]: fully_qualified_schema_name = ".".join([self._database_name.identifier(), 
self._schema_name.identifier()]) like_sql = "" if model_name: like_sql = f" LIKE '{model_name.resolved()}'" - res = self._session.sql(f"SHOW MODELS{like_sql} IN SCHEMA {fully_qualified_schema_name}") - return res.collect(statement_params=statement_params) + res = ( + query_result_checker.SqlResultValidator( + self._session, + f"SHOW MODELS{like_sql} IN SCHEMA {fully_qualified_schema_name}", + statement_params=statement_params, + ) + .has_column(ModelSQLClient.MODEL_NAME_COL_NAME, allow_empty=True) + .has_column(ModelSQLClient.MODEL_COMMENT_COL_NAME, allow_empty=True) + .has_column(ModelSQLClient.MODEL_DEFAULT_VERSION_NAME_COL_NAME, allow_empty=True) + ) + if validate_result and model_name: + res = res.has_dimensions(expected_rows=1) + + return res.validate() def show_versions( self, *, model_name: sql_identifier.SqlIdentifier, version_name: Optional[sql_identifier.SqlIdentifier] = None, + validate_result: bool = True, statement_params: Optional[Dict[str, Any]] = None, ) -> List[row.Row]: like_sql = "" if version_name: like_sql = f" LIKE '{version_name.resolved()}'" - res = self._session.sql(f"SHOW VERSIONS{like_sql} IN MODEL {self.fully_qualified_model_name(model_name)}") - return res.collect(statement_params=statement_params) + res = ( + query_result_checker.SqlResultValidator( + self._session, + f"SHOW VERSIONS{like_sql} IN MODEL {self.fully_qualified_model_name(model_name)}", + statement_params=statement_params, + ) + .has_column(ModelSQLClient.MODEL_VERSION_NAME_COL_NAME, allow_empty=True) + .has_column(ModelSQLClient.MODEL_VERSION_COMMENT_COL_NAME, allow_empty=True) + .has_column(ModelSQLClient.MODEL_VERSION_METADATA_COL_NAME, allow_empty=True) + ) + if ( + snowflake_env.get_current_snowflake_version(self._session) + >= model_manifest_schema.MANIFEST_USER_DATA_ENABLE_VERSION + ): + res = res.has_column(ModelSQLClient.MODEL_VERSION_USER_DATA_COL_NAME, allow_empty=True) + if validate_result and version_name: + res = res.has_dimensions(expected_rows=1) + + return 
res.validate() def set_comment( self, @@ -61,8 +107,11 @@ def set_comment( model_name: sql_identifier.SqlIdentifier, statement_params: Optional[Dict[str, Any]] = None, ) -> None: - comment_sql = f"COMMENT ON MODEL {self.fully_qualified_model_name(model_name)} IS $${comment}$$" - self._session.sql(comment_sql).collect(statement_params=statement_params) + query_result_checker.SqlResultValidator( + self._session, + f"COMMENT ON MODEL {self.fully_qualified_model_name(model_name)} IS $${comment}$$", + statement_params=statement_params, + ).has_dimensions(expected_rows=1, expected_cols=1).validate() def drop_model( self, @@ -70,6 +119,8 @@ def drop_model( model_name: sql_identifier.SqlIdentifier, statement_params: Optional[Dict[str, Any]] = None, ) -> None: - self._session.sql(f"DROP MODEL {self.fully_qualified_model_name(model_name)}").collect( - statement_params=statement_params - ) + query_result_checker.SqlResultValidator( + self._session, + f"DROP MODEL {self.fully_qualified_model_name(model_name)}", + statement_params=statement_params, + ).has_dimensions(expected_rows=1, expected_cols=1).validate() diff --git a/snowflake/ml/model/_client/sql/model_test.py b/snowflake/ml/model/_client/sql/model_test.py index 2d0c133a..ba6fab90 100644 --- a/snowflake/ml/model/_client/sql/model_test.py +++ b/snowflake/ml/model/_client/sql/model_test.py @@ -1,9 +1,11 @@ from typing import cast +from unittest import mock from absl.testing import absltest -from snowflake.ml._internal.utils import sql_identifier +from snowflake.ml._internal.utils import snowflake_env, sql_identifier from snowflake.ml.model._client.sql import model as model_sql +from snowflake.ml.model._model_composer.model_manifest import model_manifest_schema from snowflake.ml.test_utils import mock_data_frame, mock_session from snowflake.snowpark import Row, Session @@ -11,6 +13,9 @@ class ModelSQLTest(absltest.TestCase): def setUp(self) -> None: self.m_session = mock_session.MockSession(conn=None, test_case=self) + 
snowflake_env.get_current_snowflake_version = mock.MagicMock( + return_value=model_manifest_schema.MANIFEST_USER_DATA_ENABLE_VERSION + ) def test_show_models_1(self) -> None: m_statement_params = {"test": "1"} @@ -23,6 +28,7 @@ def test_show_models_1(self) -> None: model_name="MODEL", database_name="TEMP", schema_name="test", + default_version_name="V1", ), Row( create_on="06/01", @@ -31,6 +37,7 @@ def test_show_models_1(self) -> None: model_name="MODEL", database_name="TEMP", schema_name="test", + default_version_name="v1", ), ], collect_statement_params=m_statement_params, @@ -56,6 +63,7 @@ def test_show_models_2(self) -> None: model_name="MODEL", database_name="TEMP", schema_name="test", + default_version_name="V1", ), ], collect_statement_params=m_statement_params, @@ -80,6 +88,8 @@ def test_show_versions_1(self) -> None: name="v1", comment="This is a comment", model_name="MODEL", + metadata="{}", + user_data="{}", is_default_version=True, ), Row( @@ -87,6 +97,8 @@ def test_show_versions_1(self) -> None: name="V1", comment="This is a comment", model_name="MODEL", + metadata="{}", + user_data="{}", is_default_version=False, ), ], @@ -112,6 +124,8 @@ def test_show_versions_2(self) -> None: name="v1", comment="This is a comment", model_name="MODEL", + metadata="{}", + user_data="{}", is_default_version=True, ), ], diff --git a/snowflake/ml/model/_client/sql/model_version.py b/snowflake/ml/model/_client/sql/model_version.py index 7ffc7d22..18ba0a55 100644 --- a/snowflake/ml/model/_client/sql/model_version.py +++ b/snowflake/ml/model/_client/sql/model_version.py @@ -4,7 +4,11 @@ from typing import Any, Dict, List, Optional, Tuple from urllib.parse import ParseResult -from snowflake.ml._internal.utils import identifier, sql_identifier +from snowflake.ml._internal.utils import ( + identifier, + query_result_checker, + sql_identifier, +) from snowflake.snowpark import dataframe, functions as F, session, types as spt from snowflake.snowpark._internal import utils as 
snowpark_utils @@ -46,11 +50,14 @@ def create_from_stage( stage_path: str, statement_params: Optional[Dict[str, Any]] = None, ) -> None: - self._version_name = version_name - self._session.sql( - f"CREATE MODEL {self.fully_qualified_model_name(model_name)} WITH VERSION {version_name.identifier()}" - f" FROM {stage_path}" - ).collect(statement_params=statement_params) + query_result_checker.SqlResultValidator( + self._session, + ( + f"CREATE MODEL {self.fully_qualified_model_name(model_name)} WITH VERSION {version_name.identifier()}" + f" FROM {stage_path}" + ), + statement_params=statement_params, + ).has_dimensions(expected_rows=1, expected_cols=1).validate() # TODO(SNOW-987381): Merge with above when we have `create or alter module m [with] version v1 ...` def add_version_from_stage( @@ -61,11 +68,14 @@ def add_version_from_stage( stage_path: str, statement_params: Optional[Dict[str, Any]] = None, ) -> None: - self._version_name = version_name - self._session.sql( - f"ALTER MODEL {self.fully_qualified_model_name(model_name)} ADD VERSION {version_name.identifier()}" - f" FROM {stage_path}" - ).collect(statement_params=statement_params) + query_result_checker.SqlResultValidator( + self._session, + ( + f"ALTER MODEL {self.fully_qualified_model_name(model_name)} ADD VERSION {version_name.identifier()}" + f" FROM {stage_path}" + ), + statement_params=statement_params, + ).has_dimensions(expected_rows=1, expected_cols=1).validate() def set_default_version( self, @@ -74,24 +84,14 @@ def set_default_version( version_name: sql_identifier.SqlIdentifier, statement_params: Optional[Dict[str, Any]] = None, ) -> None: - self._session.sql( - f"ALTER MODEL {self.fully_qualified_model_name(model_name)} " - f"SET DEFAULT_VERSION = {version_name.identifier()}" - ).collect(statement_params=statement_params) - - def get_default_version( - self, - *, - model_name: sql_identifier.SqlIdentifier, - statement_params: Optional[Dict[str, Any]] = None, - ) -> str: - # TODO: Replace SHOW with 
DESC when available. - default_version: str = ( - self._session.sql(f"SHOW VERSIONS IN MODEL {self.fully_qualified_model_name(model_name)}") - .filter('"is_default_version" = TRUE')[['"name"']] - .collect(statement_params=statement_params)[0][0] - ) - return default_version + query_result_checker.SqlResultValidator( + self._session, + ( + f"ALTER MODEL {self.fully_qualified_model_name(model_name)} " + f"SET DEFAULT_VERSION = {version_name.identifier()}" + ), + statement_params=statement_params, + ).has_dimensions(expected_rows=1, expected_cols=1).validate() def get_file( self, @@ -108,14 +108,14 @@ def get_file( stage_location_url = ParseResult( scheme="snow", netloc="model", path=stage_location, params="", query="", fragment="" ).geturl() - local_location = target_path.absolute().as_posix() - local_location_url = ParseResult( - scheme="file", netloc="", path=local_location, params="", query="", fragment="" - ).geturl() + local_location = target_path.resolve().as_posix() + local_location_url = f"file://{local_location}" - self._session.sql( - f"GET {_normalize_url_for_sql(stage_location_url)} {_normalize_url_for_sql(local_location_url)}" - ).collect(statement_params=statement_params) + query_result_checker.SqlResultValidator( + self._session, + f"GET {_normalize_url_for_sql(stage_location_url)} {_normalize_url_for_sql(local_location_url)}", + statement_params=statement_params, + ).has_dimensions(expected_rows=1).validate() return target_path / file_path.name def set_comment( @@ -126,11 +126,14 @@ def set_comment( version_name: sql_identifier.SqlIdentifier, statement_params: Optional[Dict[str, Any]] = None, ) -> None: - comment_sql = ( - f"ALTER MODEL {self.fully_qualified_model_name(model_name)} " - f"MODIFY VERSION {version_name.identifier()} SET COMMENT=$${comment}$$" - ) - self._session.sql(comment_sql).collect(statement_params=statement_params) + query_result_checker.SqlResultValidator( + self._session, + ( + f"ALTER MODEL 
{self.fully_qualified_model_name(model_name)} " + f"MODIFY VERSION {version_name.identifier()} SET COMMENT=$${comment}$$" + ), + statement_params=statement_params, + ).has_dimensions(expected_rows=1, expected_cols=1).validate() def invoke_method( self, @@ -206,8 +209,11 @@ def set_metadata( statement_params: Optional[Dict[str, Any]] = None, ) -> None: json_metadata = json.dumps(metadata_dict) - sql = ( - f"ALTER MODEL {self.fully_qualified_model_name(model_name)} MODIFY VERSION {version_name.identifier()}" - f" SET METADATA=$${json_metadata}$$" - ) - self._session.sql(sql).collect(statement_params=statement_params) + query_result_checker.SqlResultValidator( + self._session, + ( + f"ALTER MODEL {self.fully_qualified_model_name(model_name)} MODIFY VERSION {version_name.identifier()}" + f" SET METADATA=$${json_metadata}$$" + ), + statement_params=statement_params, + ).has_dimensions(expected_rows=1, expected_cols=1).validate() diff --git a/snowflake/ml/model/_client/sql/model_version_test.py b/snowflake/ml/model/_client/sql/model_version_test.py index f732f714..0557fe77 100644 --- a/snowflake/ml/model/_client/sql/model_version_test.py +++ b/snowflake/ml/model/_client/sql/model_version_test.py @@ -53,6 +53,23 @@ def test_add_version_from_stage(self) -> None: statement_params=m_statement_params, ) + def test_set_default_version(self) -> None: + m_statement_params = {"test": "1"} + m_df = mock_data_frame.MockDataFrame( + collect_result=[Row("Model MODEL successfully altered.")], collect_statement_params=m_statement_params + ) + self.m_session.add_mock_sql("""ALTER MODEL TEMP."test".MODEL SET DEFAULT_VERSION = V2""", m_df) + c_session = cast(Session, self.m_session) + model_version_sql.ModelVersionSQLClient( + c_session, + database_name=sql_identifier.SqlIdentifier("TEMP"), + schema_name=sql_identifier.SqlIdentifier("test", case_sensitive=True), + ).set_default_version( + model_name=sql_identifier.SqlIdentifier("MODEL"), + version_name=sql_identifier.SqlIdentifier("V2"), 
+ statement_params=m_statement_params, + ) + def test_set_comment(self) -> None: m_statement_params = {"test": "1"} m_df = mock_data_frame.MockDataFrame(collect_result=[Row("")], collect_statement_params=m_statement_params) @@ -74,7 +91,10 @@ def test_set_comment(self) -> None: def test_get_file(self) -> None: m_statement_params = {"test": "1"} - m_df = mock_data_frame.MockDataFrame(collect_result=[Row()], collect_statement_params=m_statement_params) + m_df = mock_data_frame.MockDataFrame( + collect_result=[Row(file="946964364/MANIFEST.yml", size=419, status="DOWNLOADED", message="")], + collect_statement_params=m_statement_params, + ) self.m_session.add_mock_sql( """GET 'snow://model/TEMP."test".MODEL/versions/v1/model.yaml' 'file:///tmp'""", m_df ) diff --git a/snowflake/ml/model/_client/sql/stage.py b/snowflake/ml/model/_client/sql/stage.py index 8b9750a6..b40de375 100644 --- a/snowflake/ml/model/_client/sql/stage.py +++ b/snowflake/ml/model/_client/sql/stage.py @@ -1,6 +1,10 @@ from typing import Any, Dict, Optional -from snowflake.ml._internal.utils import identifier, sql_identifier +from snowflake.ml._internal.utils import ( + identifier, + query_result_checker, + sql_identifier, +) from snowflake.snowpark import session @@ -35,6 +39,8 @@ def create_tmp_stage( stage_name: sql_identifier.SqlIdentifier, statement_params: Optional[Dict[str, Any]] = None, ) -> None: - self._session.sql(f"CREATE TEMPORARY STAGE {self.fully_qualified_stage_name(stage_name)}").collect( - statement_params=statement_params - ) + query_result_checker.SqlResultValidator( + self._session, + f"CREATE TEMPORARY STAGE {self.fully_qualified_stage_name(stage_name)}", + statement_params=statement_params, + ).has_dimensions(expected_rows=1, expected_cols=1).validate() diff --git a/snowflake/ml/model/_client/sql/tag.py b/snowflake/ml/model/_client/sql/tag.py new file mode 100644 index 00000000..ac015a1a --- /dev/null +++ b/snowflake/ml/model/_client/sql/tag.py @@ -0,0 +1,118 @@ +from typing 
from typing import Any, Dict, List, Optional

from snowflake.ml._internal.utils import (
    identifier,
    query_result_checker,
    sql_identifier,
)
from snowflake.snowpark import row, session


class ModuleTagSQLClient:
    """Issues tag-related SQL statements against module-level objects (models).

    All statements are scoped to a single database/schema pair that is fixed at
    construction time; the tag itself may live in a different database/schema.
    """

    def __init__(
        self,
        session: session.Session,
        *,
        database_name: sql_identifier.SqlIdentifier,
        schema_name: sql_identifier.SqlIdentifier,
    ) -> None:
        self._session = session
        self._database_name = database_name
        self._schema_name = schema_name

    def __eq__(self, __value: object) -> bool:
        # Two clients are interchangeable exactly when they target the same
        # database/schema pair; the session is deliberately not compared.
        return (
            isinstance(__value, ModuleTagSQLClient)
            and self._database_name == __value._database_name
            and self._schema_name == __value._schema_name
        )

    def fully_qualified_module_name(
        self,
        module_name: sql_identifier.SqlIdentifier,
    ) -> str:
        """Return the three-part ``db.schema.module`` identifier for *module_name*."""
        return identifier.get_schema_level_object_identifier(
            self._database_name.identifier(),
            self._schema_name.identifier(),
            module_name.identifier(),
        )

    def _fully_qualified_tag_name(
        self,
        tag_database_name: sql_identifier.SqlIdentifier,
        tag_schema_name: sql_identifier.SqlIdentifier,
        tag_name: sql_identifier.SqlIdentifier,
    ) -> str:
        # The tag may be defined in a different database/schema than the model
        # it annotates, so its qualified name is built from caller-supplied parts.
        return identifier.get_schema_level_object_identifier(
            tag_database_name.identifier(),
            tag_schema_name.identifier(),
            tag_name.identifier(),
        )

    def set_tag_on_model(
        self,
        model_name: sql_identifier.SqlIdentifier,
        *,
        tag_database_name: sql_identifier.SqlIdentifier,
        tag_schema_name: sql_identifier.SqlIdentifier,
        tag_name: sql_identifier.SqlIdentifier,
        tag_value: str,
        statement_params: Optional[Dict[str, Any]] = None,
    ) -> None:
        """Attach *tag_name* with *tag_value* to the given model.

        Expects the server to answer with exactly one status row.
        """
        fq_model_name = self.fully_qualified_module_name(model_name)
        fq_tag_name = self._fully_qualified_tag_name(tag_database_name, tag_schema_name, tag_name)
        # NOTE(review): tag_value is spliced into a $$...$$ dollar-quoted literal;
        # a value containing "$$" would break the statement — confirm upstream validation.
        query_result_checker.SqlResultValidator(
            self._session,
            f"ALTER MODEL {fq_model_name} SET TAG {fq_tag_name} = $${tag_value}$$",
            statement_params=statement_params,
        ).has_dimensions(expected_rows=1, expected_cols=1).validate()

    def unset_tag_on_model(
        self,
        model_name: sql_identifier.SqlIdentifier,
        *,
        tag_database_name: sql_identifier.SqlIdentifier,
        tag_schema_name: sql_identifier.SqlIdentifier,
        tag_name: sql_identifier.SqlIdentifier,
        statement_params: Optional[Dict[str, Any]] = None,
    ) -> None:
        """Remove *tag_name* from the given model; expects one status row back."""
        fq_model_name = self.fully_qualified_module_name(model_name)
        fq_tag_name = self._fully_qualified_tag_name(tag_database_name, tag_schema_name, tag_name)
        query_result_checker.SqlResultValidator(
            self._session,
            f"ALTER MODEL {fq_model_name} UNSET TAG {fq_tag_name}",
            statement_params=statement_params,
        ).has_dimensions(expected_rows=1, expected_cols=1).validate()

    def get_tag_value(
        self,
        module_name: sql_identifier.SqlIdentifier,
        *,
        tag_database_name: sql_identifier.SqlIdentifier,
        tag_schema_name: sql_identifier.SqlIdentifier,
        tag_name: sql_identifier.SqlIdentifier,
        statement_params: Optional[Dict[str, Any]] = None,
    ) -> row.Row:
        """Read the current value of *tag_name* on a module via SYSTEM$GET_TAG.

        Returns:
            The single result row; its ``TAG_VALUE`` column holds the value.
        """
        fq_module_name = self.fully_qualified_module_name(module_name)
        fq_tag_name = self._fully_qualified_tag_name(tag_database_name, tag_schema_name, tag_name)
        validator = query_result_checker.SqlResultValidator(
            self._session,
            f"SELECT SYSTEM$GET_TAG($${fq_tag_name}$$, $${fq_module_name}$$, 'MODULE') AS TAG_VALUE",
            statement_params=statement_params,
        )
        # Exactly one row with exactly the TAG_VALUE column is expected.
        rows = validator.has_dimensions(expected_rows=1, expected_cols=1).has_column("TAG_VALUE").validate()
        return rows[0]

    def get_tag_list(
        self,
        module_name: sql_identifier.SqlIdentifier,
        *,
        statement_params: Optional[Dict[str, Any]] = None,
    ) -> List[row.Row]:
        """List every tag reference attached to a module.

        Returns:
            All rows from INFORMATION_SCHEMA.TAG_REFERENCES; may be empty when
            the module carries no tags (hence ``allow_empty=True`` below).
        """
        fq_module_name = self.fully_qualified_module_name(module_name)
        validator = query_result_checker.SqlResultValidator(
            self._session,
            f"""SELECT TAG_DATABASE, TAG_SCHEMA, TAG_NAME, TAG_VALUE
FROM TABLE({self._database_name.identifier()}.INFORMATION_SCHEMA.TAG_REFERENCES($${fq_module_name}$$, 'MODULE'))""",
            statement_params=statement_params,
        )
        for column in ("TAG_DATABASE", "TAG_SCHEMA", "TAG_NAME", "TAG_VALUE"):
            validator = validator.has_column(column, allow_empty=True)
        return validator.validate()
from typing import cast

from absl.testing import absltest

from snowflake.ml._internal.utils import sql_identifier
from snowflake.ml.model._client.sql import tag as tag_sql
from snowflake.ml.test_utils import mock_data_frame, mock_session
from snowflake.snowpark import Row, Session


class ModuleTagSQLTest(absltest.TestCase):
    """Unit tests for ModuleTagSQLClient, driven by a mocked Snowpark session."""

    def setUp(self) -> None:
        self.m_session = mock_session.MockSession(conn=None, test_case=self)

    def _client(self) -> tag_sql.ModuleTagSQLClient:
        # All tests target the same TEMP."test" schema (schema name is case-sensitive).
        return tag_sql.ModuleTagSQLClient(
            cast(Session, self.m_session),
            database_name=sql_identifier.SqlIdentifier("TEMP"),
            schema_name=sql_identifier.SqlIdentifier("test", case_sensitive=True),
        )

    def test_set_tag_on_model(self) -> None:
        expected_params = {"test": "1"}
        mocked_df = mock_data_frame.MockDataFrame(
            collect_result=[Row("Tag MYTAG successfully set.")], collect_statement_params=expected_params
        )
        # The mock session verifies the exact SQL text issued by the client.
        self.m_session.add_mock_sql(
            """ALTER MODEL TEMP."test".MODEL SET TAG DB."schema".MYTAG = $$tag content$$""", mocked_df
        )
        self._client().set_tag_on_model(
            model_name=sql_identifier.SqlIdentifier("MODEL"),
            tag_database_name=sql_identifier.SqlIdentifier("DB"),
            tag_schema_name=sql_identifier.SqlIdentifier("schema", case_sensitive=True),
            tag_name=sql_identifier.SqlIdentifier("MYTAG"),
            tag_value="tag content",
            statement_params=expected_params,
        )

    def test_unset_tag_on_model(self) -> None:
        expected_params = {"test": "1"}
        mocked_df = mock_data_frame.MockDataFrame(
            collect_result=[Row("Tag MYTAG successfully unset.")], collect_statement_params=expected_params
        )
        self.m_session.add_mock_sql("""ALTER MODEL TEMP."test".MODEL UNSET TAG DB."schema".MYTAG""", mocked_df)
        self._client().unset_tag_on_model(
            model_name=sql_identifier.SqlIdentifier("MODEL"),
            tag_database_name=sql_identifier.SqlIdentifier("DB"),
            tag_schema_name=sql_identifier.SqlIdentifier("schema", case_sensitive=True),
            tag_name=sql_identifier.SqlIdentifier("MYTAG"),
            statement_params=expected_params,
        )

    def test_get_tag_value(self) -> None:
        expected_params = {"test": "1"}
        mocked_df = mock_data_frame.MockDataFrame(
            collect_result=[Row(TAG_VALUE="tag content")], collect_statement_params=expected_params
        )
        self.m_session.add_mock_sql(
            """SELECT SYSTEM$GET_TAG($$DB."schema".MYTAG$$, $$TEMP."test".MODEL$$, 'MODULE') AS TAG_VALUE""",
            mocked_df,
        )
        actual = self._client().get_tag_value(
            module_name=sql_identifier.SqlIdentifier("MODEL"),
            tag_database_name=sql_identifier.SqlIdentifier("DB"),
            tag_schema_name=sql_identifier.SqlIdentifier("schema", case_sensitive=True),
            tag_name=sql_identifier.SqlIdentifier("MYTAG"),
            statement_params=expected_params,
        )
        self.assertEqual(actual, Row(TAG_VALUE="tag content"))

    def test_list_tags(self) -> None:
        expected_params = {"test": "1"}
        reference_row = Row(TAG_DATABASE="DB", TAG_SCHEMA="schema", TAG_NAME="MYTAG", TAG_VALUE="tag content")
        mocked_df = mock_data_frame.MockDataFrame(
            collect_result=[reference_row],
            collect_statement_params=expected_params,
        )
        self.m_session.add_mock_sql(
            """SELECT TAG_DATABASE, TAG_SCHEMA, TAG_NAME, TAG_VALUE
FROM TABLE(TEMP.INFORMATION_SCHEMA.TAG_REFERENCES($$TEMP."test".MODEL$$, 'MODULE'))""",
            mocked_df,
        )
        actual = self._client().get_tag_list(
            module_name=sql_identifier.SqlIdentifier("MODEL"),
            statement_params=expected_params,
        )
        self.assertListEqual(actual, [reference_row])


if __name__ == "__main__":
    absltest.main()
snowflake.ml._internal.exceptions import ( error_codes, exceptions as snowml_exceptions, ) -from snowflake.ml._internal.utils import spcs_image_registry from snowflake.ml.model._deploy_client.image_builds import base_image_builder logger = logging.getLogger(__name__) @@ -106,7 +106,7 @@ def _cleanup_local_image(docker_config_dir: str) -> None: self._run_docker_commands(commands) self.validate_docker_client_env() - with spcs_image_registry.generate_image_registry_credential( + with credential.generate_image_registry_credential( self.session ) as registry_cred, tempfile.TemporaryDirectory() as docker_config_dir: try: diff --git a/snowflake/ml/model/_deploy_client/image_builds/docker_context.py b/snowflake/ml/model/_deploy_client/image_builds/docker_context.py index 56aa0b57..0d732045 100644 --- a/snowflake/ml/model/_deploy_client/image_builds/docker_context.py +++ b/snowflake/ml/model/_deploy_client/image_builds/docker_context.py @@ -2,7 +2,6 @@ import posixpath import shutil import string -from abc import ABC from typing import Optional import importlib_resources @@ -15,7 +14,7 @@ from snowflake.snowpark import FileOperation, Session -class DockerContext(ABC): +class DockerContext: """ Constructs the Docker context directory required for image building. 
""" @@ -53,12 +52,13 @@ def build(self) -> None: def _copy_entrypoint_script_to_docker_context(self) -> None: """Copy gunicorn_run.sh entrypoint to docker context directory.""" - with importlib_resources.as_file( - importlib_resources.files(image_builds).joinpath( # type: ignore[no-untyped-call] - constants.ENTRYPOINT_SCRIPT - ) - ) as path: - shutil.copy(path, os.path.join(self.context_dir, constants.ENTRYPOINT_SCRIPT)) + script_path = importlib_resources.files(image_builds).joinpath( # type: ignore[no-untyped-call] + constants.ENTRYPOINT_SCRIPT + ) + target_path = os.path.join(self.context_dir, constants.ENTRYPOINT_SCRIPT) + + with open(script_path, encoding="utf-8") as source_file, file_utils.open_file(target_path, "w") as target_file: + target_file.write(source_file.read()) def _copy_model_env_dependency_to_docker_context(self) -> None: """ diff --git a/snowflake/ml/model/_deploy_client/image_builds/inference_server/main.py b/snowflake/ml/model/_deploy_client/image_builds/inference_server/main.py index 77bdc5eb..0626ec73 100644 --- a/snowflake/ml/model/_deploy_client/image_builds/inference_server/main.py +++ b/snowflake/ml/model/_deploy_client/image_builds/inference_server/main.py @@ -105,6 +105,8 @@ def _run_setup() -> None: # TODO (Server-side Model Rollout): # Keep try block only + # SPCS spec will convert all environment variables as strings. 
+ use_gpu = os.environ.get("SNOWML_USE_GPU", "False").lower() == "true" try: from snowflake.ml.model._packager import model_packager @@ -112,9 +114,7 @@ def _run_setup() -> None: pk.load( as_custom_model=True, meta_only=False, - options=model_types.ModelLoadOption( - {"use_gpu": cast(bool, os.environ.get("SNOWML_USE_GPU", False))} - ), + options=model_types.ModelLoadOption({"use_gpu": use_gpu}), ) _LOADED_MODEL = pk.model _LOADED_META = pk.meta @@ -132,9 +132,7 @@ def _run_setup() -> None: _LOADED_MODEL, meta_LOADED_META = model_api._load( local_dir_path=extracted_dir, as_custom_model=True, - options=model_types.ModelLoadOption( - {"use_gpu": cast(bool, os.environ.get("SNOWML_USE_GPU", False))} - ), + options=model_types.ModelLoadOption({"use_gpu": use_gpu}), ) _MODEL_LOADING_STATE = _ModelLoadingState.SUCCEEDED logger.info("Successfully loaded model into memory") diff --git a/snowflake/ml/model/_deploy_client/image_builds/server_image_builder.py b/snowflake/ml/model/_deploy_client/image_builds/server_image_builder.py index 963a3c0f..0713911d 100644 --- a/snowflake/ml/model/_deploy_client/image_builds/server_image_builder.py +++ b/snowflake/ml/model/_deploy_client/image_builds/server_image_builder.py @@ -7,6 +7,9 @@ from snowflake import snowpark from snowflake.ml._internal import file_utils +from snowflake.ml._internal.container_services.image_registry import ( + registry_client as image_registry_client, +) from snowflake.ml._internal.exceptions import ( error_codes, exceptions as snowml_exceptions, @@ -14,11 +17,7 @@ from snowflake.ml._internal.utils import identifier from snowflake.ml.model._deploy_client import image_builds from snowflake.ml.model._deploy_client.image_builds import base_image_builder -from snowflake.ml.model._deploy_client.utils import ( - constants, - image_registry_client, - snowservice_client, -) +from snowflake.ml.model._deploy_client.utils import constants, snowservice_client logger = logging.getLogger(__name__) @@ -117,7 +116,7 @@ def 
_construct_and_upload_docker_entrypoint_script(self, context_tarball_stage_l kaniko_shell_file = os.path.join(self.context_dir, constants.KANIKO_SHELL_SCRIPT_NAME) - with open(kaniko_shell_file, "w+", encoding="utf-8") as script_file: + with file_utils.open_file(kaniko_shell_file, "w+") as script_file: normed_artifact_stage_path = posixpath.normpath(identifier.remove_prefix(self.artifact_stage_location, "@")) params = { # Remove @ in the beginning, append "/" to denote root directory. @@ -175,7 +174,7 @@ def _construct_and_upload_job_spec(self, base_image: str, kaniko_shell_script_st os.path.dirname(self.context_dir), f"{constants.IMAGE_BUILD_JOB_SPEC_TEMPLATE}.yaml" ) - with open(spec_file_path, "w+", encoding="utf-8") as spec_file: + with file_utils.open_file(spec_file_path, "w+") as spec_file: assert self.artifact_stage_location.startswith("@") normed_artifact_stage_path = posixpath.normpath(identifier.remove_prefix(self.artifact_stage_location, "@")) (db, schema, stage, path) = identifier.parse_schema_level_object_identifier(normed_artifact_stage_path) diff --git a/snowflake/ml/model/_deploy_client/snowservice/BUILD.bazel b/snowflake/ml/model/_deploy_client/snowservice/BUILD.bazel index 2cc37a5c..7579d010 100644 --- a/snowflake/ml/model/_deploy_client/snowservice/BUILD.bazel +++ b/snowflake/ml/model/_deploy_client/snowservice/BUILD.bazel @@ -22,6 +22,7 @@ py_library( ":deploy_options", ":instance_types", "//snowflake/ml/_internal:env_utils", + "//snowflake/ml/_internal/container_services/image_registry:registry_client", "//snowflake/ml/_internal/exceptions", "//snowflake/ml/_internal/utils:identifier", "//snowflake/ml/_internal/utils:spcs_attribution_utils", @@ -29,7 +30,6 @@ py_library( "//snowflake/ml/model/_deploy_client/image_builds:base_image_builder", "//snowflake/ml/model/_deploy_client/image_builds:client_image_builder", "//snowflake/ml/model/_deploy_client/image_builds:server_image_builder", - 
"//snowflake/ml/model/_deploy_client/utils:image_registry_client", "//snowflake/ml/model/_deploy_client/utils:snowservice_client", "//snowflake/ml/model/_packager/model_meta", ], diff --git a/snowflake/ml/model/_deploy_client/snowservice/deploy.py b/snowflake/ml/model/_deploy_client/snowservice/deploy.py index 748ce594..182c417b 100644 --- a/snowflake/ml/model/_deploy_client/snowservice/deploy.py +++ b/snowflake/ml/model/_deploy_client/snowservice/deploy.py @@ -14,6 +14,9 @@ from typing_extensions import Unpack from snowflake.ml._internal import env_utils, file_utils +from snowflake.ml._internal.container_services.image_registry import ( + registry_client as image_registry_client, +) from snowflake.ml._internal.exceptions import ( error_codes, exceptions as snowml_exceptions, @@ -32,11 +35,7 @@ server_image_builder, ) from snowflake.ml.model._deploy_client.snowservice import deploy_options, instance_types -from snowflake.ml.model._deploy_client.utils import ( - constants, - image_registry_client, - snowservice_client, -) +from snowflake.ml.model._deploy_client.utils import constants, snowservice_client from snowflake.ml.model._packager.model_meta import model_meta, model_meta_schema from snowflake.snowpark import Session diff --git a/snowflake/ml/model/_deploy_client/snowservice/instance_types.py b/snowflake/ml/model/_deploy_client/snowservice/instance_types.py index 11a9e09a..ab27b15b 100644 --- a/snowflake/ml/model/_deploy_client/snowservice/instance_types.py +++ b/snowflake/ml/model/_deploy_client/snowservice/instance_types.py @@ -1,2 +1,10 @@ # Snowpark Container Service GPU instance type and corresponding GPU counts. 
-INSTANCE_TYPE_TO_GPU_COUNT = {"GPU_3": 1, "GPU_5": 1, "GPU_7": 4, "GPU_10": 8} +INSTANCE_TYPE_TO_GPU_COUNT = { + "GPU_3": 1, + "GPU_5": 1, + "GPU_7": 4, + "GPU_10": 8, + "GPU_NV_S": 1, + "GPU_NV_M": 4, + "GPU_NV_L": 8, +} diff --git a/snowflake/ml/model/_deploy_client/utils/BUILD.bazel b/snowflake/ml/model/_deploy_client/utils/BUILD.bazel index d5fa0a02..31a123fd 100644 --- a/snowflake/ml/model/_deploy_client/utils/BUILD.bazel +++ b/snowflake/ml/model/_deploy_client/utils/BUILD.bazel @@ -18,24 +18,6 @@ py_library( ], ) -py_library( - name = "image_registry_client", - srcs = ["image_registry_client.py"], - deps = [ - ":imagelib", - "//snowflake/ml/_internal/exceptions", - "//snowflake/ml/_internal/utils:image_registry_http_client", - ], -) - -py_library( - name = "imagelib", - srcs = ["imagelib.py"], - deps = [ - "//snowflake/ml/_internal/utils:image_registry_http_client", - ], -) - py_test( name = "snowservice_client_test", srcs = ["snowservice_client_test.py"], @@ -45,13 +27,3 @@ py_test( "//snowflake/ml/test_utils:mock_session", ], ) - -py_test( - name = "image_registry_client_test", - srcs = ["image_registry_client_test.py"], - deps = [ - ":image_registry_client", - "//snowflake/ml/test_utils:exception_utils", - "//snowflake/ml/test_utils:mock_session", - ], -) diff --git a/snowflake/ml/model/_deploy_client/warehouse/deploy.py b/snowflake/ml/model/_deploy_client/warehouse/deploy.py index 0f20fc4a..e89b49e6 100644 --- a/snowflake/ml/model/_deploy_client/warehouse/deploy.py +++ b/snowflake/ml/model/_deploy_client/warehouse/deploy.py @@ -2,6 +2,7 @@ import logging import posixpath import tempfile +import textwrap from types import ModuleType from typing import IO, List, Optional, Tuple, TypedDict, Union @@ -154,7 +155,7 @@ def _get_model_final_packages( Returns: List of final packages string that is accepted by Snowpark register UDF call. 
""" - final_packages = None + if ( any(channel.lower() not in [env_utils.DEFAULT_CHANNEL_NAME] for channel in meta.env._conda_dependencies.keys()) or meta.env.pip_requirements @@ -173,21 +174,29 @@ def _get_model_final_packages( else: required_packages = meta.env._conda_dependencies[env_utils.DEFAULT_CHANNEL_NAME] - final_packages = env_utils.validate_requirements_in_information_schema( + package_availability_dict = env_utils.get_matched_package_versions_in_information_schema( session, required_packages, python_version=meta.env.python_version ) - - if final_packages is None: + no_version_available_packages = [ + req_name for req_name, ver_list in package_availability_dict.items() if len(ver_list) < 1 + ] + unavailable_packages = [req.name for req in required_packages if req.name not in package_availability_dict] + if no_version_available_packages or unavailable_packages: relax_version_info_str = "" if relax_version else "Try to set relax_version as True in the options. " + required_package_str = " ".join(map(lambda x: f'"{x}"', required_packages)) raise snowml_exceptions.SnowflakeMLException( error_code=error_codes.DEPENDENCY_VERSION_ERROR, original_exception=RuntimeError( - "The model's dependencies are not available in Snowflake Anaconda Channel. " - + relax_version_info_str - + "Required packages are:\n" - + " ".join(map(lambda x: f'"{x}"', required_packages)) - + "\n Required Python version is: " - + meta.env.python_version + textwrap.dedent( + f""" + The model's dependencies are not available in Snowflake Anaconda Channel. 
{relax_version_info_str} + Required packages are: {required_package_str} + Required Python version is: {meta.env.python_version} + Packages that are not available are: {unavailable_packages} + Packages that cannot meet your requirements are: {no_version_available_packages} + Package availability information of those you requested is: {package_availability_dict} + """ + ), ), ) - return final_packages + return list(sorted(map(str, required_packages))) diff --git a/snowflake/ml/model/_deploy_client/warehouse/deploy_test.py b/snowflake/ml/model/_deploy_client/warehouse/deploy_test.py index 40250df4..4bbfa6df 100644 --- a/snowflake/ml/model/_deploy_client/warehouse/deploy_test.py +++ b/snowflake/ml/model/_deploy_client/warehouse/deploy_test.py @@ -1,3 +1,4 @@ +import platform import tempfile import textwrap from importlib import metadata as importlib_metadata @@ -40,18 +41,7 @@ class TestFinalPackagesWithoutConda(absltest.TestCase): @classmethod def setUpClass(cls) -> None: - env_utils._INFO_SCHEMA_PACKAGES_HAS_RUNTIME_VERSION = None cls.m_session = mock_session.MockSession(conn=None, test_case=None) - cls.m_session.add_mock_sql( - query=textwrap.dedent( - """ - SHOW COLUMNS - LIKE 'runtime_version' - IN TABLE information_schema.packages; - """ - ), - result=mock_data_frame.MockDataFrame(count_result=0), - ) def setUp(self) -> None: self.add_packages( @@ -76,7 +66,9 @@ def add_packages(self, packages_dicts: Dict[str, List[str]]) -> None: SELECT PACKAGE_NAME, VERSION FROM information_schema.packages WHERE ({pkg_names_str}) - AND language = 'python'; + AND language = 'python' + AND (runtime_version = '{platform.python_version_tuple()[0]}.{platform.python_version_tuple()[1]}' + OR runtime_version is null); """ ) sql_result = [ diff --git a/snowflake/ml/model/_model_composer/model_composer_test.py b/snowflake/ml/model/_model_composer/model_composer_test.py index 6a46977a..9611995f 100644 --- a/snowflake/ml/model/_model_composer/model_composer_test.py +++ 
b/snowflake/ml/model/_model_composer/model_composer_test.py @@ -39,7 +39,11 @@ def test_save_interface(self) -> None: with mock.patch.object( file_utils, "upload_directory_to_stage", return_value=None ) as mock_upload_directory_to_stage: - with mock.patch.object(env_utils, "validate_requirements_in_information_schema", return_value=[""]): + with mock.patch.object( + env_utils, + "get_matched_package_versions_in_information_schema", + return_value={env_utils.SNOWPARK_ML_PKG_NAME: []}, + ): m.save( name="model1", model=LinearRegression(), @@ -59,7 +63,11 @@ def test_save_interface(self) -> None: with mock.patch.object( file_utils, "upload_directory_to_stage", return_value=None ) as mock_upload_directory_to_stage: - with mock.patch.object(env_utils, "validate_requirements_in_information_schema", return_value=[""]): + with mock.patch.object( + env_utils, + "get_matched_package_versions_in_information_schema", + return_value={env_utils.SNOWPARK_ML_PKG_NAME: []}, + ): m.save( name="model1", model=linear_model.LinearRegression(), diff --git a/snowflake/ml/model/_model_composer/model_manifest/BUILD.bazel b/snowflake/ml/model/_model_composer/model_manifest/BUILD.bazel index 0b0d9706..0c6b509c 100644 --- a/snowflake/ml/model/_model_composer/model_manifest/BUILD.bazel +++ b/snowflake/ml/model/_model_composer/model_manifest/BUILD.bazel @@ -2,11 +2,22 @@ load("//bazel:py_rules.bzl", "py_library", "py_test") package(default_visibility = ["//visibility:public"]) +filegroup( + name = "manifest_fixtures", + srcs = [ + "fixtures/MANIFEST_0.yml", + "fixtures/MANIFEST_1.yml", + "fixtures/MANIFEST_2.yml", + "fixtures/MANIFEST_3.yml", + ], +) + py_library( name = "model_manifest", srcs = ["model_manifest.py"], deps = [ ":model_manifest_schema", + "//snowflake/ml/_internal/utils:snowflake_env", "//snowflake/ml/model/_model_composer/model_method", "//snowflake/ml/model/_model_composer/model_method:function_generator", "//snowflake/ml/model/_model_composer/model_runtime", @@ -17,15 +28,22 
@@ py_library( py_library( name = "model_manifest_schema", srcs = ["model_manifest_schema.py"], + deps = [ + "//snowflake/ml/model:model_signature", + ], ) py_test( name = "model_manifest_test", srcs = ["model_manifest_test.py"], - data = ["//snowflake/ml/model/_model_composer/model_method:function_fixtures"], + data = [ + ":manifest_fixtures", + "//snowflake/ml/model/_model_composer/model_method:function_fixtures", + ], deps = [ ":model_manifest", "//snowflake/ml/_internal:env_utils", + "//snowflake/ml/_internal/utils:snowflake_env", "//snowflake/ml/model:model_signature", "//snowflake/ml/model:type_hints", "//snowflake/ml/model/_packager/model_meta", diff --git a/snowflake/ml/model/_model_composer/model_manifest/fixtures/MANIFEST_0.yml b/snowflake/ml/model/_model_composer/model_manifest/fixtures/MANIFEST_0.yml new file mode 100644 index 00000000..d1914caf --- /dev/null +++ b/snowflake/ml/model/_model_composer/model_manifest/fixtures/MANIFEST_0.yml @@ -0,0 +1,41 @@ +manifest_version: '1.0' +methods: +- handler: functions.predict.infer + inputs: + - name: input_1 + type: FLOAT + - name: input_2 + type: ARRAY + - name: input_3 + type: ARRAY + - name: input_4 + type: ARRAY + name: predict + outputs: + - type: OBJECT + runtime: python_runtime + type: FUNCTION +- handler: functions.__call__.infer + inputs: + - name: INPUT_1 + type: FLOAT + - name: INPUT_2 + type: ARRAY + - name: INPUT_3 + type: ARRAY + - name: INPUT_4 + type: ARRAY + name: __CALL__ + outputs: + - type: OBJECT + runtime: python_runtime + type: FUNCTION +runtimes: + python_runtime: + dependencies: + conda: runtimes/python_runtime/env/conda.yml + imports: + - model.zip + - runtimes/python_runtime/snowflake-ml-python.zip + language: PYTHON + version: '3.8' diff --git a/snowflake/ml/model/_model_composer/model_manifest/fixtures/MANIFEST_1.yml b/snowflake/ml/model/_model_composer/model_manifest/fixtures/MANIFEST_1.yml new file mode 100644 index 00000000..bc4f4434 --- /dev/null +++ 
b/snowflake/ml/model/_model_composer/model_manifest/fixtures/MANIFEST_1.yml @@ -0,0 +1,65 @@ +manifest_version: '1.0' +methods: +- handler: functions.predict.infer + inputs: + - name: INPUT_1 + type: FLOAT + - name: INPUT_2 + type: ARRAY + - name: INPUT_3 + type: ARRAY + - name: INPUT_4 + type: ARRAY + name: PREDICT + outputs: + - type: OBJECT + runtime: python_runtime + type: FUNCTION +runtimes: + python_runtime: + dependencies: + conda: runtimes/python_runtime/env/conda.yml + imports: + - model.zip + language: PYTHON + version: '3.8' +user_data: + snowpark_ml_data: + functions: + - name: PREDICT + signature: + inputs: + - name: input_1 + type: FLOAT + - name: input_2 + shape: + - -1 + type: FLOAT + - name: input_3 + shape: + - -1 + type: FLOAT + - name: input_4 + shape: + - -1 + type: FLOAT + outputs: + - name: output_1 + type: FLOAT + - name: output_2 + shape: + - 2 + - 2 + type: FLOAT + - name: output_3 + shape: + - 2 + - 2 + type: FLOAT + - name: output_4 + shape: + - 2 + - 2 + type: FLOAT + target_method: predict + schema_version: '2024-02-01' diff --git a/snowflake/ml/model/_model_composer/model_manifest/fixtures/MANIFEST_2.yml b/snowflake/ml/model/_model_composer/model_manifest/fixtures/MANIFEST_2.yml new file mode 100644 index 00000000..73d76bda --- /dev/null +++ b/snowflake/ml/model/_model_composer/model_manifest/fixtures/MANIFEST_2.yml @@ -0,0 +1,66 @@ +manifest_version: '1.0' +methods: +- handler: functions.__call__.infer + inputs: + - name: INPUT_1 + type: FLOAT + - name: INPUT_2 + type: ARRAY + - name: INPUT_3 + type: ARRAY + - name: INPUT_4 + type: ARRAY + name: __CALL__ + outputs: + - type: OBJECT + runtime: python_runtime + type: FUNCTION +runtimes: + python_runtime: + dependencies: + conda: runtimes/python_runtime/env/conda.yml + imports: + - model.zip + - runtimes/python_runtime/snowflake-ml-python.zip + language: PYTHON + version: '3.8' +user_data: + snowpark_ml_data: + functions: + - name: __CALL__ + signature: + inputs: + - name: input_1 + 
type: FLOAT + - name: input_2 + shape: + - -1 + type: FLOAT + - name: input_3 + shape: + - -1 + type: FLOAT + - name: input_4 + shape: + - -1 + type: FLOAT + outputs: + - name: output_1 + type: FLOAT + - name: output_2 + shape: + - 2 + - 2 + type: FLOAT + - name: output_3 + shape: + - 2 + - 2 + type: FLOAT + - name: output_4 + shape: + - 2 + - 2 + type: FLOAT + target_method: __call__ + schema_version: '2024-02-01' diff --git a/snowflake/ml/model/_model_composer/model_manifest/fixtures/MANIFEST_3.yml b/snowflake/ml/model/_model_composer/model_manifest/fixtures/MANIFEST_3.yml new file mode 100644 index 00000000..08b5a91c --- /dev/null +++ b/snowflake/ml/model/_model_composer/model_manifest/fixtures/MANIFEST_3.yml @@ -0,0 +1,117 @@ +manifest_version: '1.0' +methods: +- handler: functions.predict.infer + inputs: + - name: input_1 + type: FLOAT + - name: input_2 + type: ARRAY + - name: input_3 + type: ARRAY + - name: input_4 + type: ARRAY + name: predict + outputs: + - type: OBJECT + runtime: python_runtime + type: FUNCTION +- handler: functions.__call__.infer + inputs: + - name: INPUT_1 + type: FLOAT + - name: INPUT_2 + type: ARRAY + - name: INPUT_3 + type: ARRAY + - name: INPUT_4 + type: ARRAY + name: __CALL__ + outputs: + - type: OBJECT + runtime: python_runtime + type: FUNCTION +runtimes: + python_runtime: + dependencies: + conda: runtimes/python_runtime/env/conda.yml + imports: + - model.zip + - runtimes/python_runtime/snowflake-ml-python.zip + language: PYTHON + version: '3.8' +user_data: + snowpark_ml_data: + functions: + - name: '"predict"' + signature: + inputs: + - name: input_1 + type: FLOAT + - name: input_2 + shape: + - -1 + type: FLOAT + - name: input_3 + shape: + - -1 + type: FLOAT + - name: input_4 + shape: + - -1 + type: FLOAT + outputs: + - name: output_1 + type: FLOAT + - name: output_2 + shape: + - 2 + - 2 + type: FLOAT + - name: output_3 + shape: + - 2 + - 2 + type: FLOAT + - name: output_4 + shape: + - 2 + - 2 + type: FLOAT + target_method: 
predict + - name: __CALL__ + signature: + inputs: + - name: input_1 + type: FLOAT + - name: input_2 + shape: + - -1 + type: FLOAT + - name: input_3 + shape: + - -1 + type: FLOAT + - name: input_4 + shape: + - -1 + type: FLOAT + outputs: + - name: output_1 + type: FLOAT + - name: output_2 + shape: + - 2 + - 2 + type: FLOAT + - name: output_3 + shape: + - 2 + - 2 + type: FLOAT + - name: output_4 + shape: + - 2 + - 2 + type: FLOAT + target_method: __call__ + schema_version: '2024-02-01' diff --git a/snowflake/ml/model/_model_composer/model_manifest/model_manifest.py b/snowflake/ml/model/_model_composer/model_manifest/model_manifest.py index 3d4d4f70..78a782cf 100644 --- a/snowflake/ml/model/_model_composer/model_manifest/model_manifest.py +++ b/snowflake/ml/model/_model_composer/model_manifest/model_manifest.py @@ -1,9 +1,10 @@ import collections import pathlib -from typing import List, Optional, cast +from typing import Any, Dict, List, Optional, cast import yaml +from snowflake.ml._internal.utils import snowflake_env from snowflake.ml.model import type_hints from snowflake.ml.model._model_composer.model_manifest import model_manifest_schema from snowflake.ml.model._model_composer.model_method import ( @@ -83,7 +84,15 @@ def save( ], ) + if ( + snowflake_env.get_current_snowflake_version(session) + >= model_manifest_schema.MANIFEST_USER_DATA_ENABLE_VERSION + ): + manifest_dict["user_data"] = self.generate_user_data_with_client_data(model_meta) + with (self.workspace_path / ModelManifest.MANIFEST_FILE_REL_PATH).open("w", encoding="utf-8") as f: + # Anchors are not supported in the server, avoid that. 
+ yaml.SafeDumper.ignore_aliases = lambda *args: True # type: ignore[method-assign] yaml.safe_dump(manifest_dict, f) def load(self) -> model_manifest_schema.ModelManifestDict: @@ -99,3 +108,43 @@ def load(self) -> model_manifest_schema.ModelManifestDict: res = cast(model_manifest_schema.ModelManifestDict, raw_input) return res + + def generate_user_data_with_client_data(self, model_meta: model_meta_api.ModelMetadata) -> Dict[str, Any]: + client_data = model_manifest_schema.SnowparkMLDataDict( + schema_version=model_manifest_schema.MANIFEST_CLIENT_DATA_SCHEMA_VERSION, + functions=[ + model_manifest_schema.ModelFunctionInfoDict( + name=method.method_name.identifier(), + target_method=method.target_method, + signature=model_meta.signatures[method.target_method].to_dict(), + ) + for method in self.methods + ], + ) + return {model_manifest_schema.MANIFEST_CLIENT_DATA_KEY_NAME: client_data} + + @staticmethod + def parse_client_data_from_user_data(raw_user_data: Dict[str, Any]) -> model_manifest_schema.SnowparkMLDataDict: + raw_client_data = raw_user_data.get(model_manifest_schema.MANIFEST_CLIENT_DATA_KEY_NAME, {}) + if not isinstance(raw_client_data, dict) or "schema_version" not in raw_client_data: + raise ValueError(f"Ill-formatted client data {raw_client_data} in user data found.") + loaded_client_data_schema_version = raw_client_data["schema_version"] + if ( + not isinstance(loaded_client_data_schema_version, str) + or loaded_client_data_schema_version != model_manifest_schema.MANIFEST_CLIENT_DATA_SCHEMA_VERSION + ): + raise ValueError(f"Unsupported client data schema version {loaded_client_data_schema_version} confronted.") + + return_functions_info: List[model_manifest_schema.ModelFunctionInfoDict] = [] + loaded_functions_info = raw_client_data.get("functions", []) + for func in loaded_functions_info: + fi = model_manifest_schema.ModelFunctionInfoDict( + name=func["name"], + target_method=func["target_method"], + signature=func["signature"], + ) + 
return_functions_info.append(fi) + + return model_manifest_schema.SnowparkMLDataDict( + schema_version=loaded_client_data_schema_version, functions=return_functions_info + ) diff --git a/snowflake/ml/model/_model_composer/model_manifest/model_manifest_schema.py b/snowflake/ml/model/_model_composer/model_manifest/model_manifest_schema.py index 2df33b9b..bf2d5473 100644 --- a/snowflake/ml/model/_model_composer/model_manifest/model_manifest_schema.py +++ b/snowflake/ml/model/_model_composer/model_manifest/model_manifest_schema.py @@ -2,10 +2,17 @@ from typing import Any, Dict, List, Literal, TypedDict +from packaging import version from typing_extensions import NotRequired, Required +from snowflake.ml.model import model_signature + MODEL_MANIFEST_VERSION = "1.0" +MANIFEST_USER_DATA_ENABLE_VERSION = version.parse("8.2.0") +MANIFEST_CLIENT_DATA_KEY_NAME = "snowpark_ml_data" +MANIFEST_CLIENT_DATA_SCHEMA_VERSION = "2024-02-01" + class ModelRuntimeDependenciesDict(TypedDict): conda: Required[str] @@ -38,6 +45,31 @@ class ModelFunctionMethodDict(TypedDict): ModelMethodDict = ModelFunctionMethodDict +class ModelFunctionInfo(TypedDict): + """Function information. + + Attributes: + name: Name of the function to be called via SQL. + target_method: actual target method name to be called. + signature: The signature of the model method. 
+ """ + + name: Required[str] + target_method: Required[str] + signature: Required[model_signature.ModelSignature] + + +class ModelFunctionInfoDict(TypedDict): + name: Required[str] + target_method: Required[str] + signature: Required[Dict[str, Any]] + + +class SnowparkMLDataDict(TypedDict): + schema_version: Required[str] + functions: Required[List[ModelFunctionInfoDict]] + + class ModelManifestDict(TypedDict): manifest_version: Required[str] runtimes: Required[Dict[str, ModelRuntimeDict]] diff --git a/snowflake/ml/model/_model_composer/model_manifest/model_manifest_test.py b/snowflake/ml/model/_model_composer/model_manifest/model_manifest_test.py index 50bfacd1..acc159cb 100644 --- a/snowflake/ml/model/_model_composer/model_manifest/model_manifest_test.py +++ b/snowflake/ml/model/_model_composer/model_manifest/model_manifest_test.py @@ -1,23 +1,37 @@ import os import pathlib import tempfile +from typing import Any, Dict from unittest import mock import importlib_resources import yaml from absl.testing import absltest +from packaging import version from snowflake.ml._internal import env_utils +from snowflake.ml._internal.utils import snowflake_env from snowflake.ml.model import model_signature, type_hints -from snowflake.ml.model._model_composer.model_manifest import model_manifest +from snowflake.ml.model._model_composer.model_manifest import ( + model_manifest, + model_manifest_schema, +) from snowflake.ml.model._packager.model_meta import model_blob_meta, model_meta _DUMMY_SIG = { "predict": model_signature.ModelSignature( inputs=[ - model_signature.FeatureSpec(dtype=model_signature.DataType.FLOAT, name="input"), + model_signature.FeatureSpec(dtype=model_signature.DataType.FLOAT, name="input_1"), + model_signature.FeatureSpec(dtype=model_signature.DataType.FLOAT, name="input_2", shape=(-1,)), + model_signature.FeatureSpec(dtype=model_signature.DataType.FLOAT, name="input_3", shape=(-1,)), + model_signature.FeatureSpec(dtype=model_signature.DataType.FLOAT, 
name="input_4", shape=(-1,)), + ], + outputs=[ + model_signature.FeatureSpec(name="output_1", dtype=model_signature.DataType.FLOAT), + model_signature.FeatureSpec(name="output_2", dtype=model_signature.DataType.FLOAT, shape=(2, 2)), + model_signature.FeatureSpec(name="output_3", dtype=model_signature.DataType.FLOAT, shape=(2, 2)), + model_signature.FeatureSpec(name="output_4", dtype=model_signature.DataType.FLOAT, shape=(2, 2)), ], - outputs=[model_signature.FeatureSpec(name="output", dtype=model_signature.DataType.FLOAT)], ) } @@ -30,48 +44,102 @@ class ModelManifestTest(absltest.TestCase): def setUp(self) -> None: self.m_session = mock.MagicMock() + snowflake_env.get_current_snowflake_version = mock.MagicMock( + return_value=model_manifest_schema.MANIFEST_USER_DATA_ENABLE_VERSION + ) + + def test_model_manifest_old(self) -> None: + snowflake_env.get_current_snowflake_version = mock.MagicMock(return_value=version.parse("8.0.0")) + with tempfile.TemporaryDirectory() as workspace, tempfile.TemporaryDirectory() as tmpdir: + mm = model_manifest.ModelManifest(pathlib.Path(workspace)) + with model_meta.create_model_metadata( + model_dir_path=tmpdir, + name="model1", + model_type="custom", + signatures={"predict": _DUMMY_SIG["predict"], "__call__": _DUMMY_SIG["predict"]}, + python_version="3.8", + ) as meta: + meta.models["model1"] = _DUMMY_BLOB + with mock.patch.object( + env_utils, + "get_matched_package_versions_in_information_schema", + return_value={env_utils.SNOWPARK_ML_PKG_NAME: []}, + ): + mm.save( + self.m_session, + meta, + pathlib.PurePosixPath("model.zip"), + options=type_hints.BaseModelSaveOption( + method_options={ + "predict": type_hints.ModelMethodSaveOptions(case_sensitive=True), + "__call__": type_hints.ModelMethodSaveOptions(max_batch_size=10), + } + ), + ) + with open(os.path.join(workspace, "MANIFEST.yml"), encoding="utf-8") as f: + self.assertEqual( + ( + importlib_resources.files("snowflake.ml.model._model_composer.model_manifest") + 
.joinpath("fixtures") # type: ignore[no-untyped-call] + .joinpath("MANIFEST_0.yml") + .read_text() + ), + f.read(), + ) + with open(pathlib.Path(workspace, "functions", "predict.py"), encoding="utf-8") as f: + self.assertEqual( + ( + importlib_resources.files("snowflake.ml.model._model_composer.model_method") + .joinpath("fixtures") # type: ignore[no-untyped-call] + .joinpath("function_1.py") + .read_text() + ), + f.read(), + ) + with open(pathlib.Path(workspace, "functions", "__call__.py"), encoding="utf-8") as f: + self.assertEqual( + ( + importlib_resources.files("snowflake.ml.model._model_composer.model_method") + .joinpath("fixtures") # type: ignore[no-untyped-call] + .joinpath("function_2.py") + .read_text() + ), + f.read(), + ) def test_model_manifest_1(self) -> None: with tempfile.TemporaryDirectory() as workspace, tempfile.TemporaryDirectory() as tmpdir: mm = model_manifest.ModelManifest(pathlib.Path(workspace)) with model_meta.create_model_metadata( - model_dir_path=tmpdir, name="model1", model_type="custom", signatures=_DUMMY_SIG + model_dir_path=tmpdir, + name="model1", + model_type="custom", + signatures=_DUMMY_SIG, + python_version="3.8", ) as meta: meta.models["model1"] = _DUMMY_BLOB - with mock.patch.object(env_utils, "validate_requirements_in_information_schema", return_value=[""]): + with mock.patch.object( + env_utils, + "get_matched_package_versions_in_information_schema", + return_value={env_utils.SNOWPARK_ML_PKG_NAME: [""]}, + ): mm.save(self.m_session, meta, pathlib.PurePosixPath("model.zip")) with open(os.path.join(workspace, "MANIFEST.yml"), encoding="utf-8") as f: - loaded_manifest = yaml.safe_load(f) - self.assertDictEqual( - loaded_manifest, - { - "manifest_version": "1.0", - "runtimes": { - "python_runtime": { - "language": "PYTHON", - "version": meta.env.python_version, - "imports": ["model.zip"], - "dependencies": {"conda": "runtimes/python_runtime/env/conda.yml"}, - } - }, - "methods": [ - { - "name": "PREDICT", - "runtime": 
"python_runtime", - "type": "FUNCTION", - "handler": "functions.predict.infer", - "inputs": [{"name": "INPUT", "type": "FLOAT"}], - "outputs": [{"type": "OBJECT"}], - } - ], - }, - ) + self.assertEqual( + ( + importlib_resources.files("snowflake.ml.model._model_composer.model_manifest") + .joinpath("fixtures") # type: ignore[no-untyped-call] + .joinpath("MANIFEST_1.yml") + .read_text() + ), + f.read(), + ) with open(pathlib.Path(workspace, "functions", "predict.py"), encoding="utf-8") as f: self.assertEqual( ( importlib_resources.files("snowflake.ml.model._model_composer.model_method") .joinpath("fixtures") # type: ignore[no-untyped-call] - .joinpath("function_fixture_1.py_fixture") + .joinpath("function_1.py") .read_text() ), f.read(), @@ -85,9 +153,14 @@ def test_model_manifest_2(self) -> None: name="model1", model_type="custom", signatures={"__call__": _DUMMY_SIG["predict"]}, + python_version="3.8", ) as meta: meta.models["model1"] = _DUMMY_BLOB - with mock.patch.object(env_utils, "validate_requirements_in_information_schema", return_value=[""]): + with mock.patch.object( + env_utils, + "get_matched_package_versions_in_information_schema", + return_value={env_utils.SNOWPARK_ML_PKG_NAME: []}, + ): mm.save( self.m_session, meta, @@ -97,37 +170,21 @@ def test_model_manifest_2(self) -> None: ), ) with open(os.path.join(workspace, "MANIFEST.yml"), encoding="utf-8") as f: - loaded_manifest = yaml.safe_load(f) - self.assertDictEqual( - loaded_manifest, - { - "manifest_version": "1.0", - "runtimes": { - "python_runtime": { - "language": "PYTHON", - "version": meta.env.python_version, - "imports": ["model.zip"], - "dependencies": {"conda": "runtimes/python_runtime/env/conda.yml"}, - } - }, - "methods": [ - { - "name": "__CALL__", - "runtime": "python_runtime", - "type": "FUNCTION", - "handler": "functions.__call__.infer", - "inputs": [{"name": "INPUT", "type": "FLOAT"}], - "outputs": [{"type": "OBJECT"}], - } - ], - }, - ) + self.assertEqual( + ( + 
importlib_resources.files("snowflake.ml.model._model_composer.model_manifest") + .joinpath("fixtures") # type: ignore[no-untyped-call] + .joinpath("MANIFEST_2.yml") + .read_text() + ), + f.read(), + ) with open(pathlib.Path(workspace, "functions", "__call__.py"), encoding="utf-8") as f: self.assertEqual( ( importlib_resources.files("snowflake.ml.model._model_composer.model_method") .joinpath("fixtures") # type: ignore[no-untyped-call] - .joinpath("function_fixture_2.py_fixture") + .joinpath("function_2.py") .read_text() ), f.read(), @@ -141,9 +198,14 @@ def test_model_manifest_mix(self) -> None: name="model1", model_type="custom", signatures={"predict": _DUMMY_SIG["predict"], "__call__": _DUMMY_SIG["predict"]}, + python_version="3.8", ) as meta: meta.models["model1"] = _DUMMY_BLOB - with mock.patch.object(env_utils, "validate_requirements_in_information_schema", return_value=None): + with mock.patch.object( + env_utils, + "get_matched_package_versions_in_information_schema", + return_value={env_utils.SNOWPARK_ML_PKG_NAME: []}, + ): mm.save( self.m_session, meta, @@ -156,45 +218,21 @@ def test_model_manifest_mix(self) -> None: ), ) with open(os.path.join(workspace, "MANIFEST.yml"), encoding="utf-8") as f: - loaded_manifest = yaml.safe_load(f) - self.assertDictEqual( - loaded_manifest, - { - "manifest_version": "1.0", - "runtimes": { - "python_runtime": { - "language": "PYTHON", - "version": meta.env.python_version, - "imports": ["model.zip", "runtimes/python_runtime/snowflake-ml-python.zip"], - "dependencies": {"conda": "runtimes/python_runtime/env/conda.yml"}, - } - }, - "methods": [ - { - "name": "predict", - "runtime": "python_runtime", - "type": "FUNCTION", - "handler": "functions.predict.infer", - "inputs": [{"name": "input", "type": "FLOAT"}], - "outputs": [{"type": "OBJECT"}], - }, - { - "name": "__CALL__", - "runtime": "python_runtime", - "type": "FUNCTION", - "handler": "functions.__call__.infer", - "inputs": [{"name": "INPUT", "type": "FLOAT"}], - 
"outputs": [{"type": "OBJECT"}], - }, - ], - }, - ) + self.assertEqual( + ( + importlib_resources.files("snowflake.ml.model._model_composer.model_manifest") + .joinpath("fixtures") # type: ignore[no-untyped-call] + .joinpath("MANIFEST_3.yml") + .read_text() + ), + f.read(), + ) with open(pathlib.Path(workspace, "functions", "predict.py"), encoding="utf-8") as f: self.assertEqual( ( importlib_resources.files("snowflake.ml.model._model_composer.model_method") .joinpath("fixtures") # type: ignore[no-untyped-call] - .joinpath("function_fixture_1.py_fixture") + .joinpath("function_1.py") .read_text() ), f.read(), @@ -204,7 +242,7 @@ def test_model_manifest_mix(self) -> None: ( importlib_resources.files("snowflake.ml.model._model_composer.model_method") .joinpath("fixtures") # type: ignore[no-untyped-call] - .joinpath("function_fixture_2.py_fixture") + .joinpath("function_2.py") .read_text() ), f.read(), @@ -220,7 +258,11 @@ def test_model_manifest_bad(self) -> None: signatures={"predict": _DUMMY_SIG["predict"], "PREDICT": _DUMMY_SIG["predict"]}, ) as meta: meta.models["model1"] = _DUMMY_BLOB - with mock.patch.object(env_utils, "validate_requirements_in_information_schema", return_value=[""]): + with mock.patch.object( + env_utils, + "get_matched_package_versions_in_information_schema", + return_value={env_utils.SNOWPARK_ML_PKG_NAME: []}, + ): with self.assertRaisesRegex( ValueError, "Found duplicate method named resolved as PREDICT in the model." 
): @@ -297,6 +339,59 @@ def test_load(self) -> None: self.assertDictEqual(raw_input, mm.load()) + def test_generate_user_data_with_client_data_1(self) -> None: + m_user_data: Dict[str, Any] = {"description": "a"} + with self.assertRaisesRegex(ValueError, "Ill-formatted client data .* in user data found."): + model_manifest.ModelManifest.parse_client_data_from_user_data(m_user_data) + + m_user_data = {model_manifest_schema.MANIFEST_CLIENT_DATA_KEY_NAME: "a"} + with self.assertRaisesRegex(ValueError, "Ill-formatted client data .* in user data found."): + model_manifest.ModelManifest.parse_client_data_from_user_data(m_user_data) + + m_user_data = {model_manifest_schema.MANIFEST_CLIENT_DATA_KEY_NAME: {"description": "a"}} + with self.assertRaisesRegex(ValueError, "Ill-formatted client data .* in user data found."): + model_manifest.ModelManifest.parse_client_data_from_user_data(m_user_data) + + m_user_data = {model_manifest_schema.MANIFEST_CLIENT_DATA_KEY_NAME: {"schema_version": 1}} + with self.assertRaisesRegex(ValueError, "Unsupported client data schema version .* confronted."): + model_manifest.ModelManifest.parse_client_data_from_user_data(m_user_data) + + m_user_data = {model_manifest_schema.MANIFEST_CLIENT_DATA_KEY_NAME: {"schema_version": "2023-12-01"}} + with self.assertRaisesRegex(ValueError, "Unsupported client data schema version .* confronted."): + model_manifest.ModelManifest.parse_client_data_from_user_data(m_user_data) + + m_user_data = { + model_manifest_schema.MANIFEST_CLIENT_DATA_KEY_NAME: { + "schema_version": model_manifest_schema.MANIFEST_CLIENT_DATA_SCHEMA_VERSION + } + } + self.assertDictEqual( + model_manifest.ModelManifest.parse_client_data_from_user_data(m_user_data), + {"schema_version": model_manifest_schema.MANIFEST_CLIENT_DATA_SCHEMA_VERSION, "functions": []}, + ) + + def test_generate_user_data_with_client_data_2(self) -> None: + m_client_data = { + "schema_version": model_manifest_schema.MANIFEST_CLIENT_DATA_SCHEMA_VERSION, + 
"functions": [ + { + "name": '"predict"', + "target_method": "predict", + "signature": _DUMMY_SIG["predict"].to_dict(), + }, + { + "name": "__CALL__", + "target_method": "__call__", + "signature": _DUMMY_SIG["predict"].to_dict(), + }, + ], + } + m_user_data = {model_manifest_schema.MANIFEST_CLIENT_DATA_KEY_NAME: m_client_data} + self.assertDictEqual( + model_manifest.ModelManifest.parse_client_data_from_user_data(m_user_data), + m_client_data, + ) + if __name__ == "__main__": absltest.main() diff --git a/snowflake/ml/model/_model_composer/model_method/BUILD.bazel b/snowflake/ml/model/_model_composer/model_method/BUILD.bazel index 27924817..e5bcc0b2 100644 --- a/snowflake/ml/model/_model_composer/model_method/BUILD.bazel +++ b/snowflake/ml/model/_model_composer/model_method/BUILD.bazel @@ -5,8 +5,8 @@ package(default_visibility = ["//visibility:public"]) filegroup( name = "function_fixtures", srcs = [ - "fixtures/function_fixture_1.py_fixture", - "fixtures/function_fixture_2.py_fixture", + "fixtures/function_1.py", + "fixtures/function_2.py", ], ) diff --git a/snowflake/ml/model/_model_composer/model_method/fixtures/function_fixture_1.py_fixture b/snowflake/ml/model/_model_composer/model_method/fixtures/function_1.py similarity index 100% rename from snowflake/ml/model/_model_composer/model_method/fixtures/function_fixture_1.py_fixture rename to snowflake/ml/model/_model_composer/model_method/fixtures/function_1.py diff --git a/snowflake/ml/model/_model_composer/model_method/fixtures/function_fixture_2.py_fixture b/snowflake/ml/model/_model_composer/model_method/fixtures/function_2.py similarity index 100% rename from snowflake/ml/model/_model_composer/model_method/fixtures/function_fixture_2.py_fixture rename to snowflake/ml/model/_model_composer/model_method/fixtures/function_2.py diff --git a/snowflake/ml/model/_model_composer/model_method/function_generator.py b/snowflake/ml/model/_model_composer/model_method/function_generator.py index 192480fa..1cc23e22 100644 
--- a/snowflake/ml/model/_model_composer/model_method/function_generator.py +++ b/snowflake/ml/model/_model_composer/model_method/function_generator.py @@ -1,7 +1,6 @@ import pathlib from typing import Optional, TypedDict -import importlib_resources from typing_extensions import NotRequired from snowflake.ml.model import type_hints @@ -33,6 +32,8 @@ def generate( target_method: str, options: Optional[FunctionGenerateOptions] = None, ) -> None: + import importlib_resources + if options is None: options = {} function_template = ( diff --git a/snowflake/ml/model/_model_composer/model_method/function_generator_test.py b/snowflake/ml/model/_model_composer/model_method/function_generator_test.py index c10b06d0..1776963b 100644 --- a/snowflake/ml/model/_model_composer/model_method/function_generator_test.py +++ b/snowflake/ml/model/_model_composer/model_method/function_generator_test.py @@ -20,7 +20,7 @@ def test_function_generator(self) -> None: ( importlib_resources.files("snowflake.ml.model._model_composer.model_method") .joinpath("fixtures") # type: ignore[no-untyped-call] - .joinpath("function_fixture_1.py_fixture") + .joinpath("function_1.py") .read_text() ), f.read(), @@ -35,7 +35,7 @@ def test_function_generator(self) -> None: ( importlib_resources.files("snowflake.ml.model._model_composer.model_method") .joinpath("fixtures") # type: ignore[no-untyped-call] - .joinpath("function_fixture_2.py_fixture") + .joinpath("function_2.py") .read_text() ), f.read(), diff --git a/snowflake/ml/model/_model_composer/model_method/model_method_test.py b/snowflake/ml/model/_model_composer/model_method/model_method_test.py index 0594641d..c6b6e45c 100644 --- a/snowflake/ml/model/_model_composer/model_method/model_method_test.py +++ b/snowflake/ml/model/_model_composer/model_method/model_method_test.py @@ -48,7 +48,7 @@ def test_model_method(self) -> None: ( importlib_resources.files(model_method_pkg) .joinpath("fixtures") # type: ignore[no-untyped-call] - 
.joinpath("function_fixture_1.py_fixture") + .joinpath("function_1.py") .read_text() ), f.read(), @@ -87,7 +87,7 @@ def test_model_method(self) -> None: ( importlib_resources.files(model_method_pkg) .joinpath("fixtures") # type: ignore[no-untyped-call] - .joinpath("function_fixture_2.py_fixture") + .joinpath("function_2.py") .read_text() ), f.read(), @@ -152,7 +152,7 @@ def test_model_method(self) -> None: ( importlib_resources.files(model_method_pkg) .joinpath("fixtures") # type: ignore[no-untyped-call] - .joinpath("function_fixture_1.py_fixture") + .joinpath("function_1.py") .read_text() ), f.read(), diff --git a/snowflake/ml/model/_model_composer/model_runtime/model_runtime.py b/snowflake/ml/model/_model_composer/model_runtime/model_runtime.py index aa90d3f3..a68a0f87 100644 --- a/snowflake/ml/model/_model_composer/model_runtime/model_runtime.py +++ b/snowflake/ml/model/_model_composer/model_runtime/model_runtime.py @@ -44,12 +44,17 @@ def __init__( if self.runtime_env._snowpark_ml_version.local: self.embed_local_ml_library = True else: - snowml_server_availability = env_utils.validate_requirements_in_information_schema( - session=session, - reqs=[requirements.Requirement(snowml_pkg_spec)], - python_version=snowml_env.PYTHON_VERSION, + snowml_server_availability = ( + len( + env_utils.get_matched_package_versions_in_information_schema( + session=session, + reqs=[requirements.Requirement(snowml_pkg_spec)], + python_version=snowml_env.PYTHON_VERSION, + ).get(env_utils.SNOWPARK_ML_PKG_NAME, []) + ) + >= 1 ) - self.embed_local_ml_library = snowml_server_availability is None + self.embed_local_ml_library = not snowml_server_availability if self.embed_local_ml_library: self.runtime_env.include_if_absent( diff --git a/snowflake/ml/model/_model_composer/model_runtime/model_runtime_test.py b/snowflake/ml/model/_model_composer/model_runtime/model_runtime_test.py index 1a10e220..9ad8b4c3 100644 --- a/snowflake/ml/model/_model_composer/model_runtime/model_runtime_test.py 
+++ b/snowflake/ml/model/_model_composer/model_runtime/model_runtime_test.py @@ -56,7 +56,11 @@ def test_model_runtime(self) -> None: ) as meta: meta.models["model1"] = _DUMMY_BLOB - with mock.patch.object(env_utils, "validate_requirements_in_information_schema", return_value=[""]): + with mock.patch.object( + env_utils, + "get_matched_package_versions_in_information_schema", + return_value={env_utils.SNOWPARK_ML_PKG_NAME: [""]}, + ): mr = model_runtime.ModelRuntime( self.m_session, "python_runtime", meta, [pathlib.PurePosixPath("model.zip")] ) @@ -83,7 +87,11 @@ def test_model_runtime_local_snowml(self) -> None: ) as meta: meta.models["model1"] = _DUMMY_BLOB - with mock.patch.object(env_utils, "validate_requirements_in_information_schema", return_value=None): + with mock.patch.object( + env_utils, + "get_matched_package_versions_in_information_schema", + return_value={env_utils.SNOWPARK_ML_PKG_NAME: []}, + ): mr = model_runtime.ModelRuntime( self.m_session, "python_runtime", meta, [pathlib.PurePosixPath("model.zip")] ) @@ -118,7 +126,11 @@ def test_model_runtime_dup_basic_dep(self) -> None: dep_target.append("pandas") dep_target.sort() - with mock.patch.object(env_utils, "validate_requirements_in_information_schema", return_value=[""]): + with mock.patch.object( + env_utils, + "get_matched_package_versions_in_information_schema", + return_value={env_utils.SNOWPARK_ML_PKG_NAME: [""]}, + ): mr = model_runtime.ModelRuntime( self.m_session, "python_runtime", meta, [pathlib.PurePosixPath("model.zip")] ) @@ -144,7 +156,11 @@ def test_model_runtime_dup_basic_dep_other_channel(self) -> None: dep_target.append("conda-forge::pandas") dep_target.sort() - with mock.patch.object(env_utils, "validate_requirements_in_information_schema", return_value=[""]): + with mock.patch.object( + env_utils, + "get_matched_package_versions_in_information_schema", + return_value={env_utils.SNOWPARK_ML_PKG_NAME: [""]}, + ): mr = model_runtime.ModelRuntime( self.m_session, "python_runtime", 
meta, [pathlib.PurePosixPath("model.zip")] ) @@ -169,7 +185,11 @@ def test_model_runtime_dup_basic_dep_pip(self) -> None: dep_target.remove(f"pandas=={importlib_metadata.version('pandas')}") dep_target.sort() - with mock.patch.object(env_utils, "validate_requirements_in_information_schema", return_value=[""]): + with mock.patch.object( + env_utils, + "get_matched_package_versions_in_information_schema", + return_value={env_utils.SNOWPARK_ML_PKG_NAME: [""]}, + ): mr = model_runtime.ModelRuntime( self.m_session, "python_runtime", meta, [pathlib.PurePosixPath("model.zip")] ) @@ -194,7 +214,11 @@ def test_model_runtime_additional_conda_dep(self) -> None: dep_target.append("pytorch") dep_target.sort() - with mock.patch.object(env_utils, "validate_requirements_in_information_schema", return_value=[""]): + with mock.patch.object( + env_utils, + "get_matched_package_versions_in_information_schema", + return_value={env_utils.SNOWPARK_ML_PKG_NAME: [""]}, + ): mr = model_runtime.ModelRuntime( self.m_session, "python_runtime", meta, [pathlib.PurePosixPath("model.zip")] ) @@ -218,7 +242,11 @@ def test_model_runtime_additional_pip_dep(self) -> None: dep_target = _BASIC_DEPENDENCIES_TARGET_WITH_SNOWML[:] dep_target.sort() - with mock.patch.object(env_utils, "validate_requirements_in_information_schema", return_value=[""]): + with mock.patch.object( + env_utils, + "get_matched_package_versions_in_information_schema", + return_value={env_utils.SNOWPARK_ML_PKG_NAME: [""]}, + ): mr = model_runtime.ModelRuntime( self.m_session, "python_runtime", meta, [pathlib.PurePosixPath("model.zip")] ) @@ -244,7 +272,11 @@ def test_model_runtime_additional_dep_both(self) -> None: dep_target.append("pytorch") dep_target.sort() - with mock.patch.object(env_utils, "validate_requirements_in_information_schema", return_value=[""]): + with mock.patch.object( + env_utils, + "get_matched_package_versions_in_information_schema", + return_value={env_utils.SNOWPARK_ML_PKG_NAME: [""]}, + ): mr = 
model_runtime.ModelRuntime( self.m_session, "python_runtime", meta, [pathlib.PurePosixPath("model.zip")] ) diff --git a/snowflake/ml/model/_packager/model_handlers/huggingface_pipeline.py b/snowflake/ml/model/_packager/model_handlers/huggingface_pipeline.py index 47459cfc..be5ca718 100644 --- a/snowflake/ml/model/_packager/model_handlers/huggingface_pipeline.py +++ b/snowflake/ml/model/_packager/model_handlers/huggingface_pipeline.py @@ -59,7 +59,7 @@ def get_requirements_from_task(task: str, spcs_only: bool = False) -> List[model return ( [model_env.ModelDependency(requirement="tokenizers>=0.13.3", pip_name="tokenizers")] if spcs_only - else [model_env.ModelDependency(requirement="tokenizers<=0.13.2", pip_name="tokenizers")] + else [model_env.ModelDependency(requirement="tokenizers", pip_name="tokenizers")] ) return [] diff --git a/snowflake/ml/model/_packager/model_handlers/xgboost.py b/snowflake/ml/model/_packager/model_handlers/xgboost.py index 478cdac8..d92d3729 100644 --- a/snowflake/ml/model/_packager/model_handlers/xgboost.py +++ b/snowflake/ml/model/_packager/model_handlers/xgboost.py @@ -1,6 +1,16 @@ # mypy: disable-error-code="import" import os -from typing import TYPE_CHECKING, Callable, Dict, Optional, Type, Union, cast, final +from typing import ( + TYPE_CHECKING, + Any, + Callable, + Dict, + Optional, + Type, + Union, + cast, + final, +) import numpy as np import pandas as pd @@ -150,6 +160,7 @@ def load_model( m.load_model(os.path.join(model_blob_path, model_blob_filename)) if kwargs.get("use_gpu", False): + assert type(kwargs.get("use_gpu", False)) == bool gpu_params = {"tree_method": "gpu_hist", "predictor": "gpu_predictor"} if isinstance(m, xgboost.Booster): m.set_param(gpu_params) @@ -197,7 +208,7 @@ def fn(self: custom_model.CustomModel, X: pd.DataFrame) -> pd.DataFrame: return fn - type_method_dict = {} + type_method_dict: Dict[str, Any] = {"_raw_model": raw_model} for target_method_name, sig in model_meta.signatures.items(): 
type_method_dict[target_method_name] = fn_factory(raw_model, sig, target_method_name) diff --git a/snowflake/ml/model/_signatures/core.py b/snowflake/ml/model/_signatures/core.py index 1df982fc..a245b181 100644 --- a/snowflake/ml/model/_signatures/core.py +++ b/snowflake/ml/model/_signatures/core.py @@ -146,7 +146,8 @@ def from_snowpark_type(cls, snowpark_type: spt.DataType) -> "DataType": " is being automatically converted to INT64 in the Snowpark DataFrame. " "This automatic conversion may lead to potential precision loss and rounding errors. " "If you wish to prevent this conversion, you should manually perform " - "the necessary data type conversion." + "the necessary data type conversion.", + stacklevel=2, ) return DataType.INT64 else: @@ -155,7 +156,8 @@ def from_snowpark_type(cls, snowpark_type: spt.DataType) -> "DataType": " is being automatically converted to DOUBLE in the Snowpark DataFrame. " "This automatic conversion may lead to potential precision loss and rounding errors. " "If you wish to prevent this conversion, you should manually perform " - "the necessary data type conversion." + "the necessary data type conversion.", + stacklevel=2, ) return DataType.DOUBLE raise snowml_exceptions.SnowflakeMLException( @@ -202,23 +204,24 @@ def __init__( dtype: DataType, shape: Optional[Tuple[int, ...]] = None, ) -> None: - """Initialize a feature. + """ + Initialize a feature. Args: name: Name of the feature. dtype: Type of the elements in the feature. - shape: Used to represent scalar feature, 1-d feature list or n-d tensor. - -1 is used to represent variable length.Defaults to None. + shape: Used to represent scalar feature, 1-d feature list, + or n-d tensor. Use -1 to represent variable length. Defaults to None. - E.g. - None: scalar - (2,): 1d list with fixed len of 2. - (-1,): 1d list with variable length. Used for ragged tensor representation. - (d1, d2, d3): 3d tensor. + Examples: + - None: scalar + - (2,): 1d list with a fixed length of 2. 
+ - (-1,): 1d list with variable length, used for ragged tensor representation. + - (d1, d2, d3): 3d tensor. Raises: - SnowflakeMLException: TypeError: Raised when the dtype input type is incorrect. - SnowflakeMLException: TypeError: Raised when the shape input type is incorrect. + SnowflakeMLException: TypeError: When the dtype input type is incorrect. + SnowflakeMLException: TypeError: When the shape input type is incorrect. """ super().__init__(name=name) @@ -408,13 +411,13 @@ class ModelSignature: """Signature of a model that specifies the input and output of a model.""" def __init__(self, inputs: Sequence[BaseFeatureSpec], outputs: Sequence[BaseFeatureSpec]) -> None: - """Initialize a model signature + """Initialize a model signature. Args: - inputs: A sequence of feature specifications and feature group specifications that will compose the - input of the model. - outputs: A sequence of feature specifications and feature group specifications that will compose the - output of the model. + inputs: A sequence of feature specifications and feature group specifications that will compose + the input of the model. + outputs: A sequence of feature specifications and feature group specifications that will compose + the output of the model. """ self._inputs = inputs self._outputs = outputs diff --git a/snowflake/ml/model/custom_model.py b/snowflake/ml/model/custom_model.py index d1f576ce..f6d37e74 100644 --- a/snowflake/ml/model/custom_model.py +++ b/snowflake/ml/model/custom_model.py @@ -9,15 +9,16 @@ class MethodRef: - """Represents an method invocation of an instance of `ModelRef`. + """Represents a method invocation of an instance of `ModelRef`. + + This allows us to: + 1) Customize the place of actual execution of the method (inline, thread/process pool, or remote). + 2) Enrich the way of execution (sync versus async). - This allows us to - 1) Customize the place of actual execution of the method(inline, thread/process pool or remote). 
- 2) Enrich the way of execution(sync versus async). Example: - If you have a SKL model, you would normally invoke by `skl_ref.predict(df)` which has sync API. - Within inference graph, you could invoke `await skl_ref.predict.async_run(df)` which automatically - will be run on thread with async interface. + If you have an SKL model, you would normally invoke it by `skl_ref.predict(df)`, which has a synchronous API. + Within the inference graph, you could invoke `await skl_ref.predict.async_run(df)`, which will automatically + run on a thread with an asynchronous interface. """ def __init__(self, model_ref: "ModelRef", method_name: str) -> None: @@ -27,11 +28,11 @@ def __call__(self, *args: Any, **kwargs: Any) -> Any: return self._func(*args, **kwargs) async def async_run(self, *args: Any, **kwargs: Any) -> Any: - """Run the method in a async way. If the method is defined as async, this will simply run it. If not, this will - be run in a separate thread. + """Run the method in an asynchronous way. If the method is defined as async, this will simply run it. + If not, this will be run in a separate thread. Args: - *args: Arguments of the original method, + *args: Arguments of the original method. **kwargs: Keyword arguments of the original method. Returns: @@ -43,19 +44,20 @@ async def async_run(self, *args: Any, **kwargs: Any) -> Any: class ModelRef: - """Represents an model in the inference graph. Method could be directly called using this reference object as if - with the original model object. + """ + Represents a model in the inference graph. Methods can be directly called using this reference object + as if with the original model object. - This enables us to separate physical and logical representation of a model which - will allows us to deeply understand the graph and perform optimization at entire - graph level. 
+ This enables us to separate the physical and logical representation of a model, allowing for a deep understanding + of the graph and enabling optimization at the entire graph level. """ def __init__(self, name: str, model: model_types.SupportedModelType) -> None: - """Initialize the ModelRef. + """ + Initialize the ModelRef. Args: - name: The name of a model to refer it. + name: The name of the model to refer to. model: The model object. """ self._model = model @@ -91,11 +93,12 @@ def __setstate__(self, state: Any) -> None: class ModelContext: - """Context for a custom model showing path to artifacts and mapping between model name and object reference. + """ + Context for a custom model showing paths to artifacts and mapping between model name and object reference. Attributes: - artifacts: A dict mapping name of the artifact to its path. - model_refs: A dict mapping name of the sub-model to its ModelRef object. + artifacts: A dictionary mapping the name of the artifact to its path. + model_refs: A dictionary mapping the name of the sub-model to its ModelRef object. """ def __init__( @@ -104,11 +107,11 @@ def __init__( artifacts: Optional[Dict[str, str]] = None, models: Optional[Dict[str, model_types.SupportedModelType]] = None, ) -> None: - """Initialize the model context + """Initialize the model context. Args: - artifacts: A dict mapping name of the artifact to its currently available path. Defaults to None. - models: A dict mapping name of the sub-model to the corresponding model object. Defaults to None. + artifacts: A dictionary mapping the name of the artifact to its currently available path. Defaults to None. + models: A dictionary mapping the name of the sub-model to the corresponding model object. Defaults to None. """ self.artifacts: Dict[str, str] = artifacts if artifacts else dict() self.model_refs: Dict[str, ModelRef] = ( @@ -116,7 +119,8 @@ def __init__( ) def path(self, key: str) -> str: - """Get the actual path to a specific artifact. 
+ """Get the actual path to a specific artifact. This could be used when defining a Custom Model to retrieve + artifacts. Args: key: The name of the artifact. @@ -127,14 +131,13 @@ def path(self, key: str) -> str: return self.artifacts[key] def model_ref(self, name: str) -> ModelRef: - """Get a ModelRef object of a sub-model containing the name and model object, while able to call its method - directly as well. + """Get a ModelRef object of a sub-model containing the name and model object, allowing direct method calls. Args: name: The name of the sub-model. Returns: - The ModelRef object to the sub-model. + The ModelRef object representing the sub-model. """ return self.model_refs[name] diff --git a/snowflake/ml/model/model_signature.py b/snowflake/ml/model/model_signature.py index 7abfa9f3..4309ecce 100644 --- a/snowflake/ml/model/model_signature.py +++ b/snowflake/ml/model/model_signature.py @@ -570,32 +570,31 @@ def infer_signature( input_feature_names: Optional[List[str]] = None, output_feature_names: Optional[List[str]] = None, ) -> core.ModelSignature: - """Infer model signature from given input and output sample data. + """ + Infer model signature from given input and output sample data. + + Currently supports inferring model signatures from the following data types: - Currently, we support infer the model signature from example input/output data in the following cases: - - Pandas data frame whose column could have types of supported data types, - list (including list of supported data types, list of numpy array of supported data types, and nested list), - and numpy array of supported data types. + - Pandas DataFrame with columns of supported data types, lists (including nested lists) of supported data types, + and NumPy arrays of supported data types. - Does not support DataFrame with CategoricalIndex column index. - - Does not support DataFrame with column of variant length list or numpy array. - - Numpy array of supported data types. 
- - List of Numpy array of supported data types. - - List of supported data types, or nested list of supported data types. - - Does not support list of list of variant length list. + - NumPy arrays of supported data types. + - Lists of NumPy arrays of supported data types. + - Lists of supported data types or nested lists of supported data types. + + When inferring the signature, a ValueError indicates that the data is insufficient or invalid. - When a ValueError is raised when inferring the signature, it indicates that the data is ill and it is impossible to - create a signature reflecting that. - When a NotImplementedError is raised, it indicates that it might be possible to create a signature reflecting the - provided data, however, we could not infer it. + When it might be possible to create a signature reflecting the provided data, but it could not be inferred, + a NotImplementedError is raised Args: input_data: Sample input data for the model. output_data: Sample output data for the model. - input_feature_names: Name for input features. Defaults to None. - output_feature_names: Name for output features. Defaults to None. + input_feature_names: Names for input features. Defaults to None. + output_feature_names: Names for output features. Defaults to None. Returns: - A model signature. + A model signature inferred from the given input and output sample data. 
""" inputs = _infer_signature(input_data, role="input") inputs = utils.rename_features(inputs, input_feature_names) diff --git a/snowflake/ml/model/package_visibility_test.py b/snowflake/ml/model/package_visibility_test.py new file mode 100644 index 00000000..fe74e208 --- /dev/null +++ b/snowflake/ml/model/package_visibility_test.py @@ -0,0 +1,34 @@ +from types import ModuleType + +from absl.testing import absltest + +from snowflake.ml import model +from snowflake.ml.model import ( + _api, + custom_model, + deploy_platforms, + model_signature, + type_hints, +) + + +class PackageVisibilityTest(absltest.TestCase): + """Ensure that the functions in this package are visible externally.""" + + def test_class_visible(self) -> None: + self.assertIsInstance(model.Model, type) + self.assertIsInstance(model.ModelVersion, type) + self.assertIsInstance(model.HuggingFacePipelineModel, type) + self.assertIsInstance(model.LLM, type) + self.assertIsInstance(model.LLMOptions, type) + + def test_module_visible(self) -> None: + self.assertIsInstance(_api, ModuleType) + self.assertIsInstance(custom_model, ModuleType) + self.assertIsInstance(model_signature, ModuleType) + self.assertIsInstance(deploy_platforms, ModuleType) + self.assertIsInstance(type_hints, ModuleType) + + +if __name__ == "__main__": + absltest.main() diff --git a/snowflake/ml/modeling/_internal/BUILD.bazel b/snowflake/ml/modeling/_internal/BUILD.bazel index c4d34c58..1ab6e5c2 100644 --- a/snowflake/ml/modeling/_internal/BUILD.bazel +++ b/snowflake/ml/modeling/_internal/BUILD.bazel @@ -51,6 +51,7 @@ py_library( name = "model_specifications", srcs = ["model_specifications.py"], deps = [ + ":estimator_utils", "//snowflake/ml/_internal/exceptions", ], ) @@ -59,7 +60,9 @@ py_test( name = "model_specifications_test", srcs = ["model_specifications_test.py"], deps = [ + ":distributed_hpo_trainer", ":model_specifications", + "//snowflake/ml/utils:connection_params", ], ) @@ -88,6 +91,7 @@ py_library( 
"//snowflake/ml/_internal/exceptions", "//snowflake/ml/_internal/exceptions:modeling_error_messages", "//snowflake/ml/_internal/utils:identifier", + "//snowflake/ml/_internal/utils:pkg_version_utils", "//snowflake/ml/_internal/utils:query_result_checker", "//snowflake/ml/_internal/utils:snowpark_dataframe_utils", "//snowflake/ml/_internal/utils:temp_file_utils", @@ -105,6 +109,23 @@ py_library( "//snowflake/ml/_internal/exceptions", "//snowflake/ml/_internal/exceptions:modeling_error_messages", "//snowflake/ml/_internal/utils:identifier", + "//snowflake/ml/_internal/utils:pkg_version_utils", + "//snowflake/ml/_internal/utils:snowpark_dataframe_utils", + "//snowflake/ml/_internal/utils:temp_file_utils", + ], +) + +py_library( + name = "xgboost_external_memory_trainer", + srcs = ["xgboost_external_memory_trainer.py"], + deps = [ + ":model_specifications", + ":snowpark_trainer", + "//snowflake/ml/_internal:telemetry", + "//snowflake/ml/_internal/exceptions", + "//snowflake/ml/_internal/exceptions:modeling_error_messages", + "//snowflake/ml/_internal/utils:identifier", + "//snowflake/ml/_internal/utils:pkg_version_utils", "//snowflake/ml/_internal/utils:snowpark_dataframe_utils", "//snowflake/ml/_internal/utils:temp_file_utils", ], @@ -115,8 +136,33 @@ py_library( srcs = ["model_trainer_builder.py"], deps = [ ":distributed_hpo_trainer", + ":estimator_utils", ":model_trainer", ":pandas_trainer", ":snowpark_trainer", + ":xgboost_external_memory_trainer", + ], +) + +py_test( + name = "model_trainer_builder_test", + srcs = ["model_trainer_builder_test.py"], + deps = [ + ":distributed_hpo_trainer", + ":model_trainer", + ":model_trainer_builder", + ":pandas_trainer", + ":snowpark_trainer", + ":xgboost_external_memory_trainer", + "//snowflake/ml/utils:connection_params", + ], +) + +py_test( + name = "xgboost_external_memory_trainer_test", + srcs = ["xgboost_external_memory_trainer_test.py"], + deps = [ + ":xgboost_external_memory_trainer", + 
"//snowflake/ml/_internal/utils:temp_file_utils", ], ) diff --git a/snowflake/ml/modeling/_internal/distributed_hpo_trainer.py b/snowflake/ml/modeling/_internal/distributed_hpo_trainer.py index 9dc57ee2..8928f61b 100644 --- a/snowflake/ml/modeling/_internal/distributed_hpo_trainer.py +++ b/snowflake/ml/modeling/_internal/distributed_hpo_trainer.py @@ -4,15 +4,18 @@ import os import posixpath import sys -from typing import Any, Dict, List, Optional, Tuple, Union +from typing import Any, Dict, List, Optional, Set, Tuple, Union import cloudpickle as cp import numpy as np -from scipy.stats import rankdata from sklearn import model_selection from snowflake.ml._internal import telemetry -from snowflake.ml._internal.utils import identifier, snowpark_dataframe_utils +from snowflake.ml._internal.utils import ( + identifier, + pkg_version_utils, + snowpark_dataframe_utils, +) from snowflake.ml._internal.utils.temp_file_utils import ( cleanup_temp_files, get_temp_file_path, @@ -26,7 +29,8 @@ TempObjectType, random_name_for_temp_object, ) -from snowflake.snowpark.functions import col, sproc, udtf +from snowflake.snowpark.functions import sproc, udtf +from snowflake.snowpark.row import Row from snowflake.snowpark.types import IntegerType, StringType, StructField, StructType cp.register_pickle_by_value(inspect.getmodule(get_temp_file_path)) @@ -36,6 +40,153 @@ DEFAULT_UDTF_NJOBS = 3 +def construct_cv_results( + cv_results_raw_hex: List[Row], + cross_validator_indices_length: int, + parameter_grid_length: int, + search_cv_kwargs: Dict[str, Any], +) -> Tuple[bool, Dict[str, Any], int, Set[str]]: + """Construct the cross validation result from the UDF. Because we accelerate the process + by the number of cross validation number, and the combination of parameter grids. + Therefore, we need to stick them back together instead of returning the raw result + to align with original sklearn result. 
+ + Args: + cv_results_raw_hex (List[Row]): the list of cv_results from each cv and parameter grid combination. + Because UDxF can only return string, and numpy array/masked arrays cannot be encoded in a + json format. Each cv_result is encoded into hex string. + cross_validator_indices_length (int): the length of cross validator indices + parameter_grid_length (int): the length of parameter grid combination + search_cv_kwargs (Dict[str, Any]): the kwargs for GridSearchCV/RandomSearchCV. + + Raises: + ValueError: Retrieved empty cross validation results + ValueError: Cross validator index length is 0 + ValueError: Parameter index length is 0 + ValueError: Retrieved incorrect dataframe dimension from Snowpark's UDTF. + RuntimeError: Cross validation results are unexpectedly empty for one fold. + + Returns: + Tuple[bool, Dict[str, Any], int, Set[str]]: returns multimetric, cv_results_, best_param_index, scorers + """ + # Filter corner cases: either the snowpark dataframe result is empty; or index length is empty + if len(cv_results_raw_hex) == 0: + raise ValueError( + "Retrieved empty cross validation results from snowpark. Please retry or contact snowflake support." + ) + if cross_validator_indices_length == 0: + raise ValueError("Cross validator index length is 0. Was the CV iterator empty? ") + if parameter_grid_length == 0: + raise ValueError("Parameter index length is 0. Were there no candidates?") + + from scipy.stats import rankdata + + # cv_result maintains the original order + multimetric = False + cv_results_ = dict() + scorers = set() + # retrieve the cv_results from udtf table; results are encoded by hex and cloudpickle; + # We are constructing the raw information back to original form + if len(cv_results_raw_hex) != cross_validator_indices_length * parameter_grid_length: + raise ValueError( + "Retrieved incorrect dataframe dimension from Snowpark's UDTF." 
+ f"Expected {cross_validator_indices_length * parameter_grid_length}, got {len(cv_results_raw_hex)}. " + "Please retry or contact snowflake support." + ) + + for param_cv_indices, each_cv_result_hex in enumerate(cv_results_raw_hex): + # convert the hex string back to cv_results_ + hex_str = bytes.fromhex(each_cv_result_hex[0]) + with io.BytesIO(hex_str) as f_reload: + each_cv_result = cp.load(f_reload) + if not each_cv_result: + raise RuntimeError( + "Cross validation response is empty. This issue may be temporary - please try again." + ) + for k, v in each_cv_result.items(): + cur_cv_idx = param_cv_indices % cross_validator_indices_length + key = k + if "split0_test_" in k: + # For multi-metric evaluation, the scores for all the scorers are available in the + # cv_results_ dict at the keys ending with that scorer’s name ('_') + # instead of '_score'. + scorers.add(k[len("split0_test_") :]) + key = k.replace("split0_test", f"split{cur_cv_idx}_test") + if search_cv_kwargs.get("return_train_score", None) and "split0_train_" in k: + key = k.replace("split0_train", f"split{cur_cv_idx}_train") + elif k.startswith("param"): + if cur_cv_idx != 0: + continue + if key: + if key not in cv_results_: + cv_results_[key] = v + else: + cv_results_[key] = np.concatenate([cv_results_[key], v]) + + multimetric = len(scorers) > 1 + # Use numpy to re-calculate all the information in cv_results_ again + # Generally speaking, reshape all the results into the (scorers+2, idx_length, params_length) shape, + # and average them by the idx_length; + # idx_length is the number of cv folds; params_length is the number of parameter combinations + scores_test = [ + np.reshape( + np.concatenate( + [cv_results_[f"split{cur_cv}_test_{score}"] for cur_cv in range(cross_validator_indices_length)] + ), + (cross_validator_indices_length, -1), + ) + for score in scorers + ] + + fit_score_test_matrix = np.stack( + [ + np.reshape(cv_results_["mean_fit_time"], (cross_validator_indices_length, -1)), + 
np.reshape(cv_results_["mean_score_time"], (cross_validator_indices_length, -1)), + ] + + scores_test + ) + mean_fit_score_test_matrix = np.mean(fit_score_test_matrix, axis=1) + std_fit_score_test_matrix = np.std(fit_score_test_matrix, axis=1) + + if search_cv_kwargs.get("return_train_score", None): + scores_train = [ + np.reshape( + np.concatenate( + [cv_results_[f"split{cur_cv}_train_{score}"] for cur_cv in range(cross_validator_indices_length)] + ), + (cross_validator_indices_length, -1), + ) + for score in scorers + ] + mean_fit_score_train_matrix = np.mean(scores_train, axis=1) + std_fit_score_train_matrix = np.std(scores_train, axis=1) + + cv_results_["std_fit_time"] = std_fit_score_test_matrix[0] + cv_results_["mean_fit_time"] = mean_fit_score_test_matrix[0] + cv_results_["std_score_time"] = std_fit_score_test_matrix[1] + cv_results_["mean_score_time"] = mean_fit_score_test_matrix[1] + for idx, score in enumerate(scorers): + cv_results_[f"std_test_{score}"] = std_fit_score_test_matrix[idx + 2] + cv_results_[f"mean_test_{score}"] = mean_fit_score_test_matrix[idx + 2] + if search_cv_kwargs.get("return_train_score", None): + cv_results_[f"std_train_{score}"] = std_fit_score_train_matrix[idx] + cv_results_[f"mean_train_{score}"] = mean_fit_score_train_matrix[idx] + # re-compute the ranking again with mean_test_. + cv_results_[f"rank_test_{score}"] = rankdata(-cv_results_[f"mean_test_{score}"], method="min") + # The best param is the highest ranking (which is 1) and we choose the first time ranking 1 appeared. + # If all scores are `nan`, `rankdata` will also produce an array of `nan` values. + # In that case, default to first index. 
+ best_param_index = ( + np.where(cv_results_[f"rank_test_{score}"] == 1)[0][0] + if not np.isnan(cv_results_[f"rank_test_{score}"]).all() + else 0 + ) + return multimetric, cv_results_, best_param_index, scorers + + +cp.register_pickle_by_value(inspect.getmodule(construct_cv_results)) + + class DistributedHPOTrainer(SnowparkModelTrainer): """ A class for performing distributed hyperparameter optimization (HPO) using Snowpark. @@ -105,7 +256,7 @@ def fit_search_snowpark( temp_stage_creation_query = f"CREATE OR REPLACE TEMP STAGE {temp_stage_name};" session.sql(temp_stage_creation_query).collect() - # Stage data. + # Stage data as parquet file dataset = snowpark_dataframe_utils.cast_snowpark_dataframe(dataset) remote_file_path = f"{temp_stage_name}/{temp_stage_name}.parquet" dataset.write.copy_into_location( # type:ignore[call-overload] @@ -114,6 +265,7 @@ def fit_search_snowpark( imports = [f"@{row.name}" for row in session.sql(f"LIST @{temp_stage_name}").collect()] # Store GridSearchCV's refit variable. If user set it as False, we don't need to refit it again + # refit variable can be boolean, string or callable original_refit = estimator.refit # Create a temp file and dump the estimator to that file. 
@@ -208,7 +360,7 @@ def _distributed_search( for file_name in data_files ] df = pd.concat(partial_df, ignore_index=True) - df.columns = [identifier.get_inferred_name(col) for col in df.columns] + df.columns = [identifier.get_inferred_name(col_) for col_ in df.columns] X = df[input_cols] y = df[label_cols].squeeze() if label_cols else None @@ -222,11 +374,12 @@ def _distributed_search( with open(local_estimator_file_path, mode="r+b") as local_estimator_file_obj: estimator = cp.load(local_estimator_file_obj)["estimator"] - cv_orig = check_cv(estimator.cv, y, classifier=is_classifier(estimator.estimator)) - indices = [test for _, test in cv_orig.split(X, y)] + build_cross_validator = check_cv(estimator.cv, y, classifier=is_classifier(estimator.estimator)) + # store the cross_validator's test indices only to save space + cross_validator_indices = [test for _, test in build_cross_validator.split(X, y)] local_indices_file_name = get_temp_file_path() with open(local_indices_file_name, mode="w+b") as local_indices_file_obj: - cp.dump(indices, local_indices_file_obj) + cp.dump(cross_validator_indices, local_indices_file_obj) # Put locally serialized indices on stage. 
put_result = session.file.put( @@ -237,7 +390,8 @@ def _distributed_search( ) indices_location = put_result[0].target imports.append(f"@{temp_stage_name}/{indices_location}") - indices_len = len(indices) + cross_validator_indices_length = int(len(cross_validator_indices)) + parameter_grid_length = len(param_grid) assert estimator is not None @@ -261,7 +415,7 @@ def _load_data_into_udf() -> Tuple[ for file_name in data_files ] df = pd.concat(partial_df, ignore_index=True) - df.columns = [identifier.get_inferred_name(col) for col in df.columns] + df.columns = [identifier.get_inferred_name(col_) for col_ in df.columns] # load estimator local_estimator_file_path = os.path.join( @@ -299,16 +453,30 @@ def __init__(self) -> None: self.data_length = data_length self.params_to_evaluate = params_to_evaluate - def process(self, params_idx: int, idx: int) -> Iterator[Tuple[str]]: + def process(self, params_idx: int, cv_idx: int) -> Iterator[Tuple[str]]: + # Assign parameter to GridSearchCV if hasattr(estimator, "param_grid"): self.estimator.param_grid = self.params_to_evaluate[params_idx] + # Assign parameter to RandomizedSearchCV else: self.estimator.param_distributions = self.params_to_evaluate[params_idx] + # cross validator's indices: we stored test indices only (to save space); + # use the full indices to re-construct the train indices back. full_indices = np.array([i for i in range(self.data_length)]) - test_indice = self.indices[idx] + test_indice = self.indices[cv_idx] train_indice = np.setdiff1d(full_indices, test_indice) + # assign the tuple of train and test indices to estimator's original cross validator self.estimator.cv = [(train_indice, test_indice)] self.estimator.fit(**self.args) + # If the cv_results_ is empty, then the udtf table will have different number of output rows + # from the input rows. Raise ValueError. + if not self.estimator.cv_results_: + raise RuntimeError( + """Cross validation results are unexpectedly empty for one fold. 
+ This issue may be temporary - please try again.""" + ) + # Encode the dictionary of cv_results_ as binary (in hex format) to send it back + # because udtf doesn't allow numpy within json file binary_cv_results = None with io.BytesIO() as f: cp.dump(self.estimator.cv_results_, f) @@ -333,96 +501,44 @@ def end_partition(self) -> None: HP_TUNING = F.table_function(random_udtf_name) - idx_length = int(indices_len) - params_length = len(param_grid) - idxs = [i for i in range(idx_length)] - param_indices, training_indices = [], [] - for param_idx, cv_idx in product([param_index for param_index in range(params_length)], idxs): + # param_indices is for the index for each parameter grid; + # cv_indices is for the index for each cross_validator's fold; + # param_cv_indices is for the index for the product of (len(param_indices) * len(cv_indices)) + param_indices, cv_indices = [], [] + for param_idx, cv_idx in product( + [param_index for param_index in range(parameter_grid_length)], + [cv_index for cv_index in range(cross_validator_indices_length)], + ): param_indices.append(param_idx) - training_indices.append(cv_idx) + cv_indices.append(cv_idx) - pd_df = pd.DataFrame( + indices_info_pandas = pd.DataFrame( { - "PARAMS": param_indices, - "TRAIN_IND": training_indices, - "PARAM_INDEX": [i for i in range(idx_length * params_length)], + "PARAM_IND": param_indices, + "CV_IND": cv_indices, + "PARAM_CV_IND": [i for i in range(cross_validator_indices_length * parameter_grid_length)], } ) - df = session.create_dataframe(pd_df) - results = df.select( - F.cast(df["PARAM_INDEX"], IntegerType()).as_("PARAM_INDEX"), - (HP_TUNING(df["PARAMS"], df["TRAIN_IND"]).over(partition_by=df["PARAM_INDEX"])), + indices_info_sp = session.create_dataframe(indices_info_pandas) + # execute udtf by querying HP_TUNING table + HP_raw_results = indices_info_sp.select( + F.cast(indices_info_sp["PARAM_CV_IND"], IntegerType()).as_("PARAM_CV_IND"), + ( + HP_TUNING(indices_info_sp["PARAM_IND"], 
indices_info_sp["CV_IND"]).over( + partition_by=indices_info_sp["PARAM_CV_IND"] + ) + ), ) - # cv_result maintains the original order - multimetric = False - cv_results_ = dict() - scorers = set() - for i, val in enumerate(results.select("CV_RESULTS").sort(col("PARAM_INDEX")).collect()): - # retrieved string had one more double quote in the front and end of the string. - # use [1:-1] to remove the extra double quotes - hex_str = bytes.fromhex(val[0]) - with io.BytesIO(hex_str) as f_reload: - each_cv_result = cp.load(f_reload) - for k, v in each_cv_result.items(): - cur_cv = i % idx_length - key = k - if "split0_test_" in k: - # For multi-metric evaluation, the scores for all the scorers are available in the - # cv_results_ dict at the keys ending with that scorer’s name ('_') - # instead of '_score'. - scorers.add(k[len("split0_test_") :]) - key = k.replace("split0_test", f"split{cur_cv}_test") - elif k.startswith("param"): - if cur_cv != 0: - key = False - if key: - if key not in cv_results_: - cv_results_[key] = v - else: - cv_results_[key] = np.concatenate([cv_results_[key], v]) - - multimetric = len(scorers) > 1 - # Use numpy to re-calculate all the information in cv_results_ again - # Generally speaking, reshape all the results into the (scorers+2, idx_length, params_length) shape, - # and average them by the idx_length; - # idx_length is the number of cv folds; params_length is the number of parameter combinations - scores = [ - np.reshape( - np.concatenate([cv_results_[f"split{cur_cv}_test_{score}"] for cur_cv in range(idx_length)]), - (idx_length, -1), - ) - for score in scorers - ] - - fit_score_test_matrix = np.stack( - [ - np.reshape(cv_results_["mean_fit_time"], (idx_length, -1)), - np.reshape(cv_results_["mean_score_time"], (idx_length, -1)), - ] - + scores + multimetric, cv_results_, best_param_index, scorers = construct_cv_results( + HP_raw_results.select("CV_RESULTS").sort(F.col("PARAM_CV_IND")).collect(), + cross_validator_indices_length, + 
parameter_grid_length, + { + "return_train_score": estimator.return_train_score, + }, # TODO(xjiang): support more kwargs in here ) - mean_fit_score_test_matrix = np.mean(fit_score_test_matrix, axis=1) - std_fit_score_test_matrix = np.std(fit_score_test_matrix, axis=1) - cv_results_["std_fit_time"] = std_fit_score_test_matrix[0] - cv_results_["mean_fit_time"] = mean_fit_score_test_matrix[0] - cv_results_["std_score_time"] = std_fit_score_test_matrix[1] - cv_results_["mean_score_time"] = mean_fit_score_test_matrix[1] - for idx, score in enumerate(scorers): - cv_results_[f"std_test_{score}"] = std_fit_score_test_matrix[idx + 2] - cv_results_[f"mean_test_{score}"] = mean_fit_score_test_matrix[idx + 2] - # re-compute the ranking again with mean_test_. - cv_results_[f"rank_test_{score}"] = rankdata(-cv_results_[f"mean_test_{score}"], method="min") - # The best param is the highest ranking (which is 1) and we choose the first time ranking 1 appeared. - # If all scores are `nan`, `rankdata` will also produce an array of `nan` values. - # In that case, default to first index. 
- best_param_index = ( - np.where(cv_results_[f"rank_test_{score}"] == 1)[0][0] - if not np.isnan(cv_results_[f"rank_test_{score}"]).all() - else 0 - ) - estimator.cv_results_ = cv_results_ estimator.multimetric_ = multimetric @@ -541,12 +657,15 @@ def train(self) -> object: n_iter=self.estimator.n_iter, random_state=self.estimator.random_state, ) + relaxed_dependencies = pkg_version_utils.get_valid_pkg_versions_supported_in_snowflake_conda_channel( + pkg_versions=model_spec.pkgDependencies, session=self.session + ) return self.fit_search_snowpark( param_grid=param_grid, dataset=self.dataset, session=self.session, estimator=self.estimator, - dependencies=model_spec.pkgDependencies, + dependencies=relaxed_dependencies, udf_imports=["sklearn"], input_cols=self.input_cols, label_cols=self.label_cols, diff --git a/snowflake/ml/modeling/_internal/estimator_utils.py b/snowflake/ml/modeling/_internal/estimator_utils.py index e08306d5..74edf8a2 100644 --- a/snowflake/ml/modeling/_internal/estimator_utils.py +++ b/snowflake/ml/modeling/_internal/estimator_utils.py @@ -132,3 +132,24 @@ def is_single_node(session: Session) -> bool: # If current session cannot retrieve the warehouse name back, # Default as True; Let HPO fall back to stored procedure implementation return True + + +def get_module_name(model: object) -> str: + """Returns the source module of the given object. + + Args: + model: Object to inspect. + + Returns: + Source module of the given object. + + Raises: + SnowflakeMLException: If the source module of the given object is not found. 
+ """ + module = inspect.getmodule(model) + if module is None: + raise exceptions.SnowflakeMLException( + error_code=error_codes.INVALID_TYPE, + original_exception=ValueError(f"Unable to infer the source module of the given object {model}."), + ) + return module.__name__ diff --git a/snowflake/ml/modeling/_internal/model_specifications.py b/snowflake/ml/modeling/_internal/model_specifications.py index e6f375c5..d9d1cebd 100644 --- a/snowflake/ml/modeling/_internal/model_specifications.py +++ b/snowflake/ml/modeling/_internal/model_specifications.py @@ -1,10 +1,9 @@ -import inspect from typing import List import cloudpickle as cp import numpy as np -from snowflake.ml._internal.exceptions import error_codes, exceptions +from snowflake.ml.modeling._internal.estimator_utils import get_module_name class ModelSpecifications: @@ -120,16 +119,10 @@ def build(cls, model: object) -> ModelSpecifications: Appropriate ModelSpecification object Raises: - SnowflakeMLException: Raises an exception the module of given model can't be determined. TypeError: Raises the exception for unsupported modules. 
""" - module = inspect.getmodule(model) - if module is None: - raise exceptions.SnowflakeMLException( - error_code=error_codes.INVALID_TYPE, - original_exception=ValueError("Unable to infer model type of the given native model object."), - ) - root_module_name = module.__name__.split(".")[0] + module_name = get_module_name(model=model) + root_module_name = module_name.split(".")[0] if root_module_name == "sklearn": from sklearn.model_selection import GridSearchCV, RandomizedSearchCV diff --git a/snowflake/ml/modeling/_internal/model_specifications_test.py b/snowflake/ml/modeling/_internal/model_specifications_test.py index 26671eb2..a7ec2a26 100644 --- a/snowflake/ml/modeling/_internal/model_specifications_test.py +++ b/snowflake/ml/modeling/_internal/model_specifications_test.py @@ -1,18 +1,214 @@ -from typing import Any +import io +from typing import Any, Dict from unittest import mock +import cloudpickle as cp +import numpy as np from absl.testing import absltest, parameterized from lightgbm import LGBMRegressor from sklearn.linear_model import LinearRegression from sklearn.model_selection import GridSearchCV from xgboost import XGBRegressor +from snowflake.ml.modeling._internal.distributed_hpo_trainer import construct_cv_results from snowflake.ml.modeling._internal.model_specifications import ( ModelSpecificationsBuilder, ) +from snowflake.snowpark import Row + +each_cv_result_basic_sample = [ + { + "mean_fit_time": np.array([0.00315547]), + "std_fit_time": np.array([0.0]), + "mean_score_time": np.array([0.00176454]), + "std_score_time": np.array([0.0]), + "param_n_components": np.ma.array( + data=[2], mask=[False], fill_value="?", dtype=object + ), # type: ignore[no-untyped-call] + "params": [{"n_components": 2}], + "split0_test_score": np.array([-13.61564833]), + "mean_test_score": np.array([-13.61564833]), + "std_test_score": np.array([0.0]), + "rank_test_score": np.array([1], dtype=np.int32), + }, + { + "mean_fit_time": np.array([0.00257707]), + 
"std_fit_time": np.array([0.0]), + "mean_score_time": np.array([0.00151849]), + "std_score_time": np.array([0.0]), + "param_n_components": np.ma.array( + data=[2], mask=[False], fill_value="?", dtype=object + ), # type: ignore[no-untyped-call] + "params": [{"n_components": 2}], + "split0_test_score": np.array([-8.57012999]), + "mean_test_score": np.array([-8.57012999]), + "std_test_score": np.array([0.0]), + "rank_test_score": np.array([1], dtype=np.int32), + }, + { + "mean_fit_time": np.array([0.00270677]), + "std_fit_time": np.array([0.0]), + "mean_score_time": np.array([0.00146675]), + "std_score_time": np.array([0.0]), + "param_n_components": np.ma.array( + data=[1], mask=[False], fill_value="?", dtype=object + ), # type: ignore[no-untyped-call] + "params": [{"n_components": 1}], + "split0_test_score": np.array([-12.50893109]), + "mean_test_score": np.array([-12.50893109]), + "std_test_score": np.array([0.0]), + "rank_test_score": np.array([1], dtype=np.int32), + }, + { + "mean_fit_time": np.array([0.00293922]), + "std_fit_time": np.array([0.0]), + "mean_score_time": np.array([0.00342846]), + "std_score_time": np.array([0.0]), + "param_n_components": np.ma.array( + data=[1], mask=[False], fill_value="?", dtype=object + ), # type: ignore[no-untyped-call] + "params": [{"n_components": 1}], + "split0_test_score": np.array([-21.4394793]), + "mean_test_score": np.array([-21.4394793]), + "std_test_score": np.array([0.0]), + "rank_test_score": np.array([1], dtype=np.int32), + }, + { + "mean_fit_time": np.array([0.00297642]), + "std_fit_time": np.array([0.0]), + "mean_score_time": np.array([0.00161123]), + "std_score_time": np.array([0.0]), + "param_n_components": np.ma.array( + data=[1], mask=[False], fill_value="?", dtype=object + ), # type: ignore[no-untyped-call] + "params": [{"n_components": 1}], + "split0_test_score": np.array([-9.62685757]), + "mean_test_score": np.array([-9.62685757]), + "std_test_score": np.array([0.0]), + "rank_test_score": np.array([1], 
dtype=np.int32), + }, + { + "mean_fit_time": np.array([0.00596809]), + "std_fit_time": np.array([0.0]), + "mean_score_time": np.array([0.00264239]), + "std_score_time": np.array([0.0]), + "param_n_components": np.ma.array( + data=[2], mask=[False], fill_value="?", dtype=object + ), # type: ignore[no-untyped-call] + "params": [{"n_components": 2}], + "split0_test_score": np.array([-29.95119419]), + "mean_test_score": np.array([-29.95119419]), + "std_test_score": np.array([0.0]), + "rank_test_score": np.array([1], dtype=np.int32), + }, +] + +each_cv_result_return_train = [ + { + "mean_fit_time": np.array([0.00315547]), + "std_fit_time": np.array([0.0]), + "mean_score_time": np.array([0.00176454]), + "std_score_time": np.array([0.0]), + "param_n_components": np.ma.array( + data=[2], mask=[False], fill_value="?", dtype=object + ), # type: ignore[no-untyped-call] + "params": [{"n_components": 2}], + "split0_train_score": np.array([-13.61564833]), + "split0_test_score": np.array([-13.61564833]), + "mean_train_score": np.array([-13.61564833]), + "std_train_score": np.array([0.0]), + "mean_test_score": np.array([-13.61564833]), + "std_test_score": np.array([0.0]), + "rank_test_score": np.array([1], dtype=np.int32), + }, + { + "mean_fit_time": np.array([0.00257707]), + "std_fit_time": np.array([0.0]), + "mean_score_time": np.array([0.00151849]), + "std_score_time": np.array([0.0]), + "param_n_components": np.ma.array( + data=[2], mask=[False], fill_value="?", dtype=object + ), # type: ignore[no-untyped-call] + "params": [{"n_components": 2}], + "split0_train_score": np.array([-8.57012999]), + "split0_test_score": np.array([-8.57012999]), + "mean_train_score": np.array([-8.57012999]), + "std_train_score": np.array([0.0]), + "mean_test_score": np.array([-8.57012999]), + "std_test_score": np.array([0.0]), + "rank_test_score": np.array([1], dtype=np.int32), + }, +] + +SAMPLES: Dict[str, Dict[str, Any]] = { + "basic": { + "each_cv_result": each_cv_result_basic_sample, + 
"IDX_LENGTH": 3, + "PARAM_LENGTH": 2, + "CV_RESULT_": { + "mean_fit_time": np.array([0.00770839, 0.00551335]), + "std_fit_time": np.array([0.00061078, 0.00179875]), + "mean_score_time": np.array([0.00173187, 0.00182652]), + "std_score_time": np.array([0.00016869, 0.00014979]), + "param_n_components": np.ma.masked_array( + data=[1, 2], mask=False, fill_value="?", dtype=object + ), # type: ignore[no-untyped-call] + "params": np.array([{"n_components": 1}, {"n_components": 2}], dtype=object), + "split0_test_score": np.array([-21.4394793, -29.95119419]), + "mean_test_score": np.array([-14.52508932, -17.37899084]), + "std_test_score": np.array([5.02879565, 9.12540544]), + "rank_test_score": np.array([1, 2]), + "split1_test_score": np.array([-9.62685757, -8.57012999]), + "split2_test_score": np.array([-12.50893109, -13.61564833]), + }, + }, + "return_train_score": { + "each_cv_result": each_cv_result_return_train, + "IDX_LENGTH": 2, + "PARAM_LENGTH": 1, + "CV_RESULT_": { + "mean_fit_time": np.array( + [ + 0.00286627, + ] + ), + "std_fit_time": np.array([0.0002892]), + "mean_score_time": np.array([0.00164152]), + "std_score_time": np.array([0.00012303]), + "param_n_components": np.ma.masked_array( + data=[2], mask=False, fill_value="?", dtype=object + ), # type: ignore[no-untyped-call] + "params": np.array([{"n_components": 2}], dtype=object), + "mean_train_score": np.array([-11.09288916]), + "std_train_score": np.array([2.52275917]), + "mean_test_score": np.array([-11.09288916]), + "std_test_score": np.array([2.52275917]), + "rank_test_score": np.array([1]), + "split0_test_score": np.array([-13.61564833]), + "split1_test_score": np.array([-8.57012999]), + "split0_train_score": np.array([-13.61564833]), + "split1_train_score": np.array([-8.57012999]), + }, + }, +} + +for key, val in SAMPLES.items(): + combine_hex_cv_result = [] + for each_array in val["each_cv_result"]: + with io.BytesIO() as f: + cp.dump(each_array, f) + f.seek(0) + binary_cv_results = f.getvalue().hex() 
+ combine_hex_cv_result.append(binary_cv_results) + SAMPLES[key]["combine_hex_cv_result"] = combine_hex_cv_result class SnowparkHandlersUnitTest(parameterized.TestCase): + def setUp(self) -> None: + """Creates Snowpark and Snowflake environments for testing.""" + zipped = sorted(zip([5, 4, 2, 0, 1, 3], SAMPLES["basic"]["combine_hex_cv_result"]), key=lambda x: x[0]) + self.RAW_DATA_SP = [Row(val) for _, val in zipped] + def test_sklearn_model_selection_wrapper_provider_lightgbm_installed(self) -> None: orig_import = __import__ @@ -65,6 +261,57 @@ def import_mock(name: str, *args: Any, **kwargs: Any) -> Any: provider = ModelSpecificationsBuilder.build(model=LGBMRegressor()) self.assertEqual(provider.imports, ["lightgbm"]) + def _compare_cv_results(self, cv_result_1: Dict[str, Any], cv_result_2: Dict[str, Any]) -> None: + # compare the keys + self.assertEqual(sorted(cv_result_1.keys()), sorted(cv_result_2.keys())) + # compare the values + for k, v in cv_result_1.items(): + if isinstance(v, np.ndarray): + if k.startswith("param_"): # compare the masked array + np.ma.allequal(v, cv_result_2[k]) # type: ignore[no-untyped-call] + elif k == "params": # compare the parameter combination + self.assertEqual(v.tolist(), cv_result_2[k].tolist()) + elif k.endswith("test_score"): # compare the test score + np.testing.assert_allclose(v, cv_result_2[k], rtol=1.0e-1, atol=1.0e-2) + # Do not compare the fit time + + def test_cv_result(self) -> None: + multimetric, cv_results_, best_param_index, scorers = construct_cv_results( + self.RAW_DATA_SP, + SAMPLES["basic"]["IDX_LENGTH"], + SAMPLES["basic"]["PARAM_LENGTH"], + {"return_train_score": False}, + ) + self.assertEqual(multimetric, False) + self.assertEqual(best_param_index, 0) + self._compare_cv_results(cv_results_, SAMPLES["basic"]["CV_RESULT_"]) + self.assertEqual(scorers, {"score"}) + + def test_cv_result_return_train_score(self) -> None: + multimetric, cv_results_, best_param_index, scorers = construct_cv_results( + [Row(val) 
for val in SAMPLES["return_train_score"]["combine_hex_cv_result"]], + SAMPLES["return_train_score"]["IDX_LENGTH"], + SAMPLES["return_train_score"]["PARAM_LENGTH"], + {"return_train_score": True}, + ) + self.assertEqual(multimetric, False) + self._compare_cv_results(cv_results_, SAMPLES["return_train_score"]["CV_RESULT_"]) + self.assertEqual(scorers, {"score"}) + + def test_cv_result_incorrect_param_length(self) -> None: + with self.assertRaises(ValueError): + construct_cv_results(self.RAW_DATA_SP, SAMPLES["basic"]["IDX_LENGTH"], 1, {"return_train_score": False}) + + def test_cv_result_nan(self) -> None: + # corner cases with nan values + with self.assertRaises(ValueError): + construct_cv_results(self.RAW_DATA_SP, 0, SAMPLES["basic"]["PARAM_LENGTH"], {"return_train_score": False}) + # empty list + with self.assertRaises(ValueError): + construct_cv_results( + [], SAMPLES["basic"]["IDX_LENGTH"], SAMPLES["basic"]["PARAM_LENGTH"], {"return_train_score": False} + ) + if __name__ == "__main__": absltest.main() diff --git a/snowflake/ml/modeling/_internal/model_trainer_builder.py b/snowflake/ml/modeling/_internal/model_trainer_builder.py index 4c4d7aca..c4947fab 100644 --- a/snowflake/ml/modeling/_internal/model_trainer_builder.py +++ b/snowflake/ml/modeling/_internal/model_trainer_builder.py @@ -3,13 +3,20 @@ import pandas as pd from sklearn import model_selection +from snowflake.ml._internal.exceptions import error_codes, exceptions from snowflake.ml.modeling._internal.distributed_hpo_trainer import ( DistributedHPOTrainer, ) -from snowflake.ml.modeling._internal.estimator_utils import is_single_node +from snowflake.ml.modeling._internal.estimator_utils import ( + get_module_name, + is_single_node, +) from snowflake.ml.modeling._internal.model_trainer import ModelTrainer from snowflake.ml.modeling._internal.pandas_trainer import PandasModelTrainer from snowflake.ml.modeling._internal.snowpark_trainer import SnowparkModelTrainer +from 
 snowflake.ml.modeling._internal.xgboost_external_memory_trainer import ( + XGBoostExternalMemoryTrainer, +) from snowflake.snowpark import DataFrame, Session _PROJECT = "ModelDevelopment" @@ -30,6 +37,31 @@ class ModelTrainerBuilder: def _check_if_distributed_hpo_enabled(cls, session: Session) -> bool: return not is_single_node(session) and ModelTrainerBuilder._ENABLE_DISTRIBUTED is True + @classmethod + def _validate_external_memory_params(cls, estimator: object, batch_size: int) -> None: + """ + Validate the params are set appropriately for external memory training. + + Args: + estimator: Model object + batch_size: Number of rows in each batch of data processed during training. + + Raises: + SnowflakeMLException: If the params are not appropriate for the external memory training feature. + """ + module_name = get_module_name(model=estimator) + root_module_name = module_name.split(".")[0] + if root_module_name != "xgboost": + raise exceptions.SnowflakeMLException( + error_code=error_codes.INVALID_ARGUMENT, + original_exception=RuntimeError("External memory training is only supported for XGBoost models."), + ) + if batch_size <= 0: + raise exceptions.SnowflakeMLException( + error_code=error_codes.INVALID_ARGUMENT, + original_exception=RuntimeError("Batch size must be > 0 when using external memory training feature."), + ) + @classmethod + def build( + cls, @@ -40,6 +72,8 @@ def build( sample_weight_col: Optional[str] = None, autogenerated: bool = False, subproject: str = "", + use_external_memory_version: bool = False, + batch_size: int = -1, ) -> ModelTrainer: """ Builder method that creates an appropriate ModelTrainer instance based on the given params. 
@@ -55,22 +89,32 @@ def build( ) elif isinstance(dataset, DataFrame): trainer_klass = SnowparkModelTrainer + init_args = { + "estimator": estimator, + "dataset": dataset, + "session": dataset._session, + "input_cols": input_cols, + "label_cols": label_cols, + "sample_weight_col": sample_weight_col, + "autogenerated": autogenerated, + "subproject": subproject, + } + assert dataset._session is not None # Make MyPy happy if isinstance(estimator, model_selection.GridSearchCV) or isinstance( estimator, model_selection.RandomizedSearchCV ): if ModelTrainerBuilder._check_if_distributed_hpo_enabled(session=dataset._session): trainer_klass = DistributedHPOTrainer - return trainer_klass( - estimator=estimator, - dataset=dataset, - session=dataset._session, - input_cols=input_cols, - label_cols=label_cols, - sample_weight_col=sample_weight_col, - autogenerated=autogenerated, - subproject=subproject, - ) + elif use_external_memory_version: + ModelTrainerBuilder._validate_external_memory_params( + estimator=estimator, + batch_size=batch_size, + ) + trainer_klass = XGBoostExternalMemoryTrainer + init_args["batch_size"] = batch_size + + return trainer_klass(**init_args) # type: ignore[arg-type] else: raise TypeError( f"Unexpected dataset type: {type(dataset)}." 
diff --git a/snowflake/ml/modeling/_internal/model_trainer_builder_test.py b/snowflake/ml/modeling/_internal/model_trainer_builder_test.py new file mode 100644 index 00000000..8fbb37e5 --- /dev/null +++ b/snowflake/ml/modeling/_internal/model_trainer_builder_test.py @@ -0,0 +1,84 @@ +from typing import Any +from unittest import mock + +import inflection +from absl.testing import absltest +from sklearn.datasets import load_iris +from sklearn.linear_model import LinearRegression +from sklearn.model_selection import GridSearchCV +from xgboost import XGBRegressor + +from snowflake.ml.modeling._internal.distributed_hpo_trainer import ( + DistributedHPOTrainer, +) +from snowflake.ml.modeling._internal.model_trainer_builder import ModelTrainerBuilder +from snowflake.ml.modeling._internal.snowpark_trainer import SnowparkModelTrainer +from snowflake.ml.modeling._internal.xgboost_external_memory_trainer import ( + XGBoostExternalMemoryTrainer, +) +from snowflake.ml.utils.connection_params import SnowflakeLoginOptions +from snowflake.snowpark import DataFrame, Session + + +class SnowparkHandlersUnitTest(absltest.TestCase): + def setUp(self) -> None: + self._session = Session.builder.configs(SnowflakeLoginOptions()).create() + + def tearDown(self) -> None: + self._session.close() + + def get_snowpark_dataset(self) -> DataFrame: + input_df_pandas = load_iris(as_frame=True).frame + input_df_pandas.columns = [inflection.parameterize(c, "_").upper() for c in input_df_pandas.columns] + input_df_pandas["INDEX"] = input_df_pandas.reset_index().index + input_df: DataFrame = self._session.create_dataframe(input_df_pandas) + return input_df + + def test_sklearn_model_trainer(self) -> None: + model = LinearRegression() + dataset = self.get_snowpark_dataset() + trainer = ModelTrainerBuilder.build(estimator=model, dataset=dataset, input_cols=[]) + + self.assertTrue(isinstance(trainer, SnowparkModelTrainer)) + + 
@mock.patch("snowflake.ml.modeling._internal.model_trainer_builder.is_single_node") + def test_distributed_hpo_trainer(self, mock_is_single_node: Any) -> None: + mock_is_single_node.return_value = False + dataset = self.get_snowpark_dataset() + model = GridSearchCV(estimator=LinearRegression(), param_grid={"loss": ["rmsqe", "mae"]}) + trainer = ModelTrainerBuilder.build(estimator=model, dataset=dataset, input_cols=[]) + + self.assertTrue(isinstance(trainer, DistributedHPOTrainer)) + + @mock.patch("snowflake.ml.modeling._internal.model_trainer_builder.is_single_node") + def test_single_node_hpo_trainer(self, mock_is_single_node: Any) -> None: + mock_is_single_node.return_value = True + dataset = self.get_snowpark_dataset() + model = GridSearchCV(estimator=LinearRegression(), param_grid={"loss": ["rmsqe", "mae"]}) + trainer = ModelTrainerBuilder.build(estimator=model, dataset=dataset, input_cols=[]) + + self.assertTrue(isinstance(trainer, SnowparkModelTrainer)) + + def test_xgboost_external_memory_model_trainer(self) -> None: + model = XGBRegressor() + dataset = self.get_snowpark_dataset() + trainer = ModelTrainerBuilder.build( + estimator=model, dataset=dataset, input_cols=[], use_external_memory_version=True, batch_size=1000 + ) + + self.assertTrue(isinstance(trainer, XGBoostExternalMemoryTrainer)) + + def test_xgboost_standard_model_trainer(self) -> None: + model = XGBRegressor() + dataset = self.get_snowpark_dataset() + trainer = ModelTrainerBuilder.build( + estimator=model, + dataset=dataset, + input_cols=[], + ) + + self.assertTrue(isinstance(trainer, SnowparkModelTrainer)) + + +if __name__ == "__main__": + absltest.main() diff --git a/snowflake/ml/modeling/_internal/snowpark_trainer.py b/snowflake/ml/modeling/_internal/snowpark_trainer.py index 3d7aaf39..315eded1 100644 --- a/snowflake/ml/modeling/_internal/snowpark_trainer.py +++ b/snowflake/ml/modeling/_internal/snowpark_trainer.py @@ -12,7 +12,11 @@ exceptions, modeling_error_messages, ) -from 
snowflake.ml._internal.utils import identifier, snowpark_dataframe_utils +from snowflake.ml._internal.utils import ( + identifier, + pkg_version_utils, + snowpark_dataframe_utils, +) from snowflake.ml._internal.utils.query_result_checker import SqlResultValidator from snowflake.ml._internal.utils.temp_file_utils import ( cleanup_temp_files, @@ -253,11 +257,15 @@ def _get_fit_wrapper_sproc(self, statement_params: Dict[str, str]) -> StoredProc fit_sproc_name = random_name_for_temp_object(TempObjectType.PROCEDURE) + relaxed_dependencies = pkg_version_utils.get_valid_pkg_versions_supported_in_snowflake_conda_channel( + pkg_versions=model_spec.pkgDependencies, session=self.session + ) + fit_wrapper_sproc = self.session.sproc.register( func=self._build_fit_wrapper_sproc(model_spec=model_spec), is_permanent=False, name=fit_sproc_name, - packages=["snowflake-snowpark-python"] + model_spec.pkgDependencies, # type: ignore[arg-type] + packages=["snowflake-snowpark-python"] + relaxed_dependencies, # type: ignore[arg-type] replace=True, session=self.session, statement_params=statement_params, diff --git a/snowflake/ml/modeling/_internal/xgboost_external_memory_trainer.py b/snowflake/ml/modeling/_internal/xgboost_external_memory_trainer.py new file mode 100644 index 00000000..5f7e5942 --- /dev/null +++ b/snowflake/ml/modeling/_internal/xgboost_external_memory_trainer.py @@ -0,0 +1,444 @@ +import inspect +import os +import tempfile +from typing import Any, Dict, List, Optional + +import cloudpickle as cp +import pandas as pd +import pyarrow.parquet as pq + +from snowflake.ml._internal import telemetry +from snowflake.ml._internal.exceptions import ( + error_codes, + exceptions, + modeling_error_messages, +) +from snowflake.ml._internal.utils import pkg_version_utils +from snowflake.ml._internal.utils.query_result_checker import ResultValidator +from snowflake.ml._internal.utils.snowpark_dataframe_utils import ( + cast_snowpark_dataframe, +) +from 
snowflake.ml._internal.utils.temp_file_utils import get_temp_file_path +from snowflake.ml.modeling._internal.model_specifications import ( + ModelSpecifications, + ModelSpecificationsBuilder, +) +from snowflake.ml.modeling._internal.snowpark_trainer import SnowparkModelTrainer +from snowflake.snowpark import ( + DataFrame, + Session, + exceptions as snowpark_exceptions, + functions as F, +) +from snowflake.snowpark._internal.utils import ( + TempObjectType, + random_name_for_temp_object, +) + +_PROJECT = "ModelDevelopment" + + +def get_data_iterator( + file_paths: List[str], + batch_size: int, + input_cols: List[str], + label_cols: List[str], + sample_weight_col: Optional[str] = None, +) -> Any: + from typing import List, Optional + + import xgboost + + class ParquetDataIterator(xgboost.DataIter): + """ + This iterator reads parquet data stored in a specified files and returns + deserialized data, enabling seamless integration with the xgboost framework for + machine learning tasks. + """ + + def __init__( + self, + file_paths: List[str], + batch_size: int, + input_cols: List[str], + label_cols: List[str], + sample_weight_col: Optional[str] = None, + ) -> None: + """ + Initialize the DataIterator. + + Args: + file_paths: List of file paths containing the data. + batch_size: Target number of rows in each batch. + input_cols: The name(s) of one or more columns in a DataFrame containing a feature to be used for + training. + label_cols: The name(s) of one or more columns in a DataFrame representing the target variable(s) + to learn. + sample_weight_col: The column name representing the weight of training examples. 
+ """ + self._file_paths = file_paths + self._batch_size = batch_size + self._input_cols = input_cols + self._label_cols = label_cols + self._sample_weight_col = sample_weight_col + + # File index + self._it = 0 + # Pandas dataframe containing temp data + self._df = None + # XGBoost will generate some cache files under current directory with the prefix + # "cache" + cache_dir_name = tempfile.mkdtemp() + super().__init__(cache_prefix=os.path.join(cache_dir_name, "cache")) + + def next(self, batch_consumer_fn) -> int: # type: ignore[no-untyped-def] + """Advance the iterator by 1 step and pass the data to XGBoost's batch_consumer_fn. + This function is called by XGBoost during the construction of ``DMatrix`` + + Args: + batch_consumer_fn: batch consumer function + + Returns: + 0 if there is no more data, else 1. + """ + while (self._df is None) or (self._df.shape[0] < self._batch_size): + # Read files and append data to temp df until batch size is reached. + if self._it == len(self._file_paths): + break + new_df = pq.read_table(self._file_paths[self._it]).to_pandas() + self._it += 1 + + if self._df is None: + self._df = new_df + else: + self._df = pd.concat([self._df, new_df], ignore_index=True) + + if (self._df is None) or (self._df.shape[0] == 0): + # No more data + return 0 + + # Slice the temp df and save the remainder in the temp df + batch_end_index = min(self._batch_size, self._df.shape[0]) + batch_df = self._df.iloc[:batch_end_index] + self._df = self._df.truncate(before=batch_end_index).reset_index(drop=True) + + # TODO(snandamuri): Make it proper to support categorical features, etc. + func_args = { + "data": batch_df[self._input_cols], + "label": batch_df[self._label_cols].squeeze(), + } + if self._sample_weight_col is not None: + func_args["weight"] = batch_df[self._sample_weight_col].squeeze() + + batch_consumer_fn(**func_args) + # Return 1 to let XGBoost know we haven't seen all the files yet. 
+ return 1 + + def reset(self) -> None: + """Reset the iterator to its beginning""" + self._it = 0 + + return ParquetDataIterator( + file_paths=file_paths, + batch_size=batch_size, + input_cols=input_cols, + label_cols=label_cols, + sample_weight_col=sample_weight_col, + ) + + +def train_xgboost_model( + estimator: object, + file_paths: List[str], + batch_size: int, + input_cols: List[str], + label_cols: List[str], + sample_weight_col: Optional[str] = None, +) -> object: + """ + Function to train XGBoost models using the external memory version of XGBoost. + """ + import xgboost + + def _objective_decorator(func): # type: ignore[no-untyped-def] + def inner(preds, dmatrix): # type: ignore[no-untyped-def] + """internal function""" + labels = dmatrix.get_label() + return func(labels, preds) + + return inner + + assert isinstance(estimator, xgboost.XGBModel) + params = estimator.get_xgb_params() + obj = None + + if isinstance(estimator, xgboost.XGBClassifier): + # TODO (snandamuri): Find better way to get expected_classes + # Set: self.classes_, self.n_classes_ + expected_classes = pd.unique(pq.read_table(file_paths[0]).to_pandas()[label_cols].squeeze()) + estimator.n_classes_ = len(expected_classes) + if callable(estimator.objective): + obj = _objective_decorator(estimator.objective) # type: ignore[no-untyped-call] + # Use default value. Is it really not used ? 
+ params["objective"] = "binary:logistic" + + if len(expected_classes) > 2: + # Switch to using a multiclass objective in the underlying XGB instance + if params.get("objective", None) != "multi:softmax": + params["objective"] = "multi:softprob" + params["num_class"] = len(expected_classes) + + if "tree_method" not in params.keys() or params["tree_method"] is None or params["tree_method"].lower() == "exact": + params["tree_method"] = "hist" + + if ( + "grow_policy" not in params.keys() + or params["grow_policy"] is None + or params["grow_policy"].lower() != "depthwise" + ): + params["grow_policy"] = "depthwise" + + it = get_data_iterator( + file_paths=file_paths, + batch_size=batch_size, + input_cols=input_cols, + label_cols=label_cols, + sample_weight_col=sample_weight_col, + ) + Xy = xgboost.DMatrix(it) + estimator._Booster = xgboost.train( + params, + Xy, + estimator.get_num_boosting_rounds(), + evals=[], + early_stopping_rounds=estimator.early_stopping_rounds, + evals_result=None, + obj=obj, + custom_metric=estimator.eval_metric, + verbose_eval=None, + xgb_model=None, + callbacks=None, + ) + return estimator + + +cp.register_pickle_by_value(inspect.getmodule(get_data_iterator)) +cp.register_pickle_by_value(inspect.getmodule(train_xgboost_model)) + + +class XGBoostExternalMemoryTrainer(SnowparkModelTrainer): + """ + When working with large datasets, training XGBoost models traditionally requires loading the entire dataset into + memory, which can be costly and sometimes infeasible due to memory constraints. To solve this problem, XGBoost + provides support for loading data from external memory using a built-in data parser. With this feature enabled, + the training process occurs in a two-step approach: + Preprocessing Step: Input data is read and parsed into an internal format, such as CSR, CSC, or sorted CSC. + Processed state is appended to an in-memory buffer. Once the buffer reaches a predefined size, it is + written out to disk as a page. 
+ Tree Construction Step: During the tree construction phase, the data pages stored on disk are streamed via + a multi-threaded pre-fetcher, allowing the model to efficiently access and process the data without + overloading memory. + """ + + def __init__( + self, + estimator: object, + dataset: DataFrame, + session: Session, + input_cols: List[str], + label_cols: Optional[List[str]], + sample_weight_col: Optional[str], + autogenerated: bool = False, + subproject: str = "", + batch_size: int = 10000, + ) -> None: + """ + Initializes the XGBoostExternalMemoryTrainer with a model, a Snowpark DataFrame, feature, and label column + names, etc. + + Args: + estimator: SKLearn compatible estimator or transformer object. + dataset: The dataset used for training the model. + session: Snowflake session object to be used for training. + input_cols: The name(s) of one or more columns in a DataFrame containing a feature to be used for training. + label_cols: The name(s) of one or more columns in a DataFrame representing the target variable(s) to learn. + sample_weight_col: The column name representing the weight of training examples. + autogenerated: A boolean denoting if the trainer is being used by autogenerated code or not. + subproject: subproject name to be used in telemetry. + batch_size: Number of the rows in the each batch processed during training. 
+ """ + super().__init__( + estimator=estimator, + dataset=dataset, + session=session, + input_cols=input_cols, + label_cols=label_cols, + sample_weight_col=sample_weight_col, + autogenerated=autogenerated, + subproject=subproject, + ) + self._batch_size = batch_size + + def _get_xgb_external_memory_fit_wrapper_sproc( + self, + model_spec: ModelSpecifications, + session: Session, + statement_params: Dict[str, str], + import_file_paths: List[str], + ) -> Any: + fit_sproc_name = random_name_for_temp_object(TempObjectType.PROCEDURE) + + relaxed_dependencies = pkg_version_utils.get_valid_pkg_versions_supported_in_snowflake_conda_channel( + pkg_versions=model_spec.pkgDependencies, session=self.session + ) + + @F.sproc( + is_permanent=False, + name=fit_sproc_name, + packages=list(["snowflake-snowpark-python"] + relaxed_dependencies), + replace=True, + session=session, + statement_params=statement_params, + anonymous=True, + imports=list(import_file_paths), + ) # type: ignore[misc] + def fit_wrapper_sproc( + session: Session, + stage_transform_file_name: str, + stage_result_file_name: str, + dataset_stage_name: str, + batch_size: int, + input_cols: List[str], + label_cols: List[str], + sample_weight_col: Optional[str], + statement_params: Dict[str, str], + ) -> str: + import os + import sys + + import cloudpickle as cp + + local_transform_file_name = get_temp_file_path() + + session.file.get(stage_transform_file_name, local_transform_file_name, statement_params=statement_params) + + local_transform_file_path = os.path.join( + local_transform_file_name, os.listdir(local_transform_file_name)[0] + ) + with open(local_transform_file_path, mode="r+b") as local_transform_file_obj: + estimator = cp.load(local_transform_file_obj) + + data_files = [ + os.path.join(sys._xoptions["snowflake_import_directory"], filename) + for filename in os.listdir(sys._xoptions["snowflake_import_directory"]) + if filename.startswith(dataset_stage_name) + ] + + estimator = train_xgboost_model( + 
 estimator=estimator, + file_paths=data_files, + batch_size=batch_size, + input_cols=input_cols, + label_cols=label_cols, + sample_weight_col=sample_weight_col, + ) + + local_result_file_name = get_temp_file_path() + with open(local_result_file_name, mode="w+b") as local_result_file_obj: + cp.dump(estimator, local_result_file_obj) + + session.file.put( + local_result_file_name, + stage_result_file_name, + auto_compress=False, + overwrite=True, + statement_params=statement_params, + ) + + # Note: you can add something like + "|" + str(df) to the return string + # to pass debug information to the caller. + return str(os.path.basename(local_result_file_name)) + + return fit_wrapper_sproc + + def _write_training_data_to_stage(self, dataset_stage_name: str) -> List[str]: + """ + Materializes the training data to the specified stage and returns the list of stage file paths. + + Args: + dataset_stage_name: Target stage to materialize training data. + + Returns: + List of stage file paths that contain the materialized data. + """ + # Stage data. + dataset = cast_snowpark_dataframe(self.dataset) + remote_file_path = f"{dataset_stage_name}/{dataset_stage_name}.parquet" + copy_response = dataset.write.copy_into_location( # type:ignore[call-overload] + remote_file_path, file_format_type="parquet", header=True, overwrite=True + ) + ResultValidator(result=copy_response).has_dimensions(expected_rows=1).validate() + data_file_paths = [f"@{row.name}" for row in self.session.sql(f"LIST @{dataset_stage_name}").collect()] + return data_file_paths + + def train(self) -> object: + """ + Trains the model using XGBoost's external memory feature by staging the data and running a fit sproc. + + Returns: + Trained model + + Raises: + SnowflakeMLException: For known types of user and system errors. + e: For every unexpected exception from SnowflakeClient. 
+ """ + temp_stage_name = self._create_temp_stage() + (stage_transform_file_name, stage_result_file_name) = self._upload_model_to_stage(stage_name=temp_stage_name) + data_file_paths = self._write_training_data_to_stage(dataset_stage_name=temp_stage_name) + + # Call fit sproc + statement_params = telemetry.get_function_usage_statement_params( + project=_PROJECT, + subproject=self._subproject, + function_name=telemetry.get_statement_params_full_func_name(inspect.currentframe(), self._class_name), + api_calls=[Session.call], + custom_tags=None, + ) + + model_spec = ModelSpecificationsBuilder.build(model=self.estimator) + fit_wrapper = self._get_xgb_external_memory_fit_wrapper_sproc( + model_spec=model_spec, + session=self.session, + statement_params=statement_params, + import_file_paths=data_file_paths, + ) + + try: + sproc_export_file_name = fit_wrapper( + self.session, + stage_transform_file_name, + stage_result_file_name, + temp_stage_name, + self._batch_size, + self.input_cols, + self.label_cols, + self.sample_weight_col, + statement_params, + ) + except snowpark_exceptions.SnowparkClientException as e: + if "fit() missing 1 required positional argument: 'y'" in str(e): + raise exceptions.SnowflakeMLException( + error_code=error_codes.NOT_FOUND, + original_exception=RuntimeError(modeling_error_messages.ATTRIBUTE_NOT_SET.format("label_cols")), + ) from e + raise e + + if "|" in sproc_export_file_name: + fields = sproc_export_file_name.strip().split("|") + sproc_export_file_name = fields[0] + + return self._fetch_model_from_stage( + dir_path=stage_result_file_name, + file_name=sproc_export_file_name, + statement_params=statement_params, + ) diff --git a/snowflake/ml/modeling/_internal/xgboost_external_memory_trainer_test.py b/snowflake/ml/modeling/_internal/xgboost_external_memory_trainer_test.py new file mode 100644 index 00000000..8a663c91 --- /dev/null +++ b/snowflake/ml/modeling/_internal/xgboost_external_memory_trainer_test.py @@ -0,0 +1,100 @@ +import math + 
+import inflection +import pandas as pd +from absl.testing import absltest +from sklearn.datasets import load_iris + +from snowflake.ml._internal.utils.temp_file_utils import ( + cleanup_temp_files, + get_temp_file_path, +) +from snowflake.ml.modeling._internal.xgboost_external_memory_trainer import ( + get_data_iterator, +) + + +class XGBoostExternalMemoryTrainerTest(absltest.TestCase): + def setUp(self) -> None: + pass + + def tearDown(self) -> None: + pass + + def get_dataset(self) -> pd.DataFrame: + input_df_pandas = load_iris(as_frame=True).frame + input_df_pandas.columns = [inflection.parameterize(c, "_").upper() for c in input_df_pandas.columns] + input_cols = [c for c in input_df_pandas.columns if not c.startswith("TARGET")] + label_col = [c for c in input_df_pandas.columns if c.startswith("TARGET")] + return (input_df_pandas, input_cols, label_col) + + def test_data_iterator_single_file(self) -> None: + df, input_cols, label_col = self.get_dataset() + + num_rows_in_original_dataset = df.shape[0] + batch_size = 20 + + temp_file = get_temp_file_path() + df.to_parquet(temp_file) + + it = get_data_iterator( + file_paths=[temp_file], + batch_size=20, + input_cols=input_cols, + label_cols=label_col, + ) + + num_rows = 0 + num_batches = 0 + + def consumer_func(data: pd.DataFrame, label: pd.DataFrame) -> None: + nonlocal num_rows + nonlocal num_batches + num_rows += data.shape[0] + num_batches += 1 + + while it.next(consumer_func): + pass + + self.assertEqual(num_rows, num_rows_in_original_dataset) + self.assertEqual(num_batches, math.ceil(float(num_rows_in_original_dataset) / float(batch_size))) + cleanup_temp_files(temp_file) + + def test_data_iterator_multiple_file(self) -> None: + df, input_cols, label_col = self.get_dataset() + + num_rows_in_original_dataset = df.shape[0] + batch_size = 20 + + temp_file1 = get_temp_file_path() + temp_file2 = get_temp_file_path() + df1, df2 = df.iloc[:70], df.iloc[70:] + df1.to_parquet(temp_file1) + df2.to_parquet(temp_file2) 
+ + it = get_data_iterator( + file_paths=[temp_file1, temp_file2], + batch_size=20, + input_cols=input_cols, + label_cols=label_col, + ) + + num_rows = 0 + num_batches = 0 + + def consumer_func(data: pd.DataFrame, label: pd.DataFrame) -> None: + nonlocal num_rows + nonlocal num_batches + num_rows += data.shape[0] + num_batches += 1 + + while it.next(consumer_func): + pass + + self.assertEqual(num_rows, num_rows_in_original_dataset) + self.assertEqual(num_batches, math.ceil(float(num_rows_in_original_dataset) / float(batch_size))) + cleanup_temp_files([temp_file1, temp_file2]) + + +if __name__ == "__main__": + absltest.main() diff --git a/snowflake/ml/modeling/preprocessing/min_max_scaler.py b/snowflake/ml/modeling/preprocessing/min_max_scaler.py index 9e122cb7..56fff072 100644 --- a/snowflake/ml/modeling/preprocessing/min_max_scaler.py +++ b/snowflake/ml/modeling/preprocessing/min_max_scaler.py @@ -8,8 +8,9 @@ from snowflake import snowpark from snowflake.ml._internal import telemetry +from snowflake.ml._internal.exceptions import error_codes, exceptions from snowflake.ml.modeling.framework import _utils, base -from snowflake.snowpark import functions as F +from snowflake.snowpark import functions as F, types as T class MinMaxScaler(base.BaseTransformer): @@ -125,6 +126,18 @@ def _reset(self) -> None: self.data_max_ = {} self.data_range_ = {} + def _check_input_column_types(self, dataset: snowpark.DataFrame) -> None: + for field in dataset.schema.fields: + if field.name in self.input_cols: + if not issubclass(type(field.datatype), T._NumericType): + raise exceptions.SnowflakeMLException( + error_code=error_codes.INVALID_DATA_TYPE, + original_exception=TypeError( + f"Non-numeric input column {field.name} datatype {field.datatype} " + "is not supported by the MinMaxScaler." 
+ ), + ) + @telemetry.send_api_usage_telemetry( project=base.PROJECT, subproject=base.SUBPROJECT, @@ -169,6 +182,7 @@ def _fit_sklearn(self, dataset: pd.DataFrame) -> None: self.data_range_[input_col] = float(sklearn_scaler.data_range_[i]) def _fit_snowpark(self, dataset: snowpark.DataFrame) -> None: + self._check_input_column_types(dataset) computed_states = self._compute(dataset, self.input_cols, self.custom_states) # assign states to the object diff --git a/snowflake/ml/monitoring/tests/BUILD.bazel b/snowflake/ml/monitoring/tests/BUILD.bazel index f005dc1a..594aae84 100644 --- a/snowflake/ml/monitoring/tests/BUILD.bazel +++ b/snowflake/ml/monitoring/tests/BUILD.bazel @@ -1,6 +1,9 @@ load("//bazel:py_rules.bzl", "py_test") -package(default_visibility = ["//snowflake/ml/monitoring"]) +package(default_visibility = [ + "//bazel:snowml_public_common", + "//snowflake/ml/monitoring", +]) SHARD_COUNT = 3 diff --git a/snowflake/ml/registry/BUILD.bazel b/snowflake/ml/registry/BUILD.bazel index 89bb441b..19d7c1a1 100644 --- a/snowflake/ml/registry/BUILD.bazel +++ b/snowflake/ml/registry/BUILD.bazel @@ -46,7 +46,7 @@ py_library( "_schema_upgrade_plans.py", "_schema_version_manager.py", ], - visibility = ["//visibility:private"], + visibility = ["//bazel:snowml_public_common"], deps = [ "//snowflake/ml/_internal/utils:query_result_checker", "//snowflake/ml/_internal/utils:table_manager", @@ -78,7 +78,7 @@ py_test( ) py_library( - name = "registry", + name = "registry_impl", srcs = [ "registry.py", ], @@ -86,12 +86,23 @@ py_library( "//snowflake/ml/_internal:telemetry", "//snowflake/ml/_internal/utils:identifier", "//snowflake/ml/_internal/utils:sql_identifier", + "//snowflake/ml/model", "//snowflake/ml/model:model_signature", "//snowflake/ml/model:type_hints", - "//snowflake/ml/model/_client/model:model_impl", - "//snowflake/ml/model/_client/model:model_version_impl", - "//snowflake/ml/model/_client/ops:model_ops", - "//snowflake/ml/model/_model_composer:model_composer", + 
"//snowflake/ml/registry/_manager:model_manager", + ], +) + +py_library( + name = "registry", + srcs = [ + "__init__.py", + ], + deps = [ + ":artifact_manager", + ":model_registry", + ":registry_impl", + ":schema", ], ) @@ -101,15 +112,22 @@ py_test( "registry_test.py", ], deps = [ - ":registry", - "//snowflake/ml/_internal/utils:sql_identifier", - "//snowflake/ml/model/_client/model:model_version_impl", - "//snowflake/ml/model/_model_composer:model_composer", + ":registry_impl", + "//snowflake/ml/model", "//snowflake/ml/test_utils:mock_data_frame", "//snowflake/ml/test_utils:mock_session", ], ) +py_test( + name = "package_visibility_test", + srcs = ["package_visibility_test.py"], + deps = [ + ":model_registry", + ":registry", + ], +) + py_package( name = "model_registry_pkg", packages = ["snowflake.ml"], diff --git a/snowflake/ml/registry/__init__.py b/snowflake/ml/registry/__init__.py new file mode 100644 index 00000000..47275f2d --- /dev/null +++ b/snowflake/ml/registry/__init__.py @@ -0,0 +1,3 @@ +from snowflake.ml.registry.registry import Registry + +__all__ = ["Registry"] diff --git a/snowflake/ml/registry/_manager/BUILD.bazel b/snowflake/ml/registry/_manager/BUILD.bazel new file mode 100644 index 00000000..e75c6e12 --- /dev/null +++ b/snowflake/ml/registry/_manager/BUILD.bazel @@ -0,0 +1,40 @@ +load("//bazel:py_rules.bzl", "py_library", "py_test") + +package(default_visibility = [ + "//bazel:snowml_public_common", + "//snowflake/ml/registry:__pkg__", +]) + +py_library( + name = "model_manager", + srcs = [ + "model_manager.py", + ], + deps = [ + "//snowflake/ml/_internal/utils:identifier", + "//snowflake/ml/_internal/utils:sql_identifier", + "//snowflake/ml/model", + "//snowflake/ml/model:model_signature", + "//snowflake/ml/model:type_hints", + "//snowflake/ml/model/_client/model:model_impl", + "//snowflake/ml/model/_client/model:model_version_impl", + "//snowflake/ml/model/_client/ops:metadata_ops", + "//snowflake/ml/model/_client/ops:model_ops", + 
"//snowflake/ml/model/_model_composer:model_composer", + ], +) + +py_test( + name = "model_manager_test", + srcs = [ + "model_manager_test.py", + ], + deps = [ + ":model_manager", + "//snowflake/ml/_internal/utils:sql_identifier", + "//snowflake/ml/model/_client/model:model_version_impl", + "//snowflake/ml/model/_model_composer:model_composer", + "//snowflake/ml/test_utils:mock_data_frame", + "//snowflake/ml/test_utils:mock_session", + ], +) diff --git a/snowflake/ml/registry/_manager/model_manager.py b/snowflake/ml/registry/_manager/model_manager.py new file mode 100644 index 00000000..cd2f3c87 --- /dev/null +++ b/snowflake/ml/registry/_manager/model_manager.py @@ -0,0 +1,163 @@ +from types import ModuleType +from typing import Any, Dict, List, Optional + +import pandas as pd +from absl.logging import logging + +from snowflake.ml._internal.utils import sql_identifier +from snowflake.ml.model import model_signature, type_hints as model_types +from snowflake.ml.model._client.model import model_impl, model_version_impl +from snowflake.ml.model._client.ops import metadata_ops, model_ops +from snowflake.ml.model._model_composer import model_composer +from snowflake.snowpark import session + +logger = logging.getLogger(__name__) + + +class ModelManager: + def __init__( + self, + session: session.Session, + *, + database_name: sql_identifier.SqlIdentifier, + schema_name: sql_identifier.SqlIdentifier, + ) -> None: + self._database_name = database_name + self._schema_name = schema_name + self._model_ops = model_ops.ModelOperator( + session, database_name=self._database_name, schema_name=self._schema_name + ) + + def log_model( + self, + model: model_types.SupportedModelType, + *, + model_name: str, + version_name: str, + comment: Optional[str] = None, + metrics: Optional[Dict[str, Any]] = None, + conda_dependencies: Optional[List[str]] = None, + pip_requirements: Optional[List[str]] = None, + python_version: Optional[str] = None, + signatures: Optional[Dict[str, 
model_signature.ModelSignature]] = None, + sample_input_data: Optional[model_types.SupportedDataType] = None, + code_paths: Optional[List[str]] = None, + ext_modules: Optional[List[ModuleType]] = None, + options: Optional[model_types.ModelSaveOption] = None, + statement_params: Optional[Dict[str, Any]] = None, + ) -> model_version_impl.ModelVersion: + model_name_id = sql_identifier.SqlIdentifier(model_name) + + version_name_id = sql_identifier.SqlIdentifier(version_name) + + if self._model_ops.validate_existence( + model_name=model_name_id, statement_params=statement_params + ) and self._model_ops.validate_existence( + model_name=model_name_id, version_name=version_name_id, statement_params=statement_params + ): + raise ValueError(f"Model {model_name} version {version_name} already existed.") + + stage_path = self._model_ops.prepare_model_stage_path( + statement_params=statement_params, + ) + + logger.info("Start packaging and uploading your model. It might take some time based on the size of the model.") + + mc = model_composer.ModelComposer(self._model_ops._session, stage_path=stage_path) + mc.save( + name=model_name_id.resolved(), + model=model, + signatures=signatures, + sample_input=sample_input_data, + conda_dependencies=conda_dependencies, + pip_requirements=pip_requirements, + python_version=python_version, + code_paths=code_paths, + ext_modules=ext_modules, + options=options, + ) + + logger.info("Start creating MODEL object for you in the Snowflake.") + + self._model_ops.create_from_stage( + composed_model=mc, + model_name=model_name_id, + version_name=version_name_id, + statement_params=statement_params, + ) + + mv = model_version_impl.ModelVersion._ref( + self._model_ops, + model_name=model_name_id, + version_name=version_name_id, + ) + + if comment: + mv.comment = comment + + if metrics: + self._model_ops._metadata_ops.save( + metadata_ops.ModelVersionMetadataSchema(metrics=metrics), + model_name=model_name_id, + version_name=version_name_id, + 
statement_params=statement_params, + ) + + return mv + + def get_model( + self, + model_name: str, + *, + statement_params: Optional[Dict[str, Any]] = None, + ) -> model_impl.Model: + model_name_id = sql_identifier.SqlIdentifier(model_name) + if self._model_ops.validate_existence( + model_name=model_name_id, + statement_params=statement_params, + ): + return model_impl.Model._ref( + self._model_ops, + model_name=model_name_id, + ) + else: + raise ValueError(f"Unable to find model {model_name}") + + def models( + self, + *, + statement_params: Optional[Dict[str, Any]] = None, + ) -> List[model_impl.Model]: + model_names = self._model_ops.list_models_or_versions( + statement_params=statement_params, + ) + return [ + model_impl.Model._ref( + self._model_ops, + model_name=model_name, + ) + for model_name in model_names + ] + + def show_models( + self, + *, + statement_params: Optional[Dict[str, Any]] = None, + ) -> pd.DataFrame: + rows = self._model_ops.show_models_or_versions( + statement_params=statement_params, + ) + return pd.DataFrame([row.as_dict() for row in rows]) + + def delete_model( + self, + model_name: str, + *, + statement_params: Optional[Dict[str, Any]] = None, + ) -> None: + model_name_id = sql_identifier.SqlIdentifier(model_name) + + self._model_ops.delete_model_or_version( + model_name=model_name_id, + statement_params=statement_params, + ) diff --git a/snowflake/ml/registry/_manager/model_manager_test.py b/snowflake/ml/registry/_manager/model_manager_test.py new file mode 100644 index 00000000..a314961c --- /dev/null +++ b/snowflake/ml/registry/_manager/model_manager_test.py @@ -0,0 +1,351 @@ +from typing import cast +from unittest import mock + +import pandas as pd +from absl.testing import absltest + +from snowflake.ml._internal.utils import sql_identifier +from snowflake.ml.model._client.model import model_impl, model_version_impl +from snowflake.ml.model._model_composer import model_composer +from snowflake.ml.registry._manager import 
model_manager +from snowflake.ml.test_utils import mock_session +from snowflake.snowpark import Row, Session + + +class RegistryTest(absltest.TestCase): + def setUp(self) -> None: + self.m_session = mock_session.MockSession(conn=None, test_case=self) + self.c_session = cast(Session, self.m_session) + self.m_r = model_manager.ModelManager( + self.c_session, + database_name=sql_identifier.SqlIdentifier("TEMP"), + schema_name=sql_identifier.SqlIdentifier("TEST"), + ) + + def test_get_model_1(self) -> None: + m_model = model_impl.Model._ref( + self.m_r._model_ops, + model_name=sql_identifier.SqlIdentifier("MODEL"), + ) + with mock.patch.object(self.m_r._model_ops, "validate_existence", return_value=True) as mock_validate_existence: + m = self.m_r.get_model("MODEL") + self.assertEqual(m, m_model) + mock_validate_existence.assert_called_once_with( + model_name=sql_identifier.SqlIdentifier("MODEL"), + statement_params=mock.ANY, + ) + + def test_get_model_2(self) -> None: + with mock.patch.object( + self.m_r._model_ops, "validate_existence", return_value=False + ) as mock_validate_existence: + with self.assertRaisesRegex(ValueError, "Unable to find model MODEL"): + self.m_r.get_model("MODEL") + mock_validate_existence.assert_called_once_with( + model_name=sql_identifier.SqlIdentifier("MODEL"), + statement_params=mock.ANY, + ) + + def test_models(self) -> None: + m_model_1 = model_impl.Model._ref( + self.m_r._model_ops, + model_name=sql_identifier.SqlIdentifier("MODEL"), + ) + m_model_2 = model_impl.Model._ref( + self.m_r._model_ops, + model_name=sql_identifier.SqlIdentifier("Model", case_sensitive=True), + ) + with mock.patch.object( + self.m_r._model_ops, + "list_models_or_versions", + return_value=[ + sql_identifier.SqlIdentifier("MODEL"), + sql_identifier.SqlIdentifier("Model", case_sensitive=True), + ], + ) as mock_list_models_or_versions: + m_list = self.m_r.models() + self.assertListEqual(m_list, [m_model_1, m_model_2]) + 
mock_list_models_or_versions.assert_called_once_with( + statement_params=mock.ANY, + ) + + def test_show_models(self) -> None: + m_list_res = [ + Row( + create_on="06/01", + name="MODEL", + comment="This is a comment", + model_name="MODEL", + database_name="TEMP", + schema_name="test", + default_version_name="V1", + ), + Row( + create_on="06/01", + name="Model", + comment="This is a comment", + model_name="MODEL", + database_name="TEMP", + schema_name="test", + default_version_name="v1", + ), + ] + with mock.patch.object( + self.m_r._model_ops, + "show_models_or_versions", + return_value=m_list_res, + ) as mock_show_models_or_versions: + mv_info = self.m_r.show_models() + pd.testing.assert_frame_equal(mv_info, pd.DataFrame([row.as_dict() for row in m_list_res])) + mock_show_models_or_versions.assert_called_once_with( + statement_params=mock.ANY, + ) + + def test_log_model_1(self) -> None: + m_model = mock.MagicMock() + m_conda_dependency = mock.MagicMock() + m_sample_input_data = mock.MagicMock() + m_stage_path = "@TEMP.TEST.MODEL/V1" + with mock.patch.object( + self.m_r._model_ops, "validate_existence", return_value=False + ) as mock_validate_existence, mock.patch.object( + self.m_r._model_ops, "prepare_model_stage_path", return_value=m_stage_path + ) as mock_prepare_model_stage_path, mock.patch.object( + model_composer.ModelComposer, "save" + ) as mock_save, mock.patch.object( + self.m_r._model_ops, "create_from_stage" + ) as mock_create_from_stage: + mv = self.m_r.log_model( + model=m_model, + model_name="MODEL", + version_name="v1", + conda_dependencies=m_conda_dependency, + sample_input_data=m_sample_input_data, + ) + mock_validate_existence.assert_called_once_with( + model_name=sql_identifier.SqlIdentifier("MODEL"), + statement_params=mock.ANY, + ) + mock_prepare_model_stage_path.assert_called_once_with( + statement_params=mock.ANY, + ) + mock_save.assert_called_once_with( + name="MODEL", + model=m_model, + signatures=None, + sample_input=m_sample_input_data, 
+ conda_dependencies=m_conda_dependency, + pip_requirements=None, + python_version=None, + code_paths=None, + ext_modules=None, + options=None, + ) + mock_create_from_stage.assert_called_once_with( + composed_model=mock.ANY, + model_name=sql_identifier.SqlIdentifier("MODEL"), + version_name=sql_identifier.SqlIdentifier("v1"), + statement_params=mock.ANY, + ) + self.assertEqual( + mv, + model_version_impl.ModelVersion._ref( + self.m_r._model_ops, + model_name=sql_identifier.SqlIdentifier("MODEL"), + version_name=sql_identifier.SqlIdentifier("v1"), + ), + ) + + def test_log_model_2(self) -> None: + m_model = mock.MagicMock() + m_pip_requirements = mock.MagicMock() + m_signatures = mock.MagicMock() + m_options = mock.MagicMock() + m_stage_path = "@TEMP.TEST.MODEL/V1" + with mock.patch.object(self.m_r._model_ops, "validate_existence", return_value=False), mock.patch.object( + self.m_r._model_ops, "prepare_model_stage_path", return_value=m_stage_path + ) as mock_prepare_model_stage_path, mock.patch.object( + model_composer.ModelComposer, "save" + ) as mock_save, mock.patch.object( + self.m_r._model_ops, "create_from_stage" + ) as mock_create_from_stage: + mv = self.m_r.log_model( + model=m_model, + model_name="MODEL", + version_name="V1", + pip_requirements=m_pip_requirements, + signatures=m_signatures, + options=m_options, + ) + mock_prepare_model_stage_path.assert_called_once_with( + statement_params=mock.ANY, + ) + mock_save.assert_called_once_with( + name="MODEL", + model=m_model, + signatures=m_signatures, + sample_input=None, + conda_dependencies=None, + pip_requirements=m_pip_requirements, + python_version=None, + code_paths=None, + ext_modules=None, + options=m_options, + ) + mock_create_from_stage.assert_called_once_with( + composed_model=mock.ANY, + model_name=sql_identifier.SqlIdentifier("MODEL"), + version_name=sql_identifier.SqlIdentifier("V1"), + statement_params=mock.ANY, + ) + self.assertEqual( + mv, + model_version_impl.ModelVersion._ref( + 
self.m_r._model_ops, + model_name=sql_identifier.SqlIdentifier("MODEL"), + version_name=sql_identifier.SqlIdentifier("V1"), + ), + ) + + def test_log_model_3(self) -> None: + m_model = mock.MagicMock() + m_python_version = mock.MagicMock() + m_code_paths = mock.MagicMock() + m_ext_modules = mock.MagicMock() + m_stage_path = "@TEMP.TEST.MODEL/V1" + with mock.patch.object(self.m_r._model_ops, "validate_existence", return_value=False), mock.patch.object( + self.m_r._model_ops, "prepare_model_stage_path", return_value=m_stage_path + ) as mock_prepare_model_stage_path, mock.patch.object( + model_composer.ModelComposer, "save" + ) as mock_save, mock.patch.object( + self.m_r._model_ops, "create_from_stage" + ) as mock_create_from_stage: + mv = self.m_r.log_model( + model=m_model, + model_name="MODEL", + version_name="V1", + python_version=m_python_version, + code_paths=m_code_paths, + ext_modules=m_ext_modules, + ) + mock_prepare_model_stage_path.assert_called_once_with( + statement_params=mock.ANY, + ) + mock_save.assert_called_once_with( + name="MODEL", + model=m_model, + signatures=None, + sample_input=None, + conda_dependencies=None, + pip_requirements=None, + python_version=m_python_version, + code_paths=m_code_paths, + ext_modules=m_ext_modules, + options=None, + ) + mock_create_from_stage.assert_called_once_with( + composed_model=mock.ANY, + model_name=sql_identifier.SqlIdentifier("MODEL"), + version_name=sql_identifier.SqlIdentifier("V1"), + statement_params=mock.ANY, + ) + self.assertEqual( + mv, + model_version_impl.ModelVersion._ref( + self.m_r._model_ops, + model_name=sql_identifier.SqlIdentifier("MODEL"), + version_name=sql_identifier.SqlIdentifier("V1"), + ), + ) + + def test_log_model_4(self) -> None: + m_model = mock.MagicMock() + m_stage_path = "@TEMP.TEST.MODEL/V1" + with mock.patch.object(self.m_r._model_ops, "validate_existence", return_value=False), mock.patch.object( + self.m_r._model_ops, "prepare_model_stage_path", return_value=m_stage_path + ) as 
mock_prepare_model_stage_path, mock.patch.object( + model_composer.ModelComposer, "save" + ) as mock_save, mock.patch.object( + self.m_r._model_ops, "create_from_stage" + ) as mock_create_from_stage, mock.patch.object( + self.m_r._model_ops, "set_comment" + ) as mock_set_comment, mock.patch.object( + self.m_r._model_ops._metadata_ops, "save" + ) as mock_metadata_save: + mv = self.m_r.log_model( + model=m_model, model_name="MODEL", version_name="V1", comment="this is comment", metrics={"a": 1} + ) + mock_prepare_model_stage_path.assert_called_once_with( + statement_params=mock.ANY, + ) + mock_save.assert_called_once_with( + name="MODEL", + model=m_model, + signatures=None, + sample_input=None, + conda_dependencies=None, + pip_requirements=None, + python_version=None, + code_paths=None, + ext_modules=None, + options=None, + ) + mock_create_from_stage.assert_called_once_with( + composed_model=mock.ANY, + model_name=sql_identifier.SqlIdentifier("MODEL"), + version_name=sql_identifier.SqlIdentifier("V1"), + statement_params=mock.ANY, + ) + self.assertEqual( + mv, + model_version_impl.ModelVersion._ref( + self.m_r._model_ops, + model_name=sql_identifier.SqlIdentifier("MODEL"), + version_name=sql_identifier.SqlIdentifier("V1"), + ), + ) + mock_set_comment.assert_called_once_with( + comment="this is comment", + model_name=sql_identifier.SqlIdentifier("MODEL"), + version_name=sql_identifier.SqlIdentifier("V1"), + statement_params=mock.ANY, + ) + mock_metadata_save.assert_called_once_with( + {"metrics": {"a": 1}}, + model_name=sql_identifier.SqlIdentifier("MODEL"), + version_name=sql_identifier.SqlIdentifier("V1"), + statement_params=mock.ANY, + ) + + def test_log_model_5(self) -> None: + m_model = mock.MagicMock() + with mock.patch.object(self.m_r._model_ops, "validate_existence", return_value=True) as mock_validate_existence: + with self.assertRaisesRegex(ValueError, "Model MODEL version V1 already existed."): + self.m_r.log_model(model=m_model, model_name="MODEL", 
version_name="V1") + mock_validate_existence.assert_has_calls( + [ + mock.call( + model_name=sql_identifier.SqlIdentifier("MODEL"), + statement_params=mock.ANY, + ), + mock.call( + model_name=sql_identifier.SqlIdentifier("MODEL"), + version_name=sql_identifier.SqlIdentifier("V1"), + statement_params=mock.ANY, + ), + ] + ) + + def test_delete_model(self) -> None: + with mock.patch.object(self.m_r._model_ops, "delete_model_or_version") as mock_delete_model_or_version: + self.m_r.delete_model( + model_name="MODEL", + ) + mock_delete_model_or_version.assert_called_once_with( + model_name=sql_identifier.SqlIdentifier("MODEL"), + statement_params=mock.ANY, + ) + + +if __name__ == "__main__": + absltest.main() diff --git a/snowflake/ml/registry/model_registry.py b/snowflake/ml/registry/model_registry.py index 0146e52c..ae8a7a18 100644 --- a/snowflake/ml/registry/model_registry.py +++ b/snowflake/ml/registry/model_registry.py @@ -3,6 +3,7 @@ import sys import textwrap import types +import warnings from typing import ( TYPE_CHECKING, Any, @@ -305,6 +306,17 @@ def __init__( schema_name: Desired name of the schema used by this model registry inside the database. create_if_not_exists: create model registry if it's not exists already. """ + + warnings.warn( + """ +The `snowflake.ml.registry.model_registry.ModelRegistry` has been deprecated starting from version 1.2.0. +It will stay in the Private Preview phase. For future implementations, kindly utilize `snowflake.ml.registry.Registry`, +except when specifically required. The old model registry will be removed once all its primary functionalities are +fully integrated into the new registry. 
+ """, + DeprecationWarning, + stacklevel=2, + ) if create_if_not_exists: create_model_registry(session=session, database_name=database_name, schema_name=schema_name) diff --git a/snowflake/ml/registry/notebooks/Using MODEL via Registry in Snowflake.ipynb b/snowflake/ml/registry/notebooks/Using MODEL via Registry in Snowflake.ipynb index 895cf8e9..9d2e5757 100644 --- a/snowflake/ml/registry/notebooks/Using MODEL via Registry in Snowflake.ipynb +++ b/snowflake/ml/registry/notebooks/Using MODEL via Registry in Snowflake.ipynb @@ -231,7 +231,7 @@ "metadata": {}, "outputs": [], "source": [ - "remote_prediction = mv.run(test_features, method_name=\"predict\")" + "remote_prediction = mv.run(test_features, function_name=\"predict\")" ] }, { @@ -260,7 +260,7 @@ "metadata": {}, "outputs": [], "source": [ - "mv.list_methods()" + "mv.show_functions()" ] }, { @@ -269,7 +269,7 @@ "metadata": {}, "outputs": [], "source": [ - "remote_prediction_proba = mv.run(test_features, method_name=\"predict_proba\")" + "remote_prediction_proba = mv.run(test_features, function_name=\"predict_proba\")" ] }, { @@ -321,7 +321,7 @@ "cell_type": "markdown", "metadata": {}, "source": [ - "### List models and versions\n" + "### Show and List models and versions\n" ] }, { @@ -330,7 +330,7 @@ "metadata": {}, "outputs": [], "source": [ - "reg.list_models()" + "reg.show_models()" ] }, { @@ -339,7 +339,25 @@ "metadata": {}, "outputs": [], "source": [ - "m.list_versions()" + "reg.models()" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "m.show_versions()" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "m.versions()" ] }, { @@ -444,7 +462,7 @@ "metadata": {}, "outputs": [], "source": [ - "mv.list_metrics()" + "mv.show_metrics()" ] }, { @@ -501,7 +519,7 @@ "metadata": {}, "outputs": [], "source": [ - "reg.list_models()" + "reg.show_models()" ] }, { @@ -637,7 +655,7 @@ 
"metadata": {}, "outputs": [], "source": [ - "mv.run(kddcup99_sp_df_test, method_name=\"predict\").show()" + "mv.run(kddcup99_sp_df_test, function_name=\"predict\").show()" ] }, { diff --git a/snowflake/ml/registry/package_visibility_test.py b/snowflake/ml/registry/package_visibility_test.py new file mode 100644 index 00000000..8dd064c9 --- /dev/null +++ b/snowflake/ml/registry/package_visibility_test.py @@ -0,0 +1,21 @@ +from types import ModuleType + +from absl.testing import absltest + +from snowflake.ml import registry +from snowflake.ml.registry import artifact, model_registry + + +class PackageVisibilityTest(absltest.TestCase): + """Ensure that the functions in this package are visible externally.""" + + def test_class_visible(self) -> None: + self.assertIsInstance(registry.Registry, type) + + def test_module_visible(self) -> None: + self.assertIsInstance(model_registry, ModuleType) + self.assertIsInstance(artifact, ModuleType) + + +if __name__ == "__main__": + absltest.main() diff --git a/snowflake/ml/registry/registry.py b/snowflake/ml/registry/registry.py index f8031fac..2132d549 100644 --- a/snowflake/ml/registry/registry.py +++ b/snowflake/ml/registry/registry.py @@ -1,12 +1,17 @@ from types import ModuleType -from typing import Dict, List, Optional +from typing import Any, Dict, List, Optional + +import pandas as pd from snowflake.ml._internal import telemetry from snowflake.ml._internal.utils import sql_identifier -from snowflake.ml.model import model_signature, type_hints as model_types -from snowflake.ml.model._client.model import model_impl, model_version_impl -from snowflake.ml.model._client.ops import model_ops -from snowflake.ml.model._model_composer import model_composer +from snowflake.ml.model import ( + Model, + ModelVersion, + model_signature, + type_hints as model_types, +) +from snowflake.ml.registry._manager import model_manager from snowflake.snowpark import session _TELEMETRY_PROJECT = "MLOps" @@ -21,6 +26,18 @@ def __init__( 
database_name: Optional[str] = None, schema_name: Optional[str] = None, ) -> None: + """Opens a registry within a pre-created Snowflake schema. + + Args: + session: The Snowpark Session to connect with Snowflake. + database_name: The name of the database. If None, the current database of the session + will be used. Defaults to None. + schema_name: The name of the schema. If None, the current schema of the session + will be used. If there is no active schema, the PUBLIC schema will be used. Defaults to None. + + Raises: + ValueError: When there is no specified or active database in the session. + """ if database_name: self._database_name = sql_identifier.SqlIdentifier(database_name) else: @@ -42,12 +59,13 @@ def __init__( else sql_identifier.SqlIdentifier("PUBLIC") ) - self._model_ops = model_ops.ModelOperator( + self._model_manager = model_manager.ModelManager( session, database_name=self._database_name, schema_name=self._schema_name ) @property def location(self) -> str: + """Get the location (database.schema) of the registry.""" return ".".join([self._database_name.identifier(), self._schema_name.identifier()]) @telemetry.send_api_usage_telemetry( @@ -60,6 +78,8 @@ def log_model( *, model_name: str, version_name: str, + comment: Optional[str] = None, + metrics: Optional[Dict[str, Any]] = None, conda_dependencies: Optional[List[str]] = None, pip_requirements: Optional[List[str]] = None, python_version: Optional[str] = None, @@ -68,148 +88,136 @@ def log_model( code_paths: Optional[List[str]] = None, ext_modules: Optional[List[ModuleType]] = None, options: Optional[model_types.ModelSaveOption] = None, - ) -> model_version_impl.ModelVersion: - """Log a model. + ) -> ModelVersion: + """ + Log a model with various parameters and metadata. Args: - model: Model Python object - model_name: A string as name. - version_name: A string as version. model_name and version_name combination must be unique. 
- signatures: Model data signatures for inputs and output for every target methods. If it is None, + model: Model object of supported types such as Scikit-learn, XGBoost, Snowpark ML, + PyTorch, TorchScript, Tensorflow, Tensorflow Keras, MLFlow, HuggingFace Pipeline, + Peft-finetuned LLM, or Custom Model. + model_name: Name to identify the model. + version_name: Version identifier for the model. Combination of model_name and version_name must be unique. + comment: Comment associated with the model version. Defaults to None. + metrics: A JSON serializable dictionary containing metrics linked to the model version. Defaults to None. + signatures: Model data signatures for inputs and outputs for various target methods. If it is None, sample_input_data would be used to infer the signatures for those models that cannot automatically - infer the signature. If not None, sample_input should not be specified. Defaults to None. - sample_input_data: Sample input data to infer the model signatures from. If it is None, signatures must be - specified if the model cannot automatically infer the signature. If not None, signatures should not be - specified. Defaults to None. - conda_dependencies: List of Conda package specs. Use "[channel::]package [operator version]" syntax to - specify a dependency. It is a recommended way to specify your dependencies using conda. When channel is - not specified, Snowflake Anaconda Channel will be used. - pip_requirements: List of Pip package specs. - python_version: A string of python version where model is run. Used for user override. If specified as None, - current version would be captured. Defaults to None. - code_paths: Directory of code to import. - ext_modules: External modules that user might want to get pickled with model object. Defaults to None. - options: Model specific kwargs. + infer the signature. If not None, sample_input_data should not be specified. Defaults to None. 
+ sample_input_data: Sample input data to infer model signatures from. Defaults to None. + conda_dependencies: List of Conda package specifications. Use "[channel::]package [operator version]" syntax + to specify a dependency. It is a recommended way to specify your dependencies using conda. When channel + is not specified, Snowflake Anaconda Channel will be used. Defaults to None. + pip_requirements: List of Pip package specifications. Defaults to None. + python_version: Python version in which the model is run. Defaults to None. + code_paths: List of directories containing code to import. Defaults to None. + ext_modules: List of external modules to pickle with the model object. + Only supported when logging the following types of model: + Scikit-learn, Snowpark ML, PyTorch, TorchScript and Custom Model. Defaults to None. + options (Dict[str, Any], optional): Additional model saving options. + + Model Saving Options include: + + - embed_local_ml_library: Embed local Snowpark ML into the code directory or folder. + Override to True if the local Snowpark ML version is not available in the Snowflake Anaconda + Channel. Otherwise, defaults to False + - method_options: Per-method saving options including: + - case_sensitive: Indicates whether the method and its signature should be case sensitive. + This means when you refer the method in the SQL, you need to double quote it. + This will be helpful if you need case to tell apart your methods or features, or you have + non-alphabetic characters in your method or feature name. Defaults to False. + - max_batch_size: Maximum batch size that the method could accept in the Snowflake Warehouse. + Defaults to None, determined automatically by Snowflake. Returns: - A ModelVersion object corresponding to the model just get logged. + ModelVersion: ModelVersion object corresponding to the model just logged. 
""" statement_params = telemetry.get_statement_params( project=_TELEMETRY_PROJECT, subproject=_MODEL_TELEMETRY_SUBPROJECT, ) - model_name_id = sql_identifier.SqlIdentifier(model_name) - - version_name_id = sql_identifier.SqlIdentifier(version_name) - - stage_path = self._model_ops.prepare_model_stage_path( - statement_params=statement_params, - ) - - mc = model_composer.ModelComposer(self._model_ops._session, stage_path=stage_path) - mc.save( - name=model_name_id.resolved(), + return self._model_manager.log_model( model=model, - signatures=signatures, - sample_input=sample_input_data, + model_name=model_name, + version_name=version_name, + comment=comment, + metrics=metrics, conda_dependencies=conda_dependencies, pip_requirements=pip_requirements, python_version=python_version, + signatures=signatures, + sample_input_data=sample_input_data, code_paths=code_paths, ext_modules=ext_modules, options=options, - ) - self._model_ops.create_from_stage( - composed_model=mc, - model_name=model_name_id, - version_name=version_name_id, statement_params=statement_params, ) - return model_version_impl.ModelVersion._ref( - self._model_ops, - model_name=model_name_id, - version_name=version_name_id, - ) - @telemetry.send_api_usage_telemetry( project=_TELEMETRY_PROJECT, subproject=_MODEL_TELEMETRY_SUBPROJECT, ) - def get_model(self, model_name: str) -> model_impl.Model: - """Get the model object. + def get_model(self, model_name: str) -> Model: + """Get the model object by its name. Args: - model_name: The model name. - - Raises: - ValueError: Raised when the model requested does not exist. + model_name: The name of the model. Returns: - The model object. + The corresponding model object. 
""" - model_name_id = sql_identifier.SqlIdentifier(model_name) - statement_params = telemetry.get_statement_params( project=_TELEMETRY_PROJECT, subproject=_MODEL_TELEMETRY_SUBPROJECT, ) - if self._model_ops.validate_existence( - model_name=model_name_id, - statement_params=statement_params, - ): - return model_impl.Model._ref( - self._model_ops, - model_name=model_name_id, - ) - else: - raise ValueError(f"Unable to find model {model_name}") + return self._model_manager.get_model(model_name=model_name, statement_params=statement_params) @telemetry.send_api_usage_telemetry( project=_TELEMETRY_PROJECT, subproject=_MODEL_TELEMETRY_SUBPROJECT, ) - def list_models(self) -> List[model_impl.Model]: - """List all models in the schema where the registry is opened. + def models(self) -> List[Model]: + """Get all models in the schema where the registry is opened. Returns: - A List of Model= object representing all models in the schema where the registry is opened. + A list of Model objects representing all models in the opened registry. """ statement_params = telemetry.get_statement_params( project=_TELEMETRY_PROJECT, subproject=_MODEL_TELEMETRY_SUBPROJECT, ) - model_names = self._model_ops.list_models_or_versions( - statement_params=statement_params, + return self._model_manager.models(statement_params=statement_params) + + @telemetry.send_api_usage_telemetry( + project=_TELEMETRY_PROJECT, + subproject=_MODEL_TELEMETRY_SUBPROJECT, + ) + def show_models(self) -> pd.DataFrame: + """Show information of all models in the schema where the registry is opened. + + Returns: + A Pandas DataFrame containing information of all models in the schema. 
+ """ + statement_params = telemetry.get_statement_params( + project=_TELEMETRY_PROJECT, + subproject=_MODEL_TELEMETRY_SUBPROJECT, ) - return [ - model_impl.Model._ref( - self._model_ops, - model_name=model_name, - ) - for model_name in model_names - ] + return self._model_manager.show_models(statement_params=statement_params) @telemetry.send_api_usage_telemetry( project=_TELEMETRY_PROJECT, subproject=_MODEL_TELEMETRY_SUBPROJECT, ) def delete_model(self, model_name: str) -> None: - """Delete the model. + """ + Delete the model by its name. Args: - model_name: The model name, can be fully qualified one. - If not, use database name and schema name of the registry. + model_name: The name of the model to be deleted. """ - model_name_id = sql_identifier.SqlIdentifier(model_name) - statement_params = telemetry.get_statement_params( project=_TELEMETRY_PROJECT, subproject=_MODEL_TELEMETRY_SUBPROJECT, ) - self._model_ops.delete_model_or_version( - model_name=model_name_id, - statement_params=statement_params, - ) + self._model_manager.delete_model(model_name=model_name, statement_params=statement_params) diff --git a/snowflake/ml/registry/registry_test.py b/snowflake/ml/registry/registry_test.py index b59e27f9..ef42d1ed 100644 --- a/snowflake/ml/registry/registry_test.py +++ b/snowflake/ml/registry/registry_test.py @@ -3,9 +3,6 @@ from absl.testing import absltest -from snowflake.ml._internal.utils import sql_identifier -from snowflake.ml.model._client.model import model_impl, model_version_impl -from snowflake.ml.model._model_composer import model_composer from snowflake.ml.registry import registry from snowflake.ml.test_utils import mock_session from snowflake.snowpark import Session @@ -85,211 +82,86 @@ def setUp(self) -> None: self.c_session = cast(Session, self.m_session) self.m_r = registry.Registry(self.c_session, database_name="TEMP", schema_name="TEST") - def test_get_model_1(self) -> None: - m_model = model_impl.Model._ref( - self.m_r._model_ops, - 
model_name=sql_identifier.SqlIdentifier("MODEL"), - ) - with mock.patch.object(self.m_r._model_ops, "validate_existence", return_value=True) as mock_validate_existence: - m = self.m_r.get_model("MODEL") - self.assertEqual(m, m_model) - mock_validate_existence.assert_called_once_with( - model_name=sql_identifier.SqlIdentifier("MODEL"), + def test_get_model(self) -> None: + with mock.patch.object(self.m_r._model_manager, "get_model", return_value=True) as mock_get_model: + self.m_r.get_model("MODEL") + mock_get_model.assert_called_once_with( + model_name="MODEL", statement_params=mock.ANY, ) - def test_get_model_2(self) -> None: + def test_models(self) -> None: with mock.patch.object( - self.m_r._model_ops, "validate_existence", return_value=False - ) as mock_validate_existence: - with self.assertRaisesRegex(ValueError, "Unable to find model MODEL"): - self.m_r.get_model("MODEL") - mock_validate_existence.assert_called_once_with( - model_name=sql_identifier.SqlIdentifier("MODEL"), + self.m_r._model_manager, + "models", + ) as mock_show_models: + self.m_r.models() + mock_show_models.assert_called_once_with( statement_params=mock.ANY, ) - def test_list_models(self) -> None: - m_model_1 = model_impl.Model._ref( - self.m_r._model_ops, - model_name=sql_identifier.SqlIdentifier("MODEL"), - ) - m_model_2 = model_impl.Model._ref( - self.m_r._model_ops, - model_name=sql_identifier.SqlIdentifier("Model", case_sensitive=True), - ) + def test_show_models(self) -> None: with mock.patch.object( - self.m_r._model_ops, - "list_models_or_versions", - return_value=[ - sql_identifier.SqlIdentifier("MODEL"), - sql_identifier.SqlIdentifier("Model", case_sensitive=True), - ], - ) as mock_list_models_or_versions: - m_list = self.m_r.list_models() - self.assertListEqual(m_list, [m_model_1, m_model_2]) - mock_list_models_or_versions.assert_called_once_with( + self.m_r._model_manager, + "show_models", + ) as mock_show_models: + self.m_r.show_models() + 
mock_show_models.assert_called_once_with( statement_params=mock.ANY, ) - def test_log_model_1(self) -> None: + def test_log_model(self) -> None: m_model = mock.MagicMock() m_conda_dependency = mock.MagicMock() m_sample_input_data = mock.MagicMock() - m_stage_path = "@TEMP.TEST.MODEL/V1" - with mock.patch.object( - self.m_r._model_ops, "prepare_model_stage_path", return_value=m_stage_path - ) as mock_prepare_model_stage_path, mock.patch.object( - model_composer.ModelComposer, "save" - ) as mock_save, mock.patch.object( - self.m_r._model_ops, "create_from_stage" - ) as mock_create_from_stage: - mv = self.m_r.log_model( - model=m_model, - model_name="MODEL", - version_name="v1", - conda_dependencies=m_conda_dependency, - sample_input_data=m_sample_input_data, - ) - mock_prepare_model_stage_path.assert_called_once_with( - statement_params=mock.ANY, - ) - mock_save.assert_called_once_with( - name="MODEL", - model=m_model, - signatures=None, - sample_input=m_sample_input_data, - conda_dependencies=m_conda_dependency, - pip_requirements=None, - python_version=None, - code_paths=None, - ext_modules=None, - options=None, - ) - mock_create_from_stage.assert_called_once_with( - composed_model=mock.ANY, - model_name=sql_identifier.SqlIdentifier("MODEL"), - version_name=sql_identifier.SqlIdentifier("v1"), - statement_params=mock.ANY, - ) - self.assertEqual( - mv, - model_version_impl.ModelVersion._ref( - self.m_r._model_ops, - model_name=sql_identifier.SqlIdentifier("MODEL"), - version_name=sql_identifier.SqlIdentifier("v1"), - ), - ) - - def test_log_model_2(self) -> None: - m_model = mock.MagicMock() m_pip_requirements = mock.MagicMock() m_signatures = mock.MagicMock() m_options = mock.MagicMock() - m_stage_path = "@TEMP.TEST.MODEL/V1" - with mock.patch.object( - self.m_r._model_ops, "prepare_model_stage_path", return_value=m_stage_path - ) as mock_prepare_model_stage_path, mock.patch.object( - model_composer.ModelComposer, "save" - ) as mock_save, mock.patch.object( - 
self.m_r._model_ops, "create_from_stage" - ) as mock_create_from_stage: - mv = self.m_r.log_model( - model=m_model, - model_name="MODEL", - version_name="V1", - pip_requirements=m_pip_requirements, - signatures=m_signatures, - options=m_options, - ) - mock_prepare_model_stage_path.assert_called_once_with( - statement_params=mock.ANY, - ) - mock_save.assert_called_once_with( - name="MODEL", - model=m_model, - signatures=m_signatures, - sample_input=None, - conda_dependencies=None, - pip_requirements=m_pip_requirements, - python_version=None, - code_paths=None, - ext_modules=None, - options=m_options, - ) - mock_create_from_stage.assert_called_once_with( - composed_model=mock.ANY, - model_name=sql_identifier.SqlIdentifier("MODEL"), - version_name=sql_identifier.SqlIdentifier("V1"), - statement_params=mock.ANY, - ) - self.assertEqual( - mv, - model_version_impl.ModelVersion._ref( - self.m_r._model_ops, - model_name=sql_identifier.SqlIdentifier("MODEL"), - version_name=sql_identifier.SqlIdentifier("V1"), - ), - ) - - def test_log_model_3(self) -> None: - m_model = mock.MagicMock() m_python_version = mock.MagicMock() m_code_paths = mock.MagicMock() m_ext_modules = mock.MagicMock() - m_stage_path = "@TEMP.TEST.MODEL/V1" - with mock.patch.object( - self.m_r._model_ops, "prepare_model_stage_path", return_value=m_stage_path - ) as mock_prepare_model_stage_path, mock.patch.object( - model_composer.ModelComposer, "save" - ) as mock_save, mock.patch.object( - self.m_r._model_ops, "create_from_stage" - ) as mock_create_from_stage: - mv = self.m_r.log_model( + m_comment = mock.MagicMock() + m_metrics = mock.MagicMock() + with mock.patch.object(self.m_r._model_manager, "log_model") as mock_log_model: + self.m_r.log_model( model=m_model, model_name="MODEL", - version_name="V1", + version_name="v1", + comment=m_comment, + metrics=m_metrics, + conda_dependencies=m_conda_dependency, + pip_requirements=m_pip_requirements, python_version=m_python_version, + signatures=m_signatures, + 
sample_input_data=m_sample_input_data, code_paths=m_code_paths, ext_modules=m_ext_modules, + options=m_options, ) - mock_prepare_model_stage_path.assert_called_once_with( - statement_params=mock.ANY, - ) - mock_save.assert_called_once_with( - name="MODEL", + mock_log_model.assert_called_once_with( model=m_model, - signatures=None, - sample_input=None, - conda_dependencies=None, - pip_requirements=None, + model_name="MODEL", + version_name="v1", + comment=m_comment, + metrics=m_metrics, + conda_dependencies=m_conda_dependency, + pip_requirements=m_pip_requirements, python_version=m_python_version, + signatures=m_signatures, + sample_input_data=m_sample_input_data, code_paths=m_code_paths, ext_modules=m_ext_modules, - options=None, - ) - mock_create_from_stage.assert_called_once_with( - composed_model=mock.ANY, - model_name=sql_identifier.SqlIdentifier("MODEL"), - version_name=sql_identifier.SqlIdentifier("V1"), + options=m_options, statement_params=mock.ANY, ) - self.assertEqual( - mv, - model_version_impl.ModelVersion._ref( - self.m_r._model_ops, - model_name=sql_identifier.SqlIdentifier("MODEL"), - version_name=sql_identifier.SqlIdentifier("V1"), - ), - ) def test_delete_model(self) -> None: - with mock.patch.object(self.m_r._model_ops, "delete_model_or_version") as mock_delete_model_or_version: + with mock.patch.object(self.m_r._model_manager, "delete_model") as mock_delete_model: self.m_r.delete_model( model_name="MODEL", ) - mock_delete_model_or_version.assert_called_once_with( - model_name=sql_identifier.SqlIdentifier("MODEL"), + mock_delete_model.assert_called_once_with( + model_name="MODEL", statement_params=mock.ANY, ) diff --git a/snowflake/ml/version.bzl b/snowflake/ml/version.bzl index d6dfd85a..793bbe93 100644 --- a/snowflake/ml/version.bzl +++ b/snowflake/ml/version.bzl @@ -1,2 +1,2 @@ # This is parsed by regex in conda reciper meta file. Make sure not to break it. 
-VERSION = "1.1.2" +VERSION = "1.2.0" diff --git a/tests/integ/snowflake/ml/_internal/env_utils_integ_test.py b/tests/integ/snowflake/ml/_internal/env_utils_integ_test.py index 35813d87..487a8576 100644 --- a/tests/integ/snowflake/ml/_internal/env_utils_integ_test.py +++ b/tests/integ/snowflake/ml/_internal/env_utils_integ_test.py @@ -14,33 +14,32 @@ def tearDown(self) -> None: self._session.close() def test_validate_requirement_in_snowflake_conda_channel(self) -> None: - res = env_utils.validate_requirements_in_information_schema( + res = env_utils.get_matched_package_versions_in_information_schema( session=self._session, reqs=[requirements.Requirement("xgboost")], python_version=snowml_env.PYTHON_VERSION ) - self.assertNotEmpty(res) + self.assertNotEmpty(res["xgboost"]) - res = env_utils.validate_requirements_in_information_schema( + res = env_utils.get_matched_package_versions_in_information_schema( session=self._session, reqs=[requirements.Requirement("xgboost"), requirements.Requirement("pytorch")], python_version=snowml_env.PYTHON_VERSION, ) - self.assertNotEmpty(res) - - self.assertIsNone( - env_utils.validate_requirements_in_information_schema( - session=self._session, - reqs=[requirements.Requirement("xgboost==1.0.*")], - python_version=snowml_env.PYTHON_VERSION, - ) + self.assertNotEmpty(res["xgboost"]) + self.assertNotEmpty(res["pytorch"]) + + res = env_utils.get_matched_package_versions_in_information_schema( + session=self._session, + reqs=[requirements.Requirement("xgboost==1.0.*")], + python_version=snowml_env.PYTHON_VERSION, ) + self.assertEmpty(res["xgboost"]) - self.assertIsNone( - env_utils.validate_requirements_in_information_schema( - session=self._session, - reqs=[requirements.Requirement("python-package")], - python_version=snowml_env.PYTHON_VERSION, - ) + res = env_utils.get_matched_package_versions_in_information_schema( + session=self._session, + reqs=[requirements.Requirement("python-package")], + python_version=snowml_env.PYTHON_VERSION, 
) + self.assertNotIn("python-package", res) if __name__ == "__main__": diff --git a/tests/integ/snowflake/ml/extra_tests/BUILD.bazel b/tests/integ/snowflake/ml/extra_tests/BUILD.bazel index 9dfdf6d3..d34395ef 100644 --- a/tests/integ/snowflake/ml/extra_tests/BUILD.bazel +++ b/tests/integ/snowflake/ml/extra_tests/BUILD.bazel @@ -138,6 +138,18 @@ py_test( ], ) +py_test( + name = "fit_transform_test", + srcs = ["fit_transform_test.py"], + shard_count = 3, + deps = [ + "//snowflake/ml/modeling/manifold:mds", + "//snowflake/ml/modeling/manifold:spectral_embedding", + "//snowflake/ml/modeling/manifold:tsne", + "//snowflake/ml/utils:connection_params", + ], +) + py_test( name = "decimal_type_test", srcs = ["decimal_type_test.py"], @@ -146,3 +158,13 @@ py_test( "//snowflake/ml/utils:connection_params", ], ) + +py_test( + name = "xgboost_external_memory_training_test", + srcs = ["xgboost_external_memory_training_test.py"], + deps = [ + "//snowflake/ml/modeling/metrics:classification", + "//snowflake/ml/modeling/xgboost:xgb_classifier", + "//snowflake/ml/utils:connection_params", + ], +) diff --git a/tests/integ/snowflake/ml/extra_tests/fit_transform_test.py b/tests/integ/snowflake/ml/extra_tests/fit_transform_test.py new file mode 100644 index 00000000..d29a611c --- /dev/null +++ b/tests/integ/snowflake/ml/extra_tests/fit_transform_test.py @@ -0,0 +1,73 @@ +import numpy as np +import pandas as pd +from absl.testing.absltest import TestCase, main +from sklearn.datasets import load_digits +from sklearn.manifold import ( + MDS as SKMDS, + TSNE as SKTSNE, + SpectralEmbedding as SKSpectralEmbedding, +) + +from snowflake.ml.modeling.manifold import MDS, TSNE, SpectralEmbedding +from snowflake.ml.utils.connection_params import SnowflakeLoginOptions +from snowflake.snowpark import Session + + +class FitTransformTest(TestCase): + def _load_data(self): + X, _ = load_digits(return_X_y=True) + self._input_df_pandas = pd.DataFrame(X)[:100] + self._input_df_pandas.columns = [str(c) for c 
in self._input_df_pandas.columns] + self._input_df = self._session.create_dataframe(self._input_df_pandas) + self._input_cols = self._input_df.columns + self._output_cols = [str(c) for c in range(100)] + + def setUp(self): + """Creates Snowpark and Snowflake environments for testing.""" + self._session = Session.builder.configs(SnowflakeLoginOptions()).create() + self._load_data() + + def tearDown(self): + self._session.close() + + def testMDS(self): + sk_embedding = SKMDS(n_components=2, normalized_stress="auto", random_state=2024) + + embedding = MDS( + input_cols=self._input_cols, + output_cols=self._output_cols, + n_components=2, + normalized_stress="auto", + random_state=2024, + ) + sk_X_transformed = sk_embedding.fit_transform(self._input_df_pandas) + X_transformed = embedding.fit_transform(self._input_df) + np.testing.assert_allclose(sk_X_transformed, X_transformed, rtol=1.0e-1, atol=1.0e-2) + + def testSpectralEmbedding(self): + sk_embedding = SKSpectralEmbedding(n_components=2, random_state=2024) + sk_X_transformed = sk_embedding.fit_transform(self._input_df_pandas) + + embedding = SpectralEmbedding( + input_cols=self._input_cols, output_cols=self._output_cols, n_components=2, random_state=2024 + ) + X_transformed = embedding.fit_transform(self._input_df) + np.testing.assert_allclose(sk_X_transformed, X_transformed, rtol=1.0e-1, atol=1.0e-2) + + def testTSNE(self): + sk_embedding = SKTSNE(n_components=2, random_state=2024, n_jobs=1) + sk_X_transformed = sk_embedding.fit_transform(self._input_df_pandas) + + embedding = TSNE( + input_cols=self._input_cols, + output_cols=self._output_cols, + n_components=2, + random_state=2024, + n_jobs=1, + ) + X_transformed = embedding.fit_transform(self._input_df) + np.testing.assert_allclose(sk_X_transformed.shape, X_transformed.shape, rtol=1.0e-1, atol=1.0e-2) + + +if __name__ == "__main__": + main() diff --git a/tests/integ/snowflake/ml/extra_tests/xgboost_external_memory_training_test.py 
b/tests/integ/snowflake/ml/extra_tests/xgboost_external_memory_training_test.py new file mode 100644 index 00000000..1ab9100d --- /dev/null +++ b/tests/integ/snowflake/ml/extra_tests/xgboost_external_memory_training_test.py @@ -0,0 +1,81 @@ +import numpy as np +from absl.testing.absltest import TestCase, main +from sklearn.metrics import accuracy_score as sk_accuracy_score +from xgboost import XGBClassifier as NativeXGBClassifier + +from snowflake.ml.modeling.xgboost import XGBClassifier +from snowflake.ml.utils.connection_params import SnowflakeLoginOptions +from snowflake.snowpark import Session, functions as F + +categorical_columns = [ + "AGE", + "CAMPAIGN", + "CONTACT", + "DAY_OF_WEEK", + "EDUCATION", + "HOUSING", + "JOB", + "LOAN", + "MARITAL", + "MONTH", + "POUTCOME", + "DEFAULT", +] +numerical_columns = [ + "CONS_CONF_IDX", + "CONS_PRICE_IDX", + "DURATION", + "EMP_VAR_RATE", + "EURIBOR3M", + "NR_EMPLOYED", + "PDAYS", + "PREVIOUS", +] +label_column = ["LABEL"] +feature_cols = categorical_columns + numerical_columns + ["ROW_INDEX"] + + +class XGBoostExternalMemoryTrainingTest(TestCase): + def setUp(self): + """Creates Snowpark and Snowflake environments for testing.""" + self._session = Session.builder.configs(SnowflakeLoginOptions()).create() + + def tearDown(self): + self._session.close() + + def test_fit_and_compare_results(self) -> None: + input_df = ( + self._session.sql( + """SELECT *, IFF(Y = 'yes', 1.0, 0.0) as LABEL + FROM ML_DATASETS.PUBLIC.UCI_BANK_MARKETING_20COLUMNS""" + ) + .drop("Y") + .withColumn("ROW_INDEX", F.monotonically_increasing_id()) + ) + pd_df = input_df.to_pandas().sort_values(by=["ROW_INDEX"])[numerical_columns + ["ROW_INDEX", "LABEL"]] + sp_df = self._session.create_dataframe(pd_df) + + sk_reg = NativeXGBClassifier(random_state=0) + sk_reg.fit(pd_df[numerical_columns], pd_df["LABEL"]) + sk_result = sk_reg.predict(pd_df[numerical_columns]) + + sk_accuracy = sk_accuracy_score(pd_df["LABEL"], sk_result) + + reg = XGBClassifier( + 
random_state=0, + input_cols=numerical_columns, + label_cols=label_column, + use_external_memory_version=True, + batch_size=10000, + ) + reg.fit(sp_df) + result = reg.predict(sp_df) + + result_pd = result.to_pandas().sort_values(by="ROW_INDEX")[["LABEL", "OUTPUT_LABEL"]] + accuracy = sk_accuracy_score(result_pd["LABEL"], result_pd["OUTPUT_LABEL"]) + + np.testing.assert_allclose(sk_accuracy, accuracy, rtol=0.01, atol=0.01) + + +if __name__ == "__main__": + main() diff --git a/tests/integ/snowflake/ml/image_builds/BUILD.bazel b/tests/integ/snowflake/ml/image_builds/BUILD.bazel index bb31ad2e..8e3e15f9 100644 --- a/tests/integ/snowflake/ml/image_builds/BUILD.bazel +++ b/tests/integ/snowflake/ml/image_builds/BUILD.bazel @@ -5,9 +5,9 @@ py_test( timeout = "long", srcs = ["image_registry_client_integ_test.py"], deps = [ + "//snowflake/ml/_internal/container_services/image_registry:registry_client", "//snowflake/ml/_internal/utils:identifier", "//snowflake/ml/_internal/utils:query_result_checker", - "//snowflake/ml/model/_deploy_client/utils:image_registry_client", "//snowflake/ml/model/_deploy_client/utils:snowservice_client", "//tests/integ/snowflake/ml/test_utils:spcs_integ_test_base", ], diff --git a/tests/integ/snowflake/ml/image_builds/image_registry_client_integ_test.py b/tests/integ/snowflake/ml/image_builds/image_registry_client_integ_test.py index af70f7a7..e502c894 100644 --- a/tests/integ/snowflake/ml/image_builds/image_registry_client_integ_test.py +++ b/tests/integ/snowflake/ml/image_builds/image_registry_client_integ_test.py @@ -1,10 +1,10 @@ from absl.testing import absltest -from snowflake.ml._internal.utils import identifier, query_result_checker -from snowflake.ml.model._deploy_client.utils import ( - image_registry_client, - snowservice_client, +from snowflake.ml._internal.container_services.image_registry import ( + registry_client as image_registry_client, ) +from snowflake.ml._internal.utils import identifier, query_result_checker +from 
snowflake.ml.model._deploy_client.utils import snowservice_client from tests.integ.snowflake.ml.test_utils import spcs_integ_test_base diff --git a/tests/integ/snowflake/ml/model/_client/model/BUILD.bazel b/tests/integ/snowflake/ml/model/_client/model/BUILD.bazel index d5bfd8b7..f3410613 100644 --- a/tests/integ/snowflake/ml/model/_client/model/BUILD.bazel +++ b/tests/integ/snowflake/ml/model/_client/model/BUILD.bazel @@ -4,8 +4,9 @@ py_test( name = "model_impl_integ_test", timeout = "long", srcs = ["model_impl_integ_test.py"], - shard_count = 6, deps = [ + "//snowflake/ml/_internal/utils:identifier", + "//snowflake/ml/_internal/utils:snowflake_env", "//snowflake/ml/registry", "//snowflake/ml/utils:connection_params", "//tests/integ/snowflake/ml/test_utils:db_manager", @@ -18,8 +19,8 @@ py_test( name = "model_version_impl_integ_test", timeout = "long", srcs = ["model_version_impl_integ_test.py"], - shard_count = 6, deps = [ + "//snowflake/ml/_internal/utils:snowflake_env", "//snowflake/ml/_internal/utils:sql_identifier", "//snowflake/ml/model/_client/model:model_version_impl", "//snowflake/ml/registry", diff --git a/tests/integ/snowflake/ml/model/_client/model/model_impl_integ_test.py b/tests/integ/snowflake/ml/model/_client/model/model_impl_integ_test.py index 48760ac5..5256c1e0 100644 --- a/tests/integ/snowflake/ml/model/_client/model/model_impl_integ_test.py +++ b/tests/integ/snowflake/ml/model/_client/model/model_impl_integ_test.py @@ -4,6 +4,7 @@ from absl.testing import absltest, parameterized from packaging import version +from snowflake.ml._internal.utils import identifier, snowflake_env from snowflake.ml.registry import registry from snowflake.ml.utils import connection_params from snowflake.snowpark import Session @@ -18,6 +19,14 @@ VERSION_NAME2 = "V2" +@unittest.skipUnless( + test_env_utils.get_current_snowflake_version() >= version.parse("8.0.0"), + "New model only available when the Snowflake Version is newer than 8.0.0", +) +@unittest.skipUnless( + 
test_env_utils.get_current_snowflake_cloud_type() == snowflake_env.SnowflakeCloudType.AWS, + "New model only available in AWS", +) class TestModelImplInteg(parameterized.TestCase): @classmethod def setUpClass(self) -> None: @@ -37,11 +46,6 @@ def setUpClass(self) -> None: } ).create() - current_sf_version = test_env_utils.get_current_snowflake_version(self._session) - - if current_sf_version < version.parse("8.0.0"): - raise unittest.SkipTest("This test requires Snowflake Version 8.0.0 or higher.") - self._db_manager = db_manager.DBManager(self._session) self._db_manager.create_database(self._test_db) self._db_manager.create_schema(self._test_schema) @@ -63,11 +67,21 @@ def setUpClass(self) -> None: ) self._model = self.registry.get_model(model_name=MODEL_NAME) + self._tag_name1 = "MYTAG" + self._tag_name2 = '"live_version"' + + self._session.sql(f"CREATE TAG {self._tag_name1}").collect() + self._session.sql(f"CREATE TAG {self._tag_name2}").collect() + @classmethod def tearDownClass(self) -> None: self._db_manager.drop_database(self._test_db) self._session.close() + def test_versions(self) -> None: + self.assertEqual(self._model.versions(), [self._mv, self._mv2]) + self.assertLen(self._model.show_versions(), 2) + def test_description(self) -> None: description = "test description" self._model.description = description @@ -79,6 +93,41 @@ def test_default(self) -> None: self._model.default = VERSION_NAME2 self.assertEqual(self._model.default.version_name, VERSION_NAME2) + @unittest.skipUnless( + test_env_utils.get_current_snowflake_version() >= version.parse("8.2.0"), + "TAG on model only available when the Snowflake Version is newer than 8.2.0", + ) + def test_tag(self) -> None: + fq_tag_name1 = identifier.get_schema_level_object_identifier(self._test_db, self._test_schema, self._tag_name1) + fq_tag_name2 = identifier.get_schema_level_object_identifier(self._test_db, self._test_schema, self._tag_name2) + self.assertDictEqual({}, self._model.show_tags()) + 
self.assertIsNone(self._model.get_tag(self._tag_name1)) + self._model.set_tag(self._tag_name1, "val1") + self.assertEqual( + "val1", + self._model.get_tag(fq_tag_name1), + ) + self.assertDictEqual( + {fq_tag_name1: "val1"}, + self._model.show_tags(), + ) + self._model.set_tag(fq_tag_name2, "v2") + self.assertEqual("v2", self._model.get_tag(self._tag_name2)) + self.assertDictEqual( + { + fq_tag_name1: "val1", + fq_tag_name2: "v2", + }, + self._model.show_tags(), + ) + self._model.unset_tag(fq_tag_name2) + self.assertDictEqual( + {fq_tag_name1: "val1"}, + self._model.show_tags(), + ) + self._model.unset_tag(self._tag_name1) + self.assertDictEqual({}, self._model.show_tags()) + if __name__ == "__main__": absltest.main() diff --git a/tests/integ/snowflake/ml/model/_client/model/model_version_impl_integ_test.py b/tests/integ/snowflake/ml/model/_client/model/model_version_impl_integ_test.py index ca7b367d..0ac15c80 100644 --- a/tests/integ/snowflake/ml/model/_client/model/model_version_impl_integ_test.py +++ b/tests/integ/snowflake/ml/model/_client/model/model_version_impl_integ_test.py @@ -4,6 +4,7 @@ from absl.testing import absltest, parameterized from packaging import version +from snowflake.ml._internal.utils import snowflake_env from snowflake.ml.registry import registry from snowflake.ml.utils import connection_params from snowflake.snowpark import Session @@ -17,6 +18,14 @@ VERSION_NAME = "V1" +@unittest.skipUnless( + test_env_utils.get_current_snowflake_version() >= version.parse("8.0.0"), + "New model only available when the Snowflake Version is newer than 8.0.0", +) +@unittest.skipUnless( + test_env_utils.get_current_snowflake_cloud_type() == snowflake_env.SnowflakeCloudType.AWS, + "New model only available in AWS", +) class TestModelVersionImplInteg(parameterized.TestCase): @classmethod def setUpClass(self) -> None: @@ -36,11 +45,6 @@ def setUpClass(self) -> None: } ).create() - current_sf_version = test_env_utils.get_current_snowflake_version(self._session) 
- - if current_sf_version < version.parse("8.0.0"): - raise unittest.SkipTest("This test requires Snowflake Version 8.0.0 or higher.") - self._db_manager = db_manager.DBManager(self._session) self._db_manager.create_database(self._test_db) self._db_manager.create_schema(self._test_schema) @@ -72,11 +76,11 @@ def test_metrics(self) -> None: self._mv.set_metric(k, v) self.assertEqual(self._mv.get_metric("a"), expected_metrics["a"]) - self.assertDictEqual(self._mv.list_metrics(), expected_metrics) + self.assertDictEqual(self._mv.show_metrics(), expected_metrics) expected_metrics.pop("b") self._mv.delete_metric("b") - self.assertDictEqual(self._mv.list_metrics(), expected_metrics) + self.assertDictEqual(self._mv.show_metrics(), expected_metrics) with self.assertRaises(KeyError): self._mv.get_metric("b") diff --git a/tests/integ/snowflake/ml/modeling/model_selection/BUILD.bazel b/tests/integ/snowflake/ml/modeling/model_selection/BUILD.bazel index 2d1bcbee..e80541e7 100644 --- a/tests/integ/snowflake/ml/modeling/model_selection/BUILD.bazel +++ b/tests/integ/snowflake/ml/modeling/model_selection/BUILD.bazel @@ -45,3 +45,15 @@ py_test( "//snowflake/ml/utils:connection_params", ], ) + +py_test( + name = "check_output_hpo_integ_test", + timeout = "long", + srcs = ["check_output_hpo_integ_test.py"], + shard_count = 5, + deps = [ + "//snowflake/ml/modeling/linear_model:linear_regression", + "//snowflake/ml/modeling/model_selection:grid_search_cv", + "//snowflake/ml/utils:connection_params", + ], +) diff --git a/tests/integ/snowflake/ml/modeling/model_selection/check_output_hpo_integ_test.py b/tests/integ/snowflake/ml/modeling/model_selection/check_output_hpo_integ_test.py new file mode 100644 index 00000000..95326f58 --- /dev/null +++ b/tests/integ/snowflake/ml/modeling/model_selection/check_output_hpo_integ_test.py @@ -0,0 +1,243 @@ +""" +The main purpose of this file is to use Linear Regression, +to match all kinds of input and output for GridSearchCV/RandomSearchCV. 
+""" + +from typing import Any, Dict, List, Tuple, Union +from unittest import mock + +import inflection +import numpy as np +import numpy.typing as npt +import pandas as pd +from absl.testing import absltest, parameterized +from sklearn.datasets import load_iris +from sklearn.linear_model import LinearRegression as SkLinearRegression +from sklearn.model_selection import GridSearchCV as SkGridSearchCV, KFold +from sklearn.model_selection._split import BaseCrossValidator + +from snowflake.ml.modeling.linear_model import ( # type: ignore[attr-defined] + LinearRegression, +) +from snowflake.ml.modeling.model_selection import ( # type: ignore[attr-defined] + GridSearchCV, +) +from snowflake.ml.utils.connection_params import SnowflakeLoginOptions +from snowflake.snowpark import Session + + +def _load_iris_data() -> Tuple[pd.DataFrame, List[str], List[str]]: + input_df_pandas = load_iris(as_frame=True).frame + input_df_pandas.columns = [inflection.parameterize(c, "_").upper() for c in input_df_pandas.columns] + input_df_pandas["INDEX"] = input_df_pandas.reset_index().index + + input_cols = [c for c in input_df_pandas.columns if not c.startswith("TARGET")] + label_col = [c for c in input_df_pandas.columns if c.startswith("TARGET")] + + return input_df_pandas, input_cols, label_col + + +class GridSearchCVTest(parameterized.TestCase): + def setUp(self) -> None: + """Creates Snowpark and Snowflake environments for testing.""" + self._session = Session.builder.configs(SnowflakeLoginOptions()).create() + + pd_data, input_col, label_col = _load_iris_data() + self._input_df_pandas = pd_data + self._input_cols = input_col + self._label_col = label_col + self._input_df = self._session.create_dataframe(self._input_df_pandas) + + def tearDown(self) -> None: + self._session.close() + + def _compare_cv_results(self, cv_result_1: Dict[str, Any], cv_result_2: Dict[str, Any]) -> None: + # compare the keys + self.assertEqual(cv_result_1.keys(), cv_result_2.keys()) + # compare the values + 
for k, v in cv_result_1.items(): + if isinstance(v, np.ndarray): + if k.startswith("param_"): # compare the masked array + np.ma.allequal(v, cv_result_2[k]) # type: ignore[no-untyped-call] + elif k == "params": # compare the parameter combination + self.assertEqual(v.tolist(), cv_result_2[k]) + elif k.endswith("test_score"): # compare the test score + np.testing.assert_allclose(v, cv_result_2[k], rtol=1.0e-7, atol=1.0e-7) + # Do not compare the fit time + + def _compare_global_variables(self, sk_obj: SkLinearRegression, sklearn_reg: SkLinearRegression) -> None: + # the result of SnowML grid search cv should behave the same as sklearn's + # TODO - check scorer_ + assert isinstance(sk_obj.refit_time_, float) + np.testing.assert_allclose(sk_obj.best_score_, sklearn_reg.best_score_) + self.assertEqual(sk_obj.multimetric_, sklearn_reg.multimetric_) + self.assertEqual(sk_obj.best_index_, sklearn_reg.best_index_) + if hasattr(sk_obj, "n_splits_"): # n_splits_ is only available in RandomSearchCV + self.assertEqual(sk_obj.n_splits_, sklearn_reg.n_splits_) + if hasattr(sk_obj, "best_estimator_"): + for variable_name in sk_obj.best_estimator_.__dict__.keys(): + if variable_name != "n_jobs": + if isinstance(getattr(sk_obj.best_estimator_, variable_name), np.ndarray): + if getattr(sk_obj.best_estimator_, variable_name).dtype == "object": + self.assertEqual( + getattr(sk_obj.best_estimator_, variable_name).tolist(), + getattr(sklearn_reg.best_estimator_, variable_name).tolist(), + ) + else: + np.testing.assert_allclose( + getattr(sk_obj.best_estimator_, variable_name), + getattr(sklearn_reg.best_estimator_, variable_name), + rtol=1.0e-7, + atol=1.0e-7, + ) + else: + np.testing.assert_allclose( + getattr(sk_obj.best_estimator_, variable_name), + getattr(sklearn_reg.best_estimator_, variable_name), + rtol=1.0e-7, + atol=1.0e-7, + ) + self.assertEqual(sk_obj.n_features_in_, sklearn_reg.n_features_in_) + if hasattr(sk_obj, "feature_names_in_") and hasattr( + sklearn_reg, 
"feature_names_in_" + ): # feature_names_in_ variable is only available when `best_estimator_` is defined + self.assertEqual(sk_obj.feature_names_in_.tolist(), sklearn_reg.feature_names_in_.tolist()) + if hasattr(sk_obj, "classes_"): + self.assertEqual(sk_obj.classes_, sklearn_reg.classes_) + self._compare_cv_results(sk_obj.cv_results_, sklearn_reg.cv_results_) + if not sk_obj.multimetric_: + self.assertEqual(sk_obj.best_params_, sklearn_reg.best_params_) + + @parameterized.parameters( # type: ignore[misc] + # Standard Sklearn sample + { + "is_single_node": False, + "params": {"copy_X": [True, False], "fit_intercept": [True, False]}, + "cv": 5, + "kwargs": dict(), + }, + # param_grid: list of dictionary + { + "is_single_node": False, + "params": [ + {"copy_X": [True], "fit_intercept": [True, False]}, + {"copy_X": [False], "fit_intercept": [True, False]}, + ], + "cv": 5, + "kwargs": dict(), + }, + # cv: CV splitter + { + "is_single_node": False, + "params": [ + {"copy_X": [True], "fit_intercept": [True, False]}, + {"copy_X": [False], "fit_intercept": [True, False]}, + ], + "cv": KFold(5), + "kwargs": dict(), + }, + # cv: iterator + { + "is_single_node": False, + "params": [ + {"copy_X": [True], "fit_intercept": [True, False]}, + {"copy_X": [False], "fit_intercept": [True, False]}, + ], + "cv": [ + ( + np.array([i for i in range(30, 150)]), + np.array([i for i in range(30)]), + ), + ( + np.array([i for i in range(30)] + [i for i in range(60, 150)]), + np.array([i for i in range(30, 60)]), + ), + ( + np.array([i for i in range(60)] + [i for i in range(90, 150)]), + np.array([i for i in range(60, 90)]), + ), + ( + np.array([i for i in range(90)] + [i for i in range(120, 150)]), + np.array([i for i in range(90, 120)]), + ), + ( + np.array([i for i in range(120)]), + np.array([i for i in range(120, 150)]), + ), + ], + "kwargs": dict(), + }, + { + "is_single_node": False, + "params": [ + {"copy_X": [True], "fit_intercept": [True, False]}, + {"copy_X": [False], 
"fit_intercept": [True, False]}, + ], + "cv": [ + ( + [i for i in range(30, 150)], + [i for i in range(30)], + ), + ( + [i for i in range(30)] + [i for i in range(60, 150)], + [i for i in range(30, 60)], + ), + ( + [i for i in range(60)] + [i for i in range(90, 150)], + [i for i in range(60, 90)], + ), + ( + [i for i in range(90)] + [i for i in range(120, 150)], + [i for i in range(90, 120)], + ), + ( + [i for i in range(120)], + [i for i in range(120, 150)], + ), + ], + "kwargs": dict(), + }, + # TODO: scoring + { + "is_single_node": False, + "params": {"copy_X": [True, False], "fit_intercept": [True, False]}, + "cv": 5, + "kwargs": dict(scoring=["accuracy", "f1_macro"], refit="f1_macro", return_train_score=True), + }, + # TODO: refit + # TODO: error_score + # return_train_score: True + { + "is_single_node": False, + "params": {"copy_X": [True, False], "fit_intercept": [True, False]}, + "cv": 5, + "kwargs": dict(return_train_score=True), + }, + ) + @mock.patch("snowflake.ml.modeling._internal.model_trainer_builder.is_single_node") + def test_fit_and_compare_results( + self, + mock_is_single_node: mock.MagicMock, + is_single_node: bool, + params: Union[Dict[str, Any], List[Dict[str, Any]]], + cv: Union[int, BaseCrossValidator, List[Tuple[Union[List[int], npt.NDArray[np.int_]]]]], + kwargs: Dict[str, Any], + ) -> None: + mock_is_single_node.return_value = is_single_node + + reg = GridSearchCV(estimator=LinearRegression(), param_grid=params, cv=cv, **kwargs) + sklearn_reg = SkGridSearchCV(estimator=SkLinearRegression(), param_grid=params, cv=cv, **kwargs) + reg.set_input_cols(self._input_cols) + output_cols = ["OUTPUT_" + c for c in self._label_col] + reg.set_output_cols(output_cols) + reg.set_label_cols(self._label_col) + + reg.fit(self._input_df) + sklearn_reg.fit(X=self._input_df_pandas[self._input_cols], y=self._input_df_pandas[self._label_col].squeeze()) + sk_obj = reg.to_sklearn() + + self._compare_global_variables(sk_obj, sklearn_reg) + + +if __name__ == 
"__main__": + absltest.main() diff --git a/tests/integ/snowflake/ml/modeling/model_selection/grid_search_integ_test.py b/tests/integ/snowflake/ml/modeling/model_selection/grid_search_integ_test.py index c152748a..7465acab 100644 --- a/tests/integ/snowflake/ml/modeling/model_selection/grid_search_integ_test.py +++ b/tests/integ/snowflake/ml/modeling/model_selection/grid_search_integ_test.py @@ -97,6 +97,14 @@ def test_fit_and_compare_results(self, mock_is_single_node) -> None: "kwargs": dict(), "estimator_kwargs": dict(random_state=0), }, + { + "is_single_node": False, + "skmodel": SkRandomForestClassifier, + "model": RandomForestClassifier, + "params": {"n_estimators": [50, 200], "min_samples_split": [1.0, 2, 3], "max_depth": [3, 8]}, + "kwargs": dict(return_train_score=True), + "estimator_kwargs": dict(random_state=0), + }, { "is_single_node": False, "skmodel": SkSVC, @@ -105,6 +113,14 @@ def test_fit_and_compare_results(self, mock_is_single_node) -> None: "kwargs": dict(), "estimator_kwargs": dict(random_state=0), }, + { + "is_single_node": False, + "skmodel": SkSVC, + "model": SVC, + "params": {"kernel": ("linear", "rbf"), "C": [1, 10, 80]}, + "kwargs": dict(return_train_score=True), + "estimator_kwargs": dict(random_state=0), + }, { "is_single_node": False, "skmodel": SkXGBClassifier, @@ -113,6 +129,14 @@ def test_fit_and_compare_results(self, mock_is_single_node) -> None: "kwargs": dict(scoring=["accuracy", "f1_macro"], refit="f1_macro"), "estimator_kwargs": dict(seed=42), }, + { + "is_single_node": False, + "skmodel": SkXGBClassifier, + "model": XGBClassifier, + "params": {"max_depth": [2, 6], "learning_rate": [0.1, 0.01]}, + "kwargs": dict(scoring=["accuracy", "f1_macro"], refit="f1_macro", return_train_score=True), + "estimator_kwargs": dict(seed=42), + }, ) @mock.patch("snowflake.ml.modeling._internal.model_trainer_builder.is_single_node") def test_fit_and_compare_results_distributed( diff --git 
a/tests/integ/snowflake/ml/modeling/preprocessing/min_max_scaler_test.py b/tests/integ/snowflake/ml/modeling/preprocessing/min_max_scaler_test.py index ec4ca781..c200dd11 100644 --- a/tests/integ/snowflake/ml/modeling/preprocessing/min_max_scaler_test.py +++ b/tests/integ/snowflake/ml/modeling/preprocessing/min_max_scaler_test.py @@ -19,6 +19,7 @@ from snowflake.snowpark import Session from tests.integ.snowflake.ml.modeling.framework import utils as framework_utils from tests.integ.snowflake.ml.modeling.framework.utils import ( + CATEGORICAL_COLS, DATA, DATA_CLIP, ID_COL, @@ -42,6 +43,22 @@ def tearDown(self) -> None: if os.path.exists(filepath): os.remove(filepath) + def test_fit_non_numeric_raises_exception(self) -> None: + """ + Fitting scaler with non-numeric columns should raise an exception.. + + Raises + ------ + AssertionError + If the expected exception is not raised. + """ + input_cols = CATEGORICAL_COLS + _, df = framework_utils.get_df(self._session, DATA, SCHEMA, np.nan) + + scaler = MinMaxScaler().set_input_cols(input_cols) + with self.assertRaises(TypeError): + scaler.fit(df) + def test_fit(self) -> None: """ Verify fitted states. 
diff --git a/tests/integ/snowflake/ml/registry/model/BUILD.bazel b/tests/integ/snowflake/ml/registry/model/BUILD.bazel index 4dd54706..727e7374 100644 --- a/tests/integ/snowflake/ml/registry/model/BUILD.bazel +++ b/tests/integ/snowflake/ml/registry/model/BUILD.bazel @@ -7,6 +7,7 @@ py_library( testonly = True, srcs = ["registry_model_test_base.py"], deps = [ + "//snowflake/ml/_internal/utils:snowflake_env", "//snowflake/ml/model:type_hints", "//snowflake/ml/registry", "//snowflake/ml/utils:connection_params", diff --git a/tests/integ/snowflake/ml/registry/model/registry_model_test_base.py b/tests/integ/snowflake/ml/registry/model/registry_model_test_base.py index 27311c9f..13a85a09 100644 --- a/tests/integ/snowflake/ml/registry/model/registry_model_test_base.py +++ b/tests/integ/snowflake/ml/registry/model/registry_model_test_base.py @@ -6,6 +6,7 @@ from absl.testing import absltest from packaging import version +from snowflake.ml._internal.utils import snowflake_env from snowflake.ml.model import type_hints as model_types from snowflake.ml.registry import registry from snowflake.ml.utils import connection_params @@ -13,6 +14,14 @@ from tests.integ.snowflake.ml.test_utils import db_manager, test_env_utils +@unittest.skipUnless( + test_env_utils.get_current_snowflake_version() >= version.parse("8.0.0"), + "New model only available when the Snowflake Version is newer than 8.0.0", +) +@unittest.skipUnless( + test_env_utils.get_current_snowflake_cloud_type() == snowflake_env.SnowflakeCloudType.AWS, + "New model only available in AWS", +) class RegistryModelTestBase(absltest.TestCase): def setUp(self) -> None: """Creates Snowpark and Snowflake environments for testing.""" @@ -31,11 +40,6 @@ def setUp(self) -> None: } ).create() - current_sf_version = test_env_utils.get_current_snowflake_version(self._session) - - if current_sf_version < version.parse("8.0.0"): - raise unittest.SkipTest("This test requires Snowflake Version 8.0.0 or higher.") - self._db_manager = 
db_manager.DBManager(self._session) self._db_manager.create_database(self._test_db) self._db_manager.create_schema(self._test_schema) @@ -73,13 +77,15 @@ def _test_registry_model( ) for target_method, (test_input, check_func) in prediction_assert_fns.items(): - res = mv.run(test_input, method_name=target_method) + res = mv.run(test_input, function_name=target_method) check_func(res) + self.registry.show_models() + self.registry.delete_model(model_name=name) - self.assertNotIn(mv.model_name, [m.name for m in self.registry.list_models()]) + self.assertNotIn(mv.model_name, [m.name for m in self.registry.models()]) if __name__ == "__main__": diff --git a/tests/integ/snowflake/ml/registry/model/registry_tensorflow_model_test.py b/tests/integ/snowflake/ml/registry/model/registry_tensorflow_model_test.py index 48398502..20af6c7f 100644 --- a/tests/integ/snowflake/ml/registry/model/registry_tensorflow_model_test.py +++ b/tests/integ/snowflake/ml/registry/model/registry_tensorflow_model_test.py @@ -2,6 +2,7 @@ import numpy as np import pandas as pd +import pytest import tensorflow as tf from absl.testing import absltest @@ -25,6 +26,7 @@ def __call__(self, tensor: tf.Tensor) -> tf.Tensor: return self.a_variable * tensor + self.non_trainable_variable +@pytest.mark.pip_incompatible class TestRegistryTensorflowModelInteg(registry_model_test_base.RegistryModelTestBase): def test_tf_tensor_as_sample( self, diff --git a/tests/integ/snowflake/ml/registry/model_registry_compat_test.py b/tests/integ/snowflake/ml/registry/model_registry_compat_test.py index 781ae871..7a762c0d 100644 --- a/tests/integ/snowflake/ml/registry/model_registry_compat_test.py +++ b/tests/integ/snowflake/ml/registry/model_registry_compat_test.py @@ -69,6 +69,8 @@ def prepare_registry_and_log_model(session: session.Session, registry_name: str, registry = model_registry.ModelRegistry(session=session, database_name=registry_name) iris_X, iris_y = datasets.load_iris(return_X_y=True, as_frame=True) + # Normalize 
the column name to avoid set it as case_sensitive where there was a BCR in 1.1.2 + iris_X.columns = [s.replace(" (CM)", "").replace(" ", "") for s in iris_X.columns.str.upper()] # LogisticRegression is for classfication task, such as iris regr = linear_model.LogisticRegression() regr.fit(iris_X, iris_y) @@ -101,6 +103,7 @@ def test_log_model_compat(self, permanent: bool) -> None: deployment_name=deployment_name, target_method="predict", permanent=permanent ) iris_X, iris_y = datasets.load_iris(return_X_y=True, as_frame=True) + iris_X.columns = [s.replace(" (CM)", "").replace(" ", "") for s in iris_X.columns.str.upper()] model_ref.predict(deployment_name, iris_X) diff --git a/tests/integ/snowflake/ml/test_utils/BUILD.bazel b/tests/integ/snowflake/ml/test_utils/BUILD.bazel index e55d4da4..2e8ca7d8 100644 --- a/tests/integ/snowflake/ml/test_utils/BUILD.bazel +++ b/tests/integ/snowflake/ml/test_utils/BUILD.bazel @@ -47,9 +47,10 @@ py_library( ], deps = [ ":_snowml_requirements", + ":test_env_utils", + "//snowflake/ml/_internal:env", "//snowflake/ml/_internal:env_utils", "//snowflake/ml/_internal:file_utils", - "//snowflake/ml/utils:connection_params", ], ) @@ -75,6 +76,8 @@ py_library( "//snowflake/ml/_internal:env", "//snowflake/ml/_internal:env_utils", "//snowflake/ml/_internal/utils:query_result_checker", + "//snowflake/ml/_internal/utils:snowflake_env", + "//snowflake/ml/utils:connection_params", ], ) @@ -83,7 +86,9 @@ py_library( testonly = True, srcs = ["spcs_integ_test_base.py"], deps = [ + ":test_env_utils", "//snowflake/ml/_internal/utils:identifier", + "//snowflake/ml/_internal/utils:snowflake_env", "//snowflake/ml/utils:connection_params", "//tests/integ/snowflake/ml/test_utils:db_manager", ], diff --git a/tests/integ/snowflake/ml/test_utils/common_test_base.py b/tests/integ/snowflake/ml/test_utils/common_test_base.py index 5695c2b2..11724c09 100644 --- a/tests/integ/snowflake/ml/test_utils/common_test_base.py +++ 
b/tests/integ/snowflake/ml/test_utils/common_test_base.py @@ -9,11 +9,10 @@ from packaging import requirements from typing_extensions import Concatenate, ParamSpec -from snowflake.ml._internal import env_utils, file_utils -from snowflake.ml.utils import connection_params +from snowflake.ml._internal import env, env_utils, file_utils from snowflake.snowpark import functions as F, session from snowflake.snowpark._internal import udf_utils, utils as snowpark_utils -from tests.integ.snowflake.ml.test_utils import _snowml_requirements +from tests.integ.snowflake.ml.test_utils import _snowml_requirements, test_env_utils _V = TypeVar("_V", bound="CommonTestBase") _T_args = ParamSpec("_T_args") @@ -40,11 +39,7 @@ def get_function_body(func: Callable[..., Any]) -> str: class CommonTestBase(parameterized.TestCase): def setUp(self) -> None: """Creates Snowpark and Snowflake environments for testing.""" - self.session = ( - session._get_active_session() - if snowpark_utils.is_in_stored_procedure() # type: ignore[no-untyped-call] # - else session.Session.builder.configs(connection_params.SnowflakeLoginOptions()).create() - ) + self.session = test_env_utils.get_available_session() def tearDown(self) -> None: if not snowpark_utils.is_in_stored_procedure(): # type: ignore[no-untyped-call] @@ -242,10 +237,12 @@ def {func_name}({first_arg_name}: snowflake.snowpark.Session, {", ".join(arg_lis actual_method(self, *args, **kwargs) additional_cases = [ - {"_snowml_pkg_ver": pkg_ver} - for pkg_ver in env_utils.get_matched_package_versions_in_snowflake_conda_channel( - req=requirements.Requirement(f"snowflake-ml-python{version_range}") - ) + {"_snowml_pkg_ver": str(pkg_ver)} + for pkg_ver in env_utils.get_matched_package_versions_in_information_schema( + test_env_utils.get_available_session(), + [requirements.Requirement(f"{env_utils.SNOWPARK_ML_PKG_NAME}{version_range}")], + python_version=env.PYTHON_VERSION, + )[env_utils.SNOWPARK_ML_PKG_NAME] ] modified_test_cases = [{**t1, **t2} for 
t1 in test_cases for t2 in additional_cases] diff --git a/tests/integ/snowflake/ml/test_utils/spcs_integ_test_base.py b/tests/integ/snowflake/ml/test_utils/spcs_integ_test_base.py index 6d714034..256aa67a 100644 --- a/tests/integ/snowflake/ml/test_utils/spcs_integ_test_base.py +++ b/tests/integ/snowflake/ml/test_utils/spcs_integ_test_base.py @@ -1,27 +1,25 @@ +import unittest import uuid -from unittest import SkipTest from absl.testing import absltest +from snowflake.ml._internal.utils import snowflake_env from snowflake.ml.utils import connection_params from snowflake.snowpark import Session -from tests.integ.snowflake.ml.test_utils import db_manager +from tests.integ.snowflake.ml.test_utils import db_manager, test_env_utils +@unittest.skipUnless( + test_env_utils.get_current_snowflake_cloud_type() == snowflake_env.SnowflakeCloudType.AWS, + "SPCS only available in AWS", +) class SpcsIntegTestBase(absltest.TestCase): - _SNOWSERVICE_CONNECTION_NAME = "regtest" _TEST_CPU_COMPUTE_POOL = "REGTEST_INFERENCE_CPU_POOL" _TEST_GPU_COMPUTE_POOL = "REGTEST_INFERENCE_GPU_POOL" def setUp(self) -> None: """Creates Snowpark and Snowflake environments for testing.""" - try: - login_options = connection_params.SnowflakeLoginOptions(connection_name=self._SNOWSERVICE_CONNECTION_NAME) - except KeyError: - raise SkipTest( - "SnowService connection parameters not present: skipping " - "TestModelRegistryIntegWithSnowServiceDeployment." 
- ) + login_options = connection_params.SnowflakeLoginOptions() self._run_id = uuid.uuid4().hex[:2] self._test_db = db_manager.TestObjectNameGenerator.get_snowml_test_object_name(self._run_id, "db").upper() diff --git a/tests/integ/snowflake/ml/test_utils/test_env_utils.py b/tests/integ/snowflake/ml/test_utils/test_env_utils.py index f84c241a..74a066f1 100644 --- a/tests/integ/snowflake/ml/test_utils/test_env_utils.py +++ b/tests/integ/snowflake/ml/test_utils/test_env_utils.py @@ -1,70 +1,44 @@ import functools -import textwrap -from typing import List from packaging import requirements, version -import snowflake.connector from snowflake.ml._internal import env, env_utils -from snowflake.ml._internal.utils import query_result_checker +from snowflake.ml._internal.utils import snowflake_env +from snowflake.ml.utils import connection_params from snowflake.snowpark import session +from snowflake.snowpark._internal import utils as snowpark_utils -def get_current_snowflake_version(session: session.Session) -> version.Version: - res = session.sql("SELECT CURRENT_VERSION() AS CURRENT_VERSION").collect()[0] - version_str = res.CURRENT_VERSION - assert isinstance(version_str, str) - - version_str = "+".join(version_str.split()) - return version.parse(version_str) +def get_available_session() -> session.Session: + return ( + session._get_active_session() + if snowpark_utils.is_in_stored_procedure() # type: ignore[no-untyped-call] # + else session.Session.builder.configs(connection_params.SnowflakeLoginOptions()).create() + ) @functools.lru_cache -def get_package_versions_in_server( - session: session.Session, - package_req_str: str, - python_version: str = env.PYTHON_VERSION, -) -> List[version.Version]: - package_req = requirements.Requirement(package_req_str) - parsed_python_version = version.Version(python_version) - sql = textwrap.dedent( - f""" - SELECT PACKAGE_NAME, VERSION - FROM information_schema.packages - WHERE package_name = '{package_req.name}' - AND language = 
'python' - AND runtime_version = '{parsed_python_version.major}.{parsed_python_version.minor}'; - """ - ) +def get_current_snowflake_version() -> version.Version: + return snowflake_env.get_current_snowflake_version(get_available_session()) - version_list = [] - try: - result = ( - query_result_checker.SqlResultValidator( - session=session, - query=sql, - ) - .has_column("VERSION") - .has_dimensions(expected_rows=None, expected_cols=2) - .validate() - ) - for row in result: - req_ver = version.parse(row["VERSION"]) - version_list.append(req_ver) - except snowflake.connector.DataError: - return [] - available_version_list = list(package_req.specifier.filter(version_list)) - return available_version_list + +@functools.lru_cache +def get_current_snowflake_cloud_type() -> snowflake_env.SnowflakeCloudType: + sess = get_available_session() + region = snowflake_env.get_regions(sess)[snowflake_env.get_current_region_id(sess)] + return region["cloud"] @functools.lru_cache def get_latest_package_version_spec_in_server( - session: session.Session, + sess: session.Session, package_req_str: str, python_version: str = env.PYTHON_VERSION, ) -> str: package_req = requirements.Requirement(package_req_str) - available_version_list = get_package_versions_in_server(session, package_req_str, python_version) + available_version_list = env_utils.get_matched_package_versions_in_information_schema( + sess, [package_req], python_version + ).get(package_req.name, []) if len(available_version_list) == 0: return str(package_req) return f"{package_req.name}=={max(available_version_list)}" @@ -74,7 +48,7 @@ def get_latest_package_version_spec_in_server( def get_latest_package_version_spec_in_conda(package_req_str: str, python_version: str = env.PYTHON_VERSION) -> str: package_req = requirements.Requirement(package_req_str) available_version_list = env_utils.get_matched_package_versions_in_snowflake_conda_channel( - req=requirements.Requirement(package_req_str), python_version=python_version + 
package_req, python_version=python_version ) if len(available_version_list) == 0: return str(package_req) diff --git a/third_party/rules_mypy/BUILD.bazel b/third_party/rules_mypy/BUILD.bazel index 6f60a504..eda5ef8e 100644 --- a/third_party/rules_mypy/BUILD.bazel +++ b/third_party/rules_mypy/BUILD.bazel @@ -2,10 +2,9 @@ load("@rules_python//python:defs.bzl", "py_binary") package(default_visibility = ["//visibility:public"]) -exports_files(["mypy.sh.tpl"]) - py_binary( name = "mypy", srcs = ["main.py"], + legacy_create_init = 0, main = "main.py", ) diff --git a/third_party/rules_mypy/main.py b/third_party/rules_mypy/main.py index 262f860b..5f23779a 100644 --- a/third_party/rules_mypy/main.py +++ b/third_party/rules_mypy/main.py @@ -1,77 +1,11 @@ -import argparse -import json -import subprocess -import sys -import tempfile - -MYPY_ENTRYPOINT_CODE = """ import sys try: from mypy.main import main except ImportError as e: raise ImportError( - f"Unable to import mypy. Make sure mypy is added to the bazel conda environment. Actual error: {{e}}" + f"Unable to import mypy. Make sure mypy is added to the bazel conda environment. Actual error: {e}" ) if __name__ == "__main__": main(stdout=sys.stdout, stderr=sys.stderr) - -""" - - -def mypy_checker() -> None: - # To parse the arguments that bazel provides. - parser = argparse.ArgumentParser( - # Without this, the second path documented in main below fails. 
- fromfile_prefix_chars="@" - ) - parser.add_argument("--out") - parser.add_argument("--persistent_worker", action="store_true") - - args = parser.parse_args() - - with tempfile.NamedTemporaryFile(suffix=".py") as mypy_entrypoint: - mypy_entrypoint.write(MYPY_ENTRYPOINT_CODE.encode()) - mypy_entrypoint.flush() - first_run = True - while args.persistent_worker or first_run: - data = sys.stdin.readline() - req = json.loads(data) - mypy_args = req["arguments"] - process = subprocess.Popen( - # We use this to make sure we are invoking mypy that is installed in the same environment of the current - # Python. - [sys.executable, mypy_entrypoint.name] + mypy_args, - stdout=subprocess.PIPE, - stderr=subprocess.PIPE, - ) - process.wait() - text, _ = process.communicate() - - if process.returncode: - header = "=" * 20 + " MYPY TYPE CHECKING REPORT BEGIN " + "=" * 20 + "\n" - footer = "=" * 20 + " MYPY TYPE CHECKING REPORT END " + "=" * 20 + "\n" - - message = "".join([header, text.decode(), footer]) - else: - message = "" - - with open(args.out, "w") as output: - output.write(message) - sys.stderr.flush() - sys.stdout.write( - json.dumps( - { - "exitCode": process.returncode, - "output": message, - "requestId": req.get("requestId", 0), - } - ) - ) - sys.stdout.flush() - first_run = False - - -if __name__ == "__main__": - mypy_checker() diff --git a/third_party/rules_mypy/mypy.bzl b/third_party/rules_mypy/mypy.bzl index eea954b2..6cd64ea3 100644 --- a/third_party/rules_mypy/mypy.bzl +++ b/third_party/rules_mypy/mypy.bzl @@ -1,5 +1,7 @@ "Public API" +load("@bazel_skylib//lib:sets.bzl", "sets") +load("@bazel_skylib//lib:shell.bzl", "shell") load("@rules_mypy//:rules.bzl", "MyPyStubsInfo") MyPyAspectInfo = provider( @@ -33,8 +35,29 @@ DEFAULT_ATTRS = { default = Label("@//:mypy.ini"), allow_single_file = True, ), + "_template": attr.label( + default = Label("@rules_mypy//templates:mypy.sh.tpl"), + allow_single_file = True, + ), } +def _sources_to_cache_map_triples(srcs, 
is_aspect): + triples_as_flat_list = [] + for f in srcs: + if is_aspect: + f_path = f.path + else: + # "The path of this file relative to its root. This excludes the aforementioned root, i.e. configuration-specific fragments of the path. + # This is also the path under which the file is mapped if it's in the runfiles of a binary." + # - https://docs.bazel.build/versions/master/skylark/lib/File.html + f_path = f.short_path + triples_as_flat_list.extend([ + shell.quote(f_path), + shell.quote("{}.meta.json".format(f_path)), + shell.quote("{}.data.json".format(f_path)), + ]) + return triples_as_flat_list + def _is_external_dep(dep): return dep.label.workspace_root.startswith("external/") @@ -68,11 +91,28 @@ def _extract_stub_deps(deps): stub_files.append(src_f) return stub_files -def _mypy_rule_impl(ctx): - base_rule = ctx.rule +def _extract_imports(imports, label): + # NOTE: Bazel's implementation of this for py_binary, py_test is at + # src/main/java/com/google/devtools/build/lib/bazel/rules/python/BazelPythonSemantics.java + mypypath_parts = [] + for import_ in imports: + if import_.startswith("/"): + # buildifier: disable=print + print("ignoring invalid absolute path '{}'".format(import_)) + elif import_ in ["", "."]: + mypypath_parts.append(label.package) + else: + mypypath_parts.append("{}/{}".format(label.package, import_)) + return mypypath_parts + +def _mypy_rule_impl(ctx, is_aspect = False): + base_rule = ctx + if is_aspect: + base_rule = ctx.rule mypy_config_file = ctx.file._mypy_config + mypypath_parts = [] direct_src_files = [] transitive_srcs_depsets = [] stub_files = [] @@ -84,80 +124,99 @@ def _mypy_rule_impl(ctx): transitive_srcs_depsets = _extract_transitive_deps(base_rule.attr.deps) stub_files = _extract_stub_deps(base_rule.attr.deps) + if hasattr(base_rule.attr, "imports"): + mypypath_parts = _extract_imports(base_rule.attr.imports, ctx.label) + final_srcs_depset = depset(transitive = transitive_srcs_depsets + [depset(direct = direct_src_files)]) 
src_files = [f for f in final_srcs_depset.to_list() if not _is_external_src(f)] if not src_files: return None - out = ctx.actions.declare_file("%s_dummy_out" % ctx.rule.attr.name) - runfiles_name = "%s.mypy_runfiles" % ctx.rule.attr.name + mypypath_parts += [src_f.dirname for src_f in stub_files] + mypypath = ":".join(mypypath_parts) + + # Ideally, a file should be passed into this rule. If this is an executable + # rule, then we default to the implicit executable file, otherwise we create + # a stub. + if not is_aspect: + if hasattr(ctx, "outputs"): + exe = ctx.outputs.executable + else: + exe = ctx.actions.declare_file( + "%s_mypy_exe" % base_rule.attr.name, + ) + out = None + else: + out = ctx.actions.declare_file("%s_dummy_out" % ctx.rule.attr.name) + exe = ctx.actions.declare_file( + "%s_mypy_exe" % ctx.rule.attr.name, + ) # Compose a list of the files needed for use. Note that aspect rules can use # the project version of mypy however, other rules should fall back on their # relative runfiles. 
+ runfiles = ctx.runfiles(files = src_files + stub_files + [mypy_config_file]) + if not is_aspect: + runfiles = runfiles.merge(ctx.attr._mypy_cli.default_runfiles) - src_run_files = [] - direct_src_run_files = [] - stub_run_files = [] - - for f in src_files + stub_files: - run_file_path = runfiles_name + "/" + f.short_path - run_file = ctx.actions.declare_file(run_file_path) - ctx.actions.symlink( - output = run_file, - target_file = f, - ) - if f in src_files: - src_run_files.append(run_file) - if f in direct_src_files: - direct_src_run_files.append(run_file) - if f in stub_files: - stub_run_files.append(run_file) - - src_root_path = src_run_files[0].path - src_root_path = src_root_path[0:(src_root_path.find(runfiles_name) + len(runfiles_name))] - - # arguments sent to mypy - args = ["--cache-dir", ctx.bin_dir.path + "/.mypy_cache", "--package-root", src_root_path, "--config-file", mypy_config_file.path] + [f.path for f in direct_src_run_files] - - worker_arg_file = ctx.actions.declare_file(ctx.rule.attr.name + ".worker_args") - ctx.actions.write( - output = worker_arg_file, - content = "\n".join(args), + src_root_paths = sets.to_list( + sets.make([f.root.path for f in src_files]), ) - return MyPyAspectInfo( - exe = ctx.executable._mypy_cli, - args = worker_arg_file, - runfiles = src_run_files + stub_run_files + [mypy_config_file, worker_arg_file], - out = out, + ctx.actions.expand_template( + template = ctx.file._template, + output = exe, + substitutions = { + "{CACHE_MAP_TRIPLES}": " ".join(_sources_to_cache_map_triples(src_files, is_aspect)), + "{MYPYPATH_PATH}": mypypath if mypypath else "", + "{MYPY_EXE}": ctx.executable._mypy_cli.path, + "{MYPY_INI_PATH}": mypy_config_file.path, + "{MYPY_ROOT}": ctx.executable._mypy_cli.root.path, + "{OUTPUT}": out.path if out else "", + "{PACKAGE_ROOTS}": " ".join([ + "--package-root " + shell.quote(path or ".") + for path in src_root_paths + ]), + "{SRCS}": " ".join([ + shell.quote(f.path) if is_aspect else 
shell.quote(f.short_path) + for f in src_files + ]), + "{VERBOSE_BASH}": "set -x" if DEBUG else "", + "{VERBOSE_OPT}": "--verbose" if DEBUG else "", + }, + is_executable = True, ) + if is_aspect: + return [ + DefaultInfo(executable = exe, runfiles = runfiles), + MyPyAspectInfo(exe = exe, out = out), + ] + return DefaultInfo(executable = exe, runfiles = runfiles) + def _mypy_aspect_impl(_, ctx): if (ctx.rule.kind not in ["py_binary", "py_library", "py_test", "mypy_test"] or ctx.label.workspace_root.startswith("external")): return [] - aspect_info = _mypy_rule_impl( + providers = _mypy_rule_impl( ctx, + is_aspect = True, ) - if not aspect_info: + if not providers: return [] + info = providers[0] + aspect_info = providers[1] + ctx.actions.run( outputs = [aspect_info.out], - inputs = aspect_info.runfiles, - tools = [aspect_info.exe], + inputs = info.default_runfiles.files, + tools = [ctx.executable._mypy_cli], executable = aspect_info.exe, mnemonic = "MyPy", progress_message = "Type-checking %s" % ctx.label, - execution_requirements = { - "requires-worker-protocol": "json", - "supports-workers": "1", - }, - # out is required for worker to write the output. 
- arguments = ["--out", aspect_info.out.path, "@" + aspect_info.args.path], use_default_shell_env = True, ) return [ @@ -166,8 +225,21 @@ def _mypy_aspect_impl(_, ctx): ), ] +def _mypy_test_impl(ctx): + info = _mypy_rule_impl(ctx, is_aspect = False) + if not info: + fail("A list of python deps are required for mypy_test") + return info + mypy_aspect = aspect( implementation = _mypy_aspect_impl, attr_aspects = ["deps"], attrs = DEFAULT_ATTRS, ) + +mypy_test = rule( + implementation = _mypy_test_impl, + test = True, + attrs = dict(DEFAULT_ATTRS.items() + + [("deps", attr.label_list(aspects = [mypy_aspect]))]), +) diff --git a/third_party/rules_mypy/templates/BUILD.bazel b/third_party/rules_mypy/templates/BUILD.bazel new file mode 100644 index 00000000..820e9a31 --- /dev/null +++ b/third_party/rules_mypy/templates/BUILD.bazel @@ -0,0 +1 @@ +exports_files(["mypy.sh.tpl"]) diff --git a/third_party/rules_mypy/templates/mypy.sh.tpl b/third_party/rules_mypy/templates/mypy.sh.tpl new file mode 100644 index 00000000..4ba83e73 --- /dev/null +++ b/third_party/rules_mypy/templates/mypy.sh.tpl @@ -0,0 +1,44 @@ +#!/usr/bin/env bash + +{VERBOSE_BASH} +set -o errexit +set -o nounset +set -o pipefail + +main() { + local output + local report_file + local status + local root + local mypy + + report_file="{OUTPUT}" + root="{MYPY_ROOT}/" + mypy="{MYPY_EXE}" + + export MYPYPATH="$(pwd):{MYPYPATH_PATH}" + + # Workspace rules run in a different location from aspect rules. Here we + # normalize if the external source isn't found. + if [ ! -f $mypy ]; then + mypy=${mypy#${root}} + fi + + # We need the return code of mypy. + set +o errexit + output=$($mypy {VERBOSE_OPT} --bazel {PACKAGE_ROOTS} --config-file {MYPY_INI_PATH} --cache-map {CACHE_MAP_TRIPLES} -- {SRCS} 2>&1) + status=$? + set -o errexit + + if [ ! 
-z "$report_file" ]; then + echo "${output}" > "${report_file}" + fi + + if [[ $status -ne 0 ]]; then + echo "${output}" # Show MyPy's error to end-user via Bazel's console logging + exit 1 + fi + +} + +main "$@"