diff --git a/README.md b/README.md index 5306161..494e2c0 100644 --- a/README.md +++ b/README.md @@ -78,7 +78,31 @@ print(res.choices[0].message.content) ``` -更多功能介绍,请参阅 [PAI Python SDK文档](https://alipai.readthedocs.io/) 。 +- 微调预训练模型 + +通过PAI提供的微调脚本,提交一个模型微调任务 + +```python + +from pai.model import ModelTrainingRecipe + +training_recipe = ModelTrainingRecipe( + model_name="qwen2-0.5b-instruct", + model_provider="pai", + instance_type="ecs.gn6e-c12g1.3xlarge", +) + +training_recipe.train( + inputs={ + # 本地或是阿里云OSS上的数据路径(oss:///path/to/data) + "train": "" + } +) + + +``` + +通过访问PAI提供的示例仓库,可以了解更多使用示例:[pai-examples](https://github.com/aliyun/pai-examples/tree/master/pai-python-sdk) ## 🤝 贡献代码 diff --git a/README_EN.md b/README_EN.md index 833f99d..4268bcb 100644 --- a/README_EN.md +++ b/README_EN.md @@ -81,7 +81,38 @@ print(res.choices[0].message.content) ``` -For more details, please refer to the [PAI Python SDK Documentation](https://alipai.readthedocs.io/). +- Fine-tune the pretrained model + +Submit a model fine-tuning task using the fine-tuning script provided by PAI.
+ +```python + +from pai.model import ModelTrainingRecipe + +# Retrieve the Qwen2-0.5b-instruct model training recipe provided by PAI +training_recipe = ModelTrainingRecipe( + model_name="qwen2-0.5b-instruct", + model_provider="pai", + instance_type="ecs.gn6e-c12g1.3xlarge", +) + +# Submit the training job +job = training_recipe.train( + inputs={ + # Data path on local or Alibaba Cloud OSS (oss:///path/to/data) + "train": "" + } +) + +# Get output model path +print(training_recipe.model_data()) + +# Deploy the fine-tuned model +predictor = training_recipe.deploy(service_name="qwen2_finetune") + +``` + +You can learn more usage examples by visiting the PAI example repository: [pai-examples](https://github.com/aliyun/pai-examples/tree/master/pai-python-sdk) ## 🤝 Contributing diff --git a/docs/source/index.rst b/docs/source/index.rst index f426f83..1ba224f 100644 --- a/docs/source/index.rst +++ b/docs/source/index.rst @@ -30,16 +30,6 @@ PAI Python SDK 文档 user-guide/processing-job -.. toctree:: - :maxdepth: 1 - :caption: 示例教程 - - tutorial/framework - tutorial/train - tutorial/predict - tutorial/advance - - .. toctree:: :maxdepth: 1 :caption: Reference diff --git a/docs/source/tutorial/.gitignore b/docs/source/tutorial/.gitignore deleted file mode 100644 index 879843a..0000000 --- a/docs/source/tutorial/.gitignore +++ /dev/null @@ -1,9 +0,0 @@ -test_data -train_data -xgb_src -train_src -infer_src -tf_train_src -fashion-mnist -data -bert diff --git a/docs/source/tutorial/advance.rst b/docs/source/tutorial/advance.rst deleted file mode 100644 index fbeebce..0000000 --- a/docs/source/tutorial/advance.rst +++ /dev/null @@ -1,10 +0,0 @@ -=========================================== -AIGC && LLM -=========================================== - -.. 
toctree:: - :maxdepth: 1 - - stable_diffusion_lora/stable_diffusion_lora - chatglm2_finetune/chatglm2_finetune - baichuan2_finetune/baichuan2_finetune diff --git a/docs/source/tutorial/async_inference/async_inference.ipynb b/docs/source/tutorial/async_inference/async_inference.ipynb deleted file mode 100644 index b28c169..0000000 --- a/docs/source/tutorial/async_inference/async_inference.ipynb +++ /dev/null @@ -1,464 +0,0 @@ -{ - "cells": [ - { - "attachments": {}, - "cell_type": "markdown", - "metadata": {}, - "source": [ - "# 部署异步推理服务\n", - "\n", - "在复杂的模型推理场景中,例如AIGC、视频处理等场景中,模型服务推理耗时较长,存在长连接超时导致请求失败或实例负载不均衡等问题,不适用于实时推理的场景。针对以上问题,PAI提供了异步推理服务,用于支持类似的场景,用户可以在提交预测请求之后,通过轮询或是订阅的方式获取到推理服务的预测结果。\n", - "\n", - "在当前文档中,我们将介绍如何使用PAI Python SDK在PAI上部署和调用异步推理服务。" - ] - }, - { - "attachments": {}, - "cell_type": "markdown", - "metadata": {}, - "source": [ - "## 准备工作\n", - "\n", - "我们可以通过以下命令安装PAI Python SDK。" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": { - "tags": [ - "skip-execution" - ] - }, - "outputs": [], - "source": [ - "\n", - "!python -m pip install --upgrade alipai" - ] - }, - { - "attachments": {}, - "cell_type": "markdown", - "metadata": {}, - "source": [ - "\n", - "SDK需要配置访问阿里云服务需要的 AccessKey,以及当前使用的工作空间和OSS Bucket。在PAI Python SDK安装之后,通过在 **命令行终端** 中执行以下命令,按照引导配置密钥,工作空间等信息。\n", - "\n", - "\n", - "```shell\n", - "\n", - "# 以下命令,请在 命令行终端 中执行.\n", - "\n", - "python -m pai.toolkit.config\n", - "\n", - "```\n", - "\n", - "我们可以通过以下代码验证当前的配置。" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "import pai\n", - "from pai.session import get_default_session\n", - "\n", - "print(pai.__version__)\n", - "sess = get_default_session()\n", - "\n", - "assert sess.workspace_name is not None\n", - "print(sess.workspace_name)" - ] - }, - { - "attachments": {}, - "cell_type": "markdown", - "metadata": {}, - "source": [ - "## 部署异步推理服务模型\n", - "\n", - 
"将模型部署为异步推理服务与部署标准的在线推理服务类似,用户仅需在部署时(`Model.deploy`),传递`service_type=ServicType.Async`即可。\n", - "\n", - "当前流程中,我们将使用镜像部署的模式,部署一个异步的推理服务。\n" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "\n", - "# 准备异步推理服务的应用代码目录\n", - "!mkdir -p serve_src/" - ] - }, - { - "attachments": {}, - "cell_type": "markdown", - "metadata": {}, - "source": [ - "通过`%%writefile`指令,我们将推理服务代码写入到`serve_src/run.py`文件中。" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "%%writefile serve_src/run.py\n", - "import asyncio\n", - "from random import random\n", - "\n", - "from fastapi import FastAPI, Request\n", - "import uvicorn, json, datetime\n", - "\n", - "# 默认模型加载路径\n", - "model_path = \"/eas/workspace/model/\"\n", - "\n", - "app = FastAPI()\n", - "\n", - "\n", - "@app.post(\"/\")\n", - "async def create_item(request: Request):\n", - " print(\"Make mock prediction starting ...\")\n", - " # Mock prediction\n", - " await asyncio.sleep(15)\n", - " print(\"Prediction finished.\")\n", - " return [random() for _ in range(10)]\n", - "\n", - "\n", - "if __name__ == \"__main__\":\n", - " uvicorn.run(app, host=\"0.0.0.0\", port=8000, workers=1)" - ] - }, - { - "attachments": {}, - "cell_type": "markdown", - "metadata": {}, - "source": [ - "我们将使用PAI提供的PyTorch推理镜像部署以上的模型。" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "from pai.model import Model, container_serving_spec, ServiceType\n", - "from pai.image import retrieve, ImageScope\n", - "\n", - "m = Model(\n", - " inference_spec=container_serving_spec(\n", - " source_dir=\"serve_src\",\n", - " command=\"python run.py\",\n", - " image_uri=retrieve(\n", - " \"PyTorch\",\n", - " framework_version=\"1.10\",\n", - " accelerator_type=\"gpu\",\n", - " image_scope=ImageScope.INFERENCE,\n", - " ),\n", - " requirements=[\n", - " \"fastapi\",\n", - " \"uvicorn\",\n", - " 
],\n", - " )\n", - " # 用户可以通过`model_data`参数,传递一个OSS上的模型。相应的模型会被加载到推理服务的容器中。\n", - " # model_data=\"oss:///path/to/model/\"\n", - ")" - ] - }, - { - "attachments": {}, - "cell_type": "markdown", - "metadata": {}, - "source": [ - "通过设置部署服务的`service_type=ServiceType.Async`参数,我们可以将模型部署为异步推理服务。异步推理服务使用分别使用输入队列(source)和输出队列(sink)保存预测请求和预测结果。通过`options`参数,可以配置队列使用的资源,队列最大长度,是否开启自动驱逐等高阶参数。异步服务支持的完整的高阶参数,请参考文档:[异步服务-参数配置](https://help.aliyun.com/document_detail/476812.html?#section-gor-gne-gtq)" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "from pai.predictor import AsyncPredictor\n", - "from pai.common.utils import random_str\n", - "\n", - "\n", - "service_name = f\"async_service_example_{random_str(6)}\"\n", - "\n", - "p: AsyncPredictor = m.deploy(\n", - " service_name=service_name,\n", - " instance_type=\"ecs.c6.large\",\n", - " # 设置当前部署的服务类型为异步服务\n", - " service_type=ServiceType.Async,\n", - " # 用户可以通过options字段配置高阶参数\n", - " options={\n", - " # 异步推理详细参数文档: https://help.aliyun.com/document_detail/476812.html\n", - " \"queue.cpu\": 2, # 队列使用的CPU核数,默认为1\n", - " \"queue.memory\": 2048, # 异步服务使用过的队列内存,单位为MB\n", - " },\n", - ")\n", - "\n", - "print()\n", - "\n", - "print(p)\n", - "print(p.service_name)\n", - "print(p.access_token)" - ] - }, - { - "attachments": {}, - "cell_type": "markdown", - "metadata": {}, - "source": [ - "## 调用推理服务\n", - "\n", - "用户发送调用异步队列服务与请求同步推理服务的方式相同,但是异步推理服务会立即返回本次预测请求的`RequestId`,而不是预测结果。用户可以通过轮询获取到推理服务的预测结果。\n", - "\n", - "- **用户客户端**发送推理请求,加入到推理服务的输入队列中,PAI-EAS返回请求的RequestId。\n", - "- PAI处理输入队列中的请求,转发给到**用户的推理服务**,推理服务处理完请求后,将结果写入到输出队列中\n", - "- **用户客户端**可以通过RequestId轮询,可以获取到**用户推理服务**的预测结果\n", - "\n", - "\n", - "PAI Python SDK提供了`AsyncPredictor`,支持用户更加简单得调用异步推理服务。" - ] - }, - { - "attachments": {}, - "cell_type": "markdown", - "metadata": {}, - "source": [ - "### 调用异步推理服务\n", - "\n", - 
"`AsyncPredictor`提供了`predict`和`raw_predict`方法发送预测请求,它们都会返回一个`AsyncTask`,用户可以通过`AsyncTask.result()`获取预测结果。 \n", - "\n", - "二者的区别在于`predict`方法会使用`Serializer`对象对输入数据进行序列化,对预测结果进行反序列化,而`raw_predict`方法直接将输入数据发送给异步推理服务,返回HTTP响应结果(`RawResponse`)。\n", - "\n", - "```python\n", - "\n", - "from pai.predictor import AsyncPredictor, AsyncTask\n", - "from pai.serializer import JsonSerializer\n", - "\n", - "p = AsyncPredictor(service_name='test_async_service', serializer=JsonSerializer())\n", - "\n", - "t1: AsyncTask = p.predict(data={\"some\": \"data\"})\n", - "# result是推理服务的响应结果(Response Body),经过Serialzier.deserialize处理后返回的结果.\n", - "result = t1.result()\n", - "\n", - "\n", - "t2: AsyncTask = p.raw_predict(data=b'{\"some\": \"data\"}')\n", - "resp: RawResponse = t2.result()\n", - "print(resp.status_code, resp.content)\n", - "\n", - "```\n", - "\n", - "`AsyncPredictor`会维护一个线程池,通过一个线程去发送推理请求,并等待请求处理完成。用户可以通过`max_workers`参数配置线程池的大小。\n", - "\n", - "```python\n", - "\n", - "p = AsyncPredictor(service_name='test_async_service', max_workers=20)\n", - "\n", - "```\n", - "\n", - "当用户需要在异步请求完成之后,对于响应的结果进行处理时,可以通过`callback`参数传递一个回调函数。回调函数的参数为`AsyncTask.result()`,也就实际响应的结果。\n", - "\n", - "\n" - ] - }, - { - "attachments": {}, - "cell_type": "markdown", - "metadata": {}, - "source": [ - "以下的示例代码中,我们将使用`AsyncPredictor`调用异步推理服务,并通过会回调函数处理预测结果。" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "from pai.predictor import RawResponse, AsyncTask\n", - "import time\n", - "\n", - "# 结果列表\n", - "results = []\n", - "\n", - "\n", - "# 定义回调函数\n", - "def callback_fn(resp: RawResponse):\n", - " print(\"Callback: get prediction result \", resp.json())\n", - " results.append(resp.json())\n", - "\n", - "\n", - "# 发送预测请求,使用回调函数处理预测结果。\n", - "task: AsyncTask = p.raw_predict(\n", - " data=b\"test_data\",\n", - " callback=callback_fn,\n", - ")\n", - "\n", - "# result() 方法等待预测完成\n", - "resp: RawResponse = task.result()\n", - "print(resp.json())\n", 
- "\n", - "# 等待回调函数执行完成\n", - "time.sleep(1)\n", - "\n", - "print(results)\n", - "assert len(results) == 1" - ] - }, - { - "attachments": {}, - "cell_type": "markdown", - "metadata": {}, - "source": [ - "以下示例中,我们批量发送异步推理请求,然后等待所有的请求完成。" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "tasks = []\n", - "\n", - "for i in range(10):\n", - " task: AsyncTask = p.raw_predict(\n", - " data=b\"test_data\",\n", - " callback=lambda x: print(\"Prediction result: \", x.json()),\n", - " )\n", - " tasks.append(task)\n", - "\n", - "prediction_results = [t.result().json() for t in tasks]\n", - "\n", - "print(prediction_results)\n", - "print(len(prediction_results))" - ] - }, - { - "attachments": {}, - "cell_type": "markdown", - "metadata": {}, - "source": [ - "### 使用异步API调用推理服务\n", - "\n", - "`AsyncPredictor` 提供了异步API `raw_predict_async` 和 `predict_async`,支持用户使用Python提供的异步框架(asyncio)调用推理服务。\n" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "from pai.predictor import RawResponse\n", - "\n", - "# 使用异步API调用异步推理服务\n", - "res: RawResponse = await p.raw_predict_async(data=b\"test_data\")\n", - "\n", - "print(res.status_code)\n", - "print(res.content)\n", - "print(res.json())" - ] - }, - { - "attachments": {}, - "cell_type": "markdown", - "metadata": {}, - "source": [ - "通过SDK提供的异步API,我们可以不借助于线程池,批量发送异步预测请求。\n", - "\n", - "以下的示例中,我们将使用异步API,批量发送异步预测请求,等待推理完成,并使用回调函数打印预测请求结果。" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "import asyncio\n", - "\n", - "\n", - "# 定义回调函数\n", - "def task_done_cb(task: asyncio.Task):\n", - " if task.exception():\n", - " raise task.exception()\n", - " else:\n", - " print(\"Prediction result: \", task.result().json())\n", - "\n", - "\n", - "# 使用异步API批量调用异步推理服务\n", - "async def batch_predict():\n", - " tasks = []\n", - " for _ in range(10):\n", - " task = 
asyncio.create_task(\n", - " # raw_predict_async 是一个coroutine\n", - " p.raw_predict_async(\n", - " data=b\"test_data\",\n", - " )\n", - " )\n", - " # 调用完成之后,打印调用返回结果\n", - " task.add_done_callback(task_done_cb)\n", - "\n", - " tasks.append(task)\n", - " # 等待所有任务完成\n", - " return await asyncio.gather(*tasks, return_exceptions=True)\n", - "\n", - "\n", - "batch_results = await batch_predict()\n", - "\n", - "\n", - "for result in batch_results:\n", - " print(result.json())" - ] - }, - { - "attachments": {}, - "cell_type": "markdown", - "metadata": {}, - "source": [ - "测试完成之后,可以使用`delete_service`方法删除对应服务,释放资源。" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "p.delete_service()" - ] - } - ], - "metadata": { - "kernelspec": { - "display_name": "pai-dev-py38", - "language": "python", - "name": "python3" - }, - "language_info": { - "codemirror_mode": { - "name": "ipython", - "version": 3 - }, - "file_extension": ".py", - "mimetype": "text/x-python", - "name": "python", - "nbconvert_exporter": "python", - "pygments_lexer": "ipython3", - "version": "3.8.16" - } - }, - "nbformat": 4, - "nbformat_minor": 2 -} diff --git a/docs/source/tutorial/baichuan2_finetune/.gitignore b/docs/source/tutorial/baichuan2_finetune/.gitignore deleted file mode 100644 index f23b395..0000000 --- a/docs/source/tutorial/baichuan2_finetune/.gitignore +++ /dev/null @@ -1 +0,0 @@ -swift diff --git a/docs/source/tutorial/baichuan2_finetune/baichuan2_finetune.ipynb b/docs/source/tutorial/baichuan2_finetune/baichuan2_finetune.ipynb deleted file mode 100644 index d7b2576..0000000 --- a/docs/source/tutorial/baichuan2_finetune/baichuan2_finetune.ipynb +++ /dev/null @@ -1,292 +0,0 @@ -{ - "cells": [ - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "# 使用ModelScope Swift微调Baichuan2模型" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "## 介绍\n", - "\n", - "[Baichuan 
2](https://github.com/baichuan-inc/Baichuan2)是[百川智能](https://www.baichuan-ai.com/home)推出的开源大语言模型,采用2.6万亿Tokens的高质量语料进行训练,在多个权威的中文、英文和多语言的通用、领域benchmark上取得了同尺寸最佳的效果。`Baichuan2` 目前发布了7B、13B的Base和Chat版本,支持模型商用。\n", - "\n", - "当在特定领域使用大语言模型时,可以通过prompt的方式引导模型,也可以通过在领域数据集上微调训练,从而在领域的任务上获得更好的效果。后者的优点是不依赖于Prompt(可能超过模型的输入长度上限),有更好的推理性能,并且经过微调后,在领域相关任务上有更好的效果。\n", - "\n", - "本文将介绍如何在PAI对`Baichuan2`模型完成微调训练。" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "\n", - "## 安装和配置SDK\n", - "\n", - "我们需要首先安装PAI Python SDK以运行本示例。" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "\n", - "!python -m pip install --upgrade alipai" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "\n", - "\n", - "\n", - "SDK需要配置访问阿里云服务需要的AccessKey,以及当前使用的工作空间和OSS Bucket。在PAI SDK安装之后,通过在**命令行终端** 中执行以下命令,按照引导配置密钥、工作空间等信息。\n", - "\n", - "\n", - "```shell\n", - "\n", - "# 以下命令,请在 命令行终端 中执行.\n", - "\n", - "python -m pai.toolkit.config\n", - "\n", - "```\n", - "\n", - "我们可以通过以下代码验证配置是否已生效。" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "import pai\n", - "from pai.session import get_default_session\n", - "\n", - "print(pai.__version__)\n", - "\n", - "sess = get_default_session()\n", - "\n", - "# 获取配置的工作空间信息\n", - "assert sess.workspace_name is not None\n", - "print(sess.workspace_name)" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "## 准备训练脚本\n", - "\n", - "`ModelScope`提供了[SWIFT(Scalable lightWeight Infrastructure for Fine-Tuning)](https://github.com/modelscope/swift#swiftscalable-lightweight-infrastructure-for-fine-tuning)框架,支持模型的全参数微调,也集成了各种高效微调方法,例如`LoRA`、`QLoRA`等,支持用户对`Baichuan2`、`QWen`、`llama2`等常见的语言进行微调训练。\n", - "\n", - "基于[Swift的LLM finetune脚本](https://github.com/modelscope/swift/blob/main/examples/pytorch/llm/src/llm_sft.py),我们修改了部分逻辑,从而支持用户在PAI的训练作业中使用,主要包括:\n", - "\n", - "- 
使用PAI预置的`Baichuan2-Base`模型\n", - "\n", - "对于热门的社区模型,PAI提供了模型缓存在OSS Bucket上,支持挂载到训练作业,训练脚本可以通过读取本地文件的方式加载获取模型。\n", - "\n", - "- 保存模型\n", - "\n", - "训练脚本需要将模型保存到指定路径(`/ml/output/model`),从而将模型保存到用户的OSS Bucket中。\n", - "\n", - "- 训练依赖的第三方\n", - "\n", - "训练作业将运行在PAI提供的`PyTorch`基础镜像上,我们需要在作业环境中安装`transformers`、`datasets`、`swift`、`xformers`等第三方依赖。PAI训练作业支持使用训练脚本目录下的`requirements.txt`安装第三方依赖。\n", - "\n", - "\n", - "完整的训练脚本请参考 `train_src` 目录下的训练文件(`llm_sft.py`)。\n" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "## 提交训练作业\n", - "\n", - "使用提交任务的方式训练模型,能够支持用户并行运行多个训练任务,高效得探索不同的超参组合对于模型性能影响,并且能够支持分布式训练。通过PAI Python SDK提供的`Estimator`API,我们可以方便得将一个本地训练脚本提交到PAI上运行。\n" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "\n", - "我们将通过以下代码配置训练作业脚本、作业启动命令、使用的作业镜像,以及机器实例规格,提交训练作业。" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "from pai.image import retrieve\n", - "from pai.estimator import Estimator\n", - "\n", - "# 训练作业启动命令\n", - "# 完整的参数说明请参考文档:https://github.com/modelscope/swift/blob/main/examples/pytorch/llm/README_CN.md#sftsh-%E5%91%BD%E4%BB%A4%E8%A1%8C%E5%8F%82%E6%95%B0\n", - "command = \"\"\"CUDA_VISIBLE_DEVICES=0 \\\n", - "python llm_sft.py \\\n", - " --model_type baichuan2-7b \\\n", - " --sft_type lora \\\n", - " --template_type default-generation \\\n", - " --dtype fp16 \\\n", - " --output_dir /ml/output/model/ \\\n", - " --dataset advertise-gen \\\n", - " --train_dataset_sample 20000 \\\n", - " --num_train_epochs 1 \\\n", - " --max_length 2048 \\\n", - " --quantization_bit 4 \\\n", - " --lora_rank 8 \\\n", - " --lora_alpha 32 \\\n", - " --lora_dropout_p 0. \\\n", - " --lora_target_modules ALL \\\n", - " --gradient_checkpointing true \\\n", - " --batch_size 16 \\\n", - " --weight_decay 0. 
\\\n", - " --learning_rate 1e-4 \\\n", - " --gradient_accumulation_steps 4 \\\n", - " --max_grad_norm 0.5 \\\n", - " --warmup_ratio 0.03 \\\n", - " --eval_steps 100 \\\n", - " --save_steps 100 \\\n", - " --save_total_limit 2 \\\n", - " --logging_steps 10\n", - "\"\"\"\n", - "\n", - "\n", - "# 配置训练作业\n", - "est = Estimator(\n", - " source_dir=\"train_src/\", # 代码目录\n", - " image_uri=retrieve(\"PyTorch\", framework_version=\"latest\").image_uri, # 训练作业使用的镜像\n", - " command=command, # 训练启动命令\n", - " instance_type=\"ecs.gn6e-c12g1.3xlarge\", # 使用的机器规格示例,V100(32G)\n", - " instance_count=1, # 机器实例个数\n", - " base_job_name=\"baichuan2_finetune\", # 训练作业名称\n", - ")" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "PAI提供了预置的`Baichuan2-Base`模型,可以通过以下方式获取对应的模型`OSS Bucket`路径。" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "from pai.model import RegisteredModel\n", - "\n", - "# 获取PAI提供的Baichuan2-7B-Base模型\n", - "m = RegisteredModel(\n", - " model_name=\"baichuan-inc/Baichuan2-7B-Base\", model_provider=\"huggingface\"\n", - ")\n", - "\n", - "# 模型地址\n", - "print(m.model_data)" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "提交训练作业,等待作业完成。用户可以通过打印的作业详情页URL,查看训练作业进度,资源使用,日志等信息。" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": { - "notebookRunGroups": { - "groupValue": "2" - } - }, - "outputs": [], - "source": [ - "# 提交训练作业\n", - "est.fit(\n", - " inputs={\n", - " # 训练代码可以从 /ml/input/data/pretrained_model/ 目录下读取挂载的预训练模型\n", - " \"pretrained_model\": m.model_data,\n", - " },\n", - " wait=False, # 是否等待训练作业完成\n", - ")\n", - "\n", - "# 打开一个TensorBoard,监控训练作业\n", - "tb = est.tensorboard()\n", - "\n", - "\n", - "# 等待训练作业完成\n", - "est.wait()" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "训练作业写出到 `/ml/output/model` 目录下的模型文件和checkpoints将被保存到用户的OSS Bucket中,可以通过 `est.model_data()` 获取 OSS Bucket路径。" - ] - }, - { - 
"cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "# 查看数据模型的OSS Bucket路径\n", - "print(est.model_data())\n", - "\n", - "\n", - "# 删除启动的TensorBoard(每一个账号下最多能够启动5个TensorBoard示例)\n", - "tb.delete()" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "## 结语\n", - "\n", - "在当前示例中,我们展示了如何基于`ModelScope Swift`框架,使用PAI预置的`Baichuan2-Base`模型,完成`Baichuan2`模型的微调训练。用户可以参考以上的示例,修改脚本,使用用户自定义的数据集,或是修改使用的基础预训练模型,完成自定义语言模型的微调训练。\n" - ] - } - ], - "metadata": { - "kernelspec": { - "display_name": "pai-dev-py38", - "language": "python", - "name": "python3" - }, - "language_info": { - "codemirror_mode": { - "name": "ipython", - "version": 3 - }, - "file_extension": ".py", - "mimetype": "text/x-python", - "name": "python", - "nbconvert_exporter": "python", - "pygments_lexer": "ipython3", - "version": "3.8.16" - } - }, - "nbformat": 4, - "nbformat_minor": 2 -} diff --git a/docs/source/tutorial/chatglm2_finetune/chatglm2_finetune.ipynb b/docs/source/tutorial/chatglm2_finetune/chatglm2_finetune.ipynb deleted file mode 100644 index db4cd46..0000000 --- a/docs/source/tutorial/chatglm2_finetune/chatglm2_finetune.ipynb +++ /dev/null @@ -1,920 +0,0 @@ -{ - "cells": [ - { - "attachments": {}, - "cell_type": "markdown", - "metadata": {}, - "source": [ - "# 微调和部署对话模型ChatGLM2-6B\n", - "\n", - "[ChatGLM2-6B](https://www.modelscope.cn/models/ZhipuAI/chatglm2-6b/summary)是中英文对话模型[ChatGLM-6B](https://github.com/THUDM/ChatGLM-6B) 的第二代版本,在保留了初代模型对话流畅、部署门槛较低等众多优秀特性的基础之上,ChatGLM2-6B 引入了多项升级,包括更强大的性能、更长的上下文、更高效的推理。\n", - "\n", - "在本示例中,我们将展示:\n", - "\n", - "- 将ChatGLM2-6B部署到PAI创建推理服务,基于推理服务API和Gradio实现一个简易对话机器人。\n", - "\n", - "- 在PAI对ChatGLM2-6B进行微调训练,并将微调的模型部署创建推理服务。\n", - "\n", - "\n", - "## 准备工作\n", - "\n", - "### 前提条件\n", - "\n", - "- 已获取阿里云账号的鉴权AccessKey ID和AccessKey Secret,详情请参见:[获取AccessKey](https://help.aliyun.com/document_detail/116401.html)。\n", - "- 已创建或是加入一个PAI 
AI工作空间,详情请参见:[创建工作空间](https://help.aliyun.com/document_detail/326193.html)。\n", - "- 已创建OSS Bucket,详情请参见:[控制台创建存储空间](https://help.aliyun.com/document_detail/31885.html)。\n", - "\n", - "\n", - "### 安装和配置PAI Python SDK\n", - "\n", - "我们将使用PAI提供的Python SDK,提交训练作业,部署模型。可以通过以下命令安装PAI Python SDK。" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "!python -m pip install --upgrade alipai\n", - "!python -m pip install gradio" - ] - }, - { - "attachments": {}, - "cell_type": "markdown", - "metadata": {}, - "source": [ - "\n", - "SDK需要配置访问阿里云服务需要的 AccessKey,以及当前使用的工作空间和OSS Bucket。在PAI Python SDK安装之后,通过在 **命令行终端** 中执行以下命令,按照引导配置密钥,工作空间等信息。\n", - "\n", - "\n", - "```shell\n", - "\n", - "# 以下命令,请在 命令行终端 中执行.\n", - "\n", - "python -m pai.toolkit.config\n", - "\n", - "```\n", - "\n", - "我们可以通过以下代码验证当前的配置。" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "import pai\n", - "from pai.session import get_default_session\n", - "\n", - "print(pai.__version__)\n", - "sess = get_default_session()\n", - "\n", - "assert sess.workspace_name is not None\n", - "print(sess.workspace_name)" - ] - }, - { - "attachments": {}, - "cell_type": "markdown", - "metadata": {}, - "source": [ - "## 直接部署ChatGLM2\n", - "\n", - "`ChatGLM2-6B`是一个对话语言模型,能够基于历史对话信息,和用户的Prompt输入,进行反馈。通过HuggingFace的transformers库用户可以直接使用`ChatGLM2-6B`提供的对话能力,示例如下:\n", - "\n", - "```python\n", - "\n", - ">>> from transformers import AutoTokenizer, AutoModel\n", - ">>> tokenizer = AutoTokenizer.from_pretrained(\"THUDM/chatglm2-6b\", trust_remote_code=True)\n", - ">>> model = AutoModel.from_pretrained(\"THUDM/chatglm2-6b\", trust_remote_code=True).half().cuda()\n", - ">>> model = model.eval()\n", - ">>> response, history = model.chat(tokenizer, \"你好\", history=[])\n", - ">>> print(response)\n", - "你好👋!我是人工智能助手 ChatGLM2-6B,很高兴见到你,欢迎问我任何问题。\n", - ">>> response, history = model.chat(tokenizer, \"晚上睡不着应该怎么办\", 
history=history)\n", - ">>> print(response)\n", - "晚上睡不着可能会让你感到焦虑或不舒服,但以下是一些可以帮助你入睡的方法:\n", - "\n", - "1. 制定规律的睡眠时间表:保持规律的睡眠时间表可以帮助你建立健康的睡眠习惯,使你更容易入睡。尽量在每天的相同时间上床,并在同一时间起床。\n", - "2. 创造一个舒适的睡眠环境:确保睡眠环境舒适,安静,黑暗且温度适宜。可以使用舒适的床上用品,并保持房间通风。\n", - "3. 放松身心:在睡前做些放松的活动,例如泡个热水澡,听些轻柔的音乐,阅读一些有趣的书籍等,有助于缓解紧张和焦虑,使你更容易入睡。\n", - "4. 避免饮用含有咖啡因的饮料:咖啡因是一种刺激性物质,会影响你的睡眠质量。尽量避免在睡前饮用含有咖啡因的饮料,例如咖啡,茶和可乐。\n", - "5. 避免在床上做与睡眠无关的事情:在床上做些与睡眠无关的事情,例如看电影,玩游戏或工作等,可能会干扰你的睡眠。\n", - "6. 尝试呼吸技巧:深呼吸是一种放松技巧,可以帮助你缓解紧张和焦虑,使你更容易入睡。试着慢慢吸气,保持几秒钟,然后缓慢呼气。\n", - "\n", - "如果这些方法无法帮助你入睡,你可以考虑咨询医生或睡眠专家,寻求进一步的建议。\n", - "\n", - "\n", - "\n", - "```\n", - "\n", - "以下的流程中,我们将`ChatGLM2-6B`部署到PAI创建一个推理服务,然后基于推理服务的API,使用Gradio创建一个对话机器人。" - ] - }, - { - "attachments": {}, - "cell_type": "markdown", - "metadata": {}, - "source": [ - "\n", - "### 获取ChatGLM2模型\n", - "\n", - "推理服务和训练作业中都需要加载使用模型,PAI在部分region上提供模型缓存,支持用户能够更快地获取到相应的模型。用户可以通过以下代码获取相应的模型,然后在训练作业和推理服务中加载使用。" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "from pai.model import RegisteredModel\n", - "\n", - "m = RegisteredModel(\n", - " \"THUDM/chatglm2-6b\",\n", - " model_provider=\"huggingface\",\n", - ")\n", - "\n", - "model_uri = m.model_data\n", - "print(model_uri)" - ] - }, - { - "attachments": {}, - "cell_type": "markdown", - "metadata": {}, - "source": [ - "### 创建推理服务\n", - "\n", - "PAI-EAS是阿里云PAI提供模型在线服务平台,支持用户一键部署推理服务或是AIWeb应用,支持异构资源,弹性扩缩容。PAI-EAS支持使用镜像的方式部署模型,以下的流程,我们将使用PAI提供的PyTorch推理镜像,将以上的模型部署为推理服务。\n", - "\n", - "\n", - "在部署推理服务之前,我们需要准备相应的推理服务程序,他负责加载模型,提供对应的HTTP API服务。" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "!mkdir -p server_src" - ] - }, - { - "attachments": {}, - "cell_type": "markdown", - "metadata": {}, - "source": [ - "完整的推理服务代码如下:" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "%%writefile server_src/run.py\n", - "# source: 
https://github.com/THUDM/ChatGLM-6B/blob/main/api.py\n", - "\n", - "import os\n", - "\n", - "from fastapi import FastAPI, Request\n", - "from transformers import AutoTokenizer, AutoModel, AutoConfig\n", - "import uvicorn, json, datetime\n", - "import torch\n", - "\n", - "\n", - "model = None\n", - "tokenizer = None\n", - "\n", - "# 默认的模型保存路径\n", - "chatglm_model_path = \"/eas/workspace/model/\"\n", - "# ptuning checkpoints保存路径\n", - "ptuning_checkpoint = \"/ml/ptuning_checkpoints/\"\n", - "pre_seq_len = 128\n", - "app = FastAPI()\n", - "\n", - "\n", - "def load_model():\n", - " global model, tokenizer\n", - " tokenizer = AutoTokenizer.from_pretrained(chatglm_model_path, trust_remote_code=True)\n", - "\n", - " if os.path.exists(ptuning_checkpoint):\n", - " # P-tuning v2\n", - " print(f\"Loading model/ptuning_checkpoint weight...\")\n", - " config = AutoConfig.from_pretrained(chatglm_model_path, trust_remote_code=True)\n", - " config.pre_seq_len = pre_seq_len\n", - " config.prefix_projection = False\n", - "\n", - " model = AutoModel.from_pretrained(chatglm_model_path, config=config, trust_remote_code=True)\n", - " tokenizer = AutoTokenizer.from_pretrained(chatglm_model_path, trust_remote_code=True)\n", - " prefix_state_dict = torch.load(os.path.join(ptuning_checkpoint, \"pytorch_model.bin\"))\n", - " new_prefix_state_dict = {}\n", - " for k, v in prefix_state_dict.items():\n", - " if k.startswith(\"transformer.prefix_encoder.\"):\n", - " new_prefix_state_dict[k[len(\"transformer.prefix_encoder.\"):]] = v\n", - " model.transformer.prefix_encoder.load_state_dict(new_prefix_state_dict)\n", - "\n", - " model = model.half().cuda()\n", - " model.transformer.prefix_encoder.float().cuda()\n", - " model.eval()\n", - " else:\n", - " print(f\"Loading model weight...\")\n", - " model = AutoModel.from_pretrained(chatglm_model_path, trust_remote_code=True)\n", - " model.half().cuda()\n", - " model.eval()\n", - "\n", - "\n", - "\n", - "@app.post(\"/\")\n", - "async def 
create_item(request: Request):\n", - " global model, tokenizer\n", - " json_post_raw = await request.json()\n", - " json_post = json.dumps(json_post_raw)\n", - " json_post_list = json.loads(json_post)\n", - " prompt = json_post_list.get('prompt')\n", - " history = json_post_list.get('history')\n", - " max_length = json_post_list.get('max_length')\n", - " top_p = json_post_list.get('top_p')\n", - " temperature = json_post_list.get('temperature')\n", - " response, history = model.chat(tokenizer,\n", - " prompt,\n", - " history=history,\n", - " max_length=max_length if max_length else 2048,\n", - " top_p=top_p if top_p else 0.7,\n", - " temperature=temperature if temperature else 0.95)\n", - " now = datetime.datetime.now()\n", - " time = now.strftime(\"%Y-%m-%d %H:%M:%S\")\n", - " answer = {\n", - " \"response\": response,\n", - " \"history\": history,\n", - " \"status\": 200,\n", - " \"time\": time\n", - " }\n", - " log = \"[\" + time + \"] \" + '\", prompt:\"' + prompt + '\", response:\"' + repr(response) + '\"'\n", - " print(log)\n", - " return answer\n", - "\n", - "\n", - "if __name__ == '__main__':\n", - " load_model()\n", - " uvicorn.run(app, host='0.0.0.0', port=8000, workers=1)\n" - ] - }, - { - "attachments": {}, - "cell_type": "markdown", - "metadata": {}, - "source": [ - "我们将使用PyTorch镜像运行相应的推理服务,在启动服务之前需要安装模型依赖的相关的依赖。我们可以在`server_src`下准备依赖的`requirements.txt`,对应的`requirements.txt`会在推理服务启动之前被安装到环境中。" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "%%writefile server_src/requirements.txt\n", - "\n", - "# 模型需要的依赖\n", - "transformers==4.30.2\n", - "accelerate\n", - "icetk\n", - "cpm_kernels\n", - "\n", - "torch>=2.0,<2.1\n", - "gradio\n", - "mdtex2html\n", - "sentencepiece\n", - "accelerate\n", - "\n", - "# 推理服务Server的依赖\n", - "fastapi\n", - "uvicorn" - ] - }, - { - "attachments": {}, - "cell_type": "markdown", - "metadata": {}, - "source": [ - "\n", - 
"基于以上的推理服务程序,我们将使用PyTorch镜像和OSS上的模型在PAI创建一个推理服务,代码如下。\n", - "\n", - "> 对于如何使用SDK创建推理服务的详细介绍,请见文档:[创建推理服务](https://help.aliyun.com/document_detail/2261532.html)" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "from pai.model import container_serving_spec, Model\n", - "from pai.image import retrieve, ImageScope\n", - "from pai.common.utils import random_str\n", - "\n", - "\n", - "# InferenceSpec用于描述如何创建推理服务\n", - "infer_spec = container_serving_spec(\n", - " # 使用PAI提供的最新PyTorch的推理镜像\n", - " image_uri=retrieve(\n", - " \"PyTorch\",\n", - " \"latest\",\n", - " accelerator_type=\"GPU\",\n", - " image_scope=ImageScope.INFERENCE,\n", - " ),\n", - " source_dir=\"./server_src\",\n", - " command=\"python run.py\",\n", - ")\n", - "\n", - "m = Model(\n", - " # 模型的OSS路径,默认模型会通过挂载的方式挂载到`/eas/workspace/model/`路径下。\n", - " model_data=model_uri,\n", - " inference_spec=infer_spec,\n", - ")\n", - "\n", - "\n", - "# 部署模型,创建推理服务.\n", - "p = m.deploy(\n", - " service_name=\"chatglm_demo_{}\".format(random_str(6)),\n", - " instance_type=\"ecs.gn6i-c8g1.2xlarge\", # 8vCPU 31GB NVIDIA T4×1(GPU Mem 16GB)\n", - " options={\n", - " # 配置EAS RPC框架的超时时间, 单位为毫秒\n", - " \"metadata.rpc.keepalive\": 20000,\n", - " },\n", - ")\n", - "\n", - "print(p.service_name)\n", - "print(p.service_status)" - ] - }, - { - "attachments": {}, - "cell_type": "markdown", - "metadata": {}, - "source": [ - "`m.deploy`返回一个Predictor对象,可以用于向创建的推理服务程序发送预测请求。" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "from pai.predictor import RawResponse\n", - "\n", - "resp: RawResponse = p.raw_predict(\n", - " {\n", - " \"prompt\": \"你好\",\n", - " }\n", - ")\n", - "print(resp.json()[\"response\"])\n", - "\n", - "\n", - "resp = p.raw_predict(\n", - " {\n", - " \"prompt\": \"晚上睡不着应该怎么办\",\n", - " \"history\": resp.json()[\"history\"],\n", - " },\n", - " timeout=20,\n", - ")\n", - "print(resp.json())" - 
] - }, - { - "attachments": {}, - "cell_type": "markdown", - "metadata": {}, - "source": [ - "基于以上的推理服务,我们可以使用Gradio创建一个简单的对话机器人demo。" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "import gradio as gr\n", - "import random\n", - "import time\n", - "\n", - "with gr.Blocks() as demo:\n", - " chatbot = gr.Chatbot()\n", - " msg = gr.Textbox()\n", - " clear = gr.Button(\"Clear\")\n", - " submit = gr.Button(\"Submit\")\n", - "\n", - " def respond(message, chat_history):\n", - "\n", - " print(f\"Message: {message}\")\n", - " print(f\"ChatHistory: {chat_history}\")\n", - " resp = p.raw_predict(\n", - " {\n", - " \"prompt\": message,\n", - " \"history\": chat_history,\n", - " }\n", - " ).json()\n", - " print(f\"Response: {resp['response']}\")\n", - "\n", - " chat_history.append((message, resp[\"response\"]))\n", - " return \"\", chat_history\n", - "\n", - " submit.click(respond, [msg, chatbot], [msg, chatbot])\n", - "\n", - "demo.launch(share=True)" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "通过以上创建的Gradio应用,我们可以在页面上与部署的ChatGLM模型进行对话。\n" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "\n", - "\n", - "在测试完成之后,我们可以通过以下的代码删除推理服务,释放资源。\n", - "\n", - "> 请注意,删除在线推理服务之后,对应的Gradio的应用将无法使用。" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "p.delete_service()" - ] - }, - { - "attachments": {}, - "cell_type": "markdown", - "metadata": {}, - "source": [ - "## 微调ChatGLM2-6B\n", - "\n", - "我们可以使用领域数据对ChatGLM进行微调,从而使得模型在特定领域和任务下有更好的表现。ChatGLM团队提供了使用[P-Tuning v2](https://github.com/THUDM/P-tuning-v2)方式对模型进行[微调的方案](https://github.com/THUDM/ChatGLM2-6B/tree/main/ptuning),我们将基于此方案展示如何将微调训练作业提交到PAI的训练服务执行。\n" - ] - }, - { - "attachments": {}, - "cell_type": "markdown", - "metadata": {}, - "source": [ - "### 准备训练数据集\n", - "\n", - 
"我们将使用了[广告生成数据集](https://aclanthology.org/D19-1321.pdf),对ChatGLM进行微调。我们首先需要准备数据到OSS,供后续微调训练作业使用。" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "from pai.common.oss_utils import download, OssUriObj, upload\n", - "import zipfile\n", - "\n", - "# 下载数据\n", - "data = download(\n", - " # 当前的数据集在上海region,跨region下载,我们需要传递对应OSS Bucket所在Endpoint.\n", - " OssUriObj(\n", - " \"oss://atp-modelzoo-sh.oss-cn-shanghai.aliyuncs.com/release/tutorials/chatGLM/AdvertiseGen_Simple.zip\"\n", - " ),\n", - " local_path=\"./\",\n", - ")\n", - "\n", - "# 解压缩数据\n", - "with zipfile.ZipFile(data, \"r\") as zip_ref:\n", - " zip_ref.extractall(\"./train_data/\")\n", - "\n", - "# 上传数据到OSS\n", - "train_data = \"./train_data/AdvertiseGen_Simple/\"\n", - "train_data_uri = upload(\n", - " \"./train_data/AdvertiseGen_Simple/\", oss_path=\"chatglm_demo/data/advertisegen/\"\n", - ")\n", - "print(train_data_uri)" - ] - }, - { - "attachments": {}, - "cell_type": "markdown", - "metadata": {}, - "source": [ - "相应的数据集数据格式如下:" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "!head -n 5 ./train_data/AdvertiseGen_Simple/train.json" - ] - }, - { - "attachments": {}, - "cell_type": "markdown", - "metadata": {}, - "source": [ - "### 准备微调训练作业脚本\n", - "\n", - "ChatGLM的官方提供[微调训练脚本](https://github.com/THUDM/ChatGLM2-6B/tree/main/ptuning),支持使用P-Tuning v2的方式对ChatGLM模型进行微调。我们将基于相应的微调训练脚本,修改训练作业的拉起Shell脚本(`train.sh`),然后使用PAI Python SDK将微调训练作业提交到PAI执行。" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "# 下载ChatGLM代码\n", - "!git clone https://github.com/THUDM/ChatGLM2-6B.git" - ] - }, - { - "attachments": {}, - "cell_type": "markdown", - "metadata": {}, - "source": [ - "\n", - "\n", - "\n", - "当训练作业提交到PAI执行时,需要按一定规范读取输入数据,以及将需要保存的模型写出到指定路径下,更加具体介绍请见文档:[提交训练作业](https://help.aliyun.com/document_detail/2261505.html)。\n", - "\n", - 
"修改后的训练作业拉起脚本如下:" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": { - "vscode": { - "languageId": "shellscript" - } - }, - "outputs": [], - "source": [ - "%%writefile ChatGLM2-6B/ptuning/train.sh\n", - "\n", - "PRE_SEQ_LEN=128\n", - "LR=2e-2\n", - "NUM_GPUS=`nvidia-smi --list-gpus | wc -l`\n", - "\n", - "torchrun --standalone --nnodes=1 --nproc-per-node=$NUM_GPUS main.py \\\n", - " --do_train \\\n", - " --train_file /ml/input/data/train/train.json \\\n", - " --validation_file /ml/input/data/train/dev.json \\\n", - " --preprocessing_num_workers 10 \\\n", - " --prompt_column content \\\n", - " --response_column summary \\\n", - " --overwrite_cache \\\n", - " --model_name_or_path /ml/input/data/model \\\n", - " --output_dir /ml/output/model/ \\\n", - " --overwrite_output_dir \\\n", - " --max_source_length 64 \\\n", - " --max_target_length 128 \\\n", - " --per_device_train_batch_size 4 \\\n", - " --per_device_eval_batch_size 4 \\\n", - " --gradient_accumulation_steps 32 \\\n", - " --predict_with_generate \\\n", - " --num_train_epochs 10 \\\n", - " --save_strategy epoch \\\n", - " --learning_rate $LR \\\n", - " --pre_seq_len $PRE_SEQ_LEN \\\n", - " --quantization_bit 4\n" - ] - }, - { - "attachments": {}, - "cell_type": "markdown", - "metadata": {}, - "source": [ - "这里我们将使用PAI提供的PyTorch GPU训练镜像运行训练作业,需要安装部分第三方依赖包。用户可以通过提供`requirements.txt`的方式提供,相应的依赖会在训练作业执行前被安装到环境中\n" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": { - "vscode": { - "languageId": "shellscript" - } - }, - "outputs": [], - "source": [ - "%%writefile ChatGLM2-6B/ptuning/requirements.txt\n", - "# 模型需要的依赖\n", - "transformers==4.30.2\n", - "accelerate\n", - "icetk\n", - "cpm_kernels\n", - "\n", - "torch>=2.0,<2.1\n", - "sentencepiece\n", - "accelerate\n", - "\n", - "rouge_chinese\n", - "nltk\n", - "jieba\n", - "datasets" - ] - }, - { - "attachments": {}, - "cell_type": "markdown", - "metadata": {}, - "source": [ - "### 提交训练作业\n", - "\n", - "我们将通过PAI 
Python SDK,将以上的训练作业提交到PAI执行。SDK在提交训练作业之后,会打印训练作业的链接,用户可以通过对应的链接查看作业的执行详情,输出日志。\n", - "\n", - "> Note:按当前示例教程使用的训练配置、数据集和机器规格,训练作业运行约10分钟左右。" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "from pai.estimator import Estimator\n", - "from pai.image import retrieve\n", - "\n", - "# 使用PAI提供的最新的PyTorch推理镜像\n", - "image_uri = retrieve(\n", - " \"PyTorch\",\n", - " \"latest\",\n", - " accelerator_type=\"GPU\",\n", - ").image_uri\n", - "\n", - "\n", - "est = Estimator(\n", - " command=\"bash train.sh\", # 启动命令\n", - " source_dir=\"./ChatGLM2-6B/ptuning\", # 训练代码目录.\n", - " image_uri=image_uri, # 训练镜像\n", - " instance_type=\"ecs.gn6e-c12g1.3xlarge\", # 使用的机器规格示例,V100(32G)\n", - " base_job_name=\"chatglm2_finetune_\",\n", - ")\n", - "\n", - "\n", - "# 提交训练作业\n", - "est.fit(\n", - " inputs={\n", - " \"model\": model_uri,\n", - " \"train\": train_data_uri,\n", - " }\n", - ")" - ] - }, - { - "attachments": {}, - "cell_type": "markdown", - "metadata": {}, - "source": [ - "默认`estimator.fit`会等待到作业执行完成。作业执行成功之后,用户可以通过`est.model_data()`获取输出模型在OSS上的路径地址。" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "print(est.model_data())" - ] - }, - { - "attachments": {}, - "cell_type": "markdown", - "metadata": {}, - "source": [ - "用户可以通过`ossutil`或是SDK提供的便利方法将模型下载到本地:\n", - "\n", - "```python\n", - "from pai.common.oss_util import download\n", - "\n", - "\n", - "# 使用SDK的便利方法下载模型到本地.\n", - "download(\n", - "\toss_path=est.model_data(),\n", - "\tlocal_path=\"./output_model\",\n", - ")\n", - "\n", - "```" - ] - }, - { - "attachments": {}, - "cell_type": "markdown", - "metadata": {}, - "source": [ - "### 部署微调之后的模型\n", - "\n", - "微调训练之后获得的`checkpoints`,需要和原始的模型配合一起使用。我们需要通过以下代码获得对应的checkpoint路径.\n", - "\n", - "> 用户通过修改微调训练的代码,使用`Trainer.save_model()`显式的保存相应的checkpoints,则可以直接通过`estimator.model_data()`下获得相应的checkpoints." 
- ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "import os\n", - "\n", - "# 以上的训练作业超参设置中,我们设置`epochs=2`, checkpoints保存的策略是`每一个epochs保存`。\n", - "# 默认最后一个checkpoint会被保存到`{output_dir}/checkpoint-2`路径下.\n", - "# 通过以下路径,我们可以获得模型训练获得的最后一个checkpoint的OSS路径.\n", - "\n", - "checkpoint_uri = os.path.join(est.model_data(), \"checkpoint-10/\")\n", - "print(checkpoint_uri)" - ] - }, - { - "attachments": {}, - "cell_type": "markdown", - "metadata": {}, - "source": [ - "我们将复用ChatGLM2部署的推理服务程序创建推理服务。与直接部署ChatGLM2的不同点在于我们还需要提供微调之后获得的checkpoints。\n", - "\n", - "通过`InferenceSpec.mount` API,我们可以将相应的OSS模型路径挂载到服务容器中,供推理服务程序使用。" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "import os\n", - "from pai.model import container_serving_spec, Model\n", - "from pai.image import retrieve, ImageScope\n", - "\n", - "\n", - "# InferenceSpec用于描述如何创建推理服务\n", - "infer_spec = container_serving_spec(\n", - " image_uri=retrieve( # 使用PAI提供的最新PyTorch的推理镜像\n", - " \"PyTorch\",\n", - " \"latest\",\n", - " accelerator_type=\"GPU\",\n", - " image_scope=ImageScope.INFERENCE,\n", - " ),\n", - " source_dir=\"./server_src\", # 代码目录\n", - " command=\"python run.py\", # 启动命令\n", - ")\n", - "\n", - "\n", - "# 将相应的checkpoints挂载到服务中,推理服务的程序通过检查目录(/ml/ptuning_checkpoints/)是否存在加载checkpoints\n", - "infer_spec.mount(checkpoint_uri, \"/ml/ptuning_checkpoints\")\n", - "print(infer_spec.to_dict())" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "from pai.common.utils import random_str\n", - "\n", - "m = Model(\n", - " model_data=model_uri,\n", - " inference_spec=infer_spec,\n", - ")\n", - "\n", - "# 部署模型\n", - "p = m.deploy(\n", - " service_name=\"chatglm_ft_{}\".format(random_str(6)),\n", - " instance_type=\"ecs.gn6i-c16g1.4xlarge\", # 1 * T4\n", - " options={\n", - " # 配置EAS RPC框架的超时时间, 单位为毫秒\n", - " \"metadata.rpc.keepalive\": 
20000,\n", - " },\n", - ")" - ] - }, - { - "attachments": {}, - "cell_type": "markdown", - "metadata": {}, - "source": [ - "向推理服务发送请求,测试推理服务是否正常启动。" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "resp = p.raw_predict(\n", - " {\n", - " \"prompt\": \"你好\",\n", - " },\n", - ")\n", - "print(resp.json())" - ] - }, - { - "attachments": {}, - "cell_type": "markdown", - "metadata": {}, - "source": [ - "基于以上微调后模型的推理服务,我们可以使用Gradio创建一个新的机器人。" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "import gradio as gr\n", - "import random\n", - "import time\n", - "\n", - "with gr.Blocks() as demo:\n", - " chatbot = gr.Chatbot()\n", - " msg = gr.Textbox()\n", - " clear = gr.Button(\"Clear\")\n", - " submit = gr.Button(\"Submit\")\n", - "\n", - " def respond(message, chat_history):\n", - "\n", - " print(f\"Message: {message}\")\n", - " print(f\"ChatHistory: {chat_history}\")\n", - " resp = p.raw_predict(\n", - " {\n", - " \"prompt\": message,\n", - " \"history\": chat_history,\n", - " }\n", - " ).json()\n", - " print(f\"Response: {resp['response']}\")\n", - "\n", - " chat_history.append((message, resp[\"response\"]))\n", - " return \"\", chat_history\n", - "\n", - " submit.click(respond, [msg, chatbot], [msg, chatbot])\n", - "\n", - "demo.launch(share=True)" - ] - }, - { - "attachments": {}, - "cell_type": "markdown", - "metadata": {}, - "source": [ - "在测试完成之后,可以通过`p.delete_service()`删除服务,释放资源。\n", - "\n", - "> 请注意,删除在线推理服务之后,对应的Gradio的应用将无法使用。" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "p.delete_service()" - ] - } - ], - "metadata": { - "kernelspec": { - "display_name": "Python 3 (ipykernel)", - "language": "python", - "name": "python3" - }, - "language_info": { - "codemirror_mode": { - "name": "ipython", - "version": 3 - }, - "file_extension": ".py", - "mimetype": 
"text/x-python", - "name": "python", - "nbconvert_exporter": "python", - "pygments_lexer": "ipython3", - "version": "3.8.16" - } - }, - "nbformat": 4, - "nbformat_minor": 4 -} diff --git a/docs/source/tutorial/chatglm_finetune/resource/gradio-chatglml.jpg b/docs/source/tutorial/chatglm_finetune/resource/gradio-chatglml.jpg deleted file mode 100644 index 13848a5..0000000 Binary files a/docs/source/tutorial/chatglm_finetune/resource/gradio-chatglml.jpg and /dev/null differ diff --git a/docs/source/tutorial/checkpoint/checkpoint.ipynb b/docs/source/tutorial/checkpoint/checkpoint.ipynb deleted file mode 100644 index 55cfbff..0000000 --- a/docs/source/tutorial/checkpoint/checkpoint.ipynb +++ /dev/null @@ -1,1285 +0,0 @@ -{ - "cells": [ - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "# 在训练作业中使用checkpoint\n", - "\n", - "在机器学习模型训练过程中,往往需要较长的时间完成训练数据的迭代,实现模型的收敛,然而训练过程可能会因为各种原因中断,例如机器故障、网络问题、或是代码原因等。为了避免中断后需要重头开始训练,开发者通常会在训练过程中,定期将模型的状态保存为`checkpoint`文件,以便在训练中断后,能够从保存的`checkpoint`文件获取模型参数,优化器状态,训练步数等训练状态,恢复训练。\n", - "\n", - "本文档介绍如何在PAI的训练作业中使用checkpoint。" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "\n", - "## 准备工作\n", - "\n", - "我们需要首先安装PAI Python SDK以运行本示例。" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "!python -m pip install --upgrade alipai" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "\n", - "\n", - "SDK 需要配置访问阿里云服务需要的 AccessKey,以及当前使用的工作空间和OSS Bucket。在 PAI Python SDK 安装之后,通过在 **命令行终端** 中执行以下命令,按照引导配置密钥,工作空间等信息。\n", - "\n", - "\n", - "```shell\n", - "\n", - "# 以下命令,请在 命令行终端 中执行.\n", - "\n", - "python -m pai.toolkit.config\n", - "\n", - "```\n", - "\n", - "\n", - "我们可以通过以下代码验证当前的配置。" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "## 使用checkpoint保存和恢复训练作业\n", - "\n", - "当使用SDK提供的`pai.estimator.Estimator` 提交训练作业时,训练作业默认会挂载用户的OSS 
Bucket路径到训练作业的`/ml/output/checkpoints`目录。训练代码可以将checkpoint文件写出到对应的路径,从而保存到OSS中。提交训练作业之后,可以通过 `estimator.checkpoints_data()` 方法可以获取`checkpoints`保存的OSS路径。\n", - "\n", - "当需要使用已有的`checkpoint`时,用户可以通过 `checkpoints_path` 参数指定一个OSS Bucket路径,PAI会将该路径挂载到训练作业的`/ml/output/checkpoints`目录,训练作业可以通过读取对应数据路径下的checkpoint文件来恢复训练。\n", - "\n", - "\n", - "\n", - "```python\n", - "\n", - "from pai.estimator import Estimator\n", - "\n", - "\n", - "# 1. 使用默认的checkpoints路径保存模型的checkpoints\n", - "est = Estimator(\n", - "\timage_uri=\"\",\n", - "\tcommand=\"python train.py\",\n", - ")\n", - "\n", - "# 训练作业默认会挂载一个OSS Bucket路径到 /ml/output/checkpoints\n", - "# 用户训练代码可以通过写文件到 /ml/output/checkpoints 保存checkpoint\n", - "est.fit()\n", - "\n", - "# 查看训练作业的checkpoints路径\n", - "print(est.checkpoints_data())\n", - "\n", - "# 2. 使用其他训练作业产出的checkpoints恢复训练\n", - "est_load = Estimator(\n", - "\timage_uri=\"\",\n", - "\tcommand=\"python train.py\",\n", - "\t# 指定使用上一个训练作业输出的checkpoints.\n", - "\tcheckpoints_path=est.checkpoints_data(),\n", - ")\n", - "\n", - "# 训练代码从 /ml/output/checkpoints 中加载checkpoint\n", - "est_load.fit()\n", - "\n", - "```\n", - "\n", - "\n", - "\n" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "## 在PyTorch中使用checkpoint\n", - "\n", - "在PyTorch中,通常使用`torch.save`方法将模型的参数、优化器的状态、训练进度等信息,以字典的形式作为`checkpoint`进行保存。保存的`checkpoint`文件可以通过 `torch.load` 进行加载。PyTorch提供了如何在训练中保存和加载checkpoint的教程:[Save And Loading A General Checkpoint In PyTorch](https://pytorch.org/tutorials/recipes/recipes/saving_and_loading_a_general_checkpoint.html)。\n", - "\n", - "我们将基于PyTorch的示例教程,演示如何在PAI的训练作业中使用checkpoint。\n" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "训练作业使用的代码如下:\n", - "\n", - "1. 在训练开始之前,通过 `/ml/output/checkpoints/` 路径加载checkpoint获取初始化模型参数,优化器,以及训练进度。\n", - "\n", - "2. 
基于checkpoint的状态信息训练模型,在训练过程中,定期保存checkpoint到 `/ml/output/checkpoints/` 路径。\n", - " " - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "!mkdir -p train_src" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "%%writefile train_src/train.py\n", - "# Additional information\n", - "import os\n", - "import torch\n", - "import torch.nn as nn\n", - "import torch.optim as optim\n", - "from torch.utils.data import Dataset, DataLoader\n", - "import torch.nn.functional as F\n", - "\n", - "\n", - "EPOCH = 5\n", - "CHECKPOINT_NAME = \"checkpoint.pt\"\n", - "LOSS = 0.4\n", - "\n", - "# Define a custom mock dataset\n", - "class RandomDataset(Dataset):\n", - " def __init__(self, num_samples=1000):\n", - " self.num_samples = num_samples\n", - "\n", - " def __len__(self):\n", - " return self.num_samples\n", - "\n", - " def __getitem__(self, idx):\n", - " x = torch.randn(10) # Generating random input tensor\n", - " y = torch.randint(0, 2, (1,)).item() # Generating random target label (0 or 1)\n", - " return x, y\n", - "\n", - "\n", - "# Define your model\n", - "class MyModel(nn.Module):\n", - " def __init__(self):\n", - " super(MyModel, self).__init__()\n", - " self.fc = nn.Linear(10, 2)\n", - " \n", - " def forward(self, x):\n", - " return self.fc(x)\n", - "\n", - "\n", - "net = MyModel()\n", - "criterion = nn.CrossEntropyLoss()\n", - "optimizer = optim.SGD(net.parameters(), lr=0.001)\n", - "start_epoch = 0\n", - "\n", - "def load_checkpoint():\n", - " \"\"\"Load checkpoint if exists.\"\"\"\n", - " global net, optimizer, start_epoch, LOSS\n", - " checkpoint_dir = os.environ.get(\"PAI_OUTPUT_CHECKPOINTS\")\n", - " if not checkpoint_dir:\n", - " return\n", - " checkpoint_path = os.path.join(checkpoint_dir, CHECKPOINT_NAME)\n", - " if not os.path.exists(checkpoint_path):\n", - " return\n", - " data = torch.load(checkpoint_path)\n", - "\n", - " 
net.load_state_dict(data[\"model_state_dict\"])\n", - " optimizer.load_state_dict(data[\"optimizer_state_dict\"])\n", - " start_epoch = data[\"epoch\"]\n", - "\n", - "\n", - "def save_checkpoint(epoch):\n", - " global net, optimizer, start_epoch, LOSS\n", - " checkpoint_dir = os.environ.get(\"PAI_OUTPUT_CHECKPOINTS\")\n", - " if not checkpoint_dir:\n", - " return\n", - " checkpoint_path = os.path.join(checkpoint_dir, CHECKPOINT_NAME)\n", - " torch.save({\n", - " 'epoch': epoch + 1,\n", - " 'model_state_dict': net.state_dict(),\n", - " 'optimizer_state_dict': optimizer.state_dict(),\n", - " }, checkpoint_path)\n", - "\n", - "\n", - "def parse_args():\n", - " import argparse\n", - " parser = argparse.ArgumentParser()\n", - " parser.add_argument(\"--epochs\", type=int, default=10)\n", - " args = parser.parse_args()\n", - " return args\n", - "\n", - "\n", - "def train():\n", - " args = parse_args()\n", - " load_checkpoint()\n", - " batch_size = 4\n", - " dataloader = DataLoader(RandomDataset(), batch_size=batch_size, shuffle=True)\n", - " num_epochs = args.epochs\n", - " print(num_epochs)\n", - " for epoch in range(start_epoch, num_epochs):\n", - " net.train()\n", - " for i, (inputs, targets) in enumerate(dataloader):\n", - " # Forward pass\n", - " outputs = net(inputs)\n", - " loss = criterion(outputs, targets)\n", - " \n", - " # Backward pass and optimization\n", - " optimizer.zero_grad()\n", - " loss.backward()\n", - " optimizer.step()\n", - " \n", - " # Print training progress\n", - " if (i+1) % 10 == 0:\n", - " print(f'Epoch [{epoch+1}/{num_epochs}], Step [{i+1}/{len(dataloader)}], Loss: {loss.item()}')\n", - " \n", - " # Save checkpoint\n", - " save_checkpoint(epoch=epoch)\n", - " # save the model\n", - " torch.save(net.state_dict(), os.path.join(os.environ.get(\"PAI_OUTPUT_MODEL\", \".\"), \"model.pt\"))\n", - " \n", - "\n", - "\n", - "if __name__ == \"__main__\":\n", - " train()" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - 
"我们将以上的代码提交到PAI执行,训练作业最终提供挂载的OSS路径保存模型。" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": { - "tags": [ - "keep_output" - ] - }, - "outputs": [ - { - "name": "stderr", - "output_type": "stream", - "text": [ - "/Users/liangquan/code/pypai/pai/common/oss_utils.py:13: TqdmExperimentalWarning: Using `tqdm.autonotebook.tqdm` in notebook mode. Use `tqdm.tqdm` instead to force console mode (e.g. in jupyter console)\n", - " from tqdm.autonotebook import tqdm\n" - ] - }, - { - "data": { - "application/vnd.jupyter.widget-view+json": { - "model_id": "d8266991a0d042c6a54531f252ecc727", - "version_major": 2, - "version_minor": 0 - }, - "text/plain": [ - "Uploading file: /var/folders/hc/5w4bg25j1ns2mm0yb06zzzbh0000gp/T/tmpt3_0rsuf/source.tar.gz: 0%| | 0…" - ] - }, - "metadata": {}, - "output_type": "display_data" - }, - { - "name": "stdout", - "output_type": "stream", - "text": [ - "View the job detail by accessing the console URI: https://pai.console.aliyun.com/?regionId=cn-hangzhou&workspaceId=58670#/training/jobs/train1u1it512gqg\n", - "TrainingJob launch starting\n", - "MAX_PARALLELISM=0\n", - "C_INCLUDE_PATH=/home/pai/include\n", - "KUBERNETES_PORT=tcp://10.192.0.1:443\n", - "KUBERNETES_SERVICE_PORT=443\n", - "LANGUAGE=en_US.UTF-8\n", - "PIP_TRUSTED_HOST=mirrors.cloud.aliyuncs.com\n", - "MASTER_ADDR=train1u1it512gqg-master-0\n", - "HOSTNAME=train1u1it512gqg-master-0\n", - "LD_LIBRARY_PATH=:/lib/x86_64-linux-gnu:/home/pai/lib:/home/pai/jre/lib/amd64/server\n", - "MASTER_PORT=23456\n", - "HOME=/root\n", - "PAI_USER_ARGS=\n", - "PYTHONUNBUFFERED=0\n", - "PAI_OUTPUT_CHECKPOINTS=/ml/output/checkpoints/\n", - "PAI_CONFIG_DIR=/ml/input/config/\n", - "WORLD_SIZE=1\n", - "REGION_ID=cn-hangzhou\n", - "CPLUS_INCLUDE_PATH=/home/pai/include\n", - "RANK=0\n", - "OPAL_PREFIX=/home/pai/\n", - "PAI_TRAINING_JOB_ID=train1u1it512gqg\n", - "TERM=xterm-color\n", - "KUBERNETES_PORT_443_TCP_ADDR=10.192.0.1\n", - "PAI_OUTPUT_MODEL=/ml/output/model/\n", - 
"ELASTIC_TRAINING_ENABLED=false\n", - "PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin:/home/pai/bin:/home/pai/hadoop/bin\n", - "PIP_INDEX_URL=https://mirrors.cloud.aliyuncs.com/pypi/simple\n", - "KUBERNETES_PORT_443_TCP_PORT=443\n", - "KUBERNETES_PORT_443_TCP_PROTO=tcp\n", - "LANG=en_US.UTF-8\n", - "aliyun_logs_containerType_tags=containerType=Algorithm\n", - "PAI_TRAINING_USE_ECI=true\n", - "KUBERNETES_SERVICE_PORT_HTTPS=443\n", - "KUBERNETES_PORT_443_TCP=tcp://10.192.0.1:443\n", - "ELASTIC_INFERENCE_ENABLED=false\n", - "LC_ALL=en_US.UTF-8\n", - "JAVA_HOME=/home/pai\n", - "KUBERNETES_SERVICE_HOST=10.192.0.1\n", - "PWD=/\n", - "PAI_HPS={}\n", - "TZ=UTC\n", - "HADOOP_HOME=/home/pai/hadoop\n", - "PAI_OUTPUT_LOGS=/ml/output/logs/\n", - "aliyun_logs_trainingJobId_tags=trainingJobId=train1u1it512gqg\n", - "PAI_ODPS_CREDENTIAL=/ml/input/credential/odps.json\n", - "PAI_WORKING_DIR=/ml/usercode/\n", - "Change to Working Directory, /ml/usercode/\n", - "User program launching\n", - "-----------------------------------------------------------------\n", - "10\n", - "Epoch [1/10], Step [10/250], Loss: 0.3664854168891907\n", - "Epoch [1/10], Step [20/250], Loss: 0.5867650508880615\n", - "Epoch [1/10], Step [30/250], Loss: 0.8810225129127502\n", - "Epoch [1/10], Step [40/250], Loss: 1.3596220016479492\n", - "Epoch [1/10], Step [50/250], Loss: 1.0757191181182861\n", - "Epoch [1/10], Step [60/250], Loss: 0.5261836051940918\n", - "Epoch [1/10], Step [70/250], Loss: 1.0891999006271362\n", - "Epoch [1/10], Step [80/250], Loss: 1.2425217628479004\n", - "Epoch [1/10], Step [90/250], Loss: 0.7928518652915955\n", - "Epoch [1/10], Step [100/250], Loss: 0.500701367855072\n", - "Epoch [1/10], Step [110/250], Loss: 1.1105762720108032\n", - "Epoch [1/10], Step [120/250], Loss: 0.7642831802368164\n", - "Epoch [1/10], Step [130/250], Loss: 0.9435116052627563\n", - "Epoch [1/10], Step [140/250], Loss: 0.4632255434989929\n", - "Epoch [1/10], Step [150/250], Loss: 
0.8282555937767029\n", - "Epoch [1/10], Step [160/250], Loss: 0.5644117593765259\n", - "Epoch [1/10], Step [170/250], Loss: 0.8821360468864441\n", - "Epoch [1/10], Step [180/250], Loss: 0.6495410799980164\n", - "Epoch [1/10], Step [190/250], Loss: 0.6814499497413635\n", - "Epoch [1/10], Step [200/250], Loss: 1.1818656921386719\n", - "Epoch [1/10], Step [210/250], Loss: 0.4218548536300659\n", - "Epoch [1/10], Step [220/250], Loss: 0.5892952680587769\n", - "Epoch [1/10], Step [230/250], Loss: 0.8104468584060669\n", - "Epoch [1/10], Step [240/250], Loss: 0.3310832977294922\n", - "Epoch [1/10], Step [250/250], Loss: 1.0296210050582886\n", - "Epoch [2/10], Step [10/250], Loss: 0.747037947177887\n", - "Epoch [2/10], Step [20/250], Loss: 1.0555682182312012\n", - "Epoch [2/10], Step [30/250], Loss: 0.5005624294281006\n", - "Epoch [2/10], Step [40/250], Loss: 0.6007864475250244\n", - "Epoch [2/10], Step [50/250], Loss: 0.8172819018363953\n", - "Epoch [2/10], Step [60/250], Loss: 0.7322960495948792\n", - "Epoch [2/10], Step [70/250], Loss: 0.6178841590881348\n", - "Epoch [2/10], Step [80/250], Loss: 0.9776118993759155\n", - "Epoch [2/10], Step [90/250], Loss: 0.8088865876197815\n", - "Epoch [2/10], Step [100/250], Loss: 0.7169486284255981\n", - "Epoch [2/10], Step [110/250], Loss: 0.8003190159797668\n", - "Epoch [2/10], Step [120/250], Loss: 0.9178279638290405\n", - "Epoch [2/10], Step [130/250], Loss: 0.5217956900596619\n", - "Epoch [2/10], Step [140/250], Loss: 1.2751939296722412\n", - "Epoch [2/10], Step [150/250], Loss: 1.1024904251098633\n", - "Epoch [2/10], Step [160/250], Loss: 0.6336060762405396\n", - "Epoch [2/10], Step [170/250], Loss: 0.799022376537323\n", - "Epoch [2/10], Step [180/250], Loss: 0.7938567996025085\n", - "Epoch [2/10], Step [190/250], Loss: 1.060591220855713\n", - "Epoch [2/10], Step [200/250], Loss: 0.9365970492362976\n", - "Epoch [2/10], Step [210/250], Loss: 0.6945515871047974\n", - "Epoch [2/10], Step [220/250], Loss: 0.4772261381149292\n", - 
"Epoch [2/10], Step [230/250], Loss: 1.0332412719726562\n", - "Epoch [2/10], Step [240/250], Loss: 0.7284632325172424\n", - "Epoch [2/10], Step [250/250], Loss: 0.4485410451889038\n", - "Epoch [3/10], Step [10/250], Loss: 0.7845520377159119\n", - "Epoch [3/10], Step [20/250], Loss: 0.5619648694992065\n", - "Epoch [3/10], Step [30/250], Loss: 0.725273609161377\n", - "Epoch [3/10], Step [40/250], Loss: 0.7783026695251465\n", - "Epoch [3/10], Step [50/250], Loss: 0.5168777704238892\n", - "Epoch [3/10], Step [60/250], Loss: 0.67060387134552\n", - "Epoch [3/10], Step [70/250], Loss: 0.9300781488418579\n", - "Epoch [3/10], Step [80/250], Loss: 0.6534505486488342\n", - "Epoch [3/10], Step [90/250], Loss: 0.557340681552887\n", - "Epoch [3/10], Step [100/250], Loss: 0.667724609375\n", - "Epoch [3/10], Step [110/250], Loss: 0.5125826001167297\n", - "Epoch [3/10], Step [120/250], Loss: 0.4494149088859558\n", - "Epoch [3/10], Step [130/250], Loss: 0.6902559995651245\n", - "Epoch [3/10], Step [140/250], Loss: 0.5450549125671387\n", - "Epoch [3/10], Step [150/250], Loss: 1.0632681846618652\n", - "Epoch [3/10], Step [160/250], Loss: 0.7964761257171631\n", - "Epoch [3/10], Step [170/250], Loss: 0.5218536257743835\n", - "Epoch [3/10], Step [180/250], Loss: 0.6972622275352478\n", - "Epoch [3/10], Step [190/250], Loss: 0.7963941097259521\n", - "Epoch [3/10], Step [200/250], Loss: 0.5798731446266174\n", - "Epoch [3/10], Step [210/250], Loss: 0.7930802702903748\n", - "Epoch [3/10], Step [220/250], Loss: 0.7618649005889893\n", - "Epoch [3/10], Step [230/250], Loss: 0.9831617474555969\n", - "Epoch [3/10], Step [240/250], Loss: 0.7935497164726257\n", - "Epoch [3/10], Step [250/250], Loss: 0.9747794270515442\n", - "Epoch [4/10], Step [10/250], Loss: 0.6432996392250061\n", - "Epoch [4/10], Step [20/250], Loss: 0.6515889167785645\n", - "Epoch [4/10], Step [30/250], Loss: 0.8191264867782593\n", - "Epoch [4/10], Step [40/250], Loss: 0.5717310905456543\n", - "Epoch [4/10], Step [50/250], Loss: 
1.0365064144134521\n", - "Epoch [4/10], Step [60/250], Loss: 0.7181562185287476\n", - "Epoch [4/10], Step [70/250], Loss: 0.6014276146888733\n", - "Epoch [4/10], Step [80/250], Loss: 0.8743482232093811\n", - "Epoch [4/10], Step [90/250], Loss: 0.5963127613067627\n", - "Epoch [4/10], Step [100/250], Loss: 0.7012943029403687\n", - "Epoch [4/10], Step [110/250], Loss: 0.6271654367446899\n", - "Epoch [4/10], Step [120/250], Loss: 0.646144449710846\n", - "Epoch [4/10], Step [130/250], Loss: 0.5112266540527344\n", - "Epoch [4/10], Step [140/250], Loss: 0.8657329678535461\n", - "Epoch [4/10], Step [150/250], Loss: 0.677897572517395\n", - "Epoch [4/10], Step [160/250], Loss: 0.798669695854187\n", - "Epoch [4/10], Step [170/250], Loss: 0.805213451385498\n", - "Epoch [4/10], Step [180/250], Loss: 0.7744658589363098\n", - "Epoch [4/10], Step [190/250], Loss: 0.4748728275299072\n", - "Epoch [4/10], Step [200/250], Loss: 0.6623726487159729\n", - "Epoch [4/10], Step [210/250], Loss: 0.6851851940155029\n", - "Epoch [4/10], Step [220/250], Loss: 0.5917701721191406\n", - "Epoch [4/10], Step [230/250], Loss: 0.586968719959259\n", - "Epoch [4/10], Step [240/250], Loss: 0.758073091506958\n", - "Epoch [4/10], Step [250/250], Loss: 0.7908360958099365\n", - "Epoch [5/10], Step [10/250], Loss: 0.747495174407959\n", - "Epoch [5/10], Step [20/250], Loss: 0.7880417108535767\n", - "Epoch [5/10], Step [30/250], Loss: 1.4239259958267212\n", - "Epoch [5/10], Step [40/250], Loss: 0.709957480430603\n", - "Epoch [5/10], Step [50/250], Loss: 0.45279955863952637\n", - "Epoch [5/10], Step [60/250], Loss: 0.6855078935623169\n", - "Epoch [5/10], Step [70/250], Loss: 0.7050631046295166\n", - "Epoch [5/10], Step [80/250], Loss: 0.8256967067718506\n", - "Epoch [5/10], Step [90/250], Loss: 0.9627029895782471\n", - "Epoch [5/10], Step [100/250], Loss: 0.7069070339202881\n", - "Epoch [5/10], Step [110/250], Loss: 0.6772119998931885\n", - "Epoch [5/10], Step [120/250], Loss: 0.5547316670417786\n", - "Epoch 
[5/10], Step [130/250], Loss: 0.4749568998813629\n", - "Epoch [5/10], Step [140/250], Loss: 0.5910231471061707\n", - "Epoch [5/10], Step [150/250], Loss: 0.5789163112640381\n", - "Epoch [5/10], Step [160/250], Loss: 0.994613766670227\n", - "Epoch [5/10], Step [170/250], Loss: 0.7664419412612915\n", - "Epoch [5/10], Step [180/250], Loss: 0.7812412977218628\n", - "Epoch [5/10], Step [190/250], Loss: 0.932634174823761\n", - "Epoch [5/10], Step [200/250], Loss: 0.4732060134410858\n", - "Epoch [5/10], Step [210/250], Loss: 0.6712639927864075\n", - "Epoch [5/10], Step [220/250], Loss: 0.7019771337509155\n", - "Epoch [5/10], Step [230/250], Loss: 0.668921709060669\n", - "Epoch [5/10], Step [240/250], Loss: 0.5486156344413757\n", - "Epoch [5/10], Step [250/250], Loss: 0.8131189346313477\n", - "Epoch [6/10], Step [10/250], Loss: 0.5800281167030334\n", - "Epoch [6/10], Step [20/250], Loss: 0.9032570719718933\n", - "Epoch [6/10], Step [30/250], Loss: 0.6829659938812256\n", - "Epoch [6/10], Step [40/250], Loss: 0.577970027923584\n", - "Epoch [6/10], Step [50/250], Loss: 0.9745671153068542\n", - "Epoch [6/10], Step [60/250], Loss: 0.6292040348052979\n", - "Epoch [6/10], Step [70/250], Loss: 0.9189562201499939\n", - "Epoch [6/10], Step [80/250], Loss: 1.0687212944030762\n", - "Epoch [6/10], Step [90/250], Loss: 0.6210573315620422\n", - "Epoch [6/10], Step [100/250], Loss: 0.7758654356002808\n", - "Epoch [6/10], Step [110/250], Loss: 1.055539846420288\n", - "Epoch [6/10], Step [120/250], Loss: 0.7991855144500732\n", - "Epoch [6/10], Step [130/250], Loss: 0.8390480279922485\n", - "Epoch [6/10], Step [140/250], Loss: 0.5641282200813293\n", - "Epoch [6/10], Step [150/250], Loss: 0.5416208505630493\n", - "Epoch [6/10], Step [160/250], Loss: 0.8556939363479614\n", - "Epoch [6/10], Step [170/250], Loss: 0.8848042488098145\n", - "Epoch [6/10], Step [180/250], Loss: 0.6585526466369629\n", - "Epoch [6/10], Step [190/250], Loss: 0.5264347791671753\n", - "Epoch [6/10], Step [200/250], Loss: 
0.7451325058937073\n", - "Epoch [6/10], Step [210/250], Loss: 0.8498039841651917\n", - "Epoch [6/10], Step [220/250], Loss: 0.9514821767807007\n", - "Epoch [6/10], Step [230/250], Loss: 0.5831080675125122\n", - "Epoch [6/10], Step [240/250], Loss: 0.7323013544082642\n", - "Epoch [6/10], Step [250/250], Loss: 0.799047589302063\n", - "Epoch [7/10], Step [10/250], Loss: 0.7431624531745911\n", - "Epoch [7/10], Step [20/250], Loss: 0.7462856769561768\n", - "Epoch [7/10], Step [30/250], Loss: 0.7637103796005249\n", - "Epoch [7/10], Step [40/250], Loss: 0.7512863874435425\n", - "Epoch [7/10], Step [50/250], Loss: 0.8934370279312134\n", - "Epoch [7/10], Step [60/250], Loss: 0.6657339334487915\n", - "Epoch [7/10], Step [70/250], Loss: 0.7996265292167664\n", - "Epoch [7/10], Step [80/250], Loss: 0.7883811593055725\n", - "Epoch [7/10], Step [90/250], Loss: 0.7327611446380615\n", - "Epoch [7/10], Step [100/250], Loss: 0.7103905081748962\n", - "Epoch [7/10], Step [110/250], Loss: 0.8145009875297546\n", - "Epoch [7/10], Step [120/250], Loss: 0.6999544501304626\n", - "Epoch [7/10], Step [130/250], Loss: 0.6132965087890625\n", - "Epoch [7/10], Step [140/250], Loss: 0.8219666481018066\n", - "Epoch [7/10], Step [150/250], Loss: 0.573877215385437\n", - "Epoch [7/10], Step [160/250], Loss: 0.864593505859375\n", - "Epoch [7/10], Step [170/250], Loss: 0.7187140583992004\n", - "Epoch [7/10], Step [180/250], Loss: 0.601334810256958\n", - "Epoch [7/10], Step [190/250], Loss: 0.6193158626556396\n", - "Epoch [7/10], Step [200/250], Loss: 0.7600311040878296\n", - "Epoch [7/10], Step [210/250], Loss: 0.6659085154533386\n", - "Epoch [7/10], Step [220/250], Loss: 0.6364413499832153\n", - "Epoch [7/10], Step [230/250], Loss: 0.878304123878479\n", - "Epoch [7/10], Step [240/250], Loss: 0.7139410972595215\n", - "Epoch [7/10], Step [250/250], Loss: 0.6852972507476807\n", - "Epoch [8/10], Step [10/250], Loss: 1.0263853073120117\n", - "Epoch [8/10], Step [20/250], Loss: 0.7559791803359985\n", - "Epoch 
[8/10], Step [30/250], Loss: 0.6709325313568115\n", - "Epoch [8/10], Step [40/250], Loss: 0.5146634578704834\n", - "Epoch [8/10], Step [50/250], Loss: 0.6418485641479492\n", - "Epoch [8/10], Step [60/250], Loss: 0.72318035364151\n", - "Epoch [8/10], Step [70/250], Loss: 0.7116968631744385\n", - "Epoch [8/10], Step [80/250], Loss: 0.7035868763923645\n", - "Epoch [8/10], Step [90/250], Loss: 0.6085933446884155\n", - "Epoch [8/10], Step [100/250], Loss: 0.5128545761108398\n", - "Epoch [8/10], Step [110/250], Loss: 0.6380510330200195\n", - "Epoch [8/10], Step [120/250], Loss: 0.4963105320930481\n", - "Epoch [8/10], Step [130/250], Loss: 0.6693160533905029\n", - "Epoch [8/10], Step [140/250], Loss: 0.6602588891983032\n", - "Epoch [8/10], Step [150/250], Loss: 0.8440876007080078\n", - "Epoch [8/10], Step [160/250], Loss: 0.7596740126609802\n", - "Epoch [8/10], Step [170/250], Loss: 0.695992112159729\n", - "Epoch [8/10], Step [180/250], Loss: 0.6737014651298523\n", - "Epoch [8/10], Step [190/250], Loss: 0.6722623705863953\n", - "Epoch [8/10], Step [200/250], Loss: 0.5857406854629517\n", - "Epoch [8/10], Step [210/250], Loss: 0.9563039541244507\n", - "Epoch [8/10], Step [220/250], Loss: 0.7375826835632324\n", - "Epoch [8/10], Step [230/250], Loss: 0.8751094341278076\n", - "Epoch [8/10], Step [240/250], Loss: 0.7180076837539673\n", - "Epoch [8/10], Step [250/250], Loss: 0.6384711861610413\n", - "Epoch [9/10], Step [10/250], Loss: 0.6789698004722595\n", - "Epoch [9/10], Step [20/250], Loss: 0.6645065546035767\n", - "Epoch [9/10], Step [30/250], Loss: 0.6996726989746094\n", - "Epoch [9/10], Step [40/250], Loss: 0.7402397394180298\n", - "Epoch [9/10], Step [50/250], Loss: 0.6388964653015137\n", - "Epoch [9/10], Step [60/250], Loss: 0.9401450753211975\n", - "Epoch [9/10], Step [70/250], Loss: 0.6708970665931702\n", - "Epoch [9/10], Step [80/250], Loss: 0.728550136089325\n", - "Epoch [9/10], Step [90/250], Loss: 0.7362596988677979\n", - "Epoch [9/10], Step [100/250], Loss: 
0.7750495672225952\n", - "Epoch [9/10], Step [110/250], Loss: 0.807244062423706\n", - "Epoch [9/10], Step [120/250], Loss: 0.754521369934082\n", - "Epoch [9/10], Step [130/250], Loss: 0.5469345450401306\n", - "Epoch [9/10], Step [140/250], Loss: 0.8965460062026978\n", - "Epoch [9/10], Step [150/250], Loss: 0.7952369451522827\n", - "Epoch [9/10], Step [160/250], Loss: 0.6263578534126282\n", - "Epoch [9/10], Step [170/250], Loss: 0.5788871049880981\n", - "Epoch [9/10], Step [180/250], Loss: 0.7363749146461487\n", - "Epoch [9/10], Step [190/250], Loss: 0.7322844862937927\n", - "Epoch [9/10], Step [200/250], Loss: 0.6707043051719666\n", - "Epoch [9/10], Step [210/250], Loss: 0.7251213192939758\n", - "Epoch [9/10], Step [220/250], Loss: 0.6435517072677612\n", - "Epoch [9/10], Step [230/250], Loss: 0.534774124622345\n", - "Epoch [9/10], Step [240/250], Loss: 0.6989405751228333\n", - "Epoch [9/10], Step [250/250], Loss: 0.7413943409919739\n", - "Epoch [10/10], Step [10/250], Loss: 0.6014090776443481\n", - "Epoch [10/10], Step [20/250], Loss: 0.8173813819885254\n", - "Epoch [10/10], Step [30/250], Loss: 0.8984671235084534\n", - "Epoch [10/10], Step [40/250], Loss: 0.6354056000709534\n", - "Epoch [10/10], Step [50/250], Loss: 0.7964866757392883\n", - "Epoch [10/10], Step [60/250], Loss: 0.7849454879760742\n", - "Epoch [10/10], Step [70/250], Loss: 0.5637381076812744\n", - "Epoch [10/10], Step [80/250], Loss: 0.7669687271118164\n", - "Epoch [10/10], Step [90/250], Loss: 0.6140038371086121\n", - "Epoch [10/10], Step [100/250], Loss: 0.7134058475494385\n", - "Epoch [10/10], Step [110/250], Loss: 0.6768066883087158\n", - "Epoch [10/10], Step [120/250], Loss: 0.6304113268852234\n", - "Epoch [10/10], Step [130/250], Loss: 0.7426990866661072\n", - "Epoch [10/10], Step [140/250], Loss: 0.7469097971916199\n", - "Epoch [10/10], Step [150/250], Loss: 0.7591947913169861\n", - "Epoch [10/10], Step [160/250], Loss: 0.7327935099601746\n", - "Epoch [10/10], Step [170/250], Loss: 
0.8590223789215088\n", - "Epoch [10/10], Step [180/250], Loss: 0.6994909644126892\n", - "Epoch [10/10], Step [190/250], Loss: 0.8262240886688232\n", - "Epoch [10/10], Step [200/250], Loss: 0.6071692109107971\n", - "Epoch [10/10], Step [210/250], Loss: 0.915013313293457\n", - "Epoch [10/10], Step [220/250], Loss: 0.8758894205093384\n", - "Epoch [10/10], Step [230/250], Loss: 0.6473208665847778\n", - "Epoch [10/10], Step [240/250], Loss: 0.6843898296356201\n", - "Epoch [10/10], Step [250/250], Loss: 0.6645953059196472\n", - "\n", - "Training job (train1u1it512gqg) succeeded, you can check the logs/metrics/output in the console:\n", - "https://pai.console.aliyun.com/?regionId=cn-hangzhou&workspaceId=58670#/training/jobs/train1u1it512gqg\n" - ] - } - ], - "source": [ - "from pai.estimator import Estimator\n", - "from pai.image import retrieve\n", - "\n", - "\n", - "epochs = 10\n", - "\n", - "\n", - "# 训练作业默认会挂载一个OSS Bucket路径到 /ml/output/checkpoints/\n", - "est = Estimator(\n", - " command=\"python train.py --epochs {}\".format(epochs),\n", - " source_dir=\"./train_src/\",\n", - " image_uri=retrieve(\"PyTorch\", \"latest\").image_uri,\n", - " instance_type=\"ecs.c6.large\",\n", - " base_job_name=\"torch_checkpoint\",\n", - ")\n", - "\n", - "est.fit()" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "# 训练作业的checkpoints目录\n", - "print(est.checkpoints_data())" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "\n", - "以上训练作业对训练数据做了10次迭代,通过使用checkpoint,我们可以在原先模型的基础上继续训练,例如使用训练数据继续迭代20次迭代。\n" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": { - "tags": [ - "keep_output" - ] - }, - "outputs": [ - { - "data": { - "application/vnd.jupyter.widget-view+json": { - "model_id": "1465353ea22d4b9a86f7b5b892f23471", - "version_major": 2, - "version_minor": 0 - }, - "text/plain": [ - "Uploading file: 
/var/folders/hc/5w4bg25j1ns2mm0yb06zzzbh0000gp/T/tmpshzpdx_z/source.tar.gz: 0%| | 0…" - ] - }, - "metadata": {}, - "output_type": "display_data" - }, - { - "name": "stdout", - "output_type": "stream", - "text": [ - "View the job detail by accessing the console URI: https://pai.console.aliyun.com/?regionId=cn-hangzhou&workspaceId=58670#/training/jobs/trainu90lc57j1vm\n", - "TrainingJob launch starting\n", - "MAX_PARALLELISM=0\n", - "C_INCLUDE_PATH=/home/pai/include\n", - "KUBERNETES_SERVICE_PORT=443\n", - "KUBERNETES_PORT=tcp://10.192.0.1:443\n", - "LANGUAGE=en_US.UTF-8\n", - "PIP_TRUSTED_HOST=mirrors.cloud.aliyuncs.com\n", - "MASTER_ADDR=trainu90lc57j1vm-master-0\n", - "HOSTNAME=trainu90lc57j1vm-master-0\n", - "LD_LIBRARY_PATH=:/lib/x86_64-linux-gnu:/home/pai/lib:/home/pai/jre/lib/amd64/server\n", - "MASTER_PORT=23456\n", - "HOME=/root\n", - "PAI_USER_ARGS=\n", - "PYTHONUNBUFFERED=0\n", - "PAI_OUTPUT_CHECKPOINTS=/ml/output/checkpoints/\n", - "PAI_CONFIG_DIR=/ml/input/config/\n", - "WORLD_SIZE=1\n", - "REGION_ID=cn-hangzhou\n", - "CPLUS_INCLUDE_PATH=/home/pai/include\n", - "RANK=0\n", - "OPAL_PREFIX=/home/pai/\n", - "PAI_TRAINING_JOB_ID=trainu90lc57j1vm\n", - "TERM=xterm-color\n", - "KUBERNETES_PORT_443_TCP_ADDR=10.192.0.1\n", - "PAI_OUTPUT_MODEL=/ml/output/model/\n", - "ELASTIC_TRAINING_ENABLED=false\n", - "PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin:/home/pai/bin:/home/pai/hadoop/bin\n", - "PIP_INDEX_URL=https://mirrors.cloud.aliyuncs.com/pypi/simple\n", - "KUBERNETES_PORT_443_TCP_PORT=443\n", - "KUBERNETES_PORT_443_TCP_PROTO=tcp\n", - "LANG=en_US.UTF-8\n", - "PAI_TRAINING_USE_ECI=true\n", - "aliyun_logs_containerType_tags=containerType=Algorithm\n", - "KUBERNETES_PORT_443_TCP=tcp://10.192.0.1:443\n", - "KUBERNETES_SERVICE_PORT_HTTPS=443\n", - "ELASTIC_INFERENCE_ENABLED=false\n", - "LC_ALL=en_US.UTF-8\n", - "JAVA_HOME=/home/pai\n", - "KUBERNETES_SERVICE_HOST=10.192.0.1\n", - "PWD=/\n", - "PAI_HPS={}\n", - "TZ=UTC\n", - 
"HADOOP_HOME=/home/pai/hadoop\n", - "PAI_OUTPUT_LOGS=/ml/output/logs/\n", - "aliyun_logs_trainingJobId_tags=trainingJobId=trainu90lc57j1vm\n", - "PAI_ODPS_CREDENTIAL=/ml/input/credential/odps.json\n", - "PAI_WORKING_DIR=/ml/usercode/\n", - "Change to Working Directory, /ml/usercode/\n", - "User program launching\n", - "-----------------------------------------------------------------\n", - "30\n", - "Epoch [11/30], Step [10/250], Loss: 0.678845226764679\n", - "Epoch [11/30], Step [20/250], Loss: 0.6292213201522827\n", - "Epoch [11/30], Step [30/250], Loss: 0.6856911182403564\n", - "Epoch [11/30], Step [40/250], Loss: 0.6147192716598511\n", - "Epoch [11/30], Step [50/250], Loss: 0.7846511602401733\n", - "Epoch [11/30], Step [60/250], Loss: 0.6719473004341125\n", - "Epoch [11/30], Step [70/250], Loss: 0.8227031826972961\n", - "Epoch [11/30], Step [80/250], Loss: 0.7861220836639404\n", - "Epoch [11/30], Step [90/250], Loss: 0.7436649203300476\n", - "Epoch [11/30], Step [100/250], Loss: 0.8053247928619385\n", - "Epoch [11/30], Step [110/250], Loss: 0.716484546661377\n", - "Epoch [11/30], Step [120/250], Loss: 0.6527263522148132\n", - "Epoch [11/30], Step [130/250], Loss: 0.7980918884277344\n", - "Epoch [11/30], Step [140/250], Loss: 0.6761615872383118\n", - "Epoch [11/30], Step [150/250], Loss: 0.8030520081520081\n", - "Epoch [11/30], Step [160/250], Loss: 0.6580255627632141\n", - "Epoch [11/30], Step [170/250], Loss: 0.7671869993209839\n", - "Epoch [11/30], Step [180/250], Loss: 0.6622000932693481\n", - "Epoch [11/30], Step [190/250], Loss: 0.747247576713562\n", - "Epoch [11/30], Step [200/250], Loss: 0.705307126045227\n", - "Epoch [11/30], Step [210/250], Loss: 0.6516950130462646\n", - "Epoch [11/30], Step [220/250], Loss: 0.6065223217010498\n", - "Epoch [11/30], Step [230/250], Loss: 0.6885045766830444\n", - "Epoch [11/30], Step [240/250], Loss: 0.7392936944961548\n", - "Epoch [11/30], Step [250/250], Loss: 0.6803852319717407\n", - "Epoch [12/30], Step [10/250], 
Loss: 0.8813486695289612\n", - "Epoch [12/30], Step [20/250], Loss: 0.7780698537826538\n", - "Epoch [12/30], Step [30/250], Loss: 0.7158650159835815\n", - "Epoch [12/30], Step [40/250], Loss: 0.5826153755187988\n", - "Epoch [12/30], Step [50/250], Loss: 0.6013429760932922\n", - "Epoch [12/30], Step [60/250], Loss: 0.7084614634513855\n", - "Epoch [12/30], Step [70/250], Loss: 0.6825753450393677\n", - "Epoch [12/30], Step [80/250], Loss: 0.6074261665344238\n", - "Epoch [12/30], Step [90/250], Loss: 0.8619674444198608\n", - "Epoch [12/30], Step [100/250], Loss: 0.6013283729553223\n", - "Epoch [12/30], Step [110/250], Loss: 0.6808617115020752\n", - "Epoch [12/30], Step [120/250], Loss: 0.6765388250350952\n", - "Epoch [12/30], Step [130/250], Loss: 0.7072106599807739\n", - "Epoch [12/30], Step [140/250], Loss: 0.6905199289321899\n", - "Epoch [12/30], Step [150/250], Loss: 0.6942532062530518\n", - "Epoch [12/30], Step [160/250], Loss: 0.7181805968284607\n", - "Epoch [12/30], Step [170/250], Loss: 0.6357207298278809\n", - "Epoch [12/30], Step [180/250], Loss: 0.6719130277633667\n", - "Epoch [12/30], Step [190/250], Loss: 0.7218160629272461\n", - "Epoch [12/30], Step [200/250], Loss: 0.7158771753311157\n", - "Epoch [12/30], Step [210/250], Loss: 0.7585588693618774\n", - "Epoch [12/30], Step [220/250], Loss: 0.8121419548988342\n", - "Epoch [12/30], Step [230/250], Loss: 0.7744668126106262\n", - "Epoch [12/30], Step [240/250], Loss: 0.7164073586463928\n", - "Epoch [12/30], Step [250/250], Loss: 0.5488151907920837\n", - "Epoch [13/30], Step [10/250], Loss: 0.7662173509597778\n", - "Epoch [13/30], Step [20/250], Loss: 0.7802825570106506\n", - "Epoch [13/30], Step [30/250], Loss: 0.7456352114677429\n", - "Epoch [13/30], Step [40/250], Loss: 0.6143842935562134\n", - "Epoch [13/30], Step [50/250], Loss: 0.7393404245376587\n", - "Epoch [13/30], Step [60/250], Loss: 0.6536136865615845\n", - "Epoch [13/30], Step [70/250], Loss: 0.7647539377212524\n", - "Epoch [13/30], Step [80/250], 
Loss: 0.6415259838104248\n", - "Epoch [13/30], Step [90/250], Loss: 0.8065975904464722\n", - "Epoch [13/30], Step [100/250], Loss: 0.654565155506134\n", - "Epoch [13/30], Step [110/250], Loss: 0.6512014865875244\n", - "Epoch [13/30], Step [120/250], Loss: 0.6851429343223572\n", - "Epoch [13/30], Step [130/250], Loss: 0.7639355659484863\n", - "Epoch [13/30], Step [140/250], Loss: 0.7886079549789429\n", - "Epoch [13/30], Step [150/250], Loss: 0.677024245262146\n", - "Epoch [13/30], Step [160/250], Loss: 0.6869807243347168\n", - "Epoch [13/30], Step [170/250], Loss: 0.7076682448387146\n", - "Epoch [13/30], Step [180/250], Loss: 0.6720783710479736\n", - "Epoch [13/30], Step [190/250], Loss: 0.6578226685523987\n", - "Epoch [13/30], Step [200/250], Loss: 0.6924010515213013\n", - "Epoch [13/30], Step [210/250], Loss: 0.8084946870803833\n", - "Epoch [13/30], Step [220/250], Loss: 0.7015032768249512\n", - "Epoch [13/30], Step [230/250], Loss: 0.6897311210632324\n", - "Epoch [13/30], Step [240/250], Loss: 0.7233715653419495\n", - "Epoch [13/30], Step [250/250], Loss: 0.82469242811203\n", - "Epoch [14/30], Step [10/250], Loss: 0.7118442058563232\n", - "Epoch [14/30], Step [20/250], Loss: 0.66881263256073\n", - "Epoch [14/30], Step [30/250], Loss: 0.6966590881347656\n", - "Epoch [14/30], Step [40/250], Loss: 0.8390185236930847\n", - "Epoch [14/30], Step [50/250], Loss: 0.7978378534317017\n", - "Epoch [14/30], Step [60/250], Loss: 0.6207278966903687\n", - "Epoch [14/30], Step [70/250], Loss: 0.6512827277183533\n", - "Epoch [14/30], Step [80/250], Loss: 0.6850301027297974\n", - "Epoch [14/30], Step [90/250], Loss: 0.628646194934845\n", - "Epoch [14/30], Step [100/250], Loss: 0.6093996167182922\n", - "Epoch [14/30], Step [110/250], Loss: 0.7588788866996765\n", - "Epoch [14/30], Step [120/250], Loss: 0.6795099377632141\n", - "Epoch [14/30], Step [130/250], Loss: 0.6357916593551636\n", - "Epoch [14/30], Step [140/250], Loss: 0.7358158826828003\n", - "Epoch [14/30], Step [150/250], 
Loss: 0.6896149516105652\n", - "Epoch [14/30], Step [160/250], Loss: 0.6862155199050903\n", - "Epoch [14/30], Step [170/250], Loss: 0.659408688545227\n", - "Epoch [14/30], Step [180/250], Loss: 0.717597246170044\n", - "Epoch [14/30], Step [190/250], Loss: 0.6779205203056335\n", - "Epoch [14/30], Step [200/250], Loss: 0.6569654941558838\n", - "Epoch [14/30], Step [210/250], Loss: 0.6521044373512268\n", - "Epoch [14/30], Step [220/250], Loss: 0.5803452134132385\n", - "Epoch [14/30], Step [230/250], Loss: 0.6112836599349976\n", - "Epoch [14/30], Step [240/250], Loss: 0.6311125755310059\n", - "Epoch [14/30], Step [250/250], Loss: 0.6427040696144104\n", - "Epoch [15/30], Step [10/250], Loss: 0.7193827629089355\n", - "Epoch [15/30], Step [20/250], Loss: 0.6781796216964722\n", - "Epoch [15/30], Step [30/250], Loss: 0.7042354345321655\n", - "Epoch [15/30], Step [40/250], Loss: 0.6776638627052307\n", - "Epoch [15/30], Step [50/250], Loss: 0.6593765020370483\n", - "Epoch [15/30], Step [60/250], Loss: 0.6749820113182068\n", - "Epoch [15/30], Step [70/250], Loss: 0.6199281811714172\n", - "Epoch [15/30], Step [80/250], Loss: 0.6898410320281982\n", - "Epoch [15/30], Step [90/250], Loss: 0.6938673257827759\n", - "Epoch [15/30], Step [100/250], Loss: 0.6369883418083191\n", - "Epoch [15/30], Step [110/250], Loss: 0.6758348345756531\n", - "Epoch [15/30], Step [120/250], Loss: 0.7379288673400879\n", - "Epoch [15/30], Step [130/250], Loss: 0.6447997689247131\n", - "Epoch [15/30], Step [140/250], Loss: 0.6910532712936401\n", - "Epoch [15/30], Step [150/250], Loss: 0.7426170110702515\n", - "Epoch [15/30], Step [160/250], Loss: 0.6422319412231445\n", - "Epoch [15/30], Step [170/250], Loss: 0.5789802670478821\n", - "Epoch [15/30], Step [180/250], Loss: 0.7434327602386475\n", - "Epoch [15/30], Step [190/250], Loss: 0.6754781007766724\n", - "Epoch [15/30], Step [200/250], Loss: 0.5865523815155029\n", - "Epoch [15/30], Step [210/250], Loss: 0.6548283696174622\n", - "Epoch [15/30], Step 
[220/250], Loss: 0.7495550513267517\n", - "Epoch [15/30], Step [230/250], Loss: 0.6538060903549194\n", - "Epoch [15/30], Step [240/250], Loss: 0.7314434051513672\n", - "Epoch [15/30], Step [250/250], Loss: 0.7135218381881714\n", - "Epoch [16/30], Step [10/250], Loss: 0.7383496761322021\n", - "Epoch [16/30], Step [20/250], Loss: 0.644036591053009\n", - "Epoch [16/30], Step [30/250], Loss: 0.6101108193397522\n", - "Epoch [16/30], Step [40/250], Loss: 0.7390760779380798\n", - "Epoch [16/30], Step [50/250], Loss: 0.6870918273925781\n", - "Epoch [16/30], Step [60/250], Loss: 0.6894906759262085\n", - "Epoch [16/30], Step [70/250], Loss: 0.7674188017845154\n", - "Epoch [16/30], Step [80/250], Loss: 0.7476275563240051\n", - "Epoch [16/30], Step [90/250], Loss: 0.7009009718894958\n", - "Epoch [16/30], Step [100/250], Loss: 0.6951045989990234\n", - "Epoch [16/30], Step [110/250], Loss: 0.7023512721061707\n", - "Epoch [16/30], Step [120/250], Loss: 0.6900476217269897\n", - "Epoch [16/30], Step [130/250], Loss: 0.7070642709732056\n", - "Epoch [16/30], Step [140/250], Loss: 0.6627304553985596\n", - "Epoch [16/30], Step [150/250], Loss: 0.676548182964325\n", - "Epoch [16/30], Step [160/250], Loss: 0.7038763761520386\n", - "Epoch [16/30], Step [170/250], Loss: 0.6916297078132629\n", - "Epoch [16/30], Step [180/250], Loss: 0.7028259634971619\n", - "Epoch [16/30], Step [190/250], Loss: 0.6524210572242737\n", - "Epoch [16/30], Step [200/250], Loss: 0.7346513867378235\n", - "Epoch [16/30], Step [210/250], Loss: 0.612514317035675\n", - "Epoch [16/30], Step [220/250], Loss: 0.7455917596817017\n", - "Epoch [16/30], Step [230/250], Loss: 0.747292160987854\n", - "Epoch [16/30], Step [240/250], Loss: 0.7447240352630615\n", - "Epoch [16/30], Step [250/250], Loss: 0.6769564747810364\n", - "Epoch [17/30], Step [10/250], Loss: 0.7425077557563782\n", - "Epoch [17/30], Step [20/250], Loss: 0.6944329738616943\n", - "Epoch [17/30], Step [30/250], Loss: 0.6961978673934937\n", - "Epoch [17/30], Step 
[40/250], Loss: 0.6465986967086792\n", - "Epoch [17/30], Step [50/250], Loss: 0.714703381061554\n", - "Epoch [17/30], Step [60/250], Loss: 0.5930614471435547\n", - "Epoch [17/30], Step [70/250], Loss: 0.6468428373336792\n", - "Epoch [17/30], Step [80/250], Loss: 0.686537504196167\n", - "Epoch [17/30], Step [90/250], Loss: 0.7371711730957031\n", - "Epoch [17/30], Step [100/250], Loss: 0.7700399160385132\n", - "Epoch [17/30], Step [110/250], Loss: 0.7529278993606567\n", - "Epoch [17/30], Step [120/250], Loss: 0.7036042213439941\n", - "Epoch [17/30], Step [130/250], Loss: 0.7871543765068054\n", - "Epoch [17/30], Step [140/250], Loss: 0.6956086158752441\n", - "Epoch [17/30], Step [150/250], Loss: 0.7426921725273132\n", - "Epoch [17/30], Step [160/250], Loss: 0.7222756743431091\n", - "Epoch [17/30], Step [170/250], Loss: 0.6826121807098389\n", - "Epoch [17/30], Step [180/250], Loss: 0.6970388293266296\n", - "Epoch [17/30], Step [190/250], Loss: 0.7087472677230835\n", - "Epoch [17/30], Step [200/250], Loss: 0.6320711374282837\n", - "Epoch [17/30], Step [210/250], Loss: 0.7280303835868835\n", - "Epoch [17/30], Step [220/250], Loss: 0.6934517621994019\n", - "Epoch [17/30], Step [230/250], Loss: 0.7071420550346375\n", - "Epoch [17/30], Step [240/250], Loss: 0.6856362223625183\n", - "Epoch [17/30], Step [250/250], Loss: 0.6945990324020386\n", - "Epoch [18/30], Step [10/250], Loss: 0.6465855240821838\n", - "Epoch [18/30], Step [20/250], Loss: 0.7086865901947021\n", - "Epoch [18/30], Step [30/250], Loss: 0.6256162524223328\n", - "Epoch [18/30], Step [40/250], Loss: 0.6532611846923828\n", - "Epoch [18/30], Step [50/250], Loss: 0.6484596729278564\n", - "Epoch [18/30], Step [60/250], Loss: 0.6955176591873169\n", - "Epoch [18/30], Step [70/250], Loss: 0.6615030765533447\n", - "Epoch [18/30], Step [80/250], Loss: 0.7038217186927795\n", - "Epoch [18/30], Step [90/250], Loss: 0.6943345069885254\n", - "Epoch [18/30], Step [100/250], Loss: 0.7004052996635437\n", - "Epoch [18/30], Step 
[110/250], Loss: 0.7458634972572327\n", - "Epoch [18/30], Step [120/250], Loss: 0.6851629614830017\n", - "Epoch [18/30], Step [130/250], Loss: 0.682853102684021\n", - "Epoch [18/30], Step [140/250], Loss: 0.6481672525405884\n", - "Epoch [18/30], Step [150/250], Loss: 0.7038549780845642\n", - "Epoch [18/30], Step [160/250], Loss: 0.6995554566383362\n", - "Epoch [18/30], Step [170/250], Loss: 0.6800370216369629\n", - "Epoch [18/30], Step [180/250], Loss: 0.6488386392593384\n", - "Epoch [18/30], Step [190/250], Loss: 0.7000787854194641\n", - "Epoch [18/30], Step [200/250], Loss: 0.7428950071334839\n", - "Epoch [18/30], Step [210/250], Loss: 0.6872988343238831\n", - "Epoch [18/30], Step [220/250], Loss: 0.6482336521148682\n", - "Epoch [18/30], Step [230/250], Loss: 0.6626957058906555\n", - "Epoch [18/30], Step [240/250], Loss: 0.6778802275657654\n", - "Epoch [18/30], Step [250/250], Loss: 0.7027387022972107\n", - "Epoch [19/30], Step [10/250], Loss: 0.6812503933906555\n", - "Epoch [19/30], Step [20/250], Loss: 0.6751934289932251\n", - "Epoch [19/30], Step [30/250], Loss: 0.6624279618263245\n", - "Epoch [19/30], Step [40/250], Loss: 0.6787773966789246\n", - "Epoch [19/30], Step [50/250], Loss: 0.7765601873397827\n", - "Epoch [19/30], Step [60/250], Loss: 0.6592363119125366\n", - "Epoch [19/30], Step [70/250], Loss: 0.7038179039955139\n", - "Epoch [19/30], Step [80/250], Loss: 0.7358537316322327\n", - "Epoch [19/30], Step [90/250], Loss: 0.708828330039978\n", - "Epoch [19/30], Step [100/250], Loss: 0.7642552852630615\n", - "Epoch [19/30], Step [110/250], Loss: 0.7605912089347839\n", - "Epoch [19/30], Step [120/250], Loss: 0.6976773738861084\n", - "Epoch [19/30], Step [130/250], Loss: 0.6766220331192017\n", - "Epoch [19/30], Step [140/250], Loss: 0.7171740531921387\n", - "Epoch [19/30], Step [150/250], Loss: 0.6521143913269043\n", - "Epoch [19/30], Step [160/250], Loss: 0.6554864645004272\n", - "Epoch [19/30], Step [170/250], Loss: 0.6797289848327637\n", - "Epoch [19/30], 
Step [180/250], Loss: 0.6546230316162109\n", - "Epoch [19/30], Step [190/250], Loss: 0.6951708197593689\n", - "Epoch [19/30], Step [200/250], Loss: 0.7692861557006836\n", - "Epoch [19/30], Step [210/250], Loss: 0.6987319588661194\n", - "Epoch [19/30], Step [220/250], Loss: 0.7281709909439087\n", - "Epoch [19/30], Step [230/250], Loss: 0.6981549263000488\n", - "Epoch [19/30], Step [240/250], Loss: 0.6613932847976685\n", - "Epoch [19/30], Step [250/250], Loss: 0.6515719890594482\n", - "Epoch [20/30], Step [10/250], Loss: 0.683667004108429\n", - "Epoch [20/30], Step [20/250], Loss: 0.6330690383911133\n", - "Epoch [20/30], Step [30/250], Loss: 0.6992578506469727\n", - "Epoch [20/30], Step [40/250], Loss: 0.7081963419914246\n", - "Epoch [20/30], Step [50/250], Loss: 0.7147829532623291\n", - "Epoch [20/30], Step [60/250], Loss: 0.6547238826751709\n", - "Epoch [20/30], Step [70/250], Loss: 0.627391517162323\n", - "Epoch [20/30], Step [80/250], Loss: 0.6972628831863403\n", - "Epoch [20/30], Step [90/250], Loss: 0.6500757932662964\n", - "Epoch [20/30], Step [100/250], Loss: 0.7282431125640869\n", - "Epoch [20/30], Step [110/250], Loss: 0.6599644422531128\n", - "Epoch [20/30], Step [120/250], Loss: 0.691277265548706\n", - "Epoch [20/30], Step [130/250], Loss: 0.6712023019790649\n", - "Epoch [20/30], Step [140/250], Loss: 0.6875613927841187\n", - "Epoch [20/30], Step [150/250], Loss: 0.6852554082870483\n", - "Epoch [20/30], Step [160/250], Loss: 0.7059615850448608\n", - "Epoch [20/30], Step [170/250], Loss: 0.7474350333213806\n", - "Epoch [20/30], Step [180/250], Loss: 0.6700282096862793\n", - "Epoch [20/30], Step [190/250], Loss: 0.7267058491706848\n", - "Epoch [20/30], Step [200/250], Loss: 0.6795942783355713\n", - "Epoch [20/30], Step [210/250], Loss: 0.7355214953422546\n", - "Epoch [20/30], Step [220/250], Loss: 0.7097989320755005\n", - "Epoch [20/30], Step [230/250], Loss: 0.6741981506347656\n", - "Epoch [20/30], Step [240/250], Loss: 0.7197920680046082\n", - "Epoch 
[20/30], Step [250/250], Loss: 0.6666856408119202\n", - "Epoch [21/30], Step [10/250], Loss: 0.6850540637969971\n", - "Epoch [21/30], Step [20/250], Loss: 0.6577891111373901\n", - "Epoch [21/30], Step [30/250], Loss: 0.7145082354545593\n", - "Epoch [21/30], Step [40/250], Loss: 0.6782787442207336\n", - "Epoch [21/30], Step [50/250], Loss: 0.7092875242233276\n", - "Epoch [21/30], Step [60/250], Loss: 0.6552045941352844\n", - "Epoch [21/30], Step [70/250], Loss: 0.665422260761261\n", - "Epoch [21/30], Step [80/250], Loss: 0.7131606340408325\n", - "Epoch [21/30], Step [90/250], Loss: 0.6851215362548828\n", - "Epoch [21/30], Step [100/250], Loss: 0.7093809843063354\n", - "Epoch [21/30], Step [110/250], Loss: 0.6839103698730469\n", - "Epoch [21/30], Step [120/250], Loss: 0.6863808035850525\n", - "Epoch [21/30], Step [130/250], Loss: 0.6923962831497192\n", - "Epoch [21/30], Step [140/250], Loss: 0.7143585085868835\n", - "Epoch [21/30], Step [150/250], Loss: 0.7165741324424744\n", - "Epoch [21/30], Step [160/250], Loss: 0.7011140584945679\n", - "Epoch [21/30], Step [170/250], Loss: 0.7145777344703674\n", - "Epoch [21/30], Step [180/250], Loss: 0.6781455278396606\n", - "Epoch [21/30], Step [190/250], Loss: 0.704175591468811\n", - "Epoch [21/30], Step [200/250], Loss: 0.6643280982971191\n", - "Epoch [21/30], Step [210/250], Loss: 0.7143128514289856\n", - "Epoch [21/30], Step [220/250], Loss: 0.7122169137001038\n", - "Epoch [21/30], Step [230/250], Loss: 0.7329443693161011\n", - "Epoch [21/30], Step [240/250], Loss: 0.7038950324058533\n", - "Epoch [21/30], Step [250/250], Loss: 0.683397114276886\n", - "Epoch [22/30], Step [10/250], Loss: 0.6960069537162781\n", - "Epoch [22/30], Step [20/250], Loss: 0.6595947742462158\n", - "Epoch [22/30], Step [30/250], Loss: 0.7287018895149231\n", - "Epoch [22/30], Step [40/250], Loss: 0.7046036720275879\n", - "Epoch [22/30], Step [50/250], Loss: 0.7062811255455017\n", - "Epoch [22/30], Step [60/250], Loss: 0.7442296743392944\n", - "Epoch 
[22/30], Step [70/250], Loss: 0.6482053399085999\n", - "Epoch [22/30], Step [80/250], Loss: 0.722833514213562\n", - "Epoch [22/30], Step [90/250], Loss: 0.6747336387634277\n", - "Epoch [22/30], Step [100/250], Loss: 0.7139792442321777\n", - "Epoch [22/30], Step [110/250], Loss: 0.680081844329834\n", - "Epoch [22/30], Step [120/250], Loss: 0.686549186706543\n", - "Epoch [22/30], Step [130/250], Loss: 0.6854720115661621\n", - "Epoch [22/30], Step [140/250], Loss: 0.6525530815124512\n", - "Epoch [22/30], Step [150/250], Loss: 0.6676555871963501\n", - "Epoch [22/30], Step [160/250], Loss: 0.7014628052711487\n", - "Epoch [22/30], Step [170/250], Loss: 0.7186480760574341\n", - "Epoch [22/30], Step [180/250], Loss: 0.6748342514038086\n", - "Epoch [22/30], Step [190/250], Loss: 0.7034397125244141\n", - "Epoch [22/30], Step [200/250], Loss: 0.6637327075004578\n", - "Epoch [22/30], Step [210/250], Loss: 0.6852638125419617\n", - "Epoch [22/30], Step [220/250], Loss: 0.6631066203117371\n", - "Epoch [22/30], Step [230/250], Loss: 0.7248471975326538\n", - "Epoch [22/30], Step [240/250], Loss: 0.7282781004905701\n", - "Epoch [22/30], Step [250/250], Loss: 0.678613007068634\n", - "Epoch [23/30], Step [10/250], Loss: 0.6844161748886108\n", - "Epoch [23/30], Step [20/250], Loss: 0.6881325244903564\n", - "Epoch [23/30], Step [30/250], Loss: 0.6631232500076294\n", - "Epoch [23/30], Step [40/250], Loss: 0.7202731370925903\n", - "Epoch [23/30], Step [50/250], Loss: 0.6977999210357666\n", - "Epoch [23/30], Step [60/250], Loss: 0.7103397846221924\n", - "Epoch [23/30], Step [70/250], Loss: 0.6726264953613281\n", - "Epoch [23/30], Step [80/250], Loss: 0.6642501354217529\n", - "Epoch [23/30], Step [90/250], Loss: 0.7357184886932373\n", - "Epoch [23/30], Step [100/250], Loss: 0.7160366773605347\n", - "Epoch [23/30], Step [110/250], Loss: 0.6603021621704102\n", - "Epoch [23/30], Step [120/250], Loss: 0.6760040521621704\n", - "Epoch [23/30], Step [130/250], Loss: 0.696141242980957\n", - "Epoch 
[23/30], Step [140/250], Loss: 0.6645365357398987\n", - "Epoch [23/30], Step [150/250], Loss: 0.7011918425559998\n", - "Epoch [23/30], Step [160/250], Loss: 0.6758050322532654\n", - "Epoch [23/30], Step [170/250], Loss: 0.6683043837547302\n", - "Epoch [23/30], Step [180/250], Loss: 0.6827936172485352\n", - "Epoch [23/30], Step [190/250], Loss: 0.699557900428772\n", - "Epoch [23/30], Step [200/250], Loss: 0.6873543858528137\n", - "Epoch [23/30], Step [210/250], Loss: 0.6973046064376831\n", - "Epoch [23/30], Step [220/250], Loss: 0.6847941279411316\n", - "Epoch [23/30], Step [230/250], Loss: 0.686026930809021\n", - "Epoch [23/30], Step [240/250], Loss: 0.712138831615448\n", - "Epoch [23/30], Step [250/250], Loss: 0.6938803791999817\n", - "Epoch [24/30], Step [10/250], Loss: 0.6833834648132324\n", - "Epoch [24/30], Step [20/250], Loss: 0.7029370069503784\n", - "Epoch [24/30], Step [30/250], Loss: 0.6896952390670776\n", - "Epoch [24/30], Step [40/250], Loss: 0.6966062784194946\n", - "Epoch [24/30], Step [50/250], Loss: 0.6755800247192383\n", - "Epoch [24/30], Step [60/250], Loss: 0.6890952587127686\n", - "Epoch [24/30], Step [70/250], Loss: 0.6705589294433594\n", - "Epoch [24/30], Step [80/250], Loss: 0.7066176533699036\n", - "Epoch [24/30], Step [90/250], Loss: 0.758873701095581\n", - "Epoch [24/30], Step [100/250], Loss: 0.699566125869751\n", - "Epoch [24/30], Step [110/250], Loss: 0.7008506059646606\n", - "Epoch [24/30], Step [120/250], Loss: 0.686880350112915\n", - "Epoch [24/30], Step [130/250], Loss: 0.6831185817718506\n", - "Epoch [24/30], Step [140/250], Loss: 0.6989403963088989\n", - "Epoch [24/30], Step [150/250], Loss: 0.7022895812988281\n", - "Epoch [24/30], Step [160/250], Loss: 0.7047298550605774\n", - "Epoch [24/30], Step [170/250], Loss: 0.6803637742996216\n", - "Epoch [24/30], Step [180/250], Loss: 0.6698098182678223\n", - "Epoch [24/30], Step [190/250], Loss: 0.6965357661247253\n", - "Epoch [24/30], Step [200/250], Loss: 0.7183314561843872\n", - 
"Epoch [24/30], Step [210/250], Loss: 0.7083855271339417\n", - "Epoch [24/30], Step [220/250], Loss: 0.688880205154419\n", - "Epoch [24/30], Step [230/250], Loss: 0.6859614253044128\n", - "Epoch [24/30], Step [240/250], Loss: 0.6815621852874756\n", - "Epoch [24/30], Step [250/250], Loss: 0.7023071050643921\n", - "Epoch [25/30], Step [10/250], Loss: 0.6979001760482788\n", - "Epoch [25/30], Step [20/250], Loss: 0.6792093515396118\n", - "Epoch [25/30], Step [30/250], Loss: 0.7000377178192139\n", - "Epoch [25/30], Step [40/250], Loss: 0.6891401410102844\n", - "Epoch [25/30], Step [50/250], Loss: 0.6950706839561462\n", - "Epoch [25/30], Step [60/250], Loss: 0.6931962966918945\n", - "Epoch [25/30], Step [70/250], Loss: 0.6918748021125793\n", - "Epoch [25/30], Step [80/250], Loss: 0.7022840976715088\n", - "Epoch [25/30], Step [90/250], Loss: 0.7233110666275024\n", - "Epoch [25/30], Step [100/250], Loss: 0.6882573366165161\n", - "Epoch [25/30], Step [110/250], Loss: 0.6959525346755981\n", - "Epoch [25/30], Step [120/250], Loss: 0.6953780651092529\n", - "Epoch [25/30], Step [130/250], Loss: 0.7029913067817688\n", - "Epoch [25/30], Step [140/250], Loss: 0.7104859948158264\n", - "Epoch [25/30], Step [150/250], Loss: 0.6983399391174316\n", - "Epoch [25/30], Step [160/250], Loss: 0.6920713186264038\n", - "Epoch [25/30], Step [170/250], Loss: 0.7179511189460754\n", - "Epoch [25/30], Step [180/250], Loss: 0.6971415281295776\n", - "Epoch [25/30], Step [190/250], Loss: 0.7037041783332825\n", - "Epoch [25/30], Step [200/250], Loss: 0.6952695846557617\n", - "Epoch [25/30], Step [210/250], Loss: 0.7007227540016174\n", - "Epoch [25/30], Step [220/250], Loss: 0.686070442199707\n", - "Epoch [25/30], Step [230/250], Loss: 0.692324161529541\n", - "Epoch [25/30], Step [240/250], Loss: 0.6936407089233398\n", - "Epoch [25/30], Step [250/250], Loss: 0.6896817088127136\n", - "Epoch [26/30], Step [10/250], Loss: 0.7085744142532349\n", - "Epoch [26/30], Step [20/250], Loss: 0.6863793730735779\n", 
- "Epoch [26/30], Step [30/250], Loss: 0.6817866563796997\n", - "Epoch [26/30], Step [40/250], Loss: 0.7037662267684937\n", - "Epoch [26/30], Step [50/250], Loss: 0.7046667337417603\n", - "Epoch [26/30], Step [60/250], Loss: 0.6918007135391235\n", - "Epoch [26/30], Step [70/250], Loss: 0.713044285774231\n", - "Epoch [26/30], Step [80/250], Loss: 0.6832862496376038\n", - "Epoch [26/30], Step [90/250], Loss: 0.667504608631134\n", - "Epoch [26/30], Step [100/250], Loss: 0.6760569214820862\n", - "Epoch [26/30], Step [110/250], Loss: 0.707482099533081\n", - "Epoch [26/30], Step [120/250], Loss: 0.6977518200874329\n", - "Epoch [26/30], Step [130/250], Loss: 0.6955530047416687\n", - "Epoch [26/30], Step [140/250], Loss: 0.7124805450439453\n", - "Epoch [26/30], Step [150/250], Loss: 0.6924611330032349\n", - "Epoch [26/30], Step [160/250], Loss: 0.6965060234069824\n", - "Epoch [26/30], Step [170/250], Loss: 0.6868378520011902\n", - "Epoch [26/30], Step [180/250], Loss: 0.7103825807571411\n", - "Epoch [26/30], Step [190/250], Loss: 0.6711806654930115\n", - "Epoch [26/30], Step [200/250], Loss: 0.6948347091674805\n", - "Epoch [26/30], Step [210/250], Loss: 0.7058894634246826\n", - "Epoch [26/30], Step [220/250], Loss: 0.6947336196899414\n", - "Epoch [26/30], Step [230/250], Loss: 0.689943253993988\n", - "Epoch [26/30], Step [240/250], Loss: 0.6956008672714233\n", - "Epoch [26/30], Step [250/250], Loss: 0.6892440319061279\n", - "Epoch [27/30], Step [10/250], Loss: 0.6945648193359375\n", - "Epoch [27/30], Step [20/250], Loss: 0.697243332862854\n", - "Epoch [27/30], Step [30/250], Loss: 0.6995589137077332\n", - "Epoch [27/30], Step [40/250], Loss: 0.6961522698402405\n", - "Epoch [27/30], Step [50/250], Loss: 0.7141368389129639\n", - "Epoch [27/30], Step [60/250], Loss: 0.6883167028427124\n", - "Epoch [27/30], Step [70/250], Loss: 0.681597888469696\n", - "Epoch [27/30], Step [80/250], Loss: 0.6933290362358093\n", - "Epoch [27/30], Step [90/250], Loss: 0.6990853548049927\n", - 
"Epoch [27/30], Step [100/250], Loss: 0.6930828094482422\n", - "Epoch [27/30], Step [110/250], Loss: 0.6889819502830505\n", - "Epoch [27/30], Step [120/250], Loss: 0.6966762542724609\n", - "Epoch [27/30], Step [130/250], Loss: 0.7014245986938477\n", - "Epoch [27/30], Step [140/250], Loss: 0.7081984281539917\n", - "Epoch [27/30], Step [150/250], Loss: 0.6894259452819824\n", - "Epoch [27/30], Step [160/250], Loss: 0.695622444152832\n", - "Epoch [27/30], Step [170/250], Loss: 0.6961721181869507\n", - "Epoch [27/30], Step [180/250], Loss: 0.6897941827774048\n", - "Epoch [27/30], Step [190/250], Loss: 0.6890014410018921\n", - "Epoch [27/30], Step [200/250], Loss: 0.6775841116905212\n", - "Epoch [27/30], Step [210/250], Loss: 0.6889995336532593\n", - "Epoch [27/30], Step [220/250], Loss: 0.6887487769126892\n", - "Epoch [27/30], Step [230/250], Loss: 0.6713950037956238\n", - "Epoch [27/30], Step [240/250], Loss: 0.6815714836120605\n", - "Epoch [27/30], Step [250/250], Loss: 0.6999087333679199\n", - "Epoch [28/30], Step [10/250], Loss: 0.7005322575569153\n", - "Epoch [28/30], Step [20/250], Loss: 0.6854400634765625\n", - "Epoch [28/30], Step [30/250], Loss: 0.7016850113868713\n", - "Epoch [28/30], Step [40/250], Loss: 0.6971641182899475\n", - "Epoch [28/30], Step [50/250], Loss: 0.6831482648849487\n", - "Epoch [28/30], Step [60/250], Loss: 0.6957387924194336\n", - "Epoch [28/30], Step [70/250], Loss: 0.6991732716560364\n", - "Epoch [28/30], Step [80/250], Loss: 0.6832884550094604\n", - "Epoch [28/30], Step [90/250], Loss: 0.6862078309059143\n", - "Epoch [28/30], Step [100/250], Loss: 0.7001485824584961\n", - "Epoch [28/30], Step [110/250], Loss: 0.686698317527771\n", - "Epoch [28/30], Step [120/250], Loss: 0.6935960054397583\n", - "Epoch [28/30], Step [130/250], Loss: 0.6797569990158081\n", - "Epoch [28/30], Step [140/250], Loss: 0.6913435459136963\n", - "Epoch [28/30], Step [150/250], Loss: 0.7099695205688477\n", - "Epoch [28/30], Step [160/250], Loss: 
0.6739814877510071\n", - "Epoch [28/30], Step [170/250], Loss: 0.691004753112793\n", - "Epoch [28/30], Step [180/250], Loss: 0.6871265172958374\n", - "Epoch [28/30], Step [190/250], Loss: 0.6769859790802002\n", - "Epoch [28/30], Step [200/250], Loss: 0.6753854751586914\n", - "Epoch [28/30], Step [210/250], Loss: 0.6798712015151978\n", - "Epoch [28/30], Step [220/250], Loss: 0.6959697008132935\n", - "Epoch [28/30], Step [230/250], Loss: 0.6912880539894104\n", - "Epoch [28/30], Step [240/250], Loss: 0.7011526823043823\n", - "Epoch [28/30], Step [250/250], Loss: 0.6955965757369995\n", - "Epoch [29/30], Step [10/250], Loss: 0.700312077999115\n", - "Epoch [29/30], Step [20/250], Loss: 0.688980758190155\n", - "Epoch [29/30], Step [30/250], Loss: 0.687660813331604\n", - "Epoch [29/30], Step [40/250], Loss: 0.6973135471343994\n", - "Epoch [29/30], Step [50/250], Loss: 0.7041200995445251\n", - "Epoch [29/30], Step [60/250], Loss: 0.6702690720558167\n", - "Epoch [29/30], Step [70/250], Loss: 0.695311427116394\n", - "Epoch [29/30], Step [80/250], Loss: 0.7089749574661255\n", - "Epoch [29/30], Step [90/250], Loss: 0.6968417763710022\n", - "Epoch [29/30], Step [100/250], Loss: 0.6854453086853027\n", - "Epoch [29/30], Step [110/250], Loss: 0.6853547096252441\n", - "Epoch [29/30], Step [120/250], Loss: 0.6865882277488708\n", - "Epoch [29/30], Step [130/250], Loss: 0.6883337497711182\n", - "Epoch [29/30], Step [140/250], Loss: 0.705528974533081\n", - "Epoch [29/30], Step [150/250], Loss: 0.6866053938865662\n", - "Epoch [29/30], Step [160/250], Loss: 0.6900249123573303\n", - "Epoch [29/30], Step [170/250], Loss: 0.6984312534332275\n", - "Epoch [29/30], Step [180/250], Loss: 0.7001223564147949\n", - "Epoch [29/30], Step [190/250], Loss: 0.6993950605392456\n", - "Epoch [29/30], Step [200/250], Loss: 0.6955195069313049\n", - "Epoch [29/30], Step [210/250], Loss: 0.7174205183982849\n", - "Epoch [29/30], Step [220/250], Loss: 0.6770732998847961\n", - "Epoch [29/30], Step [230/250], 
Loss: 0.6760091781616211\n", - "Epoch [29/30], Step [240/250], Loss: 0.6769121885299683\n", - "Epoch [29/30], Step [250/250], Loss: 0.7050588130950928\n", - "Epoch [30/30], Step [10/250], Loss: 0.6745777130126953\n", - "Epoch [30/30], Step [20/250], Loss: 0.6881678104400635\n", - "Epoch [30/30], Step [30/250], Loss: 0.6794246435165405\n", - "Epoch [30/30], Step [40/250], Loss: 0.7122002840042114\n", - "Epoch [30/30], Step [50/250], Loss: 0.698681116104126\n", - "Epoch [30/30], Step [60/250], Loss: 0.7196323871612549\n", - "Epoch [30/30], Step [70/250], Loss: 0.6916103363037109\n", - "Epoch [30/30], Step [80/250], Loss: 0.6879148483276367\n", - "Epoch [30/30], Step [90/250], Loss: 0.7075177431106567\n", - "Epoch [30/30], Step [100/250], Loss: 0.6686447858810425\n", - "Epoch [30/30], Step [110/250], Loss: 0.7030155062675476\n", - "Epoch [30/30], Step [120/250], Loss: 0.7014066576957703\n", - "Epoch [30/30], Step [130/250], Loss: 0.7121413946151733\n", - "Epoch [30/30], Step [140/250], Loss: 0.6912719011306763\n", - "Epoch [30/30], Step [150/250], Loss: 0.6733638048171997\n", - "Epoch [30/30], Step [160/250], Loss: 0.7193289399147034\n", - "Epoch [30/30], Step [170/250], Loss: 0.6880522966384888\n", - "Epoch [30/30], Step [180/250], Loss: 0.7069193720817566\n", - "Epoch [30/30], Step [190/250], Loss: 0.6976951360702515\n", - "Epoch [30/30], Step [200/250], Loss: 0.6925494074821472\n", - "Epoch [30/30], Step [210/250], Loss: 0.6907849907875061\n", - "Epoch [30/30], Step [220/250], Loss: 0.6824172735214233\n", - "Epoch [30/30], Step [230/250], Loss: 0.6865588426589966\n", - "Epoch [30/30], Step [240/250], Loss: 0.6921617984771729\n", - "Epoch [30/30], Step [250/250], Loss: 0.6736024618148804\n", - "\n", - "Training job (trainu90lc57j1vm) succeeded, you can check the logs/metrics/output in the console:\n", - "https://pai.console.aliyun.com/?regionId=cn-hangzhou&workspaceId=58670#/training/jobs/trainu90lc57j1vm\n" - ] - } - ], - "source": [ - "from pai.estimator import 
Estimator\n", - "from pai.image import retrieve\n", - "\n", - "\n", - "# 训练数据的总迭代次数为30\n", - "epochs = 30\n", - "\n", - "resume_est = Estimator(\n", - " command=\"python train.py --epochs {}\".format(epochs),\n", - " source_dir=\"./train_src/\",\n", - " image_uri=retrieve(\"PyTorch\", \"latest\").image_uri,\n", - " instance_type=\"ecs.c6.large\",\n", - " # 使用上一个训练作业的checkpoints,相应的OSS Bucket路径会被挂载到 /ml/output/checkpoints 路径下\n", - " checkpoints_path=est.checkpoints_data(),\n", - " base_job_name=\"torch_resume_checkpoint\",\n", - ")\n", - "\n", - "resume_est.fit()" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "通过训练作业日志的,我们可以看到训练作业加载了之前训练作业的checkpoint,在此基础上,从第11个epoch开始继续训练。" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "## 结语\n", - "\n", - "本文以`PyTorch`为示例,介绍了如何在PAI的训练作业中使用`checkpoint`:训练代码可以通过`/ml/output/checkpoints/`路径保存和加载`checkpoints`文件,`checkpoints`文件将被保存到OSS Bucket上。当用户使用其他的训练框架,例如`TensorFlow`、`HuggingFace transformers`、`ModelScope`等,也可以通过类似的方式在PAI的训练作业中使用`checkpoint`。\n" - ] - } - ], - "metadata": { - "kernelspec": { - "display_name": "base", - "language": "python", - "name": "python3" - }, - "language_info": { - "codemirror_mode": { - "name": "ipython", - "version": 3 - }, - "file_extension": ".py", - "mimetype": "text/x-python", - "name": "python", - "nbconvert_exporter": "python", - "pygments_lexer": "ipython3", - "version": "3.8.3" - } - }, - "nbformat": 4, - "nbformat_minor": 2 -} diff --git a/docs/source/tutorial/framework.rst b/docs/source/tutorial/framework.rst deleted file mode 100644 index 48fbbb2..0000000 --- a/docs/source/tutorial/framework.rst +++ /dev/null @@ -1,17 +0,0 @@ -=========================================== -机器学习框架 -=========================================== - - - - -.. 
toctree:: - :maxdepth: 1 - :caption: 示例教程 - - - 训练和部署PyTorch模型 - 训练和部署XGBoost模型 - 训练和部署Tensorflow模型 - 基于HuggingFace BERT训练和部署文本分类模型 - 使用ModelScope ViT训练和部署图片分类模型 diff --git a/docs/source/tutorial/huggingface_bert/huggingface_bert.ipynb b/docs/source/tutorial/huggingface_bert/huggingface_bert.ipynb deleted file mode 100644 index cb90726..0000000 --- a/docs/source/tutorial/huggingface_bert/huggingface_bert.ipynb +++ /dev/null @@ -1,848 +0,0 @@ -{ - "cells": [ - { - "attachments": {}, - "cell_type": "markdown", - "id": "bb57c39e-16f6-4f84-b071-7751bd01b4c4", - "metadata": { - "ExecutionIndicator": { - "show": true - }, - "tags": [] - }, - "source": [ - "# HuggingFace BERT模型部署和微调训练\n", - "\n", - "[HuggingFace](https://huggingface.co/) 是一个开源开放的AI社区平台,允许用户共享自己的AI项目、数据集和模型,同时也为用户提供了各种机器学习工具,包括`transformers`、`diffusers`、`accelerate`等。通过HuggingFace社区,用户可以轻松地构建和训练自己的模型,并将其应用于各种实际场景中。\n", - "\n", - "当前文档中,我们以HuggingFace提供的[BERT预训练模型-英文-base](https://huggingface.co/bert-base-uncased)预训练模型为示例,展示如何在PAI微调训练和部署BERT模型,主要内容包括以下:\n", - "\n", - "1. SDK安装和配置:\n", - "\n", - "安装所需的SDK,并完成PAI Python SDK配置。\n", - "\n", - "2. 直接部署BERT模型创建推理服务\n", - "\n", - "将HuggingFace上的BERT模型直接模型部署到PAI-EAS,创建一个在线推理服务。\n", - "\n", - "3. 
使用BERT模型微调训练\n", - "\n", - "基于BERT模型,我们使用公共数据集进行微调训练,以获得一个可以用于情感分类的模型,然后将输出的模型部署到PAI-EAS,创建一个在线推理服务。\n" - ] - }, - { - "attachments": {}, - "cell_type": "markdown", - "id": "73692608-6d3f-4551-9eeb-e169bfa93799", - "metadata": {}, - "source": [ - "## Step1: SDK的安装配置\n", - "\n", - "我们将使用PAI提供的Python SDK,提交训练作业,部署模型。请通过以下命令安装PAI Python SDK,以及需要使用到的Huggingface datasets等依赖库。" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "c09a58a3-7cf9-43ac-b386-3bafffbf6321", - "metadata": { - "ExecutionIndicator": { - "show": true - }, - "tags": [ - "skip-execution" - ] - }, - "outputs": [], - "source": [ - "!python -m pip install --upgrade alipai" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "3bee87d5", - "metadata": {}, - "outputs": [], - "source": [ - "\n", - "!python -m pip install datasets huggingface_hub" - ] - }, - { - "attachments": {}, - "cell_type": "markdown", - "id": "5212ae4f-cb05-45a0-82f1-3d1cd89be38b", - "metadata": {}, - "source": [ - "\n", - "SDK需要配置访问阿里云服务需要的AccessKey,以及当前使用的工作空间和OSS Bucket。在PAI Python SDK安装之后,通过在**命令行终端**中执行以下命令,按照引导配置密钥,工作空间等信息。\n", - "\n", - "\n", - "```shell\n", - "\n", - "# 以下命令,请在命令行终端中执行.\n", - "\n", - "python -m pai.toolkit.config\n", - "\n", - "```\n", - "\n", - "我们可以通过执行以下代码验证当前的配置是否成功。" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "55bcb9aa-58ee-47a0-9656-446c5bf67845", - "metadata": { - "ExecutionIndicator": { - "show": true - }, - "tags": [] - }, - "outputs": [], - "source": [ - "import pai\n", - "from pai.session import get_default_session\n", - "\n", - "print(pai.__version__)\n", - "sess = get_default_session()\n", - "\n", - "assert sess.workspace_name is not None" - ] - }, - { - "attachments": {}, - "cell_type": "markdown", - "id": "5952d3b9", - "metadata": {}, - "source": [ - "## Step2: 部署BERT模型创建推理服务\n", - "\n", - "\n", - "[PAI-EAS](https://www.aliyun.com/activity/bigdata/pai/eas) (Elastic Algorithm Service) 
是PAI平台上的模型在线预测服务,支持使用镜像模式部署模型,并且提供了常见的机器学习框架的推理镜像。 在以下示例中,我们将使用PAI-EAS提供的镜像,将HuggingFace上的BERT模型直接部署到PAI,创建一个在线推理服务。\n", - "\n", - "[BERT](https://arxiv.org/abs/1810.04805)是Google提出的一种预训练语言模型,使用自监督学习方法在大型英文语料库上进行训练。他可以直接用于\"完形填空\"的任务,也可以作为下游任务的预训练模型,通过微调训练,用于分类,问答等不同的任务。我们通过以下代码下载HuggingFace提供的BERT模型,用于创建一个支持“完形填空”的推理服务。\n", - "\n", - "> 对于如何在离线模式下保存和使用HuggingFace模型,用户可以参考HuggingFace的官方文档: [HuggingFace Offline Mode](https://huggingface.co/docs/transformers/installation#fetch-models-and-tokenizers-to-use-offline)" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "25ae41ce", - "metadata": {}, - "outputs": [], - "source": [ - "from huggingface_hub import snapshot_download\n", - "\n", - "\n", - "# 下载BERT模型(PyTorch版本)\n", - "model_dir = snapshot_download(\n", - " repo_id=\"bert-base-uncased\",\n", - " local_dir=\"./bert\",\n", - " allow_patterns=[\n", - " \"config.json\",\n", - " \"pytorch_model.bin\",\n", - " \"vocab.txt\",\n", - " \"tokenizer_config.json\",\n", - " \"tokenizer.json\",\n", - " ],\n", - ")" - ] - }, - { - "attachments": {}, - "cell_type": "markdown", - "id": "bf489d6a", - "metadata": {}, - "source": [ - "用户也可以通过以下的方式保存模型(需要用户在本地install`transformers`, `pytorch`等依赖库):\n", - "\n", - "```python\n", - "\n", - "from transformers import BertTokenizer, BertModel\n", - "\n", - "# 下载模型\n", - "tokenizer = BertTokenizer.from_pretrained('bert-base-uncased')\n", - "model = BertModel.from_pretrained(\"bert-base-uncased\")\n", - "\n", - "# 保存模型到本地路径\n", - "model_dir = \"./bert/\"\n", - "model.save_pretrained(model_dir)\n", - "tokenizer.save_pretrained(model_dir)\n", - "\n", - "```\n", - "\n", - "保存的模型,可以直接通过`transformers`库加载使用:\n", - "\n", - "```python\n", - "\n", - "from transformers import BertTokenizer, BertModel\n", - "\n", - "model = BertModel.from_pretrained(\"./bert/\")\n", - "tokenizer = BertTokenizer.from_pretrained(\"./bert/\")\n", - "\n", - "```\n" - ] - }, - { - "attachments": {}, - "cell_type": "markdown", - "id": "14acee39", - 
"metadata": {}, - "source": [ - "将保存在本地的BERT模型和tokenizer上传到OSS Bucket,拿到模型的OSS路径。" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "25debdca", - "metadata": {}, - "outputs": [], - "source": [ - "from pai.common.oss_utils import upload\n", - "\n", - "# 上传模型\n", - "bert_model_uri = upload(\n", - " source_path=model_dir, oss_path=\"huggingface/model/bert/\", bucket=sess.oss_bucket\n", - ")\n", - "print(bert_model_uri)" - ] - }, - { - "attachments": {}, - "cell_type": "markdown", - "id": "088cf89b", - "metadata": {}, - "source": [ - "\n", - "在部署模型之前,我们需要准备模型推理服务的代码,用于加载模型,提供HTTP服务。在以下示例中,我们使用[FastAPI](https://fastapi.tiangolo.com/)编写了一个简单的HTTP服务,用于加载模型,提供预测服务。" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "a524a388", - "metadata": {}, - "outputs": [], - "source": [ - "# 创建推理服务使用的代码\n", - "!mkdir -p serving_src" - ] - }, - { - "attachments": {}, - "cell_type": "markdown", - "id": "b804c39c", - "metadata": {}, - "source": [ - "完整的推理服务程序代码如下:" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "d8cd70ff", - "metadata": {}, - "outputs": [], - "source": [ - "%%writefile serving_src/run.py\n", - "\n", - "import os\n", - "import logging\n", - "\n", - "import uvicorn, json, datetime\n", - "from fastapi import FastAPI, Request\n", - "from transformers import pipeline, AutoTokenizer, AutoModelForSequenceClassification\n", - "\n", - "# 用户指定模型,默认会被加载到当前路径下\n", - "MODEL_PATH = \"/eas/workspace/model/\"\n", - "\n", - "logging.basicConfig(level=logging.INFO)\n", - "logger = logging.getLogger(\"model_server\")\n", - "\n", - "app = FastAPI()\n", - "\n", - "@app.post(\"/\")\n", - "async def predict(request: Request):\n", - " global bert_pipeline\n", - " json_data = await request.json()\n", - " logger.info(\"Input data: %s\", json_data)\n", - " result = bert_pipeline(json_data[\"text\"])\n", - " logger.info(\"Prediction result: %s\", result)\n", - " return result\n", - "\n", - "\n", - "if __name__ == '__main__':\n", - " 
task = os.environ.get(\"HF_TASK\", \"fill-mask\")\n", - " bert_pipeline = pipeline(task=task, model=MODEL_PATH, tokenizer=MODEL_PATH)\n", - "\n", - " uvicorn.run(app, host='0.0.0.0', port=int(os.environ.get(\"LISTENING_PORT\", 8000)))" - ] - }, - { - "attachments": {}, - "cell_type": "markdown", - "id": "77a3568b", - "metadata": {}, - "source": [ - "SDK 提供的 `pai.model.InferenceSpec` 用于描述如何加载模型,以及如何提供预测服务。在以下代码中,我们使用 `pai.model.container_serving_spec` 方法,使用 PAI 提供的推理镜像和本地代码 `serving_src`,创建一个 `InferenceSpec` 对象。对应的本地代码会被上传保存到用户OSS,然后通过挂载的方式将相应的代码准备到运行容器中。" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "ab6c3828", - "metadata": {}, - "outputs": [], - "source": [ - "from pai.model import Model, container_serving_spec\n", - "from pai.image import retrieve, ImageScope\n", - "\n", - "\n", - "# 使用 PAI 提供的 PyTorch CPU 推理镜像\n", - "image_uri = retrieve(\n", - " \"PyTorch\",\n", - " framework_version=\"latest\",\n", - " accelerator_type=\"CPU\",\n", - " image_scope=ImageScope.INFERENCE,\n", - ").image_uri\n", - "print(image_uri)\n", - "\n", - "\n", - "# 构建一个使用镜像部署的InferenceSpec,可以用于BERT模型部署为推理服务.\n", - "bert_inference_spec = container_serving_spec(\n", - " # 模型服务的启动命令\n", - " command=\"python run.py\",\n", - " # 模型服务依赖的代码\n", - " source_dir=\"./serving_src\",\n", - " image_uri=image_uri,\n", - " requirements=[\n", - " \"transformers\",\n", - " \"fastapi\",\n", - " \"uvicorn\",\n", - " # 推理 pipeline 使用 device_map=\"auto\" 时需要安装\n", - " \"accelerate\",\n", - " ],\n", - ")\n", - "\n", - "print(bert_inference_spec.to_dict())" - ] - }, - { - "attachments": {}, - "cell_type": "markdown", - "id": "debba5d4", - "metadata": {}, - "source": [ - "### 模型部署\n", - "\n", - "通过构建Model,调用`Model.deploy`方法,可以将模型部署到PAI-EAS,生成在线服务。\n", - "\n", - "关于如何使用SDK部署模型的详细介绍,用户可以参考文档:[PAI Python SDK部署推理服务](https://help.aliyun.com/document_detail/2261532.html)" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "60b07fb7", - "metadata": {}, - "outputs": [], - 
"source": [ - "from pai.model import Model\n", - "from pai.common.utils import random_str\n", - "\n", - "m = Model(\n", - " inference_spec=bert_inference_spec,\n", - " model_data=bert_model_uri,\n", - ")\n", - "\n", - "p = m.deploy(\n", - " service_name=\"hf_bert_serving_{}\".format(random_str(6)), # 推理服务名称.\n", - " instance_type=\"ecs.c6.xlarge\", # 服务使用的机器实例规格: 4 vCPU, 8 GB\n", - ")" - ] - }, - { - "attachments": {}, - "cell_type": "markdown", - "id": "5e64254b", - "metadata": {}, - "source": [ - "deploy方法返回的Predictor对象,指向了新创建的推理服务,他提供了`.predict`方法,支持用户向推理服务发送预测请求。" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "2df66b1f", - "metadata": {}, - "outputs": [], - "source": [ - "res = p.predict(data={\"text\": \"Hello, I'm a [MASK] model.\"})\n", - "\n", - "print(res)" - ] - }, - { - "attachments": {}, - "cell_type": "markdown", - "id": "57f86644", - "metadata": {}, - "source": [ - "在测试完成之后,我们可以通过`predictor.delete_service`删除推理服务,释放资源。" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "d78a2587", - "metadata": {}, - "outputs": [], - "source": [ - "# 执行完成之后,删除对应的服务\n", - "\n", - "p.delete_service()" - ] - }, - { - "attachments": {}, - "cell_type": "markdown", - "id": "66b601b9-030e-49c1-8534-b6a53ea7903d", - "metadata": {}, - "source": [ - "## Step3: Finetune BERT预训练模型\n", - "\n", - "[BERT](https://arxiv.org/abs/1810.04805)使用自监督学习方法在大型英文语料库上进行训练,他学习到了英语语言的内在表示,可以通过微调的方式,应用于不同的下游任务,从而获得更好的性能。在当前示例中,我们将使用Huggingface上 Yelp英文评论数据集[yelp_review_full](https://huggingface.co/datasets/yelp_review_full) 对BERT模型进行微调,以获得一个可以用于情感分类的模型。\n", - "\n", - "\n", - "### 准备模型和数据集\n", - "\n", - "在当前步骤中,我们将准备微调训练使用的数据集,然后上传到OSS上供训练作业使用。\n", - "\n", - "> 通过HuggingFace提供的transformers和datasets库可以使用读取本地文件的方式(离线模式),或是从HuggingFace Hub下载模型和数据的方式。为了提高训练作业的执行速度,我们在当前示例中,将模型和数据集准备到OSS,挂载到训练作业执行环境中,供训练作业直接加载使用。\n", - "\n" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "f760ddcb", - "metadata": {}, - "outputs": [], - "source": [ - 
"from datasets import load_dataset\n", - "from pai.common.oss_utils import upload\n", - "\n", - "data_path = \"./train_data\"\n", - "\n", - "# 从HuggingFace Hub加载数据集\n", - "dataset = load_dataset(\"yelp_review_full\")\n", - "\n", - "# 保存到数据集,保存的数据集可以通过`datasets.load_from_disk`加载使用\n", - "dataset.save_to_disk(data_path)\n", - "\n", - "train_data_uri = upload(\n", - " source_path=data_path,\n", - " oss_path=\"huggingface/dataset/yelp_review_full/\",\n", - " bucket=sess.oss_bucket,\n", - ")\n", - "\n", - "print(train_data_uri)" - ] - }, - { - "attachments": {}, - "cell_type": "markdown", - "id": "5e518592", - "metadata": {}, - "source": [ - "\n", - "### 准备训练代码\n", - "参考HuggingFace提供的对于[Masked Language Model 的微调文档](https://huggingface.co/course/chapter7/3?fw=tf),我们编写了以下训练脚本,它将使用我们上传的数据集完成模型的微调。" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "a2534222", - "metadata": {}, - "outputs": [], - "source": [ - "\n", - "# 创建代码保存目录\n", - "!mkdir -p train_src" - ] - }, - { - "attachments": {}, - "cell_type": "markdown", - "id": "0f2a7761", - "metadata": {}, - "source": [ - "\n", - "在我们编写的训练作业脚本中,通过环境变量的方式获取训练作业的超参,输出数据,输出模型保存地址。对于PAI训练服务提供的环境变量的详细介绍,可以见文档:[训练作业预置环境变量](https://help.aliyun.com/document_detail/2261505.html)\n", - "\n", - "完整的训练代码如下:\n" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "26c62bb8-963e-4ffe-8843-715482896cd3", - "metadata": { - "ExecutionIndicator": { - "show": true - }, - "tags": [] - }, - "outputs": [], - "source": [ - "%%writefile train_src/finetune.py\n", - "\n", - "import os\n", - "\n", - "from datasets import load_dataset, load_from_disk\n", - "from transformers import AutoTokenizer, AutoModelForSequenceClassification, TrainingArguments, Trainer, DataCollatorWithPadding, HfArgumentParser\n", - "import numpy as np\n", - "import evaluate\n", - "\n", - "\n", - "def compute_metrics(eval_pred):\n", - " logits, labels = eval_pred\n", - " predictions = np.argmax(logits, axis=-1)\n", - " return 
metric.compute(predictions=predictions, references=labels)\n", - "\n", - "def tokenize_function(examples):\n", - " return tokenizer(examples[\"text\"], padding=\"max_length\", truncation=True)\n", - "\n", - "\n", - "def train():\n", - " # 通过环境变量获取预训练模型地址, 训练数据,以及模型保存地址\n", - " model_name_or_path = os.environ.get(\"PAI_INPUT_MODEL\", \"bert-base-cased\")\n", - " input_train_data = os.environ.get(\"PAI_INPUT_TRAIN_DATA\")\n", - " output_dir=os.environ.get(\"PAI_OUTPUT_MODEL\", \"./output\")\n", - "\n", - " # 使用环境变量获取训练作业超参\n", - " num_train_epochs=int(os.environ.get(\"PAI_HPS_EPOCHS\", 2))\n", - " save_strategy=os.environ.get(\"PAI_HPS_SAVE_STRATEGY\", \"epoch\")\n", - "\n", - " print(\"Loading Model...\")\n", - " model = AutoModelForSequenceClassification.from_pretrained(model_name_or_path, num_labels=5)\n", - " tokenizer = AutoTokenizer.from_pretrained(model_name_or_path)\n", - "\n", - " print(\"Loading dataset from disk...\")\n", - " dataset = load_from_disk(input_train_data)\n", - " tokenized_datasets = dataset.map(lambda examples: tokenizer(examples[\"text\"], padding=\"max_length\", truncation=True, max_length=512),\n", - " batched=True)\n", - "\n", - " data_collator = DataCollatorWithPadding(tokenizer)\n", - " small_train_dataset = tokenized_datasets['train'].shuffle(seed=42).select(range(1000))\n", - " small_eval_dataset = tokenized_datasets['test'].shuffle(seed=42).select(range(1000))\n", - "\n", - " training_args = TrainingArguments(\n", - " output_dir=output_dir,\n", - " # 使用环境变量获取训练作业超参\n", - " num_train_epochs=num_train_epochs,\n", - " # 使用环境变量获取训练作业保存策略\n", - " save_strategy=save_strategy,\n", - " )\n", - " print(\"TrainingArguments: {}\".format(training_args.to_json_string()))\n", - " metric = evaluate.load('accuracy')\n", - "\n", - " print(\"Training...\")\n", - " trainer = Trainer(\n", - " model=model,\n", - " args=training_args,\n", - " train_dataset=small_train_dataset,\n", - " eval_dataset=small_eval_dataset,\n", - " 
data_collator=data_collator,\n", - " tokenizer=tokenizer,\n", - " compute_metrics=compute_metrics,\n", - " )\n", - "\n", - " trainer.train()\n", - " print(\"Saving Model...\")\n", - " trainer.save_model()\n", - "\n", - "\n", - "if __name__ == \"__main__\":\n", - " train()\n" - ] - }, - { - "attachments": {}, - "cell_type": "markdown", - "id": "6a603b63", - "metadata": {}, - "source": [ - "我们的训练作业将使用PAI提供的PyTorch镜像执行,需要在镜像中安装 `transformers` 和 `evaluate` 库才能够执行相应的训练脚本。通过在训练作业目录下提供 `requirements.txt` 文件,PAI的训练服务会自动安装指定的第三方依赖。" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "cfab739e", - "metadata": {}, - "outputs": [], - "source": [ - "\n", - "%%writefile train_src/requirements.txt\n", - "\n", - "transformers\n", - "datasets\n", - "evaluate\n" - ] - }, - { - "attachments": {}, - "cell_type": "markdown", - "id": "b478975d-17bd-4f81-93b8-e3dd32b6b7f1", - "metadata": {}, - "source": [ - "### 提交训练作业\n", - "\n", - "通过PAI Python SDK提供的训练作业API`pai.estimator.Estimator`,我们可以将训练脚本提交到PAI执行。在以下代码中,我们将指定使用的训练代码 `train_src` ,使用PAI提供的PyTorch GPU镜像训练,提交运行微调训练作业。对于使用SDK提交训练作业的详细介绍,用户可以参考文档:[PAI Python SDK提交训练作业](https://help.aliyun.com/document_detail/2261505.html)。" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "dda481a0-7c85-4b49-b3c5-cc30ca5d3a8c", - "metadata": { - "ExecutionIndicator": { - "show": false - }, - "tags": [] - }, - "outputs": [], - "source": [ - "from pai.huggingface.estimator import HuggingFaceEstimator\n", - "from pai.image import retrieve\n", - "\n", - "\n", - "# 使用 PAI 提供的 PyTorch GPU 训练镜像\n", - "image_uri = retrieve(\n", - " \"PyTorch\", framework_version=\"latest\", accelerator_type=\"GPU\"\n", - ").image_uri\n", - "\n", - "\n", - "# 配置训练作业\n", - "est = HuggingFaceEstimator(\n", - " command=\"python finetune.py\", # 训练作业启动命令\n", - " source_dir=\"./train_src/\", # 训练作业代码\n", - " instance_type=\"ecs.gn6i-c4g1.xlarge\", # 训练使用的作业机器类型, 4 vCPU, 15 GB, 1* T4 GPU\n", - " transformers_version=\"latest\",\n", - " 
hyperparameters={ # 训练作业超参,用户可以通过环境变量,或是\n", - " \"save_strategy\": \"epoch\",\n", - " \"epochs\": \"1\",\n", - " },\n", - " base_job_name=\"hf-bert-training\",\n", - ")\n", - "\n", - "\n", - "# est = Estimator(\n", - "# image_uri=image_uri, # 训练作业使用的镜像\n", - "# command=\"python finetune.py\", # 训练作业启动命令\n", - "# source_dir=\"./train_src/\", # 训练作业代码\n", - "# instance_type=\"ecs.gn6i-c4g1.xlarge\", # 训练使用的作业机器类型, 4 vCPU, 15 GB, 1* T4 GPU\n", - "# hyperparameters={ # 训练作业超参,用户可以通过环境变量,或是\n", - "# \"save_strategy\": \"epoch\",\n", - "# \"epochs\": \"1\",\n", - "# },\n", - "# base_job_name=\"hf-bert-training\",\n", - "# )\n", - "\n", - "print(est)\n", - "print(est.hyperparameters)\n", - "\n", - "# 提交训练作业到PAI执行\n", - "# 提交之后SDK会打印作业URL,我们可以作业详情页查看训练日志,输出模型,资源使用情况等\n", - "est.fit(\n", - " # 作业使用的预训练模型和数据集使用inputs方式传递\n", - " # 相应的OSS URI会被挂载到作业环境中,用户可以通过 `PAI_INPUT_{ChannelNameUpperCase}` 的环境变量获取挂载后的路径\n", - " inputs={\n", - " \"model\": bert_model_uri,\n", - " \"train_data\": train_data_uri,\n", - " }\n", - ")" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "3c0354a7", - "metadata": {}, - "outputs": [], - "source": [ - "# 训练任务产出的模型地址\n", - "print(est.model_data())" - ] - }, - { - "attachments": {}, - "cell_type": "markdown", - "id": "27e83a0d-d02c-42f3-bbd2-c71c775fad82", - "metadata": { - "tags": [] - }, - "source": [ - "### 部署Finetune获得的模型\n", - "\n", - "我们将复用以上推理服务的代码,将微调训练获得的模型部署到PAI-EAS,创建一个在线推理服务。\n", - "\n", - "> Note: 微调模型用于情感分析任务,我们显式得修改HuggingFace pipeline的Task参数。这里我们通过环境变量的方式传入Task参数。" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "73a51721-11b9-4f24-b016-5c704de526b8", - "metadata": { - "ExecutionIndicator": { - "show": false - }, - "tags": [] - }, - "outputs": [], - "source": [ - "from pai.model import Model, container_serving_spec\n", - "from pai.image import retrieve, ImageScope\n", - "\n", - "\n", - "# 使用 PAI 提供的 PyTorch CPU 推理镜像\n", - "image_uri = retrieve(\n", - " \"PyTorch\",\n", - " 
framework_version=\"latest\",\n", - " accelerator_type=\"CPU\",\n", - " image_scope=ImageScope.INFERENCE,\n", - ").image_uri\n", - "\n", - "\n", - "# 构建一个使用镜像部署的InferenceSpec,可以用于将以上产出的BERT模型部署为推理服务.\n", - "inference_spec = container_serving_spec(\n", - " # 模型服务的启动命令\n", - " command=\"python run.py\",\n", - " # 模型服务依赖的代码\n", - " source_dir=\"./serving_src\",\n", - " image_uri=image_uri,\n", - " requirements=[\n", - " \"transformers\",\n", - " \"fastapi\",\n", - " \"uvicorn\",\n", - " ],\n", - " # 使用情感分析任务pipeline,通过环境变量的方式传递给到推理服务脚本。\n", - " environment_variables={\"HF_TASK\": \"sentiment-analysis\"},\n", - ")\n", - "\n", - "print(inference_spec.to_dict())" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "d57a41f3-a4fc-40d8-92d5-ca083f4de2ee", - "metadata": { - "ExecutionIndicator": { - "show": false - }, - "tags": [] - }, - "outputs": [], - "source": [ - "from pai.model import Model\n", - "from pai.common.utils import random_str\n", - "\n", - "# 使用训练作业产出的模型\n", - "model_data = est.model_data()\n", - "\n", - "m = Model(\n", - " inference_spec=inference_spec,\n", - " model_data=model_data,\n", - ")\n", - "\n", - "p = m.deploy(\n", - " service_name=\"hf_bert_ft_serving_{}\".format(random_str(6)), # 推理服务名称\n", - " instance_type=\"ecs.c6.xlarge\", # 服务使用的机器实例规格: 4 vCPU, 8 GB\n", - ")" - ] - }, - { - "attachments": {}, - "cell_type": "markdown", - "id": "82e7586d", - "metadata": {}, - "source": [ - "通过Predictor向新创建的推理服务发送预测请求,获取模型预测结果。" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "af0b6e0b", - "metadata": {}, - "outputs": [], - "source": [ - "res = p.predict({\"text\": \"i am so happy today\"})\n", - "print(res)\n", - "\n", - "res = p.predict({\"text\": \"i am so sad today\"})\n", - "print(res)" - ] - }, - { - "attachments": {}, - "cell_type": "markdown", - "id": "bc2bdce3", - "metadata": {}, - "source": [ - "在测试完成之后,我们通过`predictor.delete_service`删除推理服务,释放资源。" - ] - }, - { - "cell_type": "code", - "execution_count": 
null, - "id": "4fdd724e-e557-4461-9cdb-93874a77c49a", - "metadata": { - "tags": [] - }, - "outputs": [], - "source": [ - "# 执行完成之后,删除对应的服务\n", - "\n", - "p.delete_service()" - ] - } - ], - "metadata": { - "execution": { - "timeout": 1800 - }, - "kernelspec": { - "display_name": "Python 3", - "language": "python", - "name": "python3" - }, - "language_info": { - "codemirror_mode": { - "name": "ipython", - "version": 3 - }, - "file_extension": ".py", - "mimetype": "text/x-python", - "name": "python", - "nbconvert_exporter": "python", - "pygments_lexer": "ipython3", - "version": "3.8.3" - }, - "vscode": { - "interpreter": { - "hash": "31f2aee4e71d21fbe5cf8b01ff0e069b9275f58929596ceb00d14d90e3e16cd6" - } - } - }, - "nbformat": 4, - "nbformat_minor": 5 -} diff --git a/docs/source/tutorial/huggingface_model_deploy/huggingface_model_deploy.ipynb b/docs/source/tutorial/huggingface_model_deploy/huggingface_model_deploy.ipynb deleted file mode 100644 index c04412c..0000000 --- a/docs/source/tutorial/huggingface_model_deploy/huggingface_model_deploy.ipynb +++ /dev/null @@ -1,181 +0,0 @@ -{ - "cells": [ - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "# 部署HuggingFace模型\n", - "\n", - "HuggingFace是一个开源的模型社区,机器学习开发者在社区中可以分享、发现和使用各类机器学习模型。\n", - "\n", - "本文将介绍如何将HuggingFace社区的模型部署到PAI创建模型推理服务。" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "\n", - "## 安装和配置SDK\n", - "\n", - "\n", - "我们需要首先安装PAI Python SDK以运行本示例。" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "\n", - "!python -m pip install --upgrade alipai" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "\n", - "\n", - "\n", - "SDK需要配置访问阿里云服务需要的AccessKey,以及当前使用的工作空间和OSS Bucket。在PAI SDK安装之后,通过在**命令行终端** 中执行以下命令,按照引导配置密钥、工作空间等信息。\n", - "\n", - "\n", - "```shell\n", - "\n", - "# 以下命令,请在 命令行终端 中执行.\n", - "\n", - "python -m pai.toolkit.config\n", - "\n", - "```\n", - "\n", - "我们可以通过以下代码验证配置是否已生效。" - 
] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "import pai\n", - "from pai.session import get_default_session\n", - "\n", - "print(pai.__version__)\n", - "\n", - "sess = get_default_session()\n", - "\n", - "# 获取配置的工作空间信息\n", - "assert sess.workspace_name is not None\n", - "print(sess.workspace_name)" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "## 部署HuggingFace模型\n", - "\n", - "在本示例中,我们将使用HuggingFace社区提供的情感分类模型 [distilbert-base-uncased-finetuned-sst-2-english](https://huggingface.co/distilbert-base-uncased-finetuned-sst-2-english)部署一个模型在线服务,他支持将一段英文文本分类为正面或负面情感。\n", - "\n", - "通过相应的[模型的详情页](https://huggingface.co/distilbert-base-uncased-finetuned-sst-2-english/tree/main),我们可以获取部署模型所需的信息,包括模型ID(``MODEL_ID``)、模型任务类型(``TASK``)、模型版本(``REVISION``)。\n", - "\n", - "![](../../images/huggingface-model.png)\n" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "通过PAI Python SDK提供的``HuggingFaceModel``,我们可以轻松地将HuggingFace社区的模型部署到PAI上。" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "from pai.huggingface import HuggingFaceModel\n", - "\n", - "\n", - "# 初始化一个HuggingFaceModel\n", - "m = HuggingFaceModel(\n", - " command=\"python app.py\", # 模型服务启动命令\n", - " transformers_version=\"latest\", # 使用的transformers版本, 'latest'表示使用PAI目前支持的最新的版本\n", - " environment_variables={\n", - " \"MODEL_ID\": \"distilbert-base-uncased-finetuned-sst-2-english\", # 部署模型的ID\n", - " \"TASK\": \"text-classification\", # 部署的模型任务类型\n", - " \"REVISION\": \"main\", # 部署模型的版本信息\n", - " },\n", - ")" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "from pai.common.utils import random_str\n", - "\n", - "\n", - "# 部署模型,创建一个模型在线服务\n", - "p = m.deploy(\n", - " service_name=f\"hf_model_deploy_{random_str(n=8)}\", # 模型服务的名称(地域内唯一)\n", - " instance_type=\"ecs.g6.large\", # 
模型服务使用的机器实例规格\n", - " options={\n", - " \"enable_webservice\": True, # 以AIWeb应用的模式启动,支持用户在Web浏览器上使用模型在线服务\n", - " },\n", - ")" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "p.predict(data={\"data\": [\"I love you\"]})" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "测试完成之后,删除服务,释放机器资源。" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "p.delete_service()" - ] - } - ], - "metadata": { - "kernelspec": { - "display_name": "base", - "language": "python", - "name": "python3" - }, - "language_info": { - "codemirror_mode": { - "name": "ipython", - "version": 3 - }, - "file_extension": ".py", - "mimetype": "text/x-python", - "name": "python", - "nbconvert_exporter": "python", - "pygments_lexer": "ipython3", - "version": "3.8.3" - } - }, - "nbformat": 4, - "nbformat_minor": 2 -} diff --git a/docs/source/tutorial/model_deploy_container/model_deploy_container.ipynb b/docs/source/tutorial/model_deploy_container/model_deploy_container.ipynb deleted file mode 100644 index 229bc2b..0000000 --- a/docs/source/tutorial/model_deploy_container/model_deploy_container.ipynb +++ /dev/null @@ -1,376 +0,0 @@ -{ - "cells": [ - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "# 使用镜像部署模型\n", - "\n", - "PAI支持用户使用镜像的方式部署模型,通过镜像,开发者可以自定义模型部署的环境,包括Python、使用的机器学习框架、依赖的第三方库等,能够支持用户灵活的部署需求。详细的介绍可以参考PAI帮助文档:[使用镜像部署模型](https://help.aliyun.com/zh/pai/user-guide/deploy-a-model-service-by-using-a-custom-image)。\n", - "\n", - "PAI Python SDK提供了便利的API,支持用户能够使用自定义镜像,或是PAI提供的预置推理,将一个本地,或是OSS上的模型快捷得部署为模型在线服务。\n", - "\n", - "本文档将介绍,用户如何通过PAI Python SDK通过自定义镜像的方式部署模型。" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "\n", - "## 安装和配置SDK\n", - "\n", - "我们需要首先安装PAI Python SDK以运行本示例。" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "!python -m pip 
install --upgrade alipai" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "\n", - "\n", - "SDK需要配置访问阿里云服务需要的AccessKey,以及当前使用的工作空间和OSS Bucket。在PAI SDK安装之后,通过在 **命令行终端** 中执行以下命令,按照引导配置密钥、工作空间等信息。\n", - "\n", - "\n", - "```shell\n", - "\n", - "# 以下命令,请在 命令行终端 中执行.\n", - "\n", - "python -m pai.toolkit.config\n", - "\n", - "```\n", - "\n", - "我们可以通过以下代码验证配置是否已生效。" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "import pai\n", - "from pai.session import get_default_session\n", - "\n", - "print(pai.__version__)\n", - "\n", - "sess = get_default_session()\n", - "\n", - "# 获取配置的工作空间信息\n", - "assert sess.workspace_name is not None\n", - "print(sess.workspace_name)" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "## 部署模型推理服务\n", - "\n", - "模型在线服务包含了模型的文件、模型的推理服务代码、以及推理服务运行环境。\n", - "本示例将使用一个简单的`PyTorch`模型,通过`Flask`和`PAI`提供的`PyTorch`基础镜像,部署模型在线服务。\n" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "\n", - "下载示例使用的简单PyTorch模型。" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "# 下载模型到本地 \"model\" 目录\n", - "\n", - "!mkdir -p model/\n", - "!wget https://pai-sdk.oss-cn-shanghai.aliyuncs.com/pai/resources/toy_model.pt -P model/" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "### 准备推理服务代码\n", - "\n", - "在部署模型之前,我们首先需要准备推理服务的代码,它提供HTTP接口,负责接收预测请求,使用模型进行推理,返回预测结果。\n", - "\n", - "当前示例我们将使用 ``Flask`` 编写一个简单的推理服务,保存为 ``infer_src/app.py`` 文件。" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "!mkdir -p infer_src" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "%%writefile infer_src/app.py\n", - "import json\n", - "from flask import Flask, request\n", - "import os\n", - "import torch\n", - "import numpy as np\n", - "\n", - 
"app = Flask(__name__)\n", - "model = None\n", - "# 默认的模型文件路径\n", - "MODEL_PATH = \"/eas/workspace/model/\"\n", - "\n", - "def load_model():\n", - " \"\"\"加载模型\"\"\"\n", - " global model\n", - " model = torch.jit.load(os.path.join(MODEL_PATH, \"toy_model.pt\"))\n", - " model.eval()\n", - "\n", - "@app.route(\"/\", methods=[\"POST\"])\n", - "def predict():\n", - " data = np.asarray(json.loads(request.data)).astype(np.float32)\n", - " output_tensor = model(torch.from_numpy(data))\n", - " pred_res = output_tensor.detach().cpu().numpy()\n", - " return json.dumps(pred_res.tolist())\n", - "\n", - "if __name__ == \"__main__\":\n", - " load_model()\n", - " app.run(host=\"0.0.0.0\", port=int(os.environ.get(\"LISTENING_PORT\", 8000)))" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "### 获取PAI提供的预置推理镜像\n", - "\n", - "PAI提供了一系列预置的推理镜像,镜像内预置了机器学习框架、常用的第三方库、Python、NVIDIA CUDA库等。我们可以通过以下代码列出所有的预置镜像。" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "from pai.image import list_images, ImageScope\n", - "\n", - "\n", - "data = [\n", - " [\n", - " \"ImageUri\",\n", - " \"FrameworkName\",\n", - " \"FrameworkVersion\",\n", - " \"AcceleratorType\",\n", - " \"PythonVersion\",\n", - " ]\n", - "]\n", - "\n", - "# 列出常用的PyTorch推理镜像\n", - "for img in list_images(framework_name=\"PyTorch\", image_scope=ImageScope.INFERENCE):\n", - " data.append(\n", - " [\n", - " img.image_uri,\n", - " img.framework_name,\n", - " img.framework_version,\n", - " img.accelerator_type,\n", - " img.python_version,\n", - " ]\n", - " )\n", - "\n", - "# 列出常用的TensorFlow推理镜像\n", - "for img in list_images(framework_name=\"TensorFlow\", image_scope=ImageScope.INFERENCE):\n", - " data.append(\n", - " [\n", - " img.image_uri,\n", - " img.framework_name,\n", - " img.framework_version,\n", - " img.accelerator_type,\n", - " img.python_version,\n", - " ]\n", - " )" - ] - }, - { - "cell_type": "code", - "execution_count": null, - 
"metadata": {}, - "outputs": [], - "source": [ - "from IPython.display import HTML, display\n", - "\n", - "display(\n", - " HTML(\n", - " \"{}
\".format(\n", - " \"\".join(\n", - " \"{}\".format(\"\".join(str(_) for _ in row))\n", - " for row in data\n", - " )\n", - " )\n", - " )\n", - ")" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "通过SDK提供的 `pai.image.retrieve` API,可以获取指定框架版本的镜像。在当前示例中,我们将使用PAI提供的PyTorch 1.12版本的CPU推理镜像" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "from pai.image import retrieve, ImageScope\n", - "\n", - "# # 获取PyTorch 1.10 GPU推理镜像\n", - "# print(retrieve(\n", - "# framework_name=\"PyTorch\", # 框架名称\n", - "# framework_version=\"latest\", # 框架版本\n", - "# accelerator_type=\"gpu\", # 选择支持Nvidia CUDA GPU的镜像\n", - "# image_scope=ImageScope.INFERENCE, # 镜像类型,推理镜像\n", - "\n", - "# # ).image_uri)\n", - "\n", - "# 获取最新的PyTorch CPU推理镜像\n", - "torch_image_uri = retrieve(\n", - " framework_name=\"PyTorch\", # 框架名称\n", - " framework_version=\"1.12\", # 框架版本,latest表示使用PAI支持的最新版本\n", - " # accelerator_type=\"cpu\", # 默认使用CPU镜像\n", - " image_scope=ImageScope.INFERENCE, # 镜像类型,推理镜像\n", - ").image_uri\n", - "print(torch_image_uri)" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "### 部署推理服务\n", - "使用以上的推理服务代码,以及PyTorch推理镜像,我们将一个PyTorch模型部署为模型在线服务。" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "from pai.model import Model, container_serving_spec\n", - "\n", - "\n", - "m = Model(\n", - " model_data=\"./model/\", # 模型文件,可以是一个本地文件或是OSS Bucket路径(例如 oss:///path/to/model ),\n", - " inference_spec=container_serving_spec(\n", - " image_uri=torch_image_uri, # 推理服务使用的镜像\n", - " command=\"python app.py\", # 模型推理服务启动命令\n", - " source_dir=\"./infer_src/\", # 推理服务代码所在目录\n", - " requirements=[\"flask==2.0.0\", \"Werkzeug==2.3.4\"], # 推理服务依赖的Python包\n", - " ),\n", - ")" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "from pai.common.utils import random_str\n", - 
"\n", - "# 部署模型服务\n", - "p = m.deploy(\n", - " service_name=f\"toy_model_{random_str(6)}\", # 模型服务名称, 地域内唯一\n", - " instance_type=\"ecs.c6.large\", # 模型服务使用的机器实例规格\n", - ")" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "### 调用推理服务\n", - "\n", - "部署服务后返回的`pai.predictor.Predictor`对象可以用于调用推理服务,发送预测请求。" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "import numpy as np\n", - "\n", - "# 构造一个随机数组输入\n", - "dummy_input = np.random.rand(1, 10, 10).tolist()\n", - "print(dummy_input)\n", - "\n", - "result = p.raw_predict(\n", - " data=dummy_input,\n", - ")\n", - "\n", - "# 打印推理结果\n", - "print(result.json())" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "在测试完成之后,删除推理服务" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "p.delete_service()" - ] - } - ], - "metadata": { - "kernelspec": { - "display_name": "base", - "language": "python", - "name": "python3" - }, - "language_info": { - "codemirror_mode": { - "name": "ipython", - "version": 3 - }, - "file_extension": ".py", - "mimetype": "text/x-python", - "name": "python", - "nbconvert_exporter": "python", - "pygments_lexer": "ipython3", - "version": "3.8.3" - } - }, - "nbformat": 4, - "nbformat_minor": 2 -} diff --git a/docs/source/tutorial/modelscope_model_deploy/modelscope_model_deploy.ipynb b/docs/source/tutorial/modelscope_model_deploy/modelscope_model_deploy.ipynb deleted file mode 100644 index 51c7c5b..0000000 --- a/docs/source/tutorial/modelscope_model_deploy/modelscope_model_deploy.ipynb +++ /dev/null @@ -1,216 +0,0 @@ -{ - "cells": [ - { - "cell_type": "markdown", - "metadata": { - "tags": [ - "keep-output" - ] - }, - "source": [ - "# 部署ModelScope模型\n", - "\n", - "[ModelScope](https://www.modelscope.cn/)是一个开源的模型社区,提供了丰富的自然语言处理、计算机视觉、多模态等领域开源模型,并提供了[ModelScope 
library](https://github.com/modelscope/modelscope),支持开发者可以方便得获取模型,使用模型进行推理。\n", - "\n", - "PAI支持开发者将ModelScope上的模型,简单快捷得部署为在线推理服务,本文将介绍使用PAI Python SDK完成ModelScope模型的部署。" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "## 安装和配置SDK\n", - "\n", - "我们需要首先安装PAI Python SDK以运行本示例。" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": { - "tags": [ - "skip-execution" - ] - }, - "outputs": [], - "source": [ - "!python -m pip install --upgrade alipai" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "\n", - "SDK需要配置访问阿里云服务需要的AccessKey,以及当前使用的工作空间和OSS Bucket。在PAI SDK安装之后,通过在 **命令行终端** 中执行以下命令,按照引导配置密钥、工作空间等信息。\n", - "\n", - "\n", - "```shell\n", - "\n", - "# 以下命令,请在 命令行终端 中执行.\n", - "\n", - "python -m pai.toolkit.config\n", - "\n", - "```\n", - "\n", - "我们可以通过以下代码验证配置是否已生效。" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "import pai\n", - "from pai.session import get_default_session\n", - "\n", - "print(pai.__version__)\n", - "\n", - "sess = get_default_session()\n", - "\n", - "# 获取配置的工作空间信息\n", - "assert sess.workspace_name is not None\n", - "print(sess.workspace_name)" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "## 部署ModelScope模型\n", - "\n", - "当前示例,我们将使用ModelScope上的[\"CSANMT连续语义增强机器翻译-英中-通用领域-large\"](https://modelscope.cn/models/damo/nlp_csanmt_translation_en2zh/summary)模型,他支持英文到中文的翻译任务。\n", - "\n", - "通过ModelScope的模型详情页,我们可以获取部署模型所需要的信息,包括**模型ID**,**模型版本**,以及**任务类型**,然后通过 `pai.modelscope.ModelScopeModel` 类,创建一个ModelScope模型对象,完成模型部署。\n", - "\n", - "![](../../images/modelscope-model.png)" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "from pai.modelscope.model import ModelScopeModel\n", - "\n", - "# 配置待部署的模型信息\n", - "m = ModelScopeModel(\n", - " command=\"python app.py\", # 默认的ModelScope模型推理服务启动命令\n", - " 
modelscope_version=\"latest\", # ModelScope library的版本号,latest表示最新版本\n", - " environment_variables={\n", - " \"MODEL_ID\": \"damo/nlp_csanmt_translation_en2zh\", # ModelScope的模型ID\n", - " \"TASK\": \"translation\", # 模型的任务类型\n", - " \"REVISION\": \"v1.0.1\", # 模型的版本号\n", - " },\n", - ")" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "from pai.common.utils import random_str\n", - "from pai.predictor import Predictor\n", - "\n", - "# 部署模型,在PAI-EAS创建一个推理服务\n", - "p: Predictor = m.deploy(\n", - " service_name=\"ms_model_{0}\".format(random_str(8)), # 配置推理服务名称\n", - " instance_type=\"ecs.gn6i-c4g1.xlarge\", # 配置推理服务实例规格\n", - " options={\n", - " \"metadata.rpc.keepalive\": 20000, # 配置推理服务RPC超时时间: 20s\n", - " },\n", - ")" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "\n", - "通过以上方式部署的模型推理服务,支持通过空字符串的预测请求,获取模型的输入输出信息。" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "from pprint import pprint\n", - "from pai.predictor import RawResponse\n", - "\n", - "# 通过一个空的预测请求,获取模型的推理输入输出的数据格式\n", - "res: RawResponse = p.raw_predict(data=\"\")\n", - "\n", - "pprint(res.json())" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "\n", - "基于以上获得的输入数据格式信息,我们可以构建相应的预测请求,发送给到推理,获取翻译结果。" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "res = p.predict(\n", - " # 参考以上的获得的输入输出数据格式,配置推理请求的数据\n", - " data={\n", - " \"input\": {\n", - " \"text\": \"Alibaba Group's mission is to let the world have no difficult business\"\n", - " }\n", - " }\n", - ")\n", - "pprint(res)" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "在测试完成之后,删除推理服务,释放机器资源。" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "# 删除推理服务\n", - "p.delete_service()" - ] - } - ], - "metadata": { - 
"kernelspec": { - "display_name": "base", - "language": "python", - "name": "python3" - }, - "language_info": { - "codemirror_mode": { - "name": "ipython", - "version": 3 - }, - "file_extension": ".py", - "mimetype": "text/x-python", - "name": "python", - "nbconvert_exporter": "python", - "pygments_lexer": "ipython3", - "version": "3.8.3" - } - }, - "nbformat": 4, - "nbformat_minor": 2 -} diff --git a/docs/source/tutorial/modelscope_vit/modelscope_vit.ipynb b/docs/source/tutorial/modelscope_vit/modelscope_vit.ipynb deleted file mode 100644 index 106f994..0000000 --- a/docs/source/tutorial/modelscope_vit/modelscope_vit.ipynb +++ /dev/null @@ -1,638 +0,0 @@ -{ - "cells": [ - { - "attachments": {}, - "cell_type": "markdown", - "metadata": {}, - "source": [ - "# 使用ModelScope ViT模型完成图像分类模型微调和部署\n", - "\n", - "## 背景介绍\n", - "\n", - "\n", - "[ModelScope](https://www.modelscope.cn)是一个旨在为泛AI开发者提供灵活、易用、低成本的一站式“模型即服务”(MaaS)的开源平台。它汇集了丰富的预训练模型,覆盖了NLP、CV、Audio、AIGC、多模态大模型等多个领域。利用ModelScope所提供的模型以及ModelScope Library,开发者可以用一行代码实现模型推理,或者用十几行代码实现对预训练模型的调优训练,方便开发者基于行业数据集快速构建专属行业模型。\n", - "\n", - "当前示例中,我们以[ViT图像分类-通用](https://modelscope.cn/models/damo/cv_vit-base_image-classification_ImageNet-labels/summary) 为示例,展示如何在PAI完成一个ModelScope模型的微调训练,然后将获得的模型部署为一个在线推理服务的过程。主要流程包括:\n", - "\n", - "1. 准备工作:\n", - "\n", - "安装PAI Python SDK,并完成SDK配置。\n", - "\n", - "2. 模型的微调训练\n", - "\n", - "编写微调训练脚本,使用[花朵分类](https://www.modelscope.cn/models/zydfx1111/flower)数据集对模型进行微调训练,以获得一个可以用于花朵分类的模型。\n", - "\n", - "3. 
部署推理服务\n", - "\n", - "将微调训练作业输出的模型,部署到PAI-EAS,创建一个在线推理服务。\n", - "\n", - "## 前提条件\n", - "\n", - "- 已获取阿里云账号的鉴权AccessKey ID和AccessKey Secret,详情请参见:[获取AccessKey](https://help.aliyun.com/document_detail/116401.html)。\n", - "- 已创建或是加入一个PAI AI工作空间,详情请参见:[创建工作空间](https://help.aliyun.com/document_detail/326193.html)。\n", - "- 已创建OSS Bucket,详情请参见:[控制台创建存储空间](https://help.aliyun.com/document_detail/31885.html)。\n" - ] - }, - { - "attachments": {}, - "cell_type": "markdown", - "metadata": {}, - "source": [ - "## Step1: 准备工作\n", - "\n", - "我们将使用PAI提供的Python SDK,提交训练作业,部署模型。可以通过以下命令安装PAI Python SDK。" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": { - "tags": [ - "skip-execution" - ] - }, - "outputs": [], - "source": [ - "!python -m pip install --upgrade alipai" - ] - }, - { - "attachments": {}, - "cell_type": "markdown", - "metadata": {}, - "source": [ - "\n", - "SDK需要配置访问阿里云服务需要的 AccessKey,以及当前使用的工作空间和OSS Bucket。在PAI Python SDK安装之后,通过在 **命令行终端** 中执行以下命令,按照引导配置密钥,工作空间等信息。\n", - "\n", - "\n", - "```shell\n", - "\n", - "# 以下命令,请在 命令行终端 中执行.\n", - "\n", - "python -m pai.toolkit.config\n", - "\n", - "```\n", - "\n", - "我们可以通过以下代码验证当前的配置。" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "import pai\n", - "from pai.session import get_default_session\n", - "\n", - "print(pai.__version__)\n", - "sess = get_default_session()\n", - "\n", - "assert sess.workspace_name is not None\n", - "print(sess.workspace_name)" - ] - }, - { - "attachments": {}, - "cell_type": "markdown", - "metadata": {}, - "source": [ - "## Step2: 提交微调训练作业\n", - "\n", - "ModelScope的[ViT图片分类-通用](https://modelscope.cn/models/damo/cv_vit-base_image-classification_ImageNet-labels/summary)模型使用经典的[ViT Base](https://github.com/google-research/vision_transformer)模型结构,在ImageNet-1k数据集进行预训练,可以直接用于[ImageNet 1k标签](https://deeplearning.cms.waikato.ac.nz/user-guide/class-maps/IMAGENET/)覆盖图像的分类任务,也可以作为下游任务的预训练模型。\n", - "\n", - 
"当前示例,我们将以[花朵分类数据集](https://www.modelscope.cn/datasets/tany0699/flowers14/summary)对模型进行微调训练,从而获得一个可以用于花朵分类的模型。\n", - "\n", - "### 准备微调训练脚本\n", - "\n", - "ModelScope提供了功能完善的Python Library,能够支持用户方便得使用ModelScope模型进行推理以及微调训练,在本示例中,我们将使用ModelScope Library编写相应的微调训练脚本,然后提交到PAI执行微调训练作业。\n" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "\n", - "# 准备相应训练作业脚本目录\n", - "!mkdir -p train_src" - ] - }, - { - "attachments": {}, - "cell_type": "markdown", - "metadata": {}, - "source": [ - "完整的微调训练脚本代码如下:\n", - "\n", - "> 对于ModelScope library的使用介绍,请参见:[ModelScope文档](https://www.modelscope.cn/docs/ModelScope%20Library%E6%A6%82%E8%A7%88%E4%BB%8B%E7%BB%8D)" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "%%writefile train_src/finetune.py\n", - "\n", - "import os\n", - "import re\n", - "import logging\n", - "import shutil\n", - "\n", - "\n", - "from modelscope.msdatasets import MsDataset\n", - "from modelscope.metainfo import Trainers\n", - "from modelscope.trainers import build_trainer\n", - "\n", - "\n", - "# 从环境变量中获取超参(由PAI的训练服务注入)\n", - "BATCH_SIZE = int(os.environ.get(\"PAI_HPS_BATCH_SIZE\", 16))\n", - "LEARNING_RATE = float(os.environ.get(\"PAI_HPS_INITIAL_LEARNING_RATE\", 1e-3))\n", - "NUM_EPOCHS = int(os.environ.get(\"PAI_HPS_EPOCHS\", 1))\n", - "NUM_CLASSES = int(os.environ.get(\"PAI_HPS_NUM_CLASSES\", 14))\n", - "MODEL_ID_OR_PATH = os.environ.get(\"PAI_INPUT_MODEL\", \"damo/cv_vit-base_image-classification_ImageNet-labels\")\n", - "\n", - "# 通过环境变量获取输出模型,和checkpoints保存路径\n", - "OUTPUT_MODEL_DIR = os.environ.get(\"PAI_OUTPUT_MODEL\", \"./model/\")\n", - "WORK_DIR = os.environ.get(\"PAI_OUTPUT_CHECKPOINTS\", \"./checkpoints/\")\n", - "\n", - "\n", - "# 将产出的模型保存到模型输出目录(OUTPUT_MODEL_DIR)\n", - "def save_model():\n", - " best_ckpt_pattern = re.compile(\n", - " pattern=r\"^best_accuracy_top-1_epoch_\\d+.pth$\"\n", - " )\n", - " print(\"Saving best 
checkpoint as pytorch_model.pt\")\n", - " print(\"List work dir: \", os.listdir(WORK_DIR))\n", - "\n", - " f_name = next((f for f in os.listdir(WORK_DIR) if best_ckpt_pattern.match(f)), None)\n", - " if f_name:\n", - " # 使用最佳checkpoints作为输出模型\n", - " print(\"Found best checkpoint: \", f_name)\n", - " shutil.copyfile(\n", - " src=os.path.join(WORK_DIR, f_name),\n", - " dst=os.path.join(OUTPUT_MODEL_DIR, \"pytorch_model.pt\"),\n", - " )\n", - " os.remove(os.path.join(WORK_DIR, f_name))\n", - " else:\n", - " # 如果没有,则使用最后一个epoch的checkpoints作为输出模型\n", - " print(\"Not found best checkpoint.\")\n", - " last_ckpt_file = \"epoch_{}.pth\".format(NUM_EPOCHS)\n", - " if os.path.isfile(os.path.join(WORK_DIR, last_ckpt_file)):\n", - " shutil.copyfile(\n", - " src=os.path.join(WORK_DIR, last_ckpt_file),\n", - " dst=os.path.join(OUTPUT_MODEL_DIR, \"pytorch_model.pt\"),\n", - " )\n", - " else:\n", - " print(\"Not found latest checkpoint: {}.\".format(os.path.join(WORK_DIR, last_ckpt_file)))\n", - " # 模型配置信息\n", - " shutil.copyfile(\n", - " src=os.path.join(WORK_DIR, \"configuration.json\"),\n", - " dst=os.path.join(OUTPUT_MODEL_DIR, \"configuration.json\"),\n", - " )\n", - "\n", - "\n", - "# 修改配置文件\n", - "def cfg_modify_fn(cfg):\n", - " cfg.train.dataloader.batch_size_per_gpu = BATCH_SIZE # batch大小\n", - " cfg.train.dataloader.workers_per_gpu = 8 # 每个gpu的worker数目\n", - " cfg.train.max_epochs = NUM_EPOCHS # 最大训练epoch数\n", - " cfg.model.mm_model.head.num_classes = NUM_CLASSES # 分类数\n", - " cfg.model.mm_model.train_cfg.augments[0].num_classes = NUM_CLASSES # 分类数\n", - " cfg.model.mm_model.train_cfg.augments[1].num_classes = NUM_CLASSES # 分类数\n", - " cfg.train.optimizer.lr = LEARNING_RATE # 学习率\n", - " cfg.train.lr_config.warmup_iters = 1 # 预热次数\n", - "\n", - " # Note: OSS挂载到输出路径中,不支持软链接.\n", - " cfg.train.checkpoint_config.create_symlink = False\n", - "\n", - "\n", - " return cfg\n", - "\n", - "def train():\n", - " ms_train_dataset = MsDataset.load(\n", - " 'flowers14', 
namespace='tany0699',\n", - " subset_name='default', split='train') # 加载训练集\n", - "\n", - " ms_val_dataset = MsDataset.load(\n", - " 'flowers14', namespace='tany0699',\n", - " subset_name='default', split='validation') # 加载验证集\n", - "\n", - "\n", - " # 构建训练器\n", - " kwargs = dict(\n", - " model=MODEL_ID_OR_PATH, # 模型id\n", - " work_dir=WORK_DIR,\n", - " train_dataset=ms_train_dataset, # 训练集 \n", - " eval_dataset=ms_val_dataset, # 验证集\n", - " cfg_modify_fn=cfg_modify_fn # 用于修改训练配置文件的回调函数\n", - " )\n", - " trainer = build_trainer(name=Trainers.image_classification, default_args=kwargs)\n", - "\n", - " # 进行训练\n", - " trainer.train()\n", - "\n", - " # 进行评估\n", - " result = trainer.evaluate()\n", - " print('Evaluation Result:', result)\n", - "\n", - " # 保存模型\n", - " save_model()\n", - "\n", - "if __name__ == \"__main__\":\n", - " train()\n", - " " - ] - }, - { - "attachments": {}, - "cell_type": "markdown", - "metadata": {}, - "source": [ - "在当前的训练作业中,我们将使用PAI提供的PyTorch训练镜像,需要在镜像中安装ModelScope Library。通过在训练作业脚本目录下准备一个`requirements.txt`文件,可以在训练作业启动时,自动安装依赖的第三方库。" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "%%writefile train_src/requirements.txt\n", - "\n", - "\n", - "# 部分ModelScope依赖library由ModelScope Host,需要显式配置以下参数\n", - "--find-links https://modelscope.oss-cn-beijing.aliyuncs.com/releases/repo.html\n", - "modelscope[cv]==1.3.1" - ] - }, - { - "attachments": {}, - "cell_type": "markdown", - "metadata": {}, - "source": [ - "完整的训练作业脚本目录结构如下:\n", - "\n", - "```shell\n", - "\n", - "train_src\n", - " ├── finetune.py\n", - " └── requirements.txt\n", - "\n", - "```\n", - "\n", - "后续我们将通过PAI Python SDK将训练脚本提交到PAI执行。\n" - ] - }, - { - "attachments": {}, - "cell_type": "markdown", - "metadata": {}, - "source": [ - "### 提交训练作业到PAI\n", - "\n", - "SDK提供了High-Level的API,`pai.estimator.Estimator`,支持用户方便地使用镜像配合训练脚本,提交训练作业到PAI。以下代码中,我们将使用以上的训练作业脚本(`train_src`目录),配合PAI提供的PyTorch训练镜像,提交一个训练作业。\n", - "\n", - 
"对于如何使用SDK提交训练作业的详细介绍,可以见文档:[提交训练作业](https://help.aliyun.com/document_detail/2261505.html)\n" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "from pai.estimator import Estimator\n", - "from pai.image import retrieve\n", - "\n", - "\n", - "# 使用PAI提供的最新的PyTorch GPU镜像\n", - "torch_img_uri = retrieve(\n", - " \"PyTorch\",\n", - " \"latest\",\n", - " accelerator_type=\"gpu\",\n", - ").image_uri\n", - "\n", - "# 使用训练配置信息,创建Estimator对象\n", - "est = Estimator(\n", - " command=\"python finetune.py\", # 训练作业的启动命令\n", - " source_dir=\"train_src\", # 训练作业脚本本地目录(绝对路径,或是相对路径)\n", - " image_uri=torch_img_uri, # 作业的镜像类型\n", - " # instance_type=\"ecs.gn6e-c12g1.3xlarge\", # 12vCPU 92GiB NVIDIA V100 × 1 (32GB GPU memory)\n", - " instance_type=\"ecs.gn7i-c8g1.2xlarge\", # 8vCPU 30GiB NVIDIA A10 × 1 (24GB GPU Memory)\n", - " base_job_name=\"vit-finetune\", # 作业名称\n", - " hyperparameters={ # 训练作业超参,用户可以通过环境变量或是读取配置文件的方式获取.\n", - " \"batch_size\": 128,\n", - " \"initial_learning_rate\": 1e-4,\n", - " \"epochs\": 2,\n", - " # 花朵数据集一共14个分类\n", - " \"num_classes\": 14,\n", - " },\n", - ")" - ] - }, - { - "attachments": {}, - "cell_type": "markdown", - "metadata": {}, - "source": [ - "通过`fit` API提交训练作业。当前示例中,我们在训练脚本中使用ModelScope的library去下载数据集。当用户需要使用自定义数据集时,可以通过`fit`方法传递相应数据OSS路径,训练作业会通过挂载的方式将相应的数据准备的执行环境中。\n", - "\n", - "```python\n", - "\n", - "est.fit(\n", - "\t# 用户的训练作业脚本可以通过环境变量 PAI_INPUT_{ChannelNameUpperCase} 获得数据的本地路径.\n", - "\t\"train\": \"oss:///train/data/path/\",\n", - ")\n", - "\n", - "```" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "est.fit()" - ] - }, - { - "attachments": {}, - "cell_type": "markdown", - "metadata": {}, - "source": [ - "训练作业执行成功之后,用户可以通过`estimator.model_data()`获取相应产出模型的OSS路径" - ] - }, - { - "attachments": {}, - "cell_type": "markdown", - "metadata": {}, - "source": [ - "## Step3: 部署推理服务\n", - "\n", - 
"PAI-EAS是PAI提供的推理服务部署平台,支持使用Processor或是镜像的方式部署推理服务。在以下的流程中,我们将使用微调获得的模型,使用镜像部署的方式部署一个在线推理服务。\n", - "\n", - "### 准备推理服务使用的代码\n", - "\n", - "镜像部署的模式,要求用户提供一个推理服务程序,他负责加载模型,提供HTTP API,以支持接受用户推理请求,调用模型处理推理请求,返回推理结果。在当前示例中,我们将使用[FastAPI](https://fastapi.tiangolo.com/)编写一个推理服务程序,加载以上训练作业输出的模型,在PAI创建一个推理服务。\n", - "\n", - "我们首先创建一个目录(`serve_src`),用于保存的推理服务程序代码。\n" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "!mkdir -p serve_src/" - ] - }, - { - "attachments": {}, - "cell_type": "markdown", - "metadata": {}, - "source": [ - "我们准备的推理服务程序,支持用户通过HTTP POST发送的图片,然后调用ModelScope的推理pipeline获取预测结果,返回给到用户。\n", - "\n", - "> ModelScope 推理pipeline返回的结果中带有`numpy.ndarray`数据,需要我们通过自定义Encoder将其序列化。\n", - "\n", - "完整代码如下,我们将其保存到`serve_src`目录下,用于后续创建推理服务。" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "%%writefile serve_src/run.py\n", - "\n", - "import os\n", - "import io\n", - "import json\n", - "\n", - "import uvicorn\n", - "from fastapi import FastAPI, Response, Request\n", - "import numpy as np\n", - "\n", - "from modelscope.pipelines import pipeline\n", - "from modelscope.utils.constant import Tasks\n", - "from PIL import Image\n", - "\n", - "# 用户指定模型,默认会被加载到当前路径下。 \n", - "MODEL_PATH = \"/eas/workspace/model/\"\n", - "\n", - "class NumpyEncoder(json.JSONEncoder):\n", - "\n", - " def default(self, obj):\n", - " if isinstance(obj, np.ndarray):\n", - " return obj.tolist()\n", - " elif isinstance(obj, np.generic):\n", - " return obj.item()\n", - " else:\n", - " return json.JSONEncoder.default(self, obj)\n", - "\n", - "app = FastAPI()\n", - "\n", - "@app.post(\"/\")\n", - "async def predict(request: Request):\n", - " global p\n", - " content = await request.body()\n", - " img = Image.open(io.BytesIO(content))\n", - " res = p(img)\n", - " return Response(content=json.dumps(res, cls=NumpyEncoder), media_type=\"application/json\")\n", - "\n", - "\n", - "if 
__name__ == '__main__':\n", - " p = pipeline(\n", - " Tasks.image_classification,\n", - " model=MODEL_PATH,\n", - " )\n", - " uvicorn.run(app, host='0.0.0.0', port=8000)\n" - ] - }, - { - "attachments": {}, - "cell_type": "markdown", - "metadata": {}, - "source": [ - "### 创建推理服务\n", - "\n", - "我们将使用PAI提供的ModelScope推理镜像,使用以上的推理服务程序,创建一个推理服务。" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "from pai.model import container_serving_spec\n", - "from pai.session import get_default_session\n", - "from pai.model import Model\n", - "from random import randint\n", - "\n", - "\n", - "# 使用PAI QuickStart提供的ModelScope的镜像创建推理服务\n", - "image_uri = (\n", - " \"registry.{}.aliyuncs.com/paiflow-public/quickstart:modelscope-1.2.0\".format(\n", - " get_default_session().region_id,\n", - " )\n", - ")\n", - "\n", - "\n", - "# 创建一个Model对象,他可以用于创建推理服务\n", - "m: Model = Model(\n", - " # 使用以上训练作业产出的模型\n", - " model_data=est.model_data(),\n", - " # 配置模型的推理配置,包括使用的镜像,使用的推理服务脚本,推理的依赖包等。\n", - " inference_spec=container_serving_spec(\n", - " source_dir=\"./serve_src/\",\n", - " command=\"python run.py\",\n", - " image_uri=image_uri,\n", - " requirements=[\n", - " \"fastapi\",\n", - " \"uvicorn\",\n", - " ],\n", - " ),\n", - ")\n", - "\n", - "\n", - "print(m.inference_spec.to_dict())" - ] - }, - { - "attachments": {}, - "cell_type": "markdown", - "metadata": {}, - "source": [ - "指定推理服务的名称,以及使用的机器实例规格,创建一个推理服务。" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "from pai.predictor import Predictor\n", - "\n", - "\n", - "p: Predictor = m.deploy(\n", - " service_name=\"modelscope_vit_{}\".format(randint(0, 100000)),\n", - " instance_type=\"ecs.c6.xlarge\",\n", - " options={\n", - " # 推理镜像较大的镜像下,需要配置额外的磁盘空间\n", - " \"features.eas.aliyun.com/extra-ephemeral-storage\": \"40GB\"\n", - " },\n", - ")" - ] - }, - { - "attachments": {}, - "cell_type": "markdown", - "metadata": 
{}, - "source": [ - "`model.deploy` 返回的`Predictor`对象可以用于向相应的推理服务发送请求,获得推理结果。\n", - "\n", - "\n", - "这里我们使用已经准备的一张花朵图片测试推理服务。" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": { - "tags": [ - "keep_output" - ] - }, - "outputs": [ - { - "data": { - "image/png": "iVBORw0KGgoAAAANSUhEUgAAAQAAAAEACAIAAADTED8xAAEAAElEQVR4nJz9SbMsy5oYCn2Nu0dE5mp2d84+Td2+SqWyMoRUpSc9wMCAESOYMMWMGQOmTPgfjPkhPAMmGAjZk6okIanqNnXvPeeefu+9usyMCPevYeARkZG51j733nLbtnZmZDQe7l/f4v/lv/u/AgAAIOLyd/2hfl6GowGAu4O5u5uZmaF7jJGZIwciQkSaR1KrF9arzKz+3W639QQAMDNVrc+Kgc3MwOtXBRVVVVVQd3cEZFItOWdDYGaQyMzMjIg+DwBYJhBCWE5ARBhK/RDmOSIiIaoqES3zt3mEeR2YOYSwvEgpZXn95XGIaAh1Al6vo7poqKrTxOZVdXd0aAQMfATLYIVxp/nLN99+8d03n33zTVEpIkShTU3btk1MzByDiAhR2Gw2bdOFELpmc9F2beBg0HG4DDEh+DiCFALcYwOr4TTto8FxrRyxLhERIYGZiYiqglrdSkRknK7k060EFUQERER293pbdZg/oKqKaSmlfgCi5fXP/i6Qtv4QmZdzlk1x97pfdW7Lpri7AamqiNQtY+blcXUO9TgAlFJKKQHeM5Y5rdHA3QFn9EBwd5yf/3jqZ/dZUKiCy9M3f89ARARcFmLZOvdpQ+tCLL8uz5o29RSZ4RS961UTep/ux/rX5cMCN+sbfv8gAAeA+cwK+hXHDdwQ6jdVHYbhfr+7v78fhkFMRZXICJAAwTwQCRczQBQ3GPqRKDTx0Ddtw+GD6+uL1BRTNGtiCCG6FDRc3miG1EcrsPpcidFC15ZrHdzdicjnVzh7x7Mj01PmbSciMyMiIFrOXK/2GfCcQdETS3q6rUegOL1o2btlDutHENF7EeDs+rPP6ADzHQPRGuZOVsHPX4+I1nT68ZI9HvN9EbHebxrmTkSMxEiBuCLV+oVxZkQ44eu0OE/iwPqVlwV93/H1U04miQhwCgf1oTPVQAeACu5ePwu4ARi4uA1Sdvv9/f397f2u73t1c3dCIwMwNxEiCrEyQipZzMDMIqcuNZGw5XjVbdnBHAOniDCK0rw769WEeUrTq80vaGZqE8c1MzSvq0pE5vZ4m+q2rJfIl5WrD/LjyhCRO8AMALDCmQV+1ov55B6d/bSmejNU+fvoVJ3D+sj3IcAZmh4f6SerQEQ4AwStZKWJaD2a+uOZrd/qibeFJ9Zi/aqBOXAINAk5J1N9jJN+PPr4Wev3fZJKPcaNsye+b6ADwQpYEM3NEdBcIhXVUS2b9nncD30/DJU7O1aRxQoUAkA1IkIgJALEnGUs6qqEkjmT20Wz3cbmxcVFmxonLiIGE8U5vtH87g7HF3SAKlcgYpHi7hUBaL6ikhtEXCjo2YvbxI/95Lbu7jhJShXgkCqkV7kRT9n+Y2p7Bij1yBnowyMEWN+wsp31g87A770IcIbo53g//+UZoBcZcaJzeH4TeA+4rN8KHuPb6YBTpomIKQRmjiEsCLCgGTwF1itW9cRLnS2Tu6/lluUmZ5z05A447cBEKeZ/BMiOy1MQ0NzNvSCNbqOWQUqfx7FkA6wKFfhEQwjAtd4NTYEQAVAM3NGAwHzQIsPwWfkdFm1/9tPr7YUCmDuFSHCCAIbzhE8hb3nxqqtMItCZPDFjy5NQcQp/E9yrgZmZ23pbF+D+/t1fHz+7cP3o+qAzgoWnCiHOOlh9tWW2iPh
HIMD81CNghRV7PUp7gIAnYPGYZpxR2RN4fcS2Tm4C6DC9PxEycyAOHBbldTm5vudxexawO92281f2E6KyXtM1AvgjEeh9g3xCAICJQsC8gvXug0sv+ZCHUcqhjH0RdecYmtRNTzQDQzQHBnRQdUdHcDNAJGYyBZHSD8Phbgdqz6+vL7cXmxACcohRRZYJuzvRvAsr4MYZjGCN+Wdi5IoUr0FzvZsLAvjxPucMEycJcKZTAAuPOtvrx8u7HDzbmvUJbsdnneHGMtaw8UcjQL0bnYo0OIt6tCKrOOu7OCup+JT88+jdnvjp8UIQETAxcyIOxAGJiY8nAOqpqObLriwrOz/ojKLAUxre+vgaPp48YRnkE6At5z2ez0Me+rHv8yimfZGx5KLijjFGd3dVMTRXBwdzIAAgMFQ3ByAMzFxci5aibmY39/e/+/Lri6Z7/erlZdfaY6VrkV2RlndxM1jhwHrZF2Mawbx3p8SLjrQWfKWDzm96AtmOJ8C3Bug19K8/nBGykzVcjeOtVBdIg1Mii3gUCJcjfzQC4CNVchF41tBPPhO/p7D5fRD2/XM4mwwgViW46htVXJhAcyWBzI8E8AqNv4d4rxfOVyLQ+yaPp7LTdHD+hvNqQIWVukkAAGAIhrCX3GseVdUtm44qpWgWYWYzMwUEQ3PDam9FntQ2AycOzDGJjQpOHDmhmr25eff11cXF5abrOjAJp5Kez3PmGRTMjE6JOqw3d4ab9Rs9uQLL4qyXdy1oLgiwBsHlw9kTl1/PEGYhPY8p6QL3awxZ/i7Qv74/EYWFQixybRWmF6PK2RSrDXWFDFhN3YvKSw5EtGgEYJOBf7n/YvuvN190lMWmviyZmQFN1tzqB1CxxU5HfNTo692qqd7dqxn48agkalmjRWqieVa4Ytl1MqWU5d39kZa2XqX6VV0rlAQkrjYHtRWVQnMXtwLWl3EYhhG1mBc3MRVTnW3yIYRSVDQjInIIRMyhPkVEDLBCPyIicEwtMzccUEUB7g/9Qz9cXQmFhvyEUvoCRj69xdl4kujA2nI9W+KnV65kKIRS1N2B0N2z6Gywx3qOu6uquj22gNchInhqzl48J4/hcAHX9R3qfjHzsoMVVs2slDLt74wGiybwB3EAWKOm64RAfs4EEJFmbgBHGvCIcp9+Xm8DPFKSFlBbcAPmHa24CivkhEdI/763ezyxs+NHUMDzUU9bDKxwShGZGcwBgACJiByMJuR3QgAwRjMYs+zHYTcc+mBZSjZR1SzmCBQ4QkInZhAm4oBgSIRMyEdxeZoMIQUmZUAjYiYEN0UobgKuCKZPy8FwCv3vW6JFAprM0Kcscf11vWXfN+yc/E/rWbmFT9p5NRs4TA61kyc+muTj4+uDC7FbnLALgJ0gwJN07jH4kk/G3eMazQoVzWgA1QE60fKn5Y0FgNazcffKNNfGrPWb1zOrn5JiwFnpwbNdOUW/U8ptZy9FRLicf2piqhc8QoGJE9bPZ0sfiB29in+hro1P7l9xt/oB/CD5Xb97eHjYbYJI5WtQDf/MTBRUld1DiGYzq2EGXETYyk4diTEYO0MxDsxIrGagWcZRJZGtFcEKr2cL/hgHHq9PFTXh1BIwr+fJNq0XZKFWsFA9OFG9zi6ERwNPjSJPUrezrwjnoF8/hBCqJ3htgTWz8D7U+b3zm1ZnFlhodTo9JU2e/V178tbLiis1BVZ7tjzR3auZHJiapqn+pLPlOHvWekUWhFwPekRFjms3s87jyUQwI8B6oeoHQiQHnzWNBbwMoZgagBGPJrsy3B52d4eHHjfT4xBsUhkZEdzdiJGUmQ0RgB3JmbGM5gZADgoAiF7DPRyUQwhcgzqgl3wofaMpybkMjbN93U+DC9YkZlm9xXpxBIBzZnu+5utnnUMLkuupde6pNT+7A56KA2fvshyfCdbTuLQIq2a2WMzdPTyml3Ws5TxceUBodfLa8wVzzM+i8SyA/r43/L1L8PgEmH31pRQYKcao6cJ
PJVd45Cc/+bye86MnrgF6OenxRp4AxMowioho7tVUYq4AYK5uZlbcRhMhcOBdGe/6/V2/vx/7EigSp5TqDdWh+m6IiMiZWVURyBGc0AjQBHAKrTBQQEYGQgwQOFIITIbIkDUPeezz0MopBM/TXuSBRfVar9XjJV3j83pTrHqLH93/8d4tK/7kttIp4VsdBH8kEj/e1uWns31Zz3y553oTT0Sgsx09u/sZSKFPlu0q6VZVFx+f9n5B/DHuHZfx9FWX84+aAICq5pwlqFavar2S6n3ofAsdKjteqefHuR03dUUI12+xgPjZfHyWKZdf6yF3d/OqUoupuAl4URnNDfWh37/d3d/1+16y5+gxglJAKkVFxMAZEYEoAJpSiK7qCEDkiFNwHfFiWGaqQWxOIXBgdmZCBxglD3kUiXWP5rCwE7P9Qv7ft1/H0+CIACd879Ty40/yXl9IkoPNt50vr1+rQeWcGjrQKnTiSRBaP6hu69kOLtACs4q8cIOJA5yduh4ruJzBpt59hv71CfDo5Z+853FdHj1ltSarDVhJbJURhRBijE6TbeHMVLxW89fYj4tyMvNKOCUnFQGW6M4nQWG51dqheLI9FRkmPgCzhGGOoO4KNma5P+xvH+4fhkNxo5zr5eIwDEMphZlDTGbGCiEEBXTCRRXhgISMTMRMjESEjAYYoGGmyBQJIyEFUrdRxho0iaduVDgF5ceUaD56NM2dIcDjfcRTmeTJUemlr5QQWAD3KR0aEedQG19Q8eycc3CHE3xY7lMRIISwvEVdliesQD5bJ9eLcmQcp0otIvKRuZ0zTXoab8/nt36Qz2bTs5N9pb+HEMxMYbIO1V/PcHVtJlujwSKS4ermZw86I/nrrTq75H2+xvX50wlMNVa4mPR5PIzDIY/IxDpZ1UBtHEcRCSGEEESkXsUO5s4AhgCIzFztX0izIWHeGiZkphA51tOq4fEUyh+v6mMEWL/mAnO+dpytFucPHxV23nfR2VIfCe5TkhicIh6cbuKTd67h01X6r1J0vX8I4QjoAE7zNUTVKG6IiG5sUClJtc+YCSKiozvZtAnVgAMMQOAIgG4Aj6w58/DJHU4Ok8hQt9O0ICISe7WJKRR1dRMDEQAgYMIQQSUSpBQbjpGZANgcARggVEFGHAAdwRAcqEbnGwKDLBu4kHmxGpjjju7kiNUsZ4juKFjFqsm4aRG8ep0RwNAQnRjc3Vzc0IIDA5KDebWZEyO6AZq47HW8zf3b4f6m9EOA5qKl3nPOVTOjSAhk5IICEQxMTY2VyBGxZiaEdKmqThhi5ClWXhMyujeUutS0HANSJI4aIIfxEtCN0QNAcE/OAZmAqAgiAQYh0wmNAQF3D33btuSoqhBwHDMiXm4v/Jj7oVXmNzNVixRADRADBQMXUzHDOaguYBXQVq4e0OqKcXBzq0o0AGj1t8wWFHdzVXMvcmIyWejXkkByNtgRcIrVn7mGu3skdncrAjPVrq6KsEgvE9w/hT1npPp9xPt9KPg0ApzSyJO70RHLxUzd1U1ViymAByJmTikBGBHZCsWqPKruTwRUrJ77mKKfcV93R6sKzcw3ZluHmRkaVufO6UK5u6spKEzha4ZWgQYUvJg7grvnnEWEiBjYzOaMhklMjwA1g6dShCWfY9mmREFEFHwt2ABASinF1MSUQmTAGhvCzK4GlZRMBpvJhUQxmJmIFhExdarkgNK2Q+aSs6ITEjKr6iCFlicuBhIEYKoiPZkZqrvXuLf1Cp9t7pmZ8uy0J+WohcY/Jv+PH3F2wu/lV+cIsNZilysmVrQ6zU8TAOb3Or7h46mczcBPRdLj2zIBgDqoW9HqH3Uzyyoi4ujIIcbAjKqlXmJwXANHW9j+ZKRGBLAlE+sxAkwLBADuuLCs+YhaFVGmrWKkigCVNfPC1GcWchRSZ4onZgauYEBoZsMw5FGYOTIWM/cpG2uCdcSKABVJAhIRzNCJABApIGJwh5UjnIhijCmlpmkSBwZE8xr
tg9OLQFUz1R3RwU1EJomwicGDghu4gQOTofdaTJRSxICebdTiqlX1QiQFNTee5ZRqTqobMWvV8xY/Ajg81UOehJPHsPH4kjMStkKD4yXvE1DXI8AKHR//jEcwf0JRXuPA++5wNvUnkfvsiILXeIEK/VlE3cxRwN2MwRMihWCgpRRDMRODBADkYGbgix3AAGimLRUuj0BzQqXUcNbv4WjfcACwKgTOdGuhBI7IgJMJEKYMQzRHxroJDo7IiIaIjiBa00p8yEVEYowOVsq40P6JD1SQWik2tBoAQEg1TAgBqxYUkKraUAMoAgc0hxqBbIbOCIALChEagCPsyxhjjE3TdI0jjiXvh34c8ygDM+cyqqoJB2I1ISAthV2SawghECNjpTPpuFZeQxfW2738PR7E4/o/BZBHqMBHavHZPdf7eHzQ6eb+3hHOYyqegu81B3hSBEI8hkCfxdm/jwOsQwnWb+UE7uSuai6mWbSaER2mUHud1F+asj9J1c1dHVgBwAHBq/cUABwUkAFqFDXQ7IifXsF88v0uLHg+sExJXYloAnf3GR+AiHxlzkNzcrATcxAZeIW2errgMfuZmRkQ85RqXB31VWclMxWROS/xZJHPrMbm5MCBK21uYqzQWRP2wLyKLITISEQEk7kU3TFtOmTSQAYmZvsy3Pe7vu970a7rRMacc29CRK6FkXLODNikuGm7zWbThAhQJbzlfVdbTIhyEtKyosdHGWkNGyewtgIteIQDj5nAGbifYcj7oo/qOHGEPR4TWVoHA74HQ445AI8Q7zF3A4CaYI6zbnQ8GcjdpAr96upW3NQNERUBHcW8qCAnd1fwYlqjrGYDWKikD4ENfHHF17/T/M2pZkguwfGLO7MGzc/Csrs7TZRYwQmgxjPi4rgxN/BFQyBE9RpcSWv508CRSN1kDtxAxGopQCIkcoCaiIizAUpKqR9qpAbN7mpGcua18Z6IIocUYowxhMBIgI5e88YQJg/N9FBFAAQjiF3XSz70+0MZ+3E85GF3OIzj6DFeMEgu/bAPAxFgzhldy5hjCJumvdzmDHax2YQQAHHUKYiNGMlhjvMGIgKbgvPri8wWYTmDijrW+UzwFAlfgzs8Bf0zJJ/cfKGz74uPDGfIt1g/bZX8doKVK950Qpbw+OvjeT9GsLU7+uw1xE1tEkmLmzqoo7ogoqOhGguGYEBIGEqRYipuiEiARoZadb4pFLFmQfks/BCgnYlhfvTmHKF/HuZe9QFCNPCqky47M6FuxR0AQAzVsozgbkTkSFPWO4FkKaU4ADKrmpiYGdBsp6tGiRAWhuCz/s2ADJPfPaVEImYmdgxrwdmiNYVjzWhGRCiC4MAzs0JQdHW/u7/djf397nDIwyAlq4wll1JAkxDknIe+b2NC9P1ul4fRpXRtWz16hzLeHdqLbtM0DVYdBiBUEoNYy3dUZanO2Rf7gZnaESHXHxbAOyPwT5LmM6A/ObK86LwsdbwXAWgV67L2Eq+ZL5wCvb9fp0bEMw7g79EBFvvGmQy2QJ4YKLgjGaiBF3UiB3R0E58CbZnZshiomTlSRX9nR0ebU1+mNaKKBjPXmkAWAcCOeq9jrWtSk1wrAmCV6ib4rsahaUcR0Ku790gLQuDpLYjMwNzYycDcXEyLapXzTU2LPGbcVR+oViCcBTmcc6wRMMaINcGvuKyykSbwUnUzUgdzYq5CihOCGZIbOiCIuwN+9tUXg5S+SKnauXtxK245D0owjmMZMwSOyKPKvj+AO6dY3PZ52PcH3j0cLi+vLi9D00XwSAjkAbEuHBzD7vFsW3X2e5yR1qN/ZgV7C98+A4/vP3IGkIv69OQ4QYD1Tc0M8Rg4sYB1tU4sdzziySo6+mxTJzlgxWRw8tQc/bWV5pkZhRQcBFd+VgRHNHURMS3kTuiFCQGkFDAYS0kpEwFAQEdCJIYaeOluk9rl01PAbIk1XWhPXXRmVrcamB5iUNKSszMaArhZhUIHc6s5DzGEGIOqllz
IoWmapmkKVEpT/Z3mju5GwMw2PpSxFGZu2zYPxsabwGRTxlxKaak1FEK43GzHcUSHrutqRBAzX2y2IUUzG8dRVQmQajkMc3R3NUNjYkTk6iY3xcBZimvpYtx2bXG7efvm25t3tw/3mAKmGMAHKUWKmAJh1273+72IdF1rZrf7vRRttxtyAOLd2BMgA4YQ9OFhPwxDiK9evXqx6RBpzIVtkta4BjM5mk1+BkQEJvZj4h6tsgWX7VgI5cQJVW0VlrYoS7Ai/7gyHNkqC56ZY4wVaGG142uec+IJXsMuEa1MH+v5vQ+Xvm88EqPef9okNVaoJau0yz3E6MaCiCoIjE4Ihobqqm5FNQQjMgOqJonA6FanvXrzyaJ/fIu6UrXelhGYuro5oLsBoTNlK5WPVzMNARIiIKobu2tNgUUwAIWKzzjHI6ETVvOnghVTscmboaZm4I4+W/3P5AE0DyFILtWsjg619AsiljFrkTPhZxmBmZDYALxyHc+oHENsGm7SIY8P/eF2/9DncTQJwCmwz1FxlfOIOQFGDoETE6gqQw28QAYkRDORIiWPo5SAZIExhVqfKyIBYY15Xkm1U4RYZcGPwWAtGpyJG37kJLD+leYKP49Z6B87ApzC/ZolzbA4QcgsGj19oyfhGxH9NHlgGWeca3lPcgBz9CkFtSYSIkDgZCbuiuA8Qwm4q4toFmvUPQIAGABX4CME0+XhXr18hGizvltNQ0AI5sik7gIuUP2T5gCCnlXqNjAgI0VmRgIkgeplJkBwQvSptg8AoE3eDAQEYhcxcFErIsVUxXUpq4aYujbxlMVWGWOlfIxUmGs9lVrypSLKkHMpxdVg5s/VwrMs5orfO5hny92mTdtOAR8eHt7c3dzvd1nFAShwCAHNYmHgwCmmlPaH7CEiYpsaRGQH4VDjVavZp5SSIZdxyDmP7sXUmZj5+YVvm45jAq8c06pOvEiYdSd4Velt2f1F/KNVoibMCFDBHU7J6FpEfxKQ1k/5HtwIj+WWI7yegjKe1hV6PBZh6YyXwSnon036CPo1TAqAnNCk0u91kg3AsSoRIRJwQBoQRFWtAHRA6IgITshEhOZINhWRqKb5qUwDymSCmY1bhO6g6KKujOKWJYupqg42BasRYGBWD5FDICegAGAIRIhEZlZMTSEhExG4IzA4uIuBm01JPGo2B/1Ps2qaJoTgojnnyrITB0Q0UWYmwCbGJqU6ARNdAqJoZbGAGhqA7MFpDr10METg1HKKBb0fxtv9w8NhL24cQmjSsWgkgDVNII4x6ghNCsRYhYds4KxE1HWdu5tLDddj5pwHVS1D2Y/D7e6BKTJSRCLkUHMiVuCEUDVkrK6YRSU4g7o1+NZB1elxKrrg7CpZU1Jc+WfPsOv7EOAcfNdfTw8+SeOfRIPlw1p8enyfNQ4cH2pUZRd0qNJ3/VVU3dQM2AEdGDiwg5C7F9MiYqiz2IbIU6DJ2Yzr2oADz46xygbMQdBFtIAJWDWJZCkiAnHW/h2isag2rIEYG0RFRGSqZQsd3NUshLrfhGiGru5irm7VVFU50CS2KE4+CkSdyyUs8quqMlJoU60K6nMeXJwBYr2kBLDUZaoLi3Mw9uWr5yLycDjcPeweDvvsaoRE1IS2Kg/M3DTdwu2vm02dBhCICLEjJg7YNK2ZqR1LEGC1LUYi4qJyGIeGQwDk2MbA86YiAyjCEkWGc0b4+b6cKgNraH58pq9CmqvQv0aDNZF9jGlnIzx+AJwiU51ZDaN/313gFD2O23BqHl2PM9503MtaS8KOHhB2MkaTAlbPIQJiJAYHTuoPWlSiuLuh4VTODMzEvRKiKcC+pu3TpJkC4wkOiOigJUvJplnKfhxyzuKWKPpyvpmiikhEUtUSo8amCbHWXpy5NpoBghmCuqmpmSm6iNRNry4w1kjkjlPSfXUCLPSiSvkpxE3bNk0TiFU1ICk4huDuZVX8o9LjlFLkwMwIWC25lX7Frt3f3r67v725e8hSHBG
ZDDyGWLOWW44pBHTQIqLStpuqkxSV7GOoAliKIZCY5mwGIjVxX8iRustLV3NAMR1ybikIRyICQDSfPfFus4+2xuuvKfq0748K55zB23JV/WlJfseV43yR1f9oDvAk+MIC/SuF+H03WkD8sSPshMCvSNca+o+LMl/u1QA/z4uRnBx1quockAjdEEXE1YaSa2YtmiFwpaBQFWIEJJuylOdMGVj8VvMExHTI434csmlRqYV6kEhM66zMgBHVlRyyQ9/3bWq0FWu7igO12huaO4GpzggwZQYUFTGtAhlhQJT6jjX+eRGCpxIYKO4eQqhg7e7kAJX187zf80rGGKttFHGqTkBERFx1g7uH+ze3N2/evdv3A8eY2gYRVS3GGDm0sW1DJEBXM8AAWCtOA8CYM6nHOoeUhtyr192hgGTM9aExJVMlm8yXWaTiJKBPRtrqPl+Zj5+EtzO/0AItcurwOuMAi4HIlzoRfoJav18HeAymxysRjozpD9aw1+C+xtqzcTanE2Jw+qz6zjUABpjJJzsXATBMtLNCv0+hoHgMInCoeTNL5aJJE5jJfx2GU03wcRxHldGkqAAiM6l7FbQBQMzQvVrZdcxahBEjh4AEVZLG6dXMvYaXuXsV+vVozjvif604S6sk4xhjIFYWLbJEyJkZzAkcBeaqjDNtqnEQWkQcAhIwEVEKMYUYQvjV29++vXl3v3so6psUuVYSQK3W2G27bTh4UYUCBtCEGGKV/kFNmANxapq2bYfxUH11dcKRIhHFGA3VK/1eCTDM7FALxWMNW6ov/BiQFmq4lgtgbf9ZyTBr/FnQ4ExDgD+WAzDOSmIF9yWjDADdoLpycHoFBEiBK0+rUuBEtKZnV/vi2p4FNBcyqEUvJgkS6bGjrX7VZCKg6IZYFEZEAVVABUPyGEIAiDExBxIrkgNbP+yZYV96LDHFDXN8yJJCNFFwj4aJMNTEHbABhYhAqxMWOLAgFNO969sy3Ay9gQdixhgAg3BPY800AUJzEFFxcTCMFmHsBz+YPO+2l6nrYgoUdiwIwFDlegdXcx3Rd5pvfbhHQWZDHTwbKDNtmYNqCvH5s+cmampYNHIa2KKRiQO5GmgNZyPK46hO6kQGCBQ5RCMYTYbhYnvZGHopqeOX220g3u12vxvGh1EKNxfbrkstZ0hAXdhcwuYSN9d8wUiCpdg4hXmHqKJi2oBDDDW0EE1DCP0wiAgFDqk1syGPLpIoGWuigAY564uuvdhcTCwLHMCQHM0QlcDdXQmnqFE3UAuAxBw5wJwsT+5MpItInAsBRFjy9ZYQ4BnYwQncwWdkyZXR1kIubuoGtSImTFLyZBlXqJl03zvWmsBy5AyDYWXV+X4RaEFHe2RMPROBjgbjKrCDuylVBXc+ofJXdTOdJBp1FLdKbEcvLo7qTkoOtaoHG9ag/OovV/Aqu4tb3/elFDsy24nqGzi5myo42mrmlWnknHuFBBSAAjMRTWZf81MrGri7iWopCI48uQIjhxrAk2IKSBjB3ckn91AIiUKoy1V0IrRNSiJCPrEUEUFzQ61p9Y7AMSDTWMpoYz8OMmYEqHGfTWqiYwLqYlMzBxbtbhHBOQREdAWZilg51PhcszpnQCylmBk6pBDbbasiyQlEyaQKn+LQUJhWYCb8S+J4hYkFjhnAV0ot0LEE8hrYFoHiD5HDzU4KFh1v/ujM31Me/bGstr7JGfF+/GH9+UxWWyyqC+gvyDYFmnu15yt4FcGNkQAMMdDKQVFFdENQdzNQcSdTMUYyEVdzoNBycoAKtdOyel16M1OwUsr9fjdKdndDqOYjrZGp1dTt4OLmPvF1d1N1dVUHUnZIxG1MU0HPukpVZnUgB0ZsQkTzMmZyD6kqoNzE2LZttT8GpOr9ACdCBHekAADZZO37jDXojM3MqllMHZCAKGQpiBhbNsK7w2489H3fj8Ogqg2FxIGR2IGZUwhd16WYQgjok+5RoZBq0iA4gQVDIwAiBRARQ6gpDTLX4Wu
aZtN1kktwdCguvlhyiKimRCMguRuAufvcL2Da9EfkkogWGmRWqfcpRM1C5vvgdibWJ4oEzUUSHl/2ezjAcos1E3jy1/WR9Yc1cMMKrd/3KzoQAiPWiDCudjRwMkd0RKDZJzfhIrMjEwZHEnUHZSd1Q2Qx8GIYYLINMCOCF1eAapZAd3MbtYw5jyUrOjAhgpqDA6EXVaGJY6ubrkz4UpTMgwG5JbVsLoBGjFbj5IGmAGlgJCbYtB0TuSgEJsBEDAQpxCamqtMf38hdazMVP9ZFi8Q1LaaGsjKSMxlOhSiJp0kmstg2yNzfHW4e7oZhQILIoYkpENeSNolDZQiRQ30oYg1SAKrZDYRExMDI7IwCXkyKSCmlqNStpTkFhw3AMQACs3OI9d8qs7Dy6sls7+40qUGLX+BJ6HKfysksHti68meF4h6PxZW2El4IEeYkYKhKoM+y+veVRnwS6M+40jLORKAzwv/k5eegX60BgAYYgRghEaqTAYBbwMn0SeCEToC1aGhMbWycuAGM5lP1AnMUA1E3FXeWqOIQ3KlWWJhyUaFKNGPOh3FQcEAkJq3KQU08MFe0GiBUTG3hUURZCjsisRMaoSIIgxDElXCHAAHJySNAw4HUXTQ4djHVN23bFqY0SyBAnsFi2khDcCcHJkIEYELEQAREgVAEa+I8z/GkQEghUIrmftCy1yF7bkIKzE2IkUPi0HG86raX3SZxCDAlfuNc2wYRRbW6uBGRmCBwllxKGaWM41hUQghN01TTUyBCNTInIgYy5jBzALMlP8zmOFBzdzsFksmdtyTBIrj5OpN/XcjMZ9vu9+MAnHqxELFaxipvXgMhfo8ItAiFT4LydNdTTFh/XT6ciVK4ck88Jv8AgIAMyITJuThEQDdFZGRHdDYkxIDEgRCCx9jipSjG1DIFQ0JHRwCcwtpykYZslNSoFmZyhYAINUPRAbyojSX34wCIwFgVA0YERjNVFWWvATylFjBEICIGVDdCrrBSwEbXbDq6pmnpj3vGiIGQDUAUijLRpmlr3dyuaWuattWo4hAYiWYzrhmgqrMbAtnka4tcm0FFYcmYReeqKiLMKGD7sc853/b3uzyoKghX7xMBJA6bpt12Xde0qLbAVgWOSbA0m6QURGRyQhHZ7fc556Jic1maGq9BgOyASA0HdFMx9+rx8IBUEaAScvMamOo6BWtU0MepguqqUM+ELjNa4lEJreu5lIh9GgcWeKN1UX5Y7nIezfZ9CLD8Xe77pBrxPdAPq2jnNaNY7nmGAwBAbgaVHFIiMEBHIHdiQgck53nPOBCYdHErYhQicAAnBWdkZnQ1Q6ypYoubRMEjkbo61gg2F9NRyjCOjqBuxVxMkUNkNrWiKg4GXoP1i+myvokDIAr4aIIF93lo8kApXsQWp8B4ZJ8MYUAYQ2CkAJiQmxDrykymD3NDi8w17KciQH0QuQdnRBR0clc3cGciZg7MhJ7zREoMNKTo7ne7u4fdroY8QCAUQ3SKEIEicZqs/kpI5FNg35owgVWH+lTUQ1X7cbh9uB9LVvcQJyOpiaKDRyJzAqzFipCYAM3MRGudqyr5iJn5VKRXEIIBMjvUlpUENCV5VuiXCv/VhkMIcuLfrUrE93CAhbYuWaD13Dmg6ByAf48ItMDxkwjwPeOMb6wx5OzX858AauVXBiRwRuCpCQMTgKvgTBuYHCxE5jDmWufdFKbinAiEzHOLSEQEpprDW1+s6rI1hFNVs4qwm3ldfaejw8VtSi/xWu/QJtDsUusO5j4WMfUQYl9KkAJN56etARkQkLrUNBwCcSBmQDPTIedgTdM4OcGUYOhqXjObzao4V6kgEYgZOWkuAYk5BCYMcUrfIQ+pi01S0P5h6CUremgTx8gDpJQ2selSs01tEyMjmWiK/JjDuzsQevXqERqAuA3juO8P6l4dDjwHGJMDcnBRAuSIhJNQWpe3qNZbzv6ZyWNlZspIs5sVVxEQMNtIaipflfwcjvkDj6Hl8Vi
LN/MmrrwKj0Y4uwZmqr+21TzGAZzsdGHx1NRiVfVtl1daf16cU5Xlrf0AaxaBAAHIAQopOpI7E1TvjmpxJwJXlZwzMHEMrnmz2ai6FKtokHN294BTX87AVIMVqs84Ii+PLqWMkoFwu93u93cHGRWp6boYY8lipk3T9HqoQTg4iwpLwGYeRgJoU4OAu6HXOyhgzwyeXz9TFVNpuo3moiIXV8/vb99qkW3XvXr2nDk8HB5sbrBJ83pWNQCRGElBsymohRgAsahWl08ItWSTMTGGyEhqxcCZuE2xFx+GIecc2hhiLKVctxcppTY1XUhNjG1IAdBVx35oUgopBSR1KDghdtM0WQUAYpMEfNz3+6HPIi8/eFUXTVUdICEzEbgHpLZtu9QQoIfoajnnFMJYShVXFt5ba8wQnSDe9IEoYO0obIg41ZhQRcQ4d6tYOMDCHuu1PBcCrIayJVDKp2gAqLkJNdcC1rzu+5XgBe7PxJsaVIxLiMSCZadX/d7xvtPMxBxrzWeuaAYu4OTuMEUuGIKBGhKhNSEqhoxiepwwA2oRd+OAMdRUcWAke4pCwOy8BAERgXEkAJqzCgNxNa5O7liHSNxUi6eauyMTOIxStD+I2+vQbKVEqPFFUkXanDMjdanJACklJApEkzpoNhlfahaCGcPk82J0J69BBZOsDBCnpjgAZmBGpgDIgGYy9P1h6MdxVBUrAIhO2GDoYtOltokxUWBEdjRAJKzVU/zYQmnSJWKKgemQh3f3t7cP947w4tXLqqdWVzov5Ntrww+1YCFEIjI2k9KX7O5LUkDN0q73V9MAILi4hpGqdwpg0n0nGeAcMNaTPDsOT8km+AfoyvAH9giDFRNYuATN6TywcvngU46ws5usIW+54XJE3WxZGiIGM0eGKtIjoKlDcHcEA1VkpskNXcuogCIiOmKRHBFSbLoUU2BmInTGKRC6aklzSAUCQAiBRqxVq1CtDTE4AgBzZHYSU9NqOyUKISRGJgruDhTQwUH2w7gfxvvN5cXFxWXTMVOtekVEIsKIbdt6jE1MZSogAtWdRAGJcfla4/6JyCfPOc1JzYi1alBNfZJSoSUQIQMS7Yb+sNurlAku3WNKHbeX3WbbbSKHQBSBEKoqddwCWHmLAiESFbPDMOz3+8M4QOCuax4OewZ0B6wRQVSrTgAtrZpDYGYTGbTkUhDxmAfvvjjB7PhEByRDVNBqn7dV5hfMzfbW4wygJ9BfHT+KOrPcDwCLRPrk+D1+gLX49YeftvCgs3POqO/62rPPTsgIDmRAZm6mizQJVXZ3xurVNUOAQGxkqK4mYNV6YCGmTZu6JjXMhE5uiOaGs3USI3OQKZc8cSAiEy1eCDAAEjI6EEFAcg4KKC7iBuaulkVnDosUGIXLoOM43u4ftvvtJjUxRDMgpgBUPUdd26I7Eck4VjGspjVOrihEsFqY8ZjxhDNn8Jr7S1ihH0ytVnslToFDCKFJ4ziiaiSGhEYYInddc0HdRbvZbraBCMWqV50rfMAxeC7MESsc+TD0t7uHfX9ApnbTDVJ2/aGUUgDIgQwiUgjIjMyckGtBrioDZxFx08nCPAXDzuYfAICaCm5oTks/Kte6qj5HT1X4f2Q78fVYhJlT5nAUW2aUnlWKPxIB1uLBKYAesW05vtZOzj6sBa/HHAAfazZEAI7oNZ6RwcGyu+ec1cXMCEEBCwhacAAzZw5EEIjFi892lTaFNsU2xRRDQAMTNzdXxFQrLACSky9OKHIMxCEEHUcroiSKDmaFAQBqwAIRQc5WRDADgKvW8mz1ddRtyOO7+7sQQhfTq4vnPCfEVV2oaVtwF0SbOwC4+5JvWeMOplhX4iwKtdRXTW1Qc6yVvwRgKqc6JamFkFJKqYmBA3ETIhMoWYypTU3Hbdc0XUzkYKTgtV7AihtXlWyOOhtEcs45Z3UjZgIrKof+gERY/YcGgack5jY1KFqtogZ+GPq+78WFiETVasWaGgU4c4AGCBGr3LWu4Fk
L+q370fspoTwF/iME2qkItKbCR+vq+/IY/0AOcAbNZz8tXxfuc4a1Zyi0CFFnd5tOY0dDRzcEJ3Q1Vc1F+nFwV0TkQGpIjsUUrDA0SAbIbmKqpiWEkAI1MbUpMFOoVNTN3dwUa6UoREaGOY+kltyJMW6attJaVR1VvMgYpkCamjmlUzsjrRBcq+RWw0UNSn13e5OHYRPSVXMRKJh64gAIHGPXdS6Sy2hmHGOM0UTY5yoBSBgwEi89j6uvwMGrdDyZ52cQqcpwoKloikqpERkGjmyBY0ypCbELKRDXuhluVrHdZt/ZFAtEtFRCv7+/N/AYYwDb9/vd2OeSYd4dZgpM1YFdKxHpmKuxX0T2Q3/oD46WUjI3cRUT0RqoO8k5TLGWP1AiAid3cQsGNex8gm+cKievAfpJHcB9jus6szcejUzHVvX/GARYoHO5xWIXwnUZlfc84DGSwCkHeHLobD5TBxHJOQ95GIZDjQVoKRqQmRV3AEOMImKW+8OwH3p3jMQhUGoCISJYrS2ORObG6AKADusuIVN/VbMmxO12CwAlZ1CTXPIwZgZvLIXIIVYjpjowkq6qD9hS9YBod9g/3N2/2F795PUPkKO7OSEz17+mUkoRkYoA2T3i5D0lokgcQqhlHSqBqIpCLWnAgMyMc4onOyAYOriaFpFRwDxyKCBAzLVUbggxRnKoacSEWP1XNOfm2spdk3Ouc6udHksp4ziO4yhoRCRukTiG2HDsaCrfq6WAT5g/lWtGhJq+5y5mRVVqwS+fxEURoYp4RuoA6GGOLKy7v1Z/q0KMpykvM5LMHtXT4IjHHOD7xx+EAOtxpBynSvBjJMNTNeCxtIOnWsF0ZDIZmBOam6qISCml2j1jDOrB3W2qvWZqWYlLKfvD7rDvmVlTJOwCElVnE1Y6D0SgfnTCV9CfA7nBzDiFhqiUYqomZXqu+uKmWd6XmXOfIQZaiZ7VTF72u+GwPwy9IyAzqjMzxSgm7l7xuSLATFMZ5xWo8aFEDKcccllqRJw63HgVxx0AXE1dchm9mmiNkCjF2KSGOEwVHUVNNXGopR1gRrDl3evLjuPYNE0/DofDoR96cav1VURV3SJxNQFX5llK0VI6JK8tTQlTSmqWZXR3XcW0mVntDIOAqmpzCZwamWyIugbiU8EbHymTT44T8n8Kq98/AphXPjiN2TjAkxvI3FyWPUBKMH1gJHJAnRy9pgbutWPkCQqmqYJ2nYuDGyiZ4hzQW1XAxWl3VWIhKEQ70r2WB9cDk6Rm8F1QDAhGKkTqaCjuziUyA0C8fRi+/vLry+32+vJZ4uRqMcTIHADJnJFUxMfMQWvAL5AX14zqZAGgI85Z1GTDHDbdGFmDUwJ+e08hlKFXVEWQBjwEIYLNph9GtrwBbDF4lngYu0P+m44/vPigdJtxPzyLV8FhP4w7zjcb/A2N/QZLavNnuxdvy581V9ux+fc/8Fg07AuzheeNBtcyvMQmDV7Mv2D55oIObas39uGd/QS3N80dgBqBollgZduDiMigo6oCI4emoRghtjlGjyVpCIECIyKFQIFrZnuMsUYHZpU+5z6P2YoHvGvz3fhwV+4HHcXU3NwxBoKiARwdxPQAxoiBIjMzcuratNmollFGQwF0QzVVQSjkI/qIoB44Bmb+IDZLJAUAKoCjG0CRUqU+qjGpczWnQeY+zUyO4DrV1LBVh54jqoCXObLJDHKeekEwkrsREgdAkVqdLsYahHuKJWeIeIZJR7FhBll4ivb/IWMtmS239TkaOasMJQ9W+pyHXFQVaO656e7u5FMp8+p6HIbh9vb2iy++6Lru4mITY3x5faXuwV3cGHz26q7qusGJ4FirIwSqzkhUtxCCqrbPrmuDRz0cjBBjCNwEYndX5lo1vyoVwESBO+Lqah2Goc9jB4iMATA6x0Hu73cw6HNqtoUe3r65f7d7/tFPm6a5yfc9qVhphK9jG42VRRgZeaOAB1XBEEJuuGkad8+a1VxFoBaNQSy
l1PhWmu1IU50fkUgcYqxtm2quPccYm0ZMi4rM5eRFZCzlm/u3fd+XIjXa0N1VtJQCAFXUAXOYI4UYsOnaRIxulcnUEkYMYIFI3dmVzB0VAYkDB1/B0qTq1q9MS41Kd1cEXjSPOYl+bSd9DHvTT++BN8RjnuAUyETk7mGR4+stlh5jT4o0sHKBnUnA/wgEqLNZh/sBgCKIaq/lUIZdGfdSctHFJjhf7FCL6XuN8ZBx7N+8efPr3/4mRX52dXGx7S42LSIwAtQ4LCKq1muoVjYDOzbfJiIGDoRWSzkb1/129zZxERlKRvfqjWIDFOPABuRzBTslcCZP4dJD56hl3O/3D+2e2hY5BKV4KM9ykN7TwC+sxbvbu198qe/22w+7q3/6o7tNY1uSpomjbEIqu94SDQhkfDFqU1QKItN3OF4V5RBiaFQGUdOi5l5UchZEZAxWwYkjMxMFG8RjYiSYk4mRsNbAG6WMOWcp2XWQsh+HYRjuD/elFHWPMQJNGaeqWjusqGq1zgTimnRcc2sCQFFDsYgEDAA0SmE3FkM1ECMEBmcGRPS19jjDwySS1QrbZkS1ZN6EErVA2KQJ4GS98urN9RM+cKw79wiGK8jV2jNVrFXVIwd4EqV8TuZfo8GCAMs51a73JKC/57ZwVBzxuBSIKK69S1/ybux3ZRxUS62DQkRe805xyat0qEkCkwny9vadmX393bcvP3x5eXlpmw12GJFwbhlQ2+fMrk9bI0AAnkztAMpuOCGAmyNw4oaIOAYgqrUQIxKZq5qgGFFxU0YLtCkY3SWXh353N27bJiZgLNqaf2TtVjZhkMsDvvviXn/7tsnW/7tf4Mvn/kHoGQcdIQu3ZIiQQjFBhU0BGjFjeJOH39y9fbbbvXj18ur5VQipqEuRIY9D7t2dOdZO2j71mwroQFVNduEadwwA5MZ++3Br4LUCwCGP+/7wcNiP48jMRcXFshQAyCK11vwCrLUES+KQUmpiakPsKCB5dktAjgHJ1c1AAYwdo6N6LRWGcfZv+qKhIqCDzQhgtZwqTO4rmjVdm2vHwyn5x1UViTMYW85Zfn2ScP+ejLA1HOMppH7PyU+jweksj1i71vrdB9Cs0pc8SBm1ejuZCEEdAWuFwBQCcahFkGcLJqcUKHC/H27ub968e3t9fQ0EKSWKCQCW7u010GvyzcyRS8zACGqAXkMPPBJbiO4+lgyODFzj4AkwY6mpMOIgagKCiIOWDKaMaXA2H2F8198/Hy8v4YrAcbQXqd3f3Y+/vcXbIQwcv7jfvBuvuPn6t9/R13ebi2dvY+5Bo9Lg0nSNENhhJLHGEIEAaXf/8F9+8V/wi69+9rOf/eSf/PTy6soJ3DFL2R2Gtm251soAgppT5GBuKURXK2OG2lMMsZi6637sOUZjLGK7sb/d3e8Oh6Ki7UlmetVgGbDmSEze6Op8CLFJKTgGYkDjKdMzAJgX26SmKBMwTW11KIREHNYiNMx2TJxqFwA61gq+U5sT9+oGUbclnI6mnm1TvwifCkEcO67CI4vLpAnMRQYqT6s/Hduk+qOIufdJQTDLZPgo3P9p0H+KA5zdfEHTDDaCZbRsml0VkAgBCVSJKITQhNjEFEIwUHcHjojYNM2zZ1cvP3yJ71Dc7u7v9+Nhk7vRhI3FPIAHZqyLN1VtQDObc8U41Dqe6lJxgNlrHWZGyFmLuBoUjSml1FpIZoZiTlbMMmhWya6KRqbmvs/yZk/PxmcvUBrEpHqh/PYfvnr7t79s70uLXfz2/mKvEbL6nf3u7Q9+/BrNbxpvN+FmPFxyaj2kbGpQAgqAgD/c3X3+n39+95vfHA5DLvrhp6+vXl7HTWKKtaBe9UyAOdZ8OgomGgKIlKzFXau7yt2N0ciR3QlGKPt8uB92hzKYWT7UdMcQOag7igD44rkLRIGYavjD7D92UFBXraF7DECG7gCBOLJqiG6g1fi08NsZASbr54JyhARzdURwdytzMGntpwdQK90AAHhNcMW
l8McJ6OIj89GiZy6tGIgoLFGfE/SfFjb0OSVnGbZIZTXiG+rio58FAeH873vHGuUmTodgBAJewOYcXKNZTwjMTUptRQCXGlur5k0Kr169+uEPf9htN5eXF5hY3RW9uA1aoCgjNQiJw0InFsNynUGkYOCkighUox0jRuJiGIiEChvEEBoKKUZENPeABAAHzTUsQkzVvRpS+5wF/TrvP3G7JmiQ3n3+zXc//0y/eNdZC1nxpk+9mJbC48OvfvfBDz99duf8ehs/vfr27Zumu+oAYjFI8Z79FoUU+4f98JuvD/v+y8++KKKvb9/96M9+8tEPPq5l423yHWFt4DxRO3ORUoWH7D6WXFSAKcSoBFnLWOThsN/lIZsqAjDV3krqDrPIQUQxcNe2kUPilJADUkAKjmBuQcVUSz4MvZQM3IRAs6Y7oQ0z12AWcGXkJRwUnrSVEwDU+q1T2+Z5o45G2zPSDusgGjsxta9PWNROOIYJnTbKPoHI95N/XwK5norn+QPHYyFqmuvcxMoRFFzNCcwQGWpgGFVfKRObz21hzJjp8uri9esPQwqxbbqug8hGaOBZxFQiUAjBI4NrTY03B51xdkqwNA1znHaNiSdARmXEFGJ0TMgNh+o8jjFCrco4GktZ5k8MJnbQMo72UHIP6syM9Pkvf7v76t01pk+2z3L/MO4Hz2Kqm6tm95uv9n/zy3f4UP7s9cv4T6kfr6472o2k7ol3yb51aQxMJNwN28uLcSxffPFFL+Pm2cWL1y8xBlUVm3L2eRYwGAGRRIpNLQJ8LHkomVLsEonaoGU/9Pf9vi+jMdRA5RCanLMUraUf3D3Gpk1N27YNhcgpIrFBzWoIgAYgZqOWIQ+iOVpgpJqNgeCungiNEIwnD7zDUh2nQtli/FkDxrKealOzXT+FlunD6vgaPh+LQDiX36tyBM7dgsOCCjNyzAgwx1UvLoKFg8BKJVgesAhFa36CiDpzGDdbikMt9fHWd65DtNRGEnpaK7Oa9VLNuQ4hcXAncQPXTYqHMgbCl6+epy4ix267SW3T5/Gb776LSF3TPru4ZGY1k1o1HAGRAgd0A1O3iRczcxsiMxt4znlQTSGAOamzQ81LjsRMtOk21Q5Sq8cdxgHF3BSIxiKOAMSjlBBTjM3Nt1+yIg6Cg2c57O/u0S126eHhISnY3eHNf/6ltz7sdh/+kz+7vHzW7iWmVpm/8vEbyXDV4b0cdns9DHvA1x9/dCj97mGPyCI2ln2z6bquqz7Urmu62EouY1EiIjCKJCK7/jCWQk3kiKPlt3e3FFgRICIUENVcsphGbRCRAoOhK9RWAMy87TYBKUIgr42HMYbQNW2WnlBDjNtnV1oyBRRTMXUEDByYjJEdJBcEB+aAVCOo3acgPENw8KoE6yqPeBHc10cqczvjAOvRpU5XvIuI1mUna9G7NR/4PeHQj+n0up0LriaxNCTC5afZguSz1I9znOPUO2r96zzW+LAciUDkyIhT5G3NCUEMjk7oiKQemDdNq24CTgTuql6p+MrwbwYBwQEBrFrQp6ARQFjI0lTamAAjUCJCcEBngwAYoHakQDfrUhNSDCHknEceQoMU+P7dmzY14FSGsv/u/t3n3/z00+sfvfrk//XZvx3vHq6tA3IDFRAnhIbaoWTRpPAydO3et796Ey6SPO/hhx/0Deq2aygO/Xi4vd/v99WUU/eypqUDIjiEEJCRkRBBRLIPHFonNlEFBzet4R8pKPjDcBj38tAfvGYz59znsfZHyiKJCwaOM2DULsYVaBygGn4JgB1jjePeNGZWyihzHiMwMrKqsh+bvhBj7VJItZPPHFbt7lOgFrp77VM+EXafW+8AIhAtIgnNsOFzpZOFD/gMML4yED2pDyyw/cclxJwhwxqC1yxs/RPgZH+lmnQCAACmcuR0cJwlAFQQr1wJzZGBkQIHKlbL86dQEwvJ3Q2BGCUXAouETQrZI6kDobqBSXFChJqVWzWcyqfqaq0FRAQI9bE
4YwQxsisDooM5ErAtwXNkZimEyBEv0c0i0u3D/TCOV+3F7bvb4dubi/biz3746cd8PXz29ruHcvjyHR0KNp2ZZc076RmKeNkO2ku+H/tntIE3D7v/79/pNt28bD559T/eB++VSP1idMkQY8TrDeXBRAGhbdumiQ5arNSe2KkJwdmKigDEREiuOspAgZ2QGg6Og5aH/e6+39cawH3J1bpSps6UKmLRIiRk5hqaWhdHVQMhmIMBOgSmwByRhGi/3x92O3QNIYAr15xVBzQFDuTABJEDACJRVJzqHgDiHCM6VXOfYbnKSEtQED5V2WqBn8ewukgNUzn6R9aa9d9/TFmURcDyR2F659APADTNY1F+KrAvfoNzDlBjg2vFcTOYYh6ZEaZeiBxq/rh5zaWSiBAQwNVEy5gLGENSREAPdDLQoSYcA4AB1CSX+roE6EQBYErnRmAiYG4QEN3J0QF4KllXWVvOGRFTCB8/f3URms8H+erNnffjlcZIVxcSt98Nw83nGbpX169/sH0eP9i8DF1DWMZLZxMzy3YtaQh2Mx6iXY13u3d3v4LA317Tj/8X/4q3hEOhftyMwNw2TbPjmjgsKVC3aSevcB4pcUdtCCECmWr1e5ipStmP/YY3ITYRQaUUGQcthzyOJT/0h34cEJFigLl6KTHP3SunzOgq37oopYCIBF53JFaft7oOWYaxiRwR0ADcqBapBkRHRmCOBdwBiSjVInBEtQIMAepURq92lgEHQCKt/TUQzaaIOJqKXjsAuDnzxCqsusNWlLru9eI6eBKMFxz4/Qhw1vd3jQNrCF6Q4cgxZ8lnEuBOw0iPjjM/QSoyJwN2YKvh786IiZgDNTFOocI+pTgQoJkGokiMamO/398/FIbUSIMbjmlx+zESIiGAutQW6mY+9VcFmJIQHb2WcvDaZ4aQAKa+53Njsdk3ibVTnRobdF3Xbq/g5Ycp29sv3x4O980efvzsxb+8/lG3K+9+8fnP7359BYEEpRyK6iijoRXpcxmfvfoxtM+/efN5XzIy9bf7iFR6GX/z9Yvu49RevMlSdj0UL8P47u72BacQQmxi13VNG2v7R5Gcy5AiB441BlB0cCPLBcgxUtOl0WQYx8OwH8tgoPt8eNjf74e+SlPuXqsMdZtnAIBAZsa1cEQggymlIRITeDCo0O9q7N4gYWo3Tera5FpEpBbrJAyAaMgGYDg1e2jiCW01cHasaWIMYLPYQnPJf9G5VQzM0o57xZwK/TSbGucK/ng2Fqhbg/4yfn9CzNlwBJiLB9YxuandCAkn94Q7TO5rFz1DFUTkqYym1+vXM6tdQQNxZGZBRopAjNREqvXva3gjWJUikcwDQsMUCM1szP2olksJqaGGmiamkKgWkS5Se3fW56p6UalmATL0iIhIPhWMIAcAZGIoZjPX8lWwrtfsXjEvUnb9tml/9PL1RxfP/uNX//6DTz96/ZOLq4HKP7z95u9/gzf9q+5y3N2olpyHrEPlzzkP43CwH3Y//MHr//ruy2G3T4wHtgTgY/n1//PffiB/9em/+PNA3TcwPowHySU5ElEbQ2gTIyBijCE0TAyjjDQApbZBdvBhKNEIHZrrTdO1Hqjf5/v97u6w249Ddq3vsmDykvg/jKOamXrtPd+EGCjWuJxAnFLCokxQayq6qg19h7xtt00TA9fuaeSuDOjEiOhAQKQ+gXvwGfRxBgkEr0lhADpra7yQVJAzEWMC99M2YSsX79HLuwhOuJI4FlCsf/8AJdiP1zwpET0ey/Ga+bA83hbLEjMuTmw8wTdyDLO1J3FQnkaq5Zgq8RZ1d2JARK4t9Ii71HRNm0Lcjw99yc+ePQshtG0bMbr6OGYoFogpTnH8RSdLHxEFpkVXxkV4Q0RErhNEMoRa5X7u31f5GIgYI4YIDXGA8L/85//tN7/6/O4XX+y+fgjf7i97u4YuHpQGgxh2QEWdEyCyGXgK+5b+8mc/fvYf/v3N/S1uu3sq0dFKufkPv2g
wRgjpJx+kJhXau9lzbouXSjuGYRiGwwYuU0oQHRFLGQeEttkgeimFgLabTehaYxz6/u7h/m73sB/6Yqo1IIqQY6gF2WsfxcShQGTmmhKwUKtqNwwhpBABiNVrsWERgT5vuq5rEzm4CQBDANDZz4WEwEDs7rWTZc2hgzmTq1qiDZzMHIHrwRUUGeDRCnSsb4U1m9Qdpr4nc3SMroRqWjVTWgiun4ro/3glGOBcfH/y19rf88jyzJZyPRVDlvDp+iwmCkahGvuZAYkr58Upd9GPanONSwFFJKKmaS63Xdu2sH/I41h3rokxQihlzJIBVYlTYABcBLOplgYazj1MjsuHSIjmwLgUJZ098O5Y66yMJbg3TWooDLv9u2+/e/iv391+9lW3049y2Apde2hGu33zHUXiiwQieRhczF36w0615C7wn3xyfXF5d/tOwO9RgkMr8qHy8Msv315d4FWEHzxTRs2FRpEoZmal7Pf7w+HwzIwZnTBG1lzqTpM7oBFR27bOXET2+/3Dw8MwDIsFcBzHWimEmWuV3FrgRDBhraWjk92i5jlMgYDMaIBuVTkws4B42W22m1ZEyjhERiYAtYpR6oiTxgtSi4TOCEBzUoeCE0zNZWs54pr2NgEooplV8WwtPy9W0TXwVCDzVQHmioe1AeFy8lE+Bwhr+z1UG8gUio2LdI41AYAZES8oqKqKIiIxIVIxdZvq/0jNXvWplJq7A5Kai2hwCAETM0LtuklmBiYuWkupEWFA2qGKQcPhRbttncaSG/NtZACg5J5AidzRzchBAAoRpYgeVUtxYObLbsPMATyAMhmhKAxDyZbSJnU48OaiG4ZB+oyE+ZDV7IOPOmBomjRKkZy5IXHrc8/I3GBwiEWhN0aiTZubuGfLm3a/63nUj3nzya67+vx+97e/DD//7ANtVRPICDKC5AcZbvKgXW6aqNb3+UBkQHE32E5DCJv47g5++/nbm2+7bbu/u38+mOz7iLzrD92m1c++vv77539y/ezbbD8Pwy9/1v347eWgFFJUCIBdwxeHXd5cbrrQGgykkneHy6Z7/ux5EyKDf1NyzvlhHPaqI8JorgRI/Or1R5fjuLt/6Pu+gF50myYmAGi5RaxluJXUo/OFwlbhw6vLi9hcpBBTKmPWUgCkSfTx9SsAMCkE0DTNRNoDAMUK5ae0Fg3nLpEwUbJQq3JM7BcrALpPgQ+DF0DywFUq0vmXQOxIVQVaKua6Ki19Bqr04m7gQlgRsp7MWJ902if4SUq/PliRYcG8tZLgs29rHZAEq89r6QhW1s/p6+w9UPCp9xNCcKu9g2KMtTZyjTwxM7dVqW1CESuqABQ4NU3XFQ1t17abGBvCwMRN0zGGJsQYG6YAhLWlKRo6ADKZGbqDeUBSZldH1YgUOYgZEtXOIUCEMYQQNgz3d4dnnK7brnvXv/3tF29/8ZV++Z2XYgKgBq5oijB1r5FSYmRzEzdxmNs2ACA+DIf7fj+qFc2DCCJgDBGjuxOAjeX2mzf2zZvrF80n22fQGytiMQjuCqoqLkpQXMh6lTGoNrFtQuhSkwxdFMDAnBwCIIjpmAcpBv4nP/rh1fbixcXV4XAoY+bqYHLAUHuiOTJEpou2u7q4vOw2282mi02MMTiygyKh+dqPecb5zwDg+AGPBs0zmXkNbwu1rqbz1ZnT+Uegn5SI+SmzjodY7XcnthlYibj1YICn9N311zNAn1o3V7H+kdFzQYDz46djeYH1StVRE0bZqIYcwpy+sOjNogLThXVmpm6ysvg2m+6iSW3b1rXD6jPiUDNuiWMt5lyT5IHQzfrD2HZOACGErrI4CIiYIN7A3hiFWUkZMARk9FYpadxKaB5K/9m7d//118Mvv053Q2cEJYM5gaMpWK6WRylj0VAT59UckczMgYBwr/owjhpwMCgMjIxiYy4bjijmfb7/7Kvy7GLzlz/5AW2374ZEHWbkBKSuuRioBy+QtRioIHAIvG3ay9hSNjUPYu7ccSN
BJAkUdwUxzXeHy8vLy83VRdoMh95FiSgQlxjBTIuAeRPi5Wb77Or6st00ITJy7eLMSCHEanCpvtUzBIDatvUREQQARF7jyfqnJ0dYyaVWS0YtQUFzStMaGQCPqZ4VM8yMALDmQJstztn69+kukU9+nV/MqNazJjwqsgv4zgE2OF8+mdlPcUDMGNHn/IZqYqwSoZnXFHuqkmMIVd+q3KCKVlX+meu9Y20S0Y/l4dAf9gM1seu2zMEdS1FyYmSqRk0nrJl1gTyQijhhLrJ7+6YWEL9+dnl1cRkxQPW3ixmCgDtKQWfAzq212GV6jpfy9c39L393+NWX5ct3YS9s5I6uI5qLCaqAm2kxmyuFg092a3dDquESkNLdOGqMZkrUsYEWz1ouiLQYDmq63//qi3Rx8eydfPjgfN2AIQG7Qh4HN0kBkMysRIZNDNuuSSESUEAgThs2d5eQWo7bpu23l6OKut3c3FBrXcsQuG1Qg1Yfn11sRMREwbyL6aLputREYjB3kzxKgCkcOhAHrG6bo67o7jiVQFzH4Ux/AU44wAI/VbJYg9kCeAsHmCwoZrW8xHJDX2rbVWVyNqTWO6jb4n7VyZ7tCscqKUf0XSj9YxxY3s2nohweZpVCZ3Foof0EsHQ1AABcNbWluVfC9JTaQM7dcUrkMXd0RMBApEQhBELgEGoYei2RoqoIAA6zqRgxsGXd7/ubd3e7vt9yACfkCE4qruhIEIgIGaDmXxsFDimqGyKMJX/z7Tdvv3uTOPz0Rz/+05/89LLbRI7oMAwH2KASONQKbKGjuBG6GBy/flN+9dX4d5/Zl++60RIF4OouVTFRKa7FVdAV0UMI6uaETgyOSjRF8hEepHzxzTc3D7shj00THYgiU4pIwYsk8Uvm4e0+//LLq/HwP0gvf46BHMnJcz7cP8hhv9kSggHBtmmvuu02RUcbTZQ4pHRVo1/At0nGTrQyPcRvu++6rosxqmrBPIVpqUqIgRiCR+IupjYmApRSTMwMAiAihxADY+2vCibwlOhyNh4fX47gKnBtfQ5OkQGzaD3bPdlBYe4SuWrH7V67OekCtIg4pavOtpOKaejHRhtPtEk9Q4lHTOCID4u5qqbYLeeQg9N0hyrGwIpCTIg0IS1UDqCzY4FrrRpHYwMAZMLADlOF8slbiVgtwIgo5gEwi9zdP+wOB0SKTeNEzIGQANB0fgxglT3MjJg4RlJ1t+L25vbmb/7jf7hou6ZpPvnkkxRizbxRVU4RponxFtKFYHM7hLfDF//mPzdvDvG7XTwYKDiYQK1mJGQFbFApaA5YK0+QuKGHKawF0QAMUAy++/bt7jDc3Nw6OsBmKDkZXsaEiAQcHSPGMPrwu7db8r9+8YP/KLeKhIA6jvfv3vr97qK7SsgxhbZpuqZJFLJpMQocY0wbjFaNb8iJoyEgEQYOMNlSSimKUycyEXkAIOLAtElNm5oAaEMuQ7ZcOo5tapsYu5DaENsQQwhSjqLOk0RzDehrcD+D9cXzdX6TuXgWzJlpAMAOinVT0FYk32sPlPl6mkuPeY0Ocpd5AjQ/6AkR6OzIMmmfZPc5zn9VD4eIwI7oCHPqENaEklNNqAalGQDN6u/SQxkAbK63A1UZCMEJa5Gx6rdy98mNgIgOYo7qu8Pw8LAHp2evXj5/8QKZEdiRHMkdVVzICDgguUu9AzNTYOZIKQ5S7vYPNcQsdZ0ijqrEAWJITGBOAo3ARZZ4u9v/5ru7377Z/eJz7KHLTMZglmsjewfGwU3AZUpxBXR3BRdViuRI1RouBiJCImCMjf7o9Z+kbQPgX335pQ49NBEIU2oICEaJRP3+Pgb+6dUz44MFpMB50Pxw8GG4wufbJjRNpBgQQEyysxNTwBAgHASnLnc+p484mQVnAAJAdGXE6mN0d/Xe3QNSE1PDAYqOY867vQw5XVw2m3jVbZsY2ZEdaA6+X0P8mt6t4WcNYOuvM30/sdPjUhF6QYD60xyvHpEmYdvdcH3CCRZ
NyKGGXCufkp4yohM/wGPQPxvujqcxPD6nWpoZ1TyGU+5BK8K/dNv2WT0gWLmUJ0uW6Sprcco4Ul3E6BqMUB9g4OYo6kMuhyFzTM+fvbx+/mIcx5wzAExB/27CyOgKntCKKVqoRck5xtgkYwrbTXd9efXqxeb5cx2yiDkHSk1yD2pxkG0P7f1gv7u5+f/99s3ff/Y6XtBoY5bgyBiYmUQgZ03FXNG1Rpa6gbipuTgER3UwAHNUt6JOYp++ePnq1Qcffvr6+uWLm92tHoab/IaIkDl1rRcZDoNzsHEkxovu8iImJug4ghNkIfVNajZtik0Q9GyaHRS4VjswgL7viQiYfFEWCZUocqAw6YW16AMgMvMlb2rV0QY5OBZRGcZx3+uYod0kDpXJuKqriRrGEx0AYJH468EFJRaIP4G3BVhpycRYIUD1aSiuGiuu+EAA1Ooac3fw2lQG+ShmVzGj8vzIXPXfgKirKp0BT8cygzPtfvEkV/eRmK7hu7q31ni8XLtqPHo0khqC5rK8JCLSrLNH5lqGCRED8+SuZyL3/dDXiWUp7BZjlFKyokt5eHhouvbjTz+5evFczJqubbrWREGNHQJQzXTJmtsIZC7jAMybtrNATrjZbvdD/89/8tcffPLJmHMK3IRWHBzIHw4vN9cvUxx/99VX//a/vPu7X8cH+UG8QAUBEDIBiuCszgAJ6NAfUkocY865qBMFMBJXDsmBHK0fxkPfQ4ivX79+9uzZf/PjP7/f7+6+fnP77ZvQxr/4yZ/2rz8advuPX30YHRkDOajaIZd+yEPJf9pc7/sHfCht6lri3e5wcJECKaG5OyemFLBzjXIQVVGAQ3/YbrfqVkppmsbAyzhyjK5iZqKiZmpGqsw8lD6EkDhQcHavglrphwDYUNikpokJ1dS8WtUynSivy9a/n5i+1+CzuEdh1nptqYMCUCsgTKL//KyqD0zAVTlJPX0WqAimhk4TgUZwd54lGl1EoPeI+09N//S4Exo4vadxzXJLr4oBTH64SrwRJpUdAHS+s8Dk9IY56NLAtTpNEABA3cQ1OoJpcXXnN+/e7g6Hy+uri6urpmkCeEjRzLSIFQmAtQQUiqkqubMBEpqrKDgBIsUYL7dXL5+/ePHs+abdwljIPUFgDz958YPhyzff/P0vh7/7wn/73Yd3gAOa7Xm7GdAHdgA1x2ROrmSyubw4HA7jWBAYgIYxA4cmdYcxby43H7x4tX12lZoOuLbkgvuv34wluwzqkndQQHMeShl/Nw6M1ISmiW0KkTlsu7jpmv/584s3D2/e5NsbyXf3+88+/+KDP//0z//8L/cyuLsVw8EbK53zVgKIl2TIVCtvAkB1qbq75Fwz76ploXIGMm0psKGWvC/7wSCF+OLi6mV30TXtNrUpRMmF1iLKP3Y8CTDffz7ZVGMaV2g0lQY5vRVVN1vVN/woZdSaLke2s7YCnU3raKuZj9cPS6XpWmJ7Uj5mTjfJc/Mssaop7gi4sIJFCqxoUIUgX2MCeM1wnSAevPqtdFZ0xAzI3K2oPNzvPvvd7xzhoz/55OLqEpgCUy0I5Tr1nggcGHHOMzODSRpQNTUhhy6mv/zzf/rjjz+9DpvQFxztWbNpKYxq4y+/ffPz33z3t39Pv33z8gBX0JDR3kpRVdKM6khggu7RhFyHIQ9DJgztZitF9+Oeka42l3/5z/7MADyEqsAMeXzY7w6Hw+XtiIiEBiZSxkGGAmLgWkZEHGmMtG9iG2OTQmTmv+yvH8Kzdyl+HS5+ZW+/+Obt3/7dL8IPPoQ2tG13ETaRsVO+0thlhkG+CRoIXYWIGMFLrovIcbIOAaE76GRVNBodiSAXGwYz31yE59dXF5vtZbtxVRNFcyauLdtqLTqYIeP4F1ai7dlxPFp+zkDufXA/3WYOFq4chxAXPlBx4AiNj47DWiqZf65nHk00CwfAWarBlf6+fFheRRECnD/VveY0nyvWayFowYHlWTM/cncPXhOCsZawrJn
/YpalSLWvAio6IxSV/Tj86je/+vbbbz98/brZNEheM176cdQiZcyuChQSEzqZi0rOBOaOFBSsiDpTMLiIm7/88Z9+evnqUiiMvrVw5TTc3j18+dWX//3f2/0uvNld9NQWrNFaTDioGpijmxdVUHMCcdBhLIBMIRa1XDzE7tnzFx9//GnTbQ/9+HC/r5XHzSTnnKW0pWBgIiAzkOIlmxcHiJvGARwtq6pqGPvCiZG2d35x3V1epctN2zTtuPvi3Wdv/uvf/OL1n/5o20FBCbfj/kFGTS+wbZ3idbe9uFDV2lmj7/uq7AKRuhVTLAUcylQwBv0whKZpQuo6RsTL7cU2tV1I6A4OjBQi1cJ4VkREkNP7YPd7APoM+r+Hkyyl4xYUYcBqAqoA5LPkM1n54QRia3t6RCQ/kumFD+CSD/AYBRdqvYDsBN94fA3Dakc4vhLN1yIiTnGUx/svT1HwwDVYfEKWJfXfwM0BqVb8q31frKiMJY+l1MhQR1SEcRzf3d/95je/SU1zcXUZQujHEZlA8eHhwUS1CAFQai2mpSKfkxtgqBngagjQUniWug74Q+peaIr9qDcPu5uvvvvNl7/7h9+Uz++2HK8gbDGglUMpCioBNSC4B1QDRXUTK1YQrem2OZdDP46577rth68/+eD1x1dXz7786utp9QkjBWdkoqRx2N+QihVTFdFsWkzyqFI8U6iVaAO5AXBtBJZ2d0RyGCwM4cOPLv68/fCzw6H/u+/+zX//C0XIh/7w7Tt99/DM48eXz19eXP1v/s//+w+fPxORxCGEcNjtY4xm1o9DMR3GsRZGR3AiIKam3TRNk1Kqwc9d04QQwFy08KyXGkw+VQzrQmx/3DjDhN97Gs0qr5+Sczyl/dXabavjDDjbi45V18EnUSZ8/wyewIGKXnNqps8ZvFM9LwCoNlA4gv56QHWTwfE1Jsln1nhq7ig4oIGhq1st5jGWUlQEPdAkwd/vHr765uv73d2PX/342bMrAx2HbO5iutvtqtbRhOgxQm0aQxACYUJX9UCBKQEyYAwIafMsbC5Gl9+9efjttze/+nz45lZvdmHffyCXrgY69uaBkFNADkammoM5gYoLqJmJuzmYFCpihnR59fzjjz/94PXHpvj1N98y13LQpqpSimg2M3fN0V1MNYNJ7Q5lJipZDiOnCCkRuzoQBDZDcnO3wX00LNSAfdCGh/3427//+mF/d1eGfe5FRIb+d+PwD7G52Hb/0y/+V3/x458BExE1nDDKtu2GYWi7UFR6jg2HPmQDx8DE/GKzWRLoausXMC9lqlBLq6pSzBxDyCJ/LNyfCSp/CCbgbOOvOMA+JTPArA+cSSuTQerRcZrqxxyf+3sQ4MmpwEpkmtaFyOAJhwjM6vwC/csdbK6q4qtIDzOriXj1sDmMKqPksZRaMZeh1okjFXl3d/v5l1+klK6urymGnHMx3e33+/5Qn1IzCmDFzZhZ2c1BwEPtt2UQ0BzDRvDm66+//u03h998I1/cxIfxCuKr2G0kZikHdSHQRCHVtu0CByPw4B7NxSS7iLuhHnoLnJ5dX7/68ONnz14U8dvbu7u7u013oaqiWbVUcltra5YaZMAGKmClVp8NCIrI6ARmWkBMXRRZOeyRLGdum42ifrPbMH+Y5XCXL5rLX373MI77eN1Ks3kr+e14R/bw+T/8Sv7qX6QpxA2iaAIYS9l2rQbuYhibVIMjnBCZnsEW50h1RiLzKtaGOZZmDQN/FOSsQegPVAC+Z5CDPek8fsqpzIDyiHvUz+9FgLPXO1OU18rxotQCrFjMPM6gf7lqiv2Y57okPaiDT8XvXAmW5gCYgrurG6kCQCnlYb978+7tx1cvmjapSs6jgj/s7m/v7i4uLhCRGnA3QDdTQ3RTABe3USUSo5P7xItI7ObNdzc//7z/1VcXe/2IN1ddkw5GB2nUQ2rSth2i7SE/6OhFWTWZJQN2B1N3I/dCqgRN6lJKl8+edV13/7B/+/YdAF1
fPX94eDAzQENEQlQtYz/knPuulvk3M7E8asmuMiUJKjqZuYEhOKSYUogjlDzmLsUA4Pf7bbv9ixcf/eDy480PX/8//vbf/n9+/Z93o0tDmthD0C7evrsh9ZhYizBgIm5D7AE1FyRKzBSpOvLVJ2MFEyGhuLha8eJztuQEeUQ1LquolpzPMq3+qPGPgP7Hl1Sgf/LMJ4+f3SR4oFkpBoATx96E8Q4AsNSwbZjNDM2RiBxI3dFMNDLPNhaoRNprjQU/+iOqKOYKQGgIyOTuYylTOhyYmFwVdkJqE8c0gN7buJM+g+bd7uOPPy790B+GZ5uLu3d3N5999YLbV6+vN9eNbfCz27dvHu4ptLi9vN/1zzfXOASLbeTLmLqipbh64B/djPcNv+v4KzZFfjV2F18X/Hf3zz6/e/EWbHiJDOb5btxHy5uUdnSH6AhIxVuw5FKlHVHJTCHFwJ3ksd8dTDHG+AYvf/qDH18/f3Fz9+729jakmJh24w1AJhAvgirkxg7JpDPbDl5EBi2DycA2IAzqooaIEaBRb5AToCOhWzbhg7axpX1RHZ45RRXe3XaJu13zv/1v/vqC5f/2N/8GDunDLqRXP/qr//Zf/e/+Z//rj2TLOT0MeyYnTjf3d2nTqRsAgWFUSJbQqZZaHVKBKjovccjgFOIJoJuDewD0o1VmsrTg4slaun2d/Af5NOjtTC5Yf5h+mvvVVt9orVcnXtsJO9T4sVWQxRrk17R4dlcsAWmTTH6eEbbIZwvZfp+sdiIDPfYPuM+FZJ7AQp89ynOumzFz7belWcyRVd1UfS7ObUpEw6HXUiKzme0fdu7+8uXL6+vrqTLPOI7jCMUjpymPiWOMjIgiUkouWhDx4LIvPgo5Ujnkfjdejfb84qK7MBhgxL2BFtGpdLG5o7kDIxiYg5rVjlfGzEPJOeeYGuDQNM2YRcQ+/fSji7ZzVVBLkRkc3FwUYG4wijilAcJU3VJhiqp6zCor/Jm5IhZT1Fo7Vh2DGTARkAMhAX795VeXjP/kRz/qA/ztF7/+9e23f/U/+vP/0//h/xibMTdMCQ1jJhhlLFYIU+3EiDWq17AmRyw7vuz1GRg8Pn4CrH+AUvvHjqO4hSuQgyPE/4Fs5H0STVirCOsPT772+kgV/WuFhQUBljDTpesg1kDReYY2S0pVDfc5KxJrdxAiwzlzx8yPS+0Vysk8tZ2KDsOwads/+eTTi9eXinTYD4fD4XA4GORtd3HRboggJg4huFsupUgBcGAaNyGbqAtJoP1At7Ap6WXTxa3YoAiSZVTDahI0E5hKqdQNV1WpZS7dXUwB0MYRUQwohSaE8Mmrl2Y27B4sj4kQHUyKqxAYuAEauCE6ONSWpeKu5uou5uZQ/+lk54ZaeBAdEY0c3cUJEllCheCAHpgQncA/uLr67ptvRxt+9sHrP/nhD/qE//x/8q//+sd/9jbdlBByGUqEEIOZg6IymgBirVhL7qgGxcAcOvv9YL0+vviFzk5+L1B+r+JwBn6wQoDFvomINFeBX0P/96PBexHgfRcv8dlnV/ppamadCqwO+tz4w9cKLkItTwlzyIOpIlAtDD+VkkREd5uTDYgI50i7AFBtcCkwM5c+hxA++OCDTz/9FC/Cm7ub3W7X933OWS23qYsxQG0eE8kQwIq7U+SQ4iHpkCWb6mFs9uU6p4te/WFPuaiKg6qJWAE0qh65GlWCtUGD1NLCZtb3feq6GIMU2w+HwOnFi8tXr16BSL/fD4eduxOCmYAWMkVwcHVTqiZfNDdXNzEVN1EVU3H1U7KqpoREjuRQEMysJFQiRQhYS5QAAIDK4eY2Bnq93RZKhZvnn3z8083z+//wd8O/fAXmxURMk1NqE6oV0eqjREQnVMBCNiqoa6snE1hzgDOK+xjQTxDjj0SAx5rrRGof6Ri4gsg/nOGcgfESenQuAi1wb3McPzwVKHHUfRHcpjbuNrMqYEIAclc3VTsSiXnSCq6ijKG2Xa8IUKk+Mhl
CdYFVplcTm82sTanlqLloKc+uri+7zfOr6z6Kqh4Oh3Ec3b1ShxBCCBxjCCG4q6g6KBED0VscRxQdCt6Xyz280qbb9/2X39qu5H7Yj30vg8mAImCKjm61O1Ntd26LTl9js8EpZwHDpm2228vN5uLm4b70BxszMwOa5VGtMNQCeYKuc91jVVd1LeDFtJhmUwGbukwDOIDU+C6AWhkQ3BRgZFSs8b0W2B3NTNxs2O03l1fR6N27+/3XhybLN4N88e/+Uy9/8ekP/uT1J69TCjlnQYyEpUjiQOgMYAAMUKDmp5+D9RkCPElx/yj5Z3FsnXmIl0CD43GfIO0xpp2A4h/26AXBzjDtnAOcfVgAnVZBoEuoc/26VFt391qnfnJGwCTkLG9UD7p5JYa8EqVwzvOfii/Mz0KohfIAHIIjE6kaAb568eLZ9jKllAndsBQtpRAApdjEFAO1KaYUiEBFRTMQgqprvm1UvSTVa8HnI1zsx/huP97e3d/sSyl7H7MVdIsuDmriSoo2JTwbqLuZqzumri1F9kNPlK4ur1++/KBN3WHXa9+zKjEhqIqCFFIBdEeDBfrRzE2sqGkByq4FTEwLTkXha3FsNy9QE8BJa1CggyAguoAQkkF0MLXiWT56/vzm/u7+9sYiPW+77SHLl9/ux+EX/91t8y/+2c+uPrh8/eJtfjf2JXQth0jqBMBqTkDgBoBW68mtq78+wQTOf4VzhDn78BgS3wegC3QuMXBneHg2k2USTz/odLyPO4XvnzquAjYnbqC25mKT7VIVYOnqCoRYI9hqwtd026r7OVT1awmlmoS5+VnIXBuM1tgVZo7EwYkjgzuptyHChp9dXF51WzfrAWqXA8ligdrIbQqhNkhxNckmqqqE7GiqcCAreXgl/IJSe39/94vPy7dj2Odxd1fQC6iAcpV8zNWK1QbOVDUcV6+2WkesvVORmbtu23VbL74/7AGFiWr2L6gkQEVQlamdB7qjmYO5FK9AT8WhmBcHrVkyVT+qtRgdnEiRmNi8OswFFJwJ3J3UzFDcpBwe7jvmdpsEPJvD/a6L6aNXr7/4h89v+v/ypntx+a//6lnT3JuhOmBwL+SIbiR1Z5ABk6PACXysIQ8rNzoVYt4HP+8nye+PejhVZ5f7LONoUsfj+X+gEnw2lns+YQWCRzjwhAi0usuUqFU5FhPZVLHZppi2I7JOtaUcDLwK/T7ru2RUm7cRkrpTYA4BzCIHY3X3rutkzGTepjY2dNlutk2rqjvzgIwGWgQpRuLIROgqWQpXmR0AqBYjYhpEx90ehmZbEnxz+/V/+nl3k19fPAMRZ0BSckOfWgiomXo1C+JUKwpc3Q18HHqm1G3bxC0RyZjdsBQhGDGwu5ecGT2EwEij1F4m5g4G7ggCLm6CXtAFPKMLuoArggMoVonEHYDAI6MQsoHVuGsGNK311gAUDGqRxouLy6Iy9AduUqI2H/Y3Wv6Jtb/797/6B4+vr55v/vxPYkO9yGjSxAQGaA7qZB6QCAycHvjc7HEGG7CSQP5xUtCT40k4/h4OQHMJaHiEM0+Ox7eqXwPNzclWWA5LRSGYa6bWIJw1JlTZs15FgaEGhyACTX2PVbWoJmJfMYoa6F+HThLTZA9l5BRifaoBmDkBdCExoImSekuBDCLSVbfdpIYdEDBSuHn77v72rktN7NqGQyJORGTehNDvdzc3N5uLy1dd6+55HKXkjccXvMmfvX3zn/6+uX24bq4O9w/KWBPVyQ1ERZUcYiBSghqRaqZuYiamCk4hOODucLjaxq7r+jwOu+Hy8mo47MwUydE1F8l5qJYCUCAiNRPTYmqg9UMmKmjFbVTJpl41KCYxZSJGUodBCoFE5sC8cSKDBJyICEBzKaqgrgB3D3ccQ0hxlNLfvbu6vm63Lf764V/+9Gd/87c//08ff/Cv//ovH25+Fz66dgyjOSLnXd9QeHb9bDwMDw8Pm81mSVqCRzT4SfX3LBD4+4EY4BhNfAb9KoJ
zqRFCFJEKMNXpVm3lyzRwdqT6SlCHR+bK9cHl/AXC62lPVIbDR1LamQKxPu5Vap9S45YFAweolPIw9IsEhUxhipcDWZWIO2oCiLWoG84/AToACVgAIoKE3HHsOLYUai1/MGFgxgAAwTEipRC6kJrAXvLu/iEP4+XlJSKIlpxLo/rMm+5Byxc3cHPgUYqOOWfsklYMNkdTNAdARURkrZZ8N3UXdCNEQAxs6kBo6GPJZZRRSiel27Y5Z0KCRABW22EN42EcRyc3QmMnZnEYS+nH3CdQVQETcEfQ6j2sfUIRlIANxJ1rbiA6jo7kyGqMRlAqS5pq1TgDAYIGMEJlFIJwUL05XKTN4WEP+0O43r7Lh8yA4Bec2i654G48FBFIQQJakTOIX/j8Y1nA3QH4SeHnferyGkHW91lHHz+ewDlGnbojnsS6Nbt4/NPyNay/Pwb9x/etBQOXMxfYLaY411wARzUX0xr4hbXo0qpAYuUPOFfJXQ6amasBU81XVXMwJ3N2aJEJsA3xIjabkNqQEMBBWUvk1MUkZDGEbdNetc2mSahyf3fz3bdfu/vLl89dTUxyHl9qfKXM724PX7zBhwHFBhsHKSzoQOTEbqQO7oKACGwEZu4oBlLRgsmQFEFMlQyYxEVdAE1RzeRu/9CEmELs8xA5hBAe9vvNxbaoiBVzVIRRbZAyatmPNQpCi9bqKVPAbQhIhK5ugLWRqDuouWRlBnFDn/wnteqmmCJwAKqli4Epkw1ePry6zmDcNXfD4e7wQC+uJReIdBW35bDb78eiGIi7riNO7x4eamXOJdsbZu5dgWRNsBYohkdk/gzsTgHxnMPUsZgf1zC6xsA1oD7JoE7R7BGneo9164gAT0L/I3Q/9ucgP9FnENEmUuHqYOBFxWbGoyK0ynZflhVW7KWOWgYLAcEcVF2E1MkwEifkTWy2oWlDbJARwBECxU1qNrEboSSO26btYtMwPdwfdjd3h/u7tm2xxjCaEuKP6FnY7/OXd/j2sKGGCA6SJZJZYeXWmAzZ0REVwIkCAhC6qgOaoxIgkhOKyGHsyYkiUeSaZm6gwGiE2Kam7cSUGFJqOrlQ91FlFMlg2XUoY1ZRh93QV5pRE4YU3MQBQM2IiKmWAEZGMgBGKGZAVNzNTQCWXjuCHhAEHF0NgQBHKd4fnnUvdug347CN0QHLYbjoGiZ697svt8gfXT0PSO9u70Yau+vruO3yblz3lYBZBp7y+OZ2kUeoeAw0q/EYH2judX1+/JRs4wx7k33lXI46AvH7EODMlHT263IwnJm9nkSANYziEl95qhUt6fu1s526qWlRpbnrE07dSI+F0deS1XIk0XR/VQVRdgghRuJaqLDlWKE/IoE5AEWgyClycLOGwybFNjBIASkN09Wma7ttGxOjR+LUNj+427z56uu7336XetvE7oDjg2XitoB2tQ6LUa26YREFrIHgAI7sU/1oEzBwzCq7/tCmhhN5rYgFOo79qJrBWoKCLug6ijo60zgMCkgxRYI8HkaRQx6BUGCyqhlMZL460aVkqn0PEAkxEhs4o2dEQ1REAiA39yk529AFjVxBAAgBXPqec36+eWmXnYv+xV/85bNPfvDVl79+8ewi9/uf2aa8u0u37+L11cfNNVxtDpbffffmojt20VqSdKcWJ6tOLUeo+F4EeAy7TyoSsGrbeEaOF85w7I5RDenvx4EnsWL9xPXxYyjEGaWHp3jC+sp1zsGk/s7KoripW656jOiiRlc/8VQMy73W7LVVIV8icpmVG3NCjBy6pm1TkiHXWjSROAJxbRCJFCjUCk2udtG2l9uLNqbcHzYppufXbUwUuEmBkSiEtOnafxjyb76zb+6bgqjWiw4A5gURIxCa194cQmiOBcEAnZgI2Fkd1bKoKPioZZTcdR0GHGUYhwEMDWkQC22rSH3JAv5w2ON+t9lsPv7BD8W06gDvdvfjt1/v3uVhyHTR1h1F97rLhODupRSoBUwBycDcAziDDxQ
CYSBknDqgVB+5uQcHNHF3UGDjUoqq/k7ftfF6JOligi/e/fz//v/umPdff/vl3//i7qtvXPTjn/7op//yf/jDf/XPLn/66c8+/Ojb3Xfuarb0sIL6dSaUNAP/IgLFNaicwcwZlK9BaP1hQYD1ORNiL/WmcAXxcI4Aa534fQhwBv2+iEDLvPEpU9SaA6z/niS1zPF6UGUbNzWtWrybWi1GP5OTUkotvlctSzVpP8bYNE0gcpfK+g0RzSOHJsY2JInQhtiEmJBrve3aQqe2BdimNmB4dnX9wbMXXZseVA/DsGk7NhhFrYg3FlpqYxo++2L8+qYdPSoc9kNRsSbspHRNrG/DRlxLhoILgJgTEfz/CfuzZkmSLD0QO4uqmplvd4uIjFxqr270iu4B0AAxIDkjFCEfKDJP88w/yYchKRBisJADDLE0lupGo7u6qrIyqzLWu7m7manqWfig5h43M6sxliEhN2/49evupnr0nO985/s4MBmZYcVqVrVmqQaehi6mNB3naT5erHabzebm+fV6va7jLLUm5Ne//urh/n5Yr69urg/jcaoFA/WrddevkB/VC1IAdzTTJqABDg4IEMMpzVjyIjMDUp8CRfaITYJbXY2IIqCrpUZ5VyMGNFfVnPNfvPuFf0Xa8//n//X//sVf/uW//Bf/rC/a76cXcfgkrZDx7i9+/S//yy/+6T/5n//g//J/+G//r/9nDx9O7GUJPvEa8m8BIRz46QH+9AHfiP3wBL789lo845DfWLjn2Pq1wvdbG+DbW+tv2gDf2Ie/GQU6P+6/sqH9yeOfvr4zsbHhhnRa+u0EaHdlnmcTJaK2AVJKwzCkEIw5IIZWGZubGeHCnUkxRgqRmJkJsI1SEmKXYt8PwzBE1N1ud3V1lSJqHu/evuHYmeo8z8CBhiE5IOLxzb0fykAxuo7jKIShS+U4JYpmsBgYnt6steQGoVmjA5gjqTXN2+qEXdeFFHW/V9VuSDc3V/2LF9vt9v7d++k4bofV/v7h9v17R3r//u7Vu9f744H6hCnUWpEpxDiDm5u56ckZtn2kKS2ztqpqIuqG5ugmoQNv8mQmpmoanBwDqABAQHI1dLLQBstq1rh/f6R1+k//+t/dvXrlbx423P1guLE3twlkrpk6GDbh7edf/Zt/8i/Gkv/gf/zv/Ym+vj+RJzmHwqdpDPGHpeL+zbbUb9wD31ij5w1w/v7TzQCB4Um2/O3l+hvX+tOF+hsff/7fkMEIKTByi9CN0m/mAIEZERnQzRuFA5HoJMpZTRExxIiBYVFtADkVc4iYQghGJaDkecXUM6LMJLnMx/271x7o/rAvrpeXl8+vnyNvuiBD2tD1s1qr58pqHVDPIRCb2TjPpessEHIkpGAYDcjB+/fPV/O9TP32+geXn31iNyHjX9r0T1+/BZQX2937UMrHl9sV/R+n4bv/7PWv/8svr5xmwld6zLuO3C3LtYZwMCC9jzAPJxdog53ahqshZZRMboE09I/T4e3tvgsJNd3t/Zjf5qo/+u3f+/4f/gEQfvHu1/7L+3B36O/n6SifiH0SP5IDvL77quuprvu9lVImBxnAdZ5+jtHMtapJ8aoRoWPumCjngWMfmJytKioyEjPe9jmCsBE5ABqSA5EDQIgVWQJ7hGpaqswli9tXwz6tmOoYoZM8x67/5f3dO51oAwAzAASAZPVTSetfPPqv/unkq9//h3/vldVXoRy2cI+Vh7Sf5gvuNsCdIlQVNI9cA1a3NYKZtuKYmYmAF0MxhpYOnOhNzJGIENTdT5MjAC0+EskHFioCccu9WqZhbuBAQG1EQURcdNmKrVl+noh3r6ZwQmf4SVq1qFWfOAfnkH0Sxvr6Nv3a3gKAZmLzBMBaDJ6ePtqWxzYHXwZUMCKKgSN069gPgefDfjzO7969e/3q1T5P1a3frnfbSzNrHSIxO+wfyZzV+8WSFNEc3Fo5uHym2NRIEKFBeOvri8vu4mqz2XRdxw4ppbt37511g+w
DuVqZ5oe7+9t3791dVUsppZRmkeJfr4L8NAbkauCexSBFRSwqE8Kkioir1QrU15vNq1evUkr/8H/3j773gx/cvnmbS4FyhLnIlGepRlIYmlqwgbuoaTWtBcqx5vflcFv2X4ITQQqBI4ehvSc2hP08BZkHjOt+WG3XZH6cjvM0YumEFvMt9vYZ8cniAJtApbqVJsxL4L6caMtgHQIA5JwpIIARoIADIiOGSgD2j/8f/88vbt98/0/+tl52ExTtaSp5s1rLcRYAUoCqgg7o1aGARVgi9CmKE4Ah0oLeWDsQ2mdrjT9yvvzradXTdOO8FNvz4Nc7Er+xafCNH/yb1vM3fiQ8ASZP3P2v/4yeTOMMF39pWEg9H1KFhS1v3uQ+mwR2c+9SJCMLzKA2Hcfb29v3b96+fft20tpv1qvVarPbpr5397mWMOcDSAQaOPYhNImyto+ZiIip6QKBkSEDKSEiblfrm5ubYXez22wH6tB86PrNZlVl4tZJM5mnaf8oj3f3HWBtljuioOYI5OBPyE7ePBDM2waANqbjlmt5rPkgklWMsAvp9vZWS/nD3/m9v/WjH+/H4+svvhxW6+7xQURKrZMJMgQGciCD4lpmKZInnw6kt17ekLzrfB+ViAhLg31RjdwIbBUjmkctQ8lrXg1djP1A1yu93ZupNlaRQyQGZLMTE1G1kSyqqYDZSYSnbYCcc0+BiMbpSJEQEakRQqndvqrxl3/91WOdh0+edfFlEXdI++O+v7y2KReQpnJqjIamhsUVsjNzCNSYAl/rlmEjwYN/wEu0TVM87S18ePDpevp1Q0pa7P/AJziZD3x7QfsTWfane+Ab++f8v2HJpxfGpcPCWFkCfBOzd3cDQEBCYl90rJbnxWUaBs3JAR1asvyhqO9izcQGdcr72/v7d++P+0Pbwevt5ubZs5ubm5RSzVXUp5y1GxgWWCCFmEJMyMsvCtyeVs0EidoLckqpu9he9pttFyIaBIIhpT/4/d+/ff86qe/roY6zSS7Hvk5zjwjm6NAcVJe4hdygqXNrAxEB0dzV3VSy6ih5LHkUMXAHnPJsJn/yd//eP/i7f3+8v3/961cdoB+n9DC5q4HMZIUBEMk0mIuVWvIxzyPUQ/L7oPcRHteRDejk/UYMwUNgTIR3t+/XqQ8xHMv4sL9npN3F5mK7G5qDjrsbIIISFFdRB0RwF3D1hsK5ovvZmf1UfQ2riIGnkiMmJyeH0PR6FQFKdbtYbx9ev//Lf/9nn0asN+tSi9X8fnqzDgmdmqQRMBF0GknAoGgIAVMCBqPlxhERYUAEpiU3sZOIM6A34TODUy3ri9nP12MvnEtweiIc/XT5/sYNcPYf+I0P+PYVwmkDfIPX+vRrM7M2owIeGq0TF79UOEGWbVUFRzi7qbV+WdcbBnY7Hsdpf7i/vcvTRMDrod9sNuv1NvYdIiOrIQoYBsaTrgoANC8GBgRuvstgCGCu4O0gZocU0mZYha5jIlfnwH3XfefjlwEk393LY87jFLEjEZsLnDCNGAI2r19faNtnjiEhNaElRCy5zCajS3YVcGRydAZ2rX//7//9v/+Hf6ccxl/+7Oer2K1Wqy+//PIKAFyAVN0zeCV3NTKvMqnLTDoyHFH3pdzX+aHOl48lEscYmQCstRmcwP+bj7+72263q0FFDg/3h8e9VePHCUMPjVXfhAMQBRxMkek0XWnSqLgIDs4UAC1xYIVaq4JzCAKI4IhgRI5uDs2nNDmIWKnTn//7/0jPdoN+dIi+utw9Hh54s1ODZnobuhQCIkVDd8eWUp5qAAJu6YEuClzLoargYI4c2y5BADzh4Q4A/mRZw5OM9JzttJphuV9/U/h39yfF9NM98G3HmvYrAkNbzx8ccOGU3NsZdwJ3gHYMKDiA8xNvbnRoDosNlmHAwGFp6CKqAVBIhAUpT/O0P4hL36fv/fi3VtcXm4sdEomqGLiIonbk7uZLIBNwZW94KS3DVOC2iLu4goFhJLYQQ0i
MRGCM0AUm8BioOGieQfr1areJXTBQVa3VRNydENt5DE/mAJ9GIADohr7WrFXEvIpMpg4QGf/oj/72H/z4d/cP91/+1S8Gjh3h/eu3HdCBcjEtWsXFBRSsuqnD5KWyFcYJ8bHU/Xis05FUfjANF9v1y5uPXrx4cbm7GFZ94kDg03Hv7m5CDr55UZ+XNvL258fbxrT2Ni6HuMxZmimgIzQ+qRM6uDXlMzWkwEyqWmsFJo5B0AkZEAzJwMxUzQvZuB/jup8fxsev3muXDtETp7KfsoVZTau4e9qu1l2KMQIRgqtIyZkwxBhjjBDQCQzRDUM4TS+2Zect5VxIwi0H8lPd1UK7PYHCEPF8PPrXE6ffuPoBAO18qiyj76cY/YFq8fSsCUtpu4jLNQLz11AnA29lOBI1HWp4gs4uR4c7t/INiIgSceQQiBFxNI1IPccRY3Du0/D8enf98vnLH33Pu2CRa3NpNgNwdYzNPAsAEQNS6/J8qFPaFLmbuJMaOUFRM0Lg9uvQvZmGNRYxSD0+3Od1jMP1wBFrJUDXZrtpH8SVEOm0BdyX99uISbReMSgZq9a5liwldcOw6n7vb/3O+LB//bMvsNZ1vzrc7Y/398+fP38tezOptUip4I5gBjAHnyI+Sj1IPeZ5mmYs9ZO43Vys/29/+Mcppb7vA7FImd8e8zyXmnfrdZnHeZ5dNca4ivESO+fw9sLneZ6mKZfSHMpbrDS3Vq60yLnEI3dEFBFIIYRQVXIpMQWOQVQUDJ0QrZqReQUhxS7EkDqMfPfqDV+s1y+u5vcP4DraoUV6IFwxhPXAQwfEJjpN0zwXAOj7vh/WMUYAiDGGoAAQYhug+po519MVfF73djJA+VqEpg/w/7lupq9rqT/dA/gt+tDT6+mPtL9P2qD+NXuLp9fyk4hL6v8t2gQCtOyfABGBAQNQRGZiBqQ+BcCOeAxp0w8vn7/4/m/94Pu/+1uv7t9PaDnnuVZ1TBxi6hoOC4gcKJyupUPcTiICbY0hN3JAN5jFMLT9TQ7uquq15hACI5Q8Pd7ezR0939xo7nTKodFIzd0MqaGIRERWl96nLsHDVRXMHw772ayanW3lL3a7j5+90Dy/+dVXHfOw6d6+fo3qHz27qXm+Dbm5MVJpFS1AcHWELk1V3h0f9/s9jPWmW//285c//PizQQBLlVKzqmo1UXLvCfZv38YYBw6GVEs9jhOiE9HH3705Ho+3Cli0aG3kuWYFQOoeCAEMENv4mYPTaSKCSKuJCMcARFVsGVp2AHc0IQMCuBiupiIY+c0Xr64+/eSjH9/8+uF936c811LrXAsQUt+vxEyBAkmWPObDfq+OdaVNGwYIVysECMzKAXHhUDi2hA2gnQXmH4oBO/1pI0dPTgA675PzG/mvnADfXrrnBzzdb+ejJpw7c/REUw4AzKw5MbVsvnGz+r73XNw9IDmCq1UtiUMXk6u2aB2IIzGfwnYfoquZapfSj3/4I46/tb3eGdBut5PpYKW0nNvMBDVySIQoYo6hX/V9SimieZUCgbPIJKVIbUzSLqbEARyz5J56VSW2LkaQ0vVx0ATut+/ektvj7fvDxZ3LmsWkFAIgwKpKTswMClKLmbUGHBO3Bww8MPM0TXn/wH3y7F2IKXIE+rt/9Mdf/vTnnTsp5DL1KZno4bh3973PbF6mY2+46zf742Ga6uWnH//nX/1iNGEpz9P6Zrf5zu7Zp5urncW7cnBfdqG6FpvNjIGh58mqFFlSAm7Me7n76g0RXQ3rTeofD/vH8Rg4rnbb93e33KWiUk1jiihqoinGx1KGYXD3/fFxHbuQoruHlCKaoNdGtQjIIbABuGMKh/tHjhD74e2r1y/uPnv57Nn7/QMDgruIGEJ/WoiBuHnLzHMZxzGP8+HhGPvu4uKiLZsYoxqkCDFG1VpKwdjBiY7fLhFpwlsN8GkHSKMLuLsjiggz930fY2xzAucw/+1131rjhGRu7qCyNGaJqOFHHzaDmTcyHMA
ygdriuX/9KZffdNo0sdEQAAHRCRgwEEdmBWAkRorEy0pakitGhBgCDBvfCSceVkMmnasxBUQEI0AgCiGEGCKJBcDulEG1kFDBSqnZZJymh8NeSx36/mqzSz2HFAMwOZ1SSUdQAOj7FInA1EVUyIuQe6KgdVZVdG/WHmdV3nbD6MkBt9ABUthcXnzx+iskkFK3w/CP/uRPMEvvxKbY4Bb0hZjmRoD7/eNmM5jYl493Nzc3G7V/9af/y9Xu+W9//PKTTz7bdEPdz/nxIIfxQBm2YbFBN1Sr1aq7Iyx4uaM72aKR0Ro31dEMXSNCH6KkDpBBdLfZOpJPZiqs7g7BgLX59wCenMyhKdYgimgld0ZiIiRwMDdXvR9HXq0opsNxfvj8yxff+c5vf/QRY9BS3ZA5YiAKiTEQMDX+d6nj4Xh3e4+8J6IYu4fd/rPvfGJmzIQMHJiZEJEDLsmEAzgQYPvDSOaGDqZabalZvfFPT3MwLfy3Fexfz6M+JCNPsKTlADlZ6505rU//1d3DYpvjp96WLRj/edWjATRWghm4L694Ac6JAFMIzNxWf2jVFlFoTSwHcQCnwByHVSLkSNQH15k0M1EgUgdGJOJAHEKi4zx0/RC7PiZEFDNxLa6HeSyu94fHh7s7Uk8cUojrrvcYIxAbCpB4s8AwdwuBREuejrXMxkFLNRAyLyKuhg4M6IgO0LgPH3juSwEE7UOftORamJkji+kPP/vud1+8/Omf/UUyA1VQQwcnVMTsXlVjtYRxP839btNth5+9eXO833/6/NN/+Md/NwpStXo8lqpdiND3GPir6X2LdojopxvMxmfuFwCgI7kDESJuDdUV3ZG5i8kA5io559Vmow4zzuhADhEJkCIxgSIi0SIvYraQT3OtHgiRPRASUyudAF/vH9YXuzoe9zXvb+fXX73+3b/zx+P+WF2F3AGb2aaZaxE0QHOrVqb58fGx5aEhdlJtvV4D4jAMacleKjE4Nfn6D9yz89USGzlJ7Z71d098KBeRM0/7KXPuGxughbCnjOP24FrreQM8LXGD+Bk2+ubTkQP5Esbx9CciNS4aAwFBIArEjESMDMiN3nOqXNFBYdnETJRSil0wci+gVcyMgJmRMCAiAaNDEO+HsE19DMEQKlh1Ha0+lGku+X7/MM7Tths2w2o7rFapKxyDOSsILgoh5OJoFGgq0zgeiGi73mxWPY9otbR+BTO30TRb2nwfwOOl4EFsoQeZ3r+5211evL+/23T9Dz/+7M3nX+a7x9QNJGamTqDgVfUIZdYMs6QYs3ohLCa/fP/2sl/9n/67/x7vRzxOepiie+hjCfzoZT/lPNdSiplxbG49BNCgvMZ/bGe9ITobEJGdZhsiIcUQMGqt4zhiDBwTclNUJzULxClEsHpeHI5QbTF9bec5GrKaAWILxkR7kFLGx8OhWw+GMM6TmeWchdGaQJhpKWU6jpGDxci1Sqk5lzIVVTegbiARfXw8rFardrQ29KSatKJzwc8b/I7oRNjyn2Yu0XZpq4zd6US2exr7zxvgScRfFjQjneHXpyH/Gw87lwGhnuim5N98xqeHRURqpHxGZKR45i9DU7CCVqvy0gQ5NVYRAgWxD6sKT5E152xVACByWBiR7iayIlpzaDimSFH0o5Qxz3uZHo6Ph2nfB7642F7sNn1gVoXUAbfFYuoiZgEcwAAt50lVLy62V59+/OLZ8/51NFVGavZn1dARxI2ZHbjFnsV0jWg5EtWwC93Qj+NYpvn55bUcpuPt/YYTTAXUAcEJCtSj5QcZjzIHd1Ffb7avHm5/+erXP/j+9//O3/r9cpxuv/jyAmJPTExjzfelPEKdXcmQm/gXLj0+O42eICI4NYgTEZERGau7glc3M2EnRSimUy06HddIIcU2vF+/nu+e7+wChyNEigXdDEQMnbhxCwFt1R9UD1L7/oqlPjzsH+7369X2oUzIRG7VrBSZ59KFDB2I5FKKVpFaczF1MPNj13GKu93mpKq
LCupm7goNdEcA+JACOSCYE2BoDCJAE5UqCsDct1fesv92LJzhIPjW1JgBGvO5UP6Q88CCjcLpm217hHPZ22St/MlmaNOPp7ldamrj/bLQmRflEmqPX3LoJxugvTgOHZgzOAGqtl8MqqpFtTWPkZAZtOWgdtmttqmPSFprMc2uj/N4Nx+N8WE6itab68sXL15cbncJmOQM9eIi5ADqoIYwzvM0HQ3t6urZ8PLlxfaCX1ctNXK3RAIDdSMHdgcMJ/Os0zlgbmYu+u72sLu8fvXrL7/3ne9+9+Unt1+9XlE0zUEd0ZGwqh40P9TpsU5HKV3s3P3+9ZuHh4cfP//sv/nh76yUfvXLz6+26+k43U6HogVS8C44OYHPpbb70ZZsrYqIJ+wLERtZFE7QOcwgjk1KUdhICTSgMk61BNfEiTmBe65STLFkTNiSfsSFsI6IzO35HdzB3EAVPTi6+8xu7t7Hin6cp59//vmf//mff/yD7yITMxsSqZiZlqpVlDghB+aUUpcG8+K5ilgt6qIi1rK7EBlOJNuG8i0JmTue8MPWSG3xVB3A3ETNTLt45sA1FA5PNNVzRPcTRuTujb7+lDrRHsbM+iQofDgB/LQ/WisL/UMl0Uj8kZqjDkQOKcQOGBH5dAFAG4VcypyWchHRqVPW5mYCR3ZzVXdQUxPB824Dal5OjMTIl6tNSh0aTHk+1ny0+v74+PbwYIHevXvXUfjex5+uVit0sCoJuaqoBXqC4TbvN9ECAOv1+ub58/7Zs4EGZgOiZtxtZk0TBJvrJixsk1Mz0VyXmzfmKcb48uUnf+tHP8SqD/kNBirHqe97ABDVo8735Xgr0xFVCEayvD/ev3r7/Rcf/+9/74/r/fiLX/yX7Xb7MI+jzEeauQt9HwBgejyM+0MdNqcNiS1P7bquW/cxRsIA7qUUzVlEGkeQXSiwEwoYmSAxpZjWwzhN2cQUuxBT1/XmRWoxBQoNZ2MgdqQWo2JERIbzhCuDgYqZal0jEndxXd2M8P7x4d/8u3/7WzI/++xj5A6YgKmdJFqqInniGONmtb28lFWROYsTbrfby8vL9XrNFNuBv7iDozdqw7eRnHPq4V/vCSzV0QksaqEBvpX8LBrKZlZlGb36OpG7/dKnAGv77SdlOAD6OiOvbYMPRYrDMrwCS6QPIbRK19DauzpvAKKTR4xDUUEzDNiOhROXadnKrcJ2cgIiRCIeuj6E2Aj30zQdND/s9+8f7x+Ph9vb24vNdvx0rrWO2Uyg61aqakaA7Pi0n2sxxmG9ur6+fv78JlxddZL6XnG1hv0HPOSU8nsLPADQEv/28YtIKeXm5ubdm7f/4E/+JIX4xedfdhTKNG+6npoDtmnOeZzGyXLuAEJ40Prll5//6Prl3/29P4TH6fYXXw6G5HD3+KDrIKvuUKZX7+8oyzbGq83610RtKZqZIjOn1cXFxcV1S3JFRCcy06LipgSeXBKTMzqAuBEyxRChr9Mx1yKmyLTq0oA0l1zmfC4xGZkdCZA5NpMvwHbIB0Y0URVRkbjqySEAmujl9RU80hdffKGJ/96zKw5IfVpwQjURIUR0auIlq9Wq77GvCkwXFxcvnr/cXmz6vj8ttVMr2Jc98I1k+3xHnrbD2rpfHLxV22ZonqdnG98PyY9Z2zDt+eE0zXyuhs+bgU85EjQ2aOvUtdVMiI29GIgIMDSMmLiVtlg8du1hjYS+NBCIz16BH9pyiGgIW2NA9qoV1FIoSI/z/Jjo3VRhHaV4cO+J6sM+Af/ws+/2iQ91utN63+F9GF49lC8P5f5tvv3ilrOsN/Ym3v+X6avN5W53fXmx6lYoj3bo1FaCKwAzyQEASCfG4Xn/W5uLZ58Mj3jlXfgo/csfxf/2PxVkMg5zlXnWosXMAuBA0UpFpxCCmk0izqHf9fe38g/+6B99snv2+V//5Zq61Nuv37/mm6sHE1qt3h2nYx/uanysOKyvXr1/9/n7n/cxfffHP7zPh7fv74Z1nA9
5Hh+6rrudp8NoGiMMNyPVQ1aa8C/Ce5vLYNQVu4rDH/34dz66unn16tWx5AJW3TwQ9zykIedccvYAhjVCSshkbKOjGhpvpOMJ+j6uRk/l+Hxz8YP189vD239ZHnaXz8NU7TDuNlc9pemYLcLAXQ4gBO6qJgwagxPB//CKuq7rus4QatG529bLkDHpT36++u5HV9//ztHlIBkQC0EMa1QAK8OKNMbDPDHZ9vry6tmuuwycXNfVIwExOUJlBJAAqgIGDEv0BDQgKFLURE1dNM/zPE2qykieay4CAK31aaJKEmOcjuPi4ITtKKcQExFhXLfIVUo5N84Q8Xg8tsFDRHR1BV0COjqcldepbQMAJAghLBuAOdAC7BCdA/2H6+kO/vbl7kTUav5zgtF2sJgG4sTBirr70HcxdgDgiOpeRac8HQ6Hh4eH+/v72Hd5zK/fvMnj9MsvP7+8uf7OD7772fe/t+vhZrVdd71XKbUik5jsj3maZwrcdR0GROWIKXaUhq6UQyMMuxoDNulFJtK5tgzQzIAxhNCkcBMHRhrHg6shgJimvmu6I6qaSxFiYHL1cRwPhwNU/fjjl32I8zxLzoECBB61TLkWAo1c0XOZx2kucwW1kqbrzW58d7fC9Pu/87vbYfPll18CQKm1NrEgIGIGACYKISCbNVjQYSkYEYEAmAyBmTFww23WKe4uL4Y6uWoIIQ2MiNUNA3sgw5PQKRiDJ+bEFMB9IiNsQ1IExIg1kIG/u7t9+fLK3c+Dr9RgPXIDV4R14tB3HnF7fXlxedkPQ8OjzEya6CsgAjjxOWcHAGi0FDWRJqOjJ88IM1FHo9NEWFtg9IQUtFCbAFTkPDbQcc8htDnlljWptfFRk5zHaWrJS4wREEE1NKCTYZEeYCQCJIAUYoM12+pvSD+eUNXzTvjGEfZ0SywXoeOJTN3gLKsqEhmlWgqBHcbjMRlv17suxCq1Ooh5LmWa5uNhmsdpmqbOgwEC4H487sfjq9t37/cPr+9vP3txqZ99t7t51pmBKgLsZX473d/VyQm7oUciIMdI3Ie07lrOs1AnEAMSIBDgrBIADUBd0ZljEJOa87BaRw77/d5cGb3WOgyDOjhSriXXMjECobrtHx8e9g9rCj/86NMElI+j1uooFMOxlINpXaXM/pjn/ThLFlN31TWm8f3ji+319z76JEDIYzbH29tb6mIBE3A0ihGJCIEjweQTIbpKcKc2GonYUuzqVt06gCL1YOMQtpvNZjuvNFenGLtuLhVMKEVxm00KtE6wIUKzP02EIgYMjhaJnWJFnU0Os745vL+275svZQQApBi71qhHcKZA3YqB+rS62K63G8DT/S7VABqHmplrtpbPwJkCJNpmxO3MmXZwdVVDRBNthYo3kf2vM3nOeI4/Ea8+owgt1WnVMzMvFioAXdctifqSAhFFJCZuiH4gQofEgRBbb6ut/lMBjudj5Temcd+4DCE8+X6rL60WB2so0/Fxf7x7+OzZx88ursi8GBbzqi5itaqrMYVhWJf9zCF0ses4iJS5lte37yat03zTxGZfbLbRvcz5/eHu9fTwKNmGGEIwcEET1BCMVzGkBKauwKaLNKlaGwxrxhWmRgjES+H10fVFF3l/N7X3q6r90O+PRwOcrVa3qq7EuZS7x/ti9fu7q482l/P+UXIhDod5pOB7kNLHzP5Qp9txPJYcOKaU0ONKakjDjz797keX1+9//VpziTFmEzSsbhUMnRQ8UqODe0VAtWLOJrQsqmhuHqhMea5llaK5TXmcU0ohPttcvD28FbeaKI8ZiCPDWOukVRwEPbgHOtWjBhMjg7NpUHS0UeudzPeWx5ophQpiADFGIkocyAHYm08NMFKK3TCkELH1lQFVwdXRzRyQGNSKafsY7URSMNEmYIFL1w+annEjvNgJDmpcNWa24MyNDOwmH2B+YkKgNqmLiMBEGCITaTAzBXdCJ1RVJ2zKYuQQ2ifLSKH1Ypfxb+RTDdBKWj4Lwv2
m/Odp7P/G9x3NkBbNG1c0bXr2gBoJpeT9w6NXu9zudpuL+XAsAAqsQGrohkSh73vcwlFRbJpLXZDswEhBzF/f3kmjBn/3e9uu3x8Pbx8fD5aPllfrDgMLGqHNVgiA1oFjcAExJcDgaAZq7m3qoo3agIE5izf60+VuQ+CulZEAzBAcWd2KWAF3wix1NpvrfMzHruu/e/WCah3vHgmcUjwci1efA9aIjyZ3ed5rcUaK7IRuuPPuD37ndxPxu1dvGPFYy9u7293N1aHMBa2CgVsVi0ABkABr14yWnBwZndENHZiwi3Uei4qgJ2ZUq7XO8/j85cWB7/NxnBRyrRhTtfpY5xnNFzlK4HZf3dVsjgHdAIxMtcIo5a6MdzLTbug366pS1OLQEwG4TuMh9VytdWiRILbV4qJtoLyRDlUURNUBES0QEWmVc45uIvM8mxmc+Bq1VsmlqYVC35vZskMQIcb2YkspcIrCCyB5Lk4bnxfAwYGQKTBAVIld6ldDrbXWqmYulZlDREZEbk2u1tYFbJVx0yRrq78BQUsD71tZ/vlM+PYJ0KZYlsMOzFzQFcAYIJe6f3ys03y9ubzaXQUkE58QCgBQaISTSKGLPXY00WjEAoJEMXJiBqZiXkTK3X2MXZeGy+3qOB7GUqwj5EhddERDULTJZgCkTWzCRO5OAJGZAAqgoRn7XEsFcwQ3UzMMPGzWm76r08xIBKK6lC6KUN1ETQmy1MqkbuZ2eX3x0XY33z1aqaHvCnohmqTqEPdSH0oetXCKRIyGqopOv/W9H3cUy5xzLloqhBB3m9t5LAwKXsHdndXIITkSoPTB0fBEXXQiYHQiCBECG6MBABOn6IRF6prT9Wr7bi6lFEX0gBm1MFR3J4y83GICaiSFnFDVXM1NxHSU8ij5qOX68sXm6uLoMtUc+s5UXbSUKg3CcjfHoArmbTsRoJubSM1F5lxzATUA6LdrZm4Qn4igQyllHMeWvbR0o1WxC1hkCNqcpBwJgcAV1A2dTNzBgQnAqeUryMS81AcnKZelvmUmopgSh9D2jzblh8iMiIGYiQPQwvNxWECh0+o/m2J8gyd3/uJvqoYNwZpelotDq1Sacpk/3N6+fv1m060//uijGOPj/d6qHMGQmRINSCvRVSmlCBpSDHGILbFr/cJq6lKdSQhe3d85wGboU4C46vrVKsRAHKoUJkewfZmzYU2gp64QIwE2XBAKKiFYzeaOgd1MRFahu7q6IreH/UMiquJmFmI8ziMygVFVqSK5Fg89RY6Br64vItLdw2PoohEeSvYuzZoh8DzP4zSp+9D3AUlEEvJ2s76+fvbl578kQA7p9uExrYa02Xz55te46oTAEEGd0Fm9AjK0s9vRjQHVnV2rM7qqqTEqQjFhhY6YGcHBxvmiX03pOM0P3iVlzK6WONel3CTEipSQDUNAPIA5uJqKSFWZrYwuhWF1ubt4flMe7qzM7t4ogIQuJszs0KQGciq9JmHzWqqIlDnP4ziPkxZtLaYsmjio6jiOLTWvteYxMzOYGxGRurvWpsvC8zy3JB7MAdBEXc0RUkruJqpQQePSz1WzEJK729I9PE9GIDXmJUAIqeuQKDTANBDgsu5b4G+R/uuL/ullT9b6N7bBt1c/LIQWMLCW9rgrgjG4S71/uH379s32Oz+8urg0hfv7hyGmES1wCqHrOWxUc66SBcwvLi6O4Tj6IZc6S2mMhhC6g2bkeChl+urXXQw3V5c3wzU5xBAVMFfBQGJmZU5mE4lHdm9KcBgAl3aYO8Rg7oIeA5u4gccYLzZbF50Ph+1262qqGmOqKhADEUqZq0qpNa2GFEKIvB5WOuuY56vL7aT1sUw+JKvkSGLg1Vi9Zw/gWvxqs/rk5uWb2/fQxffv3hvC+uby9vB4++r9+ubyIU+V0BEQAdUZPJqzY5XS3EnJidGDIVlFIBGpAIw+l4LqQ+wgITnm+/2w3gwxAYADCPhsEkKfTUihuZ0wcReSRXamo1VzU5d
qtUrNroWgMqTNan2xuy8TTnskB8MUI5gbSWvMWpVSSs1zDhQ4llK01uk4jvsxT7OrMXNkLqWklNx9nmdETCm5WpMzbRVBIyQTICEF4nnMH2pcXwRVVNVXIKpFpXnch1xjV5m5X38YpkFsdE0ERGhuImZEFPuOU2xQaaBG8GvJIACYQxuVe8KkMzPEBfkJ4YPIs31dQfJcAzzdG4qOS0NOEBQRiJrllf7iZz8Psfv+979PgadpcoQ3727Hq3XHxCmlmNbST7Gb+x4Acs5iVXTgTkspZa4mTVuqE3UzYbdjnkYpYTNcf/xRllKO03ozWCn3D/c9EzDttUwmpcwMzu4lZ1cz8HmeVJhT7CkVqfM8E0Af09D3x1/dDinm8ehoiP5w2AOhA+RSWitqte7nkqXkly+e9yne3x6G7eZ+PEyg3sfZvSJOh+P4eNh1Kwfxw7zut9cXV6uuT5M8uIrIiH5/fy/3t6HvZvdfffkFrLrKAIjMPHDsOTSXQQNzcxRjxBTQm5RTla6PbMHMHCF2HTqWOSNyjIRV+9SZmbiFFDWXLobGhEhE4dSvBLUZfFyxmY3HY7uB2VWZs8oPfvvHFeyjTz7KVuZx6pgOh8ebq2sJYGZVhQN2qWdmyWWqY8PuylRcDcy1aJYZAAwWQbQYIzO3os5V51xqrUS0Wq2aRmDj5qSUcs7axpUYkchFtdajKgUupQh6v16JSJEauiRuZtbmB1rLrP06f6Lw3qho7fwJZ7nzb4T8M6WHTipCyIRMT1Ogc/g/L/qn/+vLRKW6m1l1F8ITcQJ0HA/dkLaby241OLKjioM4HHNxDslgwDCkbjv0JU+usupT1VRMvRTyQCKEIaRUfSHQmqMAFrOpymGaYxcQYZ5KLTMwKeNjmUer2EdU4WoByHNFNwPPHDCwgDaAP3apYa46F61FRcwFCM+9dUGt1tBnVRUOYT10qRsSwOy1go5mM3tFzqDFrNaaMMp+vlxtri7WWHWl+Cz1IYSf7d+WUnKZJrQxz1pG3gybm6vbfKzgCsBq6jZjjQpo3nNnqmAQoBog8/Jpk4OjI7ie7UQBiSEhM1Cbr8iNWGSmqmhOjnBihipidVf0LA2ZbMZQKOgOzn26fHbDXcDI/WqYpqPZ0oUFJg7ce3D3ELgB/6SO5k1CvnF0vYqJoAO26UEDd7QIjUlJgKVUN8PWNxBpnzQzx26NTs3EWFSJmvwC5alg4FIrphA4dUPvTF3XTcejqYGCoKAhNaUuXHzbwYGc2gNAARxCw0oXXN8/YDi0eHU0BU6Ek4FFq6/x684F5y+eLv3lC0R1VRFwBQRzMRdVub+/22xWz18+71edoiuYAgrgMRcMvJKBun5InW82KtXAspaCpowhsyOoakBKXciTeFNvQVDHqn6Ypvu7w7OPrgh4Hqc5T10XqteH6XDQrJE9MjiSwoJw+QI+mFoFa5aVBO5F5sPRtVo7Z8+sQYSlY+5N60qZaRjWoe885+w6WR3NCqMiZrMiWquuQucVL9L6athQ0N12O6R0e3t7YCum3HcXF7tVrfcPD2MtMBdEdrBFl8WUXScxNK/ooAbuwTlGT55CCIgAKqjKy20wBAxIkbjj4ERtXN21mqq7WxF2DIDBgMAdvJoJIoCJNC4xmQmGgIAGsN3tbp4/M3AiTEMCAHWLMSBBCBRCQMTGOkNzFzVRLVVz0VxAQVWl1Cb7HhI3tA1Mg2HLvMnJxQOHxClyIiAAZA7MLFVdzMylVLETaYXQsrhaVYkhxJj6YdUi1+HxsXXH2vJrr+1MlwCARqk4c0s/9Asaf2hxyCE0PLGMARycAe30pPh1GtO31/3Ty5oSmxayikSO7dOYHw4P6932+tmNM021NNktC1S8inqtVWpepbSJXem7qv0kc0GFAJyCgmsVdiAiXhzpofkyiek45bu7+91us1onFZRqsYNc6v1xVHMNWAkIvHGjXLUxx2vV6moMBKiqniXDXOKcXBs+Qsit1cqAdvJ1JUB2IIM
+MCNO+72jzSaTqoZkCNUhq6m6oj67uOwo1Lk+u7zY7Nbv3r37+Ref/zSVPM8dhY+un11dXl6nRPvH++nYDV0TaW8DG6dJZZNpRnNwj8CduTp0hCEEk8qmjoS4jOwReGtrukPkkFLyqaB5BCLzgQI5MAI3+25cHNyCo5kjh2rOjUEX4fr5s369OpSRCChwU4YKKYYQanCOoSGbWlRMVExF6pzLnCUX10btdFBb6i4zdxM1V7OTZrhWafQqFwWGyJxSCl26vXtsK6ZMs5SKACmlGCOeZspiG4FgrtacQhHMVVWQ2pAWsIN5mz9p67bWqlUAAInDmV+6LGJYpiLNzE9aCeRtTgMXStP/1jzy11KgE42JQIOjuRXJU5lUdbddd5tVNvFirAGBNDAqOZqqSi5qwARDDHPiLlGHUYNjDGamc9Fc3DUiWFv9tpxapZTHx8f9/T7yjoFCSKYwljqLYmBhE4bQjNvN6PT6RQQYlmRR1Et1EtRmF2nwLQYiAJgoE6263gl7DIg4lqJE1a2iK7mAZRUxAyCptnm+42rMtLm5enf37i9++fNKtnl+PcxFxnl/PDDRxWa761ciok2tDZxhEYppAUhMoWUvLmfv+aCBCTsAbDoCHIItTcz2dkKXUow0Ajp0IbIjU2zdJAITVEU3AkcMwgZIgbXWSAzoCn5zcxNjtOJmFREpMhkAE5DHPkZiAGqKKS3jJwevAqJe3d0ZOBIiMQIRwIdRXW4DvGQANZeWm+Vp5hj6vmfmoFaLmmopUuaapxkAXFyTU2AyJApMkSiYQanqRQJxBVRbXkn742p5mu3Ue26wUoyRkRZdLPh6Em8I4EZI7kaAbQ+7t0Gwr2U45wPhN+c/AP7BIKN9PFrKPI5j6uNqs6aAuRZTZgMyUnBgYmYkl1rUjCMHgC6GVddl1ALkCKmPXZ+KGoF1iAZQHBwX8xJXyHN99+49uO52mxT74nNVhRiNwTuCiYEdeAmW1up7IGpqNu5mxmrsEIEW315fsKLWZDFEq6Kl9imloS9SImIAToZHOgkcuFeVWVUcYuAUE4SwXvU3VxcwpJ//1Ve/Lo8//vGPmeTi6jKK1/sDiWHVDvmqWx1qBkBwrG3Q2RzUVY1iQAYwd7Gq7pZFlUNZdV1ibsVlxMAIbIuQh5lF4pRS498nYnYECuxACApKRIXMCZQsChlAoDD7RMCEaqZXuyszizEWr1W1CbA2HlIIAYladGdGdEbRJU92IIBlWBCgSeaIZFADNTdrpOCm8EWwaCwYeJQYOZztI1R1gZhqJYAM4O5sMRCeJ0+W6oywQ2YgMQBtwwGmIKCuRVogPjOIGIg6DOek35sQYqOvPc12TsMBZ/fm80L/m8L/13bIBwbsotxRa53meb3bDps1EIkaUTD1uRQTs+AYmJlJ3NWQIRAG4tW6P4KgZHcPkVIKUCOZA5G4kwo5REbkQESu+u7NWyslRt5dbXJG5JhWq1yzkwGT0+l9naZ/QgizLdOJ0OACBzdbiLOEQIRP3lf79npYXVxdPj4+VFVAY8BqIm6te1OqzLm4Qcddv1pPJV/udpr4529+/S4fabd+U48/+fwXuzjc9JuL2G8poUAXYz90Vh8QFF3Itbpqs9lWlHCSKScGNXFDVSQy9XaCdSFGClwUFim2Bn6HZrcI5oER1AMS+9KTcQJmlACAOMSoqiGlI1IiVnJ02G63j4+POIRc6zSPIcUO0cARsVHN2lgFM5P5bFVyaXPkZoZOhIpIhoaIWpvbpqs7NqfQReWMmsZsw+xDSIEiGBKinLRMCJZBGYHKMRKgtTlYkRbOQoxQ5Bzmz5M0bRjgzBItpeCZVn1euOfVjM2+94kd2Deu35jr/1ce5q5P9oaqapW8uXzWdbEZUQCSgpdapagRAAAzszu4EjIBBoIh9iGP51cYQrCgZO5UwUwApQXy1oF32O+PYHr9/PriagcAHEMHMNXclqadB7/atJUapSBFpjoDQDBb6P6lpoROCMjAbG3
ux9wJUkoxhHU/bNebeZ7K4WBgYHISCgZ1q6ZFha05RsJ+PF6V3eOr/Z//9V/QKnbb7b/9/BfPb57PD/u7/R3FTddtqChE6YZ+iAlNgBAEgMDNqJ11pTBzxEgI2LgAzMx0hqSZmZCIHEUdlvq+kV7BvAnmuhlToKbShoiI3riahF1MgpJaoh2jAJDJMAz39/dr3s15nqZpM6ziaQBwrhVbquxIiK0j1kDGVg27my+tKFp4b9aoPYt0aVv/OWci8mVghxqbzZ9S3JqcxImLmkIIIQgt6SsGXmbWWmkhCuTGKrY8w9JoE625lJyJSHsx0WCu4A5LMoZwEo0KIYA76WKUgIBkDiae+lZKEDqGsOjJmfqCpSyh0U/Cj7HM43gQL7xa7SPe7ssj4Gr3/Gb7oqtd8KgQsnpWnYNMLhdhYJNcsxgE5hmQKIqh57L1XXG7m/ejWOiHGWE/jrjpQMGrR2k4KLATOTtscPb71w/kdP3RRWD+6u2rZ8P29keEf5U3t/P1wWqGB/T7zu+iq877+Vjd4noYqbzLR4fwbDtM0yMQCkkTaqwBVNEVymH+9OrjPvXlsaJ1TnZXsoQNHaaBk6KOY64iEWmIsY2Xv9heJqD3+2O6uH7j8y/ev4Hnl6vD9PH11eP7O0F5O92xwQ+/+/2Kbk2BoXEhEDzEAj6a7IQMUYNlIonshBHInDqnCxqe+7CaLYF3CCGymYY6Q+Jc9jTgne27IU11ChzXZIPRCjg59MomKmbqOoX5YrV6eP/+puskwOvjw7Pf/V7ZQh7v8/Fx4+mae7ZUV920Cl84Ps9cTYuZM6aUPFFmnhwQqM2OgztHVoRSdV+mqBGAAKh5goDawj6IAQCIIaUYIlSdoSgRBQwKisgCkM2IKKTYdZ2EoO5a1HNNEC6fD6FL5T6rLXowoMrBA2Itdc55t9u1DDMYhNgxM1Z9fHcbfmOM/3av9ynSvwROXIYJ/QkXaGmmwTJTa2bhVB6om4o3Ll4r25cOmrmZt4G2Vu0tW44QF8RgOQEbfT+cCNtNNxccHNSbjAAaGJqZo9daZc50h84WBu63/W6zGTbDRx+tDr96VHgUcAjsaApiqqC2Dl2uZTpOQHhzeXU57ByBidRNRLN7MZX2aTi1l1RUilpVAUJAzCZz4gw2oo2uEzWgrcUYmKaJAOfjKDIDSlQAA2EYpUDkIkpqq5CKyHa9Vjes+uFeuDeaTYODDFzPSSgCgNHCV3eGRaJvyZQcEJfxqJZSm5m6GrAZKGAAAEBHBMKz0m2711Uk9d3F5TW38SsDE1dVq7VWEgFBH706oTNz46Qh+TDAVsZy30Tn/QSbtF/dPBmWl7FIthAyUGQAIGeAxVVgqTAdWtbu7q13xjHC6SQUU3ef5/l4PAaptVYAaSygxrag02zkPM+LN9dpKrL1yL6ZAj1d6N/o6fopvzczc1koNacxl1O/bAn8pxlNQW+2b6iqbdAREds7cVqgVfWl9+FPusuMBCcfETRnpBjjquvn3AxGreMAsVMyI6wu3nzrW+EPYFIOx4N6qZq7Vfzs4rNn18/X29WLupb1r4SgBuQumKm0tSW2jf0kXo5TXPcvbp7dDFuba2JWMRHJIllFlpSBq6q4qdRDLrOLBTbGqnLoeTZ/FNmTTm4EyOgFfEVUF0KgB4N1CmvkPBXvw6S1X/f58Yhgmz7NJjeroariidmLBs27m2xh8jWMzkHJAyKgQ0chnoZ7AhATBMAWpJjZiGqtjNRSc0SrINFZHZQQms4fIgKRoYhxDMA0lbG7Gq6fPwNCJwSBJWtXrbVK8QI0ljl0KZy7SUgxRu97HwY1JFsI/e2nAzoqqeripNWWWyOdqfvCykYzM3GlZWxGq7Td27oZ7TUsGwnczHLOx+Mx1aqqnJag2TL+M8J5PB7PkfRcvobWJvhGsKevr/tvbINGn0Fb5NQVwd2aJRvSh3Fb8cZaMjQDQgQqppPkooKI3KUmPwvY/BFdzBS87ekqUk3b4EHTWmO
iSBxSxJPEF1VKyInCbKJMrg3RtMbjbJS9WquPpqD7+10A2q0v+i4pY2HwiNoFRygFXCFxwGIr54ixUKLQDamLMaoYVUImUoImImRu4OTNjcXVdK4lg3okAc+uE9PR9RHqwWpFC8QBPbvNJn0I3EXYg7tvutWmTI8P7+L2stSpS50GBAXqYgXlLmEOoLV1H5osIbiGs5LmoueOAMZAETlx6JgScUKO4BGo+VsxMofoFEsR5qi+3EczU4MKQBgMrIEchsxEbk4hOOM85b7fbS8viisCRyYADBaQQkFvGhYiEroUTwoJZ+2qruukmrX7jQjgbM7GQGgNF240eSBYRJkWJRgV0+Ai5i6IOE8qIoGYI1FgOs22u5/CbjPsqrUtvIDBnwwW44mk0zCl83s/f/E3ngCn73zTALDpWMAHSZb2eiyX6SwV0RplyzEK1nD6RpxU1dZqVVVzD8xG/GGnBZYquZaQAyUIjngCBUIIwBRjbG8+zqHUWlXAx4ouhaVhBG5uviRCos6Yx+nxfl+mYkXGUv/67riXaZfYO6i1ihswRYg4Caol5N1qbTFaqQVzcGhYBwZOkEQRlk8fOAYHENNsMoGC0mxS0MWtqtSiouaETmSAxfRYteMwoz2W6fb4eHO9Wa3X/HA7z/M05aEr4hJxGdEBPHdalIjIqSEtS8QBt/b5Q5Mi9gTQhzBw7IgScTALSIGIgBMGirEQ5WlmZnEHJ2RyRTFQNFH1gO5o1OxOuFputsQCGPohblaVEcAMiVKM2BFRRUevLo4AoRETY0QmBzvjkm3GUUQAUWFJClSwVmlVcjudkBUbKw7RAJxAqoNXIgWA4zG3mritQGkb7JRTnfg6HwwEzKBWneciokRExO6gag1VWXJJW4QkRGw5Ac7gZrvOy/3bR4E9HbMHa7xZEZnneQGhU4NwFgBUzaxJdJpWVT/JeqkqATg5ETmauql7QESiosLzjAA9x3iaw2Qi5hCJbb0hxCGkqeRcy6wVXUsGRHBXdUED9OZ8AURUcnm8vXt4/xCID+N+erPvyyF1lGdv7m2ByBE8sGR1cIwBA+ecU3XCUGutLeASMrA1nW9bToUqZgjVdJrH0UUIaDbKSrmSa0iBHRFA3SClo9UBykTyaFMPJV2urvHF+4d7AMS5tuXrZgFpnmc/a6ERNCaMmXnzHDq5AKEbI3XICWlg7ok65A4xMgfANtlHuIjkTNN0vq1EzR4X1VDRTc0Y3cEc0Kqpm1UgghSwi57YE9daeyYKIXFHgMWUXMEKnEiTkQMFNjIPpqc43dYrEjk18wh6kk237gu6n3otgItQF5orAIiBz/McY0wpnQuYpTY4uSeFEID9LPhjJnmayjS3xgWcYKMQop39Xn2REf/fPgH8298RNTyLnriYziXXmrPWgI62WGjAWfe9qTi5VdPTLlreAH2dcNr4IUNKtZS5ZACgDoiXkN+4e0y07noG7GLqp2maprvyYESMjWX1YUCuHX+J06TH/cP+4fYO0d+9e/dG5pdYn3VYTaBUNgiMsyoEmlTnUuZqMfYXELGq5pJxVvcCVhErLLSLFrpm0WqqCFnlIU8zGnaRx8pzjUV6dmLkQOjganHVHe7vN7BdP7sK5Xh33A8BNpcXF6EHtZLzphvIvIxzTyHvj37SCSViD6Zo7bf7Mq2EAMZIESgiJocVUofYEUWkYB6IGJnAzQGRVOU4jZZIxPzJbB8QVjAEdDc1UPLoERCqKThgFyvBUQqFlVMACtCU0BwRnGRRzmwOI36CLFvNepbyNLMmrLbsk3gCCb0J91IzmXR3dHTCFsIXnUt3MJdSW8usaU+1UgERXURPCigmKlbVDeBUT7qfD5mm2nau78+yWbXW5WSBr1+/Mfa3S1UQ0duYiVnj+cy1uDu6VCBsQI5rU3ozwnNnFE4sRWwNmiaLC8inhh+o8irWWovUIFyjMlJwFjcXI6IuxEhMqWMiVEM1FuJG+POmiNXEXxv4TSEEApjneTqOXR8
Pj4+/3liXcA5cVFKRCGAMVcUjVcA7qceat0ZXYRcFbD/NfTHwAlYdBNvEBgECBbZS1c0Jq+khT4WgHyJWAzESDw7BkJvlmjm6Ph72z/X51dXV2/vbN7fvcp6uXjz7o5ffQcT3b99t17tSyrvpzYa4ToVSgBauCAFQCSo2kY/QZIzbRxcdonsPkAASYHIPgHyy6mmCa+iuADlX4WhIEBZZAwdwA3AyMAVTB3XoAruTZAUiCGEGfSxTb6nrOkc2QwFnaHmIIlhLkpcRXiWtUnKex6nkXOZ5SXUCg3N1LyKJe9BF2o1PPlpmS7LdHEFbfdn6BcwoIjJLCKEb+q7rGqLYigFXPWf2C8vN6pn/3I7NVjrbSUUCnxB/zIzOk2NnWr+dRjDtdC3Jm4iInDlAZla1ZMlVRNyq1SJSRKqWqkXbcmdarI5Mz9vR3Wspx/2+lgLmhMjMXde1UetSCgbuug4IpWllmNVaAbG9BndnpI7Ctl/d7C4/ef58SJ2ppBA2q3Ub+G87d7fbHY/7aZp++L3vXV1d3b57b6J30W51Ptbc9/31egtTmR4Paehn8n2C1zb98njHu9Vqs54fD1ehV7eplqnkLFVOsQ1xUXFrQAQxx5SAcJymo8soZXZxQm8PUw3Ex/3h+dU1FsFp/vHzl791/bJ/zG9/+ou//l//3c/+1Z/eWPyt6xdv/uKnn+yuByeS2nGspXDkLOXheGieaNoGalVNNXJYpdQxdYirEDZdt+GUkNkN1E73q034oxF+/Omnx+OxSO2GXlVLVamm2mTIwZGbsnERLSLAVFwPZf7oO9+ZTGa0wqAMwt5+6EQdcS21i2ndDwQwHcdxHNvi8VNftU0/zvMMAH3fK7QusCGCu4lUMwshtK0CgO5Qq5RS0SBxzDm7e4wxhADmi+4VNNfkNna85EW4sJgDIjdzHPclpxIxVWeOXTeEkJZsjAJzDOcwfw785wPhhDPA028uhxmgQJNZRSdEbl7q5GTiiIaO3hDgmCKYorYKRgDNatVSQaB6KZwBo9siPW1qFVo5j00GoWXYCICIzSq4YW18muRch9X1ZqdXdY/HvU1C4mRAlPMySXR1dXFzc9P1sUkRQwrcI4CVaXbRVepmpAfTuzKOZAe20lEN6IShucAxtUOXiJcGOS3Cj8wcyKNZa2sAiLt/JXMBUUYiYymImIhiIEbSKSOmq81l38WL3j571nUh8mzr3fbZ1TN7nDYQrvvh3cPjthsMnBwcQJuqBbgiAEOn1JQKAiCpB8BVDKsQVxgSYUIgA0JvSnCIGPpuLHWvkwcaNusxNmodNt1xAwoILftQR1tGmMARxR36YJEtBonkkR3ZhUxPGJQrmBAtsRaRmBdbg6Y07O7iJiJKwMCESIERNQQySi0Amxu5E+GpLfRBV4EA0eH64rIV0MuOUtNTGbkoqSC6KpyE35iohezFXzWlFGLf9eM4Nll8dJBSWwLWpy4sS/+JCTZ+i+X29OKlNHB3X2yfyMERiFozSkzNLCCc4NT4IZdSdVcV0SpQnY2EC2BWZAKgtgGW0p6B2bHJNGhrCCiHhjlQJCZuql4b7eniJmG6C/sId57dxFVdwaXmzcXm5cfPP/7sY2JzNyLcOH96efWd57R5e2t1cnchP+T5mOcjyFxyKeV4PE642bQjsDkuwrIW3T98OIwYkBmRmQOzFysm77jCgIgUiNGVDQix2YzrVBDmrfGzMDzrjXlzudu9L+PV1ZW6/fQXn3fI69T/8vGLzfNnBcDQml+QuimYgbXg06aig2MAS8gD8y726xgH5uBIvnSXGiskdOnh/bu7PBLzaneRa16EQwCRG/4PTsvdFHV2ZHRFEPCwWuOQPAWLbF1wCIDsYiraJNxcDZldVEpFCGdARtpUgbuqVlMApFPGz5EQudULItJoMvTBVtTbnLb7Iqbb972ICGCWxdWcEQ2WxMTV7JQ/uxmYyQnifEqpWIrMD4Wy1VpjjH1TafVvLPMTVHz68sMChmU
RYJs8UldxsWUKyQCgmpIDEzhSQCLEnLPCUhAvzlxiqAbqIWBEAgcXMdE2vdzALEHEk1cCuBuSy9JvZmUiwrDIjfbQpXUawtDzECG6QB7z4fBIBMBw8+zqe9/7zuXV7v7x1snSwDcj/Pji5rc/Wm++tEe6n9FKIy3nUuoMU9b9eHh3d9CBpUyOxbW6iWkTTG8gBDclEUdCOEvIuFopuW765rBk4MEDuQGwuIsZIIhIGacQh21aBZA4ycX1BRG9fvvuzZtXm92WY7gbH7b8rLV4zKXN+Dk6tBavA3MMhAhK6l3CTeq2fbcKISHSyeTKAQwJEEfV28PjXgps+9R3VudaNYWIRg3LwJZ9O5qpgQdERwL24qXfrDAFYRQiC+SOKlC9JSJqKmjm5DlnPx6DdRxPZm1EqlV84dE0hxsgNIRh6JogCiKFkIhARGDp3TgYnS1h2nTY4XDwE6ACAMDk7mcGEZzg/HOwrlWZOcauHcuIaAbzXNxRxE7ZFIo0U/sFDlue7GkKhE/A/KcnA5gikbopgnpt8d5ODjboTuAOhAgtCVYRp+YhBKBmKlqrVUkY+9St+sEoFq0nk23UE0jaxj7QHDgAg4O3TiQiOkACNzIiGighc8ceILGHmuXVV2/u7t6PeUb01Wa1uVgbSJWcEm02m8up+450q6PInGfQif3gcshT2R9jrtfIKxpWgmRe0R+0qLuB+5kM24zEm5MfBSdK4D3HPqbIjOYBWv2vhsAxJQ5kUKsUotCFg9Qv7t+sUvzB9YtesRxGSfzu/u6Xb77KDB9/+qISCAAPXZ2n6o0zXF0NCRahGofgGADZMRGtYrfpu03fJcIIxEDnHoKoGvjj/bifxhLQA0n1uRYH6mIHAGcb+TM05+DqRkjiNlXZ9J0QzCZVyuBDQVdUba0OdWhQg3kpRQAiWAd9O6D6vj+OcwvAXdcZIafYIvGw6s3VXJkhcOSA8+xaFMndcNEoMySAZiZzOOQzkkMn0eXWAbBTPvw0RT8/sl1womCcIVQ82ZCZWSklnKuHD8fAU1rok7PhfHyAuwEqWdusCm5oKq0kMAYkA0EAgOBOFJzQ1RctJCleK1TpV6uh61apEwyzQucgteYnv9TMFLGiLqU5kaNDPXUxVSUEIho6ZgxI5qGD7eV0VbebDQAcj8fVpuv7HoiO81Ghrnfr9Xb197rPdm+n4y9eTa/f7mu+8/zVeP/23buu+k7p2e7SV9hv19t+VbPtjxmZEAgMTpp4S7Ros3kU2AB69pUNQ+6Ped4VVjOthg6x09gFB80qFEPoYNI6jXdhHIYXV9ebdQ3pV4e7N7dvXo8PlzdXYbf+6vYdDB10sU57bYLy0lxfOUAjuzMTBcSIMETerda7Yb3phuSUgIIjEimAIYhKNnlzfzdJ8W5Qt1xL0zsyaw14aGOvTXITgdGlqpOrgM05UxcFoLiS1gqqzgpo4OjO4JHYCOnEzWhhm4hijLxa5ccDqHk0IHREioFCACaOxJFYkRtGDBxjpJNtipu7Ay97uIXghV6wMC0A9cQ94ydevWALw3fVDa0BUUXtBIAGpFJmcgBAF6VIfUwiUmv9cAKcVx4+oUafO8HLDnA/jVCZn5xVW0LpCxUIAEDMEEHMzWG97sxMq+Sc8zyDaUTkGLuYAjE6EOFqGDSlcjyOx9HTojqxPK17Wbj+wQGd7AxPLZ0B5RDdDKxKCvHq6uqTTz65vbsrWi6vL66fX3d9nKt3Qx8SUcDLN1P9/E3+4rXPOaPd1fmhzobwYnt5nfE72+eKUBJ1qZO+zLUayFNUoMXJ1jFapvIIA3IX2yEQvtddmFmmOecsRRGkoorpiIjBrYdjEZveTa/wsl8Ht8f9OM6j9GHz8fMHyz/99RdxM1ig7FqaEbgZmiN5RErEzYQzMEagdd9frNe79WbNIWRloEjcxpkqeil1Kvnu4UFjoMBHqUUlhIAhiEjC0Ih0DWBoydXyHhcyi/XDEFPCSBi4mgotRorLSgwE6Cz
ghNx1XepCSogNqKeu68jBxb2CAXAIFALG4K4hEPQRnVqjjJnCqkfMWrkCqurZscrMiCLiyVDjSZ7SYpA/6QEDLFN+DbR8mssAQM65QagNpGoTZ7XWD42wb0T68wnw4XzwD0fm16oDcrRT/Q7Y/kVVzaGZDzb8Nedcaw2EKcTEIcaI5iKCHFNKa4f9nFXV/YPe3cK1VlW187tF87NwnbuP4xS7oRnfxj5ebLYfv/z0OI5Z8tXN5cuXL1OH4/xI3NWK03z84l//J749bo66SolJq0LarG4uLj+x4WqvL7rNoeYHEVADQk9sVsSsUZKWz9pBXQER2VCxyTM3t0VG/HR744TzPN4+PjzM+1LVWR08q0AA7LgG/9X4+Ouf3/UIQ4gvVpep64ftxcXzm1dvXr8+3P74+W9jCgauLq0HDOYI0NT9l1ISMRF2MQ3DMHQxYqBihMTEbVofXQ1cROaSQxeBSVXMLHQJkMs8eeRTvPqwsHB5foYAFLhbDbHvuo6w65owrZ+CMQNSYGBEMAwcuq7ve0rRzMCQAqaUQE1YztB723taZiJKKZmqzFa1RE5937sYel3IAH4W41n8tc9lQCOctvGXbwSm9sU0Te29NPpMK7XbChyGgYiaDGMTiG44ni1TBuePA8Hdp3nqug5DUFvAJqRG0zNDUAJg8mUuyQGMidwM1FSUzAEoADrAqAUd0DBU3Niw69frfo0OQxrcsQ/rrhsEvRNThzLND6utgYpqdXVyZKAIhKA2BeCOEiExgprXagQ4E1idJpA5GsQxMuGN/MiudvH7m5fP5q3tA1/UZ8//7a8++ic//+in9z+7WVQjx5p7sz/gNfnA1aMB9WHvR+kwEMx5b6Cwws2BM8IEcnTNaJaAAZNrFA9zvbl6Nh8eV+t+lvJ86O/flp/D7Uery6v16kWAfh4SwOP88MXD2wnCOym3VicEC5AuIqc+phWU9ZzrH//h3x7f3f/lv//z74WPXvDFuqZ6n1e03tdczGMamBkDggHuYs2yMfpOt/khXXy2jy8zPU+dzBD6bmJ8J/k4hD2H9Y+++7/8i3/GW069rQKgYsgaq2BMzAECZ4CKxoAIzSrKGEhyjZvup4d3+ukV/fDjr+QYdR0fpmG9idGduPRcsARxJmQgfLFu0zOBYuKADtmnaZ4FVVE9ACd2UZ9mVuv7/m1AAkopWZZ5lpB6RLp/OAxdj+qaVQ0MG0GGDNxpUbpoo2JNyRodiYKZtBROVassyGaIS6/J1KoVZnYzKbJdb5sH99CtAcDEs1QADE+30TdOgKfXeReimYJrw2TDYnesrSY6cQQQzNUavx8KECzcawx4Hunvus4MFpTqhIURkWttqZprGylwDwiEAcnbGddUgb2lggLkjiAgFdRn88hJLTBfbHcxdY9zfqjFHivf3qaHh+sly3QAcDM0P59ki2XqOZwgMFGCJRNDaGh5A6qWeARAABBCAKLEQYm2q/Wbh72F1fby2eWLTTjOME+rdX/10fMvHt8936Qjw75Mop4woqKUsun73/69Pzg+PPzkP/6HLa+vri7nebrkG3EVk6q1mgBQYuxDIiJ0jIjrlHar9cVmveVVD8ERUtcpghPevHh+PN4/f/n8f/rn/1zc+EmAXA728xens/1rhGCmKoKIu8sLM+MUl2wUQNWLFBKIqgEWlX0/DyJzakNnlQiJYow1FzUTUzMlhyKCtXbrDRF1MSnUMmdq3nhhyUToJD7QAryCS5VzAbCUAad/Pb+w8wkWQlA7eelRbAUAIjYq0dfQpNMV2t2Er+X9y1oXVT+x31rvuk2tiKm4YWDA6IynXI0AkQjdrQE1ogruXp2R3L3x5Ppu1fc9OoQQVJf6ps1TpJSGYUAxIm+u6OIVyN0DRrZA5CAu6GBgYgEByIkRHF3QqooYSCSsHgBWq5US7e8f3h32Orq9e5f2d9/jLWRpR1xb6+TOCymG2jJx94a0ACIRKAAQurU90HS9TgkhAgB0MQlS5AAMl5u
dfvHq/fxql/3y2Yu+71UskV9th+fXN3vJ78f9XjnnSsDr1PebTm6ur9fbP/3Pf/kgtx8PH3ddx6tuLPkwT7OLoAMTx2V+iJnXDn3ong+bF9vdzebiArtOHKvHoZvnsVut70p5+elnf/rLn/3q1evVzWXP6O6uhk/iGpyF9gFOZl2nNCDisWbq4ouPPiqmsUstdLljrTUrkCIDUQxd3w/DIAmaU0vbAK4msYQQOEUDz1JrKbjMSzmYrFarZQNQydPsVQKSJzBRIuKAqAs/QtwaqfHp6j+vaTvNtbR31LKa1hpqhJ8UF3MAdw8htiZA6+ycfxYAQnE9h3l7shMAwE0Yl29iGzJQoSzV1NzRQwyEpxOq7ZAFDXjSml7CCjP3faLU931KqSmFnn4tIkLgMAzDZrOhwxEBrT2HmZs5ARlak3gHMhd1r6oI4AYRCRGM0UyLa6nm4lA0iZdZ94+Pj/vHQVOsZbRaI4AowjLMsGgBALbeKZw6X+6AjhEpII7UhqqQHVtrFq35ayACqGoIQU0jkiNc9KsfvPzO7ZvXr169ulqt1jfP3FTnSYuZFDger4w+6Z45+3g4JkmXm8v3F1d/9qf/4c2rr767+ayRoL778ff+8pc/P8yjBMJAKYXIoU26kPk1h4t+9fFm96xfbThGI0QzhmwSt9sHyXMgLfnf/eQn22fX2Y1OI1GAi+LleZ8DgOHSxmyoOJgL47HkeLm9efniAE4hqJuBl1JIrIO4DqlPw9D3w2o9DIMGDyFEDkTMxIYYupSqpNyFLmEMASGFFv0CM3fDAAB96gSp61MFCEDokNVO69vccOl+nEg6T8N/Kw9ao5fog5JcW2YxhAaTNMZbY6y0dvWpPv5QNCNiKLWeV//T04GI8DSmCEtrQ2utnouaOQITQK1M0ICq1gZXUVVFMwRo3D1KBOaIxJS6EFvlAWcf3HbauDWL4RjjQKyu6qamZGqEaG16w5puKoGqKS6eeKS2iNw7m2iptUgVyKCGU9Z5nEyqYqQh2bp7GAXFAODsstmCf/vbALUxwx0YEQ3c2wT96TNBbDiIuxOxmZdSNmkz5cIEodqK4ovr3f27t2/m2/Cqf5BRx9HGvA3p06tn67DukNkhEH387GIY1l3X/ecvfv7zX/1lAH7x7Ob17TtOHLvwqze/riAChBQCGLsl5J44EX/C/c2weT5sLyhiFjVRZCbe57zZrfeP+/7Z8//pf/3/ZlcInQOT1xaUqAHkrXY8ff609J9O/yFUhkx+cXWxvroYOUOIVWcSHWsFg23Hfd/vdherruv7vk8dnLQZbZk1wxBC6FK/Gvq82ooBwGq1Grq+cb0mJDBj5jbfZVXJEZjh5FdgZg56ntxKKbXVdepq2dNgbSfW5zmo8xMDPHiSyT/htn1tqYdZ63n1+5mwiUhLt+eUvpupadGKtToCMIEqqFgF5+YBhnh60e3Ia9Z6FNFEASCcxxoMwb0R9EJIeA696u6QmNQ0q5Jp+2zdodUYiAigBLT8jY5GBtzk85XA3LJUKYUEEHk/HZtrYnXny3V8cfn+l48XpwRmoXGcCBu+HAZNSdhBrW2DVnW01U+OjujmBMjMrpJz3mw2bZLGVFcUxAX7lGf+2ePbv96/nuqYAJ5BH3eb3//sB8/7zf2bd5I1XV6Moj/94vM/++WfrXHd9/2U56ub6+Fi+8uvfnV/3IeUCMEMgnhi2MV01W9WXf9973fD+ir1yZBFzFACurNE+vzt66vvfecnX37+y9ev6Wo71iptItONAFrKTjnrE0zPvb1xbxKIBJDRcUhXHz3nLnVdLIxWrRaQaQ7OHlfDanNxcdGnxMwh8dn5vB3RROQIyOSBOMVuswohbDabvu+X2DfNdc4LlBQCEap6E2xsB2tT9GnqCkTE/KEIOeUwH8Yaz9f5MW25nndI2zbufvKBdoAPTwUAoaqcnxdOBH1sIlC27CECJyIxrSoRoRnzOGGttZhyDCEEq2Jm6BAbToV0GtMmczc
TX94DmlvbIeDEzECE1uoHIqKecKputYApBQTApqHkACeAFBjQQZvQhlEoVUgUiRRNwRU9shfDx+lYtQjBbCVd7/rvwt3bx8uC0NKYdgLY0oJu0Ja1NqA5AmBVB6j+ITQwYBt5aFc1Vc3teRKxiQDSatW/+ORlWYe30/427yfA5Kjk//Sv/2wE/91Pvkc9Ty4/e/ern//qi788fL6G1cXV5Wq1yibPdldK/tc/+5mDBgoUQhfjOnS71N+k9fWwWffDd5SH2CcgNCOmEKIjTCbWxX0ZbR7/zU9+wkM/qmKfmsbtWbSnQQ5ykn9qf51AQG82PiVCGIarj58r43q7rl7c0dRlKgXYN5Zi7FerPsVWutlc2k0lX0TQVLWIOCJ3qSNOKXWrVeq6FpvXxIcWX2xZnaK11KIt3YWFSbGsXSYw/sZaP2co8sQb72l9nFISEZUPBhlnDu/p1D+lgu6hnoqDc6rj7vRE/dPM2oyjmIkZE4ZmRQiubibm+KSJhgsxpiHWeMLOzcyJTr09AACV0xJsfRUmb0PKAGTaNAmZGA0NXd2YvOXdRK4IjAgtYATUKmiVKmt0JfOAxjRnO5R5tloCjMZ2MdBLfrgIelA0AEc0b543eIKCWvhp4z6s7urobtyiJJA3S7pmVAgOIG4u2o6SyEGgBgpJfeAQQhCG0kVd0VhkmvIW6R//9E//1U9/ssa+eL2H2YFW66vvXz0bx3G96tYxWqDH8XD/eM9AVG0Vw82we7beXafVFXW70PUQniVmYhcDII4hxG4yG/NUTF7+8Af/9//5Hz/IPJqn9QXG0EXCWs+5QRtdhzaZdJpKwVNS5AjoUBniqusvtkqYViuaxA0bS1HRXbzpo4YQDA0C61GfFnsKXk1FpO97IEzqXdc1pXJERCJmn+fZXIydGAxBVUWrnYrCRaovcKNAl1n9SeA/5+7M3II6nPDDtm0+wP91mbpU1VLquZzAE+G07YFQpD593gXgM40xmi3tNJWlOOYY2GmuxaU0/8qqyqru3nUdOiySQU2/hEKIkSOBuTqVXKbD3cVqc7W7aOdbCKF1bUIIjmC1xhjZ4XD/cMzzKKXbrlfXF2qyW2/HeaLTNFRLWNvCnbwExmCkVms1j0gdq+Ld44OxG2BadTe7mxB2++N9/Whbvroz0z4GUte5DF0XUhprjn0nJkBAhu6uoozUpQQyWmM+I1OtuU1RxqTgGMOcp6mWYRju7u6eXV6JyEdxCGbjsJ685Gp3tVRy6jrtwjzLXMsr3zsA931/tanb9e27h77vLQAlvn24ffPu7dV2OwzDarVad8P1anMZh63xhYUNxRWGDfk85W7ojfjusKde4uVOSMJm/c//9F9/eX9bInbrdbWKgv1mZdPc951WmccpxhiIa51D32krqVqaRYCwGMfvJf/Bj/4gg/ZDp25IvBnWD2/fkUOesquvup6IxI0DAUMI4XA4TNMEQG1um4i22626DUyq3iwq6HT+FKtd1x1zOR6P4zTlPFWpMUbx6rYooBBzChERc85Mqa3+xlxwXwrONmBwRoHwNFq43++bcgSfPAHaDjlXCyfuE7TgHuRDjffEUAaxkWbhyXWSgXBHAET/MBfvJlqhLHkCYgrxvCNbgS4i8zS5KK63MXa+MOjAzBzREM1RxEqRw+Fwf3//cDzMoluHtF6bgweQ7BTA48LHpFYCA2rk0JIyY0EoLsUVVOdaikpMzOt1WCehUC56+PQ6/8WjzkrgvWM7ghcmdvuUwRwc3dEXbgmFgO6BiDCQg54IK7mWNPQ2zbf7h09fvFytVofDYTOstmPuQhfX14HQH83nOhtTiHXOzMjUg5ky2jrqEG2I6+sUiICgWkW3PoRViJvVpotxE9IFhJ3RTnCtvgHryfaHu8++//3H6fju4X79/NlE8NXDXf/s+t/8xZ9/cf9WIoVVF1JUVzOxkuk05+TueMoT/Kz/576wCdyAgcxp3UMKBcxKthGmWmpVMIDqXsyqSK2gAKE
16S2F4O455wYjMcV2a9Ub/biN2fA5F+eABnqcj/f393mcAGC16hnD6AetzVD4bMLrjvZ0tqt9sUyffX1S5Wka/2HFPgF17AMy+QHuR8Rwfq6ny/1pYfE0YXL36dRyi082wIfpTwCIEUJkZmjDkGruXmt9fHx00avNpS+obWxJnkEzajgNMeWSp3kec3YdimoBBYMsroBMwWMbJAKFBmGDGpklRTCo7mAqXqzUx8dHySX12+1mE/puqhbWHD+7zptfV6ugzkix+es2EQY1c9PmFIcO6ASuCCcTiSX5CUt7EotpJPTI7x/uXz5/sd5uXn/xq92wvq5kSKs4xM7iYFuht9N+yjJXDCFQCkgkbB5DAI5qzElLPZYDG3iuveHQ9RdxGFK3DmnL3Q7iBnBw7A07g7hZ39/fa4rddr2vRTd98fRXn//8P/ziryoz9xFjEFdyZ4JgmjggYJtlDqdBIv/6qJODG0Lj1qdNj10spnmeqstYMiKzE6hbMZlFi7pZwOAIakq0+HIDLAvdCE0956xu7th1ncV4RtibP7aqzmVyl/V603N0szIH10VtbUlU2ijxqQG1SAScFltjdJ7fQlt+TylA9kRSzc9s5dOTn69lA5xPkHM2f06Kzr+g/eI8TsycUmoiLcgLhkNErcHgT0ry82+ttT4+Hso4X2+v8uUVGMTmWuVLW+nMF8qixHG12gQw5jhN834aMcQ09GxIEGJqrg3qrqAezYJ4yOIihAWpGBSvOh9HrtZf8TpGBRylxIibm51dr2fJdiiJKFBzbdUnDgquBAAOBARQmymMqAFwa5iYEaIyhaHbl9lTGA+HNw933715kVIys8uQjjnPkzyLNKxunvPwucVfT3eZuSKYAiAgJgaC4qRZXTRXFyOgBNRR2HLaUtyErg9xQ2HlNDANDm3Ub3119eVXr2jdh+vLrGUv85ePt/+///KTmYCHyF0qWr1YCjRwn4iYP3Co8ETjkVP315cZDyNHBEPEuBkgYgUzqRU955rQ0NCqg6jmUsZZSvUhwiJKJWdDiuUJ1d2hmdu5o5TaNMpbZpJrAYCU0mq1IvPtescG4/HIjBq4Kfiaa1UJGGKMTWLwKdB5XpDfyFCWDUDne/k1bBNO8Ob5at8PCx78Lf37p49rq7PR6PaHfdd1hoCBO2wkb1RVRrQnJvLnF3RqVdjhcDg+7O+v7qdnL1KIzRAKkBVECUqt4zge9uPxeCQKl1fbClDd9/f7X71+U1Surq/7vt/u1tv1atV3HTEhA6oB96ZdFZslQuUgkYwM1sQc6CL0m9SP4AUUui4OXffxzcN0lDmvxDp3VSBzbDafAEZg7o3nSeCC3nVd8WyliVq4mzmjufeb9fvXX623FzSkr96+vl5vd5cXdS6ZS7XKalvmXeq3Q+y2vobwfhofNY86o9PA1HsMSl5s3QWI3AdaYegp9BSGkHruBkoBuENOyM3ogRwB8O543D67fjePXnL37PIvfvmzf/mT/3hED6se+yRgzeMlEQ8IbEYUDU/GVE9WQ/vKCei8GQDcPa0GjEEb3dcJzR0ABbyYFqljmY7H6XDkiNxTsZIPRURCCESh3WtEZCQT0VqlWp2zVXHR9XqdUhItANCmsQhxGDqtxnPrc2NIsckEes2I2HVdLXJWeVg2MPN5lX4b8j+/x3NG1L5PTxbn02dYXMSe7qS26J923Wqt8zzP81xKmXMGxJiSqmoIkXkZKEI8z+nCabDLzNywVs05j+N4e3v7arV+cfP8+vIqxmgGgG4I6tAKgHmexykD08XFlTPvp3l/fHd/+3A4jg/3hyGl3cXmo5vrF89uri4v+hQIkCMMVTsEQ2dwwcYUtLTeBYPdMHTOFdRCiKGPod9856N3d7fzw6GoVzcRY2RsDVFwd1dsIotqbujUrQYCrAsHfBF3MbMYuBIYYbde3X/15vbh/gcvPplyfaPH0HHCGDCISG/wyfpyt7386dtfd2V8nMnRtmHYxq4HRHDoYiJeU+y
BO8MEFJCCc4JAQNGJmAjZDBUQHKainEJ/dXlk+PlXv/qrX305MeKqywTsSughcBewJ2ZV9gox0Bn4U/M2RxuCnbKgsxVQG3RebdYcQ8Xz8iJvI/SlUrEy58PD4eH+HoInTxUEq7TVch4+VFUAk1JNtTVGpVY3a+Wlg4tUsdrCq5kRQdf3UzeZGaLgCbYPQRyi6gd0Hk6EnwYBPe0NPw3f5/BvT5gN5x9/CgQhYjifEU8z/qepf1NTGcfxeDzmnDnFxfn0/FN0ficUiM9JGxACgonmnKdpOhwOr1+/9qKX2wsTffHiBSI3dN+I55KnaToej1lqDB2HmIaVcxoOEwLVWst9OQLOh2MQvxjW3cX1rtuEQH20ldvKklHuIUPIe6pgMnc1iHeGNheBwsgtGbj69OVXX/xq/OqdZG1ILjpR4KpmaOoq5FVFpEYkqBA9AUAgprikzqOLI4zztNluFZ1iiH33sH/MV89Cl97Z2BGt3JMJGgWgVQjcxU9efLTW/DCPUuZkvgXeQOgZH1GHkFYUk0J0jw6RiZkjM7Q5C3RFQ0Z1IIC0XR1yCSHcjY//7s/+7J2WzfPr1/sHiqG69oAhUARCM1QPhGbWWBTgIKdMGhFPVV1j6bRJfweAbrPhFKUWdyBv5kJmuZAhA6KhlDKOY5oSdqhkMbCVD+EWoEl52jiOAAAKtZRZVUsldRSTgRo9IXWhCfgxUtd1wzDUOdeaW9bQltCUc3Ofb2y5M6RzXm9nYZXzol084s8p7Tkxe3L5E1pUIIfzn/8/X3/6K8uW5QlCa9rbzNz9DHd678U8ZEbOWYWyVDRSV1dDC+iiUUsgIbUEYvgAQkiA1BJf4N/iKxICIZWKrqyOrMoxsiIjIiPiDfe9e8/k7ma2915r8WGZ2fH7IhvXU+jEuccns73XXsNviEu+HC4LrN+8qZba5lLGaZ6mfZcvhw5EgXTA2tpWl5iZI4RHX+zm7QQop+nNq1dZUt/3X9sADw8PDw8PZW5NvZSS+iGl1Oc+pUTAgGCtzONcxsmaCvPQ9X3OQmXverCEnBlypeQ4eavF7snc5zodTwUbd71mLVaHly/SYcAkxhaN8JhERCkYPPRqqqbgRoJtHL00UhPODOgAYkDkx/P56s3L9w+PCWl/dTjfn07j+eXuqiYpY5mmaW/5Ou2y5LPrOI6vXr7sSPflfD6e9OnYzTogHjhPMOdQADNj9Uw8SE45O5LS4ttFGAocbmYJu0++862//fzX/+bP/2zSin26e3pyoTR0rVR3RSRUt1qQuZPe1IId7xfEcEJ0N/hw6UT3r991LOJltkCNhY1atRxMfGIwt7ooMAtRznkcx6INgHLOcQLMczmfz2Gd1Eqd57mVyoucSI4lnocBnWyurkZEF2yVxdmumWkpA+UNaHM5rcKL4RdcVp7+wfr+Wk17ufrjBQUbExIaYUMRCKdLIvJqrbU6z+P5aRzPWssud4d+4Nubw27f5YweYExB9Kp1nmozcBZPYoSTVnMn5JdGT+8f6t38gl69kY/P709f/OSr3Pq+3ymbklWHWq2O2oqxDuPpF93VvqV5yuej1sf+Sa+1c6kP547oRX/Q++lv/6u/TE94/Y9uuKd+GIWodQDYZGoviyaHJ6Jfd/Qw2EQzqvepP3gCTzIc/u/d4+/8p/9k/ukv8ufTrWfL/Aso+mbX0NODvj7y4aSl+OnQf7qzv61P/5m/eaynh/k84lTIJ1clIOQX1JUv7q8RQPCccHqR/wLur3v44wep1dGlT0nIvD7tHPeIfH9POUHuxoEegI5pLK5PBN86vJlPZzK4OdwkhXqcSfmaDm1WR6gOxbQAUCf9ftcf+v/3+MvP/t1Pvnj31cyqXZpaQdfBmU5Tj5RdBpIBpcuHHhmcbuuJGE/kJ9CJaerkPBqeJjF6eXVTprlB618c3s/HkvD7v/NDv34zMuWboZi3psnZ5olaxeP5th9eHIbDvr+6vRoOQ2Vo3vxk2CBjTsBUHdxxMjv
OabRWZ6vOiIMBWp2P9/dvT/Sdq5iLTVbBsJPEJM38/vjkTLurw3yea63okESY2b2QIIGaFkvGkknYCPvDlTc9nU5geHO44lVPsj/sp2kqtYKQSLdMi2nVvwIUBBEO9hYgCK6577I5zEMkY9PGig0XQA5kzv2QcxYR1yXLR/ug3N76P1UbIB4nPZ6P53lC8v1+L84551rrz3/+84qtgrWgWxmDCjiNZeKhM2+ttbqqJg27XY/JTsVAq3pTf3x6ejoeWeT2cPDSdBzbVGspVVWhNdau63rQhg7Mi6EAAIB1LML88Tc+8Z89tcnD9MlrG+tsc52cmLCyT+wmBCgnLU0QIdU6nVopYG4ArUbBFyIFXts8TXOrrTV58V2f5nmcplb7TH0/uFlrbS4lhThCALABSN2svn///sXNbcdyHkdueHU1uOGv7r68uroyR+rTcPXyetcfS/n1l2/f/uKrn8xvH8+n2VoeehQhNKoeSE/yJdknMEZCMARYhbs5D10TwtOZkgx56KVHpFomTMkztwo8dFevX0wiSkAIjIYoKk2Zw4omGO5d12WWOI8czBWWxrw5AYJ79EtCRs0VABEsLCLd3fV0UtWu6xJLouTu4zRN5zmaVCJiyRBD0S56MLDl7uQQ3AMnZE62AtDMQiz4eS4WFf5l4L+cJ1w2flZCjLmhOUoYOdkqDtdaA6CUUqIsQevMeSl3VqQQuBNg+DQ9v6HaovU1zsfxWL1Kn1+9eek3tt8PYf99LuNxPk+luCFDl6hjTCOd+rZ397nVcR6b1X7fDcPQmZzfPdrYHNXm+lRO74533fXera9Vp/PUzlMrc4VmCSnRru9HLcmr4UqhRCDAneRO0w9+90e/+re/HM9jpJZ1GsncCZvQSdpTHZ8a3hd80vN7P6Sh916mYz1WrbBs+13XI0knCZmbeYYyF22n+a47dznL1cFKrYCZkVgYIOWchz51eYeQu5Rzrq0Z+K+/+Pzz6XwYDrfXN/1V7ygp50++84kBKvq56afz8ctf//rzd1++ff/ueD7Nh94Ac99Jlyu6VfNlMokIyOgEvgw3CMmBidw3mCQaaNXGpo/3X2Videu63dzmo9eXt6/ffO/bnw0p/EYEyN25SSUCpNylYdft98O+H1JKCIDNgEJ0TVtrDgjmpjrPcwifmBn4wm9XNzIAgMhgW6l96i0bAU7ncTyOzIwAgmJiYG5gQXXfREWZObKsruuASSQ3KiIy19JacyQz26a/W890y38WtPLqnbE0bMInjJEMjAENzZABVJB0GdY6IgpnzpRS4pTmpTNqC5naAQGZuZUa7wGhnODeSi2t2nxq0Kjjnvtd7jtI+/2eM7rYw+nR776qtU2lqRYkAsLWm6OpWZnO5/FcWk0593noMLFBzTNW11kL6N3p6TAd7++rTpOPxeZW59qgKXHiHtoMrq4GQOSWGIUxMWVKzPyNH37v01fXp4cpjwoAPha15oAj55bq+6kcm43Ko9Z3MF9f7zzxNNEZvQVcykzME5IBJZKeu11qbI2Af/LZp9/6xjffvHgJMk3jDNqGlDmlWmvAchwBSkvmDGiIH715/e7du/un+6nNZvB4Onb97uXHb1zSaPXudPry4eGrx/vZmvRDd/uic3J3Y6y+yHghUZ8yuLJDQhKgGNiJIyEm4tz3zji11ghBuICejndDv0+Hw5DSRO1Jy9QRvbyy2x13udXi6gSM7hUM3Qj95ubmxdX11X6fc0YHbEYICTEaPd60AbiathYbgIiAGQAZcRHeMmAAUFPXUmflNgXBVwER3Sw8xayp60LKCA/POApiA+ec+75HYZFcAEVkhqKqgL5tgK2wuWjt0CWUyNcxAiIKGBoCuVm8BiI5KDo5xBHDzBjIVSEiehb/dwjCO6wpUCh1oQPycwv1rKMKdLvUWS8dXQ2HV7cvUpaGdXjsm+l8LtPjY5lmRxf2fJ0piVo9zuXheCzg1WqHXeqEX1zDXhNIKzpN01Ob7ufT6xGpOii
Y+jzXqU2IGXdJa9PWVBshECYhTExCKMhmll5cD99+8/DF+zrNDCBFHx8fMYsOWsDupdZMnmmw/P48tTKhy5O2J1dHTMH7by0DuWFrVltzBQICwy/HEx8fKxM2Y9MXkhKRWkOmOFfRHKymhWZNjbtvfetb948Pv/78s/vjEbtkNv37v/087fb38/Q4nU04HXapPzT0c5kGTYBoCqXVscxmllPKKUEFQRTk5JARBYERGIAchtRVVvPC3L94/WIWeP/49DiOZfY621ELHfqX3/vmy9/+rl731CV0RTdCwOZgKuAi/PHr1zd913UZ3FutTATIjNzMYhIag+RFkXSdrjqEknQsGVTwNjUR4JTMVediCsK85FSRTdWmquQhlkjPyiOIxBxS6ZQkpQ7NV5N6W1Edz6DMKJFjFAuA4RZz2duMPxO00LomCGajQ1gyrNuImICYWQSI3IGZ0YEAORxcaujeqsRujmf5s4pTA6UOBRJWYpL91eH29lYSjm1qVq/2h7t8T446K6FKcskJAKZ5Pk7n03Sk3AEhMO2ur3Yv+wwkwNb87uG+OWiHIrmWqdbWpvl4PI31zNjnHTMzR0qAkFgSY0IScDaYWr3jufvex+ef/AzeP9wyyWgHkoKo5GfUM6uhJzBSPWptxydlvB9Pp1IC7BJSoeAOrWFrZqbmigDucH31xXh+ezwmh9f769QNZAatHVJyAzMjUzIXICYkpPdlVLdM+NFHr/cvbk5WC0JiujsfSfJwECNWxsnrVEqtVUgQwQEUFzgxArR5GnKXHTNABk7IApgAGQnNCT0RM3A3dHI15Nc3b8w+/+LLx/H0/u79kdrL19ff+P3ffvlb3xk7wthGHs81J6Q+73P36uXtHoGRap1hUnVhcDaLjlDgEMB9U0WHRfoTrBmYVW1gyMxlKth1XUqCNGst57kiWsrDsEdXMEcDNABcVtFlKr/lQiySUrLUNmSHfQhfEBHmdNEjWt00LnA9y19GChSkewBwdjC3cKeDmL0xCxOzIVgowQMArkbkYO5ODn3fU3hU4aLSv5xcmYGIjaIrmvvMWRxNRIZhuLq6OhwO9/lxZk2Y+5RHm+dSjuM0zRNTOlxfo8jV7ur6+urV1YuMaaFwdvJ0PtEuOfDT6XT/9sv56UnnycV3uw6AhiH3VZsSJumFEzG5oTYHO9X5l2M5fPvNdNMVbLfcd8CHw80J9TGjmzUCMueiMFbA/nieRq2jt2IemICEQQDFGLICogkbuAPMmco46TSnYM8QTrvdFeVxHDt3dmc3VGf0aAvurrq7x6MTv7590bX58fPPzm3ub67drOtSTv2s7Vwqud30Q3f7cjqpmWlbxk+xz0GtR07gGSgBZsBMLECMPkj2pooonZy9zVr2H736wXe+89959eLzr959/v6r9/PJdvnwrTdzj09tPAwDMAmkjIDmktKOu9vcHfZDas20lQLuKqZmrTG1praGbXC3ugxuo8MaC0ZVTRXCAxXQS6swKUmttc4FAFBRqFhtoKGIEGVDuMk/I/MvQzuvj2jE+wUFN9KWIKAtCbvaNjHYwn88fekCLea25oAUyotmYGCRzARf0dwcoQVRWwRZFuaLuSP1AAGFDWwZxTngkPtkRaECMAhISkxEpRZOHCz4lStUCEgkl9JwKnmeVbXb7V68eNHtdle7K6jaDYMAl1LAADPZ7A30VOev7h8+++wzPZ17weF6xzl1XSdD3rFqRRTuJCUgVgdURRjNjufxt16/Oe/zmVSRCDAZCSqZE2ImziQ3JIB2lvR4PtUyGwNy0CQNiY5lKsiZmAJUzOQA6vZ+qsLYXe2h+d00naep3b5ML14KATsQMDozGTq4K7jrOL++vkXhU2vJ4Hsff/Ns9bHMdHVzLOX4NCXw2yRIXKZWHu/a4aq1GirkSWjfD30ScWT3BCQAGSghsLugs+PQ9yfT1hx7meb5y/L40aurV998A116kV4fvvNmRPji9PCIrSTkYWchRYOYkak5JLn
m9HK37xJB8D/MEBMSmDcAMOLtBHhuspgl4m3OAAAGEGz2Q7ef5/l8HGN7aG0i2cXLOAVZERZWubkjgMoubzHb/eunwRrUnwGeW+SNVk0EfjPfRml60QUSEUmUAIAAwaxZC+VoR1PV1GVkiEkWARuCO4hwbCBBUtU6TkS0H3aVOaUkfedEVRuYE1FiObbSp15AsACHHRja7jCUMlVrOefD4ZBy7ns99DdDt3tHtL+62e+u+l546IZhd9gfiDhLP00TA7daj6fzw/Hp6Xx6e//+8QE/+8UvHt9+edV1H93edgC12VSKzX59c/vw9te7vt/3++nucb8j5vxf/8Vf3ry4vbm6fqtz/62Pnw4/Hx/tzeHKj+do7vaSboeDFO3O2hV8GKchd/2Lw0nLwzxKlxr4w919l3Nzb4I5Z2aOfm6IGKq5kXcMRt6KPtV5NOuYyMFqnUrNxEMSAqmlmlodJxAW4h6ZHBJ3+13/+bsvBx5e7ndFW6m1qLkz0PArkkYqIgSYGAUoGQhgB5QAspOgs1MikEi/zZkoJSil2A5J8Ld//0cubmgqcG7TGR16FpHg4CPQ4eY6NbPjybXtUjpIZnc2QyHHrLXUWl1bGE9U1+AVRUOciLIkV4NlxmzNFQBExB2bG6+IgdYamDMnAGhzqb42Fc1XdX/fOjlE1OeU+q7ruqDjIOLT09M4jsxMi8QgEdFcyjYhjqMAAMI1fhXl5dgMyz4JTZRFyQQuTCRjSupEREi0CXRpa4ub1KakZa6qMT5DgLAj3+aIu36XQKAGPFzjNFSFZgoAnHjY765f3TJ3He0SpW998t2bVy+Hm6vK0BIJJVXX2ty1GZnqOI73D4/vH+7vnx7P83Q+y+P5aW6zWtvt+oMcFPzxePr44++8e3rsUt/nYXo6vb559WI4/Jt/+a/++u3dH/6jP05Xfn56eGdFr3at+mnWDGiAgtATE6I49GqpUY4SzkzcGT2MGfK+c3MFr6SAjcgNoDE4EBpqqegmTNR1AOWk9e3p4Zs3r5QpHENqUzXMJJBTm09mRgbAjuAJiQkT4Td3L8dWxqnMDdWyQiixws9W+wlyYMfsKEBdBH6HjNAhJgABj06EVnVhAJDM1ear25v91Y57qWpOahjtb1iQfuCchRARLBOnnK+ADhSWEg7kYA6Ey9zI3NU4kYaoopqtEDIRATVcIc1OsZZgKRpXdGcsPaJlgLH0VhZ9Gg/O0wZUu0zfzex0OpVSvjblxY3CvswQnpnBG+aNcFFEjw0j1nRZ/CH076Hm4yS8bkFmFsdFIcLDPSo48+7oEMRHYcYguAC6WiBg0V2ASNFa09acFFYybtd1Ls5DekE4NR1vZmtUp3r10Yvh+oBdMi3T3Go7++PJmttcEQCbz/P8eD49nY7naS6tfvpwh9ogEbDwPvc3V3nXH3U2M5Es7jq1nvKh2999/tWP//WPzx9/43D7wjn98u2Xx6eHIctTaqxlD4pO6kiO4iROBMRm+/1gjNYldZ7YocuQxcDv7+8ZCd2sFgz3T0JHEMlWWzNTJE5ipqda9eG+7/tD7gdh6JKCF7OENeooA2RwNENkgfAwkiRKDRBNiJRB3c/TNJW5sWprpECACSkRDyQdYoeU0LNTAkgI4dhBq1CBAnCSeXr8zsdvbl/eWCdaCjmiOpkjgIArIAVJCJwBM+EupStKgxuZIzoimpAbWAv/amN0SBAzOF/loWhxKFpU/11XXD5d8GN94ZZGdweIzJ0AAcg3hpaTfZjV+GZfBP70dIrRLxGFkDhsVcfqAhYTujh/QhERLqBEsaPE1BkBYKMIIQYyGCCCOCIiL/mfx6eMfw0F88sPp4qLcdTyTkSkY7Wm7VyhgHSYRESEmbhnbc4IivTiIx12dTrXh/dPXTe44Xgqj+PpaR5LcCGqhhsKOmno4CkkFmY+diNJ2u37j65vPvnkk5evX2Fm03T/9Pjymx9P795P5/HNxy8//9Xn/+b/8y9P756+/0//2UcfffP
zt28/f/tlPY6GfodOHY5zS0baoAGCqamhg5OLSCMAYTJzRTWFBmF8gixAFOw2IDLw5pYOshgfmimCo6urtvqr919d97vr3W7gRImtKZqB6w0jI0qI1asxEQI4+nwu6i6SLYG6jtP8UMtpGhsvbOZM3JP0yB1yTxRmYQkhITBRrH5EByYnVFcFVPePvvHJ9fXVhNbIJ53p7N4akCdKGSVxBjR2EkZByA5iJugIFsUgEKISADQzAlUkUNzK8SWXjm7jkqM7Ca+ggefFTGRrn37J41XVozKKTQsU7sUBm18oxavQpxc8nU7elIgCyw4rZWVbeJH5xClBxPHEBfR1UQoLusfJtQyfAZ3EYaHtkD4/ISxKMmFo+bfW0JzcGYkAW6mOoAjOBISMKMTGUmaHZtKQkPvU77pdn7JLMKxc0SFhGrKzNEB9tGms7TQdy/R+PJ7mqYKRk6vtcqcG7KDq2pqrATi455u9z3W3233yzW999PpNStKs9rv96K1W7bqOFB++eviz/++//osf/8UnL19/+1vfA6X3X757uj+xw9HhHTYT6wSyNlB3A3TLBsae+nTWqTpA09HqeTqdx+ZM0zQJ477LvaSlywGualOp3jVyAAS3Vs2tadhL38/nc50fp/OuH3a5E2IWAgDWIugEzgZsJGZMSGYVURkn9GNrD2V8Px8fdZqooQkBZuaeuKPUk3TIHRKbZQcBYHAOWRFHBOQkFd3ASrPdvn/z5lU/ZLWaTNjAytymyYkQqXfqSBpgTrxD4bmBTuCKwh2LEDijO2IlhUUNU1Gs1mjFZEnh3xALPVo+RM7MhgiGoXQdfZ5YiEv4XyIowrL+AQDdDAjdvZQS5Swv6PqmqhGAOHRB0OOI2aqFdUfZAsdwD/w/Xjy2rGlNgRxww0kTEpCBNdNW7fLhhO6AuIr3tqXdG4RljXoiMSVxWk7AnfRg0KgJSZ9yl3JiqahOhk7IRki8y1nsNJaK+v6LL+danubx1Eq4WBOJOE6ziSND0CwVHBCBiaBnYuivd4eX1/mw03metaGkYb9/+9VXL168MtQf/+mf/uWf/RU5ffTyo9cvXo+P58cvH1HBgR/rPI3H90/nj6TrXMWMHBigEhWmhDTVVlTN/GT1VKdTK8ikte32h/1uuMq9lea1uXul1qnf1UKIoTxlquaGxCLsRse5HOfSlXnfD0PXd12XcxYEAk+AyZEJmiGjIRLuuknru3n8Ynr8cjw9trlkgIF3NSXGXlJPQZehBJQcEwAjMDoTChKGvze6ATQ3Q2itfeP733z9+iURCCCaeq1tGus4GzOnLOpJwaLDqCaOZEqGKaVO2BdHdwYBR2im7KSorsBEjGSrN+MlTwsvKIjRq7GwDwtaueNaSRLzIm6OGODXpbe/KTuQasyaYwMQMSNtPEncGIi00AnMnlMgREsp6WrTCP9NnGB3JEIMqcDwNWpN1WnjpCG1uWJKxMjheOReSglTNHVrZhi+crgcHR2lhq018KYQmHJkIgdGZgahUqtPNmt5HI/vHt+/e/cwlzJZM0YMwUk3Bxyn0iGHTx4iSqh5Ij7hfHU1XL285qEzBg94mDZRJZLT6fyTv/qbv/zzv3q8f/jWzZvvfvM7mfPd3d18mtioNGtEMKQjFlDYu/fmCUGQhKJPb0g+m6rC6HXS2sDCB4ABxTE5uoIrEHEPmMgebF7miG6GzswOUEyJqKIrQq31rMrjeRiGYRgqaUbu2DOQGLEbg4C1pvZ+Gr84PXw+n+5sHjN7J5b4xTElpj7lDqkzFAABJHdhFnACZwRGQCRkAMAy1UpGHSnoD37wg5cvX84xpA/jGTVRQCYGSgpiUNC9aZsr1DlFBwkBXAnEiSwqAXd3b6aMAQdeNPEv24vzPP9G0HVEdLZl0Xt4cwAihiYcugMQgOmm22UmtAjClVKAaRGvIGIWcthEEenCDCbEI7YEPrbilvzAxSAMACQSo4tP+SE1fov9sc8Aaq3MHIJ4iOihoavWDf0
CQlIFpWX+rQZqWq2WohW1VNewl6Nq1cgNXFXP8/nh6fz23dvPvvisjTZrUyQeusQ5SWYkcTTBDBR0sxpbz1wdjHF3c3V9e8NJgDAPGRjqNN7f33/y3e/+/Oe/+Ff/6l+N94+9dK3qxx99UqZ6fDzV82TNzaw/7G92N8cGD7/6QhUMaHA2NjUEVHXvm4+tVMfR6liLEnSEWVIdZ01VjWwqXluWxIA5BiceIH5EZgCqprWW5S4kBoBpbtN0ktN5GAYbsCPpJfWYk2NyRiyAdDy1d/Ppi+l072UeUt2lmqkRJJYucWLJSBKMujgMkRicw0l+SSscEKc6qwCSIOIn3/jocDiUegx+AIIJcU5mkpgkEYujm5WmPk7dOPUKTBLpcsppwZpGrAUPdOU6bFoWXyQgl5zDiyWIUY+uy2tb/RCKEgAAqwn2FpQXlrDZYkgrxMzEhEiwltd46X2tZV29l1igD1jfWwEAAGK1ARE7xAiaNSTLZRxraohCacbMAobewKp3FToGNm0LJYIdqLn1nMWpatVR8axElIkQMZ2u6Hz2WdPA3W3yQ9NuVAQS9hMiiMw0fXr3y7/97PPP39V7AlcBYkMsymNLg3BmEs45N28zOTIqamsVGbPI75/lzev9y+G67fKnYA0cmBiHjrpDS3/5//iX8vbpo13/6eMXv/1HP3x6YdzKw/t35Vx0aqiCmO9rOfJe33zz3NpX5gJu2qw2Qk8p/ejdBJLqPE2nuYM07Had7Jh5Nno3jo9YrobdMPRzUW+VEu+IqkM1a0SGpE6K4pxAwayWqg1VBdpNOpO9h+PdlJww/O9sEW1Gd9dSCTBTyjS8UklnyWNCxO/DjI3AKzIIc2LOgNmwR03uTASISlbRKpgi4G5XxH/y9tMf/sf/5Hv/9E/e2Zxubuv791S9O9U3jV3EiFPXdcNgiD/6bKHvtdYwpbJnka5LXalIikw8WDfZ+TRVJaUuedFSDREBCVlqa22aYnnVugRjiYGTEAFM4zEU0DgnVR3nyd2DIjydR50LMiR0jjgKiJKZOWYIiJhTotD0d3BzrpAVwEDAwNyrddf7EEOfpzqW5hpeUM/CQW6AiKFqZe4SjGYzi6RlCfitlVZbawhea8W64CDi4FjEJ9QA4sYhAy6EAwcCtFXjxN11olKK2TIKJCJHBPBxnImoVP3lL3/54x//u1//6vPaoDVDaI6hzr+gsqspEGLB5s3JJQtlYOZu6IZh2IMNu53kZKvmGTS1qn03PD0+TuexlgJoKaWr6+v91dXdOM7zHNTs6FLAivlzM0ZAiwZAVXczezefwJwAr69vD7tBcjrP0/F8BERFIEajQDy4AJFDRmYwZEagRoQGHq+Itka95bIpADl52CovLcClvkIgY0eHhCCAAtSx9CzMLLVBNGQQhBYRPl7EXQgtXArd3Bzd3Fur2HdP0/njb3yy3+9H4wLN3ed5nmoxtz71abfr9gdM+ayLStrS0lkJfYjYdd2S6KoGMSCkEnAZeC1zohiKUXR1ohseU6PwFAewldsuIhT4EXcRifVjCOyAiEDIwIwUoE0nIoPQ2AJyUw/pQRbRpKROiGZB5f8HaJDLz2t8uTyXJJYphkKgeyg7RL0Da7dINd4htBBVYdW7xcjDEYjiN9F/amZBiVBVnSQSNVXVBTjn6g0QS61ffPHV3/zNT37yk5+cT3M/XLmjeUFEpkQIQLO6KbiBpr6rVoEhAedFcHJ3OByuOx4Oe0oSpzCaaWleihv96lefn5+OZtaqHa6vrl7eYpLz+8fT8Rgu2agLn2NLCrf/jS/uru+59Ule9YdPrl7cdF0bZxzLrDCRGbuKNwZD7xEHoOw4MTdjAZgRCkBBNHCIsg7BQ5csUmAnBF2gKAbL1CgKR0QQYUNxzMS98MAy5JxEOi0hPowEDJSQE0A0jmi9ZWgI6CFBXwWLtf2Lm9//4z/aXV/1M06nh6CrRMYyDMPucODdrhL7HFomvnVpghHeWkspRThHxJT
SMAwhfEJLW3NB3WzZcmstZp7b73EV5GJmzinnDIQi0qI7uWLvn5v66ESkjsBEhBC+AUyOiOFZ6MAJRNVB3Z9tPC9voq1K+M9ZP8BmYY0BhkOACmBNY8gMAIIiEm33RU3AACOqU1Ori/QFpeQESCjOaGuyhdZMtVqZa2sNW4g/Ni82TdM0V0xzBUu5/+Xf/+rP//InP/3pz+Z5zjkL8zQW4Pio1RWhIRoYgnqTvoOLB/IycBmurnkYFKlqsaZgzs28tPdfvf3pX/71dDz1nDCn159883B1c5rLPM7TeTyfz1idFLQhlAraSplY3SEKs+c09emm2+2uX1y9eiUHehjr05jm+nI4vJ0flcgTKbq7JqQdYO94BNSYxSIKOpo39+ZqAbulsOFCRAIPZYa4xICA5IgE7IDuCUkAM+PAMqTUi/TCQtSxRICE6LMBZkBxSIiRdyOYgTtQBMPW8dvHuz/5Z//BH/y3/pEKQ0VHKKU0N2TKnEJ/3NUMwM0CL7A9bNW2mef5ssmYUlrYhmqAGB0bX1SrwMysNlcHs6W7Yh7hmYVTSiEYSsLbVgzx6joXb9pqbeaxATxwaADuCsxx352dURYqIqGuyr7wob6JX9xKDfn3aLdengCMAg7a3EDDRSGlAGswsSk4Oqm6m89TnefZbGGv5ZwFybARMxBBHG0OtanW1kr10kB1LrO7l1bc4Hg8j+OESarp3cPxz//qr//iL/5mPJeUOgSJrm3fJfclc3N3jFE5iYMiORBuOnsAoN6wS4qgrZZaVDUhobpXe//2q09/9gubCjH1w/Dd73/v6uVtAyNAb9VKtWKkYOrYLOp1MydwQ1oL/wZGT3u83onuOldCd2rWOzFwJrZMJkE5VPLUIe4AdoYNQsQKGVBBZ/BqRkyOho5hsu3uBGDhfhnf1AEAWEEQCSy7d8R7Sbvc9ZwyU3Kn1noSoGjyIwIKkgAIQgIMkl5DDJSCgpv7o5W7Nv5H/+K//9G3v/l3d5+HZZ2vU1shQcRSCgB6lzPyHOe26nZEhNzs+XzeJM43cwoRwfbsdLKe8+7uWqNIdFzQmkvBSn2SLnddl/uORZLZPM/zPIvI4q4C6GYqjOZC7CzIhO5AiEwp5aXZ6uRmpGpozY0cgBH9WSUFLiKZ4Tq+vVA8if+VrS2KF3jrRUgDcDF7aapudZzO5xOYLowyp8TGYE2BGQjYm9X4PuepziVKhfk8IlPVqmrnaTyezypUtf3i159+/tnbcZ44ZTcuUw3CGzO4EwIgh5ZgQiYhbFodjUVQWERSkpBlb0TqXkorNXInYkdVnx+O5/cPqNZUh2H47vd/+OLNR5O1hEfwsAxSMPDmkacSES4DRVWtRZtrI6I79YOX9+V8UHuJsNvv4WTn+bzrMwk1NNdq6maIlICxM0wICVCIELwizACVIq8N5ZHwVnRQVFfCFOAaMCXEhJgQxCUhDJwOKe8l9ZIEHMzRvA+0eewAQHYUAHEgB3QKaykFbw4KUMG/qudv/vYPfvcf/1Eln7SGrpGIBFXFEZqqtgaAnaRMPH8onqOr2KCZ5ZUNu6TNy4n/gQ6PqbqCmVnTwDQgQvSp4o8gcMgpTGOygjc3aHVuVUMAa+F3IjOxSFtVdW3lphPJcsI4mxknWcjHtjmMLhik5QPSCvX5kCas4AAgtD1WFDXLsiuYCIBUdZGJatqmMmmJMiinZKoGCESgRurNvdZ6Pp+n0zl0i0SktSaUENEMytxOp3E2O03jr3/12TiXnDqiPE8NEZkFgQHUyYWS5JRyT0mA0NBAXdFYRIRSJ13XDUO32+2U2cyrtqqeCYiQHVD1dHdXzmOfO69tPxy+/e1v+343np+0VJurtsJGZGiqXuoCYQ9t3IU5GmRKUjEc+pb57mFsczmgQyJT6CWL4GSt1lmrKvHMiRlzAwBu0R4PRU2EhjgTOIIDMhABht1yCGe4hzRMSogdSyYUhA6gJx447Yg7BFlccVVCsAYX0sjChQcPnoa5N4C
CPrvP4NX9JPY//5/9T9LN4f3pSbosVo/nJxHp+36aakxnHQDNtLWmGto2sUnC2XYFri1ORFv4jBZFej4AntNsXKxc1uW45STuzdQCYsnMOTFAqObE0RSVUNBtg4zrCJyl73sOMVlhVW1zm0wZKUHXteburVRrTV0TJaCL6RvTlu7b4orybL0KABLivXHM0erEFqkSM8d9YgBoIEjkoEVba2gIumwiMw/5MDMrc5lO03SeVTWlxChElFgQqKk38/M46zzfH5/uHh7n2hRwnqs1TSkTyXyeSZCQWDCllDtBjkYBDrtd80YJAoWbuhxqSgXQ0duiBsPioaLmx7uHdj5L7ovTfhhubl48Qi1zLeO5TbOXhiBgaLVarW2erDWoGoxyg0WhIGUeunzz4sXQXz+dPn833l1Xus2d7HYinB1E7dQc1Sb2c4JGcDACggTESABWAUfEGdxAfUXJsAM7oiEYGjghoBMjdiy98MDcIWYAARgYO4BsLnGkM5MCAJgDwWJaGdnuMpxCb+AVvIBNrgXM9vm/+5//ixHaaRqH691kLQi7u92unScvZuGS7qBTmcrcZ4nVsxHMA04TBXE0gmBFKddaJWpvd/BwkA+jxFDYxmhtRZPKzclh2U/uhkBEyMRJSFgQjAgrRs2hq1SjE3KSbuhz34uk6F+11gxBmBKgdt3Sb1TVD2cITqvPMDxn/QrPq9/dY3q4/LdkEI7NfLfbxR+l2M1tZKdeuslGNNw2gLoRAJBXtIeHh/F0BgAzr7W5OiMnZjNr1swcEYva4+Px07dvz9OsBmpARKnL0cLb7XYTjBEeYp07orq6AaClJJJ5OZ0YU0qcWQ1qK7W1lPO+6xJgysa7vZX6yZuPvvrq/c1HH7376u70eJzEb29fHvnLeTxnFnIq40iOrpZTGscZPAwzDADUDdyg+UDdfhgwp/PAxw4f6/lpml53w0fDvp3GVOBadhNMd/N0Zrx+cd1NSkgiLMhAPjjsrVUwYspMiuQK0FQcBEVSOoODKSMNIh2LuGeznmWXUwZISB1iUHvR1d2ZkoeN0xpWY45kBgoufff24Z0P+X4acd9//u79/+r/+n97bDN1/HA+DiFcZu5m2towDNXmMhczZ1LO3ZAyYLTthZmjBbSGcw91s5yzqk7TFLPerfOz5Btr25CIfLVPV1XE5c9KiMyCM7MjaGuG0O93Vltob5qq5LAPa7VWzp0BIPMwDJJTrXWuBQhzlmgfp05wglKKug3DED39diFiiYhIzMzzPBtC1w1EtDhJ5ixZBJ6ToOjPwQJ2uvhWYEuPP7EYLjJBtHHk3WutrdSlxlKLxpGqSkqhSuhgpq7NHIVSNmdHQuQlS7TIJQ3ISSilhAxV1WAJFUQERE5LAwQRnRwAtFpr1rQReEHMkkJH6OOPP/7sNL168fJ4Og9Nf/bTv3v5g+9KosQcSBg0QzUyAtMWoCZX9AXotHxps14xI2OXyiBPO2rFanXyuq+VioI6EiulKfmYYBR/k4WRwnQVm7Fq53RFkphmIUVwMFRihI5QgIGaATFST9QRJfdM1CGKGSOxG1FouGvcBcdF8sbXLkcYhVCW8Xg+Tudv/PAHf/H3P/Wh56v9daYf/eM/kl0/tgmZ61yOp6OVGuQNIEIiYKI48ZAESfrlztqqmxCPCP+xbmzVF4lM2gK7H/jm1eTLEYFpaSQyAVIgS5zCI24xEGgenXGda4nWuasCEfqS5F9foAAAWKJJREFUo1Oirl96psuxnGSaYqDkjAxqAU0ANQetoe69MgEAwMDD4xSF04oSXYZxUV4Q0WadsNXOcdhFu9PMYhjESNQNZpZSEsmIjLCYytgyBhBcUB/BXc5EiERoTORO7EhATJzUAKKHCmRO4BowTEy0udxULWoATExkwLxUKZxz5rzcJ9fmCmjYmjZuRpxzt0/8w9/+4fj+4eYKfvLzn3uzv/nzv/6TNx+lfuCQqjGz6tY0PC/aXMAtBh2ES/oYjSABFGLMUpO
MPdWeEb1z789jNxdq5kxPUN77PM2NR/pBvsrECMytWTNW3yGxEJMJQgVUJCRLaB1SBjAhMBDiLNITkUFC6JjYgd2QENwNl0Tb3JjMgxT/YU3Xp77wuSH+4otPa5IT2vuv3v7v/8//x0++/53TOI7TyEnmaTw+PEJVIhKktt10XABtAtR1XQwBYmFEzy2ieCTrthLeI03iNaPAVcs/Sk5cYTXbFoJn5wsMCEw1VbP4r7bW3BZy6SJqRJAkYv+w64ifUUbuC+uFDdAkWqje9HLdx+cxCGoNFG1d14lIbdbWWZ6IyIIlYQIgDPEoACesujhIAzoYoKGgdJRbdnQI/6YIAL7Yry5QO0REQXfnOHgQolHGzF0eOHVQtKnXoBcgE9Aim+GAgKEir66uoKbqBkBGLIaMKELDMOz2u2EYUkpATkYJBNgWB3dwI2ROH3/zG39/+3c266sXL7XpL3/28+/+7u8hidUGrXpprajVBuSupqXy4qKjZhI4RUdzhxhYOmJFL0Q+JEuolh7O2qEjmJo9gd0DTOZe6kOHOyEEHhwQLaEiWgQRREBQdUe17NiBZ4cmBArC1DMJMUIL9Qoyxxjdx4AVLYa7AA4OSqD+fAIA0KnNOGSG/Pbpjt/c/OLvf/on//F/9M//s3/x6Tze371H9EQ4Hk/1PJF5madVdiqqkmBkAV4Ii29LPE77mAPEXb7omyDrIiIYiw4cgHCbSQGAmzrCJsZLAyNTrBxcsQXLyonEvSmYkwOwg3kFEJFQHSVaRtTzPHepEyRiBDER6SQpV19Xf3TQkYkRzMwAwZ41dxExoLgAIK1UEhYARQRCwkVfvLUWu9cRA/KODkKkxizMKTlS+Jo1NS2hZGgOy8kaUwc1EPLaWjOllFOXkVPRdj6PpsEYwzjV3RCX0pPcPaB/RIyICm61AmNySSkN+93hcOh3mZO4O2lsU1Fq4dBWtRbg3fXV9e11OZU//MM//Nu//+XD49PnP/+lpKxziZxJW2m1SuqDB7oFLQPFVWSGiPrrXdplR1tGpJKBqRiTp8oKVRV8xkxMuWPq87u5VmQmzElyVWwkDuYQAuuEZOjInhyzUQeoCMDA6Jk8oQECuaGHxMyipeMIZmAOCpDRNbqmuLDr3B3Ap2mSfjhbefHtT37y5edXH3/0v/sv/0/v6/n+OJ/L3AuXcX735VdeWxaejmdabbOWybS5g5pDeG/56iu6dcm3Dg9cSmUi2lwjftvqT6pu6ss4N9ZhRPZ4kZQ6Zt5EBJfMCYiTRDPGatPaIJTXBKvWhTGJiACqdZ7n6TymKwHi2P+MICJJpK6m12FOE1/Q1qbW0oMyC3UtZp5rkVIKhRMlBQoaArYaYnchrOdNtTZf22HPfDNQMK+1Rmq4nZgUOdw6R/Sm6sqGqtqm6Xg8PR7PzXTzBYzPSKv6l/pizxcNw02x2gkX88AuGrWmCtTQEDwElIgcoaEX1xc3V68+/oga/s6P/mD+f/6/3v/NXz/ePYwPT9IaATJR3DdDBTRCJAdfKoAF+02EKaX+1Q0fhjYXrw2rChI4jWYlMxJhZgBAFs7S5cQi9/Ur85aNOueEnIjJUN0GRCFSZhUnTqlCVkjOBYsDMmJGpG0g48voxnH17ACvsVO1GUJIsITjrwMBGEh6mM81069+9cs7m/8P/+X/Ra72IzoQck5a6+P9/dPdfUamIbdSolhcddcAHMysmU/Hcpm0RJLsFxgHXB1X45/G2prb3GosDCIyhBjkQMgoIJJIxDIASH2f+o6ZYfUm2zBmkYqbg6uZWpBxyatq1VJRFnr7PE5RKhg2wlWiIVFK7E0bomncweUIiviWUgogDpGklJAXyS0xM1cgImjNEcgMjBCx1mqAFmO8plaqqlrT4g2Z+cKAqdRS5jmlhMyJlwaCmRn4avAOUUUdj+djq3f39+fzmSRZ2P8agFlwEZjZPVIpilIJAUQEGSnJqoIU8xaNnU3eazhaJCfilHPuuoSZIV3dXH988+Z
P/uRP/uanP/vTf/cX09NpfDzfpCwiOUsbeTLTUHUlgpV/7bCwMEU66bIPWRmrqZWaFbIkADurLkEbgFE6lp47QIKGZ0Zy61sdmu0cmEgcCCFH8c4cw8qEmBXE8ETVbAHxM4Kioy1uFVsioRAtN2iBVAV3gIauyxAKHSHv0vE8GvcF7D/5F//D//A/+e/9+O/+5nu/9yNtc621PD2dTid0cDedCgFaXPMl93BwAHU3G9tEa2d801aIkmAbim0ZNl4o7cQGYGYiYeZaayRYzJxzjgkuANguxUANAGJFXmD6GYlQGBuhsyAlkZ6jFTuJZQWfpqnUiYhqrQQI6K01MGOkzKJckVJE58DehbZonF7x+VNKJBxMS2YWD+vqwPFTQDaWqn9hpplbbd6aNY2GZkqpBVZglRqN7xDpO9lShofnVxgxMvGsOh6P99N4PJ5LKUlEl+GgBzUoWhPFq7AwpRYnqWEi6vrOaelMB/uMaEHvZaRmwWXGmO/0u2FHaXr/ZGY3t7cvXrw4HA6P9w/d1eHh7v7FJx9nSU3SHPXblvzEMBigLcoxhogiPIPO2lqt1jQhdSzVrUGBRCYMJIDSgNSImlttTjgrTKpz1WYMYa4BNquzmKMjMgvEKIABCJZ+iVCk4GHDYTF92RpSBq7gDlC1RRO9bZ0zRweo53NTff/+/f/0f/lf/I//F//Fzz799W/97u/89O9/wTfd6elpfnyEprvdzkrVVgWpmS2pfzBRKHo33rTR6lO9poXPyEpfH1vhi0wQrTlAEk5dJhQz0+MxdKIC+dN1fZwYUzYRwZW/0lqL0yOzLAMsAGMmwEScUorsoNYKQAoeUKXErKpKighBut8qk9R16wYIqlnkWniJOIr4ztFaLaUSkZiLOikQszAjgjgRUatL5yulTOKllCs3aM0W0o2ZGZhlwtZq04baZBhQpLXm1oTxnTRmRuBa9fh0fnx49LHcWA9nN6Oqq9WCEHWMidKwc/cCDkjEQolcYMKWu9TEoc/Di9s89Ocyt6oiojYZKJKJ00HkWroBxItrI5bDv/2zv9nLa3tCPop8xQXPsH/3Bz/8xtt3738y3WMuDQ0B2jT13HWc61xasSxDkU5Th/ub4UhK83ycvbk6ltKg6o1JruIFjEAyc5c9SWWdQlWsz3wl9++P95y+3R/g/eO1D9eyH9zwrIbeyCr5RM3I34yDAym4VqqIDaEJFMDHMl6/uT1rLaD5qv/83Rfv7+5evnkJj0YpoAHewBtA9TZa+8Vnv/in/+w/+M//R//p7/3jP/gU7+EFv73/9f6K2/G0MwXC0WxuFcEld0kEptndWY0YDM1Aq7cG7dbR3bxVobBBFEOoTn3uHh+PZtZRb7OrGoqVVmauijBcHbqhD4h/bQ2AyuwpUeq6nHNiIQJkJ/RQFmcCIvBmoC0Tpq4nor7v+75nkbYq7DbVRlnNjV3JTXWqZW61gSdiA6+qio6JnbAqVkEk5q7vWCSAx1vpQqAejG/tcgZgVa1llDh9YnKhF/RIXbmY0Q/1VZHC27h05VfZCV9FSaMMCqTUNhxhZjdsWsdxHsdxcfemZ7XeJd+UxSZthSY7hC2pSO47yin3CRLH9vWLR9d1Btq8xTR6rrUnEkVVJeHjeP7TP/3Tv/zrnwCAoQH5eZ4+/uSTq9sXpek4zo8PRwHuhl7HVm2uqobmwAre53R1dXV1e82EYd2zxEL3yEfXBPoZfBsnrJkqcOrEWSY0JqC+fzqN4tHu1dJaAVMyRxjAEU2RzKGgVYBiUNGV8dP3709t/Pjb3/jdP/7jH/rv/+mP//W///f//ra7qmOdayuRsgrT0KW++1//b/833/z+d7/zW9/DJOc6z9oaqIKntauzhsklpYnjFJDQg7e+eMiVMgOsCTwsvX3E5xepquhu1UJqqvUWh3/OOfVdsHgNoWjrAu+TkiDxqnIebLV4r/AaZeZgF+ScU860Hg6+0M2WdRV6GSmlLCk+nLubmtZqFq1MTCl
tT4SVz7CdVyGWGAlYLGxVXRhhoQVtDmrutHSBlnW8GLuahtuZt9BCg1WDJb5YZIGbnfdWMBGRmpfSTqfxfD6XUsEJaTnWl+Yx+TrpIrO6TECIolmbd0PKedj3LjTsdjEiWNt/gYtGAWzeWhypTGSkrQV68c/+/N/+4uef5qFTNyc0hcPh+vbFy9L0dBo///Xn03m2uRl5UVVvAT0y19Sn1x+9Orx69fjwcJrGok1WyRgiChkZ3PQoHQCdEVInNlcX6q72Oe/LVI/lNLe5c8xEBK7eqmuFhk5I9IAhTAZGoACT++xewWfQcyvF7VHrRP79H/3O7uOXb37w/b/+8Y8/efmNly9fl9be3b038B/+zo9+94/+4Hu/9UMaxBM/jaeG6kzOZGDYlhmlqjIE1WDB5cdCpzXFjws7B/XQn91GI0Ov2nSNlbVWUI95Koe+YJfz0A+7HSKG9XpkpInCnsxDygWWEWtISkFUGiH5bxZAL4n9GU0bZkawWuviJgYxCOvAMJZZfJigJQR+oOoz4cbXrm7co6jRQ1tumb7NswRqbfu2ulKEt30TF8tWXnzX5b7vu65bNyjCqrwVh0DEeF+JRegC3rZDLdzD/MLzdfXZcHWr2ioqIhIH5DPFVDwuE2aJNe2bqxfAVCdJKdhmjlC11cLBmTtcX338ySd/8xd/W9p8uN5PZXa03PcOSCSvX3/0e7/7B69uX336q88++/WnrDLPpUAjZicnx91hePnmJQ/D/d3dOI6qmiVDdQtI29q1WL6Ft+hKVm1m6oLd7uqjj781NLs/Pv392y974EyYiB0sgjeKMctkMzABqCM2h+I+mRXTu9MTJG5kX376y4f/2v7DQ/dbv/Pb/+QbH//z/8E/n8rMzLvdLqXUDCSn/mr//u6ujaiTVjAQlr4jYAVv4zyP0zRNqhrLkQDD7hLUYhRA8OwShEzu1kL4yhq16A7BXGvRhotN3uzqcdr3wJwkD33X92FdwW4AkLqMiIwEIU9S60LhdW+mUNTdg7u4ZCi8kN+36mKJ4oZgLXavEeGqt9VKCSZxLLkYaMcr4dJOXASwYmXmnH2d7i0xK9pcl3mSXzAetq1z+UIAkFOfpBPOZkaoIaoHTkk6U1DTVi0Q4YhoDC3BBidcAo8v8hVRpAOouYM3qO7uDS36D6nLqe+2kDDV0sk65d5KMbWmCsKMBCs861S1Vr9N+6Hf9Z/k6xe38ukXpXr1GTNhys2oql/trr7/3fz65cuh6wn8008/na21aoRIKSXh3dVwuN6dvJU217rQQYA8LESBNu2noJ64u6JbKTM0K6Z8GD7+re+93l/fTac7nR7ePwgQaQtlGwNnY2p1UEcnRDZicyyus2oxpeuBkjj5WOavSvnV433/dE9EOuTCygzUIXQ8lzJP5zbeEQuzKEI1s6qzqxOaWTrXxbHCwRIKcdS+0QXyC90EAzdwSIwKZlbBWBXDg8ihgQXvxFWLNlPvKCGTE1ISSYmZnVCSyEUNDe6hNlvrqqOPHkQBFkEiSYlFaPXAg4vH0opVBTUM5y41dyTAxFzWnj0iEgqsGKScdtsMe3upKH8v6/jlgEpJtlx82wBbG1hXFeltaLJtjHC63MTggyGw7RlfQUjM3FJ298CIL2mZoakScVgnhJiSOrg3N0cUIuIknNPWiQsSEyepteIMBuFIAgCAwkCrMpi7q7XWprG+/viGmLGDbt81aGOr0ifqpCk6EjMTQErp5ur6u9/9dtelqiU9PpR3palLx/1u6HYZyWsp7hpLP6Y2RARrQhvq2QiGFoaOtsv9rKO7ozDuOt3lecjzvqu2L2pemzUwowA3gMMJGjojBYsXi7uiVcJXt7f91QBMXEbq0udPD/mrd28+etXK05s3b169esWAT4+Phdru1dX19fWXX36JJGqqVas2rwjE7m7zHBJmvt6X6LQQEbg5IQRjfGukMMd+RkRgkpRI2JAgMZCYmTf3eQKHhTklLCmJiIKTmyB2XYcSJ/I
iW2IIqlpqNdVRa8BRI7Rt6cZz2rwuWW1NVetcg23ramqotUGIr8CKOQUWBgBYkNts2wte7qUtjm9Va0xCxM0Ce20AuHKWo55okagwh9JT9OmjKgnCeEyvQiej1Rr/bXsOANxMdeldthqMsw36EMMPJyAFD/sAQCRhZIoOGjFHdEGm5ktNUkqJEcayQ5yAsLXS3ISQENXAVui3unFKzZUEb1+/xORGZABVrU1jnUtOfH11xYIV2tuvvvRf8MPDQ+r63W7HjKfTU6he5z6NJ6imggGZIXcAZAywV9w5cwDYCVfTLvHV9b65/eyzX/30i18/eIN91lI1RCuIxNnMvKmjrOc2GZABmjshfTmdb3fJDOZWwcrj3/3spLq7vf2j//bvMbM1LWXOh37A3Xg6/d3Pf7rb7aBZqxWaMiCLAJCaurkgWURDc0OLdyMitwvD4JjHuDuhBShBuBv63f6Qh544mdnT06mUMp3nos2b9ftdlzL2tN9fSdebY22WDJw4SY4clR0Bmzk2B1WvkSCGCQAykSCyGdS5XaYrvrpTt9bmaQIApuQMYVtqZoJCRNZaCLfxwt+CrSMfL0Krm6pvg7b1Xbb2rvjaw7ncNFF/0AqKijBMywD8eVetkyl39+PxGOXIdkrERgqC6nPRzQmAHKGtochpNUJjRCITjvjhAIEDjckFA0fxba7mS0M356xFVXVuVVUhpy4lYUSROL7UXTJzIurTi49umynlVN0ej8d6OoI22e84JxH+0e/+aLg9PNaxghJx1yV3Px9P+OIqJUlZnJYCiZiQ2Vf+UUhCIRigkbueJh3n4aPuzavXOee3d+++fHzATo7Hcy2zVSXEHQ/I4mYGKLu01vOEyAxECI7srTbAcZpQ6LDfHc9Pb7/86u1XX757+OT29rbrc2uttYIszCiEZGqqqJaAcu44p9ZsLM3WhMSXPNQQwNeqbznYL1ldobWPSMK564bDfn91kNynlPqHp/P5fHw4TmXWov1uN3Qd99zvdznnUmtrrWjLqmomKbmZrmZZ7t7czCwwCFGMRod0S34iRVp6TbXO89zmMs+ViFJyZy6lzOMEjiYmRGqoqqYqzMIZnBrbPC/CFnjxiCYNPesx4panyDbv2FAMcS0iydn+ekt4skjQqHVBfJCbtaamQCjgVkrd8h83Q10oF8yplFZKWSAciI5g4ABOCJw4il3fD+4edKFhGLo+O8JYZmQQVVR1NBbOOZPDNE1aNKylzMwaGiciSpL6PDDzudabm5vv/uD7lOTb3//B3eNDRb97erRhx63NT086j7cvX4iQMQxXu+9879sg/NXbt+M4Dmmw2lwrCOz2e0n3bao59Tnlea6ddLHwCV3VWqtE1HddP4/d9YtkAE27nLMkInp4PCKis2BGQgFOBgRIbjBZNTNC4ZwQw2wCiSzM6zknADifJ5Hs7n/243/7h3/y/W9+8i10Pda677oyz3fvvyKHNkFgUxDZHb0ZAg2UJmzbClsOcHdVFZHNBzGqeW2tWeiWcc4pd33qO05iAGZGshgT5b6/ublxgywpNHVoBZmp2zbWjRi6Leics5kV824Y9vs9EQmxqxmZthYCE3VaGuUhMx4c8VJaeLMiYqtqwX2p9dxcmIXZFWpRbZO701o6b+jMaJ8EzS2YPb6SeyIbfHYK2H7wtXkUL7GkcVsNYBagoBbo7bXzw4geEsfrIQILbnYZmIXxwfIusAbOgIkzABMmRMG8G7ZiqLRqswGAgicSdWd3wOXq4DptIIB2URYDCQBF3bPf7z/6+PX3x++B8OtPPsJM98fzpIXPynMpp6PA4FYQE5KmzNwLCYpI7niXu+Q4uxJBypxzbmXt3yUhkbUN6gBAAIxOYAOn3A83VzcDpfl4Pj8+eW3CXEpZoAsrAGrhgCM4siHwgvJf7KoCAMXA7uru1qwhudnp/lTG+Wp/SCjHxxO02pGgg9eG6hiYQjC3BovO1ge5ta9VYIxUmxuvOi2X6bLkFMqNfd+
nriNJ0Uci5q7rSt+jLerciESB8Qdyd1DQ5ta8gTpDbEgiQdAQQZrHknPmLIwChvNYxtPp6elJiKdpOp/PWhbxldABRSZndVYkDrbq6qDhhui2pvseNJZlfV+WvzHI2ghukYxssj1CgBjiSpGDL8pWrrW5mrnXC5MCMN+a/JGibRkbr0bEcME3M7OwiY5cLTK/uMoWMEDyEJJe2AkpBUg1RgTN1KstzkvPsjOGhmVtzsbfLwvRoqwAAnQ1Ye72O/zkExCuYGk/HKfH+zK3Mo+l4Ons86gdgzZwJJLUSVolMfZ9f90desqTuwj1+93h+tCagYIhiqTU5UX4CRzdYjMTepfy9f7q9e2LjuXx/d39l+90Khlhmiu4kxMTINu2FWDt+j3fwDjBAcJsCgEVAMxaKVr9x//Vn+3S/h/90R/e7m/fnet0PrWplnkcckfu4ESAYdvYQjUkLXxFX5Nb32aaEeloKQQUXMG1Lc5zW0Zw2RfZkocArotI0+rutFKmNE7DWlVVYjrUFtxvBEFGYiShNXtWrXPRqTT3Ms3lPEYZSUQUxjMOSqqkTm6q1gLlpoiBuVvjSMjp2GKQQRc4pYjgy7oyi/Z/fP3lBLhMmC6v0Rb4L7uqZTZtFsVohOGlsNAAqblbTFcwWDbFoLUYDmwZc+hEOmBwkpCYSVASUSIDZ+ZOZOmTkgdZWWQppi1YnbSi0gEdQUJlO7Dz4fShKsTCtN/vXvqL4zSeWxnrpNQczUCFjAmSEBM6OpKLMBGYKqj3fXeVdzvp72kSkqurfXv5UquPp+IOKJykU2ju1WPXQfBoXDhLTl3XWdO793eP7+90nlEEW42vTQspChFMUc3c3MDE1QDRrAGQ2aZjvEgwubtZU7M//7O/TtBfd/tvf+sTq3p6OrtWVIfkcc0RyMB0aQu6C182+mAdDOvqEwrP+879og/uG7lRVZD7vi9zm6fpdDqdnp6EU5dySqm2Yk1BgKPeUwuYjQIUd62tzkVrrbWGlGAIS4BZCwsZNUHKkh4fHmqtqMaABEiLlChMatbUWIEWhf5IQBAZeRHxBydwvUxb1qu3NGPMbBzHiON1zVwC0CrwG4/tpNhyoe1KmVlrSzERgofbZCESrC1T2uLHHFDBGs0HMbxMuSAYQMiEwvEcdWMSyUlEDADQoh0kQjHoVV+INAFJV2uIuMgiuBm0hgDNbae4GMRTSqkdH47Hx3E8PU1nLkpOmSBn6TPnhMa4OteEVa2B2ED5Jh/e5iqccu7duDV3e2jFiIhWrzQIBQNyAkcHxZjo6Wk8v7/76nw8eW1WKhsgOhMwKC5KoAa8QgzI0Oqa0/l2ZAMgMQBRrC1E8AY/+9nf//j2L7UaeTmdpqGT25cv2jwREZG4Iyg0tAZuCGxOq9RaYEu3kuDydsNaB+6HIeh8hGi11bkQUaSUZZ6naarzXEpxWpOoUkCNTJBJiEytlVpxWSFlmi2Ehmqz2sCszgXMkZZGjTWdp2meptPjE5gzRkJAy7jagWNOF6vRFmVqWLss7q4tgKwOAFsL6HIOEN8uBr5xf7eIENfrg6TlMkJsHZ4tTqiq1RpDZjeLFurC1ls3TBQissq5XPY8I1HxVR3Q6YNSHZlwFZ0mImQWRAdd0tb4tusQDNQgATPXuXAKm2WrqlMzck7NyzCDGhKaGZgGoVu1Pp6fklqmpOamDUwFAYRH16B1JkriCNXF+bo7XF01U0BHRG6zllHPx4ljJwPEbAjX2gvQGvjUatWm8/T09FSmmQzmWoUIEEJ9wKziKo8YPVtyBZD1FriFuxE4oUCkVuSI7M0S707H+ve/+PWbV68/+fiWOJWm5ggsyOhIANAAzFAZnXybBsX5677US1uibGZxmLJIIhqGDgDiAGnjOLc2zhOnrtZ6PJ5ba6206BrN42hmrsbM3dBLzhCWUbCorYzjOJ/HyNo9TNQRzZwGlESIqKVN03Q+Hk9PRy8aq1ZiXO0LlzFcUlXVdNmu5OBOZmZokfN
sx9eWklxWs75Opba1vSWBZiag8f0BbeloByTIalvCQPhDmbu71bYFjC1TugwhcDF0WJcygkf7iQnB3HxtAcV0AZZOqG/3o7SGpeRAUiy8TIiSxeGZo7QFsBiPG5E3dbOqBtWOj0+tVBKvbZ7n+en48Ph0X2tFQQRSa+e5tLFMXWr1VoYcvInEsh92cz6zIQMf+v3trZ3PUy2WQfrdbtiPqsBGtrZQli+OCKhoVMAygTM11XEca5uRHMEQAB0DYeCESxSIK8QQngyAjhzI7tV3FRQh2IFgYAY+T60f8t3d06effvbqxVXO/el8/3B82u8HJCQCdVBUI3PBOA3g4tD9WryLyEQAAbtKKXWZ3b2qam2llDpNQEgoUy3H49mbuqPWliRba/M8EywK5jsiZG7aoIXWVhuPp2mawjN3MRJeG7LataBS1XkeT+cyTtEuY6LQUIEQyAqgurmZA7TnFDq6TA7tw1y9tZa6vHXnt1aSmQVObvu/EeJrreK/8YjX0pX9efl7VaWLEttXnXVbaWJwUU6sOWXafg/wzOOOO4IrDd/d3dWdSmu07umc8xIPiIoWM0MmWQ1fYR1dR7qF5o2Bm4MbmIeLIAFoqeM4Pj09PT09nedpd7PnudBYW2vtfDz3uZRJbHBesJy73e7UdTA6OWRJh8OhNatlQsQF85gqOtVzuTzZcLGqNQNyZE7ZWm1BE1nVm5cMxN0WEYVlluy2xiCksBqNf3J3RzfYTBzdzBg5SXd/9/iTv/3pq9e3H7+5ZRJiDiauEZi6gjdwR3Sixa3x8has92651ytXPUBpQ2JHEFXEoqpza600h1prPT8dp2mKZGzXDa3r5nkWoMBrddoTojU1cHavpczz3EolIl8zbzBXt1LKYu6iGq0eX8kYaN5scVIMMg5w1D/6vI0XEdTYxs9D39gFW726rdi2uoNdjsm20+AZILEhdnQB6MvWbYS1U8HMPXexjdxxoT6oVlW2NfZLaGAwMjtRs5oyJ03eagNHQgRXbyS8OBgYZUvJUqqJlHa9sRA0BZ+APcmBhKZWFVXZiaB4Y+aA0ToFkaPVUlqpyYCYOyQGuNntH+/f49X1NE+Pj4/mSJwk+yfwFQqf6vmrh3ubQSqlEQ7nZuwujlAMTppPLXVPN/Pnu8c3X9iV0aN372s9AeB+p+bjqXgSdiZgm4tWY0+MYIwVxk9+6wcp8S8//7yZp344nU4VCBzICaPusW39E4AsKx2QhTrhZmra1DTSEsYgmTtbAhLa81M5gpWnEX/xy78/7PLrF7do1WcKVSoGSAbg2FTdtYjM0yQifcqmJjmR2Xg6tdokZyYorTZzF8GcIecpOwPWuZ3m0/l8yiCH3CPQsYBCr7OOc1Pjh5NRUpZh1z8weKmn+Qz7/f56GGqt83nk1noDcbSqranOs5sxc1KAVsxXW0kzN0P38+kURR0AEBMzW7jcjc0vCZOEIeOaUjJXMEvsjRzAkZwl1DSAzMHRmkM1L+ohsgBAJrAa+LITucvzUfjh4XiZS12GjQ3/Y/5c7yJ/XUZm+3th1iiITdGNwh89ctAtBVqjESLWWh0gJO2WHejWtJHgllfRxQP9eVrX3MSgKfjcYAeh0zjXguZZUp87BU/SIRFxBYBa6ziOp9OISXa3Awt3Xbff7/f7PVQA8NYKpK6V1trzeWhNMb68q7ZmtVKrREAh27jexWi3rebMz5dl+6bxrfHi2i7nia+p+cWVh8Uizho2ZjbHaZpOp1O0tOfx6fb2JkDmGvn22uRh5roWf64Lbh5Xvap45RirB+y26OQOm9BDc4NawZZZ/tJBV1drUBtL0voUjfbEklJiTr7wJ59pxJfLKWL5ttKer+r6x1/7p8i6Lx4fMNQovOBDueeCy29mCLTF9PCgj6dv77J8dzMgWpz9EGHBF8EC/1kvFoYgk3vQavwy/2Fm4g+GzJe3GXGRW2Tjpqv302UGD6Ahcxpwc1QkEnluvAK4mdHqrxHvsqQ9uChwXJ5
jpSiXVqf5od6f5WhmzbVLqe+6qo1IXAEA3VhbG+cyjmMacmop5ZRSGvb9sN/VY2lWx3ny3U4NHEC6PtmKhgcXwgARE5gQJOE+ScqcMjNza20cx3EcV11B2uYkcUO277LcSIfLy7Jtj+2xZbrNaj9kImqthkggEALJOE4izILNrDUzcBFh4UXxydzAtDWghSyyLbjQGdh1fUxM3dXqMjnKOYMhqOnK7QZYbkdrpk2pVrBZRKyFbFYjmhnQHXV9bKO4+Jrtolz8zR+eP9XaXdzy+wVwuWIrbK3jmXmTnjaz6IcCAMJlaeprV5Mur2psgA8wqFso2pba5S/d3XxRkFZYToMY1z3Xph9WxpXMVHEd7nxwg+OVl3W/sISMl/MusG6IAWQEd2cguMzHEAGgRqPXTaMjYGBmpDZPk/ksjpJTtxtSzgZgtY3FvGGrAIAejSdilITIiEiS9vv9fn88NQcmAFcDZ+76TvrhZK6qrpUpExmFmjNKcskCWZgYQjnwfD4/PT09G8XBgj1U1a0Mij2gWzBzsM3NHMJfDGzpeT+340SEHIDIlU/H8bNPv8gih313Op1Skrgv2x0R5vbcfwMzUwdcw6S2pu6ZKUZagb1lZy11C22A6A5KxswBV8lGgAZgqI6AWVKX+pQ6IgkEG/kSen3pNwbj3MDRLlb85d6+/ISRdXvw1C7AxQDghLxoIS2cHiIyBHbcwjFsXn0fJAuL0BvAB/F32QCX59T2z5dH0va6Zubq0d/ECyKYr3kIrVYf25ecFQxc3YK8DOZEGKbhfvFR2gVAT2HxAaAVao/rF3ZYVgmtH2nRho/5w7qqoiaGZkjckez7gfrshFrr/Z3b3EoxNxahlIaUupw6zomS5Exmtr/eW/NMIpnHWoFl2B206/B4Nq3uLoyuTcgkS5eoY2JahvI559ba8Xg8nU4RtIhIm23xDC9QJ0Sk6wV2dzQwWA4HlgUibxosPVpL1a7WGdyY6O7u7q/+amx1+tFv/xARSc3MFNzMBJb9FhA0DlNRQAvdzJVOpVF8PCfkZs1KaWVuzYCAAL25h2JDdIqAXMSIamtmjil1z6h1dy1atNRa/UIwy5uH+CQAMKxCQ7Ga1mYaM5uDmbdq7m4KROoeHr8EAAbAi4JtcD/QHaN7YGYhnLn1yQEgWFOXqxf+oYfYxcq7DPl2sY4vN0BoQzBzNMIXLthFEvKMPnF391La0vUD36ZNjKhrFPRtHonoiAqhUrQsd1UNRz3EBc6qbrhtMDUHcIBNoxuREksW1NrEEBxBDZt5U68N5tZmn896Ps2tOWEWzkwdImtzVFUHFCBhszbrNJYRW5Y+u3mZ6zRNZkYIAl6skmPHuM95SCIMqrW1Fow7X4/By6u3BZSvlQTbz2aGjiCMMf1d2cZwEYlKmeZxkkSpy3Wevnw/vry7/V7Vq0MfJK4tamptxcEkgVoAEN1DoPXiiF5bJYHRty5XrXGAdNwxoLaGjoru1iIpIgJmBafZqz5zk9AdycncSqnTNGlttVYzYFjCc6D0YlFu+21L0yHE6JesackCLICB7n7ZXVwxI5tTJSICIjE7gK5dTkK/xP98bd1v11MibsZEKzpRsJDiFy48EcUv40sKs7uHwO/zZ7r4Gdbzet0vLarkSIDcnRxtodVGG/RZhYaIjGEJ/9HPMidCEQHyJSOqCqGhAth8+eWmiUdhmkIArmiAbjqX89PRT3ieTuPTcRzr+fH89HDW0fe5I0qIBEDzPCsTSY67Nc6Tzqq1+b4jVWx6Un16eATzhOBaBDyh9yyHPh/2Qyds3lqrSnm/30/TFBy8GgQJx604+1qACFc59GUPQ/T81/kGrjLlEKm3ew1dkGgdOajq+TR99f59P3xiralVABBZ4nFrRTiFfpaattYwGsdIelGMRjPA3Xv3sU1mllIeuo4Bx3H0SPsTcE6pATMQslWzhqDEnBA5NNCJOGrl6TyeTqOqkgNzijKXGRBRL4ZTl9nHJTUXLjKlbT9
8bfWjMG09mwvreSvLAPfC7xcuXhAvo5IHH+Br2wLXHurX9kp8uL7vAwXUbGmMAIBfVswf9i5iS4WYfaTwcUYRLG4Imzb1AsbOxMwoC5ZCaaEEqDderQufJ8gOIgLuRqQBg0MIPF8nqUMUA2zWprmB12nyuY5P0/FpOh0nbuLCQokpEUqz6gaLSCqRmc3zzEC7VK21Op2f5nKazgTGCNBax5ABslDfpf2Q+r4LQPtx9t1uN89z0KZDgEk4XYhrfIC2WsZesIg8xyNcN5dzgC7yEzMiSiIi1Fqbpslae/vuvfzt3718+UJbsVYlEXPPgPwsUguu5uaqygEPYVbV7R2taUAemPl0PiHibrcTSQCASM1UF0Udg861oZsyMyM5tCH3nWRmYZa4blptnuvxeAQFRMzZRIScyQEQFXVbS9vSQsTW2tdGSbG02gUVMVY/ADQ3Wp++VcOxB0LxwS5AOqoaOwuWZ3+w2mULMHF9v3Y0A8ByC2X9emt5ZEs1srRBN4ZbGKo+7xy1lDIw11rcNCfBJEpQbCG/+0XehaHbIezu8zwTsOQU1Ls+72JktuMhtkSfu9vrm4fpqbWWWfr9Aefqp9nUO+mkaUICNwirKgKvrUzz0B0mKmyiRU92vksPu/1+dxiql2aO4sPQDcPQdcPp4Xx6Ot/sVTgxEwuCGbQq4DkRmQ0p3wy760MwQxIKIuJHu1e11k8//TSU4uOqJslbUrTISW34QjVmjoNiGAYAmGuJG7lp32LABiPJrqZaVQHRuzxg763aLz/99fVfH37wve8dT6fDbjgcZJqm3b5fthNSYNHyclou08N4iy6lrus4CZhP09R3u2iwlVbRAYWHYaiirgBTqaquK+K61ToVerELxQOmJISt6DRN43nOnOc2t2poDTP5YqPhObGvQyffEhhAM+i6HA00Iur7PnoJYb8FiEAcOBsDIKBaNCViYaJA+y2ML4AF5hkKOhGszS4W/ofp6DMn+PKgudwA+OHj8sS4OFnga7/cNnGgxjeFf1hf83kXfghGB17zH1ngdPFPRASEoYa3xEt3ctj3QymlQSVzaG5YvbXSRlZsRmTh7kROgA4dS2osYavc3M3quTzeP2FyS0oDSs9dygEqZmDbRFWbWm0EnphYgdUJNJMPvRwO++vrQ9/3KAiEw3D78PBQSokiuOs6+LAC265wfP3AfnkYvyGaGa0AfbRAjaJHBzr082AZf4JjULkUHNVLM2Te7/c5CTkYLJJpS+9hg6g4OAKad5K29efr4B/NUg5GUU4pARrMpSC4e4y14+hqzaw2UwXXMreUEndJgtiuAE5Z5DgWbe5qilprRHckesbMb0vucqU9H/KrQUFYmj6fCUS0zW3XgyRcr+MVNouny1V9+fjab8R0ORe2T7VOAGhxC3YI4UqAMLK3/6aC+jKQb0dYlzMTA2JIeRmEcc7C+/jgQbiYQK50R0qEHMbaAKExSOSwFOIBT+okAZkIiCNh0gZtOpZppgbsnJBATRWMkAC6rutdO+nEsVafbT7CERFP02O6SrJnFu9TBkPCBE5optqii4XgmcEYqpuVaUjdrs/7w+76+vrm5ir1CYQMQVz2+/1ut4tbGAejtudzHOCDDQCARJQuwpK7B/459rldCG95kLDX5y5FMxqAPz4e56neXB+YogSMzGeB/hKsvnGxrwCYOSmHho+qolm0TUhdhBbFfffGzRGiteqgAGCqrczzONV5MvWH+/thGBglnOCCo2cKtQaZHc08fkanxOsCWgsAWFNuXMV8LsnyW7BYhp4XMAdftS1IWC6YjHGGqKpbg2fw8xK7v7YdPCbBX9uL2wv95uJGRPuwmwFfC+cAcLGbt09vCMLc0B1iShcJ/Aerf9k2cQ1y4pw4UVD1ff0MROQWN81U1VQdlMwFuevyDqWCnM91fhy1qKEbBFvPlRET97m7FS5P80NKZ52saHF6cMUJ85hkT5Jg1w/kBIYE3Mzm8QRELkTQwNRa1Ta51he
vXl73+8PQ933mnDiJEoD7+XTe7/cfffTR9fX1u3fvSikiso1A4itenqX+TFZZvUYizG4H8uqVsmwAoO0+WuwTBwJ/f/fwcHx6+epWGFyLMAtSXQXOKFwrHbZCYxvnL5oLuESfQPO3ZkRNwbV5HENGa3NJtZQyTeM0TWA2zqWWAMVCzj1s7RaD596JuqOJROsJ3YMpjlvkRcRgE0bFvH7ZFkERABAIKOzsGBENPC/Kdrh89KALqG2fENbhgKouleOHS3+Nqhdwt8u1/rXHtkP0wlf9cg9cnGvPp3z04w0gCq+E4NZaQLDog1fe1jet0tsiEoJ6tt5+W+dEyxVWba1xcwAg5p5TL7nrvJHMauAeyFMAsEX+hpn5uu/G3WmXuiMc59YaVEezamM78Ui5432/73Nn1dCIILk2JjFXbbWVCbQMKeXUvbi5PuQ+TKmIyIEUwWBDstFzrHL/EAL4nGS6e0Dt48Ec4N+1hUrLVcUwIom/5ygGI3uM12QHrLWeTiMzd520qaWUADXItYy0IQ5h7bvXIKnEpnKLXhww16pEhZmbm7u21hTWuSwioKtqK7XV2kpxNRWdpolIRHLmzMwpZeayDkYxSu2LL6tbE/biVITtgkSQjSgOq5kp4IKNi0yYo1G5vsLyjcLAd2OzGMCFttW60p6hEPG/8g8u921l+3o/bIOIbgXuh897LqDxA+WtmFg4YyfJnVo1VzV3XM8HuOiFL2cfPy+htUcSMA1XVVr4XwAArbWQqSB1rS20oEDNmjILtkXzZrkQqmjMSH3Ku67PkpQUzb2qmiq2uba7d7zr94fdvkwNDBml7xIyKXrRmojS0F8Nu+t+dzMchtztd7uoIC06r4C7Pp/P5y+//PLp6YlWgb5W9fnb4QdKOOhuTasuQHZmrrriDtSfF26cFAgQIgOLI+5yaxEx9f04TwCQc4Y2M7PWsm6adbKpFhsAAco8N1UIShICrLP/4CmbWShNxircNrOqmjV3RdNg6DawWlX1KUCvOXeR8KSUACiOl9XEnswM0J6nEBcFgK3ITV/B+tE3s3VOvARKek6KYP3l5odnZmkNPWt0WC77B42Ziy0nazbu7vZhl5QQyUNk3Td1E+eLl/jNeEZEbkYfwigAgKMqB6murE0/zP/94rMuUlMXG2LtsS53kZhEhJ6J/WxmVZuV6pxyXTBIiRhgmdkt17dWY7TJwTyn1KdcqbqhuVttIDi3+nT/9DA8YHObHQwTCTNxkiGn3WEAABYcJO8ks/oud0EbhyQVoLYya7NTOR6PD0HwW6/Mc0m39bPXB28Au5X6nFS3Gfl6hq3R0cFwoX9DXCwi5OWAfXo8TWO5Ogy4Yd3cwRwYPGDYqh4rcm36wZp3rdEKVsh3BwwBaIsb5KsQZ0CLY27jqssx0vSI5A6h8qm1pdSpThppzFrKt9YEL464D9dia62UEkn8xtlV3fR1LrtGoOvGMLO29pQQkVYes16cMxeYQ7i8ngDwzAe4/O3X/hQuJhd0cZxf/llcRDODrYDb7veqdAvg3MI/5jfe5sP2iIKLeySR63b6gLkX8h7e1NUs3DzVgBMAE2IniYDcn1VKoz9htbbavKkQ5cSJWN3NVE0FkqsFcayTjp3C39BaS5l3u/5wc537FPoLXhpWzTnnLCklJWytTaWM83T+7CE+Z9/3p9NpmiZE7LshVOa373i5N+IHXEX4Wmu6zWEuwCnLXdgY9Ftjh4CIWrXHx8fj8Xh7c3AzAHa1ZylmCFaWhlTHFmOeL/56y3LOQQwAhlpJ3aype1zmZUyLiMsGYI4GZ5igxaRcKNwMeQ3b/ryCm7o8E7W26wBrdh14/dgAy7PcLrP07Ybqmg/7mvPgxsu7yFx0AVN5dIcul3Q8pJRpPZoXq4T1yW0bhZm56pIv2qYtCrjKoBA6Ypy0iOgLpZmBENGokKNrqScDlp0KeOZWWjVLhIlVwBkxASTAHhJL13VD1wdeiAHcPEh1nCSlJIkNUd0V0Dm
9mrrj4+P8/r5N07Db58MeVV0AwVu22pTA3RDMWTGpVz4jtGFo1y+5ent4Gq1ax1RP0456npO/h9Ja6jsgm+Zjnrr+andz/frNqxe5I9Myj09lsl23i35l88Yt5dHhy/H01fvpdAambDxwstJcTbqsYJSW6g28AgTjyQl90tndKREnntvMzChItt2iiHkAAOEiaCupYsvL3aBZE8kKkPvD1fWL0yMdj0/Tsd7c3HRA4cFD7oLUOMRuFYQ5LUiBaZqI6CqllNLJS+73uM9uBqaZh9rmeZraqUFDcTJzAENB6hDcB702a6HKRYrNDISIGpj3OfU5zWWa57mp9qnnjqaxXay3pc3oDm6gzWZbDJoI2d1bVXYgAo7sBA3cODEytbk6OSCWVgNzzswgclfn1lprtCT/IrRcxOdaa9sAiDG8+drs9sO65Dci9XO0/v/zRLg4Op4rj+ejBlPqXMCYnALaueRuF6+zoAZifExEYK6tInhEl5gut7lYUwIkzl3K0d7uum4+nmEtldBjTsfxT0PuMmc3nmsrDc42qRozR35aWp3nGULMZm0tB8JHhFGQwRkJHSCYqd7UfJ7LNE1hHy1djlDadV1U8HphvIDP/R93X3pacWNU/4Ep6YdXckFPbZFvi+XzPN9cHwCg6zrY763NvhtgPWRoxZa7ti0F3zC827Jg5qurq67rckru3nPSbEez8/G0/Q2u/cqUEpiVQluCcRmjcfUikMWqYvGauNRh+FpH8TLD+driiRJum6Yj00Zyr61uEi+4uiH95uNrK9nXhufXi2DccJcfIiAulvjzz5d7AD9Yu8/vAWvbLo4Vd0D3kO5pQalZUx0EcEMkj+PaHcGDTtxgGXSbNVAHcWRBAje38VTmcfK2VD9EhCIppUoUtgtlqo7hvsq11swiIn2/c0xzbdMUyMdCSGZLBy1w5+JMRKUUhMUqVIhFkiCYahkndHU1b1UbTucyjvM8z9M0ZQQACCvCZrp6s9FlabTYgi+JzoIDo1XF8jdX//MPGNzhFVaxIH6xlAoA79+/r7WmLptZ3/fu7mq0skbNLEDssYaif7XdtZgNd/t933UiwoDScZ3b6empTvM0TW2OqeqzfSqYiQDAcpKYN/f4Ckvuyhx2l4toSK01XQyt/sGt/rWF59tWufDjCgJW/M3cartQ5Cf8utDJ9k+/uQcglAgu87B/8Mlf+z3+xuPyRbYdvIScsMA2BI+xCJhCXAtVUG0uQCRJE33YWHKPnQBh1hBrAwESQkJgR1WDquU0zuPcphmFvbZWKxsQUd/3wzC46pOdWrPUZQCKZouIEMluB8MwdF1HwlhDp3G51nOrOM9mjEwPDw/TNPnKrxJCBVqopQqttDZZrTaf2zzPWmop1ReaUjQE50CsmkUmE8Z3EFn3ZTsYVuDX5SkBH4Z/gBWwiMsGgjWdYKTj8fjv/+6nP/qt73305tVUy9AlWAapy+vYBtliyltOHE4tK3YgRD0Y1vzErJU6z/M4jlZaLQXNN7sXZk5pwdvAc6HYEAPKzyIisIDn46JFcr/Fgi1rX0PB1x+XfxBLUVVDFSrerunSqoozCunry/VyA1xezOcNsP3/ry3lixPtEintsE3mfuME2J643cIFJh+6iGirya03bQre2BmYLDFSAhLk+CjmDRzCIxsgmskugMKYSTKyN/PSWpmsqJWq1RiwFa1UgZABNZpfACi8tA2JMUnXYUqJSFLyruty14kIIDY3xNA9x3V/VkSsWh4fH8dxvLndE4qb1bnWsSKStjaNZTzO8zjPk56fTtM011oDDbWREiNeblHKL6Za24W9mP58vTUOHwYmX0sCjJblktEiMz89PWkp90+P3/r2NySn0lqUogDQWouaaisWU0qllKoKAKE6jGEdN82dJJDkaqW1aZzmeQ4oaSjfLEQTpiiCRdjMWIK3hGtvKQCFDOHKSBRJmqqWuf6DC327Vl/7zdZrsbWZAfA8CdkSelx7qSvT5IPHbx4C2/X
/QBcItw7rh6v/gw/kzyfA16ZrX3uD5Yf13c0spphLJaDhpWyuHMa9SUSIZ21mzZUidhEAEjAQmAtjzykjQzOdJ5tmaM2rQgMBzMgM0SrCSF3i4xk4UDRPTLq826VIzHKGYb/b7Xbd0J/mGasuaCre1FXN3dno6fHxeDxae4VIrbV5rqVoltSKnk7jw93j+TiWuc3neRwnX2WQN/kNuOij+5r0bxccL8vBy0T/4ofLA8HMwrxuu5KLXlpsWpHj8cjML16+fP/uS0dwRI2bsHFr1+lBW40dRCQ0CkqrcB67lBOLqdZpns7zxpAkBSKPag2dQiWWGFiQGq3LRqN5SkQxMo3niiUTR2i+Hjz4rHXp8Nzrw69vgw/hZ35RJPzmE+HDaP4PruHtD543wOVzIm/52iHwvPovHl97y6/lUR9sANuYUGaO7o6GYOigYA4BW1PH5kAG6tgs2rNEhA6y4kUzckKCZjqXOo5WG6mVsXg1Yu6kS5xw0RAxAKi1xlnZzIo2Jkgpwf+vtS/rdiQ30osFyCR5WbeqS90a+Wn84Oc59v//M7I1ttVSb3chMxNARMxDAEiQrCr1WM7TpzovcwdiQyxfqHl2jUPnPn/6+Nv75ZqSvC9GO9q7qtbuTWJvr++//vqy/il9OOm25eWac5JtSet1e/nl9Zeffr28L1ZMRJtTvLnhm5Ry3ERofkwAsEHjjzPaJcsjD/jJpfX3BQCE/W1FZA5xnuf//X//z1//9rfPf/j4fnkFANvKqKUNwdsiSUuZdOrvNJS2bbleTdWKlJTLViQXUw1IQobMaMBEbsd2+iMi5trgqFvnqlqKMgsORDxS0bj/aBT5Jn2gEMZx6CYTPng/x2fdUeOddr3RAL71e40EfWvn3LPpo5S62QzNkyHcI2EI6rVeamqKAtk966oxowGdAmj3AxoauC5gBQYOCpYlLZtcM4oy0nLdzCzOU6AYKKIVVTG1GGczAeQQUMxSEROJ07QsCxAyR+R5nufj+en4dJqW67Jl8whGgzMgAgYUhev1enm7rGtKqWxL2tYkAm8vl/W6vL68v768b9fNO6GjUUrJdYhjC4CRg/u0EaiOZjQCM7yNzPcpGGXKKPPgVtyQtmbxZiDq/Q//8pe//K9//8vnHz7RFBFR1tzJyxBQFRGLKjXjxFMpzcwLvi5bSrx6gAVEtdTGzDsxNDunpdmA4w85SYxCvXGmIToWCZi13KRbvQeD/L0jU23FXEb7URtpmqivB1SV742Se51wdyj0ajFXhX2su8vpTsXcSfp+0+7poz3vAswsF49okBmqmEhSMVVxNFlSs6TbkpivYBKmCMfDRAE4AIKJAmuceZ6mSFy2zUTzki+//JaXLQBKymDx8v62rQFM8hLPT8fDYRbNAMocPZUgxkghgsfpRE7zCYiv6yYA5/P58w/frzlvqaiCS8S0bm9vb5oThUDEWuz19fXHH38kAzBZL2m5XkqS95frr7++XK+rpCyiaMTMaqam7iQ5Ho+5FEykCJ5kBgBecs4tIcKTLrsowR4xDXuM0n+vyYUBW2ZBzfAi9Lo5ymXLOZ8/nX74078owh/++MNf//pXVFUwEOg4GtIg087n8+F0VFW3zrOU9P4WpsO2rPnqxZ8IRirinjkGn0eT2ogAQwhq3GmmNilqIG2dPKg3EGhF0tQQMbDVrffz4VaQdwr0+hNrDlxHezazrDUa3TkK23rgi27WzqJVA9wxxChsvrH1ifmHZ/pM7ZcJqCoqaFE0RSRU05TzgmigucRzsKg0ITB5fY1lzZoNRFMCQN2yZQuCAQgE396Xl9/eCM1E8OOH03EGLw2jCqGhYGhG3scUoOau9Zg5huPxeDgeD6cjGE3T5LkJdPGSbNNccs7LdXv77XXiCKrr9Zq3lNbt7e2yXrdtS1akgRMrINbOMffR933DYYNBiPSjMID29OmogfAQnRoIRsMJpmkyEGB6fn7+/o8/fHh+Xrfr+ePz9bLhmHvTnjjPc5w
n71sBhKVt4l4sX2wAApDmokW8k7y/zS2FuK2oiF7U3Uy1UhDRvfVE1DNAv0hIo5Vxd/RrY/goNfqfXWTgYD3e7fTzd3j03/PUb7/f3a0b57mEILd+zUyLiimoMnFtU14kLxkNTCJekoYCUUNkA4RCWXOGHInLukRDzAIZAvBMIQb8OS2vr+85rTnnQHB+Oh6i5JKmyetCzDwv1jELkaMXSpsiIsXIYT6dn87PH96WNYTpeDwyIAC8v75l/wQXJ7m8v10JmAxSWqWU99e395e362WTJC5VoALNhhovYjJ1aU3eSsdzhluHhB3maLRq+nTobe4QNAbYrfU+wgYOzG9m27aht1w/HS/p6m02XfSqqgxts6bD7EBACuZpSDUYl4vHHM3MEX7ylqp/1m5SuAYxXV3+LdJalVgTt9AT482MBmE43ufRjh+VwN3g9B8RkVrqTf+RhlZI3Z4cL++Eih4JfqT4b/BAf7wO9ZPjoUeWqEeNdqQYUVALgTgGAcnqQS4xJFvVJtGYiwIKg4IxqYqRpmsuqkGBBA/T6cN0NNW/zYsqvL5dzOTpePj4fJ6YcvGIekFEgYqVAUaApgiSsxgIBC//5RDiYQ5TPB5PHz88ByQp5deff1lr+hGboYita2JcKs5rLtdL2jYRMS8jVlRzW8NEcY+D1iHqlNIGapybu3G7G9I7BxEQEnqKPCBi71KTUgqRL+v15eXlt5eXz//y2dsN7sgIVlHJQggdXB6Z3JyS1udcs3p1tZkJgAnknEEVm/sWAFDHjzEkIEbWzgDqtr7vOAMwM1Zos50wOn1/UQN0Fng81MmXHEZ6OB+bHX5Dey2X7vER95Fgu52qr213PDcaTqMZBwBGuOPTK5IBasWaxgolEK1CXACIQlJdSw6FBGmyoEgBEDBrWq9rVEDiGcMpHs+nD2h2Pr/HaSqlXK/r+/v7uqRy9DZSBcnciaY1H8uYKOekAGKgaBgjUFZVp1ZmjvM0U8VJRkRTTSkd4pRSWq8LAQYMKlJS1qxWM4XNoV5lkFrWPCTd2O0z0akfmz//cXZhYJKuyvc4sSdY2g4viQiE+PT0tKT15eXlf/77X/71v/3rfDpe1sUQgUhHQUtYYThiGAWkIvgsOLe4hilZta0Gd3l8KyIRcVw9qmqHanMIcqYYAoSwhzsQ71f/I/nd6YDOB+MAdgaoCPi3RiN8SRzfmSe+3XSIeaTvb/PAHZN9/Vqv64Sqsvx/6oNljpSMZlZArFhS3aRwJoVgpGSIYAAlixUzxXAIc5iOh9M8H6DI4ekUD0ekkHK+LOu6rgCfponBCyLM3POC3gfKIDvwGJKBZSmWUnIAGEJxgHnec9C1otG7J0RELEafRXJwBwZkDow1bd8j316EQFSzIqlDTw4z0TXACKk0TsQ4I3eHqlSs/ONsBmrkdsiatj//+c//9vZvx9O8blu3f1wEimloFksF7RLJKlkFfIGUZXfRisMsKgEK1C66XeQ2AtjNOWhp/S71rQO2GY1ZHqPsh+4pfiCtb5Ofj4YnrfTLHxdOj7f1ra+Db1Ihfs828u74CwyKafy2Mb2hTzwoVlwXUTBABAIUBwrbpHCJUZE9a4g8YsAYAk/Hefrw9HSeDh9O5wOFNS+H4+lwOIQQtjUty3JdlyRlPhysNdaofgkAVjUSNCAm5GAUpal+YHL8+GVZNsBlWXLOUPN+GQBylmVZDnGyMCMwgKP9EGEInunNgTmZ2ZoTQIVdsKGFbR8xvJVk2iKXj4oChnKQfpXTnJlBkyfOAIi4bRsiTtP0t5/+/ve///3z999dliulpENLc0Nwv5NXlLuQXte19/tBrEiMZJC9tEoUb+tj72afGXunFSmacyZyYEMQEVNU6PFartGVW6KEwYs4UuoX6biPQ801aj2gRt/OnaSHL1G///uFNcDvEf8wiCX7kgE3nLcHFtCNY/DyUwJQxSrDQFUNVEBKqU5oADJuqH0wTzMbPB1Oz8+fPk7HpxjRe05N0zT
PPEVYwVNWcs6qkwt+Lxx2F5sjzxzmyCFgnASCIZhUWyXGKMU87O/NOq2mM1nO2exqohNPIUwE6FAx5G1sQpymAGqsaGZJig5AkXcJI8NQID74LuBLkRobrKn6I+xQGojo7TVijKXkEMLz87OIXC6X8/NTKYVK0TGVv5npveYwpeQo1vVBzmlaDTlVtVqruQv+x2/xZsS6N6jzOqQKBDae+UhCbZz3Rf/jCXdPHMfQh+VxnO/E9Bc5YdcA2CE1XSy15pJfuqn3BKpNzAaurYZ+s2zd4wFTdZ1ZMQVUqw0STU2aCPQbEAEgEaSVDzNls6sVr/2fTSBlSsneP38Xz38KUtJLyQR8Vfg+nj6cAmmWkiA8XV7z+6/b5/MfS0mlZDUDQg6kqIYqIPLdeT6djtOxpHK9rnbZLCfOQsQZdUvluia5Zk2QNQJA5jcCCJbR4DW96Rse4xyIeT5GYJEcYghTAFRNVnKOGKFkKwqg5+PxdJ5njG/v7+io74AUo3mvPJ9IKW4xt5BOa2tel07mhEdI3oXGABTEDDIhESFQQgTUQJFjnBDef37/H//93/7rh/8CbzL9lNcVAZgwTBgkZ8sGZpEZr5rWBZmR7EgzoKWSzYwwMMFmtQkiGAJSyoUxiIqal7iSmoqhAhUVB+hGqP1iKRAbKQiCoqmCKaIiGjGG2p6+QwVX/C/mHiLo1hpVrDS2gU+s2lR6PB65gUE0u0tVFVtVYzfJnOs9qwZqzQo5tBHAQxzgkXW+vf0D8f+PrjWz2s22bb76dBklKUFEDoxoRPzhcPZs9azJCEHJzN7e3jyB2VMac86Xy+X19TXnDRE4GAVU7/U7h8Nhfvr48enpaQ7zel3LVhI6IpqLEDMzaKNZx7G/q6gVB9uYEdEMpikEmry3ZJEEg1aEXWabW9t+G2x60H63poUHmfcoKYefLef8+vr6008/Iejb21vA+4RfbSVXvjjx1KpuG4wA4qoV0thjBY3CbmZweCf8ynbzXGqm16N4HnMo7n4EqPqnOg+aV0dvKxCJaEcMaLfqUHxmN2Pub1ZNoFHb/s7NBnP//40HKu/eFhh4fB48b9ZA2SaccBbE8Pz88dOHTzEEoWKEOcu2bT//+OP1eiWieToyotvxr29vKjkEno5e9yhmcAyH0+np+fn5fDyHENBwvawhpELSfXNeD+C+i9rLQZvg0Vxb40xKRIwQwjTHGAKrFlgF1CTvHXjQXTMGTZKR9w+sYbI2KQ+R+/vtK+YTNe+bk5iFEAIjI5QUtm377bffYiBVBb65EKC2OHC5SyGgVsgJ8EjeDtuzpx+rKhr5L2gwMgm2DIX+FOd8d7/SUM3oVmXk4DXPANU9AeD/krubnQwRXdU4qAj4h+4DgjiUWOzPRawN6fz9/fxhbbAvX/s777hA/di3OWG8/p/kgfqiYPaQcOGSOFuWBRQ1AJLNp/kQQlABpmiMl/T29vr+888/L8vCHGO0hpkOpZTAu8eN2XUIhRAmnnzgHOx7ittia96SlJJzTuu2ruseWvcKHgBQsyIFc5mKmRHhHGdmDr4GRZYQzHDbNiggIsgV6NgFphMEEHqcuI+zwD9mALiV/Xu0vzEA7EsIDCE4KO/1ej0d52maOlAPDnD2HYOfAEANeHe2OlxKL3zBRsGdjHo29x1J3AlXfoDIriAGsBPMKPL7vN/deWCA3aHs1zoPdAbwJ2KrzvEg9E1KIpA9aoD+d3c43H3MF7d/XvyPt4JBctThIAMAFcmbChUgDvzsuZxFyxQnRdjW/P7+nqQQBffhQKlIMiJyOB5FihmGEHgKwFIzH2udB0TiOU4hBMlyuVy2LeUt19a2uSDUWmxC9KwMNBDNWpJqAYDpMLldVEri5tSXlK149o0SESUGJlFVT1VAUO8QVR2K1RL6xrD07YEBqFa3VC+b5ZxVIBA6cVyvVzA5THPAG093t4wdvxpFkAEDU+AWCLMOSeKeUufkkvZyTZffWCsu7oMD/qDOAE3YV03
ioHePZDNGJLo4H3kDBt7oMl5VUavQrCSEwRp6lQ5tu6yu6u9HdTeBHgf9axPzKP7hP+lLHeeDbKf+nQFUvYFuEaUIoHSIc+TJc7DiFNctX6/LuqYpHvKUwxRTSlkVUXLO65qOx6OIBCOKYZ5nQ52mQ5gnRmJHE0E2Qy2Wc87rJqmIiEoWyQZCRIHNFNG4LbYAREspotlA0ONfaJYb+pIBIhtZjWqocCkOKcnM02GWuroQay1MAMCk1BIxAOvlYlChYnwfybHPEPbAMNZO6la1QQhsWmFlPcNn2zZGClNrAdFCSj5lFXZBFcQYYiB0l2gQ7GvQGGOk1ivRco31DdSPiNB7tA3zyAzW3ZStO3dVPioKpg1sy8lGTRHqot9r0qAiFA1kafsGQ0cPbEReCQh3BJqvkeWoZu6hEcc/v7b9fxH/XRp5b+zdY0gGaqUUQyxFDAE1RA4EGLGa6iLi2Dvblr1zBkBNuPXWgtu2bdsGqMwcwmToeWoQQti2xBwCBsllvS7LsoBqjDFnCZ4ng0iAkVEVzSQgRvdOiDmSoKqKlVLSdDofDoe8bmlbVNUX4kq1cxQiKkJADCG426c4Uo/nzfV+H+Kmal3jPYCm+ghjRd/YzYDeWKQKv2maclJmPs6zuwqgJeg54TIBahWKVrOSELEa2Z0WOzF5QqszACKCoi+NvE6oT59pu9aqO9JxcAeXaF14VNArAVMz1RDI49gAYAZSqlpAIgQGAxURUQo4WkeDGVZJ0e7wAm9jAv1PRGxLixsi/4IX6PfQ9D9v+UDngWbD9aFXERMsgFZUCQhx4mCioAgKppatvL1eLq+XnLPV3oV1mgHADItpKSVEojARkaqklGKOYLS8XyaeIoSUyrIsaVnR4Hg4pDUDYaxtiQGUGIsX7xMgI1WUUaj/pbKdw4fDcQbQXDYzM8JpmgQrktT+PYjA5PZPFXjD0f/UcA07+/AjItTGdaXzoYc1sNnKTjTOAO5q9GwId2Y7rasZM5PVELXrPW5wze64HCe9mjr9W2AvJ3IdWH8G1zQ3Vn4/s99Q9ZavmtLgAc+nXz4yAHzpPl0/2LAO6dRf7+EM0PXdyBl3jHL32ePO+Kc1T8J41Jq2EtP9I9uIMLv9fGNKhcDLWoqUeHgKhJZK3sohHtKyEsE8zznLsqzLsr2+vJfXiwNMLOuqIofnQ4hRVbPa0+l4OBx8Xk8fnhHhx7//7fnTx21Z81Zef3u5vl3zlkopkmSOsZQNAQ4xIB63lFSDznsjhhijV7pkSWLlsq3HtBCBAYRpsmUppYT5kLd3IOSWtQkAGDgQFlMtGQC49bEzgMhMMWgp6ujkgQGxYp7WbO06Mr0Sdq7dJuua0gFPOeC2bd9//73ktCxLdQrPMXLQXGNeCBhbyZuIxBhFhBC9w+e6rkUEEaW2way9Uwm8AxcVq+36YozuZqjAJFQLFVQqI23bdrlc/aoYo0hVic5Ua9qIKIApWADrA2VmLnwMvSRJPU11IKfd0O8PxcH72mi9RrFqdnfzK/XUjMYhD4vgr4mcLpu/eM4de3yNbfoNK+sPq15sOBmdd7H2D3Zof4DAbICiWkRzEaScbb2s18u6LBvk2vEXAAJPrSEPhhDASETMAjKHEMRKyfmXn35djmsgXi/XbdvMICABgxlOLDGGkIMqCHPhEpVKBvNebpCLKREJqICGiBARY2Cv40cUBVWhwN6os4v5G2FBiIZEJB2q0io4J4JbEW6s+4/U17hO/16UjIgO7uZu7zqqLROm93t0mIsQgmsGF3xVG2j1iRERB+IpIpOlxMw8VQj/Ztjs5n7TWjXK4ef0zk4AO5HgTa7rjU0xrm57hf6jcugi9Wu2xq5hXKk2Me+1mp30+8s4A0BbjsMXTaCvUf/4yyPpP2qMfvKoa264qOVyhBAw7GJJVVUT+bIATEtRgBiZAPOWyhYkRkFNKW/blrOoGqO3pDVEnuI0z3MIk1nF2HGM4AY5aCmVvF23ZSUiSSK5kIE
RMQIFLlHnEHMsqhosRBEEUMmGIKrmhs0U4jFOp+l0eppPh+kwoaBhRqZiWqRMDjWF1ZZXBF/XWwtPKgKV3UHeZ6hrfxxWRHe+EUQU8ZLCngda9YQL70B4OBw8m9W01B4ZzW7QlqSJVCmGmWMMYZqQvdem0i1EIRl20JHB/NjDahXvvq0BYPBIdlME24t2W8Od3z5CIdRlvQcEVLu8dwL7Qn+7Jhf2+EC3AJ0BxlCGJyUQUbvzfp9vMcBI8SPt3tF33xmP3u7cXNK5tpuhFAMMNc5SHLJ4Z2tTRbW8lZzSlIMwmxgATdN0fmKI1+2yrbhh4Kq1YyCrIXpC5y4sLSkyhKAKnm5EhoaARoiGSIExhBCJCnMopTCaMcdgZmomZDTR04fTp8+fP37+7vn5+cPTcySWLCImrrjB8x2aMAO3dRQUu/nHxGpGzR9CDWPncRnaZW43A/Ah86pTtyOvTIf5dDrN80xEgJxywYashgSdY5D2OfUdt15ERFLqMlJEsFIqlCIuVr1MrFM2tGxkHvpvd3Ifqb995heKFTuZ7ZrnluIrJQ3emi7Xgfd6y1KKKdqwAOh0KCJuHfUR8+1bDHBH33fj/kUe2G+8n7nv75Lg1rIad0Z1QYbFvDRbNZeSSlo3wpBLUYEYp3kKXA5v9nK9XjElX2gCAFFIW9GTIqKApZSAPGyOM0/iFEuEAFrEiEAdYwoZa0IUEUViDphLVBBRUbTDPJ2fnz/98N3n7//w3XffTWG2Yvm6bVsGwgLmlOGwKKIKIqAChAZQTKX5W7j2qGvLZSYc6Ls1q6BhWMDMk2aRca+uclmoqoAQQ3T2Pp1OPfPCzDj0Gtyh6pygJ9KICIogVzGZhtxpRATp1shIBn32audmAECoaaRdlo0btprmbAYIWkuUDEzB1K3zOuljx1EAHhh+p/7Rc8jcnDymqh4J7sqnE2RfHrQfK719lQFGWX7DkV+PA9xwVvvs/VbjZ4z7t5yD6D1/iLF2zuprmnVdDU0Utqxp2xDxdDpPVnLO89tccvbQrapi4JxzKVpKocyqClQD4W56glpANjJDJQCOMaUSqPbmmJvHQ1UxZzIzE0NA5sOH48fvPn36/PHjp08InK5JxTgG74Xsvk5/RPHll6C79gVMwYAptGWcZyD0ucSm9BCNCCvMRMt/hkoW1PyG1Z5BqNeGEErevBIfHeKKhqYND+TYH1eK+ctLq1+xB5rrkgsAArXO1aqImBvYqHuBmvl0T4L9cjf96D5Gu78qta2+ZENDGUmrk/jjRz068K35ZvoiGKDFT76hAUZdM3ICfiUOcGf8DFIK7k7YCb35ce8Ev5/gC2QAQDPJeVtWNRIppRRRzDkHns7nc9S8bdtyPoMCWs08CyHkvHmmuxGEGQOSgZVSqJZ7ExKiQTFEdEQMAIBJRWRWVUUIagBwyTlrZlAxY+bD4XA6H4/nJ57YFCkiT5FjRCYjtFYV5jwgYKBelwweAGaKMBg8I3E0Wq8j0Cmgz9841KqVAZiYiEJgZpZSEXnNLKU0RVbV19dXT+4IjHOIHRvCX3JvOJcs5bxtG+2oMDRO1qi0/Qv60WpXUBVVImJ20/KszywzYzFCVICae6juNgZHT0HP5SEzsioTQieeG2NbWypEB9v3R/Tw2UBOPaERdSA2v9s/YIDxbBwM0Hu18CUSfzxnPKG7e2GU/bcWITOLqZmo6rquAlRMAMmQEPhwiOfz2dbL4XA4nU4mZiKRQwiBp4grllKWtGEgniZEVFMRAwa0iuynJv0FHLU4aixRoxSHNzTAaZpQMYNiUY4hTDHM0zTPouqOngonc1v1UtnAkZ/BPAMCCPEr5sHI+Y874zmt7wP0JJle24XtZVS1mAZGAPjll1+2bVPVeQpwOPrywEW1I694hbFHD5dlmbSiOPY5AydxHcGcbwRwl/RuYIgIwGiI35nQ1RlwJyjvlNWdyH+knxpqVLXBpYOIZverC2wti6rob0f93/8Amq0+e/RONFk
AAAAASUVORK5CYII=", - "text/plain": [ - "" - ] - }, - "metadata": {}, - "output_type": "display_data" - } - ], - "source": [ - "import io\n", - "\n", - "import requests\n", - "from PIL import Image\n", - "from IPython import display\n", - "\n", - "url = \"https://pai-sdk.oss-cn-shanghai.aliyuncs.com/resources/images/11563567033_b822736d84_c.jpeg\"\n", - "\n", - "data = requests.get(url).content\n", - "\n", - "display.display(Image.open(io.BytesIO(data)))" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "from pai.predictor import RawResponse\n", - "\n", - "resp: RawResponse = p.raw_predict(data=data)\n", - "\n", - "print(resp.json())" - ] - }, - { - "attachments": {}, - "cell_type": "markdown", - "metadata": {}, - "source": [ - "\n", - "在测试完成之后,我们可以将创建的服务删除。" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "p.delete_service()" - ] - } - ], - "metadata": { - "execution": { - "timeout": 1800 - }, - "kernelspec": { - "display_name": "pai-dev-py38", - "language": "python", - "name": "python3" - }, - "language_info": { - "codemirror_mode": { - "name": "ipython", - "version": 3 - }, - "file_extension": ".py", - "mimetype": "text/x-python", - "name": "python", - "nbconvert_exporter": "python", - "pygments_lexer": "ipython3", - "version": "3.8.3" - } - }, - "nbformat": 4, - "nbformat_minor": 2 -} diff --git a/docs/source/tutorial/predict.rst b/docs/source/tutorial/predict.rst deleted file mode 100644 index bfc2ca8..0000000 --- a/docs/source/tutorial/predict.rst +++ /dev/null @@ -1,12 +0,0 @@ -=========================================== -模型部署 -=========================================== - - -.. 
toctree:: - :maxdepth: 1 - - model_deploy_container/model_deploy_container - async_inference/async_inference - modelscope_model_deploy/modelscope_model_deploy - huggingface_model_deploy/huggingface_model_deploy diff --git a/docs/source/tutorial/pretrained-model/pretrained-model.ipynb b/docs/source/tutorial/pretrained-model/pretrained-model.ipynb deleted file mode 100644 index 5e6ad2a..0000000 --- a/docs/source/tutorial/pretrained-model/pretrained-model.ipynb +++ /dev/null @@ -1,297 +0,0 @@ -{ - "cells": [ - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "# 使用PAI预置算法微调模型\n", - "\n", - "预训练模型(pre-trained model)是通过在大规模数据集上进行训练,从而学习到数据的特征表示的深度学习模型。因为模型是通过大规模的数据进行预训练,因而可以通过少量的数据集进行训练,避免从头训练模型的高额成本。在应用时,预训练模型可以被作为基础模型,然后在特定任务的有标注数据集上进行微调,从而适应特定任务的要求。\n", - "\n", - "PAI在公共模型仓库中,提供了不同领域,包括计算机视觉、自然语言处理、语音等的常见热门预训练模型,例如 `QWen`、`Bert`、`ChatGLM`、`LLama2`、`StableDiffusion 2.1` 等,并提供了相应的预置算法,用户仅需提供数据集,即可在PAI上完成模型微调训练。\n", - "\n", - "在本示例中,我们将以`Bert`模型为示例,展示如何使用PAI提供的预置算法对模型进行微调训练。" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "\n", - "## 安装和配置SDK\n", - "\n", - "\n", - "我们需要首先安装PAI Python SDK以运行本示例。" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "\n", - "!python -m pip install --upgrade alipai" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "\n", - "\n", - "SDK需要配置访问阿里云服务需要的AccessKey,以及当前使用的工作空间和OSS Bucket。在PAI SDK安装之后,通过在**命令行终端** 中执行以下命令,按照引导配置密钥、工作空间等信息。\n", - "\n", - "\n", - "```shell\n", - "\n", - "# 以下命令,请在 命令行终端 中执行.\n", - "\n", - "python -m pai.toolkit.config\n", - "\n", - "```\n", - "\n", - "我们可以通过以下代码验证配置是否已生效。" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": { - "tags": [ - "hide-output" - ] - }, - "outputs": [], - "source": [ - "import pai\n", - "from pai.session import get_default_session\n", - "\n", - "print(pai.__version__)\n", - "\n", - "sess = get_default_session()\n", - "\n", - "# 获取配置的工作空间信息\n", 
- "assert sess.workspace_name is not None\n", - "print(sess.workspace_name)" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "## 查看PAI提供的预训练模型\n", - "\n", - "我们可以通过参数`provider`为`pai`,获取`PAI`公共模型仓库下的模型,其中包含了PAI提供的模型和从开源社区精选的模型。\n" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "from pai.model import RegisteredModel\n", - "\n", - "\n", - "data = [[\"ModelName\", \"Task\", \"Revision\"]]\n", - "\n", - "# 获取公共模型仓库'pai'提供的模型列表\n", - "for m in RegisteredModel.list(model_provider=\"pai\"):\n", - " revision = m.version_labels.get(\"revision\")\n", - " license = m.version_labels.get(\"license\")\n", - " task = m.task\n", - " data.append([m.model_name, task, revision])" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "from IPython.display import HTML, display\n", - "\n", - "display(\n", - " HTML(\n", - " \"{}
\".format(\n", - " \"\".join(\n", - " \"{}\".format(\"\".join(str(_) for _ in row))\n", - " for row in data\n", - " )\n", - " )\n", - " )\n", - ")" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "## 使用模型的预置算法微调训练\n", - "\n", - "通过`model_name`和`model_provider`参数,我们可以获取PAI提供的预训练模型(`RegisteredModel`对象),`RegisteredModel`对象包含了模型所在的OSS Bucket信息,以及模型的预训练算法配置。" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "from pai.model import RegisteredModel\n", - "\n", - "# 获取PAI模型仓库中的bert-base-uncased模型\n", - "m = RegisteredModel(\n", - " model_name=\"bert-base-uncased\",\n", - " model_provider=\"pai\",\n", - ")\n", - "\n", - "print(m)\n", - "\n", - "# 查看模型的公共读OSS Bucket路径\n", - "print(m.model_data)\n", - "# 查看模型的训练算法配置\n", - "print(m.training_spec)" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "获取 `bert-base-uncased` 模型的预置微调算法,以及算法的超参定义和输入数据定义。" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "import json\n", - "from pai.estimator import AlgorithmEstimator\n", - "\n", - "\n", - "# 通过注册模型的配置,获取相应的预训练算法\n", - "est: AlgorithmEstimator = m.get_estimator()\n", - "\n", - "# 查看算法的超参定义\n", - "print(json.dumps(est.hyperparameter_definitions, indent=4))" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "# 查看算法的输入输出通道定义\n", - "print(json.dumps(est.input_channel_definitions, indent=4))" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "# 默认的超参信息\n", - "print(\"before\")\n", - "print(est.hyperparameters)\n", - "\n", - "\n", - "# 配置超参\n", - "est.set_hyperparameters(batch_size=32)\n", - "\n", - "print(\"after\")\n", - "print(est.hyperparameters)" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - 
"模型默认自带了测试的训练数据,例如BERT模型提供的预置算法可以用于训练一个文本分类模型,默认提供了[情感分类数据集sst2](https://huggingface.co/datasets/sst2),可以直接用于模型的微调训练。\n", - "训练数据格式为一个`jsonline`文件,每一行为一个json对象,包含了`label`和`text`两个字段,分别表示文本的标签和文本内容。\n", - "\n", - "```json\n", - "{\"label\": \"negative\", \"text\": \"hide new secretions from the parental units \"}\n", - "{\"label\": \"negative\", \"text\": \"contains no wit , only labored gags \"}\n", - "{\"label\": \"positive\", \"text\": \"that loves its characters and communicates something rather beautiful about human nature \"}\n", - "...\n", - "...\n", - "\n", - "```\n", - "\n", - "用户可以参考以上的数据格式准备数据,当前示例中,我们将基于PAI提供的数据集对模型进行微调训练。" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "# 获取模型自带的训练输入\n", - "default_inputs = m.get_estimator_inputs()\n", - "\n", - "# 默认的算法训练输入,包含了算法使用的预训练模型,训练数据,以及验证数据。\n", - "print(json.dumps(default_inputs, indent=4))" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "我们将模型配置的默认的数据集作为训练输入,使用模型预置的PAI算法提交训练作业。" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "est.fit(inputs=default_inputs)" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "在训练结束之后获取产出模型的OSS Bucket路径。" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "# 查看输出模型路径\n", - "print(est.model_data())" - ] - } - ], - "metadata": { - "kernelspec": { - "display_name": "base", - "language": "python", - "name": "python3" - }, - "language_info": { - "codemirror_mode": { - "name": "ipython", - "version": 3 - }, - "file_extension": ".py", - "mimetype": "text/x-python", - "name": "python", - "nbconvert_exporter": "python", - "pygments_lexer": "ipython3", - "version": "3.8.3" - } - }, - "nbformat": 4, - "nbformat_minor": 2 -} diff --git a/docs/source/tutorial/pytorch_ddp/pytorch_ddp.ipynb b/docs/source/tutorial/pytorch_ddp/pytorch_ddp.ipynb 
deleted file mode 100644 index 86b853d..0000000 --- a/docs/source/tutorial/pytorch_ddp/pytorch_ddp.ipynb +++ /dev/null @@ -1,330 +0,0 @@ -{ - "cells": [ - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "# 提交PyTorch分布式作业\n", - "\n", - "\n", - "PAI支持用户提交分布式PyTorch训练作业,本文将介绍如何使用PAI Python SDK,以[PyTorch DDP(DistributedDataParallel)](https://pytorch.org/docs/stable/notes/ddp.html)模式提交分布式PyTorch训练作业。\n" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "\n", - "## 安装和配置SDK\n", - "\n", - "我们需要首先安装PAI Python SDK以运行本示例。" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": { - "tags": [ - "skip-execution" - ] - }, - "outputs": [], - "source": [ - "!python -m pip install --upgrade alipai" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "\n", - "!python -m pip install pygments" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "\n", - "\n", - "SDK需要配置访问阿里云服务需要的AccessKey,以及当前使用的工作空间和OSS Bucket。在PAI SDK安装之后,通过在 **命令行终端** 中执行以下命令,按照引导配置密钥、工作空间等信息。\n", - "\n", - "\n", - "```shell\n", - "\n", - "# 以下命令,请在 命令行终端 中执行.\n", - "\n", - "python -m pai.toolkit.config\n", - "\n", - "```\n", - "\n", - "我们可以通过以下代码验证配置是否已生效。" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "import pai\n", - "from pai.session import get_default_session\n", - "\n", - "print(pai.__version__)\n", - "\n", - "sess = get_default_session()\n", - "\n", - "# 获取配置的工作空间信息\n", - "assert sess.workspace_name is not None\n", - "print(sess.workspace_name)" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "## PyToch 分布式作业介绍\n", - "\n", - "[PyTorch DDP(Distributed Data Parallel)](https://pytorch.org/docs/stable/notes/ddp.html)是PyTorch提供的分布式数据并行训练功能,支持模型在多台机器上进行并行训练,从而提高训练效率。\n", - "\n", - "PyTorch 
DDP基于多进程的方案实现,支持单机多卡模式和多机多卡模式。在单机多卡模式下,用户可以使用同一台机器下的多个GPU来加速模型的训练。在多机多卡模式下,可以将计算任务分配到多台机器上进行并行计算,加速训练速度。对于DDP的详细介绍,可以参考PyTorch的[官方文档链接](https://pytorch.org/docs/stable/notes/ddp.html)。\n", - "\n", - "\n", - "![PyTorch DDP](./resource/ddp.png)\n", - "\n", - "> PyTorch提供的`DataParallel`和`DistributedDataParallel`模块都支持数据并行训练,[PyTorch官方](https://pytorch.org/tutorials/intermediate/ddp_tutorial.html#comparison-between-dataparallel-and-distributeddataparallel)推荐不论是单机多卡还是多机多卡,都使用`DistributedDataParallel`模块进行训练。\n", - "\n", - "### 代码适配DDP改造\n", - "\n", - "使用PyTorch DDP进行分布式训练需要对原先的PyTorch训练代码(使用单机单卡)进行的修改,具体可以参考[PyTorch官方文档](https://pytorch.org/tutorials/beginner/dist_overview.html#torch-nn-parallel-distributeddataparallel)。\n", - "\n", - "主要包括:\n", - "\n", - "- 初始化分布式训练配置:\n", - "\n", - "需要在训练迭代开始之前完成训练环境初始化。\n", - "\n", - "```python\n", - "\n", - "from torch.distributed import init_process_group, destroy_process_group\n", - "\n", - "def ddp_setup()\n", - " init_process_group(backend=\"nccl\")\n", - "\n", - "```\n", - "\n", - "初始化需要指定机器之间的通讯方式,当使用GPU进行训练时,通常使用`nccl`作为通讯后端,而使用CPU训练时,使用`gloo`,详细的介绍可以参考PyTorch文档: [Which Backend To Use?](https://pytorch.org/docs/stable/distributed.html#which-backend-to-use)\n", - "\n", - "- 使用DDP封装模型:\n", - "\n", - "```python\n", - "\n", - "from torch.nn.parallel import DistributedDataParallel as DDP\n", - "\n", - "# model是原始单机单卡训练的PyTorch模型\n", - "model = DDP(model)\n", - "\n", - "```\n", - "\n", - "\n", - "- 修改DataLoader的采样方式:\n", - "\n", - "当使用DDP进行数据并行训练,不同的worker进程需要读取不同的数据分片进行训练。当不同机器上通过共享存储的方式使用同一份数据集时,可以使用`torch.utils.data.distributed.DistributedSampler`来对数据进行采样,从而保证不同的worker进程读取不同的数据分片。\n", - "\n", - "```python\n", - "\n", - "from torch.utils.data import DataLoader\n", - "from torch.utils.data.distributed import DistributedSampler\n", - "\n", - "train_sampler = DistributedSampler(\n", - "\ttrain_dataset,\n", - "\tshuffle=True)\n", - "\n", - "trainloader = DataLoader(\n", - "\ttrain_dataset,\n", - 
"\tbatch_size=args.per_device_train_batch_size,\n", - "\tsampler=train_sampler,\n", - "\tnum_workers=2,\n", - "\tdrop_last=True)\n", - "\n", - "```\n", - "\n", - "\n" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "\n", - "### PAI支持PyTorch DDP分布式训练\n", - "\n", - "PAI原生支持的PyTorch的分布式训练,当用户提交训练作业,指定作业类型为PyTorch训练作业时(`job_type=\"PyTorchJob\"`),PAI的训练服务会在机器节点上设置环境变量,包含作业机器数量,机器RANK,机器之间的通讯地址等信息。\n", - "\n", - "| 环境变量名 | \t描述 |\n", - "|:----------|:---------|\n", - "|MASTER_ADDR | Master机器节点的服务地址 |\n", - "|MASTER_PORT | Master机器节点端口号,如:23456 |\n", - "|WORLD_SIZE\t | 分布式作业的**机器节点总数**,例如提交的训练作业申请了4台机器,则WORLD_ISZE=4 |\n", - "|RANK\t| **机器节点的RANK**,例如启动了一个4个节点的作业,则各个机器节点的RANK分别为0,1,2,3 |\n", - "\n", - "\n", - "`PyTorch`提供了分布式训练启动工具,`torchrun`(PyTorch 1.1.0及以上版本) 和 `torch.distributed.launch`(PyTorch 1.1.0版本以下),支持训练作业的拉起。配合以上PAI预置的环境变量,我们可以便利得启动分布式训练作业。\n", - "\n", - "\n", - "\n", - "使用`torch.distributed.launch`拉起训练作业示例:\n", - "\n", - "```shell\n", - "\n", - "# for PyTorch<1.1.0\n", - "\n", - "python -m torch.distributed.launch \\\n", - "--nproc_per_node= \\\n", - "--master_addr=$MASTER_ADDR \\\n", - "--master_port=$MASTER_PORT \\\n", - "--nnodes=$WORLD_SIZE \\\n", - "--node_rank=$RANK \\\n", - " training_arguments...\n", - "\n", - "```\n", - "\n", - "使用`torchrun`拉起训练作业示例:\n", - "\n", - "```shell\n", - "\n", - "# for PyTorch>=1.1.0\n", - "torchrun \\\n", - "--nproc_per_node= \\\n", - "--master_addr=$MASTER_ADDR \\\n", - "--master_port=$MASTER_PORT \\\n", - "--nnodes=$WORLD_SIZE \\\n", - "--node_rank=$RANK \\\n", - " training_arguments...\n", - "\n", - "```\n", - "\n", - "用户需要修改` 以上的作业启动命令,同样适用于单机多卡的训练作业启动。\n", - "\n" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "## 提交训练作业\n", - "\n", - "PAI Python SDK提供了Estimator的接口,用于提交训练作业,以下示例中,我们将通过Estimator提交一个PyTorch分布式训练作业。\n", - "\n", - "\n", - "- 准备训练代码\n", - "\n", - 
"PyTorch提供了多机多卡的[训练代码示例](https://github.com/pytorch/examples/blob/main/distributed/ddp-tutorial-series/multinode.py),在修改了模型和checkpoints保存路径后,我们既可以将其用于提交到PAI进行训练。\n" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "# 通过以下代码查看准备提交的训练代码\n", - "!pygmentize train_src/train_multinode.py" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "- 提交训练作业\n", - "\n", - "我们将使用PAI提供的PyTorch 1.12版本的GPU镜像完成多机多卡的作业训练。使用`estimator.fit`提交训练作业之后,SDK会打印作业的控制台链接,用户可以通过控制台查看作业状态,日志详情等信息。" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "from pai.estimator import Estimator\n", - "from pai.image import retrieve\n", - "\n", - "# 使用PAI提供的PyTorch 1.12 GPU镜像\n", - "image_uri = retrieve(\n", - " \"pytorch\",\n", - " framework_version=\"1.12\",\n", - " accelerator_type=\"GPU\",\n", - ").image_uri\n", - "print(\"Training Image URI: \", image_uri)\n", - "\n", - "\n", - "# 每一个机器实例的GPU数量,需要根据用户选择的机器型号(instance_type)进行修改\n", - "gpu_count_per_instance = 2\n", - "\n", - "# 训练脚本使用torchrun命令启动\n", - "command = f\"\"\"torchrun --master_addr=$MASTER_ADDR \\\n", - "--master_port=$MASTER_PORT \\\n", - "--nnodes=$WORLD_SIZE --node_rank=$RANK \\\n", - "--nproc_per_node={gpu_count_per_instance} \\\n", - "train_multinode.py --total_epochs 10 --save_every 5 \\\n", - "\"\"\"\n", - "\n", - "\n", - "# 提交训练作业\n", - "est = Estimator(\n", - " image_uri=image_uri,\n", - " source_dir=\"./train_src\", # 训练代码所在目录\n", - " command=command,\n", - " job_type=\"PyTorchJob\",\n", - " instance_type=\"ecs.gn6i-c24g1.12xlarge\", # 2 * NVIDIA T4 GPU\n", - " instance_count=2, # 机器实例数量\n", - " base_job_name=\"pytorch-ddp\",\n", - ")\n", - "\n", - "# fit方法提交训练作业,默认等待到作业执行完成\n", - "est.fit()\n", - "\n", - "\n", - "# 查看作业的输出模型\n", - "\n", - "est.model_data()" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "## 参考:\n", - "\n", - "- PyTorch Distributed Overview: 
https://pytorch.org/tutorials/beginner/dist_overview.html" - ] - } - ], - "metadata": { - "kernelspec": { - "display_name": "pai-dev-py38", - "language": "python", - "name": "python3" - }, - "language_info": { - "codemirror_mode": { - "name": "ipython", - "version": 3 - }, - "file_extension": ".py", - "mimetype": "text/x-python", - "name": "python", - "nbconvert_exporter": "python", - "pygments_lexer": "ipython3", - "version": "3.8.16" - } - }, - "nbformat": 4, - "nbformat_minor": 2 -} diff --git a/docs/source/tutorial/pytorch_ddp/resource/ddp.png b/docs/source/tutorial/pytorch_ddp/resource/ddp.png deleted file mode 100644 index ef2638d..0000000 Binary files a/docs/source/tutorial/pytorch_ddp/resource/ddp.png and /dev/null differ diff --git a/docs/source/tutorial/pytorch_mnist/pytorch_mnist.ipynb b/docs/source/tutorial/pytorch_mnist/pytorch_mnist.ipynb deleted file mode 100644 index fa5de20..0000000 --- a/docs/source/tutorial/pytorch_mnist/pytorch_mnist.ipynb +++ /dev/null @@ -1,969 +0,0 @@ -{ - "cells": [ - { - "attachments": {}, - "cell_type": "markdown", - "metadata": { - "tags": [ - "skip-execution" - ] - }, - "source": [ - "# 基于PyTorch训练和部署MNIST图片分类模型\n", - "\n", - "PyTorch是一个非常流行的深度学习框架,提供了极高的灵活性和优越的性能,能够与Python丰富的生态无缝结合,被广泛应用于图像分类、语音识别、自然语言处理、推荐、AIGC等领域。本示例中,我们将使用PAI Python SDK,在PAI完成一个PyTorch模型的训练,然后使用训练获得的模型部署推理服务。主要流程包括:\n", - "\n", - "- Step1: 安装和配置SDK\n", - "\n", - "安装PAI Python SDK,并配置使用的AccessKey、工作空间以及OSS Bucket。\n", - "\n", - "- Step2: 准备训练数据\n", - "\n", - "我们下载一个MNIST数据集,上传到OSS上供训练作业使用。\n", - "\n", - "- Step3: 准备训练脚本\n", - "\n", - "我们使用PyTorch示例仓库中的MNIST训练脚本作为模板,在简单修改之后作为训练脚本。\n", - "\n", - "- Step4: 提交训练作业\n", - "\n", - "使用PAI Python SDK提供的Estimator API,创建一个训练作业,提交到云上执行。\n", - "\n", - "- Step5: 部署推理服务\n", - "\n", - "将以上训练作业输出的模型,分别使用Processor和镜像部署的方式部署到PAI-EAS,创建在线推理服务。\n", - "\n" - ] - }, - { - "attachments": {}, - "cell_type": "markdown", - "metadata": { - "pycharm": { - "name": "#%% md\n" - } - }, - "source": [ - "## Step1: 安装和配置SDK\n", - "\n", 
- "我们需要首先安装PAI Python SDK以运行本示例。" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": { - "tags": [] - }, - "outputs": [], - "source": [ - "!python -m pip install --upgrade alipai\n", - "!python -m pip install pandas" - ] - }, - { - "attachments": {}, - "cell_type": "markdown", - "metadata": {}, - "source": [ - "\n", - "\n", - "SDK需要配置访问阿里云服务需要的AccessKey,以及当前使用的工作空间和OSS Bucket。在PAI SDK安装之后,通过在**命令行终端** 中执行以下命令,按照引导配置密钥、工作空间等信息。\n", - "\n", - "\n", - "```shell\n", - "\n", - "# 以下命令,请在 命令行终端 中执行.\n", - "\n", - "python -m pai.toolkit.config\n", - "\n", - "```\n", - "\n", - "我们可以通过以下代码验证配置是否已生效。" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": { - "pycharm": { - "name": "#%%\n" - } - }, - "outputs": [], - "source": [ - "import pai\n", - "from pai.session import get_default_session\n", - "\n", - "print(pai.__version__)\n", - "\n", - "sess = get_default_session()\n", - "\n", - "# 获取配置的工作空间信息\n", - "assert sess.workspace_name is not None\n", - "print(sess.workspace_name)" - ] - }, - { - "attachments": {}, - "cell_type": "markdown", - "metadata": {}, - "source": [ - "## Step2: 准备训练数据\n", - "\n", - "当前示例中,我们将使用MNIST数据集训练一个图片分类模型。为了支持训练作业加载使用,我们需要将数据上传到OSS Bucket上。\n" - ] - }, - { - "attachments": {}, - "cell_type": "markdown", - "metadata": {}, - "source": [ - "使用以下的Shell脚本,我们将MNIST数据集下载到本地目录data。" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": { - "vscode": { - "languageId": "shellscript" - } - }, - "outputs": [], - "source": [ - "%%sh\n", - "\n", - "#!/bin/sh\n", - "set -e\n", - "\n", - "url_prefix=\"https://ossci-datasets.s3.amazonaws.com/mnist/\"\n", - "# 如果以上的地址下载速度较慢,可以使用以下地址\n", - "# url_prefix=\"http://yann.lecun.com/exdb/mnist/\"\n", - "\n", - "mkdir -p data/MNIST/raw/\n", - "\n", - "wget ${url_prefix}train-images-idx3-ubyte.gz -P data/MNIST/raw/\n", - "wget ${url_prefix}train-labels-idx1-ubyte.gz -P data/MNIST/raw\n", - "wget ${url_prefix}t10k-images-idx3-ubyte.gz -P 
data/MNIST/raw\n", - "wget ${url_prefix}t10k-labels-idx1-ubyte.gz -P data/MNIST/raw\n" - ] - }, - { - "attachments": {}, - "cell_type": "markdown", - "metadata": {}, - "source": [ - "我们将使用PAI Python SDK提供的OSS上传API,将相应的数据上传到OSS Bucket上。" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "from pai.common.oss_utils import upload\n", - "from pai.session import get_default_session\n", - "\n", - "sess = get_default_session()\n", - "data_path = \"./data\"\n", - "\n", - "data_uri = upload(data_path, oss_path=\"mnist/data/\", bucket=sess.oss_bucket)\n", - "\n", - "print(data_uri)" - ] - }, - { - "attachments": {}, - "cell_type": "markdown", - "metadata": { - "pycharm": { - "name": "#%% md\n" - } - }, - "source": [ - "## Step3: 准备训练脚本\n", - "\n", - "使用PyTorch训练模型,需要我们准备相应的脚本。这里我们以PyTorch官方提供的 [MNIST 示例](https://github.com/pytorch/examples/blob/main/mnist/main.py) 为基础,修改了数据加载和模型保存的逻辑,作为训练脚本。\n", - "\n", - "- 使用环境变量获得输入数据路径\n", - "\n", - "训练数据将被挂载到训练作业环境中使用,训练代码需要读取指定的路径获取训练数据。\n", - "\n", - "\n", - "```diff\n", - "\n", - "- dataset1 = datasets.MNIST(\"../data\", train=True, download=True, transform=transform)\n", - "- dataset2 = datasets.MNIST(\"../data\", train=False, transform=transform)\n", - "\n", - "+\t # 使用挂载到训练容器中的数据,默认为 /ml/input/{ChannelName},可以通过环境变量 `PAI_INPUT_{ChannelNameUpperCase}`\n", - "+ data_path = os.environ.get(\"PAI_INPUT_TRAIN_DATA\")\n", - "+ dataset1 = datasets.MNIST(data_path, train=True, download=True, transform=transform)\n", - "+ dataset2 = datasets.MNIST(data_path, train=False, transform=transform)\n", - "\n", - "\n", - "```\n", - "\n", - "- 使用环境变量获取模型的保存路径:\n", - "\n", - "用户需要保存模型到工作容器中的指定路径,PAI的训练服务将其才能够持久化保存模型到OSS Bucket上。默认要求用户需要将模型保存到环境变量 `PAI_OUTPUT_MODEL` 指定的路径下(默认为`/ml/output/model`)。\n", - "\n", - "\n", - "```diff\n", - "\n", - "- if args.save_model:\n", - "- torch.save(model.state_dict(), \"mnist_cnn.pt\")\n", - "\n", - "+ # 保存模型\n", - "+ save_model(model)\n", - "+\n", - 
"+\n", - "+ def save_model(model):\n", - "+ \"\"\"将模型转为TorchScript,保存到指定路径.\"\"\"\n", - "\n", - "+ output_model_path = os.environ.get(\"PAI_OUTPUT_MODEL\")\n", - "+ os.makedirs(output_model_path, exist_ok=True)\n", - "+\n", - "+ m = torch.jit.script(model)\n", - "+ m.save(os.path.join(output_model_path, \"mnist_cnn.pt\"))\n", - "\n", - "```\n", - "\n", - "PAI提供的预置[PyTorch Processor](https://help.aliyun.com/document_detail/470458.html) 在创建服务时,要求输入的模型是[TorchScript 格式](https://pytorch.org/docs/stable/jit.html) 。在本示例中,我们将模型导出为 `TorchScript格式` ,然后分别使用 `PyTorch Processor` 和镜像方式创建推理服务。\n" - ] - }, - { - "attachments": {}, - "cell_type": "markdown", - "metadata": {}, - "source": [ - "运行以下代码,创建一个训练脚本目录。" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": { - "pycharm": { - "name": "#%%\n" - }, - "tags": [ - "hide-cell" - ] - }, - "outputs": [], - "source": [ - "!mkdir -p train_src" - ] - }, - { - "attachments": {}, - "cell_type": "markdown", - "metadata": {}, - "source": [ - "将训练作业脚本保存到`train_src`训练脚本目录,完整的作业脚本如下:" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": { - "pycharm": { - "name": "#%%\n" - }, - "tags": [ - "hide-cell" - ] - }, - "outputs": [], - "source": [ - "%%writefile train_src/train.py\n", - "\n", - "# source: https://github.com/pytorch/examples/blob/main/mnist/main.py\n", - "from __future__ import print_function\n", - "\n", - "import argparse\n", - "import os\n", - "\n", - "import torch\n", - "import torch.nn as nn\n", - "import torch.nn.functional as F\n", - "import torch.optim as optim\n", - "from torch.optim.lr_scheduler import StepLR\n", - "from torchvision import datasets, transforms\n", - "\n", - "\n", - "class Net(nn.Module):\n", - " def __init__(self):\n", - " super(Net, self).__init__()\n", - " self.conv1 = nn.Conv2d(1, 32, 3, 1)\n", - " self.conv2 = nn.Conv2d(32, 64, 3, 1)\n", - " self.dropout1 = nn.Dropout(0.25)\n", - " self.dropout2 = nn.Dropout(0.5)\n", - " self.fc1 = nn.Linear(9216, 128)\n", 
- " self.fc2 = nn.Linear(128, 10)\n", - "\n", - " def forward(self, x):\n", - " x = self.conv1(x)\n", - " x = F.relu(x)\n", - " x = self.conv2(x)\n", - " x = F.relu(x)\n", - " x = F.max_pool2d(x, 2)\n", - " x = self.dropout1(x)\n", - " x = torch.flatten(x, 1)\n", - " x = self.fc1(x)\n", - " x = F.relu(x)\n", - " x = self.dropout2(x)\n", - " x = self.fc2(x)\n", - " output = F.log_softmax(x, dim=1)\n", - " return output\n", - "\n", - "\n", - "def train(args, model, device, train_loader, optimizer, epoch):\n", - " model.train()\n", - " for batch_idx, (data, target) in enumerate(train_loader):\n", - " data, target = data.to(device), target.to(device)\n", - " optimizer.zero_grad()\n", - " output = model(data)\n", - " loss = F.nll_loss(output, target)\n", - " loss.backward()\n", - " optimizer.step()\n", - " if batch_idx % args.log_interval == 0:\n", - " print(\n", - " \"Train Epoch: {} [{}/{} ({:.0f}%)]\\tLoss: {:.6f}\".format(\n", - " epoch,\n", - " batch_idx * len(data),\n", - " len(train_loader.dataset),\n", - " 100.0 * batch_idx / len(train_loader),\n", - " loss.item(),\n", - " )\n", - " )\n", - " if args.dry_run:\n", - " break\n", - "\n", - "\n", - "def test(model, device, test_loader):\n", - " model.eval()\n", - " test_loss = 0\n", - " correct = 0\n", - " with torch.no_grad():\n", - " for data, target in test_loader:\n", - " data, target = data.to(device), target.to(device)\n", - " output = model(data)\n", - " test_loss += F.nll_loss(\n", - " output, target, reduction=\"sum\"\n", - " ).item() # sum up batch loss\n", - " pred = output.argmax(\n", - " dim=1, keepdim=True\n", - " ) # get the index of the max log-probability\n", - " correct += pred.eq(target.view_as(pred)).sum().item()\n", - "\n", - " test_loss /= len(test_loader.dataset)\n", - "\n", - " print(\n", - " \"\\nTest set: Average loss: {:.4f}, Accuracy: {}/{} ({:.0f}%)\\n\".format(\n", - " test_loss,\n", - " correct,\n", - " len(test_loader.dataset),\n", - " 100.0 * correct / len(test_loader.dataset),\n", - 
" )\n", - " )\n", - "\n", - "\n", - "def main():\n", - " # Training settings\n", - " parser = argparse.ArgumentParser(description=\"PyTorch MNIST Example\")\n", - " parser.add_argument(\n", - " \"--batch-size\",\n", - " type=int,\n", - " default=64,\n", - " metavar=\"N\",\n", - " help=\"input batch size for training (default: 64)\",\n", - " )\n", - " parser.add_argument(\n", - " \"--test-batch-size\",\n", - " type=int,\n", - " default=1000,\n", - " metavar=\"N\",\n", - " help=\"input batch size for testing (default: 1000)\",\n", - " )\n", - " parser.add_argument(\n", - " \"--epochs\",\n", - " type=int,\n", - " default=14,\n", - " metavar=\"N\",\n", - " help=\"number of epochs to train (default: 14)\",\n", - " )\n", - " parser.add_argument(\n", - " \"--lr\",\n", - " type=float,\n", - " default=1.0,\n", - " metavar=\"LR\",\n", - " help=\"learning rate (default: 1.0)\",\n", - " )\n", - " parser.add_argument(\n", - " \"--gamma\",\n", - " type=float,\n", - " default=0.7,\n", - " metavar=\"M\",\n", - " help=\"Learning rate step gamma (default: 0.7)\",\n", - " )\n", - " parser.add_argument(\n", - " \"--no-cuda\", action=\"store_true\", default=False, help=\"disables CUDA training\"\n", - " )\n", - " parser.add_argument(\n", - " \"--dry-run\",\n", - " action=\"store_true\",\n", - " default=False,\n", - " help=\"quickly check a single pass\",\n", - " )\n", - " parser.add_argument(\n", - " \"--seed\", type=int, default=1, metavar=\"S\", help=\"random seed (default: 1)\"\n", - " )\n", - " parser.add_argument(\n", - " \"--log-interval\",\n", - " type=int,\n", - " default=10,\n", - " metavar=\"N\",\n", - " help=\"how many batches to wait before logging training status\",\n", - " )\n", - " parser.add_argument(\n", - " \"--save-model\",\n", - " action=\"store_true\",\n", - " default=False,\n", - " help=\"For Saving the current Model\",\n", - " )\n", - " args = parser.parse_args()\n", - " use_cuda = not args.no_cuda and torch.cuda.is_available()\n", - "\n", - " 
torch.manual_seed(args.seed)\n", - "\n", - " device = torch.device(\"cuda\" if use_cuda else \"cpu\")\n", - "\n", - " train_kwargs = {\"batch_size\": args.batch_size}\n", - " test_kwargs = {\"batch_size\": args.test_batch_size}\n", - " if use_cuda:\n", - " cuda_kwargs = {\"num_workers\": 1, \"pin_memory\": True, \"shuffle\": True}\n", - " train_kwargs.update(cuda_kwargs)\n", - " test_kwargs.update(cuda_kwargs)\n", - "\n", - " transform = transforms.Compose(\n", - " [transforms.ToTensor(), transforms.Normalize((0.1307,), (0.3081,))]\n", - " )\n", - "\n", - " data_path = os.environ.get(\"PAI_INPUT_TRAIN_DATA\", \"../data\")\n", - " dataset1 = datasets.MNIST(data_path, train=True, download=True, transform=transform)\n", - " dataset2 = datasets.MNIST(data_path, train=False, transform=transform)\n", - " train_loader = torch.utils.data.DataLoader(dataset1, **train_kwargs)\n", - " test_loader = torch.utils.data.DataLoader(dataset2, **test_kwargs)\n", - "\n", - " model = Net().to(device)\n", - " optimizer = optim.Adadelta(model.parameters(), lr=args.lr)\n", - "\n", - " scheduler = StepLR(optimizer, step_size=1, gamma=args.gamma)\n", - " for epoch in range(1, args.epochs + 1):\n", - " train(args, model, device, train_loader, optimizer, epoch)\n", - " test(model, device, test_loader)\n", - " scheduler.step()\n", - "\n", - " # 保存模型\n", - " save_model(model)\n", - "\n", - "\n", - "def save_model(model):\n", - " \"\"\"将模型转为TorchScript,保存到指定路径.\"\"\"\n", - " output_model_path = os.environ.get(\"PAI_OUTPUT_MODEL\", \"./model/\")\n", - " os.makedirs(output_model_path, exist_ok=True)\n", - "\n", - " m = torch.jit.script(model)\n", - " m.save(os.path.join(output_model_path, \"mnist_cnn.pt\"))\n", - "\n", - "\n", - "if __name__ == \"__main__\":\n", - " main()" - ] - }, - { - "attachments": {}, - "cell_type": "markdown", - "metadata": { - "pycharm": { - "name": "#%% md\n" - } - }, - "source": [ - "## Step4: 提交训练作业\n", - "\n", - 
"`Estimator`支持用户使用本地的训练脚本,以指定的镜像在云上执行训练作业。通过`Estimator`,我们将以上准备的训练作业脚本提交到PAI,使用PAI提供的PyTorch镜像执行训练任务。\n" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": { - "pycharm": { - "name": "#%%\n" - } - }, - "outputs": [], - "source": [ - "from pai.estimator import Estimator\n", - "from pai.image import retrieve\n", - "\n", - "\n", - "# 使用PAI提供的PyTorch的GPU训练镜像\n", - "image_uri = retrieve(\n", - " \"PyTorch\",\n", - " framework_version=\"1.8PAI\",\n", - " accelerator_type=\"GPU\",\n", - ").image_uri\n", - "\n", - "print(image_uri)\n", - "\n", - "\n", - "# 配置训练作业\n", - "est = Estimator(\n", - " # 训练作业启动命令\n", - " command=\"python train.py --epochs 5 --batch-size 256 --lr 0.5\",\n", - " # 需要上传的代码文件\n", - " source_dir=\"./train_src/\",\n", - " # 训练作业镜像\n", - " image_uri=image_uri,\n", - " # 机器配置\n", - " # PAI的训练服务支持机器实例类型请见文档:[公共资源组实例和定价](https://help.aliyun.com/document_detail/171758.html?#section-55y-4tq-84y)\n", - " instance_type=\"ecs.gn6i-c4g1.xlarge\", # 4vCPU 15GB 1*NVIDIA T4\n", - " # 训练作业的Metric捕获配置\n", - " # 训练服务支持从训练作业输出日志中(训练脚本打印的标准输出和标准错误输出),以正则表达式匹配的方式捕获训练作业Metrics信息。\n", - " metric_definitions=[\n", - " {\n", - " \"Name\": \"loss\",\n", - " \"Regex\": r\".*loss=([-+]?[0-9]*.?[0-9]+(?:[eE][-+]?[0-9]+)?).*\",\n", - " },\n", - " ],\n", - " base_job_name=\"pytorch_mnist\",\n", - ")" - ] - }, - { - "attachments": {}, - "cell_type": "markdown", - "metadata": {}, - "source": [ - "\n", - "`estimator.fit`方法将用户的训练作业提交到PAI上执行。任务提交之后,SDK会打印作业详情页链接和训练作业的日志,等待作业执行结束。\n", - "\n", - "当用户需要直接使用OSS上数据,可以通过`estimator.fit`方法的`inputs`参数传递。通过`inputs`传递数据存储路径会被挂载到目录下,用户的训练脚本可以通过读取本地文件的方式加载数据。" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "# 使用.fit方法提交训练作业\n", - "est.fit(\n", - " inputs={\n", - " # 训练作业的输入数据,每一个Key,Value对是一个Channel,用户可以通过环境变量PAI_INPUT_{ChannelNameUpperCase}获取对应的数据路径\n", - " # 例如以下的train_data,训练的脚本中可以通过`PAI_INPUT_TRAIN_DATA`获取数据挂载后的路径.\n", - " \"train_data\": data_uri,\n", - 
" }\n", - ")\n", - "\n", - "# 训练作业产出的模型路径\n", - "print(\"TrainingJob output model data:\")\n", - "print(est.model_data())" - ] - }, - { - "attachments": {}, - "cell_type": "markdown", - "metadata": { - "pycharm": { - "name": "#%% md\n" - } - }, - "source": [ - "## Step5: 部署推理服务\n", - "\n", - "在训练作业结束之后,我们可以使用`estimator.model_data()`方法拿到训练作业产出模型的OSS路径。下面的流程中,我们将训练产出的模型部署到PAI创建在线推理服务。\n", - "\n", - "部署推理服务的主要流程包括:\n", - "\n", - "- 通过`InferenceSpec`描述如何使用模型构建推理服务\n", - "\n", - "用户可以选择使用Processor或是自定义镜像的模式进行模型部署。以下示例中将分别使用两种方式部署获得的PyTorch模型。\n", - "\n", - "- 通过`Model.deploy`方法,配置服务的使用资源,服务名称,等信息,创建推理服务。\n", - "\n", - "对于部署推理服务的详细介绍,可以见: [文档:部署推理服务](https://pai-sdk.oss-cn-shanghai.aliyuncs.com/pai/doc/latest/user-guide/model.html)" - ] - }, - { - "attachments": {}, - "cell_type": "markdown", - "metadata": {}, - "source": [ - "### Processor 模式部署\n", - "\n", - "[Processor](https://help.aliyun.com/document_detail/111029.html) 是PAI对于推理服务程序包的抽象描述,他负责加载模型并启动模型推理服务。模型推理服务会暴露API支持用户进行调用。\n", - "\n", - "PAI提供了预置[PyTorch Processor](https://help.aliyun.com/document_detail/470458.html),支持用户方便地将TorchScript格式的模型部署到PAI,创建推理服务。\n", - "\n", - "以下示例代码中,我们通过PyTorch Processor将训练产出的模型部署为一个推理服务。" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": { - "pycharm": { - "name": "#%%\n" - }, - "tags": [] - }, - "outputs": [], - "source": [ - "from pai.model import Model, InferenceSpec\n", - "from pai.predictor import Predictor\n", - "from pai.common.utils import random_str\n", - "\n", - "\n", - "m = Model(\n", - " model_data=est.model_data(),\n", - " # 使用PAI提供的PyTorch Processor\n", - " inference_spec=InferenceSpec(processor=\"pytorch_cpu_1.10\"),\n", - ")\n", - "\n", - "p: Predictor = m.deploy(\n", - " service_name=\"tutorial_pt_mnist_proc_{}\".format(random_str(6)),\n", - " instance_type=\"ecs.c6.xlarge\",\n", - ")\n", - "\n", - "print(p.service_name)\n", - "print(p.service_status)" - ] - }, - { - "attachments": {}, - "cell_type": "markdown", - "metadata": { - "pycharm": 
{ - "name": "#%% md\n" - } - }, - "source": [ - "`Model.deploy`返回的`Predictor`对象指向创建的推理服务,可以通过`Predictor.predict`方法发送预测请求给到服务,拿到预测结果。\n", - "\n", - "我们使用`numpy`构建了一个测试样本数据,发送给推理服务。" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": { - "pycharm": { - "name": "#%%\n" - } - }, - "outputs": [], - "source": [ - "import numpy as np\n", - "\n", - "# # 以上保存TorchScript模型要求输入为 Float32, 数据格式格式的形状为 (BatchSize, Channel, Height, Width)\n", - "dummy_input = np.random.rand(2, 1, 28, 28).astype(np.float32)\n", - "\n", - "# np.random.rand(1, 1, 28, 28).dtype\n", - "res = p.predict(dummy_input)\n", - "print(res)\n", - "\n", - "print(np.argmax(res, 1))" - ] - }, - { - "attachments": {}, - "cell_type": "markdown", - "metadata": { - "pycharm": { - "name": "#%% md\n" - } - }, - "source": [ - "在测试完成之后,可以通过`Predictor.delete_service`删除推理服务。" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": { - "pycharm": { - "name": "#%%\n" - } - }, - "outputs": [], - "source": [ - "p.delete_service()" - ] - }, - { - "attachments": {}, - "cell_type": "markdown", - "metadata": {}, - "source": [ - "### 镜像部署\n", - "\n", - "Processor模式启动的推理服务性能优越,适合于对于性能较为敏感的场景。对于一些需要灵活自定义的场景,例如模型使用了一些第三方的依赖,或是推理服务需要有前处理和后处理,用户可以通过镜像部署的方式实现。\n", - "\n", - "SDK提供了`pai.model.container_serving_spec()`方法,支持用户使用本地的推理服务代码配合PAI提供的基础镜像的方式创建推理服务。\n", - "\n", - "在使用镜像部署之前,我们需要准备模型服务的代码,负责加载模型、拉起HTTP Server、处理用户的推理请求。我们将使用Flask编写一个模型服务的代码,示例如下:" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "# 准备推理代码保存目录\n", - "!mkdir -p infer_src" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "%%writefile infer_src/run.py\n", - "\n", - "\n", - "import json\n", - "from flask import Flask, request\n", - "from PIL import Image\n", - "import os\n", - "import torch\n", - "import torchvision.transforms as transforms\n", - "import numpy as np\n", - "import io\n", - "\n", - 
"app = Flask(__name__)\n", - "# 用户指定模型,默认会被加载到当前路径下。 \n", - "MODEL_PATH = \"/eas/workspace/model/\"\n", - "\n", - "device = torch.device(\"cuda:0\" if torch.cuda.is_available() else \"cpu\")\n", - "model = torch.jit.load(os.path.join(MODEL_PATH, \"mnist_cnn.pt\"), map_location=device).to(device)\n", - "transform = transforms.Compose(\n", - " [transforms.ToTensor(), transforms.Normalize((0.1307,), (0.3081,))]\n", - ")\n", - "\n", - "\n", - "@app.route(\"/\", methods=[\"POST\"])\n", - "def predict():\n", - " # 预处理图片数据\n", - " im = Image.open(io.BytesIO(request.data))\n", - " input_tensor = transform(im).to(device)\n", - " input_tensor.unsqueeze_(0)\n", - " # 使用模型进行推理\n", - " output_tensor = model(input_tensor)\n", - " pred_res =output_tensor.detach().cpu().numpy()[0] \n", - "\n", - " return json.dumps(pred_res.tolist())\n", - "\n", - "\n", - "if __name__ == '__main__':\n", - " app.run(host=\"0.0.0.0\", port=int(os.environ.get(\"LISTENING_PORT\", 8000)))\n" - ] - }, - { - "attachments": {}, - "cell_type": "markdown", - "metadata": {}, - "source": [ - "\n", - "\n", - "通过`pai.model.container_serving_spec`,我们基于本地脚本和PAI提供的`PyTorch`镜像创建了一个`InferenceSpec`对象。\n", - "\n", - "- 模型服务的代码和启动命令:\n", - " \n", - "用户指定的本地脚本目录source_dir会被上传到OSS,然后挂载到服务容器(默认到 /ml/usercode目录)。\n", - "\n", - "- 推理服务镜像:\n", - "\n", - "PAI 提供了基础的推理镜像支持用户使用,用户可以通过`pai.image.retrieve`方法,指定参数`image_scope=ImageScope.INFERENCE`获取PAI提供的推理镜像。\n", - "\n", - "- 模型服务的第三方依赖包:\n", - "\n", - "模型服务代码或是模型的依赖,可以通过`requirements`参数指定,相应的依赖会在服务程序启动前被安装到环境中。\n" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "from pai.model import InferenceSpec, container_serving_spec\n", - "from pai.image import retrieve, ImageScope\n", - "\n", - "torch_image_uri = retrieve(\n", - " framework_name=\"pytorch\", framework_version=\"1.12\", accelerator_type=\"CPU\"\n", - ").image_uri\n", - "\n", - "inf_spec = container_serving_spec(\n", - " command=\"python run.py\",\n", - " 
source_dir=\"./infer_src/\",\n", - " image_uri=torch_image_uri,\n", - " requirements=[\"flask==2.0.0\"],\n", - ")\n", - "print(inf_spec.to_dict())" - ] - }, - { - "attachments": {}, - "cell_type": "markdown", - "metadata": {}, - "source": [ - "使用训练作业输出的模型,以及以上的 InferenceSpec,我们将通过 Model.deploy API部署一个在线推理服务。" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "from pai.model import Model\n", - "from pai.common.utils import random_str\n", - "import numpy as np\n", - "\n", - "\n", - "m = Model(\n", - " model_data=est.model_data(),\n", - " inference_spec=inf_spec,\n", - ")\n", - "\n", - "predictor = m.deploy(\n", - " service_name=\"torch_mnist_script_container_{}\".format(random_str(6)),\n", - " instance_type=\"ecs.c6.xlarge\",\n", - ")" - ] - }, - { - "attachments": {}, - "cell_type": "markdown", - "metadata": {}, - "source": [ - "\n", - "我们准备一张 MNIST 测试图片,用于发送给到推理服务。\n" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": { - "pycharm": { - "name": "#%%\n" - }, - "tags": [ - "keep_output" - ] - }, - "outputs": [ - { - "data": { - "image/png": "iVBORw0KGgoAAAANSUhEUgAAABwAAAAcCAAAAABXZoBIAAABvklEQVR4nF2SO2tUURSFv73PPufMMzGG+EALhZhoaxBs7EWwCoiNhfgbLERrQSwshfwA7SyTIIJgJVgpBBEhEZuQQsIgE/XeubMtZu4wk9UuztrrcWAMEwACGQQSs9CUFARyApuhogKICSFgzZFaTZaoigwHolSo2GD6pYTRUTOoZbUmvXJodhoDMUJDwqyflAGYA6vNyrSntfXbq38e95a3PwQtAVSwjELm/v6hb30ZlL6Rwlg2gSLQvb7vn9bS3JuhP6EzFmsIEoF77ptB853Sd05oitRJrcXp5/5zY9Gwb95fR8dlKiCZh799K2Hdm4e7T1s6ZTIiZ/aq11hc2nF/Hw0h1wEj4awX50892vS/v/q3YIl5JrK0WgfeL32wu+c/WJyZKsLcaq/4+uziuXfFi0QkyuhsAIWwQIb5G+4PkPZk6gQSgNCE7mXvrUAgjQcx0Fh7Pzn0DgJGBLAKvHINPgj56GpRNigKnBLAXHDHpaI6YjkXw3/lZCrFAajoBvhcdFMJUk11FEzHE3/3a22yYFPsKK0m7vrHS3Qjx2EE48rLg7dEYvsYqSKCXnjVX2lrg9kPJpOiAVnIAP8B0Kx+GvoyGWQAAAAASUVORK5CYII=", - "text/plain": [ - "" - ] - }, - "metadata": {}, - "output_type": "display_data" - } - ], - "source": [ - "\n", - "!pip install -q pillow\n", - "\n", - "\n", - "import 
base64\n", - "from PIL import Image\n", - "from IPython import display\n", - "import io\n", - "\n", - "\n", - "# raw_data是一张MNIST图片,对应数字9\n", - "raw_data = base64.b64decode(b\"/9j/4AAQSkZJRgABAQAAAQABAAD/2wBDAAgGBgcGBQgHBwcJCQgKDBQNDAsLDBkSEw8UHRofHh0aHBwgJC4nICIsIxwcKDcpLDAxNDQ0Hyc5PTgyPC4zNDL/wAALCAAcABwBAREA/8QAHwAAAQUBAQEBAQEAAAAAAAAAAAECAwQFBgcICQoL/8QAtRAAAgEDAwIEAwUFBAQAAAF9AQIDAAQRBRIhMUEGE1FhByJxFDKBkaEII0KxwRVS0fAkM2JyggkKFhcYGRolJicoKSo0NTY3ODk6Q0RFRkdISUpTVFVWV1hZWmNkZWZnaGlqc3R1dnd4eXqDhIWGh4iJipKTlJWWl5iZmqKjpKWmp6ipqrKztLW2t7i5usLDxMXGx8jJytLT1NXW19jZ2uHi4+Tl5ufo6erx8vP09fb3+Pn6/9oACAEBAAA/APn+rVhpmoarP5GnWNzeTYz5dvE0jfkoJovNMv8ATmK3tjc2zByhE8TIQw6jkdR6VVq9oumPrWuWGlxyLG95cRwK7dFLMFyfzr3aXwp4ltAfB3gWwudI01JNuoa7eZhku5AMHafvFOw2Dn6ZJ4z4yeLk1HUbXwrZSSy2Oh5heeaQu88wG1mLHk4wR9c+1eXUqsVYMpIIOQR2r1D4QazqOs/FnSG1fVLi9ZI5vL+2TNKc+U2ApYnB7/hXml5LLNfXEsxLSvIzOSMEsTk1DRVnT7+60vULe/spmhureQSRSL1Vh0NWNd1mXX9ZuNUuLe2gmuCGkS2QohbABbBJwTjJ9yelZ1f/2Q==\")\n", - "\n", - "im = Image.open(io.BytesIO(raw_data))\n", - "\n", - "display.display(im)\n" - ] - }, - { - "attachments": {}, - "cell_type": "markdown", - "metadata": {}, - "source": [ - "推理服务使用 HTTP 请求体内的数据作为输入的图片,SDK 的 `raw_predict` 方法接受 bytes 数据类型的请求,通过 POST 方法,在请求内带上用户推理数据,发送给到推理服务。" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "import json\n", - "from pai.predictor import RawResponse\n", - "\n", - "resp: RawResponse = predictor.raw_predict(data=raw_data)\n", - "print(resp.json())\n", - "\n", - "print(np.argmax(resp.json()))" - ] - }, - { - "attachments": {}, - "cell_type": "markdown", - "metadata": {}, - "source": [ - "测试完成之后可以删除服务。" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "predictor.delete_service()" - ] - } - ], - "metadata": { - "execution": { - "timeout": 1800 - }, - "kernelspec": { - "display_name": "Python 3", - "language": "python", - "name": "python3" - }, - 
"language_info": { - "codemirror_mode": { - "name": "ipython", - "version": 3 - }, - "file_extension": ".py", - "mimetype": "text/x-python", - "name": "python", - "nbconvert_exporter": "python", - "pygments_lexer": "ipython3", - "version": "3.8.16" - } - }, - "nbformat": 4, - "nbformat_minor": 0 -} diff --git a/docs/source/tutorial/stable_diffusion_lora/resource/dreambooth.jpeg b/docs/source/tutorial/stable_diffusion_lora/resource/dreambooth.jpeg deleted file mode 100644 index 6f60f55..0000000 Binary files a/docs/source/tutorial/stable_diffusion_lora/resource/dreambooth.jpeg and /dev/null differ diff --git a/docs/source/tutorial/stable_diffusion_lora/stable_diffusion_lora.ipynb b/docs/source/tutorial/stable_diffusion_lora/stable_diffusion_lora.ipynb deleted file mode 100644 index 679a203..0000000 --- a/docs/source/tutorial/stable_diffusion_lora/stable_diffusion_lora.ipynb +++ /dev/null @@ -1,750 +0,0 @@ -{ - "cells": [ - { - "attachments": {}, - "cell_type": "markdown", - "metadata": {}, - "source": [ - "# StableDiffusion模型LoRA微调\n", - "\n", - "[StableDiffusion](https://huggingface.co/runwayml/stable-diffusion-v1-5)是由StabilityAI、CompVis与Runway合作开发并开源的文本生成图像的模型。他可以直接用于文本生成图像的任务,也可以作为基础模型进行微调,从而从数据集上学习到新的风格,或是用于完成新的任务。本文将介绍通过在PAI完成LoRA微调StableDiffusion模型。\n", - "\n", - "## 背景介绍\n", - "\n", - "[LoRA(Low-Rank Adaption of Large Language Model)](https://arxiv.org/abs/2106.09685)是由微软提出的高效微调大语言模型的方法,他通过冻结原始模型参数,在模型上新增低秩矩阵作为可训练参数的方式微调模型。研究者发现,通过在Transformer块的Attention层上添加LoRA低秩矩阵对模型进行微调,能够获得与全参数微调水平相近的模型。相比于全参数的微调,LoRA有以下优点:\n", - "\n", - "- 训练的参数量小,计算资源消耗低,训练速度更快。\n", - " \n", - "- 对于计算资源/显存的要求更低,支持用户在消费级/中低端的GPU卡对大模型进行微调。\n", - "\n", - "- 冻结了原始模型参数,在训练过程中不容易发生灾难性遗忘。\n", - "\n", - "- 产出的模型较小,存储的成本较低,仅需推理时和原始的模型一同使用进行推理。\n", - "\n", - 
"后续有开发者,将其引入到[StableDiffsion模型的微调](https://github.com/cloneofsimo/lora)中,取得了不错的效果。HuggingFace提供的[Diffusers库](https://github.com/huggingface/diffusers)支持用户使用扩散模型进行训练或是推理,他支持用户使用LoRA微调扩散模型,并提供了相应的训练代码,支持[文生图](https://github.com/huggingface/diffusers/blob/main/examples/text_to_image/train_text_to_image_lora.py),以及[DreamBooth](https://github.com/huggingface/diffusers/blob/main/examples/dreambooth/train_dreambooth_lora.py)的LoRA训练。\n", - "\n", - "当前示例,我们将基于[Diffusers库提供的训练代码和文档](https://huggingface.co/docs/diffusers/training/overview),在PAI完成StableDiffusion v1.5模型的LoRA微调训练。\n" - ] - }, - { - "attachments": {}, - "cell_type": "markdown", - "metadata": {}, - "source": [ - "\n", - "## 准备工作\n", - "\n", - "### 安装PAI Python SDK\n", - "\n", - "安装PAI Python SDK,用于提交训练任务到PAI。" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "# 安装PAI Python SDK\n", - "!python -m pip install --upgrade alipai" - ] - }, - { - "attachments": {}, - "cell_type": "markdown", - "metadata": {}, - "source": [ - "\n", - "\n", - "SDK需要配置访问阿里云服务需要的AccessKey,以及当前使用的工作空间和OSS Bucket。在PAI SDK安装之后,通过在**命令行终端** 中执行以下命令,按照引导配置密钥、工作空间等信息。\n", - "\n", - "\n", - "```shell\n", - "\n", - "# 以下命令,请在 命令行终端 中执行.\n", - "\n", - "python -m pai.toolkit.config\n", - "\n", - "```\n", - "\n", - "我们可以执行以下代码,验证配置是否成功。" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "import pai\n", - "from pai.session import get_default_session\n", - "\n", - "print(pai.__version__)\n", - "\n", - "sess = get_default_session()\n", - "\n", - "# 配置成功之后,我们可以拿到工作空间的信息\n", - "assert sess.workspace_name is not None\n", - "assert sess.oss_bucket is not None" - ] - }, - { - "attachments": {}, - "cell_type": "markdown", - "metadata": {}, - "source": [ - "### 获取PAI提供的StableDiffusion模型\n", - "\n", - "PAI的公共模型仓库提供了StableDiffusion v1.5模型,用户可以通过以下代码获取模型的信息,用于后续的微调训练。\n" - ] - }, - { - "cell_type": "code", - "execution_count": null, - 
"metadata": {}, - "outputs": [], - "source": [ - "from pai.session import get_default_session\n", - "from pai.libs.alibabacloud_aiworkspace20210204.models import ListModelsRequest\n", - "\n", - "sess = get_default_session()\n", - "\n", - "# 获取PAI提供的StableDiffusion模型信息\n", - "resp = sess._acs_workspace_client.list_models(\n", - " request=ListModelsRequest(\n", - " provider=\"pai\",\n", - " model_name=\"stable_diffusion_v1.5\",\n", - " )\n", - ")\n", - "model = resp.body.models[0].latest_version\n", - "\n", - "# StableDiffusion 模型的OSS URI(公共读)\n", - "print(f\"StableDiffusion ModelUri: {model.uri}\")" - ] - }, - { - "attachments": {}, - "cell_type": "markdown", - "metadata": {}, - "source": [ - "## LoRA TextToImage微调训练\n", - "\n", - "通过LoRA训练StableDiffusion模型,可以快速,低成本地获得一个能够生成指定风格的模型。在以下示例中,我们将使用一个Demo的图像文本数据集,对StableDiffusion模型进行LoRA微调。\n" - ] - }, - { - "attachments": {}, - "cell_type": "markdown", - "metadata": {}, - "source": [ - "### 准备训练数据\n", - "\n", - "当前示例准备了一个简单的文本图片数据集在`train-data`目录下,包含训练的图片以及相应的标注文件(`metadata.jsonl`)。\n" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "\n", - "!ls -lh train-data/\n", - "!cat train-data/metadata.jsonl" - ] - }, - { - "attachments": {}, - "cell_type": "markdown", - "metadata": {}, - "source": [ - "我们需要将数据上传到OSS Bucket上,供训练作业使用。" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "from pai.common.oss_utils import upload\n", - "\n", - "train_data_uri = upload(\"./train-data/\", \"stable_diffusion_demo/text2image/train-data/\")\n", - "print(train_data_uri)" - ] - }, - { - "attachments": {}, - "cell_type": "markdown", - "metadata": {}, - "source": [ - "Diffuerser提供的训练脚本默认使用[ImageFolder](https://huggingface.co/docs/datasets/en/image_dataset#imagefolder)格式的数据集,用户可以参考以上的格式准备数据,更加详细的介绍可以见HuggingFace datasets的[ImageFolder数据集文档](https://huggingface.co/docs/datasets/en/image_dataset#imagefolder)。" - ] - }, - 
{ - "attachments": {}, - "cell_type": "markdown", - "metadata": {}, - "source": [ - "\n", - "### 准备训练作业脚本\n", - "\n", - "我们将使用Diffusers库提供的[训练作业脚本(train_text_to_image_lora.py)](https://github.com/huggingface/diffusers/blob/v0.17.1/examples/text_to_image/train_text_to_image_lora.py)完成LoRA训练。执行以下代码,我们将代码下载到本地,用于后续提交训练任务。\n" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "!mkdir -p train_lora\n", - "\n", - "# code source: https://github.com/huggingface/diffusers/blob/v0.17.1/examples/text_to_image/train_text_to_image_lora.py\n", - "!wget -P train_lora https://raw.githubusercontent.com/huggingface/diffusers/v0.17.1/examples/text_to_image/train_text_to_image_lora.py" - ] - }, - { - "attachments": {}, - "cell_type": "markdown", - "metadata": {}, - "source": [ - "我们提交的训练作业将使用PAI提供的PyTorch 1.12的GPU镜像运行,我们需要准备一个`requirements.txt`文件在训练代码目录下,以安装一些额外的依赖包。\n", - "\n", - "训练脚本目录提交到PAI上执行训练时,目录下的`requirements.txt`文件将被安装到作业环境中。" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "%%writefile train_lora/requirements.txt\n", - "\n", - "diffusers>=0.17.1\n", - "\n", - "# source: https://github.com/huggingface/diffusers/blob/v0.17.1/examples/text_to_image/requirements.txt\n", - "accelerate>=0.16.0,<=0.18.0\n", - "torchvision\n", - "transformers>=4.25.1,<5.0.0\n", - "datasets\n", - "ftfy\n", - "tensorboard\n", - "Jinja2" - ] - }, - { - "attachments": {}, - "cell_type": "markdown", - "metadata": {}, - "source": [ - "### 提交训练作业\n", - "\n", - "Diffuers提供的训练脚本,需要使用`Accelerate`工具启动,并通过命令行参数的方式,传递超参,预训练模型路径/ID,以及训练数据集地址。PAI的训练作业,支持通过环境变量的方式获取输入输出的数据/模型路径,以及训练作业超参。以下脚本中,我们通过环境变量的方式,传递超参、输入输出路径给到训练脚本。\n", - "\n" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "from pai.image import retrieve\n", - "\n", - "# 使用PAI提供的PyTorch 1.12 GPU镜像\n", - "image_uri = retrieve(\n", - " \"PyTorch\",\n", - " 
\"1.12\",\n", - " accelerator_type=\"GPU\",\n", - ").image_uri\n", - "\n", - "print(image_uri)\n", - "\n", - "\n", - "# 训练作业启动命令,通过环境变量的方式获取:\n", - "# a)输入输出的模型/数据路径\n", - "# b)训练任务的超参数\n", - "command = \"\"\"accelerate launch --mixed_precision=\"fp16\" train_text_to_image_lora.py \\\n", - " --pretrained_model_name_or_path=$PAI_INPUT_PRETRAINED_MODEL \\\n", - " --train_data_dir=$PAI_INPUT_TRAIN_DATA \\\n", - " --output_dir=$PAI_OUTPUT_MODEL \\\n", - " --logging_dir=$PAI_OUTPUT_TENSORBOARD \\\n", - " --dataloader_num_workers=8 \\\n", - " --resolution=512 --center_crop --random_flip \\\n", - " --train_batch_size=$PAI_HPS_TRAIN_BATCH_SIZE \\\n", - " --gradient_accumulation_steps=$PAI_HPS_GRADIENT_ACCUMULATION_STEPS \\\n", - " --max_train_steps=$PAI_HPS_MAX_TRAIN_STEPS \\\n", - " --learning_rate=$PAI_HPS_LEARNING_RATE \\\n", - " --checkpointing_steps=$PAI_HPS_CHECKPOINTING_STEPS \\\n", - " --max_grad_norm=1 \\\n", - " --lr_scheduler=\"cosine\" --lr_warmup_steps=0 \\\n", - " --validation_prompt=\"$PAI_HPS_VALIDATION_PROMPT\" \\\n", - " --validation_epochs=$PAI_HPS_VALIDATION_EPOCHS \\\n", - " --seed=$PAI_HPS_SEED\"\"\"\n", - "\n", - "\n", - "# 训练作业超参\n", - "hps = {\n", - " \"validation_prompt\": \"a photo of cat in a bucket\", # 验证模型的Prompt\n", - " \"validation_epochs\": 1, # 每隔50个epoch验证一次\n", - " \"max_train_steps\": 10, # 最大训练步数\n", - " \"learning_rate\": 1e-4, # 学习率\n", - " \"train_batch_size\": 2, # 训练batch size\n", - " \"gradient_accumulation_steps\": 1, # 梯度累积步数\n", - " \"checkpointing_steps\": 5, # 每隔100个step保存一次模型\n", - " \"seed\": 1337, # 随机种子\n", - "}" - ] - }, - { - "attachments": {}, - "cell_type": "markdown", - "metadata": {}, - "source": [ - "\n", - "以下代码中,我们使用`Estimator`类,指定训练作业使用的镜像,训练作业超参,输入数据路径等,将LoRA训练作业提交到PAI执行。\n", - "\n", - "对于使用SDK提交训练作业的详细介绍,用户可以参考文档: [提交训练作业](https://help.aliyun.com/document_detail/2261505.html)。" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "from 
pai.estimator import Estimator\n", - "from pai.image import retrieve\n", - "\n", - "# 使用PAI提供的PyTorch 1.12 GPU镜像\n", - "image_uri = retrieve(\n", - " \"PyTorch\",\n", - " \"1.12\",\n", - " accelerator_type=\"GPU\",\n", - ").image_uri\n", - "\n", - "print(image_uri)\n", - "\n", - "\n", - "# 训练作业启动命令,通过环境变量的方式获取:\n", - "# a)输入输出的模型/数据路径\n", - "# b)训练任务的超参数\n", - "\n", - "command = \"\"\"accelerate launch --mixed_precision=\"fp16\" train_text_to_image_lora.py \\\n", - " --pretrained_model_name_or_path=$PAI_INPUT_PRETRAINED_MODEL \\\n", - " --train_data_dir=$PAI_INPUT_TRAIN_DATA \\\n", - " --output_dir=$PAI_OUTPUT_MODEL \\\n", - " --logging_dir=$PAI_OUTPUT_TENSORBOARD \\\n", - " --dataloader_num_workers=8 \\\n", - " --resolution=512 --center_crop --random_flip \\\n", - " --train_batch_size=$PAI_HPS_TRAIN_BATCH_SIZE \\\n", - " --gradient_accumulation_steps=$PAI_HPS_GRADIENT_ACCUMULATION_STEPS \\\n", - " --max_train_steps=$PAI_HPS_MAX_TRAIN_STEPS \\\n", - " --learning_rate=$PAI_HPS_LEARNING_RATE \\\n", - " --checkpointing_steps=$PAI_HPS_CHECKPOINTING_STEPS \\\n", - " --max_grad_norm=1 \\\n", - " --lr_scheduler=\"cosine\" --lr_warmup_steps=0 \\\n", - " --validation_prompt=\"$PAI_HPS_VALIDATION_PROMPT\" \\\n", - " --validation_epochs=$PAI_HPS_VALIDATION_EPOCHS \\\n", - " --seed=$PAI_HPS_SEED\"\"\"\n", - "\n", - "\n", - "# 训练作业超参\n", - "hps = {\n", - " \"validation_prompt\": \"a photo of cat in a bucket\", # 验证模型的Prompt\n", - " \"validation_epochs\": 1, # 每隔50个epoch验证一次\n", - " \"max_train_steps\": 10, # 最大训练步数\n", - " \"learning_rate\": 1e-4, # 学习率\n", - " \"train_batch_size\": 2, # 训练batch size\n", - " \"gradient_accumulation_steps\": 1, # 梯度累积步数\n", - " \"checkpointing_steps\": 5, # 每隔100个step保存一次模型\n", - " \"seed\": 1337, # 随机种子\n", - "}\n", - "\n", - "\n", - "est = Estimator(\n", - " image_uri=image_uri, # 训练作业使用的镜像\n", - " source_dir=\"train_lora\", # 训练代码路径,代码会被上传,并准备到训练作业环境中\n", - " command=command, # 训练任务启动命令\n", - " instance_type=\"ecs.gn6i-c4g1.xlarge\", # 4 
vCPU, 16 GiB 内存, 1 x NVIDIA T4 GPU\n", - " base_job_name=\"sd_lora_t2i_\", # 训练作业名称前缀\n", - " hyperparameters=hps, # 作业超参,训练命令和脚本可以通过 `PAI_HPS_{HP_NAME_UPPER_CASE}` 环境变量,或是读取`/ml/input/config/hpyerparameters.json`文件获取\n", - ")" - ] - }, - { - "attachments": {}, - "cell_type": "markdown", - "metadata": {}, - "source": [ - "\n", - "使用`inputs`参数指定准备到训练作业环境的模型和数据,提交训练作业。 \n", - "\n", - "`inputs`参数是一个字典,Key是输入的名称,Value是输入数据的存储路径(例如OSS URI)。相应的数据会被准备到作业执行环境中(通过挂载的方式),训练作业脚本,能够通过环境变量`PAI_INPUT_{KeyUpperCase}`获取到输入数据的路径,通过读取本地文件的方式读取预训练模型和数据。" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "print(\"Input PreTrainedModel: \", model.uri)\n", - "print(\"Input TrainData: \", train_data_uri)\n", - "\n", - "\n", - "# 提交训练作业\n", - "est.fit(\n", - " inputs={\n", - " \"pretrained_model\": model.uri,\n", - " \"train_data\": train_data_uri,\n", - " },\n", - " wait=True,\n", - ")" - ] - }, - { - "attachments": {}, - "cell_type": "markdown", - "metadata": {}, - "source": [ - "在启动命令中,我们使用`--output_dir=$PAI_OUTPUT_MODEL`,让训练脚本将模型写出到指定的输出目录中。对应的模型数据会被保存到用户的OSS Bucket中,我们可以通过`est.model_data()`获得输出的模型的OSS URI。\n" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "import os\n", - "from pai.common.oss_utils import download\n", - "\n", - "print(\"OutputModel Path: \", est.model_data())\n", - "lora_weight_uri = os.path.join(est.model_data(), \"pytorch_lora_weight.bin\")\n", - "lora_model_path = download(oss_path=lora_weight_uri, local_path=\"./lora_model/\")" - ] - }, - { - "attachments": {}, - "cell_type": "markdown", - "metadata": {}, - "source": [ - "\n", - "以上训练获得LoRA模型,可以使用diffuser的推理pipeline加载使用:\n", - "\n", - "```python\n", - "# StableDiffusionPipeline加载LoRA模型\n", - "\n", - "\n", - "import torch\n", - "from diffusers import StableDiffusionPipeline\n", - "\n", - "# 加载基础模型\n", - "model_id_or_path = \"\"\n", - "pipe = 
StableDiffusionPipeline.from_pretrained(model_id_or_path, torch_dtype=torch.float16)\n", - "\n", - "# 加载LoRA模型\n", - "pipe.unet.load_attn_procs(lora_model_path)\n", - "\n", - "# 使用推理pipeline\n", - "image = pipe(\n", - " \"A pokemon with blue eyes.\", num_inference_steps=25, guidance_scale=7.5,\n", - " cross_attention_kwargs={\"scale\": 0.5},\n", - ").images[0]\n", - "\n", - "\n", - "```\n", - "\n", - "或则用户也可以将其转为safetensor格式,在StableDiffusiuson WebUI中使用。\n", - "\n", - "\n", - "```python\n", - "import torch\n", - "from safetensors.torch import save_file\n", - "\n", - "# 加载模型\n", - "lora_model = torch.load(lora_model_bin_path, map_location=\"cpu\")\n", - "\n", - "# 转换为safetensor格式\n", - "save_file(lora_model, \"lora.safetensors\")\n", - "\n", - "```" - ] - }, - { - "attachments": {}, - "cell_type": "markdown", - "metadata": {}, - "source": [ - "## LoRA && DreamBooth微调训练\n", - "\n", - "### DreamBooth简介\n", - "\n", - "DreamBooth是Google的研究人员于2022年提出的技术,支持在少量的图片上进行训练,然后将自定义的主题注入到扩散模型中。\n", - "\n", - "![](./resource/dreambooth.jpeg)\n", - "\n", - "图片来源: https://dreambooth.github.io/\n", - "\n", - "直接使用少量的图片文本数据集对扩散模型进行训练容易导致过拟合,或是语言漂移。DreamBooth使用以下方式避免了模型的退化:\n", - "\n", - "- 用户需要为新的主题选择一个罕见的词(标识符),模型将在训练过程中将这个词和图片的主题进行关联。\n", - "\n", - "- 为了避免过拟合和语言漂移,微调过程中,使用相同类别的图片参与训练(这些图片可以由扩散模型生成)。\n", - "\n", - "对于DreamBooth的详细介绍,用户可以参考[DreamBooth的博客](https://dreambooth.github.io/),以及[HuggingFace博客](https://huggingface.co/blog/dreambooth)对于DreamBooth的介绍。\n", - "\n", - "当通过DreamBooth训练扩散模型时,用户可以选择进行普通的微调(直接微调原始的模型参数),也可以使用LoRA的方式进行微调,在以下示例中,我们将使用LoRA的方式,进行DreamBooth训练。\n" - ] - }, - { - "attachments": {}, - "cell_type": "markdown", - "metadata": {}, - "source": [ - "### 准备训练数据集\n", - "\n", - "为了训练DreamBooth,用户需要准备特定风格的图片数据集,当前示例中,我们准备了数据集在`sks-dog`目录下。\n", - "\n", - "通过以下代码,我们将将数据集上传到OSS上,供训练作业使用。" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "from pai.common.oss_utils import upload\n", - "\n", - "train_data_uri = 
upload(\"sks-dog\", \"stable_diffusion/dreambooth/train-sks-dog/\")" - ] - }, - { - "attachments": {}, - "cell_type": "markdown", - "metadata": {}, - "source": [ - "### 准备训练代码\n", - "\n", - "我们使用HuggingFace Diffusers库提供的训练脚本,通过LoRA && DreamBooth方式训练扩散模型。通过以下代码,我们下载训练脚本到本地。" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "# 创建训练脚本保存路径\n", - "!mkdir -p train_dreambooth/\n", - "\n", - "# 下载HuggingFace diffusers(v1.17.1)库提供的示例代码(因为访问GitHub的网络并不稳定,用户当出现下载失败,可以多尝试几次)\n", - "!wget https://raw.githubusercontent.com/huggingface/diffusers/v0.17.1/examples/dreambooth/train_dreambooth_lora.py -P train_dreambooth/" - ] - }, - { - "attachments": {}, - "cell_type": "markdown", - "metadata": {}, - "source": [ - "训练作业将使用PAI提供的PyTorch镜像运行脚本,我们需要通过以下的`requirements.txt`安装训练脚本依赖的库。" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "%%writefile train_dreambooth/requirements.txt\n", - "# %%writefile 指令会将当前内容写入到 train_dreambooth/requirements.txt 文件中\n", - "\n", - "diffusers>=0.17.1\n", - "\n", - "# source: https://github.com/huggingface/diffusers/blob/main/examples/dreambooth/requirements.txt\n", - "accelerate>=0.16.0,<=0.18.0 # diffusers 提供的示例代码(v0.17.1)无法运行在accelerate>=0.18.0上.\n", - "torchvision\n", - "transformers>=4.25.1,<5.0.0\n", - "ftfy\n", - "tensorboard\n", - "Jinja2" - ] - }, - { - "attachments": {}, - "cell_type": "markdown", - "metadata": {}, - "source": [ - "## 提交训练作业\n", - "\n", - "通过以下代码,我们使用PAI Python SDK,提交训练作业到PAI。" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "from pai.estimator import Estimator\n", - "from pai.image import retrieve\n", - "\n", - "image_uri = retrieve(\n", - " \"PyTorch\",\n", - " \"latest\",\n", - " accelerator_type=\"GPU\",\n", - ").image_uri\n", - "\n", - "\n", - "# 训练作业启动命令,通过环境变量的方式获取:\n", - "# a)输入输出的模型/数据路径\n", - "# b)训练任务的超参数\n", - "command = 
\"\"\"accelerate launch train_dreambooth_lora.py \\\n", - " --pretrained_model_name_or_path=$PAI_INPUT_PRETRAINED_MODEL \\\n", - " --instance_data_dir=$PAI_INPUT_TRAIN_DATA \\\n", - " --output_dir=$PAI_OUTPUT_MODEL \\\n", - " --logging_dir=$PAI_OUTPUT_TENSORBOARD \\\n", - " --instance_prompt=\"$PAI_HPS_INSTANCE_PROMPT\" \\\n", - " --resolution=512 \\\n", - " --train_batch_size=$PAI_HPS_TRAIN_BATCH_SIZE \\\n", - " --gradient_accumulation_steps=$PAI_HPS_GRADIENT_ACCUMULATION_STEPS \\\n", - " --checkpointing_steps=$PAI_HPS_CHECKPOINTING_STEPS \\\n", - " --learning_rate=$PAI_HPS_LEARNING_RATE \\\n", - " --lr_scheduler=\"constant\" \\\n", - " --lr_warmup_steps=0 \\\n", - " --max_train_steps=$PAI_HPS_MAX_TRAIN_STEPS \\\n", - " --validation_prompt=\"$PAI_HPS_VALIDATION_PROMPT\" \\\n", - " --validation_epochs=$PAI_HPS_VALIDATION_EPOCHS \\\n", - " --seed=\"0\"\n", - " \"\"\"\n", - "\n", - "# 训练作业超参\n", - "hps = {\n", - " \"instance_prompt\": \"a photo of sks dog\", # 训练的图片数据文本使用的标注Prompt。这里的sks是我们使用的数据集的特定风格标识符。\n", - " \"validation_prompt\": \"a photo of sks dog in a bucket\", # 验证模型的Prompt\n", - " # \"class_prompt\": \"a photo of dog\", # 用于生成类别图片数据,避免模型过拟合&&语言偏移\n", - " \"validation_epochs\": 50, # 每隔50个epoch验证一次\n", - " \"max_train_steps\": 500, # 最大训练步数\n", - " \"learning_rate\": 1e-4, # 学习率\n", - " \"train_batch_size\": 1, # 训练batch size\n", - " \"gradient_accumulation_steps\": 1, # 梯度累积步数\n", - " \"checkpointing_steps\": 100, # 每隔100个step保存一次模型\n", - "}\n", - "\n", - "\n", - "est = Estimator(\n", - " image_uri=image_uri, # 训练作业使用的镜像\n", - " source_dir=\"train_dreambooth\", # 训练代码路径,代码会被上传,并准备到训练作业环境中\n", - " command=command, # 训练任务启动命令\n", - " instance_type=\"ecs.gn6i-c4g1.xlarge\", # 4 vCPU, 16 GiB 内存, 1 x NVIDIA T4 GPU\n", - " base_job_name=\"sd_lora_dreambooth_\", # 训练作业名称前缀\n", - " hyperparameters=hps, # 作业超参,训练命令和脚本可以通过 `PAI_HPS_{HP_NAME_UPPER_CASE}` 环境变量,或是读取`/ml/input/config/hpyerparameters.json`文件获取\n", - ")" - ] - }, - { - "cell_type": "code", - 
"execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "print(\"Input PreTrainedModel: \", model.uri)\n", - "print(\"Input TrainData: \", train_data_uri)\n", - "\n", - "est.fit(\n", - " inputs={\n", - " \"pretrained_model\": model.uri,\n", - " \"train_data\": train_data_uri,\n", - " },\n", - " wait=True,\n", - ")" - ] - }, - { - "attachments": {}, - "cell_type": "markdown", - "metadata": {}, - "source": [ - "训练任务会在输出目录下,生成一个`pytorch_lora_weights.bin`的模型文件,相应的文件会被上传准备到用户的OSS中,用户可以通过以下的代码,将模型文件下载到本地。" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "import os\n", - "import posixpath\n", - "\n", - "from pai.common.oss_utils import download\n", - "\n", - "# 输出模型路径\n", - "output_lora_model = posixpath.join(est.model_data(), \"pytorch_lora_weights.bin\")\n", - "print(\"OutputModel: \", output_lora_model)\n", - "\n", - "model_path = download(output_lora_model, \"./lora_dreambooth_model/\")" - ] - }, - { - "attachments": {}, - "cell_type": "markdown", - "metadata": {}, - "source": [ - "获得的LoRA模型,用户可以通过Diffuser提供的API,在推理pipeline加载使用,用户可以参考diffuser的文档:[DreamBooth Inference](https://huggingface.co/docs/diffusers/training/lora#dreambooth-inference)。" - ] - }, - { - "attachments": {}, - "cell_type": "markdown", - "metadata": {}, - "source": [ - "## 结语\n", - "\n", - "通过当前示例,我们介绍了如何基于HuggingFace diffusers库,在PAI上完成StableDiffusion模型的LoRA微调训练。用户可以通过Diffuers库的API,直接在推理Pipeline中加载使用这些LoRA模型,也可以将模型转换Safetensors格式,用于StableDiffusionWebUI中。\n", - "\n", - "除了对于LoRA的支持,Diffusers库支持对于直接对扩散模型微调,也支持包括TextInversion, ControlNet, InstructPix2Pix等方式训练扩散模型,并且提供了相应的训练脚本和教程。用户同样可以参考本示例,在PAI运行这些训练任务。\n", - "\n" - ] - }, - { - "attachments": {}, - "cell_type": "markdown", - "metadata": {}, - "source": [ - "## 参考\n", - "\n", - "- HuggingFace LoRa Tutorial: https://huggingface.co/docs/diffusers/training/lora#texttoimage\n", - "\n", - "- HuggingFace LoRA Blog: https://huggingface.co/blog/lora\n", - "\n", - "- Google 
DreamBooth Blog:https://dreambooth.github.io/" - ] - } - ], - "metadata": { - "kernelspec": { - "display_name": "base", - "language": "python", - "name": "python3" - }, - "language_info": { - "codemirror_mode": { - "name": "ipython", - "version": 3 - }, - "file_extension": ".py", - "mimetype": "text/x-python", - "name": "python", - "nbconvert_exporter": "python", - "pygments_lexer": "ipython3", - "version": "3.8.3" - } - }, - "nbformat": 4, - "nbformat_minor": 2 -} diff --git a/docs/source/tutorial/stable_diffusion_lora/train-data/cat1.jpg b/docs/source/tutorial/stable_diffusion_lora/train-data/cat1.jpg deleted file mode 100644 index 89d760d..0000000 Binary files a/docs/source/tutorial/stable_diffusion_lora/train-data/cat1.jpg and /dev/null differ diff --git a/docs/source/tutorial/stable_diffusion_lora/train-data/cat2.jpg b/docs/source/tutorial/stable_diffusion_lora/train-data/cat2.jpg deleted file mode 100644 index 13eec49..0000000 Binary files a/docs/source/tutorial/stable_diffusion_lora/train-data/cat2.jpg and /dev/null differ diff --git a/docs/source/tutorial/stable_diffusion_lora/train-data/cat3.jpg b/docs/source/tutorial/stable_diffusion_lora/train-data/cat3.jpg deleted file mode 100644 index 48aba20..0000000 Binary files a/docs/source/tutorial/stable_diffusion_lora/train-data/cat3.jpg and /dev/null differ diff --git a/docs/source/tutorial/stable_diffusion_lora/train-data/metadata.jsonl b/docs/source/tutorial/stable_diffusion_lora/train-data/metadata.jsonl deleted file mode 100644 index 8ad3b35..0000000 --- a/docs/source/tutorial/stable_diffusion_lora/train-data/metadata.jsonl +++ /dev/null @@ -1,3 +0,0 @@ -{"file_name": "cat1.jpg", "text": "cute cat"} -{"file_name": "cat2.jpg", "text": "a little cat"} -{"file_name": "cat3.jpg", "text": "a little cat"} diff --git a/docs/source/tutorial/tensorboard/tensorboard.ipynb b/docs/source/tutorial/tensorboard/tensorboard.ipynb deleted file mode 100644 index 6dd3855..0000000 --- 
a/docs/source/tutorial/tensorboard/tensorboard.ipynb +++ /dev/null @@ -1,245 +0,0 @@ -{ - "cells": [ - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "# 使用TensorBoard可视化训练过程\n", - "\n", - "TensorBoard是一个用于追踪、可视化、分析模型训练过程和训练结果的工具,它提供了多种可视化功能,可以与PyTorch、TensorFlow、Keras、Huggingface transformers、ModelScope等机器学习框架一起使用,帮助用户了解模型的训练过程和性能。\n", - "\n", - "PAI提供了TensorBoard服务,支持用户在PAI创建TensorBoard应用,用于查看训练作业输出的TensorBoard日志。\n", - "\n", - "本文档将以不同的机器学习框架为示例,展示如何在PAI使用TensorBoard追踪和可视化模型训练过程。\n" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "\n", - "\n", - "## 安装和配置SDK\n", - "\n", - "我们需要首先安装PAI Python SDK以运行本示例。" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "\n", - "!python -m pip install --upgrade alipai" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "\n", - "SDK需要配置访问阿里云服务需要的AccessKey,以及当前使用的工作空间和OSS Bucket。在PAI SDK安装之后,通过在 **命令行终端** 中执行以下命令,按照引导配置密钥、工作空间等信息。\n", - "\n", - "\n", - "```shell\n", - "\n", - "# 以下命令,请在 命令行终端 中执行.\n", - "\n", - "python -m pai.toolkit.config\n", - "\n", - "```\n", - "\n", - "我们可以通过以下代码验证配置是否已生效。" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "import pai\n", - "from pai.session import get_default_session\n", - "\n", - "print(pai.__version__)\n", - "\n", - "sess = get_default_session()\n", - "\n", - "# 获取配置的工作空间信息\n", - "assert sess.workspace_name is not None\n", - "print(sess.workspace_name)" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "## 提交训练任务" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "我们首先需要准备训练脚本,使用将PyTorch的TensorBoard utility记录TensorBoard日志。\n", - "\n", - "\n", - "> PyTorch提供的TensorBoard utilities的使用可以见文档: [torch.utils.tensorboard 文档](https://pytorch.org/docs/stable/tensorboard.html)" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - 
"outputs": [], - "source": [ - "!mkdir -p src" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "镜像里需要先安装TensorBoard,可以在训练目录中准备 ``requirements.txt`` 指定需要按照的第三方库。" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "%%writefile src/requirements.txt\n", - "\n", - "tensorboard" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "%%writefile src/run.py\n", - "\n", - "import os\n", - "\n", - "import torch\n", - "from torch.utils.tensorboard import SummaryWriter\n", - "\n", - "\n", - "# 通过环境变量获取TensorBoard输出路径,默认为 /ml/output/tensorboard/\n", - "tb_log_dir = os.environ.get(\"PAI_OUTPUT_TENSORBOARD\")\n", - "print(f\"TensorBoard log dir: {tb_log_dir}\")\n", - "writer = SummaryWriter(log_dir=tb_log_dir)\n", - "\n", - "def train_model(iter):\n", - "\n", - "\n", - " x = torch.arange(-5, 5, 0.1).view(-1, 1)\n", - " y = -5 * x + 0.1 * torch.randn(x.size())\n", - "\n", - " model = torch.nn.Linear(1, 1)\n", - " criterion = torch.nn.MSELoss()\n", - " optimizer = torch.optim.SGD(model.parameters(), lr = 0.1)\n", - "\n", - " for epoch in range(iter):\n", - " y1 = model(x)\n", - " loss = criterion(y1, y)\n", - " writer.add_scalar(\"Loss/train\", loss, epoch)\n", - " optimizer.zero_grad()\n", - " loss.backward()\n", - " optimizer.step()\n", - "\n", - "if __name__ == \"__main__\":\n", - " train_model(100)\n", - " writer.flush()\n", - "\n", - "\n" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "from pai.estimator import Estimator\n", - "from pai.image import retrieve\n", - "\n", - "\n", - "est = Estimator(\n", - " command=\"python run.py\",\n", - " source_dir=\"./src\",\n", - " image_uri=retrieve(\"PyTorch\", \"latest\").image_uri,\n", - " instance_type=\"ecs.c6.large\",\n", - ")\n", - "\n", - "est.fit(wait=False)" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, 
- "source": [ - "## 使用TensorBoard应用监控训练" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "在PAI启动一个TensorBoard应用,查看使用Estimator的训练作业写出的TensorBoard日志。" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "tb = est.tensorboard()\n", - "\n", - "print(tb.app_uri)" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "使用完成之后,删除TensorBoard应用" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "tb.delete()" - ] - } - ], - "metadata": { - "kernelspec": { - "display_name": "base", - "language": "python", - "name": "python3" - }, - "language_info": { - "codemirror_mode": { - "name": "ipython", - "version": 3 - }, - "file_extension": ".py", - "mimetype": "text/x-python", - "name": "python", - "nbconvert_exporter": "python", - "pygments_lexer": "ipython3", - "version": "3.8.3" - } - }, - "nbformat": 4, - "nbformat_minor": 2 -} diff --git a/docs/source/tutorial/tensorflow_image_classification/tensorflow_image_classification.ipynb b/docs/source/tutorial/tensorflow_image_classification/tensorflow_image_classification.ipynb deleted file mode 100644 index b2e1f62..0000000 --- a/docs/source/tutorial/tensorflow_image_classification/tensorflow_image_classification.ipynb +++ /dev/null @@ -1,797 +0,0 @@ -{ - "cells": [ - { - "attachments": {}, - "cell_type": "markdown", - "metadata": {}, - "source": [ - "# 使用 PAI Python SDK 训练和部署 TensorFlow 模型\n", - "\n", - "[TensorFlow](https://pytorch.org/) 是由Google开发的开源机器学习框架,它可以用于构建和训练各种类型的神经网络和机器学习模型。当前示例中,我们将使用PAI Python SDK,在PAI完成一个TensorFlow图片分类模型的训练和部署。主要流程包括:\n", - "\n", - "\n", - "1. 安装和配置SDK\n", - "\n", - "安装PAI Python SDK,并完成SDK配置.\n", - "\n", - "2. 准备数据集:\n", - "\n", - "这里我们选择使用Fashion-MNIST数据集,将获取的数据集上传到OSS Bucket供训练作业使用。\n", - "\n", - "3. 提交训练作业\n", - "\n", - "按照PAI训练作业的范式,准备TensorFlow训练脚本,然后使用PAI Python SDK提供的Estimator API,将训练脚本提交到云上执行。\n", - "\n", - "4. 
部署模型\n", - "\n", - "将以上训练作业输出的模型,部署到PAI-EAS,创建一个在线推理服务。\n", - "\n" - ] - }, - { - "attachments": {}, - "cell_type": "markdown", - "metadata": { - "pycharm": { - "name": "#%% md\n" - } - }, - "source": [ - "## Step1: 准备工作\n", - "\n", - "\n", - "我们需要首先安装PAI Python SDK以运行本示例。\n" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": { - "tags": [ - "skip-execution" - ] - }, - "outputs": [], - "source": [ - "!python -m pip install --upgrade alipai" - ] - }, - { - "attachments": {}, - "cell_type": "markdown", - "metadata": {}, - "source": [ - "\n", - "SDK 需要配置访问阿里云服务需要的 AccessKey,以及当前使用的工作空间和OSS Bucket。在 PAI SDK 安装之后,通过在 **命令行终端** 中执行以下命令,按照引导配置密钥,工作空间等信息。\n", - "\n", - "\n", - "```shell\n", - "\n", - "# 以下命令,请在 命令行终端 中执行.\n", - "\n", - "python -m pai.toolkit.config\n", - "\n", - "```\n", - "\n", - "我们可以通过以下代码验证当前的配置。" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": { - "pycharm": { - "name": "#%%\n" - } - }, - "outputs": [], - "source": [ - "import pai\n", - "from pai.session import get_default_session\n", - "\n", - "print(pai.__version__)\n", - "\n", - "sess = get_default_session()\n", - "\n", - "assert sess.workspace_name is not None\n", - "print(sess.workspace_name)" - ] - }, - { - "attachments": {}, - "cell_type": "markdown", - "metadata": { - "pycharm": { - "name": "#%% md\n" - } - }, - "source": [ - "## Step2: 准备训练数据\n", - "\n", - "[FashionMNIST](https://github.com/zalandoresearch/fashion-mnist)是一个流行的视觉分类数据集,数据集中包含70,000张28x28像素的灰度图像,这些图像代表了10种不同类型的服装,包括衬衣、裤子、套装、鞋子等等。当前示例将使用`FashionMNIST`数据集训练一个服饰图片分类模型。\n", - "\n", - "我们将首先下载数据到本地,然后再上传到OSS bucket中,供训练作业使用。\n" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "\n", - "\n", - "# 下载训练数据集\n", - "!mkdir -p fashion-mnist/train/\n", - "!wget http://fashion-mnist.s3-website.eu-central-1.amazonaws.com/train-images-idx3-ubyte.gz -O fashion-mnist/train/images.gz\n", - "!wget 
http://fashion-mnist.s3-website.eu-central-1.amazonaws.com/train-labels-idx1-ubyte.gz -O fashion-mnist/train/labels.gz\n", - "\n", - "\n", - "# 下载测试数据集\n", - "!mkdir -p fashion-mnist/test/\n", - "!wget http://fashion-mnist.s3-website.eu-central-1.amazonaws.com/t10k-images-idx3-ubyte.gz -O fashion-mnist/test/images.gz\n", - "!wget http://fashion-mnist.s3-website.eu-central-1.amazonaws.com/t10k-labels-idx1-ubyte.gz -O fashion-mnist/test/labels.gz\n", - "\n", - "!ls -lh fashion-mnist" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "from pai.common.oss_utils import upload\n", - "from pai.session import get_default_session\n", - "\n", - "sess = get_default_session()\n", - "\n", - "train_data = upload(\n", - " \"fashion-mnist/train/\",\n", - " \"example/data/fashion_mnist/train/\",\n", - " bucket=sess.oss_bucket,\n", - ")\n", - "\n", - "\n", - "test_data = upload(\n", - " \"fashion-mnist/test/\",\n", - " \"example/data/fashion_mnist/test/\",\n", - " bucket=sess.oss_bucket,\n", - ")\n", - "\n", - "print(\"train_data\", train_data)\n", - "print(\"test_data\", test_data)" - ] - }, - { - "attachments": {}, - "cell_type": "markdown", - "metadata": { - "tags": [ - "keep_output" - ] - }, - "source": [ - "在本地环境中,加载和验证下载的数据集。" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "!python -m pip install pillow\n", - "\n", - "import gzip\n", - "import os\n", - "import numpy as np\n", - "from PIL import Image\n", - "from IPython import display\n", - "\n", - "\n", - "def load_dataset(data_path):\n", - " image_path = os.path.join(data_path, \"images.gz\")\n", - " label_path = os.path.join(data_path, \"labels.gz\")\n", - "\n", - " with gzip.open(label_path, \"rb\") as f:\n", - " labels = np.frombuffer(f.read(), dtype=np.int8, offset=8)\n", - "\n", - " with gzip.open(image_path, \"rb\") as f:\n", - " images = np.frombuffer(f.read(), dtype=np.int8, 
offset=16).reshape(\n", - " len(labels), 28, 28, 1\n", - " )\n", - "\n", - " return images, labels\n", - "\n", - "\n", - "test_images, test_labels = load_dataset(\"./fashion-mnist/test/\")\n", - "train_images, train_labels = load_dataset(\"./fashion-mnist/test/\")\n", - "\n", - "for arr in test_images[:5]:\n", - " im = Image.fromarray(arr.reshape(28, 28), mode=\"L\").resize((100, 100))\n", - " display.display(im)" - ] - }, - { - "attachments": {}, - "cell_type": "markdown", - "metadata": {}, - "source": [ - "## Step2: 提交训练作业\n", - "\n", - "通过SDK提供的`Estimator`API,用户可以将本地训练作业脚本提交到PAI执行。\n", - "\n", - "### 2.1. 准备训练脚本\n", - "\n", - "以下我们将基于TensorFlow提供的HighLevel Keras 构建一个2层的卷积神经网络训练模型,对于TensorFlow以及Keras API的详细介绍请参见TensorFlow的官方文档: [Basic classification: Classify images of clothing](https://www.tensorflow.org/tutorials/keras/classification)\n", - "\n", - "训练脚本将被提交到PAI执行,在训练脚本的输入输出数据以及超参上需要遵循以下规范:\n", - "\n", - "- 训练作业脚本通过读取本地文件的方式读取挂载到执行环境的数据\n", - "\n", - "输入数据通过 `.fit` API 传递,对应的数据存储路径会被准备到训练作业容器中。执行的训练脚本可以通过环境变量`PAI_INPUT_{CHANNEL_NAME}` 获取输入数据的挂载路径,然后通过读取本地文件的方式拿到输入的数据。\n", - "\n", - "\n", - "- 训练脚本需要将输出的模型保存到指定路径\n", - "\n", - "用户的训练代码必须在训练作业结束之后,将模型写出到 `PAI_OUTPUT_MODEL` 环境变量对应的路径下(默认为 `/ml/output/model/`)。\n", - "\n", - "- 使用输入超参\n", - "\n", - "训练服务预置了一些环境变量,支持用户引用获取超参,输入数据等,其中`PAI_USER_ARGS`是将用户指定的超参以命令行参数的形式拼接起来的字符串。用户的训练脚本可以通过Python argparse库解析输入的超参。\n" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "!mkdir -p tf_train_src" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "%%writefile tf_train_src/train.py\n", - "\n", - "import tensorflow as tf\n", - "import argparse\n", - "import gzip\n", - "import os\n", - "import numpy as np\n", - "\n", - "def load_dataset(data_path):\n", - " image_path = os.path.join(data_path, \"images.gz\")\n", - " label_path = os.path.join(data_path, \"labels.gz\")\n", - " with gzip.open(label_path, 
\"rb\") as f:\n", - " labels = np.frombuffer(\n", - " f.read(), dtype=np.int8, offset=8\n", - " )\n", - " with gzip.open(image_path, \"rb\") as f:\n", - " images = np.frombuffer(\n", - " f.read(), dtype=np.int8, offset=16\n", - " ).reshape(len(labels), 28, 28, 1)\n", - " return images, labels\n", - "\n", - "\n", - "def train(batch_size, epochs, train_data, test_data):\n", - "\n", - " # Load dataset from input channel 'train' and 'test'.\n", - " train_images, train_labels = load_dataset(train_data)\n", - " test_images, test_labels = load_dataset(test_data)\n", - "\n", - " # model train\n", - " num_classes = 10\n", - " model = tf.keras.Sequential([\n", - " tf.keras.layers.Conv2D(8, (3, 3), activation=\"relu\", input_shape=(28, 28, 1)),\n", - " tf.keras.layers.MaxPooling2D((2, 2)),\n", - " tf.keras.layers.BatchNormalization(),\n", - " tf.keras.layers.Conv2D(16, (3, 3), activation=\"relu\"),\n", - " tf.keras.layers.MaxPooling2D((2, 2)),\n", - " tf.keras.layers.Dropout(0.3),\n", - " tf.keras.layers.Flatten(),\n", - " tf.keras.layers.Dense(64, activation='relu'),\n", - " tf.keras.layers.Dense(num_classes),\n", - " \n", - " ])\n", - " model.compile(optimizer='adam',\n", - " loss=tf.keras.losses.SparseCategoricalCrossentropy(from_logits=True),\n", - " metrics=['accuracy'])\n", - "\n", - " model.fit(train_images, train_labels, batch_size=batch_size, epochs=epochs, validation_data=(test_images, test_labels), verbose=2)\n", - "\n", - " # save model\n", - "\n", - " model_path = os.environ.get(\"PAI_OUTPUT_MODEL\")\n", - " model.save(model_path)\n", - "\n", - " return model\n", - "\n", - "\n", - "def main():\n", - " parser = argparse.ArgumentParser(description=\"PyTorch MNIST Example\")\n", - " parser.add_argument(\n", - " \"--batch_size\",\n", - " type=int,\n", - " default=64,\n", - " metavar=\"N\",\n", - " help=\"input batch size for training (default: 64)\",\n", - " )\n", - " parser.add_argument(\n", - " \"--epochs\",\n", - " type=int,\n", - " default=14,\n", - " 
metavar=\"N\",\n", - " help=\"number of epochs to train (default: 14)\",\n", - " )\n", - " parser.add_argument(\n", - " \"--train_data\",\n", - " default=os.environ.get(\"PAI_INPUT_TRAIN\"),\n", - " help=\"Path to train data (default: /ml/input/data/train/)\",\n", - " )\n", - " parser.add_argument(\n", - " \"--test_data\",\n", - " default=os.environ.get(\"PAI_INPUT_TEST\"),\n", - " help=\"Path to test data (default: /ml/input/data/test/)\",\n", - " )\n", - "\n", - " args = parser.parse_args()\n", - "\n", - " train(args.batch_size, args.epochs, args.train_data, args.test_data)\n", - "\n", - "\n", - "if __name__ == \"__main__\":\n", - " main()" - ] - }, - { - "attachments": {}, - "cell_type": "markdown", - "metadata": {}, - "source": [ - "用户可以在本地测试对应的训练作业脚本,例如通过类似的以下命令\n", - "\n", - "```shell\n", - "\n", - "python tf_train_src/train.py --batch_size 32 --epochs 10 --train_data ./fashion-mnist/train --test_data ./fashion-mnist/test\n", - "\n", - "```\n", - "\n" - ] - }, - { - "attachments": {}, - "cell_type": "markdown", - "metadata": {}, - "source": [ - "### 2.3. 
使用 Estimator 提交训练作业\n", - "\n", - "\n", - "`Estimator` 支持用户将本地的训练脚本,提交到 PAI ,使用云上的资源执行训练作业,他的主要参数包括以下:\n", - "\n", - "- 用户通过 `entry_point` 和 `source_dir` 指定训练脚本:\n", - "\n", - "`source_dir` 目录是本地执行脚本所在的目录,对应的目录会被打包上传到用户的OSS Bucket,然后准备到训练容器的 `/ml/usercode` 目录下。 `entry_point` 是训练作业的启动脚本,支持使用 Python 或是 Shell 文件。\n", - "\n", - "- 通过 `image_uri` 指定作业的训练镜像:\n", - "\n", - "在当前示例中,我们使用PAI提供的 `2.3` 版本的TensorFlow CPU镜像提交训练作业。\n", - "\n", - "\n", - "- 通过 `hyperparameters` 传递的作业使用的超参:\n", - "\n", - "超参会通过命令行 arguments 的方式传递给到训练脚本。 例如以下示例中,对应的训练脚本的启动命令为:\n", - "\n", - "```shell\n", - "\n", - "python train.py --epochs 20 --batch-size 32\n", - "\n", - "```\n", - "\n", - "- 使用 `metric_definitions` 指定需要采集的Metric:\n", - "\n", - "PAI 的训练服务从训练作业输出日志中,以正则的方式捕获用户指定的Metrics信息。用户可以通过作业详情页查看输出日志。\n", - "\n", - "- 使用 `instance_type` 指定作业使用的机器实例类型:\n", - "\n", - "对于提交训练作业的更加详细的介绍,请查看 [文档:提交训练作业](https://pai-sdk.oss-cn-shanghai.aliyuncs.com/pai/doc/latest/user-guide/estimator.html)\n", - "\n", - "在通过 `.fit` API提交训练作业之后,控制台会打印训练作业的控制台详情链接,用户可以通过该链接到控制台查看作业的日志,采集的Metric,机器资源利用率等更多训练作业信息。" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "from pai.estimator import Estimator\n", - "from pai.image import retrieve\n", - "\n", - "\n", - "# 获取PAI提供的TensorFlow 2.3的CPU镜像\n", - "image_uri = retrieve(\"TensorFlow\", framework_version=\"2.3\").image_uri\n", - "print(image_uri)\n", - "\n", - "# 配置训练作业\n", - "est = Estimator(\n", - " command=\"python train.py $PAI_USER_ARGS\",\n", - " source_dir=\"./tf_train_src/\",\n", - " image_uri=image_uri,\n", - " instance_type=\"ecs.g6.xlarge\",\n", - " instance_count=1,\n", - " hyperparameters={\n", - " \"batch_size\": 32,\n", - " \"epochs\": 20,\n", - " },\n", - " metric_definitions=[\n", - " {\n", - " \"Name\": \"loss\",\n", - " \"Regex\": r\".*loss: ([-+]?[0-9]*.?[0-9]+(?:[eE][-+]?[0-9]+)?).*\",\n", - " },\n", - " {\n", - " \"Name\": \"accuracy\",\n", - " \"Regex\": r\".*accuracy: 
([-+]?[0-9]*.?[0-9]+(?:[eE][-+]?[0-9]+)?).*\",\n", - " },\n", - " {\n", - " \"Name\": \"val_loss\",\n", - " \"Regex\": r\".*val_loss: ([-+]?[0-9]*.?[0-9]+(?:[eE][-+]?[0-9]+)?).*\",\n", - " },\n", - " {\n", - " \"Name\": \"val_accuracy\",\n", - " \"Regex\": r\".*val_accuracy: ([-+]?[0-9]*.?[0-9]+(?:[eE][-+]?[0-9]+)?).*\",\n", - " },\n", - " ],\n", - " base_job_name=\"tf_tutorial_\",\n", - ")\n", - "\n", - "# 提交训练作业\n", - "est.fit(\n", - " {\n", - " \"train\": train_data,\n", - " \"test\": test_data,\n", - " }\n", - ")\n", - "\n", - "print(est.model_data())" - ] - }, - { - "attachments": {}, - "cell_type": "markdown", - "metadata": {}, - "source": [ - "在训练结束之后,用户可以通过`est.model_data()` API拿到用户写出到`/ml/output/model`路径下的模型保存到OSS后的路径地址。" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "print(est.model_data())" - ] - }, - { - "attachments": {}, - "cell_type": "markdown", - "metadata": { - "pycharm": { - "name": "#%% md\n" - } - }, - "source": [ - "## Step3: 部署推理服务\n", - "\n", - "以下我们将训练产出的模型部署到 PAI 创建在线推理服务,部署推理服务的主要流程包括:\n", - "\n", - "- 通过 `InferenceSpec` 描述如何使用模型构建推理服务。\n", - "\n", - "用户可以选择使用 Processor 模式,或是自定义镜像的模式进行模型部署。这里我们使用了 PAI 提供的预置 TensorFlow Processor部署一个在线服务。\n", - "\n", - "- 通过 `Model.deploy` 方法,配置服务的使用资源,服务名称,等信息,创建推理服务。\n", - "\n", - "- 通过 deploy 方法返回的 `Predictor`,可以向推理服务发送预测请求。\n", - "\n", - "对于部署推理服务的详细介绍,可以见: [文档:部署推理服务](https://pai-sdk.oss-cn-shanghai.aliyuncs.com/pai/doc/latest/user-guide/model.html)" - ] - }, - { - "attachments": {}, - "cell_type": "markdown", - "metadata": {}, - "source": [ - "[Processor](https://help.aliyun.com/document_detail/111029.html) 是 PAI 对于推理服务程序包的抽象描述,他负责加载模型,启动模型推理服务。模型推理服务会暴露 API 支持用户进行调用。\n", - "\n", - "对于 TensorFLow,PAI提供了预置的 [TensorFlow Processor](https://help.aliyun.com/document_detail/468737.html) ,用户可以方便得将获得的 [SavedModel](https://www.tensorflow.org/guide/saved_model) 格式的模型部署到 PAI,创建推理服务。\n" - ] - }, - { - "cell_type": "code", - "execution_count": 
null, - "metadata": { - "pycharm": { - "name": "#%%\n" - }, - "tags": [] - }, - "outputs": [], - "source": [ - "from pai.model import Model, InferenceSpec\n", - "from pai.common.utils import random_str\n", - "\n", - "\n", - "m = Model(\n", - " model_data=est.model_data(),\n", - " # 这里使用了 2.3 版本的 TensorFlow Processor。\n", - " # 一般情况下建议用户使用最新的TensorFlow Processor创建服务。\n", - " inference_spec=InferenceSpec(processor=\"tensorflow_cpu_2.3\"),\n", - ")\n", - "\n", - "p = m.deploy(\n", - " service_name=\"tutorial_tf_{}\".format(random_str(6)),\n", - " instance_type=\"ecs.c6.xlarge\",\n", - ")\n", - "\n", - "print(p.service_name)\n", - "print(p.service_status)" - ] - }, - { - "attachments": {}, - "cell_type": "markdown", - "metadata": {}, - "source": [ - "\n", - "`Model.deploy` 返回的 `Predictor` 对象指向新创建的推理服务,可以通过 predictor 获取在线服务的状态,发送在线请求给到推理服务。\n", - "\n", - "使用 TensorFlow Processor的在线服务,会通过一个API暴露推理服务的模型的签名信息,包含了模型的输入输出数据格式,[详情可见 TensorFlow Processor介绍](https://help.aliyun.com/document_detail/468737.html#section-w41-c2x-vsb)。\n", - "\n", - "> 当前仅 TensorFlow Processor 拉起的在线服务支持获取模型签名信息。\n", - "\n", - "通过 SDK 提供的 `predictor.inspect_model_signature` 获取相应的模型签名。" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": { - "tags": [ - "keep_output" - ] - }, - "outputs": [ - { - "name": "stdout", - "output_type": "stream", - "text": [ - "{\n", - " \"signature_name\": \"serving_default\",\n", - " \"inputs\": [\n", - " {\n", - " \"name\": \"conv2d_input\",\n", - " \"shape\": [\n", - " -1,\n", - " 28,\n", - " 28,\n", - " 1\n", - " ],\n", - " \"type\": \"DT_FLOAT\"\n", - " }\n", - " ],\n", - " \"outputs\": [\n", - " {\n", - " \"name\": \"dense_1\",\n", - " \"shape\": [\n", - " -1,\n", - " 10\n", - " ],\n", - " \"type\": \"DT_FLOAT\"\n", - " }\n", - " ]\n", - "}\n" - ] - } - ], - "source": [ - "import json\n", - "\n", - "\n", - "model_signauture_def = p.inspect_model_signature_def()\n", - "\n", - "print(json.dumps(model_signauture_def, indent=4))" - ] - }, - { - 
"attachments": {}, - "cell_type": "markdown", - "metadata": { - "pycharm": { - "name": "#%% md\n" - } - }, - "source": [ - "通过 `Predictor.predict` 方法,可以向推理服务发送预测请求,拿到模型推理之后的结果。\n", - "\n", - "通过 `inspect_model_signature_def`,我们可以拿到模型的输入签名信息,然后可以使用对应的信息构建我们的请求数据。以上的模型只有一个Input,Name是 conv2d_input,输入数据格式是 (-1, 28, 28, 1),分别为 (BatchSize, Height, Width, ChannelCount), 我们需要将数据reshpe成符合要求的格式,然后发送给到推理服务。" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": { - "tags": [ - "keep_output" - ] - }, - "outputs": [ - { - "data": { - "image/png": "iVBORw0KGgoAAAANSUhEUgAAAGQAAABkCAAAAABVicqIAAAJOUlEQVR4nO1Y25IcN3I9JzOBuvR9ZjgUrV1H7P9/lcPeiF2JokTOrbsKmemH6h6OKFtecuUXe85LV0QDOMgLTiYAvOIVr3jFK17xilf8vwf/F1dmIv84EgLn5QCQzEyIKt0dAGB/CAeRfOYRZiSkdphPkX8UCchfWSKZgA0rPsWMf46EFFNGa5G/9nlmJmDrg3yaHr+VZNk2Ves46un+LhDy2RBkZAZg+3daHj5+k7sIEAlQrK4O+3ovpwbkS5Llsx6+M/+B32rJAqnj5vCmG/2UU8aXWyFW252O59W/lSR12F5dXw8j2H+8+4LEuq5ejZqR30SynIgEYKvD9c31sO6G9V+P7deD6na/vhnadPT8FhLgkqzd+nC93wyrvu/mT08v/5duc3273wxxnNo3WXJZh7babDdjNQjz8ZGRCRCZmVb77e3tvi+zXIZ/vbsSEKvDbrdbWYOw2+fqz+GeFPo8hXXj6nBYGY5T0W8hIQQOSF1tD/tNz9Okxbbr7z3aFGIyPz0268Z+NRTEw7HTfyKFtd9eH7Zj9RZFdRiGGtPJtcj88DBr7Us1Rsu+yDdYwnNq2frm7fWmSiyHclh3MRXXIl77RjFRTYqq8KtJiEuFsP27f9l2kilUMxVkRASoKBLe3JucT/7XpzAvmtHvbr/rYo6kmKnEHDGdgk1Vak7zFBAo87lYfX1MVPv9bjPYySOpaibwiBZBUkThcIcITURE4n8m+VzyLsII9Lur24M1P55AFTNTtMwWQGZGpEeEkCJml6D81yTnxbmMuXwtAVm9+/5qmw8xzyyqairhnpEEMjxjbpGgqoWZ6fxV7uJFs7Zv/7Rle/AWJlRTyZjnJIVARkTzSIiohZVSjv8wCZeYJ1i68fp6P0ynU3OgE1NETqcZYgTJ5xpJCkvt+qfI/JLk7KezROdlAhYOcHXYf3cY7TQ9tRSFmaLFdJxpFFBEkEIAhAis9sPT1H5D8gXymSSQAMr27e3bbUmfTg0lqEWj+XRqAkuAIkiSCZAC61frY0T84zFJUGR79ebNrqN7RDChZozW5hbn7gF46THt1rvj3P7bmFxGJnBpP9Ctx+t3bw4DTs1pxJJarc1OEWaGL5rwPF2G3dXpePwNST5zkEDmMmeprsPtzc3NfsU2tWa9p5RimtFaiohyIZGlsAAZIsPh6XT36XcsWcz+3ITANm++v9r1ZZqePFU9WUzFw4NqlIWEWAp7ZgbY7+fHH/X3UjjPGQUAYsXGm8NuXeiteVAokM4EGR4qSgGQwVjkLTO8SXbb+DR+JnkZg/Ph5udmimW122yurjpMp2wtQSpFO0NEZEKUBElkAJGBcBKCstOfB76w5HOreRbbF57Sfvfdm92qNz+2ACkUUavV4B65nI9l
XgCRiXBmqFpfdsPvuutMTEjpNvvr253RfZ5CiipFrdYqyISKmS17SmRmJhLhGUmpOnSFsM9n/LL1y6+ImKpp6frVYTcUuntA1IqSamYqAAtROkN45nJ/SACRIUqJSJZhCCOQzJfN7LOTSulqP3R93/fjIC2bJ02smpJy1nGVSisaLds5S3LxmkJiPrmX1Wa6WPJbV9Xa9WO/Wo/D2BVNzO4JNbNiSogKgUwxEVFeRC+ABIlESGabW9i4PtpzTlHIc3FaAtH3/TiMq7Hvi6K1yMTZS0JSmOkAqUJEnI8gAgEhk1SSECt9n5aLF6lWigohFJLUfhj62teuVjNGRoIKsaKCDKUgMsQ1RdPDI3ORCM+knTVfrWbXdZ3bORpaaz9UFYqYCNWG1dAVUyUzW2QCCkoxAQJCZDpBg2q0uaWoCJEt4lx4s4XWDn1XrNgSj9L1/TB2RUTVVKhlWA118YS3iCRFKKK6JAnlrJrizvk4swhFEMigUKxosKlV1mKqBsBK6fq+7/paVU1VBWL90JmQoJCiSaGIgDzHTxFEJNIbptOkksvVOgIAhIgERUWFpAHo1uMw9tXUSi2mFCbVGB4gSZOSIKmU9MhMUrQAke6Z0eJ0nEsBiczICG/m1ZCpFBEi0wAbD/vV0CmSpasmBCNJzwZQ1cQoBKhCR2SCFDUysjGZ4e4REBEQJDMiIv2cQMwIt8Lx8PZq6ITespSiApxbKACMhIo+n57MyKVjEEoCSRWWqqWrlUiCqg6KiIioMMNba3Yj23d/vrGY2wxPn5RMRJzbB2ozFRUgaSo+NwcR4Q4C1NRq6IewvjdBenVvLWjFSJqmT9PpdLRbO/zpL1f5cP+UHjG3RbEzISRIWYKHBNU0vSU1MyIgBFVK32l6iqmQmZnRmlOLMpOabTqdjkdbd/vrN9fNIgHxeHYLEklkhosIEaC5MSmiaqYiZ73vO0NiCQBApM9ONUmPNIa3eZ5togdURu+mubkHwIylG8lEIC+3f4CiqiqmpdYiQqtdMRWGQ6woCZAZzalKb3Mom6kI7ade93ebbiiZGRGemRFza35BQIQgxErt+q6aaq1dEUq3Wnc5T/PxlGUcq3CRlggKs52mJsy+60q19z021/2u76uJZDoi3ed5nts0t3memydVAIjVflitexMrtRoh/W5f4v6T++TU1UrON5IEiJhMG9m6Wkqx+1Y//L2c1mNfiwqVicjqfiGZIylnkm5Yr3olVeDRchbrTh9/ubt78E3dS2FeuidEZmZIRmRmWkwP78vTZuy7UkqttaiKmdXewyPCPQFdstms9p2mt6nN09PjUVbrOt3fPz4e/baNHTURLQIZ6T5NUxMej4+Pj0+GeHp/+qEWM7M6btZj33Wr9aqvJEnBuVdnQoQU+DQ9Pd59+vThw6dZq/p8mueW/5qbzg3ZTs0j3Ju35qF8+Hh3d3c0xGm+IwmhdpvDYb0aV9urFr0WVTPRRQKQACPaPD0+3H388P79D3//6dHJXJqzT9ub+lCY02lq7q01j0AYn3765e5hMiDPz5EAHo9P63EYt/cf170VM1s0+XKPdW+n09Pj/cdf3n/48cefXxTrv/3bTdsuJO7urUWAaTz+x0+Pc/y6JXoSv+9qHVarroqJqojI5VkuEeFzm07Hp4e7+7uHlxMf/l3/OijQWouI8FiKDeef//YYXzzZ0kxUxNR00VDy5StmIjIivLU2z81fTNT1ujMub4OZl/6WiOnpOOMVr3jFK17xilf838R/ApS1P0sVdtkIAAAAAElFTkSuQmCC", - "text/plain": [ - "" - ] - }, - "metadata": {}, - "output_type": "display_data" - }, - { - "name": "stdout", - "output_type": "stream", - "text": [ - "Ankle boot\n" - ] - }, - { - "data": { - "image/png": 
"iVBORw0KGgoAAAANSUhEUgAAAGQAAABkCAAAAABVicqIAAAPD0lEQVR4nKWa2XPkyHGHf5lZBaDRzeY5nPvQzqHDDwrZitCz/2+/2W8KOyTHSrva3dlDs8O5ODz6ABqoqkw/AE02j+aMZDyQjGahPmRm5YkmfNaV39q2w7cXPiq2Mw2LefiMu91nMfzmeKxhPl39aGvTa8j9dPH/hPjMOy8ikm3ubNr2xokSUfcvKrdGTkNbV1VIKcUQY0z/FGSws7W5McqdZKPNIc2O5xBZQvJBThpi2zZNU82np6ez2T8FcduP7t+9szvy4opRzk3dsvfcQ4jJUopqqZ4df/xw8OY9WvuHIERE5PYePX3y8P7+2LFkuecUVTJ/vihqSga2evrx/ZuNcpD746R2HehaiAw3hmU53Lv/8O7tva2hgMQJeaNLt5oaCfLM5+XG3r337w9PPn6MnwtxOw/u3N7f3dneHA/LzEFNI4iJRc4XhaQGsJC6kRvtP54cH779+1+nnw0p958+ffLozmYuQlDVpGoGEWFmIoKZaoqqIGJmYp+NSZvq5M1X1evqcyDE4vYePH3x+P6tAoCFoKqqagBg3Rk2U0tJrQMKxAljtLVZhoPXbdteOQBXIflw4/HzX31xayyAphSDqpqBAFMjoIeYggAYFKbKwuBy9+FvqvH7t4eXo8AVCJe7d5/98sX9ASqYmSY1MwIxyFK/LxRG4I5sloxgREVe7v9K9r5JJ5+EyHD/ydMn9/dQNdEAWyqRYf3x7D+lnmgwNUtg8n7zUTYMB77+JGTz4fOnd4YATA1EIkxMpmoGmMGIwJ0IHdJgABHIksgY4dU4u7znFYjb+eJfnuyQxmAwsM9zJwRtFrHfkohFiFJMSkSmCmImBqFN5Da2t4c52acg24+e3ctTnYKBwS4rAICFeXknO3FMQaN1VBAzCWANnMtHG2XOl6LYVcMPdva3UIWUwERC2lhUMzKBJjBIHAsTACIAJCBiAgyajNmXZVk4tZshcFkBEAxg50jn7fxokka39wpplJ0T5ximSqJkBpdnnGJMKZmBSeCyvCg06c2Qs4s4z2x+evjuh9fh/u+2MgtGvsgYQBs0wWlKcEMBmpRCNHIsBJJ8MAj2ORACAPGZnR79+ONff2h/dS95CCCOkWJTLxIXmQCgJIiLRZsUQswEkmI4bNKFEHYVQszLvxyhOvjmu5ev4m5lgBBB6+nhSV03tnXnVplHNAdVMLD4TFgIAGXleFw3n7AJkQCAAcTQk5d/+u5ook00wAusnbz88ruqjfzsdxtbOfDqL19PBnu3tnc3ck5qKpRtbG/N5p+AdKfGAIBh04NvXyYlNQBOtGnf/OU//3tu5k93Hu4DmH75H+93Xrx4spGNaNF0kJ3tk+NPSrJM4wSYxhDUEBSA+Gp29NPLH35qALn35t14H/Tux6/f3OJieJczxAYG+OHmZuluhlAfzEFERjR68II+HqcQFcBw9vNX3/9o9wwJ2cevTncdf3sYUB++3n6SACaYwZXj8eBTEHRBgZhIhXd+HXf+Us9TMgAuvPyvH9LW/cLaxh//+ZuNwr2ZjJRmb/ZO2z4JmBQbG5+CdN5sBiY20NYzX5z8NO+da/L9//x9796LDavmJ4evQj7ImtlQ6vrwcBoAIpiZDDZGhdwIYe96u3e2KbYWhxsCEKGtmlc/v61Pq7a1tpkfv69d7gDVpoqTOvXOBVcMh/mNEHK5708wMRG6MMyACGZ/f/O/Bw3qt4NBqhenx6cNCZNAJ00zbTpRzUiKsrwZgqyLGwYiZkJfKoCdYPLdl1+/JehhcrENMSrFRsmzNQhBrbMJ4PJykPMNEGKfe15VF4lzjkHMNn/91csjhbNgKVE2QJjNgs8G4maDYdYRYOCs6B50HYSdz9yZJAQY+yL3TJKXFE8O3pw0GD9+OOJk4Dj5+efp5uOH5eQYz/dcQgLMjH2e+xslcd477gzIRDAjnxeewXkJmx0d18Deb39/Owd
SrF//OWX3//D73dN3s1v3s4BEBAW5vHvQ9ZJkywVETIbuuYQshXw+m9eA3Hr+r49HhJTm37cfcO/Xf7g7Ozgq9rJeEtBSG2shF0U1Uyaf507rN38b/PBhIXlWjqmaGVM+Gta3d6Y5QkCxUeSd/KZw/Al1scu7I0xERGaqDm6QO7z/Y8y+/7Ed7+8W5Q+n2wXxg2e3aTDKq29pRyDbBQk0mYKIs+JmSGeBpSApGSB54XH4x++lOpXd54/9yTfvWOB+9++/hR8Npn/6ejje2380MgcNXTHzacgFdakZOCs3kF6/5rzcfvj8ufzt628WgC9/+cWw3N2Z/3wsd57xTiQBw6BM5G62CS0N38VIAsyIR/sP2ib4zVt3f/HiC2kPjipQfmdzUO49aRc/f4B/KN5xvx60jH9rJZHuCJsZiECsKkrjh6d1RLFz996DX9xlSjsLcvnzZ7uFuLz+Foj5eDxgwKiv6ugiY40kZgYQmGGqGD+2Gn60e+f23v4WZ+WTwFmxe2fbDTaKj3fyZri1tzUQYBkiz/Pe9ZK4LDu3CTHDNGL8IKspH23t747HJSBbibN8UGYkuT588nTy+N7eeMDnjCvXZcN3kE4SJqjGyONHuy18UW6MCk7EpRg5L2jZ0fDBv2XV/d882C09ALpcBK+B5LkXgpkZCCRkIdB4ZAoWFramJaMcxKTR2Ody5w9P42h3e1AIDNRVnp+CuCz3fNaVQFOMkfM8A2ApxJSU2DGDLIUkSm576yyWqhGrdb0Lsd4Eyc4hGtqgCnadmazTI/VH20xToKyzQ0qmAAMJBpBkeaPrIOSyzPU5npCqaStZJtYwDKopKQhIRkamxNBWNWdYG2IyuMwJkhlAviijnWvuaj6R5enSdnq4GGwPRJvUJT4CESyCDCBisxDbhhFjDFFl4IQSKQDyg3Ke1kFIfFdIMANpcfKh2ixJLDVqgCybeIMRmAiqysaWYmjqIKkoWbqk6vLCxXN9XfV4YYBFKen88PW83WEBUydFV/iRrTgEAcTMcTqDjsa9r7NkuV/p5i/neOeEAHZGqZ18eDXDQxJy0kE6SYj73QESZiFj09l75R1Dd2hIfJY1aR1EnBMG2KmlZvbx3XS0IGEn1MkCWvE4AzFEhEiSzT7EotZl1GJxbiVIXgMhACIxtfVsMp2H9dECXZQiJk7VcbPdWF8VGZh5JdpfLSS6VkZiapvFogkJfUd/ef/O6QACMWt13ExaA8gMlm5sTFnOINA2xG48BFVjQ1+JrwYOIoKBmHUxreftUpKU9MKyiymMxHvpvAApRO1njmZ27URueRczYl0t0vIZrG9A1qrLCZQJsJRUcuSOPsUAs5eLOmLn1xuexXuBAoAlpWyUlRnBVAEsJ0RnD4ReRmLy3vH5psTOZ15uOl1sZgTTGM0Pi1FG0H5uc91lZmBybgnp3NVlN0C6o2cAkNpFoAIDf32KuEABsWnXjC0VcpMkfcUBWKznC+Qy8NTF92u9hQBTNoJpaKPRMr2L79ucayU5V0qoZpVm+cDTeSNhq6juDEJNDbDYdi7V9xtZ5tcaHme7WKhntfpB4W5SF8HsDNJGXdpElk7dq++yJNRtaqGe1ckVueumTteave/GDdCwqOtFo0bM3URWVnbmy3d1zTgQq1mVXJH35TdWg/05oWdA23o+r+qkYAZAIqtV5FVJYDDA4qJaaC/J+dZ0afFSEovtoq7rqJAlZGXpJQiA3uNSaCPlg9xd2fva5QRouwiKbqjHwmvVZcuROWCqyMpR6WCKKyZZima94dnlGWvbpm5iQHyh5r4kiSU9fz0h+XA08NBuCnwx+i5r+K4OBOflMLPQRmPqplhC646w6QqDOCuHuZjqNerqD7ZZNxeWctwM0C4K17UbIrI2aWEl3hrI5bknpWsy1tn6/ocf7aQhNYs27+4UL2udsQvq/VTNSHzmoFBbDQXWG5psmToA+M3bsilN3Qz76aW/yeNtuSERs/OOTaH9XP4aQawfXWW
b+zLgRdUoACN23q2Pwp0ghD6SOiEk3KAu9I/khttKWs37OohklXENhFgIICnKrHBMqrCr3rRS3RERIFlZtc0k29E+q/D69Gtm6KZD8OWGL1zXAC8nYGf7nyWEpSLZF1lTh2yRzpR4oyToJHHFyBWOYf1bpbXa6qi+KOZtM2iWktxQSCw7UgCSD1zedcKXkkj/+7y2sAQUG4t63iyLbCK5SV3a13HkitIVmRD6gImly3fp9hxCiC1hsIO2srOXtszrDW+my1E+i3deZLVGWfllgKKr6Q0pkBWbNjte9FMcEAuvDSvde7e+kGhSNLoaG1d5nSSmBMlysRCiLSXhG9WlKTnAqg+vs8G+CbOdhdyz3fsP+jmHCKXU1tPjozSPWE6ubzC8ppQSAD199W1RPlQvfObvPcTOjjAxzIydp6adHr0/eNdMAs5L8XWQLhAbQadvvi/vVAksvIyCFxVHy53IZWjb6dGHt+9sFjpJus5vDYSYyNQIGuaTVLXGzizpldbhTF1dmQrRtp5NJ8OFAjCIz7K1sYvFZ0Jmy8EwC4kATTCs2OWcQ1BVlwGDjC00TZs6lbrBqMzW18LeC8EAY59nov15ToaLI6yzsKJJuxZSY4qxc0YlPxgNVoZzVwyvZsuOTOuTQyudVpNaaaVoX+GYJvNhSybHJ5N5m5Yp78bMmJr5fJQTASnFpn715XRz4HQxay5DlkNzswQ3Gsn83cHB21kiqDIYcVEtwroWO8xPjkdDYiDFUE3a4y8Hni200a4pWLqAo+A8p3Z2Ovk4IaEUMwi1k6OTel2LHaYft7e2iYAUQ/3hw0+euetL119GzLAYU7QNx6qAoJkcndbrJhJhfnJaJSbATMN87RdRrr+yrnwEszZVvaKuizkvzk9PKvMAnODiW8/PuEiccx5wGduFL5dcglQnk1odAMdM15WwNzFY2DnpnhC28gWUS5B6Nq3iMm/cWAFfIwd36QiwFEO0dadL22o2n88z0nkd+h7wYoF67fa9Z2pbTU9PHGEyOZ3M23WG19DUs9PjIYfjaW+5NV3vJQhgZrE6/fh2u5R0+v7Dx8liHcRSaKrJqbnFyWwRzyFXQvAViAGWqsnRh51NF48Oj05mzVqIpdAu6kKbugm6VNcntHV+sy6q2cT5MJ/Pu++KXA/RLmtRTEn/0RMMIMU2BAohhBhXPPj/ALU7R99e9smFAAAAAElFTkSuQmCC", - "text/plain": [ - "" - ] - }, - "metadata": {}, - "output_type": "display_data" - }, - { - "name": "stdout", - "output_type": "stream", - "text": [ - "Pullover\n" - ] - }, - { - "data": { - "image/png": 
"iVBORw0KGgoAAAANSUhEUgAAAGQAAABkCAAAAABVicqIAAAJSklEQVR4nK1ayZIctxF9mQBq6W2Gm0SHJdkK++KwT/7/k+86+ORwhBQObSRnOJzuZld1LQAyfUB1D0krqtpTxNxmweuXiXz5kBjCyCJAAbu6fvHHv3y7fH+zpeWK6vd1NLlzmaMYef188e67f/xrbBfAjv8YAMjm5SLPXF4sqMwpehGTOZdZipGKzGVZRjoXxCyefvni2VVpJaeyoEWxFJM5mxmKEaurImw26zbKCM44CEEB9+TrP//hmy+K/kmLzKFtGhjnjGPEgMXCZS9fvt4d+zCLiXv2p79988VVLiGCWX3Xw1rLFghRCkfLN1/dQOI8EHv9+2+/ui5Ovxl9IGMMERCi5Az3/MWL+tiM7TANYsrrZ1clQ6EgAAZEUCVFSoQr15tlzo8FSbmkbLnOuZcYIpFhqEJVoYqorEuoLZZl9miQhMKuKC167ztPxjERNIqIKgRGMyecl2VGjwYZfifLGRJDCKRkiKAxDiAaFWTzsnSPZ5IWWQtAySgxE1RVASKAhJCKNbczmSgRAyC2BsxQUQUxEWmMzADZvMjtTCbMzABABiCCQBVExKRQggLs8myUydgHSIusYQBDjAj4aDuNUchOgEwz4cwyoHISJyJWRfqCSoxBbTYXxORuYKKJBhEGBAU0mACTZWYOCFFRZgyIyG/+XCVG2MzNSrxxy2VuU0qYAYWoQEEDJ5UoZPN8DhOy5XpdGkBUiYgUqqJKSG2TQBqVbOZm5cSU61VhARFh4kFoaDhhRABUyVjLc0BssV4VrBCRYWtmHWol4dDY9heCLFarnIVCiKQggJhJQhgONIFYSEeb73ROXLleFSwcRSSBWMOiEUO1kzGqMYQ4hjLNZLMuTNJEYNAsJeZTabIRxL6fA0JusdksjCoxkwoZQxBRGBZRTSBRfduF3y6jy5i45dXVwkDJglSMceR7D2tZfIxQENsgXdP6mUzWpVGQSRkx2u9r3mxygQoAYmbp29bPYWIX63VhkocgYkP9/Y932dfFkqMHACImDV0f5jCx5WqVs4ooyFhjtP3lu/+s/v7smVoiIiIGxM9LPGy+XOQsKqLGWkNS/fjdP5+t/0rkEgiBNPrxIzzVtDgvC0cDE8PSHd788O/vb1qiQUgIUJFZxQi2zhmoiBJbi77a7fdo/OAkLlxTTIiNIYiqKlur7W5bCbKhe6iqKoiYR/VxGsQYZqiKgq2R+t1dJSgyBkBESQmYkzV+NAgb5xgqCjBTrO5udx0MA8Cp+kFseHSfSRCXZ4aQwgK/v3l91wzpcM5AREFkZ/YTdpljUkmZ7nZvXt0dwaQAnHpIFIBnM7GZZUpaCPj3tze7HhAvgLGc3BimGtd04i2S+SEG/OHubQ1oXx+6oKe9NcbxrjUBQsQmeUZiBkJ1/64DtN3fH30UEDFBY+j9LFlJZluHXh6O+10ApN3fO0MESreVvhsHmfTCp3AzMxD7phdA+vowdBAiQIL3cWyPSZtKJ4kiApDuuNo3xy4qMRERVMKEQE6C6OkDE06XSGjo2i4miSRA4tm9PBJkWOkkDWoroWv7gJMQawxxrDH+PyAS49AANfRdLw86fDYzc0AUBEZE06RWrrHvgxCBzpuPVuNlTJSIKUp97B/CNTC5pOIvZAJiDVIfewEA6eqq7kfT8NGavjMCEAUzxaaqOwGAWG/vdsc4HqMP1mTFA4CKYaPheBhAwuHt6nd1OFv6zxMuBTOF4ylGsb5fbY8RPKBMHK7LwgWAGKE9DjmJ3WF76M6XE506wtPaxcOVV0N3HNyo+Pp97QFmIkrFOIpyUbiSiQtd0ybzrkGrJhCYOd1N47yKV40JJQlWkl5VaTpJSk/pmj1Lu8T3kcCGoaE9Nmfznj77oMJTAjmRE419EwTMROLb40fXED1lXsJMgYxd46FsGBqD/7QBpmhNhmvqdInvfFTmdO99cFcfn6aZbkWi91FBIHb
FojhPAdONcbCpxoyb4SkQDb4PChBMtlyv8k9AkoM01o7OVqZAYt+2XogAyhbrh/Hvh+Ga7epDU1WdEEFgF6tPZ8yfR7vU1+8PjTCrqF1sHsI1jIwNDzetWXXi692+ESaN4hbrVWE+/mtmAkSijHKZCpevdrtjZIbAlpv1p+FKbn6ukeir7a4ORCrInGwWD0xOE+gLpH4CRPtquzsGIigyNlefhOv07DWR+Ckmvtpu6wBSUZOZ4uFxQSUp5MTfA5jOSX/Y7qpeT8f04SOrRGDCb53WJeGqeoGqBO66s3tXjUFJB4yZRsLX+13VRVUVTx9KvUo4PQ3h09n6p2tSVtqqOvZRMMyZz/qh8oHsz1RhSFc3fZA0eLDZQ+pVdRjg0tRIdariAd+0nQ8KNsZmxTLnD8w8ziCj8Zr2XbHrQxAlNmyzclEOoq4qZy2ZyQSARhkiwsbmZeFSPepwUVQNMuVTL3CQZFzmDAHExhXF8IoRfdcFJdXoo/I4ygUgbIw1zESU3rMTE+nbzoMgsQs693QBiKFPISGbL1elI+A0eKRpZ3cZSGz3232nhkGmXG+WA0ianROgEsIMB5kkNlZvX2FTsAHs4urJfZY67sCACDJ37AEgHG5fWc4MQc3yydO3+XAxSk8ERKT/6/o+XpeEq767uW+ECWpXz19cF0OWz9vKZ7jHx+ru9vpLNVB11y+bN4skimzSizZU4pycpH9fiPW72+eNGojajbTPyjPIufnOnAsDkGZ7+7KOgIpZ4LAZcvLhzhOtazwnRESQdn9zd+hFRcDWmpSM2B2bXogJYOvGbeoYEyKAFNId9O59F5yIIsYYIwD19X5f5GwIZLJizlN5qggf2/tD61lUNISYem5oDtW1GKb0VO7mvserhLrpvAMTrMuLIogi+r6PxAQ2NsvnMNFTNUTvA6wB5c3zF89RB4CMdS4DsqIsy8c/+n94LEPXS85A9vT5l190bVCwzZwjAHlRTORk/HSd/ad2VdUEACBXrnIGRIYjANWJnnXp5K7b32zCVaF9f//zu6NXwB/e3S5yEOSw3R+Oj384e4hXe/cTVU9zv9++/eH7mypC23c/M6Sx6ve/vL7dHsdmtpcy2f5KfZ03b169+fXXmy4A3fYVOxty6bavb+72zWcACcd96Tiv3vz0+uau0vSd5W5fFNLs9u/rZs5j5mlJ6NquQ3usDnU3zCH7tut7I33Xez+uwv8FU8faPheawW4AAAAASUVORK5CYII=", - "text/plain": [ - "" - ] - }, - "metadata": {}, - "output_type": "display_data" - }, - { - "name": "stdout", - "output_type": "stream", - "text": [ - "Trouser\n" - ] - }, - { - "data": { - "image/png": 
"iVBORw0KGgoAAAANSUhEUgAAAGQAAABkCAAAAABVicqIAAAI+ElEQVR4nK2ZW3MbyZGFT2ZWdQMgRWlW0tgzXu+sH/zg//+LdsPrcWhmJBJAX6oqL/vQoIb2A7pDIJ/IQER9PFWZJy8grPxQ991Pf/mY6mjmDodrs4AADhZBaD3/9unp+hlpjSG53+0PkqBmDsC1aQQDAIvAJLrc5XYTJO3evHv/8ftcxqbqxBKtNbUAs7BwtDmj1hhV/Zsh1D98/NNf/vpjLudaq0nXUSulzi1S34vAptNTn7u7p+O5fjtk992P//3Xv/1nN5/LPGu6O0ibpnEYrb+/y+z19PnXw92bt79i/nYIujfv//DDDz/mep6nSfOb+9SmcTiedf/2bYbNTxLS7Xp9kiuHrEGk3+87DoBYMichkrzzZna4u0veNPeH5lF74W+HEKfMOp66OtemHlMwTJ0EIgRvzbjrSyHQtVPWQzixDo+dVTUNdiWOsOBggmutipQlzDxugHDuUtTZ4QABcAKCRDgLES3/RGitdgtEur4jLZRSMBlJJoQTJyRhYknC5G08jeVKmqxfV+4yac05UwtQzuwK4kTCBE4pCaycjqfZboEQU5gahMOcWRgEMIjhBEmJ4WU4nm9S4lqreoCYmQgR7u4BorC2SGG08TzUWyA2j1M5sDARCOFKZhZB5NVYJOUk0PF0I6SMYzUAjnCHG9w8iODWpBMWRrR5nG6KrtBWSy0lETdVInAQEXOYuXFGmLVa6m1WDwqvY+YwsaYixMQCoghXyg6t81z0WmhtgnC0kVxrDvMsmYmCCGYwtIYyDWNZYayHMFO00U1LhtOOc2ImYrCGOktM47C82W1KKBSmbUoRfAgRIuJMXtGaqZ9OU2lBdO3d1yBBxDCrZUwULg+REiJJyjajTRPrcByKgcVwBbOqBAhzR0SYp1l2OVImyilFnaq1aZyqEfNNLuzWGsK1VWuecf/QK9J9v0/sdZxrLWWu5tfLyXoyTufu0InrVJrnfD6nKPGODrvQeZxqra2qrpSTVUgbjjnvcwx1qt6N44DpS/3Qv++8zdNUW9XatNnVJ9ngXcP+rXTG1pqjzHM8/vM8/3H2sDKX1qqp2dL2fTsk3BbXZUaEm3IZhrEGUbi11tTMHbeFMEiSwNS526ExMwAWEWYmRLiZuxPLbdFFKSdSDdmBmLIwp9x1OQkTM8LDHSQiuEZZt5XlLE7ZgS4xIWK5PibC8t5Et4VwhHsQg4lTcJ8TaRnH6mBmYSYiIkTETW+y1PdkBAinfZ+iDU/3k4JERFgMi7arlPWMVwsWwINS12fRNjy9GRXEIizMIET4dSXXWtgLxRyEcA8iIng5H4+TgpmZFw3hfmueuHsgXJVIzE3n89PDZCQijHD38LUnWbX6cLeIcDMFWFXbfD6eJiMWYQozN18Tsm714e4LBWxqpnUap2pLbC/J6DcqweXWIyIWWaoKs6Xdg5vFWmhtgTyLWYB+8UK3ICaYGUC4FRLPUpa8djUHGO4OIrgZvYYSX2z4xV9YvHmBKIWuGP2Whrup+qKDGeHmgWUtQQS4hs3XJ6BNkFo1AAKYmeDmgXBTj6UB1/ZKEAcREV1SHIsNLIGtrdZ2vcKv24q3UjSIL5DfLT3CzczM1jJxXUlYnYtCwKClIhIAMLO7amvqQXy9mKxD4K1UBYMIfKm7AIgZYaqtWdA6Zb1otbasoPDs7vT8ibstGX+rEsDU3M1MUxCn1CW5WABRhKlHuzlPgHAz9dZUA5xz1zHCzYKWiFYr7TUgz2EUxJKSLM/hYCGEqV3dp22EwC8IXAokEFpmChYhYMOTbIOoLk0kwVDVEW0ak7HIkj03hzCW+/IAMVGolubwOg6dLREtJDeHMC6tFziYKcyqBqJO54OSCL8aBBGglCMxwtwc8DoO5JCUcyZXeQ0IEXHqIzMizD0QdRyEg1LqOrbGawa4AUIglpQiJYabGuB1OOUOIkkSsH5dq80dABZhREj
eZbS5OrycT5NL5qXzW42uDRBiEbS5eH+3p3mcFV5Opwl9F/PpPOtKj7pRieRMOhfaPxxoPk8Kn09PM/U9lfNQVvN9m5LU73eZZffw7g7jcdTluqLbdUJLJ7lywoaH5/7+nVaT7//4H4zzl1ER5XwcVfYP778fhef52nZ7K2T39iOkTz/99GB0+nUwRD2dRpX84c8xuB6n/BqQh/cp79Offzx8jtMXM0Qbh6nx/sN/5WEe+CldH3435UnqD31QfnefWecCAFZLC969/YjzeGyHV1BCRNwdLGXTaW4ALkUr9fHOd4NMh7Ty8tsa7qBuz5jKcdKvaJaO7jVlOr+GEkQEp55Qhq+bWSJiSXxQEbvr1xxyCwREzE4Ms+eGlIiIRfpdWJfXbmujC4uEgOl5KUDMzExMklJalgY3Q8BMxLGcfMHyc+OK1RFom3ctp9LL5cbyO4kQ1jZqGyGXZQq96EyW8YqIYbWu7Z63QJYaT0v7c5kdwz0iiClameuto8MzBcQUX+e6ZYUQIAotpb2CEoRZgOgl5CJpmU3XBq1N1+WmBhaEPxfB5ychbGoht4SwmzmYFkUXxjKrghBX96jbIQCIGC92Z8/rg41KtpRfyX2XXtbY8OcOnH5fV9wIybv9LtPLVaObusciZJ2xrWjlvuOvDw0gQk0vIey+Hl0bXfhfPGVZGl6uy9v6OLc5hF/aSsDaMrIQrLXXmLRC61yZOCL8Eq9hrV0SVGvRV7CVqOMwKb7uPADAm5p/tZXXmBmnx9/ucpagFxmhqmoOeJvn2784A2z49Z+Hw73Ey6fX1lpzuNUNkA3XZcNvv3yZQoReJLdqU7WA1Xm++g32RkjU8Tzqv025fnHIeKXrAiI45cTpZf9OkrtMuxR1fpV6QiySMvhfGJy6ntCJ11epjOHamhm81fa7g0S4A65lXleyJYTHL58+/PK+1k+fvoxfz9Ph6XPB9Mvnp/E18kSPP2P/4e5h/Mf//uPp+Yv3mB9/Tgcf/ufnz8PqDnJLnpzV3vzw3XD+v79/Oj033FGPn3jvx79/epxWvX7TAqfJl8cnHL88HuevN6PTaV/s8fH0e6N/CyQQZS61llJeuHpYq2SltHUG/h+njzIeBCxmbgAAAABJRU5ErkJggg==", - "text/plain": [ - "" - ] - }, - "metadata": {}, - "output_type": "display_data" - }, - { - "name": "stdout", - "output_type": "stream", - "text": [ - "Trouser\n" - ] - }, - { - "data": { - "image/png": 
"iVBORw0KGgoAAAANSUhEUgAAAGQAAABkCAAAAABVicqIAAAQGklEQVR4nJVaaY8cR3J9LyKPquo5SIqSvDBk+P//Gn83YNi70mq1kkjN0d1VlRkR/lDdQ4ozTXILIIGZ6cqXcb24mnh6SCAC4PZj4OPn1ZtdUVEhTx9LSaIt+3f/WPDFJ335IwAw7HZTFhEhSQYATQylT1drfPHtiyCnV1VVRfTm7TejepDC7R9EIqzP43CzWO/Wu/u/DHK+Xpp2Qx2GN99/M9q8BIVUUU2MsIjoy/5wPD4+Pj7sj18lSQBngzxpQKfXb252Vzff/eVtne8fDUJVzSWLtTVEGN4Pf/z622//1Nj/y5KAoDCNr95++2p3ffvtX96W4zD1IFNKuRRpyxKSNCXMv+2GTE11WT3iJQu9CBIA8m4suYw3r1/tckE/PBSXqh4QVaFH7+b0BKrk8cby7f64zsf7++MLpkl/PptPuhrefns9DsM0Dtr7o813Q0nCaJEAb+F9aQ5ILsmOXt+8crN++P2v/7d+AeRPz+7bH97sxpqVbX9Y93d1vL6Z0FsEHG7NVjMz5ly4dr0exiSx/5mPh0f7OhBJqX7z5s3rqSaFMbzbMs/OSBRIuPfevFlfVq8j1D2Yx5plsvfv27vH+VO7PAMJAPnm9vq7t7uM1hHW1iDMDL5M05Bg3axbkD4/9ClRzVZjDDX78H2bfvzb/DlJeLZHefPvb25vSszmHu4RIm7m6+E2X0999u4OSmDdL3GjqbVjzIcyjIlvx9dl/3v/GnXV2++/GxOsrd0iIKQQPi+L3DJBAJCBcOvdggxbra1l7UO9vtkdfhofvghC6rC7uUnWrHtEOEBA4Wuzw7z2kAw3631pHNKg1roDEW7dIpV4/fbbtKz9AgiJAEit4zQWLLMHlM4Ih6Ri3tpyPNSkNXtb23Jc063U3NoaSlVlmFno7tsfhvf3HQDPzPFMEkoep7Gk1mZPScTdzIMpe5v7enjMU80wCbaj16lKrNZCVZMq3Hrz4e1RY/4TxzxXl5ZxrFng5tyYHRGgQAX9cCdSqxg6vYeOoy+tmVNUVYgwR7l5uz7kizbZGDLVccgEQLiBAMmAW0CzP0ZPN2NuvqoQmnKzZXWKSEpCQTh0unkY0yWQ2GySx92Y4EHxMBchRBAGILPtD8v0b0NdlpRUg4S3dQnJQU0KYRik7q4Gfbr0BUnGaUywoIg7AgKQsAAT7bDY2xUqBCgOt95bIyGakoISDhn61agX1bUZPo9XU4YHKBGI2Ng7nEyyejvu94++HOfugDcuaw/RlFREACKYhnSYLttkAym766mEGSQisCVAAkERzwo7/vHr2A6HoyHacW2rU3MpWYUgECzTNO8u2uQMMl3tMswhAYd7BAAGgyIpZy5/DIOvy9I8mktvkFyHkgQQEmDayX76EkgadoNGBKikwzfFIQgilQLfvy/w3tYWEHGH5lozAaoyApJyzfJ5EGgdh+jmIAQw704FALpThoi03mVhtGUNUSUllTqIWaRc2Hts0fslkDLU3gMgSNC7CYKgu4kMOaJ1zRnrvCBVEWqpwxBuRJ2wuPWlzS0YnwOh5pJDGMKI8PBuSpAIA7Qy1rmJhbRlRk1ZJOVSsi9GlhGxdFu5GrV/BkRSUlWhCOFm4Wa23SoiAiLoCI9wN4MDknJJyu12NVbCnaF1PHq8GIwAqCUpIQpJsMXcukWA3FDcAKRg0hPFipZaE9yCknJxZURQ6u4aS78AQqaaGQFhKtJtNvMIbEU2EW5OJJGUQkRAES21KsyCmlJyIRDUen1r/qFu/QQklQ2ETFVX3Rx4q+TBLe4hIiouIhBStGSFB0REJLZPSp2uj8t6wSaahzHTzEHVZBIOSUgiJCAUkNy8DpIKErGRfFBEGO6gbNQyDomXQOo0ZlgPP+W6QFKoCoUECdn6hnAwB5UIUkUiRCWshVMlAlq/AFLY+5amSFIIiFKUPEdOBFwoiRuFkkKGQuDdnUJ4pDr
WfAlEyjhmeJdQgKIphSO4+QQABOARVFAhGz1vL6qoRg8H4Ait0/gRtXwKUmumWTAiwFS7dPcIBzeODWxlmGjQwyM83EkyS8p0Nw83hA7TcFmSPAyJbqCHO/PgXFo4Ik7J3t3cIyUqjB6BMDMIc8op0bt5GOA67MbPgNSaGQ5xD0iuHtY3JVEAhJuZQyCyAQPhRggkJYW7uyHgOu6mclFdeRiyhEeIByQVt4VbRxEBbnkySAoj3INCuPuZDSPcegg8Tf2qXgaptQAOowUVFqvgzCoMYuMvVTKsuYgI3EQQbiAR3ltAWCquPwuSzcNDHKqhIoiAgBsGAJKqIgxrkVQF4R7wDqUIvDeHpjTIrly0ieacwsNcQ7NYX5sBWwPmpwpEKCJPpLyBeLeuTCoIc0C0oF4MRmpK6t7NO8sYh8PdYfHTDIIuICkQFUYEQBHZynt6d2UtIggH0tZYXgCBiKoJvCcZptbvfl+CAgYiyBBIBCmC8IBARYWICO/zqmknSYkIbh6PCyAU1VO6kjKgP7yzUhWBiEAAFAS5HYxtNgEA8H446mAqSkR4gCIXQU5/jPCAJqyPD8Eqm+Oe+CyAkyd//F5f9nLtAiHCsfXhFyXhZtMtdcZ6eJRRJE4zgAgq4Ob02BLl9nsyfDnIEkIlwuFMki6CgCIbPUUgbNnvs1ENZ5qkkt0NuqnP3Tf1h69Hri4qZIQ7N2tdkuRje9k6HwbjuQNDBJgY7SwX4iwJfT1wcVXdmlsqkl4KRvIphQLh7XikBbceRYiAKI3hJ5uEbxWAINYjlpCkgi1b8iIIKZQ4TecQ1pYl9QC3kIiIoFK2+R4Q3NpWCOntgNU1bQlURKmqL4OIUHguHMK9t7U6CICiDPOA8IOu40mSiPUYq4vo9rqIaMpPVeSfhgVpK9POZ7j7qagNUGFnrztd48kmAKzNsdhW18dWraeczpXXBxBCS9Ytn57OgGyl5EbvHmFm4qCcIl3OGG7r7Otmvgg3j2AqNSw+BaGW/JEk4UGKylP3izDpXR0iqmIb/caWa6wtttqpJ4OZi+RaW2ya+ChkTiARgY21u5296lSpnhRIkZN8Zw8G3Mys+xMfgLk+EfFHIJpyTkq4h6iir2trBqoKnkrVjZc0pS2R+HahbeQa1ttTZ0JJpZyj/iN1Sc5JT/k0Zfb5uLTuklLfitRtIhwQJNVTAnaQoslTLl2steYBQghoLiUvz7zr6YIWKhl9Ph6XHpAnSZ7Ms026z3oR1ZRzBvqizc4lIF+UBCdHMnNoKdL8MDfzOBPNE68DQPBkCwCQhKSMPh+kQYQRpyu/AIKIMOu9G/NQtc2Ph6X1c/vLc9Ea4Wa2JS85dT/M4uvx8YENW2W0Ge4ct38CcbPee7ecx6Lt+HhYzU4dyhkGCHczC4dCtrm9JEm0dny812Bu0v1Jqc9BSISZeTAPOZbjYbH48LfTxSPcaYag4PQLClXo6+GxZKYk2ynQF0C4OasHKCnnZutq2AYaxEbQQjjcXTaD4xwtoKScsByGKaecBH2O2aj6XBLVpIxzHHRrHSmXnJNww1A7hePJqXjyMXdKHgbphzEXzTlx3S/7lUm3bP2xJFpq0bNfpMW6M0etOT25sCBsy5sAKaAgwt1dWKYl23ycICkl9oMcTHJ6JomkYRqyQBS5FBFvJkMa89MkkSIOxzmmRTZncXen1N1aYl2ag6KITk/DWNIzkOH61c2gIklrqRTvUa7jqsDOdwfpp1rotKnZINwow1VP6n1ZaJtz16tX87v8zPDD7aubKYlm1GGARo96w6scvVt82BacY5+nmsHdzUOGa2OO3lb1oKako7fl11OP8pEkZbyaSlJQhqE6+9J1ypOaP3X9PKvt1Nu5k+buplJ31tCaQcQ0qSb4bhqeqQtMKeeUgHR9NTWuh2OUkmBwnE7dovFUAbi7+abAgJRp9aXn8uaNPeyR9ZTunqkLQUm1asrXr66OXB4eUhJag3MLXpKCrS1GuHXrTm60I2W
oj3sfr/7z39dffmkpSw8ze5Z+w9raKrSWMg5l6Ye7x2lHeuNW6gS2WhinKtO6dRP1QERIHmrs19v6/X/M7d7pth4Px/N45QOIr/t7sbUNdUBqD3d3D3OVnGAMoQq3dEvBmSfjXGm6Gcq0ZDs87o/z4fHhIdQPD+/f79unktj+N/tjGGqtdXc7Hv/3l/tgnYpEhCCJwK1h22aeN6cb00RfyXobO51//e/Xfz/+9Pe905bj4927+VNJ7MH+KCmlnOp0u+s//v2h6rjLvQUpSSW8NSQRIUUVIhRsGa3PUceRt7X987/e36x3d3OEWVuX/eG5JAchSNU8Xe/47tdDTsNO5hYUVSLcmihVSBUBuQ2/Bd5X5lJxPeB+/6P21rq7n/T5qeED55XX436Ux70zlcoGQBTRvTsoKQuhylORJ7LVWdDBpqnGw5197XZuQePskFwKBCEi6OHNqbkWASineTHlVA9TNA9X1/ftpTXjhRVgdxhYck5BgEK3HgbNpRSJ4DZVOY8kNjaTevVq3+3Z1uwiiBnAYahZ/NRGWA+olnSuJp4S/1Z0ww356tXj4fDScS+AbPrW4fpm1K3VcVo35lqSWmAr9E49AsO6uvd1MZleP97p8/OegxAgHcBw8/r1JO4OuIV5SNlV9tUjoClvdK8KN6pZnxnYvdm/Sx/d8zOSkGBA6u03ryaxHoB7eFDLMFpfm4ckpG0RtXWY7tZnAOOrh22nwU+OvLT7ZdldXxWYObjtTlJWeG9rENvAABCRcEa4974K6243yEuHPQfZJGUar6Zkp54DmosWn9d57cwpp41XqMlhQribQVOt6atA4vxfGqZB3S0kHEzjJN7WZW6etZSslDN14TT7CqZzfxMfNryX1HVe0k2j2nnrJLlwWQ9rc0pKKek5Sz71LQHRnPUr1bX5BvN0fcO1tUCEo7cZ8/5g2Crcc7na6b0zhSSH5CJjTZ841iUQnkBuX8WyzA63cGCJ42GRWkrS8yrNmrgvjXmQFNQy5qkmsZOIH+nrJRcmAIjmSlG0tUvAl9WWuZdUB4mTmiLCYK2R3QNU1dA/tc+fVRcAoB0eSobWjhIR7gbNqU5XFd1VE3upPeVk0iTXWrKiYX6cn3934RIIA37/E759fTNOg0MQbZ1ba1F2V8nXrkOlMlvOmQFJmsuQcXz//sffzl9diK8BsT/+5913P/yQp7HmwlgO+7UbtFa2ZZU6SEkjcsk5J2WAEvPdLz//9PN+o/rgF1wYAcAf97/fYxyHqUxVsSRZPaCqQQ+qKJGYS6lDEV979/Xht7/99R+/Hr9aEgCImN//c/KHV7txzNKX1bbmKdra4KbRjdbbumgsh7n19eEfP/787rG9dNYlWgGA+dd4txtLHYasFEmnzX43qDLcQ4WgL4/3h6W3+f6Pu8PyouFf8rjzI7mmJJrqOA7jOOSScjr32wgPJ7z3ef/H+4e5u/XWzF78LtHnQM6P7qar6+tdLSVnVW6LufBwRF/nw/273+5fSrofnq/5pprdG1JOJAD308ggHAbv63I87h8/j4H/B3D9sapwrbYGAAAAAElFTkSuQmCC", - "text/plain": [ - "" - ] - }, - "metadata": {}, - "output_type": "display_data" - }, - { - "name": "stdout", - "output_type": "stream", - "text": [ - "T-shirt/top\n" - ] - } - ], - "source": [ - "from IPython import display\n", - "from PIL import Image\n", - "import numpy as np\n", - "\n", - "class_names = [\n", - " \"T-shirt/top\",\n", - " \"Trouser\",\n", - " \"Pullover\",\n", - " \"Dress\",\n", - " \"Coat\",\n", - 
" \"Sandal\",\n", - " \"Shirt\",\n", - " \"Sneaker\",\n", - " \"Bag\",\n", - " \"Ankle boot\",\n", - "]\n", - "\n", - "\n", - "for arr in test_images[:5]:\n", - "\n", - " res = p.predict(\n", - " data={\n", - " \"conv2d_input\": arr.reshape(1, 28, 28, 1),\n", - " }\n", - " )\n", - " idx = np.argmax(res[\"dense_1\"][0])\n", - " display.display(Image.fromarray(arr.reshape(28, 28), mode=\"L\").resize((100, 100)))\n", - " print(class_names[idx])" - ] - }, - { - "attachments": {}, - "cell_type": "markdown", - "metadata": { - "pycharm": { - "name": "#%% md\n" - } - }, - "source": [ - "在测试完成之后,删除相关的服务,释放资源。" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "p.delete_service()" - ] - } - ], - "metadata": { - "execution": { - "timeout": 1800 - }, - "kernelspec": { - "display_name": "Python 3", - "language": "python", - "name": "python3" - }, - "language_info": { - "codemirror_mode": { - "name": "ipython", - "version": 3 - }, - "file_extension": ".py", - "mimetype": "text/x-python", - "name": "python", - "nbconvert_exporter": "python", - "pygments_lexer": "ipython3", - "version": "3.8.3" - } - }, - "nbformat": 4, - "nbformat_minor": 0 -} diff --git a/docs/source/tutorial/train.rst b/docs/source/tutorial/train.rst deleted file mode 100644 index 2a70cbe..0000000 --- a/docs/source/tutorial/train.rst +++ /dev/null @@ -1,11 +0,0 @@ -=========================================== -模型开发 -=========================================== - -.. 
toctree:: - :maxdepth: 1 - - pretrained-model/pretrained-model - pytorch_ddp/pytorch_ddp - tensorboard/tensorboard - checkpoint/checkpoint diff --git a/docs/source/tutorial/xgboost_breast_cancer/xgboost_breast_cancer.ipynb b/docs/source/tutorial/xgboost_breast_cancer/xgboost_breast_cancer.ipynb deleted file mode 100644 index adc324a..0000000 --- a/docs/source/tutorial/xgboost_breast_cancer/xgboost_breast_cancer.ipynb +++ /dev/null @@ -1,529 +0,0 @@ -{ - "cells": [ - { - "attachments": {}, - "cell_type": "markdown", - "metadata": { - "pycharm": { - "name": "#%% md\n" - } - }, - "source": [ - "# 使用 PAI Python SDK 训练和部署 XGBoost 模型\n", - "\n", - "\n", - "[XGBoost](https://xgboost.readthedocs.io/) 是基于决策树的梯度提升算法([Gradient Boosting](https://en.wikipedia.org/wiki/Gradient_boosting))的高效工程实现,是一个流行的机器学习库,它能够处理大的数据集合,并且做了许多训练性能优化工作。\n", - "\n", - "在这个教程示例中,我们将使用PAI Python SDK,在PAI上完成XGBoost模型的训练,然后将输出的模型部署为在线推理服务,并进行调用测试。" - ] - }, - { - "attachments": {}, - "cell_type": "markdown", - "metadata": { - "pycharm": { - "name": "#%% md\n" - } - }, - "source": [ - "## Step1: 准备工作\n", - "\n", - "我们需要首先安装 PAI Python SDK 以运行本示例。\n" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": { - "tags": [ - "skip-execution" - ] - }, - "outputs": [], - "source": [ - "\n", - "!python -m pip install --upgrade alipai" - ] - }, - { - "attachments": {}, - "cell_type": "markdown", - "metadata": {}, - "source": [ - "\n", - "\n", - "SDK 需要配置访问阿里云服务需要的 AccessKey,以及当前使用的工作空间和OSS Bucket。在 PAI SDK 安装之后,通过在 **命令行终端** 中执行以下命令,按照引导配置密钥,工作空间等信息。\n", - "\n", - "\n", - "```shell\n", - "\n", - "# 以下命令,请在 命令行终端 中执行.\n", - "\n", - "python -m pai.toolkit.config\n", - "\n", - "```\n", - "\n", - "我们可以通过以下代码验证当前的配置。\n" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": { - "pycharm": { - "name": "#%%\n" - } - }, - "outputs": [], - "source": [ - "# 验证安装\n", - "import pai\n", - "from pai.session import get_default_session\n", - "\n", - "print(pai.__version__)\n", - "\n", 
- "sess = get_default_session()\n", - "\n", - "assert sess.workspace_name is not None\n", - "print(sess.workspace_name)" - ] - }, - { - "attachments": {}, - "cell_type": "markdown", - "metadata": {}, - "source": [ - "## Step2: 准备数据集\n", - "\n", - "我们将使用[Breast Cancer数据集](https://archive.ics.uci.edu/ml/datasets/Breast+Cancer+Wisconsin+(Diagnostic)),训练和测试XGBoost模型。准备数据集的步骤如下:\n", - "\n", - "1. 通过 `scikit-learn` 下载和拆分 Breast Cancer 数据集,使用 `csv` 格式保存到本地。\n", - "\n", - "2. 将本地数据集上传到OSS Bucket上,获得数据集的OSS URI,供云上执行的训练作业使用。" - ] - }, - { - "cell_type": "markdown", - "metadata": { - "pycharm": { - "name": "#%% md\n" - } - }, - "source": [ - "使用SKLearn下载和拆分数据集。" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": { - "pycharm": { - "name": "#%%\n" - } - }, - "outputs": [], - "source": [ - "import sys\n", - "\n", - "# 安装 sklearn, 用于数据集下载和切分\n", - "!{sys.executable} -m pip install --quiet scikit-learn\n", - "\n", - "# 创建数据集目录\n", - "!mkdir -p ./train_data\n", - "!mkdir -p ./test_data\n", - "\n", - "from sklearn import datasets\n", - "from sklearn.model_selection import train_test_split\n", - "\n", - "df = datasets.load_breast_cancer(as_frame=True)\n", - "\n", - "train, test = train_test_split(df.frame, test_size=0.3)\n", - "\n", - "train_data_local = \"./train_data/train.csv\"\n", - "test_data_local = \"./test_data/train.csv\"\n", - "\n", - "train.to_csv(train_data_local, index=False)\n", - "test.to_csv(test_data_local, index=False)\n", - "\n", - "print(f\"train data local path: {train_data_local}\")\n", - "print(f\"test data local path: {test_data_local}\")" - ] - }, - { - "cell_type": "markdown", - "metadata": { - "pycharm": { - "name": "#%% md\n" - } - }, - "source": [ - "上传数据集到OSS Bucket。" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "# 上传数据集到OSS Bucket\n", - "from pai.common.oss_utils import upload\n", - "\n", - "\n", - "# 上传训练数据到OSS\n", - "train_data = upload(\n", - " 
train_data_local,\n", - " \"pai/xgboost-example/train_data/\",\n", - " sess.oss_bucket,\n", - ")\n", - "\n", - "\n", - "test_data = upload(\n", - " test_data_local,\n", - " \"pai/xgboost-example/test_data/\",\n", - " sess.oss_bucket,\n", - ")\n", - "\n", - "print(f\"train data: {train_data}\")\n", - "print(f\"test data: {test_data}\")" - ] - }, - { - "attachments": {}, - "cell_type": "markdown", - "metadata": {}, - "source": [ - "## Step3: 提交训练作业\n", - "\n", - "通过PAI Python SDK提供`Estimator`,用户可以将训练脚本,提交到PAI创建一个训练作业,获得输出模型,主要流程包括:\n", - "\n", - "1. 用户编写训练作业脚本\n", - "\n", - "训练脚本负责模型代码的编写,它需要遵循PAI训练作业的规则获取作业超参,读取输入数据,并且将需要保存模型到指定的输出目录。\n", - "\n", - "2. 构建`Estimator`对象\n", - "\n", - "通过`Estimator` API,用户配置训练作业使用的脚本,镜像,超参,以及机器实例类型等信息。\n", - "本地的脚本会有Estimator上传到OSS Bucket,然后加载到训练作业内。\n", - "\n", - "3. 调用`Estimator.fit`API提交作业\n", - "\n", - "通过`.fit`提交一个训练作业,默认`.fit`方法会等到作业停止之后,才会退出,作业结束后,用户可以通过`estimator.model_data()`获得输出模型OSS URI路径。\n", - "\n", - "更加完整的介绍请参考 [文档: 提交训练作业](https://pai-sdk.oss-cn-shanghai.aliyuncs.com/pai/doc/latest/user-guide/estimator.html)" - ] - }, - { - "attachments": {}, - "cell_type": "markdown", - "metadata": {}, - "source": [ - "我们通过XGboost提供的SKlearn API,构建了一个XGBoost的训练脚本:\n", - "\n", - "- 训练作业默认接收两个输入Channel: train 和 test,训练脚本会从 `/ml/input/data/{channel_name}` 中读取训练数据。\n", - "\n", - "- 训练结束之后,训练脚本需要将模型写出到到 `/ml/output/model` 目录下。" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": { - "pycharm": { - "name": "#%%\n" - } - }, - "outputs": [], - "source": [ - "!mkdir -p xgb_src/" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "%%writefile xgb_src/train.py\n", - "\n", - "\n", - "import argparse\n", - "import logging\n", - "import os\n", - "\n", - "import pandas as pd\n", - "from xgboost import XGBClassifier\n", - "\n", - "logging.basicConfig(format=\"%(levelname)s:%(message)s\", level=logging.INFO)\n", - "\n", - "TRAINING_BASE_DIR = \"/ml/\"\n", - 
"TRAINING_OUTPUT_MODEL_DIR = os.path.join(TRAINING_BASE_DIR, \"output/model/\")\n", - "\n", - "\n", - "def load_dataset(channel_name):\n", - " path = os.path.join(TRAINING_BASE_DIR, \"input/data\", channel_name)\n", - " if not os.path.exists(path):\n", - " return None, None\n", - "\n", - " # use first file in the channel dir.\n", - " file_name = next(\n", - " iter([f for f in os.listdir(path) if os.path.isfile(os.path.join(path, f))]),\n", - " None,\n", - " )\n", - " if not file_name:\n", - " logging.warning(f\"Not found input file in channel path: {path}\")\n", - " return None, None\n", - "\n", - " file_path = os.path.join(path, file_name)\n", - " df = pd.read_csv(\n", - " filepath_or_buffer=file_path,\n", - " sep=\",\",\n", - " )\n", - "\n", - " train_y = df[\"target\"]\n", - " train_x = df.drop([\"target\"], axis=1)\n", - " return train_x, train_y\n", - "\n", - "\n", - "def main():\n", - " parser = argparse.ArgumentParser(description=\"XGBoost train arguments\")\n", - " # 用户指定的任务参数\n", - " parser.add_argument(\n", - " \"--n_estimators\", type=int, default=500, help=\"The number of base model.\"\n", - " )\n", - " parser.add_argument(\n", - " \"--objective\", type=str, help=\"Objective function used by XGBoost\"\n", - " )\n", - "\n", - " parser.add_argument(\n", - " \"--max_depth\", type=int, default=3, help=\"The maximum depth of the tree.\"\n", - " )\n", - "\n", - " parser.add_argument(\n", - " \"--eta\",\n", - " type=float,\n", - " default=0.2,\n", - " help=\"Step size shrinkage used in update to prevents overfitting.\",\n", - " )\n", - " parser.add_argument(\n", - " \"--eval_metric\",\n", - " type=str,\n", - " default=None,\n", - " help=\"Evaluation metrics for validation data\"\n", - " )\n", - "\n", - " args, _ = parser.parse_known_args()\n", - "\n", - " # 加载数据集\n", - " train_x, train_y = load_dataset(\"train\")\n", - " print(\"Train dataset: train_shape={}\".format(train_x.shape))\n", - " test_x, test_y = load_dataset(\"test\")\n", - " if test_x is None or 
test_y is None:\n", - " print(\"Test dataset not found\")\n", - " eval_set = [(train_x, train_y)]\n", - " else:\n", - " eval_set = [(train_x, train_y), (test_x, test_y)]\n", - "\n", - " clf = XGBClassifier(\n", - " max_depth=args.max_depth,\n", - " eta=args.eta,\n", - " n_estimators=args.n_estimators,\n", - " objective=args.objective,\n", - " )\n", - " clf.fit(train_x, train_y, eval_set=eval_set, eval_metric=args.eval_metric)\n", - "\n", - " model_path = os.environ.get(\"PAI_OUTPUT_MODEL\")\n", - " os.makedirs(model_path, exist_ok=True)\n", - " clf.save_model(os.path.join(model_path, \"model.json\"))\n", - " print(f\"Save model succeed: model_path={model_path}/model.json\")\n", - "\n", - "\n", - "if __name__ == \"__main__\":\n", - " main()\n" - ] - }, - { - "attachments": {}, - "cell_type": "markdown", - "metadata": {}, - "source": [ - "### 使用Estimator提交训练作业\n", - "\n", - "通过 Estimator, 我们将以上构建的训练脚本 (xgb_src/train.py) 上传到 OSS上,通过`fit` 提交一个在云端执行XGBoost训练作业。 fit API接收的inputs分别是之前上传的训练和测试的数据,会被挂载到作业容器中(分别挂载到 `/ml/input/data/{channel_name}/`),供训练脚本读取输入数据。\n", - "\n", - "提交之后,SDK 会打印作业的详情URL,并且打印作业日志,直到作业退出(成功,失败,或是停止)。用户可以点击作业URL查看任务详情,执行日志,模型的Metric,机器资源使用率等信息。" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "from pai.estimator import Estimator\n", - "from pai.image import retrieve\n", - "\n", - "\n", - "# 获取PAI提供的XGBoost训练镜像\n", - "image_uri = retrieve(\"xgboost\", framework_version=\"latest\").image_uri\n", - "print(image_uri)\n", - "\n", - "# 构建一个Estimator实例\n", - "est = Estimator(\n", - " # 作业启动脚本\n", - " command=\"python train.py $PAI_USER_ARGS\",\n", - " # 作业脚本的本地文件夹路径,会被打包上传到OSS\n", - " source_dir=\"./xgb_src/\",\n", - " image_uri=image_uri,\n", - " # 作业超参: 会通过Command arguments的方式传递给到作业脚本\n", - " hyperparameters={\n", - " \"n_estimator\": 100,\n", - " \"criterion\": \"gini\",\n", - " \"max_depth\": 5,\n", - " \"eval_metric\": \"auc\",\n", - " },\n", - " # 作业使用的机器实例\n", - " 
instance_type=\"ecs.c6.large\",\n", - ")\n", - "\n", - "# 使用上传到OSS的训练数据作为作业的数据\n", - "est.fit(\n", - " inputs={\n", - " \"train\": train_data, # train_data 将被挂载到`/ml/input/data/train`目录\n", - " \"test\": test_data, # test_data 将被挂载到`/ml/input/data/test`目录\n", - " },\n", - ")\n", - "print(est.model_data())" - ] - }, - { - "attachments": {}, - "cell_type": "markdown", - "metadata": {}, - "source": [ - "## Step4: 部署模型\n", - "\n", - "以上训练获得模型,我们将使用[预置XGBoost Processor](https://help.aliyun.com/document_detail/470490.html)部署为一个在线服务。主要流程包括:\n", - "\n", - "1. 通过构建一个InferenceSpec\n", - "\n", - "InferenceSpec负责描述模型如何部署为一个在线服务,例如模型使用镜像部署,还是使用processor部署等。\n", - "\n", - "2. 构建Model对象\n", - "\n", - "Model对象可以直接部署服务,也可以通过`.register`注册到PAI的模型仓库。\n", - "\n", - "3. 使用`Model.deploy`部署在线服务。\n", - "\n", - "通过指定服务名称,机器实例类型,部署一个新的在线推理服务。\n" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "from pai.model import Model, InferenceSpec\n", - "from pai.predictor import Predictor\n", - "\n", - "from pai.common.utils import random_str\n", - "import os\n", - "\n", - "\n", - "# 使用模型文件地址以及 InferenceSpec 构建一个Model对象\n", - "m = Model(\n", - " # `est.model_data()`返回的是模型文件所在的OSS目录的URI,XGBoost processor需要传递具体的模型文件。\n", - " model_data=os.path.join(est.model_data(), \"model.json\"),\n", - " inference_spec=InferenceSpec(processor=\"xgboost\"),\n", - ")\n", - "\n", - "\n", - "# 部署服务\n", - "p: Predictor = m.deploy(\n", - " service_name=\"example_xgb_{}\".format(random_str(6)),\n", - " instance_type=\"ecs.c6.xlarge\",\n", - " # 启动的服务实例个数。\n", - " instance_count=1,\n", - " # 按照 每一个服务的资源使用量,而不是机器类型创建服务。\n", - " # instance_resource_config=ResourceConfig(\n", - " # cpu=2,\n", - " # memory=4000,\n", - " # )\n", - ")" - ] - }, - { - "attachments": {}, - "cell_type": "markdown", - "metadata": {}, - "source": [ - "## Step5: 测试在线服务\n", - "\n", - "`Model.deploy`方法返回一个 `Predictor` 对象,`Predictor.predict`方法支持向创建的推理服务发送推理请求,拿到预测结果。" - ] - }, - { - 
"cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "print(p.service_name)\n", - "\n", - "test_x = test.drop([\"target\"], axis=1)\n", - "\n", - "p.predict(test_x.to_numpy())" - ] - }, - { - "cell_type": "markdown", - "metadata": { - "pycharm": { - "name": "#%% md\n" - } - }, - "source": [ - "在测试结束后,删除服务。" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "p.delete_service()" - ] - } - ], - "metadata": { - "execution": { - "timeout": 1800 - }, - "kernelspec": { - "display_name": "pai-dev-py36", - "language": "python", - "name": "python3" - }, - "language_info": { - "codemirror_mode": { - "name": "ipython", - "version": 3 - }, - "file_extension": ".py", - "mimetype": "text/x-python", - "name": "python", - "nbconvert_exporter": "python", - "pygments_lexer": "ipython3", - "version": "3.8.16" - }, - "vscode": { - "interpreter": { - "hash": "63703143536f433679c5464335316251eaa13807b3fcc3854dae32f2699871d6" - } - } - }, - "nbformat": 4, - "nbformat_minor": 0 -} diff --git a/docs/source/user-guide/pretrained-model.rst b/docs/source/user-guide/pretrained-model.rst index b32c4b7..cf3f96c 100644 --- a/docs/source/user-guide/pretrained-model.rst +++ b/docs/source/user-guide/pretrained-model.rst @@ -87,113 +87,46 @@ PAI公共仓库中的部分模型,也提供了微调训练算法,支持用 .. code-block:: python - from pai.model import RegisteredModel - from pai.estimator import AlgorithmEstimator + from pai.model import RegisteredModel, ModelTrainingRecipe # 获取PAI提供的Bert模型 m = RegisteredModel("bert-base-uncased", model_provider="pai") - # 获取模型的微调训练算法 - est: AlgorithmEstimator = m.get_estimator() - - # 查看算法的超参数定义描述、输入定义描述,以及输出定义描述。 - print(est.hyperparameter_definitions) - # [{'DefaultValue': '1', - # 'Type': 'Int', - # 'Description': 'Number of epochs to train the model. 
Each epoch is one complete iteration over the entire training dataset.', - # 'Required': True, - # 'Name': 'max_epochs'}, - # {'DefaultValue': '16', - # 'Type': 'Int', - # 'Description': 'Number of samples that will be propagated through the model. A higher value might consume more memory.', - # 'Required': False, - # 'Name': 'batch_size'}, - # {'DefaultValue': '0.00001', - # 'Type': 'Float', - # 'Description': 'The initial learning rate to be used for training. A higher value usually implies more aggression in gradient updates.', - # 'Required': False, - # 'Name': 'learning_rate'}, - # {'DefaultValue': '2000', - # 'Type': 'Int', - # 'Description': 'Number of updates steps before two checkpoint.', - # 'Required': False, - # 'Name': 'save_steps'} - # ] - print(est.input_channel_definitions) - # [{'Description': 'Input channel for pretrained model to be fine-tuned on.', - # 'Required': True, - # 'SupportedChannelTypes': ['oss'], - # 'Properties': {'ResourceUse': 'Base', 'ResourceType': 'Model'}, - # 'Name': 'model'}, - # {'Description': 'Input channel for training dataset.', - # 'Required': True, - # 'SupportedChannelTypes': ['oss'], - # 'Properties': {'ResourceUse': 'Train', 'ResourceType': 'Dataset'}, - # 'Name': 'train'}, - # {'Description': 'Input channel for validation dataset.', - # 'Required': False, - # 'SupportedChannelTypes': ['oss'], - # 'Properties': {'ResourceUse': 'Validation', 'ResourceType': 'Dataset'}, - # 'Name': 'validation'}] - - - # 查看算法的默认输入,包含了预训练模型,训练数据,验证数据等 - training_inputs = m.get_estimator_inputs() - print(training_inputs) - # { - # 'model': 'oss://pai-quickstart-cn-hangzhou.oss-cn-hangzhou.aliyuncs.com/huggingface/models/bert-base-uncased/main/', - # 'train': 'oss://pai-quickstart-cn-hangzhou.oss-cn-hangzhou.aliyuncs.com/huggingface/datasets/sst2/main/train.json', - # 'validation': 'oss://pai-quickstart-cn-hangzhou.oss-cn-hangzhou.aliyuncs.com/huggingface/datasets/sst2/main/validation.json' - # } - - # 使用默认输入进行微调训练 - 
est.fit(inputs=training_inputs) - - # 查看训练输出的模型,默认模型存储在OSS URI上 - print(est.model_data()) - - -以上的训练任务中,我们使用了PAI提供的公共数据集,对模型进行微调训练。当用户需要使用自己的数据集进行微调训练时,需要先将数据准备到OSS,或是NAS上,然后将数据的OSS或是NAS路径,作为训练任务的输入。 - - -使用用户训练数据集提交训练任务: - -.. code-block:: python - - from pai.estimator import AlgorithmEstimator - - # 获取模型的微调训练算法 - est: AlgorithmEstimator = m.get_estimator() - # 配置修改提交的训练算法超参,具体的超参用途可以查看 est.hyperparameter_definitions 中的描述. - est.hyperparameters = { - 'max_epochs': 1, - 'batch_size': 8, - 'learning_rate': 2e-05, - 'save_steps': 2000 - } - - # 默认的训练输入 - default_training_inputs = m.get_estimator_inputs() - # 使用用户的数据集进行微调训练 - training_inputs = { - # 使用PAI提供预训练模型作为基础模型输入 - "model": default_training_inputs["model"], - # 使用用户的训练和测试数据集 - "train": "oss:///my-dataset/train.json", - "validation": "oss:///my-dataset/validation.json" - } - - est.fit(inputs=training_inputs) - -用户可以通过模型卡片上的文档,查看模型的微调训练数据格式。同时也可以参考相应的模型微调训练的默认输入数据格式,进行数据的准备。 - -下载PAI数据集到本地目录: - -.. code-block:: python + training_recipe = m.training_recipe() + + training_recipe = ModelTrainingRecipe( + model_name = "bert-base-uncased", + model_provider = "pai", + instance_type = "ecs.c6.xlarge", + # 训练任务的超参数 + hyperparameters={ + "max_epochs": 1, + "learning_rate": 0.00001, + "batch_size": 16, + "save_steps": 2000, + }, + ) - from pai.common.oss_util import download + # 查看模型微调算法输入定义 + print(training_recipe.input_channels) + # 查看模型微调算法超参数定义 + print(training_recipe.hyperparameter_definitions) + # 查看默认训练输入数据 + print(training_recipe.default_inputs) + + # 提交微调训练作业 + job = training_recipe.train( + job_name="train_recipe_example", + # 配置使用用户在OSS Bucket上的数据作为训练数据 + # inputs={ + # "train": "oss:///" + # } + ) + # 获取微调后模型路径 + print(training_recipe.model_data()) - # 默认的训练输入 - default_training_inputs = m.get_estimator_inputs() + # 使用PAI提供的推理服务配置部署模型 + predictor = training_recipe.deploy( + service_name="bert_example", + ) - # 下载PAI提供的公共训练数据到本地 - download(default_training_inputs["train"], "./train/") +用户可以通过PAI 
ModelGallery提供的模型卡片上的文档,查看具体模型模型的微调训练数据格式。 diff --git a/pai/estimator.py b/pai/estimator.py index 8a8d936..2ad604e 100644 --- a/pai/estimator.py +++ b/pai/estimator.py @@ -771,6 +771,7 @@ def _build_algorithm_spec( def fit( self, inputs: Dict[str, Any] = None, + outputs: Dict[str, Any] = None, wait: bool = True, show_logs: bool = True, job_name: Optional[str] = None, @@ -783,6 +784,9 @@ def fit( the key is the channel name, and the value is the input data. The input data can be an OSS URI or a NAS URI object and will be mounted to the `/ml/input/data/{channel_name}` directory in the training container. + outputs (Dict[str, Any]): A dictionary representing the output locations for + the training job. Each key/value pair in the dictionary is an output channel, + the key is the channel name, and the value is the output data location. wait (bool): Specifies whether to block until the training job is completed, either succeeded, failed, or stopped. (Default True). show_logs (bool): Specifies whether to show the logs produced by the @@ -808,13 +812,18 @@ def fit( wait=wait, ) return self._fit( - inputs=inputs, job_name=job_name, wait=wait, show_logs=show_logs + inputs=inputs, + outputs=outputs, + job_name=job_name, + wait=wait, + show_logs=show_logs, ) def _fit( self, job_name, inputs: Dict[str, Any], + outputs: Dict[str, Any], wait: bool = True, show_logs: bool = True, ) -> TrainingJob: @@ -829,10 +838,9 @@ def _fit( input_channels=algo_spec.input_channels, ) + outputs = outputs or {} if self.checkpoints_path: - outputs = {DEFAULT_CHECKPOINT_CHANNEL_NAME: self.checkpoints_path} - else: - outputs = None + outputs.update({DEFAULT_CHECKPOINT_CHANNEL_NAME: self.checkpoints_path}) outputs = self.build_outputs( job_name=job_name, @@ -1218,6 +1226,7 @@ def _get_default_training_instance_type(self) -> str: def fit( self, inputs: Dict[str, Any] = None, + outputs: Dict[str, Any] = None, wait: bool = True, show_logs: bool = True, job_name: Optional[str] = None, @@ -1251,6 +1260,7 
@@ def fit( output_configs = self.build_outputs( job_name, output_channels=self._algo_spec.output_channels, + outputs=outputs, ) return self._submit( instance_count=self.instance_count, diff --git a/pai/session.py b/pai/session.py index ae84987..6ef6585 100644 --- a/pai/session.py +++ b/pai/session.py @@ -286,7 +286,7 @@ def __init__( self._credential_config = credential_config self._region_id = region_id - self._workspace_id = workspace_id + self._workspace_id = str(workspace_id) self._oss_bucket_name = oss_bucket_name self._oss_endpoint = oss_endpoint diff --git a/tests/integration/test_estimator.py b/tests/integration/test_estimator.py index 7efe140..32ae661 100644 --- a/tests/integration/test_estimator.py +++ b/tests/integration/test_estimator.py @@ -75,6 +75,39 @@ def test_xgb_train(self): model_path = os.path.join(os.path.join(est.model_data(), "model.json")) self.assertTrue(self.is_oss_object_exists(model_path)) + def test_output_config(self): + xgb_image_uri = retrieve("xgboost", framework_version="latest").image_uri + sess = get_default_session() + + est = Estimator( + image_uri=xgb_image_uri, + source_dir=os.path.join(test_data_dir, "xgb_train"), + command="python train.py", + hyperparameters={ + "n_estimators": 50, + "objective": "binary:logistic", + "max_depth": 5, + "eval_metric": "auc", + }, + instance_type="ecs.c6.large", + ) + test_output_path = ( + f"oss://{sess.oss_bucket.bucket_name}/sdk-test/test-output/{random_str(6)}/" + ) + est.fit( + inputs={ + "train": self.breast_cancer_train_data_uri, + "test": self.breast_cancer_test_data_uri, + }, + outputs={ + "model": test_output_path, + }, + ) + + self.assertEqual(test_output_path, est.model_data()) + model_path = os.path.join(os.path.join(test_output_path, "model.json")) + self.assertTrue(self.is_oss_object_exists(model_path)) + @skipUnless(t_context.support_spot_instance, "Skip spot instance test") def test_use_spot_instance(self): xgb_image_uri = retrieve("xgboost", 
framework_version="latest").image_uri