diff --git a/Dockerfile b/Dockerfile
new file mode 100644
index 0000000..f8022cf
--- /dev/null
+++ b/Dockerfile
@@ -0,0 +1,34 @@
+#FROM python:3.8-slim-buster
+FROM python:3.10
+
+# Copy everything in the current directory into the image
+ADD . /workspace/code-repo
+# Use /workspace/code-repo as the working directory for the following instructions
+WORKDIR /workspace/code-repo
+
+RUN pip install fastapi uvicorn
+RUN pip3 install requests
+
+ENV PYTHONPATH /workspace/code-repo
+
+# Install required tools such as curl (if not already present)
+RUN apt-get update && apt-get install -y curl
+
+# Run the Ollama install script
+RUN curl -fsSL https://ollama.com/install.sh | sh
+
+# After installation, the Ollama service is started by the final CMD below
+#RUN ollama serve
+#RUN ollama run qwen2:0.5b
+
+# Environment variable so Flask binds to all available IPs
+ENV FLASK_RUN_HOST=0.0.0.0
+
+# Expose the API service port
+EXPOSE 8000
+
+# ENTRYPOINT [ "python3", "./app_stream.py" ]
+# CMD ["flask", "run"]
+
+# Only the last CMD takes effect, so start Ollama, load the model, and launch uvicorn in one command (the unsupported --load-balancer flag is dropped)
+CMD sh -c "ollama serve & ollama run llama3.2 & uvicorn app:app --host 0.0.0.0 --port 8000"
diff --git a/Makefile b/Makefile
new file mode 100644
index 0000000..c90c21f
--- /dev/null
+++ b/Makefile
@@ -0,0 +1,14 @@
+.PHONY: build
+app=rongliang_algorithm_serving_applet_cloud
+legacy_image=$(shell docker images --filter=reference="*rongliang_algorithm_serving_applet_cloud*" -q)
+version=$(shell date '+%Y%m%d%H%M')
+build:
+ifeq ($(strip $(legacy_image)),)
+	@echo "nope"
+else
+	docker rmi -f ${legacy_image}
+endif
+	docker buildx build --platform linux/amd64 -t ${app} .
+# docker login --username=trsopenapi@1219654161317312 registry.cn-beijing.aliyuncs.com/saasalpha/tmaster --password=AlipaySaas22
+# docker tag ${app} registry.cn-beijing.aliyuncs.com/saasalpha/tmaster:ats_${app}_$(version)
+# docker push registry.cn-beijing.aliyuncs.com/saasalpha/tmaster:ats_${app}_$(version)
\ No newline at end of file
diff --git a/README.md b/README.md
new file mode 100644
index 0000000..7015661
--- /dev/null
+++ b/README.md
@@ -0,0 +1,213 @@
+中文 | English
+
+| Category | Function | Status | Description |
+| --- | --- | --- | --- |
+| API Service | OpenAI Standard API | ✅ | The service interface complies with the OpenAI standard, minimizing integration costs through standardized APIs. Users can integrate and maintain the system easily, respond quickly to business requirements, and concentrate on core development. |
+| | Blocking access capabilities | ✅ | Suitable for tasks that require integrity and coherence, or for overall verification and processing of results; the complete output is obtained in a single response, and the user waits until all output has been generated. |
+| | Streaming access capabilities | ✅ | Suitable for real-time applications with strict response-time requirements, such as code completion, real-time translation, or dynamic content loading. The model transmits content incrementally during generation, so users can receive and process partial output immediately without waiting for completion, improving interactivity. |
+| | High-performance gateway | ⬜ | A high-performance gateway handles high-concurrency requests, reduces latency, and improves response times by optimizing data transmission, applying advanced load-balancing algorithms, and managing resources efficiently. |
+| Multi-engine Support | Ollama | ✅ | Ollama is known for being lightweight and easy to use, focusing on efficient and stable large-model inference serving. Its friendly API and streamlined workflow let developers get started quickly and deploy applications with little effort. |
+| | vLLM | ✅ | vLLM has significant advantages in memory management and throughput. By optimizing memory usage and parallel computation, it substantially improves inference speed and resource efficiency while remaining compatible with a wide range of hardware. vLLM offers rich configuration options so users can tune inference strategies to their needs, and its scalable architecture suits both research and enterprise applications. |
+| | TensorRT-LLM | ⬜ | TensorRT-LLM (TensorRT for Large Language Models) is a high-performance, scalable deep learning inference optimization library developed by NVIDIA, designed specifically for large language models (LLMs). |
+| Docker Deployment Capability | Docker images built with Python 3.10 | ✅ | Packages the model and its dependencies into an image so that it runs with consistent versions, simplifying deployment and configuration. Docker's versioned builds and automated deployment improve the efficiency of model updates and iteration and shorten the path from development to production. |
+| Web UI Integration | OpenUI protocol | ⬜ | A comprehensive open-source UI protocol makes it easy for users to integrate diverse components, improving product customizability and extensibility. |
+| More Core Features | ModelCache semantic caching | ⬜ | By caching generated QA pairs, similar requests can be answered with millisecond-level responses, improving the performance and efficiency of model inference. |
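+The blocking and streaming rows above can be exercised with any OpenAI-compatible client. The sketch below is illustrative only and is not shipped in this repository: it assumes the service is reachable at http://localhost:8000/v1, that it accepts an arbitrary API key, that a model named `llama3.2` is available, and that the `openai` Python package (>= 1.0) is installed.
+
+```python
+# Illustrative client sketch; endpoint URL, API key, and model name are assumptions.
+from openai import OpenAI
+
+client = OpenAI(base_url="http://localhost:8000/v1", api_key="not-needed")
+
+# Blocking access: a single request returns the complete answer.
+resp = client.chat.completions.create(
+    model="llama3.2",
+    messages=[{"role": "user", "content": "Say hello in one sentence."}],
+)
+print(resp.choices[0].message.content)
+
+# Streaming access: partial output is consumed as soon as it is generated.
+stream = client.chat.completions.create(
+    model="llama3.2",
+    messages=[{"role": "user", "content": "Count from 1 to 5."}],
+    stream=True,
+)
+for chunk in stream:
+    if chunk.choices and chunk.choices[0].delta.content:
+        print(chunk.choices[0].delta.content, end="", flush=True)
+print()
+```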
+中文 | English
+
+| 分类 | 功能名称 | 状态 | 描述 |
+| --- | --- | --- | --- |
+| API Service | 基于 OpenAI 的标准 API 规范 | ✅ | 服务接口遵循 OpenAI 规范,通过标准化 API 降低接入成本,用户可轻松集成功能,快速响应业务需求,专注于核心开发。 |
+| | 阻塞式访问能力 | ✅ | 适用于需要完整性和连贯性、或需要对结果进行整体校验和处理的任务,一次性获取完整输出。在整个过程中,用户需等待所有输出内容全部生成完毕。 |
+| | 流式访问能力 | ✅ | 适用于对响应时间要求较高的实时应用,如代码补全、实时翻译或动态内容加载的场景。模型在生成过程中分段逐步传输内容,用户可即时接收并处理已生成的部分内容,无需等待全部完成,从而提升交互体验。 |
+| | 高性能网关 | ⬜ | 高性能网关通过优化数据传输、采用先进的负载均衡算法及高效的资源管理,能有效应对高并发请求、降低延迟、提升响应速度。 |
+| 多引擎支持 | Ollama | ✅ | Ollama 以易用和轻量著称,专注于高效稳定的大模型推理服务。其友好的 API 和简洁流畅的流程,使开发者能够轻松上手并快速部署应用。 |
+| | vLLM | ✅ | vLLM 在内存管理和吞吐量上有显著优势,通过优化内存使用和并行计算,显著提升推理速度和资源利用率,兼容多种硬件环境。vLLM 提供丰富的配置选项,用户可根据需求调整推理策略,其可扩展架构适用于研究和企业级应用。 |
+| | TensorRT-LLM | ⬜ | TensorRT-LLM (TensorRT for Large Language Models) 是 NVIDIA 开发的高性能、可扩展的深度学习推理优化库,专为大型语言模型(LLM)设计。 |
+| Docker 部署能力 | 基于 Python 3.10 构建 Docker 镜像 | ✅ | 将大模型及其依赖打包为镜像,确保以一致的版本运行,简化部署与配置。利用 Docker 的版本化构建和自动化部署,提高模型更新与迭代效率,加快从开发到生产落地的转化。 |
+| Web UI 接入 | OpenUI 协议 | ⬜ | 丰富的 UI 开源协议便于用户整合多种组件,提升产品的定制性和扩展性。 |
+| 更多核心功能 | ModelCache 语义缓存 | ⬜ | 通过缓存已生成的 QA Pair,使相似请求达到毫秒级响应,提高模型推理的性能与效率。 |
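+For readers curious how a streaming endpoint such as the one described in the tables above can be served, here is a minimal, illustrative FastAPI sketch. It is not the application shipped in this repository: the route path, payload shape, and canned token stream are assumptions for demonstration, and a real OpenAI-compatible implementation would emit server-sent events rather than plain text.
+
+```python
+# Illustrative streaming endpoint; route, payload, and chunking are assumptions.
+import asyncio
+
+from fastapi import FastAPI
+from fastapi.responses import StreamingResponse
+
+app = FastAPI()
+
+async def fake_token_stream(prompt: str):
+    # Stand-in for a real inference backend: yield the answer a few characters at a time.
+    answer = f"You asked: {prompt}. Streaming responses arrive chunk by chunk."
+    for i in range(0, len(answer), 8):
+        yield answer[i:i + 8]
+        await asyncio.sleep(0.05)  # simulate generation latency
+
+@app.post("/demo/stream")
+async def demo_stream(payload: dict):
+    prompt = payload.get("prompt", "")
+    # Each yielded chunk is flushed to the client as soon as it is produced.
+    return StreamingResponse(fake_token_stream(prompt), media_type="text/plain")
+
+# Run with: uvicorn streaming_demo:app --host 0.0.0.0 --port 8000
+```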
+This is a simple Flask web page.