Skip to content

Commit b10566f

Browse files
committed
[Docs] Add documentation page
1 parent 3c46e33 commit b10566f

16 files changed

+896
-6
lines changed

.github/workflows/build-site.yaml

+3-5
Original file line numberDiff line numberDiff line change
@@ -16,6 +16,7 @@ jobs:
1616
- name: Configuring build Environment
1717
run: |
1818
sudo apt-get update
19+
python -m pip install -U pip
1920
2021
- name: Setup Ruby
2122
uses: ruby/setup-ruby@v1
@@ -24,13 +25,10 @@ jobs:
2425

2526
- name: Installing dependencies
2627
run: |
28+
python -m pip install -r docs/requirements.txt
2729
gem install jekyll jekyll-remote-theme jekyll-sass-converter
2830
29-
- name: Build site
30-
run: |
31-
cd site && jekyll b && cd ..
32-
33-
- name: Push to gh-pages branch
31+
- name: Build and deploy site
3432
if: github.ref == 'refs/heads/main'
3533
run: |
3634
git remote set-url origin https://x-access-token:${{ secrets.MLC_GITHUB_TOKEN }}@github.com/$GITHUB_REPOSITORY

docs/Makefile

+20
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,20 @@
1+
# Minimal makefile for Sphinx documentation
2+
#
3+
4+
# You can set these variables from the command line, and also
5+
# from the environment for the first two.
6+
SPHINXOPTS ?=
7+
SPHINXBUILD ?= python -m sphinx
8+
SOURCEDIR = .
9+
BUILDDIR = _build
10+
11+
# Put it first so that "make" without argument is like "make help".
12+
help:
13+
@$(SPHINXBUILD) -M help "$(SOURCEDIR)" "$(BUILDDIR)" $(SPHINXOPTS) $(O)
14+
15+
.PHONY: help Makefile
16+
17+
# Catch-all target: route all unknown targets to Sphinx using the new
18+
# "make mode" option. $(O) is meant as a shortcut for $(SPHINXOPTS).
19+
%: Makefile
20+
@$(SPHINXBUILD) -M $@ "$(SOURCEDIR)" "$(BUILDDIR)" $(SPHINXOPTS) $(O)

docs/README.md

+30
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,30 @@
1+
# MLC-LLM Documentation
2+
3+
The documentation is built with [Sphinx](https://www.sphinx-doc.org/en/master/).
4+
5+
## Dependencies
6+
7+
Run the following command in this directory to install dependencies first:
8+
9+
```bash
10+
pip3 install -r requirements.txt
11+
```
12+
13+
## Build the Documentation
14+
15+
Then you can build the documentation by running:
16+
17+
```bash
18+
make html
19+
```
20+
21+
## View the Documentation
22+
23+
Run the following command to start a simple HTTP server:
24+
25+
```bash
26+
cd _build/html
27+
python3 -m http.server
28+
```
29+
30+
Then you can view the documentation in your browser at `http://localhost:8000` (the port can be customized by appending `-p PORT_NUMBER` to the python command above).

docs/_static/img/mlc-logo-with-text-landscape.svg

+87
Loading

docs/conf.py

+102
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,102 @@
1+
# -*- coding: utf-8 -*-
2+
import os
3+
import sys
4+
5+
import tlcpack_sphinx_addon
6+
7+
# -- General configuration ------------------------------------------------
8+
9+
sys.path.insert(0, os.path.abspath("../python"))
10+
sys.path.insert(0, os.path.abspath("../"))
11+
autodoc_mock_imports = ["torch"]
12+
13+
# General information about the project.
14+
project = "web-llm"
15+
author = "WebLLM Contributors"
16+
copyright = "2023, %s" % author
17+
18+
# Version information.
19+
20+
version = "0.2.77"
21+
release = "0.2.77"
22+
23+
extensions = [
24+
"sphinx_tabs.tabs",
25+
"sphinx_toolbox.collapse",
26+
"sphinxcontrib.httpdomain",
27+
"sphinx.ext.autodoc",
28+
"sphinx.ext.napoleon",
29+
"sphinx_reredirects",
30+
]
31+
32+
redirects = {"get_started/try_out": "../index.html#getting-started"}
33+
34+
source_suffix = [".rst"]
35+
36+
language = "en"
37+
38+
exclude_patterns = ["_build", "Thumbs.db", ".DS_Store"]
39+
40+
# The name of the Pygments (syntax highlighting) style to use.
41+
pygments_style = "sphinx"
42+
43+
# A list of ignored prefixes for module index sorting.
44+
# If true, `todo` and `todoList` produce output, else they produce nothing.
45+
todo_include_todos = False
46+
47+
# -- Options for HTML output ----------------------------------------------
48+
49+
# The theme is set by the make target
50+
import sphinx_rtd_theme
51+
52+
html_theme = "sphinx_rtd_theme"
53+
html_theme_path = [sphinx_rtd_theme.get_html_theme_path()]
54+
55+
templates_path = []
56+
57+
html_static_path = []
58+
59+
footer_copyright = "© 2023 MLC LLM"
60+
footer_note = " "
61+
62+
html_logo = "_static/img/mlc-logo-with-text-landscape.svg"
63+
64+
html_theme_options = {
65+
"logo_only": True,
66+
}
67+
68+
header_links = [
69+
("Home", "https://webllm.mlc.ai/"),
70+
("Github", "https://github.com/mlc-ai/web-llm"),
71+
("Discord Server", "https://discord.gg/9Xpy2HGBuD"),
72+
]
73+
74+
header_dropdown = {
75+
"name": "Other Resources",
76+
"items": [
77+
("WebLLM Chat", "https://chat.webllm.ai/"),
78+
("MLC Course", "https://mlc.ai/"),
79+
("MLC Blog", "https://blog.mlc.ai/"),
80+
("MLC LLM", "https://llm.mlc.ai/"),
81+
],
82+
}
83+
84+
html_context = {
85+
"footer_copyright": footer_copyright,
86+
"footer_note": footer_note,
87+
"header_links": header_links,
88+
"header_dropdown": header_dropdown,
89+
"display_github": True,
90+
"github_user": "mlc-ai",
91+
"github_repo": "mlc-llm",
92+
"github_version": "main/docs/",
93+
"theme_vcs_pageview_mode": "edit",
94+
# "header_logo": "/path/to/logo",
95+
# "header_logo_link": "",
96+
# "version_selecter": "",
97+
}
98+
99+
100+
# add additional overrides
101+
templates_path += [tlcpack_sphinx_addon.get_templates_path()]
102+
html_static_path += [tlcpack_sphinx_addon.get_static_path()]

docs/developer/add_models.rst

+6
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,6 @@
1+
Adding Models
2+
=============
3+
4+
WebLLM allows you to compile custom language models using `MLC LLM <https://llm.mlc.ai/>`_ and then serve the compiled model through WebLLM.
5+
6+
For instructions on how to compile and add custom models to WebLLM, check the `MLC LLM documentation here <https://llm.mlc.ai/docs/deploy/webllm.html>`_.
+35
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,35 @@
1+
Building From Source
2+
====================
3+
4+
Clone the Repository
5+
---------------------
6+
.. code-block:: bash
7+
8+
git clone https://github.com/mlc-ai/web-llm.git
9+
cd web-llm
10+
11+
Install Dependencies
12+
---------------------
13+
.. code-block:: bash
14+
15+
npm install
16+
17+
Build the Project
18+
-----------------
19+
.. code-block:: bash
20+
21+
npm run build
22+
23+
Test Changes
24+
------------
25+
26+
To test your changes, you can reuse any existing example or create a new example for the new functionality.
27+
28+
Then, to test the effects of your code change in an example, inside ``examples/<example>/package.json``, change from ``"@mlc-ai/web-llm": "^0.2.xx"`` to ``"@mlc-ai/web-llm": ../...`` to let it reference your local code.
29+
30+
.. code-block:: bash
31+
32+
cd examples/<example>
33+
# Modify the package.json
34+
npm install
35+
npm start

docs/index.rst

+35
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,35 @@
1+
👋 Welcome to WebLLM
2+
====================
3+
4+
`GitHub <https://github.com/mlc-ai/web-llm>`_ | `WebLLM Chat <https://chat.webllm.ai/>`_ | `NPM <https://www.npmjs.com/package/@mlc-ai/web-llm>`_ | `Discord <https://discord.gg/9Xpy2HGBuD>`_
5+
6+
WebLLM is a high-performance in-browser language model inference engine that brings large language models (LLMs) to web browsers with hardware acceleration. With WebGPU support, it allows developers to build AI-powered applications directly within the browser environment, removing the need for server-side processing and ensuring privacy.
7+
8+
It provides a specialized runtime for the web backend of MLCEngine, leverages
9+
`WebGPU <https://www.w3.org/TR/webgpu/>`_ for local acceleration, offers OpenAI-compatible API,
10+
and provides built-in support for web workers to separate heavy computation from the UI flow.
11+
12+
Key Features
13+
------------
14+
- 🌐 In-Browser Inference: Run LLMs directly in the browser
15+
- 🚀 WebGPU Acceleration: Leverage hardware acceleration for optimal performance
16+
- 🔄 OpenAI API Compatibility: Seamless integration with standard AI workflows
17+
- 📦 Multiple Model Support: Works with Llama, Phi, Gemma, Mistral, and more
18+
19+
Start exploring WebLLM by `chatting with WebLLM Chat <https://chat.webllm.ai/>`_, and start building webapps with high-performance local LLM inference with the following guides and tutorials.
20+
21+
.. toctree::
22+
:maxdepth: 2
23+
:caption: User Guide
24+
25+
user/get_started.rst
26+
user/basic_usage.rst
27+
user/advanced_usage.rst
28+
user/api_reference.rst
29+
30+
.. toctree::
31+
:maxdepth: 2
32+
:caption: Developer Guide
33+
34+
developer/building_from_source.rst
35+
developer/add_models.rst

docs/make.bat

+35
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,35 @@
1+
@ECHO OFF
2+
3+
pushd %~dp0
4+
5+
REM Command file for Sphinx documentation
6+
7+
if "%SPHINXBUILD%" == "" (
8+
set SPHINXBUILD=sphinx-build
9+
)
10+
set SOURCEDIR=.
11+
set BUILDDIR=_build
12+
13+
%SPHINXBUILD% >NUL 2>NUL
14+
if errorlevel 9009 (
15+
echo.
16+
echo.The 'sphinx-build' command was not found. Make sure you have Sphinx
17+
echo.installed, then set the SPHINXBUILD environment variable to point
18+
echo.to the full path of the 'sphinx-build' executable. Alternatively you
19+
echo.may add the Sphinx directory to PATH.
20+
echo.
21+
echo.If you don't have Sphinx installed, grab it from
22+
echo.https://www.sphinx-doc.org/
23+
exit /b 1
24+
)
25+
26+
if "%1" == "" goto help
27+
28+
%SPHINXBUILD% -M %1 %SOURCEDIR% %BUILDDIR% %SPHINXOPTS% %O%
29+
goto end
30+
31+
:help
32+
%SPHINXBUILD% -M help %SOURCEDIR% %BUILDDIR% %SPHINXOPTS% %O%
33+
34+
:end
35+
popd

docs/requirements.txt

+8
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,8 @@
1+
sphinx-tabs == 3.4.1
2+
sphinx-rtd-theme
3+
sphinx == 5.2.3
4+
sphinx-toolbox == 3.4.0
5+
tlcpack-sphinx-addon==0.2.2
6+
sphinxcontrib_httpdomain==1.8.1
7+
sphinxcontrib-napoleon==0.7
8+
sphinx-reredirects==0.1.2

0 commit comments

Comments
 (0)