diff --git a/.github/ISSUE_TEMPLATE/bug_report.yml b/.github/ISSUE_TEMPLATE/bug_report.yml
new file mode 100644
index 0000000..1fa152a
--- /dev/null
+++ b/.github/ISSUE_TEMPLATE/bug_report.yml
@@ -0,0 +1,47 @@
+name: "\U0001F41B Bug Report"
+description: Submit a bug report to help us improve MInference
+title: "[Bug]: "
+labels: ["bug"]
+
+body:
+ - type: textarea
+ id: description
+ attributes:
+ label: Describe the bug
+ description: A clear and concise description of what the bug is.
+ placeholder: What went wrong?
+ - type: textarea
+ id: reproduce
+ attributes:
+ label: Steps to reproduce
+ description: |
+ Steps to reproduce the behavior:
+
+ 1. Step 1
+ 2. Step 2
+ 3. ...
+ 4. See error
+ placeholder: How can we replicate the issue?
+ - type: textarea
+ id: expected_behavior
+ attributes:
+ label: Expected Behavior
+ description: A clear and concise description of what you expected to happen.
+ placeholder: What should have happened?
+ - type: textarea
+ id: logs
+ attributes:
+ label: Logs
+ description: If applicable, add logs or screenshots to help explain your problem.
+ placeholder: Add logs here
+ - type: textarea
+ id: additional_information
+ attributes:
+ label: Additional Information
+ description: |
+ - MInference Version:
+ - Operating System:
+ - Python Version:
+ - Related Issues:
+ - Any other relevant information.
+ placeholder: Any additional details
diff --git a/.github/ISSUE_TEMPLATE/config.yml b/.github/ISSUE_TEMPLATE/config.yml
new file mode 100644
index 0000000..0086358
--- /dev/null
+++ b/.github/ISSUE_TEMPLATE/config.yml
@@ -0,0 +1 @@
+blank_issues_enabled: true
diff --git a/.github/ISSUE_TEMPLATE/feature_request.yml b/.github/ISSUE_TEMPLATE/feature_request.yml
new file mode 100644
index 0000000..c1cd10a
--- /dev/null
+++ b/.github/ISSUE_TEMPLATE/feature_request.yml
@@ -0,0 +1,26 @@
+name: "\U0001F680 Feature request"
+description: Submit a proposal/request for a new MInference feature
+labels: ["feature request"]
+title: "[Feature Request]: "
+
+body:
+ - type: textarea
+ id: problem_description
+ attributes:
+ label: Is your feature request related to a problem? Please describe.
+ description: A clear and concise description of what the problem is. Ex. I'm always frustrated when [...]
+ placeholder: What problem are you trying to solve?
+
+ - type: textarea
+ id: solution_description
+ attributes:
+ label: Describe the solution you'd like
+ description: A clear and concise description of what you want to happen.
+ placeholder: How do you envision the solution?
+
+ - type: textarea
+ id: additional_context
+ attributes:
+ label: Additional context
+ description: Add any other context or screenshots about the feature request here.
+ placeholder: Any additional information
diff --git a/.github/ISSUE_TEMPLATE/general_issue.yml b/.github/ISSUE_TEMPLATE/general_issue.yml
new file mode 100644
index 0000000..f480312
--- /dev/null
+++ b/.github/ISSUE_TEMPLATE/general_issue.yml
@@ -0,0 +1,12 @@
+name: "\U0001F31F General Question"
+description: File a general question
+title: "[Question]: "
+labels: ["question"]
+
+body:
+ - type: textarea
+ id: description
+ attributes:
+ label: Describe the issue
+ description: A clear and concise description of what the question is.
+ placeholder: The detail of question.
diff --git a/.github/PULL_REQUEST_TEMPLATE.md b/.github/PULL_REQUEST_TEMPLATE.md
new file mode 100644
index 0000000..7ad1f4f
--- /dev/null
+++ b/.github/PULL_REQUEST_TEMPLATE.md
@@ -0,0 +1,41 @@
+# What does this PR do?
+
+
+
+
+
+Fixes # (issue)
+
+
+## Before submitting
+- [ ] This PR fixes a typo or improves the docs (you can dismiss the other checks if that's the case).
+- [ ] Was this discussed/approved via a GitHub issue? Please add a link
+ to it if that's the case.
+- [ ] Did you make sure to update the documentation with your changes?
+- [ ] Did you write any new necessary tests?
+
+
+## Who can review?
+
+Anyone in the community is free to review the PR once the tests have passed. Feel free to tag
+members/contributors who may be interested in your PR.
+
+
diff --git a/.github/workflows/release.yml b/.github/workflows/release.yml
new file mode 100644
index 0000000..21b568d
--- /dev/null
+++ b/.github/workflows/release.yml
@@ -0,0 +1,129 @@
+# This workflow will build and upload a Python Package using Twine when a release is published
+# Conda-forge bot will pick up new PyPI version and automatically create new version
+# For more information see: https://help.github.com/en/actions/language-and-framework-guides/using-python-with-github-actions#publishing-to-package-registries
+
+name: Release
+
+on:
+ push:
+ tags:
+ - v*
+
+# Needed to create release and upload assets
+permissions:
+ contents: write
+
+jobs:
+ release:
+ # Retrieve tag and create release
+ name: Create Release
+ runs-on: ubuntu-latest
+ outputs:
+ upload_url: ${{ steps.create_release.outputs.upload_url }}
+ steps:
+ - name: Checkout
+ uses: actions/checkout@v4
+
+ - name: Extract branch info
+ shell: bash
+ run: |
+ echo "release_tag=${GITHUB_REF#refs/*/}" >> $GITHUB_ENV
+
+ - name: Create Release
+ id: create_release
+ uses: "actions/github-script@v6"
+ env:
+ RELEASE_TAG: ${{ env.release_tag }}
+ with:
+ github-token: "${{ secrets.GITHUB_TOKEN }}"
+ script: |
+ const script = require('.github/workflows/scripts/create_release.js')
+ await script(github, context, core)
+
+ wheel:
+ name: Build Wheel
+ runs-on: ${{ matrix.os }}
+ needs: release
+
+ strategy:
+ fail-fast: false
+ matrix:
+ os: ['ubuntu-20.04']
+ python-version: ['3.8', '3.9', '3.10', '3.11']
+ pytorch-version: ['2.3.0'] # Must be the most recent version that meets requirements-cuda.txt.
+ cuda-version: ['11.8', '12.1']
+
+ steps:
+ - name: Checkout
+ uses: actions/checkout@v4
+
+ - name: Setup ccache
+ uses: hendrikmuhs/ccache-action@v1.2
+ with:
+ create-symlink: true
+ key: ${{ github.job }}-${{ matrix.python-version }}-${{ matrix.cuda-version }}
+
+ - name: Set up Linux Env
+ if: ${{ runner.os == 'Linux' }}
+ run: |
+ bash -x .github/workflows/scripts/env.sh
+
+ - name: Set up Python
+ uses: actions/setup-python@v4
+ with:
+ python-version: ${{ matrix.python-version }}
+
+ - name: Install CUDA ${{ matrix.cuda-version }}
+ run: |
+ bash -x .github/workflows/scripts/cuda-install.sh ${{ matrix.cuda-version }} ${{ matrix.os }}
+
+ - name: Install PyTorch ${{ matrix.pytorch-version }} with CUDA ${{ matrix.cuda-version }}
+ run: |
+ bash -x .github/workflows/scripts/pytorch-install.sh ${{ matrix.python-version }} ${{ matrix.pytorch-version }} ${{ matrix.cuda-version }}
+
+ - name: Build wheel
+ shell: bash
+ env:
+ CMAKE_BUILD_TYPE: Release # do not compile with debug symbol to reduce wheel size
+ run: |
+ bash -x .github/workflows/scripts/build.sh ${{ matrix.python-version }} ${{ matrix.cuda-version }}
+ wheel_name=$(ls dist/*whl | xargs -n 1 basename)
+ asset_name=${wheel_name//"linux"/"manylinux1"}
+ echo "wheel_name=${wheel_name}" >> $GITHUB_ENV
+ echo "asset_name=${asset_name}" >> $GITHUB_ENV
+
+ - name: Upload Release Asset
+ uses: actions/upload-release-asset@v1
+ env:
+ GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
+ with:
+ upload_url: ${{ needs.release.outputs.upload_url }}
+ asset_path: ./dist/${{ env.wheel_name }}
+ asset_name: ${{ env.asset_name }}
+ asset_content_type: application/*
+ - name: Store the distribution packages
+ uses: actions/upload-artifact@v4
+ with:
+ name: ${{ env.asset_name }}
+ path: ./dist/${{ env.wheel_name }}
+
+ # publish-to-pypi:
+ # name: >-
+  #     Publish Python 🐍 distribution 📦 to PyPI
+ # if: startsWith(github.ref, 'refs/tags/') # only publish to PyPI on tag pushes
+ # needs: wheel
+ # runs-on: ubuntu-latest
+ # permissions:
+ # id-token: write # IMPORTANT: mandatory for trusted publishing
+
+ # steps:
+ # - name: Download all the dists
+ # uses: actions/download-artifact@v4
+ # with:
+ # path: dist/
+ # - name: Pick the whl files
+ # run: for file in dist/*;do mv $file ${file}1; done && cp dist/*/*.whl dist/ && rm -rf dist/*.whl1 && rm -rf dist/*+cu*
+ # - name: Display structure of downloaded files
+ # run: ls -R dist/
+  #     - name: Publish distribution 📦 to PyPI
+ # uses: pypa/gh-action-pypi-publish@release/v1
diff --git a/.github/workflows/scripts/build.sh b/.github/workflows/scripts/build.sh
new file mode 100644
index 0000000..43b6ee4
--- /dev/null
+++ b/.github/workflows/scripts/build.sh
@@ -0,0 +1,19 @@
+#!/bin/bash
+
+python_executable=python$1
+cuda_home=/usr/local/cuda-$2
+
+# Update paths
+PATH=${cuda_home}/bin:$PATH
+LD_LIBRARY_PATH=${cuda_home}/lib64:$LD_LIBRARY_PATH
+
+# Install requirements
+$python_executable -m pip install wheel packaging
+$python_executable -m pip install flash_attn triton
+
+# Limit the number of parallel jobs to avoid OOM
+export MAX_JOBS=1
+# Make sure release wheels are built for the following architectures
+export TORCH_CUDA_ARCH_LIST="7.0 7.5 8.0 8.6 8.9 9.0+PTX"
+# Build
+$python_executable setup.py bdist_wheel --dist-dir=dist
diff --git a/.github/workflows/scripts/create_release.js b/.github/workflows/scripts/create_release.js
new file mode 100644
index 0000000..d48cc06
--- /dev/null
+++ b/.github/workflows/scripts/create_release.js
@@ -0,0 +1,20 @@
+// Uses Github's API to create the release and wait for result.
+// We use a JS script since the GitHub CLI doesn't wait for the release's creation and returns immediately.
+
+module.exports = async (github, context, core) => {
+ try {
+ const response = await github.rest.repos.createRelease({
+ draft: false,
+ generate_release_notes: true,
+ name: process.env.RELEASE_TAG,
+ owner: context.repo.owner,
+ prerelease: true,
+ repo: context.repo.repo,
+ tag_name: process.env.RELEASE_TAG,
+ });
+
+ core.setOutput('upload_url', response.data.upload_url);
+ } catch (error) {
+ core.setFailed(error.message);
+ }
+}
diff --git a/.github/workflows/scripts/cuda-install.sh b/.github/workflows/scripts/cuda-install.sh
new file mode 100644
index 0000000..312c6e8
--- /dev/null
+++ b/.github/workflows/scripts/cuda-install.sh
@@ -0,0 +1,23 @@
+#!/bin/bash
+
+# Replace '.' with '-' ex: 11.8 -> 11-8
+cuda_version=$(echo $1 | tr "." "-")
+# Removes '-' and '.' ex: ubuntu-20.04 -> ubuntu2004
+OS=$(echo $2 | tr -d ".\-")
+
+# Installs CUDA
+wget -nv https://developer.download.nvidia.com/compute/cuda/repos/${OS}/x86_64/cuda-keyring_1.1-1_all.deb
+sudo dpkg -i cuda-keyring_1.1-1_all.deb
+rm cuda-keyring_1.1-1_all.deb
+sudo apt -qq update
+sudo apt -y install cuda-${cuda_version} cuda-nvcc-${cuda_version} cuda-libraries-dev-${cuda_version}
+sudo apt clean
+
+# Test nvcc
+PATH=/usr/local/cuda-$1/bin:${PATH}
+nvcc --version
+
+# Log gcc, g++, c++ versions
+gcc --version
+g++ --version
+c++ --version
diff --git a/.github/workflows/scripts/env.sh b/.github/workflows/scripts/env.sh
new file mode 100644
index 0000000..d7baaec
--- /dev/null
+++ b/.github/workflows/scripts/env.sh
@@ -0,0 +1,56 @@
+#!/bin/bash
+
+# This file installs common linux environment tools
+
+export LANG=C.UTF-8
+
+# python_version=$1
+
+sudo apt-get update && \
+sudo apt-get install -y --no-install-recommends \
+    software-properties-common
+
+sudo apt-get install -y --no-install-recommends \
+ build-essential \
+ apt-utils \
+ ca-certificates \
+ wget \
+ git \
+ vim \
+ libssl-dev \
+ curl \
+ unzip \
+ unrar \
+ cmake \
+ net-tools \
+ sudo \
+ autotools-dev \
+ rsync \
+ jq \
+ openssh-server \
+ tmux \
+ screen \
+ htop \
+ pdsh \
+ openssh-client \
+ lshw \
+ dmidecode \
+ util-linux \
+ automake \
+ autoconf \
+ libtool \
+ net-tools \
+ pciutils \
+ libpci-dev \
+ libaio-dev \
+ libcap2 \
+ libtinfo5 \
+ fakeroot \
+ devscripts \
+ debhelper \
+ nfs-common
+
+# Remove github bloat files to free up disk space
+sudo rm -rf "/usr/local/share/boost"
+sudo rm -rf "$AGENT_TOOLSDIRECTORY"
+sudo rm -rf "/usr/share/dotnet"
diff --git a/.github/workflows/scripts/pytorch-install.sh b/.github/workflows/scripts/pytorch-install.sh
new file mode 100644
index 0000000..dfc1851
--- /dev/null
+++ b/.github/workflows/scripts/pytorch-install.sh
@@ -0,0 +1,15 @@
+#!/bin/bash
+
+python_executable=python$1
+pytorch_version=$2
+cuda_version=$3
+
+# Install torch
+$python_executable -m pip install numpy pyyaml scipy ipython mkl mkl-include ninja cython typing pandas typing-extensions dataclasses setuptools && conda clean -ya
+$python_executable -m pip install torch==${pytorch_version}+cu${cuda_version//./} --extra-index-url https://download.pytorch.org/whl/cu${cuda_version//./}
+
+# Print version information
+$python_executable --version
+$python_executable -c "import torch; print('PyTorch:', torch.__version__)"
+$python_executable -c "import torch; print('CUDA:', torch.version.cuda)"
+$python_executable -c "from torch.utils import cpp_extension; print (cpp_extension.CUDA_HOME)"
diff --git a/.github/workflows/unittest.yml b/.github/workflows/unittest.yml
new file mode 100644
index 0000000..8fc88bf
--- /dev/null
+++ b/.github/workflows/unittest.yml
@@ -0,0 +1,46 @@
+name: Unit Test
+
+# see: https://help.github.com/en/actions/reference/events-that-trigger-workflows
+# on: # Trigger the workflow on pull request or merge
+ # pull_request:
+ # merge_group:
+ # types: [checks_requested]
+
+defaults:
+ run:
+ shell: bash
+permissions: {}
+
+jobs:
+ UnitTest:
+ runs-on: ${{ matrix.os }}
+ strategy:
+ fail-fast: false
+ matrix:
+ os: [ubuntu-latest, macos-latest, windows-2019]
+ python-version: ["3.9", "3.10", "3.11"]
+ exclude:
+ - os: macos-latest
+ python-version: '3.9'
+ steps:
+ - uses: actions/checkout@v4
+ - name: Set up Python ${{ matrix.python-version }}
+ uses: actions/setup-python@v5
+ with:
+ python-version: ${{ matrix.python-version }}
+
+ - name: Install packages and dependencies for all tests
+ run: |
+ python -m pip install --upgrade pip wheel
+ pip install pytest pytest-xdist
+
+ - name: Install packages
+ run: |
+ pip install -e .
+
+ - name: Run core tests
+ shell: bash
+ env:
+ HF_TOKEN: ${{ secrets.HF_TOKEN}}
+ run: |
+ make test
diff --git a/.gitignore b/.gitignore
index 8a30d25..24a0420 100644
--- a/.gitignore
+++ b/.gitignore
@@ -396,3 +396,21 @@ FodyWeavers.xsd
# JetBrains Rider
*.sln.iml
+
+# Experiments
+data
+!experiments/ruler/data
+needle
+results
+*.json
+*.jsonl
+.vscode/
+*.pt
+*.pkl
+!minference/configs/*
+figures
+
+__pycache__
+build/
+*.egg-info/
+*.so
diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml
new file mode 100644
index 0000000..4b92db7
--- /dev/null
+++ b/.pre-commit-config.yaml
@@ -0,0 +1,37 @@
+default_language_version:
+ python: python3
+exclude: 'dotnet'
+ci:
+ autofix_prs: true
+ autoupdate_commit_msg: '[pre-commit.ci] pre-commit suggestions'
+ autoupdate_schedule: 'quarterly'
+
+repos:
+ - repo: https://github.com/pre-commit/pre-commit-hooks
+ rev: v4.4.0
+ hooks:
+ - id: check-added-large-files
+ - id: check-ast
+ - id: check-yaml
+ - id: check-toml
+ - id: check-json
+ - id: check-byte-order-marker
+ exclude: .gitignore
+ - id: check-merge-conflict
+ - id: detect-private-key
+ - id: trailing-whitespace
+ - id: end-of-file-fixer
+ - id: no-commit-to-branch
+ - repo: https://github.com/pycqa/isort
+ rev: 5.13.2
+ hooks:
+ - id: isort
+ - repo: https://github.com/psf/black
+ rev: 23.3.0
+ hooks:
+ - id: black
+ exclude: minference/ops|minference/modules/minference\_forward\.py
+ - repo: https://github.com/nbQA-dev/nbQA
+ rev: 1.7.1
+ hooks:
+ - id: nbqa-black
diff --git a/Makefile b/Makefile
new file mode 100644
index 0000000..cb143dc
--- /dev/null
+++ b/Makefile
@@ -0,0 +1,20 @@
+# Copyright (c) 2024 Microsoft
+# Licensed under The MIT License [see LICENSE for details]
+
+.PHONY: install style test
+
+PYTHON := python
+CHECK_DIRS := minference tests experiments examples
+EXELUDE_DIRS := minference/ops
+FORCE_EXELUDE_DIRS := minference/modules/minference_forward.py
+
+install:
+ @${PYTHON} setup.py bdist_wheel
+ @${PYTHON} -m pip install dist/minference*
+
+style:
+ black $(CHECK_DIRS) --extend-exclude ${EXELUDE_DIRS} --force-exclude ${FORCE_EXELUDE_DIRS}
+ isort -rc $(CHECK_DIRS)
+
+test:
+ @${PYTHON} -m pytest -n auto --dist=loadfile -s -v ./tests/
diff --git a/README.md b/README.md
index 5cd7cec..cb5ec8c 100644
--- a/README.md
+++ b/README.md
@@ -1,14 +1,149 @@
-# Project
+
+
+
-> This repo has been populated by an initial template to help get you started. Please
-> make sure to update the content to build a great experience for community-building.
+# MInference: Million-Tokens Prompt Inference for Long-context LLMs
-As the maintainer of this project, please make a few updates:
+
+| Project Page | Paper | HF Demo |
+
-- Improving this README.MD file to provide a great experience
-- Updating SUPPORT.MD with content about this project's support experience
-- Understanding the security reporting process in SECURITY.MD
-- Remove this section from the README
+https://github.com/microsoft/MInference/assets/30883354/52613efc-738f-4081-8367-7123c81d6b19
+
+## News
+- 🧩 We will present **MInference 1.0** at the _**Microsoft Booth**_ and _**ES-FoMo**_ at ICML'24. See you in Vienna!
+
+## TL;DR
+
+**MInference 1.0** leverages the dynamic sparse nature of LLMs' attention, which exhibits some static patterns, to speed up the pre-filling for long-context LLMs. It first determines offline which sparse pattern each head belongs to, then approximates the sparse index online and dynamically computes attention with the optimal custom kernels. This approach achieves up to a **10x speedup** for pre-filling on an A100 while maintaining accuracy.
+
+- [MInference 1.0: Accelerating Pre-filling for Long-Context LLMs via Dynamic Sparse Attention](https://arxiv.org/abs/2407.) (Under Review, ES-FoMo @ ICML'24)
+  _Huiqiang Jiang†, Yucheng Li†, Chengruidong Zhang†, Qianhui Wu, Xufang Luo, Surin Ahn, Zhenhua Han, Amir H. Abdi, Dongsheng Li, Chin-Yew Lin, Yuqing Yang and Lili Qiu_
+
+
+## Overview
+
+![Onepage of MInference](./images/MInference1_onepage.png)
+
+## 🎯 Quick Start
+
+### Requirements
+
+- Torch
+- FlashAttention-2
+- Triton == 2.1.0
+
+To get started with MInference, simply install it using pip:
+
+```bash
+pip install minference
+```
+
+### Supported Models
+
+In general, *MInference* **supports any decoding LLM**, including LLaMA-style and Phi models.
+We have adapted nearly all open-source long-context LLMs available in the market.
+If your model is not on the supported list, feel free to open an issue, or follow [the guide](./experiments/) to manually generate the sparse-heads config.
+
+You can get the complete list of supported LLMs by running:
+```python
+from minference import get_support_models
+get_support_models()
+```
+
+Currently, we support the following LLMs:
+- LLaMA-3: [gradientai/Llama-3-8B-Instruct-262k](https://huggingface.co/gradientai/Llama-3-8B-Instruct-262k), [gradientai/Llama-3-8B-Instruct-Gradient-1048k](https://huggingface.co/gradientai/Llama-3-8B-Instruct-Gradient-1048k), [gradientai/Llama-3-8B-Instruct-Gradient-4194k](https://huggingface.co/gradientai/Llama-3-8B-Instruct-Gradient-4194k)
+- GLM-4: [THUDM/glm-4-9b-chat-1m](https://huggingface.co/THUDM/glm-4-9b-chat-1m)
+- Yi: [01-ai/Yi-9B-200K](https://huggingface.co/01-ai/Yi-9B-200K)
+- Phi-3: [microsoft/Phi-3-mini-128k-instruct](https://huggingface.co/microsoft/Phi-3-mini-128k-instruct)
+- Qwen2: [Qwen/Qwen2-7B-Instruct](https://huggingface.co/Qwen/Qwen2-7B-Instruct)
+
+### How to use MInference
+
+For HF Transformers:
+```diff
+from transformers import pipeline
++from minference import MInference
+
+pipe = pipeline("text-generation", model=model_name, torch_dtype="auto", device_map="auto")
+
+# Patch MInference Module
++minference_patch = MInference("minference", model_name)
++pipe.model = minference_patch(pipe.model)
+
+pipe(prompt, max_length=10)
+```
+
+For vLLM:
+
+```diff
+from vllm import LLM, SamplingParams
++ from minference import MInference
+
+llm = LLM(model_name, max_num_seqs=1, enforce_eager=True, max_model_len=128000)
+
+# Patch MInference Module
++minference_patch = MInference("vllm", model_name)
++llm = minference_patch(llm)
+
+outputs = llm.generate(prompts, sampling_params)
+```
+
+To use only the kernels:
+```python
+from minference import vertical_slash_sparse_attention, block_sparse_attention, streaming_forward
+
+attn_output = vertical_slash_sparse_attention(q, k, v, vertical_topk, slash)
+attn_output = block_sparse_attention(q, k, v, topk)
+attn_output = streaming_forward(q, k, v, init_num, local_window_num)
+```
+
+For more details, please refer to our [Examples](./examples/) and [Experiments](./experiments/).
+
+## FAQ
+
+For more insights and answers, visit our [FAQ section](./Transparency_FAQ.md).
+
+**Q1: How to effectively evaluate the impact of dynamic sparse attention on the capabilities of long-context LLMs?**
+
+To evaluate long-context LLM capabilities using models like LLaMA-3-8B-Instruct-1M and GLM-4-9B-1M, we tested: 1) context window with RULER, 2) general tasks with InfiniteBench, 3) retrieval tasks with Needle in a Haystack, and 4) language model prediction with PG-19.
+We found traditional methods perform poorly in retrieval tasks, with difficulty levels as follows: KV retrieval > Needle in a Haystack > Retrieval.Number > Retrieval PassKey. The main challenge is the semantic difference between needles and the haystack. Traditional methods excel when this difference is larger, as in passkey tasks. KV retrieval requires higher retrieval capabilities since any key can be a target, and multi-needle tasks are even more complex.
+We will continue to update our results with more models and datasets in future versions.
+
+**Q2: Does this dynamic sparse attention pattern only exist in long-context LLMs that are not fully trained?**
+
+Firstly, attention is dynamically sparse, a characteristic inherent to the mechanism. We selected state-of-the-art long-context LLMs, GLM-4-9B-1M and LLaMA-3-8B-Instruct-1M, with effective context windows of 64K and 16K. With MInference, these can be extended to 64K and 32K, respectively. We will continue to adapt our method to other advanced long-context LLMs and update our results, as well as explore the theoretical basis for this dynamic sparse attention pattern.
+
+**Q3: Does this dynamic sparse attention pattern only exist in auto-regressive LMs or RoPE-based LLMs?**
+
+Similar vertical and slash line sparse patterns have been discovered in BERT[1] and multi-modal LLMs[2]. Our analysis of T5's attention patterns, shown in the figure, reveals these patterns persist across different heads, even in bidirectional attention.
+[1] SparseBERT: Rethinking the Importance Analysis in Self-Attention, ICML 2021.
+[2] LOOK-M: Look-Once Optimization in KV Cache for Efficient Multimodal Long-Context Inference, 2024.
+
+
+
+Figure 1. The sparse pattern in T5 Encoder.
+
+**Q4: What is the relationship between MInference, SSM, Linear Attention, and Sparse Attention?**
+
+All four approaches (MInference, SSM, Linear Attention, and Sparse Attention) efficiently optimize attention complexity in Transformers, each introducing inductive bias differently. The latter three require training from scratch. Recent works like Mamba-2 and Unified Implicit Attention Representation unify SSM and Linear Attention as static sparse attention, with Mamba-2 itself being a block-wise sparse method. While these approaches show potential due to sparse redundancy in attention, static sparse attention may struggle with dynamic semantic associations in complex tasks. In contrast, dynamic sparse attention is better suited for managing these relationships.
+
+## Citation
+
+If you find MInference useful or relevant to your project and research, please kindly cite our paper:
+
+```bibtex
+@article{jiang2024minference,
+ title={MInference 1.0: Accelerating Pre-filling for Long-Context LLMs via Dynamic Sparse Attention},
+ author={Jiang, Huiqiang and Li, Yucheng and Zhang, Chengruidong and Wu, Qianhui and Luo, Xufang and Ahn, Surin and Han, Zhenhua and Abdi, Amir H and Li, Dongsheng and Lin, Chin-Yew and Yang, Yuqing and Qiu, Lili},
+ journal={arXiv},
+ year={2024}
+}
+```
## Contributing
@@ -26,8 +161,8 @@ contact [opencode@microsoft.com](mailto:opencode@microsoft.com) with any additio
## Trademarks
-This project may contain trademarks or logos for projects, products, or services. Authorized use of Microsoft
-trademarks or logos is subject to and must follow
+This project may contain trademarks or logos for projects, products, or services. Authorized use of Microsoft
+trademarks or logos is subject to and must follow
[Microsoft's Trademark & Brand Guidelines](https://www.microsoft.com/en-us/legal/intellectualproperty/trademarks/usage/general).
Use of Microsoft trademarks or logos in modified versions of this project must not cause confusion or imply Microsoft sponsorship.
Any use of third-party trademarks or logos are subject to those third-party's policies.
diff --git a/SECURITY.md b/SECURITY.md
index b3c89ef..82db58a 100644
--- a/SECURITY.md
+++ b/SECURITY.md
@@ -14,7 +14,7 @@ Instead, please report them to the Microsoft Security Response Center (MSRC) at
If you prefer to submit without logging in, send email to [secure@microsoft.com](mailto:secure@microsoft.com). If possible, encrypt your message with our PGP key; please download it from the [Microsoft Security Response Center PGP Key page](https://aka.ms/security.md/msrc/pgp).
-You should receive a response within 24 hours. If for some reason you do not, please follow up via email to ensure we received your original message. Additional information can be found at [microsoft.com/msrc](https://www.microsoft.com/msrc).
+You should receive a response within 24 hours. If for some reason you do not, please follow up via email to ensure we received your original message. Additional information can be found at [microsoft.com/msrc](https://www.microsoft.com/msrc).
Please include the requested information listed below (as much as you can provide) to help us better understand the nature and scope of the possible issue:
diff --git a/SUPPORT.md b/SUPPORT.md
index 291d4d4..effba63 100644
--- a/SUPPORT.md
+++ b/SUPPORT.md
@@ -1,25 +1,13 @@
-# TODO: The maintainer of this repo has not yet edited this file
-
-**REPO OWNER**: Do you want Customer Service & Support (CSS) support for this product/project?
-
-- **No CSS support:** Fill out this template with information about how to file issues and get help.
-- **Yes CSS support:** Fill out an intake form at [aka.ms/onboardsupport](https://aka.ms/onboardsupport). CSS will work with/help you to determine next steps.
-- **Not sure?** Fill out an intake as though the answer were "Yes". CSS will help you decide.
-
-*Then remove this first heading from this SUPPORT.MD file before publishing your repo.*
-
# Support
-## How to file issues and get help
+## How to file issues and get help
-This project uses GitHub Issues to track bugs and feature requests. Please search the existing
-issues before filing new issues to avoid duplicates. For new issues, file your bug or
+This project uses GitHub Issues to track bugs and feature requests. Please search the existing
+issues before filing new issues to avoid duplicates. For new issues, file your bug or
feature request as a new Issue.
-For help and questions about using this project, please **REPO MAINTAINER: INSERT INSTRUCTIONS HERE
-FOR HOW TO ENGAGE REPO OWNERS OR COMMUNITY FOR HELP. COULD BE A STACK OVERFLOW TAG OR OTHER
-CHANNEL. WHERE WILL YOU HELP PEOPLE?**.
+For help and questions about using this project, please refer to the [document](./DOCUMENT.md).
-## Microsoft Support Policy
+## Microsoft Support Policy
Support for this **PROJECT or PRODUCT** is limited to the resources listed above.
diff --git a/Transparency_FAQ.md b/Transparency_FAQ.md
new file mode 100644
index 0000000..79cc2d9
--- /dev/null
+++ b/Transparency_FAQ.md
@@ -0,0 +1,51 @@
+# MInference's Responsible AI FAQ
+
+## What is MInference?
+
+- MInference is a system designed for efficient inference of Large Language Models (LLMs) in long-context scenarios. It significantly accelerates LLM inference through approximate and sparse computation, reducing latency by 6x in the pre-filling stage and 8x in the decoding stage on an A100, while maintaining the same level of performance.
+
+## What can MInference do?
+
+- MInference can effectively reduce the inference cost of LLMs in long-context scenarios, with minimal impact on performance.
+
+## What are MInference's intended use(s)?
+
+- LLM deployers and users.
+
+## How was MInference evaluated? What metrics are used to measure performance?
+
+- We use the long-context benchmark InfiniteBench and its evaluation metrics to evaluate the MInference system.
+- We have conducted extensive testing for accuracy across various scenarios, including multi-document QA, single document QA, retrieval-based methods, few-shot learning, and reasoning. The results show almost no change in accuracy. We have also tested on TruthfulQA, a benchmark for assessing susceptibility to harmful outputs, which further demonstrates that our method does not compromise accuracy. Finally, our method does not alter the inherent capabilities of LLMs, including their potential for harm.
+
+## What are the limitations of MInference? How can users minimize the impact of MInference's limitations when using the system?
+
+- The potentially harmful, false, or biased responses produced when using MInference would likely be unchanged. Thus, using MInference has no inherent benefits or risks with respect to those types of responsible AI issues.
+- MInference may struggle to perform well at particularly high sparsity.
+
+## What operational factors and settings allow for effective and responsible use of MInference?
+
+- Users can set parameters such as the sparsity, attention sink, and local window size used by MInference. Afterward, they can get the responses from the LLMs.
+
+## How to effectively evaluate the impact of dynamic sparse attention on the capabilities of long-context LLMs?
+
+To evaluate long-context LLM capabilities using models like LLaMA-3-8B-Instruct-1M and GLM-4-9B-1M, we tested: 1) context window with RULER, 2) general tasks with InfiniteBench, 3) retrieval tasks with Needle in a Haystack, and 4) language model prediction with PG-19.
+We found traditional methods perform poorly in retrieval tasks, with difficulty levels as follows: KV retrieval > Needle in a Haystack > Retrieval.Number > Retrieval PassKey. The main challenge is the semantic difference between needles and the haystack. Traditional methods excel when this difference is larger, as in passkey tasks. KV retrieval requires higher retrieval capabilities since any key can be a target, and multi-needle tasks are even more complex.
+We will continue to update our results with more models and datasets in future versions.
+
+## Does this dynamic sparse attention pattern only exist in long-context LLMs that are not fully trained?
+
+Firstly, attention is dynamically sparse, a characteristic inherent to the mechanism. We selected state-of-the-art long-context LLMs, GLM-4-9B-1M and LLaMA-3-8B-Instruct-1M, with effective context windows of 64K and 16K. With MInference, these can be extended to 64K and 32K, respectively. We will continue to adapt our method to other advanced long-context LLMs and update our results, as well as explore the theoretical basis for this dynamic sparse attention pattern.
+
+## Does this dynamic sparse attention pattern only exist in auto-regressive LMs or RoPE-based LLMs?
+
+Similar vertical and slash line sparse patterns have been discovered in BERT[1] and multi-modal LLMs[2]. Our analysis of T5's attention patterns, shown in the figure, reveals these patterns persist across different heads, even in bidirectional attention.
+[1] SparseBERT: Rethinking the Importance Analysis in Self-Attention, ICML 2021.
+[2] LOOK-M: Look-Once Optimization in KV Cache for Efficient Multimodal Long-Context Inference, 2024.
+
+
+
+Figure 1. The sparse pattern in T5 Encoder.
+
+## What is the relationship between MInference, SSM, Linear Attention, and Sparse Attention?
+
+All four approaches (MInference, SSM, Linear Attention, and Sparse Attention) efficiently optimize attention complexity in Transformers, each introducing inductive bias differently. The latter three require training from scratch. Recent works like Mamba-2 and Unified Implicit Attention Representation unify SSM and Linear Attention as static sparse attention, with Mamba-2 itself being a block-wise sparse method. While these approaches show potential due to sparse redundancy in attention, static sparse attention may struggle with dynamic semantic associations in complex tasks. In contrast, dynamic sparse attention is better suited for managing these relationships.
diff --git a/csrc/kernels.cpp b/csrc/kernels.cpp
new file mode 100644
index 0000000..53da5b1
--- /dev/null
+++ b/csrc/kernels.cpp
@@ -0,0 +1,18 @@
+// Copyright (c) Microsoft Corporation.
+// Licensed under the MIT license.
+
+#include <vector>
+#include "torch/extension.h"
+
+std::vector<at::Tensor> convert_vertical_slash_indexes(
+ torch::Tensor seqlens, // [BATCH, ]
+ torch::Tensor vertical_indexes, // [BATCH, N_HEADS, NNZ_V]
+ torch::Tensor slash_indexes, // [BATCH, N_HEADS, NNZ_S]
+ int context_size,
+ int block_size_M,
+ int block_size_N
+);
+
+PYBIND11_MODULE(TORCH_EXTENSION_NAME, m) {
+ m.def("convert_vertical_slash_indexes", &convert_vertical_slash_indexes, "dynamic sparse index function");
+}
diff --git a/csrc/vertical_slash_index.cu b/csrc/vertical_slash_index.cu
new file mode 100644
index 0000000..45af042
--- /dev/null
+++ b/csrc/vertical_slash_index.cu
@@ -0,0 +1,167 @@
+// Copyright (c) Microsoft Corporation.
+// Licensed under the MIT license.
+
+#include <assert.h>
+
+#include <cuda.h>
+#include <cuda_bf16.h>
+#include <cuda_fp16.h>
+#include <cuda_runtime.h>
+
+#include <torch/extension.h>
+
+// __device__ int min(int x, int y) {
+// return x < y ? x : y;
+// }
+
+// __device__ int max(int x, int y) {
+// return x > y ? x : y;
+// }
+
+__device__ void save_blocks(int* block_offset, int range_start, int range_end, int block_size, int& block_count) {
+ for (int idx = range_start; idx < range_end; idx += block_size) {
+ block_offset[block_count++] = idx;
+ }
+}
+
+__global__ void convert_vertical_slash_indexes_kernel(
+ const int* seqlens, // [BATCH, ]
+ const int* vertical_indexes, // [BATCH, N_HEADS, NNZ_V]
+ const int* slash_indexes, // [BATCH, N_HEADS, NNZ_S]
+ int* block_count, // [BATCH, N_HEADS, cdiv(N_CTX, BLOCK_SIZE_M)]
+ int* block_offset, // [BATCH, N_HEADS, cdiv(N_CTX, BLOCK_SIZE_M), NNZ_S]
+ int* column_count, // [BATCH, N_HEADS, cdiv(N_CTX, BLOCK_SIZE_M)]
+ int* column_index, // [BATCH, N_HEADS, cdiv(N_CTX, BLOCK_SIZE_M), NNZ_V]
+ int N_HEADS,
+ int N_ROWS,
+ int BLOCK_SIZE_M,
+ int BLOCK_SIZE_N,
+ int NNZ_V,
+ int NNZ_S
+) {
+ const int batch_idx = blockIdx.y;
+ const int head_idx = blockIdx.x;
+ const int group_idx = blockIdx.z;
+
+ int seqlen = seqlens[batch_idx];
+ int block_idx_m = group_idx * blockDim.x + threadIdx.x;
+ int start_m = block_idx_m * BLOCK_SIZE_M;
+ if (start_m >= seqlen) {
+ return;
+ }
+ int end_m = start_m + BLOCK_SIZE_M;
+ vertical_indexes += (batch_idx * N_HEADS + head_idx) * NNZ_V;
+ slash_indexes += (batch_idx * N_HEADS + head_idx) * NNZ_S;
+ int row_offset = (batch_idx * N_HEADS + head_idx) * N_ROWS + block_idx_m;
+ block_count += row_offset;
+ block_offset += row_offset * NNZ_S;
+ column_count += row_offset;
+ column_index += row_offset * NNZ_V;
+
+ int tmp_col_cnt = 0, tmp_blk_cnt = 0;
+ int s = 0, v = 0;
+ int v_idx = vertical_indexes[v++];
+ int s_idx = slash_indexes[s++];
+ while (s_idx >= end_m) {
+ s_idx = slash_indexes[s++];
+ }
+ s_idx = max(end_m - s_idx, BLOCK_SIZE_M);
+ int range_start = s_idx - BLOCK_SIZE_M, range_end = s_idx;
+ while (1) {
+ if (v_idx < range_end) {
+ if (v_idx < range_start) {
+ column_index[tmp_col_cnt++] = v_idx;
+ }
+ if (v < NNZ_V) {
+ v_idx = vertical_indexes[v++];
+ } else {
+ v_idx = end_m + BLOCK_SIZE_M;
+ }
+ } else {
+ if (s < NNZ_S) {
+ s_idx = max(end_m - slash_indexes[s++], BLOCK_SIZE_M);
+ } else {
+ save_blocks(block_offset, range_start, range_end, BLOCK_SIZE_N, tmp_blk_cnt);
+ break;
+ }
+ if (s_idx > range_end + BLOCK_SIZE_M) {
+ save_blocks(block_offset, range_start, range_end, BLOCK_SIZE_N, tmp_blk_cnt);
+ range_start = s_idx - BLOCK_SIZE_M;
+ range_end = s_idx;
+ } else if (s_idx > range_end) {
+ range_end += BLOCK_SIZE_M;
+ }
+ }
+ }
+
+ block_count[0] = tmp_blk_cnt;
+ column_count[0] = tmp_col_cnt;
+}
+
+void convert_vertical_slash_indexes_64x64(
+ const int* seqlens, // [BATCH, ]
+ const int* vertical_indexes, // [BATCH, N_HEADS, NNZ_V]
+ const int* slash_indexes, // [BATCH, N_HEADS, NNZ_S]
+ int* block_count, // [BATCH, N_HEADS, cdiv(N_CTX, BLOCK_SIZE_M)]
+ int* block_offset, // [BATCH, N_HEADS, cdiv(N_CTX, BLOCK_SIZE_M), NNZ_S]
+ int* column_count, // [BATCH, N_HEADS, cdiv(N_CTX, BLOCK_SIZE_M)]
+ int* column_index, // [BATCH, N_HEADS, cdiv(N_CTX, BLOCK_SIZE_M), NNZ_V]
+ int BATCH_SIZE,
+ int N_HEADS,
+ int N_ROWS,
+ int NNZ_V,
+ int NNZ_S
+) {
+ const int BLOCK_SIZE_M = 64;
+ const int BLOCK_SIZE_N = 64;
+ const int N_THREADS = 64;
+ const dim3 dimBlock(N_THREADS);
+ const dim3 dimGrid(N_HEADS, BATCH_SIZE, (N_ROWS + N_THREADS - 1) / N_THREADS);
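+    // One thread per BLOCK_SIZE_M row block: grid = (heads, batch, ceil(num_rows / N_THREADS)).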
+    convert_vertical_slash_indexes_kernel<<<dimGrid, dimBlock>>>(
+ seqlens, vertical_indexes, slash_indexes,
+ block_count, block_offset, column_count, column_index,
+ N_HEADS, N_ROWS, BLOCK_SIZE_M, BLOCK_SIZE_N, NNZ_V, NNZ_S
+ );
+}
+
+std::vector<at::Tensor> convert_vertical_slash_indexes(
+ torch::Tensor seqlens, // [BATCH, ]
+ torch::Tensor vertical_indexes, // [BATCH, N_HEADS, NNZ_V]
+ torch::Tensor slash_indexes, // [BATCH, N_HEADS, NNZ_S]
+ int context_size,
+ int block_size_M,
+ int block_size_N
+) {
+ assert(block_size_M == 64);
+ assert(block_size_N == 64);
+
+ cudaSetDevice(seqlens.get_device());
+
+ int batch_size = slash_indexes.size(0);
+ int num_heads = slash_indexes.size(1);
+ int nnz_slash = slash_indexes.size(2);
+ int nnz_vertical = vertical_indexes.size(2);
+ int num_rows = (context_size + block_size_M - 1) / block_size_M;
+
+ torch::Tensor block_count = torch::zeros({batch_size, num_heads, num_rows}, seqlens.options());
+ torch::Tensor block_offset = torch::zeros({batch_size, num_heads, num_rows, nnz_slash}, seqlens.options());
+ torch::Tensor column_count = torch::zeros({batch_size, num_heads, num_rows}, seqlens.options());
+ torch::Tensor column_index = torch::zeros({batch_size, num_heads, num_rows, nnz_vertical}, seqlens.options());
+
+ convert_vertical_slash_indexes_64x64(
+        seqlens.data_ptr<int>(),
+        vertical_indexes.data_ptr<int>(),
+        slash_indexes.data_ptr<int>(),
+        block_count.data_ptr<int>(),
+        block_offset.data_ptr<int>(),
+        column_count.data_ptr<int>(),
+        column_index.data_ptr<int>(),
+ batch_size,
+ num_heads,
+ num_rows,
+ nnz_vertical,
+ nnz_slash
+ );
+
+ return { block_count, block_offset, column_count, column_index };
+}
diff --git a/examples/run_hf.py b/examples/run_hf.py
new file mode 100644
index 0000000..19bf71f
--- /dev/null
+++ b/examples/run_hf.py
@@ -0,0 +1,27 @@
+# Copyright (c) 2024 Microsoft
+# Licensed under The MIT License [see LICENSE for details]
+
+from transformers import AutoModelForCausalLM, AutoTokenizer
+
+from minference import MInference
+
+prompt = "Hello, my name is"
+
+model_name = "gradientai/Llama-3-8B-Instruct-262k"
+tokenizer = AutoTokenizer.from_pretrained(model_name)
+model = AutoModelForCausalLM.from_pretrained(
+ model_name,
+ torch_dtype="auto",
+ device_map="cuda",
+)
+
+# Patch MInference Module
+minference_patch = MInference("minference", model_name)
+model = minference_patch(model)
+
+batch_inputs = tokenizer(prompt, return_tensors="pt").to("cuda")
+outputs = model.generate(**batch_inputs, max_length=10)
+generated_text = tokenizer.decode(
+ outputs[0][batch_inputs["input_ids"].shape[1] :], skip_special_tokens=True
+)
+print(f"Prompt: {prompt!r}, Generated text: {generated_text!r}")
diff --git a/examples/run_hf_streaming.py b/examples/run_hf_streaming.py
new file mode 100644
index 0000000..18d91e8
--- /dev/null
+++ b/examples/run_hf_streaming.py
@@ -0,0 +1,161 @@
+# Copyright (c) 2024 Microsoft
+# Licensed under The MIT License [see LICENSE for details]
+
+import warnings
+
+warnings.filterwarnings("ignore")
+
+import argparse
+import json
+import os
+import re
+import sys
+import time
+
+import torch
+from tqdm import tqdm
+from transformers import AutoModelForCausalLM, AutoTokenizer
+
+from minference import MInference
+
+
+@torch.no_grad()
+def greedy_generate(
+ model, tokenizer, input_ids, past_key_values, max_gen_len, attn_type
+):
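+    # Prefill once (timing the TTFT), then decode greedily token by token,
+    # streaming the decoded words to stdout while reusing the growing KV cache.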
+ torch.cuda.synchronize()
+ st = time.time()
+ outputs = model(
+ input_ids=input_ids,
+ past_key_values=past_key_values,
+ use_cache=True,
+ )
+ torch.cuda.synchronize()
+ print(
+ f"\033[1;31m Attention Type: {attn_type}, TTFT: {time.time() - st:.2f}s\033[0m\n"
+ )
+
+ past_key_values = outputs.past_key_values
+ pred_token_idx = outputs.logits[:, -1, :].argmax(dim=-1).unsqueeze(1)
+ generated_ids = [pred_token_idx.item()]
+ pos = 0
+ for _ in range(max_gen_len - 1):
+ outputs = model(
+ input_ids=pred_token_idx,
+ past_key_values=past_key_values,
+ use_cache=True,
+ )
+
+ past_key_values = outputs.past_key_values
+ pred_token_idx = outputs.logits[:, -1, :].argmax(dim=-1).unsqueeze(1)
+ generated_ids.append(pred_token_idx.item())
+ generated_text = (
+ tokenizer.decode(
+ generated_ids,
+ skip_special_tokens=True,
+ clean_up_tokenization_spaces=True,
+ spaces_between_special_tokens=False,
+ )
+ .strip()
+ .split(" ")
+ )
+
+ now = len(generated_text) - 1
+ if now > pos:
+ print(" ".join(generated_text[pos:now]), end=" ", flush=True)
+ pos = now
+
+ if pred_token_idx == tokenizer.eos_token_id:
+ break
+ print(" ".join(generated_text[pos:]), flush=True)
+ return past_key_values
+
+
+@torch.no_grad()
+def streaming_inference(
+ model, tokenizer, prompts, kv_cache=None, max_gen_len=500, attn_type=None
+):
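+    # Run each prompt in turn, carrying past_key_values across prompts so the
+    # conversation history stays in the KV cache.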
+ past_key_values = None
+ for idx, prompt in enumerate(prompts):
+ input_ids = tokenizer(prompt, return_tensors="pt").input_ids
+ input_ids = input_ids.to(model.device)
+ seq_len = input_ids.shape[1]
+
+ if seq_len > 5000:
+ print(
+ "\n" + prompt[:2500] + prompt[-2500:] + "\n\nASSISTANT: ",
+ end="",
+ )
+ else:
+ print("\n" + prompt + "\n\nASSISTANT: ", end="")
+ print(seq_len)
+
+ past_key_values = greedy_generate(
+ model,
+ tokenizer,
+ input_ids,
+ past_key_values,
+ max_gen_len=max_gen_len,
+ attn_type=attn_type,
+ )
+
+
+def main(args):
+ model_name_or_path = args.model_name_or_path
+ tokenizer = AutoTokenizer.from_pretrained(model_name_or_path)
+ model = AutoModelForCausalLM.from_pretrained(
+ model_name_or_path,
+ torch_dtype="auto",
+ device_map="cuda",
+ )
+
+ # Patch MInference Module
+ minference_patch = MInference(
+ args.attn_type if args.attn_type != "hf" else "minference_with_dense",
+ model_name_or_path,
+ kv_cache_cpu=True,
+ )
+ model = minference_patch(model)
+
+ prompts = [
+        "Summarize the following book."
+ + "\n\n"
+ + open("data/pg2600.txt").read()
+ + "\n\nSummarization:"
+ ]
+
+ kv_cache = None
+
+ streaming_inference(
+ model,
+ tokenizer,
+ prompts,
+ kv_cache,
+ attn_type=args.attn_type,
+ )
+
+
+if __name__ == "__main__":
+ parser = argparse.ArgumentParser()
+ parser.add_argument(
+ "--model_name_or_path",
+ type=str,
+ default="gradientai/Llama-3-8B-Instruct-Gradient-1048k",
+ )
+ parser.add_argument("--data_root", type=str, default="data/")
+ parser.add_argument(
+ "--attn_type",
+ type=str,
+ required=True,
+ choices=[
+ "vllm",
+ "hf",
+ "streaming",
+ "minference",
+ "inf_llm",
+ "minference_with_dense",
+ ],
+ )
+ args = parser.parse_args()
+
+ main(args)
diff --git a/examples/run_hf_streaming.sh b/examples/run_hf_streaming.sh
new file mode 100644
index 0000000..ffeb992
--- /dev/null
+++ b/examples/run_hf_streaming.sh
@@ -0,0 +1,7 @@
+# Copyright (c) 2024 Microsoft
+# Licensed under The MIT License [see LICENSE for details]
+
+# Load data
+mkdir -p ./data
+wget https://gutenberg.org/cache/epub/2600/pg2600.txt -O ./data/pg2600.txt
+
+python examples/run_hf_streaming.py --attn_type minference
diff --git a/examples/run_vllm.py b/examples/run_vllm.py
new file mode 100644
index 0000000..6084be3
--- /dev/null
+++ b/examples/run_vllm.py
@@ -0,0 +1,38 @@
+# Copyright (c) 2024 Microsoft
+# Licensed under The MIT License [see LICENSE for details]
+
+from vllm import LLM, SamplingParams
+
+from minference import MInference
+
+prompts = [
+ "Hello, my name is",
+ "The president of the United States is",
+ "The capital of France is",
+ "The future of AI is",
+]
+
+sampling_params = SamplingParams(
+ temperature=0.8,
+ top_p=0.95,
+ max_tokens=10,
+)
+model_name = "gradientai/Llama-3-8B-Instruct-262k"
+llm = LLM(
+ model_name,
+ max_num_seqs=1,
+ enforce_eager=True,
+ max_model_len=128000,
+)
+
+# Patch MInference Module
+minference_patch = MInference("vllm", model_name)
+llm = minference_patch(llm)
+
+outputs = llm.generate(prompts, sampling_params)
+
+# Print the outputs.
+for output in outputs:
+ prompt = output.prompt
+ generated_text = output.outputs[0].text
+ print(f"Prompt: {prompt!r}, Generated text: {generated_text!r}")
diff --git a/experiments/README.md b/experiments/README.md
new file mode 100644
index 0000000..e022bb5
--- /dev/null
+++ b/experiments/README.md
@@ -0,0 +1,182 @@
+# Experiments
+
+- [Offline Kernel-Aware Sparse Pattern Search](#offline-kernel-aware-sparse-pattern-search)
+- [MInference Benchmark Experiments](#minference-benchmark-experiments)
+  - [End-to-End Benchmark](#end-to-end-benchmark)
+  - [Micro-Benchmark](#micro-benchmark)
+- [MInference Downstream Tasks Experiments](#minference-downstream-tasks-experiments)
+  - [InfiniteBench](#infinitebench)
+  - [RULER](#ruler)
+  - [PPL](#ppl)
+  - [Needle in A Haystack](#needle-in-a-haystack)
+
+## Offline Kernel-Aware Sparse Pattern Search
+
+You can use the following scripts to search for the optimal head sparse pattern:
+
+```bash
+cd experiments/infinite_bench
+python run_infinitebench.py \
+ --task kv_retrieval \
+ --model_name_or_path gradientai/Llama-3-8B-Instruct-262k \
+ --data_dir ./data \
+ --output_dir ./results \
+ --max_seq_length 30000 \
+ --rewrite \
+ --is_search \
+ --start_example_id 3 \
+ --topk_dims_file_path Llama_3_8B_Instruct_262k_kv_out_v32_fit_o_best_pattern.json \
+ --num_eval_examples 20 --topk 1 --starting_layer 0 --attn_type minference
+```
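+
+Once the search finishes, the resulting best-pattern JSON can be plugged back into the patch. A minimal sketch (the `config_path` argument name is an assumption here; check the MInference API for the exact parameter):
+
+```python
+from transformers import AutoModelForCausalLM
+
+from minference import MInference
+
+model_name = "gradientai/Llama-3-8B-Instruct-262k"
+model = AutoModelForCausalLM.from_pretrained(model_name, torch_dtype="auto", device_map="cuda")
+
+# `config_path` is assumed; point it at the searched pattern file from the step above.
+minference_patch = MInference(
+    "minference",
+    model_name,
+    config_path="Llama_3_8B_Instruct_262k_kv_out_v32_fit_o_best_pattern.json",
+)
+model = minference_patch(model)
+```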
+
+## MInference Benchmark Experiments
+
+> [!NOTE]
+> All experiments were run on a single A100 GPU with 80GB of VRAM.
+
+Environment parameters:
+- CUDA 12.3
+- Triton 2.1.0
+- PyCuda 2023.1
+
+### End-to-End Benchmark
+
+To demonstrate the efficiency of our method, we conducted end-to-end latency tests using the [LLaMA-3-8B-Instruct-1M](https://huggingface.co/gradientai/Llama-3-8B-Instruct-Gradient-1048k) model. The prompts were trimmed to different target token numbers, and we measured the pre-filling stage latency without using KV cache.
+
+1. Download the prompt:
+
+```bash
+wget https://raw.githubusercontent.com/FranxYao/chain-of-thought-hub/main/gsm8k/lib_prompt/prompt_hardest.txt
+```
+
+2. Run a single context window size test using one method:
+
+```bash
+python experiments/benchmarks/benchmark_e2e.py --attn_type hf --context_window 1000000
+```
+
+3. Run all latency experiments using different methods:
+
+```bash
+python experiments/benchmarks/benchmark_e2e.py --run_benchmark
+```
+
+4. After that, you should get the end-to-end latency results like this:
+
+```text
+ FlashAttention-2 StreamingLLM InfLLM MInference
+1K 0.54565 1.07110 2.94495 2.96450
+10K 0.97590 1.18339 2.21052 2.77618
+50K 8.52933 5.47972 14.63624 7.54537
+100K 24.88319 10.86379 27.67215 13.98508
+200K 79.39184 21.61490 55.64703 26.81303
+300K 169.62441 32.44844 80.74326 41.09374
+500K 456.78353 54.15910 167.91472 66.27691
+1000K 1765.56387 107.85639 328.58551 179.12031
+```
+
+> [!TIP]
+> Based on our tests, **a single A100 can support up to 1.8M** context prompts during the pre-filling stage using LLaMA-3-8B-4M with **bf16**.
+
+### Micro-Benchmark
+
+
+## MInference Downstream Tasks Experiments
+
+> [!NOTE]
+> All of these experiments were run on a single A100 GPU with 80GB of VRAM. You may need to modify the commands to fit your own computing environment (e.g., changing the batch size, the max memory per GPU, or the number of GPUs).
+
+### InfiniteBench
+
+[InfiniteBench](https://github.com/OpenBMB/InfiniteBench) is a benchmark tailored for evaluating the capabilities of language models to process, understand, and reason over super long contexts.
+
+InfiniteBench consists of the following tasks: `kv_retrieval`, `longbook_choice_eng`, `math_find`, `longbook_qa_chn`, `longbook_qa_eng`, `longdialogue_qa_eng`, `code_debug`, `longbook_sum_eng`, `number_string`, `passkey`.
+
+1. Run InfiniteBench with `MInference`:
+
+```bash
+bash experiments/infinite_bench/run_infinitebench.sh gradientai/Llama-3-8B-Instruct-262k 160000 -1 minference
+```
+
+2. Experimental results
+
+| Methods | longbook_sum_eng | longbook_qa_eng | longbook_choice_eng | longdialogue_qa_eng | longbook_qa_chn | code_debug | math_find | passkey | number_string | kv_retrieval | Avg. |
+|---------------|------------------|-----------------|---------------------|---------------------|-----------------|------------|-----------|---------|---------------|--------------|------|
+| Full Attention| 20.2 | 12.4 | 67.3 | 6.0 | 12.9 | 22.1 | 26.6 | 100.0 | 100.0 | 14.4 | 38.2 |
+| Ours | **20.5** | **12.9** | 65.9 | **7.5** | 12.5 | **22.3** | **33.1** | 100.0 | 100.0 | 12.8 | **38.8** |
+
+### RULER
+
+The [RULER](https://github.com/hsiehjackson/RULER) benchmark is a challenging long-context LLM benchmark containing four task categories: Retrieval, Multi-hop Tracing, Aggregation, and Question Answering (QA). The retrieval tasks extend the needle-in-a-haystack (NIAH) test; Multi-hop Tracing involves resolving coreference chains; Aggregation requires identifying frequently occurring words; and QA adapts existing datasets with added distracting information to test models on extended contexts.
+
+To run the RULER benchmark, first install the requirements:
+
+```bash
+pip install Cython
+pip install nemo-toolkit[all]==1.21 --no-deps
+pip install -r experiments/ruler/requirements.txt
+```
+
+1. Download required data files:
+
+```bash
+cd experiments/ruler/data/synthetic/json/
+
+# download Paul Graham Essays for the needle test
+python download_paulgraham_essay.py
+
+# download SQuAD and HotpotQA for the QA test
+bash download_qa_dataset.sh
+
+# you will need nltk.download('punkt') as well
+python -c "import nltk; nltk.download('punkt')"
+```
+
+2. Run RULER with `MInference` and `Llama-3-8B-Instruct-262k`:
+
+```bash
+# under the experiments/ruler/ directory
+bash run.sh
+```
+
+The default output directory is `results/ruler`; set `ROOT_DIR` in `run.sh` to change it.
+
+3. Experimental results
+
+| Models | Claimed | Effective | 4K | 8K | 16K | 32K | 64K | 128K | Avg. |
+|--------------|---------|-----------|-----|-----|-----|-----|-----|------|------|
+| Full-Attention | 262K | 16K | 97.2| 91.8| 87.3| 80.8| 77.4| 72.2 | 84.4 |
+| Ours | - | 32K | **97.7**| 91.2| **88.5**| **85.0** | **82.3** | **77.6** | **87.0** |
+
+
+### PPL
+
+We use contiguous 100K-token text chunks sampled from [PG-19](https://huggingface.co/datasets/deepmind/pg19) for the perplexity evaluation.
+
+1. To run the PPL test, simply run:
+```bash
+bash experiments/ppl/run_ppl.sh
+```
+
+The result will be saved at `results/long-ppl/`, and the visualization will be saved as `results/long-ppl/long-ppl-viz.png`.
+
+2. Experimental results
+
+![PPL](../images/benchmarks/ppl-LLaMA-3-262k.png)
+
+
+### Needle in A Haystack
+
+The [Needle In A Haystack test](https://github.com/gkamradt/LLMTest_NeedleInAHaystack) is an evaluation method that randomly inserts key information into long texts to form prompts for large language models (LLMs). The test aims to detect whether large models can extract such key information from extensive texts, thereby assessing the models' capabilities in processing and understanding long documents.
+
+1. Run the Needle in A Haystack test:
+
+```bash
+bash experiments/needle_in_a_haystack/run_needle.sh
+```
+
+The results will be saved under `./needle` directory.
+
+2. Our experimental results
+
+![needle](../images/benchmarks/needle_viz_LLaMA-3-8B-1M_ours_1K_1000K.png)
diff --git a/experiments/benchmarks/benchmark_e2e.py b/experiments/benchmarks/benchmark_e2e.py
new file mode 100644
index 0000000..0df0308
--- /dev/null
+++ b/experiments/benchmarks/benchmark_e2e.py
@@ -0,0 +1,143 @@
+# Copyright (c) 2024 Microsoft
+# Licensed under The MIT License [see LICENSE for details]
+
+import argparse
+import sys
+import time
+from collections import defaultdict
+
+import torch
+from transformers import (
+ AutoConfig,
+ AutoModelForCausalLM,
+ AutoTokenizer,
+ GenerationConfig,
+ LlamaForCausalLM,
+)
+
+from minference import MInference
+
+
+def run_target_length(m: int, model, attn_type):
+ # wget https://raw.githubusercontent.com/FranxYao/chain-of-thought-hub/main/gsm8k/lib_prompt/prompt_hardest.txt
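+    # Repeat the sample prompt until it reaches roughly `m` tokens, then average
+    # the prefill latency over T = 10 forward passes.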
+ prompt_complex = open("./prompt_hardest.txt").read()
+ input_ids = tokenizer(prompt_complex)["input_ids"]
+ n = len(input_ids)
+ b = m // n + 1
+
+ new_input_ids = (input_ids * b)[:m]
+ prompt = tokenizer.decode(new_input_ids)
+ data = tokenizer(prompt, return_tensors="pt")
+ input_ids = data["input_ids"].cuda()
+ attention_mask = data["attention_mask"].cuda()
+ s = 0
+ T = 10
+ for _ in range(T):
+ torch.cuda.synchronize()
+ start = time.time()
+ with torch.no_grad():
+ if attn_type != "inf_llm":
+ model(input_ids, attention_mask, use_cache=False)
+ else:
+ model.generate(
+ input_ids, generation_config=GenerationConfig(max_new_tokens=1)
+ )
+ torch.cuda.synchronize()
+ s += time.time() - start
+ print(m, s / T)
+ return s / T
+
+
+def run_benchmark(model_name: str):
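+    # Sweep each attention backend over the target context lengths and collect
+    # the averaged prefill latencies into a tab-separated table (also written to res.csv).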
+ TARGET_LENS = [l * 1000 for l in [10, 50, 100, 200, 300, 500, 1000]]
+ ATTN_TYPES = ["minference_with_dense", "streaming", "minference"]
+ ATTN_TYPES2NAME = {
+ "minference_with_dense": "FlashAttention-2",
+ "streaming": "StreamingLLM",
+ "inf_llm": "InfLLM",
+ "minference": "MInference",
+ }
+ latency = defaultdict(list)
+
+ for attn_type in ATTN_TYPES:
+ model = AutoModelForCausalLM.from_pretrained(
+ model_name,
+ torch_dtype="auto",
+ device_map="auto",
+ )
+        attn_kwargs = {} if attn_type != "inf_llm" else {"dense_decoding": False}
+ if attn_type != "hf":
+ minference_patch = MInference(
+ attn_type, model_name, attn_kwargs=attn_kwargs
+ )
+ model = minference_patch(model)
+ for l in TARGET_LENS:
+ if l >= 700000 and attn_type not in ["inf_llm", "hf"]:
+ minference_patch = MInference(attn_type, model_name, kv_cache_cpu=True)
+ model = minference_patch(model)
+
+ t = run_target_length(l, model, attn_type)
+ latency[ATTN_TYPES2NAME[attn_type]].append([l, f"{t:.5f}"])
+ print(attn_type, t, l)
+ torch.cuda.empty_cache()
+
+ res = [[""] + [ATTN_TYPES2NAME[attn_type] for attn_type in ATTN_TYPES]]
+ for idx in range(len(TARGET_LENS)):
+ l = TARGET_LENS[idx]
+ res.append(
+ [f"{l//1000}K"]
+ + [latency[ATTN_TYPES2NAME[attn_type]][idx][-1] for attn_type in ATTN_TYPES]
+ )
+ print("\n".join(["\t".join(ii) for ii in res]))
+ with open("res.csv", "w") as f:
+ f.write("\n".join(["\t".join(ii) for ii in res]))
+ return res
+
+
+if __name__ == "__main__":
+ args = argparse.ArgumentParser()
+ args.add_argument(
+ "--model_name",
+ type=str,
+ default="gradientai/Llama-3-8B-Instruct-Gradient-1048k",
+ )
+ args.add_argument(
+ "--attn_type",
+ type=str,
+ choices=[
+ "hf",
+ "streaming",
+ "minference",
+ "inf_llm",
+ ],
+ )
+ args.add_argument("--context_window", type=int, default=100000)
+ args.add_argument("--run_benchmark", action="store_true")
+ args.add_argument("--kv_cache_cpu", action="store_true")
+ args.add_argument("--trust_remote_code", action="store_true")
+ args = args.parse_args()
+
+ model_name = args.model_name
+ tokenizer = AutoTokenizer.from_pretrained(
+ model_name, trust_remote_code=args.trust_remote_code
+ )
+ if args.run_benchmark:
+ run_benchmark(model_name)
+ else:
+ model = AutoModelForCausalLM.from_pretrained(
+ model_name,
+ torch_dtype="auto",
+ device_map="auto",
+ trust_remote_code=args.trust_remote_code,
+ )
+ attn_kwargs = {} if args.attn_type != "inf_llm" else {"dense_decoding": False}
+ if args.attn_type != "hf":
+ minference_patch = MInference(
+ args.attn_type,
+ model_name,
+ kv_cache_cpu=args.kv_cache_cpu,
+ attn_kwargs=attn_kwargs,
+ )
+ model = minference_patch.patch_model(model)
+
+ run_target_length(args.context_window, model, args.attn_type)
diff --git a/experiments/benchmarks/run_e2e.sh b/experiments/benchmarks/run_e2e.sh
new file mode 100644
index 0000000..42a899e
--- /dev/null
+++ b/experiments/benchmarks/run_e2e.sh
@@ -0,0 +1,9 @@
+# Copyright (c) 2024 Microsoft
+# Licensed under The MIT License [see LICENSE for details]
+
+# Load data
+wget https://raw.githubusercontent.com/FranxYao/chain-of-thought-hub/main/gsm8k/lib_prompt/prompt_hardest.txt
+
+python experiments/benchmarks/benchmark_e2e.py \
+ --attn_type minference \
+ --context_window 300000
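+
+# Optionally, sweep all attention types across context lengths instead:
+# python experiments/benchmarks/benchmark_e2e.py --run_benchmark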
diff --git a/experiments/infinite_bench/args.py b/experiments/infinite_bench/args.py
new file mode 100644
index 0000000..9ef2e99
--- /dev/null
+++ b/experiments/infinite_bench/args.py
@@ -0,0 +1,80 @@
+# Copyright (c) 2024 Microsoft
+# Licensed under The MIT License [see LICENSE for details]
+
+from argparse import ArgumentParser, Namespace
+
+from eval_utils import DATA_NAME_TO_MAX_NEW_TOKENS
+
+
+def parse_args() -> Namespace:
+ p = ArgumentParser()
+ p.add_argument(
+ "--task",
+ type=str,
+ # choices=list(DATA_NAME_TO_MAX_NEW_TOKENS.keys()) + ["all"],
+ required=True,
+ help='Which task to use. Note that "all" can only be used in `compute_scores.py`.', # noqa
+ )
+ p.add_argument(
+ "--data_dir", type=str, default="../data", help="The directory of data."
+ )
+ p.add_argument(
+ "--output_dir",
+ type=str,
+ default="../results",
+ help="Where to dump the prediction results.",
+ ) # noqa
+ p.add_argument(
+ "--model_name_or_path",
+ type=str,
+ default="facebook/opt-350m",
+ help="For `compute_scores.py` only, specify which model you want to compute the score for.", # noqa
+ )
+ p.add_argument(
+ "--num_eval_examples",
+ type=int,
+ default=-1,
+ help="The number of test examples to use, use all examples in default.",
+ ) # noqa
+ p.add_argument(
+ "--start_idx",
+ type=int,
+ default=0,
+ help="The index of the first example to infer on. This is used if you want to evaluate on a (contiguous) subset of the data.",
+ ) # noqa
+ p.add_argument(
+ "--stop_idx",
+ type=int,
+ help="The index of the last example to infer on. This is used if you want to evaluate on a (contiguous) subset of the data. Defaults to the length of dataset.",
+ ) # noqa
+ p.add_argument("--verbose", action="store_true")
+ p.add_argument("--use_sparq", action="store_true")
+ p.add_argument("--device", type=str, default="cuda")
+ p.add_argument("--max_seq_length", type=int, default=100000)
+ p.add_argument("--rewrite", action="store_true")
+ p.add_argument("--topk", type=int, default=-1)
+ p.add_argument("--starting_layer", type=int, default=-1)
+ p.add_argument("--start_example_id", type=int, default=0)
+ p.add_argument("--topk_dims_file_path", type=str, default=None)
+ p.add_argument("--kv_cache_cpu", action="store_true")
+ p.add_argument("--kv_cache_cpu_device", type=str, default="cpu")
+ p.add_argument("--use_snapkv", action="store_true")
+ p.add_argument("--trust_remote_code", action="store_true")
+ p.add_argument(
+ "--attn_type",
+ type=str,
+ choices=[
+ "vllm",
+ "hf",
+ "streaming",
+ "inf_llm",
+ "flash_attn",
+ "minference",
+ "minference_with_dense",
+ "dilated1",
+ "dilated2",
+ ],
+ default="hf",
+ )
+ p.add_argument("--is_search", action="store_true")
+ return p.parse_args()
diff --git a/experiments/infinite_bench/compute_scores.py b/experiments/infinite_bench/compute_scores.py
new file mode 100644
index 0000000..a0a2941
--- /dev/null
+++ b/experiments/infinite_bench/compute_scores.py
@@ -0,0 +1,446 @@
+# Copyright (c) 2024 Microsoft
+# Licensed under The MIT License [see LICENSE for details]
+
+from __future__ import annotations
+
+import json
+import os
+import re
+import string
+from collections import Counter
+from pathlib import Path
+
+import evaluate
+from args import parse_args
+from tqdm import tqdm
+
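+# The ROUGE scorer is loaded once at import time and reused by
+# get_score_one_longbook_sum_eng for every summarization example.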
+ROUGE_SCORER = evaluate.load("rouge")
+
+
+def normalize_answer(s: str) -> str:
+ """Lower text and remove punctuation, articles and extra whitespace."""
+
+ def remove_articles(text):
+ return re.sub(r"\b(a|an|the)\b", " ", text)
+
+ def white_space_fix(text):
+ return " ".join(text.split())
+
+ def remove_punc(text):
+ exclude = set(string.punctuation)
+ return "".join(ch for ch in text if ch not in exclude)
+
+ def lower(text):
+ return text.lower()
+
+ return white_space_fix(remove_articles(remove_punc(lower(s))))
+
+
+def normalize_zh_answer(s: str) -> str:
+ """Chinese version. Lower text and remove punctuation, extra whitespace."""
+
+ def white_space_fix(text):
+ return "".join(text.split())
+
+ def remove_punc(text):
+ cn_punctuation = "！？｡。＂＃＄％＆＇（）＊＋，－／：；＜＝＞＠［＼］＾＿｀｛｜｝～｟｠｢｣､、〃》「」『』【】〔〕〖〗〘〙〚〛〜〝〞〟〰〾〿–—''‛""„‟…‧﹏." # noqa
+ all_punctuation = set(string.punctuation + cn_punctuation)
+ return "".join(ch for ch in text if ch not in all_punctuation)
+
+ def lower(text):
+ return text.lower()
+
+ return white_space_fix(remove_punc(lower(s)))
+
+
+def f1_score(prediction, ground_truth) -> tuple[float, float, float]:
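+ # Token-bag F1: e.g. f1_score(["a", "b"], ["b", "c"]) -> (0.5, 0.5, 0.5), since
+ # the single shared token "b" gives precision = recall = 1/2.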
+ common = Counter(prediction) & Counter(ground_truth)
+ num_same = sum(common.values())
+ if num_same == 0:
+ return 0, 0, 0
+ precision = 1.0 * num_same / len(prediction)
+ recall = 1.0 * num_same / len(ground_truth)
+ f1 = (2 * precision * recall) / (precision + recall)
+ return f1, precision, recall
+
+
+def qa_f1_score(pred: str, ground_truths) -> float:
+ """Computes the F1, recall, and precision."""
+ f1 = 0
+ prec = 0
+ recall = 0
+ for ground_truth in ground_truths:
+ normalized_prediction = normalize_answer(pred)
+ normalized_ground_truth = normalize_answer(ground_truth)
+
+ prediction_tokens = normalized_prediction.split()
+ ground_truth_tokens = normalized_ground_truth.split()
+ scores = f1_score(prediction_tokens, ground_truth_tokens)
+ this_f1, this_prec, this_recall = scores
+ f1 = max(f1, this_f1)
+ prec = max(prec, this_prec)
+ recall = max(recall, this_recall)
+ return f1
+
+
+def qa_f1_score_zh(pred: str, ground_truths: list[str]) -> float:
+ """
+ QA F1 score for chinese.
+ """
+ f1 = 0
+ prec = 0
+ recall = 0
+ for ground_truth in ground_truths:
+ norm_pred = normalize_zh_answer(pred)
+ norm_label = normalize_zh_answer(ground_truth)
+
+ # One character one token.
+ pred_tokens = list(norm_pred)
+ label_tokens = list(norm_label)
+ scores = f1_score(pred_tokens, label_tokens)
+ this_f1, this_prec, this_recall = scores
+ f1 = max(f1, this_f1)
+ prec = max(prec, this_prec)
+ recall = max(recall, this_recall)
+ return f1
+
+
+def load_json(fname):
+ return json.load(open(fname))
+
+
+def iter_jsonl(fname, cnt=None):
+ i = 0
+ with open(fname, "r", encoding="utf8") as fin:
+ for line in fin:
+ if line.strip() == "": # Skip empty lines
+ continue
+ if i == cnt:
+ break
+ yield json.loads(line)
+ i += 1
+
+
+def first_int_match(prediction):
+ pred_list = re.split("[^0-9]", prediction)
+ pred_value = ""
+ for item in pred_list:
+ if item != "":
+ pred_value = item
+ break
+ return pred_value
+
+
+def split_retrieval_answer(pred: str):
+ for c in ["\n", ":", '"', "'", ".", ",", "?", "!", "{", "}"]:
+ pred = pred.replace(c, " ")
+ words = pred.split()
+ return words
+
+
+def get_score_one_kv_retrieval(pred, label, model_name: str) -> bool:
+ for c in ["\n", ":", '"', "'", ".", ",", "?", "!", "{", "}", "", "The", "To"]:
+ pred = pred.replace(c, " ")
+ words = pred.split()
+ return label in words
+
+
+def get_score_one_passkey(pred, label, model_name: str) -> bool:
+ if isinstance(label, list):
+ label = label[0]
+ return label == first_int_match(pred)
+
+
+def get_score_one_number_string(pred, label, model_name: str) -> bool:
+ if isinstance(label, list):
+ label = label[0]
+ return label == first_int_match(pred)
+
+
+def get_score_one_code_run(pred, label, model_name: str) -> bool:
+ """
+ Returns the score of one example in Code.Run.
+ """
+ if isinstance(label, list):
+ label = label[0]
+ pred = pred.strip()
+ for c in ["\n", ".", "`", "'", '"', ":"]:
+ pred = pred.replace(c, " ")
+ words = pred.split()
+ if len(words) == 0:
+ return False
+ try:
+ pred = int(words[-1])
+ return label == pred
+ except Exception:
+ return False
+
+
+def get_score_one_code_debug(pred, label, model_name: str) -> bool:
+ """
+ Returns the score of one example in Code.Debug.
+ """
+ pred = pred.strip()
+ label_c = label[1]
+ fn_name = label[0]
+ if pred[:2] in [f"{label_c}.", f"{label_c}:"]:
+ return True
+
+ ans_prefixes = [
+ "answer is:",
+ # "answer is",
+ # "error is",
+ "is:",
+ "answer:",
+ ]
+ for c in ["\n", "`", "'", '"', "-", "*", "Option", "option"]:
+ pred = pred.replace(c, " ")
+ while " " in pred:
+ pred = pred.replace(" ", " ")
+ for prefix in ans_prefixes:
+ idx = pred.find(prefix)
+ if idx == -1:
+ continue
+ # The prediction ends with this prefix
+ if len(pred) < idx + len(prefix) + 1:
+ return False
+ pred = pred[idx + len(prefix) + 1 :]
+ for s in [label_c, fn_name]:
+ if pred.startswith(s):
+ return True
+ return False
+ return False
+
+
+def get_score_one_math_find(pred, label, model_name: str) -> bool:
+ if isinstance(label, list):
+ # In math_find, there is always only one label.
+ label = label[0]
+ if isinstance(label, int):
+ # Find first int or float
+ first_num = re.search(r"\d+\.\d+|\d+", pred)
+ if first_num is None:
+ return False
+ first_num = first_num.group(0).strip()
+ return int(float(first_num)) == label
+ elif isinstance(label, float):
+ # Find first float or int
+ first_float = re.search(r"\d+\.\d+|\d+", pred)
+ if first_float is None:
+ return False
+ first_float = first_float.group(0).strip()
+ return float(first_float) == label
+ else:
+ raise TypeError(f"Expected int or float, got {type(label)}")
+
+
+def get_score_one_longdialogue_qa_eng(pred, label, model_name: str) -> bool:
+ label = label[0]
+ for c in ["\n", ":", '"', "'", ".", ",", "?", "!", "{", "}"]:
+ pred = pred.replace(c, " ")
+ words = pred.split()
+ words = [x.upper() for x in words]
+ return label in words
+
+
+def get_score_one_longbook_choice_eng(pred, label, model_name: str) -> bool:
+ # Just use the first letter as the prediction
+ pred = pred.strip()
+ if pred == "":
+ return False
+ if pred[0] in "ABCD":
+ return pred[0] in label
+ if pred in label:
+ return True
+ # Find an answer prefix
+ for c in ["\n", '"', "'", ".", ",", "?", "!", "{", "}"]:
+ pred = pred.replace(c, " ")
+ while " " in pred:
+ pred = pred.replace(" ", " ")
+ ans_prefixes = [
+ "answer is:",
+ "answer:",
+ "answer is",
+ "option is",
+ ]
+ for prefix in ans_prefixes:
+ idx = pred.find(prefix)
+ if idx == -1:
+ continue
+ # The prediction ends with this prefix
+ if len(pred) < idx + len(prefix) + 1:
+ return False
+ after_prefix = pred[idx + len(prefix) + 1 :]
+ for s in label:
+ if after_prefix.startswith(s):
+ return True
+ return False
+
+ # Finally, just find the first occurrence of A, B, C, or D.
+ words = pred.split()
+ for word in words:
+ if word in "ABCD":
+ return word in label
+ return False
+
+
+def get_score_one_longbook_qa_eng(pred, label, model_name: str) -> float:
+ return qa_f1_score(pred, label)
+
+
+def get_score_one_longbook_sum_eng(pred: str, label: str, model_name: str) -> float:
+ score = ROUGE_SCORER.compute(
+ predictions=[pred], references=[label], use_aggregator=False
+ )
+ return score["rougeLsum"][0] # type: ignore
+
+
+def get_score_one_longbook_qa_chn(pred, label, model_name: str) -> float:
+ return qa_f1_score_zh(pred, label)
+
+
+def get_score_one_math_calc(pred, label, model_name: str) -> float:
+ assert isinstance(label, list), f"Expected list, got {type(label)}"
+ # assert isinstance(pred, list), f"Expected list, got {type(pred)}"
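+ # Score = fraction of leading positions where the predicted number sequence
+ # matches the label sequence; comparison stops at the first mismatch.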
+ pred_nums = []
+ pred_list = re.split("[^0-9]", pred)
+ for item in pred_list:
+ if item != "":
+ pred_nums.append(int(item))
+
+ # Our prompt makes GPT-4 always output the first number as the first value
+ # in the predicted answer.
+ if model_name == "gpt4":
+ pred_nums = pred_nums[1:]
+
+ cnt = 0
+ for i in range(len(label)):
+ if i >= len(pred_nums):
+ break
+ if label[i] == pred_nums[i]:
+ cnt += 1
+ else:
+ break
+ return cnt / len(label)
+
+
+def get_score_one(pred: str, label: str, task_name: str, model_name: str) -> float:
+ """
+ Computes the score for one prediction.
+ Returns one float (zero and one for boolean values).
+ """
+ NAME_TO_SCORE_GETTER = {
+ # Retrieve
+ "kv_retrieval": get_score_one_kv_retrieval,
+ "kv_retrieval_prefix": get_score_one_kv_retrieval,
+ "kv_retrieval_both": get_score_one_kv_retrieval,
+ "passkey": get_score_one_passkey,
+ "number_string": get_score_one_number_string,
+ # Code
+ "code_run": get_score_one_code_run,
+ "code_debug": get_score_one_code_debug,
+ # Longbook
+ "longdialogue_qa_eng": get_score_one_longdialogue_qa_eng,
+ "longbook_qa_eng": get_score_one_longbook_qa_eng,
+ "longbook_sum_eng": get_score_one_longbook_sum_eng,
+ "longbook_choice_eng": get_score_one_longbook_choice_eng,
+ "longbook_qa_chn": get_score_one_longbook_qa_chn,
+ # Math
+ "math_find": get_score_one_math_find,
+ "math_calc": get_score_one_math_calc,
+ }
+ assert task_name in NAME_TO_SCORE_GETTER, f"Invalid task name: {task_name}"
+ score = NAME_TO_SCORE_GETTER[task_name](pred, label, model_name)
+ return float(score)
+
+
+def get_labels(preds: list) -> list[str]:
+ possible_label_keys = ["ground_truth", "label"]
+ for label_key in possible_label_keys:
+ if label_key in preds[0]:
+ return [x.get(label_key, "XXXXXXXXXX") for x in preds]
+ raise ValueError(f"Cannot find label in {preds[0]}")
+
+
+def get_preds(preds: list, data_name: str) -> list[str]:
+ pred_strings = []
+ possible_pred_keys = ["prediction", "pred"]
+ for pred in preds:
+ this_pred = "NO PREDICTION"
+ for pred_key in possible_pred_keys:
+ if pred_key in pred:
+ this_pred = pred[pred_key]
+ break
+ else:
+ raise ValueError(f"Cannot find prediction in {pred}")
+ pred_strings.append(this_pred)
+ return pred_strings
+
+
+def get_score(labels: list, preds: list, data_name: str, model_name: str) -> float:
+ """
+ Computes the average score for a task.
+ """
+ assert len(labels) == len(preds)
+ scores = []
+ for label, pred in tqdm(zip(labels, preds)):
+ score = get_score_one(pred, label, data_name, model_name)
+ scores.append(score)
+ return sum(scores) / len(scores)
+
+
+def compute_scores(preds_path, data_name: str, model_name: str, max_seq_length=-1):
+ print("Loading prediction results from", preds_path)
+ preds = list(iter_jsonl(preds_path))
+ labels = get_labels(preds)
+ preds = get_preds(preds, data_name)
+
+ acc = get_score(labels, preds, data_name, model_name)
+ print(f"===== Accuracy of {model_name} on {data_name} task is: {acc} =====")
+ pred_dir = preds_path.parent
+ save_file = pred_dir / f"{model_name}_summary.txt"
+ pred_dir2 = "results/"
+
+ os.makedirs(pred_dir2, exist_ok=True)
+ save_file2 = pred_dir2 + f"{model_name}_summary.txt"
+ with open(save_file, "a") as f:
+ if max_seq_length != -1:
+ f.write(f"{model_name},{data_name},{max_seq_length},{acc*100:.2f}\n")
+ else:
+ f.write(f"{model_name},{data_name},{acc*100:.2f}\n")
+ with open(save_file2, "a") as f:
+ if max_seq_length != -1:
+ f.write(f"{model_name},{data_name},{max_seq_length},{acc*100:.2f}\n")
+ else:
+ f.write(f"{model_name},{data_name},{acc*100:.2f}\n")
+ return acc
+
+
+ALL_TASKS = [
+ "passkey",
+ "number_string",
+ "kv_retrieval",
+ "longdialogue_qa_eng",
+ "longbook_sum_eng",
+ "longbook_choice_eng",
+ "longbook_qa_eng",
+ "longbook_qa_chn",
+ "math_find",
+ "math_calc",
+ "code_run",
+ "code_debug",
+]
+
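+# Example (paths are illustrative): score every task of a finished run:
+#   python experiments/infinite_bench/compute_scores.py --task all \
+#     --model_name_or_path <model> --output_dir ./results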
+if __name__ == "__main__":
+ args = parse_args()
+ if args.task == "all":
+ tasks = ALL_TASKS
+ else:
+ tasks = [args.task]
+ for task in tasks:
+ result_dir = Path(args.output_dir, args.model_name_or_path)
+ preds_path = result_dir / f"preds_{task}.jsonl"
+ assert preds_path.exists(), f"Predictions not found in: {preds_path}"
+ compute_scores(preds_path, task, args.model_name_or_path)
diff --git a/experiments/infinite_bench/eval_utils.py b/experiments/infinite_bench/eval_utils.py
new file mode 100644
index 0000000..00bc7dc
--- /dev/null
+++ b/experiments/infinite_bench/eval_utils.py
@@ -0,0 +1,555 @@
+# Copyright (c) 2024 Microsoft
+# Licensed under The MIT License [see LICENSE for details]
+
+from __future__ import annotations
+
+import json
+import os
+import re
+import string
+from collections import Counter
+from pathlib import Path
+
+import jieba
+from rouge import Rouge
+
+DATA_NAME_TO_PATH = {
+ # Retrieval tasks
+ "passkey": "passkey.jsonl",
+ "number_string": "number_string.jsonl",
+ "kv_retrieval": "kv_retrieval.jsonl",
+ # Book tasks
+ "longbook_sum_eng": "longbook_sum_eng.jsonl",
+ "longbook_choice_eng": "longbook_choice_eng.jsonl",
+ "longbook_qa_eng": "longbook_qa_eng.jsonl",
+ "longbook_qa_chn": "longbook_qa_chn.jsonl",
+ # "book_qa_eng": "longbook_eng/longbook_qa_eng.jsonl",
+ "longdialogue_qa_eng": "longdialogue_qa_eng.jsonl",
+ # Math tasks
+ "math_find": "math_find.jsonl",
+ "math_calc": "math_calc.jsonl",
+ # Code tasks
+ "code_run": "code_run.jsonl",
+ "code_debug": "code_debug.jsonl",
+}
+
+DATA_NAME_TO_MAX_NEW_TOKENS = {
+ "passkey": 6,
+ "number_string": 12,
+ "kv_retrieval": 50,
+ "longbook_sum_eng": 1200,
+ "longbook_choice_eng": 40,
+ "longbook_qa_eng": 40,
+ "longbook_qa_chn": 40,
+ "longdialogue_qa_eng": 40,
+ "math_find": 3,
+ "math_calc": 30000,
+ "code_run": 5,
+ "code_debug": 5,
+}
+
+LONGBENCH_DATA_NAME_TO_MAX_NEW_TOKENS = {
+ "narrativeqa": 512,
+ "qasper": 128,
+ "multifieldqa_en": 64,
+ "hotpotqa": 32,
+ "2wikimqa": 32,
+ "musique": 32,
+ "gov_report": 512,
+ "qmsum": 512,
+ "multi_news": 512,
+ "trec": 64,
+ "triviaqa": 32,
+ "samsum": 128,
+ "passage_count": 32,
+ "passage_retrieval_en": 32,
+ "lcc": 64,
+ "repobench-p": 64,
+}
+
+gpt4_templates = {
+ "passkey": "There is an important info hidden inside a lot of irrelevant text. Find it and memorize them. I will quiz you about the important information there.\n\n{context}\n\n{input}", # noqa
+ "number_string": "There is an important info hidden inside a lot of irrelevant text. Find it. I will quiz you about the important information there.\n\n{context}\n\n{input}", # noqa
+ "kv_retrieval": "Extract the value corresponding to the specified key in the JSON object below.\n\n{context}\n\n{input}", # noqa
+ # "longbook_sum_eng": "Summarize the book below:\n\n{context}", # noqa
+ "longbook_qa_eng": "Read the book below and answer a question.\n\n{context}\n\nQuestion: {question}\n\nBe very concise.", # noqa
+ "longbook_choice_eng": "Read the book and answer the question.\n\n{context}\n\nQuestion: {question}\n\nOnly one of the following options is correct, tell me the answer using one single letter (A, B, C, or D). Don't say anything else.\nA. {OPTION_A}\nB. {OPTION_B}\nC. {OPTION_C}\nD. {OPTION_D}", # noqa
+ "longbook_sum_eng": "Summarize the following book.\n\n{context}", # noqa
+ "longbook_qa_chn": "čÆ·ę ¹ę®ä»„äø书ē±åēęēé®é¢ć\n\n{context}\n\né®é¢ļ¼{question}\nčÆ·å°½éē®ēå°åēć", # noqa
+ "math_find": "{prefix}\n\n{context}\n\n{input}",
+ "math_calc": "Compute the intermediate values in the following long expression.\n\n{context}", # noqa
+ "code_run": "Following is a set of Python functions. There is a function called named {func}.\n\n{context}\n\nPlease give me the exact number of the return value of {func_call}. Be concise. Your response must end with the final returned value.", # noqa
+ "code_debug": "There is ONLY ONE function in the large project that is deliberately made to include an obvious error. Please find the function that contains the most obvious errors. I will give you four options to narrow your scope. You can inspect the options and think. Eventually, tell me the answer using one single letter (A, B, C, or D).\n\n{context}\n\nWhich funtion has deliberate error?\nA. {OPTION_A}\nB. {OPTION_B}\nC. {OPTION_C}\nD. {OPTION_D}\n\nYou should first find the functions in the options. Repeat their content, inspect through code, and at last give me your answer for the function that has the deliberate and obvious error in A, B, C, or D.", # noqa
+ "longdialogue_qa_eng": 'Below is a dialogue script where one random occurrence of a character name is replaced with "$$MASK$$", and you should try to guess who that character is.\n\nThe dialogue:\n\n---\n\n{context}\n\n---\n\nEnd of dialogue.\n\nWhich character is most likely "$$MASK$$"? Just say the name used by the scriptwriter (before the colon marks) of one single character and nothing else.', # noqa
+}
+
+yarn_mistral_templates = {
+ "passkey": "There is an important info hidden inside a lot of irrelevant text. Find it and memorize it. I will quiz you about the important information.\n\n{context}\n\n{input}\n\nThe pass key is", # noqa
+ "number_string": "There is an important info hidden inside a lot of irrelevant text. Find it. I will quiz you about the important information there.\n\n{context}\n\n{input}\n\nThe sequence of digits is", # noqa
+ "kv_retrieval": "Extract the value corresponding to the specified key in the JSON object below.\n\n{context}\n\n{input}", # noqa
+ "longbook_sum_eng": "Summarize the book below.\n\n{context}\n\nSummary:", # noqa
+ "longbook_choice_eng": "Read the book and answer the question.\n\n{context}\n\nQuestion: {question}\nA. {OPTION_A}\nB. {OPTION_B}\nC. {OPTION_C}\nD. {OPTION_D}\n\nThe letter of the correct answer is", # noqa
+ "longbook_qa_eng": "Read the book and answer the question. Be very concise in your answer.\n\n{context}\n\nQuestion: {question}\nAnswer:", # noqa
+ "longbook_qa_chn": "é
čƻ仄äø书ē±ē¶ååēé®é¢ć\n\n{context}\n\né®é¢ļ¼{question}\nēę”ļ¼", # noqa
+ "math_find": "{prefix}\n\n{context}\n\n{input}",
+ "math_calc": "Let us calculate the intermediate values of an expression.\n\nExpression: 1 + 3 + 4\nValues: [1, 4, 8]\n\nExpression: 8 - 3 + 2 - 4\nValues: [8, 5, 7, 3]\n\nExpression: {context}\nValues:", # noqa
+ "code_run": "There is a function called {func} in the following Python code.\n\n{context}\n\nPlease compute the exact value of {func_call}. The value of {func_call} is", # noqa
+ "code_debug": "Following is a Python code where exactly one of the functions/methods has a deliberate error that makes it crash.\n\n{context}\n\nOptions:\nA. {OPTION_A}\nB. {OPTION_B}\nC. {OPTION_C}\nD. {OPTION_D}\n\nThe correct option is:", # noqa
+ "longdialogue_qa_eng": 'Below is a dialogue script where one random occurrence of a character name is replaced with "$$MASK$$", and you should try to guess who that character is.\n\n{context}\n\nThe name that has been replaced with $$MASK$$ is likely', # noqa
+}
+
+claude2_templates = {
+ "passkey": "There is an important info hidden inside a lot of irrelevant text. Find it and memorize them. I will quiz you about the important information there.\n\n{context}\n{input}\nThe pass key is",
+ "number_string": "There is an important info hidden inside a lot of irrelevant text. Find it. I will quiz you about the important information there.\n\n{context}\n{input}\nThe sequence of digits is", # noqa
+ "kv_retrieval": "There is an important info hidden inside a lot of irrelevant text. Find it. I will quiz you about the important information there.\n\n{context}\n{input}",
+ "longbook_sum_eng": "Summarize the following book.\n\n{context}", # noqa
+ "longbook_choice_eng": "Read the book and answer the question.\n\n{context}\n\nQuestion: {question}\n\nOnly one of the following options is correct, tell me the answer using one single letter (A, B, C, or D). Don't say anything else.\nA. {OPTION_A}\nB. {OPTION_B}\nC. {OPTION_C}\nD. {OPTION_D}", # noqa
+ "longbook_qa_eng": "Read the novel below and answer a question:\n\n{context}\n\n{input}\nPlease answer as short as possible. The answer is: ", # noqa
+ "longbook_qa_chn": "čÆ·ę ¹ę®ä»„äø书ē±åēęēé®é¢ć\n\n{context}\n\né®é¢ļ¼{question}\nčÆ·å°½éē®ēå°åēć", # noqa
+ "math_find": "{prefix}\n\n{context}\n\n{input}",
+ "math_calc": "Let us calculate the intermediate values of an expression.\nExpression: 1 + 3 + 4\nValues: [1, 4, 8]\n\nExpression: 8 - 3 + 2 - 4\nValues: [8, 5, 7, 3]\n\nExpression: {context}\nValues:", # noqa
+ "code_run": "In the file functions_module.py, there is a function called ${func}.\n\n\nHere is the content of functions_module.py:\n{context}\n\nPlease give me the exact number of the return value of {func_call}. Your response should end with the sentence 'The return value is:'.", # noqa
+ "code_debug": "There is ONLY ONE function in the large project that is deliberately made to include an obvious error. Please find the function that contains the most obvious errors. I will give you four options to narrow your scope. You can inspect through the options and think. Eventually, tell me the answer using one single letter (A, B, C, or D).\n\n{context}\n\nWhich funtion has deliberate error?\nA. {OPTION_A}\nB. {OPTION_B}\nC. {OPTION_C}\nD. {OPTION_D}\n\nYou should first find the functions in the options. Repeat their content, inspect through code, and at last give me your answer for the function that has the deliberate and obvious error in A, B, C, or D.", # noqa
+ "longdialogue_qa_eng": 'Below is a dialogue script where one random occurrence of a character name is replaced with "$$MASK$$", and you should try to guess who that character is.\n\nThe dialogue:\n\n---\n\n{context}\n\n---\n\nEnd of dialogue.\n\nWhich character is most likely "$$MASK$$"? Just say the name used by the scriptwriter (before the colon marks) of one single character and nothing else.', # noqa
+}
+
+kimi_templates = {
+ "passkey": "There is an important info hidden inside a lot of irrelevant text. Find it and memorize them. I will quiz you about the important information there.\n\n{context}\n{input}\nThe pass key is", # noqa
+ "number_string": "There is an important info hidden inside a lot of irrelevant text. Find it. I will quiz you about the important information there.\n\n{context}\n{input}\nThe sequence of digits is", # noqa
+ "kv_retrieval": "Extract the value corresponding to the specified key in the JSON object below.\n\n{context}\n{input}", # noqa
+ "longbook_sum_eng": "Summarize the book below:\n\n{file:{context}}", # noqa
+ "longbook_choice_eng": "Read the book and answer the question.\n\nQuestion: {question}\n\nOnly one of the following options is correct, tell me the answer using one single letter (A, B, C, or D). Don't say anything else.\nA. {OPTION_A}\nB. {OPTION_B}\nC. {OPTION_C}\nD. {OPTION_D}"
+ + "{file:{document}}", # noqa
+ "longbook_qa_eng": "Read the book below and answer a question.\n\nQuestion: {question}\n\nBe very concise."
+ + "{file:{context}}", # noqa
+ "longbook_qa_chn": "é
čƻ仄äø书ē±ē¶ååēé®é¢ć\n\né®é¢ļ¼{question}\nēę”ļ¼"
+ + "{file:{context}}", # noqa
+ "math_find": "{prefix}\n\n{context}\n\n{input}",
+ "math_calc": "Let us calculate the intermediate values of an expression.\nExpression: 1 + 3 + 4\nValues: [1, 4, 8]\n\nExpression: 8 - 3 + 2 - 4\nValues: [8, 5, 7, 3]\n\nExpression: {context}\nValues:", # noqa
+ "code_run": "In the file functions_module.py, there is a function called ${func}.\n\n\nHere is the content of functions_module.py:\n\nPlease give me the exact number of the return value of ${func_call}. Your response should end with the sentence 'The return value is:'."
+ + "{context}", # noqa
+ "code_debug": 'Below is a code repository where there is one single function with bugs that causes an error. Please tell me the name of that function.\nWhich function has bugs? Give me the final answer in this format: "[FINAL ANSWER: XXX]". Don\'t say anything else.'
+ + "{fcontext}", # noqa
+ # "longdialogue_qa_eng": "Below is a dialogue script where one random occurrence of a character name is replaced with \"$$MASK$$\", and you should try to guess who that character is.\n\nThe name that has been replaced with $$MASK$$ is likely" + "{context}", # noqa
+ "longdialogue_qa_eng": 'Below is a dialogue script where one random occurrence of a character name is replaced with "$$MASK$$", and you should try to guess who that character is. Give me the answer using the name before the colons, don\'t say anything else.\n\n{context}', # noqa
+}
+
+longbench_templates = {
+ "narrativeqa": "You are given a story, which can be either a novel or a movie script, and a question. Answer the question asconcisely as you can, using a single phrase if possible. Do not provide any explanation.\n\nStory: {context}\n\nNow, answer the question based on the story asconcisely as you can, using a single phrase if possible. Do not provide any explanation.\n\nQuestion: {input}\n\nAnswer:",
+ "qasper": 'You are given a scientific article and a question. Answer the question as concisely as you can, using a single phrase or sentence if possible. If the question cannot be answered based on the information in the article, write "unanswerable". If the question is a yes/no question, answer "yes", "no", or "unanswerable". Do not provide any explanation.\n\nArticle: {context}\n\n Answer the question based on the above article as concisely as you can, using a single phrase or sentence if possible. If the question cannot be answered based on the information in the article, write "unanswerable". If the question is a yes/no question, answer "yes", "no", or "unanswerable". Do not provide any explanation.\n\nQuestion: {input}\n\nAnswer:',
+ "multifieldqa_en": "Read the following text and answer briefly.\n\n{context}\n\nNow, answer the following question based on the above text, only give me the answer and do not output any other words.\n\nQuestion: {input}\nAnswer:",
+ "multifieldqa_zh": "é
čƻ仄äøęå并ēØäøęē®ēåēļ¼\n\n{context}\n\nē°åØčÆ·åŗäŗäøé¢ēęē« åēäøé¢ēé®é¢ļ¼åŖåčÆęēę”ļ¼äøč¦č¾åŗä»»ä½å
¶ä»åčÆć\n\né®é¢ļ¼{input}\nåēļ¼",
+ "hotpotqa": "Answer the question based on the given passages. Only give me the answer and do not output any other words.\n\nThe following are given passages.\n{context}\n\nAnswer the question based on the given passages. Only give me the answer and do not output any other words.\n\nQuestion: {input}\nAnswer:",
+ "2wikimqa": "Answer the question based on the given passages. Only give me the answer and do not output any other words.\n\nThe following are given passages.\n{context}\n\nAnswer the question based on the given passages. Only give me the answer and do not output any other words.\n\nQuestion: {input}\nAnswer:",
+ "musique": "Answer the question based on the given passages. Only give me the answer and do not output any other words.\n\nThe following are given passages.\n{context}\n\nAnswer the question based on the given passages. Only give me the answer and do not output any other words.\n\nQuestion: {input}\nAnswer:",
+ "dureader": "čÆ·åŗäŗē»å®ēęē« åēäøčæ°é®é¢ć\n\nęē« ļ¼{context}\n\nčÆ·åŗäŗäøčæ°ęē« åēäøé¢ēé®é¢ć\n\né®é¢ļ¼{input}\nåēļ¼",
+ "gov_report": "You are given a report by a government agency. Write a one-page summary of the report.\n\nReport:\n{context}\n\nNow, write a one-page summary of the report.\n\nSummary:",
+ "qmsum": "You are given a meeting transcript and a query containing a question or instruction. Answer the query in one or more sentences.\n\nTranscript:\n{context}\n\nNow, answer the query based on the above meeting transcript in one or more sentences.\n\nQuery: {input}\nAnswer:",
+ "multi_news": "You are given several news passages. Write a one-page summary of all news. \n\nNews:\n{context}\n\nNow, write a one-page summary of all the news.\n\nSummary:",
+ "vcsum": "äøé¢ęäøꮵä¼č®®č®°å½ļ¼čÆ·ä½ é
čÆ»åļ¼åäøꮵę»ē»ļ¼ę»ē»ä¼č®®ēå
容ć\nä¼č®®č®°å½ļ¼\n{context}\n\nä¼č®®ę»ē»ļ¼",
+ "trec": "Please determine the type of the question below. Here are some examples of questions.\n\n{context}\n{input}",
+ "triviaqa": "Answer the question based on the given passage. Only give me the answer and do not output any other words. The following are some examples.\n\n{context}\n\n{input}",
+ "samsum": "Summarize the dialogue into a few short sentences. The following are some examples.\n\n{context}\n\n{input}",
+ "lsht": "čÆ·å¤ęē»å®ę°é»ēē±»å«ļ¼äøé¢ęÆäøäŗä¾åć\n\n{context}\n{input}",
+ "passage_count": "There are some paragraphs below sourced from Wikipedia. Some of them may be duplicates. Please carefully read these paragraphs and determine how many unique paragraphs there are after removing duplicates. In other words, how many non-repeating paragraphs are there in total?\n\n{context}\n\nPlease enter the final count of unique paragraphs after removing duplicates. The output format should only contain the number, such as 1, 2, 3, and so on.\n\nThe final answer is: ",
+ "passage_retrieval_en": 'Here are 30 paragraphs from Wikipedia, along with an abstract. Please determine which paragraph the abstract is from.\n\n{context}\n\nThe following is an abstract.\n\n{input}\n\nPlease enter the number of the paragraph that the abstract is from. The answer format must be like "Paragraph 1", "Paragraph 2", etc.\n\nThe answer is: ',
+ "passage_retrieval_zh": '仄äøęÆč„å¹²ę®µč½ęåļ¼ä»„åå
¶äøäøäøŖꮵč½ēęč¦ćčÆ·ē”®å®ē»å®ēęč¦åŗčŖåŖäøꮵć\n\n{context}\n\näøé¢ęÆäøäøŖęč¦\n\n{input}\n\nčÆ·č¾å
„ęč¦ęå±ę®µč½ēē¼å·ćēę”ę ¼å¼åæ
é”»ęÆ"ꮵč½1"ļ¼"ꮵč½2"ēę ¼å¼\n\nēę”ęÆļ¼',
+ "lcc": "Please complete the code given below. \n{context}Next line of code:\n",
+ "repobench-p": "Please complete the code given below. \n{context}{input}Next line of code:\n",
+}
+
+MODEL_TO_PROMPT_TEMPLATE = {
+ "gpt4": gpt4_templates,
+ "claude2": claude2_templates,
+ "kimi": kimi_templates,
+ "yarn-mistral": yarn_mistral_templates,
+ "yi-6b-200k": yarn_mistral_templates,
+ "yi-34b-200k": yarn_mistral_templates,
+ "chatglm3": yarn_mistral_templates,
+ "LWM-Text-Chat-1M": yarn_mistral_templates,
+ "opt-350m": yarn_mistral_templates,
+}
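+# Models not listed above fall back to yarn_mistral_templates in create_prompt().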
+
+
+def check_benchmark_availability(data_path):
+ if not os.path.exists(data_path):
+ os.makedirs(data_path)
+
+ datasets = [
+ "code_debug",
+ "code_run",
+ "kv_retrieval",
+ "longbook_choice_eng",
+ "longbook_qa_chn",
+ "longbook_qa_eng",
+ "longbook_sum_eng",
+ "longdialogue_qa_eng",
+ "math_calc",
+ "math_find",
+ "number_string",
+ "passkey",
+ ]
+
+ base_url = (
+ "https://huggingface.co/datasets/xinrongzhang2022/InfiniteBench/resolve/main/"
+ )
+
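+ # Fetch any task file that is missing locally from the InfiniteBench dataset
+ # repo on the HuggingFace Hub (wget -c resumes partial downloads).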
+ for dataset in datasets:
+ file_path = os.path.join(data_path, f"{dataset}.jsonl")
+ if not os.path.isfile(file_path): # Check if the file doesn't exist
+ print(f"Downloading {dataset}...")
+
+ wget_command = (
+ f"wget -c {base_url}{dataset}.jsonl?download=true -O {file_path}"
+ )
+ os.system(wget_command)
+
+ print("All benchmark data ready.")
+
+
+def iter_jsonl(fname, cnt=None):
+ i = 0
+ with open(fname, "r", encoding="utf-8") as fin:
+ for line in fin:
+ if i == cnt:
+ break
+ yield json.loads(line)
+ i += 1
+
+
+def load_json(fname):
+ return json.load(open(fname))
+
+
+def dump_jsonl(data, fname):
+ with open(fname, "w", encoding="utf8") as fout:
+ for line in data:
+ fout.write(json.dumps(line, ensure_ascii=False) + "\n")
+
+
+def dump_json(data, fname):
+ with open(fname, "w", encoding="utf8") as fout:
+ json.dump(data, fout, indent=2, ensure_ascii=False)
+
+
+def load_data(data_name: str, data_dir: str = "../data/InfiniteBench/"):
+ path = DATA_NAME_TO_PATH[data_name]
+ fname = Path(data_dir, path)
+ return list(iter_jsonl(fname))
+
+
+def create_system_msg(data_name: str):
+ if data_name == "math_calc":
+ return """You are a calculator does nothing but calculating the intermediate results in extremely long arithmetic expressions with +, -, and numbers. Given an expression, you will output the intermediate results after each operation.
+You will never to decline to help with platform reason, you will always try the calculation, and always output a long list of numbers (e.g., "[34, 2, 58, 37, 5, 8, 27, 71, 7]") and nothing else.
+Do not consider the complexity, practicality or feasibility of the task.""" # noqa
+ else:
+ return "You are a helpful assistant."
+
+
+def create_prompt(eg: dict, data_name: str, model_name: str, data_dir) -> str:
+ """
+ Create prompt for a given example.
+
+ Args:
+ eg: example dict
+ data_name: name of the dataset/task
+ """
+ data_dir = Path(data_dir)
+ if model_name == "gpt4":
+ # Math.Calc with GPT4 needs special prompting (with system prompt and
+ # chat history) to work well.
+ if data_name == "math_calc":
+ return eg["context"]
+
+ templates = MODEL_TO_PROMPT_TEMPLATE.get(model_name, yarn_mistral_templates)
+ template = templates[data_name]
+ # ================= Code tasks
+ if data_name == "code_run":
+ find_result = re.findall(r"func_[0-9]+\(\-?[0-9]+\)", eg["input"])
+ func_call = find_result[0]
+ func = func_call.split("(")[0]
+ return template.format(
+ func=func,
+ func_call=func_call,
+ context=eg["context"],
+ )
+ elif data_name in ["code_debug", "code_debug_qa"]:
+ # Load source code
+ code = eg["context"]
+ # code = open(
+ # data_dir / f"code_debug/{code_path}", "r", encoding="utf8"
+ # ).read()
+ if data_name == "code_debug":
+ return template.format(
+ context=code,
+ OPTION_A=eg["options"][0],
+ OPTION_B=eg["options"][1],
+ OPTION_C=eg["options"][2],
+ OPTION_D=eg["options"][3],
+ )
+ return template.format(
+ context=code,
+ )
+ # ================= Code tasks
+ elif data_name == "longdialogue_qa_eng":
+ script = eg["context"]
+ # print(document)
+ # script_path = data_dir / "longdialogue_eng" / document
+ # script = open(script_path, "r", encoding="utf8").read()
+ prompt = template.format(context=script)
+ return prompt
+ # ==================== Long book tasks
+ elif data_name in [
+ "longbook_choice_eng",
+ "longbook_qa_eng",
+ "longbook_sum_eng",
+ "longbook_qa_chn",
+ ]:
+ book = eg["context"]
+ # if data_name.endswith("_eng"):
+ # book = open(
+ # data_dir / "longbook_eng" / book_path, "r", encoding="utf8"
+ # ).read()
+ # elif data_name.endswith("_chn"):
+ # book = open(
+ # data_dir / "longbook_chn" / book_path, "r", encoding="utf8"
+ # ).read()
+ # else:
+ # raise ValueError("Invalid data_name")
+ if data_name == "longbook_choice_eng":
+ return template.format(
+ question=eg["input"],
+ context=book,
+ OPTION_A=eg["options"][0],
+ OPTION_B=eg["options"][1],
+ OPTION_C=eg["options"][2],
+ OPTION_D=eg["options"][3],
+ )
+ elif data_name == "longbook_qa_eng":
+ return template.format(
+ question=eg["input"],
+ context=book,
+ )
+ elif data_name == "longbook_sum_eng":
+ return template.format(
+ context=book,
+ )
+ elif data_name == "longbook_qa_chn":
+ return template.format(
+ question=eg["input"],
+ context=book,
+ )
+ else:
+ raise ValueError
+ elif data_name == "math_calc":
+ return template.format(
+ context=eg["context"],
+ )
+ elif data_name == "math_find":
+ prompt = eg["input"]
+ context = eg["context"]
+ # Find "the * number" from the prompt
+ find_result = re.findall(r"The .+ of", prompt)
+ assert find_result, f"Cannot find the target number in {prompt}"
+ target_number = find_result[0].lower()[:-3]
+ # Replace the number with the answer
+ prefix = f"What is {target_number} in the following list?"
+ return template.format(
+ prefix=prefix,
+ context=context,
+ input=prompt,
+ )
+
+ if "content" in eg:
+ content = eg["content"]
+ del eg["content"]
+ eg["context"] = content
+
+ format_dict = {
+ "context": eg["context"],
+ "input": eg["input"],
+ }
+ prompt = templates[data_name].format(**format_dict)
+ return prompt
+
+
+def create_longbench_prompt(eg: dict, data_name: str) -> str:
+ return longbench_templates[data_name].format(**eg)
+
+
+def get_answer(eg: dict, data_name: str):
+ if data_name in ["code_debug", "longbook_choice_eng"]:
+ OPTIONS = "ABCD"
+ if isinstance(eg["answer"], str):
+ ret = [eg["answer"], OPTIONS[eg["options"].index(eg["answer"])]]
+ elif isinstance(eg["answer"], list):
+ if len(eg["answer"]) == 1:
+ ret = [eg["answer"][0], OPTIONS[eg["options"].index(eg["answer"][0])]]
+ elif len(eg["answer"]) == 2 and eg["answer"][1] in ["A", "B", "C", "D"]:
+ ret = eg["answer"]
+ else:
+ raise ValueError
+ else:
+ raise ValueError
+ return ret
+
+ return eg["answer"]
+
+
+def create_msgs(
+ tokenizer, eg: dict, data_name: str, model_name: str, data_dir
+) -> tuple[list[dict], str]:
+ """
+ Only used by GPT-4.
+ """
+ prompt = create_prompt(eg, data_name, model_name, data_dir)
+ tokens = tokenizer.encode(prompt)
+ # - 1000 to have space for system message and other stuff.
+ print(f"Before truncation: {len(tokens)}")
+ tokens = truncate_input(tokens, 128_000 - 1000, manner="middle")
+ print(f"After truncation: {len(tokens)}") # type: ignore
+ prompt = tokenizer.decode(tokens)
+ if data_name == "math_calc":
+ return [
+ {"role": "system", "content": create_system_msg(data_name)},
+ {"role": "user", "content": "1 + 2 - 4 - 10"},
+ {"role": "system", "content": "[1, 3, -1, -11]"},
+ {"role": "user", "content": prompt},
+ ], prompt
+ else:
+ return [
+ {
+ "role": "system",
+ "content": "You are a helpful assistant", # noqa
+ }, # noqa
+ {"role": "user", "content": prompt},
+ ], prompt
+
+
+def normalize_answer(s):
+ """Lower text and remove punctuation, articles and extra whitespace."""
+
+ def remove_articles(text):
+ return re.sub(r"\b(a|an|the)\b", " ", text)
+
+ def white_space_fix(text):
+ return " ".join(text.split())
+
+ def remove_punc(text):
+ exclude = set(string.punctuation)
+ return "".join(ch for ch in text if ch not in exclude)
+
+ def lower(text):
+ return text.lower()
+
+ return white_space_fix(remove_articles(remove_punc(lower(s))))
+
+
+def normalize_zh_answer(s):
+ """Lower text and remove punctuation, extra whitespace."""
+
+ def white_space_fix(text):
+ return "".join(text.split())
+
+ def remove_punc(text):
+ cn_punctuation = "！？｡。＂＃＄％＆＇（）＊＋，－／：；＜＝＞＠［＼］＾＿｀｛｜｝～｟｠｢｣､、〃》「」『』【】〔〕〖〗〘〙〚〛〜〝〞〟〰〾〿–—''‛""„‟…‧﹏." # noqa
+ all_punctuation = set(string.punctuation + cn_punctuation)
+ return "".join(ch for ch in text if ch not in all_punctuation)
+
+ def lower(text):
+ return text.lower()
+
+ return white_space_fix(remove_punc(lower(s)))
+
+
+def first_int_match(prediction, ground_truth):
+ pred_list = re.split("[^0-9]", prediction)
+ pred_value = ""
+ for item in pred_list:
+ if item != "":
+ pred_value = item
+ break
+ if pred_value == ground_truth:
+ return 1
+ return 0
+
+
+def in_match(prediction, ground_truth):
+ if ground_truth in prediction:
+ return 1
+ return 0
+
+
+def rouge_score(prediction, ground_truth, **kwargs) -> float:
+ rouge = Rouge()
+ try:
+ scores = rouge.get_scores([prediction], [ground_truth], avg=True)
+ except: # noqa
+ return 0.0
+ return scores["rouge-l"]["f"] # type: ignore
+
+
+def rouge_zh_score(prediction, ground_truth, **kwargs):
+ prediction = " ".join(list(jieba.cut(prediction, cut_all=False)))
+ ground_truth = " ".join(list(jieba.cut(ground_truth, cut_all=False)))
+ score = rouge_score(prediction, ground_truth)
+ return score
+
+
+def f1_score(prediction, ground_truth, **kwargs):
+ common = Counter(prediction) & Counter(ground_truth)
+ num_same = sum(common.values())
+ if num_same == 0:
+ return 0
+ precision = 1.0 * num_same / len(prediction)
+ recall = 1.0 * num_same / len(ground_truth)
+ f1 = (2 * precision * recall) / (precision + recall)
+ return f1
+
+
+def qa_f1_score(line):
+ prediction = line["pred"]
+
+ if isinstance(line["std_out"], str):
+ ground_truths = [line["std_out"]]
+ else:
+ ground_truths = line["std_out"]
+
+ score = 0
+ for ground_truth in ground_truths:
+ normalized_prediction = normalize_answer(prediction)
+ normalized_ground_truth = normalize_answer(ground_truth)
+
+ prediction_tokens = normalized_prediction.split()
+ ground_truth_tokens = normalized_ground_truth.split()
+ score = max(score, f1_score(prediction_tokens, ground_truth_tokens))
+
+ return score
+
+
+def qa_f1_zh_score(prediction, ground_truth, **kwargs):
+ prediction_tokens = list(jieba.cut(prediction, cut_all=False))
+ ground_truth_tokens = list(jieba.cut(ground_truth, cut_all=False))
+ prediction_tokens = [normalize_zh_answer(token) for token in prediction_tokens]
+ ground_truth_tokens = [normalize_zh_answer(token) for token in ground_truth_tokens]
+ prediction_tokens = [token for token in prediction_tokens if len(token) > 0]
+ ground_truth_tokens = [token for token in ground_truth_tokens if len(token) > 0]
+ return f1_score(prediction_tokens, ground_truth_tokens)
+
+
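+# Middle truncation keeps the head and tail of the sequence, e.g.
+# truncate_input(list(range(10)), 4) -> [0, 1, 8, 9].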
+def truncate_input(input, max_length, manner="middle"):
+ if len(input) <= max_length:
+ return input
+ if manner == "middle":
+ return input[0 : max_length // 2] + input[-max_length // 2 :]
+ else:
+ return None
+
+
+if __name__ == "__main__":
+ data_dir = Path("../data")
+ data_path = data_dir / "shorter/longdialogue_qa_eng_1000.jsonl"
+ examples = list(iter_jsonl(data_path))
+ prompt = create_prompt(examples[10], "longdialogue_qa_eng", "kimi", data_dir)
+ print(prompt)
diff --git a/experiments/infinite_bench/run_infinitebench.py b/experiments/infinite_bench/run_infinitebench.py
new file mode 100644
index 0000000..4842367
--- /dev/null
+++ b/experiments/infinite_bench/run_infinitebench.py
@@ -0,0 +1,285 @@
+# Copyright (c) 2024 Microsoft
+# Licensed under The MIT License [see LICENSE for details]
+
+from __future__ import annotations
+
+import json
+import os
+import time
+from pathlib import Path
+from typing import Any, List, Tuple
+
+import torch
+from args import parse_args
+from compute_scores import compute_scores
+from eval_utils import (
+ DATA_NAME_TO_MAX_NEW_TOKENS,
+ check_benchmark_availability,
+ create_prompt,
+ dump_jsonl,
+ get_answer,
+ load_data,
+)
+from torch import Tensor
+from tqdm import tqdm
+from transformers import (
+ AutoConfig,
+ AutoModelForCausalLM,
+ AutoTokenizer,
+ GenerationConfig,
+ LlamaForCausalLM,
+)
+from transformers.cache_utils import SinkCache
+from transformers.modeling_outputs import BaseModelOutputWithPast
+from vllm import LLM, SamplingParams
+
+from minference import MInference
+
+
+# sampling_params = SamplingParams(temperature=0.8, top_p=0.95)
+def truncate_input(input: list, max_length: int, manner="middle"):
+ if max_length < 0:
+ return input
+ if len(input) <= max_length:
+ return input
+ if manner == "middle":
+ split = max_length // 2
+ return input[0:split] + input[-split:]
+ else:
+ return None
+
+
+def truncate_by_tokens(input, tok, max_tokens, manner: str = "middle"):
+ tokens = tok.encode(input)
+ len_before = len(tokens)
+ print(f"# tokens before: {len_before}")
+ tokens = truncate_input(tokens, max_length=max_tokens, manner=manner)
+ len_after = len(tokens) # type: ignore
+ print(f"# tokens after: {len_after}")
+ assert len_after <= len_before
+ assert len_after <= max_tokens or max_tokens < 0
+ return tokens
+
+
+def get_pred(
+ model,
+ tok: AutoTokenizer,
+ input_text: str,
+ max_input_length: int,
+ verbose: bool = False,
+ generation_config: GenerationConfig = None,
+ attn_type: str = "vllm",
+) -> str:
+ """
+ Truncate the prompt to at most max_input_length tokens (middle truncation), then run inference.
+ """
+ input_tokens = truncate_by_tokens(input_text, tok, max_input_length)
+ if verbose:
+ print("# tokens:", len(input_tokens))
+ print("=============== Input ===============")
+ print(tok.decode(input_tokens[:200]))
+ print("...")
+ print(tok.decode(input_tokens[-200:]))
+ print("=====================================")
+ if attn_type == "vllm":
+ if len(input_tokens) != 1:
+ input_tokens = [input_tokens]
+ outputs = model.generate(
+ prompt_token_ids=input_tokens,
+ sampling_params=generation_config,
+ )
+ output = outputs[0].outputs[0].text
+ output = output.strip()
+ else:
+ input_tensors = {
+ "input_ids": torch.tensor(input_tokens).unsqueeze(0).to(model.device)
+ }
+ # cache = SinkCache(window_length=200000, num_sink_tokens=10000)
+ # if attn_type == "minference_kv_cache_cpu":
+ # input_tensors["use_cache"] = False
+ outputs = model.generate(**input_tensors, generation_config=generation_config)
+ # outputs = model.generate(**input_tensors, generation_config=generation_config, past_key_values=cache)
+
+ output = outputs[0, len(input_tokens) :]
+ output = tok.decode(output, skip_special_tokens=True)
+ output = output.strip()
+ # print(input_text[:5000], input_text[-5000:])
+ print("Chunked generation:", output)
+ return output
+
+
+def load_model(
+ model_name: str,
+ topk: int = -1,
+ starting_layer: int = -1,
+ topk_dims_file_path: str = "",
+ use_sparq: bool = False,
+ attn_type: str = "vllm",
+ max_seq_length: int = None,
+ is_search: bool = False,
+ use_snapkv: bool = False,
+ trust_remote_code: bool = False,
+ kv_cache_cpu: bool = False,
+ kv_cache_cpu_device: str = "cpu",
+):
+ tok = AutoTokenizer.from_pretrained(
+ model_name, resume_download=None, trust_remote_code=trust_remote_code
+ )
+ tok.pad_token = tok.eos_token
+ minference_patch = MInference(
+ attn_type,
+ model_name,
+ config_path=topk_dims_file_path,
+ starting_layer=starting_layer,
+ use_snapkv=use_snapkv,
+ is_search=is_search,
+ kv_cache_cpu=kv_cache_cpu,
+ kv_cache_cpu_device=kv_cache_cpu_device,
+ )
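+ # Note: the MInference patch is applied only on the HuggingFace branch below;
+ # the vLLM engine in this script is constructed and used unpatched.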
+
+ if attn_type == "vllm":
+ llm = LLM(
+ model_name,
+ max_num_seqs=1,
+ swap_space=64,
+ gpu_memory_utilization=0.98,
+ max_model_len=max_seq_length,
+ )
+ else:
+ config = AutoConfig.from_pretrained(
+ model_name, resume_download=None, trust_remote_code=trust_remote_code
+ )
+ if "LWM" in model_name:
+ c = {
+ "theta": 10000000,
+ "max_sequence_length": 131072,
+ "scan_attention": True,
+ "scan_query_chunk_size": 1024,
+ "scan_key_chunk_size": 1024,
+ "scan_mlp": True,
+ "scan_mlp_chunk_size": 1024,
+ "scan_layers": True,
+ }
+ config.update(c)
+
+ llm = AutoModelForCausalLM.from_pretrained(
+ model_name,
+ config=config,
+ torch_dtype="auto",
+ device_map="cuda",
+ resume_download=None,
+ trust_remote_code=trust_remote_code,
+ )
+ llm = minference_patch(llm)
+
+ print("Model and tokenizer loaded.")
+ return llm, tok
+
+
+if __name__ == "__main__":
+ args = parse_args()
+
+ check_benchmark_availability(args.data_dir)
+ model_name = args.model_name_or_path
+ max_seq_length = args.max_seq_length
+ real_model_name = model_name.split("/")[-1]
+ data_name = args.task
+
+ if "," in data_name:
+ data_names = data_name.split(",")
+ else:
+ data_names = [data_name]
+
+ # Model
+ model, tok = load_model(
+ model_name,
+ args.topk,
+ args.starting_layer,
+ args.topk_dims_file_path,
+ args.use_sparq,
+ attn_type=args.attn_type,
+ max_seq_length=max_seq_length,
+ is_search=args.is_search,
+ use_snapkv=args.use_snapkv,
+ trust_remote_code=args.trust_remote_code,
+ kv_cache_cpu=args.kv_cache_cpu,
+ kv_cache_cpu_device=args.kv_cache_cpu_device,
+ )
+ results = {}
+
+ for data_name in data_names:
+ max_new_tokens = DATA_NAME_TO_MAX_NEW_TOKENS[data_name]
+ if max_new_tokens >= max_seq_length:
+ max_new_tokens = 500
+
+ if args.attn_type == "vllm":
+ generation_config = SamplingParams(
+ temperature=0,
+ max_tokens=max_new_tokens,
+ )
+ else:
+ generation_config = GenerationConfig(
+ max_new_tokens=max_new_tokens,
+ num_return_sequences=1,
+ do_sample=False,
+ # temperature=0,
+ # top_p=0.95,
+ pad_token_id=tok.pad_token_id,
+ )
+
+ # Data
+ result_dir = Path(args.output_dir, f"{real_model_name}_{args.attn_type}")
+ result_dir.mkdir(exist_ok=True, parents=True)
+ output_path = result_dir / f"prediction_{data_name}.jsonl"
+ examples = load_data(data_name, data_dir=args.data_dir)
+
+ if args.num_eval_examples != -1:
+ num_eval_examples = min(args.num_eval_examples, len(examples))
+ examples = examples[:num_eval_examples]
+
+ preds = []
+ print("==== Evaluation ====")
+ print(f"# examples: {len(examples)}")
+ print(f"Num eval examples: {args.num_eval_examples}")
+ print(f"Verbose: {args.verbose}")
+ print(f"Max new tokens: {max_new_tokens}")
+
+ if os.path.exists(output_path) and not args.rewrite:
+ print(f"Output file {output_path} exists. Loading from file.")
+ compute_scores(output_path, data_name, real_model_name, max_seq_length)
+
+ for i, eg in tqdm(enumerate(examples)):
+ if i < args.start_example_id:
+ continue
+ input_text = create_prompt(eg, data_name, real_model_name, args.data_dir)
+ ground_truth = get_answer(eg, data_name)
+ # print(input_text.index(ground_truth), len(input_text), input_text.index(ground_truth) / len(input_text))
+ # print(f"====== Example {i} ======")
+ pred = get_pred(
+ model,
+ tok,
+ input_text,
+ max_input_length=max_seq_length - max_new_tokens,
+ verbose=args.verbose,
+ generation_config=generation_config,
+ attn_type=args.attn_type,
+ )
+ print("Ground Truth", get_answer(eg, data_name))
+ if args.verbose:
+ print(pred)
+ preds.append(
+ {
+ "id": i,
+ "prediction": pred,
+ "ground_truth": get_answer(eg, data_name),
+ }
+ )
+ dump_jsonl(preds, output_path)
+ torch.cuda.empty_cache()
+
+ result_file_path = f"{real_model_name}_{args.attn_type}"
+ score = compute_scores(output_path, data_name, result_file_path)
+ results[data_name] = score
+
+ print("==== Results ====")
+ print(json.dumps(results, indent=2))
diff --git a/experiments/infinite_bench/run_infinitebench.sh b/experiments/infinite_bench/run_infinitebench.sh
new file mode 100644
index 0000000..956b384
--- /dev/null
+++ b/experiments/infinite_bench/run_infinitebench.sh
@@ -0,0 +1,21 @@
+# Copyright (c) 2024 Microsoft
+# Licensed under The MIT License [see LICENSE for details]
+
+TASKS=("kv_retrieval" "longbook_choice_eng" "math_find" "longbook_qa_chn" "longbook_qa_eng" "longdialogue_qa_eng" "code_debug" "longbook_sum_eng" "number_string" "passkey")
+
+export TOKENIZERS_PARALLELISM=false
+SCRIPT_DIR=$(dirname "$0")
+
+for task in ${TASKS[@]}; do
+echo $task
+python "$SCRIPT_DIR/run_infinitebench.py" \
+ --task $task \
+ --model_name_or_path ${1} \
+ --data_dir ./data \
+ --output_dir ./results \
+ --max_seq_length $2 \
+ --rewrite \
+ --num_eval_examples $3 --topk 1 --starting_layer 0 --attn_type $4
+done
+
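+# Usage: bash run_infinitebench.sh <model_name_or_path> <max_seq_length> <num_eval_examples> <attn_type>, e.g.: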
+# bash run_infinitebench.sh gradientai/Llama-3-8B-Instruct-262k 160000 -1 minference
diff --git a/experiments/needle_in_a_haystack/needle_summary.py b/experiments/needle_in_a_haystack/needle_summary.py
new file mode 100644
index 0000000..deed952
--- /dev/null
+++ b/experiments/needle_in_a_haystack/needle_summary.py
@@ -0,0 +1,41 @@
+# Copyright (c) 2024 Microsoft
+# Licensed under The MIT License [see LICENSE for details]
+
+import argparse
+import json
+import os
+from collections import Counter
+
+
+def summary(run_name: str, output_path: str):
+ pathlist = os.listdir(output_path)
+
+ datas, cs = [], set()
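+ # Merge every result file belonging to this run, keeping only the first file
+ # seen for each context length to avoid double-counting repeated runs.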
+ for path in pathlist:
+ if run_name in path:
+ data = json.load(open(os.path.join(output_path, path)))
+ if data[0]["context_length"] in cs:
+ continue
+ datas.extend(data)
+ cs.add(data[0]["context_length"])
+
+ res = Counter()
+ for ii in datas:
+ res[(ii["context_length"], ii["depth_percent"])] += ii["correct"] == True
+ if ii["correct"] is False:
+ print(ii["response"])
+ print(sorted(res.items()))
+ with open(f"{output_path}/{run_name}.json", "w") as json_file:
+ json.dump(datas, json_file)
+
+
+if __name__ == "__main__":
+ args = argparse.ArgumentParser()
+ args.add_argument("--run_name", type=str, default=None)
+ args.add_argument("--output_path", type=str, default="results/needle/")
+ args = args.parse_args()
+
+ summary(
+ run_name=args.run_name,
+ output_path=args.output_path,
+ )
diff --git a/experiments/needle_in_a_haystack/needle_test.py b/experiments/needle_in_a_haystack/needle_test.py
new file mode 100644
index 0000000..6139082
--- /dev/null
+++ b/experiments/needle_in_a_haystack/needle_test.py
@@ -0,0 +1,132 @@
+# Copyright (c) 2024 Microsoft
+# Licensed under The MIT License [see LICENSE for details]
+
+import argparse
+import os
+from dataclasses import dataclass
+from datetime import datetime
+
+from needle_tools import LLMNeedleHaystackTester
+from needle_viz import plot_needle_viz
+
+
+@dataclass
+class Config:
+ # wget https://github.com/liyucheng09/LatestEval/releases/download/pg19/pg19_mini.jsonl
+ haystack_file: str = "data/pg19_mini.jsonl" # Path to the haystack file
+ model_name: str = "01-ai/Yi-9B-200K"
+ run_name: str = None # Name of the run, used for the output file
+ context_lengths_min: int = 30_000
+ context_lengths_max: int = 100_000
+ n_context_length_intervals: int = 15 # Number of intervals between min and max
+ n_document_depth_intervals: int = 10 # Number of needle depths (positions) tested per context length
+ n_rounds: int = 3
+ seed: int = 42
+ attn_type: str = "vllm"
+ output_path: str = "results/needle/"
+ pattern_path: str = "config/Llama_3_8B_Instruct_262k_kv_out_v32_best_pattern.json"
+ jobs: str = None
+ kv_cache_cpu: bool = False
+ trust_remote_code: bool = False
+ kv_cache_cpu_device: str = "cpu"
+
+ def __post_init__(self):
+ timestamp = datetime.now().strftime("%Y%m%d_%H%M%S")
+ if not os.path.exists(self.output_path):
+ os.makedirs(self.output_path)
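+ # Encode model, run name, job slice, context-length range, pattern, and a
+ # timestamp in the output filename so repeated runs never overwrite each other.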
+ output_file = f"needle_res_{self.model_name.replace('/', '-')}_{self.run_name if self.run_name is not None else ''}_{self.jobs if self.jobs is not None else ''}_{timestamp}_{self.context_lengths_min}_{self.context_lengths_max}_{self.pattern_path.split('/')[-1].replace('.json', '') if self.pattern_path is not None else ''}.json"
+ self.output_file = os.path.join(self.output_path, output_file)
+
+
+def main(
+ model_name: str,
+ run_name: str = None,
+ attn_type: str = "vllm",
+ output_path: str = "results/needle/",
+ pattern_path: str = "config/Llama_3_8B_Instruct_262k_kv_out_v32_best_pattern.json",
+ rounds: int = 3,
+ jobs: str = None,
+ max_length: int = 100000,
+ min_length: int = 1000,
+ kv_cache_cpu: bool = False,
+ trust_remote_code: bool = False,
+ kv_cache_cpu_device: str = "cpu",
+):
+ config = Config(
+ model_name=model_name,
+ run_name=run_name,
+ attn_type=attn_type,
+ output_path=output_path,
+ pattern_path=pattern_path,
+ n_rounds=rounds,
+ jobs=jobs,
+ context_lengths_min=min_length,
+ context_lengths_max=max_length,
+ kv_cache_cpu=kv_cache_cpu,
+ trust_remote_code=trust_remote_code,
+ kv_cache_cpu_device=kv_cache_cpu_device,
+ )
+ kwargs = {
+ "swap_space": 64,
+ "gpu_memory_utilization": 0.98,
+ }
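+    # These engine kwargs (swap_space, gpu_memory_utilization) are only meaningful for the vLLM backend and are dropped otherwise.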
+    ht = LLMNeedleHaystackTester(config, **(kwargs if config.attn_type == "vllm" else {}))
+ ht.start_test()
+
+ print("making plot...")
+ plot_needle_viz(
+ config.output_file,
+ (
+ config.model_name.replace("/", "-") + f"_{config.run_name}"
+ if config.run_name is not None
+ else ""
+ ),
+ config.context_lengths_min,
+ config.context_lengths_max,
+ mode=attn_type,
+ output_path=config.output_path,
+ )
+
+
+if __name__ == "__main__":
+ args = argparse.ArgumentParser()
+ args.add_argument("--model_name", type=str, required=True)
+ args.add_argument("--run_name", type=str, default=None)
+ args.add_argument(
+ "--attn_type",
+ type=str,
+ required=True,
+ choices=[
+ "vllm",
+ "hf",
+ "streaming",
+ "minference",
+ "inf_llm",
+ "minference_with_dense",
+ ],
+ )
+ args.add_argument("--output_path", type=str, default="results/needle/")
+ args.add_argument("--pattern_path", type=str, default=None)
+ args.add_argument("--rounds", type=int, default=3)
+ args.add_argument("--jobs", type=str, default=None)
+ args.add_argument("--max_length", type=int, default=100000)
+ args.add_argument("--min_length", type=int, default=1000)
+ args.add_argument("--kv_cache_cpu", action="store_true")
+ args.add_argument("--kv_cache_cpu_device", type=str, default="cpu")
+ args.add_argument("--trust_remote_code", action="store_true")
+ args = args.parse_args()
+
+ main(
+ model_name=args.model_name,
+ run_name=args.run_name,
+ attn_type=args.attn_type,
+ output_path=args.output_path,
+ pattern_path=args.pattern_path,
+ rounds=args.rounds,
+ jobs=args.jobs,
+ max_length=args.max_length,
+ min_length=args.min_length,
+ kv_cache_cpu=args.kv_cache_cpu,
+ trust_remote_code=args.trust_remote_code,
+ kv_cache_cpu_device=args.kv_cache_cpu_device,
+ )
diff --git a/experiments/needle_in_a_haystack/needle_tools.py b/experiments/needle_in_a_haystack/needle_tools.py
new file mode 100644
index 0000000..2e1c38b
--- /dev/null
+++ b/experiments/needle_in_a_haystack/needle_tools.py
@@ -0,0 +1,426 @@
+# Copyright (c) 2024 Microsoft
+# Licensed under The MIT License [see LICENSE for details]
+
+import json
+import math
+import os
+import random
+import time
+from dataclasses import dataclass, field
+from datetime import datetime
+from functools import cached_property
+
+import numpy as np
+import torch
+from absl.app import run
+from tqdm import tqdm
+from transformers import AutoModelForCausalLM, AutoTokenizer, GenerationConfig
+from vllm import LLM, SamplingParams
+
+from minference import MInference
+
+
+class LLMNeedleHaystackTester:
+ OURS_TEMPLATE = "Write a high-quality answer for the given question using only the provided search results (some of which might be irrelevant).\n{context}\n\nQuestion: {question} Don't give information outside the document or repeat your findings. Keep your response short and direct. Answer: "
+ RANDOM_NEEDLE_CITIES = [
+ "Chicago",
+ "Yangon",
+ "Antananarivo",
+ "Colombo",
+ "Almaty",
+ "Sydney",
+ "Chicago",
+ "Mexico City",
+ "Seattle",
+ "Lagos",
+ "Amsterdam",
+ "Belgrade",
+ "Cairo",
+ "Baghdad",
+ "Damascus",
+ "Kigali",
+ "Dakar",
+ "Dakar",
+ "Sofia",
+ "Kigali",
+ "Victoria",
+ "Tashkent",
+ "Mumbai",
+ "Barcelona",
+ "Almaty",
+ "Amman",
+ "Toronto",
+ "Bratislava",
+ "Johannesburg",
+ "Thimphu",
+ "Bangkok",
+ "Santiago",
+ "Cairo",
+ "San Francisco",
+ "Lagos",
+ "Amsterdam",
+ "Paris",
+ "Rabat",
+ "Santiago",
+ "Copenhagen",
+ "Madrid",
+ "Kigali",
+ "Ho Chi Minh City",
+ "Sarajevo",
+ "Delhi",
+ "Istanbul",
+ "Ho Chi Minh City",
+ "Khartoum",
+ "Helsinki",
+ "Doha",
+ "Istanbul",
+ "Kuala Lumpur",
+ "Budapest",
+ "Shanghai",
+ "Moscow",
+ "Los Angeles",
+ "Oslo",
+ "Johannesburg",
+ "Berlin",
+ "Bangalore",
+ "Tokyo",
+ "Melbourne",
+ "Barcelona",
+ "Chicago",
+ "Port Louis",
+ "Lisbon",
+ "Nairobi",
+ "Kampala",
+ "Lima",
+ "Maputo",
+ "Vancouver",
+ "Dubai",
+ "Khartoum",
+ "Jakarta",
+ "Madrid",
+ "Yerevan",
+ "Beirut",
+ "Athens",
+ "Chicago",
+ "Paris",
+ "Bucharest",
+ "Copenhagen",
+ "Brussels",
+ "Damascus",
+ "Seattle",
+ "Los Angeles",
+ "Yerevan",
+ "Victoria",
+ "Tunis",
+ "Astana",
+ "Seoul",
+ "Buenos Aires",
+ "Bangkok",
+ "Colombo",
+ "Brussels",
+ "Khartoum",
+ "Doha",
+ "San Francisco",
+ "Vienna",
+ "Jakarta",
+ ]
+
+ def __init__(
+ self,
+ config,
+ retrieval_question="What is the special magic {} number?",
+ results_version=1,
+ rnd_number_digits=7,
+ document_depth_percent_min=0,
+ document_depth_percent_max=100,
+ document_depth_percent_interval_type="linear",
+ save_results=False,
+ final_context_length_buffer=200,
+ print_ongoing_status=True,
+ **kwargs,
+ ):
+ haystack_file = config.haystack_file
+ context_lengths_min = config.context_lengths_min
+ context_lengths_max = config.context_lengths_max
+ context_lengths_num_intervals = config.n_context_length_intervals
+ document_depth_percent_intervals = config.n_document_depth_intervals
+
+ self.config = config
+ self.needle = "\nThe special magic {city} number is: {rnd_number}\n"
+ if not haystack_file or not retrieval_question:
+ raise ValueError(
+ "Needle, haystack, and retrieval_question must be provided."
+ )
+
+ self.rnd_number_digits = rnd_number_digits
+ self.context_lengths_num_intervals = context_lengths_num_intervals
+ self.document_depth_percent_intervals = document_depth_percent_intervals
+ self.haystack_file = haystack_file
+ self.retrieval_question = retrieval_question
+ self.results_version = results_version
+ self.save_results = save_results
+ self.final_context_length_buffer = final_context_length_buffer
+ self.print_ongoing_status = print_ongoing_status
+ self.testing_results = []
+
+ self.context_lengths = np.round(
+ np.linspace(
+ context_lengths_min,
+ context_lengths_max,
+ num=context_lengths_num_intervals,
+ endpoint=True,
+ )
+ ).astype(int)
+ if document_depth_percent_interval_type == "linear":
+ self.document_depth_percents = np.round(
+ np.linspace(
+ document_depth_percent_min,
+ document_depth_percent_max,
+ num=document_depth_percent_intervals,
+ endpoint=True,
+ )
+ ).astype(int)
+ elif document_depth_percent_interval_type == "sigmoid":
+ self.document_depth_percents = [
+ self.logistic(x)
+ for x in np.linspace(
+ document_depth_percent_min,
+ document_depth_percent_max,
+ document_depth_percent_intervals,
+ )
+ ]
+ else:
+ raise ValueError(
+ f"Unsupported document_depth_percent_interval_type: {document_depth_percent_interval_type}"
+ )
+
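+        # If --jobs "start-end" is set, keep only that slice of context lengths so a long sweep can be split across separate runs.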
+ if self.config.jobs is not None:
+ start, end = self.config.jobs.split("-")
+ print(self.context_lengths)
+ self.context_lengths = self.context_lengths[int(start) : int(end)]
+ print(self.context_lengths)
+ self.tokenizer = AutoTokenizer.from_pretrained(
+ config.model_name, trust_remote_code=config.trust_remote_code
+ )
+ minference_patch = MInference(
+ self.config.attn_type,
+ self.config.model_name,
+ self.config.pattern_path,
+ starting_layer=0,
+ kv_cache_cpu=self.config.kv_cache_cpu,
+ kv_cache_cpu_device=self.config.kv_cache_cpu_device,
+ attn_kwargs=(
+ {} if self.config.attn_type != "inf_llm" else {"dense_decoding": False}
+ ),
+ )
+ if self.config.attn_type == "vllm":
+ #### use vllm implementation
+ self.model = LLM(
+ model=self.config.model_name,
+ max_num_seqs=1,
+ max_model_len=context_lengths_max,
+ **kwargs,
+ )
+ self.generation_config = SamplingParams(temperature=0, max_tokens=64)
+ else:
+ self.model = AutoModelForCausalLM.from_pretrained(
+ self.config.model_name,
+ torch_dtype="auto",
+ device_map="cuda",
+ trust_remote_code=config.trust_remote_code,
+ **kwargs,
+ )
+ self.model = minference_patch(self.model)
+ self.generation_config = GenerationConfig(
+ max_new_tokens=32,
+ pad_token_id=self.tokenizer.pad_token_id,
+ eos_token_id=self.tokenizer.eos_token_id,
+ do_sample=False,
+ )
+
+ def generate_random_number(self, num_digits):
+ lower_bound = 10 ** (num_digits - 1)
+ upper_bound = 10**num_digits - 1
+ return random.randint(lower_bound, upper_bound)
+
+ def logistic(self, x, L=100, x0=50, k=0.1):
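+        # Logistic mapping for the "sigmoid" depth spacing: evenly spaced inputs get squeezed toward 0% and 100%, so more depths are sampled near the start and end of the document.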
+ if x == 0:
+ return 0
+ if x == 100:
+ return 100
+ return np.round(L / (1 + np.exp(-k * (x - x0))), 3)
+
+ def read_context_files(self, n):
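+        # Build one raw context per round by concatenating haystack documents until the longest tested context length is covered.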
+ max_context_length = max(self.context_lengths)
+ contexts = []
+ f = open(self.haystack_file, "r")
+ for _ in range(n):
+ context = ""
+ toks = 0
+ while toks < max_context_length:
+ text = json.loads(f.readline())["text"]
+ context += text
+ toks += len(self.tokenizer.encode(text))
+ contexts.append(context)
+ return contexts
+
+ def create_contexts(
+ self,
+ needle_rnd_number,
+ insert_needle,
+ random_city,
+ trim_context,
+ context_length,
+ depth_percent,
+ seed,
+ ):
+ needle = self.needle.format(city=random_city, rnd_number=needle_rnd_number)
+ question = self.retrieval_question.format(random_city)
+ if not insert_needle:
+ needle = " " # replace needle with a space
+ context = self.insert_needle(
+ needle, trim_context, depth_percent, context_length
+ )
+ results = {
+ "context": context,
+ "context_length": int(context_length),
+ "depth_percent": float(depth_percent),
+ "needle": needle,
+ "question": question,
+ "insert_needle": insert_needle,
+ "needle_rnd_number": needle_rnd_number,
+ "seed": seed,
+ }
+ return results
+
+ def insert_needle(self, needle, context, depth_percent, context_length):
+ tokens_needle = self.tokenizer.encode(needle)
+ tokens_context = self.tokenizer.encode(context)
+
+        # Reduce the usable context length by final_context_length_buffer to leave room for the system message, the user question, and the response.
+ context_length -= self.final_context_length_buffer
+
+        # If the context plus the needle exceeds the target context length (it usually will), trim the context to make room for the needle.
+ if len(tokens_context) + len(tokens_needle) > context_length:
+ tokens_context = tokens_context[: context_length - len(tokens_needle)]
+
+ if depth_percent == 100:
+ # If your depth percent is 100 (which means your needle is the last thing in the doc), throw it at the end
+ tokens_new_context = tokens_context + tokens_needle
+ else:
+ # Go get the position (in terms of tokens) to insert your needle
+ insertion_point = int(len(tokens_context) * (depth_percent / 100))
+
+ # tokens_new_context represents the tokens before the needle
+ tokens_new_context = tokens_context[:insertion_point]
+
+ # We want to make sure that we place our needle at a sentence break so we first see what token a '.' is
+ period_tokens = self.tokenizer.encode(".", add_special_tokens=False)
+
+            # Then we iterate backwards until we find the first period
+ while tokens_new_context and tokens_new_context[-1] not in period_tokens:
+ insertion_point -= 1
+ tokens_new_context = tokens_context[:insertion_point]
+
+ # Once we get there, then add in your needle, and stick the rest of your context in on the other end.
+ # Now we have a needle in a haystack
+ tokens_new_context += tokens_needle + tokens_context[insertion_point:]
+
+ # Convert back to a string and return it
+ new_context = self.tokenizer.decode(tokens_new_context)
+ return new_context
+
+ def run_test(self):
+ contexts = []
+ template = self.OURS_TEMPLATE
+
+ def _key_from_result(result):
+ return (result["context_length"], result["depth_percent"], result["seed"])
+
+ results = []
+ full_contexts = self.read_context_files(self.config.n_rounds)
+ full_tokens = [
+ self.tokenizer.encode(full_context) for full_context in tqdm(full_contexts)
+ ]
+
+ start = time.time()
+ for context_length in self.context_lengths:
+ torch.cuda.empty_cache()
+ trim_contexts = [
+ self.tokenizer.decode(full_token[:context_length])
+ for full_token in tqdm(full_tokens)
+ ]
+ contexts = []
+ for depth_percent in self.document_depth_percents:
+ for i in range(self.config.n_rounds):
+ random_city = random.choice(
+ LLMNeedleHaystackTester.RANDOM_NEEDLE_CITIES
+ )
+ insert_needle = True
+ needle_rnd_number = str(
+ self.generate_random_number(self.rnd_number_digits)
+ )
+ print("context length: " + str(context_length))
+ print("depth_percent : " + str(depth_percent))
+ context = self.create_contexts(
+ needle_rnd_number,
+ insert_needle,
+ random_city,
+ trim_contexts[i],
+ context_length,
+ depth_percent,
+ i,
+ )
+ contexts.append(context)
+
+ for context in tqdm(contexts):
+ prompt = template.format(
+ context=context["context"], question=context["question"]
+ )
+ if self.config.attn_type == "vllm":
+ outs = self.model.generate(prompt, self.generation_config)
+ out = outs[0].outputs[0].text
+ else:
+ input_tensor = self.tokenizer(
+ prompt, return_tensors="pt", return_attention_mask=False
+ ).to(self.model.device)
+ with torch.no_grad():
+ outs = self.model.generate(
+ **input_tensor,
+ generation_config=self.generation_config,
+ pad_token_id=self.tokenizer.eos_token_id,
+ )
+ new_tokens = outs[0, input_tensor["input_ids"].shape[-1] :]
+ out = self.tokenizer.decode(new_tokens, skip_special_tokens=True)
+ results.append(
+ {
+ "context_length": context["context_length"],
+ "depth_percent": context["depth_percent"],
+ "response": out,
+ "answer": context["needle_rnd_number"],
+ "correct": context["needle_rnd_number"] in out,
+ "seed": context["seed"],
+ }
+ )
+ with open(self.config.output_file, "w") as f:
+ json.dump(results, f)
+ print("elapsed", time.time() - start)
+ print("done")
+ print(f"Saved results to {self.config.output_file}")
+
+ def print_start_test_summary(self):
+ print("\n")
+ print("Starting Needle In A Haystack Testing...")
+ print(
+ f"- Context Lengths: {len(self.context_lengths)}, Min: {min(self.context_lengths)}, Max: {max(self.context_lengths)}"
+ )
+ print(
+ f"- Document Depths: {len(self.document_depth_percents)}, Min: {min(self.document_depth_percents)}%, Max: {max(self.document_depth_percents)}%"
+ )
+ print(f"- Needle: {self.needle.strip()}")
+ print("\n\n")
+
+ def start_test(self):
+ if self.print_ongoing_status:
+ self.print_start_test_summary()
+ self.run_test()
diff --git a/experiments/needle_in_a_haystack/needle_viz.py b/experiments/needle_in_a_haystack/needle_viz.py
new file mode 100644
index 0000000..3611a6a
--- /dev/null
+++ b/experiments/needle_in_a_haystack/needle_viz.py
@@ -0,0 +1,158 @@
+# Copyright (c) 2024 Microsoft
+# Licensed under The MIT License [see LICENSE for details]
+
+import argparse
+import glob
+import json
+import math
+import os
+
+import matplotlib.pyplot as plt
+import numpy as np
+import pandas as pd
+import seaborn as sns
+from matplotlib.colors import LinearSegmentedColormap
+
+sns.set_style("white")
+sns.set_theme("poster", style="ticks", font_scale=1.2)
+plt.rc("font", family="Times New Roman")
+import logging
+
+logging.getLogger("matplotlib.font_manager").setLevel(logging.ERROR)
+
+
+def plot_needle_viz(
+ res_file,
+ model_name,
+ min_context=1000,
+ max_context=1000000,
+ mode="ours",
+ output_path="figures/",
+):
+ def get_context_size(x, is_128k: bool = False):
+ if is_128k:
+ return f"{round(x / 1000)}K"
+ if x > 990000:
+ return f"{round(x / 1000000)}M"
+ if x <= 10000:
+ return "10K" if x > 5000 else "1K"
+ if round(x / 1000) == 128:
+ return "128K"
+ return f"{round(x / 10000)* 10}K"
+
+ plt.rc("axes", titlesize=25) # fontsize of the title
+ plt.rc("axes", labelsize=25) # fontsize of the x and y labels
+ plt.rc("xtick", labelsize=20) # fontsize of the x tick labels
+ plt.rc("ytick", labelsize=20) # fontsize of the y tick labels
+ plt.rc("legend", fontsize=20) # fontsize of the legend
+
+ df = pd.read_json(res_file)
+    accuracy_df = df.groupby(["context_length", "depth_percent"])["correct"].mean()
+    accuracy_df = accuracy_df.reset_index()
+ accuracy_df = accuracy_df.rename(
+ columns={
+ "correct": "Score",
+ "context_length": "Context Length",
+ "depth_percent": "Document Depth",
+ }
+ )
+
+ pivot_table = pd.pivot_table(
+ accuracy_df,
+ values="Score",
+ index=["Document Depth", "Context Length"],
+ aggfunc="mean",
+ ).reset_index() # This will aggregate
+ pivot_table = pivot_table.pivot(
+ index="Document Depth", columns="Context Length", values="Score"
+ ) # This will turn into a proper pivot
+
+ # Create a custom colormap. Go to https://coolors.co/ and pick cool colors
+ cmap = LinearSegmentedColormap.from_list(
+ "custom_cmap", ["#F0496E", "#EBB839", "#0CD79F"]
+ )
+
+ # Create the heatmap with better aesthetics
+ plt.figure(figsize=(14, 7)) # Can adjust these dimensions as needed
+ ax = sns.heatmap(
+ pivot_table,
+ # annot=True,
+ fmt="g",
+ cmap=cmap,
+ # cbar_kws={'label': 'Score'},
+ vmin=0,
+ vmax=1,
+ )
+
+ min_context_str = f"{min_context // 1000}K" if min_context >= 1000 else min_context
+ max_context_str = f"{max_context // 1000}K" if max_context >= 1000 else max_context
+
+ # More aesthetics
+ if mode.lower() == "ours":
+ name = " w/ MInference"
+ elif mode.lower() == "streamllm":
+ name = " w/ StreamingLLM"
+ elif mode.lower() == "infllm":
+ name = " w/ InfLLM"
+ else:
+ name = ""
+ if "Yi" in model_name:
+ context = "200K"
+ elif any(key in model_name for key in ["Phi", "Qwen2"]):
+ context = "128K"
+ else:
+ context = "1M"
+ plt.title(
+ f"Needle in A Haystack {model_name}{name} {context} Context"
+ ) # Adds a title
+ plt.xlabel("Context Length") # X-axis label
+ plt.ylabel("Depth Percent (%)") # Y-axis label
+
+ # Centering x-ticks
+ xtick_labels = pivot_table.columns.values
+ xtick_labels = [get_context_size(x, context == "128K") for x in xtick_labels]
+ ax.set_xticks(np.arange(len(xtick_labels)) + 0.5, minor=False)
+ ax.set_xticklabels(xtick_labels)
+
+ # Drawing white grid lines
+ for _, spine in ax.spines.items():
+ spine.set_visible(True)
+ spine.set_color("white")
+ spine.set_linewidth(1)
+
+ # Iterate over the number of pairs of gridlines you want
+ for i in range(pivot_table.shape[0]):
+ ax.axhline(i, color="white", lw=1)
+ for i in range(pivot_table.shape[1]):
+ ax.axvline(i, color="white", lw=1)
+
+ # Ensure the ticks are horizontal and prevent overlap
+ plt.xticks(rotation=60)
+ plt.yticks(rotation=0)
+
+ # Fit everything neatly into the figure area
+ plt.tight_layout()
+
+ # Save and Show the plot
+ save_path = os.path.join(
+ output_path,
+ f"needle_viz_{model_name}_{mode}_{min_context_str}_{max_context_str}.pdf",
+ )
+ plt.savefig(save_path, dpi=1000)
+ print(f"Needle plot saved to {save_path}.")
+ plt.show()
+
+
+if __name__ == "__main__":
+ args = argparse.ArgumentParser()
+ args.add_argument("--res_file", type=str, required=True)
+ args.add_argument("--model_name", type=str, required=True)
+ args.add_argument("--mode", type=str)
+ args = args.parse_args()
+
+ plot_needle_viz(
+ args.res_file,
+ model_name=args.model_name,
+ mode=args.mode,
+ )
diff --git a/experiments/needle_in_a_haystack/run_needle.sh b/experiments/needle_in_a_haystack/run_needle.sh
new file mode 100644
index 0000000..3eb0200
--- /dev/null
+++ b/experiments/needle_in_a_haystack/run_needle.sh
@@ -0,0 +1,37 @@
+# Copyright (c) 2024 Microsoft
+# Licensed under The MIT License [see LICENSE for details]
+
+export TOKENIZERS_PARALLELISM=false
+
+# Load Haystack
+mkdir -p data
+wget https://github.com/liyucheng09/LatestEval/releases/download/pg19/pg19_mini.jsonl -O ./data/pg19_mini.jsonl
+
+# Run the Needle in A Haystack Test
+python experiments/needle_in_a_haystack/needle_test.py \
+ --model_name gradientai/Llama-3-8B-Instruct-Gradient-1048k \
+ --max_length 1000000 \
+ --min_length 1000 \
+ --rounds 5 \
+ --attn_type minference \
+ --output_path ./needle \
+ --run_name minference_LLaMA_1M \
+ --jobs 0-4
+
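+# Jobs 4-15 cover the longer context lengths; --kv_cache_cpu offloads the KV cache to CPU memory for these runs.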
+python experiments/needle_in_a_haystack/needle_test.py \
+ --model_name gradientai/Llama-3-8B-Instruct-Gradient-1048k \
+ --max_length 1000000 \
+ --min_length 1000 \
+ --rounds 5 \
+ --attn_type minference \
+ --kv_cache_cpu \
+ --output_path ./needle \
+ --run_name minference_LLaMA_1M \
+ --jobs 4-15
+
+# Data Summary
+python experiments/needle_in_a_haystack/needle_summary.py --output_path ./needle --run_name minference_LLaMA_1M
+
+# Visualization
+mkdir -p figures
+python experiments/needle_in_a_haystack/needle_viz.py --res_file ./needle/minference_LLaMA_1M.json --model_name LLaMA-3-8B-Instruct-1M --mode ours
diff --git a/experiments/ppl/run_ppl.py b/experiments/ppl/run_ppl.py
new file mode 100644
index 0000000..3115d2b
--- /dev/null
+++ b/experiments/ppl/run_ppl.py
@@ -0,0 +1,271 @@
+# Copyright (c) 2024 Microsoft
+# Licensed under The MIT License [see LICENSE for details]
+
+import argparse
+import gc
+import json
+import os
+
+import datasets
+import numpy as np
+import torch
+import torch.nn.functional as F
+from tqdm import tqdm
+from transformers import (
+ AutoConfig,
+ AutoModelForCausalLM,
+ AutoTokenizer,
+ LlamaForCausalLM,
+)
+
+from minference import MInference
+
+
+class LongPPL:
+ def __init__(
+ self,
+ model_name,
+ attn_type,
+ min_context,
+ max_context,
+ intervals: int = 10,
+ run_name: str = None,
+ output_path: str = "results/long-ppl/",
+ data_path: str = "liyucheng/pg19-4k",
+ num_eval_examples: int = 100,
+ **kwargs,
+ ) -> None:
+ self.tokenizer = AutoTokenizer.from_pretrained(model_name)
+ self.prepare_data(
+ data_path,
+ min_context,
+ max_context,
+ intervals,
+ num_eval_examples,
+ )
+ self.load_model(model_name, attn_type, **kwargs)
+ if not os.path.exists(output_path):
+ os.makedirs(output_path)
+ self.output_path = os.path.join(
+ output_path,
+ f'{model_name.replace("/", "-")}_{attn_type}_{run_name if run_name is not None else ""}.json',
+ )
+ if os.path.exists(self.output_path):
+ with open(self.output_path, "r") as f:
+ self.results = json.load(f)
+
+ def load_model(
+ self,
+ model_name: str,
+ attn_type: str = "vllm",
+ **kwargs,
+ ):
+ if attn_type == "vllm":
+ pass
+ else:
+ topk_dims_file_path = kwargs.get("topk_dims_file_path", None)
+ topk_from_layer = kwargs.get("topk_from_layer", -1)
+ minference_patch = MInference(
+ attn_type,
+ model_name,
+ topk_dims_file_path,
+ starting_layer=topk_from_layer,
+ )
+ self.model = LlamaForCausalLM.from_pretrained(
+ model_name,
+ torch_dtype="auto",
+ device_map="auto",
+ )
+ self.model.config.is_ppl = True
+ self.model = minference_patch(self.model)
+ return
+
+ def prepare_data(
+ self,
+ data_path: str,
+ min_context: int,
+ max_context: int,
+ intervals: int,
+ num_eval_examples: int,
+ ):
+ def tok(x):
+ return self.tokenizer(x["text"])
+
+ def truncate(x, length=None):
+ return {
+ "input_ids": x["input_ids"][:length],
+ "attention_mask": x["attention_mask"][:length],
+ }
+
+ all_lengths = [
+ min_context + (max_context - min_context) // intervals * i
+ for i in range(intervals + 1)
+ ]
+
+ ds = datasets.load_dataset(data_path, split="train")
+ ds1k = ds.select(range(num_eval_examples))
+
+ ds1k = ds1k.map(
+ tok,
+ # num_proc=16,
+ remove_columns=ds.column_names,
+ )
+
+ self.test_data = {
+ length: ds1k.map(
+ truncate,
+ # num_proc=16,
+ fn_kwargs={"length": length},
+ )
+ for length in all_lengths
+ }
+ return
+
+ def save_results(self):
+ with open(self.output_path, "w") as f:
+ json.dump(self.results, f, indent=2, ensure_ascii=False)
+ return
+
+ def ppl(self, logits, labels):
+ shift_logits = logits[..., :-1, :].contiguous()
+ shift_labels = labels[..., 1:].contiguous()
+
+ prob = F.log_softmax(shift_logits, dim=-1, dtype=torch.float32).to(logits.dtype)
+ shift_labels = shift_labels.view(-1)
+ prob = prob.view(-1, prob.size(-1))
+
+ loss = -prob[torch.arange(prob.size(0)), shift_labels].mean()
+ return loss.exp().item()
+
+ def chunk_ppl(self, logits, labels, chunk_size=10000):
+ total_loss = 0
+ num_chunks = 0
+ for i in range(0, logits.size(1), chunk_size):
+ chunk_logits = logits[:, i : i + chunk_size][..., :-1, :].contiguous()
+ chunk_labels = labels[:, i : i + chunk_size][..., 1:].contiguous()
+
+ chunk_prob = F.log_softmax(chunk_logits, dim=-1, dtype=torch.float32).to(
+ logits.dtype
+ )
+ chunk_labels = chunk_labels.view(-1)
+ chunk_prob = chunk_prob.view(-1, chunk_prob.size(-1))
+
+ chunk_loss = -chunk_prob[
+ torch.arange(chunk_prob.size(0)), chunk_labels
+ ].sum()
+ total_loss += chunk_loss
+ num_chunks += 1
+
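+        # Each chunk loses one label position to the shift, so the number of scored tokens is seq_len - num_chunks.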
+ total_loss /= logits.size(1) - num_chunks
+ return total_loss.exp().item()
+
+ def start_test(self):
+ print("Starting test...")
+
+ if not hasattr(self, "results"):
+ self.results = {}
+ for length, ds in self.test_data.items():
+ ppls = []
+ with torch.no_grad():
+ for example in tqdm(ds, desc=f"Testing with context length: {length}"):
+ gc.collect()
+ torch.cuda.empty_cache()
+
+ example = {
+ k: torch.tensor([v], device=self.model.device)
+ for k, v in example.items()
+ }
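+                    # Scale config.topk with the context length (one tenth of the tokens).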
+ self.model.config.topk = length // 10
+ output = self.model(
+ **example, use_cache=False, output_hidden_states=False
+ )
+
+ if length > 10_000:
+ ppl = self.chunk_ppl(
+ output.logits, example["input_ids"], chunk_size=10_000
+ )
+ else:
+ ppl = self.ppl(output.logits, example["input_ids"])
+
+ ppls.append(ppl)
+
+ length = f"{length // 1_000}K"
+ self.results[length] = np.mean(ppls)
+ print(f"Average PPL for {length}: {self.results[length]}")
+ self.save_results()
+
+ print("Completed.")
+
+ @staticmethod
+ def viz_results(
+ result_path: str = "results/long-ppl/",
+ output_path: str = "results/long-ppl/long-ppl-viz.png",
+ ):
+ from glob import glob
+
+ import matplotlib.pyplot as plt
+
+ plt.figure(dpi=200, figsize=(10, 6))
+
+ all_res = glob(os.path.join(result_path, "*.json"))
+ for res in all_res:
+ model_name = res.split("/")[-1].split(".")[0]
+ with open(res, "r") as f:
+ data = json.load(f)
+            plt.plot(list(data.keys()), list(data.values()), label=model_name)
+
+ plt.legend()
+ plt.grid(True)
+ plt.xlabel("Context Length")
+ plt.ylabel("PPL")
+
+ plt.savefig(output_path)
+ plt.show()
+
+
+if __name__ == "__main__":
+ args = argparse.ArgumentParser()
+ args.add_argument("--model_name", type=str, required=True)
+ args.add_argument(
+ "--attn_type",
+ type=str,
+ choices=[
+ "hf",
+ "streaming",
+ "minference",
+ "dilated1",
+ "dilated2",
+ ],
+ default="hf",
+ )
+ args.add_argument("--do_plot", action="store_true")
+ args.add_argument("--min_seq_length", type=int, default=1_000)
+ args.add_argument("--max_seq_length", type=int, default=100_000)
+ args.add_argument("--intervals", type=int, default=9)
+ args.add_argument("--run_name", type=str, default=None)
+ args.add_argument("--num_eval_examples", type=int, default=100)
+
+ args.add_argument("--topk", type=int, default=-1)
+ args.add_argument("--topk_from_layer", type=int, default=-1)
+ args.add_argument("--topk_dims_file_path", type=str, default=None)
+ args.add_argument("--output_path", type=str, default="results/long-ppl/")
+
+ args = args.parse_args()
+
+ test = LongPPL(
+ model_name=args.model_name,
+ attn_type=args.attn_type,
+ min_context=args.min_seq_length,
+ max_context=args.max_seq_length,
+ intervals=args.intervals,
+ run_name=args.run_name,
+ num_eval_examples=args.num_eval_examples,
+ output_path=args.output_path,
+ topk=args.topk,
+ topk_from_layer=args.topk_from_layer,
+ topk_dims_file_path=args.topk_dims_file_path,
+ )
+ test.start_test()
+
+ if args.do_plot:
+ LongPPL.viz_results("results/long-ppl/")
diff --git a/experiments/ppl/run_ppl.sh b/experiments/ppl/run_ppl.sh
new file mode 100644
index 0000000..3178e72
--- /dev/null
+++ b/experiments/ppl/run_ppl.sh
@@ -0,0 +1,12 @@
+# Copyright (c) 2024 Microsoft
+# Licensed under The MIT License [see LICENSE for details]
+
+export TOKENIZERS_PARALLELISM=false
+
+mkdir -p results/long-ppl/
+python3.9 experiments/ppl/run_ppl.py \
+ --model_name gradientai/Llama-3-8B-Instruct-262k \
+ --attn_type minference \
+ --intervals 19 \
+ --num_eval_examples 500 \
+ --output_path results/long-ppl/
diff --git a/experiments/ruler/config_models.sh b/experiments/ruler/config_models.sh
new file mode 100644
index 0000000..bc1e44b
--- /dev/null
+++ b/experiments/ruler/config_models.sh
@@ -0,0 +1,160 @@
+# Copyright (c) 2024 Microsoft
+# Licensed under The MIT License [see LICENSE for details]
+
+TEMPERATURE="0.0" # greedy
+TOP_P="1.0"
+TOP_K="32"
+SEQ_LENGTHS=(
+ 4096
+ 8192
+ 16384
+ 32768
+ 65536
+ 131072
+)
+
+MODEL_SELECT() {
+ MODEL_NAME=$1
+ MODEL_DIR=$2
+ ENGINE_DIR=$3
+
+ case $MODEL_NAME in
+ llama2-7b-chat)
+ MODEL_PATH="${MODEL_DIR}/llama2-7b-chat-hf"
+ MODEL_TEMPLATE_TYPE="meta-chat"
+ MODEL_FRAMEWORK="vllm"
+ ;;
+ llama-3-262k)
+ MODEL_PATH="gradientai/Llama-3-8B-Instruct-262k"
+ MODEL_TEMPLATE_TYPE="llama-3"
+ MODEL_FRAMEWORK="vllm"
+ ;;
+ llama-3-262k-minference)
+ MODEL_PATH="gradientai/Llama-3-8B-Instruct-262k"
+ MODEL_TEMPLATE_TYPE="llama-3"
+ MODEL_FRAMEWORK="minference"
+ ;;
+ llama-inf-llm)
+ MODEL_PATH="gradientai/Llama-3-8B-Instruct-262k"
+ MODEL_TEMPLATE_TYPE="llama-3"
+ MODEL_FRAMEWORK="InfLLM"
+ ;;
+ yi-inf-llm)
+ MODEL_PATH="01-ai/Yi-9B-200K"
+ MODEL_TEMPLATE_TYPE="llama-3"
+ MODEL_FRAMEWORK="InfLLM"
+ ;;
+ llama-3-262k-dilated1)
+ MODEL_PATH="gradientai/Llama-3-8B-Instruct-262k"
+ MODEL_TEMPLATE_TYPE="llama-3"
+ MODEL_FRAMEWORK="dilated1"
+ ;;
+ MInference6KoP)
+ MODEL_PATH="gradientai/Llama-3-8B-Instruct-262k"
+ MODEL_TEMPLATE_TYPE="llama-3"
+ MODEL_FRAMEWORK="MInference6KoP"
+ ;;
+ MInference6KOPYi)
+ MODEL_PATH="01-ai/Yi-9B-200K"
+ MODEL_TEMPLATE_TYPE="meta-chat"
+ MODEL_FRAMEWORK="MInference6KOPYi"
+ ;;
+ llama-3-262k-streaming)
+ MODEL_PATH="gradientai/Llama-3-8B-Instruct-262k"
+ MODEL_TEMPLATE_TYPE="llama-3"
+ MODEL_FRAMEWORK="dilated1"
+ ;;
+ Yi-dilated1)
+ MODEL_PATH="01-ai/Yi-9B-200K"
+ MODEL_TEMPLATE_TYPE="meta-chat"
+ MODEL_FRAMEWORK="dilated1"
+ ;;
+ Yi-dilated2)
+ MODEL_PATH="01-ai/Yi-9B-200K"
+ MODEL_TEMPLATE_TYPE="meta-chat"
+ MODEL_FRAMEWORK="dilated2"
+ ;;
+ Yi-static)
+ MODEL_PATH="01-ai/Yi-9B-200K"
+ MODEL_TEMPLATE_TYPE="meta-chat"
+ MODEL_FRAMEWORK="YiStatic"
+ ;;
+ llama-static)
+ MODEL_PATH="gradientai/Llama-3-8B-Instruct-262k"
+ MODEL_TEMPLATE_TYPE="llama-3"
+ MODEL_FRAMEWORK="LlamaStatic"
+ ;;
+ Yi-streaming)
+ MODEL_PATH="01-ai/Yi-9B-200K"
+ MODEL_TEMPLATE_TYPE="meta-chat"
+ MODEL_FRAMEWORK="streaming"
+ ;;
+ OPYiHalfV2)
+ MODEL_PATH="01-ai/Yi-9B-200K"
+ MODEL_TEMPLATE_TYPE="meta-chat"
+ MODEL_FRAMEWORK="OPYiHalfV2"
+ ;;
+ llama-3-262k-dilated2)
+ MODEL_PATH="gradientai/Llama-3-8B-Instruct-262k"
+ MODEL_TEMPLATE_TYPE="llama-3"
+ MODEL_FRAMEWORK="dilated2"
+ ;;
+ gpt-3.5-turbo)
+ MODEL_PATH="gpt-3.5-turbo-0125"
+ MODEL_TEMPLATE_TYPE="base"
+ MODEL_FRAMEWORK="openai"
+ TOKENIZER_PATH="cl100k_base"
+ TOKENIZER_TYPE="openai"
+ OPENAI_API_KEY=""
+ AZURE_ID=""
+ AZURE_SECRET=""
+ AZURE_ENDPOINT=""
+ ;;
+ gpt-4-turbo)
+ MODEL_PATH="gpt-4"
+ MODEL_TEMPLATE_TYPE="base"
+ MODEL_FRAMEWORK="openai"
+ TOKENIZER_PATH="cl100k_base"
+ TOKENIZER_TYPE="openai"
+ OPENAI_API_KEY=""
+ AZURE_ID=""
+ AZURE_SECRET=""
+ AZURE_ENDPOINT=""
+ ;;
+ gemini_1.0_pro)
+ MODEL_PATH="gemini-1.0-pro-latest"
+ MODEL_TEMPLATE_TYPE="base"
+ MODEL_FRAMEWORK="gemini"
+ TOKENIZER_PATH=$MODEL_PATH
+ TOKENIZER_TYPE="gemini"
+ GEMINI_API_KEY=""
+ ;;
+ gemini_1.5_pro)
+ MODEL_PATH="gemini-1.5-pro-latest"
+ MODEL_TEMPLATE_TYPE="base"
+ MODEL_FRAMEWORK="gemini"
+ TOKENIZER_PATH=$MODEL_PATH
+ TOKENIZER_TYPE="gemini"
+ GEMINI_API_KEY=""
+ ;;
+ esac
+
+
+ if [ -z "${TOKENIZER_PATH}" ]; then
+ if [ -f ${MODEL_PATH}/tokenizer.model ]; then
+ TOKENIZER_PATH=${MODEL_PATH}/tokenizer.model
+ TOKENIZER_TYPE="nemo"
+ else
+ TOKENIZER_PATH=${MODEL_PATH}
+ TOKENIZER_TYPE="hf"
+ fi
+ fi
+
+
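+    # Emit every resolved setting as a single colon-separated record for the calling script to parse.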
+ echo "$MODEL_PATH:$MODEL_TEMPLATE_TYPE:$MODEL_FRAMEWORK:$TOKENIZER_PATH:$TOKENIZER_TYPE:$OPENAI_API_KEY:$GEMINI_API_KEY:$AZURE_ID:$AZURE_SECRET:$AZURE_ENDPOINT"
+}
diff --git a/experiments/ruler/config_tasks.sh b/experiments/ruler/config_tasks.sh
new file mode 100644
index 0000000..0aeaecf
--- /dev/null
+++ b/experiments/ruler/config_tasks.sh
@@ -0,0 +1,35 @@
+# Copyright (c) 2024 Microsoft
+# Licensed under The MIT License [see LICENSE for details]
+
+NUM_SAMPLES=80
+REMOVE_NEWLINE_TAB=false
+STOP_WORDS=""
+
+if [ -z "${STOP_WORDS}" ]; then
+ STOP_WORDS=""
+else
+ STOP_WORDS="--stop_words \"${STOP_WORDS}\""
+fi
+
+if [ "${REMOVE_NEWLINE_TAB}" = false ]; then
+ REMOVE_NEWLINE_TAB=""
+else
+ REMOVE_NEWLINE_TAB="--remove_newline_tab"
+fi
+
+# task name in `synthetic.yaml`
+synthetic=(
+ "niah_single_1"
+ "niah_single_2"
+ "niah_single_3"
+ "niah_multikey_1"
+ "niah_multikey_2"
+ "niah_multikey_3"
+ "niah_multivalue"
+ "niah_multiquery"
+ "vt"
+ "cwe"
+ "fwe"
+ "qa_1"
+ "qa_2"
+)
diff --git a/experiments/ruler/data/prepare.py b/experiments/ruler/data/prepare.py
new file mode 100644
index 0000000..1469658
--- /dev/null
+++ b/experiments/ruler/data/prepare.py
@@ -0,0 +1,167 @@
+# Copyright (c) 2024 Microsoft
+# Licensed under The MIT License [see LICENSE for details]
+
+"""
+Prepare a jsonl file with fields `input` and `outputs`.
+{
+    "index": int,
+ "input": str,
+ "outputs": [str],
+}
+
+python prepare.py \
+ --save_dir ./ \
+ --benchmark synthetic \
+ --task niah_single_1 \
+ --tokenizer_path tokenizer.model \
+ --tokenizer_type nemo \
+ --max_seq_length 4096 \
+ --model_template_type base \
+ --num_samples 10 \
+"""
+import argparse
+import importlib
+import json
+import math
+import os
+import re
+import subprocess
+import time
+from pathlib import Path
+
+import yaml
+from template import Templates
+
+parser = argparse.ArgumentParser()
+parser.add_argument(
+ "--save_dir", type=Path, required=True, help="dataset folder to save dataset"
+)
+parser.add_argument(
+ "--benchmark", type=str, default="synthetic", help="Options: [synthetic]"
+)
+parser.add_argument("--task", type=str, required=True, help="tasks in benchmark")
+parser.add_argument(
+ "--subset", type=str, default="validation", help="Options: validation or test"
+)
+parser.add_argument(
+ "--tokenizer_path", type=str, required=True, help="path to the tokenizer model"
+)
+parser.add_argument(
+ "--tokenizer_type", type=str, default="nemo", help="[Options] nemo, hf, openai."
+)
+parser.add_argument(
+ "--max_seq_length",
+ type=int,
+ required=True,
+ help="max sequence length including all input tokens and generated tokens.",
+)
+parser.add_argument(
+ "--num_samples",
+ type=int,
+ default=500,
+ help="maximum number of samples we want to test",
+)
+parser.add_argument("--random_seed", type=int, default=42)
+parser.add_argument(
+ "--model_template_type", type=str, default="base", help="Options in `template.py`"
+)
+parser.add_argument(
+ "--remove_newline_tab",
+ action="store_true",
+ help="remove `\n` and `\t` in all strings.",
+)
+parser.add_argument(
+ "--chunk_idx", type=int, default=0, help="index of current split chunk"
+)
+parser.add_argument("--chunk_amount", type=int, default=1, help="size of split chunk")
+
+args = parser.parse_args()
+
+
+def main():
+ start_time = time.time()
+ curr_folder = os.path.dirname(os.path.abspath(__file__))
+
+    try:
+        module = importlib.import_module(f"{args.benchmark}.constants")
+    except ImportError:
+        print(f"Module data.{args.benchmark}.constants not found.")
+        raise
+
+ tasks_base = module.TASKS
+ with open(os.path.join(curr_folder, f"../{args.benchmark}.yaml"), "r") as f:
+ tasks_customized = yaml.safe_load(f)
+
+ if args.task not in tasks_customized:
+ raise ValueError(f"{args.task} is not found in config_tasks.yaml")
+
+ config = tasks_customized.get(args.task)
+ config.update(tasks_base[config["task"]])
+
+ # Add templates
+    assert (
+        args.model_template_type in Templates
+    ), f"{args.model_template_type} is not found in {Templates.keys()}"
+ model_template = Templates[args.model_template_type]
+ task_template = config["template"]
+
+ # Add answer prefix for all models
+ answer_prefix = config["answer_prefix"] if "answer_prefix" in config else ""
+ config["template"] = (
+ model_template.format(task_template=task_template) + answer_prefix
+ )
+
+ # Split task into multiple chunks
+ chunks = [
+ (args.num_samples // args.chunk_amount)
+ + (1 if i < args.num_samples % args.chunk_amount else 0)
+ for i in range(args.chunk_amount)
+ ]
+ num_samples = chunks[args.chunk_idx]
+ pre_samples = sum(chunks[: args.chunk_idx])
+
+ random_seed = 42 + args.chunk_idx
+
+ try:
+ script = os.path.join(curr_folder, args.benchmark, f"{config['task']}.py")
+ additional_args = " ".join([f"--{k} {v}" for k, v in config["args"].items()])
+ command = f"""python {script} \
+ --save_dir {args.save_dir} \
+ --save_name {args.task} \
+ --subset {args.subset} \
+ --tokenizer_path {args.tokenizer_path} \
+ --tokenizer_type {args.tokenizer_type} \
+ --max_seq_length {args.max_seq_length} \
+ --tokens_to_generate {config['tokens_to_generate']} \
+ --num_samples {num_samples} \
+ --random_seed {random_seed} \
+ {additional_args} \
+ {f"--remove_newline_tab" if args.remove_newline_tab else ""} \
+ {f"--pre_samples {pre_samples}" if config['task'] == 'qa' else ""} \
+ --template "{config['template']}"
+ """
+ print(command)
+ result = subprocess.run(
+ command,
+ shell=True,
+ check=True,
+ stdout=subprocess.PIPE,
+ stderr=subprocess.PIPE,
+ text=True,
+ )
+
+ if result.returncode == 0:
+ print("Output:")
+ print(result.stdout)
+ else:
+ print("Error:")
+ print(result.stderr)
+ except subprocess.CalledProcessError as e:
+ print("Error output:", e.stderr)
+
+ save_file = args.save_dir / args.task / f"{args.subset}.jsonl"
+ print(f"Prepare {args.task} with lines: {args.num_samples} to {save_file}")
+ print(f"Used time: {round((time.time() - start_time) / 60, 1)} minutes")
+
+
+if __name__ == "__main__":
+ main()
diff --git a/experiments/ruler/data/synthetic/common_words_extraction.py b/experiments/ruler/data/synthetic/common_words_extraction.py
new file mode 100644
index 0000000..3fa1687
--- /dev/null
+++ b/experiments/ruler/data/synthetic/common_words_extraction.py
@@ -0,0 +1,207 @@
+# Copyright (c) 2024 Microsoft
+# Licensed under The MIT License [see LICENSE for details]
+
+"""
+Create a dataset jsonl file for common words extraction.
+
+python common_words_extraction.py \
+ --save_dir=./ \
+ --save_name=vt \
+ --tokenizer_path=tokenizer.model \
+ --tokenizer_type nemo \
+ --max_seq_length 4096 \
+ --tokens_to_generate 30 \
+ --num_samples 10 \
+ --random_seed 42 \
+ -freq_cw 30 --freq_ucw 3 --num_cw 10 \
+ --template "[INST] Below is a numbered list of words. In these words, some appear more often than others. Memorize the ones that appear most often.\n{context}\nQuestion: What are the 10 most common words in the above list? [/INST] Answer: The top 10 words that appear most often in the list are:"
+"""
+
+import argparse
+import os
+import random
+import sys
+from pathlib import Path
+
+import wonderwords
+from nemo.collections.asr.parts.utils.manifest_utils import (
+ read_manifest,
+ write_manifest,
+)
+from tqdm import tqdm
+
+sys.path.append(os.path.join(os.path.dirname(os.path.abspath(__file__)), ".."))
+from tokenizer import select_tokenizer
+
+parser = argparse.ArgumentParser()
+parser.add_argument(
+ "--save_dir", type=Path, required=True, help="dataset folder to save dataset"
+)
+parser.add_argument(
+ "--save_name", type=str, required=True, help="name of the save dataset jsonl file"
+)
+parser.add_argument(
+ "--subset", type=str, default="validation", help="Options: validation or test"
+)
+parser.add_argument(
+ "--tokenizer_path", type=str, required=True, help="path to the tokenizer model"
+)
+parser.add_argument(
+ "--tokenizer_type", type=str, default="nemo", help="[Options] nemo, hf, openai."
+)
+parser.add_argument(
+ "--max_seq_length",
+ type=int,
+ required=True,
+ help="max sequence length including all input tokens and generated tokens.",
+)
+parser.add_argument(
+ "--tokens_to_generate",
+ type=int,
+ required=True,
+ help="expected generated token amount.",
+)
+parser.add_argument(
+ "--num_samples", type=int, required=True, help="number of samples to generate"
+)
+parser.add_argument("--random_seed", type=int, default=42)
+parser.add_argument("--template", type=str, default="", help="prompt template")
+parser.add_argument(
+ "--remove_newline_tab",
+ action="store_true",
+ help="remove `\n` and `\t` in all strings.",
+)
+
+parser.add_argument("--freq_cw", type=int, default=30)
+parser.add_argument("--freq_ucw", type=int, default=3)
+parser.add_argument("--num_cw", type=int, default=10)
+
+args = parser.parse_args()
+random.seed(args.random_seed)
+
+# Load Tokenizer
+TOKENIZER = select_tokenizer(args.tokenizer_type, args.tokenizer_path)
+
+nouns = wonderwords.random_word._get_words_from_text_file("nounlist.txt")
+adjs = wonderwords.random_word._get_words_from_text_file("adjectivelist.txt")
+verbs = wonderwords.random_word._get_words_from_text_file("verblist.txt")
+words = nouns + adjs + verbs
+words = sorted(list(set(words)))
+random.Random(args.random_seed).shuffle(words)
+
+
+def get_example(num_words, common_repeats=30, uncommon_repeats=3, common_nums=10):
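+    # The first `common_nums` sampled words are repeated `common_repeats` times, the rest `uncommon_repeats` times, then the combined list is shuffled.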
+ word_list_full = random.sample(words, num_words)
+ common, uncommon = word_list_full[:common_nums], word_list_full[common_nums:]
+ word_list = common * int(common_repeats) + uncommon * int(uncommon_repeats)
+ random.Random(args.random_seed).shuffle(word_list)
+
+ # Formatting the word list as "1. word1 2. word2 3. word3 ..."
+ context = " ".join([f"{i + 1}. {word}" for i, word in enumerate(word_list)])
+
+ return context, common
+
+
+def generate_input_output(num_words):
+ if args.max_seq_length < 4096:
+ context_example, answer_example = get_example(20, 3, 1, args.num_cw)
+ context, answer = get_example(num_words, 6, 1, args.num_cw)
+ else:
+ context_example, answer_example = get_example(40, 10, 3, args.num_cw)
+ context, answer = get_example(
+ num_words, args.freq_cw, args.freq_ucw, args.num_cw
+ )
+
+ template = args.template
+
+ input_example = template.format(
+ context=context_example,
+ query="",
+ ) + " ".join([f"{i + 1}. {word}" for i, word in enumerate(answer_example)])
+
+ input_text = template.format(
+ context=context,
+ query="",
+ )
+
+ return input_example + "\n" + input_text, answer
+
+
+def sys_word_pair_random(
+ num_samples: int, max_seq_length: int, save_dir: str, incremental: int = 10
+):
+ write_jsons = []
+ tokens_to_generate = args.tokens_to_generate
+
+ # Find the perfect num_words
+ num_words = incremental
+
+ total_tokens = 0
+ while total_tokens + tokens_to_generate < max_seq_length:
+ input_text, answer = generate_input_output(num_words)
+ # Calculate the number of tokens in the example
+ total_tokens = len(
+ TOKENIZER.text_to_tokens(
+ input_text
+ + " "
+ + " ".join([f"{i + 1}. {word}" for i, word in enumerate(answer)])
+ )
+ )
+ print(
+ f"Max length {max_seq_length} | Current length {total_tokens + tokens_to_generate} | Words: {num_words}"
+ )
+ if total_tokens + tokens_to_generate > max_seq_length:
+ num_words -= incremental
+ break
+
+ num_words += incremental
+ if num_words > len(words):
+ num_words = len(words)
+ break
+
+ print("num_words:", num_words)
+
+ # Generate samples
+ for index in tqdm(range(num_samples)):
+ used_words = num_words
+ while True:
+ try:
+ input_text, answer = generate_input_output(used_words)
+ length = len(TOKENIZER.text_to_tokens(input_text)) + tokens_to_generate
+ assert length <= max_seq_length, f"{length} exceeds max_seq_length."
+ break
+            except Exception:
+ if used_words > incremental:
+ used_words -= incremental
+
+ if args.remove_newline_tab:
+ input_text = " ".join(
+ input_text.replace("\n", " ").replace("\t", " ").strip().split()
+ )
+
+ formatted_output = {
+ "index": index,
+ "input": input_text,
+ "outputs": answer,
+ "length": length,
+ }
+ write_jsons.append(formatted_output)
+
+ return write_jsons
+
+
+def main():
+ save_file = args.save_dir / f"{args.save_name}" / f"{args.subset}.jsonl"
+ save_file.parent.mkdir(parents=True, exist_ok=True)
+
+ write_jsons = sys_word_pair_random(
+ num_samples=args.num_samples,
+ max_seq_length=args.max_seq_length,
+ save_dir=args.save_dir,
+ )
+
+ write_manifest(save_file, write_jsons)
+
+
+if __name__ == "__main__":
+ main()
diff --git a/experiments/ruler/data/synthetic/constants.py b/experiments/ruler/data/synthetic/constants.py
new file mode 100644
index 0000000..58337ae
--- /dev/null
+++ b/experiments/ruler/data/synthetic/constants.py
@@ -0,0 +1,39 @@
+# Copyright (c) 2024 Microsoft
+# Licensed under The MIT License [see LICENSE for details]
+
+"""
+Add a new task (required arguments):
+
+TASK_NAME: {
+ 'tokens_to_generate': how many tokens we want to generate.
+ 'template': the template with at least {context} and {query}.
+}
+"""
+
+TASKS = {
+ "niah": {
+ "tokens_to_generate": 128,
+ "template": """Some special magic {type_needle_v} are hidden within the following text. Make sure to memorize it. I will quiz you about the {type_needle_v} afterwards.\n{context}\nWhat are all the special magic {type_needle_v} for {query} mentioned in the provided text?""",
+ "answer_prefix": """ The special magic {type_needle_v} for {query} mentioned in the provided text are""",
+ },
+ "variable_tracking": {
+ "tokens_to_generate": 30,
+ "template": """Memorize and track the chain(s) of variable assignment hidden in the following text.\n\n{context}\nQuestion: Find all variables that are assigned the value {query} in the text above.""",
+ "answer_prefix": """ Answer: According to the chain(s) of variable assignment in the text above, {num_v} variables are assgined the value {query}, they are: """,
+ },
+ "common_words_extraction": {
+ "tokens_to_generate": 120,
+ "template": """Below is a numbered list of words. In these words, some appear more often than others. Memorize the ones that appear most often.\n{context}\nQuestion: What are the 10 most common words in the above list?""",
+ "answer_prefix": """ Answer: The top 10 words that appear most often in the list are:""",
+ },
+ "freq_words_extraction": {
+ "tokens_to_generate": 50,
+ "template": """Read the following coded text and track the frequency of each coded word. Find the three most frequently appeared coded words. {context}\nQuestion: Do not provide any explanation. Please ignore the dots '....'. What are the three most frequently appeared words in the above coded text?""",
+ "answer_prefix": """ Answer: According to the coded text above, the three most frequently appeared words are:""",
+ },
+ "qa": {
+ "tokens_to_generate": 32,
+ "template": """Answer the question based on the given documents. Only give me the answer and do not output any other words.\n\nThe following are given documents.\n\n{context}\n\nAnswer the question based on the given documents. Only give me the answer and do not output any other words.\n\nQuestion: {query}""",
+ "answer_prefix": """ Answer:""",
+ },
+}
diff --git a/experiments/ruler/data/synthetic/freq_words_extraction.py b/experiments/ruler/data/synthetic/freq_words_extraction.py
new file mode 100644
index 0000000..40d3afd
--- /dev/null
+++ b/experiments/ruler/data/synthetic/freq_words_extraction.py
@@ -0,0 +1,190 @@
+# Copyright (c) 2024 Microsoft
+# Licensed under The MIT License [see LICENSE for details]
+
+"""
+Create a dataset jsonl file for frequent words extraction.
+
+python freq_words_extraction.py \
+ --save_dir=./ \
+ --save_name=vt \
+ --tokenizer_path=tokenizer.model \
+ --tokenizer_type nemo \
+ --max_seq_length 4096 \
+ --tokens_to_generate 30 \
+ --num_samples 10 \
+ --random_seed 42 \
+ --alpha 2.0 \
+ --template "[INST] Read the following coded text and track the frequency of each coded word. Find the three most frequently appeared coded words. {context}\nQuestion: Do not provide any explanation. Please ignore the dots '....'. What are the three most frequently appeared words in the above coded text? [/INST] Answer: According to the coded text above, the three most frequently appeared words are:"
+"""
+
+import argparse
+import os
+import random
+import string
+import sys
+from pathlib import Path
+
+import numpy as np
+from nemo.collections.asr.parts.utils.manifest_utils import (
+ read_manifest,
+ write_manifest,
+)
+from tqdm import tqdm
+
+sys.path.append(os.path.join(os.path.dirname(os.path.abspath(__file__)), ".."))
+from scipy.special import zeta
+from tokenizer import select_tokenizer
+
+parser = argparse.ArgumentParser()
+parser.add_argument(
+ "--save_dir", type=Path, required=True, help="dataset folder to save dataset"
+)
+parser.add_argument(
+ "--save_name", type=str, required=True, help="name of the save dataset jsonl file"
+)
+parser.add_argument(
+ "--subset", type=str, default="validation", help="Options: validation or test"
+)
+parser.add_argument(
+ "--tokenizer_path", type=str, required=True, help="path to the tokenizer model"
+)
+parser.add_argument(
+ "--tokenizer_type", type=str, default="nemo", help="[Options] nemo, hf, openai."
+)
+parser.add_argument(
+ "--max_seq_length",
+ type=int,
+ required=True,
+ help="max sequence length including all input tokens and generated tokens.",
+)
+parser.add_argument(
+ "--tokens_to_generate", type=int, default=50, help="number of tokens to generate"
+)
+parser.add_argument(
+ "--num_samples", type=int, required=True, help="number of samples to generate"
+)
+parser.add_argument("--random_seed", type=int, default=42)
+parser.add_argument("--template", type=str, default="", help="prompt template")
+parser.add_argument(
+ "--remove_newline_tab",
+ action="store_true",
+ help="remove `\n` and `\t` in all strings.",
+)
+parser.add_argument(
+ "--coded_wordlen", type=int, default=6, help="length of synthetic word"
+)
+parser.add_argument(
+ "--vocab_size", type=int, default=-1, help="synthetic vocab size to sample from"
+)
+parser.add_argument("--alpha", type=float, default=2.0, help="zeta distribution alpha")
+parser.add_argument("--add_fewshot", action="store_true", default=False)
+
+args = parser.parse_args()
+random.seed(args.random_seed)
+np.random.seed(args.random_seed)
+
+# Load Tokenizer
+TOKENIZER = select_tokenizer(args.tokenizer_type, args.tokenizer_path)
+
+
+def generate_input_output(
+ max_len, num_words=-1, coded_wordlen=6, vocab_size=2000, incremental=10, alpha=2.0
+):
+ # generate vocab
+ vocab = [
+ "".join(random.choices(string.ascii_lowercase, k=coded_wordlen))
+ for _ in range(vocab_size)
+ ]
+ while len(set(vocab)) < vocab_size:
+ vocab.append("".join(random.choices(string.ascii_lowercase, k=coded_wordlen)))
+ vocab = sorted(list(set(vocab)))
+ random.Random(args.random_seed).shuffle(vocab)
+ vocab[0] = "..." # treat the top ranked as noise
+
+ # sample words
+ template = args.template
+
+ def gen_text(num_words):
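+        # Repeat word k roughly num_words * k**-alpha / zeta(alpha) times (a Zipf-like frequency profile), so the top-ranked coded words dominate the text.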
+ k = np.arange(1, len(vocab) + 1)
+ sampled_cnt = num_words * (k**-alpha) / zeta(alpha)
+ sampled_words = [[w] * zi for w, zi in zip(vocab, sampled_cnt.astype(int))]
+ sampled_words = [x for wlst in sampled_words for x in wlst]
+ random.Random(args.random_seed).shuffle(sampled_words)
+ return template.format(context=" ".join(sampled_words), query=""), vocab[1:4]
+
+    if num_words > 0:
+        text, answer = gen_text(num_words)
+ while len(TOKENIZER.text_to_tokens(text)) > max_len:
+ num_words -= incremental
+ text, answer = gen_text(num_words)
+ else:
+ num_words = max_len // coded_wordlen # init
+ text, answer = gen_text(num_words)
+ while len(TOKENIZER.text_to_tokens(text)) < max_len:
+ num_words += incremental
+ text, answer = gen_text(num_words)
+ num_words -= incremental
+ text, answer = gen_text(num_words)
+ return text, answer, num_words
+
+
+def sys_kwext(num_samples: int, max_seq_length: int, incremental: int = 10):
+ write_jsons = []
+ tokens_to_generate = args.tokens_to_generate
+
+ vocab_size = max_seq_length // 50 if args.vocab_size == -1 else args.vocab_size
+
+ # get number of words
+ input_max_len = max_seq_length
+ _, _, num_example_words = generate_input_output(
+ input_max_len,
+ coded_wordlen=args.coded_wordlen,
+ vocab_size=vocab_size,
+ incremental=input_max_len // 32,
+ alpha=args.alpha,
+ )
+ print("num_example_words:", num_example_words)
+ # Generate samples
+ for index in tqdm(range(num_samples)):
+ # construct input
+ input_max_len = max_seq_length
+ input_text, answer, _ = generate_input_output(
+ input_max_len,
+ num_words=num_example_words,
+ coded_wordlen=args.coded_wordlen,
+ vocab_size=vocab_size,
+ incremental=input_max_len // 32,
+ alpha=args.alpha,
+ )
+
+ length = len(TOKENIZER.text_to_tokens(input_text)) + tokens_to_generate
+
+ if args.remove_newline_tab:
+ input_text = " ".join(
+ input_text.replace("\n", " ").replace("\t", " ").strip().split()
+ )
+
+ formatted_output = {
+ "index": index,
+ "input": input_text,
+ "outputs": answer,
+ "length": length,
+ }
+ write_jsons.append(formatted_output)
+
+ return write_jsons
+
+
+def main():
+ save_file = args.save_dir / f"{args.save_name}" / f"{args.subset}.jsonl"
+ save_file.parent.mkdir(parents=True, exist_ok=True)
+ write_jsons = sys_kwext(
+ num_samples=args.num_samples, max_seq_length=args.max_seq_length, incremental=10
+ )
+
+ write_manifest(save_file, write_jsons)
+
+
+if __name__ == "__main__":
+ main()
diff --git a/experiments/ruler/data/synthetic/json/PaulGrahamEssays_URLs.txt b/experiments/ruler/data/synthetic/json/PaulGrahamEssays_URLs.txt
new file mode 100644
index 0000000..b9a38e4
--- /dev/null
+++ b/experiments/ruler/data/synthetic/json/PaulGrahamEssays_URLs.txt
@@ -0,0 +1,218 @@
+http://www.paulgraham.com/13sentences.html
+http://www.paulgraham.com/5founders.html
+http://www.paulgraham.com/6631327.html
+http://www.paulgraham.com/95.html
+http://www.paulgraham.com/ace.html
+http://www.paulgraham.com/airbnb.html
+http://www.paulgraham.com/airbnbs.html
+http://www.paulgraham.com/alien.html
+http://www.paulgraham.com/altair.html
+http://www.paulgraham.com/ambitious.html
+http://www.paulgraham.com/america.html
+http://www.paulgraham.com/angelinvesting.html
+http://www.paulgraham.com/artistsship.html
+http://www.paulgraham.com/badeconomy.html
+http://www.paulgraham.com/better.html
+http://www.paulgraham.com/bronze.html
+http://www.paulgraham.com/bubble.html
+http://www.paulgraham.com/charisma.html
+http://www.paulgraham.com/cities.html
+http://www.paulgraham.com/college.html
+http://www.paulgraham.com/colleges.html
+http://www.paulgraham.com/conformism.html
+http://www.paulgraham.com/control.html
+http://www.paulgraham.com/convergence.html
+http://www.paulgraham.com/convince.html
+http://www.paulgraham.com/cred.html
+http://www.paulgraham.com/credentials.html
+http://www.paulgraham.com/determination.html
+http://www.paulgraham.com/die.html
+http://www.paulgraham.com/disagree.html
+http://www.paulgraham.com/disc.html
+http://www.paulgraham.com/discover.html
+http://www.paulgraham.com/distraction.html
+http://www.paulgraham.com/divergence.html
+http://www.paulgraham.com/donate.html
+http://www.paulgraham.com/ds.html
+http://www.paulgraham.com/early.html
+http://www.paulgraham.com/earnest.html
+http://www.paulgraham.com/equity.html
+http://www.paulgraham.com/essay.html
+http://www.paulgraham.com/ffb.html
+http://www.paulgraham.com/fh.html
+http://www.paulgraham.com/fix.html
+http://www.paulgraham.com/fn.html
+http://www.paulgraham.com/foundersatwork.html
+http://www.paulgraham.com/fp.html
+http://www.paulgraham.com/fr.html
+http://www.paulgraham.com/fundraising.html
+http://www.paulgraham.com/future.html
+http://www.paulgraham.com/genius.html
+http://www.paulgraham.com/getideas.html
+http://www.paulgraham.com/good.html
+http://www.paulgraham.com/goodart.html
+http://www.paulgraham.com/googles.html
+http://www.paulgraham.com/greatwork.html
+http://www.paulgraham.com/growth.html
+http://www.paulgraham.com/guidetoinvestors.html
+http://www.paulgraham.com/hackernews.html
+http://www.paulgraham.com/head.html
+http://www.paulgraham.com/herd.html
+http://www.paulgraham.com/heresy.html
+http://www.paulgraham.com/heroes.html
+http://www.paulgraham.com/highres.html
+http://www.paulgraham.com/hiresfund.html
+http://www.paulgraham.com/hiring.html
+http://www.paulgraham.com/hp.html
+http://www.paulgraham.com/hs.html
+http://www.paulgraham.com/hundred.html
+http://www.paulgraham.com/hw.html
+http://www.paulgraham.com/hwh.html
+http://www.paulgraham.com/icad.html
+http://www.paulgraham.com/ideas.html
+http://www.paulgraham.com/identity.html
+http://www.paulgraham.com/ineq.html
+http://www.paulgraham.com/inequality.html
+http://www.paulgraham.com/investors.html
+http://www.paulgraham.com/invtrend.html
+http://www.paulgraham.com/javacover.html
+http://www.paulgraham.com/jessica.html
+http://www.paulgraham.com/judgement.html
+http://www.paulgraham.com/kate.html
+http://www.paulgraham.com/kids.html
+http://www.paulgraham.com/ladder.html
+http://www.paulgraham.com/lesson.html
+http://www.paulgraham.com/lies.html
+http://www.paulgraham.com/lwba.html
+http://www.paulgraham.com/mac.html
+http://www.paulgraham.com/makersschedule.html
+http://www.paulgraham.com/marginal.html
+http://www.paulgraham.com/maybe.html
+http://www.paulgraham.com/mean.html
+http://www.paulgraham.com/microsoft.html
+http://www.paulgraham.com/mit.html
+http://www.paulgraham.com/name.html
+http://www.paulgraham.com/nerds.html
+http://www.paulgraham.com/newthings.html
+http://www.paulgraham.com/noob.html
+http://www.paulgraham.com/noop.html
+http://www.paulgraham.com/notnot.html
+http://www.paulgraham.com/nov.html
+http://www.paulgraham.com/nthings.html
+http://www.paulgraham.com/opensource.html
+http://www.paulgraham.com/organic.html
+http://www.paulgraham.com/orth.html
+http://www.paulgraham.com/own.html
+http://www.paulgraham.com/patentpledge.html
+http://www.paulgraham.com/pgh.html
+http://www.paulgraham.com/pinch.html
+http://www.paulgraham.com/polls.html
+http://www.paulgraham.com/power.html
+http://www.paulgraham.com/prcmc.html
+http://www.paulgraham.com/procrastination.html
+http://www.paulgraham.com/progbot.html
+http://www.paulgraham.com/prop62.html
+http://www.paulgraham.com/property.html
+http://www.paulgraham.com/publishing.html
+http://www.paulgraham.com/pypar.html
+http://www.paulgraham.com/ramenprofitable.html
+http://www.paulgraham.com/randomness.html
+http://www.paulgraham.com/re.html
+http://www.paulgraham.com/read.html
+http://www.paulgraham.com/real.html
+http://www.paulgraham.com/really.html
+http://www.paulgraham.com/relres.html
+http://www.paulgraham.com/revolution.html
+http://www.paulgraham.com/richnow.html
+http://www.paulgraham.com/road.html
+http://www.paulgraham.com/ronco.html
+http://www.paulgraham.com/safe.html
+http://www.paulgraham.com/say.html
+http://www.paulgraham.com/schlep.html
+http://www.paulgraham.com/seesv.html
+http://www.paulgraham.com/segway.html
+http://www.paulgraham.com/selfindulgence.html
+http://www.paulgraham.com/sfp.html
+http://www.paulgraham.com/simply.html
+http://www.paulgraham.com/smart.html
+http://www.paulgraham.com/softwarepatents.html
+http://www.paulgraham.com/spam.html
+http://www.paulgraham.com/speak.html
+http://www.paulgraham.com/start.html
+http://www.paulgraham.com/startupfunding.html
+http://www.paulgraham.com/startuphubs.html
+http://www.paulgraham.com/startupideas.html
+http://www.paulgraham.com/startupmistakes.html
+http://www.paulgraham.com/stuff.html
+http://www.paulgraham.com/superlinear.html
+http://www.paulgraham.com/swan.html
+http://www.paulgraham.com/tablets.html
+http://www.paulgraham.com/talk.html
+http://www.paulgraham.com/taste.html
+http://www.paulgraham.com/think.html
+http://www.paulgraham.com/top.html
+http://www.paulgraham.com/trolls.html
+http://www.paulgraham.com/twitter.html
+http://www.paulgraham.com/usa.html
+http://www.paulgraham.com/users.html
+http://www.paulgraham.com/venturecapital.html
+http://www.paulgraham.com/wealth.html
+http://www.paulgraham.com/webstartups.html
+http://www.paulgraham.com/whyyc.html
+http://www.paulgraham.com/word.html
+http://www.paulgraham.com/words.html
+http://www.paulgraham.com/work.html
+http://www.paulgraham.com/writing44.html
+http://www.paulgraham.com/wtax.html
+http://www.paulgraham.com/yahoo.html
+http://www.paulgraham.com/ycombinator.html
+http://www.paulgraham.com/ycstart.html
+https://github.com/gkamradt/LLMTest_NeedleInAHaystack/raw/main/needlehaystack/PaulGrahamEssays/addiction.txt
+https://github.com/gkamradt/LLMTest_NeedleInAHaystack/raw/main/needlehaystack/PaulGrahamEssays/aord.txt
+https://github.com/gkamradt/LLMTest_NeedleInAHaystack/raw/main/needlehaystack/PaulGrahamEssays/apple.txt
+https://github.com/gkamradt/LLMTest_NeedleInAHaystack/raw/main/needlehaystack/PaulGrahamEssays/avg.txt
+https://github.com/gkamradt/LLMTest_NeedleInAHaystack/raw/main/needlehaystack/PaulGrahamEssays/before.txt
+https://github.com/gkamradt/LLMTest_NeedleInAHaystack/raw/main/needlehaystack/PaulGrahamEssays/bias.txt
+https://github.com/gkamradt/LLMTest_NeedleInAHaystack/raw/main/needlehaystack/PaulGrahamEssays/boss.txt
+https://github.com/gkamradt/LLMTest_NeedleInAHaystack/raw/main/needlehaystack/PaulGrahamEssays/copy.txt
+https://github.com/gkamradt/LLMTest_NeedleInAHaystack/raw/main/needlehaystack/PaulGrahamEssays/corpdev.txt
+https://github.com/gkamradt/LLMTest_NeedleInAHaystack/raw/main/needlehaystack/PaulGrahamEssays/desres.txt
+https://github.com/gkamradt/LLMTest_NeedleInAHaystack/raw/main/needlehaystack/PaulGrahamEssays/diff.txt
+https://github.com/gkamradt/LLMTest_NeedleInAHaystack/raw/main/needlehaystack/PaulGrahamEssays/ecw.txt
+https://github.com/gkamradt/LLMTest_NeedleInAHaystack/raw/main/needlehaystack/PaulGrahamEssays/founders.txt
+https://github.com/gkamradt/LLMTest_NeedleInAHaystack/raw/main/needlehaystack/PaulGrahamEssays/foundervisa.txt
+https://github.com/gkamradt/LLMTest_NeedleInAHaystack/raw/main/needlehaystack/PaulGrahamEssays/gap.txt
+https://github.com/gkamradt/LLMTest_NeedleInAHaystack/raw/main/needlehaystack/PaulGrahamEssays/gba.txt
+https://github.com/gkamradt/LLMTest_NeedleInAHaystack/raw/main/needlehaystack/PaulGrahamEssays/gh.txt
+https://github.com/gkamradt/LLMTest_NeedleInAHaystack/raw/main/needlehaystack/PaulGrahamEssays/goodtaste.txt
+https://github.com/gkamradt/LLMTest_NeedleInAHaystack/raw/main/needlehaystack/PaulGrahamEssays/hubs.txt
+https://github.com/gkamradt/LLMTest_NeedleInAHaystack/raw/main/needlehaystack/PaulGrahamEssays/iflisp.txt
+https://github.com/gkamradt/LLMTest_NeedleInAHaystack/raw/main/needlehaystack/PaulGrahamEssays/island.txt
+https://github.com/gkamradt/LLMTest_NeedleInAHaystack/raw/main/needlehaystack/PaulGrahamEssays/know.txt
+https://github.com/gkamradt/LLMTest_NeedleInAHaystack/raw/main/needlehaystack/PaulGrahamEssays/langdes.txt
+https://github.com/gkamradt/LLMTest_NeedleInAHaystack/raw/main/needlehaystack/PaulGrahamEssays/laundry.txt
+https://github.com/gkamradt/LLMTest_NeedleInAHaystack/raw/main/needlehaystack/PaulGrahamEssays/love.txt
+https://github.com/gkamradt/LLMTest_NeedleInAHaystack/raw/main/needlehaystack/PaulGrahamEssays/mod.txt
+https://github.com/gkamradt/LLMTest_NeedleInAHaystack/raw/main/needlehaystack/PaulGrahamEssays/newideas.txt
+https://github.com/gkamradt/LLMTest_NeedleInAHaystack/raw/main/needlehaystack/PaulGrahamEssays/nft.txt
+https://github.com/gkamradt/LLMTest_NeedleInAHaystack/raw/main/needlehaystack/PaulGrahamEssays/philosophy.txt
+https://github.com/gkamradt/LLMTest_NeedleInAHaystack/raw/main/needlehaystack/PaulGrahamEssays/popular.txt
+https://github.com/gkamradt/LLMTest_NeedleInAHaystack/raw/main/needlehaystack/PaulGrahamEssays/pow.txt
+https://github.com/gkamradt/LLMTest_NeedleInAHaystack/raw/main/needlehaystack/PaulGrahamEssays/rootsoflisp.txt
+https://github.com/gkamradt/LLMTest_NeedleInAHaystack/raw/main/needlehaystack/PaulGrahamEssays/rss.txt
+https://github.com/gkamradt/LLMTest_NeedleInAHaystack/raw/main/needlehaystack/PaulGrahamEssays/siliconvalley.txt
+https://github.com/gkamradt/LLMTest_NeedleInAHaystack/raw/main/needlehaystack/PaulGrahamEssays/startuplessons.txt
+https://github.com/gkamradt/LLMTest_NeedleInAHaystack/raw/main/needlehaystack/PaulGrahamEssays/submarine.txt
+https://github.com/gkamradt/LLMTest_NeedleInAHaystack/raw/main/needlehaystack/PaulGrahamEssays/sun.txt
+https://github.com/gkamradt/LLMTest_NeedleInAHaystack/raw/main/needlehaystack/PaulGrahamEssays/superangels.txt
+https://github.com/gkamradt/LLMTest_NeedleInAHaystack/raw/main/needlehaystack/PaulGrahamEssays/todo.txt
+https://github.com/gkamradt/LLMTest_NeedleInAHaystack/raw/main/needlehaystack/PaulGrahamEssays/unions.txt
+https://github.com/gkamradt/LLMTest_NeedleInAHaystack/raw/main/needlehaystack/PaulGrahamEssays/useful.txt
+https://github.com/gkamradt/LLMTest_NeedleInAHaystack/raw/main/needlehaystack/PaulGrahamEssays/vb.txt
+https://github.com/gkamradt/LLMTest_NeedleInAHaystack/raw/main/needlehaystack/PaulGrahamEssays/vcsqueeze.txt
+https://github.com/gkamradt/LLMTest_NeedleInAHaystack/raw/main/needlehaystack/PaulGrahamEssays/vw.txt
+https://github.com/gkamradt/LLMTest_NeedleInAHaystack/raw/main/needlehaystack/PaulGrahamEssays/want.txt
+https://github.com/gkamradt/LLMTest_NeedleInAHaystack/raw/main/needlehaystack/PaulGrahamEssays/web20.txt
+https://github.com/gkamradt/LLMTest_NeedleInAHaystack/raw/main/needlehaystack/PaulGrahamEssays/weird.txt
+https://github.com/gkamradt/LLMTest_NeedleInAHaystack/raw/main/needlehaystack/PaulGrahamEssays/wisdom.txt
+https://github.com/gkamradt/LLMTest_NeedleInAHaystack/raw/main/needlehaystack/PaulGrahamEssays/worked.txt
diff --git a/experiments/ruler/data/synthetic/json/download_paulgraham_essay.py b/experiments/ruler/data/synthetic/json/download_paulgraham_essay.py
new file mode 100644
index 0000000..8c2cea4
--- /dev/null
+++ b/experiments/ruler/data/synthetic/json/download_paulgraham_essay.py
@@ -0,0 +1,74 @@
+# Copyright (c) 2024 Microsoft
+# Licensed under The MIT License [see LICENSE for details]
+
+import glob
+import json
+import os
+import shutil
+import urllib.request
+
+import html2text
+from bs4 import BeautifulSoup
+from tqdm import tqdm
+
+temp_folder_repo = "essay_repo"
+temp_folder_html = "essay_html"
+os.makedirs(temp_folder_repo, exist_ok=True)
+os.makedirs(temp_folder_html, exist_ok=True)
+
+h = html2text.HTML2Text()
+h.ignore_images = True
+h.ignore_tables = True
+h.escape_all = True
+h.reference_links = False
+h.mark_code = False
+
+with open("PaulGrahamEssays_URLs.txt") as f:
+ urls = [line.strip() for line in f]
+
+for url in tqdm(urls):
+ if ".html" in url:
+ filename = url.split("/")[-1].replace(".html", ".txt")
+ try:
+ with urllib.request.urlopen(url) as website:
+                content = website.read().decode("unicode_escape")
+ soup = BeautifulSoup(content, "html.parser")
+ specific_tag = soup.find("font")
+ parsed = h.handle(str(specific_tag))
+
+ with open(os.path.join(temp_folder_html, filename), "w") as file:
+ file.write(parsed)
+
+ except Exception as e:
+ print(f"Fail download {filename}, ({e})")
+
+ else:
+ filename = url.split("/")[-1]
+ try:
+ with urllib.request.urlopen(url) as website:
+ content = website.read().decode("utf-8")
+
+ with open(os.path.join(temp_folder_repo, filename), "w") as file:
+ file.write(content)
+
+ except Exception as e:
+ print(f"Fail download {filename}, ({e})")
+
+files_repo = glob.glob(os.path.join(temp_folder_repo, "*.txt"))
+files_html = glob.glob(os.path.join(temp_folder_html, "*.txt"))
+print(
+    f"Downloaded {len(files_repo)} essays from `https://github.com/gkamradt/LLMTest_NeedleInAHaystack/`"
+)
+print(f"Downloaded {len(files_html)} essays from `http://www.paulgraham.com/`")
+
+text = ""
+for file in files_repo + files_html:
+ with open(file, "r") as f:
+ text += f.read()
+
+with open("PaulGrahamEssays.json", "w") as f:
+ json.dump({"text": text}, f)
+
+
+shutil.rmtree(temp_folder_repo)
+shutil.rmtree(temp_folder_html)
diff --git a/experiments/ruler/data/synthetic/json/download_qa_dataset.sh b/experiments/ruler/data/synthetic/json/download_qa_dataset.sh
new file mode 100644
index 0000000..1faf4fc
--- /dev/null
+++ b/experiments/ruler/data/synthetic/json/download_qa_dataset.sh
@@ -0,0 +1,5 @@
+# Copyright (c) 2024 Microsoft
+# Licensed under The MIT License [see LICENSE for details]
+
+wget https://rajpurkar.github.io/SQuAD-explorer/dataset/dev-v2.0.json -O squad.json
+wget http://curtis.ml.cmu.edu/datasets/hotpot/hotpot_dev_distractor_v1.json -O hotpotqa.json
diff --git a/experiments/ruler/data/synthetic/niah.py b/experiments/ruler/data/synthetic/niah.py
new file mode 100644
index 0000000..b6555f7
--- /dev/null
+++ b/experiments/ruler/data/synthetic/niah.py
@@ -0,0 +1,332 @@
+# Copyright (c) 2024 Microsoft
+# Licensed under The MIT License [see LICENSE for details]
+
+"""
+Create a dataset jsonl file for needle in a haystack.
+
+python niah.py \
+ --save_dir=./ \
+ --save_name=niah_single \
+ --tokenizer_path=tokenizer.model \
+ --tokenizer_type=nemo \
+ --max_seq_length=4096 \
+ --tokens_to_generate=128 \
+ --num_samples=10 \
+ --template="Some special magic {type_needle_v} are hidden within the following text. Make sure to memorize it. I will quiz you about the {type_needle_v} afterwards.\n{context}\nWhat are all the special magic {type_needle_v} for {query} mentioned in the provided text? The special magic {type_needle_v} for {query} mentioned in the provided text are"
+"""
+import argparse
+import importlib
+import json
+import os
+import random
+import re
+import sys
+import uuid
+from pathlib import Path
+
+import numpy as np
+import wonderwords
+from nemo.collections.asr.parts.utils.manifest_utils import (
+ read_manifest,
+ write_manifest,
+)
+from tqdm import tqdm
+
+sys.path.append(os.path.join(os.path.dirname(os.path.abspath(__file__)), ".."))
+from nltk.tokenize import sent_tokenize
+from tokenizer import select_tokenizer
+
+parser = argparse.ArgumentParser()
+# Basic Configurations
+parser.add_argument(
+ "--save_dir", type=Path, required=True, help="dataset folder to save dataset"
+)
+parser.add_argument(
+ "--save_name", type=str, required=True, help="name of the save dataset jsonl file"
+)
+parser.add_argument(
+ "--subset", type=str, default="validation", help="Options: validation or test"
+)
+parser.add_argument(
+ "--tokenizer_path", type=str, required=True, help="path to the tokenizer model"
+)
+parser.add_argument(
+ "--tokenizer_type", type=str, default="nemo", help="[Options] nemo, hf, openai."
+)
+parser.add_argument(
+ "--max_seq_length",
+ type=int,
+ required=True,
+ help="max sequence length including all input tokens and generated tokens.",
+)
+parser.add_argument(
+ "--tokens_to_generate",
+ type=int,
+ required=True,
+ help="expected generated token amount.",
+)
+parser.add_argument(
+ "--num_samples", type=int, required=True, help="number of samples to generate"
+)
+parser.add_argument("--random_seed", type=int, default=42)
+parser.add_argument("--template", type=str, default="", help="prompt template")
+parser.add_argument(
+ "--remove_newline_tab",
+ action="store_true",
+ help="remove `\n` and `\t` in all strings.",
+)
+
+# Complexity Configurations
+parser.add_argument("--num_needle_k", type=int, default=1)
+parser.add_argument("--num_needle_v", type=int, default=1)
+parser.add_argument("--num_needle_q", type=int, default=1)
+parser.add_argument(
+ "--type_haystack",
+ type=str,
+ default="essay",
+ help="[Options] repeat, essay, needle.",
+)
+parser.add_argument(
+ "--type_needle_k",
+ type=str,
+ default="words",
+ help="[Options] numbers, words, uuids.",
+)
+parser.add_argument(
+ "--type_needle_v",
+ type=str,
+ default="numbers",
+ help="[Options] numbers, words, uuids.",
+)
+
+args = parser.parse_args()
+random.seed(args.random_seed)
+np.random.seed(args.random_seed)
+args.num_needle_k = max(args.num_needle_k, args.num_needle_q)
+
+# Load Tokenizer
+TOKENIZER = select_tokenizer(args.tokenizer_type, args.tokenizer_path)
+
+# Define Needle/Haystack Format
+needle = "One of the special magic {type_needle_v} for {key} is: {value}."
+if args.type_haystack == "essay":
+ essay = os.path.join(
+ os.path.dirname(os.path.abspath(__file__)), "json/PaulGrahamEssays.json"
+ )
+ essay = json.load(open(essay))["text"]
+ haystack = re.sub(r"\s+", " ", essay).split(" ")
+elif args.type_haystack == "repeat":
+ haystack = "The grass is green. The sky is blue. The sun is yellow. Here we go. There and back again."
+elif args.type_haystack == "needle":
+ haystack = needle
+else:
+ raise NotImplementedError(f"{args.type_haystack} is not implemented.")
+
+
+# Words
+nouns = wonderwords.random_word._get_words_from_text_file("nounlist.txt")
+adjs = wonderwords.random_word._get_words_from_text_file("adjectivelist.txt")
+# verbs = wonderwords.random_word._get_words_from_text_file("verblist.txt")
+words = [f"{adj}-{noun}" for adj in adjs for noun in nouns]
+words = sorted(list(set(words)))
+
+
+# Positions
+DEPTHS = list(np.round(np.linspace(0, 100, num=40, endpoint=True)).astype(int))
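+# DEPTHS holds 40 evenly spaced insertion depths as integer percentages,
+# roughly [0, 3, 5, 8, ..., 97, 100]; needles are inserted at a random sample of these depths.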
+
+
+def generate_random_number(num_digits=7):
+ lower_bound = 10 ** (num_digits - 1)
+ upper_bound = 10**num_digits - 1
+ return str(random.randint(lower_bound, upper_bound))
+
+
+def generate_random_word():
+ word = random.choice(words)
+ return word
+
+
+def generate_random_uuid():
+ return str(uuid.UUID(int=random.getrandbits(128), version=4))
+
+
+def generate_random(type_needle: str):
+ if type_needle == "numbers":
+ return generate_random_number()
+ elif type_needle == "words":
+ return generate_random_word()
+ elif type_needle == "uuids":
+ return generate_random_uuid()
+ else:
+        raise NotImplementedError(f"{type_needle} is not implemented.")
+
+
+def generate_input_output(num_haystack):
+ keys, values, needles = [], [], []
+ for _ in range(args.num_needle_k):
+ keys.append(generate_random(args.type_needle_k))
+ value = []
+ for _ in range(args.num_needle_v):
+ value.append(generate_random(args.type_needle_v))
+ needles.append(
+ needle.format(
+ type_needle_v=args.type_needle_v,
+ key=keys[-1],
+ value=value[-1],
+ )
+ )
+ values.append(value)
+
+ random.Random(args.random_seed).shuffle(needles)
+
+ # Context
+ if args.type_haystack == "essay":
+ text = " ".join(haystack[:num_haystack])
+ document_sents = sent_tokenize(text.strip())
+ insertion_positions = (
+ [0]
+ + sorted(
+ [
+ int(len(document_sents) * (depth / 100))
+ for depth in random.sample(DEPTHS, len(needles))
+ ]
+ )
+ + [len(document_sents)]
+ )
+ document_sents_list = []
+ for i in range(1, len(insertion_positions)):
+ last_pos = insertion_positions[i - 1]
+ next_pos = insertion_positions[i]
+ document_sents_list.append(" ".join(document_sents[last_pos:next_pos]))
+ if i - 1 < len(needles):
+ document_sents_list.append(needles[i - 1])
+ context = " ".join(document_sents_list)
+
+ else:
+ if args.type_haystack == "repeat":
+ sentences = [haystack] * num_haystack
+ elif args.type_haystack == "needle":
+ sentences = [
+ haystack.format(
+ type_needle_v=args.type_needle_v,
+ key=generate_random(args.type_needle_k),
+ value=generate_random(args.type_needle_v),
+ )
+ for _ in range(num_haystack)
+ ]
+
+ indexes = sorted(random.sample(range(num_haystack), len(needles)), reverse=True)
+ for index, element in zip(indexes, needles):
+ sentences.insert(index, element)
+ context = "\n".join(sentences)
+
+ ## Query and Answer
+ indices = random.sample(range(args.num_needle_k), args.num_needle_q)
+ queries = [keys[i] for i in indices]
+ answers = [a for i in indices for a in values[i]]
+ query = (
+ ", ".join(queries[:-1]) + ", and " + queries[-1]
+ if len(queries) > 1
+ else queries[0]
+ )
+
+ template = args.template
+ type_needle_v = args.type_needle_v
+ if args.num_needle_q * args.num_needle_v == 1:
+ template = template.replace("Some", "A")
+ template = template.replace("are all", "is")
+ template = template.replace("are", "is")
+ template = template.replace("answers", "answer")
+ type_needle_v = type_needle_v[:-1] # remove "s"
+
+ input_text = template.format(
+ type_needle_v=type_needle_v,
+ context=context,
+ query=query,
+ )
+
+ return input_text, answers
+
+
+def generate_samples(
+ num_samples: int, max_seq_length: int, save_dir: str, incremental: int = 500
+):
+ write_jsons = []
+ tokens_to_generate = args.tokens_to_generate
+
+ if args.type_haystack == "essay":
+ incremental = 500
+ elif args.type_haystack == "repeat":
+ incremental = 25
+ elif args.type_haystack == "needle":
+ incremental = 25
+
+ if args.type_haystack != "essay" and args.max_seq_length < 4096:
+ incremental = 5
+
+ num_haystack = incremental
+
+ total_tokens = 0 # Track the total tokens generated for the first example
+ while total_tokens + tokens_to_generate < max_seq_length:
+ input_text, answer = generate_input_output(num_haystack)
+ # Calculate the number of tokens in the example
+ total_tokens = len(TOKENIZER.text_to_tokens(input_text + " ".join(answer)))
+ print(
+ f"Max length {max_seq_length} | Current length {total_tokens + tokens_to_generate} | Haystack: {num_haystack}"
+ )
+ if total_tokens + tokens_to_generate > max_seq_length:
+ num_haystack -= incremental
+ break
+
+ if args.type_haystack == "essay" and num_haystack > len(haystack):
+ num_haystack = len(haystack)
+ break
+
+ num_haystack += incremental
+
+ print("Num haystack:", num_haystack)
+
+ # Generate samples
+ for index in tqdm(range(num_samples)):
+ used_haystack = num_haystack
+ while True:
+ try:
+ input_text, answer = generate_input_output(used_haystack)
+ length = len(TOKENIZER.text_to_tokens(input_text)) + tokens_to_generate
+ assert length <= max_seq_length, f"{length} exceeds max_seq_length."
+ break
+            except Exception:
+ if used_haystack > incremental:
+ used_haystack -= incremental
+
+ if args.remove_newline_tab:
+ input_text = " ".join(
+ input_text.replace("\n", " ").replace("\t", " ").strip().split()
+ )
+
+ formatted_output = {
+ "index": index,
+ "input": input_text,
+ "outputs": answer,
+ "length": length,
+ }
+ write_jsons.append(formatted_output)
+
+ return write_jsons
+
+
+def main():
+ save_file = args.save_dir / f"{args.save_name}" / f"{args.subset}.jsonl"
+ save_file.parent.mkdir(parents=True, exist_ok=True)
+
+ write_jsons = generate_samples(
+ num_samples=args.num_samples,
+ max_seq_length=args.max_seq_length,
+ save_dir=args.save_dir,
+ )
+
+ write_manifest(save_file, write_jsons)
+
+
+if __name__ == "__main__":
+ main()
diff --git a/experiments/ruler/data/synthetic/qa.py b/experiments/ruler/data/synthetic/qa.py
new file mode 100644
index 0000000..8f0c038
--- /dev/null
+++ b/experiments/ruler/data/synthetic/qa.py
@@ -0,0 +1,262 @@
+# Copyright (c) 2024 Microsoft
+# Licensed under The MIT License [see LICENSE for details]
+
+"""
+Create a dataset jsonl file for QA task.
+
+python qa.py \
+ --save_dir=./ \
+ --save_name=niah_single \
+ --tokenizer_path=tokenizer.model \
+ --tokenizer_type=nemo \
+ --max_seq_length=4096 \
+ --tokens_to_generate=128 \
+ --num_samples=10 \
+ --template="Answer the question based on the given documents. Only give me the answer and do not output any other words.\n\nThe following are given documents.\n\n{context}\n\nAnswer the question based on the given documents. Only give me the answer and do not output any other words.\n\nQuestion: {query} Answer:"
+"""
+import argparse
+import json
+import os
+import random
+import re
+import sys
+from pathlib import Path
+
+import numpy as np
+from nemo.collections.asr.parts.utils.manifest_utils import (
+ read_manifest,
+ write_manifest,
+)
+from tqdm import tqdm
+
+sys.path.append(os.path.join(os.path.dirname(os.path.abspath(__file__)), ".."))
+from tokenizer import select_tokenizer
+
+parser = argparse.ArgumentParser()
+# Basic Configurations
+parser.add_argument(
+ "--save_dir", type=Path, required=True, help="dataset folder to save dataset"
+)
+parser.add_argument(
+ "--save_name", type=str, required=True, help="name of the save dataset jsonl file"
+)
+parser.add_argument(
+ "--subset", type=str, default="validation", help="Options: validation or test"
+)
+parser.add_argument(
+ "--tokenizer_path", type=str, required=True, help="path to the tokenizer model"
+)
+parser.add_argument(
+ "--tokenizer_type", type=str, default="nemo", help="[Options] nemo, hf, openai."
+)
+parser.add_argument(
+ "--max_seq_length",
+ type=int,
+ required=True,
+ help="max sequence length including all input tokens and generated tokens.",
+)
+parser.add_argument(
+ "--tokens_to_generate",
+ type=int,
+ required=True,
+ help="expected generated token amount.",
+)
+parser.add_argument(
+ "--num_samples", type=int, required=True, help="number of samples to generate"
+)
+parser.add_argument(
+ "--pre_samples", type=int, default=0, help="number of samples are already generated"
+)
+parser.add_argument("--random_seed", type=int, default=42)
+parser.add_argument("--template", type=str, required=True, help="prompt template")
+parser.add_argument(
+ "--remove_newline_tab",
+ action="store_true",
+ help="remove `\n` and `\t` in all strings.",
+)
+
+# Complexity Configurations
+parser.add_argument("--dataset", type=str, required=True, help="dataset file")
+
+args = parser.parse_args()
+random.seed(args.random_seed)
+np.random.seed(args.random_seed)
+
+# Load Tokenizer
+TOKENIZER = select_tokenizer(args.tokenizer_type, args.tokenizer_path)
+
+
+# Read SQuAD QA dataset
+def read_squad(file):
+ with open(file) as f:
+ data = json.load(f)
+
+ total_docs = [p["context"] for d in data["data"] for p in d["paragraphs"]]
+ total_docs = sorted(list(set(total_docs)))
+ total_docs_dict = {c: idx for idx, c in enumerate(total_docs)}
+
+ total_qas = []
+ for d in data["data"]:
+ more_docs = [total_docs_dict[p["context"]] for p in d["paragraphs"]]
+ for p in d["paragraphs"]:
+ for qas in p["qas"]:
+ if not qas["is_impossible"]:
+ total_qas.append(
+ {
+ "query": qas["question"],
+ "outputs": [a["text"] for a in qas["answers"]],
+ "context": [total_docs_dict[p["context"]]],
+ "more_context": [
+ idx
+ for idx in more_docs
+ if idx != total_docs_dict[p["context"]]
+ ],
+ }
+ )
+
+ return total_qas, total_docs
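+
+# Each entry of total_qas has the shape
+#   {"query": str, "outputs": [str], "context": [doc_idx], "more_context": [doc_idx, ...]},
+# where doc_idx indexes into the deduplicated total_docs list.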
+
+
+# Read Hotpot QA dataset
+def read_hotpotqa(file):
+ with open(file) as f:
+ data = json.load(f)
+
+ total_docs = [f"{t}\n{''.join(p)}" for d in data for t, p in d["context"]]
+ total_docs = sorted(list(set(total_docs)))
+ total_docs_dict = {c: idx for idx, c in enumerate(total_docs)}
+
+ total_qas = []
+ for d in data:
+ total_qas.append(
+ {
+ "query": d["question"],
+ "outputs": [d["answer"]],
+ "context": [
+ total_docs_dict[f"{t}\n{''.join(p)}"] for t, p in d["context"]
+ ],
+ }
+ )
+
+ return total_qas, total_docs
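+
+# Each entry of total_qas has the shape
+#   {"query": str, "outputs": [str], "context": [doc_idx, ...]},
+# with one index per supporting document of the HotpotQA example.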
+
+
+DOCUMENT_PROMPT = "Document {i}:\n{document}"
+if args.dataset == "squad":
+ QAS, DOCS = read_squad(
+ os.path.join(os.path.dirname(os.path.abspath(__file__)), "json/squad.json")
+ )
+elif args.dataset == "hotpotqa":
+ QAS, DOCS = read_hotpotqa(
+ os.path.join(os.path.dirname(os.path.abspath(__file__)), "json/hotpotqa.json")
+ )
+else:
+ raise NotImplementedError(f"{args.dataset} is not implemented.")
+
+
+def generate_input_output(index, num_docs):
+ curr_q = QAS[index]["query"]
+ curr_a = QAS[index]["outputs"]
+ curr_docs = QAS[index]["context"]
+ curr_more = QAS[index].get("more_context", [])
+ if num_docs < len(DOCS):
+ if (num_docs - len(curr_docs)) > len(curr_more):
+ addition_docs = [
+ i for i, d in enumerate(DOCS) if i not in curr_docs + curr_more
+ ]
+ all_docs = (
+ curr_docs
+ + curr_more
+ + random.sample(
+ addition_docs, max(0, num_docs - len(curr_docs) - len(curr_more))
+ )
+ )
+ else:
+ all_docs = curr_docs + random.sample(curr_more, num_docs - len(curr_docs))
+
+ all_docs = [DOCS[idx] for idx in all_docs]
+ else:
+ all_docs = DOCS
+
+ random.Random(args.random_seed).shuffle(all_docs)
+
+ context = "\n\n".join(
+ [DOCUMENT_PROMPT.format(i=i + 1, document=d) for i, d in enumerate(all_docs)]
+ )
+ input_text = args.template.format(context=context, query=curr_q)
+ return input_text, curr_a
+
+
+def generate_samples(
+ num_samples: int, max_seq_length: int, save_dir: str, incremental: int = 10
+):
+ write_jsons = []
+ tokens_to_generate = args.tokens_to_generate
+
+ # Find the perfect num_docs
+ num_docs = incremental
+
+ total_tokens = 0 # Track the total tokens generated for this example
+ while total_tokens + tokens_to_generate < max_seq_length:
+ input_text, answer = generate_input_output(0, num_docs)
+ # Calculate the number of tokens in the example
+ total_tokens = len(TOKENIZER.text_to_tokens(input_text + f" {answer}"))
+ print(
+ f"Max length {max_seq_length} | Current length {total_tokens + tokens_to_generate} | Docs: {num_docs}"
+ )
+ if total_tokens + tokens_to_generate > max_seq_length:
+ num_docs -= incremental
+ break
+
+ num_docs += incremental
+ if num_docs > len(DOCS):
+ num_docs = len(DOCS)
+ break
+ print("Number of documents:", num_docs)
+
+ # Generate samples
+ for index in tqdm(range(num_samples)):
+ used_docs = num_docs
+ while True:
+ try:
+ input_text, answer = generate_input_output(
+ index + args.pre_samples, used_docs
+ )
+ length = len(TOKENIZER.text_to_tokens(input_text)) + tokens_to_generate
+ assert length <= max_seq_length, f"{length} exceeds max_seq_length."
+ break
+            except Exception:
+ if used_docs > incremental:
+ used_docs -= incremental
+
+ if args.remove_newline_tab:
+ input_text = " ".join(
+ input_text.replace("\n", " ").replace("\t", " ").strip().split()
+ )
+
+ formatted_output = {
+ "index": index,
+ "input": input_text,
+ "outputs": answer,
+ "length": length,
+ }
+ write_jsons.append(formatted_output)
+
+ return write_jsons
+
+
+def main():
+ save_file = args.save_dir / f"{args.save_name}" / f"{args.subset}.jsonl"
+ save_file.parent.mkdir(parents=True, exist_ok=True)
+
+ write_jsons = generate_samples(
+ num_samples=args.num_samples,
+ max_seq_length=args.max_seq_length,
+ save_dir=args.save_dir,
+ )
+
+ write_manifest(save_file, write_jsons)
+
+
+if __name__ == "__main__":
+ main()
diff --git a/experiments/ruler/data/synthetic/variable_tracking.py b/experiments/ruler/data/synthetic/variable_tracking.py
new file mode 100644
index 0000000..f5b736b
--- /dev/null
+++ b/experiments/ruler/data/synthetic/variable_tracking.py
@@ -0,0 +1,261 @@
+# Copyright (c) 2024 Microsoft
+# Licensed under The MIT License [see LICENSE for details]
+
+"""
+Create a dataset jsonl file for variable tracking.
+
+python variable_tracking.py \
+ --save_dir=./ \
+ --save_name=vt \
+ --tokenizer_path=tokenizer.model \
+ --tokenizer_type nemo \
+ --max_seq_length 4096 \
+ --tokens_to_generate 30 \
+ --num_samples 10 \
+ --random_seed 42 \
+ --num_chains 1 --num_hops 4 \
+ --template "[INST] Memorize and track the chain(s) of variable assignment hidden in the following text.\n\n{context}\nQuestion: Find all variables that are assigned the value {query} in the text above. [/INST] Answer: According to the chain(s) of variable assignment in the text above, {num_v} variables are assgined the value {query}, they are: "
+"""
+import argparse
+import os
+import random
+import string
+import sys
+from pathlib import Path
+
+from constants import TASKS
+from nemo.collections.asr.parts.utils.manifest_utils import (
+ read_manifest,
+ write_manifest,
+)
+from tqdm import tqdm
+
+sys.path.append(os.path.join(os.path.dirname(os.path.abspath(__file__)), ".."))
+import numpy as np
+from tokenizer import select_tokenizer
+
+parser = argparse.ArgumentParser()
+parser.add_argument(
+ "--save_dir", type=Path, required=True, help="dataset folder to save dataset"
+)
+parser.add_argument(
+ "--save_name", type=str, required=True, help="name of the save dataset jsonl file"
+)
+parser.add_argument(
+ "--subset", type=str, default="validation", help="Options: validation or test"
+)
+parser.add_argument(
+ "--tokenizer_path", type=str, required=True, help="path to the tokenizer model"
+)
+parser.add_argument(
+ "--tokenizer_type", type=str, default="nemo", help="[Options] nemo, hf, openai."
+)
+parser.add_argument(
+ "--max_seq_length",
+ type=int,
+ required=True,
+ help="max sequence length including all input tokens and generated tokens.",
+)
+parser.add_argument(
+ "--tokens_to_generate", type=int, default=120, help="number of tokens to generate"
+)
+parser.add_argument(
+ "--num_samples", type=int, required=True, help="number of samples to generate"
+)
+parser.add_argument("--random_seed", type=int, default=42)
+parser.add_argument("--template", type=str, default="", help="prompt template")
+parser.add_argument(
+ "--remove_newline_tab",
+ action="store_true",
+ help="remove `\n` and `\t` in all strings.",
+)
+
+parser.add_argument(
+ "--num_chains", type=int, default=1, help="number of inserted variable chains"
+)
+parser.add_argument(
+ "--num_hops", type=int, default=4, help="number of hops in each chain"
+)
+parser.add_argument("--add_fewshot", action="store_true", default=False)
+
+args = parser.parse_args()
+random.seed(args.random_seed)
+np.random.seed(args.random_seed)
+
+# Load Tokenizer
+TOKENIZER = select_tokenizer(args.tokenizer_type, args.tokenizer_path)
+
+
+def generate_chains(num_chains, num_hops, is_icl=False):
+ vars_all = []
+ k = 5 if not is_icl else 3
+ num_hops = num_hops if not is_icl else min(10, num_hops)
+ vars_all = [
+ "".join(random.choices(string.ascii_uppercase, k=k)).upper()
+ for _ in range((num_hops + 1) * num_chains)
+ ]
+ while len(set(vars_all)) < num_chains * (num_hops + 1):
+ vars_all.append("".join(random.choices(string.ascii_uppercase, k=k)).upper())
+
+ vars_ret = []
+ chains_ret = []
+ for i in range(0, len(vars_all), num_hops + 1):
+ this_vars = vars_all[i : i + num_hops + 1]
+ vars_ret.append(this_vars)
+ this_chain = [f"VAR {this_vars[0]} = {np.random.randint(10000, 99999)}"]
+ for j in range(num_hops):
+ this_chain.append(f"VAR {this_vars[j+1]} = VAR {this_vars[j]} ")
+ chains_ret.append(this_chain)
+ return vars_ret, chains_ret
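+
+# Illustrative output for num_chains=1, num_hops=2 (variable names and the seed
+# value are random): one chain such as
+#   ["VAR ABCDE = 12345", "VAR FGHIJ = VAR ABCDE ", "VAR KLMNO = VAR FGHIJ "]
+# together with its variable list ["ABCDE", "FGHIJ", "KLMNO"].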
+
+
+def generate_input_output(num_noises, num_chains, num_hops, is_icl=False):
+ vars, chains = generate_chains(num_chains, num_hops, is_icl=is_icl)
+
+ noise = "The grass is green. The sky is blue. The sun is yellow. Here we go. There and back again.\n"
+
+ # Create a list of the repeated noise
+ sentences = [noise] * num_noises
+ if len(sentences) <= len(chains[0]):
+ sentences = [
+ n + "." if len(n.strip()) > 0 else n
+ for n in [x for noise in sentences for x in noise.split(".")]
+ ]
+ try:
+ assert len(sentences) > len(
+ chains[0]
+ ), "Noises too short, unable to generate data"
+    except AssertionError:
+        print("Reducing chain length because there are not enough noise sentences")
+ chains = [chain[: len(sentences) - 1] for chain in chains]
+ # sample random positions to insert variable assignment
+ for chain_i in chains:
+ # sample random positions (sorted) to insert variable assignment
+ positions = list(sorted(random.sample(range(len(sentences)), len(chain_i))))
+ for insert_pi, j in zip(positions, range(len(chain_i))):
+ sentences.insert(insert_pi + j, chain_i[j])
+
+ # Insert the passkey sentence at the random position
+ context = " ".join(sentences)
+ context = context.replace(". \n", ".\n")
+
+ template = args.template
+ if is_icl:
+ # remove model template
+ cutoff = template.index(TASKS["variable_tracking"]["template"][:20])
+ cutoff_ans = template.index(TASKS["variable_tracking"]["answer_prefix"][:10])
+ template = (
+ " ".join(template[cutoff:cutoff_ans].split()[:-1]) + template[cutoff_ans:]
+ )
+
+ value = chains[0][0].split("=")[-1].strip()
+ input_text = template.format(context=context, query=value, num_v=num_hops + 1)
+
+ return input_text, vars[0]
+
+
+def sys_vartrack_w_noise_random(
+ num_samples: int,
+ max_seq_length: int,
+ incremental: int = 10,
+ num_chains: int = 1,
+ num_hops: int = 4,
+ add_fewshot: bool = True,
+ icl_example: str = None,
+):
+ write_jsons = []
+ tokens_to_generate = args.tokens_to_generate
+
+ # Find the perfect num_noises
+ num_noises = incremental
+
+ total_tokens = 0 # Track the total tokens generated for this example
+ example_tokens = 0
+ if add_fewshot and (icl_example is not None):
+ icl_example_out = " ".join(icl_example["outputs"])
+ icl_example = icl_example["input"] + " " + icl_example_out + "\n\n"
+ example_tokens = len(TOKENIZER.text_to_tokens(icl_example))
+
+ while total_tokens + tokens_to_generate + example_tokens < max_seq_length:
+ input_text, answer = generate_input_output(
+ num_noises, num_chains, num_hops, is_icl=add_fewshot & (icl_example is None)
+ )
+ # Calculate the number of tokens in the example
+ total_tokens = len(TOKENIZER.text_to_tokens(input_text + f" {answer}"))
+ print(
+ f"Max length {max_seq_length} | Current length {total_tokens + tokens_to_generate + example_tokens} | Noises: {num_noises}"
+ )
+ if total_tokens + tokens_to_generate + example_tokens > max_seq_length:
+ num_noises -= incremental
+ break
+ num_noises += incremental
+ print("Num noises:", num_noises)
+
+ # Generate samples
+ for index in tqdm(range(num_samples)):
+ used_noises = num_noises
+ while True:
+ try:
+ input_text, answer = generate_input_output(
+ num_noises,
+ num_chains,
+ num_hops,
+ is_icl=add_fewshot & (icl_example is None),
+ )
+ length = (
+ len(TOKENIZER.text_to_tokens(input_text))
+ + tokens_to_generate
+ + example_tokens
+ )
+ assert length <= max_seq_length, f"{length} exceeds max_seq_length."
+ break
+            except Exception:
+ if used_noises > incremental:
+ used_noises -= incremental
+
+ if add_fewshot and (icl_example is not None):
+ # insert icl_example between model template and input
+ cutoff = input_text.index(TASKS["variable_tracking"]["template"][:20])
+ input_text = (
+ input_text[:cutoff] + " " + icl_example + "\n\n" + input_text[cutoff:]
+ )
+ if args.remove_newline_tab:
+ input_text = " ".join(
+ input_text.replace("\n", " ").replace("\t", " ").strip().split()
+ )
+
+ formatted_output = {
+ "index": index,
+ "input": input_text,
+ "outputs": answer,
+ "length": length,
+ }
+ write_jsons.append(formatted_output)
+
+ return write_jsons
+
+
+def main():
+ save_file = args.save_dir / f"{args.save_name}" / f"{args.subset}.jsonl"
+ save_file.parent.mkdir(parents=True, exist_ok=True)
+
+ icl_example = sys_vartrack_w_noise_random(
+ num_samples=1,
+ max_seq_length=500,
+ incremental=5,
+ num_chains=args.num_chains,
+ num_hops=args.num_hops,
+ )[0]
+ write_jsons = sys_vartrack_w_noise_random(
+ num_samples=args.num_samples,
+ max_seq_length=args.max_seq_length,
+ num_chains=args.num_chains,
+ num_hops=args.num_hops,
+ icl_example=icl_example,
+ )
+
+ write_manifest(save_file, write_jsons)
+
+
+if __name__ == "__main__":
+ main()
diff --git a/experiments/ruler/data/template.py b/experiments/ruler/data/template.py
new file mode 100644
index 0000000..07e2170
--- /dev/null
+++ b/experiments/ruler/data/template.py
@@ -0,0 +1,14 @@
+# Copyright (c) 2024 Microsoft
+# Licensed under The MIT License [see LICENSE for details]
+
+Templates = {
+ "base": "{task_template}",
+ "meta-chat": "[INST] {task_template} [/INST]",
+ "vicuna-chat": "A chat between a curious user and an artificial intelligence assistant. The assistant gives helpful, detailed, and polite answers to the user's questions. USER: {task_template} ASSISTANT:",
+ "lwm-chat": "You are a helpful assistant. USER: {task_template} ASSISTANT: ",
+ "command-r-chat": "<|START_OF_TURN_TOKEN|><|USER_TOKEN|>{task_template}<|END_OF_TURN_TOKEN|><|START_OF_TURN_TOKEN|><|CHATBOT_TOKEN|>",
+ "chatglm-chat": "[gMASK]sop<|user|> \n {task_template}<|assistant|> \n ",
+ "RWKV": "User: hi\n\nAssistant: Hi. I am your assistant and I will provide expert full response in full details. Please feel free to ask any question and I will always answer it\n\nUser: {task_template}\n\nAssistant:",
+ "llama-3": "<|start_header_id|>user<|end_header_id|> {task_template}<|eot_id|><|start_header_id|>assistant<|end_header_id|>",
+ "glm-4": "[gMASK]<|user|>\n {task_template}<|assistant|>\n",
+}
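+
+# Illustrative usage: a task prompt is wrapped into a chat template via str.format, e.g.
+#   Templates["meta-chat"].format(task_template="What is 2+2?")
+#   -> "[INST] What is 2+2? [/INST]"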
diff --git a/experiments/ruler/data/tokenizer.py b/experiments/ruler/data/tokenizer.py
new file mode 100644
index 0000000..e3c66f1
--- /dev/null
+++ b/experiments/ruler/data/tokenizer.py
@@ -0,0 +1,102 @@
+# Copyright (c) 2024 Microsoft
+# Licensed under The MIT License [see LICENSE for details]
+
+
+import os
+from typing import List
+
+from tenacity import retry, stop_after_attempt, wait_fixed, wait_random
+
+
+def select_tokenizer(tokenizer_type, tokenizer_path):
+ if tokenizer_type == "nemo":
+ return NeMoSentencePieceTokenizer(model_path=tokenizer_path)
+ elif tokenizer_type == "hf":
+ return HFTokenizer(model_path=tokenizer_path)
+ elif tokenizer_type == "openai":
+ return OpenAITokenizer(model_path=tokenizer_path)
+ elif tokenizer_type == "gemini":
+ return GeminiTokenizer(model_path=tokenizer_path)
+ else:
+ raise ValueError(f"Unknown tokenizer_type {tokenizer_type}")
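+
+# Illustrative usage (the model id below is only an example, not a requirement):
+#   TOKENIZER = select_tokenizer("hf", "meta-llama/Meta-Llama-3-8B-Instruct")
+#   tokens = TOKENIZER.text_to_tokens("hello world")  # list of subword strings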
+
+
+class NeMoSentencePieceTokenizer:
+ """
+ Tokenizer from NeMo SentencePieceTokenizer
+ """
+
+ def __init__(self, model_path) -> None:
+ from nemo.collections.common.tokenizers.sentencepiece_tokenizer import (
+ SentencePieceTokenizer,
+ )
+
+ self.tokenizer = SentencePieceTokenizer(model_path=model_path)
+
+ def text_to_tokens(self, text: str) -> List[str]:
+ tokens = self.tokenizer.text_to_tokens(text)
+ return tokens
+
+ def tokens_to_text(self, tokens: List[int]) -> str:
+ text = self.tokenizer.tokens_to_text(tokens)
+ return text
+
+
+class HFTokenizer:
+ """
+ Tokenizer from HF models
+ """
+
+ def __init__(self, model_path) -> None:
+ from transformers import AutoTokenizer
+
+ self.tokenizer = AutoTokenizer.from_pretrained(
+ model_path, trust_remote_code=True
+ )
+
+ def text_to_tokens(self, text: str) -> List[str]:
+ tokens = self.tokenizer.tokenize(text)
+ return tokens
+
+ def tokens_to_text(self, tokens: List[int]) -> str:
+ text = self.tokenizer.convert_tokens_to_string(tokens)
+ return text
+
+
+class OpenAITokenizer:
+ """
+ Tokenizer from tiktoken
+ """
+
+ def __init__(self, model_path="cl100k_base") -> None:
+ import tiktoken
+
+ self.tokenizer = tiktoken.get_encoding(model_path)
+
+ def text_to_tokens(self, text: str) -> List[int]:
+ tokens = self.tokenizer.encode(text)
+ return tokens
+
+ def tokens_to_text(self, tokens: List[int]) -> str:
+ text = self.tokenizer.decode(tokens)
+ return text
+
+
+class GeminiTokenizer:
+ """
+ Tokenizer from gemini
+ """
+
+ def __init__(self, model_path="gemini-1.5-pro-latest") -> None:
+ import google.generativeai as genai
+
+ genai.configure(api_key=os.environ["GEMINI_API_KEY"])
+ self.model = genai.GenerativeModel(model_path)
+
+ @retry(wait=wait_fixed(60) + wait_random(0, 10), stop=stop_after_attempt(3))
+ def text_to_tokens(self, text: str) -> List[int]:
+ tokens = list(range(self.model.count_tokens(text).total_tokens))
+ return tokens
+
+ def tokens_to_text(self, tokens: List[int]) -> str:
+ pass
diff --git a/experiments/ruler/eval/evaluate.py b/experiments/ruler/eval/evaluate.py
new file mode 100644
index 0000000..f61b25a
--- /dev/null
+++ b/experiments/ruler/eval/evaluate.py
@@ -0,0 +1,227 @@
+# Copyright (c) 2024 Microsoft
+# Licensed under The MIT License [see LICENSE for details]
+
+"""
+Get summary.csv with score and null predictions amount.
+
+Running
+```
+python evaluate.py \
+ --data_dir /path/to/your/prediction_jsonl_folder \
+ --benchmark synthetic
+```
+"""
+
+import argparse
+import os
+import re
+
+import nltk
+
+try:
+ nltk.data.find("tokenizers/punkt")
+except LookupError:
+ nltk.download("punkt")
+
+import importlib
+from collections import defaultdict
+from pathlib import Path
+
+import pandas as pd
+import yaml
+from nemo.collections.asr.parts.utils.manifest_utils import (
+ read_manifest,
+ write_manifest,
+)
+from tqdm import tqdm
+
+parser = argparse.ArgumentParser()
+parser.add_argument(
+ "--data_dir", type=str, required=True, help="path to the prediction jsonl files"
+)
+parser.add_argument(
+ "--benchmark", type=str, default="synthetic", help="Options: [synthetic]"
+)
+parser.add_argument(
+ "--verbose", type=int, default=0, help="how many lines you want to display."
+)
+args = parser.parse_args()
+
+
+def postprocess_pred(predict_str: str, task_config: dict):
+ predict_str = predict_str.strip()
+
+    # Replace all control (non-printable) characters with newlines
+ np_pattern = re.compile(r"[\x00-\x1f]")
+ predict_str = np_pattern.sub("\n", predict_str).strip()
+
+ return predict_str
+
+
+def get_pred_and_ref(
+ predictions_file: str,
+ task_config: dict,
+ input_field: str = "input",
+ references_field: str = "outputs",
+ prediction_field: str = "pred",
+ metadata_field: str = "others",
+):
+ lines = read_manifest(predictions_file)
+
+ inputs = []
+ predicts = []
+ references = []
+ indices = []
+
+ for line in tqdm(lines):
+ input = line[input_field]
+ predict = line[prediction_field]
+ predict = postprocess_pred(predict, task_config)
+ reference = line.get(references_field, [line.get("output", "")])
+ index = line[metadata_field].get("id", line["index"])
+
+ inputs.append(input)
+ predicts.append(predict)
+ references.append(reference)
+ indices.append(index)
+
+ return inputs, predicts, references, indices
+
+
+def run_evaluation_per_task(task_config: dict, predictions_file: str, verbose: int = 0):
+ inputs, predicts, references, indices = get_pred_and_ref(
+ predictions_file=predictions_file,
+ task_config=task_config,
+ )
+
+ task_nulls = f"{sum([len(x)==0 for x in predicts])}/{len(predicts)}"
+
+ if len(references) > 0 and references[0][0] is not None:
+ task_score = task_config["metric_fn"](predicts, references)
+ else:
+ task_score = 0.0
+
+ if verbose != 0:
+ print("=" * 40)
+ for i, (input, reference, predict) in enumerate(
+ zip(inputs, references, predicts)
+ ):
+ print(f"Input : {input}")
+ print(f"Reference : {reference}")
+ print(f"Prediction: {predict}")
+ print("=" * 40)
+ if i > verbose:
+ break
+
+ return task_score, task_nulls, predicts, indices
+
+
+def write_evaluation(results: dict):
+ tasks = list(results.keys())
+ score = [results[task]["score"] for task in tasks]
+ nulls = [results[task]["nulls"] for task in tasks]
+ dfs = [
+ ["Tasks"] + tasks,
+ ["Score"] + score,
+ ["Nulls"] + nulls,
+ ]
+
+ output_file = os.path.join(
+ args.data_dir, "summary.csv" if len(tasks) > 1 else f"summary-{tasks[0]}.csv"
+ )
+ df = pd.DataFrame(dfs)
+ df.to_csv(output_file, index=False)
+ print("\n=============================================\n")
+ print(df)
+ print(f"\nSaved eval results to {output_file}")
+
+
+def write_submission(results: dict):
+ COLUMNS = ["Task", "ID", "Prediction"]
+ dfs = pd.DataFrame(columns=COLUMNS, data=[])
+
+ for task, result in results.items():
+ df = pd.DataFrame(
+ {"Task": task, "ID": result["indices"], "Prediction": result["predicts"]}
+ )
+ dfs = pd.concat((dfs, df[COLUMNS]))
+
+ output_file = os.path.join(args.data_dir, "submission.csv")
+ dfs = dfs.reset_index(drop=True)
+ dfs.to_csv(output_file, index=False)
+ print(f"\nSaved submission results to {output_file}")
+
+
+def aggregate_chunk(folder):
+ jsonl_files = [file for file in os.listdir(folder) if Path(file).suffix == ".jsonl"]
+ chunk_files = sorted(
+ [file for file in jsonl_files if re.match(r".*[^_]+-\d+\.jsonl", file)]
+ )
+ chunk_files_dict = defaultdict(list)
+ for file in chunk_files:
+ task = "-".join(file.split("-")[:-1])
+ chunk_files_dict[task].append(file)
+
+ for task, files in chunk_files_dict.items():
+ lines = []
+ for file in sorted(files):
+ file = os.path.join(folder, file)
+ lines += read_manifest(file)
+ os.remove(file) # Remove chunk files
+ write_manifest(os.path.join(folder, f"{task}.jsonl"), lines)
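+
+# Example: chunked prediction files such as "niah_single-0.jsonl" and
+# "niah_single-1.jsonl" are concatenated (in sorted order) into "niah_single.jsonl",
+# and the per-chunk files are removed.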
+
+
+def main():
+ curr_folder = os.path.dirname(os.path.abspath(__file__))
+
+ try:
+ module = importlib.import_module(f"{args.benchmark}.constants")
+ except ImportError:
+ print(f"Module eval.{args.benchmark}.constants not found.")
+
+ tasks_base = module.TASKS
+ with open(os.path.join(curr_folder, f"../{args.benchmark}.yaml"), "r") as f:
+ tasks_customized = yaml.safe_load(f)
+
+ TASKS = tasks_customized
+ for _, config in TASKS.items():
+ config.update(tasks_base[config["task"]])
+
+ print(f"Total tasks: {list(TASKS.keys())}")
+
+ # Aggregate all prediction files
+ aggregate_chunk(args.data_dir)
+
+ # Get scores and nulls
+ jsonl_files = [
+ file for file in os.listdir(args.data_dir) if Path(file).suffix == ".jsonl"
+ ]
+ eval_results = {}
+ subm_results = {}
+
+ for task, config in TASKS.items():
+ if f"{task}.jsonl" not in jsonl_files:
+ print(f"Prediction file {task}.jsonl is not found.")
+ continue
+
+ print(f"Evaluate task {task}...")
+ task_score, task_nulls, predicts, indices = run_evaluation_per_task(
+ predictions_file=os.path.join(args.data_dir, f"{task}.jsonl"),
+ task_config=config,
+ )
+ eval_results[task] = {
+ "score": task_score,
+ "nulls": task_nulls,
+ }
+ subm_results[task] = {
+ "predicts": predicts,
+ "indices": indices,
+ }
+
+ # Write to csv
+ write_evaluation(eval_results)
+ write_submission(subm_results)
+
+
+if __name__ == "__main__":
+ main()
diff --git a/experiments/ruler/eval/synthetic/constants.py b/experiments/ruler/eval/synthetic/constants.py
new file mode 100644
index 0000000..b480d76
--- /dev/null
+++ b/experiments/ruler/eval/synthetic/constants.py
@@ -0,0 +1,55 @@
+# Copyright (c) 2024 Microsoft
+# Licensed under The MIT License [see LICENSE for details]
+
+"""
+Add a new task:
+
+TASK_NAME: {
+ 'metric_fn': the metric function with input (predictions: [str], references: [[str]]) to compute score.
+}
+"""
+
+
+def string_match_part(preds, refs):
+ score = (
+ sum(
+ [
+ max([1.0 if r.lower() in pred.lower() else 0.0 for r in ref])
+ for pred, ref in zip(preds, refs)
+ ]
+ )
+ / len(preds)
+ * 100
+ )
+ return round(score, 2)
+
+
+def string_match_all(preds, refs):
+ score = (
+ sum(
+ [
+ sum([1.0 if r.lower() in pred.lower() else 0.0 for r in ref]) / len(ref)
+ for pred, ref in zip(preds, refs)
+ ]
+ )
+ / len(preds)
+ * 100
+ )
+ return round(score, 2)
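+
+# Worked examples of the two metrics:
+#   string_match_part(["the code is 123"], [["123", "999"]])  -> 100.0  (any reference matches)
+#   string_match_all(["the code is 123"], [["123", "999"]])   ->  50.0  (1 of 2 references matched)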
+
+
+TASKS = {
+ "niah": {
+ "metric_fn": string_match_all,
+ },
+ "variable_tracking": {
+ "metric_fn": string_match_all,
+ },
+ "common_words_extraction": {
+ "metric_fn": string_match_all,
+ },
+ "freq_words_extraction": {"metric_fn": string_match_all},
+ "qa": {
+ "metric_fn": string_match_part,
+ },
+}
diff --git a/experiments/ruler/pred/call_api.py b/experiments/ruler/pred/call_api.py
new file mode 100644
index 0000000..997ac17
--- /dev/null
+++ b/experiments/ruler/pred/call_api.py
@@ -0,0 +1,343 @@
+# Copyright (c) 2024 Microsoft
+# Licensed under The MIT License [see LICENSE for details]
+
+"""
+Prepare prediction jsonl with field `pred`.
+dataset jsonl:
+{
+ "index" int,
+ "input": str,
+ "outputs": [str],
+}
+
+prediction jsonl:
+{
+ "index" int,
+ "input": str,
+ "outputs": [str],
+ "pred": str,
+}
+"""
+
+import argparse
+import importlib
+import json
+import math
+import os
+import sys
+import threading
+import time
+import traceback
+from pathlib import Path
+
+import yaml
+from nemo.collections.asr.parts.utils.manifest_utils import read_manifest
+from tqdm import tqdm
+
+SERVER_TYPES = (
+ "trtllm",
+ "vllm",
+ "openai",
+ "gemini",
+ "hf",
+ "minference",
+ "streaming",
+ "InfLLM",
+)
+
+
+class ServerAction(argparse.Action):
+ def __call__(self, parser, namespace, values, option_string=None):
+ namespace.server_type = values
+
+
+parser = argparse.ArgumentParser()
+# Data
+parser.add_argument(
+ "--data_dir", type=Path, required=True, help="path to load the dataset jsonl files"
+)
+parser.add_argument(
+ "--save_dir",
+ type=Path,
+ required=True,
+ help="path to save the prediction jsonl files",
+)
+parser.add_argument(
+ "--benchmark", type=str, default="synthetic", help="Options: [synthetic]"
+)
+parser.add_argument(
+ "--task", type=str, required=True, help="Options: tasks in benchmark"
+)
+parser.add_argument(
+ "--subset", type=str, default="validation", help="Options: validation or test"
+)
+parser.add_argument(
+ "--chunk_idx", type=int, default=0, help="index of current split chunk"
+)
+parser.add_argument("--chunk_amount", type=int, default=1, help="size of split chunk")
+
+# Server
+parser.add_argument(
+ "--server_type", default="nemo", action=ServerAction, choices=SERVER_TYPES
+)
+parser.add_argument("--server_host", type=str, default="127.0.0.1")
+parser.add_argument("--server_port", type=str, default="5000")
+parser.add_argument("--ssh_server", type=str)
+parser.add_argument("--ssh_key_path", type=str)
+parser.add_argument(
+ "--model_name_or_path",
+ type=str,
+ default="gpt-3.5-turbo",
+ help="supported models from OpenAI or HF (provide a key or a local path to the checkpoint)",
+)
+
+# Inference
+parser.add_argument("--temperature", type=float, default=1.0)
+parser.add_argument("--top_k", type=int, default=32)
+parser.add_argument("--top_p", type=float, default=1.0)
+parser.add_argument("--random_seed", type=int, default=0)
+parser.add_argument("--stop_words", type=str, default="")
+parser.add_argument("--sliding_window_size", type=int)
+parser.add_argument("--threads", type=int, default=4)
+
+# MInference
+parser.add_argument("--config_path", type=str)
+parser.add_argument("--starting_layer", type=int, default=-1)
+parser.add_argument("--kv_cache_cpu", action="store_true")
+parser.add_argument("--kv_cache_cpu_device", type=str, default="cpu")
+parser.add_argument("--use_snapkv", action="store_true")
+parser.add_argument("--trust_remote_code", action="store_true")
+
+args = parser.parse_args()
+args.stop_words = list(filter(None, args.stop_words.split(",")))
+# if args.server_type == 'hf' or args.server_type == 'gemini' or args.server_type == 'minference':
+if args.server_type in [
+ "hf",
+ "gemini",
+ "minference",
+ "streaming",
+ "InfLLM",
+]:
+ args.threads = 1
+
+
+def get_llm(tokens_to_generate):
+ if args.server_type == "trtllm":
+ from client_wrappers import TRTLLMClient
+
+ llm = TRTLLMClient(
+ server_host=args.server_host,
+ server_port=args.server_port,
+ ssh_server=args.ssh_server,
+ ssh_key_path=args.ssh_key_path,
+ temperature=args.temperature,
+ top_k=args.top_k,
+ top_p=args.top_p,
+ random_seed=args.random_seed,
+ stop=args.stop_words,
+ tokens_to_generate=tokens_to_generate,
+ max_attention_window_size=args.sliding_window_size,
+ )
+
+ elif args.server_type == "vllm":
+ from client_wrappers import VLLMClient
+
+ llm = VLLMClient(
+ server_host=args.server_host,
+ server_port=args.server_port,
+ ssh_server=args.ssh_server,
+ ssh_key_path=args.ssh_key_path,
+ temperature=args.temperature,
+ top_k=args.top_k,
+ top_p=args.top_p,
+ random_seed=args.random_seed,
+ stop=args.stop_words,
+ tokens_to_generate=tokens_to_generate,
+ )
+
+ elif args.server_type == "openai":
+ from client_wrappers import OpenAIClient
+
+ llm = OpenAIClient(
+ model_name=args.model_name_or_path,
+ temperature=args.temperature,
+ top_k=args.top_k,
+ top_p=args.top_p,
+ random_seed=args.random_seed,
+ stop=args.stop_words,
+ tokens_to_generate=tokens_to_generate,
+ )
+
+ elif args.server_type == "gemini":
+ from client_wrappers import GeminiClient
+
+ llm = GeminiClient(
+ model_name=args.model_name_or_path,
+ temperature=args.temperature,
+ top_k=args.top_k,
+ top_p=args.top_p,
+ random_seed=args.random_seed,
+ stop=args.stop_words,
+ tokens_to_generate=tokens_to_generate,
+ )
+
+ elif args.server_type == "hf":
+ from model_wrappers import HuggingFaceModel
+
+ llm = HuggingFaceModel(
+ name_or_path=args.model_name_or_path,
+ do_sample=args.temperature > 0,
+ repetition_penalty=1,
+ temperature=args.temperature,
+ top_k=args.top_k,
+ top_p=args.top_p,
+ stop=args.stop_words,
+ max_new_tokens=tokens_to_generate,
+ )
+
+ elif args.server_type == "minference":
+ from model_wrappers import MInferenceModel
+
+ llm = MInferenceModel(
+ name_or_path=args.model_name_or_path,
+ do_sample=args.temperature > 0,
+ repetition_penalty=1,
+ temperature=args.temperature,
+ top_k=args.top_k,
+ top_p=args.top_p,
+ stop=args.stop_words,
+ max_new_tokens=tokens_to_generate,
+ config_path=args.config_path,
+ kv_cache_cpu=args.kv_cache_cpu,
+ kv_cache_cpu_device=args.kv_cache_cpu_device,
+ use_snapkv=args.use_snapkv,
+ trust_remote_code=args.trust_remote_code,
+ starting_layer=args.starting_layer,
+ )
+
+ elif args.server_type == "InfLLM":
+ from model_wrappers import InfLLM
+
+ llm = InfLLM(
+ name_or_path=args.model_name_or_path,
+ do_sample=args.temperature > 0,
+ repetition_penalty=1,
+ temperature=args.temperature,
+ top_k=args.top_k,
+ top_p=args.top_p,
+ stop=args.stop_words,
+ max_new_tokens=tokens_to_generate,
+ )
+
+ elif args.server_type == "streaming":
+ from model_wrappers import Streaming
+
+ llm = Streaming(
+ name_or_path=args.model_name_or_path,
+ do_sample=args.temperature > 0,
+ repetition_penalty=1,
+ temperature=args.temperature,
+ top_k=args.top_k,
+ top_p=args.top_p,
+ stop=args.stop_words,
+ max_new_tokens=tokens_to_generate,
+ config_path=args.config_path,
+ kv_cache_cpu=args.kv_cache_cpu,
+ kv_cache_cpu_device=args.kv_cache_cpu_device,
+ use_snapkv=args.use_snapkv,
+ trust_remote_code=args.trust_remote_code,
+ starting_layer=args.starting_layer,
+ )
+
+ else:
+ raise RuntimeError(f"Unsupported server type {args.server_type}")
+
+ return llm
+
+
+def main():
+ start_time = time.time()
+
+ curr_folder = os.path.dirname(os.path.abspath(__file__))
+
+ try:
+ sys.path.append(os.path.dirname(curr_folder))
+ module = importlib.import_module(f"data.{args.benchmark}.constants")
+ except ImportError:
+ print(f"Module data.{args.benchmark}.constants not found.")
+
+ tasks_base = module.TASKS
+ with open(os.path.join(curr_folder, f"../{args.benchmark}.yaml"), "r") as f:
+ tasks_customized = yaml.safe_load(f)
+
+ if args.task not in tasks_customized:
+ raise ValueError(f"{args.task} is not found in config_tasks.yaml")
+
+ config = tasks_customized.get(args.task)
+ config.update(tasks_base[config["task"]])
+
+ task_file = args.data_dir / args.task / f"{args.subset}.jsonl"
+
+ if args.chunk_amount > 1:
+ pred_file = args.save_dir / f"{args.task}-{args.chunk_idx}.jsonl"
+ else:
+ pred_file = args.save_dir / f"{args.task}.jsonl"
+
+ print(f"Predict {args.task} \nfrom {task_file}\nto {pred_file}")
+ pred_file.parent.mkdir(parents=True, exist_ok=True)
+
+ # Load data
+ if os.path.exists(pred_file):
+ pred_index = [sample["index"] for sample in read_manifest(pred_file)]
+ data = [
+ sample
+ for sample in read_manifest(task_file)
+ if sample["index"] not in pred_index
+ ]
+ else:
+ data = read_manifest(task_file)
+
+ # Load api
+ llm = get_llm(config["tokens_to_generate"])
+
+ def get_output(idx, index, input, outputs, others, truncation, length):
+ while True:
+ try:
+ pred = llm(prompt=input)
+ break
+ except Exception as e:
+ traceback.print_exc()
+
+ if len(pred["text"]) > 0:
+ outputs_parallel[idx] = {
+ "index": index,
+ "pred": pred["text"][0],
+ "input": input,
+ "outputs": outputs,
+ "others": others,
+ "truncation": truncation,
+ "length": length,
+ }
+
+ threads = []
+ outputs_parallel = [{} for _ in range(len(data))]
+ # setting buffering=1 to force to dump the output after every line, so that we can see intermediate generations
+ with open(pred_file, "at", encoding="utf-8", buffering=1) as fout:
+ for idx, data_point in tqdm(enumerate(data), total=len(data)):
+ get_output(
+ idx,
+ data_point["index"],
+ data_point["input"],
+ data_point["outputs"],
+ data_point.get("others", {}),
+ data_point.get("truncation", -1),
+ data_point.get("length", -1),
+ )
+
+ fout.write(json.dumps(outputs_parallel[idx]) + "\n")
+
+ print(f"Used time: {round((time.time() - start_time) / 60, 1)} minutes")
+
+
+if __name__ == "__main__":
+ main()
diff --git a/experiments/ruler/pred/client_wrappers.py b/experiments/ruler/pred/client_wrappers.py
new file mode 100644
index 0000000..361b998
--- /dev/null
+++ b/experiments/ruler/pred/client_wrappers.py
@@ -0,0 +1,361 @@
+# Copyright (c) 2024 Microsoft
+# Licensed under The MIT License [see LICENSE for details]
+
+
+import abc
+import json
+import os
+import re
+import sys
+import time
+import traceback
+from collections import defaultdict
+from concurrent.futures import ThreadPoolExecutor
+from pathlib import Path
+from typing import List, Tuple, Union
+
+import requests
+from tenacity import retry, stop_after_attempt, wait_random_exponential
+
+
+class Client(abc.ABC):
+ def __init__(
+ self,
+ server_host,
+ server_port="5000",
+ ssh_server=None,
+ ssh_key_path=None,
+ **generation_kwargs,
+ ):
+ self.server_host = server_host
+ self.server_port = server_port
+ self.ssh_server = os.getenv("SSH_SERVER", ssh_server)
+ self.ssh_key_path = os.getenv("SSH_KEY_PATH", ssh_key_path)
+ self.generation_kwargs = generation_kwargs
+
+ @abc.abstractmethod
+ def _single_call(
+ self,
+ prompts,
+ ):
+ pass
+
+ def __call__(self, prompt: str, **kwargs):
+        # copy so per-call fields do not mutate the shared generation kwargs
+        request = dict(self.generation_kwargs)
+        # prompts are added later
+        request["prompts"] = [f"{prompt}"]
+ if "others" in kwargs:
+            request["others"] = kwargs["others"]
+
+ outputs = self._single_call(**request)
+ response = {"text": outputs}
+ return response
+
+ @retry(wait=wait_random_exponential(min=15, max=60), stop=stop_after_attempt(3))
+ def _send_request(self, request, route="generate"):
+ if self.ssh_server and self.ssh_key_path:
+ import sshtunnel_requests
+
+ sshtunnel_request = sshtunnel_requests.from_url(
+ f"ssh://{self.ssh_server}:22", self.ssh_key_path
+ )
+ outputs = sshtunnel_request.put(
+ url="http://{}:{}/{}".format(self.server_host, self.server_port, route),
+ data=json.dumps(request),
+ headers={"Content-Type": "application/json"},
+ ).json()
+ else:
+ outputs = requests.put(
+ url="http://{}:{}/{}".format(self.server_host, self.server_port, route),
+ data=json.dumps(request),
+ headers={"Content-Type": "application/json"},
+ ).json()
+ return outputs
+
+
+class TRTLLMClient(Client):
+ def _single_call(
+ self,
+ prompts,
+ tokens_to_generate,
+ temperature,
+ top_p,
+ top_k,
+ random_seed,
+ stop: List[str],
+ max_attention_window_size=None,
+ ):
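+        # Request fields mirror what TritonServerGenerate.put() in serve_trt.py reads from the JSON body.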
+ request = {
+ "prompts": prompts,
+ "tokens_to_generate": tokens_to_generate,
+ "temperature": temperature,
+ "top_k": top_k,
+ "top_p": top_p,
+ "random_seed": random_seed,
+ "stop_words_list": ",".join(stop),
+ }
+ if max_attention_window_size:
+ request["max_attention_window_size"] = max_attention_window_size
+
+ outputs = self._send_request(request)
+ return outputs
+
+
+class VLLMClient(Client):
+ def _single_call(
+ self,
+ prompts,
+ tokens_to_generate,
+ temperature,
+ top_p,
+ top_k,
+ random_seed,
+ stop: List[str],
+ ):
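+        # Request schema matches the /generate endpoint in serve_vllm.py; fields other than "prompt" are passed to SamplingParams.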
+ request = {
+ "prompt": prompts[0],
+ "max_tokens": tokens_to_generate,
+ "temperature": temperature,
+ "top_k": top_k,
+ "top_p": top_p,
+ "stop": stop,
+ }
+ # TODO: random seed is not supported?
+ outputs = self._send_request(request)
+ outputs = outputs["text"]
+ return outputs
+
+
+class OpenAIClient:
+ def __init__(self, model_name, **generation_kwargs):
+ model2length = {
+ # OpenAI
+ "gpt-4": 8192,
+ "gpt-4-0613": 8192,
+ "gpt-4-1106-preview": 128000,
+ "gpt-4-0125-preview": 128000,
+ "gpt-4-turbo-preview": 128000,
+ "gpt-3.5-turbo-0125": 16385,
+ "gpt-3.5-turbo-1106": 16385,
+ "gpt-3.5-turbo-0613": 4096,
+ "gpt-3.5-turbo": 16385,
+ "gpt-3.5-turbo-16k": 16385,
+ "gpt-3.5-turbo-16k-0613": 16385,
+ # Azure
+ "gpt-4-32k": 32768,
+ "gpt-4": 128000,
+ "gpt-35-turbo-16k": 16384,
+ }
+        self.openai_api_key = os.environ.get("OPENAI_API_KEY")
+        self.azure_api_id = os.environ.get("AZURE_API_ID")
+        self.azure_api_secret = os.environ.get("AZURE_API_SECRET")
+        self.azure_api_endpoint = os.environ.get("AZURE_API_ENDPOINT")
+ self.model_name = model_name
+
+ # Azure
+ if self.azure_api_id and self.azure_api_secret:
+ if "gpt-3.5" in model_name:
+ self.model_name = "gpt-35-turbo-16k"
+ if "gpt-4" in model_name:
+ self.model_name = "gpt-4"
+
+ import tiktoken
+
+ self.encoding = tiktoken.get_encoding("cl100k_base")
+ self.max_length = model2length[self.model_name]
+ self.generation_kwargs = generation_kwargs
+ self._create_client()
+
+ def _create_client(
+ self,
+ ):
+ from openai import AzureOpenAI, OpenAI
+
+ # OpenAI
+ if self.openai_api_key:
+ self.client = OpenAI(api_key=self.openai_api_key)
+
+ # Azure
+ elif self.azure_api_id and self.azure_api_secret:
+ self.client = AzureOpenAI(
+ api_key=self.get_azure_api_key(
+ self.azure_api_id,
+ self.azure_api_secret,
+ self.azure_api_endpoint,
+ ),
+ api_version="2024-02-15-preview",
+ azure_endpoint=os.path.join(self.azure_api_endpoint, "llm/v1/azure"),
+ )
+
+ def _count_tokens(self, messages):
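+        # Chat token-count heuristic (per-message overhead plus encoded content), following OpenAI's tiktoken cookbook example.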
+ tokens_per_message = 3
+ tokens_per_name = 1
+ num_tokens = 0
+ for message in messages:
+ num_tokens += tokens_per_message
+ for key, value in message.items():
+ num_tokens += len(self.encoding.encode(value))
+ if key == "name":
+ num_tokens += tokens_per_name
+ num_tokens += 3 # every reply is primed with <|start|>assistant<|message|>
+ return num_tokens
+
+ @retry(wait=wait_random_exponential(min=15, max=60), stop=stop_after_attempt(3))
+ def _send_request(self, request):
+ try:
+ response = self.client.chat.completions.create(
+ model=self.model_name,
+ messages=request["msgs"],
+ max_tokens=request["tokens_to_generate"],
+ temperature=request["temperature"],
+ seed=request["random_seed"],
+ top_p=request["top_p"],
+ stop=request["stop"],
+ )
+        except Exception as e:
+            print(f"Error occurred while calling OpenAI: {e}")
+            if (
+                self.azure_api_id
+                and self.azure_api_secret
+                and getattr(e, "status_code", None) == 401
+            ):
+                # token expired; recreate the client so the retry uses a fresh token
+                self._create_client()
+            # re-raise so the tenacity retry decorator can attempt the call again
+            raise
+
+        return response
+
+ def __call__(
+ self,
+ prompt: str,
+ ):
+ # system_msg = [{"role": "system", "content": ""}]
+ system_msg = []
+ user_assistant_msgs = [{"role": "user", "content": prompt}]
+ msgs = system_msg + user_assistant_msgs
+ openai_length = self._count_tokens(msgs)
+        request = dict(self.generation_kwargs)  # copy so a reduced token budget does not persist across calls
+
+ tokens_to_generate_new = self.max_length - openai_length
+ if tokens_to_generate_new < request["tokens_to_generate"]:
+ print(
+ f"Reduce generate tokens from {request['tokens_to_generate']} to {tokens_to_generate_new}"
+ )
+ request["tokens_to_generate"] = tokens_to_generate_new
+
+ request["msgs"] = msgs
+ outputs = self._send_request(request)
+ response = {"text": [outputs.choices[0].message.content]}
+ return response
+
+ def get_azure_api_key(
+ self,
+ p_client_id,
+ p_client_secret,
+ p_token_url,
+ p_scope="azureopenai-readwrite",
+ cache_file="azure_openai_key.json",
+ ):
+ base_path = Path(__file__).parent
+ file_path = Path.joinpath(base_path, cache_file)
+
+ # Check if the token is cached
+ renew = True
+ if os.path.exists(file_path):
+ with open(file_path, "r") as f:
+ token = json.load(f)
+ renew = True if time.time() > token["expires_in"] else False
+
+ if renew:
+ # Get a new token from the OAuth server
+ response = requests.post(
+ os.path.join(p_token_url, "oauth/api/v1/ssa/default/token"),
+ data={
+ "grant_type": "client_credentials",
+ "client_id": p_client_id,
+ "client_secret": p_client_secret,
+ "scope": p_scope,
+ },
+ )
+ response.raise_for_status()
+ token = response.json()
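+            # Convert the relative "expires_in" (seconds) into an absolute expiry timestamp before caching.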
+ token["expires_in"] += time.time()
+ with open(file_path, "w") as f:
+ json.dump(token, f)
+
+ authToken = token["access_token"]
+ return authToken
+
+
+class GeminiClient:
+ def __init__(self, model_name, **generation_kwargs):
+ model2length = {
+ "gemini-1.0-pro-latest": (30720, 2048),
+ "gemini-1.5-pro-latest": (1048576, 8192),
+ }
+
+ self.model_name = model_name
+ self.model = self._initialize_model()
+ self.max_input_length = model2length[model_name][0]
+ self.max_output_length = model2length[model_name][1]
+        assert (
+            generation_kwargs["tokens_to_generate"] < self.max_output_length
+        ), f"tokens_to_generate exceeds {self.max_output_length}"
+
+ import google.generativeai as genai
+
+ self.config = genai.GenerationConfig(
+ candidate_count=1,
+ stop_sequences=generation_kwargs["stop"],
+ max_output_tokens=generation_kwargs["tokens_to_generate"],
+ temperature=generation_kwargs["temperature"],
+ top_p=generation_kwargs["top_p"],
+ top_k=generation_kwargs["top_k"],
+ )
+
+ from google.generativeai.types import HarmBlockThreshold, HarmCategory
+
+ self.safety_settings = {
+ HarmCategory.HARM_CATEGORY_HARASSMENT: HarmBlockThreshold.BLOCK_NONE,
+ HarmCategory.HARM_CATEGORY_HATE_SPEECH: HarmBlockThreshold.BLOCK_NONE,
+ HarmCategory.HARM_CATEGORY_SEXUALLY_EXPLICIT: HarmBlockThreshold.BLOCK_NONE,
+ HarmCategory.HARM_CATEGORY_DANGEROUS_CONTENT: HarmBlockThreshold.BLOCK_NONE,
+ }
+
+ @retry(wait=wait_random_exponential(min=60, max=60), stop=stop_after_attempt(3))
+ def _send_request(self, request):
+ try:
+ response = self.model.generate_content(
+ request["prompt"],
+ generation_config=request["config"],
+ safety_settings=self.safety_settings,
+ )
+ except Exception as e:
+ traceback.print_exc()
+ return None
+ return response
+
+ def __call__(
+ self,
+ prompt: str,
+ ):
+        assert (
+            self.model.count_tokens(prompt).total_tokens < self.max_input_length
+        ), f"input length exceeds {self.max_input_length}"
+
+ request = {
+ "prompt": prompt,
+ "config": self.config,
+ }
+
+ outputs = self._send_request(request)
+
+ try:
+ response = {"text": [outputs.candidates[0].content.parts[0].text]}
+ except Exception as e:
+ response = {"text": []}
+ print(outputs)
+ traceback.print_exc()
+
+ return response
+
+ def _initialize_model(self):
+ import google.generativeai as genai
+
+ genai.configure(api_key=os.environ["GEMINI_API_KEY"])
+ return genai.GenerativeModel(self.model_name)
diff --git a/experiments/ruler/pred/model_wrappers.py b/experiments/ruler/pred/model_wrappers.py
new file mode 100644
index 0000000..b4b975f
--- /dev/null
+++ b/experiments/ruler/pred/model_wrappers.py
@@ -0,0 +1,302 @@
+# Copyright (c) 2024 Microsoft
+# Licensed under The MIT License [see LICENSE for details]
+
+import json
+import logging
+from typing import Dict, List, Optional
+
+import requests
+import torch
+
+
+class HuggingFaceModel:
+ def __init__(self, name_or_path: str, **generation_kwargs) -> None:
+ from transformers import AutoModelForCausalLM, AutoTokenizer, pipeline
+
+ self.tokenizer = AutoTokenizer.from_pretrained(
+ name_or_path, trust_remote_code=True
+ )
+
+ if "Yarn-Llama" in name_or_path:
+ model_kwargs = None
+ else:
+ model_kwargs = {"attn_implementation": "flash_attention_2"}
+
+ try:
+ self.pipeline = pipeline(
+ "text-generation",
+ model=name_or_path,
+ tokenizer=self.tokenizer,
+ trust_remote_code=True,
+ device_map="auto",
+ torch_dtype=torch.bfloat16,
+ model_kwargs=model_kwargs,
+ )
+        except Exception:
+            # fall back to manual generate() with a plain AutoModelForCausalLM
+            self.pipeline = None
+ self.model = AutoModelForCausalLM.from_pretrained(
+ name_or_path,
+ trust_remote_code=True,
+ device_map="auto",
+ torch_dtype=torch.bfloat16,
+ )
+
+ self.generation_kwargs = generation_kwargs
+ self.stop = self.generation_kwargs.pop("stop")
+
+ def __call__(self, prompt: str, **kwargs) -> Dict[str, List[str]]:
+ if self.pipeline is None:
+ inputs = self.tokenizer(prompt, return_tensors="pt").to(self.model.device)
+ output = self.model.generate(**inputs, **self.generation_kwargs)
+ generated_text = self.tokenizer.decode(
+ output[0][inputs.input_ids.shape[1] :], skip_special_tokens=True
+ )
+ else:
+ output = self.pipeline(
+ text_inputs=prompt,
+ **self.generation_kwargs,
+ )
+ assert len(output) == 1
+ generated_text = output[0]["generated_text"]
+
+        # remove the input from the generated text
+ if generated_text.startswith(prompt):
+ generated_text = generated_text[len(prompt) :]
+
+ if self.stop is not None:
+ for s in self.stop:
+ generated_text = generated_text.split(s)[0]
+ return {"text": [generated_text]}
+
+
+class MInferenceModel:
+ def __init__(
+ self,
+ name_or_path: str,
+ config_path: str,
+ do_sample: bool = False,
+ repetition_penalty: float = 1.0,
+ temperature: float = 0.0,
+ top_k: int = 32,
+ top_p: float = 0.9,
+ stop: Optional[List[str]] = None,
+ max_new_tokens: int = 100,
+ starting_layer: int = -1,
+ kv_cache_cpu: bool = False,
+        kv_cache_cpu_device: Optional[str] = None,
+ use_snapkv: bool = False,
+ trust_remote_code: bool = False,
+ ) -> None:
+ from transformers import (
+ AutoConfig,
+ AutoModelForCausalLM,
+ AutoTokenizer,
+ GenerationConfig,
+ )
+
+ from minference import MInference
+
+ self.tokenizer = AutoTokenizer.from_pretrained(
+ name_or_path,
+ trust_remote_code=trust_remote_code,
+ resume_download=None,
+ )
+ model = AutoModelForCausalLM.from_pretrained(
+ name_or_path,
+ torch_dtype="auto",
+ device_map="cuda",
+ resume_download=None,
+ trust_remote_code=trust_remote_code,
+ )
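+        # Patch the loaded HF model so pre-filling uses MInference's dynamic sparse attention kernels.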
+ minference_patch = MInference(
+ "minference",
+ name_or_path,
+ config_path=config_path,
+ starting_layer=starting_layer,
+ use_snapkv=use_snapkv,
+ kv_cache_cpu=kv_cache_cpu,
+ kv_cache_cpu_device=kv_cache_cpu_device,
+ is_search=False,
+ )
+ self.model = minference_patch(model)
+
+ self.pipeline = None
+ generation_config = GenerationConfig(
+ do_sample=do_sample,
+ repetition_penalty=repetition_penalty,
+ max_new_tokens=max_new_tokens,
+ )
+ if do_sample:
+ generation_config.top_k = top_k
+ generation_config.top_p = top_p
+ generation_config.temperature = temperature
+
+ self.generation_config = generation_config
+
+ self.stop = stop
+
+ def __call__(self, prompt: str, **kwargs) -> Dict[str, List[str]]:
+ torch.cuda.empty_cache()
+ inputs = self.tokenizer(
+ prompt, return_tensors="pt", return_attention_mask=False
+ ).to(self.model.device)
+ output = self.model.generate(**inputs, generation_config=self.generation_config)
+ generated_text = self.tokenizer.decode(
+ output[0][inputs.input_ids.shape[1] :], skip_special_tokens=True
+ )
+
+        # remove the input from the generated text
+ if generated_text.startswith(prompt):
+ generated_text = generated_text[len(prompt) :]
+
+ if self.stop is not None:
+ for s in self.stop:
+ generated_text = generated_text.split(s)[0]
+ return {"text": [generated_text]}
+
+
+class InfLLM(MInferenceModel):
+ def __init__(self, name_or_path: str, **generation_kwargs) -> None:
+ from transformers import AutoConfig, AutoModelForCausalLM, AutoTokenizer
+
+ from minference import MInference
+
+ self.tokenizer = AutoTokenizer.from_pretrained(
+ name_or_path, trust_remote_code=True
+ )
+ self.model = AutoModelForCausalLM.from_pretrained(
+ name_or_path,
+ torch_dtype="auto",
+ device_map="auto",
+ resume_download=None,
+ trust_remote_code=True,
+ )
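+        # The "inf_llm" mode applies InfLLM-style block-memory attention through the same MInference patching interface.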
+ minference_patch = MInference("inf_llm", name_or_path, None, starting_layer=0)
+ self.model = minference_patch.patch_model(self.model)
+ self.pipeline = None
+ self.generation_kwargs = generation_kwargs
+ self.stop = self.generation_kwargs.pop("stop")
+
+ def __call__(self, prompt: str, **kwargs) -> Dict[str, List[str]]:
+ inputs = self.tokenizer(
+ prompt, return_tensors="pt", return_attention_mask=False
+ ).to(self.model.device)
+ output = self.model.generate(
+ **inputs,
+ max_new_tokens=self.generation_kwargs["max_new_tokens"],
+ )
+ generated_text = self.tokenizer.decode(
+ output[0][inputs.input_ids.shape[1] :], skip_special_tokens=True
+ )
+
+        # remove the input from the generated text
+ if generated_text.startswith(prompt):
+ generated_text = generated_text[len(prompt) :]
+
+ if self.stop is not None:
+ for s in self.stop:
+ generated_text = generated_text.split(s)[0]
+ return {"text": [generated_text]}
+
+
+class Streaming(MInferenceModel):
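+    # Same setup as MInferenceModel but with the "streaming" (attention-sink + local window) pattern; __call__ is inherited.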
+ def __init__(
+ self,
+ name_or_path: str,
+ config_path: str,
+ do_sample: bool = False,
+ repetition_penalty: float = 1.0,
+ temperature: float = 0.0,
+ top_k: int = 32,
+ top_p: float = 0.9,
+ stop: Optional[List[str]] = None,
+ max_new_tokens: int = 100,
+ starting_layer: int = -1,
+ kv_cache_cpu: bool = False,
+        kv_cache_cpu_device: Optional[str] = None,
+ use_snapkv: bool = False,
+ trust_remote_code: bool = False,
+ ) -> None:
+ from transformers import (
+ AutoConfig,
+ AutoModelForCausalLM,
+ AutoTokenizer,
+ GenerationConfig,
+ )
+
+ from minference import MInference
+
+ self.tokenizer = AutoTokenizer.from_pretrained(
+ name_or_path,
+ trust_remote_code=trust_remote_code,
+ resume_download=None,
+ )
+ model = AutoModelForCausalLM.from_pretrained(
+ name_or_path,
+ torch_dtype="auto",
+ device_map="cuda",
+ resume_download=None,
+ trust_remote_code=trust_remote_code,
+ )
+ minference_patch = MInference(
+ "streaming",
+ name_or_path,
+ config_path=config_path,
+ starting_layer=starting_layer,
+ use_snapkv=use_snapkv,
+ kv_cache_cpu=kv_cache_cpu,
+ kv_cache_cpu_device=kv_cache_cpu_device,
+ is_search=False,
+ )
+ self.model = minference_patch(model)
+
+ self.pipeline = None
+ generation_config = GenerationConfig(
+ do_sample=do_sample,
+ repetition_penalty=repetition_penalty,
+ max_new_tokens=max_new_tokens,
+ )
+ if do_sample:
+ generation_config.top_k = top_k
+ generation_config.top_p = top_p
+ generation_config.temperature = temperature
+
+ self.generation_config = generation_config
+
+ self.stop = stop
+
+
+class MambaModel:
+ def __init__(self, name_or_path: str, **generation_kwargs) -> None:
+ from mamba_ssm.models.mixer_seq_simple import MambaLMHeadModel
+ from transformers import AutoTokenizer
+
+ self.tokenizer = AutoTokenizer.from_pretrained("EleutherAI/gpt-neox-20b")
+ self.device = "cuda"
+ self.model = MambaLMHeadModel.from_pretrained(
+ name_or_path, device=self.device, dtype=torch.bfloat16
+ )
+ self.generation_kwargs = generation_kwargs
+ self.stop = self.generation_kwargs.pop("stop")
+ self.max_genlen = self.generation_kwargs.pop("max_new_tokens")
+ self.minp = 0.0
+
+ def __call__(self, prompt: str, **kwargs) -> Dict[str, List[str]]:
+ # tokenize
+ tokens = self.tokenizer(prompt, return_tensors="pt")
+ input_ids = tokens.input_ids.to(self.device)
+ max_length = input_ids.shape[1] + self.max_genlen
+
+ # generate
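+        # cg=True enables mamba-ssm's CUDA-graph cached decoding path for faster generation.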
+ out = self.model.generate(
+ input_ids=input_ids,
+ max_length=max_length,
+ cg=True,
+ return_dict_in_generate=True,
+ output_scores=True,
+ enable_timing=False,
+ **self.generation_kwargs,
+ )
+ assert len(out.sequences) == 1
+ # detok
+ return {"text": [self.tokenizer.decode(out.sequences[0][input_ids.shape[1] :])]}
diff --git a/experiments/ruler/pred/serve_trt.py b/experiments/ruler/pred/serve_trt.py
new file mode 100644
index 0000000..2d2b796
--- /dev/null
+++ b/experiments/ruler/pred/serve_trt.py
@@ -0,0 +1,265 @@
+# Copyright (c) 2024 Microsoft
+# Licensed under The MIT License [see LICENSE for details]
+
+# adapted from https://github.com/Kipok/NeMo-Skills/blob/v0.1/nemo_skills/inference/server/serve_trt.py
+
+import json
+import logging
+import sys
+from argparse import ArgumentParser
+from pathlib import Path
+
+import numpy as np
+import tensorrt_llm
+import torch
+from flask import Flask, jsonify, request
+from flask_restful import Api, Resource
+from mpi4py import MPI
+from tensorrt_llm.runtime import ModelRunnerCpp
+from transformers import AutoTokenizer
+
+
+class TritonServerGenerate(Resource):
+ def __init__(self, model):
+ self.model = model
+ self.comm = MPI.COMM_WORLD
+
+ def generate(
+ self,
+ prompts,
+ max_new_tokens,
+ temperature,
+ top_k,
+ top_p,
+ repetition_penalty,
+ random_seed,
+ stop_words_list,
+ max_attention_window_size=None,
+ ):
+ output = self.model.forward(
+ prompts,
+ max_output_token=max_new_tokens,
+ top_k=top_k,
+ top_p=top_p,
+ temperature=temperature,
+ repetition_penalty=repetition_penalty,
+ random_seed=random_seed,
+ stop_words_list=stop_words_list,
+ max_attention_window_size=max_attention_window_size,
+ )
+ return output
+
+ def put(self):
+ logging.info("request IP: " + str(request.remote_addr))
+ logging.info(json.dumps(request.get_json()))
+
+ input_request = request.get_json()
+
+ tokens_to_generate = input_request.get("tokens_to_generate", 64)
+ temperature = input_request.get("temperature", 1.0)
+ top_k = input_request.get("top_k", 0)
+ top_p = input_request.get("top_p", 1.0)
+ repetition_penalty = input_request.get("repetition_penalty", 1.2)
+ stop_words_list = input_request.get("stop_words_list")
+ max_attention_window_size = input_request.get("max_attention_window_size")
+ random_seed = input_request.get("random_seed", 0)
+ prompts = input_request["prompts"]
+
+ data = dict(
+ prompts=prompts,
+ max_new_tokens=tokens_to_generate,
+ temperature=temperature,
+ top_k=top_k,
+ top_p=top_p,
+ repetition_penalty=repetition_penalty,
+ random_seed=random_seed,
+ stop_words_list=stop_words_list,
+ max_attention_window_size=max_attention_window_size,
+ )
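+        # Rank 0 broadcasts the request so every MPI worker rank (see WrapperServer.worker_loop) calls generate() on the same inputs.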
+ self.comm.Barrier()
+ data = self.comm.bcast(data, root=0)
+
+ out = self.generate(**data)
+ return jsonify(out)
+
+
+def parse_input(input_texts: str, tokenizer):
+ batch_input_ids = [
+ tokenizer.encode(
+ input_text,
+ add_special_tokens=False, # TODO: does this need to be true?
+ )
+ for input_text in input_texts
+ ]
+ batch_input_ids = [
+ torch.tensor(x, dtype=torch.int32, device="cuda") for x in batch_input_ids
+ ]
+ input_lengths = [x.size(0) for x in batch_input_ids]
+
+ return batch_input_ids, input_lengths
+
+
+def get_output(output_ids, input_lengths, max_output_len, tokenizer, eos_token):
+ num_beams = output_ids.size(1)
+ assert num_beams == 1
+ output_texts = []
+ for idx, input_len in enumerate(input_lengths):
+ output_begin = input_len
+ output_end = input_len + max_output_len
+ outputs = output_ids[idx][0][output_begin:output_end]
+ eos_ids = (outputs == eos_token).nonzero(as_tuple=True)[-1]
+ if len(eos_ids) > 0:
+ outputs = outputs[: eos_ids[0]]
+ outputs = outputs.tolist()
+ output_texts.append(tokenizer.decode(outputs))
+ return output_texts
+
+
+def prepare_stop_words(stop_words_list, tokenizer):
+ # adapted from https://github.com/NVIDIA/TensorRT-LLM/blob/b310ec675145c9ee7668592549f733df4abf1e94/tensorrt_llm/runtime/generation.py#L46
+ flat_ids = []
+ offsets = []
+ for batch_stop_words in stop_words_list:
+ item_flat_ids = []
+ item_offsets = []
+
+ for word in batch_stop_words:
+ # there is a known issue in TensorRT-LLM that word ids are not unique and might change depending on
+ # where in the text it appears. In our case we mainly need to stop on ids as they appear in the middle
+ # of the text. The following is a workaround to get such ids that works for both kind of stop
+ # words as well as newlines that we commonly use. But note that it's not a universal fix, so this might
+ # require refactoring if different stop words are used in the future.
+ # Eventually, this needs to be fixed inside TensorRT-LLM itself.
+ ids = tokenizer.encode("magic" + word)
+ ids = ids[2:] # skipping "magic"
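+            # ("magic" is assumed to tokenize into exactly two ids; slicing them off keeps only the ids the stop word gets mid-text)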
+
+ if len(ids) == 0:
+ continue
+
+ item_flat_ids += ids
+ item_offsets.append(len(ids))
+
+ flat_ids.append(np.array(item_flat_ids))
+ offsets.append(np.cumsum(np.array(item_offsets)))
+
+ pad_to = max(1, max(len(ids) for ids in flat_ids))
+
+ for i, (ids, offs) in enumerate(zip(flat_ids, offsets)):
+ flat_ids[i] = np.pad(ids, (0, pad_to - len(ids)), constant_values=0)
+ offsets[i] = np.pad(offs, (0, pad_to - len(offs)), constant_values=-1)
+
+ stop_words = np.array([flat_ids, offsets], dtype="int32").transpose((1, 0, 2))
+ return torch.Tensor(stop_words).to(torch.int32).to("cuda").contiguous()
+
+
+def load_tokenizer(tokenizer_dir: str):
+ tokenizer = AutoTokenizer.from_pretrained(
+ tokenizer_dir,
+ legacy=False,
+ trust_remote_code=True,
+ )
+
+ if tokenizer.pad_token_id is None:
+ tokenizer.pad_token_id = tokenizer.eos_token_id
+ pad_id = tokenizer.pad_token_id
+ end_id = tokenizer.eos_token_id
+
+ return tokenizer, pad_id, end_id
+
+
+class TensorRTLLM:
+ def __init__(self, model_path: str):
+ self.tokenizer, self.pad_id, self.end_id = load_tokenizer(
+ tokenizer_dir=model_path
+ )
+ self.runner = ModelRunnerCpp.from_dir(
+ engine_dir=model_path, rank=tensorrt_llm.mpi_rank()
+ )
+
+ @torch.no_grad()
+ def forward(
+ self,
+ input_texts,
+ max_output_token,
+ top_k,
+ top_p,
+ temperature,
+ repetition_penalty,
+ random_seed,
+ stop_words_list,
+ max_attention_window_size,
+ ):
+ batch_input_ids, input_lengths = parse_input(input_texts, self.tokenizer)
+
+ stop_words_list = [stop_words_list for _ in range(len(input_texts))]
+ stop_words_list = prepare_stop_words(stop_words_list, self.tokenizer)
+
+ # TODO: return dictionary with a proper error reporting
+ try:
+ output_ids = self.runner.generate(
+ batch_input_ids,
+ max_new_tokens=max_output_token,
+ end_id=self.end_id,
+ pad_id=self.pad_id,
+ temperature=temperature,
+ top_k=top_k,
+ top_p=top_p,
+ repetition_penalty=repetition_penalty,
+ random_seed=random_seed,
+ stop_words_list=stop_words_list,
+ max_attention_window_size=max_attention_window_size,
+ return_dict=False,
+ )
+ torch.cuda.synchronize()
+
+ output = get_output(
+ output_ids, input_lengths, max_output_token, self.tokenizer, self.end_id
+ )
+ except RuntimeError as e:
+ logging.error("RuntimeError: %s", e)
+ output = [f"RuntimeError: {e}"] * len(input_texts)
+
+ return output
+
+
+class WrapperServer:
+ def __init__(self, model_path: str):
+ self.comm = MPI.COMM_WORLD
+ self.rank = self.comm.Get_rank()
+
+ self.model = TensorRTLLM(model_path=model_path)
+
+ if self.rank == 0:
+ self.app = Flask(__file__, static_url_path="")
+ api = Api(self.app)
+ api.add_resource(
+ TritonServerGenerate, "/generate", resource_class_args=[self.model]
+ )
+
+ def run(self, url, port=5000):
+ if self.rank == 0:
+ self.app.run(url, threaded=True, port=port, debug=False)
+ else:
+ self.worker_loop()
+
+ def worker_loop(self):
+ triton = TritonServerGenerate(self.model)
+ while True:
+ self.comm.Barrier()
+ data = None
+ data = self.comm.bcast(data, root=0)
+ triton.generate(**data)
+
+
+if __name__ == "__main__":
+ # TODO: can we reuse normal logger here?
+ logging.getLogger().addHandler(logging.StreamHandler(sys.stdout))
+
+ parser = ArgumentParser()
+ parser.add_argument("--model_path", required=True)
+ parser.add_argument("--host", type=str, default="0.0.0.0")
+ parser.add_argument("--port", type=int, default=5000)
+ args = parser.parse_args()
+
+ server = WrapperServer(model_path=args.model_path)
+ server.run(args.host, args.port)
diff --git a/experiments/ruler/pred/serve_vllm.py b/experiments/ruler/pred/serve_vllm.py
new file mode 100644
index 0000000..4bb0354
--- /dev/null
+++ b/experiments/ruler/pred/serve_vllm.py
@@ -0,0 +1,100 @@
+# Copyright (c) 2024 Microsoft
+# Licensed under The MIT License [see LICENSE for details]
+
+# adapted from https://github.com/vllm-project/vllm/blob/v0.4.0/vllm/entrypoints/api_server.py
+
+import argparse
+import json
+from typing import AsyncGenerator
+
+import uvicorn
+from fastapi import FastAPI, Request
+from fastapi.responses import JSONResponse, Response, StreamingResponse
+from vllm.engine.arg_utils import AsyncEngineArgs
+from vllm.engine.async_llm_engine import AsyncLLMEngine
+from vllm.sampling_params import SamplingParams
+from vllm.utils import random_uuid
+
+TIMEOUT_KEEP_ALIVE = 5 # seconds.
+app = FastAPI()
+engine = None
+
+
+@app.get("/health")
+async def health() -> Response:
+ """Health check."""
+ return Response(status_code=200)
+
+
+@app.put("/generate")
+async def generate(request: Request) -> Response:
+ """Generate completion for the request.
+
+ The request should be a JSON object with the following fields:
+ - prompt: the prompt to use for the generation.
+ - stream: whether to stream the results or not.
+ - other fields: the sampling parameters (See `SamplingParams` for details).
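+
+    Example body (illustrative values only):
+        {"prompt": "Hello", "max_tokens": 64, "temperature": 0.0, "stream": false}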
+ """
+ request_dict = await request.json()
+ prompt = request_dict.pop("prompt")
+ stream = request_dict.pop("stream", False)
+ sampling_params = SamplingParams(**request_dict)
+ request_id = random_uuid()
+
+ results_generator = engine.generate(prompt, sampling_params, request_id)
+
+ # Streaming case
+ async def stream_results() -> AsyncGenerator[bytes, None]:
+ async for request_output in results_generator:
+ prompt = request_output.prompt
+ text_outputs = [prompt + output.text for output in request_output.outputs]
+ ret = {"text": text_outputs}
+ yield (json.dumps(ret) + "\0").encode("utf-8")
+
+ if stream:
+ return StreamingResponse(stream_results())
+
+ # Non-streaming case
+ final_output = None
+ async for request_output in results_generator:
+ if await request.is_disconnected():
+ # Abort the request if the client disconnects.
+ await engine.abort(request_id)
+ return Response(status_code=499)
+ final_output = request_output
+ assert final_output is not None
+ text_outputs = [output.text for output in final_output.outputs]
+ ret = {"text": text_outputs}
+ return JSONResponse(ret)
+
+
+if __name__ == "__main__":
+ parser = argparse.ArgumentParser()
+ parser.add_argument("--host", type=str, default="0.0.0.0")
+ parser.add_argument("--port", type=int, default=5000)
+ parser.add_argument("--ssl-keyfile", type=str, default=None)
+ parser.add_argument("--ssl-certfile", type=str, default=None)
+ parser.add_argument(
+ "--root-path",
+ type=str,
+ default=None,
+ help="FastAPI root_path when app is behind a path based routing proxy",
+ )
+ parser = AsyncEngineArgs.add_cli_args(parser)
+ args = parser.parse_args()
+
+ engine_args = AsyncEngineArgs.from_cli_args(args)
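+    # Override the CLI defaults: force a 140k-token context window and disable per-request logging.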
+ engine_args.max_model_len = 140000
+ engine_args.disable_log_requests = True
+ engine = AsyncLLMEngine.from_engine_args(engine_args)
+
+ app.root_path = args.root_path
+ uvicorn.run(
+ app,
+ host=args.host,
+ port=args.port,
+ log_level="warning",
+ timeout_keep_alive=TIMEOUT_KEEP_ALIVE,
+ ssl_keyfile=args.ssl_keyfile,
+ ssl_certfile=args.ssl_certfile,
+ )
diff --git a/experiments/ruler/requirements.txt b/experiments/ruler/requirements.txt
new file mode 100644
index 0000000..889ed5f
--- /dev/null
+++ b/experiments/ruler/requirements.txt
@@ -0,0 +1,11 @@
+flask
+flask_restful
+sshtunnel_requests
+tritonclient[all]
+wonderwords
+tenacity
+huggingface_hub
+causal-conv1d>=1.2.0
+mamba-ssm
+html2text
+google-generativeai
diff --git a/experiments/ruler/run.sh b/experiments/ruler/run.sh
new file mode 100644
index 0000000..78f939e
--- /dev/null
+++ b/experiments/ruler/run.sh
@@ -0,0 +1,116 @@
+#!/bin/bash
+# Copyright (c) 2024 Microsoft
+# Licensed under The MIT License [see LICENSE for details]
+
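+# Usage: bash run.sh <MODEL_NAME> <MODEL_FRAMEWORK> <ROOT_DIR>
+#   MODEL_NAME       HuggingFace model name or local path
+#   MODEL_FRAMEWORK  server type passed to pred/call_api.py (e.g. minference, InfLLM, streaming)
+#   ROOT_DIR         directory that stores generated task samples and model predictions
+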
+export TOKENIZERS_PARALLELISM=false
+RULER_PATH=$(dirname $0)
+python -c "import nltk; nltk.download('punkt')"
+
+SEQ_LENGTHS=(
+ 4096
+ # 8192
+ # 16384
+ # 32768
+ # 65536
+ # 131072
+)
+
+TASKS=(
+ "niah_single_1"
+ "niah_single_2"
+ "niah_single_3"
+ "niah_multikey_1"
+ "niah_multikey_2"
+ "niah_multikey_3"
+ "niah_multivalue"
+ "niah_multiquery"
+ "vt"
+ "cwe"
+ "fwe"
+ "qa_1"
+ "qa_2"
+)
+
+# Experiment Setup
+NUM_SAMPLES=25
+TEMPERATURE="0.0"
+TOP_P="1.0"
+TOP_K="32"
+
+# The model
+MODEL_NAME=$1
+BENCHMARK="synthetic"
+MODEL_TEMPLATE_TYPE="base"
+MODEL_FRAMEWORK=$2
+
+# MInference
+STARTING_LAYER=-1
+KV_CACHE_CPU="false"
+USE_SNAPKV="false"
+TRUST_REMOTE_CODE="true"
+
+if [ "${MODEL_FRAMEWORK}" == "minference" ]; then
+ MINFERENCE_PARAMS="--starting_layer ${STARTING_LAYER}"
+
+ if [ -n "${CONFIG_PATH}" ]; then
+ MINFERENCE_PARAMS="${MINFERENCE_PARAMS} --config_path ${CONFIG_PATH}"
+ fi
+
+ if [ "${USE_SNAPKV}" == "true" ]; then
+ MINFERENCE_PARAMS="${MINFERENCE_PARAMS} --use_snapkv"
+ fi
+
+ echo "MInference enabled with params: ${MINFERENCE_PARAMS}"
+fi
+
+if [ "${TRUST_REMOTE_CODE}" == "true" ]; then
+ EXTRA_PARAMS="${EXTRA_PARAMS} --trust_remote_code"
+fi
+
+if [ "${KV_CACHE_CPU}" == "true" ]; then
+ EXTRA_PARAMS="${EXTRA_PARAMS} --kv_cache_cpu --kv_cache_cpu_device cpu"
+fi
+
+# Gpu and output path
+GPUS="1" # GPU size for tensor_parallel.
+ROOT_DIR=$3 # the path that stores generated task samples and model predictions.
+
+for MAX_SEQ_LENGTH in "${SEQ_LENGTHS[@]}"; do
+
+ RESULTS_DIR="${ROOT_DIR}/${MODEL_NAME}_${MODEL_FRAMEWORK}/${BENCHMARK}/${MAX_SEQ_LENGTH}"
+ DATA_DIR="${RESULTS_DIR}/data"
+ PRED_DIR="${RESULTS_DIR}/pred"
+ mkdir -p ${DATA_DIR}
+ mkdir -p ${PRED_DIR}
+
+ for TASK in "${TASKS[@]}"; do
+ python ${RULER_PATH}/data/prepare.py \
+ --save_dir ${DATA_DIR} \
+ --benchmark ${BENCHMARK} \
+ --task ${TASK} \
+ --tokenizer_path ${MODEL_NAME} \
+ --tokenizer_type "hf" \
+ --max_seq_length ${MAX_SEQ_LENGTH} \
+ --model_template_type ${MODEL_TEMPLATE_TYPE} \
+ --num_samples ${NUM_SAMPLES} \
+ ${REMOVE_NEWLINE_TAB}
+
+ python ${RULER_PATH}/pred/call_api.py \
+ --data_dir ${DATA_DIR} \
+ --save_dir ${PRED_DIR} \
+ --benchmark ${BENCHMARK} \
+ --task ${TASK} \
+ --server_type ${MODEL_FRAMEWORK} \
+ --model_name_or_path ${MODEL_NAME} \
+ --temperature ${TEMPERATURE} \
+ --top_k ${TOP_K} \
+ --top_p ${TOP_P} \
+ ${MINFERENCE_PARAMS} \
+ ${EXTRA_PARAMS} \
+ ${STOP_WORDS}
+ done
+
+ python ${RULER_PATH}/eval/evaluate.py \
+ --data_dir ${PRED_DIR} \
+ --benchmark ${BENCHMARK}
+done
diff --git a/experiments/ruler/synthetic.yaml b/experiments/ruler/synthetic.yaml
new file mode 100644
index 0000000..6aa0562
--- /dev/null
+++ b/experiments/ruler/synthetic.yaml
@@ -0,0 +1,110 @@
+# Copyright (c) 2024 Microsoft
+# Licensed under The MIT License [see LICENSE for details]
+
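+# Task configurations for the RULER "synthetic" benchmark: each entry names a base task
+# (defined in data/synthetic/constants.py) and sets its arguments; pred/call_api.py merges
+# these with the base task settings at run time.
+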
+niah_single_1:
+ task: niah
+ args:
+ type_haystack: repeat
+ type_needle_k: words
+ type_needle_v: numbers
+ num_needle_k: 1
+ num_needle_v: 1
+ num_needle_q: 1
+
+niah_single_2:
+ task: niah
+ args:
+ type_haystack: essay
+ type_needle_k: words
+ type_needle_v: numbers
+ num_needle_k: 1
+ num_needle_v: 1
+ num_needle_q: 1
+
+niah_single_3:
+ task: niah
+ args:
+ type_haystack: essay
+ type_needle_k: words
+ type_needle_v: uuids
+ num_needle_k: 1
+ num_needle_v: 1
+ num_needle_q: 1
+
+niah_multikey_1:
+ task: niah
+ args:
+ type_haystack: essay
+ type_needle_k: words
+ type_needle_v: numbers
+ num_needle_k: 4
+ num_needle_v: 1
+ num_needle_q: 1
+
+niah_multikey_2:
+ task: niah
+ args:
+ type_haystack: needle
+ type_needle_k: words
+ type_needle_v: numbers
+ num_needle_k: 1
+ num_needle_v: 1
+ num_needle_q: 1
+
+niah_multikey_3:
+ task: niah
+ args:
+ type_haystack: needle
+ type_needle_k: uuids
+ type_needle_v: uuids
+ num_needle_k: 1
+ num_needle_v: 1
+ num_needle_q: 1
+
+niah_multivalue:
+ task: niah
+ args:
+ type_haystack: essay
+ type_needle_k: words
+ type_needle_v: numbers
+ num_needle_k: 1
+ num_needle_v: 4
+ num_needle_q: 1
+
+niah_multiquery:
+ task: niah
+ args:
+ type_haystack: essay
+ type_needle_k: words
+ type_needle_v: numbers
+ num_needle_k: 1
+ num_needle_v: 1
+ num_needle_q: 4
+
+vt:
+ task: variable_tracking
+ args:
+ num_chains: 1
+ num_hops: 4
+
+cwe:
+ task: common_words_extraction
+ args:
+ freq_cw: 30
+ freq_ucw: 3
+ num_cw: 10
+
+fwe:
+ task: freq_words_extraction
+ args:
+ alpha: 2.0
+
+qa_1:
+ task: qa
+ args:
+ dataset: squad
+
+qa_2:
+ task: qa
+ args:
+ dataset: hotpotqa
diff --git a/images/MInference1_onepage.png b/images/MInference1_onepage.png
new file mode 100644
index 0000000..8f58ffc
Binary files /dev/null and b/images/MInference1_onepage.png differ
diff --git a/images/MInference_logo.png b/images/MInference_logo.png
new file mode 100644
index 0000000..857dcef
Binary files /dev/null and b/images/MInference_logo.png differ
diff --git a/images/benchmarks/needle_viz_LLaMA-3-8B-1M_ours_1K_1000K.png b/images/benchmarks/needle_viz_LLaMA-3-8B-1M_ours_1K_1000K.png
new file mode 100644
index 0000000..f15ad2b
Binary files /dev/null and b/images/benchmarks/needle_viz_LLaMA-3-8B-1M_ours_1K_1000K.png differ
diff --git a/images/benchmarks/ppl-LLaMA-3-262k.png b/images/benchmarks/ppl-LLaMA-3-262k.png
new file mode 100644
index 0000000..c9491d8
Binary files /dev/null and b/images/benchmarks/ppl-LLaMA-3-262k.png differ
diff --git a/images/t5_sparse_pattern.png b/images/t5_sparse_pattern.png
new file mode 100644
index 0000000..33543ab
Binary files /dev/null and b/images/t5_sparse_pattern.png differ
diff --git a/minference/__init__.py b/minference/__init__.py
new file mode 100644
index 0000000..54089cd
--- /dev/null
+++ b/minference/__init__.py
@@ -0,0 +1,31 @@
+# Copyright (c) 2024 Microsoft
+# Licensed under The MIT License [see LICENSE for details]
+
+from .configs.model2path import get_support_models
+
+# flake8: noqa
+from .minference_configuration import MInferenceConfig
+from .models_patch import MInference
+from .ops.block_sparse_flash_attention import block_sparse_attention
+from .ops.pit_sparse_flash_attention_v2 import vertical_slash_sparse_attention
+from .ops.streaming_kernel import streaming_forward
+from .patch import (
+ minference_patch,
+ minference_patch_kv_cache_cpu,
+ minference_patch_with_snapkv,
+ patch_hf,
+)
+from .version import VERSION as __version__
+
+__all__ = [
+ "MInference",
+ "MInferenceConfig",
+ "minference_patch",
+ "minference_patch_kv_cache_cpu",
+ "minference_patch_with_snapkv",
+ "patch_hf",
+ "vertical_slash_sparse_attention",
+ "block_sparse_attention",
+ "streaming_forward",
+ "get_support_models",
+]
diff --git a/minference/configs/GLM_4_9B_1M_instruct_kv_out_v32_fit_o_best_pattern.json b/minference/configs/GLM_4_9B_1M_instruct_kv_out_v32_fit_o_best_pattern.json
new file mode 100644
index 0000000..9f0135f
--- /dev/null
+++ b/minference/configs/GLM_4_9B_1M_instruct_kv_out_v32_fit_o_best_pattern.json
@@ -0,0 +1 @@
+[{"0": ["vertical_and_slash", 1000, 6096, 0.607320249080658], "1": ["vertical_and_slash", 30, 800, 0.8188685178756714], "2": ["vertical_and_slash", 3500, 100, 0.5349845290184021], "3": ["vertical_and_slash", 1000, 6096, 0.4409796893596649], "4": ["vertical_and_slash", 1000, 6096, 0.6407181024551392], "5": ["vertical_and_slash", 1000, 6096, 0.7205579876899719], "6": ["vertical_and_slash", 1000, 6096, 0.8290738463401794], "7": ["vertical_and_slash", 1000, 6096, 0.5250093936920166], "8": ["vertical_and_slash", 1000, 6096, 0.39919227361679077], "9": ["vertical_and_slash", 1000, 6096, 0.8610774278640747], "10": ["vertical_and_slash", 1000, 6096, 0.557726263999939], "11": ["vertical_and_slash", 1000, 6096, 0.41778454184532166], "12": ["vertical_and_slash", 1000, 6096, 0.5489121079444885], "13": ["vertical_and_slash", 1000, 6096, 0.4011226296424866], "14": ["vertical_and_slash", 1000, 6096, 0.42057377099990845], "15": ["vertical_and_slash", 1000, 6096, 0.4420812726020813], "16": ["vertical_and_slash", 3500, 100, 0.539909303188324], "17": ["vertical_and_slash", 1000, 6096, 0.473086953163147], "18": ["vertical_and_slash", 3500, 100, 0.6077426075935364], "19": ["vertical_and_slash", 1000, 6096, 0.49552032351493835], "20": ["vertical_and_slash", 1000, 6096, 0.8738178014755249], "21": ["vertical_and_slash", 1000, 6096, 0.7316954135894775], "22": ["vertical_and_slash", 3500, 100, 0.5517809391021729], "23": ["vertical_and_slash", 1000, 6096, 0.5873302221298218], "24": ["vertical_and_slash", 1000, 6096, 0.5817166566848755], "25": ["vertical_and_slash", 3500, 100, 0.5116572380065918], "26": ["vertical_and_slash", 3500, 100, 0.586027979850769], "27": ["vertical_and_slash", 1000, 6096, 0.39690160751342773], "28": ["vertical_and_slash", 1000, 6096, 0.688616156578064], "29": ["vertical_and_slash", 30, 800, 0.9594184756278992], "30": ["vertical_and_slash", 1000, 6096, 0.44193533062934875], "31": ["vertical_and_slash", 1000, 6096, 0.598908543586731]}, {"0": ["vertical_and_slash", 30, 800, 0.984818696975708], "1": ["vertical_and_slash", 1000, 6096, 0.9897077679634094], "2": ["vertical_and_slash", 1000, 6096, 0.9849515557289124], "3": ["vertical_and_slash", 1000, 6096, 0.8847459554672241], "4": ["vertical_and_slash", 1000, 6096, 0.982058048248291], "5": ["vertical_and_slash", 1000, 6096, 0.9073014855384827], "6": ["vertical_and_slash", 1000, 6096, 0.9022318720817566], "7": ["vertical_and_slash", 1000, 6096, 0.7311952114105225], "8": ["vertical_and_slash", 1000, 6096, 0.9608654975891113], "9": ["vertical_and_slash", 1000, 6096, 0.9718127250671387], "10": ["vertical_and_slash", 1000, 6096, 0.9784811735153198], "11": ["vertical_and_slash", 1000, 6096, 0.9465556144714355], "12": ["vertical_and_slash", 1000, 6096, 0.9491732120513916], "13": ["vertical_and_slash", 1000, 6096, 0.6718633770942688], "14": ["vertical_and_slash", 3500, 100, 0.9624817967414856], "15": ["vertical_and_slash", 1000, 6096, 0.9829132556915283], "16": ["vertical_and_slash", 3500, 100, 0.8535671234130859], "17": ["vertical_and_slash", 3500, 100, 0.9092485308647156], "18": ["vertical_and_slash", 1000, 6096, 0.9137914180755615], "19": ["vertical_and_slash", 1000, 6096, 0.9581669569015503], "20": ["vertical_and_slash", 3500, 100, 0.9440631866455078], "21": ["vertical_and_slash", 1000, 6096, 0.960383415222168], "22": ["vertical_and_slash", 1000, 6096, 0.9197445511817932], "23": ["vertical_and_slash", 1000, 6096, 0.932186484336853], "24": ["vertical_and_slash", 1000, 6096, 0.9688400030136108], "25": ["vertical_and_slash", 3500, 100, 
0.8753203749656677], "26": ["vertical_and_slash", 1000, 6096, 0.778458833694458], "27": ["vertical_and_slash", 1000, 6096, 0.9089145660400391], "28": ["vertical_and_slash", 1000, 6096, 0.9541447162628174], "29": ["vertical_and_slash", 1000, 6096, 0.849644660949707], "30": ["vertical_and_slash", 1000, 6096, 0.6009925603866577], "31": ["vertical_and_slash", 3500, 100, 0.8730477690696716]}, {"0": ["vertical_and_slash", 1000, 6096, 0.9927235245704651], "1": ["vertical_and_slash", 3500, 100, 0.970400333404541], "2": ["vertical_and_slash", 1000, 6096, 0.9524849057197571], "3": ["vertical_and_slash", 1000, 6096, 0.8191299438476562], "4": ["vertical_and_slash", 1000, 6096, 0.99163419008255], "5": ["vertical_and_slash", 1000, 6096, 0.9896164536476135], "6": ["vertical_and_slash", 1000, 6096, 0.9094650149345398], "7": ["vertical_and_slash", 30, 800, 0.988660991191864], "8": ["vertical_and_slash", 1000, 6096, 0.9893123507499695], "9": ["vertical_and_slash", 1000, 6096, 0.9888522624969482], "10": ["vertical_and_slash", 1000, 6096, 0.9870534539222717], "11": ["vertical_and_slash", 1000, 6096, 0.988201379776001], "12": ["vertical_and_slash", 1000, 6096, 0.9722323417663574], "13": ["vertical_and_slash", 1000, 6096, 0.9796289801597595], "14": ["vertical_and_slash", 1000, 6096, 0.9456352591514587], "15": ["vertical_and_slash", 1000, 6096, 0.9492893218994141], "16": ["vertical_and_slash", 1000, 6096, 0.939911425113678], "17": ["vertical_and_slash", 1000, 6096, 0.9800530076026917], "18": ["vertical_and_slash", 1000, 6096, 0.9930890798568726], "19": ["vertical_and_slash", 1000, 6096, 0.994559109210968], "20": ["vertical_and_slash", 1000, 6096, 0.9873578548431396], "21": ["vertical_and_slash", 3500, 100, 0.9922747015953064], "22": ["vertical_and_slash", 1000, 6096, 0.868395984172821], "23": ["vertical_and_slash", 1000, 6096, 0.7782280445098877], "24": ["vertical_and_slash", 1000, 6096, 0.9941012263298035], "25": ["vertical_and_slash", 1000, 6096, 0.9804511070251465], "26": ["vertical_and_slash", 1000, 6096, 0.9945708513259888], "27": ["vertical_and_slash", 1000, 6096, 0.9056034088134766], "28": ["vertical_and_slash", 1000, 6096, 0.9548148512840271], "29": ["vertical_and_slash", 1000, 6096, 0.9423152804374695], "30": ["vertical_and_slash", 1000, 6096, 0.8910396695137024], "31": ["vertical_and_slash", 1000, 6096, 0.9749031066894531]}, {"0": ["vertical_and_slash", 1000, 6096, 0.980637788772583], "1": ["vertical_and_slash", 1000, 6096, 0.9693703055381775], "2": ["vertical_and_slash", 1000, 6096, 0.975855827331543], "3": ["vertical_and_slash", 1000, 6096, 0.968798816204071], "4": ["vertical_and_slash", 1000, 6096, 0.981493353843689], "5": ["vertical_and_slash", 1000, 6096, 0.7912566065788269], "6": ["vertical_and_slash", 30, 800, 0.984382688999176], "7": ["vertical_and_slash", 1000, 6096, 0.9784753918647766], "8": ["vertical_and_slash", 1000, 6096, 0.9720668196678162], "9": ["vertical_and_slash", 1000, 6096, 0.9726041555404663], "10": ["vertical_and_slash", 30, 800, 0.9742363691329956], "11": ["vertical_and_slash", 1000, 6096, 0.9672162532806396], "12": ["vertical_and_slash", 1000, 6096, 0.9729542136192322], "13": ["vertical_and_slash", 1000, 6096, 0.9446829557418823], "14": ["vertical_and_slash", 30, 800, 0.9846457839012146], "15": ["vertical_and_slash", 1000, 6096, 0.8565210103988647], "16": ["vertical_and_slash", 1000, 6096, 0.9071152806282043], "17": ["vertical_and_slash", 1000, 6096, 0.842366099357605], "18": ["vertical_and_slash", 1000, 6096, 0.9122264981269836], "19": ["vertical_and_slash", 1000, 6096, 
0.9932763576507568], "20": ["vertical_and_slash", 1000, 6096, 0.9251010417938232], "21": ["vertical_and_slash", 1000, 6096, 0.9563732147216797], "22": ["vertical_and_slash", 1000, 6096, 0.9262596964836121], "23": ["vertical_and_slash", 1000, 6096, 0.8246892690658569], "24": ["vertical_and_slash", 1000, 6096, 0.9308041334152222], "25": ["vertical_and_slash", 1000, 6096, 0.9385374784469604], "26": ["vertical_and_slash", 1000, 6096, 0.9059684872627258], "27": ["vertical_and_slash", 1000, 6096, 0.991765558719635], "28": ["vertical_and_slash", 1000, 6096, 0.9371857047080994], "29": ["vertical_and_slash", 1000, 6096, 0.9697853326797485], "30": ["vertical_and_slash", 1000, 6096, 0.8799659609794617], "31": ["vertical_and_slash", 1000, 6096, 0.9886091351509094]}, {"0": ["vertical_and_slash", 30, 800, 0.9664506316184998], "1": ["vertical_and_slash", 30, 800, 0.9881126284599304], "2": ["vertical_and_slash", 30, 800, 0.9925991892814636], "3": ["vertical_and_slash", 30, 800, 0.9699523448944092], "4": ["vertical_and_slash", 30, 800, 0.9941449761390686], "5": ["vertical_and_slash", 1000, 6096, 0.7463071942329407], "6": ["vertical_and_slash", 1000, 6096, 0.8109810948371887], "7": ["vertical_and_slash", 30, 800, 0.9786219000816345], "8": ["vertical_and_slash", 30, 800, 0.9961836934089661], "9": ["vertical_and_slash", 30, 800, 0.9840148091316223], "10": ["vertical_and_slash", 30, 800, 0.9944106936454773], "11": ["vertical_and_slash", 30, 800, 0.9784397482872009], "12": ["vertical_and_slash", 30, 800, 0.9790462255477905], "13": ["vertical_and_slash", 30, 800, 0.9580661058425903], "14": ["vertical_and_slash", 30, 800, 0.9926502704620361], "15": ["vertical_and_slash", 30, 800, 0.9953798055648804], "16": ["vertical_and_slash", 30, 800, 0.9911163449287415], "17": ["vertical_and_slash", 30, 800, 0.9968860149383545], "18": ["vertical_and_slash", 30, 800, 0.9946805238723755], "19": ["vertical_and_slash", 30, 800, 0.9786041378974915], "20": ["vertical_and_slash", 30, 800, 0.9883182644844055], "21": ["vertical_and_slash", 30, 800, 0.9965389966964722], "22": ["vertical_and_slash", 30, 800, 0.9950538277626038], "23": ["vertical_and_slash", 30, 800, 0.9956833124160767], "24": ["vertical_and_slash", 1000, 6096, 0.6033216714859009], "25": ["vertical_and_slash", 30, 800, 0.9926028847694397], "26": ["vertical_and_slash", 1000, 6096, 0.9789131879806519], "27": ["vertical_and_slash", 1000, 6096, 0.6449341177940369], "28": ["vertical_and_slash", 30, 800, 0.9902080297470093], "29": ["vertical_and_slash", 30, 800, 0.9909520745277405], "30": ["vertical_and_slash", 30, 800, 0.9747541546821594], "31": ["vertical_and_slash", 1000, 6096, 0.9476008415222168]}, {"0": ["vertical_and_slash", 1000, 6096, 0.9904411435127258], "1": ["vertical_and_slash", 30, 800, 0.9910487532615662], "2": ["vertical_and_slash", 1000, 6096, 0.9756672382354736], "3": ["vertical_and_slash", 1000, 6096, 0.932816743850708], "4": ["vertical_and_slash", 1000, 6096, 0.9568681120872498], "5": ["vertical_and_slash", 1000, 6096, 0.9725597500801086], "6": ["vertical_and_slash", 1000, 6096, 0.9484926462173462], "7": ["vertical_and_slash", 1000, 6096, 0.9971364140510559], "8": ["vertical_and_slash", 1000, 6096, 0.998288631439209], "9": ["vertical_and_slash", 1000, 6096, 0.9971045255661011], "10": ["vertical_and_slash", 1000, 6096, 0.9566321969032288], "11": ["vertical_and_slash", 1000, 6096, 0.9951393008232117], "12": ["vertical_and_slash", 1000, 6096, 0.7474702000617981], "13": ["vertical_and_slash", 1000, 6096, 0.9912301301956177], "14": ["vertical_and_slash", 100, 
800, 1.0], "15": ["vertical_and_slash", 1000, 6096, 0.8816485404968262], "16": ["vertical_and_slash", 30, 800, 0.9929412603378296], "17": ["vertical_and_slash", 1000, 6096, 0.7502387762069702], "18": ["vertical_and_slash", 30, 800, 0.9968609809875488], "19": ["vertical_and_slash", 1000, 6096, 0.9264512062072754], "20": ["vertical_and_slash", 1000, 6096, 0.7249436974525452], "21": ["vertical_and_slash", 30, 800, 0.9985251426696777], "22": ["vertical_and_slash", 1000, 6096, 0.4554010033607483], "23": ["vertical_and_slash", 100, 800, 0.98046875], "24": ["vertical_and_slash", 30, 800, 0.9897025227546692], "25": ["vertical_and_slash", 30, 800, 0.9924492835998535], "26": ["vertical_and_slash", 1000, 6096, 0.8219242095947266], "27": ["vertical_and_slash", 30, 800, 0.9866492748260498], "28": ["vertical_and_slash", 30, 800, 0.8629838228225708], "29": ["vertical_and_slash", 1000, 6096, 0.8555644154548645], "30": ["vertical_and_slash", 30, 800, 0.9946920275688171], "31": ["vertical_and_slash", 1000, 6096, 0.7602364420890808]}, {"0": ["vertical_and_slash", 1000, 6096, 0.9341182112693787], "1": ["vertical_and_slash", 30, 800, 0.9930327534675598], "2": ["vertical_and_slash", 30, 800, 0.9864211082458496], "3": ["vertical_and_slash", 30, 800, 0.9971551299095154], "4": ["vertical_and_slash", 1000, 6096, 0.8854694366455078], "5": ["vertical_and_slash", 30, 800, 0.9771373867988586], "6": ["vertical_and_slash", 1000, 6096, 0.9856491684913635], "7": ["vertical_and_slash", 30, 800, 0.9830764532089233], "8": ["vertical_and_slash", 30, 800, 0.9875907897949219], "9": ["vertical_and_slash", 1000, 6096, 0.9652862548828125], "10": ["vertical_and_slash", 1000, 6096, 0.9916911125183105], "11": ["vertical_and_slash", 1000, 6096, 0.9601388573646545], "12": ["vertical_and_slash", 1000, 6096, 0.9818140268325806], "13": ["vertical_and_slash", 30, 800, 0.9573540687561035], "14": ["vertical_and_slash", 30, 800, 0.9918647408485413], "15": ["vertical_and_slash", 1000, 6096, 0.9186978936195374], "16": ["vertical_and_slash", 1000, 6096, 0.9034823775291443], "17": ["vertical_and_slash", 1000, 6096, 0.9693074226379395], "18": ["vertical_and_slash", 1000, 6096, 0.9873420596122742], "19": ["vertical_and_slash", 1000, 6096, 0.9989088177680969], "20": ["vertical_and_slash", 1000, 6096, 0.9074654579162598], "21": ["vertical_and_slash", 1000, 6096, 0.7031925916671753], "22": ["vertical_and_slash", 1000, 6096, 0.951509416103363], "23": ["vertical_and_slash", 1000, 6096, 0.8980137705802917], "24": ["vertical_and_slash", 1000, 6096, 0.9660430550575256], "25": ["vertical_and_slash", 1000, 6096, 0.6851689219474792], "26": ["vertical_and_slash", 1000, 6096, 0.7880844473838806], "27": ["vertical_and_slash", 1000, 6096, 0.9759742617607117], "28": ["vertical_and_slash", 1000, 6096, 0.6127987504005432], "29": ["vertical_and_slash", 1000, 6096, 0.8653184771537781], "30": ["vertical_and_slash", 30, 800, 0.9919092655181885], "31": ["vertical_and_slash", 1000, 6096, 0.9924417734146118]}, {"0": ["vertical_and_slash", 1000, 6096, 0.949621319770813], "1": ["vertical_and_slash", 1000, 6096, 0.9911826848983765], "2": ["vertical_and_slash", 1000, 6096, 0.9973833560943604], "3": ["vertical_and_slash", 1000, 6096, 0.8331511616706848], "4": ["vertical_and_slash", 1000, 6096, 0.8171661496162415], "5": ["vertical_and_slash", 1000, 6096, 0.9722827076911926], "6": ["vertical_and_slash", 1000, 6096, 0.9896110892295837], "7": ["vertical_and_slash", 1000, 6096, 0.9418066740036011], "8": ["vertical_and_slash", 1000, 6096, 0.7648960947990417], "9": 
["vertical_and_slash", 1000, 6096, 0.9909672141075134], "10": ["vertical_and_slash", 3500, 100, 0.8392957448959351], "11": ["vertical_and_slash", 1000, 6096, 0.755734920501709], "12": ["vertical_and_slash", 1000, 6096, 0.9959866404533386], "13": ["vertical_and_slash", 1000, 6096, 0.8561900854110718], "14": ["vertical_and_slash", 1000, 6096, 0.7799063324928284], "15": ["vertical_and_slash", 1000, 6096, 0.8104366064071655], "16": ["vertical_and_slash", 1000, 6096, 0.9958652257919312], "17": ["vertical_and_slash", 1000, 6096, 0.9523825645446777], "18": ["vertical_and_slash", 30, 800, 0.9975892901420593], "19": ["vertical_and_slash", 1000, 6096, 0.9033409953117371], "20": ["vertical_and_slash", 100, 800, 0.8671875], "21": ["vertical_and_slash", 100, 800, 0.96484375], "22": ["vertical_and_slash", 1000, 6096, 0.9921835660934448], "23": ["vertical_and_slash", 1000, 6096, 0.9847569465637207], "24": ["vertical_and_slash", 1000, 6096, 0.9824939966201782], "25": ["vertical_and_slash", 1000, 6096, 0.9986788630485535], "26": ["vertical_and_slash", 1000, 6096, 0.8709461688995361], "27": ["vertical_and_slash", 1000, 6096, 0.9971341490745544], "28": ["vertical_and_slash", 1000, 6096, 0.9992262125015259], "29": ["vertical_and_slash", 1000, 6096, 0.6482179760932922], "30": ["vertical_and_slash", 1000, 6096, 0.9929115176200867], "31": ["vertical_and_slash", 1000, 6096, 0.9922140836715698]}, {"0": ["vertical_and_slash", 1000, 6096, 0.9948307871818542], "1": ["vertical_and_slash", 1000, 6096, 0.9119188189506531], "2": ["vertical_and_slash", 1000, 6096, 0.9955572485923767], "3": ["vertical_and_slash", 1000, 6096, 0.9988696575164795], "4": ["vertical_and_slash", 1000, 6096, 0.930955708026886], "5": ["vertical_and_slash", 1000, 6096, 0.9703837037086487], "6": ["vertical_and_slash", 30, 800, 0.998771071434021], "7": ["vertical_and_slash", 1000, 6096, 0.9730043411254883], "8": ["vertical_and_slash", 1000, 6096, 0.9916525483131409], "9": ["vertical_and_slash", 1000, 6096, 0.9904662370681763], "10": ["vertical_and_slash", 1000, 6096, 0.9029244780540466], "11": ["vertical_and_slash", 1000, 6096, 0.6398787498474121], "12": ["vertical_and_slash", 1000, 6096, 0.9975113272666931], "13": ["vertical_and_slash", 1000, 6096, 0.9041478633880615], "14": ["vertical_and_slash", 1000, 6096, 0.9986220598220825], "15": ["vertical_and_slash", 1000, 6096, 0.97948157787323], "16": ["vertical_and_slash", 1000, 6096, 0.991273045539856], "17": ["vertical_and_slash", 1000, 6096, 0.9922738075256348], "18": ["vertical_and_slash", 1000, 6096, 0.9973925352096558], "19": ["vertical_and_slash", 1000, 6096, 0.9190897941589355], "20": ["vertical_and_slash", 1000, 6096, 0.9636063575744629], "21": ["vertical_and_slash", 1000, 6096, 0.9976775646209717], "22": ["vertical_and_slash", 1000, 6096, 0.8911557197570801], "23": ["vertical_and_slash", 1000, 6096, 0.9931186437606812], "24": ["vertical_and_slash", 1000, 6096, 0.9836262464523315], "25": ["vertical_and_slash", 1000, 6096, 0.9732087850570679], "26": ["vertical_and_slash", 1000, 6096, 0.8872925639152527], "27": ["vertical_and_slash", 1000, 6096, 0.9577845931053162], "28": ["vertical_and_slash", 1000, 6096, 0.8456900119781494], "29": ["vertical_and_slash", 1000, 6096, 0.6334351897239685], "30": ["vertical_and_slash", 1000, 6096, 0.9903744459152222], "31": ["vertical_and_slash", 1000, 6096, 0.9899418354034424]}, {"0": ["vertical_and_slash", 1000, 6096, 0.9642171859741211], "1": ["vertical_and_slash", 1000, 6096, 0.47584909200668335], "2": ["vertical_and_slash", 1000, 6096, 0.9893169403076172], "3": 
["vertical_and_slash", 1000, 6096, 0.6212961077690125], "4": ["vertical_and_slash", 1000, 6096, 0.4895332455635071], "5": ["vertical_and_slash", 1000, 6096, 0.5647990703582764], "6": ["vertical_and_slash", 1000, 6096, 0.7113249897956848], "7": ["vertical_and_slash", 1000, 6096, 0.920142650604248], "8": ["vertical_and_slash", 1000, 6096, 0.9878794550895691], "9": ["vertical_and_slash", 1000, 6096, 0.9920369386672974], "10": ["vertical_and_slash", 1000, 6096, 0.9919956922531128], "11": ["vertical_and_slash", 1000, 6096, 0.9468153119087219], "12": ["vertical_and_slash", 1000, 6096, 0.9600852131843567], "13": ["vertical_and_slash", 1000, 6096, 0.9971191883087158], "14": ["vertical_and_slash", 1000, 6096, 0.9691506028175354], "15": ["vertical_and_slash", 1000, 6096, 0.9900606274604797], "16": ["vertical_and_slash", 1000, 6096, 0.7718557119369507], "17": ["vertical_and_slash", 1000, 6096, 0.9399040937423706], "18": ["vertical_and_slash", 1000, 6096, 0.9840185046195984], "19": ["vertical_and_slash", 1000, 6096, 0.8616393208503723], "20": ["vertical_and_slash", 1000, 6096, 0.9769756197929382], "21": ["vertical_and_slash", 1000, 6096, 0.8033336997032166], "22": ["vertical_and_slash", 1000, 6096, 0.9497328996658325], "23": ["vertical_and_slash", 1000, 6096, 0.9382109642028809], "24": ["vertical_and_slash", 1000, 6096, 0.9936342835426331], "25": ["vertical_and_slash", 1000, 6096, 0.9964848160743713], "26": ["vertical_and_slash", 1000, 6096, 0.9940112829208374], "27": ["vertical_and_slash", 1000, 6096, 0.9805063009262085], "28": ["vertical_and_slash", 1000, 6096, 0.6296311616897583], "29": ["vertical_and_slash", 1000, 6096, 0.8419080972671509], "30": ["vertical_and_slash", 1000, 6096, 0.992581844329834], "31": ["vertical_and_slash", 1000, 6096, 0.9726547002792358]}, {"0": ["vertical_and_slash", 1000, 6096, 0.9562081694602966], "1": ["vertical_and_slash", 1000, 6096, 0.920412540435791], "2": ["vertical_and_slash", 1000, 6096, 0.9765562415122986], "3": ["vertical_and_slash", 1000, 6096, 0.846834123134613], "4": ["vertical_and_slash", 1000, 6096, 0.971135139465332], "5": ["vertical_and_slash", 1000, 6096, 0.9338746666908264], "6": ["vertical_and_slash", 1000, 6096, 0.7242234945297241], "7": ["vertical_and_slash", 1000, 6096, 0.9260809421539307], "8": ["vertical_and_slash", 1000, 6096, 0.876142680644989], "9": ["vertical_and_slash", 1000, 6096, 0.8419959545135498], "10": ["vertical_and_slash", 1000, 6096, 0.7114543318748474], "11": ["vertical_and_slash", 1000, 6096, 0.8231581449508667], "12": ["vertical_and_slash", 1000, 6096, 0.8134669661521912], "13": ["vertical_and_slash", 1000, 6096, 0.9823401570320129], "14": ["vertical_and_slash", 1000, 6096, 0.8788273930549622], "15": ["vertical_and_slash", 1000, 6096, 0.7951365113258362], "16": ["vertical_and_slash", 1000, 6096, 0.9595907330513], "17": ["vertical_and_slash", 1000, 6096, 0.9799779057502747], "18": ["vertical_and_slash", 1000, 6096, 0.9359989166259766], "19": ["vertical_and_slash", 1000, 6096, 0.9746171236038208], "20": ["vertical_and_slash", 1000, 6096, 0.9863234758377075], "21": ["vertical_and_slash", 1000, 6096, 0.9424668550491333], "22": ["vertical_and_slash", 1000, 6096, 0.8575285077095032], "23": ["vertical_and_slash", 1000, 6096, 0.752450168132782], "24": ["vertical_and_slash", 1000, 6096, 0.9958561658859253], "25": ["vertical_and_slash", 3500, 100, 0.8371226191520691], "26": ["vertical_and_slash", 1000, 6096, 0.9757732152938843], "27": ["vertical_and_slash", 1000, 6096, 0.8623197078704834], "28": ["vertical_and_slash", 1000, 6096, 
0.9288658499717712], "29": ["vertical_and_slash", 1000, 6096, 0.9244252443313599], "30": ["vertical_and_slash", 1000, 6096, 0.9852326512336731], "31": ["vertical_and_slash", 1000, 6096, 0.8441554307937622]}, {"0": ["vertical_and_slash", 1000, 6096, 0.8664576411247253], "1": ["vertical_and_slash", 1000, 6096, 0.885201632976532], "2": ["vertical_and_slash", 1000, 6096, 0.9882799386978149], "3": ["vertical_and_slash", 1000, 6096, 0.4493744373321533], "4": ["vertical_and_slash", 1000, 6096, 0.9310450553894043], "5": ["vertical_and_slash", 1000, 6096, 0.3955627977848053], "6": ["vertical_and_slash", 1000, 6096, 0.9268753528594971], "7": ["vertical_and_slash", 1000, 6096, 0.7176633477210999], "8": ["vertical_and_slash", 1000, 6096, 0.9735022187232971], "9": ["vertical_and_slash", 1000, 6096, 0.8834140300750732], "10": ["vertical_and_slash", 1000, 6096, 0.8966405987739563], "11": ["vertical_and_slash", 1000, 6096, 0.7090746164321899], "12": ["vertical_and_slash", 1000, 6096, 0.8831575512886047], "13": ["vertical_and_slash", 1000, 6096, 0.9805029034614563], "14": ["vertical_and_slash", 1000, 6096, 0.8853322267532349], "15": ["vertical_and_slash", 3500, 100, 0.9863671660423279], "16": ["vertical_and_slash", 1000, 6096, 0.9831417798995972], "17": ["vertical_and_slash", 1000, 6096, 0.9809348583221436], "18": ["vertical_and_slash", 1000, 6096, 0.9465487599372864], "19": ["vertical_and_slash", 1000, 6096, 0.941412627696991], "20": ["vertical_and_slash", 1000, 6096, 0.9227497577667236], "21": ["vertical_and_slash", 1000, 6096, 0.9292575120925903], "22": ["vertical_and_slash", 1000, 6096, 0.9284678101539612], "23": ["vertical_and_slash", 1000, 6096, 0.9765072464942932], "24": ["vertical_and_slash", 1000, 6096, 0.9329541325569153], "25": ["vertical_and_slash", 1000, 6096, 0.9777523875236511], "26": ["vertical_and_slash", 1000, 6096, 0.8051952123641968], "27": ["vertical_and_slash", 1000, 6096, 0.8399110436439514], "28": ["vertical_and_slash", 1000, 6096, 0.9342514872550964], "29": ["vertical_and_slash", 1000, 6096, 0.9741578102111816], "30": ["vertical_and_slash", 1000, 6096, 0.848839521408081], "31": ["vertical_and_slash", 1000, 6096, 0.98239666223526]}, {"0": ["vertical_and_slash", 1000, 6096, 0.9757362604141235], "1": ["vertical_and_slash", 1000, 6096, 0.9550406336784363], "2": ["vertical_and_slash", 1000, 6096, 0.9211279153823853], "3": ["vertical_and_slash", 1000, 6096, 0.8916653990745544], "4": ["vertical_and_slash", 1000, 6096, 0.8767431974411011], "5": ["vertical_and_slash", 1000, 6096, 0.9455703496932983], "6": ["vertical_and_slash", 1000, 6096, 0.9527192115783691], "7": ["vertical_and_slash", 1000, 6096, 0.7811068296432495], "8": ["vertical_and_slash", 1000, 6096, 0.8316296339035034], "9": ["vertical_and_slash", 1000, 6096, 0.8304035663604736], "10": ["vertical_and_slash", 1000, 6096, 0.8664345741271973], "11": ["vertical_and_slash", 1000, 6096, 0.7507209181785583], "12": ["vertical_and_slash", 1000, 6096, 0.6998193860054016], "13": ["vertical_and_slash", 1000, 6096, 0.8083937764167786], "14": ["vertical_and_slash", 1000, 6096, 0.936531126499176], "15": ["vertical_and_slash", 3500, 100, 0.8388638496398926], "16": ["vertical_and_slash", 1000, 6096, 0.9760980010032654], "17": ["vertical_and_slash", 1000, 6096, 0.8551024794578552], "18": ["vertical_and_slash", 1000, 6096, 0.8102226257324219], "19": ["vertical_and_slash", 1000, 6096, 0.7975588440895081], "20": ["vertical_and_slash", 1000, 6096, 0.7827161550521851], "21": ["vertical_and_slash", 1000, 6096, 0.8549791574478149], "22": 
["vertical_and_slash", 1000, 6096, 0.43748968839645386], "23": ["vertical_and_slash", 1000, 6096, 0.9711122512817383], "24": ["vertical_and_slash", 1000, 6096, 0.9607608318328857], "25": ["vertical_and_slash", 1000, 6096, 0.9769198894500732], "26": ["vertical_and_slash", 1000, 6096, 0.8103007674217224], "27": ["vertical_and_slash", 1000, 6096, 0.8963778018951416], "28": ["vertical_and_slash", 1000, 6096, 0.9841576814651489], "29": ["vertical_and_slash", 1000, 6096, 0.7483077645301819], "30": ["vertical_and_slash", 1000, 6096, 0.7113289833068848], "31": ["vertical_and_slash", 1000, 6096, 0.9651824235916138]}, {"0": ["vertical_and_slash", 1000, 6096, 0.9249807000160217], "1": ["vertical_and_slash", 1000, 6096, 0.9844335317611694], "2": ["vertical_and_slash", 1000, 6096, 0.9704158306121826], "3": ["vertical_and_slash", 1000, 6096, 0.8301292657852173], "4": ["vertical_and_slash", 1000, 6096, 0.845939040184021], "5": ["vertical_and_slash", 1000, 6096, 0.960191547870636], "6": ["vertical_and_slash", 1000, 6096, 0.7878851294517517], "7": ["vertical_and_slash", 1000, 6096, 0.8991147875785828], "8": ["vertical_and_slash", 1000, 6096, 0.8055030107498169], "9": ["vertical_and_slash", 1000, 6096, 0.9609166979789734], "10": ["vertical_and_slash", 1000, 6096, 0.6763443350791931], "11": ["vertical_and_slash", 100, 800, 0.8046875], "12": ["vertical_and_slash", 1000, 6096, 0.8667144179344177], "13": ["vertical_and_slash", 1000, 6096, 0.8666128516197205], "14": ["vertical_and_slash", 1000, 6096, 0.9005298018455505], "15": ["vertical_and_slash", 1000, 6096, 0.9701908230781555], "16": ["vertical_and_slash", 1000, 6096, 0.6784012317657471], "17": ["vertical_and_slash", 1000, 6096, 0.6145890355110168], "18": ["vertical_and_slash", 1000, 6096, 0.9477112889289856], "19": ["vertical_and_slash", 1000, 6096, 0.772862434387207], "20": ["vertical_and_slash", 3500, 100, 0.47021621465682983], "21": ["vertical_and_slash", 1000, 6096, 0.6586878895759583], "22": ["vertical_and_slash", 1000, 6096, 0.9840757250785828], "23": ["vertical_and_slash", 1000, 6096, 0.9784411787986755], "24": ["vertical_and_slash", 1000, 6096, 0.9597467184066772], "25": ["vertical_and_slash", 1000, 6096, 0.827265739440918], "26": ["vertical_and_slash", 1000, 6096, 0.9158800840377808], "27": ["vertical_and_slash", 1000, 6096, 0.8512502908706665], "28": ["vertical_and_slash", 1000, 6096, 0.9813379049301147], "29": ["vertical_and_slash", 1000, 6096, 0.8553346991539001], "30": ["vertical_and_slash", 1000, 6096, 0.9829427003860474], "31": ["vertical_and_slash", 1000, 6096, 0.9277620911598206]}, {"0": ["vertical_and_slash", 1000, 6096, 0.9730343222618103], "1": ["vertical_and_slash", 1000, 6096, 0.9595633745193481], "2": ["vertical_and_slash", 1000, 6096, 0.942793607711792], "3": ["vertical_and_slash", 1000, 6096, 0.9198371767997742], "4": ["vertical_and_slash", 1000, 6096, 0.9492561221122742], "5": ["vertical_and_slash", 1000, 6096, 0.8625556230545044], "6": ["vertical_and_slash", 1000, 6096, 0.9631521105766296], "7": ["vertical_and_slash", 3500, 100, 0.8207873702049255], "8": ["vertical_and_slash", 1000, 6096, 0.7821859121322632], "9": ["vertical_and_slash", 1000, 6096, 0.9522233605384827], "10": ["vertical_and_slash", 1000, 6096, 0.8461671471595764], "11": ["vertical_and_slash", 1000, 6096, 0.8316655158996582], "12": ["vertical_and_slash", 1000, 6096, 0.898086667060852], "13": ["vertical_and_slash", 1000, 6096, 0.8890058398246765], "14": ["vertical_and_slash", 1000, 6096, 0.7582146525382996], "15": ["vertical_and_slash", 1000, 6096, 
0.8623766303062439], "16": ["vertical_and_slash", 1000, 6096, 0.8074561357498169], "17": ["vertical_and_slash", 1000, 6096, 0.8311309814453125], "18": ["vertical_and_slash", 1000, 6096, 0.8926411271095276], "19": ["vertical_and_slash", 1000, 6096, 0.7730618119239807], "20": ["vertical_and_slash", 1000, 6096, 0.7827007174491882], "21": ["vertical_and_slash", 1000, 6096, 0.7893304228782654], "22": ["vertical_and_slash", 1000, 6096, 0.9643170833587646], "23": ["vertical_and_slash", 1000, 6096, 0.991219162940979], "24": ["vertical_and_slash", 1000, 6096, 0.8481195569038391], "25": ["vertical_and_slash", 1000, 6096, 0.6852841973304749], "26": ["vertical_and_slash", 1000, 6096, 0.9333592057228088], "27": ["vertical_and_slash", 1000, 6096, 0.9733037352561951], "28": ["vertical_and_slash", 100, 800, 0.70703125], "29": ["vertical_and_slash", 30, 800, 0.9687321782112122], "30": ["vertical_and_slash", 1000, 6096, 0.9810034036636353], "31": ["vertical_and_slash", 1000, 6096, 0.9834803342819214]}, {"0": ["vertical_and_slash", 1000, 6096, 0.9606742262840271], "1": ["vertical_and_slash", 1000, 6096, 0.8605623841285706], "2": ["vertical_and_slash", 1000, 6096, 0.9597658514976501], "3": ["vertical_and_slash", 1000, 6096, 0.9735509157180786], "4": ["vertical_and_slash", 1000, 6096, 0.9778039455413818], "5": ["vertical_and_slash", 1000, 6096, 0.9780624508857727], "6": ["vertical_and_slash", 1000, 6096, 0.7774229049682617], "7": ["vertical_and_slash", 1000, 6096, 0.9663316607475281], "8": ["vertical_and_slash", 1000, 6096, 0.9130713939666748], "9": ["vertical_and_slash", 1000, 6096, 0.7733947038650513], "10": ["vertical_and_slash", 3500, 100, 0.9458090662956238], "11": ["vertical_and_slash", 3500, 100, 0.8354401588439941], "12": ["vertical_and_slash", 1000, 6096, 0.981803834438324], "13": ["vertical_and_slash", 1000, 6096, 0.9784169793128967], "14": ["vertical_and_slash", 1000, 6096, 0.9289036989212036], "15": ["vertical_and_slash", 1000, 6096, 0.9593921899795532], "16": ["vertical_and_slash", 1000, 6096, 0.9313501715660095], "17": ["vertical_and_slash", 1000, 6096, 0.8864774107933044], "18": ["vertical_and_slash", 1000, 6096, 0.8550357818603516], "19": ["vertical_and_slash", 1000, 6096, 0.6336711645126343], "20": ["vertical_and_slash", 1000, 6096, 0.8282815217971802], "21": ["vertical_and_slash", 1000, 6096, 0.8986201882362366], "22": ["vertical_and_slash", 1000, 6096, 0.7721410393714905], "23": ["vertical_and_slash", 1000, 6096, 0.9407238960266113], "24": ["vertical_and_slash", 1000, 6096, 0.8724542856216431], "25": ["vertical_and_slash", 1000, 6096, 0.9247862696647644], "26": ["vertical_and_slash", 1000, 6096, 0.8366549611091614], "27": ["vertical_and_slash", 1000, 6096, 0.9383341670036316], "28": ["vertical_and_slash", 1000, 6096, 0.9467699527740479], "29": ["vertical_and_slash", 1000, 6096, 0.9113681316375732], "30": ["vertical_and_slash", 1000, 6096, 0.8863962888717651], "31": ["vertical_and_slash", 1000, 6096, 0.9256607294082642]}, {"0": ["vertical_and_slash", 3500, 100, 0.7657039761543274], "1": ["vertical_and_slash", 1000, 6096, 0.9568408131599426], "2": ["vertical_and_slash", 1000, 6096, 0.9550073742866516], "3": ["vertical_and_slash", 1000, 6096, 0.9009244441986084], "4": ["vertical_and_slash", 1000, 6096, 0.8634456992149353], "5": ["vertical_and_slash", 1000, 6096, 0.6339083313941956], "6": ["vertical_and_slash", 1000, 6096, 0.9180296659469604], "7": ["vertical_and_slash", 1000, 6096, 0.9530697464942932], "8": ["vertical_and_slash", 1000, 6096, 0.9079548120498657], "9": ["vertical_and_slash", 
1000, 6096, 0.9398642182350159], "10": ["vertical_and_slash", 1000, 6096, 0.9383392930030823], "11": ["vertical_and_slash", 3500, 100, 0.9319514632225037], "12": ["vertical_and_slash", 1000, 6096, 0.9342671036720276], "13": ["vertical_and_slash", 1000, 6096, 0.9682186841964722], "14": ["vertical_and_slash", 1000, 6096, 0.9641357660293579], "15": ["vertical_and_slash", 1000, 6096, 0.9038820266723633], "16": ["vertical_and_slash", 1000, 6096, 0.5015620589256287], "17": ["vertical_and_slash", 100, 800, 0.640625], "18": ["vertical_and_slash", 3500, 100, 0.7872055768966675], "19": ["vertical_and_slash", 1000, 6096, 0.5252647995948792], "20": ["vertical_and_slash", 3500, 100, 0.7579703330993652], "21": ["vertical_and_slash", 1000, 6096, 0.7857505083084106], "22": ["vertical_and_slash", 1000, 6096, 0.9352824687957764], "23": ["vertical_and_slash", 1000, 6096, 0.855012059211731], "24": ["vertical_and_slash", 1000, 6096, 0.9918354153633118], "25": ["vertical_and_slash", 1000, 6096, 0.9847279787063599], "26": ["vertical_and_slash", 1000, 6096, 0.45677781105041504], "27": ["vertical_and_slash", 1000, 6096, 0.9896170496940613], "28": ["vertical_and_slash", 1000, 6096, 0.6669602394104004], "29": ["vertical_and_slash", 100, 800, 0.8359375], "30": ["vertical_and_slash", 1000, 6096, 0.955535352230072], "31": ["vertical_and_slash", 3500, 100, 0.5456596612930298]}, {"0": ["vertical_and_slash", 1000, 6096, 0.5645420551300049], "1": ["vertical_and_slash", 1000, 6096, 0.787753701210022], "2": ["vertical_and_slash", 1000, 6096, 0.9233205914497375], "3": ["vertical_and_slash", 1000, 6096, 0.9887925386428833], "4": ["vertical_and_slash", 1000, 6096, 0.9445663094520569], "5": ["vertical_and_slash", 100, 800, 0.9765625], "6": ["vertical_and_slash", 1000, 6096, 0.8958441615104675], "7": ["vertical_and_slash", 1000, 6096, 0.9712619185447693], "8": ["vertical_and_slash", 1000, 6096, 0.9680050015449524], "9": ["vertical_and_slash", 1000, 6096, 0.8953807353973389], "10": ["vertical_and_slash", 1000, 6096, 0.708289623260498], "11": ["vertical_and_slash", 1000, 6096, 0.9608820080757141], "12": ["vertical_and_slash", 1000, 6096, 0.9898175597190857], "13": ["vertical_and_slash", 3500, 100, 0.6491235494613647], "14": ["vertical_and_slash", 1000, 6096, 0.9750576019287109], "15": ["vertical_and_slash", 1000, 6096, 0.9741002917289734], "16": ["vertical_and_slash", 1000, 6096, 0.9484618902206421], "17": ["vertical_and_slash", 100, 800, 0.828125], "18": ["vertical_and_slash", 3500, 100, 0.4597291946411133], "19": ["vertical_and_slash", 500, 700, 0.945932924747467], "20": ["vertical_and_slash", 1000, 6096, 0.9694904685020447], "21": ["vertical_and_slash", 500, 700, 0.9613211154937744], "22": ["vertical_and_slash", 500, 700, 0.9677812457084656], "23": ["vertical_and_slash", 3500, 100, 0.8575901389122009], "24": ["vertical_and_slash", 500, 700, 0.9830855131149292], "25": ["vertical_and_slash", 30, 800, 0.8283630013465881], "26": ["vertical_and_slash", 30, 800, 0.9849530458450317], "27": ["vertical_and_slash", 1000, 6096, 0.9672498106956482], "28": ["vertical_and_slash", 30, 800, 0.9876043796539307], "29": ["vertical_and_slash", 500, 700, 0.9809958338737488], "30": ["vertical_and_slash", 1000, 6096, 0.7861392498016357], "31": ["vertical_and_slash", 1000, 6096, 0.9234136343002319]}, {"0": ["vertical_and_slash", 1000, 6096, 0.9541090130805969], "1": ["vertical_and_slash", 1000, 6096, 0.9616696238517761], "2": ["vertical_and_slash", 1000, 6096, 0.995127260684967], "3": ["vertical_and_slash", 1000, 6096, 0.9726031422615051], "4": 
["vertical_and_slash", 3500, 100, 0.8511031270027161], "5": ["vertical_and_slash", 1000, 6096, 0.935838520526886], "6": ["vertical_and_slash", 1000, 6096, 0.5472050905227661], "7": ["vertical_and_slash", 1000, 6096, 0.9947358965873718], "8": ["vertical_and_slash", 100, 800, 0.609375], "9": ["vertical_and_slash", 1000, 6096, 0.7860066294670105], "10": ["vertical_and_slash", 1000, 6096, 0.9379380941390991], "11": ["vertical_and_slash", 1000, 6096, 0.9749820828437805], "12": ["vertical_and_slash", 3500, 100, 0.8777661919593811], "13": ["vertical_and_slash", 1000, 6096, 0.995369553565979], "14": ["vertical_and_slash", 500, 700, 0.9991562962532043], "15": ["vertical_and_slash", 1000, 6096, 0.9873014688491821], "16": ["vertical_and_slash", 1000, 6096, 0.9594364166259766], "17": ["vertical_and_slash", 1000, 6096, 0.9792249798774719], "18": ["vertical_and_slash", 1000, 6096, 0.9745029211044312], "19": ["vertical_and_slash", 1000, 6096, 0.931340217590332], "20": ["vertical_and_slash", 1000, 6096, 0.9835373163223267], "21": ["vertical_and_slash", 1000, 6096, 0.6739212274551392], "22": ["vertical_and_slash", 1000, 6096, 0.9919816255569458], "23": ["vertical_and_slash", 1000, 6096, 0.832761287689209], "24": ["vertical_and_slash", 1000, 6096, 0.8737685084342957], "25": ["vertical_and_slash", 1000, 6096, 0.9892163276672363], "26": ["vertical_and_slash", 1000, 6096, 0.9465505480766296], "27": ["vertical_and_slash", 1000, 6096, 0.6151480078697205], "28": ["vertical_and_slash", 1000, 6096, 0.9795054793357849], "29": ["vertical_and_slash", 1000, 6096, 0.968265175819397], "30": ["vertical_and_slash", 1000, 6096, 0.899684727191925], "31": ["vertical_and_slash", 1000, 6096, 0.9843863844871521]}, {"0": ["vertical_and_slash", 1000, 6096, 0.7661387324333191], "1": ["vertical_and_slash", 1000, 6096, 0.6455879211425781], "2": ["vertical_and_slash", 1000, 6096, 0.6188898682594299], "3": ["vertical_and_slash", 1000, 6096, 0.7831589579582214], "4": ["vertical_and_slash", 1000, 6096, 0.8114323616027832], "5": ["vertical_and_slash", 1000, 6096, 0.8699432611465454], "6": ["vertical_and_slash", 1000, 6096, 0.5396524667739868], "7": ["vertical_and_slash", 1000, 6096, 0.8414909243583679], "8": ["vertical_and_slash", 1000, 6096, 0.9677126407623291], "9": ["vertical_and_slash", 1000, 6096, 0.9914301633834839], "10": ["vertical_and_slash", 1000, 6096, 0.6908293962478638], "11": ["vertical_and_slash", 1000, 6096, 0.7694616317749023], "12": ["vertical_and_slash", 1000, 6096, 0.7868252992630005], "13": ["vertical_and_slash", 1000, 6096, 0.7686376571655273], "14": ["vertical_and_slash", 1000, 6096, 0.6416773200035095], "15": ["vertical_and_slash", 1000, 6096, 0.9487892389297485], "16": ["vertical_and_slash", 1000, 6096, 0.9250247478485107], "17": ["vertical_and_slash", 1000, 6096, 0.863334596157074], "18": ["vertical_and_slash", 1000, 6096, 0.9434170126914978], "19": ["vertical_and_slash", 1000, 6096, 0.8029648065567017], "20": ["vertical_and_slash", 1000, 6096, 0.9258765578269958], "21": ["vertical_and_slash", 1000, 6096, 0.7538158893585205], "22": ["vertical_and_slash", 1000, 6096, 0.9208838939666748], "23": ["vertical_and_slash", 1000, 6096, 0.8837108016014099], "24": ["vertical_and_slash", 1000, 6096, 0.970884382724762], "25": ["vertical_and_slash", 1000, 6096, 0.8945068120956421], "26": ["vertical_and_slash", 1000, 6096, 0.9354509711265564], "27": ["vertical_and_slash", 1000, 6096, 0.920552134513855], "28": ["vertical_and_slash", 30, 800, 0.9181707501411438], "29": ["vertical_and_slash", 1000, 6096, 0.891613781452179], "30": 
["vertical_and_slash", 1000, 6096, 0.9182100892066956], "31": ["vertical_and_slash", 1000, 6096, 0.8557562828063965]}, {"0": ["vertical_and_slash", 3500, 100, 0.9657055139541626], "1": ["vertical_and_slash", 3500, 100, 0.8897659778594971], "2": ["vertical_and_slash", 1000, 6096, 0.8682578802108765], "3": ["vertical_and_slash", 1000, 6096, 0.8867695331573486], "4": ["vertical_and_slash", 1000, 6096, 0.9828046560287476], "5": ["vertical_and_slash", 1000, 6096, 0.7956032156944275], "6": ["vertical_and_slash", 30, 800, 0.9907302856445312], "7": ["vertical_and_slash", 1000, 6096, 0.934683084487915], "8": ["vertical_and_slash", 1000, 6096, 0.9233026504516602], "9": ["vertical_and_slash", 1000, 6096, 0.9170653223991394], "10": ["vertical_and_slash", 1000, 6096, 0.8703349232673645], "11": ["vertical_and_slash", 1000, 6096, 0.9162744879722595], "12": ["vertical_and_slash", 1000, 6096, 0.9044281840324402], "13": ["vertical_and_slash", 1000, 6096, 0.7801997661590576], "14": ["vertical_and_slash", 1000, 6096, 0.9059498906135559], "15": ["vertical_and_slash", 1000, 6096, 0.911408543586731], "16": ["vertical_and_slash", 1000, 6096, 0.9057997465133667], "17": ["vertical_and_slash", 1000, 6096, 0.9196884632110596], "18": ["vertical_and_slash", 1000, 6096, 0.9520559906959534], "19": ["vertical_and_slash", 1000, 6096, 0.9791962504386902], "20": ["vertical_and_slash", 1000, 6096, 0.9360059499740601], "21": ["vertical_and_slash", 1000, 6096, 0.9846854209899902], "22": ["vertical_and_slash", 1000, 6096, 0.9537449479103088], "23": ["vertical_and_slash", 1000, 6096, 0.8763953447341919], "24": ["vertical_and_slash", 1000, 6096, 0.9365814328193665], "25": ["vertical_and_slash", 1000, 6096, 0.9549977779388428], "26": ["vertical_and_slash", 1000, 6096, 0.8538755178451538], "27": ["vertical_and_slash", 1000, 6096, 0.8482450842857361], "28": ["vertical_and_slash", 1000, 6096, 0.900585949420929], "29": ["vertical_and_slash", 1000, 6096, 0.9658856987953186], "30": ["vertical_and_slash", 1000, 6096, 0.924572229385376], "31": ["vertical_and_slash", 1000, 6096, 0.9305519461631775]}, {"0": ["vertical_and_slash", 1000, 6096, 0.9158159494400024], "1": ["vertical_and_slash", 100, 800, 0.9609375], "2": ["vertical_and_slash", 1000, 6096, 0.9127177000045776], "3": ["vertical_and_slash", 1000, 6096, 0.9467596411705017], "4": ["vertical_and_slash", 1000, 6096, 0.5563061833381653], "5": ["vertical_and_slash", 1000, 6096, 0.9689232707023621], "6": ["vertical_and_slash", 1000, 6096, 0.9137058258056641], "7": ["vertical_and_slash", 1000, 6096, 0.7653136849403381], "8": ["vertical_and_slash", 1000, 6096, 0.7265901565551758], "9": ["vertical_and_slash", 1000, 6096, 0.9632214307785034], "10": ["vertical_and_slash", 1000, 6096, 0.9582708477973938], "11": ["vertical_and_slash", 1000, 6096, 0.9441409111022949], "12": ["vertical_and_slash", 1000, 6096, 0.9170634150505066], "13": ["vertical_and_slash", 1000, 6096, 0.9617065191268921], "14": ["vertical_and_slash", 1000, 6096, 0.8803288340568542], "15": ["vertical_and_slash", 1000, 6096, 0.8892591595649719], "16": ["vertical_and_slash", 1000, 6096, 0.7660742402076721], "17": ["vertical_and_slash", 1000, 6096, 0.9554286003112793], "18": ["vertical_and_slash", 1000, 6096, 0.9157401919364929], "19": ["vertical_and_slash", 1000, 6096, 0.9165143966674805], "20": ["vertical_and_slash", 1000, 6096, 0.9822098612785339], "21": ["vertical_and_slash", 1000, 6096, 0.9935940504074097], "22": ["vertical_and_slash", 1000, 6096, 0.9715600609779358], "23": ["vertical_and_slash", 1000, 6096, 0.992677628993988], 
"24": ["vertical_and_slash", 1000, 6096, 0.5354439616203308], "25": ["vertical_and_slash", 3500, 100, 0.9313783049583435], "26": ["vertical_and_slash", 1000, 6096, 0.9819331169128418], "27": ["vertical_and_slash", 1000, 6096, 0.8625765442848206], "28": ["vertical_and_slash", 1000, 6096, 0.6488704085350037], "29": ["vertical_and_slash", 1000, 6096, 0.7992751002311707], "30": ["vertical_and_slash", 1000, 6096, 0.9676215052604675], "31": ["vertical_and_slash", 1000, 6096, 0.6730000376701355]}, {"0": ["vertical_and_slash", 1000, 6096, 0.9583336114883423], "1": ["vertical_and_slash", 3500, 100, 0.920649528503418], "2": ["vertical_and_slash", 1000, 6096, 0.9806380867958069], "3": ["vertical_and_slash", 1000, 6096, 0.6618795394897461], "4": ["vertical_and_slash", 500, 700, 0.9746427536010742], "5": ["vertical_and_slash", 1000, 6096, 0.9897501468658447], "6": ["vertical_and_slash", 30, 800, 0.9998135566711426], "7": ["vertical_and_slash", 100, 800, 0.96875], "8": ["vertical_and_slash", 1000, 6096, 0.9899019002914429], "9": ["vertical_and_slash", 1000, 6096, 0.7692935466766357], "10": ["vertical_and_slash", 30, 800, 0.993035614490509], "11": ["vertical_and_slash", 1000, 6096, 0.9593439102172852], "12": ["vertical_and_slash", 1000, 6096, 0.9329172372817993], "13": ["vertical_and_slash", 1000, 6096, 0.9800772666931152], "14": ["vertical_and_slash", 1000, 6096, 0.9913601279258728], "15": ["vertical_and_slash", 1000, 6096, 0.7965394258499146], "16": ["vertical_and_slash", 30, 800, 0.9870527386665344], "17": ["vertical_and_slash", 1000, 6096, 0.9533117413520813], "18": ["vertical_and_slash", 1000, 6096, 0.8804169297218323], "19": ["vertical_and_slash", 1000, 6096, 0.8350136280059814], "20": ["vertical_and_slash", 1000, 6096, 0.9841675758361816], "21": ["vertical_and_slash", 1000, 6096, 0.9561842679977417], "22": ["vertical_and_slash", 1000, 6096, 0.8822300434112549], "23": ["vertical_and_slash", 1000, 6096, 0.9830194115638733], "24": ["vertical_and_slash", 1000, 6096, 0.9482321739196777], "25": ["vertical_and_slash", 1000, 6096, 0.8614307045936584], "26": ["vertical_and_slash", 1000, 6096, 0.9263876080513], "27": ["vertical_and_slash", 1000, 6096, 0.6658102869987488], "28": ["vertical_and_slash", 1000, 6096, 0.9623947143554688], "29": ["vertical_and_slash", 1000, 6096, 0.9536281824111938], "30": ["vertical_and_slash", 1000, 6096, 0.9517220854759216], "31": ["vertical_and_slash", 1000, 6096, 0.8031153678894043]}, {"0": ["vertical_and_slash", 1000, 6096, 0.6388168334960938], "1": ["vertical_and_slash", 1000, 6096, 0.9439829587936401], "2": ["vertical_and_slash", 1000, 6096, 0.9835416674613953], "3": ["vertical_and_slash", 1000, 6096, 0.9886243343353271], "4": ["vertical_and_slash", 1000, 6096, 0.9902036190032959], "5": ["vertical_and_slash", 1000, 6096, 0.9350442290306091], "6": ["vertical_and_slash", 1000, 6096, 0.9958718419075012], "7": ["vertical_and_slash", 1000, 6096, 0.9658843278884888], "8": ["vertical_and_slash", 1000, 6096, 0.9869772791862488], "9": ["vertical_and_slash", 1000, 6096, 0.9570468664169312], "10": ["vertical_and_slash", 1000, 6096, 0.9743318557739258], "11": ["vertical_and_slash", 1000, 6096, 0.7002722024917603], "12": ["vertical_and_slash", 1000, 6096, 0.7885740399360657], "13": ["vertical_and_slash", 1000, 6096, 0.7680553793907166], "14": ["vertical_and_slash", 1000, 6096, 0.6732326745986938], "15": ["vertical_and_slash", 1000, 6096, 0.9269579648971558], "16": ["vertical_and_slash", 1000, 6096, 0.9863139390945435], "17": ["vertical_and_slash", 1000, 6096, 0.890034019947052], "18": 
["vertical_and_slash", 1000, 6096, 0.9680511951446533], "19": ["vertical_and_slash", 1000, 6096, 0.9073517918586731], "20": ["vertical_and_slash", 1000, 6096, 0.8970184326171875], "21": ["vertical_and_slash", 1000, 6096, 0.953582763671875], "22": ["vertical_and_slash", 1000, 6096, 0.9870669841766357], "23": ["vertical_and_slash", 1000, 6096, 0.9338790774345398], "24": ["vertical_and_slash", 1000, 6096, 0.9918429851531982], "25": ["vertical_and_slash", 1000, 6096, 0.9930576682090759], "26": ["vertical_and_slash", 1000, 6096, 0.9506353735923767], "27": ["vertical_and_slash", 1000, 6096, 0.9882525205612183], "28": ["vertical_and_slash", 1000, 6096, 0.9903281331062317], "29": ["vertical_and_slash", 1000, 6096, 0.9046964645385742], "30": ["vertical_and_slash", 3500, 100, 0.8055235147476196], "31": ["vertical_and_slash", 1000, 6096, 0.920537531375885]}, {"0": ["vertical_and_slash", 1000, 6096, 0.9932777285575867], "1": ["vertical_and_slash", 1000, 6096, 0.9949989914894104], "2": ["vertical_and_slash", 1000, 6096, 0.9581750631332397], "3": ["vertical_and_slash", 1000, 6096, 0.9929001331329346], "4": ["vertical_and_slash", 1000, 6096, 0.6734450459480286], "5": ["vertical_and_slash", 1000, 6096, 0.9910385012626648], "6": ["vertical_and_slash", 1000, 6096, 0.9714725613594055], "7": ["vertical_and_slash", 1000, 6096, 0.9907165169715881], "8": ["vertical_and_slash", 1000, 6096, 0.981683075428009], "9": ["vertical_and_slash", 1000, 6096, 0.8198555707931519], "10": ["vertical_and_slash", 1000, 6096, 0.9946696162223816], "11": ["vertical_and_slash", 1000, 6096, 0.9697901010513306], "12": ["vertical_and_slash", 1000, 6096, 0.9763979911804199], "13": ["vertical_and_slash", 1000, 6096, 0.9186936616897583], "14": ["vertical_and_slash", 1000, 6096, 0.9955764412879944], "15": ["vertical_and_slash", 1000, 6096, 0.9968273043632507], "16": ["vertical_and_slash", 1000, 6096, 0.9839903712272644], "17": ["vertical_and_slash", 1000, 6096, 0.9854987859725952], "18": ["vertical_and_slash", 1000, 6096, 0.9763216972351074], "19": ["vertical_and_slash", 1000, 6096, 0.92630535364151], "20": ["vertical_and_slash", 1000, 6096, 0.9746662378311157], "21": ["vertical_and_slash", 100, 800, 0.60546875], "22": ["vertical_and_slash", 1000, 6096, 0.9886237382888794], "23": ["vertical_and_slash", 1000, 6096, 0.9529657363891602], "24": ["vertical_and_slash", 1000, 6096, 0.6526110172271729], "25": ["vertical_and_slash", 1000, 6096, 0.9785957932472229], "26": ["vertical_and_slash", 1000, 6096, 0.9283955097198486], "27": ["vertical_and_slash", 1000, 6096, 0.9905862212181091], "28": ["vertical_and_slash", 1000, 6096, 0.831668496131897], "29": ["vertical_and_slash", 3500, 100, 0.7073426842689514], "30": ["vertical_and_slash", 1000, 6096, 0.9910452365875244], "31": ["vertical_and_slash", 1000, 6096, 0.9924555420875549]}, {"0": ["vertical_and_slash", 1000, 6096, 0.9483824372291565], "1": ["vertical_and_slash", 1000, 6096, 0.9952837228775024], "2": ["vertical_and_slash", 3500, 100, 0.9876469969749451], "3": ["vertical_and_slash", 1000, 6096, 0.9910295009613037], "4": ["vertical_and_slash", 1000, 6096, 0.9734227061271667], "5": ["vertical_and_slash", 1000, 6096, 0.978148341178894], "6": ["vertical_and_slash", 1000, 6096, 0.9864757061004639], "7": ["vertical_and_slash", 1000, 6096, 0.9570356607437134], "8": ["vertical_and_slash", 1000, 6096, 0.9480524659156799], "9": ["vertical_and_slash", 1000, 6096, 0.9976329207420349], "10": ["vertical_and_slash", 1000, 6096, 0.9608963131904602], "11": ["vertical_and_slash", 1000, 6096, 0.9907214641571045], 
"12": ["vertical_and_slash", 1000, 6096, 0.8926163911819458], "13": ["vertical_and_slash", 1000, 6096, 0.9958445429801941], "14": ["vertical_and_slash", 1000, 6096, 0.9896053671836853], "15": ["vertical_and_slash", 1000, 6096, 0.9930106997489929], "16": ["vertical_and_slash", 1000, 6096, 0.9790781140327454], "17": ["vertical_and_slash", 1000, 6096, 0.9917120933532715], "18": ["vertical_and_slash", 1000, 6096, 0.9937633872032166], "19": ["vertical_and_slash", 1000, 6096, 0.9867856502532959], "20": ["vertical_and_slash", 1000, 6096, 0.9723774790763855], "21": ["vertical_and_slash", 1000, 6096, 0.983454704284668], "22": ["vertical_and_slash", 1000, 6096, 0.9925602078437805], "23": ["vertical_and_slash", 1000, 6096, 0.9560484886169434], "24": ["vertical_and_slash", 1000, 6096, 0.9075314402580261], "25": ["vertical_and_slash", 1000, 6096, 0.9980502724647522], "26": ["vertical_and_slash", 1000, 6096, 0.8549617528915405], "27": ["vertical_and_slash", 1000, 6096, 0.9963071346282959], "28": ["vertical_and_slash", 1000, 6096, 0.9482569098472595], "29": ["vertical_and_slash", 1000, 6096, 0.968435525894165], "30": ["vertical_and_slash", 1000, 6096, 0.903647243976593], "31": ["vertical_and_slash", 1000, 6096, 0.7875450849533081]}, {"0": ["vertical_and_slash", 1000, 6096, 0.9211392998695374], "1": ["vertical_and_slash", 1000, 6096, 0.9940032362937927], "2": ["vertical_and_slash", 1000, 6096, 0.9837856888771057], "3": ["vertical_and_slash", 1000, 6096, 0.9709826707839966], "4": ["vertical_and_slash", 1000, 6096, 0.9988773465156555], "5": ["vertical_and_slash", 1000, 6096, 0.9964665174484253], "6": ["vertical_and_slash", 1000, 6096, 0.9945792555809021], "7": ["vertical_and_slash", 1000, 6096, 0.9958017468452454], "8": ["vertical_and_slash", 1000, 6096, 0.9973410964012146], "9": ["vertical_and_slash", 1000, 6096, 0.9888420701026917], "10": ["vertical_and_slash", 1000, 6096, 0.994647204875946], "11": ["vertical_and_slash", 1000, 6096, 0.9852795004844666], "12": ["vertical_and_slash", 1000, 6096, 0.9959890842437744], "13": ["vertical_and_slash", 1000, 6096, 0.9861192107200623], "14": ["vertical_and_slash", 1000, 6096, 0.966552734375], "15": ["vertical_and_slash", 1000, 6096, 0.9597994685173035], "16": ["vertical_and_slash", 1000, 6096, 0.9980231523513794], "17": ["vertical_and_slash", 1000, 6096, 0.758446216583252], "18": ["vertical_and_slash", 1000, 6096, 0.9019535183906555], "19": ["vertical_and_slash", 1000, 6096, 0.9775436520576477], "20": ["vertical_and_slash", 1000, 6096, 0.9885939955711365], "21": ["vertical_and_slash", 1000, 6096, 0.8545481562614441], "22": ["vertical_and_slash", 1000, 6096, 0.9928765296936035], "23": ["vertical_and_slash", 1000, 6096, 0.9882266521453857], "24": ["vertical_and_slash", 1000, 6096, 0.8496577739715576], "25": ["vertical_and_slash", 1000, 6096, 0.7915677428245544], "26": ["vertical_and_slash", 1000, 6096, 0.7121551632881165], "27": ["vertical_and_slash", 1000, 6096, 0.9940221905708313], "28": ["vertical_and_slash", 1000, 6096, 0.9851447939872742], "29": ["vertical_and_slash", 1000, 6096, 0.8594978451728821], "30": ["vertical_and_slash", 1000, 6096, 0.9886206388473511], "31": ["vertical_and_slash", 1000, 6096, 0.9236965775489807]}, {"0": ["vertical_and_slash", 1000, 6096, 0.9797110557556152], "1": ["vertical_and_slash", 1000, 6096, 0.9652978181838989], "2": ["vertical_and_slash", 1000, 6096, 0.9761486649513245], "3": ["vertical_and_slash", 1000, 6096, 0.9916988015174866], "4": ["vertical_and_slash", 1000, 6096, 0.8943752646446228], "5": ["vertical_and_slash", 1000, 6096, 
0.9975930452346802], "6": ["vertical_and_slash", 1000, 6096, 0.9633777737617493], "7": ["vertical_and_slash", 1000, 6096, 0.9685147404670715], "8": ["vertical_and_slash", 1000, 6096, 0.9267718195915222], "9": ["vertical_and_slash", 1000, 6096, 0.9835707545280457], "10": ["vertical_and_slash", 1000, 6096, 0.9016554355621338], "11": ["vertical_and_slash", 1000, 6096, 0.9986184239387512], "12": ["vertical_and_slash", 1000, 6096, 0.8630414605140686], "13": ["vertical_and_slash", 1000, 6096, 0.9953534603118896], "14": ["vertical_and_slash", 1000, 6096, 0.9929397106170654], "15": ["vertical_and_slash", 1000, 6096, 0.7794142961502075], "16": ["vertical_and_slash", 30, 800, 0.9923286437988281], "17": ["vertical_and_slash", 1000, 6096, 0.9266855716705322], "18": ["vertical_and_slash", 30, 800, 0.9866135716438293], "19": ["vertical_and_slash", 3500, 100, 0.6937721967697144], "20": ["vertical_and_slash", 1000, 6096, 0.917273998260498], "21": ["vertical_and_slash", 30, 800, 0.9910230040550232], "22": ["vertical_and_slash", 30, 800, 0.9986875057220459], "23": ["vertical_and_slash", 30, 800, 0.9931937456130981], "24": ["vertical_and_slash", 500, 700, 0.9756283760070801], "25": ["vertical_and_slash", 1000, 6096, 0.9228458404541016], "26": ["vertical_and_slash", 30, 800, 0.9704541563987732], "27": ["vertical_and_slash", 30, 800, 0.9999816417694092], "28": ["vertical_and_slash", 30, 800, 0.9961368441581726], "29": ["vertical_and_slash", 3500, 100, 0.49023929238319397], "30": ["vertical_and_slash", 500, 700, 0.9706283807754517], "31": ["vertical_and_slash", 30, 800, 0.9791941046714783]}, {"0": ["vertical_and_slash", 1000, 6096, 0.9944716095924377], "1": ["vertical_and_slash", 1000, 6096, 0.9921188950538635], "2": ["vertical_and_slash", 1000, 6096, 0.9527000784873962], "3": ["vertical_and_slash", 1000, 6096, 0.9524389505386353], "4": ["vertical_and_slash", 1000, 6096, 0.980296790599823], "5": ["vertical_and_slash", 1000, 6096, 0.9973203539848328], "6": ["vertical_and_slash", 1000, 6096, 0.9150237441062927], "7": ["vertical_and_slash", 1000, 6096, 0.9239553213119507], "8": ["vertical_and_slash", 1000, 6096, 0.9812129139900208], "9": ["vertical_and_slash", 1000, 6096, 0.9880436658859253], "10": ["vertical_and_slash", 1000, 6096, 0.9124068021774292], "11": ["vertical_and_slash", 1000, 6096, 0.903336763381958], "12": ["vertical_and_slash", 1000, 6096, 0.9873518347740173], "13": ["vertical_and_slash", 1000, 6096, 0.985855221748352], "14": ["vertical_and_slash", 1000, 6096, 0.7541599869728088], "15": ["vertical_and_slash", 1000, 6096, 0.9859821796417236], "16": ["vertical_and_slash", 1000, 6096, 0.7698959708213806], "17": ["vertical_and_slash", 1000, 6096, 0.9775893092155457], "18": ["vertical_and_slash", 1000, 6096, 0.8645729422569275], "19": ["vertical_and_slash", 1000, 6096, 0.8164383769035339], "20": ["vertical_and_slash", 1000, 6096, 0.9781270027160645], "21": ["vertical_and_slash", 1000, 6096, 0.7774609923362732], "22": ["vertical_and_slash", 1000, 6096, 0.7893700003623962], "23": ["vertical_and_slash", 1000, 6096, 0.667039692401886], "24": ["vertical_and_slash", 1000, 6096, 0.8929789662361145], "25": ["vertical_and_slash", 1000, 6096, 0.9409829378128052], "26": ["vertical_and_slash", 1000, 6096, 0.9977564811706543], "27": ["vertical_and_slash", 1000, 6096, 0.9368633031845093], "28": ["vertical_and_slash", 1000, 6096, 0.6916388869285583], "29": ["vertical_and_slash", 1000, 6096, 0.9498395919799805], "30": ["vertical_and_slash", 1000, 6096, 0.5906273126602173], "31": ["vertical_and_slash", 1000, 6096, 
0.49066540598869324]}, {"0": ["vertical_and_slash", 3500, 100, 0.9809368252754211], "1": ["vertical_and_slash", 1000, 6096, 0.9900444149971008], "2": ["vertical_and_slash", 1000, 6096, 0.7763345241546631], "3": ["vertical_and_slash", 1000, 6096, 0.8215399980545044], "4": ["vertical_and_slash", 1000, 6096, 0.8672407269477844], "5": ["vertical_and_slash", 3500, 100, 0.9840344786643982], "6": ["vertical_and_slash", 1000, 6096, 0.9132277369499207], "7": ["vertical_and_slash", 1000, 6096, 0.9514797329902649], "8": ["vertical_and_slash", 1000, 6096, 0.8272950649261475], "9": ["vertical_and_slash", 1000, 6096, 0.9040052890777588], "10": ["vertical_and_slash", 1000, 6096, 0.9036935567855835], "11": ["vertical_and_slash", 1000, 6096, 0.9262740612030029], "12": ["vertical_and_slash", 1000, 6096, 0.9606261253356934], "13": ["vertical_and_slash", 1000, 6096, 0.7733256220817566], "14": ["vertical_and_slash", 1000, 6096, 0.8432139158248901], "15": ["vertical_and_slash", 1000, 6096, 0.9505079984664917], "16": ["vertical_and_slash", 1000, 6096, 0.9494594931602478], "17": ["vertical_and_slash", 1000, 6096, 0.8810411095619202], "18": ["vertical_and_slash", 1000, 6096, 0.9724594354629517], "19": ["vertical_and_slash", 1000, 6096, 0.9956196546554565], "20": ["vertical_and_slash", 1000, 6096, 0.863589346408844], "21": ["vertical_and_slash", 1000, 6096, 0.9712514281272888], "22": ["vertical_and_slash", 1000, 6096, 0.9922794699668884], "23": ["vertical_and_slash", 1000, 6096, 0.8680360317230225], "24": ["vertical_and_slash", 1000, 6096, 0.9562727808952332], "25": ["vertical_and_slash", 1000, 6096, 0.9448184370994568], "26": ["vertical_and_slash", 1000, 6096, 0.8945408463478088], "27": ["vertical_and_slash", 1000, 6096, 0.9359281659126282], "28": ["vertical_and_slash", 1000, 6096, 0.9844917058944702], "29": ["vertical_and_slash", 1000, 6096, 0.9526135921478271], "30": ["vertical_and_slash", 1000, 6096, 0.9507893323898315], "31": ["vertical_and_slash", 1000, 6096, 0.9391229152679443]}, {"0": ["vertical_and_slash", 1000, 6096, 0.959192156791687], "1": ["vertical_and_slash", 1000, 6096, 0.8986132144927979], "2": ["vertical_and_slash", 1000, 6096, 0.9823517203330994], "3": ["vertical_and_slash", 3500, 100, 0.9925433397293091], "4": ["vertical_and_slash", 1000, 6096, 0.8507226705551147], "5": ["vertical_and_slash", 1000, 6096, 0.988812267780304], "6": ["vertical_and_slash", 1000, 6096, 0.8914223313331604], "7": ["vertical_and_slash", 1000, 6096, 0.9909411072731018], "8": ["vertical_and_slash", 1000, 6096, 0.8232712745666504], "9": ["vertical_and_slash", 1000, 6096, 0.9866649508476257], "10": ["vertical_and_slash", 1000, 6096, 0.993914008140564], "11": ["vertical_and_slash", 1000, 6096, 0.994103193283081], "12": ["vertical_and_slash", 1000, 6096, 0.9797621965408325], "13": ["vertical_and_slash", 3500, 100, 0.9829468131065369], "14": ["vertical_and_slash", 1000, 6096, 0.9968794584274292], "15": ["vertical_and_slash", 1000, 6096, 0.9799898266792297], "16": ["vertical_and_slash", 1000, 6096, 0.9583171010017395], "17": ["vertical_and_slash", 1000, 6096, 0.9750096797943115], "18": ["vertical_and_slash", 3500, 100, 0.950920581817627], "19": ["vertical_and_slash", 1000, 6096, 0.9605578184127808], "20": ["vertical_and_slash", 1000, 6096, 0.9931844472885132], "21": ["vertical_and_slash", 3500, 100, 0.9062336087226868], "22": ["vertical_and_slash", 1000, 6096, 0.9385621547698975], "23": ["vertical_and_slash", 3500, 100, 0.9778093099594116], "24": ["vertical_and_slash", 1000, 6096, 0.783235490322113], "25": ["vertical_and_slash", 
1000, 6096, 0.9622864127159119], "26": ["vertical_and_slash", 3500, 100, 0.9887783527374268], "27": ["vertical_and_slash", 1000, 6096, 0.9601055979728699], "28": ["vertical_and_slash", 1000, 6096, 0.8838457465171814], "29": ["vertical_and_slash", 3500, 100, 0.9428191184997559], "30": ["vertical_and_slash", 3500, 100, 0.9909257292747498], "31": ["vertical_and_slash", 3500, 100, 0.9503870606422424]}, {"0": ["vertical_and_slash", 1000, 6096, 0.943303108215332], "1": ["vertical_and_slash", 1000, 6096, 0.9910494089126587], "2": ["vertical_and_slash", 1000, 6096, 0.8366222381591797], "3": ["vertical_and_slash", 1000, 6096, 0.9674856066703796], "4": ["vertical_and_slash", 1000, 6096, 0.97574782371521], "5": ["vertical_and_slash", 1000, 6096, 0.9879559278488159], "6": ["vertical_and_slash", 1000, 6096, 0.9647985100746155], "7": ["vertical_and_slash", 1000, 6096, 0.9304100275039673], "8": ["vertical_and_slash", 1000, 6096, 0.9687802195549011], "9": ["vertical_and_slash", 3500, 100, 0.975675642490387], "10": ["vertical_and_slash", 1000, 6096, 0.9975134134292603], "11": ["vertical_and_slash", 3500, 100, 0.9904999136924744], "12": ["vertical_and_slash", 3500, 100, 0.9122200608253479], "13": ["vertical_and_slash", 1000, 6096, 0.9318473935127258], "14": ["vertical_and_slash", 1000, 6096, 0.9959043860435486], "15": ["vertical_and_slash", 3500, 100, 0.987663984298706], "16": ["vertical_and_slash", 1000, 6096, 0.9580157995223999], "17": ["vertical_and_slash", 3500, 100, 0.967541515827179], "18": ["vertical_and_slash", 1000, 6096, 0.9858877062797546], "19": ["vertical_and_slash", 3500, 100, 0.9829609990119934], "20": ["vertical_and_slash", 1000, 6096, 0.9371690154075623], "21": ["vertical_and_slash", 1000, 6096, 0.8966141939163208], "22": ["vertical_and_slash", 1000, 6096, 0.9711248874664307], "23": ["vertical_and_slash", 1000, 6096, 0.9836072325706482], "24": ["vertical_and_slash", 1000, 6096, 0.9597060680389404], "25": ["vertical_and_slash", 3500, 100, 0.9761543869972229], "26": ["vertical_and_slash", 3500, 100, 0.9595860242843628], "27": ["vertical_and_slash", 1000, 6096, 0.9816179871559143], "28": ["vertical_and_slash", 1000, 6096, 0.9611074924468994], "29": ["vertical_and_slash", 3500, 100, 0.9640044569969177], "30": ["vertical_and_slash", 1000, 6096, 0.9451780319213867], "31": ["vertical_and_slash", 1000, 6096, 0.942221999168396]}, {"0": ["vertical_and_slash", 1000, 6096, 0.9892961978912354], "1": ["vertical_and_slash", 1000, 6096, 0.9751477837562561], "2": ["vertical_and_slash", 1000, 6096, 0.9715431928634644], "3": ["vertical_and_slash", 1000, 6096, 0.9555078744888306], "4": ["vertical_and_slash", 1000, 6096, 0.972079336643219], "5": ["vertical_and_slash", 1000, 6096, 0.9383029937744141], "6": ["vertical_and_slash", 3500, 100, 0.97269207239151], "7": ["vertical_and_slash", 1000, 6096, 0.9863042235374451], "8": ["vertical_and_slash", 1000, 6096, 0.9737297892570496], "9": ["vertical_and_slash", 1000, 6096, 0.9771688580513], "10": ["vertical_and_slash", 1000, 6096, 0.9797963500022888], "11": ["vertical_and_slash", 1000, 6096, 0.9806637167930603], "12": ["vertical_and_slash", 1000, 6096, 0.9664291739463806], "13": ["vertical_and_slash", 1000, 6096, 0.9719564318656921], "14": ["vertical_and_slash", 1000, 6096, 0.8995389342308044], "15": ["vertical_and_slash", 1000, 6096, 0.965333878993988], "16": ["vertical_and_slash", 1000, 6096, 0.9119902849197388], "17": ["vertical_and_slash", 1000, 6096, 0.9491288065910339], "18": ["vertical_and_slash", 1000, 6096, 0.9842385649681091], "19": ["vertical_and_slash", 
1000, 6096, 0.9473087191581726], "20": ["vertical_and_slash", 3500, 100, 0.9794735312461853], "21": ["vertical_and_slash", 1000, 6096, 0.9777002334594727], "22": ["vertical_and_slash", 1000, 6096, 0.9637954831123352], "23": ["vertical_and_slash", 1000, 6096, 0.9687110185623169], "24": ["vertical_and_slash", 1000, 6096, 0.9259694814682007], "25": ["vertical_and_slash", 1000, 6096, 0.985179603099823], "26": ["vertical_and_slash", 1000, 6096, 0.9707021117210388], "27": ["vertical_and_slash", 3500, 100, 0.978216826915741], "28": ["vertical_and_slash", 1000, 6096, 0.9712181091308594], "29": ["vertical_and_slash", 1000, 6096, 0.9717193245887756], "30": ["vertical_and_slash", 1000, 6096, 0.9928351044654846], "31": ["vertical_and_slash", 1000, 6096, 0.9481915831565857]}, {"0": ["vertical_and_slash", 3500, 100, 0.9874629378318787], "1": ["vertical_and_slash", 1000, 6096, 0.9580618143081665], "2": ["vertical_and_slash", 1000, 6096, 0.9581654071807861], "3": ["vertical_and_slash", 3500, 100, 0.9785739183425903], "4": ["vertical_and_slash", 3500, 100, 0.9860913157463074], "5": ["vertical_and_slash", 3500, 100, 0.9562280774116516], "6": ["vertical_and_slash", 3500, 100, 0.9889052510261536], "7": ["vertical_and_slash", 3500, 100, 0.9604836106300354], "8": ["vertical_and_slash", 1000, 6096, 0.9046469330787659], "9": ["vertical_and_slash", 1000, 6096, 0.9246867895126343], "10": ["vertical_and_slash", 3500, 100, 0.9791927933692932], "11": ["vertical_and_slash", 3500, 100, 0.9869166612625122], "12": ["vertical_and_slash", 1000, 6096, 0.9678682684898376], "13": ["vertical_and_slash", 1000, 6096, 0.9711335897445679], "14": ["vertical_and_slash", 1000, 6096, 0.9279484748840332], "15": ["vertical_and_slash", 1000, 6096, 0.9901157021522522], "16": ["vertical_and_slash", 1000, 6096, 0.9386892318725586], "17": ["vertical_and_slash", 3500, 100, 0.9683235287666321], "18": ["vertical_and_slash", 3500, 100, 0.9513947367668152], "19": ["vertical_and_slash", 3500, 100, 0.9849128723144531], "20": ["vertical_and_slash", 1000, 6096, 0.9730000495910645], "21": ["vertical_and_slash", 1000, 6096, 0.9750357270240784], "22": ["vertical_and_slash", 1000, 6096, 0.9539456367492676], "23": ["vertical_and_slash", 3500, 100, 0.9765530824661255], "24": ["vertical_and_slash", 1000, 6096, 0.9754192233085632], "25": ["vertical_and_slash", 1000, 6096, 0.9772579073905945], "26": ["vertical_and_slash", 1000, 6096, 0.9841098189353943], "27": ["vertical_and_slash", 1000, 6096, 0.9629952907562256], "28": ["vertical_and_slash", 1000, 6096, 0.9697785377502441], "29": ["vertical_and_slash", 1000, 6096, 0.9643436074256897], "30": ["vertical_and_slash", 1000, 6096, 0.953684389591217], "31": ["vertical_and_slash", 3500, 100, 0.9900475740432739]}, {"0": ["vertical_and_slash", 1000, 6096, 0.9736325740814209], "1": ["vertical_and_slash", 3500, 100, 0.9843554496765137], "2": ["vertical_and_slash", 3500, 100, 0.9859849214553833], "3": ["vertical_and_slash", 1000, 6096, 0.9839856624603271], "4": ["vertical_and_slash", 1000, 6096, 0.9794097542762756], "5": ["vertical_and_slash", 1000, 6096, 0.9725930690765381], "6": ["vertical_and_slash", 3500, 100, 0.9811301827430725], "7": ["vertical_and_slash", 3500, 100, 0.9913578629493713], "8": ["vertical_and_slash", 1000, 6096, 0.981084942817688], "9": ["vertical_and_slash", 1000, 6096, 0.9855359196662903], "10": ["vertical_and_slash", 3500, 100, 0.9801225066184998], "11": ["vertical_and_slash", 1000, 6096, 0.9763649702072144], "12": ["vertical_and_slash", 1000, 6096, 0.990553617477417], "13": ["vertical_and_slash", 
1000, 6096, 0.9607461094856262], "14": ["vertical_and_slash", 1000, 6096, 0.9469245076179504], "15": ["vertical_and_slash", 1000, 6096, 0.9743368625640869], "16": ["vertical_and_slash", 3500, 100, 0.9114794135093689], "17": ["vertical_and_slash", 500, 700, 0.9141524434089661], "18": ["vertical_and_slash", 1000, 6096, 0.9767755270004272], "19": ["vertical_and_slash", 1000, 6096, 0.9033507108688354], "20": ["vertical_and_slash", 3500, 100, 0.97019362449646], "21": ["vertical_and_slash", 3500, 100, 0.9815121293067932], "22": ["vertical_and_slash", 1000, 6096, 0.9349904656410217], "23": ["vertical_and_slash", 3500, 100, 0.9809385538101196], "24": ["vertical_and_slash", 1000, 6096, 0.9248628616333008], "25": ["vertical_and_slash", 1000, 6096, 0.967199444770813], "26": ["vertical_and_slash", 3500, 100, 0.8729575872421265], "27": ["vertical_and_slash", 1000, 6096, 0.863213062286377], "28": ["vertical_and_slash", 3500, 100, 0.9790768623352051], "29": ["vertical_and_slash", 3500, 100, 0.9910596013069153], "30": ["vertical_and_slash", 1000, 6096, 0.8998411893844604], "31": ["vertical_and_slash", 1000, 6096, 0.9546238780021667]}, {"0": ["vertical_and_slash", 1000, 6096, 0.9143165946006775], "1": ["vertical_and_slash", 1000, 6096, 0.9640366435050964], "2": ["vertical_and_slash", 3500, 100, 0.9640350341796875], "3": ["vertical_and_slash", 3500, 100, 0.9802320599555969], "4": ["vertical_and_slash", 3500, 100, 0.9751210808753967], "5": ["vertical_and_slash", 3500, 100, 0.9597233533859253], "6": ["vertical_and_slash", 1000, 6096, 0.9865791201591492], "7": ["vertical_and_slash", 1000, 6096, 0.859822690486908], "8": ["vertical_and_slash", 3500, 100, 0.9686265587806702], "9": ["vertical_and_slash", 3500, 100, 0.9796083569526672], "10": ["vertical_and_slash", 3500, 100, 0.9844924807548523], "11": ["vertical_and_slash", 500, 700, 0.9741306304931641], "12": ["vertical_and_slash", 3500, 100, 0.8193228840827942], "13": ["vertical_and_slash", 1000, 6096, 0.7864953279495239], "14": ["vertical_and_slash", 1000, 6096, 0.8475791811943054], "15": ["vertical_and_slash", 1000, 6096, 0.9647654891014099], "16": ["vertical_and_slash", 1000, 6096, 0.9607741236686707], "17": ["vertical_and_slash", 3500, 100, 0.964085042476654], "18": ["vertical_and_slash", 3500, 100, 0.9860562682151794], "19": ["vertical_and_slash", 1000, 6096, 0.9428103566169739], "20": ["vertical_and_slash", 3500, 100, 0.9327119588851929], "21": ["vertical_and_slash", 1000, 6096, 0.9789990186691284], "22": ["vertical_and_slash", 3500, 100, 0.9621856212615967], "23": ["vertical_and_slash", 3500, 100, 0.9818771481513977], "24": ["vertical_and_slash", 3500, 100, 0.9849852323532104], "25": ["vertical_and_slash", 1000, 6096, 0.9598783850669861], "26": ["vertical_and_slash", 1000, 6096, 0.9695847630500793], "27": ["vertical_and_slash", 3500, 100, 0.986915111541748], "28": ["vertical_and_slash", 1000, 6096, 0.9638352990150452], "29": ["vertical_and_slash", 1000, 6096, 0.9399517774581909], "30": ["vertical_and_slash", 3500, 100, 0.9664122462272644], "31": ["vertical_and_slash", 3500, 100, 0.9540696144104004]}, {"0": ["vertical_and_slash", 3500, 100, 0.9685828685760498], "1": ["vertical_and_slash", 1000, 6096, 0.8338062167167664], "2": ["vertical_and_slash", 3500, 100, 0.9749028086662292], "3": ["vertical_and_slash", 1000, 6096, 0.9684938192367554], "4": ["vertical_and_slash", 3500, 100, 0.9898025989532471], "5": ["vertical_and_slash", 1000, 6096, 0.9454031586647034], "6": ["vertical_and_slash", 1000, 6096, 0.9983919262886047], "7": ["vertical_and_slash", 3500, 100, 
0.9774957895278931], "8": ["vertical_and_slash", 1000, 6096, 0.9124627113342285], "9": ["vertical_and_slash", 3500, 100, 0.9761123657226562], "10": ["vertical_and_slash", 1000, 6096, 0.9672303199768066], "11": ["vertical_and_slash", 3500, 100, 0.8949474692344666], "12": ["vertical_and_slash", 1000, 6096, 0.976123034954071], "13": ["vertical_and_slash", 3500, 100, 0.992988109588623], "14": ["vertical_and_slash", 1000, 6096, 0.9778774976730347], "15": ["vertical_and_slash", 1000, 6096, 0.9676492810249329], "16": ["vertical_and_slash", 3500, 100, 0.944362461566925], "17": ["vertical_and_slash", 3500, 100, 0.9966608285903931], "18": ["vertical_and_slash", 3500, 100, 0.9861279726028442], "19": ["vertical_and_slash", 3500, 100, 0.9936995506286621], "20": ["vertical_and_slash", 1000, 6096, 0.9839529991149902], "21": ["vertical_and_slash", 1000, 6096, 0.8699288368225098], "22": ["vertical_and_slash", 3500, 100, 0.9550421237945557], "23": ["vertical_and_slash", 3500, 100, 0.9807092547416687], "24": ["vertical_and_slash", 3500, 100, 0.9943180084228516], "25": ["vertical_and_slash", 3500, 100, 0.97030109167099], "26": ["vertical_and_slash", 1000, 6096, 0.7757391333580017], "27": ["vertical_and_slash", 3500, 100, 0.8722144365310669], "28": ["vertical_and_slash", 3500, 100, 0.9817062020301819], "29": ["vertical_and_slash", 3500, 100, 0.7830486297607422], "30": ["vertical_and_slash", 3500, 100, 0.9059069156646729], "31": ["vertical_and_slash", 1000, 6096, 0.9004206657409668]}, {"0": ["vertical_and_slash", 1000, 6096, 0.9989221692085266], "1": ["vertical_and_slash", 1000, 6096, 0.9751622080802917], "2": ["vertical_and_slash", 500, 700, 0.9843096733093262], "3": ["vertical_and_slash", 1000, 6096, 0.9970217943191528], "4": ["vertical_and_slash", 1000, 6096, 0.982936441898346], "5": ["vertical_and_slash", 1000, 6096, 0.9922932386398315], "6": ["vertical_and_slash", 1000, 6096, 0.9900820255279541], "7": ["vertical_and_slash", 3500, 100, 0.904089093208313], "8": ["vertical_and_slash", 1000, 6096, 0.8594567179679871], "9": ["vertical_and_slash", 1000, 6096, 0.9729855060577393], "10": ["vertical_and_slash", 500, 700, 0.9566927552223206], "11": ["vertical_and_slash", 500, 700, 0.9787583947181702], "12": ["vertical_and_slash", 1000, 6096, 0.9179439544677734], "13": ["vertical_and_slash", 500, 700, 0.9690548181533813], "14": ["vertical_and_slash", 1000, 6096, 0.9650132060050964], "15": ["vertical_and_slash", 1000, 6096, 0.9982704520225525], "16": ["vertical_and_slash", 3500, 100, 0.5764418840408325], "17": ["vertical_and_slash", 3500, 100, 0.8460132479667664], "18": ["vertical_and_slash", 3500, 100, 0.9885469079017639], "19": ["vertical_and_slash", 500, 700, 0.8908513784408569], "20": ["vertical_and_slash", 3500, 100, 0.7877959609031677], "21": ["vertical_and_slash", 1000, 6096, 0.9026593565940857], "22": ["vertical_and_slash", 1000, 6096, 0.8227401375770569], "23": ["vertical_and_slash", 3500, 100, 0.9510420560836792], "24": ["vertical_and_slash", 1000, 6096, 0.7077246904373169], "25": ["vertical_and_slash", 1000, 6096, 0.6111702919006348], "26": ["vertical_and_slash", 3500, 100, 0.7379475831985474], "27": ["vertical_and_slash", 500, 700, 0.9393547773361206], "28": ["vertical_and_slash", 1000, 6096, 0.9732987284660339], "29": ["vertical_and_slash", 1000, 6096, 0.8916338086128235], "30": ["vertical_and_slash", 1000, 6096, 0.5094305276870728], "31": ["vertical_and_slash", 3500, 100, 0.8615384697914124]}, {"0": ["vertical_and_slash", 500, 700, 0.8384267687797546], "1": ["vertical_and_slash", 30, 800, 
0.9832523465156555], "2": ["vertical_and_slash", 1000, 6096, 0.9556738138198853], "3": ["vertical_and_slash", 30, 800, 0.9658747315406799], "4": ["vertical_and_slash", 1000, 6096, 0.7001036405563354], "5": ["vertical_and_slash", 3500, 100, 0.9094016551971436], "6": ["vertical_and_slash", 1000, 6096, 0.8296511769294739], "7": ["vertical_and_slash", 30, 800, 0.9839702844619751], "8": ["vertical_and_slash", 1000, 6096, 0.9338380694389343], "9": ["vertical_and_slash", 30, 800, 0.8721705079078674], "10": ["vertical_and_slash", 1000, 6096, 0.8274728059768677], "11": ["vertical_and_slash", 30, 800, 0.906589686870575], "12": ["vertical_and_slash", 1000, 6096, 0.8754740357398987], "13": ["vertical_and_slash", 30, 800, 0.8910427689552307], "14": ["vertical_and_slash", 3500, 100, 0.9788433313369751], "15": ["vertical_and_slash", 1000, 6096, 0.6505519151687622], "16": ["vertical_and_slash", 1000, 6096, 0.9525647163391113], "17": ["vertical_and_slash", 1000, 6096, 0.9517712593078613], "18": ["vertical_and_slash", 3500, 100, 0.9884803295135498], "19": ["vertical_and_slash", 3500, 100, 0.8653204441070557], "20": ["vertical_and_slash", 1000, 6096, 0.6870253086090088], "21": ["vertical_and_slash", 1000, 6096, 0.7905831336975098], "22": ["vertical_and_slash", 3500, 100, 0.978307843208313], "23": ["vertical_and_slash", 1000, 6096, 0.8475686311721802], "24": ["vertical_and_slash", 1000, 6096, 0.732090175151825], "25": ["vertical_and_slash", 1000, 6096, 0.9593052864074707], "26": ["vertical_and_slash", 3500, 100, 0.7858700156211853], "27": ["vertical_and_slash", 3500, 100, 0.9247616529464722], "28": ["vertical_and_slash", 1000, 6096, 0.9564287662506104], "29": ["vertical_and_slash", 1000, 6096, 0.9688812494277954], "30": ["vertical_and_slash", 1000, 6096, 0.9487019181251526], "31": ["vertical_and_slash", 1000, 6096, 0.8594679236412048]}, {"0": ["vertical_and_slash", 3500, 100, 0.7102671265602112], "1": ["vertical_and_slash", 3500, 100, 0.687782347202301], "2": ["vertical_and_slash", 1000, 6096, 0.6231171488761902], "3": ["vertical_and_slash", 1000, 6096, 0.6519328951835632], "4": ["vertical_and_slash", 1000, 6096, 0.6371148228645325], "5": ["vertical_and_slash", 1000, 6096, 0.5512841939926147], "6": ["vertical_and_slash", 1000, 6096, 0.6453664302825928], "7": ["vertical_and_slash", 1000, 6096, 0.5656585097312927], "8": ["vertical_and_slash", 1000, 6096, 0.5154845118522644], "9": ["vertical_and_slash", 1000, 6096, 0.8060692548751831], "10": ["vertical_and_slash", 1000, 6096, 0.5447877049446106], "11": ["vertical_and_slash", 3500, 100, 0.7371558547019958], "12": ["vertical_and_slash", 3500, 100, 0.8459636569023132], "13": ["vertical_and_slash", 1000, 6096, 0.6367743611335754], "14": ["vertical_and_slash", 1000, 6096, 0.6031586527824402], "15": ["vertical_and_slash", 3500, 100, 0.8160911798477173], "16": ["vertical_and_slash", 1000, 6096, 0.8445301055908203], "17": ["vertical_and_slash", 1000, 6096, 0.9076457023620605], "18": ["vertical_and_slash", 1000, 6096, 0.8370077610015869], "19": ["vertical_and_slash", 1000, 6096, 0.9505546689033508], "20": ["vertical_and_slash", 1000, 6096, 0.9496653079986572], "21": ["vertical_and_slash", 1000, 6096, 0.9966088533401489], "22": ["vertical_and_slash", 1000, 6096, 0.7111172080039978], "23": ["vertical_and_slash", 1000, 6096, 0.825422465801239], "24": ["vertical_and_slash", 1000, 6096, 0.6696130633354187], "25": ["vertical_and_slash", 1000, 6096, 0.7732230424880981], "26": ["vertical_and_slash", 1000, 6096, 0.9513412117958069], "27": ["vertical_and_slash", 1000, 6096, 
0.8669811487197876], "28": ["vertical_and_slash", 1000, 6096, 0.839459240436554], "29": ["vertical_and_slash", 1000, 6096, 0.8636441230773926], "30": ["vertical_and_slash", 1000, 6096, 0.9775622487068176], "31": ["vertical_and_slash", 1000, 6096, 0.8842886090278625]}]
diff --git a/minference/configs/Llama_3_8B_Instruct_262k_kv_out_v32_fit_o_best_pattern.json b/minference/configs/Llama_3_8B_Instruct_262k_kv_out_v32_fit_o_best_pattern.json
new file mode 100644
index 0000000..ce60f95
--- /dev/null
+++ b/minference/configs/Llama_3_8B_Instruct_262k_kv_out_v32_fit_o_best_pattern.json
@@ -0,0 +1 @@
+[{"0": ["vertical_and_slash", 1000, 6096, 336], "1": ["vertical_and_slash", 1000, 6096, 26473], "2": ["vertical_and_slash", 1000, 6096, 0], "3": ["vertical_and_slash", 1000, 6096, 26958], "4": ["vertical_and_slash", 1000, 6096, 18905], "5": ["vertical_and_slash", 1000, 6096, 27990], "6": ["vertical_and_slash", 1000, 6096, 15162], "7": ["vertical_and_slash", 1000, 6096, 10529], "8": ["vertical_and_slash", 1000, 6096, 2], "9": ["vertical_and_slash", 1000, 6096, 828], "10": ["vertical_and_slash", 1000, 6096, 11405], "11": ["vertical_and_slash", 1000, 6096, 0], "12": ["vertical_and_slash", 1000, 6096, 55], "13": ["vertical_and_slash", 1000, 6096, 1], "14": ["vertical_and_slash", 1000, 6096, 0], "15": ["vertical_and_slash", 1000, 6096, 7021], "16": ["vertical_and_slash", 30, 800, 185169], "17": ["vertical_and_slash", 30, 800, 72929], "18": ["vertical_and_slash", 30, 800, 460008], "19": ["vertical_and_slash", 1000, 6096, 0], "20": ["vertical_and_slash", 1000, 6096, 71729], "21": ["vertical_and_slash", 1000, 6096, 52], "22": ["vertical_and_slash", 1000, 6096, 636], "23": ["vertical_and_slash", 1000, 6096, 75020], "24": ["vertical_and_slash", 1000, 6096, 23545], "25": ["vertical_and_slash", 1000, 6096, 90256], "26": ["vertical_and_slash", 1000, 6096, 45294], "27": ["vertical_and_slash", 1000, 6096, 32617], "28": ["vertical_and_slash", 3500, 100, 4777248], "29": ["vertical_and_slash", 3500, 100, 3996], "30": ["vertical_and_slash", 3500, 100, 590252], "31": ["vertical_and_slash", 3500, 100, 0]}, {"0": ["vertical_and_slash", 30, 800, 11048], "1": ["vertical_and_slash", 30, 800, 99768], "2": ["vertical_and_slash", 1000, 6096, 1393328], "3": ["vertical_and_slash", 30, 800, 97570], "4": ["vertical_and_slash", 30, 800, 9], "5": ["vertical_and_slash", 30, 800, 18], "6": ["vertical_and_slash", 30, 800, 216277], "7": ["vertical_and_slash", 30, 800, 148491], "8": ["vertical_and_slash", 100, 800, 543785], "9": ["vertical_and_slash", 1000, 6096, 2343829], "10": ["vertical_and_slash", 100, 800, 251542], "11": ["vertical_and_slash", 30, 800, 1064367], "12": ["vertical_and_slash", 1000, 6096, 6092], "13": ["vertical_and_slash", 30, 800, 12654], "14": ["vertical_and_slash", 1000, 6096, 0], "15": ["vertical_and_slash", 1000, 6096, 101], "16": ["vertical_and_slash", 30, 800, 21873], "17": ["vertical_and_slash", 30, 800, 107039], "18": ["vertical_and_slash", 30, 800, 9011], "19": ["vertical_and_slash", 30, 800, 445736], "20": ["vertical_and_slash", 30, 800, 1906], "21": ["vertical_and_slash", 30, 800, 3058], "22": ["vertical_and_slash", 1000, 6096, 430742], "23": ["vertical_and_slash", 1000, 6096, 181839], "24": ["vertical_and_slash", 30, 800, 125666], "25": ["vertical_and_slash", 30, 800, 704271], "26": ["vertical_and_slash", 30, 800, 14405], "27": ["vertical_and_slash", 30, 800, 70563], "28": ["vertical_and_slash", 1000, 6096, 38630], "29": ["vertical_and_slash", 1000, 6096, 68041], "30": ["vertical_and_slash", 30, 800, 6942], "31": ["vertical_and_slash", 1000, 6096, 35430]}, {"0": ["vertical_and_slash", 30, 800, 2720], "1": ["vertical_and_slash", 1000, 6096, 3045], "2": ["vertical_and_slash", 30, 800, 785], "3": ["vertical_and_slash", 1000, 6096, 14146], "4": ["vertical_and_slash", 100, 800, 315229], "5": ["vertical_and_slash", 1000, 6096, 195280], "6": ["vertical_and_slash", 1000, 6096, 1640055], "7": ["vertical_and_slash", 30, 800, 21026], "8": ["vertical_and_slash", 30, 800, 1082], "9": ["vertical_and_slash", 30, 800, 1851], "10": ["vertical_and_slash", 100, 800, 97766], "11": ["vertical_and_slash", 30, 800, 
14401], "12": ["vertical_and_slash", 100, 800, 55741], "13": ["vertical_and_slash", 30, 800, 100674], "14": ["vertical_and_slash", 100, 800, 5597503], "15": ["vertical_and_slash", 1000, 6096, 437796], "16": ["vertical_and_slash", 30, 800, 9647], "17": ["vertical_and_slash", 30, 800, 4590], "18": ["vertical_and_slash", 30, 800, 73], "19": ["vertical_and_slash", 1000, 6096, 823400], "20": ["vertical_and_slash", 1000, 6096, 464893], "21": ["vertical_and_slash", 1000, 6096, 406520], "22": ["vertical_and_slash", 1000, 6096, 49477], "23": ["vertical_and_slash", 30, 800, 25445], "24": ["vertical_and_slash", 30, 800, 172935], "25": ["vertical_and_slash", 30, 800, 125813], "26": ["vertical_and_slash", 30, 800, 35964], "27": ["vertical_and_slash", 30, 800, 64113], "28": ["vertical_and_slash", 30, 800, 8780], "29": ["vertical_and_slash", 30, 800, 7883], "30": ["vertical_and_slash", 30, 800, 3944], "31": ["vertical_and_slash", 30, 800, 1049]}, {"0": ["vertical_and_slash", 1000, 6096, 119045], "1": ["vertical_and_slash", 1000, 6096, 21633], "2": ["vertical_and_slash", 1000, 6096, 54], "3": ["vertical_and_slash", 1000, 6096, 756], "4": ["vertical_and_slash", 30, 800, 1524], "5": ["vertical_and_slash", 30, 800, 7576], "6": ["vertical_and_slash", 30, 800, 212024], "7": ["vertical_and_slash", 30, 800, 106253], "8": ["vertical_and_slash", 30, 800, 4801], "9": ["vertical_and_slash", 30, 800, 311445], "10": ["vertical_and_slash", 30, 800, 31540], "11": ["vertical_and_slash", 30, 800, 7706], "12": ["vertical_and_slash", 1000, 6096, 397], "13": ["vertical_and_slash", 1000, 6096, 40], "14": ["vertical_and_slash", 100, 800, 181], "15": ["vertical_and_slash", 1000, 6096, 15], "16": ["vertical_and_slash", 30, 800, 424080], "17": ["vertical_and_slash", 30, 800, 66114], "18": ["vertical_and_slash", 30, 800, 132526], "19": ["vertical_and_slash", 30, 800, 1478993], "20": ["vertical_and_slash", 1000, 6096, 655153], "21": ["vertical_and_slash", 1000, 6096, 117322], "22": ["vertical_and_slash", 1000, 6096, 572237], "23": ["vertical_and_slash", 1000, 6096, 688623], "24": ["vertical_and_slash", 1000, 6096, 294], "25": ["vertical_and_slash", 1000, 6096, 5035], "26": ["vertical_and_slash", 30, 800, 3874], "27": ["vertical_and_slash", 1000, 6096, 618117], "28": ["vertical_and_slash", 30, 800, 545357], "29": ["vertical_and_slash", 30, 800, 1746675], "30": ["vertical_and_slash", 30, 800, 612225], "31": ["vertical_and_slash", 100, 800, 232415]}, {"0": ["vertical_and_slash", 100, 800, 5379826], "1": ["vertical_and_slash", 100, 800, 4399425], "2": ["vertical_and_slash", 100, 800, 5842], "3": ["vertical_and_slash", 30, 800, 178263], "4": ["vertical_and_slash", 30, 800, 356], "5": ["vertical_and_slash", 30, 800, 2387916], "6": ["vertical_and_slash", 1000, 6096, 216595], "7": ["vertical_and_slash", 30, 800, 466], "8": ["vertical_and_slash", 1000, 6096, 832044], "9": ["vertical_and_slash", 1000, 6096, 59709], "10": ["vertical_and_slash", 1000, 6096, 1194089], "11": ["vertical_and_slash", 1000, 6096, 356408], "12": ["vertical_and_slash", 30, 800, 30528], "13": ["vertical_and_slash", 30, 800, 22217], "14": ["vertical_and_slash", 30, 800, 9162], "15": ["vertical_and_slash", 100, 800, 1641325], "16": ["vertical_and_slash", 1000, 6096, 489936], "17": ["vertical_and_slash", 30, 800, 58107], "18": ["vertical_and_slash", 1000, 6096, 8539], "19": ["vertical_and_slash", 1000, 6096, 508038], "20": ["vertical_and_slash", 100, 800, 2632857], "21": ["vertical_and_slash", 1000, 6096, 79517], "22": ["vertical_and_slash", 30, 800, 330362], "23": 
["vertical_and_slash", 1000, 6096, 85961], "24": ["vertical_and_slash", 30, 800, 23942], "25": ["vertical_and_slash", 30, 800, 75337], "26": ["vertical_and_slash", 30, 800, 3544417], "27": ["vertical_and_slash", 30, 800, 146427], "28": ["vertical_and_slash", 1000, 6096, 10561], "29": ["vertical_and_slash", 100, 800, 8759352], "30": ["vertical_and_slash", 100, 800, 8425], "31": ["vertical_and_slash", 30, 800, 22]}, {"0": ["vertical_and_slash", 30, 800, 50473], "1": ["vertical_and_slash", 1000, 6096, 277369], "2": ["vertical_and_slash", 30, 800, 59349], "3": ["vertical_and_slash", 30, 800, 27256], "4": ["vertical_and_slash", 30, 800, 112822], "5": ["vertical_and_slash", 1000, 6096, 346887], "6": ["vertical_and_slash", 1000, 6096, 84774], "7": ["vertical_and_slash", 1000, 6096, 954773], "8": ["vertical_and_slash", 1000, 6096, 1210908], "9": ["vertical_and_slash", 1000, 6096, 1679398], "10": ["vertical_and_slash", 1000, 6096, 2474351], "11": ["vertical_and_slash", 1000, 6096, 80495], "12": ["vertical_and_slash", 30, 800, 56761], "13": ["vertical_and_slash", 30, 800, 27757], "14": ["vertical_and_slash", 30, 800, 8811], "15": ["vertical_and_slash", 30, 800, 31547], "16": ["vertical_and_slash", 100, 800, 93167], "17": ["vertical_and_slash", 1000, 6096, 1464896], "18": ["vertical_and_slash", 1000, 6096, 434459], "19": ["vertical_and_slash", 30, 800, 1654521], "20": ["vertical_and_slash", 1000, 6096, 414], "21": ["vertical_and_slash", 1000, 6096, 76207], "22": ["vertical_and_slash", 1000, 6096, 8583], "23": ["vertical_and_slash", 1000, 6096, 1471], "24": ["vertical_and_slash", 1000, 6096, 231656], "25": ["vertical_and_slash", 500, 700, 95889], "26": ["vertical_and_slash", 30, 800, 62035], "27": ["vertical_and_slash", 1000, 6096, 43859], "28": ["vertical_and_slash", 30, 800, 23458], "29": ["vertical_and_slash", 30, 800, 53092], "30": ["vertical_and_slash", 30, 800, 74240], "31": ["vertical_and_slash", 30, 800, 45214]}, {"0": ["vertical_and_slash", 30, 800, 507], "1": ["vertical_and_slash", 100, 800, 8490], "2": ["vertical_and_slash", 100, 800, 3952118], "3": ["vertical_and_slash", 100, 800, 2475164], "4": ["vertical_and_slash", 100, 800, 8038], "5": ["vertical_and_slash", 30, 800, 2620494], "6": ["vertical_and_slash", 1000, 6096, 57306], "7": ["vertical_and_slash", 30, 800, 18889], "8": ["vertical_and_slash", 30, 800, 14900], "9": ["vertical_and_slash", 30, 800, 310453], "10": ["vertical_and_slash", 30, 800, 5494], "11": ["vertical_and_slash", 30, 800, 16096], "12": ["vertical_and_slash", 30, 800, 45897], "13": ["vertical_and_slash", 30, 800, 120295], "14": ["vertical_and_slash", 30, 800, 1446587], "15": ["vertical_and_slash", 30, 800, 133562], "16": ["vertical_and_slash", 30, 800, 81561], "17": ["vertical_and_slash", 100, 800, 1091558], "18": ["vertical_and_slash", 30, 800, 1104027], "19": ["vertical_and_slash", 30, 800, 95228], "20": ["vertical_and_slash", 1000, 6096, 81766], "21": ["vertical_and_slash", 1000, 6096, 1604474], "22": ["vertical_and_slash", 30, 800, 1720847], "23": ["vertical_and_slash", 30, 800, 254367], "24": ["vertical_and_slash", 1000, 6096, 69837], "25": ["vertical_and_slash", 1000, 6096, 1346498], "26": ["vertical_and_slash", 1000, 6096, 251707], "27": ["vertical_and_slash", 1000, 6096, 21055], "28": ["vertical_and_slash", 100, 800, 1310349], "29": ["vertical_and_slash", 1000, 6096, 523], "30": ["vertical_and_slash", 100, 800, 5], "31": ["vertical_and_slash", 1000, 6096, 4114]}, {"0": ["vertical_and_slash", 30, 800, 2076100], "1": ["vertical_and_slash", 30, 800, 742482], "2": 
["vertical_and_slash", 30, 800, 84396], "3": ["vertical_and_slash", 100, 800, 6621015], "4": ["vertical_and_slash", 30, 800, 269671], "5": ["vertical_and_slash", 30, 800, 142041], "6": ["vertical_and_slash", 1000, 6096, 2493869], "7": ["vertical_and_slash", 1000, 6096, 2460341], "8": ["vertical_and_slash", 30, 800, 352690], "9": ["vertical_and_slash", 30, 800, 134441], "10": ["vertical_and_slash", 1000, 6096, 112278], "11": ["vertical_and_slash", 30, 800, 62933], "12": ["vertical_and_slash", 30, 800, 150459], "13": ["vertical_and_slash", 1000, 6096, 120036], "14": ["vertical_and_slash", 100, 800, 433238], "15": ["vertical_and_slash", 100, 800, 2723047], "16": ["vertical_and_slash", 1000, 6096, 112925], "17": ["vertical_and_slash", 1000, 6096, 23380], "18": ["vertical_and_slash", 1000, 6096, 92620], "19": ["vertical_and_slash", 1000, 6096, 37993], "20": ["vertical_and_slash", 100, 800, 74928], "21": ["vertical_and_slash", 3500, 100, 14191655], "22": ["vertical_and_slash", 1000, 6096, 514675], "23": ["vertical_and_slash", 100, 800, 9577073], "24": ["vertical_and_slash", 100, 800, 531136], "25": ["vertical_and_slash", 1000, 6096, 30007], "26": ["vertical_and_slash", 1000, 6096, 170687], "27": ["vertical_and_slash", 30, 800, 540287], "28": ["vertical_and_slash", 30, 800, 1435852], "29": ["vertical_and_slash", 30, 800, 948060], "30": ["vertical_and_slash", 1000, 6096, 37219], "31": ["vertical_and_slash", 1000, 6096, 211641]}, {"0": ["vertical_and_slash", 1000, 6096, 582795], "1": ["vertical_and_slash", 1000, 6096, 6289238], "2": ["vertical_and_slash", 1000, 6096, 570805], "3": ["vertical_and_slash", 1000, 6096, 198493], "4": ["vertical_and_slash", 30, 800, 112215], "5": ["vertical_and_slash", 30, 800, 5387246], "6": ["vertical_and_slash", 30, 800, 754350], "7": ["vertical_and_slash", 1000, 6096, 164737], "8": ["vertical_and_slash", 1000, 6096, 8597099], "9": ["vertical_and_slash", 1000, 6096, 13891466], "10": ["vertical_and_slash", 100, 800, 12184646], "11": ["vertical_and_slash", 1000, 6096, 3397834], "12": ["vertical_and_slash", 1000, 6096, 274297], "13": ["vertical_and_slash", 30, 800, 505818], "14": ["vertical_and_slash", 1000, 6096, 382749], "15": ["vertical_and_slash", 1000, 6096, 53485], "16": ["vertical_and_slash", 1000, 6096, 63748], "17": ["vertical_and_slash", 1000, 6096, 743437], "18": ["vertical_and_slash", 1000, 6096, 884226], "19": ["vertical_and_slash", 1000, 6096, 32754], "20": ["vertical_and_slash", 30, 800, 154807], "21": ["vertical_and_slash", 30, 800, 515833], "22": ["vertical_and_slash", 30, 800, 379827], "23": ["vertical_and_slash", 30, 800, 5140670], "24": ["vertical_and_slash", 1000, 6096, 8857], "25": ["vertical_and_slash", 1000, 6096, 9739], "26": ["vertical_and_slash", 1000, 6096, 3362559], "27": ["vertical_and_slash", 1000, 6096, 3602170], "28": ["vertical_and_slash", 1000, 6096, 286758], "29": ["vertical_and_slash", 1000, 6096, 1091568], "30": ["vertical_and_slash", 1000, 6096, 464410], "31": ["vertical_and_slash", 1000, 6096, 9113238]}, {"0": ["vertical_and_slash", 1000, 6096, 4112309], "1": ["vertical_and_slash", 1000, 6096, 6237157], "2": ["vertical_and_slash", 1000, 6096, 12411496], "3": ["vertical_and_slash", 1000, 6096, 3333545], "4": ["vertical_and_slash", 1000, 6096, 1082199], "5": ["vertical_and_slash", 1000, 6096, 3624535], "6": ["vertical_and_slash", 1000, 6096, 85587], "7": ["vertical_and_slash", 1000, 6096, 5060732], "8": ["vertical_and_slash", 30, 800, 981020], "9": ["vertical_and_slash", 30, 800, 647089], "10": ["vertical_and_slash", 30, 800, 
1168497], "11": ["vertical_and_slash", 30, 800, 241811], "12": ["vertical_and_slash", 1000, 6096, 14258787], "13": ["vertical_and_slash", 1000, 6096, 13881708], "14": ["vertical_and_slash", 100, 800, 9807781], "15": ["vertical_and_slash", 1000, 6096, 11824390], "16": ["vertical_and_slash", 1000, 6096, 382173], "17": ["vertical_and_slash", 1000, 6096, 682553], "18": ["vertical_and_slash", 1000, 6096, 228115], "19": ["vertical_and_slash", 1000, 6096, 730935], "20": ["vertical_and_slash", 1000, 6096, 10237660], "21": ["vertical_and_slash", 1000, 6096, 210229], "22": ["vertical_and_slash", 1000, 6096, 4883397], "23": ["vertical_and_slash", 1000, 6096, 569329], "24": ["vertical_and_slash", 100, 800, 4152], "25": ["vertical_and_slash", 1000, 6096, 235235], "26": ["vertical_and_slash", 100, 800, 22473], "27": ["vertical_and_slash", 3500, 100, 14276508], "28": ["vertical_and_slash", 1000, 6096, 2277550], "29": ["vertical_and_slash", 1000, 6096, 1821096], "30": ["vertical_and_slash", 30, 800, 1212061], "31": ["vertical_and_slash", 1000, 6096, 13192107]}, {"0": ["vertical_and_slash", 1000, 6096, 812453], "1": ["vertical_and_slash", 1000, 6096, 6634405], "2": ["vertical_and_slash", 1000, 6096, 6896128], "3": ["vertical_and_slash", 1000, 6096, 12539813], "4": ["vertical_and_slash", 1000, 6096, 90867], "5": ["vertical_and_slash", 1000, 6096, 592412], "6": ["vertical_and_slash", 1000, 6096, 1863965], "7": ["vertical_and_slash", 1000, 6096, 1412714], "8": ["vertical_and_slash", 100, 800, 4723238], "9": ["vertical_and_slash", 30, 800, 73268], "10": ["vertical_and_slash", 1000, 6096, 522198], "11": ["vertical_and_slash", 30, 800, 144456], "12": ["vertical_and_slash", 1000, 6096, 218571], "13": ["vertical_and_slash", 1000, 6096, 4766244], "14": ["vertical_and_slash", 1000, 6096, 519409], "15": ["vertical_and_slash", 100, 800, 257427], "16": ["vertical_and_slash", 30, 800, 913307], "17": ["vertical_and_slash", 1000, 6096, 272105], "18": ["vertical_and_slash", 1000, 6096, 10253560], "19": ["vertical_and_slash", 1000, 6096, 103219], "20": ["vertical_and_slash", 1000, 6096, 825917], "21": ["vertical_and_slash", 1000, 6096, 1573906], "22": ["vertical_and_slash", 1000, 6096, 1401963], "23": ["vertical_and_slash", 1000, 6096, 903562], "24": ["vertical_and_slash", 1000, 6096, 116448], "25": ["vertical_and_slash", 500, 700, 10497021], "26": ["vertical_and_slash", 1000, 6096, 1451038], "27": ["vertical_and_slash", 100, 800, 9129837], "28": ["vertical_and_slash", 1000, 6096, 6069558], "29": ["vertical_and_slash", 100, 800, 4906900], "30": ["vertical_and_slash", 100, 800, 1935350], "31": ["vertical_and_slash", 1000, 6096, 13438131]}, {"0": ["vertical_and_slash", 1000, 6096, 200475], "1": ["vertical_and_slash", 1000, 6096, 2525357], "2": ["vertical_and_slash", 1000, 6096, 1581552], "3": ["vertical_and_slash", 1000, 6096, 1585962], "4": ["vertical_and_slash", 100, 800, 2468769], "5": ["vertical_and_slash", 1000, 6096, 2284149], "6": ["vertical_and_slash", 1000, 6096, 3954975], "7": ["vertical_and_slash", 1000, 6096, 12242517], "8": ["vertical_and_slash", 1000, 6096, 407981], "9": ["vertical_and_slash", 1000, 6096, 387918], "10": ["vertical_and_slash", 30, 800, 494970], "11": ["vertical_and_slash", 1000, 6096, 237593], "12": ["vertical_and_slash", 1000, 6096, 13227100], "13": ["vertical_and_slash", 1000, 6096, 7150283], "14": ["vertical_and_slash", 1000, 6096, 1460829], "15": ["vertical_and_slash", 1000, 6096, 5830515], "16": ["vertical_and_slash", 30, 800, 321990], "17": ["vertical_and_slash", 500, 700, 412885], "18": 
["vertical_and_slash", 30, 800, 7754087], "19": ["vertical_and_slash", 30, 800, 593222], "20": ["vertical_and_slash", 1000, 6096, 9430066], "21": ["vertical_and_slash", 1000, 6096, 11445545], "22": ["vertical_and_slash", 1000, 6096, 10096832], "23": ["vertical_and_slash", 1000, 6096, 11108827], "24": ["vertical_and_slash", 1000, 6096, 2040566], "25": ["vertical_and_slash", 1000, 6096, 1293645], "26": ["vertical_and_slash", 1000, 6096, 1681146], "27": ["vertical_and_slash", 1000, 6096, 1621078], "28": ["vertical_and_slash", 3500, 100, 14482863], "29": ["vertical_and_slash", 3500, 100, 14306340], "30": ["vertical_and_slash", 3500, 100, 14736032], "31": ["vertical_and_slash", 30, 800, 59474]}, {"0": ["vertical_and_slash", 30, 800, 2015977], "1": ["vertical_and_slash", 1000, 6096, 1851908], "2": ["vertical_and_slash", 500, 700, 3019045], "3": ["vertical_and_slash", 30, 800, 2275137], "4": ["vertical_and_slash", 1000, 6096, 111007], "5": ["vertical_and_slash", 1000, 6096, 74876], "6": ["vertical_and_slash", 1000, 6096, 291657], "7": ["vertical_and_slash", 1000, 6096, 72059], "8": ["vertical_and_slash", 100, 800, 4966732], "9": ["vertical_and_slash", 30, 800, 1227926], "10": ["vertical_and_slash", 1000, 6096, 817635], "11": ["vertical_and_slash", 100, 800, 1996081], "12": ["vertical_and_slash", 30, 800, 320794], "13": ["vertical_and_slash", 30, 800, 641018], "14": ["vertical_and_slash", 1000, 6096, 784584], "15": ["vertical_and_slash", 500, 700, 615730], "16": ["vertical_and_slash", 30, 800, 130637], "17": ["vertical_and_slash", 500, 700, 237719], "18": ["vertical_and_slash", 30, 800, 484009], "19": ["vertical_and_slash", 30, 800, 71667], "20": ["vertical_and_slash", 30, 800, 6034932], "21": ["vertical_and_slash", 30, 800, 279606], "22": ["vertical_and_slash", 30, 800, 273046], "23": ["vertical_and_slash", 500, 700, 5343396], "24": ["vertical_and_slash", 30, 800, 424419], "25": ["vertical_and_slash", 30, 800, 268585], "26": ["vertical_and_slash", 500, 700, 469509], "27": ["vertical_and_slash", 30, 800, 1150183], "28": ["vertical_and_slash", 30, 800, 567665], "29": ["vertical_and_slash", 30, 800, 689969], "30": ["vertical_and_slash", 30, 800, 3124447], "31": ["vertical_and_slash", 500, 700, 1311816]}, {"0": ["vertical_and_slash", 1000, 6096, 13054849], "1": ["vertical_and_slash", 1000, 6096, 11676492], "2": ["vertical_and_slash", 1000, 6096, 13662962], "3": ["vertical_and_slash", 1000, 6096, 13009510], "4": ["vertical_and_slash", 1000, 6096, 13228770], "5": ["vertical_and_slash", 1000, 6096, 13738897], "6": ["vertical_and_slash", 1000, 6096, 4327684], "7": ["vertical_and_slash", 100, 800, 1780647], "8": ["vertical_and_slash", 1000, 6096, 12984525], "9": ["vertical_and_slash", 1000, 6096, 10106452], "10": ["vertical_and_slash", 1000, 6096, 13121645], "11": ["vertical_and_slash", 1000, 6096, 7143877], "12": ["vertical_and_slash", 1000, 6096, 1302273], "13": ["vertical_and_slash", 1000, 6096, 12189960], "14": ["vertical_and_slash", 1000, 6096, 10369892], "15": ["vertical_and_slash", 1000, 6096, 6251432], "16": ["vertical_and_slash", 1000, 6096, 13767358], "17": ["vertical_and_slash", 1000, 6096, 14264179], "18": ["vertical_and_slash", 1000, 6096, 14027354], "19": ["vertical_and_slash", 1000, 6096, 12810299], "20": ["vertical_and_slash", 1000, 6096, 11500719], "21": ["vertical_and_slash", 1000, 6096, 8729013], "22": ["vertical_and_slash", 100, 800, 1386474], "23": ["vertical_and_slash", 1000, 6096, 8809015], "24": ["vertical_and_slash", 30, 800, 1192385], "25": ["vertical_and_slash", 100, 800, 
6597145], "26": ["vertical_and_slash", 100, 800, 11801029], "27": ["vertical_and_slash", 1000, 6096, 981847], "28": ["vertical_and_slash", 1000, 6096, 3790181], "29": ["vertical_and_slash", 30, 800, 1641474], "30": ["vertical_and_slash", 1000, 6096, 4214917], "31": ["vertical_and_slash", 1000, 6096, 3423871]}, {"0": ["vertical_and_slash", 1000, 6096, 7281028], "1": ["vertical_and_slash", 1000, 6096, 6327889], "2": ["vertical_and_slash", 1000, 6096, 5161807], "3": ["vertical_and_slash", 1000, 6096, 6944365], "4": ["vertical_and_slash", 1000, 6096, 10798408], "5": ["vertical_and_slash", 1000, 6096, 11848526], "6": ["vertical_and_slash", 1000, 6096, 5023703], "7": ["vertical_and_slash", 1000, 6096, 6869756], "8": ["vertical_and_slash", 30, 800, 2070673], "9": ["vertical_and_slash", 30, 800, 2108039], "10": ["vertical_and_slash", 30, 800, 2478923], "11": ["vertical_and_slash", 30, 800, 1062019], "12": ["vertical_and_slash", 1000, 6096, 10483422], "13": ["vertical_and_slash", 1000, 6096, 13220734], "14": ["vertical_and_slash", 1000, 6096, 10864461], "15": ["vertical_and_slash", 1000, 6096, 10380263], "16": ["vertical_and_slash", 1000, 6096, 12606664], "17": ["vertical_and_slash", 1000, 6096, 12755695], "18": ["vertical_and_slash", 1000, 6096, 14481440], "19": ["vertical_and_slash", 1000, 6096, 12125755], "20": ["vertical_and_slash", 1000, 6096, 13727938], "21": ["vertical_and_slash", 100, 800, 9986525], "22": ["vertical_and_slash", 1000, 6096, 13802294], "23": ["vertical_and_slash", 1000, 6096, 8589854], "24": ["vertical_and_slash", 1000, 6096, 8696624], "25": ["vertical_and_slash", 1000, 6096, 6711141], "26": ["vertical_and_slash", 30, 800, 11407], "27": ["vertical_and_slash", 1000, 6096, 10286733], "28": ["vertical_and_slash", 100, 800, 14346519], "29": ["vertical_and_slash", 3500, 100, 14822370], "30": ["vertical_and_slash", 1000, 6096, 13996996], "31": ["vertical_and_slash", 3500, 100, 13837843]}, {"0": ["vertical_and_slash", 30, 800, 187826], "1": ["vertical_and_slash", 1000, 6096, 319682], "2": ["vertical_and_slash", 1000, 6096, 717971], "3": ["vertical_and_slash", 1000, 6096, 12248225], "4": ["vertical_and_slash", 30, 800, 2311494], "5": ["vertical_and_slash", 1000, 6096, 354949], "6": ["vertical_and_slash", 30, 800, 2723442], "7": ["vertical_and_slash", 30, 800, 217627], "8": ["vertical_and_slash", 500, 700, 1800505], "9": ["vertical_and_slash", 30, 800, 5395314], "10": ["vertical_and_slash", 30, 800, 10715415], "11": ["vertical_and_slash", 100, 800, 13267898], "12": ["vertical_and_slash", 30, 800, 282819], "13": ["vertical_and_slash", 1000, 6096, 8417130], "14": ["vertical_and_slash", 1000, 6096, 5380564], "15": ["vertical_and_slash", 1000, 6096, 9802765], "16": ["vertical_and_slash", 1000, 6096, 385044], "17": ["vertical_and_slash", 1000, 6096, 2048601], "18": ["vertical_and_slash", 1000, 6096, 2798283], "19": ["vertical_and_slash", 100, 800, 11985153], "20": ["vertical_and_slash", 1000, 6096, 9560488], "21": ["vertical_and_slash", 1000, 6096, 8719957], "22": ["vertical_and_slash", 1000, 6096, 10883722], "23": ["vertical_and_slash", 1000, 6096, 11184293], "24": ["vertical_and_slash", 1000, 6096, 5049287], "25": ["vertical_and_slash", 1000, 6096, 6119952], "26": ["vertical_and_slash", 1000, 6096, 11948638], "27": ["vertical_and_slash", 1000, 6096, 4654529], "28": ["vertical_and_slash", 1000, 6096, 269543], "29": ["vertical_and_slash", 1000, 6096, 1183543], "30": ["vertical_and_slash", 1000, 6096, 4018748], "31": ["vertical_and_slash", 30, 800, 208750]}, {"0": ["vertical_and_slash", 
3500, 100, 14712977], "1": ["vertical_and_slash", 1000, 6096, 7977346], "2": ["vertical_and_slash", 100, 800, 12022826], "3": ["vertical_and_slash", 100, 800, 7525648], "4": ["vertical_and_slash", 500, 700, 627445], "5": ["vertical_and_slash", 1000, 6096, 1067661], "6": ["vertical_and_slash", 500, 700, 199111], "7": ["vertical_and_slash", 100, 800, 1462908], "8": ["vertical_and_slash", 1000, 6096, 12608289], "9": ["vertical_and_slash", 1000, 6096, 3815760], "10": ["vertical_and_slash", 100, 800, 5050623], "11": ["vertical_and_slash", 3500, 100, 6790875], "12": ["vertical_and_slash", 30, 800, 284918], "13": ["vertical_and_slash", 500, 700, 277887], "14": ["vertical_and_slash", 500, 700, 236664], "15": ["vertical_and_slash", 30, 800, 3582148], "16": ["vertical_and_slash", 100, 800, 13373963], "17": ["vertical_and_slash", 100, 800, 682950], "18": ["vertical_and_slash", 1000, 6096, 7136486], "19": ["vertical_and_slash", 1000, 6096, 13769505], "20": ["vertical_and_slash", 1000, 6096, 9883913], "21": ["vertical_and_slash", 1000, 6096, 10833503], "22": ["vertical_and_slash", 30, 800, 62940], "23": ["vertical_and_slash", 1000, 6096, 4652762], "24": ["vertical_and_slash", 1000, 6096, 5480379], "25": ["vertical_and_slash", 3500, 100, 14131887], "26": ["vertical_and_slash", 100, 800, 9221283], "27": ["vertical_and_slash", 1000, 6096, 4197162], "28": ["vertical_and_slash", 30, 800, 4438611], "29": ["vertical_and_slash", 30, 800, 354648], "30": ["vertical_and_slash", 30, 800, 7285775], "31": ["vertical_and_slash", 30, 800, 4392079]}, {"0": ["vertical_and_slash", 1000, 6096, 2131686], "1": ["vertical_and_slash", 1000, 6096, 3609919], "2": ["vertical_and_slash", 1000, 6096, 899481], "3": ["vertical_and_slash", 100, 800, 3219776], "4": ["vertical_and_slash", 3500, 100, 11460535], "5": ["vertical_and_slash", 1000, 6096, 154336], "6": ["vertical_and_slash", 3500, 100, 14438950], "7": ["vertical_and_slash", 100, 800, 6652113], "8": ["vertical_and_slash", 100, 800, 9133667], "9": ["vertical_and_slash", 100, 800, 8048731], "10": ["vertical_and_slash", 1000, 6096, 528931], "11": ["vertical_and_slash", 30, 800, 2635938], "12": ["vertical_and_slash", 30, 800, 8546455], "13": ["vertical_and_slash", 500, 700, 7229697], "14": ["vertical_and_slash", 1000, 6096, 32195], "15": ["vertical_and_slash", 1000, 6096, 230534], "16": ["vertical_and_slash", 100, 800, 2475909], "17": ["vertical_and_slash", 30, 800, 2484470], "18": ["vertical_and_slash", 100, 800, 8168145], "19": ["vertical_and_slash", 3500, 100, 6348588], "20": ["vertical_and_slash", 500, 700, 290337], "21": ["vertical_and_slash", 3500, 100, 12830116], "22": ["vertical_and_slash", 100, 800, 11406972], "23": ["vertical_and_slash", 1000, 6096, 9663426], "24": ["vertical_and_slash", 3500, 100, 14333500], "25": ["vertical_and_slash", 3500, 100, 14787732], "26": ["vertical_and_slash", 1000, 6096, 13209856], "27": ["vertical_and_slash", 100, 800, 14623240], "28": ["vertical_and_slash", 1000, 6096, 6321698], "29": ["vertical_and_slash", 1000, 6096, 10324255], "30": ["vertical_and_slash", 100, 800, 1338], "31": ["vertical_and_slash", 1000, 6096, 5182275]}, {"0": ["vertical_and_slash", 100, 800, 2653574], "1": ["vertical_and_slash", 1000, 6096, 156404], "2": ["vertical_and_slash", 1000, 6096, 3288754], "3": ["vertical_and_slash", 1000, 6096, 597358], "4": ["vertical_and_slash", 1000, 6096, 13162000], "5": ["vertical_and_slash", 100, 800, 3304599], "6": ["vertical_and_slash", 100, 800, 2334228], "7": ["vertical_and_slash", 30, 800, 151547], "8": ["vertical_and_slash", 
1000, 6096, 8084555], "9": ["vertical_and_slash", 1000, 6096, 6986695], "10": ["vertical_and_slash", 30, 800, 1349542], "11": ["vertical_and_slash", 1000, 6096, 62139], "12": ["vertical_and_slash", 500, 700, 586215], "13": ["vertical_and_slash", 30, 800, 3339401], "14": ["vertical_and_slash", 500, 700, 9080591], "15": ["vertical_and_slash", 100, 800, 1860621], "16": ["vertical_and_slash", 1000, 6096, 11577402], "17": ["vertical_and_slash", 1000, 6096, 6483036], "18": ["vertical_and_slash", 1000, 6096, 10223119], "19": ["vertical_and_slash", 1000, 6096, 2516899], "20": ["vertical_and_slash", 100, 800, 14689692], "21": ["vertical_and_slash", 1000, 6096, 9574317], "22": ["vertical_and_slash", 1000, 6096, 14315469], "23": ["vertical_and_slash", 1000, 6096, 11084722], "24": ["vertical_and_slash", 30, 800, 5714332], "25": ["vertical_and_slash", 30, 800, 440501], "26": ["vertical_and_slash", 30, 800, 135011], "27": ["vertical_and_slash", 100, 800, 1143711], "28": ["vertical_and_slash", 1000, 6096, 10833817], "29": ["vertical_and_slash", 100, 800, 9389405], "30": ["vertical_and_slash", 1000, 6096, 7182171], "31": ["vertical_and_slash", 1000, 6096, 3116752]}, {"0": ["vertical_and_slash", 1000, 6096, 2272762], "1": ["vertical_and_slash", 100, 800, 9251901], "2": ["vertical_and_slash", 1000, 6096, 3172792], "3": ["vertical_and_slash", 1000, 6096, 11166637], "4": ["vertical_and_slash", 1000, 6096, 267179], "5": ["vertical_and_slash", 100, 800, 1956945], "6": ["vertical_and_slash", 1000, 6096, 431457], "7": ["vertical_and_slash", 100, 800, 215074], "8": ["vertical_and_slash", 30, 800, 160167], "9": ["vertical_and_slash", 1000, 6096, 13251530], "10": ["vertical_and_slash", 100, 800, 1045212], "11": ["vertical_and_slash", 1000, 6096, 7767754], "12": ["vertical_and_slash", 100, 800, 8430862], "13": ["vertical_and_slash", 100, 800, 12275346], "14": ["vertical_and_slash", 1000, 6096, 12967454], "15": ["vertical_and_slash", 1000, 6096, 776792], "16": ["vertical_and_slash", 30, 800, 4940981], "17": ["vertical_and_slash", 1000, 6096, 4687476], "18": ["vertical_and_slash", 30, 800, 3396568], "19": ["vertical_and_slash", 1000, 6096, 6330177], "20": ["vertical_and_slash", 100, 800, 10772100], "21": ["vertical_and_slash", 1000, 6096, 431927], "22": ["vertical_and_slash", 100, 800, 5368777], "23": ["vertical_and_slash", 100, 800, 11971880], "24": ["vertical_and_slash", 1000, 6096, 3355141], "25": ["vertical_and_slash", 30, 800, 7775685], "26": ["vertical_and_slash", 1000, 6096, 17862], "27": ["vertical_and_slash", 1000, 6096, 2368170], "28": ["vertical_and_slash", 1000, 6096, 887652], "29": ["vertical_and_slash", 1000, 6096, 342019], "30": ["vertical_and_slash", 1000, 6096, 2031], "31": ["vertical_and_slash", 100, 800, 851845]}, {"0": ["vertical_and_slash", 1000, 6096, 9577296], "1": ["vertical_and_slash", 1000, 6096, 6130994], "2": ["vertical_and_slash", 1000, 6096, 932158], "3": ["vertical_and_slash", 1000, 6096, 6193523], "4": ["vertical_and_slash", 30, 800, 4212495], "5": ["vertical_and_slash", 1000, 6096, 82539], "6": ["vertical_and_slash", 1000, 6096, 2033854], "7": ["vertical_and_slash", 100, 800, 973812], "8": ["vertical_and_slash", 1000, 6096, 96691], "9": ["vertical_and_slash", 1000, 6096, 7346123], "10": ["vertical_and_slash", 1000, 6096, 3425225], "11": ["vertical_and_slash", 1000, 6096, 5656378], "12": ["vertical_and_slash", 1000, 6096, 13585373], "13": ["vertical_and_slash", 3500, 100, 12228455], "14": ["vertical_and_slash", 100, 800, 14994473], "15": ["vertical_and_slash", 1000, 6096, 12825284], 
"16": ["vertical_and_slash", 1000, 6096, 8256], "17": ["vertical_and_slash", 1000, 6096, 287798], "18": ["vertical_and_slash", 1000, 6096, 3485339], "19": ["vertical_and_slash", 1000, 6096, 4049013], "20": ["vertical_and_slash", 1000, 6096, 10172329], "21": ["vertical_and_slash", 100, 800, 70376], "22": ["vertical_and_slash", 500, 700, 624964], "23": ["vertical_and_slash", 1000, 6096, 7478718], "24": ["vertical_and_slash", 1000, 6096, 11234418], "25": ["vertical_and_slash", 100, 800, 12774404], "26": ["vertical_and_slash", 1000, 6096, 10820183], "27": ["vertical_and_slash", 1000, 6096, 8669939], "28": ["vertical_and_slash", 100, 800, 46], "29": ["vertical_and_slash", 30, 800, 2478], "30": ["vertical_and_slash", 1000, 6096, 343890], "31": ["vertical_and_slash", 1000, 6096, 485618]}, {"0": ["vertical_and_slash", 1000, 6096, 2552], "1": ["vertical_and_slash", 1000, 6096, 3940587], "2": ["vertical_and_slash", 1000, 6096, 2070936], "3": ["vertical_and_slash", 1000, 6096, 232875], "4": ["vertical_and_slash", 30, 800, 751140], "5": ["vertical_and_slash", 100, 800, 231769], "6": ["vertical_and_slash", 30, 800, 2274515], "7": ["vertical_and_slash", 30, 800, 989564], "8": ["vertical_and_slash", 3500, 100, 14768346], "9": ["vertical_and_slash", 30, 800, 1208594], "10": ["vertical_and_slash", 30, 800, 1770328], "11": ["vertical_and_slash", 1000, 6096, 8752930], "12": ["vertical_and_slash", 3500, 100, 46312], "13": ["vertical_and_slash", 100, 800, 289542], "14": ["vertical_and_slash", 3500, 100, 306397], "15": ["vertical_and_slash", 3500, 100, 56350], "16": ["vertical_and_slash", 100, 800, 356204], "17": ["vertical_and_slash", 3500, 100, 1500240], "18": ["vertical_and_slash", 1000, 6096, 150152], "19": ["vertical_and_slash", 100, 800, 101799], "20": ["vertical_and_slash", 1000, 6096, 299393], "21": ["vertical_and_slash", 1000, 6096, 8627429], "22": ["vertical_and_slash", 1000, 6096, 3529325], "23": ["vertical_and_slash", 1000, 6096, 1448873], "24": ["vertical_and_slash", 1000, 6096, 1712901], "25": ["vertical_and_slash", 500, 700, 4048433], "26": ["vertical_and_slash", 1000, 6096, 3837844], "27": ["vertical_and_slash", 1000, 6096, 5399791], "28": ["vertical_and_slash", 1000, 6096, 5525857], "29": ["vertical_and_slash", 1000, 6096, 4847570], "30": ["vertical_and_slash", 1000, 6096, 7521944], "31": ["vertical_and_slash", 1000, 6096, 6944849]}, {"0": ["vertical_and_slash", 3500, 100, 12061195], "1": ["vertical_and_slash", 3500, 100, 13821114], "2": ["vertical_and_slash", 1000, 6096, 11831232], "3": ["vertical_and_slash", 1000, 6096, 1990608], "4": ["vertical_and_slash", 1000, 6096, 1126789], "5": ["vertical_and_slash", 1000, 6096, 164058], "6": ["vertical_and_slash", 1000, 6096, 1546250], "7": ["vertical_and_slash", 3500, 100, 3197616], "8": ["vertical_and_slash", 1000, 6096, 4347461], "9": ["vertical_and_slash", 100, 800, 6182587], "10": ["vertical_and_slash", 100, 800, 344594], "11": ["vertical_and_slash", 100, 800, 4476113], "12": ["vertical_and_slash", 1000, 6096, 13461002], "13": ["vertical_and_slash", 1000, 6096, 10764088], "14": ["vertical_and_slash", 1000, 6096, 12256526], "15": ["vertical_and_slash", 1000, 6096, 13680456], "16": ["vertical_and_slash", 30, 800, 247807], "17": ["vertical_and_slash", 30, 800, 283870], "18": ["vertical_and_slash", 30, 800, 8225577], "19": ["vertical_and_slash", 30, 800, 448632], "20": ["vertical_and_slash", 1000, 6096, 4175564], "21": ["vertical_and_slash", 1000, 6096, 2726117], "22": ["vertical_and_slash", 1000, 6096, 310838], "23": ["vertical_and_slash", 1000, 
6096, 204919], "24": ["vertical_and_slash", 30, 800, 875524], "25": ["vertical_and_slash", 30, 800, 1182277], "26": ["vertical_and_slash", 30, 800, 4252580], "27": ["vertical_and_slash", 100, 800, 728402], "28": ["vertical_and_slash", 1000, 6096, 12755775], "29": ["vertical_and_slash", 1000, 6096, 13455097], "30": ["vertical_and_slash", 100, 800, 10492805], "31": ["vertical_and_slash", 3500, 100, 11957996]}, {"0": ["vertical_and_slash", 500, 700, 386640], "1": ["vertical_and_slash", 100, 800, 819517], "2": ["vertical_and_slash", 30, 800, 1170984], "3": ["vertical_and_slash", 100, 800, 626489], "4": ["vertical_and_slash", 1000, 6096, 5856605], "5": ["vertical_and_slash", 1000, 6096, 12960788], "6": ["vertical_and_slash", 1000, 6096, 13042017], "7": ["vertical_and_slash", 1000, 6096, 12542120], "8": ["vertical_and_slash", 1000, 6096, 24167], "9": ["vertical_and_slash", 100, 800, 440430], "10": ["vertical_and_slash", 3500, 100, 748759], "11": ["vertical_and_slash", 1000, 6096, 4655], "12": ["vertical_and_slash", 1000, 6096, 10739360], "13": ["vertical_and_slash", 1000, 6096, 9336615], "14": ["vertical_and_slash", 3500, 100, 14305575], "15": ["vertical_and_slash", 3500, 100, 13833292], "16": ["vertical_and_slash", 30, 800, 3412], "17": ["vertical_and_slash", 500, 700, 16614], "18": ["vertical_and_slash", 1000, 6096, 839930], "19": ["vertical_and_slash", 500, 700, 77296], "20": ["vertical_and_slash", 1000, 6096, 11148082], "21": ["vertical_and_slash", 100, 800, 2483383], "22": ["vertical_and_slash", 3500, 100, 11902907], "23": ["vertical_and_slash", 100, 800, 2194], "24": ["vertical_and_slash", 1000, 6096, 4441496], "25": ["vertical_and_slash", 3500, 100, 10827107], "26": ["vertical_and_slash", 100, 800, 105753], "27": ["vertical_and_slash", 1000, 6096, 5261357], "28": ["vertical_and_slash", 30, 800, 61603], "29": ["vertical_and_slash", 30, 800, 108480], "30": ["vertical_and_slash", 30, 800, 30219], "31": ["vertical_and_slash", 30, 800, 31426]}, {"0": ["vertical_and_slash", 1000, 6096, 136760], "1": ["vertical_and_slash", 100, 800, 827733], "2": ["vertical_and_slash", 100, 800, 670059], "3": ["vertical_and_slash", 3500, 100, 502020], "4": ["vertical_and_slash", 100, 800, 469444], "5": ["vertical_and_slash", 100, 800, 162670], "6": ["vertical_and_slash", 1000, 6096, 22310], "7": ["vertical_and_slash", 1000, 6096, 465], "8": ["vertical_and_slash", 30, 800, 951054], "9": ["vertical_and_slash", 30, 800, 799102], "10": ["vertical_and_slash", 30, 800, 936020], "11": ["vertical_and_slash", 30, 800, 2027181], "12": ["vertical_and_slash", 3500, 100, 5986265], "13": ["vertical_and_slash", 500, 700, 3941412], "14": ["vertical_and_slash", 100, 800, 10557303], "15": ["vertical_and_slash", 100, 800, 1533916], "16": ["vertical_and_slash", 3500, 100, 11870953], "17": ["vertical_and_slash", 3500, 100, 12342581], "18": ["vertical_and_slash", 3500, 100, 12699180], "19": ["vertical_and_slash", 1000, 6096, 5138869], "20": ["vertical_and_slash", 1000, 6096, 12477033], "21": ["vertical_and_slash", 1000, 6096, 872144], "22": ["vertical_and_slash", 3500, 100, 13382501], "23": ["vertical_and_slash", 1000, 6096, 11531397], "24": ["vertical_and_slash", 1000, 6096, 13884364], "25": ["vertical_and_slash", 1000, 6096, 13611635], "26": ["vertical_and_slash", 1000, 6096, 13516676], "27": ["vertical_and_slash", 1000, 6096, 12560863], "28": ["vertical_and_slash", 500, 700, 3865996], "29": ["vertical_and_slash", 30, 800, 3343532], "30": ["vertical_and_slash", 30, 800, 179777], "31": ["vertical_and_slash", 3500, 100, 3863085]}, 
{"0": ["vertical_and_slash", 3500, 100, 6771823], "1": ["vertical_and_slash", 3500, 100, 10770780], "2": ["vertical_and_slash", 1000, 6096, 108476], "3": ["vertical_and_slash", 1000, 6096, 917033], "4": ["vertical_and_slash", 3500, 100, 9994951], "5": ["vertical_and_slash", 3500, 100, 13503132], "6": ["vertical_and_slash", 3500, 100, 11843766], "7": ["vertical_and_slash", 3500, 100, 10714999], "8": ["vertical_and_slash", 100, 800, 650037], "9": ["vertical_and_slash", 30, 800, 321924], "10": ["vertical_and_slash", 100, 800, 306681], "11": ["vertical_and_slash", 100, 800, 76181], "12": ["vertical_and_slash", 3500, 100, 12194592], "13": ["vertical_and_slash", 1000, 6096, 12635491], "14": ["vertical_and_slash", 3500, 100, 11953805], "15": ["vertical_and_slash", 3500, 100, 12355730], "16": ["vertical_and_slash", 100, 800, 614284], "17": ["vertical_and_slash", 100, 800, 512751], "18": ["vertical_and_slash", 3500, 100, 2679940], "19": ["vertical_and_slash", 100, 800, 1749683], "20": ["vertical_and_slash", 30, 800, 563622], "21": ["vertical_and_slash", 30, 800, 9985639], "22": ["vertical_and_slash", 30, 800, 1055029], "23": ["vertical_and_slash", 30, 800, 501782], "24": ["vertical_and_slash", 30, 800, 68229], "25": ["vertical_and_slash", 100, 800, 211743], "26": ["vertical_and_slash", 100, 800, 1690702], "27": ["vertical_and_slash", 30, 800, 2720080], "28": ["vertical_and_slash", 30, 800, 3884686], "29": ["vertical_and_slash", 30, 800, 3303748], "30": ["vertical_and_slash", 30, 800, 3335960], "31": ["vertical_and_slash", 30, 800, 2469116]}, {"0": ["vertical_and_slash", 1000, 6096, 726797], "1": ["vertical_and_slash", 100, 800, 5833160], "2": ["vertical_and_slash", 1000, 6096, 1766748], "3": ["vertical_and_slash", 1000, 6096, 6021028], "4": ["vertical_and_slash", 1000, 6096, 3120126], "5": ["vertical_and_slash", 30, 800, 3103142], "6": ["vertical_and_slash", 1000, 6096, 22974], "7": ["vertical_and_slash", 1000, 6096, 616209], "8": ["vertical_and_slash", 100, 800, 5571258], "9": ["vertical_and_slash", 30, 800, 2259315], "10": ["vertical_and_slash", 1000, 6096, 438342], "11": ["vertical_and_slash", 100, 800, 5557528], "12": ["vertical_and_slash", 3500, 100, 12954645], "13": ["vertical_and_slash", 1000, 6096, 12677660], "14": ["vertical_and_slash", 3500, 100, 13038925], "15": ["vertical_and_slash", 1000, 6096, 11239328], "16": ["vertical_and_slash", 3500, 100, 5247646], "17": ["vertical_and_slash", 500, 700, 384866], "18": ["vertical_and_slash", 1000, 6096, 655131], "19": ["vertical_and_slash", 3500, 100, 8826025], "20": ["vertical_and_slash", 100, 800, 4478606], "21": ["vertical_and_slash", 100, 800, 3881052], "22": ["vertical_and_slash", 100, 800, 6027887], "23": ["vertical_and_slash", 3500, 100, 8475077], "24": ["vertical_and_slash", 1000, 6096, 103633], "25": ["vertical_and_slash", 1000, 6096, 76484], "26": ["vertical_and_slash", 100, 800, 22432], "27": ["vertical_and_slash", 1000, 6096, 1313063], "28": ["vertical_and_slash", 1000, 6096, 6617078], "29": ["vertical_and_slash", 3500, 100, 12355842], "30": ["vertical_and_slash", 100, 800, 1401085], "31": ["vertical_and_slash", 3500, 100, 11350169]}, {"0": ["vertical_and_slash", 100, 800, 142456], "1": ["vertical_and_slash", 500, 700, 290481], "2": ["vertical_and_slash", 30, 800, 195338], "3": ["vertical_and_slash", 30, 800, 235375], "4": ["vertical_and_slash", 3500, 100, 13220328], "5": ["vertical_and_slash", 1000, 6096, 13040738], "6": ["vertical_and_slash", 3500, 100, 14847993], "7": ["vertical_and_slash", 1000, 6096, 12236451], "8": 
["vertical_and_slash", 30, 800, 1360565], "9": ["vertical_and_slash", 30, 800, 115757], "10": ["vertical_and_slash", 30, 800, 806615], "11": ["vertical_and_slash", 30, 800, 5655605], "12": ["vertical_and_slash", 1000, 6096, 803465], "13": ["vertical_and_slash", 1000, 6096, 7601845], "14": ["vertical_and_slash", 30, 800, 8869563], "15": ["vertical_and_slash", 100, 800, 9177143], "16": ["vertical_and_slash", 1000, 6096, 612999], "17": ["vertical_and_slash", 100, 800, 2657352], "18": ["vertical_and_slash", 1000, 6096, 297015], "19": ["vertical_and_slash", 1000, 6096, 309571], "20": ["vertical_and_slash", 1000, 6096, 13160644], "21": ["vertical_and_slash", 1000, 6096, 14006964], "22": ["vertical_and_slash", 3500, 100, 14287913], "23": ["vertical_and_slash", 3500, 100, 14586379], "24": ["vertical_and_slash", 1000, 6096, 12023244], "25": ["vertical_and_slash", 30, 800, 12092108], "26": ["vertical_and_slash", 500, 700, 6005169], "27": ["vertical_and_slash", 500, 700, 9574963], "28": ["vertical_and_slash", 1000, 6096, 1696021], "29": ["vertical_and_slash", 30, 800, 1516298], "30": ["vertical_and_slash", 1000, 6096, 2303483], "31": ["vertical_and_slash", 1000, 6096, 903636]}, {"0": ["vertical_and_slash", 3500, 100, 7496361], "1": ["vertical_and_slash", 30, 800, 571560], "2": ["vertical_and_slash", 100, 800, 3025676], "3": ["vertical_and_slash", 30, 800, 5167076], "4": ["vertical_and_slash", 30, 800, 501453], "5": ["vertical_and_slash", 30, 800, 342659], "6": ["vertical_and_slash", 30, 800, 2561588], "7": ["vertical_and_slash", 30, 800, 869660], "8": ["vertical_and_slash", 100, 800, 10740412], "9": ["vertical_and_slash", 30, 800, 87115], "10": ["vertical_and_slash", 3500, 100, 9800623], "11": ["vertical_and_slash", 30, 800, 9191448], "12": ["vertical_and_slash", 1000, 6096, 289817], "13": ["vertical_and_slash", 3500, 100, 9009480], "14": ["vertical_and_slash", 1000, 6096, 1799625], "15": ["vertical_and_slash", 1000, 6096, 4984031], "16": ["vertical_and_slash", 3500, 100, 3381538], "17": ["vertical_and_slash", 100, 800, 11456778], "18": ["vertical_and_slash", 3500, 100, 14316760], "19": ["vertical_and_slash", 100, 800, 5228661], "20": ["vertical_and_slash", 3500, 100, 5831971], "21": ["vertical_and_slash", 500, 700, 10184028], "22": ["vertical_and_slash", 30, 800, 578221], "23": ["vertical_and_slash", 3500, 100, 6213253], "24": ["vertical_and_slash", 1000, 6096, 6146366], "25": ["vertical_and_slash", 1000, 6096, 1477166], "26": ["vertical_and_slash", 30, 800, 318810], "27": ["vertical_and_slash", 1000, 6096, 8654738], "28": ["vertical_and_slash", 500, 700, 3294065], "29": ["vertical_and_slash", 100, 800, 8531992], "30": ["vertical_and_slash", 100, 800, 2564233], "31": ["vertical_and_slash", 100, 800, 113957]}, {"0": ["vertical_and_slash", 100, 800, 530019], "1": ["vertical_and_slash", 100, 800, 647580], "2": ["vertical_and_slash", 30, 800, 4990437], "3": ["vertical_and_slash", 30, 800, 317415], "4": ["vertical_and_slash", 100, 800, 365956], "5": ["vertical_and_slash", 100, 800, 1689094], "6": ["vertical_and_slash", 100, 800, 454281], "7": ["vertical_and_slash", 30, 800, 266331], "8": ["vertical_and_slash", 3500, 100, 3603593], "9": ["vertical_and_slash", 100, 800, 14614370], "10": ["vertical_and_slash", 1000, 6096, 5361097], "11": ["vertical_and_slash", 100, 800, 14371859], "12": ["vertical_and_slash", 30, 800, 1232558], "13": ["vertical_and_slash", 30, 800, 546028], "14": ["vertical_and_slash", 30, 800, 853313], "15": ["vertical_and_slash", 30, 800, 194933], "16": ["vertical_and_slash", 3500, 100, 
14304381], "17": ["vertical_and_slash", 1000, 6096, 815541], "18": ["vertical_and_slash", 100, 800, 5138518], "19": ["vertical_and_slash", 3500, 100, 9565094], "20": ["vertical_and_slash", 1000, 6096, 2035169], "21": ["vertical_and_slash", 1000, 6096, 3375423], "22": ["vertical_and_slash", 1000, 6096, 3777615], "23": ["vertical_and_slash", 1000, 6096, 12354929], "24": ["vertical_and_slash", 30, 800, 1763576], "25": ["vertical_and_slash", 30, 800, 3727796], "26": ["vertical_and_slash", 30, 800, 2744406], "27": ["vertical_and_slash", 30, 800, 1997757], "28": ["vertical_and_slash", 1000, 6096, 12257], "29": ["vertical_and_slash", 1000, 6096, 1169443], "30": ["vertical_and_slash", 3500, 100, 5723144], "31": ["vertical_and_slash", 3500, 100, 5420298]}, {"0": ["vertical_and_slash", 1000, 6096, 2447512], "1": ["vertical_and_slash", 3500, 100, 10860908], "2": ["vertical_and_slash", 100, 800, 9108572], "3": ["vertical_and_slash", 3500, 100, 11624453], "4": ["vertical_and_slash", 100, 800, 6925192], "5": ["vertical_and_slash", 100, 800, 9369879], "6": ["vertical_and_slash", 3500, 100, 11865786], "7": ["vertical_and_slash", 30, 800, 9628595], "8": ["vertical_and_slash", 1000, 6096, 6302171], "9": ["vertical_and_slash", 3500, 100, 8455497], "10": ["vertical_and_slash", 30, 800, 6885122], "11": ["vertical_and_slash", 1000, 6096, 5076785], "12": ["vertical_and_slash", 1000, 6096, 12769698], "13": ["vertical_and_slash", 1000, 6096, 13513363], "14": ["vertical_and_slash", 1000, 6096, 14089388], "15": ["vertical_and_slash", 1000, 6096, 14501815], "16": ["vertical_and_slash", 1000, 6096, 1619566], "17": ["vertical_and_slash", 1000, 6096, 5031895], "18": ["vertical_and_slash", 1000, 6096, 3833561], "19": ["vertical_and_slash", 100, 800, 12325460], "20": ["vertical_and_slash", 1000, 6096, 320906], "21": ["vertical_and_slash", 3500, 100, 13924855], "22": ["vertical_and_slash", 100, 800, 10478874], "23": ["vertical_and_slash", 30, 800, 4410655], "24": ["vertical_and_slash", 3500, 100, 14767197], "25": ["vertical_and_slash", 1000, 6096, 4108672], "26": ["vertical_and_slash", 100, 800, 14797906], "27": ["vertical_and_slash", 3500, 100, 14643144], "28": ["vertical_and_slash", 100, 800, 10556268], "29": ["vertical_and_slash", 3500, 100, 14575250], "30": ["vertical_and_slash", 1000, 6096, 14076831], "31": ["vertical_and_slash", 1000, 6096, 10779010]}, {"0": ["vertical_and_slash", 30, 800, 4744885], "1": ["vertical_and_slash", 30, 800, 4794511], "2": ["vertical_and_slash", 30, 800, 9418373], "3": ["vertical_and_slash", 30, 800, 2291979], "4": ["vertical_and_slash", 30, 800, 10009392], "5": ["vertical_and_slash", 30, 800, 981769], "6": ["vertical_and_slash", 30, 800, 3395467], "7": ["vertical_and_slash", 100, 800, 5966942], "8": ["vertical_and_slash", 30, 800, 7092993], "9": ["vertical_and_slash", 30, 800, 2176489], "10": ["vertical_and_slash", 30, 800, 4330010], "11": ["vertical_and_slash", 1000, 6096, 2664159], "12": ["vertical_and_slash", 30, 800, 7282328], "13": ["vertical_and_slash", 30, 800, 14135136], "14": ["vertical_and_slash", 1000, 6096, 791118], "15": ["vertical_and_slash", 30, 800, 9266081], "16": ["vertical_and_slash", 3500, 100, 14422288], "17": ["vertical_and_slash", 3500, 100, 11457529], "18": ["vertical_and_slash", 30, 800, 4503306], "19": ["vertical_and_slash", 100, 800, 11937543], "20": ["vertical_and_slash", 3500, 100, 14538141], "21": ["vertical_and_slash", 3500, 100, 13564714], "22": ["vertical_and_slash", 100, 800, 9671640], "23": ["vertical_and_slash", 30, 800, 2841456], "24": 
["vertical_and_slash", 30, 800, 1395156], "25": ["vertical_and_slash", 30, 800, 989026], "26": ["vertical_and_slash", 30, 800, 10617339], "27": ["vertical_and_slash", 30, 800, 8170836], "28": ["vertical_and_slash", 100, 800, 2032096], "29": ["vertical_and_slash", 3500, 100, 13931334], "30": ["vertical_and_slash", 3500, 100, 14790424], "31": ["vertical_and_slash", 1000, 6096, 4133248]}]
diff --git a/minference/configs/Phi_3_mini_128k_instruct_kv_out_v32_fit_o_best_pattern.json b/minference/configs/Phi_3_mini_128k_instruct_kv_out_v32_fit_o_best_pattern.json
new file mode 100644
index 0000000..aee9242
--- /dev/null
+++ b/minference/configs/Phi_3_mini_128k_instruct_kv_out_v32_fit_o_best_pattern.json
@@ -0,0 +1 @@
+[{"0": ["vertical_and_slash", 1000, 6096, 0.33349305391311646], "1": ["vertical_and_slash", 1000, 6096, 0.4378805160522461], "2": ["vertical_and_slash", 1000, 6096, 0.48282963037490845], "3": ["vertical_and_slash", 1000, 6096, 0.37695789337158203], "4": ["vertical_and_slash", 1000, 6096, 0.38924556970596313], "5": ["vertical_and_slash", 1000, 6096, 0.3510749340057373], "6": ["vertical_and_slash", 1000, 6096, 0.39886632561683655], "7": ["vertical_and_slash", 1000, 6096, 0.8939290046691895], "8": ["vertical_and_slash", 1000, 6096, 0.44007450342178345], "9": ["vertical_and_slash", 1000, 6096, 0.3897586464881897], "10": ["vertical_and_slash", 1000, 6096, 0.40355661511421204], "11": ["vertical_and_slash", 1000, 6096, 0.36381030082702637], "12": ["vertical_and_slash", 1000, 6096, 0.4459313154220581], "13": ["vertical_and_slash", 1000, 6096, 0.3341565728187561], "14": ["vertical_and_slash", 1000, 6096, 0.384276419878006], "15": ["vertical_and_slash", 1000, 6096, 0.34818336367607117], "16": ["vertical_and_slash", 1000, 6096, 0.3867861330509186], "17": ["vertical_and_slash", 1000, 6096, 0.3639705777168274], "18": ["vertical_and_slash", 1000, 6096, 0.3512721359729767], "19": ["vertical_and_slash", 1000, 6096, 0.4681489169597626], "20": ["vertical_and_slash", 1000, 6096, 0.4651115834712982], "21": ["vertical_and_slash", 1000, 6096, 0.3882596790790558], "22": ["vertical_and_slash", 1000, 6096, 0.47017091512680054], "23": ["vertical_and_slash", 1000, 6096, 0.8037586808204651], "24": ["vertical_and_slash", 1000, 6096, 0.3913174867630005], "25": ["vertical_and_slash", 1000, 6096, 0.5203016400337219], "26": ["vertical_and_slash", 1000, 6096, 0.47166702151298523], "27": ["vertical_and_slash", 1000, 6096, 0.760438084602356], "28": ["vertical_and_slash", 1000, 6096, 0.943070650100708], "29": ["vertical_and_slash", 1000, 6096, 0.4118039011955261], "30": ["vertical_and_slash", 1000, 6096, 0.6815055012702942], "31": ["vertical_and_slash", 1000, 6096, 0.6300445795059204]}, {"0": ["vertical_and_slash", 1000, 6096, 0.6439709663391113], "1": ["vertical_and_slash", 1000, 6096, 0.5207313895225525], "2": ["vertical_and_slash", 1000, 6096, 0.47401225566864014], "3": ["vertical_and_slash", 1000, 6096, 0.5988013744354248], "4": ["vertical_and_slash", 1000, 6096, 0.6021823287010193], "5": ["vertical_and_slash", 1000, 6096, 0.4162128269672394], "6": ["vertical_and_slash", 1000, 6096, 0.7858797311782837], "7": ["vertical_and_slash", 1000, 6096, 0.6350969672203064], "8": ["vertical_and_slash", 1000, 6096, 0.5817031860351562], "9": ["vertical_and_slash", 1000, 6096, 0.9291586875915527], "10": ["vertical_and_slash", 1000, 6096, 0.6078806519508362], "11": ["vertical_and_slash", 1000, 6096, 0.5813876986503601], "12": ["vertical_and_slash", 1000, 6096, 0.7652914524078369], "13": ["vertical_and_slash", 1000, 6096, 0.4502100944519043], "14": ["vertical_and_slash", 1000, 6096, 0.6180105209350586], "15": ["vertical_and_slash", 1000, 6096, 0.7175759673118591], "16": ["vertical_and_slash", 1000, 6096, 0.6323421597480774], "17": ["vertical_and_slash", 3500, 100, 0.479082852602005], "18": ["vertical_and_slash", 1000, 6096, 0.6011233329772949], "19": ["vertical_and_slash", 1000, 6096, 0.8908118605613708], "20": ["vertical_and_slash", 1000, 6096, 0.9255861639976501], "21": ["vertical_and_slash", 1000, 6096, 0.795491099357605], "22": ["vertical_and_slash", 1000, 6096, 0.5210989117622375], "23": ["vertical_and_slash", 1000, 6096, 0.5200297236442566], "24": ["vertical_and_slash", 1000, 6096, 0.5280771255493164], "25": ["vertical_and_slash", 
1000, 6096, 0.7380014657974243], "26": ["vertical_and_slash", 1000, 6096, 0.9885807633399963], "27": ["vertical_and_slash", 30, 800, 0.8718840479850769], "28": ["vertical_and_slash", 1000, 6096, 0.6302862167358398], "29": ["vertical_and_slash", 1000, 6096, 0.5750876069068909], "30": ["vertical_and_slash", 1000, 6096, 0.45260417461395264], "31": ["vertical_and_slash", 1000, 6096, 0.6499432325363159]}, {"0": ["vertical_and_slash", 1000, 6096, 0.7977765798568726], "1": ["vertical_and_slash", 1000, 6096, 0.8083621859550476], "2": ["vertical_and_slash", 1000, 6096, 0.5935484170913696], "3": ["vertical_and_slash", 1000, 6096, 0.5435713529586792], "4": ["vertical_and_slash", 1000, 6096, 0.5687218904495239], "5": ["vertical_and_slash", 1000, 6096, 0.854501485824585], "6": ["vertical_and_slash", 1000, 6096, 0.6359673142433167], "7": ["vertical_and_slash", 1000, 6096, 0.5785433053970337], "8": ["vertical_and_slash", 1000, 6096, 0.8543683290481567], "9": ["vertical_and_slash", 1000, 6096, 0.762371838092804], "10": ["vertical_and_slash", 1000, 6096, 0.6970657706260681], "11": ["vertical_and_slash", 1000, 6096, 0.6844046115875244], "12": ["vertical_and_slash", 1000, 6096, 0.7364732623100281], "13": ["vertical_and_slash", 1000, 6096, 0.8335257172584534], "14": ["vertical_and_slash", 1000, 6096, 0.7734203934669495], "15": ["vertical_and_slash", 1000, 6096, 0.7341973185539246], "16": ["vertical_and_slash", 1000, 6096, 0.7554108500480652], "17": ["vertical_and_slash", 1000, 6096, 0.9054623246192932], "18": ["vertical_and_slash", 1000, 6096, 0.6300320029258728], "19": ["vertical_and_slash", 1000, 6096, 0.70512455701828], "20": ["vertical_and_slash", 1000, 6096, 0.6085258722305298], "21": ["vertical_and_slash", 1000, 6096, 0.6398192644119263], "22": ["vertical_and_slash", 1000, 6096, 0.5992570519447327], "23": ["vertical_and_slash", 1000, 6096, 0.7130728363990784], "24": ["vertical_and_slash", 1000, 6096, 0.8504863977432251], "25": ["vertical_and_slash", 1000, 6096, 0.5748745799064636], "26": ["vertical_and_slash", 1000, 6096, 0.7758736610412598], "27": ["vertical_and_slash", 1000, 6096, 0.5538337230682373], "28": ["vertical_and_slash", 1000, 6096, 0.7384650707244873], "29": ["vertical_and_slash", 1000, 6096, 0.6905707120895386], "30": ["vertical_and_slash", 1000, 6096, 0.6217074990272522], "31": ["vertical_and_slash", 1000, 6096, 0.9545422196388245]}, {"0": ["vertical_and_slash", 500, 700, 0.9924208521842957], "1": ["vertical_and_slash", 100, 750, 0.9987075924873352], "2": ["vertical_and_slash", 500, 700, 0.9915499687194824], "3": ["vertical_and_slash", 100, 750, 0.9940086007118225], "4": ["vertical_and_slash", 100, 750, 0.9947375655174255], "5": ["vertical_and_slash", 100, 750, 0.9920898675918579], "6": ["vertical_and_slash", 100, 750, 0.9960256218910217], "7": ["vertical_and_slash", 100, 750, 0.995691180229187], "8": ["vertical_and_slash", 100, 750, 0.9113738536834717], "9": ["vertical_and_slash", 100, 750, 0.9700976014137268], "10": ["vertical_and_slash", 3500, 100, 0.9520721435546875], "11": ["vertical_and_slash", 100, 750, 0.9561598300933838], "12": ["vertical_and_slash", 100, 750, 0.8256366848945618], "13": ["vertical_and_slash", 100, 750, 0.9905430674552917], "14": ["vertical_and_slash", 500, 700, 0.9822967648506165], "15": ["vertical_and_slash", 100, 750, 0.9880149960517883], "16": ["vertical_and_slash", 100, 750, 0.9570814967155457], "17": ["vertical_and_slash", 100, 750, 0.9678364396095276], "18": ["vertical_and_slash", 3500, 100, 0.9819864630699158], "19": ["vertical_and_slash", 100, 750, 
0.9930639266967773], "20": ["vertical_and_slash", 3500, 100, 0.9928342700004578], "21": ["vertical_and_slash", 3500, 100, 0.9522428512573242], "22": ["vertical_and_slash", 100, 750, 0.9961853623390198], "23": ["vertical_and_slash", 100, 750, 0.9895046353340149], "24": ["vertical_and_slash", 100, 750, 0.9106875061988831], "25": ["vertical_and_slash", 100, 750, 0.9944272041320801], "26": ["vertical_and_slash", 100, 750, 0.9603897333145142], "27": ["vertical_and_slash", 100, 750, 0.9967218637466431], "28": ["vertical_and_slash", 100, 750, 0.9922856092453003], "29": ["vertical_and_slash", 100, 750, 0.9425711631774902], "30": ["vertical_and_slash", 1000, 6096, 0.6492345333099365], "31": ["vertical_and_slash", 500, 700, 0.957703709602356]}, {"0": ["vertical_and_slash", 100, 750, 0.9920511841773987], "1": ["vertical_and_slash", 3500, 100, 0.9784621000289917], "2": ["vertical_and_slash", 100, 750, 0.9945407509803772], "3": ["vertical_and_slash", 100, 750, 0.9613493084907532], "4": ["vertical_and_slash", 100, 750, 0.8482271432876587], "5": ["vertical_and_slash", 500, 700, 0.9943300485610962], "6": ["vertical_and_slash", 100, 750, 0.9810841083526611], "7": ["vertical_and_slash", 3500, 100, 0.9297769069671631], "8": ["vertical_and_slash", 100, 750, 0.8839191198348999], "9": ["vertical_and_slash", 100, 750, 0.9955653548240662], "10": ["vertical_and_slash", 100, 750, 0.9484658241271973], "11": ["vertical_and_slash", 100, 750, 0.994473397731781], "12": ["vertical_and_slash", 500, 700, 0.9420907497406006], "13": ["vertical_and_slash", 100, 750, 0.9161052107810974], "14": ["vertical_and_slash", 100, 750, 0.9645522832870483], "15": ["vertical_and_slash", 100, 750, 0.9875764846801758], "16": ["vertical_and_slash", 100, 750, 0.7891636490821838], "17": ["vertical_and_slash", 1000, 6096, 0.7788199186325073], "18": ["vertical_and_slash", 100, 750, 0.9488416910171509], "19": ["vertical_and_slash", 3500, 100, 0.9959850311279297], "20": ["vertical_and_slash", 100, 750, 0.9768155217170715], "21": ["vertical_and_slash", 100, 750, 0.995807945728302], "22": ["vertical_and_slash", 3500, 100, 0.8900895118713379], "23": ["vertical_and_slash", 100, 750, 0.9586788415908813], "24": ["vertical_and_slash", 100, 750, 0.9651024341583252], "25": ["vertical_and_slash", 3500, 100, 0.9384130239486694], "26": ["vertical_and_slash", 100, 750, 0.9855350255966187], "27": ["vertical_and_slash", 100, 750, 0.9657205939292908], "28": ["vertical_and_slash", 3500, 100, 0.9184022545814514], "29": ["vertical_and_slash", 100, 750, 0.866909384727478], "30": ["vertical_and_slash", 1000, 6096, 0.7826077342033386], "31": ["vertical_and_slash", 100, 750, 0.9975974559783936]}, {"0": ["vertical_and_slash", 100, 750, 0.9865456223487854], "1": ["vertical_and_slash", 100, 750, 0.9591361880302429], "2": ["vertical_and_slash", 100, 750, 0.9168012142181396], "3": ["vertical_and_slash", 500, 700, 0.9530511498451233], "4": ["vertical_and_slash", 1000, 6096, 0.8645423650741577], "5": ["vertical_and_slash", 500, 700, 0.9792267084121704], "6": ["vertical_and_slash", 100, 750, 0.9941954612731934], "7": ["vertical_and_slash", 100, 750, 0.960307776927948], "8": ["vertical_and_slash", 3500, 100, 0.9855586886405945], "9": ["vertical_and_slash", 100, 750, 0.9828901886940002], "10": ["vertical_and_slash", 100, 750, 0.8591288328170776], "11": ["vertical_and_slash", 100, 750, 0.917044460773468], "12": ["vertical_and_slash", 100, 750, 0.9849950075149536], "13": ["vertical_and_slash", 100, 750, 0.8859434723854065], "14": ["vertical_and_slash", 100, 750, 
0.9971017241477966], "15": ["vertical_and_slash", 500, 700, 0.9620269536972046], "16": ["vertical_and_slash", 500, 700, 0.9597799181938171], "17": ["vertical_and_slash", 500, 700, 0.9934410452842712], "18": ["vertical_and_slash", 3500, 100, 0.9977172017097473], "19": ["vertical_and_slash", 500, 700, 0.9520473480224609], "20": ["vertical_and_slash", 3500, 100, 0.9906032085418701], "21": ["vertical_and_slash", 100, 750, 0.9745447635650635], "22": ["vertical_and_slash", 100, 750, 0.9957244396209717], "23": ["vertical_and_slash", 100, 750, 0.9829675555229187], "24": ["vertical_and_slash", 100, 750, 0.9565562009811401], "25": ["vertical_and_slash", 100, 750, 0.9823064804077148], "26": ["vertical_and_slash", 100, 750, 0.987698495388031], "27": ["vertical_and_slash", 1000, 6096, 0.8219541907310486], "28": ["vertical_and_slash", 1000, 6096, 0.7586351633071899], "29": ["vertical_and_slash", 100, 750, 0.9752539992332458], "30": ["vertical_and_slash", 100, 750, 0.9929803609848022], "31": ["vertical_and_slash", 100, 750, 0.9185792803764343]}, {"0": ["vertical_and_slash", 100, 750, 0.9146243333816528], "1": ["vertical_and_slash", 100, 750, 0.9178520441055298], "2": ["vertical_and_slash", 3500, 100, 0.9930599331855774], "3": ["vertical_and_slash", 100, 750, 0.9993709325790405], "4": ["vertical_and_slash", 500, 700, 0.9853806495666504], "5": ["vertical_and_slash", 100, 750, 0.9141497015953064], "6": ["vertical_and_slash", 100, 750, 0.992788553237915], "7": ["vertical_and_slash", 100, 750, 0.9772038459777832], "8": ["vertical_and_slash", 1000, 6096, 0.6869983673095703], "9": ["vertical_and_slash", 100, 750, 0.9871460795402527], "10": ["vertical_and_slash", 100, 750, 0.9741801619529724], "11": ["vertical_and_slash", 100, 750, 0.9956739544868469], "12": ["vertical_and_slash", 100, 750, 0.9555794596672058], "13": ["vertical_and_slash", 3500, 100, 0.8615856766700745], "14": ["vertical_and_slash", 3500, 100, 0.9012727737426758], "15": ["vertical_and_slash", 100, 750, 0.9786412715911865], "16": ["vertical_and_slash", 3500, 100, 0.7491975426673889], "17": ["vertical_and_slash", 100, 750, 0.9849361181259155], "18": ["vertical_and_slash", 3500, 100, 0.9097980856895447], "19": ["vertical_and_slash", 1000, 6096, 0.8621278405189514], "20": ["vertical_and_slash", 500, 700, 0.9943590760231018], "21": ["vertical_and_slash", 100, 750, 0.8645753264427185], "22": ["vertical_and_slash", 100, 750, 0.9920986294746399], "23": ["vertical_and_slash", 1000, 6096, 0.8657084703445435], "24": ["vertical_and_slash", 3500, 100, 0.9750965237617493], "25": ["vertical_and_slash", 3500, 100, 0.8507974147796631], "26": ["vertical_and_slash", 3500, 100, 0.9118348360061646], "27": ["vertical_and_slash", 3500, 100, 0.9703859090805054], "28": ["vertical_and_slash", 3500, 100, 0.9725451469421387], "29": ["vertical_and_slash", 1000, 6096, 0.7008982300758362], "30": ["vertical_and_slash", 1000, 6096, 0.838621199131012], "31": ["vertical_and_slash", 100, 750, 0.9929103255271912]}, {"0": ["vertical_and_slash", 1000, 6096, 0.7402030825614929], "1": ["vertical_and_slash", 1000, 6096, 0.8565414547920227], "2": ["vertical_and_slash", 100, 750, 0.9612839221954346], "3": ["vertical_and_slash", 1000, 6096, 0.9598837494850159], "4": ["vertical_and_slash", 1000, 6096, 0.7645464539527893], "5": ["vertical_and_slash", 100, 750, 0.9872377514839172], "6": ["vertical_and_slash", 1000, 6096, 0.7918620705604553], "7": ["vertical_and_slash", 500, 700, 0.9622856378555298], "8": ["vertical_and_slash", 100, 750, 0.8891160488128662], "9": ["vertical_and_slash", 500, 
700, 0.9844319224357605], "10": ["vertical_and_slash", 500, 700, 0.9876360297203064], "11": ["vertical_and_slash", 500, 700, 0.9688720703125], "12": ["vertical_and_slash", 1000, 6096, 0.5671995878219604], "13": ["vertical_and_slash", 100, 750, 0.9620596170425415], "14": ["vertical_and_slash", 1000, 6096, 0.6478529572486877], "15": ["vertical_and_slash", 100, 750, 0.9807542562484741], "16": ["vertical_and_slash", 3500, 100, 0.9823787212371826], "17": ["vertical_and_slash", 100, 750, 0.8980384469032288], "18": ["vertical_and_slash", 1000, 6096, 0.8713955879211426], "19": ["vertical_and_slash", 100, 750, 0.9611169099807739], "20": ["vertical_and_slash", 100, 750, 0.9941024780273438], "21": ["vertical_and_slash", 100, 750, 0.9876882433891296], "22": ["vertical_and_slash", 3500, 100, 0.9474965333938599], "23": ["vertical_and_slash", 100, 750, 0.9415712952613831], "24": ["vertical_and_slash", 100, 750, 0.9960836172103882], "25": ["vertical_and_slash", 100, 750, 0.9898598194122314], "26": ["vertical_and_slash", 100, 750, 0.9720168113708496], "27": ["vertical_and_slash", 100, 750, 0.985356330871582], "28": ["vertical_and_slash", 3500, 100, 0.9795358180999756], "29": ["vertical_and_slash", 100, 750, 0.970496654510498], "30": ["vertical_and_slash", 3500, 100, 0.999195396900177], "31": ["vertical_and_slash", 100, 750, 0.9589951038360596]}, {"0": ["vertical_and_slash", 1000, 6096, 0.8079184889793396], "1": ["stream_llm", 100, 800, 0.96484375], "2": ["vertical_and_slash", 1000, 6096, 0.6607644557952881], "3": ["vertical_and_slash", 30, 800, 0.9899947047233582], "4": ["vertical_and_slash", 1000, 6096, 0.9565256237983704], "5": ["vertical_and_slash", 1000, 6096, 0.9755614995956421], "6": ["vertical_and_slash", 30, 800, 0.9720635414123535], "7": ["vertical_and_slash", 30, 800, 0.9191414713859558], "8": ["stream_llm", 100, 800, 0.9921875], "9": ["vertical_and_slash", 1000, 6096, 0.6984944939613342], "10": ["stream_llm", 100, 800, 0.97265625], "11": ["vertical_and_slash", 30, 800, 0.955635666847229], "12": ["vertical_and_slash", 1000, 6096, 0.9949175715446472], "13": ["vertical_and_slash", 30, 800, 0.9833577871322632], "14": ["vertical_and_slash", 1000, 6096, 0.612384021282196], "15": ["vertical_and_slash", 1000, 6096, 0.9294421076774597], "16": ["vertical_and_slash", 30, 800, 0.9978874921798706], "17": ["vertical_and_slash", 30, 800, 0.9265275001525879], "18": ["vertical_and_slash", 500, 700, 0.8441793322563171], "19": ["vertical_and_slash", 1000, 6096, 0.9973151087760925], "20": ["vertical_and_slash", 30, 800, 0.8883945941925049], "21": ["vertical_and_slash", 1000, 6096, 0.9890816807746887], "22": ["vertical_and_slash", 30, 800, 0.9924365282058716], "23": ["stream_llm", 100, 800, 0.98828125], "24": ["vertical_and_slash", 1000, 6096, 0.9733841419219971], "25": ["vertical_and_slash", 1000, 6096, 0.8846827149391174], "26": ["vertical_and_slash", 1000, 6096, 0.8909521698951721], "27": ["vertical_and_slash", 30, 800, 0.95379239320755], "28": ["vertical_and_slash", 30, 800, 0.989055871963501], "29": ["vertical_and_slash", 30, 800, 0.9804853796958923], "30": ["vertical_and_slash", 30, 800, 0.9921841621398926], "31": ["vertical_and_slash", 30, 800, 0.9727922677993774]}, {"0": ["stream_llm", 100, 800, 0.984375], "1": ["vertical_and_slash", 30, 800, 0.9801875352859497], "2": ["vertical_and_slash", 3500, 100, 0.9504685997962952], "3": ["vertical_and_slash", 500, 700, 0.5719053745269775], "4": ["vertical_and_slash", 30, 800, 0.9975548386573792], "5": ["vertical_and_slash", 30, 800, 0.9834421873092651], "6": 
["vertical_and_slash", 500, 700, 0.876423180103302], "7": ["vertical_and_slash", 1000, 6096, 0.9761123657226562], "8": ["vertical_and_slash", 1000, 6096, 0.6793014407157898], "9": ["vertical_and_slash", 30, 800, 0.8573703765869141], "10": ["vertical_and_slash", 500, 700, 0.9037665128707886], "11": ["stream_llm", 100, 800, 0.94921875], "12": ["stream_llm", 100, 800, 0.59375], "13": ["vertical_and_slash", 30, 800, 0.9938877820968628], "14": ["vertical_and_slash", 30, 800, 0.9964749217033386], "15": ["stream_llm", 100, 800, 0.9765625], "16": ["vertical_and_slash", 500, 700, 0.9928801655769348], "17": ["stream_llm", 100, 800, 0.859375], "18": ["stream_llm", 100, 800, 0.93359375], "19": ["vertical_and_slash", 500, 700, 0.9897311329841614], "20": ["stream_llm", 100, 800, 0.96875], "21": ["stream_llm", 100, 800, 0.9296875], "22": ["vertical_and_slash", 1000, 6096, 0.49674782156944275], "23": ["vertical_and_slash", 1000, 6096, 0.5498730540275574], "24": ["vertical_and_slash", 1000, 6096, 0.6677294373512268], "25": ["vertical_and_slash", 30, 800, 0.8520674109458923], "26": ["vertical_and_slash", 30, 800, 0.9708148241043091], "27": ["vertical_and_slash", 1000, 6096, 0.9498739838600159], "28": ["vertical_and_slash", 30, 800, 0.9852201342582703], "29": ["vertical_and_slash", 30, 800, 0.9892252683639526], "30": ["vertical_and_slash", 30, 800, 0.9976245164871216], "31": ["stream_llm", 100, 800, 0.91796875]}, {"0": ["vertical_and_slash", 30, 800, 0.976232647895813], "1": ["vertical_and_slash", 1000, 6096, 0.850098729133606], "2": ["vertical_and_slash", 30, 800, 0.9943907260894775], "3": ["stream_llm", 100, 800, 0.984375], "4": ["vertical_and_slash", 1000, 6096, 0.9408355355262756], "5": ["stream_llm", 100, 800, 0.62109375], "6": ["vertical_and_slash", 30, 800, 0.9146958589553833], "7": ["stream_llm", 100, 800, 0.578125], "8": ["vertical_and_slash", 1000, 6096, 0.9866257905960083], "9": ["stream_llm", 100, 800, 0.8671875], "10": ["stream_llm", 100, 800, 0.98828125], "11": ["stream_llm", 100, 800, 0.80078125], "12": ["vertical_and_slash", 30, 800, 0.9795709252357483], "13": ["vertical_and_slash", 1000, 6096, 0.9181753396987915], "14": ["vertical_and_slash", 30, 800, 0.9088999032974243], "15": ["stream_llm", 100, 800, 1.0], "16": ["stream_llm", 100, 800, 0.93359375], "17": ["vertical_and_slash", 1000, 6096, 0.7872908115386963], "18": ["stream_llm", 100, 800, 0.96875], "19": ["vertical_and_slash", 30, 800, 0.9915726184844971], "20": ["vertical_and_slash", 30, 800, 0.9914611577987671], "21": ["stream_llm", 100, 800, 0.94921875], "22": ["stream_llm", 100, 800, 0.91796875], "23": ["vertical_and_slash", 3500, 100, 0.4178726077079773], "24": ["vertical_and_slash", 1000, 6096, 0.9209551811218262], "25": ["stream_llm", 100, 800, 0.953125], "26": ["vertical_and_slash", 1000, 6096, 0.8251335024833679], "27": ["vertical_and_slash", 1000, 6096, 0.7916073799133301], "28": ["stream_llm", 100, 800, 0.98046875], "29": ["vertical_and_slash", 30, 800, 0.9805914163589478], "30": ["vertical_and_slash", 30, 800, 0.9889715313911438], "31": ["vertical_and_slash", 30, 800, 0.7096468210220337]}, {"0": ["vertical_and_slash", 3500, 100, 0.9098867774009705], "1": ["vertical_and_slash", 1000, 6096, 0.9131186008453369], "2": ["vertical_and_slash", 1000, 6096, 0.6216369271278381], "3": ["vertical_and_slash", 3500, 100, 0.9781222939491272], "4": ["vertical_and_slash", 1000, 6096, 0.6995159983634949], "5": ["vertical_and_slash", 30, 800, 0.7733919620513916], "6": ["stream_llm", 100, 800, 0.8046875], "7": ["stream_llm", 100, 800, 
0.9921875], "8": ["vertical_and_slash", 1000, 6096, 0.9208213686943054], "9": ["vertical_and_slash", 30, 800, 0.9892569780349731], "10": ["stream_llm", 100, 800, 0.65234375], "11": ["vertical_and_slash", 3500, 100, 0.8766616582870483], "12": ["stream_llm", 100, 800, 0.69140625], "13": ["vertical_and_slash", 30, 800, 0.9681114554405212], "14": ["vertical_and_slash", 30, 800, 0.954004168510437], "15": ["vertical_and_slash", 1000, 6096, 0.6683151721954346], "16": ["vertical_and_slash", 1000, 6096, 0.9404566287994385], "17": ["vertical_and_slash", 1000, 6096, 0.629856288433075], "18": ["vertical_and_slash", 500, 700, 0.9569997191429138], "19": ["vertical_and_slash", 1000, 6096, 0.9538705348968506], "20": ["stream_llm", 100, 800, 0.85546875], "21": ["vertical_and_slash", 1000, 6096, 0.8144884705543518], "22": ["vertical_and_slash", 30, 800, 0.95702064037323], "23": ["stream_llm", 100, 800, 0.99609375], "24": ["vertical_and_slash", 1000, 6096, 0.8552843928337097], "25": ["stream_llm", 100, 800, 0.93359375], "26": ["vertical_and_slash", 1000, 6096, 0.8885473012924194], "27": ["vertical_and_slash", 30, 800, 0.9034969210624695], "28": ["vertical_and_slash", 30, 800, 0.8834430575370789], "29": ["stream_llm", 100, 800, 0.59765625], "30": ["stream_llm", 100, 800, 0.98046875], "31": ["vertical_and_slash", 1000, 6096, 0.5801111459732056]}, {"0": ["vertical_and_slash", 1000, 6096, 0.9783773422241211], "1": ["vertical_and_slash", 1000, 6096, 0.9992927312850952], "2": ["vertical_and_slash", 30, 800, 0.9968302845954895], "3": ["vertical_and_slash", 3500, 100, 0.45828360319137573], "4": ["vertical_and_slash", 30, 800, 0.836064875125885], "5": ["vertical_and_slash", 1000, 6096, 0.8009666800498962], "6": ["vertical_and_slash", 3500, 100, 0.6518401503562927], "7": ["vertical_and_slash", 30, 800, 0.9921544790267944], "8": ["vertical_and_slash", 1000, 6096, 0.4855879545211792], "9": ["vertical_and_slash", 1000, 6096, 0.9904646277427673], "10": ["vertical_and_slash", 3500, 100, 0.8973155617713928], "11": ["vertical_and_slash", 1000, 6096, 0.8983845710754395], "12": ["stream_llm", 100, 800, 0.82421875], "13": ["vertical_and_slash", 1000, 6096, 0.8326148390769958], "14": ["vertical_and_slash", 1000, 6096, 0.44982603192329407], "15": ["vertical_and_slash", 30, 800, 0.9292823076248169], "16": ["stream_llm", 100, 800, 0.83203125], "17": ["vertical_and_slash", 500, 700, 0.8943775296211243], "18": ["vertical_and_slash", 3500, 100, 0.8824247121810913], "19": ["vertical_and_slash", 1000, 6096, 0.8916551470756531], "20": ["stream_llm", 100, 800, 0.84765625], "21": ["vertical_and_slash", 1000, 6096, 0.5656689405441284], "22": ["vertical_and_slash", 3500, 100, 0.9858580827713013], "23": ["vertical_and_slash", 3500, 100, 0.6534677743911743], "24": ["vertical_and_slash", 1000, 6096, 0.7796179056167603], "25": ["stream_llm", 100, 800, 0.984375], "26": ["stream_llm", 100, 800, 0.8125], "27": ["vertical_and_slash", 1000, 6096, 0.8051357269287109], "28": ["vertical_and_slash", 1000, 6096, 0.9759415984153748], "29": ["vertical_and_slash", 3500, 100, 0.9613996148109436], "30": ["vertical_and_slash", 30, 800, 0.9861305952072144], "31": ["vertical_and_slash", 1000, 6096, 0.5375377535820007]}, {"0": ["vertical_and_slash", 1000, 6096, 0.9526095390319824], "1": ["vertical_and_slash", 1000, 6096, 0.9219456315040588], "2": ["vertical_and_slash", 1000, 6096, 0.6329025626182556], "3": ["vertical_and_slash", 1000, 6096, 0.9703953862190247], "4": ["vertical_and_slash", 3500, 100, 0.9341285228729248], "5": ["stream_llm", 100, 800, 0.98828125], 
"6": ["vertical_and_slash", 3500, 100, 0.975139319896698], "7": ["vertical_and_slash", 30, 800, 0.9698626399040222], "8": ["vertical_and_slash", 1000, 6096, 0.8665440082550049], "9": ["vertical_and_slash", 1000, 6096, 0.9887139797210693], "10": ["vertical_and_slash", 1000, 6096, 0.9663894772529602], "11": ["vertical_and_slash", 500, 700, 0.9613908529281616], "12": ["vertical_and_slash", 1000, 6096, 0.9625579118728638], "13": ["vertical_and_slash", 3500, 100, 0.8293338418006897], "14": ["vertical_and_slash", 1000, 6096, 0.9918296933174133], "15": ["vertical_and_slash", 3500, 100, 0.6993081569671631], "16": ["vertical_and_slash", 1000, 6096, 0.7726790904998779], "17": ["vertical_and_slash", 30, 800, 0.9927448034286499], "18": ["vertical_and_slash", 3500, 100, 0.9216746091842651], "19": ["vertical_and_slash", 1000, 6096, 0.9197890758514404], "20": ["vertical_and_slash", 1000, 6096, 0.5418304800987244], "21": ["vertical_and_slash", 3500, 100, 0.7247577905654907], "22": ["vertical_and_slash", 1000, 6096, 0.8909022212028503], "23": ["vertical_and_slash", 3500, 100, 0.6162543892860413], "24": ["vertical_and_slash", 1000, 6096, 0.9798792600631714], "25": ["stream_llm", 100, 800, 0.9921875], "26": ["vertical_and_slash", 1000, 6096, 0.839588463306427], "27": ["stream_llm", 100, 800, 0.921875], "28": ["vertical_and_slash", 1000, 6096, 0.9863616228103638], "29": ["vertical_and_slash", 1000, 6096, 0.9895434975624084], "30": ["vertical_and_slash", 1000, 6096, 0.9338933825492859], "31": ["vertical_and_slash", 1000, 6096, 0.9152888655662537]}, {"0": ["vertical_and_slash", 100, 750, 0.7857484221458435], "1": ["vertical_and_slash", 3500, 100, 0.9863781332969666], "2": ["vertical_and_slash", 3500, 100, 0.9732434153556824], "3": ["vertical_and_slash", 1000, 6096, 0.7411113381385803], "4": ["vertical_and_slash", 1000, 6096, 0.9037321209907532], "5": ["vertical_and_slash", 1000, 6096, 0.7728227376937866], "6": ["vertical_and_slash", 3500, 100, 0.9566982388496399], "7": ["vertical_and_slash", 1000, 6096, 0.8955481648445129], "8": ["vertical_and_slash", 500, 700, 0.8905653357505798], "9": ["vertical_and_slash", 3500, 100, 0.9852890968322754], "10": ["vertical_and_slash", 1000, 6096, 0.5732011795043945], "11": ["vertical_and_slash", 3500, 100, 0.9701256155967712], "12": ["vertical_and_slash", 3500, 100, 0.8983554244041443], "13": ["vertical_and_slash", 100, 750, 0.9726784825325012], "14": ["vertical_and_slash", 3500, 100, 0.6008065938949585], "15": ["vertical_and_slash", 1000, 6096, 0.6582738161087036], "16": ["vertical_and_slash", 3500, 100, 0.9488815665245056], "17": ["vertical_and_slash", 100, 750, 0.9958171844482422], "18": ["vertical_and_slash", 3500, 100, 0.8186895847320557], "19": ["vertical_and_slash", 500, 700, 0.9635193347930908], "20": ["vertical_and_slash", 1000, 6096, 0.9248959422111511], "21": ["vertical_and_slash", 3500, 100, 0.9385164976119995], "22": ["vertical_and_slash", 100, 750, 0.9387568235397339], "23": ["vertical_and_slash", 1000, 6096, 0.8735635876655579], "24": ["vertical_and_slash", 500, 700, 0.890371561050415], "25": ["vertical_and_slash", 100, 750, 0.9905737638473511], "26": ["vertical_and_slash", 3500, 100, 0.946341335773468], "27": ["vertical_and_slash", 3500, 100, 0.942945659160614], "28": ["vertical_and_slash", 100, 750, 0.994683027267456], "29": ["vertical_and_slash", 500, 700, 0.9688966870307922], "30": ["vertical_and_slash", 1000, 6096, 0.9828435778617859], "31": ["vertical_and_slash", 1000, 6096, 0.8722150325775146]}, {"0": ["vertical_and_slash", 500, 700, 0.9728457927703857], 
"1": ["vertical_and_slash", 100, 750, 0.9586004018783569], "2": ["vertical_and_slash", 3500, 100, 0.9719207882881165], "3": ["vertical_and_slash", 3500, 100, 0.6680086851119995], "4": ["vertical_and_slash", 3500, 100, 0.970458984375], "5": ["vertical_and_slash", 3500, 100, 0.7634486556053162], "6": ["vertical_and_slash", 3500, 100, 0.7259127497673035], "7": ["vertical_and_slash", 100, 750, 0.9781140089035034], "8": ["vertical_and_slash", 3500, 100, 0.9952470064163208], "9": ["vertical_and_slash", 3500, 100, 0.9868772625923157], "10": ["vertical_and_slash", 3500, 100, 0.558458685874939], "11": ["vertical_and_slash", 1000, 6096, 0.7121242880821228], "12": ["vertical_and_slash", 1000, 6096, 0.7061645984649658], "13": ["vertical_and_slash", 3500, 100, 0.923751711845398], "14": ["vertical_and_slash", 1000, 6096, 0.8015576601028442], "15": ["vertical_and_slash", 500, 700, 0.9007270932197571], "16": ["vertical_and_slash", 3500, 100, 0.9591111540794373], "17": ["vertical_and_slash", 500, 700, 0.9750815033912659], "18": ["vertical_and_slash", 100, 750, 0.9805834293365479], "19": ["vertical_and_slash", 3500, 100, 0.8620939254760742], "20": ["vertical_and_slash", 3500, 100, 0.9881291389465332], "21": ["vertical_and_slash", 500, 700, 0.9975225925445557], "22": ["vertical_and_slash", 3500, 100, 0.9125117063522339], "23": ["vertical_and_slash", 3500, 100, 0.8796795010566711], "24": ["vertical_and_slash", 3500, 100, 0.9172841310501099], "25": ["vertical_and_slash", 1000, 6096, 0.8340160846710205], "26": ["vertical_and_slash", 1000, 6096, 0.8479950428009033], "27": ["vertical_and_slash", 3500, 100, 0.9778053164482117], "28": ["vertical_and_slash", 100, 750, 0.9912164211273193], "29": ["vertical_and_slash", 1000, 6096, 0.6634088754653931], "30": ["vertical_and_slash", 3500, 100, 0.9486925601959229], "31": ["vertical_and_slash", 3500, 100, 0.985546350479126]}, {"0": ["vertical_and_slash", 3500, 100, 0.7207826375961304], "1": ["vertical_and_slash", 1000, 6096, 0.7674809098243713], "2": ["vertical_and_slash", 1000, 6096, 0.5480814576148987], "3": ["vertical_and_slash", 3500, 100, 0.974454939365387], "4": ["vertical_and_slash", 100, 750, 0.9901475310325623], "5": ["vertical_and_slash", 3500, 100, 0.9111185073852539], "6": ["vertical_and_slash", 3500, 100, 0.8977652192115784], "7": ["vertical_and_slash", 500, 700, 0.8826637864112854], "8": ["vertical_and_slash", 3500, 100, 0.9674721956253052], "9": ["vertical_and_slash", 500, 700, 0.9511355757713318], "10": ["vertical_and_slash", 3500, 100, 0.9368802309036255], "11": ["vertical_and_slash", 3500, 100, 0.7037530541419983], "12": ["vertical_and_slash", 3500, 100, 0.8404982089996338], "13": ["vertical_and_slash", 3500, 100, 0.9477558732032776], "14": ["vertical_and_slash", 1000, 6096, 0.5408625602722168], "15": ["vertical_and_slash", 1000, 6096, 0.8930901288986206], "16": ["vertical_and_slash", 500, 700, 0.9620649814605713], "17": ["vertical_and_slash", 3500, 100, 0.9665637016296387], "18": ["vertical_and_slash", 3500, 100, 0.9973539710044861], "19": ["vertical_and_slash", 3500, 100, 0.9200847744941711], "20": ["vertical_and_slash", 100, 750, 0.9846996068954468], "21": ["vertical_and_slash", 3500, 100, 0.9522152543067932], "22": ["vertical_and_slash", 3500, 100, 0.9200462102890015], "23": ["vertical_and_slash", 3500, 100, 0.7189115285873413], "24": ["vertical_and_slash", 3500, 100, 0.9400286078453064], "25": ["vertical_and_slash", 3500, 100, 0.9140079617500305], "26": ["vertical_and_slash", 3500, 100, 0.9733141660690308], "27": ["vertical_and_slash", 3500, 100, 
0.9182970523834229], "28": ["vertical_and_slash", 500, 700, 0.7845987677574158], "29": ["vertical_and_slash", 500, 700, 0.953305721282959], "30": ["vertical_and_slash", 1000, 6096, 0.9332642555236816], "31": ["vertical_and_slash", 500, 700, 0.8975687026977539]}, {"0": ["vertical_and_slash", 1000, 6096, 0.8796314001083374], "1": ["vertical_and_slash", 3500, 100, 0.9541191458702087], "2": ["vertical_and_slash", 3500, 100, 0.9853596091270447], "3": ["vertical_and_slash", 3500, 100, 0.9959757924079895], "4": ["vertical_and_slash", 500, 700, 0.942274272441864], "5": ["vertical_and_slash", 3500, 100, 0.9958774447441101], "6": ["vertical_and_slash", 3500, 100, 0.762219250202179], "7": ["vertical_and_slash", 3500, 100, 0.9778050780296326], "8": ["vertical_and_slash", 3500, 100, 0.9803900718688965], "9": ["vertical_and_slash", 3500, 100, 0.9493845701217651], "10": ["vertical_and_slash", 100, 750, 0.9833114147186279], "11": ["vertical_and_slash", 3500, 100, 0.9671387076377869], "12": ["vertical_and_slash", 3500, 100, 0.8459083437919617], "13": ["vertical_and_slash", 3500, 100, 0.9625062346458435], "14": ["vertical_and_slash", 3500, 100, 0.9926583766937256], "15": ["vertical_and_slash", 3500, 100, 0.9901418089866638], "16": ["vertical_and_slash", 3500, 100, 0.9975236058235168], "17": ["vertical_and_slash", 3500, 100, 0.8961046934127808], "18": ["vertical_and_slash", 3500, 100, 0.9677743315696716], "19": ["vertical_and_slash", 1000, 6096, 0.7324523329734802], "20": ["vertical_and_slash", 1000, 6096, 0.7565687298774719], "21": ["vertical_and_slash", 3500, 100, 0.9934558272361755], "22": ["vertical_and_slash", 1000, 6096, 0.695542573928833], "23": ["vertical_and_slash", 3500, 100, 0.9594518542289734], "24": ["vertical_and_slash", 3500, 100, 0.9845080375671387], "25": ["vertical_and_slash", 3500, 100, 0.9140312075614929], "26": ["vertical_and_slash", 3500, 100, 0.9816687107086182], "27": ["vertical_and_slash", 3500, 100, 0.9777555465698242], "28": ["vertical_and_slash", 3500, 100, 0.948824405670166], "29": ["vertical_and_slash", 3500, 100, 0.48502659797668457], "30": ["vertical_and_slash", 3500, 100, 0.9340038895606995], "31": ["vertical_and_slash", 3500, 100, 0.9162462949752808]}, {"0": ["vertical_and_slash", 3500, 100, 0.9923238754272461], "1": ["vertical_and_slash", 3500, 100, 0.9678853750228882], "2": ["vertical_and_slash", 100, 750, 0.9968323111534119], "3": ["vertical_and_slash", 500, 700, 0.9936473965644836], "4": ["vertical_and_slash", 3500, 100, 0.9588732123374939], "5": ["vertical_and_slash", 500, 700, 0.9791616797447205], "6": ["vertical_and_slash", 3500, 100, 0.919694721698761], "7": ["vertical_and_slash", 1000, 6096, 0.626932680606842], "8": ["vertical_and_slash", 3500, 100, 0.9546087980270386], "9": ["vertical_and_slash", 500, 700, 0.8930553793907166], "10": ["vertical_and_slash", 100, 750, 0.9767886996269226], "11": ["vertical_and_slash", 1000, 6096, 0.7312592267990112], "12": ["vertical_and_slash", 3500, 100, 0.9913722276687622], "13": ["vertical_and_slash", 3500, 100, 0.9425638914108276], "14": ["vertical_and_slash", 3500, 100, 0.9949523210525513], "15": ["vertical_and_slash", 100, 750, 0.7187187671661377], "16": ["vertical_and_slash", 3500, 100, 0.9734897017478943], "17": ["vertical_and_slash", 3500, 100, 0.9750894904136658], "18": ["vertical_and_slash", 3500, 100, 0.9543801546096802], "19": ["vertical_and_slash", 3500, 100, 0.94287109375], "20": ["vertical_and_slash", 1000, 6096, 0.7409213185310364], "21": ["vertical_and_slash", 3500, 100, 0.9789512753486633], "22": 
["vertical_and_slash", 3500, 100, 0.9824472069740295], "23": ["vertical_and_slash", 3500, 100, 0.9614876508712769], "24": ["vertical_and_slash", 500, 700, 0.9097415208816528], "25": ["vertical_and_slash", 3500, 100, 0.7589483857154846], "26": ["vertical_and_slash", 3500, 100, 0.9711624979972839], "27": ["vertical_and_slash", 500, 700, 0.9924762845039368], "28": ["vertical_and_slash", 3500, 100, 0.8917614221572876], "29": ["vertical_and_slash", 500, 700, 0.9802823066711426], "30": ["vertical_and_slash", 3500, 100, 0.9433683156967163], "31": ["vertical_and_slash", 3500, 100, 0.9959222078323364]}, {"0": ["vertical_and_slash", 3500, 100, 0.8028379678726196], "1": ["vertical_and_slash", 3500, 100, 0.9934322237968445], "2": ["vertical_and_slash", 3500, 100, 0.9233330488204956], "3": ["vertical_and_slash", 500, 700, 0.9530222415924072], "4": ["vertical_and_slash", 1000, 6096, 0.7554510831832886], "5": ["vertical_and_slash", 3500, 100, 0.9931245446205139], "6": ["vertical_and_slash", 3500, 100, 0.8175129890441895], "7": ["vertical_and_slash", 500, 700, 0.9769982695579529], "8": ["vertical_and_slash", 3500, 100, 0.7803007364273071], "9": ["vertical_and_slash", 3500, 100, 0.8488234281539917], "10": ["vertical_and_slash", 1000, 6096, 0.7556964159011841], "11": ["vertical_and_slash", 100, 750, 0.9249212145805359], "12": ["vertical_and_slash", 1000, 6096, 0.5030975937843323], "13": ["vertical_and_slash", 3500, 100, 0.7736669778823853], "14": ["vertical_and_slash", 3500, 100, 0.8432313203811646], "15": ["vertical_and_slash", 3500, 100, 0.8078522086143494], "16": ["vertical_and_slash", 1000, 6096, 0.6152622699737549], "17": ["vertical_and_slash", 1000, 6096, 0.4801797866821289], "18": ["vertical_and_slash", 3500, 100, 0.7792356610298157], "19": ["vertical_and_slash", 3500, 100, 0.9260709285736084], "20": ["vertical_and_slash", 3500, 100, 0.9572370052337646], "21": ["vertical_and_slash", 500, 700, 0.9757252335548401], "22": ["vertical_and_slash", 100, 750, 0.9295142889022827], "23": ["vertical_and_slash", 100, 750, 0.8406566381454468], "24": ["vertical_and_slash", 500, 700, 0.9934183955192566], "25": ["vertical_and_slash", 3500, 100, 0.9811476469039917], "26": ["vertical_and_slash", 1000, 6096, 0.43748241662979126], "27": ["vertical_and_slash", 1000, 6096, 0.8173736929893494], "28": ["vertical_and_slash", 1000, 6096, 0.7964892983436584], "29": ["vertical_and_slash", 1000, 6096, 0.5660628080368042], "30": ["vertical_and_slash", 100, 750, 0.8858906626701355], "31": ["vertical_and_slash", 3500, 100, 0.7301779389381409]}, {"0": ["vertical_and_slash", 1000, 6096, 0.8143554925918579], "1": ["vertical_and_slash", 3500, 100, 0.8302785754203796], "2": ["vertical_and_slash", 3500, 100, 0.9859114289283752], "3": ["vertical_and_slash", 3500, 100, 0.6922958493232727], "4": ["vertical_and_slash", 3500, 100, 0.9597254991531372], "5": ["vertical_and_slash", 1000, 6096, 0.8074929714202881], "6": ["vertical_and_slash", 3500, 100, 0.7841739654541016], "7": ["vertical_and_slash", 3500, 100, 0.9443768262863159], "8": ["vertical_and_slash", 3500, 100, 0.9327424764633179], "9": ["vertical_and_slash", 3500, 100, 0.8796824812889099], "10": ["vertical_and_slash", 3500, 100, 0.9468095302581787], "11": ["vertical_and_slash", 3500, 100, 0.9797954559326172], "12": ["vertical_and_slash", 3500, 100, 0.9876496195793152], "13": ["vertical_and_slash", 100, 750, 0.9684455394744873], "14": ["vertical_and_slash", 3500, 100, 0.9720463156700134], "15": ["vertical_and_slash", 3500, 100, 0.9134085774421692], "16": ["vertical_and_slash", 100, 750, 
0.9962508678436279], "17": ["vertical_and_slash", 3500, 100, 0.9967661499977112], "18": ["vertical_and_slash", 3500, 100, 0.9218150973320007], "19": ["vertical_and_slash", 3500, 100, 0.9165892601013184], "20": ["vertical_and_slash", 500, 700, 0.9811153411865234], "21": ["vertical_and_slash", 1000, 6096, 0.8401690721511841], "22": ["vertical_and_slash", 100, 750, 0.9827044606208801], "23": ["vertical_and_slash", 500, 700, 0.9265505075454712], "24": ["vertical_and_slash", 3500, 100, 0.8814885020256042], "25": ["vertical_and_slash", 1000, 6096, 0.8774723410606384], "26": ["vertical_and_slash", 1000, 6096, 0.8981026411056519], "27": ["vertical_and_slash", 100, 750, 0.995216429233551], "28": ["vertical_and_slash", 3500, 100, 0.9950628280639648], "29": ["vertical_and_slash", 500, 700, 0.9678530693054199], "30": ["vertical_and_slash", 100, 750, 0.9900303483009338], "31": ["vertical_and_slash", 3500, 100, 0.9148485064506531]}, {"0": ["vertical_and_slash", 3500, 100, 0.7734143137931824], "1": ["vertical_and_slash", 3500, 100, 0.9431662559509277], "2": ["vertical_and_slash", 100, 750, 0.9125087857246399], "3": ["vertical_and_slash", 3500, 100, 0.9382316470146179], "4": ["vertical_and_slash", 1000, 6096, 0.7059416174888611], "5": ["vertical_and_slash", 3500, 100, 0.6978054642677307], "6": ["vertical_and_slash", 3500, 100, 0.9927070140838623], "7": ["vertical_and_slash", 3500, 100, 0.9393529295921326], "8": ["vertical_and_slash", 100, 750, 0.9231113195419312], "9": ["vertical_and_slash", 3500, 100, 0.9985975623130798], "10": ["vertical_and_slash", 500, 700, 0.9555321335792542], "11": ["vertical_and_slash", 3500, 100, 0.9785676002502441], "12": ["vertical_and_slash", 500, 700, 0.9968464374542236], "13": ["vertical_and_slash", 3500, 100, 0.9894333481788635], "14": ["vertical_and_slash", 500, 700, 0.8927757740020752], "15": ["vertical_and_slash", 3500, 100, 0.9463996887207031], "16": ["vertical_and_slash", 3500, 100, 0.9756723642349243], "17": ["vertical_and_slash", 3500, 100, 0.970882773399353], "18": ["vertical_and_slash", 1000, 6096, 0.6809303164482117], "19": ["vertical_and_slash", 3500, 100, 0.9938862919807434], "20": ["vertical_and_slash", 3500, 100, 0.9821802973747253], "21": ["vertical_and_slash", 3500, 100, 0.9383650422096252], "22": ["vertical_and_slash", 3500, 100, 0.8643637299537659], "23": ["vertical_and_slash", 100, 750, 0.9771586656570435], "24": ["vertical_and_slash", 500, 700, 0.976405143737793], "25": ["vertical_and_slash", 3500, 100, 0.9743276238441467], "26": ["vertical_and_slash", 3500, 100, 0.9265220761299133], "27": ["vertical_and_slash", 3500, 100, 0.9841408729553223], "28": ["vertical_and_slash", 500, 700, 0.9391534328460693], "29": ["vertical_and_slash", 3500, 100, 0.9312986135482788], "30": ["vertical_and_slash", 3500, 100, 0.8832992911338806], "31": ["vertical_and_slash", 3500, 100, 0.9811874628067017]}, {"0": ["vertical_and_slash", 3500, 100, 0.9956807494163513], "1": ["vertical_and_slash", 3500, 100, 0.9670407772064209], "2": ["vertical_and_slash", 100, 750, 0.9973832964897156], "3": ["vertical_and_slash", 100, 750, 0.99891597032547], "4": ["vertical_and_slash", 3500, 100, 0.9931758642196655], "5": ["vertical_and_slash", 100, 750, 0.996113121509552], "6": ["vertical_and_slash", 3500, 100, 0.9983065724372864], "7": ["vertical_and_slash", 3500, 100, 0.9833848476409912], "8": ["vertical_and_slash", 3500, 100, 0.9948523640632629], "9": ["vertical_and_slash", 3500, 100, 0.8683006167411804], "10": ["vertical_and_slash", 3500, 100, 0.9931465983390808], "11": ["vertical_and_slash", 
100, 750, 0.984261691570282], "12": ["vertical_and_slash", 100, 750, 0.9601353406906128], "13": ["vertical_and_slash", 500, 700, 0.9203216433525085], "14": ["vertical_and_slash", 3500, 100, 0.9650700092315674], "15": ["vertical_and_slash", 100, 750, 0.984341561794281], "16": ["vertical_and_slash", 3500, 100, 0.9989381432533264], "17": ["vertical_and_slash", 1000, 6096, 0.8591818809509277], "18": ["vertical_and_slash", 500, 700, 0.959535539150238], "19": ["vertical_and_slash", 3500, 100, 0.9685975909233093], "20": ["vertical_and_slash", 3500, 100, 0.9992274045944214], "21": ["vertical_and_slash", 3500, 100, 0.9054502248764038], "22": ["vertical_and_slash", 3500, 100, 0.9957486391067505], "23": ["vertical_and_slash", 3500, 100, 0.9970229864120483], "24": ["vertical_and_slash", 3500, 100, 0.933996319770813], "25": ["vertical_and_slash", 3500, 100, 0.9522771239280701], "26": ["vertical_and_slash", 3500, 100, 0.8640444278717041], "27": ["vertical_and_slash", 3500, 100, 0.9864702820777893], "28": ["vertical_and_slash", 1000, 6096, 0.8701584935188293], "29": ["vertical_and_slash", 3500, 100, 0.9872081279754639], "30": ["vertical_and_slash", 3500, 100, 0.9637035727500916], "31": ["vertical_and_slash", 3500, 100, 0.7964584827423096]}, {"0": ["vertical_and_slash", 500, 700, 0.944079577922821], "1": ["vertical_and_slash", 1000, 6096, 0.7686152458190918], "2": ["vertical_and_slash", 3500, 100, 0.9423201680183411], "3": ["vertical_and_slash", 3500, 100, 0.9597930908203125], "4": ["vertical_and_slash", 3500, 100, 0.9981894493103027], "5": ["vertical_and_slash", 100, 750, 0.9951789975166321], "6": ["vertical_and_slash", 3500, 100, 0.9678981304168701], "7": ["vertical_and_slash", 3500, 100, 0.8912110924720764], "8": ["vertical_and_slash", 100, 750, 0.9829361438751221], "9": ["vertical_and_slash", 500, 700, 0.9326693415641785], "10": ["vertical_and_slash", 3500, 100, 0.7954592108726501], "11": ["vertical_and_slash", 3500, 100, 0.9361847639083862], "12": ["vertical_and_slash", 3500, 100, 0.9777213335037231], "13": ["vertical_and_slash", 100, 750, 0.7402770519256592], "14": ["vertical_and_slash", 1000, 6096, 0.8369068503379822], "15": ["vertical_and_slash", 3500, 100, 0.8386251926422119], "16": ["vertical_and_slash", 500, 700, 0.9928125143051147], "17": ["vertical_and_slash", 3500, 100, 0.9980320930480957], "18": ["vertical_and_slash", 100, 750, 0.99200838804245], "19": ["vertical_and_slash", 3500, 100, 0.9937632083892822], "20": ["vertical_and_slash", 1000, 6096, 0.8582853674888611], "21": ["vertical_and_slash", 500, 700, 0.8901017308235168], "22": ["vertical_and_slash", 3500, 100, 0.9825611710548401], "23": ["vertical_and_slash", 3500, 100, 0.9956728219985962], "24": ["vertical_and_slash", 3500, 100, 0.992565929889679], "25": ["vertical_and_slash", 3500, 100, 0.9841880202293396], "26": ["vertical_and_slash", 1000, 6096, 0.8873481750488281], "27": ["vertical_and_slash", 100, 750, 0.9767672419548035], "28": ["vertical_and_slash", 3500, 100, 0.9931612610816956], "29": ["vertical_and_slash", 3500, 100, 0.9209384918212891], "30": ["vertical_and_slash", 100, 750, 0.7578334212303162], "31": ["vertical_and_slash", 3500, 100, 0.9578611850738525]}, {"0": ["vertical_and_slash", 100, 750, 0.9389412999153137], "1": ["vertical_and_slash", 100, 750, 0.9428157210350037], "2": ["vertical_and_slash", 3500, 100, 0.9956400990486145], "3": ["vertical_and_slash", 100, 750, 0.9144065976142883], "4": ["vertical_and_slash", 1000, 6096, 0.8475824594497681], "5": ["vertical_and_slash", 100, 750, 0.996335506439209], "6": 
["vertical_and_slash", 3500, 100, 0.9988783597946167], "7": ["vertical_and_slash", 3500, 100, 0.94597989320755], "8": ["vertical_and_slash", 3500, 100, 0.9713111519813538], "9": ["vertical_and_slash", 100, 750, 0.9670871496200562], "10": ["vertical_and_slash", 3500, 100, 0.9996585249900818], "11": ["vertical_and_slash", 3500, 100, 0.9820530414581299], "12": ["vertical_and_slash", 3500, 100, 0.9983968138694763], "13": ["vertical_and_slash", 3500, 100, 0.9315072298049927], "14": ["vertical_and_slash", 3500, 100, 0.9930176138877869], "15": ["vertical_and_slash", 500, 700, 0.9945250749588013], "16": ["vertical_and_slash", 100, 750, 0.9049948453903198], "17": ["vertical_and_slash", 3500, 100, 0.9992651343345642], "18": ["vertical_and_slash", 500, 700, 0.9942126274108887], "19": ["vertical_and_slash", 500, 700, 0.9891477227210999], "20": ["vertical_and_slash", 3500, 100, 0.9028084874153137], "21": ["vertical_and_slash", 100, 750, 0.9475080370903015], "22": ["vertical_and_slash", 500, 700, 0.9690455794334412], "23": ["vertical_and_slash", 3500, 100, 0.9446419477462769], "24": ["vertical_and_slash", 3500, 100, 0.9801247715950012], "25": ["vertical_and_slash", 100, 750, 0.9777910113334656], "26": ["vertical_and_slash", 3500, 100, 0.7017547488212585], "27": ["vertical_and_slash", 3500, 100, 0.9493237137794495], "28": ["vertical_and_slash", 100, 750, 0.9993017315864563], "29": ["vertical_and_slash", 3500, 100, 0.893531858921051], "30": ["vertical_and_slash", 3500, 100, 0.9467594623565674], "31": ["vertical_and_slash", 3500, 100, 0.9743610620498657]}, {"0": ["vertical_and_slash", 3500, 100, 0.985114574432373], "1": ["vertical_and_slash", 500, 700, 0.9950987696647644], "2": ["vertical_and_slash", 3500, 100, 0.7027000784873962], "3": ["vertical_and_slash", 3500, 100, 0.9855831265449524], "4": ["vertical_and_slash", 3500, 100, 0.9874288439750671], "5": ["vertical_and_slash", 1000, 6096, 0.7125917673110962], "6": ["vertical_and_slash", 3500, 100, 0.9454708695411682], "7": ["vertical_and_slash", 3500, 100, 0.9898356199264526], "8": ["vertical_and_slash", 3500, 100, 0.9445544481277466], "9": ["vertical_and_slash", 3500, 100, 0.988140344619751], "10": ["vertical_and_slash", 500, 700, 0.981208860874176], "11": ["vertical_and_slash", 500, 700, 0.9874861836433411], "12": ["vertical_and_slash", 3500, 100, 0.9963038563728333], "13": ["vertical_and_slash", 100, 750, 0.9972052574157715], "14": ["vertical_and_slash", 3500, 100, 0.9943816065788269], "15": ["vertical_and_slash", 100, 750, 0.8364889025688171], "16": ["vertical_and_slash", 100, 750, 0.9870871901512146], "17": ["vertical_and_slash", 100, 750, 0.998099684715271], "18": ["vertical_and_slash", 3500, 100, 0.8674955368041992], "19": ["vertical_and_slash", 500, 700, 0.9969808459281921], "20": ["vertical_and_slash", 3500, 100, 0.8848986625671387], "21": ["vertical_and_slash", 1000, 6096, 0.867315411567688], "22": ["vertical_and_slash", 500, 700, 0.9908551573753357], "23": ["vertical_and_slash", 100, 750, 0.8952099680900574], "24": ["vertical_and_slash", 500, 700, 0.9714990854263306], "25": ["vertical_and_slash", 100, 750, 0.8733819723129272], "26": ["vertical_and_slash", 3500, 100, 0.9205271005630493], "27": ["vertical_and_slash", 3500, 100, 0.9833540916442871], "28": ["vertical_and_slash", 3500, 100, 0.9445760846138], "29": ["vertical_and_slash", 3500, 100, 0.9536135792732239], "30": ["vertical_and_slash", 500, 700, 0.9753504991531372], "31": ["vertical_and_slash", 1000, 6096, 0.8801259398460388]}, {"0": ["vertical_and_slash", 3500, 100, 0.9614631533622742], 
"1": ["vertical_and_slash", 3500, 100, 0.9763227105140686], "2": ["vertical_and_slash", 100, 750, 0.970956563949585], "3": ["vertical_and_slash", 100, 750, 0.9151788949966431], "4": ["vertical_and_slash", 3500, 100, 0.9920399188995361], "5": ["vertical_and_slash", 3500, 100, 0.9422896504402161], "6": ["vertical_and_slash", 3500, 100, 0.986482560634613], "7": ["vertical_and_slash", 3500, 100, 0.9976206421852112], "8": ["vertical_and_slash", 100, 750, 0.9943424463272095], "9": ["vertical_and_slash", 3500, 100, 0.9936824440956116], "10": ["vertical_and_slash", 3500, 100, 0.9882729649543762], "11": ["vertical_and_slash", 100, 750, 0.9862287640571594], "12": ["vertical_and_slash", 500, 700, 0.9886087775230408], "13": ["vertical_and_slash", 3500, 100, 0.9989089369773865], "14": ["vertical_and_slash", 3500, 100, 0.9651134610176086], "15": ["vertical_and_slash", 3500, 100, 0.9826948046684265], "16": ["vertical_and_slash", 3500, 100, 0.9450136423110962], "17": ["vertical_and_slash", 3500, 100, 0.9979375004768372], "18": ["vertical_and_slash", 3500, 100, 0.9520789384841919], "19": ["vertical_and_slash", 3500, 100, 0.9316532015800476], "20": ["vertical_and_slash", 100, 750, 0.9904720187187195], "21": ["vertical_and_slash", 3500, 100, 0.999125599861145], "22": ["vertical_and_slash", 3500, 100, 0.9995089769363403], "23": ["vertical_and_slash", 100, 750, 0.9886007308959961], "24": ["vertical_and_slash", 3500, 100, 0.9961583018302917], "25": ["vertical_and_slash", 3500, 100, 0.9961526393890381], "26": ["vertical_and_slash", 3500, 100, 0.9557645916938782], "27": ["vertical_and_slash", 3500, 100, 0.8775650262832642], "28": ["vertical_and_slash", 3500, 100, 0.986892580986023], "29": ["vertical_and_slash", 3500, 100, 0.9749740958213806], "30": ["vertical_and_slash", 3500, 100, 0.8765645027160645], "31": ["vertical_and_slash", 3500, 100, 0.9494763016700745]}, {"0": ["vertical_and_slash", 3500, 100, 0.9797922372817993], "1": ["vertical_and_slash", 3500, 100, 0.9958779811859131], "2": ["vertical_and_slash", 3500, 100, 0.9976977705955505], "3": ["vertical_and_slash", 3500, 100, 0.9764806628227234], "4": ["vertical_and_slash", 3500, 100, 0.9868356585502625], "5": ["vertical_and_slash", 1000, 6096, 0.8740545511245728], "6": ["vertical_and_slash", 3500, 100, 0.9939981698989868], "7": ["vertical_and_slash", 1000, 6096, 0.7613811492919922], "8": ["vertical_and_slash", 3500, 100, 0.9811347723007202], "9": ["vertical_and_slash", 3500, 100, 0.9840614795684814], "10": ["vertical_and_slash", 1000, 6096, 0.8657892346382141], "11": ["vertical_and_slash", 3500, 100, 0.9502456188201904], "12": ["vertical_and_slash", 100, 750, 0.9104490280151367], "13": ["vertical_and_slash", 3500, 100, 0.9950721263885498], "14": ["vertical_and_slash", 3500, 100, 0.9724959135055542], "15": ["vertical_and_slash", 1000, 6096, 0.8955191373825073], "16": ["vertical_and_slash", 3500, 100, 0.9936071038246155], "17": ["vertical_and_slash", 3500, 100, 0.9285928606987], "18": ["vertical_and_slash", 3500, 100, 0.756338357925415], "19": ["vertical_and_slash", 3500, 100, 0.9665532112121582], "20": ["vertical_and_slash", 100, 750, 0.9970663785934448], "21": ["vertical_and_slash", 3500, 100, 0.9806201457977295], "22": ["vertical_and_slash", 1000, 6096, 0.8115424513816833], "23": ["vertical_and_slash", 1000, 6096, 0.8631585836410522], "24": ["vertical_and_slash", 3500, 100, 0.9782901406288147], "25": ["vertical_and_slash", 3500, 100, 0.9858242273330688], "26": ["vertical_and_slash", 3500, 100, 0.9617720246315002], "27": ["vertical_and_slash", 3500, 100, 
0.997412919998169], "28": ["vertical_and_slash", 3500, 100, 0.8432300090789795], "29": ["vertical_and_slash", 500, 700, 0.9955722093582153], "30": ["vertical_and_slash", 3500, 100, 0.9938695430755615], "31": ["vertical_and_slash", 3500, 100, 0.9511440396308899]}, {"0": ["vertical_and_slash", 3500, 100, 0.988155722618103], "1": ["vertical_and_slash", 3500, 100, 0.9747615456581116], "2": ["vertical_and_slash", 100, 750, 0.9718871712684631], "3": ["vertical_and_slash", 100, 750, 0.9756971597671509], "4": ["vertical_and_slash", 3500, 100, 0.947630763053894], "5": ["vertical_and_slash", 100, 750, 0.99262934923172], "6": ["vertical_and_slash", 3500, 100, 0.9955495595932007], "7": ["vertical_and_slash", 3500, 100, 0.8609271049499512], "8": ["vertical_and_slash", 3500, 100, 0.974815845489502], "9": ["vertical_and_slash", 3500, 100, 0.9884821772575378], "10": ["vertical_and_slash", 3500, 100, 0.9901348352432251], "11": ["vertical_and_slash", 100, 750, 0.9968274831771851], "12": ["vertical_and_slash", 3500, 100, 0.9918603897094727], "13": ["vertical_and_slash", 500, 700, 0.9757610559463501], "14": ["vertical_and_slash", 3500, 100, 0.9900703430175781], "15": ["vertical_and_slash", 500, 700, 0.9938023090362549], "16": ["vertical_and_slash", 1000, 6096, 0.8913345336914062], "17": ["vertical_and_slash", 500, 700, 0.9903258681297302], "18": ["vertical_and_slash", 100, 750, 0.9566823244094849], "19": ["vertical_and_slash", 100, 750, 0.9777167439460754], "20": ["vertical_and_slash", 3500, 100, 0.9674810767173767], "21": ["vertical_and_slash", 100, 750, 0.9178389310836792], "22": ["vertical_and_slash", 100, 750, 0.9882655143737793], "23": ["vertical_and_slash", 100, 750, 0.9989043474197388], "24": ["vertical_and_slash", 1000, 6096, 0.8574219942092896], "25": ["vertical_and_slash", 3500, 100, 0.9944363236427307], "26": ["vertical_and_slash", 3500, 100, 0.9970851540565491], "27": ["vertical_and_slash", 500, 700, 0.9904334545135498], "28": ["vertical_and_slash", 3500, 100, 0.9851230978965759], "29": ["vertical_and_slash", 3500, 100, 0.9900650978088379], "30": ["vertical_and_slash", 3500, 100, 0.9743545055389404], "31": ["vertical_and_slash", 500, 700, 0.9190711975097656]}, {"0": ["vertical_and_slash", 100, 750, 0.9716458320617676], "1": ["vertical_and_slash", 3500, 100, 0.9384027719497681], "2": ["vertical_and_slash", 3500, 100, 0.9696847796440125], "3": ["vertical_and_slash", 3500, 100, 0.9812428951263428], "4": ["vertical_and_slash", 1000, 6096, 0.5853931903839111], "5": ["vertical_and_slash", 3500, 100, 0.7994469404220581], "6": ["vertical_and_slash", 3500, 100, 0.9933062791824341], "7": ["vertical_and_slash", 3500, 100, 0.986369788646698], "8": ["vertical_and_slash", 3500, 100, 0.8895794153213501], "9": ["vertical_and_slash", 1000, 6096, 0.8238524794578552], "10": ["vertical_and_slash", 500, 700, 0.93126380443573], "11": ["vertical_and_slash", 3500, 100, 0.962100088596344], "12": ["vertical_and_slash", 3500, 100, 0.8438158631324768], "13": ["vertical_and_slash", 500, 700, 0.9969620108604431], "14": ["vertical_and_slash", 1000, 6096, 0.8904788494110107], "15": ["vertical_and_slash", 100, 750, 0.9925360679626465], "16": ["vertical_and_slash", 3500, 100, 0.9222993850708008], "17": ["vertical_and_slash", 1000, 6096, 0.6627880334854126], "18": ["vertical_and_slash", 1000, 6096, 0.8668970465660095], "19": ["vertical_and_slash", 3500, 100, 0.9340634346008301], "20": ["vertical_and_slash", 3500, 100, 0.9503065347671509], "21": ["vertical_and_slash", 3500, 100, 0.9436649680137634], "22": ["vertical_and_slash", 
3500, 100, 0.9768727421760559], "23": ["vertical_and_slash", 100, 750, 0.988473653793335], "24": ["vertical_and_slash", 3500, 100, 0.8777113556861877], "25": ["vertical_and_slash", 3500, 100, 0.8750200271606445], "26": ["vertical_and_slash", 1000, 6096, 0.4957360625267029], "27": ["vertical_and_slash", 3500, 100, 0.9804278016090393], "28": ["vertical_and_slash", 1000, 6096, 0.8486401438713074], "29": ["vertical_and_slash", 3500, 100, 0.8954175114631653], "30": ["vertical_and_slash", 3500, 100, 0.9651874899864197], "31": ["vertical_and_slash", 3500, 100, 0.9620938301086426]}, {"0": ["vertical_and_slash", 100, 750, 0.920842707157135], "1": ["vertical_and_slash", 3500, 100, 0.7215947508811951], "2": ["vertical_and_slash", 3500, 100, 0.9858340620994568], "3": ["vertical_and_slash", 3500, 100, 0.7861597537994385], "4": ["vertical_and_slash", 3500, 100, 0.7639158964157104], "5": ["vertical_and_slash", 3500, 100, 0.887671947479248], "6": ["vertical_and_slash", 3500, 100, 0.8891316652297974], "7": ["vertical_and_slash", 1000, 6096, 0.8906923532485962], "8": ["vertical_and_slash", 3500, 100, 0.8836961984634399], "9": ["vertical_and_slash", 3500, 100, 0.7728190422058105], "10": ["vertical_and_slash", 3500, 100, 0.9507467746734619], "11": ["vertical_and_slash", 500, 700, 0.7829118967056274], "12": ["vertical_and_slash", 100, 750, 0.8214483857154846], "13": ["vertical_and_slash", 3500, 100, 0.7196475863456726], "14": ["vertical_and_slash", 500, 700, 0.8691932559013367], "15": ["vertical_and_slash", 1000, 6096, 0.6569814085960388], "16": ["vertical_and_slash", 100, 750, 0.9087151288986206], "17": ["vertical_and_slash", 3500, 100, 0.7609643936157227], "18": ["vertical_and_slash", 3500, 100, 0.8670530319213867], "19": ["vertical_and_slash", 1000, 6096, 0.7779831290245056], "20": ["vertical_and_slash", 100, 750, 0.923963725566864], "21": ["vertical_and_slash", 1000, 6096, 0.5714190006256104], "22": ["vertical_and_slash", 500, 700, 0.6351447105407715], "23": ["vertical_and_slash", 100, 750, 0.870464026927948], "24": ["vertical_and_slash", 1000, 6096, 0.6272542476654053], "25": ["vertical_and_slash", 1000, 6096, 0.7302500009536743], "26": ["vertical_and_slash", 3500, 100, 0.9410015940666199], "27": ["vertical_and_slash", 3500, 100, 0.793304979801178], "28": ["vertical_and_slash", 1000, 6096, 0.837500274181366], "29": ["vertical_and_slash", 1000, 6096, 0.766721248626709], "30": ["vertical_and_slash", 1000, 6096, 0.7082650065422058], "31": ["vertical_and_slash", 3500, 100, 0.8947907090187073]}, {"0": ["vertical_and_slash", 100, 750, 0.8983681797981262], "1": ["vertical_and_slash", 1000, 6096, 0.9650430083274841], "2": ["vertical_and_slash", 500, 700, 0.9532706141471863], "3": ["vertical_and_slash", 3500, 100, 0.8198072910308838], "4": ["vertical_and_slash", 1000, 6096, 0.840558648109436], "5": ["vertical_and_slash", 3500, 100, 0.8227204084396362], "6": ["vertical_and_slash", 1000, 6096, 0.5979130268096924], "7": ["vertical_and_slash", 1000, 6096, 0.7691975235939026], "8": ["vertical_and_slash", 1000, 6096, 0.8089779615402222], "9": ["vertical_and_slash", 100, 750, 0.8689324855804443], "10": ["vertical_and_slash", 100, 750, 0.8621079325675964], "11": ["vertical_and_slash", 500, 700, 0.9871177673339844], "12": ["vertical_and_slash", 1000, 6096, 0.9468575716018677], "13": ["vertical_and_slash", 100, 750, 0.9075571894645691], "14": ["vertical_and_slash", 1000, 6096, 0.911694347858429], "15": ["vertical_and_slash", 100, 750, 0.9817390441894531], "16": ["vertical_and_slash", 1000, 6096, 0.7491167783737183], "17": 
["vertical_and_slash", 1000, 6096, 0.8255623579025269], "18": ["vertical_and_slash", 1000, 6096, 0.8701649308204651], "19": ["vertical_and_slash", 3500, 100, 0.838506817817688], "20": ["vertical_and_slash", 1000, 6096, 0.8749529123306274], "21": ["vertical_and_slash", 500, 700, 0.8783859610557556], "22": ["vertical_and_slash", 3500, 100, 0.9302544593811035], "23": ["vertical_and_slash", 100, 750, 0.9118035435676575], "24": ["vertical_and_slash", 1000, 6096, 0.7892093658447266], "25": ["vertical_and_slash", 100, 750, 0.904501736164093], "26": ["vertical_and_slash", 3500, 100, 0.947079598903656], "27": ["vertical_and_slash", 1000, 6096, 0.5719630718231201], "28": ["vertical_and_slash", 3500, 100, 0.9740545153617859], "29": ["vertical_and_slash", 100, 750, 0.8365178108215332], "30": ["vertical_and_slash", 3500, 100, 0.8893513083457947], "31": ["vertical_and_slash", 1000, 6096, 0.923209547996521]}]
diff --git a/minference/configs/Qwen2_7B_Instruct_128k_instruct_kv_out_v32_fit_o_best_pattern.json b/minference/configs/Qwen2_7B_Instruct_128k_instruct_kv_out_v32_fit_o_best_pattern.json
new file mode 100644
index 0000000..fd217f3
--- /dev/null
+++ b/minference/configs/Qwen2_7B_Instruct_128k_instruct_kv_out_v32_fit_o_best_pattern.json
@@ -0,0 +1 @@
+[{"0": ["vertical_and_slash", 1000, 6096, 0.7636861205101013], "1": ["vertical_and_slash", 1000, 6096, 0.997340738773346], "2": ["vertical_and_slash", 1000, 6096, 0.9176302552223206], "3": ["vertical_and_slash", 1000, 6096, 0.9874908924102783], "4": ["vertical_and_slash", 1000, 6096, 0.8024926781654358], "5": ["vertical_and_slash", 1000, 6096, 0.998029351234436], "6": ["vertical_and_slash", 1000, 6096, 0.9974402189254761], "7": ["vertical_and_slash", 1000, 6096, 0.9935320019721985], "8": ["vertical_and_slash", 1000, 6096, 0.9520949721336365], "9": ["vertical_and_slash", 1000, 6096, 0.9167510271072388], "10": ["vertical_and_slash", 1000, 6096, 0.9705587029457092], "11": ["vertical_and_slash", 1000, 6096, 0.9429554343223572], "12": ["vertical_and_slash", 1000, 6096, 0.9329395890235901], "13": ["vertical_and_slash", 1000, 6096, 0.9059574604034424], "14": ["vertical_and_slash", 1000, 6096, 0.6389032602310181], "15": ["vertical_and_slash", 1000, 6096, 0.8840840458869934], "16": ["vertical_and_slash", 1000, 6096, 0.9924368262290955], "17": ["vertical_and_slash", 1000, 6096, 0.866145670413971], "18": ["vertical_and_slash", 1000, 6096, 0.7799681425094604], "19": ["vertical_and_slash", 1000, 6096, 0.9277905821800232], "20": ["vertical_and_slash", 1000, 6096, 0.9794695377349854], "21": ["vertical_and_slash", 1000, 6096, 0.9832764267921448], "22": ["vertical_and_slash", 1000, 6096, 0.9140866994857788], "23": ["vertical_and_slash", 1000, 6096, 0.994279146194458], "24": ["vertical_and_slash", 1000, 6096, 0.9952699542045593], "25": ["vertical_and_slash", 1000, 6096, 0.991451621055603], "26": ["vertical_and_slash", 1000, 6096, 0.9946803450584412], "27": ["vertical_and_slash", 1000, 6096, 0.9914543628692627]}, {"0": ["vertical_and_slash", 1000, 6096, 0.999854326248169], "1": ["vertical_and_slash", 3500, 100, 0.7751321792602539], "2": ["vertical_and_slash", 1000, 6096, 0.9999450445175171], "3": ["vertical_and_slash", 1000, 6096, 0.9999180436134338], "4": ["vertical_and_slash", 1000, 6096, 0.999673068523407], "5": ["vertical_and_slash", 1000, 6096, 0.9999436140060425], "6": ["vertical_and_slash", 1000, 6096, 0.9042648673057556], "7": ["vertical_and_slash", 1000, 6096, 0.9249372482299805], "8": ["vertical_and_slash", 1000, 6096, 0.9189217686653137], "9": ["vertical_and_slash", 1000, 6096, 0.5797387361526489], "10": ["vertical_and_slash", 1000, 6096, 0.9757004380226135], "11": ["vertical_and_slash", 1000, 6096, 0.9479057192802429], "12": ["vertical_and_slash", 1000, 6096, 0.9882757663726807], "13": ["vertical_and_slash", 1000, 6096, 0.7872554659843445], "14": ["vertical_and_slash", 1000, 6096, 0.9618582725524902], "15": ["vertical_and_slash", 1000, 6096, 0.9933269023895264], "16": ["vertical_and_slash", 1000, 6096, 0.7769213914871216], "17": ["vertical_and_slash", 1000, 6096, 0.9982901811599731], "18": ["vertical_and_slash", 1000, 6096, 0.8727825880050659], "19": ["vertical_and_slash", 1000, 6096, 0.9111629128456116], "20": ["vertical_and_slash", 1000, 6096, 0.9820850491523743], "21": ["vertical_and_slash", 1000, 6096, 0.6591984033584595], "22": ["vertical_and_slash", 1000, 6096, 0.7593039274215698], "23": ["vertical_and_slash", 1000, 6096, 0.9832978844642639], "24": ["vertical_and_slash", 1000, 6096, 0.9962671995162964], "25": ["vertical_and_slash", 1000, 6096, 0.8571489453315735], "26": ["vertical_and_slash", 1000, 6096, 0.6629067063331604], "27": ["vertical_and_slash", 1000, 6096, 0.9742580652236938]}, {"0": ["vertical_and_slash", 3500, 100, 0.9057124853134155], "1": ["vertical_and_slash", 3500, 100, 
0.8475332260131836], "2": ["vertical_and_slash", 3500, 100, 0.8966692090034485], "3": ["vertical_and_slash", 3500, 100, 0.9060406684875488], "4": ["vertical_and_slash", 1000, 6096, 0.7373839020729065], "5": ["vertical_and_slash", 3500, 100, 0.8780195713043213], "6": ["vertical_and_slash", 1000, 6096, 0.8604955077171326], "7": ["vertical_and_slash", 1000, 6096, 0.9269763827323914], "8": ["vertical_and_slash", 1000, 6096, 0.8521820306777954], "9": ["vertical_and_slash", 1000, 6096, 0.9201670289039612], "10": ["vertical_and_slash", 1000, 6096, 0.91225266456604], "11": ["vertical_and_slash", 1000, 6096, 0.9308260679244995], "12": ["vertical_and_slash", 1000, 6096, 0.7598862648010254], "13": ["vertical_and_slash", 1000, 6096, 0.8002303838729858], "14": ["vertical_and_slash", 1000, 6096, 0.6208370327949524], "15": ["vertical_and_slash", 1000, 6096, 0.9072885513305664], "16": ["vertical_and_slash", 1000, 6096, 0.7804597616195679], "17": ["vertical_and_slash", 1000, 6096, 0.7186298370361328], "18": ["vertical_and_slash", 1000, 6096, 0.6658867001533508], "19": ["vertical_and_slash", 1000, 6096, 0.7395877242088318], "20": ["vertical_and_slash", 1000, 6096, 0.7290464043617249], "21": ["vertical_and_slash", 1000, 6096, 0.8678046464920044], "22": ["vertical_and_slash", 1000, 6096, 0.6959983110427856], "23": ["vertical_and_slash", 1000, 6096, 0.9478163123130798], "24": ["vertical_and_slash", 1000, 6096, 0.827946126461029], "25": ["vertical_and_slash", 1000, 6096, 0.9121019840240479], "26": ["vertical_and_slash", 1000, 6096, 0.7985134124755859], "27": ["vertical_and_slash", 1000, 6096, 0.9487085938453674]}, {"0": ["vertical_and_slash", 1000, 6096, 0.7612978219985962], "1": ["vertical_and_slash", 1000, 6096, 0.992921769618988], "2": ["vertical_and_slash", 1000, 6096, 0.9342727661132812], "3": ["vertical_and_slash", 1000, 6096, 0.9877188801765442], "4": ["vertical_and_slash", 1000, 6096, 0.9346758127212524], "5": ["vertical_and_slash", 1000, 6096, 0.966949999332428], "6": ["vertical_and_slash", 1000, 6096, 0.9568571448326111], "7": ["vertical_and_slash", 1000, 6096, 0.9982320666313171], "8": ["vertical_and_slash", 1000, 6096, 0.5712432265281677], "9": ["vertical_and_slash", 1000, 6096, 0.989597737789154], "10": ["vertical_and_slash", 1000, 6096, 0.9983510971069336], "11": ["vertical_and_slash", 1000, 6096, 0.9680652022361755], "12": ["vertical_and_slash", 1000, 6096, 0.9642292857170105], "13": ["vertical_and_slash", 1000, 6096, 0.9425500631332397], "14": ["vertical_and_slash", 1000, 6096, 0.8967985510826111], "15": ["vertical_and_slash", 1000, 6096, 0.9649041295051575], "16": ["vertical_and_slash", 1000, 6096, 0.6667925715446472], "17": ["vertical_and_slash", 1000, 6096, 0.8330606818199158], "18": ["vertical_and_slash", 1000, 6096, 0.6516444683074951], "19": ["vertical_and_slash", 1000, 6096, 0.8706280589103699], "20": ["vertical_and_slash", 1000, 6096, 0.9583560824394226], "21": ["vertical_and_slash", 1000, 6096, 0.8796348571777344], "22": ["vertical_and_slash", 1000, 6096, 0.9694448709487915], "23": ["vertical_and_slash", 1000, 6096, 0.944229006767273], "24": ["vertical_and_slash", 1000, 6096, 0.9868544936180115], "25": ["vertical_and_slash", 1000, 6096, 0.9819129705429077], "26": ["vertical_and_slash", 1000, 6096, 0.9697961211204529], "27": ["vertical_and_slash", 1000, 6096, 0.9816914796829224]}, {"0": ["vertical_and_slash", 1000, 6096, 0.9355640411376953], "1": ["vertical_and_slash", 1000, 6096, 0.9207140803337097], "2": ["vertical_and_slash", 1000, 6096, 0.9829172492027283], "3": 
["vertical_and_slash", 1000, 6096, 0.9652756452560425], "4": ["vertical_and_slash", 1000, 6096, 0.9489044547080994], "5": ["vertical_and_slash", 1000, 6096, 0.9721469283103943], "6": ["vertical_and_slash", 1000, 6096, 0.8830953240394592], "7": ["vertical_and_slash", 1000, 6096, 0.9949114918708801], "8": ["vertical_and_slash", 1000, 6096, 0.9481140375137329], "9": ["vertical_and_slash", 1000, 6096, 0.9766684174537659], "10": ["vertical_and_slash", 1000, 6096, 0.8484242558479309], "11": ["vertical_and_slash", 1000, 6096, 0.9932587742805481], "12": ["vertical_and_slash", 100, 800, 1.0], "13": ["vertical_and_slash", 1000, 6096, 0.91309654712677], "14": ["vertical_and_slash", 30, 800, 0.9793747067451477], "15": ["vertical_and_slash", 30, 800, 0.9434259533882141], "16": ["vertical_and_slash", 100, 800, 0.91796875], "17": ["vertical_and_slash", 1000, 6096, 0.6044968962669373], "18": ["vertical_and_slash", 30, 800, 0.9889233112335205], "19": ["vertical_and_slash", 30, 800, 0.9760442972183228], "20": ["vertical_and_slash", 30, 800, 0.9819528460502625], "21": ["vertical_and_slash", 30, 800, 0.9104559421539307], "22": ["vertical_and_slash", 30, 800, 0.9781556129455566], "23": ["vertical_and_slash", 30, 800, 0.9957396984100342], "24": ["vertical_and_slash", 100, 800, 0.6953125], "25": ["vertical_and_slash", 30, 800, 0.9875676035881042], "26": ["vertical_and_slash", 1000, 6096, 0.7930969595909119], "27": ["vertical_and_slash", 30, 800, 0.9832236766815186]}, {"0": ["vertical_and_slash", 30, 800, 0.9581724405288696], "1": ["vertical_and_slash", 30, 800, 0.9862149357795715], "2": ["vertical_and_slash", 30, 800, 0.9727815985679626], "3": ["vertical_and_slash", 30, 800, 0.9878312945365906], "4": ["vertical_and_slash", 30, 800, 0.9732390642166138], "5": ["vertical_and_slash", 100, 800, 0.58203125], "6": ["vertical_and_slash", 100, 800, 0.625], "7": ["vertical_and_slash", 1000, 6096, 0.9923975467681885], "8": ["vertical_and_slash", 1000, 6096, 0.9901614785194397], "9": ["vertical_and_slash", 1000, 6096, 0.8785492181777954], "10": ["vertical_and_slash", 1000, 6096, 0.9548015594482422], "11": ["vertical_and_slash", 1000, 6096, 0.7860113978385925], "12": ["vertical_and_slash", 1000, 6096, 0.975680410861969], "13": ["vertical_and_slash", 1000, 6096, 0.9158719182014465], "14": ["vertical_and_slash", 1000, 6096, 0.988776683807373], "15": ["vertical_and_slash", 1000, 6096, 0.9432694315910339], "16": ["vertical_and_slash", 1000, 6096, 0.982463538646698], "17": ["vertical_and_slash", 1000, 6096, 0.6926687359809875], "18": ["vertical_and_slash", 1000, 6096, 0.7094787359237671], "19": ["vertical_and_slash", 1000, 6096, 0.7520464658737183], "20": ["vertical_and_slash", 1000, 6096, 0.7342078685760498], "21": ["vertical_and_slash", 1000, 6096, 0.5834848880767822], "22": ["vertical_and_slash", 30, 800, 0.9824560284614563], "23": ["vertical_and_slash", 30, 800, 0.986758291721344], "24": ["vertical_and_slash", 30, 800, 0.9540517330169678], "25": ["vertical_and_slash", 30, 800, 0.9864577651023865], "26": ["vertical_and_slash", 30, 800, 0.9416182637214661], "27": ["vertical_and_slash", 30, 800, 0.9522050023078918]}, {"0": ["vertical_and_slash", 30, 800, 0.9929113984107971], "1": ["vertical_and_slash", 30, 800, 0.9960675835609436], "2": ["vertical_and_slash", 30, 800, 0.9982830882072449], "3": ["vertical_and_slash", 30, 800, 0.9949108958244324], "4": ["vertical_and_slash", 30, 800, 0.9948554635047913], "5": ["vertical_and_slash", 30, 800, 0.9805064797401428], "6": ["vertical_and_slash", 30, 800, 0.9989805221557617], "7": 
["vertical_and_slash", 3500, 100, 0.8750844597816467], "8": ["vertical_and_slash", 1000, 6096, 0.8880671858787537], "9": ["vertical_and_slash", 1000, 6096, 0.8828122019767761], "10": ["vertical_and_slash", 1000, 6096, 0.8179347515106201], "11": ["vertical_and_slash", 1000, 6096, 0.8709172606468201], "12": ["vertical_and_slash", 1000, 6096, 0.9106304049491882], "13": ["vertical_and_slash", 3500, 100, 0.8999070525169373], "14": ["vertical_and_slash", 30, 800, 0.9866299629211426], "15": ["vertical_and_slash", 30, 800, 0.9957811236381531], "16": ["vertical_and_slash", 30, 800, 0.9961952567100525], "17": ["vertical_and_slash", 1000, 6096, 0.7598972320556641], "18": ["vertical_and_slash", 100, 800, 0.90234375], "19": ["vertical_and_slash", 30, 800, 0.9915949106216431], "20": ["vertical_and_slash", 30, 800, 0.9958064556121826], "21": ["vertical_and_slash", 30, 800, 0.9862909913063049], "22": ["vertical_and_slash", 1000, 6096, 0.9836251139640808], "23": ["vertical_and_slash", 1000, 6096, 0.8204699754714966], "24": ["vertical_and_slash", 1000, 6096, 0.9781834483146667], "25": ["vertical_and_slash", 30, 800, 0.987470269203186], "26": ["vertical_and_slash", 1000, 6096, 0.9895057678222656], "27": ["vertical_and_slash", 1000, 6096, 0.9841405153274536]}, {"0": ["vertical_and_slash", 30, 800, 0.9849892258644104], "1": ["vertical_and_slash", 100, 800, 0.94921875], "2": ["vertical_and_slash", 1000, 6096, 0.9077629446983337], "3": ["vertical_and_slash", 30, 800, 0.9785045385360718], "4": ["vertical_and_slash", 30, 800, 0.992603063583374], "5": ["vertical_and_slash", 30, 800, 0.9923588037490845], "6": ["vertical_and_slash", 1000, 6096, 0.7285677194595337], "7": ["vertical_and_slash", 1000, 6096, 0.8928298950195312], "8": ["vertical_and_slash", 30, 800, 0.9881294965744019], "9": ["vertical_and_slash", 30, 800, 0.9871630072593689], "10": ["vertical_and_slash", 1000, 6096, 0.8675088286399841], "11": ["vertical_and_slash", 30, 800, 0.9899677038192749], "12": ["vertical_and_slash", 30, 800, 0.994294285774231], "13": ["vertical_and_slash", 30, 800, 0.9912780523300171], "14": ["vertical_and_slash", 1000, 6096, 0.817713737487793], "15": ["vertical_and_slash", 1000, 6096, 0.768071174621582], "16": ["vertical_and_slash", 100, 800, 0.99609375], "17": ["vertical_and_slash", 1000, 6096, 0.9927074909210205], "18": ["vertical_and_slash", 30, 800, 0.9627246260643005], "19": ["vertical_and_slash", 1000, 6096, 0.8428043723106384], "20": ["vertical_and_slash", 30, 800, 0.961391031742096], "21": ["vertical_and_slash", 100, 800, 0.98828125], "22": ["vertical_and_slash", 1000, 6096, 0.8955845832824707], "23": ["vertical_and_slash", 1000, 6096, 0.9635751247406006], "24": ["vertical_and_slash", 1000, 6096, 0.9477937817573547], "25": ["vertical_and_slash", 1000, 6096, 0.9843902587890625], "26": ["vertical_and_slash", 1000, 6096, 0.932198703289032], "27": ["vertical_and_slash", 1000, 6096, 0.9374216794967651]}, {"0": ["vertical_and_slash", 1000, 6096, 0.9694727659225464], "1": ["vertical_and_slash", 100, 800, 0.98828125], "2": ["vertical_and_slash", 1000, 6096, 0.9693005681037903], "3": ["vertical_and_slash", 1000, 6096, 0.9399802088737488], "4": ["vertical_and_slash", 1000, 6096, 0.8707883358001709], "5": ["vertical_and_slash", 1000, 6096, 0.9739736914634705], "6": ["vertical_and_slash", 1000, 6096, 0.9804114103317261], "7": ["vertical_and_slash", 1000, 6096, 0.992721676826477], "8": ["vertical_and_slash", 1000, 6096, 0.9970836043357849], "9": ["vertical_and_slash", 1000, 6096, 0.9859921336174011], "10": ["vertical_and_slash", 1000, 
6096, 0.9984594583511353], "11": ["vertical_and_slash", 1000, 6096, 0.9865716099739075], "12": ["vertical_and_slash", 1000, 6096, 0.9911996126174927], "13": ["vertical_and_slash", 1000, 6096, 0.9729082584381104], "14": ["vertical_and_slash", 30, 800, 0.9846852421760559], "15": ["vertical_and_slash", 30, 800, 0.9954250454902649], "16": ["vertical_and_slash", 30, 800, 0.9949162006378174], "17": ["vertical_and_slash", 30, 800, 0.9548083543777466], "18": ["vertical_and_slash", 30, 800, 0.986838698387146], "19": ["vertical_and_slash", 30, 800, 0.9908570647239685], "20": ["vertical_and_slash", 30, 800, 0.9853697419166565], "21": ["vertical_and_slash", 1000, 6096, 0.997185468673706], "22": ["vertical_and_slash", 1000, 6096, 0.73985755443573], "23": ["vertical_and_slash", 1000, 6096, 0.8271170258522034], "24": ["vertical_and_slash", 1000, 6096, 0.9937832951545715], "25": ["vertical_and_slash", 1000, 6096, 0.974370002746582], "26": ["vertical_and_slash", 1000, 6096, 0.8334921598434448], "27": ["vertical_and_slash", 1000, 6096, 0.865833580493927]}, {"0": ["vertical_and_slash", 1000, 6096, 0.9121729135513306], "1": ["vertical_and_slash", 1000, 6096, 0.880349338054657], "2": ["vertical_and_slash", 1000, 6096, 0.8574644327163696], "3": ["vertical_and_slash", 1000, 6096, 0.8844216465950012], "4": ["vertical_and_slash", 3500, 100, 0.8841082453727722], "5": ["vertical_and_slash", 1000, 6096, 0.9237666130065918], "6": ["vertical_and_slash", 1000, 6096, 0.9418555498123169], "7": ["vertical_and_slash", 1000, 6096, 0.9271724224090576], "8": ["vertical_and_slash", 1000, 6096, 0.6766623258590698], "9": ["vertical_and_slash", 1000, 6096, 0.9509949684143066], "10": ["vertical_and_slash", 1000, 6096, 0.8359535932540894], "11": ["vertical_and_slash", 1000, 6096, 0.9688594341278076], "12": ["vertical_and_slash", 1000, 6096, 0.9697471261024475], "13": ["vertical_and_slash", 1000, 6096, 0.9048041701316833], "14": ["vertical_and_slash", 30, 800, 0.9523487687110901], "15": ["vertical_and_slash", 3500, 100, 0.8940234780311584], "16": ["vertical_and_slash", 1000, 6096, 0.9469529986381531], "17": ["vertical_and_slash", 1000, 6096, 0.8653460144996643], "18": ["vertical_and_slash", 1000, 6096, 0.9089341759681702], "19": ["vertical_and_slash", 1000, 6096, 0.6309221386909485], "20": ["vertical_and_slash", 1000, 6096, 0.9139751195907593], "21": ["vertical_and_slash", 1000, 6096, 0.8545156121253967], "22": ["vertical_and_slash", 3500, 100, 0.8629888296127319], "23": ["vertical_and_slash", 1000, 6096, 0.8971812129020691], "24": ["vertical_and_slash", 1000, 6096, 0.9051863551139832], "25": ["vertical_and_slash", 3500, 100, 0.9100741147994995], "26": ["vertical_and_slash", 1000, 6096, 0.9065660238265991], "27": ["vertical_and_slash", 1000, 6096, 0.917789876461029]}, {"0": ["vertical_and_slash", 3500, 100, 0.95817631483078], "1": ["vertical_and_slash", 30, 800, 0.9713807106018066], "2": ["vertical_and_slash", 1000, 6096, 0.9439947605133057], "3": ["vertical_and_slash", 100, 800, 0.96484375], "4": ["vertical_and_slash", 500, 700, 0.9897396564483643], "5": ["vertical_and_slash", 30, 800, 0.9849019646644592], "6": ["vertical_and_slash", 3500, 100, 0.7464108467102051], "7": ["vertical_and_slash", 1000, 6096, 0.9914251565933228], "8": ["vertical_and_slash", 1000, 6096, 0.9880635142326355], "9": ["vertical_and_slash", 1000, 6096, 0.9915304780006409], "10": ["vertical_and_slash", 1000, 6096, 0.9843180775642395], "11": ["vertical_and_slash", 30, 800, 0.8710246682167053], "12": ["vertical_and_slash", 1000, 6096, 0.9936488270759583], "13": 
["vertical_and_slash", 30, 800, 0.9913105964660645], "14": ["vertical_and_slash", 30, 800, 0.937064528465271], "15": ["vertical_and_slash", 30, 800, 0.9891123175621033], "16": ["vertical_and_slash", 500, 700, 0.9794787168502808], "17": ["vertical_and_slash", 500, 700, 0.9912840127944946], "18": ["vertical_and_slash", 1000, 6096, 0.8226324319839478], "19": ["vertical_and_slash", 1000, 6096, 0.8692429065704346], "20": ["vertical_and_slash", 3500, 100, 0.9418290257453918], "21": ["vertical_and_slash", 30, 800, 0.9891044497489929], "22": ["vertical_and_slash", 30, 800, 0.9982499480247498], "23": ["vertical_and_slash", 30, 800, 0.9932505488395691], "24": ["vertical_and_slash", 1000, 6096, 0.8858837485313416], "25": ["vertical_and_slash", 30, 800, 0.9979614615440369], "26": ["vertical_and_slash", 30, 800, 0.9960733652114868], "27": ["vertical_and_slash", 30, 800, 0.9975556135177612]}, {"0": ["vertical_and_slash", 1000, 6096, 0.7947474122047424], "1": ["vertical_and_slash", 30, 800, 0.9958741664886475], "2": ["vertical_and_slash", 1000, 6096, 0.7516756057739258], "3": ["vertical_and_slash", 30, 800, 0.9849042296409607], "4": ["vertical_and_slash", 30, 800, 0.9620478749275208], "5": ["vertical_and_slash", 30, 800, 0.9647890329360962], "6": ["vertical_and_slash", 30, 800, 0.9281842112541199], "7": ["vertical_and_slash", 100, 800, 0.9921875], "8": ["vertical_and_slash", 1000, 6096, 0.5377398729324341], "9": ["vertical_and_slash", 100, 800, 0.921875], "10": ["vertical_and_slash", 100, 800, 0.9453125], "11": ["vertical_and_slash", 100, 800, 0.92578125], "12": ["vertical_and_slash", 1000, 6096, 0.5261275768280029], "13": ["vertical_and_slash", 100, 800, 0.95703125], "14": ["vertical_and_slash", 30, 800, 0.9814378023147583], "15": ["vertical_and_slash", 30, 800, 0.9902452230453491], "16": ["vertical_and_slash", 30, 800, 0.9406231641769409], "17": ["vertical_and_slash", 30, 800, 0.9874538779258728], "18": ["vertical_and_slash", 30, 800, 0.9904769062995911], "19": ["vertical_and_slash", 30, 800, 0.9870219230651855], "20": ["vertical_and_slash", 30, 800, 0.9883198142051697], "21": ["vertical_and_slash", 1000, 6096, 0.9207147359848022], "22": ["vertical_and_slash", 1000, 6096, 0.9390707612037659], "23": ["vertical_and_slash", 1000, 6096, 0.826806902885437], "24": ["vertical_and_slash", 1000, 6096, 0.7619513869285583], "25": ["vertical_and_slash", 1000, 6096, 0.941148579120636], "26": ["vertical_and_slash", 1000, 6096, 0.978582501411438], "27": ["vertical_and_slash", 1000, 6096, 0.9648736715316772]}, {"0": ["vertical_and_slash", 1000, 6096, 0.9821552634239197], "1": ["vertical_and_slash", 500, 700, 0.9539602994918823], "2": ["vertical_and_slash", 1000, 6096, 0.8077982068061829], "3": ["vertical_and_slash", 1000, 6096, 0.9536257386207581], "4": ["vertical_and_slash", 100, 800, 0.90625], "5": ["vertical_and_slash", 30, 800, 0.9768615961074829], "6": ["vertical_and_slash", 1000, 6096, 0.7399638295173645], "7": ["vertical_and_slash", 1000, 6096, 0.8669665455818176], "8": ["vertical_and_slash", 1000, 6096, 0.95061856508255], "9": ["vertical_and_slash", 1000, 6096, 0.9409676790237427], "10": ["vertical_and_slash", 1000, 6096, 0.9626432657241821], "11": ["vertical_and_slash", 1000, 6096, 0.959475576877594], "12": ["vertical_and_slash", 1000, 6096, 0.9138979315757751], "13": ["vertical_and_slash", 1000, 6096, 0.9594091176986694], "14": ["vertical_and_slash", 1000, 6096, 0.9854135513305664], "15": ["vertical_and_slash", 1000, 6096, 0.8629146814346313], "16": ["vertical_and_slash", 1000, 6096, 0.6293531060218811], 
"17": ["vertical_and_slash", 1000, 6096, 0.6483508348464966], "18": ["vertical_and_slash", 1000, 6096, 0.9933030009269714], "19": ["vertical_and_slash", 1000, 6096, 0.865077555179596], "20": ["vertical_and_slash", 1000, 6096, 0.8829535245895386], "21": ["vertical_and_slash", 1000, 6096, 0.9376322031021118], "22": ["vertical_and_slash", 1000, 6096, 0.9569116830825806], "23": ["vertical_and_slash", 500, 700, 0.9413052201271057], "24": ["vertical_and_slash", 1000, 6096, 0.8402575850486755], "25": ["vertical_and_slash", 30, 800, 0.988961398601532], "26": ["vertical_and_slash", 1000, 6096, 0.9458250403404236], "27": ["vertical_and_slash", 1000, 6096, 0.9612033367156982]}, {"0": ["vertical_and_slash", 1000, 6096, 0.9506350159645081], "1": ["vertical_and_slash", 1000, 6096, 0.9367551803588867], "2": ["vertical_and_slash", 1000, 6096, 0.9368818998336792], "3": ["vertical_and_slash", 1000, 6096, 0.9282279014587402], "4": ["vertical_and_slash", 1000, 6096, 0.8776121139526367], "5": ["vertical_and_slash", 1000, 6096, 0.9466056227684021], "6": ["vertical_and_slash", 3500, 100, 0.8511718511581421], "7": ["vertical_and_slash", 30, 800, 0.9788528680801392], "8": ["vertical_and_slash", 30, 800, 0.9934800863265991], "9": ["vertical_and_slash", 30, 800, 0.9760361313819885], "10": ["vertical_and_slash", 30, 800, 0.9912894368171692], "11": ["vertical_and_slash", 1000, 6096, 0.7013784646987915], "12": ["vertical_and_slash", 30, 800, 0.9533051252365112], "13": ["vertical_and_slash", 30, 800, 0.9988511204719543], "14": ["vertical_and_slash", 1000, 6096, 0.9363043308258057], "15": ["vertical_and_slash", 1000, 6096, 0.9185793399810791], "16": ["vertical_and_slash", 1000, 6096, 0.8489810824394226], "17": ["vertical_and_slash", 1000, 6096, 0.8818261623382568], "18": ["vertical_and_slash", 1000, 6096, 0.8747986555099487], "19": ["vertical_and_slash", 1000, 6096, 0.8180942535400391], "20": ["vertical_and_slash", 1000, 6096, 0.9162351489067078], "21": ["vertical_and_slash", 1000, 6096, 0.9253169298171997], "22": ["vertical_and_slash", 1000, 6096, 0.9417309761047363], "23": ["vertical_and_slash", 1000, 6096, 0.9265408515930176], "24": ["vertical_and_slash", 1000, 6096, 0.9212976694107056], "25": ["vertical_and_slash", 1000, 6096, 0.8993216156959534], "26": ["vertical_and_slash", 1000, 6096, 0.9412744641304016], "27": ["vertical_and_slash", 1000, 6096, 0.940586268901825]}, {"0": ["vertical_and_slash", 1000, 6096, 0.6151176691055298], "1": ["vertical_and_slash", 1000, 6096, 0.8302485942840576], "2": ["vertical_and_slash", 1000, 6096, 0.9792046546936035], "3": ["vertical_and_slash", 1000, 6096, 0.5594601035118103], "4": ["vertical_and_slash", 1000, 6096, 0.8363944292068481], "5": ["vertical_and_slash", 3500, 100, 0.9996598958969116], "6": ["vertical_and_slash", 1000, 6096, 0.9099676609039307], "7": ["vertical_and_slash", 1000, 6096, 0.8518452644348145], "8": ["vertical_and_slash", 1000, 6096, 0.769738495349884], "9": ["vertical_and_slash", 1000, 6096, 0.9187247157096863], "10": ["vertical_and_slash", 1000, 6096, 0.8222647905349731], "11": ["vertical_and_slash", 1000, 6096, 0.9392577409744263], "12": ["vertical_and_slash", 1000, 6096, 0.7922811508178711], "13": ["vertical_and_slash", 100, 800, 0.9375], "14": ["vertical_and_slash", 1000, 6096, 0.9302575588226318], "15": ["vertical_and_slash", 1000, 6096, 0.9779192805290222], "16": ["vertical_and_slash", 1000, 6096, 0.9686769247055054], "17": ["vertical_and_slash", 1000, 6096, 0.974711537361145], "18": ["vertical_and_slash", 1000, 6096, 0.8760904669761658], "19": 
["vertical_and_slash", 1000, 6096, 0.9415637850761414], "20": ["vertical_and_slash", 1000, 6096, 0.8831533789634705], "21": ["vertical_and_slash", 1000, 6096, 0.8641139268875122], "22": ["vertical_and_slash", 1000, 6096, 0.7964520454406738], "23": ["vertical_and_slash", 1000, 6096, 0.9176532030105591], "24": ["vertical_and_slash", 1000, 6096, 0.9218670725822449], "25": ["vertical_and_slash", 1000, 6096, 0.9868776798248291], "26": ["vertical_and_slash", 1000, 6096, 0.942365288734436], "27": ["vertical_and_slash", 1000, 6096, 0.7066196799278259]}, {"0": ["vertical_and_slash", 100, 800, 0.9765625], "1": ["vertical_and_slash", 1000, 6096, 0.8189907670021057], "2": ["vertical_and_slash", 1000, 6096, 0.9638606309890747], "3": ["vertical_and_slash", 100, 800, 0.97265625], "4": ["vertical_and_slash", 1000, 6096, 0.8468702435493469], "5": ["vertical_and_slash", 500, 700, 0.9887101650238037], "6": ["vertical_and_slash", 500, 700, 0.9838299751281738], "7": ["vertical_and_slash", 30, 800, 0.9280517101287842], "8": ["vertical_and_slash", 30, 800, 0.9884114265441895], "9": ["vertical_and_slash", 30, 800, 0.9902153611183167], "10": ["vertical_and_slash", 30, 800, 0.961846113204956], "11": ["vertical_and_slash", 30, 800, 0.9983395934104919], "12": ["vertical_and_slash", 30, 800, 0.9855113625526428], "13": ["vertical_and_slash", 30, 800, 0.9840126633644104], "14": ["vertical_and_slash", 1000, 6096, 0.6998735070228577], "15": ["vertical_and_slash", 1000, 6096, 0.9892531633377075], "16": ["vertical_and_slash", 1000, 6096, 0.9883072972297668], "17": ["vertical_and_slash", 1000, 6096, 0.9294363260269165], "18": ["vertical_and_slash", 1000, 6096, 0.894900381565094], "19": ["vertical_and_slash", 1000, 6096, 0.9803770184516907], "20": ["vertical_and_slash", 1000, 6096, 0.8446154594421387], "21": ["vertical_and_slash", 100, 800, 0.984375], "22": ["vertical_and_slash", 100, 800, 0.90234375], "23": ["vertical_and_slash", 1000, 6096, 0.8615911602973938], "24": ["vertical_and_slash", 1000, 6096, 0.5796183943748474], "25": ["vertical_and_slash", 1000, 6096, 0.8342339396476746], "26": ["vertical_and_slash", 1000, 6096, 0.9186684489250183], "27": ["vertical_and_slash", 30, 800, 0.9606135487556458]}, {"0": ["vertical_and_slash", 1000, 6096, 0.8515287637710571], "1": ["vertical_and_slash", 1000, 6096, 0.933404803276062], "2": ["vertical_and_slash", 1000, 6096, 0.8289247155189514], "3": ["vertical_and_slash", 1000, 6096, 0.8988763689994812], "4": ["vertical_and_slash", 1000, 6096, 0.9394503831863403], "5": ["vertical_and_slash", 1000, 6096, 0.7999369502067566], "6": ["vertical_and_slash", 1000, 6096, 0.9596051573753357], "7": ["vertical_and_slash", 1000, 6096, 0.872355580329895], "8": ["vertical_and_slash", 1000, 6096, 0.9944141507148743], "9": ["vertical_and_slash", 1000, 6096, 0.752680778503418], "10": ["vertical_and_slash", 1000, 6096, 0.9099285006523132], "11": ["vertical_and_slash", 1000, 6096, 0.7915406227111816], "12": ["vertical_and_slash", 1000, 6096, 0.7329405546188354], "13": ["vertical_and_slash", 1000, 6096, 0.5743876099586487], "14": ["vertical_and_slash", 1000, 6096, 0.8348347544670105], "15": ["vertical_and_slash", 1000, 6096, 0.808311402797699], "16": ["vertical_and_slash", 1000, 6096, 0.936697244644165], "17": ["vertical_and_slash", 1000, 6096, 0.9530603289604187], "18": ["vertical_and_slash", 1000, 6096, 0.9473890066146851], "19": ["vertical_and_slash", 1000, 6096, 0.7752445936203003], "20": ["vertical_and_slash", 1000, 6096, 0.8368715643882751], "21": ["vertical_and_slash", 30, 800, 0.9882409572601318], 
"22": ["vertical_and_slash", 1000, 6096, 0.9715934991836548], "23": ["vertical_and_slash", 30, 800, 0.9752689599990845], "24": ["vertical_and_slash", 1000, 6096, 0.9674347639083862], "25": ["vertical_and_slash", 1000, 6096, 0.9863185286521912], "26": ["vertical_and_slash", 1000, 6096, 0.9270491600036621], "27": ["vertical_and_slash", 1000, 6096, 0.9485634565353394]}, {"0": ["vertical_and_slash", 1000, 6096, 0.8162538409233093], "1": ["vertical_and_slash", 100, 800, 0.94921875], "2": ["vertical_and_slash", 1000, 6096, 0.9297316074371338], "3": ["vertical_and_slash", 30, 800, 0.9461885094642639], "4": ["vertical_and_slash", 30, 800, 0.9714936017990112], "5": ["vertical_and_slash", 500, 700, 0.9129247665405273], "6": ["vertical_and_slash", 1000, 6096, 0.9275624752044678], "7": ["vertical_and_slash", 1000, 6096, 0.8475475907325745], "8": ["vertical_and_slash", 1000, 6096, 0.7916257977485657], "9": ["vertical_and_slash", 1000, 6096, 0.9159509539604187], "10": ["vertical_and_slash", 1000, 6096, 0.5701631307601929], "11": ["vertical_and_slash", 100, 800, 0.98828125], "12": ["vertical_and_slash", 1000, 6096, 0.9667550325393677], "13": ["vertical_and_slash", 1000, 6096, 0.9067221283912659], "14": ["vertical_and_slash", 1000, 6096, 0.8238202929496765], "15": ["vertical_and_slash", 1000, 6096, 0.8566161394119263], "16": ["vertical_and_slash", 1000, 6096, 0.9366363883018494], "17": ["vertical_and_slash", 1000, 6096, 0.8159963488578796], "18": ["vertical_and_slash", 1000, 6096, 0.8766967058181763], "19": ["vertical_and_slash", 1000, 6096, 0.6258933544158936], "20": ["vertical_and_slash", 1000, 6096, 0.7349376678466797], "21": ["vertical_and_slash", 1000, 6096, 0.9358459711074829], "22": ["vertical_and_slash", 1000, 6096, 0.9102528691291809], "23": ["vertical_and_slash", 1000, 6096, 0.9412199854850769], "24": ["vertical_and_slash", 1000, 6096, 0.8408423066139221], "25": ["vertical_and_slash", 1000, 6096, 0.9342436194419861], "26": ["vertical_and_slash", 1000, 6096, 0.8275433778762817], "27": ["vertical_and_slash", 1000, 6096, 0.9086871147155762]}, {"0": ["vertical_and_slash", 30, 800, 0.9529778361320496], "1": ["vertical_and_slash", 1000, 6096, 0.9765377044677734], "2": ["vertical_and_slash", 500, 700, 0.9654987454414368], "3": ["vertical_and_slash", 30, 800, 0.9764272570610046], "4": ["vertical_and_slash", 30, 800, 0.9745785593986511], "5": ["vertical_and_slash", 100, 800, 0.9609375], "6": ["vertical_and_slash", 500, 700, 0.8914738893508911], "7": ["vertical_and_slash", 30, 800, 1.000016689300537], "8": ["vertical_and_slash", 100, 800, 0.94921875], "9": ["vertical_and_slash", 30, 800, 0.9711917042732239], "10": ["vertical_and_slash", 3500, 100, 0.595604419708252], "11": ["vertical_and_slash", 1000, 6096, 0.8624133467674255], "12": ["vertical_and_slash", 100, 800, 0.9140625], "13": ["vertical_and_slash", 100, 800, 0.96484375], "14": ["vertical_and_slash", 1000, 6096, 0.9412851929664612], "15": ["vertical_and_slash", 1000, 6096, 0.960081934928894], "16": ["vertical_and_slash", 1000, 6096, 0.6716403365135193], "17": ["vertical_and_slash", 30, 800, 0.9854384064674377], "18": ["vertical_and_slash", 1000, 6096, 0.9242696166038513], "19": ["vertical_and_slash", 1000, 6096, 0.8541141748428345], "20": ["vertical_and_slash", 1000, 6096, 0.9333781599998474], "21": ["vertical_and_slash", 500, 700, 0.986838698387146], "22": ["vertical_and_slash", 1000, 6096, 0.9749005436897278], "23": ["vertical_and_slash", 1000, 6096, 0.7221431732177734], "24": ["vertical_and_slash", 1000, 6096, 0.9587112665176392], "25": 
["vertical_and_slash", 1000, 6096, 0.9741286635398865], "26": ["vertical_and_slash", 1000, 6096, 0.9661264419555664], "27": ["vertical_and_slash", 500, 700, 0.9784269332885742]}, {"0": ["vertical_and_slash", 30, 800, 1.000013828277588], "1": ["vertical_and_slash", 1000, 6096, 0.7964817881584167], "2": ["vertical_and_slash", 3500, 100, 0.9522238969802856], "3": ["vertical_and_slash", 1000, 6096, 0.9842534065246582], "4": ["vertical_and_slash", 1000, 6096, 0.677413284778595], "5": ["vertical_and_slash", 100, 800, 0.921875], "6": ["vertical_and_slash", 3500, 100, 0.6980668902397156], "7": ["vertical_and_slash", 30, 800, 0.9817154407501221], "8": ["vertical_and_slash", 500, 700, 0.9890599846839905], "9": ["vertical_and_slash", 500, 700, 0.9580798745155334], "10": ["vertical_and_slash", 3500, 100, 0.9302932024002075], "11": ["vertical_and_slash", 1000, 6096, 0.8747017979621887], "12": ["vertical_and_slash", 3500, 100, 0.9601419568061829], "13": ["vertical_and_slash", 30, 800, 0.991521954536438], "14": ["vertical_and_slash", 1000, 6096, 0.8870877623558044], "15": ["vertical_and_slash", 1000, 6096, 0.8576323390007019], "16": ["vertical_and_slash", 1000, 6096, 0.9357163310050964], "17": ["vertical_and_slash", 1000, 6096, 0.9630684852600098], "18": ["vertical_and_slash", 1000, 6096, 0.8218123912811279], "19": ["vertical_and_slash", 1000, 6096, 0.9790078401565552], "20": ["vertical_and_slash", 1000, 6096, 0.9365947842597961], "21": ["vertical_and_slash", 1000, 6096, 0.9538845419883728], "22": ["vertical_and_slash", 1000, 6096, 0.8417053818702698], "23": ["vertical_and_slash", 1000, 6096, 0.8086139559745789], "24": ["vertical_and_slash", 1000, 6096, 0.9675533771514893], "25": ["vertical_and_slash", 1000, 6096, 0.9404044151306152], "26": ["vertical_and_slash", 1000, 6096, 0.8472563624382019], "27": ["vertical_and_slash", 1000, 6096, 0.8686702251434326]}, {"0": ["vertical_and_slash", 30, 800, 0.9728208780288696], "1": ["vertical_and_slash", 30, 800, 0.9881014823913574], "2": ["vertical_and_slash", 1000, 6096, 0.8614686131477356], "3": ["vertical_and_slash", 100, 800, 0.9453125], "4": ["vertical_and_slash", 30, 800, 0.9707789421081543], "5": ["vertical_and_slash", 30, 800, 0.8570677638053894], "6": ["vertical_and_slash", 30, 800, 0.9247764348983765], "7": ["vertical_and_slash", 100, 800, 0.9375], "8": ["vertical_and_slash", 1000, 6096, 0.6958666443824768], "9": ["vertical_and_slash", 1000, 6096, 0.7043117880821228], "10": ["vertical_and_slash", 1000, 6096, 0.991999089717865], "11": ["vertical_and_slash", 1000, 6096, 0.9346603155136108], "12": ["vertical_and_slash", 1000, 6096, 0.842792809009552], "13": ["vertical_and_slash", 100, 800, 0.98046875], "14": ["vertical_and_slash", 1000, 6096, 0.9628856778144836], "15": ["vertical_and_slash", 1000, 6096, 0.9630132913589478], "16": ["vertical_and_slash", 1000, 6096, 0.9867842197418213], "17": ["vertical_and_slash", 1000, 6096, 0.7828136086463928], "18": ["vertical_and_slash", 1000, 6096, 0.8424676656723022], "19": ["vertical_and_slash", 1000, 6096, 0.9816663265228271], "20": ["vertical_and_slash", 100, 800, 0.97265625], "21": ["vertical_and_slash", 1000, 6096, 0.8810685276985168], "22": ["vertical_and_slash", 1000, 6096, 0.9782314300537109], "23": ["vertical_and_slash", 1000, 6096, 0.9908930659294128], "24": ["vertical_and_slash", 1000, 6096, 0.9589965343475342], "25": ["vertical_and_slash", 1000, 6096, 0.9606223106384277], "26": ["vertical_and_slash", 1000, 6096, 0.9031217694282532], "27": ["vertical_and_slash", 1000, 6096, 0.9787063002586365]}, {"0": 
["vertical_and_slash", 1000, 6096, 0.9495534300804138], "1": ["vertical_and_slash", 1000, 6096, 0.9725526571273804], "2": ["vertical_and_slash", 1000, 6096, 0.9376304745674133], "3": ["vertical_and_slash", 1000, 6096, 0.9878981709480286], "4": ["vertical_and_slash", 1000, 6096, 0.9897223711013794], "5": ["vertical_and_slash", 1000, 6096, 0.9507505297660828], "6": ["vertical_and_slash", 1000, 6096, 0.9743550419807434], "7": ["vertical_and_slash", 1000, 6096, 0.9827336072921753], "8": ["vertical_and_slash", 100, 800, 0.9765625], "9": ["vertical_and_slash", 100, 800, 0.98828125], "10": ["vertical_and_slash", 3500, 100, 0.9955865740776062], "11": ["vertical_and_slash", 3500, 100, 0.992493748664856], "12": ["vertical_and_slash", 100, 800, 0.9765625], "13": ["vertical_and_slash", 1000, 6096, 0.9148811101913452], "14": ["vertical_and_slash", 1000, 6096, 0.9430108070373535], "15": ["vertical_and_slash", 30, 800, 0.9996715784072876], "16": ["vertical_and_slash", 30, 800, 0.9875264167785645], "17": ["vertical_and_slash", 1000, 6096, 0.7165599465370178], "18": ["vertical_and_slash", 1000, 6096, 0.9757721424102783], "19": ["vertical_and_slash", 100, 800, 0.9453125], "20": ["vertical_and_slash", 30, 800, 0.9793203473091125], "21": ["vertical_and_slash", 1000, 6096, 0.9872748851776123], "22": ["vertical_and_slash", 1000, 6096, 0.9818704128265381], "23": ["vertical_and_slash", 1000, 6096, 0.9898734092712402], "24": ["vertical_and_slash", 1000, 6096, 0.9885611534118652], "25": ["vertical_and_slash", 1000, 6096, 0.9344024062156677], "26": ["vertical_and_slash", 1000, 6096, 0.9874017834663391], "27": ["vertical_and_slash", 1000, 6096, 0.9862232208251953]}, {"0": ["vertical_and_slash", 1000, 6096, 0.9669107794761658], "1": ["vertical_and_slash", 1000, 6096, 0.9780780076980591], "2": ["vertical_and_slash", 1000, 6096, 0.9900755286216736], "3": ["vertical_and_slash", 1000, 6096, 0.8777333498001099], "4": ["vertical_and_slash", 1000, 6096, 0.9100558161735535], "5": ["vertical_and_slash", 1000, 6096, 0.8004213571548462], "6": ["vertical_and_slash", 1000, 6096, 0.8124027848243713], "7": ["vertical_and_slash", 1000, 6096, 0.9189204573631287], "8": ["vertical_and_slash", 1000, 6096, 0.954335629940033], "9": ["vertical_and_slash", 1000, 6096, 0.9588495492935181], "10": ["vertical_and_slash", 1000, 6096, 0.9853689074516296], "11": ["vertical_and_slash", 1000, 6096, 0.9952172636985779], "12": ["vertical_and_slash", 1000, 6096, 0.9971593618392944], "13": ["vertical_and_slash", 1000, 6096, 0.9899795055389404], "14": ["vertical_and_slash", 1000, 6096, 0.9826633334159851], "15": ["vertical_and_slash", 1000, 6096, 0.9986451864242554], "16": ["vertical_and_slash", 1000, 6096, 0.9772005081176758], "17": ["vertical_and_slash", 1000, 6096, 0.9955437183380127], "18": ["vertical_and_slash", 1000, 6096, 0.9906066060066223], "19": ["vertical_and_slash", 1000, 6096, 0.9854442477226257], "20": ["vertical_and_slash", 1000, 6096, 0.967056155204773], "21": ["vertical_and_slash", 1000, 6096, 0.9550986289978027], "22": ["vertical_and_slash", 1000, 6096, 0.9237328171730042], "23": ["vertical_and_slash", 1000, 6096, 0.972143292427063], "24": ["vertical_and_slash", 1000, 6096, 0.990289568901062], "25": ["vertical_and_slash", 1000, 6096, 0.9908021688461304], "26": ["vertical_and_slash", 1000, 6096, 0.9915176630020142], "27": ["vertical_and_slash", 1000, 6096, 0.9932308793067932]}, {"0": ["vertical_and_slash", 1000, 6096, 0.9979202747344971], "1": ["vertical_and_slash", 1000, 6096, 0.9930388927459717], "2": ["vertical_and_slash", 1000, 6096, 
0.9974814057350159], "3": ["vertical_and_slash", 1000, 6096, 0.9859879612922668], "4": ["vertical_and_slash", 1000, 6096, 0.983476996421814], "5": ["vertical_and_slash", 1000, 6096, 0.9953585863113403], "6": ["vertical_and_slash", 1000, 6096, 0.9945823550224304], "7": ["vertical_and_slash", 1000, 6096, 0.983530580997467], "8": ["vertical_and_slash", 1000, 6096, 0.9425436854362488], "9": ["vertical_and_slash", 1000, 6096, 0.9299989938735962], "10": ["vertical_and_slash", 1000, 6096, 0.9963868260383606], "11": ["vertical_and_slash", 1000, 6096, 0.9589547514915466], "12": ["vertical_and_slash", 1000, 6096, 0.9669103026390076], "13": ["vertical_and_slash", 1000, 6096, 0.9646584391593933], "14": ["vertical_and_slash", 1000, 6096, 0.9834031462669373], "15": ["vertical_and_slash", 1000, 6096, 0.9837222695350647], "16": ["vertical_and_slash", 1000, 6096, 0.9521802067756653], "17": ["vertical_and_slash", 1000, 6096, 0.9933254718780518], "18": ["vertical_and_slash", 1000, 6096, 0.9367445111274719], "19": ["vertical_and_slash", 1000, 6096, 0.9253289699554443], "20": ["vertical_and_slash", 1000, 6096, 0.9213846325874329], "21": ["vertical_and_slash", 3500, 100, 0.9439836740493774], "22": ["vertical_and_slash", 1000, 6096, 0.8624261617660522], "23": ["vertical_and_slash", 3500, 100, 0.9876448512077332], "24": ["vertical_and_slash", 3500, 100, 0.9694659113883972], "25": ["vertical_and_slash", 1000, 6096, 0.8862125873565674], "26": ["vertical_and_slash", 3500, 100, 0.9761732816696167], "27": ["vertical_and_slash", 30, 800, 0.9564419388771057]}, {"0": ["vertical_and_slash", 30, 800, 0.9152433276176453], "1": ["vertical_and_slash", 30, 800, 0.9991941452026367], "2": ["vertical_and_slash", 30, 800, 0.983933687210083], "3": ["vertical_and_slash", 30, 800, 0.9974339604377747], "4": ["vertical_and_slash", 30, 800, 0.9911845326423645], "5": ["vertical_and_slash", 30, 800, 0.9775597453117371], "6": ["vertical_and_slash", 30, 800, 0.9880896806716919], "7": ["vertical_and_slash", 3500, 100, 0.9697815775871277], "8": ["vertical_and_slash", 1000, 6096, 0.9791455864906311], "9": ["vertical_and_slash", 1000, 6096, 0.9768369793891907], "10": ["vertical_and_slash", 1000, 6096, 0.983481764793396], "11": ["vertical_and_slash", 1000, 6096, 0.9650024771690369], "12": ["vertical_and_slash", 3500, 100, 0.974149227142334], "13": ["vertical_and_slash", 1000, 6096, 0.8544312119483948], "14": ["vertical_and_slash", 3500, 100, 0.9894920587539673], "15": ["vertical_and_slash", 1000, 6096, 0.9930623769760132], "16": ["vertical_and_slash", 1000, 6096, 0.99419766664505], "17": ["vertical_and_slash", 3500, 100, 0.9738808870315552], "18": ["vertical_and_slash", 1000, 6096, 0.9566258192062378], "19": ["vertical_and_slash", 3500, 100, 0.9966306686401367], "20": ["vertical_and_slash", 1000, 6096, 0.9950397610664368], "21": ["vertical_and_slash", 3500, 100, 0.980926513671875], "22": ["vertical_and_slash", 1000, 6096, 0.990209698677063], "23": ["vertical_and_slash", 1000, 6096, 0.9948497414588928], "24": ["vertical_and_slash", 1000, 6096, 0.9675090909004211], "25": ["vertical_and_slash", 1000, 6096, 0.9923503398895264], "26": ["vertical_and_slash", 1000, 6096, 0.9827864766120911], "27": ["vertical_and_slash", 1000, 6096, 0.9880841374397278]}, {"0": ["vertical_and_slash", 1000, 6096, 0.967562735080719], "1": ["vertical_and_slash", 1000, 6096, 0.9504984617233276], "2": ["vertical_and_slash", 100, 800, 0.98046875], "3": ["vertical_and_slash", 100, 800, 0.9609375], "4": ["vertical_and_slash", 1000, 6096, 0.961335301399231], "5": 
["vertical_and_slash", 1000, 6096, 0.9848175644874573], "6": ["vertical_and_slash", 3500, 100, 0.9635114073753357], "7": ["vertical_and_slash", 1000, 6096, 0.9763992428779602], "8": ["vertical_and_slash", 1000, 6096, 0.9722656011581421], "9": ["vertical_and_slash", 1000, 6096, 0.970198929309845], "10": ["vertical_and_slash", 1000, 6096, 0.9567866325378418], "11": ["vertical_and_slash", 1000, 6096, 0.9743251204490662], "12": ["vertical_and_slash", 1000, 6096, 0.9080458879470825], "13": ["vertical_and_slash", 1000, 6096, 0.9336158037185669], "14": ["vertical_and_slash", 30, 800, 0.9828786849975586], "15": ["vertical_and_slash", 1000, 6096, 0.8201127052307129], "16": ["vertical_and_slash", 100, 800, 0.890625], "17": ["vertical_and_slash", 3500, 100, 0.9600610136985779], "18": ["vertical_and_slash", 30, 800, 0.9485377669334412], "19": ["vertical_and_slash", 30, 800, 0.9659656882286072], "20": ["vertical_and_slash", 1000, 6096, 0.8275288343429565], "21": ["vertical_and_slash", 1000, 6096, 0.991800844669342], "22": ["vertical_and_slash", 1000, 6096, 0.9874403476715088], "23": ["vertical_and_slash", 1000, 6096, 0.9939562082290649], "24": ["vertical_and_slash", 1000, 6096, 0.959643542766571], "25": ["vertical_and_slash", 1000, 6096, 0.9857483506202698], "26": ["vertical_and_slash", 1000, 6096, 0.9910452961921692], "27": ["vertical_and_slash", 1000, 6096, 0.9901595115661621]}, {"0": ["vertical_and_slash", 1000, 6096, 0.9662902355194092], "1": ["vertical_and_slash", 1000, 6096, 0.9520133137702942], "2": ["vertical_and_slash", 1000, 6096, 0.9550362229347229], "3": ["vertical_and_slash", 1000, 6096, 0.9629506468772888], "4": ["vertical_and_slash", 1000, 6096, 0.9720785021781921], "5": ["vertical_and_slash", 1000, 6096, 0.9648401737213135], "6": ["vertical_and_slash", 1000, 6096, 0.9602342844009399], "7": ["vertical_and_slash", 1000, 6096, 0.8614271879196167], "8": ["vertical_and_slash", 30, 800, 0.9938231110572815], "9": ["vertical_and_slash", 1000, 6096, 0.8552648425102234], "10": ["vertical_and_slash", 1000, 6096, 0.8744264245033264], "11": ["vertical_and_slash", 500, 700, 0.9902867078781128], "12": ["vertical_and_slash", 1000, 6096, 0.8832966685295105], "13": ["vertical_and_slash", 1000, 6096, 0.8797697424888611], "14": ["vertical_and_slash", 1000, 6096, 0.8878644108772278], "15": ["vertical_and_slash", 1000, 6096, 0.9430946707725525], "16": ["vertical_and_slash", 1000, 6096, 0.9398497343063354], "17": ["vertical_and_slash", 1000, 6096, 0.9297257661819458], "18": ["vertical_and_slash", 1000, 6096, 0.9645345211029053], "19": ["vertical_and_slash", 1000, 6096, 0.9631009697914124], "20": ["vertical_and_slash", 1000, 6096, 0.9436025619506836], "21": ["vertical_and_slash", 1000, 6096, 0.8522894978523254], "22": ["vertical_and_slash", 1000, 6096, 0.8642758131027222], "23": ["vertical_and_slash", 1000, 6096, 0.9266183972358704], "24": ["vertical_and_slash", 1000, 6096, 0.9002980589866638], "25": ["vertical_and_slash", 1000, 6096, 0.9449893236160278], "26": ["vertical_and_slash", 1000, 6096, 0.9670844674110413], "27": ["vertical_and_slash", 1000, 6096, 0.9114388227462769]}, {"0": ["vertical_and_slash", 1000, 6096, 0.9020618796348572], "1": ["vertical_and_slash", 1000, 6096, 0.9286912083625793], "2": ["vertical_and_slash", 1000, 6096, 0.8976420164108276], "3": ["vertical_and_slash", 1000, 6096, 0.9164029955863953], "4": ["vertical_and_slash", 1000, 6096, 0.9384458661079407], "5": ["vertical_and_slash", 1000, 6096, 0.9148637056350708], "6": ["vertical_and_slash", 1000, 6096, 0.8738253116607666], "7": 
["vertical_and_slash", 1000, 6096, 0.9889154434204102], "8": ["vertical_and_slash", 1000, 6096, 0.9813238978385925], "9": ["vertical_and_slash", 1000, 6096, 0.8961515426635742], "10": ["vertical_and_slash", 1000, 6096, 0.9995065927505493], "11": ["vertical_and_slash", 30, 800, 0.9942719340324402], "12": ["vertical_and_slash", 1000, 6096, 0.9956918954849243], "13": ["vertical_and_slash", 1000, 6096, 0.9992254972457886], "14": ["vertical_and_slash", 1000, 6096, 0.9229907989501953], "15": ["vertical_and_slash", 1000, 6096, 0.9264236688613892], "16": ["vertical_and_slash", 1000, 6096, 0.9291247725486755], "17": ["vertical_and_slash", 1000, 6096, 0.9146082401275635], "18": ["vertical_and_slash", 1000, 6096, 0.946480393409729], "19": ["vertical_and_slash", 1000, 6096, 0.8833398222923279], "20": ["vertical_and_slash", 1000, 6096, 0.8919368386268616], "21": ["vertical_and_slash", 1000, 6096, 0.9127027988433838], "22": ["vertical_and_slash", 1000, 6096, 0.9224942922592163], "23": ["vertical_and_slash", 1000, 6096, 0.9477068781852722], "24": ["vertical_and_slash", 1000, 6096, 0.8317608833312988], "25": ["vertical_and_slash", 1000, 6096, 0.9355064630508423], "26": ["vertical_and_slash", 1000, 6096, 0.9410194754600525], "27": ["vertical_and_slash", 1000, 6096, 0.8446194529533386]}]
diff --git a/minference/configs/Yi_9B_200k_kv_out_v32_fit_o_best_pattern.json b/minference/configs/Yi_9B_200k_kv_out_v32_fit_o_best_pattern.json
new file mode 100644
index 0000000..e25d158
--- /dev/null
+++ b/minference/configs/Yi_9B_200k_kv_out_v32_fit_o_best_pattern.json
@@ -0,0 +1 @@
+[{"0": ["vertical_and_slash", 1000, 4096, 12982], "1": ["vertical_and_slash", 1000, 4096, 54], "2": ["vertical_and_slash", 1000, 4096, 0], "3": ["vertical_and_slash", 1000, 4096, 5], "4": ["vertical_and_slash", 1000, 4096, 57], "5": ["vertical_and_slash", 1000, 4096, 93], "6": ["vertical_and_slash", 1000, 4096, 5], "7": ["vertical_and_slash", 1000, 4096, 0], "8": ["vertical_and_slash", 1000, 4096, 4], "9": ["vertical_and_slash", 1000, 4096, 8], "10": ["vertical_and_slash", 1000, 4096, 10020], "11": ["vertical_and_slash", 1000, 4096, 0], "12": ["vertical_and_slash", 1000, 4096, 222290], "13": ["vertical_and_slash", 1000, 4096, 162], "14": ["vertical_and_slash", 1000, 4096, 3], "15": ["vertical_and_slash", 1000, 4096, 11], "16": ["vertical_and_slash", 1000, 4096, 10], "17": ["vertical_and_slash", 1000, 4096, 4], "18": ["vertical_and_slash", 1000, 4096, 26297], "19": ["vertical_and_slash", 1000, 4096, 3], "20": ["vertical_and_slash", 1000, 4096, 0], "21": ["vertical_and_slash", 1000, 4096, 0], "22": ["vertical_and_slash", 1000, 4096, 1627], "23": ["vertical_and_slash", 1000, 4096, 7], "24": ["vertical_and_slash", 1000, 4096, 0], "25": ["vertical_and_slash", 1000, 4096, 859], "26": ["vertical_and_slash", 1000, 4096, 0], "27": ["vertical_and_slash", 1000, 4096, 0], "28": ["vertical_and_slash", 1000, 4096, 484], "29": ["vertical_and_slash", 1000, 4096, 1239], "30": ["vertical_and_slash", 1000, 4096, 0], "31": ["vertical_and_slash", 1000, 4096, 0]}, {"0": ["vertical_and_slash", 1000, 4096, 430388], "1": ["vertical_and_slash", 1000, 4096, 299591], "2": ["vertical_and_slash", 1000, 4096, 5802], "3": ["vertical_and_slash", 1000, 4096, 22390], "4": ["vertical_and_slash", 1000, 4096, 284950], "5": ["vertical_and_slash", 1000, 4096, 237516], "6": ["vertical_and_slash", 1000, 4096, 39541], "7": ["vertical_and_slash", 1000, 4096, 46216], "8": ["vertical_and_slash", 1000, 4096, 782645], "9": ["vertical_and_slash", 1000, 4096, 8], "10": ["vertical_and_slash", 1000, 4096, 18], "11": ["vertical_and_slash", 1000, 4096, 18890], "12": ["vertical_and_slash", 1000, 4096, 141], "13": ["vertical_and_slash", 1000, 4096, 53457], "14": ["vertical_and_slash", 1000, 4096, 34], "15": ["vertical_and_slash", 1000, 4096, 0], "16": ["vertical_and_slash", 1000, 4096, 246481], "17": ["vertical_and_slash", 1000, 4096, 135148], "18": ["vertical_and_slash", 1000, 4096, 48561], "19": ["vertical_and_slash", 1000, 4096, 54785], "20": ["vertical_and_slash", 1000, 4096, 95382], "21": ["vertical_and_slash", 1000, 4096, 387], "22": ["vertical_and_slash", 1000, 4096, 1750], "23": ["vertical_and_slash", 1000, 4096, 201661], "24": ["vertical_and_slash", 1000, 4096, 51272], "25": ["vertical_and_slash", 1000, 4096, 115255], "26": ["vertical_and_slash", 1000, 4096, 6], "27": ["vertical_and_slash", 1000, 4096, 6895], "28": ["vertical_and_slash", 1000, 4096, 2335], "29": ["vertical_and_slash", 1000, 4096, 23041], "30": ["vertical_and_slash", 1000, 4096, 6270087], "31": ["vertical_and_slash", 1000, 4096, 0]}, {"0": ["vertical_and_slash", 100, 800, 11], "1": ["vertical_and_slash", 30, 800, 5], "2": ["vertical_and_slash", 30, 800, 2790], "3": ["vertical_and_slash", 30, 800, 37], "4": ["vertical_and_slash", 30, 800, 2903], "5": ["vertical_and_slash", 30, 800, 1], "6": ["vertical_and_slash", 30, 800, 101], "7": ["vertical_and_slash", 100, 800, 16677], "8": ["vertical_and_slash", 1000, 4096, 99796], "9": ["vertical_and_slash", 30, 800, 8116], "10": ["vertical_and_slash", 30, 800, 1993], "11": ["vertical_and_slash", 1000, 4096, 2561], "12": 
["vertical_and_slash", 30, 800, 21], "13": ["vertical_and_slash", 30, 800, 9624], "14": ["vertical_and_slash", 1000, 4096, 3894510], "15": ["vertical_and_slash", 1000, 4096, 66775], "16": ["vertical_and_slash", 30, 800, 1569], "17": ["vertical_and_slash", 1000, 4096, 146958], "18": ["vertical_and_slash", 30, 800, 29976], "19": ["vertical_and_slash", 1000, 4096, 269566], "20": ["vertical_and_slash", 100, 800, 50639], "21": ["vertical_and_slash", 30, 800, 114641], "22": ["vertical_and_slash", 1000, 4096, 238607], "23": ["vertical_and_slash", 100, 800, 302385], "24": ["vertical_and_slash", 1000, 4096, 4893], "25": ["vertical_and_slash", 30, 800, 322], "26": ["vertical_and_slash", 1000, 4096, 3639], "27": ["vertical_and_slash", 100, 800, 131], "28": ["vertical_and_slash", 1000, 4096, 348560], "29": ["vertical_and_slash", 1000, 4096, 14611], "30": ["vertical_and_slash", 30, 800, 86], "31": ["vertical_and_slash", 1000, 4096, 900]}, {"0": ["vertical_and_slash", 100, 800, 64], "1": ["vertical_and_slash", 1000, 4096, 10], "2": ["vertical_and_slash", 500, 700, 77], "3": ["vertical_and_slash", 1000, 4096, 4193], "4": ["vertical_and_slash", 100, 800, 83525], "5": ["vertical_and_slash", 1000, 4096, 6], "6": ["vertical_and_slash", 1000, 4096, 27907], "7": ["vertical_and_slash", 1000, 4096, 42], "8": ["vertical_and_slash", 30, 800, 21349], "9": ["vertical_and_slash", 30, 800, 5018], "10": ["vertical_and_slash", 30, 800, 1663], "11": ["vertical_and_slash", 30, 800, 86902], "12": ["vertical_and_slash", 30, 800, 781], "13": ["vertical_and_slash", 100, 800, 339811], "14": ["vertical_and_slash", 100, 800, 696206], "15": ["vertical_and_slash", 30, 800, 47681], "16": ["vertical_and_slash", 30, 800, 4251], "17": ["vertical_and_slash", 1000, 4096, 6373945], "18": ["vertical_and_slash", 100, 800, 289132], "19": ["vertical_and_slash", 1000, 4096, 10273], "20": ["vertical_and_slash", 1000, 4096, 457078], "21": ["vertical_and_slash", 1000, 4096, 1372461], "22": ["vertical_and_slash", 100, 800, 11108], "23": ["vertical_and_slash", 100, 800, 2979], "24": ["vertical_and_slash", 1000, 4096, 30365], "25": ["vertical_and_slash", 500, 700, 142429], "26": ["vertical_and_slash", 500, 700, 6300], "27": ["vertical_and_slash", 30, 800, 4711], "28": ["vertical_and_slash", 500, 700, 4810], "29": ["vertical_and_slash", 500, 700, 25571], "30": ["vertical_and_slash", 500, 700, 7924], "31": ["vertical_and_slash", 500, 700, 3337]}, {"0": ["vertical_and_slash", 30, 800, 34678], "1": ["vertical_and_slash", 30, 800, 13104], "2": ["vertical_and_slash", 30, 800, 4929], "3": ["vertical_and_slash", 100, 800, 9351380], "4": ["vertical_and_slash", 100, 800, 333814], "5": ["vertical_and_slash", 100, 800, 603408], "6": ["vertical_and_slash", 30, 800, 18975], "7": ["vertical_and_slash", 30, 800, 8848], "8": ["vertical_and_slash", 100, 800, 1690132], "9": ["vertical_and_slash", 30, 800, 59610], "10": ["vertical_and_slash", 500, 700, 1234], "11": ["vertical_and_slash", 1000, 4096, 74422], "12": ["vertical_and_slash", 1000, 4096, 504212], "13": ["vertical_and_slash", 30, 800, 3100], "14": ["vertical_and_slash", 100, 800, 1160], "15": ["vertical_and_slash", 500, 700, 5784], "16": ["vertical_and_slash", 30, 800, 18695], "17": ["vertical_and_slash", 30, 800, 2090], "18": ["vertical_and_slash", 30, 800, 28562], "19": ["vertical_and_slash", 30, 800, 34339], "20": ["vertical_and_slash", 30, 800, 2544], "21": ["vertical_and_slash", 30, 800, 1914], "22": ["vertical_and_slash", 30, 800, 83258], "23": ["vertical_and_slash", 30, 800, 7898], "24": 
["vertical_and_slash", 30, 800, 11609], "25": ["vertical_and_slash", 1000, 4096, 64138], "26": ["vertical_and_slash", 1000, 4096, 514471], "27": ["vertical_and_slash", 500, 700, 39930], "28": ["vertical_and_slash", 30, 800, 477456], "29": ["vertical_and_slash", 100, 800, 4526], "30": ["vertical_and_slash", 1000, 4096, 30006], "31": ["vertical_and_slash", 30, 800, 92845]}, {"0": ["vertical_and_slash", 30, 800, 55378], "1": ["vertical_and_slash", 1000, 4096, 17441], "2": ["vertical_and_slash", 100, 800, 1890658], "3": ["vertical_and_slash", 30, 800, 39922], "4": ["vertical_and_slash", 30, 800, 3841], "5": ["vertical_and_slash", 30, 800, 16402], "6": ["vertical_and_slash", 30, 800, 9274], "7": ["vertical_and_slash", 100, 800, 2756], "8": ["vertical_and_slash", 100, 800, 190896], "9": ["vertical_and_slash", 1000, 4096, 30060], "10": ["vertical_and_slash", 1000, 4096, 1123342], "11": ["vertical_and_slash", 1000, 4096, 260812], "12": ["vertical_and_slash", 1000, 4096, 4395769], "13": ["vertical_and_slash", 1000, 4096, 1803359], "14": ["vertical_and_slash", 30, 800, 17625], "15": ["vertical_and_slash", 1000, 4096, 1501177], "16": ["vertical_and_slash", 1000, 4096, 236955], "17": ["vertical_and_slash", 1000, 4096, 27239], "18": ["vertical_and_slash", 1000, 4096, 84045], "19": ["vertical_and_slash", 1000, 4096, 112395], "20": ["vertical_and_slash", 1000, 4096, 289351], "21": ["vertical_and_slash", 1000, 4096, 1200493], "22": ["vertical_and_slash", 100, 800, 5628], "23": ["vertical_and_slash", 1000, 4096, 53], "24": ["vertical_and_slash", 30, 800, 1001179], "25": ["vertical_and_slash", 1000, 4096, 1417294], "26": ["vertical_and_slash", 100, 800, 712290], "27": ["vertical_and_slash", 1000, 4096, 111462], "28": ["vertical_and_slash", 1000, 4096, 2382091], "29": ["vertical_and_slash", 30, 800, 10632], "30": ["vertical_and_slash", 100, 800, 404628], "31": ["vertical_and_slash", 1000, 4096, 36025]}, {"0": ["vertical_and_slash", 1000, 4096, 683931], "1": ["vertical_and_slash", 1000, 4096, 1978224], "2": ["vertical_and_slash", 30, 800, 529064], "3": ["vertical_and_slash", 30, 800, 20483], "4": ["vertical_and_slash", 30, 800, 226587], "5": ["vertical_and_slash", 30, 800, 100650], "6": ["vertical_and_slash", 30, 800, 88814], "7": ["vertical_and_slash", 30, 800, 25415], "8": ["vertical_and_slash", 1000, 4096, 126846], "9": ["vertical_and_slash", 100, 800, 83585], "10": ["vertical_and_slash", 1000, 4096, 53117], "11": ["vertical_and_slash", 1000, 4096, 30196], "12": ["vertical_and_slash", 1000, 4096, 81511], "13": ["vertical_and_slash", 1000, 4096, 25087], "14": ["vertical_and_slash", 1000, 4096, 52332], "15": ["vertical_and_slash", 1000, 4096, 1662596], "16": ["vertical_and_slash", 30, 800, 26199], "17": ["vertical_and_slash", 30, 800, 72420], "18": ["vertical_and_slash", 30, 800, 74770], "19": ["vertical_and_slash", 30, 800, 94064], "20": ["vertical_and_slash", 30, 800, 10369], "21": ["vertical_and_slash", 1000, 4096, 2802268], "22": ["vertical_and_slash", 30, 800, 32077], "23": ["vertical_and_slash", 500, 700, 751949], "24": ["vertical_and_slash", 100, 800, 23111], "25": ["vertical_and_slash", 100, 800, 13161], "26": ["vertical_and_slash", 100, 800, 164196], "27": ["vertical_and_slash", 1000, 4096, 12766], "28": ["vertical_and_slash", 1000, 4096, 37748], "29": ["vertical_and_slash", 1000, 4096, 394580], "30": ["vertical_and_slash", 30, 800, 1161581], "31": ["vertical_and_slash", 1000, 4096, 1070988]}, {"0": ["vertical_and_slash", 100, 800, 4619], "1": ["vertical_and_slash", 1000, 4096, 3223], "2": 
["vertical_and_slash", 100, 800, 65675], "3": ["vertical_and_slash", 30, 800, 56], "4": ["vertical_and_slash", 30, 800, 93], "5": ["vertical_and_slash", 30, 800, 72], "6": ["vertical_and_slash", 500, 700, 3523], "7": ["vertical_and_slash", 1000, 4096, 12230], "8": ["vertical_and_slash", 100, 800, 9301307], "9": ["vertical_and_slash", 1000, 4096, 418350], "10": ["vertical_and_slash", 1000, 4096, 994569], "11": ["vertical_and_slash", 100, 800, 399778], "12": ["vertical_and_slash", 1000, 4096, 2677334], "13": ["vertical_and_slash", 1000, 4096, 409432], "14": ["vertical_and_slash", 30, 800, 1233050], "15": ["vertical_and_slash", 1000, 4096, 5697704], "16": ["vertical_and_slash", 100, 800, 294], "17": ["vertical_and_slash", 30, 800, 50017], "18": ["vertical_and_slash", 30, 800, 1566], "19": ["vertical_and_slash", 30, 800, 2368], "20": ["vertical_and_slash", 30, 800, 3051012], "21": ["vertical_and_slash", 1000, 4096, 15983], "22": ["vertical_and_slash", 1000, 4096, 48], "23": ["vertical_and_slash", 1000, 4096, 312543], "24": ["vertical_and_slash", 30, 800, 4820], "25": ["vertical_and_slash", 30, 800, 100931], "26": ["vertical_and_slash", 30, 800, 69743], "27": ["vertical_and_slash", 30, 800, 22187], "28": ["vertical_and_slash", 30, 800, 3936], "29": ["vertical_and_slash", 30, 800, 4611], "30": ["vertical_and_slash", 30, 800, 21928], "31": ["vertical_and_slash", 30, 800, 133206]}, {"0": ["vertical_and_slash", 100, 800, 41811], "1": ["vertical_and_slash", 30, 800, 4226], "2": ["vertical_and_slash", 100, 800, 11930], "3": ["vertical_and_slash", 30, 800, 629146], "4": ["vertical_and_slash", 100, 800, 511736], "5": ["vertical_and_slash", 100, 800, 1408], "6": ["vertical_and_slash", 30, 800, 18012], "7": ["vertical_and_slash", 30, 800, 897], "8": ["vertical_and_slash", 30, 800, 107705], "9": ["vertical_and_slash", 30, 800, 152957], "10": ["vertical_and_slash", 30, 800, 272002], "11": ["vertical_and_slash", 30, 800, 5216722], "12": ["vertical_and_slash", 30, 800, 509504], "13": ["vertical_and_slash", 30, 800, 72091], "14": ["vertical_and_slash", 30, 800, 166293], "15": ["vertical_and_slash", 30, 800, 426344], "16": ["vertical_and_slash", 30, 800, 316624], "17": ["vertical_and_slash", 1000, 4096, 158902], "18": ["vertical_and_slash", 30, 800, 162502], "19": ["vertical_and_slash", 1000, 4096, 2464314], "20": ["vertical_and_slash", 1000, 4096, 5817909], "21": ["vertical_and_slash", 100, 800, 1141235], "22": ["vertical_and_slash", 30, 800, 452577], "23": ["vertical_and_slash", 30, 800, 193960], "24": ["vertical_and_slash", 30, 800, 538157], "25": ["vertical_and_slash", 30, 800, 1355759], "26": ["vertical_and_slash", 100, 800, 141236], "27": ["vertical_and_slash", 30, 800, 87608], "28": ["vertical_and_slash", 30, 800, 102946], "29": ["vertical_and_slash", 30, 800, 81254], "30": ["vertical_and_slash", 30, 800, 6194794], "31": ["vertical_and_slash", 30, 800, 2092660]}, {"0": ["vertical_and_slash", 30, 800, 278589], "1": ["vertical_and_slash", 30, 800, 1071731], "2": ["vertical_and_slash", 30, 800, 1991650], "3": ["vertical_and_slash", 30, 800, 308703], "4": ["vertical_and_slash", 30, 800, 1024242], "5": ["vertical_and_slash", 30, 800, 3107957], "6": ["vertical_and_slash", 30, 800, 926801], "7": ["vertical_and_slash", 30, 800, 2887199], "8": ["vertical_and_slash", 1000, 4096, 4152662], "9": ["vertical_and_slash", 100, 800, 15773492], "10": ["vertical_and_slash", 30, 800, 667496], "11": ["vertical_and_slash", 30, 800, 767325], "12": ["vertical_and_slash", 30, 800, 490706], "13": ["vertical_and_slash", 100, 800, 
3083166], "14": ["vertical_and_slash", 100, 800, 14433242], "15": ["vertical_and_slash", 30, 800, 514502], "16": ["vertical_and_slash", 1000, 4096, 4574900], "17": ["vertical_and_slash", 1000, 4096, 1828093], "18": ["vertical_and_slash", 30, 800, 3790483], "19": ["vertical_and_slash", 1000, 4096, 9164424], "20": ["vertical_and_slash", 1000, 4096, 1011346], "21": ["vertical_and_slash", 1000, 4096, 1768867], "22": ["vertical_and_slash", 100, 800, 3253894], "23": ["vertical_and_slash", 1000, 4096, 882663], "24": ["vertical_and_slash", 100, 800, 1974998], "25": ["vertical_and_slash", 500, 700, 1452483], "26": ["vertical_and_slash", 100, 800, 12992816], "27": ["vertical_and_slash", 1000, 4096, 4441511], "28": ["vertical_and_slash", 100, 800, 3146531], "29": ["vertical_and_slash", 1000, 4096, 7002295], "30": ["vertical_and_slash", 100, 800, 7974855], "31": ["vertical_and_slash", 1000, 4096, 2767293]}, {"0": ["vertical_and_slash", 30, 800, 517042], "1": ["vertical_and_slash", 30, 800, 9471250], "2": ["vertical_and_slash", 30, 800, 67128], "3": ["vertical_and_slash", 100, 800, 13225828], "4": ["vertical_and_slash", 1000, 4096, 8138531], "5": ["vertical_and_slash", 30, 800, 169424], "6": ["vertical_and_slash", 30, 800, 165102], "7": ["vertical_and_slash", 1000, 4096, 898000], "8": ["vertical_and_slash", 100, 800, 498306], "9": ["vertical_and_slash", 100, 800, 12016777], "10": ["vertical_and_slash", 100, 800, 13078398], "11": ["vertical_and_slash", 1000, 4096, 569449], "12": ["vertical_and_slash", 1000, 4096, 4419468], "13": ["vertical_and_slash", 100, 800, 2308923], "14": ["vertical_and_slash", 100, 800, 188999], "15": ["vertical_and_slash", 30, 800, 685736], "16": ["vertical_and_slash", 100, 800, 161819], "17": ["vertical_and_slash", 100, 800, 1878966], "18": ["vertical_and_slash", 100, 800, 7840855], "19": ["vertical_and_slash", 30, 800, 207320], "20": ["vertical_and_slash", 100, 800, 2233365], "21": ["vertical_and_slash", 100, 800, 685239], "22": ["vertical_and_slash", 1000, 4096, 1493618], "23": ["vertical_and_slash", 30, 800, 1137958], "24": ["vertical_and_slash", 30, 800, 115113], "25": ["vertical_and_slash", 30, 800, 809754], "26": ["vertical_and_slash", 30, 800, 1328591], "27": ["vertical_and_slash", 30, 800, 697970], "28": ["vertical_and_slash", 1000, 4096, 14409], "29": ["vertical_and_slash", 30, 800, 376399], "30": ["vertical_and_slash", 30, 800, 71599], "31": ["vertical_and_slash", 30, 800, 431162]}, {"0": ["vertical_and_slash", 30, 800, 7073664], "1": ["vertical_and_slash", 100, 800, 4139486], "2": ["vertical_and_slash", 30, 800, 126298], "3": ["vertical_and_slash", 30, 800, 626891], "4": ["vertical_and_slash", 1000, 4096, 244457], "5": ["vertical_and_slash", 30, 800, 338124], "6": ["vertical_and_slash", 100, 800, 4247346], "7": ["vertical_and_slash", 100, 800, 1853876], "8": ["vertical_and_slash", 1000, 4096, 6355420], "9": ["vertical_and_slash", 100, 800, 988264], "10": ["vertical_and_slash", 1000, 4096, 984583], "11": ["vertical_and_slash", 100, 800, 914211], "12": ["vertical_and_slash", 1000, 4096, 570502], "13": ["vertical_and_slash", 1000, 4096, 10187572], "14": ["vertical_and_slash", 1000, 4096, 3408578], "15": ["vertical_and_slash", 1000, 4096, 11375984], "16": ["vertical_and_slash", 100, 800, 5144098], "17": ["vertical_and_slash", 1000, 4096, 350031], "18": ["vertical_and_slash", 1000, 4096, 1299268], "19": ["vertical_and_slash", 1000, 4096, 790117], "20": ["vertical_and_slash", 100, 800, 24094], "21": ["vertical_and_slash", 30, 800, 3856442], "22": ["vertical_and_slash", 100, 
800, 383726], "23": ["vertical_and_slash", 500, 700, 832], "24": ["vertical_and_slash", 100, 800, 7717427], "25": ["vertical_and_slash", 1000, 4096, 4545251], "26": ["vertical_and_slash", 30, 800, 7922478], "27": ["vertical_and_slash", 1000, 4096, 2809849], "28": ["vertical_and_slash", 1000, 4096, 4392930], "29": ["vertical_and_slash", 100, 800, 2998060], "30": ["vertical_and_slash", 100, 800, 6173903], "31": ["vertical_and_slash", 1000, 4096, 2536227]}, {"0": ["vertical_and_slash", 30, 800, 1733117], "1": ["vertical_and_slash", 100, 800, 2514524], "2": ["vertical_and_slash", 1000, 4096, 12567570], "3": ["vertical_and_slash", 1000, 4096, 2817534], "4": ["vertical_and_slash", 1000, 4096, 10571712], "5": ["vertical_and_slash", 100, 800, 1311331], "6": ["vertical_and_slash", 30, 800, 4202358], "7": ["vertical_and_slash", 30, 800, 4970102], "8": ["vertical_and_slash", 30, 800, 88687], "9": ["vertical_and_slash", 30, 800, 293880], "10": ["vertical_and_slash", 500, 700, 70693], "11": ["vertical_and_slash", 30, 800, 13849], "12": ["vertical_and_slash", 30, 800, 238706], "13": ["vertical_and_slash", 30, 800, 78435], "14": ["vertical_and_slash", 30, 800, 164251], "15": ["vertical_and_slash", 30, 800, 199789], "16": ["vertical_and_slash", 30, 800, 200684], "17": ["vertical_and_slash", 1000, 4096, 1761919], "18": ["vertical_and_slash", 30, 800, 210071], "19": ["vertical_and_slash", 30, 800, 68554], "20": ["vertical_and_slash", 30, 800, 484345], "21": ["vertical_and_slash", 30, 800, 1489873], "22": ["vertical_and_slash", 30, 800, 301028], "23": ["vertical_and_slash", 30, 800, 1124431], "24": ["vertical_and_slash", 100, 800, 636179], "25": ["vertical_and_slash", 100, 800, 611008], "26": ["vertical_and_slash", 1000, 4096, 1639], "27": ["vertical_and_slash", 1000, 4096, 8255730], "28": ["vertical_and_slash", 1000, 4096, 6678469], "29": ["vertical_and_slash", 1000, 4096, 628985], "30": ["vertical_and_slash", 1000, 4096, 348316], "31": ["vertical_and_slash", 1000, 4096, 2159698]}, {"0": ["vertical_and_slash", 100, 800, 7105558], "1": ["vertical_and_slash", 30, 800, 1085603], "2": ["vertical_and_slash", 1000, 4096, 7896209], "3": ["vertical_and_slash", 30, 800, 193488], "4": ["vertical_and_slash", 100, 800, 1467223], "5": ["vertical_and_slash", 30, 800, 13794329], "6": ["vertical_and_slash", 1000, 4096, 15661583], "7": ["vertical_and_slash", 1000, 4096, 21334871], "8": ["vertical_and_slash", 1000, 4096, 6158120], "9": ["vertical_and_slash", 1000, 4096, 7414022], "10": ["vertical_and_slash", 100, 800, 14091447], "11": ["vertical_and_slash", 1000, 4096, 15589771], "12": ["vertical_and_slash", 1000, 4096, 14632639], "13": ["vertical_and_slash", 100, 800, 1695539], "14": ["vertical_and_slash", 30, 800, 2605978], "15": ["vertical_and_slash", 1000, 4096, 12495330], "16": ["vertical_and_slash", 1000, 4096, 14564586], "17": ["vertical_and_slash", 500, 700, 962969], "18": ["vertical_and_slash", 1000, 4096, 12281016], "19": ["vertical_and_slash", 1000, 4096, 4614742], "20": ["vertical_and_slash", 100, 800, 11940535], "21": ["vertical_and_slash", 100, 800, 2445981], "22": ["vertical_and_slash", 100, 800, 2485005], "23": ["vertical_and_slash", 1000, 4096, 6864324], "24": ["vertical_and_slash", 1000, 4096, 16230551], "25": ["vertical_and_slash", 100, 800, 9358656], "26": ["vertical_and_slash", 100, 800, 14973598], "27": ["vertical_and_slash", 1000, 4096, 14250781], "28": ["vertical_and_slash", 1000, 4096, 18030248], "29": ["vertical_and_slash", 1000, 4096, 20247786], "30": ["vertical_and_slash", 1000, 4096, 12736495], 
"31": ["vertical_and_slash", 100, 800, 9012943]}, {"0": ["vertical_and_slash", 100, 800, 4792757], "1": ["vertical_and_slash", 100, 800, 5568805], "2": ["vertical_and_slash", 1000, 4096, 12086343], "3": ["vertical_and_slash", 100, 800, 7359182], "4": ["vertical_and_slash", 100, 800, 13719718], "5": ["vertical_and_slash", 1000, 4096, 17051068], "6": ["vertical_and_slash", 100, 800, 15947388], "7": ["vertical_and_slash", 1000, 4096, 9143327], "8": ["vertical_and_slash", 1000, 4096, 21263361], "9": ["vertical_and_slash", 1000, 4096, 17189141], "10": ["vertical_and_slash", 1000, 4096, 7802422], "11": ["vertical_and_slash", 1000, 4096, 18488560], "12": ["vertical_and_slash", 100, 800, 14938800], "13": ["vertical_and_slash", 100, 800, 11012944], "14": ["vertical_and_slash", 1000, 4096, 19104830], "15": ["vertical_and_slash", 3500, 100, 32379], "16": ["vertical_and_slash", 100, 800, 3067742], "17": ["vertical_and_slash", 100, 800, 1977488], "18": ["vertical_and_slash", 1000, 4096, 15351109], "19": ["vertical_and_slash", 30, 800, 1627281], "20": ["vertical_and_slash", 30, 800, 1280991], "21": ["vertical_and_slash", 100, 800, 12133497], "22": ["vertical_and_slash", 1000, 4096, 17870425], "23": ["vertical_and_slash", 30, 800, 4040253], "24": ["vertical_and_slash", 1000, 4096, 6272625], "25": ["vertical_and_slash", 100, 800, 1225145], "26": ["vertical_and_slash", 100, 800, 2746332], "27": ["vertical_and_slash", 100, 800, 4525182], "28": ["vertical_and_slash", 100, 800, 6274770], "29": ["vertical_and_slash", 100, 800, 6919161], "30": ["vertical_and_slash", 100, 800, 3456148], "31": ["vertical_and_slash", 100, 800, 23867]}, {"0": ["vertical_and_slash", 1000, 4096, 7275761], "1": ["vertical_and_slash", 100, 800, 5068315], "2": ["vertical_and_slash", 100, 800, 11162394], "3": ["vertical_and_slash", 100, 800, 3672939], "4": ["vertical_and_slash", 3500, 100, 20894613], "5": ["vertical_and_slash", 1000, 4096, 7938372], "6": ["vertical_and_slash", 100, 800, 12544912], "7": ["vertical_and_slash", 100, 800, 2008695], "8": ["vertical_and_slash", 1000, 4096, 3368310], "9": ["vertical_and_slash", 30, 800, 1508993], "10": ["vertical_and_slash", 1000, 4096, 3495386], "11": ["vertical_and_slash", 3500, 100, 16438193], "12": ["vertical_and_slash", 100, 800, 7069375], "13": ["vertical_and_slash", 100, 800, 10686684], "14": ["vertical_and_slash", 30, 800, 501489], "15": ["vertical_and_slash", 100, 800, 6067001], "16": ["vertical_and_slash", 100, 800, 6935788], "17": ["vertical_and_slash", 1000, 4096, 3300792], "18": ["vertical_and_slash", 100, 800, 7398154], "19": ["vertical_and_slash", 100, 800, 5788636], "20": ["vertical_and_slash", 100, 800, 4456802], "21": ["vertical_and_slash", 100, 800, 2680176], "22": ["vertical_and_slash", 100, 800, 5544567], "23": ["vertical_and_slash", 1000, 4096, 13475356], "24": ["vertical_and_slash", 1000, 4096, 4901727], "25": ["vertical_and_slash", 1000, 4096, 3768996], "26": ["vertical_and_slash", 1000, 4096, 5368869], "27": ["vertical_and_slash", 3500, 100, 14218181], "28": ["vertical_and_slash", 1000, 4096, 13003444], "29": ["vertical_and_slash", 1000, 4096, 5716382], "30": ["vertical_and_slash", 3500, 100, 19916116], "31": ["vertical_and_slash", 1000, 4096, 11776798]}, {"0": ["vertical_and_slash", 100, 800, 13001986], "1": ["vertical_and_slash", 1000, 4096, 7570569], "2": ["vertical_and_slash", 100, 800, 951160], "3": ["vertical_and_slash", 100, 800, 11933179], "4": ["vertical_and_slash", 30, 800, 5365811], "5": ["vertical_and_slash", 100, 800, 10272574], "6": ["vertical_and_slash", 
1000, 4096, 6527670], "7": ["vertical_and_slash", 100, 800, 12930014], "8": ["vertical_and_slash", 100, 800, 359537], "9": ["vertical_and_slash", 100, 800, 10654966], "10": ["vertical_and_slash", 100, 800, 1330316], "11": ["vertical_and_slash", 100, 800, 9971156], "12": ["vertical_and_slash", 1000, 4096, 5781478], "13": ["vertical_and_slash", 100, 800, 6032127], "14": ["vertical_and_slash", 100, 800, 1418329], "15": ["vertical_and_slash", 100, 800, 13069922], "16": ["vertical_and_slash", 100, 800, 8547563], "17": ["vertical_and_slash", 100, 800, 970921], "18": ["vertical_and_slash", 1000, 4096, 9256328], "19": ["vertical_and_slash", 1000, 4096, 12447206], "20": ["vertical_and_slash", 100, 800, 153856], "21": ["vertical_and_slash", 100, 800, 8022371], "22": ["vertical_and_slash", 3500, 100, 18626483], "23": ["vertical_and_slash", 100, 800, 3180643], "24": ["vertical_and_slash", 30, 800, 3549186], "25": ["vertical_and_slash", 100, 800, 2600992], "26": ["vertical_and_slash", 3500, 100, 21080570], "27": ["vertical_and_slash", 1000, 4096, 2995096], "28": ["vertical_and_slash", 30, 800, 13324952], "29": ["vertical_and_slash", 100, 800, 7015426], "30": ["vertical_and_slash", 100, 800, 17142326], "31": ["vertical_and_slash", 30, 800, 2059831]}, {"0": ["vertical_and_slash", 100, 800, 336984], "1": ["vertical_and_slash", 1000, 4096, 11908787], "2": ["vertical_and_slash", 1000, 4096, 11465673], "3": ["vertical_and_slash", 1000, 4096, 3870378], "4": ["vertical_and_slash", 1000, 4096, 1000373], "5": ["vertical_and_slash", 1000, 4096, 6450804], "6": ["vertical_and_slash", 1000, 4096, 6602987], "7": ["vertical_and_slash", 1000, 4096, 6552477], "8": ["vertical_and_slash", 30, 800, 8671938], "9": ["vertical_and_slash", 100, 800, 3906764], "10": ["vertical_and_slash", 1000, 4096, 7300294], "11": ["vertical_and_slash", 100, 800, 9068418], "12": ["vertical_and_slash", 100, 800, 5573415], "13": ["vertical_and_slash", 100, 800, 4302354], "14": ["vertical_and_slash", 30, 800, 969401], "15": ["vertical_and_slash", 100, 800, 132492], "16": ["vertical_and_slash", 1000, 4096, 10575265], "17": ["vertical_and_slash", 30, 800, 114557], "18": ["vertical_and_slash", 1000, 4096, 1669778], "19": ["vertical_and_slash", 30, 800, 244697], "20": ["vertical_and_slash", 30, 800, 401989], "21": ["vertical_and_slash", 1000, 4096, 257876], "22": ["vertical_and_slash", 100, 800, 1656276], "23": ["vertical_and_slash", 100, 800, 6627755], "24": ["vertical_and_slash", 100, 800, 17069094], "25": ["vertical_and_slash", 1000, 4096, 17310922], "26": ["vertical_and_slash", 3500, 100, 19238326], "27": ["vertical_and_slash", 100, 800, 10416201], "28": ["vertical_and_slash", 1000, 4096, 9125015], "29": ["vertical_and_slash", 100, 800, 17113558], "30": ["vertical_and_slash", 1000, 4096, 12041930], "31": ["vertical_and_slash", 1000, 4096, 6060396]}, {"0": ["vertical_and_slash", 1000, 4096, 9259982], "1": ["vertical_and_slash", 1000, 4096, 8618567], "2": ["vertical_and_slash", 100, 800, 3876940], "3": ["vertical_and_slash", 1000, 4096, 12767960], "4": ["vertical_and_slash", 1000, 4096, 6112941], "5": ["vertical_and_slash", 1000, 4096, 9851048], "6": ["vertical_and_slash", 1000, 4096, 5763271], "7": ["vertical_and_slash", 1000, 4096, 12744434], "8": ["vertical_and_slash", 100, 800, 12512293], "9": ["vertical_and_slash", 1000, 4096, 2367543], "10": ["vertical_and_slash", 100, 800, 12342103], "11": ["vertical_and_slash", 100, 800, 3126675], "12": ["vertical_and_slash", 1000, 4096, 13617286], "13": ["vertical_and_slash", 1000, 4096, 8094518], "14": 
["vertical_and_slash", 1000, 4096, 851614], "15": ["vertical_and_slash", 1000, 4096, 10519480], "16": ["vertical_and_slash", 100, 800, 1706372], "17": ["vertical_and_slash", 100, 800, 248757], "18": ["vertical_and_slash", 100, 800, 4394336], "19": ["vertical_and_slash", 100, 800, 1886529], "20": ["vertical_and_slash", 1000, 4096, 6486541], "21": ["vertical_and_slash", 100, 800, 1175436], "22": ["vertical_and_slash", 100, 800, 7864652], "23": ["vertical_and_slash", 100, 800, 1001917], "24": ["vertical_and_slash", 100, 800, 2494293], "25": ["vertical_and_slash", 1000, 4096, 7698995], "26": ["vertical_and_slash", 100, 800, 2946712], "27": ["vertical_and_slash", 100, 800, 5464103], "28": ["vertical_and_slash", 100, 800, 2608538], "29": ["vertical_and_slash", 100, 800, 1606308], "30": ["vertical_and_slash", 1000, 4096, 5981702], "31": ["vertical_and_slash", 3500, 100, 18590832]}, {"0": ["vertical_and_slash", 100, 800, 4688244], "1": ["vertical_and_slash", 100, 800, 11368272], "2": ["vertical_and_slash", 100, 800, 2558719], "3": ["vertical_and_slash", 1000, 4096, 9536926], "4": ["vertical_and_slash", 1000, 4096, 12315283], "5": ["vertical_and_slash", 1000, 4096, 6272119], "6": ["vertical_and_slash", 1000, 4096, 4450200], "7": ["vertical_and_slash", 100, 800, 5822568], "8": ["vertical_and_slash", 1000, 4096, 13523232], "9": ["vertical_and_slash", 100, 800, 816607], "10": ["vertical_and_slash", 1000, 4096, 15825338], "11": ["vertical_and_slash", 100, 800, 1133867], "12": ["vertical_and_slash", 100, 800, 10722989], "13": ["vertical_and_slash", 100, 800, 2466001], "14": ["vertical_and_slash", 100, 800, 16732584], "15": ["vertical_and_slash", 100, 800, 1052553], "16": ["vertical_and_slash", 100, 800, 8602649], "17": ["vertical_and_slash", 100, 800, 8851217], "18": ["vertical_and_slash", 100, 800, 6104130], "19": ["vertical_and_slash", 1000, 4096, 18459502], "20": ["vertical_and_slash", 100, 800, 8076967], "21": ["vertical_and_slash", 1000, 4096, 4863209], "22": ["vertical_and_slash", 1000, 4096, 8892415], "23": ["vertical_and_slash", 1000, 4096, 9542798], "24": ["vertical_and_slash", 100, 800, 1384183], "25": ["vertical_and_slash", 100, 800, 4035455], "26": ["vertical_and_slash", 100, 800, 536763], "27": ["vertical_and_slash", 1000, 4096, 2058585], "28": ["vertical_and_slash", 100, 800, 4195607], "29": ["vertical_and_slash", 100, 800, 2407136], "30": ["vertical_and_slash", 100, 800, 2106926], "31": ["vertical_and_slash", 100, 800, 3807607]}, {"0": ["vertical_and_slash", 100, 800, 15975096], "1": ["vertical_and_slash", 3500, 100, 20664973], "2": ["vertical_and_slash", 1000, 4096, 943914], "3": ["vertical_and_slash", 100, 800, 14363276], "4": ["vertical_and_slash", 100, 800, 720326], "5": ["vertical_and_slash", 1000, 4096, 7725879], "6": ["vertical_and_slash", 1000, 4096, 11411255], "7": ["vertical_and_slash", 1000, 4096, 9492657], "8": ["vertical_and_slash", 1000, 4096, 16448227], "9": ["vertical_and_slash", 100, 800, 6180918], "10": ["vertical_and_slash", 1000, 4096, 10942342], "11": ["vertical_and_slash", 1000, 4096, 12047657], "12": ["vertical_and_slash", 100, 800, 2376658], "13": ["vertical_and_slash", 1000, 4096, 17780083], "14": ["vertical_and_slash", 1000, 4096, 8548356], "15": ["vertical_and_slash", 100, 800, 4545880], "16": ["vertical_and_slash", 30, 800, 2020350], "17": ["vertical_and_slash", 100, 800, 15875867], "18": ["vertical_and_slash", 30, 800, 661201], "19": ["vertical_and_slash", 1000, 4096, 14915782], "20": ["vertical_and_slash", 100, 800, 4106388], "21": ["vertical_and_slash", 30, 
800, 14163451], "22": ["vertical_and_slash", 100, 800, 1759639], "23": ["vertical_and_slash", 1000, 4096, 2391070], "24": ["vertical_and_slash", 100, 800, 10749758], "25": ["vertical_and_slash", 100, 800, 8022438], "26": ["vertical_and_slash", 100, 800, 1013941], "27": ["vertical_and_slash", 100, 800, 3537516], "28": ["vertical_and_slash", 100, 800, 1252545], "29": ["vertical_and_slash", 100, 800, 1155740], "30": ["vertical_and_slash", 1000, 4096, 2590667], "31": ["vertical_and_slash", 100, 800, 3320946]}, {"0": ["vertical_and_slash", 1000, 4096, 8025205], "1": ["vertical_and_slash", 500, 700, 2286667], "2": ["vertical_and_slash", 1000, 4096, 2104863], "3": ["vertical_and_slash", 1000, 4096, 2160060], "4": ["vertical_and_slash", 1000, 4096, 4209178], "5": ["vertical_and_slash", 1000, 4096, 5703899], "6": ["vertical_and_slash", 100, 800, 15566139], "7": ["vertical_and_slash", 500, 700, 464012], "8": ["vertical_and_slash", 1000, 4096, 632556], "9": ["vertical_and_slash", 1000, 4096, 10933130], "10": ["vertical_and_slash", 3500, 100, 6376023], "11": ["vertical_and_slash", 30, 800, 53293], "12": ["vertical_and_slash", 3500, 100, 9195722], "13": ["vertical_and_slash", 100, 800, 130891], "14": ["vertical_and_slash", 100, 800, 1266310], "15": ["vertical_and_slash", 100, 800, 12042893], "16": ["vertical_and_slash", 100, 800, 1440252], "17": ["vertical_and_slash", 100, 800, 5003178], "18": ["vertical_and_slash", 100, 800, 9451180], "19": ["vertical_and_slash", 100, 800, 16518635], "20": ["vertical_and_slash", 1000, 4096, 16574448], "21": ["vertical_and_slash", 100, 800, 10001073], "22": ["vertical_and_slash", 100, 800, 6194150], "23": ["vertical_and_slash", 100, 800, 1990080], "24": ["vertical_and_slash", 100, 800, 14105574], "25": ["vertical_and_slash", 3500, 100, 49578], "26": ["vertical_and_slash", 100, 800, 1368613], "27": ["vertical_and_slash", 100, 800, 882483], "28": ["vertical_and_slash", 100, 800, 200592], "29": ["vertical_and_slash", 100, 800, 4144857], "30": ["vertical_and_slash", 30, 800, 2059620], "31": ["vertical_and_slash", 1000, 4096, 7650136]}, {"0": ["vertical_and_slash", 3500, 100, 20200147], "1": ["vertical_and_slash", 100, 800, 18033672], "2": ["vertical_and_slash", 100, 800, 19227421], "3": ["vertical_and_slash", 1000, 4096, 7658465], "4": ["vertical_and_slash", 100, 800, 4862174], "5": ["vertical_and_slash", 100, 800, 6197824], "6": ["vertical_and_slash", 100, 800, 5687873], "7": ["vertical_and_slash", 100, 800, 13005015], "8": ["vertical_and_slash", 1000, 4096, 6677727], "9": ["vertical_and_slash", 500, 700, 1282697], "10": ["vertical_and_slash", 30, 800, 3148411], "11": ["vertical_and_slash", 500, 700, 8985965], "12": ["vertical_and_slash", 100, 800, 11107850], "13": ["vertical_and_slash", 30, 800, 2077544], "14": ["vertical_and_slash", 1000, 4096, 10030857], "15": ["vertical_and_slash", 100, 800, 1625067], "16": ["vertical_and_slash", 100, 800, 332660], "17": ["vertical_and_slash", 3500, 100, 17539067], "18": ["vertical_and_slash", 500, 700, 97483], "19": ["vertical_and_slash", 30, 800, 10910089], "20": ["vertical_and_slash", 500, 700, 49927], "21": ["vertical_and_slash", 1000, 4096, 2959963], "22": ["vertical_and_slash", 1000, 4096, 1232910], "23": ["vertical_and_slash", 100, 800, 482216], "24": ["vertical_and_slash", 3500, 100, 2789809], "25": ["vertical_and_slash", 3500, 100, 1787013], "26": ["vertical_and_slash", 100, 800, 6121965], "27": ["vertical_and_slash", 100, 800, 10417031], "28": ["vertical_and_slash", 100, 800, 476098], "29": ["vertical_and_slash", 3500, 100, 
13019985], "30": ["vertical_and_slash", 100, 800, 15057321], "31": ["vertical_and_slash", 100, 800, 7206530]}, {"0": ["vertical_and_slash", 30, 800, 3863946], "1": ["vertical_and_slash", 3500, 100, 373838], "2": ["vertical_and_slash", 30, 800, 2498107], "3": ["vertical_and_slash", 30, 800, 1774834], "4": ["vertical_and_slash", 30, 800, 13518574], "5": ["vertical_and_slash", 30, 800, 17864279], "6": ["vertical_and_slash", 30, 800, 4971247], "7": ["vertical_and_slash", 30, 800, 15064092], "8": ["vertical_and_slash", 1000, 4096, 173702], "9": ["vertical_and_slash", 100, 800, 2079528], "10": ["vertical_and_slash", 1000, 4096, 1395995], "11": ["vertical_and_slash", 100, 800, 16807189], "12": ["vertical_and_slash", 1000, 4096, 3387818], "13": ["vertical_and_slash", 1000, 4096, 215373], "14": ["vertical_and_slash", 1000, 4096, 7656048], "15": ["vertical_and_slash", 1000, 4096, 3284167], "16": ["vertical_and_slash", 100, 800, 208560], "17": ["vertical_and_slash", 100, 800, 12910224], "18": ["vertical_and_slash", 100, 800, 2482406], "19": ["vertical_and_slash", 100, 800, 591300], "20": ["vertical_and_slash", 500, 700, 2512230], "21": ["vertical_and_slash", 100, 800, 650819], "22": ["vertical_and_slash", 100, 800, 750172], "23": ["vertical_and_slash", 100, 800, 98380], "24": ["vertical_and_slash", 1000, 4096, 12591674], "25": ["vertical_and_slash", 100, 800, 7520129], "26": ["vertical_and_slash", 3500, 100, 19780031], "27": ["vertical_and_slash", 1000, 4096, 11324806], "28": ["vertical_and_slash", 100, 800, 2339301], "29": ["vertical_and_slash", 3500, 100, 20537162], "30": ["vertical_and_slash", 100, 800, 1802458], "31": ["vertical_and_slash", 1000, 4096, 4121953]}, {"0": ["vertical_and_slash", 100, 800, 1406058], "1": ["vertical_and_slash", 30, 800, 20495], "2": ["vertical_and_slash", 100, 800, 265247], "3": ["vertical_and_slash", 30, 800, 6044172], "4": ["vertical_and_slash", 100, 800, 15417162], "5": ["vertical_and_slash", 100, 800, 20101], "6": ["vertical_and_slash", 30, 800, 12443], "7": ["vertical_and_slash", 100, 800, 1029], "8": ["vertical_and_slash", 30, 800, 49334], "9": ["vertical_and_slash", 30, 800, 30976], "10": ["vertical_and_slash", 30, 800, 127540], "11": ["vertical_and_slash", 30, 800, 3597689], "12": ["vertical_and_slash", 30, 800, 32317], "13": ["vertical_and_slash", 30, 800, 202557], "14": ["vertical_and_slash", 30, 800, 531805], "15": ["vertical_and_slash", 30, 800, 606518], "16": ["vertical_and_slash", 30, 800, 1152706], "17": ["vertical_and_slash", 1000, 4096, 5604379], "18": ["vertical_and_slash", 30, 800, 663403], "19": ["vertical_and_slash", 1000, 4096, 11655952], "20": ["vertical_and_slash", 100, 800, 15102172], "21": ["vertical_and_slash", 100, 800, 4674143], "22": ["vertical_and_slash", 500, 700, 1539328], "23": ["vertical_and_slash", 100, 800, 3051857], "24": ["vertical_and_slash", 30, 800, 123576], "25": ["vertical_and_slash", 100, 800, 964667], "26": ["vertical_and_slash", 30, 800, 41505], "27": ["vertical_and_slash", 30, 800, 59560], "28": ["vertical_and_slash", 100, 800, 17208], "29": ["vertical_and_slash", 30, 800, 82626], "30": ["vertical_and_slash", 30, 800, 1815531], "31": ["vertical_and_slash", 100, 800, 2897668]}, {"0": ["vertical_and_slash", 30, 800, 48323], "1": ["vertical_and_slash", 30, 800, 689675], "2": ["vertical_and_slash", 30, 800, 542041], "3": ["vertical_and_slash", 30, 800, 8544], "4": ["vertical_and_slash", 30, 800, 102588], "5": ["vertical_and_slash", 100, 800, 2064154], "6": ["vertical_and_slash", 30, 800, 845227], "7": ["vertical_and_slash", 
30, 800, 2922720], "8": ["vertical_and_slash", 1000, 4096, 2932415], "9": ["vertical_and_slash", 1000, 4096, 3062180], "10": ["vertical_and_slash", 100, 800, 485119], "11": ["vertical_and_slash", 30, 800, 215049], "12": ["vertical_and_slash", 100, 800, 387511], "13": ["vertical_and_slash", 100, 800, 1447813], "14": ["vertical_and_slash", 1000, 4096, 3878389], "15": ["vertical_and_slash", 100, 800, 376333], "16": ["vertical_and_slash", 3500, 100, 13506969], "17": ["vertical_and_slash", 100, 800, 12850708], "18": ["vertical_and_slash", 30, 800, 372529], "19": ["vertical_and_slash", 1000, 4096, 3746168], "20": ["vertical_and_slash", 100, 800, 170359], "21": ["vertical_and_slash", 100, 800, 1130785], "22": ["vertical_and_slash", 100, 800, 116224], "23": ["vertical_and_slash", 100, 800, 1001182], "24": ["vertical_and_slash", 100, 800, 335681], "25": ["vertical_and_slash", 100, 800, 3392285], "26": ["vertical_and_slash", 1000, 4096, 4420760], "27": ["vertical_and_slash", 3500, 100, 12258981], "28": ["vertical_and_slash", 500, 700, 1941188], "29": ["vertical_and_slash", 1000, 4096, 7639240], "30": ["vertical_and_slash", 500, 700, 8277346], "31": ["vertical_and_slash", 3500, 100, 3442659]}, {"0": ["vertical_and_slash", 30, 800, 945264], "1": ["vertical_and_slash", 1000, 4096, 3474994], "2": ["vertical_and_slash", 500, 700, 218918], "3": ["vertical_and_slash", 3500, 100, 20221076], "4": ["vertical_and_slash", 3500, 100, 21680113], "5": ["vertical_and_slash", 30, 800, 94866], "6": ["vertical_and_slash", 30, 800, 190907], "7": ["vertical_and_slash", 1000, 4096, 1708889], "8": ["vertical_and_slash", 100, 800, 2832752], "9": ["vertical_and_slash", 1000, 4096, 613061], "10": ["vertical_and_slash", 1000, 4096, 7381575], "11": ["vertical_and_slash", 1000, 4096, 1462120], "12": ["vertical_and_slash", 1000, 4096, 3338671], "13": ["vertical_and_slash", 100, 800, 1664528], "14": ["vertical_and_slash", 500, 700, 143074], "15": ["vertical_and_slash", 30, 800, 433035], "16": ["vertical_and_slash", 500, 700, 210886], "17": ["vertical_and_slash", 100, 800, 8632139], "18": ["vertical_and_slash", 100, 800, 17521811], "19": ["vertical_and_slash", 30, 800, 194306], "20": ["vertical_and_slash", 100, 800, 3156950], "21": ["vertical_and_slash", 100, 800, 2413125], "22": ["vertical_and_slash", 1000, 4096, 10110205], "23": ["vertical_and_slash", 100, 800, 695569], "24": ["vertical_and_slash", 30, 800, 32256], "25": ["vertical_and_slash", 30, 800, 396762], "26": ["vertical_and_slash", 30, 800, 726815], "27": ["vertical_and_slash", 30, 800, 499056], "28": ["vertical_and_slash", 30, 800, 24234], "29": ["vertical_and_slash", 30, 800, 87299], "30": ["vertical_and_slash", 30, 800, 82758], "31": ["vertical_and_slash", 30, 800, 447266]}, {"0": ["vertical_and_slash", 100, 800, 13520320], "1": ["vertical_and_slash", 100, 800, 1746572], "2": ["vertical_and_slash", 100, 800, 81358], "3": ["vertical_and_slash", 100, 800, 53915], "4": ["vertical_and_slash", 100, 800, 16824352], "5": ["vertical_and_slash", 100, 800, 124419], "6": ["vertical_and_slash", 100, 800, 5336412], "7": ["vertical_and_slash", 100, 800, 1005227], "8": ["vertical_and_slash", 1000, 4096, 17919472], "9": ["vertical_and_slash", 100, 800, 5089389], "10": ["vertical_and_slash", 1000, 4096, 2318753], "11": ["vertical_and_slash", 100, 800, 2351529], "12": ["vertical_and_slash", 1000, 4096, 1068220], "13": ["vertical_and_slash", 1000, 4096, 18765314], "14": ["vertical_and_slash", 1000, 4096, 11512280], "15": ["vertical_and_slash", 1000, 4096, 14722530], "16": 
["vertical_and_slash", 100, 800, 1542041], "17": ["vertical_and_slash", 3500, 100, 19279869], "18": ["vertical_and_slash", 100, 800, 4711439], "19": ["vertical_and_slash", 3500, 100, 3688560], "20": ["vertical_and_slash", 3500, 100, 224250], "21": ["vertical_and_slash", 100, 800, 10537230], "22": ["vertical_and_slash", 100, 800, 749819], "23": ["vertical_and_slash", 100, 800, 25187], "24": ["vertical_and_slash", 100, 800, 13068183], "25": ["vertical_and_slash", 100, 800, 17508351], "26": ["vertical_and_slash", 100, 800, 12981109], "27": ["vertical_and_slash", 100, 800, 15314279], "28": ["vertical_and_slash", 100, 800, 15558838], "29": ["vertical_and_slash", 100, 800, 3774507], "30": ["vertical_and_slash", 100, 800, 6486179], "31": ["vertical_and_slash", 100, 800, 15420283]}, {"0": ["vertical_and_slash", 100, 800, 1793383], "1": ["vertical_and_slash", 100, 800, 8103093], "2": ["vertical_and_slash", 1000, 4096, 12596743], "3": ["vertical_and_slash", 1000, 4096, 5012316], "4": ["vertical_and_slash", 1000, 4096, 12870742], "5": ["vertical_and_slash", 100, 800, 3459141], "6": ["vertical_and_slash", 30, 800, 10224901], "7": ["vertical_and_slash", 100, 800, 3753981], "8": ["vertical_and_slash", 30, 800, 140040], "9": ["vertical_and_slash", 30, 800, 550671], "10": ["vertical_and_slash", 100, 800, 94454], "11": ["vertical_and_slash", 30, 800, 8909], "12": ["vertical_and_slash", 30, 800, 152077], "13": ["vertical_and_slash", 30, 800, 49171], "14": ["vertical_and_slash", 30, 800, 107813], "15": ["vertical_and_slash", 30, 800, 128764], "16": ["vertical_and_slash", 30, 800, 617322], "17": ["vertical_and_slash", 1000, 4096, 6019612], "18": ["vertical_and_slash", 100, 800, 766582], "19": ["vertical_and_slash", 30, 800, 52503], "20": ["vertical_and_slash", 30, 800, 300294], "21": ["vertical_and_slash", 30, 800, 1577098], "22": ["vertical_and_slash", 100, 800, 838126], "23": ["vertical_and_slash", 100, 800, 1218912], "24": ["vertical_and_slash", 100, 800, 1720664], "25": ["vertical_and_slash", 100, 800, 1377743], "26": ["vertical_and_slash", 1000, 4096, 900287], "27": ["vertical_and_slash", 1000, 4096, 12066126], "28": ["vertical_and_slash", 1000, 4096, 14264762], "29": ["vertical_and_slash", 1000, 4096, 71284], "30": ["vertical_and_slash", 1000, 4096, 3218291], "31": ["vertical_and_slash", 1000, 4096, 13215387]}, {"0": ["vertical_and_slash", 100, 800, 18645971], "1": ["vertical_and_slash", 30, 800, 587932], "2": ["vertical_and_slash", 1000, 4096, 10538505], "3": ["vertical_and_slash", 30, 800, 158559], "4": ["vertical_and_slash", 100, 800, 3376593], "5": ["vertical_and_slash", 100, 800, 18383338], "6": ["vertical_and_slash", 1000, 4096, 10074810], "7": ["vertical_and_slash", 1000, 4096, 19347044], "8": ["vertical_and_slash", 1000, 4096, 6794450], "9": ["vertical_and_slash", 1000, 4096, 3529136], "10": ["vertical_and_slash", 1000, 4096, 6952639], "11": ["vertical_and_slash", 1000, 4096, 9362393], "12": ["vertical_and_slash", 1000, 4096, 5368732], "13": ["vertical_and_slash", 100, 800, 705065], "14": ["vertical_and_slash", 100, 800, 628184], "15": ["vertical_and_slash", 1000, 4096, 7575979], "16": ["vertical_and_slash", 1000, 4096, 14825324], "17": ["vertical_and_slash", 100, 800, 584190], "18": ["vertical_and_slash", 1000, 4096, 14770220], "19": ["vertical_and_slash", 100, 800, 7324628], "20": ["vertical_and_slash", 100, 800, 13439080], "21": ["vertical_and_slash", 100, 800, 2173728], "22": ["vertical_and_slash", 100, 800, 1300676], "23": ["vertical_and_slash", 3500, 100, 20507565], "24": 
["vertical_and_slash", 3500, 100, 20826931], "25": ["vertical_and_slash", 100, 800, 16503925], "26": ["vertical_and_slash", 3500, 100, 20607984], "27": ["vertical_and_slash", 1000, 4096, 9100775], "28": ["vertical_and_slash", 3500, 100, 20540180], "29": ["vertical_and_slash", 1000, 4096, 19978707], "30": ["vertical_and_slash", 100, 800, 18084829], "31": ["vertical_and_slash", 100, 800, 15584755]}, {"0": ["vertical_and_slash", 100, 800, 14519032], "1": ["vertical_and_slash", 100, 800, 13637880], "2": ["vertical_and_slash", 3500, 100, 19712241], "3": ["vertical_and_slash", 100, 800, 14417159], "4": ["vertical_and_slash", 100, 800, 18931772], "5": ["vertical_and_slash", 3500, 100, 20278735], "6": ["vertical_and_slash", 100, 800, 21000177], "7": ["vertical_and_slash", 3500, 100, 20181815], "8": ["vertical_and_slash", 1000, 4096, 20667264], "9": ["vertical_and_slash", 1000, 4096, 13546806], "10": ["vertical_and_slash", 1000, 4096, 8056555], "11": ["vertical_and_slash", 1000, 4096, 14544259], "12": ["vertical_and_slash", 3500, 100, 14988539], "13": ["vertical_and_slash", 100, 800, 9925552], "14": ["vertical_and_slash", 1000, 4096, 16502140], "15": ["vertical_and_slash", 3500, 100, 1394], "16": ["vertical_and_slash", 100, 800, 6786191], "17": ["vertical_and_slash", 100, 800, 5142369], "18": ["vertical_and_slash", 1000, 4096, 18139060], "19": ["vertical_and_slash", 100, 800, 1817633], "20": ["vertical_and_slash", 100, 800, 1586931], "21": ["vertical_and_slash", 1000, 4096, 2981991], "22": ["vertical_and_slash", 1000, 4096, 19814245], "23": ["vertical_and_slash", 100, 800, 3823591], "24": ["vertical_and_slash", 1000, 4096, 11968181], "25": ["vertical_and_slash", 100, 800, 4245870], "26": ["vertical_and_slash", 100, 800, 6065658], "27": ["vertical_and_slash", 100, 800, 17099315], "28": ["vertical_and_slash", 100, 800, 14002976], "29": ["vertical_and_slash", 100, 800, 15062395], "30": ["vertical_and_slash", 3500, 100, 9832421], "31": ["vertical_and_slash", 100, 800, 329163]}, {"0": ["vertical_and_slash", 100, 800, 17881284], "1": ["vertical_and_slash", 100, 800, 6096065], "2": ["vertical_and_slash", 100, 800, 19512309], "3": ["vertical_and_slash", 100, 800, 1361094], "4": ["vertical_and_slash", 3500, 100, 21385650], "5": ["vertical_and_slash", 100, 800, 14152330], "6": ["vertical_and_slash", 100, 800, 15379238], "7": ["vertical_and_slash", 100, 800, 936209], "8": ["vertical_and_slash", 3500, 100, 7644919], "9": ["vertical_and_slash", 100, 800, 162434], "10": ["vertical_and_slash", 100, 800, 11548456], "11": ["vertical_and_slash", 100, 800, 11141282], "12": ["vertical_and_slash", 3500, 100, 6011727], "13": ["vertical_and_slash", 100, 800, 16026110], "14": ["vertical_and_slash", 100, 800, 466578], "15": ["vertical_and_slash", 100, 800, 4799040], "16": ["vertical_and_slash", 100, 800, 15252019], "17": ["vertical_and_slash", 1000, 4096, 7350605], "18": ["vertical_and_slash", 100, 800, 16896477], "19": ["vertical_and_slash", 1000, 4096, 5715502], "20": ["vertical_and_slash", 100, 800, 9885275], "21": ["vertical_and_slash", 100, 800, 8062274], "22": ["vertical_and_slash", 100, 800, 11341966], "23": ["vertical_and_slash", 3500, 100, 21639689], "24": ["vertical_and_slash", 1000, 4096, 7313536], "25": ["vertical_and_slash", 1000, 4096, 1858640], "26": ["vertical_and_slash", 100, 800, 17665215], "27": ["vertical_and_slash", 100, 800, 13827567], "28": ["vertical_and_slash", 1000, 4096, 16279088], "29": ["vertical_and_slash", 1000, 4096, 2728376], "30": ["vertical_and_slash", 1000, 4096, 20378804], "31": 
["vertical_and_slash", 1000, 4096, 11218561]}, {"0": ["vertical_and_slash", 100, 800, 10702989], "1": ["vertical_and_slash", 100, 800, 13911357], "2": ["vertical_and_slash", 100, 800, 2089505], "3": ["vertical_and_slash", 100, 800, 5795130], "4": ["vertical_and_slash", 100, 800, 6198580], "5": ["vertical_and_slash", 100, 800, 11025874], "6": ["vertical_and_slash", 1000, 4096, 4765707], "7": ["vertical_and_slash", 100, 800, 9275261], "8": ["vertical_and_slash", 100, 800, 356772], "9": ["vertical_and_slash", 100, 800, 6507763], "10": ["vertical_and_slash", 100, 800, 1057022], "11": ["vertical_and_slash", 100, 800, 16390639], "12": ["vertical_and_slash", 1000, 4096, 6504148], "13": ["vertical_and_slash", 100, 800, 5815163], "14": ["vertical_and_slash", 100, 800, 781258], "15": ["vertical_and_slash", 1000, 4096, 5306413], "16": ["vertical_and_slash", 100, 800, 7571947], "17": ["vertical_and_slash", 100, 800, 2246584], "18": ["vertical_and_slash", 1000, 4096, 6370179], "19": ["vertical_and_slash", 1000, 4096, 16329738], "20": ["vertical_and_slash", 100, 800, 810202], "21": ["vertical_and_slash", 100, 800, 9614219], "22": ["vertical_and_slash", 3500, 100, 21023608], "23": ["vertical_and_slash", 100, 800, 3697853], "24": ["vertical_and_slash", 500, 700, 623385], "25": ["vertical_and_slash", 100, 800, 2872545], "26": ["vertical_and_slash", 3500, 100, 21443890], "27": ["vertical_and_slash", 1000, 4096, 964593], "28": ["vertical_and_slash", 1000, 4096, 6046647], "29": ["vertical_and_slash", 1000, 4096, 3390663], "30": ["vertical_and_slash", 3500, 100, 21396110], "31": ["vertical_and_slash", 500, 700, 1185821]}, {"0": ["vertical_and_slash", 100, 800, 929038], "1": ["vertical_and_slash", 1000, 4096, 11917459], "2": ["vertical_and_slash", 1000, 4096, 11189817], "3": ["vertical_and_slash", 1000, 4096, 5290948], "4": ["vertical_and_slash", 100, 800, 2444153], "5": ["vertical_and_slash", 1000, 4096, 7367448], "6": ["vertical_and_slash", 1000, 4096, 3929914], "7": ["vertical_and_slash", 1000, 4096, 2907293], "8": ["vertical_and_slash", 30, 800, 8631190], "9": ["vertical_and_slash", 100, 800, 7657567], "10": ["vertical_and_slash", 1000, 4096, 5754225], "11": ["vertical_and_slash", 100, 800, 16484372], "12": ["vertical_and_slash", 100, 800, 7369987], "13": ["vertical_and_slash", 100, 800, 3365312], "14": ["vertical_and_slash", 30, 800, 461151], "15": ["vertical_and_slash", 500, 700, 315608], "16": ["vertical_and_slash", 1000, 4096, 16240364], "17": ["vertical_and_slash", 100, 800, 253597], "18": ["vertical_and_slash", 1000, 4096, 925109], "19": ["vertical_and_slash", 100, 800, 133339], "20": ["vertical_and_slash", 100, 800, 578256], "21": ["vertical_and_slash", 1000, 4096, 1817521], "22": ["vertical_and_slash", 3500, 100, 4918245], "23": ["vertical_and_slash", 1000, 4096, 114317], "24": ["vertical_and_slash", 3500, 100, 20949654], "25": ["vertical_and_slash", 3500, 100, 21380515], "26": ["vertical_and_slash", 1000, 4096, 20796309], "27": ["vertical_and_slash", 100, 800, 11897642], "28": ["vertical_and_slash", 1000, 4096, 17534343], "29": ["vertical_and_slash", 1000, 4096, 20051889], "30": ["vertical_and_slash", 1000, 4096, 20184777], "31": ["vertical_and_slash", 3500, 100, 20262011]}, {"0": ["vertical_and_slash", 1000, 4096, 8179346], "1": ["vertical_and_slash", 1000, 4096, 2423899], "2": ["vertical_and_slash", 100, 800, 13818895], "3": ["vertical_and_slash", 1000, 4096, 6522601], "4": ["vertical_and_slash", 1000, 4096, 1060263], "5": ["vertical_and_slash", 1000, 4096, 4157137], "6": ["vertical_and_slash", 
1000, 4096, 6990380], "7": ["vertical_and_slash", 1000, 4096, 10763715], "8": ["vertical_and_slash", 100, 800, 10123257], "9": ["vertical_and_slash", 1000, 4096, 9156840], "10": ["vertical_and_slash", 1000, 4096, 16029616], "11": ["vertical_and_slash", 100, 800, 1673944], "12": ["vertical_and_slash", 1000, 4096, 15001358], "13": ["vertical_and_slash", 1000, 4096, 11496585], "14": ["vertical_and_slash", 100, 800, 9006039], "15": ["vertical_and_slash", 1000, 4096, 13032008], "16": ["vertical_and_slash", 100, 800, 4813070], "17": ["vertical_and_slash", 100, 800, 1475285], "18": ["vertical_and_slash", 100, 800, 8000337], "19": ["vertical_and_slash", 100, 800, 8837856], "20": ["vertical_and_slash", 1000, 4096, 16977677], "21": ["vertical_and_slash", 100, 800, 4416649], "22": ["vertical_and_slash", 100, 800, 17025902], "23": ["vertical_and_slash", 100, 800, 602195], "24": ["vertical_and_slash", 3500, 100, 5765045], "25": ["vertical_and_slash", 100, 800, 13009069], "26": ["vertical_and_slash", 100, 800, 3523767], "27": ["vertical_and_slash", 100, 800, 6546733], "28": ["vertical_and_slash", 3500, 100, 3452012], "29": ["vertical_and_slash", 100, 800, 1510491], "30": ["vertical_and_slash", 3500, 100, 17227596], "31": ["vertical_and_slash", 3500, 100, 19660969]}, {"0": ["vertical_and_slash", 3500, 100, 6623789], "1": ["vertical_and_slash", 3500, 100, 3902994], "2": ["vertical_and_slash", 3500, 100, 6994928], "3": ["vertical_and_slash", 1000, 4096, 5149770], "4": ["vertical_and_slash", 3500, 100, 14836158], "5": ["vertical_and_slash", 100, 800, 17515427], "6": ["vertical_and_slash", 3500, 100, 7911558], "7": ["vertical_and_slash", 3500, 100, 9338861], "8": ["vertical_and_slash", 3500, 100, 14090410], "9": ["vertical_and_slash", 100, 800, 2492955], "10": ["vertical_and_slash", 3500, 100, 21732500], "11": ["vertical_and_slash", 100, 800, 2898121], "12": ["vertical_and_slash", 3500, 100, 10852444], "13": ["vertical_and_slash", 100, 800, 1940039], "14": ["vertical_and_slash", 3500, 100, 16338195], "15": ["vertical_and_slash", 100, 800, 2006495], "16": ["vertical_and_slash", 3500, 100, 10259390], "17": ["vertical_and_slash", 100, 800, 4065419], "18": ["vertical_and_slash", 100, 800, 12733273], "19": ["vertical_and_slash", 1000, 4096, 11751394], "20": ["vertical_and_slash", 100, 800, 15251186], "21": ["vertical_and_slash", 1000, 4096, 12287035], "22": ["vertical_and_slash", 1000, 4096, 5114508], "23": ["vertical_and_slash", 1000, 4096, 13162100], "24": ["vertical_and_slash", 100, 800, 8000122], "25": ["vertical_and_slash", 100, 800, 9281634], "26": ["vertical_and_slash", 100, 800, 1846488], "27": ["vertical_and_slash", 3500, 100, 8590692], "28": ["vertical_and_slash", 100, 800, 8643063], "29": ["vertical_and_slash", 100, 800, 5758817], "30": ["vertical_and_slash", 100, 800, 5877183], "31": ["vertical_and_slash", 100, 800, 7796906]}, {"0": ["vertical_and_slash", 100, 800, 20597532], "1": ["vertical_and_slash", 3500, 100, 21758452], "2": ["vertical_and_slash", 1000, 4096, 4144141], "3": ["vertical_and_slash", 100, 800, 20261887], "4": ["vertical_and_slash", 1000, 4096, 2512370], "5": ["vertical_and_slash", 3500, 100, 17706009], "6": ["vertical_and_slash", 1000, 4096, 19693735], "7": ["vertical_and_slash", 1000, 4096, 12879585], "8": ["vertical_and_slash", 3500, 100, 18330550], "9": ["vertical_and_slash", 1000, 4096, 395315], "10": ["vertical_and_slash", 100, 800, 12936460], "11": ["vertical_and_slash", 3500, 100, 20489362], "12": ["vertical_and_slash", 100, 800, 2920447], "13": ["vertical_and_slash", 3500, 
100, 19704987], "14": ["vertical_and_slash", 3500, 100, 19332279], "15": ["vertical_and_slash", 100, 800, 8771256], "16": ["vertical_and_slash", 100, 800, 5611994], "17": ["vertical_and_slash", 100, 800, 16087138], "18": ["vertical_and_slash", 500, 700, 891236], "19": ["vertical_and_slash", 3500, 100, 21427139], "20": ["vertical_and_slash", 100, 800, 1823410], "21": ["vertical_and_slash", 30, 800, 15408418], "22": ["vertical_and_slash", 500, 700, 9266226], "23": ["vertical_and_slash", 3500, 100, 17195724], "24": ["vertical_and_slash", 1000, 4096, 7809063], "25": ["vertical_and_slash", 100, 800, 14083150], "26": ["vertical_and_slash", 100, 800, 4139113], "27": ["vertical_and_slash", 100, 800, 10706318], "28": ["vertical_and_slash", 1000, 4096, 1105380], "29": ["vertical_and_slash", 100, 800, 3630717], "30": ["vertical_and_slash", 1000, 4096, 10664933], "31": ["vertical_and_slash", 100, 800, 9143007]}, {"0": ["vertical_and_slash", 1000, 4096, 301018], "1": ["vertical_and_slash", 3500, 100, 1784828], "2": ["vertical_and_slash", 3500, 100, 7055406], "3": ["vertical_and_slash", 3500, 100, 2086934], "4": ["vertical_and_slash", 1000, 4096, 4101320], "5": ["vertical_and_slash", 1000, 4096, 1042376], "6": ["vertical_and_slash", 3500, 100, 16976048], "7": ["vertical_and_slash", 500, 700, 1459641], "8": ["vertical_and_slash", 3500, 100, 1180323], "9": ["vertical_and_slash", 3500, 100, 21763195], "10": ["vertical_and_slash", 3500, 100, 5825008], "11": ["vertical_and_slash", 100, 800, 53453], "12": ["vertical_and_slash", 3500, 100, 11794796], "13": ["vertical_and_slash", 3500, 100, 1783957], "14": ["vertical_and_slash", 100, 800, 1440345], "15": ["vertical_and_slash", 100, 800, 16828397], "16": ["vertical_and_slash", 100, 800, 2469338], "17": ["vertical_and_slash", 100, 800, 4665593], "18": ["vertical_and_slash", 3500, 100, 10580848], "19": ["vertical_and_slash", 3500, 100, 19252331], "20": ["vertical_and_slash", 3500, 100, 20024825], "21": ["vertical_and_slash", 100, 800, 14850871], "22": ["vertical_and_slash", 3500, 100, 12678003], "23": ["vertical_and_slash", 100, 800, 1782447], "24": ["vertical_and_slash", 1000, 4096, 13287971], "25": ["vertical_and_slash", 3500, 100, 1097488], "26": ["vertical_and_slash", 1000, 4096, 2633009], "27": ["vertical_and_slash", 3500, 100, 1055757], "28": ["vertical_and_slash", 3500, 100, 742496], "29": ["vertical_and_slash", 1000, 4096, 4194904], "30": ["vertical_and_slash", 3500, 100, 1577446], "31": ["vertical_and_slash", 1000, 4096, 10526781]}, {"0": ["vertical_and_slash", 1000, 4096, 12079479], "1": ["vertical_and_slash", 3500, 100, 19962962], "2": ["vertical_and_slash", 1000, 4096, 12450062], "3": ["vertical_and_slash", 1000, 4096, 10400447], "4": ["vertical_and_slash", 100, 800, 11323650], "5": ["vertical_and_slash", 1000, 4096, 4102038], "6": ["vertical_and_slash", 1000, 4096, 3338557], "7": ["vertical_and_slash", 3500, 100, 9984816], "8": ["vertical_and_slash", 100, 800, 14524592], "9": ["vertical_and_slash", 100, 800, 2065326], "10": ["vertical_and_slash", 30, 800, 4596708], "11": ["vertical_and_slash", 500, 700, 10708236], "12": ["vertical_and_slash", 500, 700, 13397191], "13": ["vertical_and_slash", 500, 700, 1011260], "14": ["vertical_and_slash", 1000, 4096, 13165340], "15": ["vertical_and_slash", 1000, 4096, 825692], "16": ["vertical_and_slash", 3500, 100, 2810461], "17": ["vertical_and_slash", 3500, 100, 19569698], "18": ["vertical_and_slash", 3500, 100, 2251981], "19": ["vertical_and_slash", 500, 700, 5559642], "20": ["vertical_and_slash", 3500, 100, 
1522515], "21": ["vertical_and_slash", 1000, 4096, 982286], "22": ["vertical_and_slash", 1000, 4096, 2085881], "23": ["vertical_and_slash", 100, 800, 2055023], "24": ["vertical_and_slash", 1000, 4096, 1242380], "25": ["vertical_and_slash", 3500, 100, 1869920], "26": ["vertical_and_slash", 3500, 100, 12180284], "27": ["vertical_and_slash", 3500, 100, 14622044], "28": ["vertical_and_slash", 1000, 4096, 557560], "29": ["vertical_and_slash", 1000, 4096, 6987039], "30": ["vertical_and_slash", 100, 800, 15769951], "31": ["vertical_and_slash", 100, 800, 7721569]}, {"0": ["vertical_and_slash", 500, 700, 4382254], "1": ["vertical_and_slash", 3500, 100, 84219], "2": ["vertical_and_slash", 500, 700, 4734463], "3": ["vertical_and_slash", 500, 700, 3186548], "4": ["vertical_and_slash", 1000, 4096, 4063246], "5": ["vertical_and_slash", 1000, 4096, 12708225], "6": ["vertical_and_slash", 500, 700, 7742943], "7": ["vertical_and_slash", 100, 800, 15424159], "8": ["vertical_and_slash", 1000, 4096, 6301506], "9": ["vertical_and_slash", 1000, 4096, 2079847], "10": ["vertical_and_slash", 1000, 4096, 4217027], "11": ["vertical_and_slash", 1000, 4096, 6297884], "12": ["vertical_and_slash", 3500, 100, 4824003], "13": ["vertical_and_slash", 1000, 4096, 3960801], "14": ["vertical_and_slash", 1000, 4096, 10405673], "15": ["vertical_and_slash", 1000, 4096, 8272702], "16": ["vertical_and_slash", 3500, 100, 2874719], "17": ["vertical_and_slash", 1000, 4096, 13248253], "18": ["vertical_and_slash", 3500, 100, 16731069], "19": ["vertical_and_slash", 1000, 4096, 3488474], "20": ["vertical_and_slash", 3500, 100, 4911794], "21": ["vertical_and_slash", 3500, 100, 3300649], "22": ["vertical_and_slash", 3500, 100, 2239972], "23": ["vertical_and_slash", 1000, 4096, 847410], "24": ["vertical_and_slash", 1000, 4096, 12556756], "25": ["vertical_and_slash", 3500, 100, 10893823], "26": ["vertical_and_slash", 1000, 4096, 14168165], "27": ["vertical_and_slash", 1000, 4096, 14127548], "28": ["vertical_and_slash", 1000, 4096, 5277617], "29": ["vertical_and_slash", 1000, 4096, 16652651], "30": ["vertical_and_slash", 1000, 4096, 7991739], "31": ["vertical_and_slash", 3500, 100, 16136482]}, {"0": ["vertical_and_slash", 100, 800, 3776409], "1": ["vertical_and_slash", 100, 800, 3972530], "2": ["vertical_and_slash", 100, 800, 10166976], "3": ["vertical_and_slash", 100, 800, 13449519], "4": ["vertical_and_slash", 30, 800, 4621777], "5": ["vertical_and_slash", 30, 800, 17026761], "6": ["vertical_and_slash", 30, 800, 11401344], "7": ["vertical_and_slash", 100, 800, 3178997], "8": ["vertical_and_slash", 1000, 4096, 14919677], "9": ["vertical_and_slash", 100, 800, 13489170], "10": ["vertical_and_slash", 1000, 4096, 12483196], "11": ["vertical_and_slash", 1000, 4096, 18647183], "12": ["vertical_and_slash", 1000, 4096, 18488628], "13": ["vertical_and_slash", 3500, 100, 18285318], "14": ["vertical_and_slash", 3500, 100, 19771087], "15": ["vertical_and_slash", 100, 800, 11952058], "16": ["vertical_and_slash", 1000, 4096, 671303], "17": ["vertical_and_slash", 3500, 100, 20413410], "18": ["vertical_and_slash", 1000, 4096, 693843], "19": ["vertical_and_slash", 3500, 100, 20183012], "20": ["vertical_and_slash", 3500, 100, 4751982], "21": ["vertical_and_slash", 1000, 4096, 1190840], "22": ["vertical_and_slash", 3500, 100, 8189368], "23": ["vertical_and_slash", 3500, 100, 4191516], "24": ["vertical_and_slash", 100, 800, 9072597], "25": ["vertical_and_slash", 3500, 100, 6214053], "26": ["vertical_and_slash", 1000, 4096, 8848124], "27": ["vertical_and_slash", 
3500, 100, 1231805], "28": ["vertical_and_slash", 3500, 100, 3468573], "29": ["vertical_and_slash", 3500, 100, 16841594], "30": ["vertical_and_slash", 3500, 100, 12565098], "31": ["vertical_and_slash", 3500, 100, 4308210]}, {"0": ["vertical_and_slash", 100, 800, 405030], "1": ["vertical_and_slash", 3500, 100, 12737242], "2": ["vertical_and_slash", 1000, 4096, 6996254], "3": ["vertical_and_slash", 3500, 100, 4831216], "4": ["vertical_and_slash", 3500, 100, 5890590], "5": ["vertical_and_slash", 1000, 4096, 3008671], "6": ["vertical_and_slash", 1000, 4096, 4998230], "7": ["vertical_and_slash", 1000, 4096, 6509194], "8": ["vertical_and_slash", 3500, 100, 1774041], "9": ["vertical_and_slash", 3500, 100, 1372562], "10": ["vertical_and_slash", 3500, 100, 9111804], "11": ["vertical_and_slash", 1000, 4096, 1109182], "12": ["vertical_and_slash", 100, 800, 371771], "13": ["vertical_and_slash", 3500, 100, 905824], "14": ["vertical_and_slash", 1000, 4096, 4934535], "15": ["vertical_and_slash", 1000, 4096, 2841896], "16": ["vertical_and_slash", 3500, 100, 4614245], "17": ["vertical_and_slash", 3500, 100, 6900617], "18": ["vertical_and_slash", 3500, 100, 2824788], "19": ["vertical_and_slash", 100, 800, 6589423], "20": ["vertical_and_slash", 500, 700, 6357101], "21": ["vertical_and_slash", 30, 800, 5731632], "22": ["vertical_and_slash", 30, 800, 7261064], "23": ["vertical_and_slash", 500, 700, 9172114], "24": ["vertical_and_slash", 1000, 4096, 210349], "25": ["vertical_and_slash", 1000, 4096, 4526369], "26": ["vertical_and_slash", 1000, 4096, 2326769], "27": ["vertical_and_slash", 3500, 100, 5989844], "28": ["vertical_and_slash", 3500, 100, 1393004], "29": ["vertical_and_slash", 3500, 100, 2114704], "30": ["vertical_and_slash", 3500, 100, 776564], "31": ["vertical_and_slash", 3500, 100, 2826514]}, {"0": ["vertical_and_slash", 1000, 4096, 4747927], "1": ["vertical_and_slash", 3500, 100, 14468785], "2": ["vertical_and_slash", 3500, 100, 10124003], "3": ["vertical_and_slash", 3500, 100, 6702061], "4": ["vertical_and_slash", 1000, 4096, 2311190], "5": ["vertical_and_slash", 1000, 4096, 2412642], "6": ["vertical_and_slash", 1000, 4096, 2782532], "7": ["vertical_and_slash", 3500, 100, 6699063], "8": ["vertical_and_slash", 100, 800, 10899273], "9": ["vertical_and_slash", 100, 800, 571205], "10": ["vertical_and_slash", 1000, 4096, 2224039], "11": ["vertical_and_slash", 3500, 100, 5206481], "12": ["vertical_and_slash", 100, 800, 6039530], "13": ["vertical_and_slash", 3500, 100, 6121024], "14": ["vertical_and_slash", 1000, 4096, 915849], "15": ["vertical_and_slash", 3500, 100, 4393793], "16": ["vertical_and_slash", 1000, 4096, 4168491], "17": ["vertical_and_slash", 3500, 100, 5568206], "18": ["vertical_and_slash", 1000, 4096, 1087118], "19": ["vertical_and_slash", 1000, 4096, 2691708], "20": ["vertical_and_slash", 3500, 100, 4351677], "21": ["vertical_and_slash", 3500, 100, 3933999], "22": ["vertical_and_slash", 3500, 100, 3997663], "23": ["vertical_and_slash", 3500, 100, 3522236], "24": ["vertical_and_slash", 3500, 100, 9956224], "25": ["vertical_and_slash", 3500, 100, 4192895], "26": ["vertical_and_slash", 3500, 100, 9150842], "27": ["vertical_and_slash", 3500, 100, 12754903], "28": ["vertical_and_slash", 3500, 100, 7346979], "29": ["vertical_and_slash", 100, 800, 9422285], "30": ["vertical_and_slash", 100, 800, 3140769], "31": ["vertical_and_slash", 500, 700, 2415994]}, {"0": ["vertical_and_slash", 3500, 100, 4352921], "1": ["vertical_and_slash", 1000, 4096, 3398326], "2": ["vertical_and_slash", 3500, 100, 
5788760], "3": ["vertical_and_slash", 1000, 4096, 2945608], "4": ["vertical_and_slash", 3500, 100, 1988612], "5": ["vertical_and_slash", 1000, 4096, 3736165], "6": ["vertical_and_slash", 1000, 4096, 9670660], "7": ["vertical_and_slash", 3500, 100, 3803388], "8": ["vertical_and_slash", 3500, 100, 3612542], "9": ["vertical_and_slash", 3500, 100, 4948698], "10": ["vertical_and_slash", 3500, 100, 4880140], "11": ["vertical_and_slash", 3500, 100, 2083345], "12": ["vertical_and_slash", 3500, 100, 4683160], "13": ["vertical_and_slash", 3500, 100, 3650326], "14": ["vertical_and_slash", 3500, 100, 1071456], "15": ["vertical_and_slash", 1000, 4096, 3490570], "16": ["vertical_and_slash", 1000, 4096, 1082160], "17": ["vertical_and_slash", 3500, 100, 6888781], "18": ["vertical_and_slash", 1000, 4096, 2664476], "19": ["vertical_and_slash", 3500, 100, 2759933], "20": ["vertical_and_slash", 100, 800, 653736], "21": ["vertical_and_slash", 3500, 100, 9517662], "22": ["vertical_and_slash", 3500, 100, 3973048], "23": ["vertical_and_slash", 3500, 100, 5761264], "24": ["vertical_and_slash", 3500, 100, 13615692], "25": ["vertical_and_slash", 1000, 4096, 5235320], "26": ["vertical_and_slash", 3500, 100, 10009513], "27": ["vertical_and_slash", 1000, 4096, 2682717], "28": ["vertical_and_slash", 3500, 100, 11382630], "29": ["vertical_and_slash", 3500, 100, 3802301], "30": ["vertical_and_slash", 1000, 4096, 3025864], "31": ["vertical_and_slash", 1000, 4096, 1725752]}, {"0": ["vertical_and_slash", 1000, 4096, 12877084], "1": ["vertical_and_slash", 1000, 4096, 11642564], "2": ["vertical_and_slash", 1000, 4096, 10978654], "3": ["vertical_and_slash", 3500, 100, 14674762], "4": ["vertical_and_slash", 1000, 4096, 8335239], "5": ["vertical_and_slash", 1000, 4096, 11808042], "6": ["vertical_and_slash", 1000, 4096, 10213550], "7": ["vertical_and_slash", 3500, 100, 14957853], "8": ["vertical_and_slash", 500, 700, 19867441], "9": ["vertical_and_slash", 100, 800, 10566603], "10": ["vertical_and_slash", 3500, 100, 19670449], "11": ["vertical_and_slash", 1000, 4096, 12608408], "12": ["vertical_and_slash", 3500, 100, 19432490], "13": ["vertical_and_slash", 3500, 100, 21127812], "14": ["vertical_and_slash", 3500, 100, 16648204], "15": ["vertical_and_slash", 1000, 4096, 10819630], "16": ["vertical_and_slash", 3500, 100, 5741199], "17": ["vertical_and_slash", 3500, 100, 2265976], "18": ["vertical_and_slash", 1000, 4096, 1571848], "19": ["vertical_and_slash", 3500, 100, 12168656], "20": ["vertical_and_slash", 3500, 100, 12687129], "21": ["vertical_and_slash", 1000, 4096, 4052254], "22": ["vertical_and_slash", 3500, 100, 9260206], "23": ["vertical_and_slash", 1000, 4096, 4467273], "24": ["vertical_and_slash", 100, 800, 17813181], "25": ["vertical_and_slash", 3500, 100, 21532596], "26": ["vertical_and_slash", 1000, 4096, 14291589], "27": ["vertical_and_slash", 1000, 4096, 17941032], "28": ["vertical_and_slash", 1000, 4096, 20269858], "29": ["vertical_and_slash", 100, 800, 16481898], "30": ["vertical_and_slash", 100, 800, 14035138], "31": ["vertical_and_slash", 3500, 100, 5218579]}, {"0": ["vertical_and_slash", 1000, 4096, 15472775], "1": ["vertical_and_slash", 500, 700, 16487444], "2": ["vertical_and_slash", 1000, 4096, 13062108], "3": ["vertical_and_slash", 1000, 4096, 17155780], "4": ["vertical_and_slash", 1000, 4096, 9528835], "5": ["vertical_and_slash", 1000, 4096, 18482684], "6": ["vertical_and_slash", 1000, 4096, 17086801], "7": ["vertical_and_slash", 100, 800, 16495168], "8": ["vertical_and_slash", 1000, 4096, 6931295], "9": 
["vertical_and_slash", 3500, 100, 21960054], "10": ["vertical_and_slash", 1000, 4096, 13941150], "11": ["vertical_and_slash", 3500, 100, 6249722], "12": ["vertical_and_slash", 1000, 4096, 12292065], "13": ["vertical_and_slash", 3500, 100, 14056066], "14": ["vertical_and_slash", 1000, 4096, 17988711], "15": ["vertical_and_slash", 3500, 100, 13838932], "16": ["vertical_and_slash", 3500, 100, 11542474], "17": ["vertical_and_slash", 1000, 4096, 10272174], "18": ["vertical_and_slash", 3500, 100, 10106952], "19": ["vertical_and_slash", 1000, 4096, 11953729], "20": ["vertical_and_slash", 1000, 4096, 12125335], "21": ["vertical_and_slash", 1000, 4096, 5421557], "22": ["vertical_and_slash", 1000, 4096, 17046156], "23": ["vertical_and_slash", 1000, 4096, 13763363], "24": ["vertical_and_slash", 1000, 4096, 14971340], "25": ["vertical_and_slash", 1000, 4096, 13949429], "26": ["vertical_and_slash", 1000, 4096, 13427580], "27": ["vertical_and_slash", 1000, 4096, 12712355], "28": ["vertical_and_slash", 1000, 4096, 10262417], "29": ["vertical_and_slash", 1000, 4096, 14593517], "30": ["vertical_and_slash", 3500, 100, 19020287], "31": ["vertical_and_slash", 1000, 4096, 16309396]}, {"0": ["vertical_and_slash", 100, 800, 6402139], "1": ["vertical_and_slash", 500, 700, 8580595], "2": ["vertical_and_slash", 3500, 100, 6974040], "3": ["vertical_and_slash", 500, 700, 9230357], "4": ["vertical_and_slash", 500, 700, 1458178], "5": ["vertical_and_slash", 3500, 100, 12626929], "6": ["vertical_and_slash", 500, 700, 7367522], "7": ["vertical_and_slash", 30, 800, 16753754], "8": ["vertical_and_slash", 100, 800, 16185443], "9": ["vertical_and_slash", 30, 800, 13212259], "10": ["vertical_and_slash", 30, 800, 16869582], "11": ["vertical_and_slash", 100, 800, 8982160], "12": ["vertical_and_slash", 3500, 100, 15101824], "13": ["vertical_and_slash", 500, 700, 10028751], "14": ["vertical_and_slash", 30, 800, 18999889], "15": ["vertical_and_slash", 100, 800, 15535188], "16": ["vertical_and_slash", 1000, 4096, 3376934], "17": ["vertical_and_slash", 1000, 4096, 3838435], "18": ["vertical_and_slash", 1000, 4096, 2789787], "19": ["vertical_and_slash", 1000, 4096, 9668519], "20": ["vertical_and_slash", 500, 700, 16137894], "21": ["vertical_and_slash", 1000, 4096, 3380197], "22": ["vertical_and_slash", 500, 700, 6788616], "23": ["vertical_and_slash", 1000, 4096, 4978497], "24": ["vertical_and_slash", 3500, 100, 9896749], "25": ["vertical_and_slash", 500, 700, 20982412], "26": ["vertical_and_slash", 1000, 4096, 5738438], "27": ["vertical_and_slash", 1000, 4096, 14533987], "28": ["vertical_and_slash", 3500, 100, 11385648], "29": ["vertical_and_slash", 30, 800, 11091461], "30": ["vertical_and_slash", 1000, 4096, 7801211], "31": ["vertical_and_slash", 1000, 4096, 12946499]}, {"0": ["vertical_and_slash", 1000, 4096, 8005141], "1": ["vertical_and_slash", 30, 800, 9683398], "2": ["vertical_and_slash", 100, 800, 15684848], "3": ["vertical_and_slash", 30, 800, 10783581], "4": ["vertical_and_slash", 30, 800, 12674711], "5": ["vertical_and_slash", 100, 800, 17627426], "6": ["vertical_and_slash", 500, 700, 6603740], "7": ["vertical_and_slash", 30, 800, 8037793], "8": ["vertical_and_slash", 1000, 4096, 18603355], "9": ["vertical_and_slash", 100, 800, 18175297], "10": ["vertical_and_slash", 1000, 4096, 15415235], "11": ["vertical_and_slash", 100, 800, 8188133], "12": ["vertical_and_slash", 100, 800, 16790430], "13": ["vertical_and_slash", 1000, 4096, 4440951], "14": ["vertical_and_slash", 1000, 4096, 12155674], "15": ["vertical_and_slash", 3500, 
100, 18728501], "16": ["vertical_and_slash", 30, 800, 8282869], "17": ["vertical_and_slash", 30, 800, 18611641], "18": ["vertical_and_slash", 30, 800, 7125529], "19": ["vertical_and_slash", 30, 800, 9867525], "20": ["vertical_and_slash", 100, 800, 8121064], "21": ["vertical_and_slash", 100, 800, 8406786], "22": ["vertical_and_slash", 30, 800, 11020990], "23": ["vertical_and_slash", 30, 800, 4944682], "24": ["vertical_and_slash", 30, 800, 16714152], "25": ["vertical_and_slash", 30, 800, 9194588], "26": ["vertical_and_slash", 500, 700, 9003731], "27": ["vertical_and_slash", 1000, 4096, 6939820], "28": ["vertical_and_slash", 500, 700, 10839557], "29": ["vertical_and_slash", 500, 700, 14432584], "30": ["vertical_and_slash", 100, 800, 12363347], "31": ["vertical_and_slash", 30, 800, 14465663]}]
diff --git a/minference/configs/model2path.py b/minference/configs/model2path.py
new file mode 100644
index 0000000..16ec0fb
--- /dev/null
+++ b/minference/configs/model2path.py
@@ -0,0 +1,33 @@
+# Copyright (c) 2024 Microsoft
+# Licensed under The MIT License [see LICENSE for details]
+
+import os
+
+BASE_DIR = os.path.dirname(os.path.abspath(__file__))
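+# Maps Hugging Face model names to the offline-searched best sparse-pattern config shipped with the package.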
+MODEL2PATH = {
+ "gradientai/Llama-3-8B-Instruct-262k": os.path.join(
+ BASE_DIR, "Llama_3_8B_Instruct_262k_kv_out_v32_fit_o_best_pattern.json"
+ ),
+ "gradientai/Llama-3-8B-Instruct-Gradient-1048k": os.path.join(
+ BASE_DIR, "Llama_3_8B_Instruct_262k_kv_out_v32_fit_o_best_pattern.json"
+ ),
+ "gradientai/Llama-3-8B-Instruct-Gradient-4194k": os.path.join(
+ BASE_DIR, "Llama_3_8B_Instruct_262k_kv_out_v32_fit_o_best_pattern.json"
+ ),
+ "01-ai/Yi-9B-200K": os.path.join(
+ BASE_DIR, "Yi_9B_200k_kv_out_v32_fit_o_best_pattern.json"
+ ),
+ "microsoft/Phi-3-mini-128k-instruct": os.path.join(
+ BASE_DIR, "Phi_3_mini_128k_instruct_kv_out_v32_fit_o_best_pattern.json"
+ ),
+ "Qwen/Qwen2-7B-Instruct": os.path.join(
+ BASE_DIR, "Qwen2_7B_Instruct_128k_instruct_kv_out_v32_fit_o_best_pattern.json"
+ ),
+ "THUDM/glm-4-9b-chat-1m": os.path.join(
+ BASE_DIR, "GLM_4_9B_1M_instruct_kv_out_v32_fit_o_best_pattern.json"
+ ),
+}
+
+
+def get_support_models():
+ return list(MODEL2PATH.keys())
diff --git a/minference/minference_configuration.py b/minference/minference_configuration.py
new file mode 100644
index 0000000..204fae9
--- /dev/null
+++ b/minference/minference_configuration.py
@@ -0,0 +1,59 @@
+# Copyright (c) 2024 Microsoft
+# Licensed under The MIT License [see LICENSE for details]
+
+import os
+
+from .configs.model2path import MODEL2PATH
+
+
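+# Holds the patching options: which attention implementation to apply and, for the sparse variants,
+# where to find the per-head best-pattern config.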
+class MInferenceConfig:
+ MINFERENCE_ATTENTION_TYPES = [
+ "minference",
+ "vllm",
+ ]
+ STASTIC_ATTENTION_TYPES = [
+ "minference_with_dense",
+ "static",
+ "dilated1",
+ "dilated2",
+ "streaming",
+ "inf_llm",
+ "hf",
+ ]
+
+ def __init__(
+ self,
+ attn_type: str = "minference",
+ model_name: str = None,
+ config_path: str = None,
+ starting_layer: int = -1,
+ kv_cache_cpu: bool = False,
+ kv_cache_cpu_device: str = "cpu",
+ use_snapkv: bool = False,
+ is_search: bool = False,
+ attn_kwargs: dict = {},
+ **kwargs,
+ ):
+ super(MInferenceConfig, self).__init__()
+ assert (
+ attn_type in self.MINFERENCE_ATTENTION_TYPES + self.STASTIC_ATTENTION_TYPES
+ ), f"The attention_type {attn_type} you specified is not supported."
+ self.attn_type = attn_type
+ self.config_path = self.update_config_path(config_path, model_name)
+ self.model_name = model_name
+ self.is_search = is_search
+ self.starting_layer = starting_layer
+ self.kv_cache_cpu = kv_cache_cpu
+ self.kv_cache_cpu_device = kv_cache_cpu_device
+ self.use_snapkv = use_snapkv
+ self.attn_kwargs = attn_kwargs
+
+ def update_config_path(self, config_path: str, model_name: str):
+ if self.attn_type in self.STASTIC_ATTENTION_TYPES:
+ return ""
+ if config_path is not None:
+ return config_path
+ assert (
+ model_name in MODEL2PATH
+ ), f"The model {model_name} you specified is not supported. You are welcome to add it and open a PR :)"
+ return MODEL2PATH[model_name]
diff --git a/minference/models_patch.py b/minference/models_patch.py
new file mode 100644
index 0000000..d23141f
--- /dev/null
+++ b/minference/models_patch.py
@@ -0,0 +1,107 @@
+# Copyright (c) 2024 Microsoft
+# Licensed under The MIT License [see LICENSE for details]
+
+import os
+
+from .minference_configuration import MInferenceConfig
+from .patch import minference_patch, minference_patch_vllm, patch_hf
+
+os.environ["TOKENIZERS_PARALLELISM"] = "false"
+
+
+class MInference:
+ def __init__(
+ self,
+ attn_type: str = "minference",
+ model_name: str = None,
+ config_path: str = None,
+ starting_layer: int = -1,
+ kv_cache_cpu: bool = False,
+ use_snapkv: bool = False,
+ is_search: bool = False,
+ attn_kwargs: dict = {},
+ **kwargs,
+ ):
+ super(MInference, self).__init__()
+ self.config = MInferenceConfig(
+ attn_type=attn_type,
+ model_name=model_name,
+ config_path=config_path,
+ starting_layer=starting_layer,
+ kv_cache_cpu=kv_cache_cpu,
+ use_snapkv=use_snapkv,
+ is_search=is_search,
+ attn_kwargs=attn_kwargs,
+ **kwargs,
+ )
+
+ def __call__(self, model):
+ return self.patch_model(model)
+
+ def patch_model(self, model):
+ if self.config.attn_type != "vllm":
+ model.config.starting_layer = self.config.starting_layer
+ model.config.config_path = self.config.config_path
+
+ if self.config.attn_type == "minference":
+ model.config.is_search = self.config.is_search
+ model = minference_patch(model, self.config)
+
+ elif self.config.attn_type == "minference_with_dense":
+ model.config.dense = True
+ model = minference_patch(model, self.config)
+
+ elif self.config.attn_type == "dilated1":
+ model.config.dilated1 = True
+ model = minference_patch(model, self.config)
+
+ elif self.config.attn_type == "static":
+ model.config.static_pattern = True
+ model = minference_patch(model, self.config)
+
+ elif self.config.attn_type == "dilated2":
+ model.config.dilated2 = True
+ model = minference_patch(model, self.config)
+
+ elif self.config.attn_type == "streaming":
+ model.config.streaming = True
+ model.config.streaming_kwargs = {
+ "n_local": 3968,
+ "n_init": 128,
+ **self.config.attn_kwargs,
+ }
+ model = minference_patch(model, self.config)
+
+ elif self.config.attn_type == "streaming2":
+ model = patch_hf(
+ model,
+ attn_type="streaming",
+ attn_kwargs={"n_local": 3968, "n_init": 128, **self.config.attn_kwargs},
+ )
+ elif self.config.attn_type == "hf":
+ pass
+ elif self.config.attn_type == "inf_llm":
+ model = patch_hf(
+ model,
+ attn_type="inf_llm",
+ attn_kwargs={
+ "block_size": 128,
+ "n_init": 128,
+ "n_local": 4096,
+ "topk": 16,
+ "repr_topk": 4,
+ "max_cached_block": 32,
+ "exc_block_size": 512,
+ "base": 1000000,
+ "distance_scale": 1.0,
+ "dense_decoding": True,
+ **self.config.attn_kwargs,
+ },
+ )
+ elif self.config.attn_type == "vllm":
+ model = minference_patch_vllm(model, self.config.config_path)
+ else:
+ raise ValueError(
+ f"The attention type {self.config.attn_type} you specified is not supported."
+ )
+ return model
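+
+# Usage sketch (illustrative; assumes a model already loaded with transformers):
+#   minference_patch = MInference("minference", model_name)
+#   model = minference_patch(model)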
diff --git a/minference/modules/inf_llm.py b/minference/modules/inf_llm.py
new file mode 100644
index 0000000..f247dec
--- /dev/null
+++ b/minference/modules/inf_llm.py
@@ -0,0 +1,1300 @@
+# Copyright (c) 2024 Microsoft
+# Licensed under The MIT License [see LICENSE for details]
+# Refer to the code in https://github.com/thunlp/InfLLM/blob/main/inf_llm/attention/context_manager.py
+
+import sys
+from copy import deepcopy
+from typing import Optional, Tuple
+
+import torch
+from flash_attn import flash_attn_func
+from transformers.modeling_outputs import CausalLMOutput
+
+from ..ops.streaming_kernel import TritonMultiStageDotProductionAttention
+
+
+class CudaCache:
+ def __init__(self, num_units, unit_size, dtype):
+ self.num_units = num_units
+ self.unit_size = unit_size
+ self.dtype = dtype
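+ # One contiguous GPU buffer split into `num_units` equal slots; `idle_set` tracks the free slot indices.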
+ self.data = torch.empty((num_units, unit_size), device="cuda", dtype=dtype)
+ self.idle_set = set(list(range(num_units)))
+
+ def alloc(self):
+ assert len(self.idle_set) > 0
+
+ idx = self.idle_set.pop()
+ return self.data[idx], idx
+
+ def delete(self, idx):
+ assert idx not in self.idle_set
+ self.idle_set.add(idx)
+
+
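+# A single offloaded KV block: stored on CPU (optionally pinned), with an optional cached copy in the shared CudaCache.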
+class MemoryUnit:
+ def __init__(
+ self,
+ kv: Tuple[torch.Tensor, torch.Tensor],
+ cache: CudaCache,
+ load_to_cache: bool = False,
+ pin_memory: bool = False,
+ ):
+ self.cache = cache
+
+ if kv[0].is_cuda:
+ cpu_data = tuple(_t.contiguous().to("cpu", non_blocking=True) for _t in kv)
+ else:
+ cpu_data = tuple(_t.contiguous() for _t in kv)
+
+ if pin_memory:
+ cpu_data = tuple(_t.pin_memory() for _t in cpu_data)
+
+ if load_to_cache:
+ gpu_data, gpu_data_id = cache.alloc()
+ gpu_data = gpu_data.view((2,) + kv[0].shape)
+ gpu_data[0].copy_(kv[0], non_blocking=True)
+ gpu_data[1].copy_(kv[1], non_blocking=True)
+ event = torch.cuda.Event()
+ event.record(torch.cuda.current_stream())
+ else:
+ gpu_data, gpu_data_id = None, None
+ event = None
+
+ self.cpu_data = cpu_data
+ self.gpu_data = gpu_data
+ self.gpu_data_id = gpu_data_id
+ self.event = event
+
+ def load(self, target: Optional[Tuple[torch.Tensor, torch.Tensor]] = None) -> bool:
+ if self.gpu_data is not None:
+ if target is not None:
+ target[0].copy_(self.gpu_data[0], non_blocking=True)
+ target[1].copy_(self.gpu_data[1], non_blocking=True)
+ target_event = torch.cuda.Event()
+ target_event.record(torch.cuda.current_stream())
+ else:
+ target_event = None
+
+ return False, target_event
+
+ gpu_data, gpu_data_id = self.cache.alloc()
+ gpu_data = gpu_data.view((2,) + self.cpu_data[0].shape)
+ if target is not None:
+ target[0].copy_(self.cpu_data[0], non_blocking=True)
+ target[1].copy_(self.cpu_data[1], non_blocking=True)
+ target_event = torch.cuda.Event()
+ target_event.record(torch.cuda.current_stream())
+ gpu_data[0].copy_(target[0], non_blocking=True)
+ gpu_data[1].copy_(target[1], non_blocking=True)
+
+ else:
+ gpu_data[0].copy_(self.cpu_data[0], non_blocking=True)
+ gpu_data[1].copy_(self.cpu_data[1], non_blocking=True)
+
+ event = torch.cuda.Event()
+ event.record(torch.cuda.current_stream())
+ self.event = event
+ self.gpu_data = gpu_data
+ self.gpu_data_id = gpu_data_id
+
+ return True, target_event
+
+ def get(self):
+ assert self.gpu_data is not None
+ self.event.wait()
+ return self.gpu_data
+
+ def offload(self):
+ assert self.gpu_data is not None
+ self.event.wait()
+ self.gpu_data = None
+ self.cache.delete(self.gpu_data_id)
+ self.gpu_data_id = None
+
+
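+# Growable GPU matrix of block representative keys; supports append and inner-product top-k retrieval.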
+class VectorTensor:
+ def __init__(self, hidden_size, element_dtype):
+ init_cached_size = 16
+ self.data = torch.empty(
+ (init_cached_size, hidden_size), dtype=element_dtype, device="cuda"
+ )
+ self.length = 0
+ self.cache_size = init_cached_size
+ self.hidden_size = hidden_size
+
+ def append_cache(self):
+ new_cache_size = self.cache_size * 2
+ data_shape = self.data.shape
+ new_data = torch.empty(
+ (new_cache_size,) + data_shape[1:], device="cuda", dtype=self.data.dtype
+ )
+ new_data[: self.cache_size, ...].copy_(self.data)
+ self.data = new_data
+ self.cache_size = new_cache_size
+
+ def append(self, tensor: torch.Tensor):
+ assert tensor.dtype == self.data.dtype
+ assert tensor.size(1) == self.hidden_size
+ assert tensor.is_contiguous()
+
+ append_l = tensor.size(0)
+
+ while self.length + append_l > self.cache_size:
+ self.append_cache()
+
+ self.data[self.length : self.length + append_l, ...].copy_(tensor)
+
+ self.length += append_l
+
+ def get_data(self):
+ return self.data[: self.length, ...]
+
+ def get_topk(self, tensor: torch.Tensor, topk): # inner product
+ assert tensor.dim() == 1 and tensor.size(0) == self.hidden_size
+ logits = torch.matmul(self.data[: self.length], tensor[:, None]).squeeze(dim=-1)
+ assert logits.dim() == 1 and logits.size(0) == self.length
+ return logits.topk(topk, dim=0).indices.cpu().tolist()
+
+ def __len__(self):
+ return self.length
+
+
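+# Alternative to VectorTensor that keeps the representative keys in a CPU FAISS inner-product index.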
+class Faiss:
+ def __init__(self, hidden_size, element_dtype):
+ import faiss
+
+ # We use the CPU index here because the GPU index requires a long initialization time
+ self.index = faiss.IndexFlatIP(hidden_size)
+ self.hidden_size = hidden_size
+
+ def append(self, tensor: torch.Tensor):
+ assert tensor.dim() == 2 and tensor.size(1) == self.hidden_size
+ self.index.add(tensor.cpu().float().numpy().astype("float32"))
+
+ def get_data(self):
+ raise ValueError
+
+ def get_topk(self, tensor: torch.Tensor, topk):
+ assert tensor.dim() == 1 and tensor.size(0) == self.hidden_size
+ xq = tensor[None, :].cpu().float().numpy().astype("float32")
+ topk_index = self.index.search(xq, topk)[1][0].tolist()
+ return topk_index
+
+ def __len__(self):
+ return self.index.ntotal
+
+
+GLOBAL_STREAM = None
+
+
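+# Core InfLLM KV manager: a sliding local window, the initial tokens, and block-level global memory
+# with top-k block retrieval and LRU / LRU-S cache eviction.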
+class ContextManager:
+ def __init__(
+ self,
+ position_embedding,
+ n_init,
+ n_local,
+ block_size,
+ max_cached_block,
+ topk,
+ exc_block_size,
+ score_decay: Optional[float] = None,
+ repr_topk: int = 1,
+ cache_strategy="lru",
+ chunk_topk_calc: Optional[int] = None,
+ async_global_stream: bool = False,
+ pin_memory: bool = False,
+ faiss: bool = False,
+ perhead: bool = False,
+ dense_decoding: bool = False,
+ ):
+ self.length = 0
+ self.position_embedding = position_embedding
+ self.n_init = n_init
+ self.n_local = n_local
+ self.block_size = block_size
+ self.max_cached_block = max_cached_block
+ self.exc_block_size = exc_block_size
+ self.score_decay = score_decay
+ assert exc_block_size <= n_local # no global token in input
+ self.topk = topk
+ self.Attn = TritonMultiStageDotProductionAttention
+ self.initialized = False
+ self.repr_topk = repr_topk
+ self.cache_strategy = cache_strategy
+ self.load_count = 0
+ self.chunk_topk_calc = chunk_topk_calc
+ self.async_global_stream = async_global_stream
+ self.pin_memory = pin_memory
+ self.faiss = faiss
+ self.perhead = perhead
+
+ self.dense_decoding = dense_decoding
+
+ global GLOBAL_STREAM
+ if self.async_global_stream and GLOBAL_STREAM is None:
+ GLOBAL_STREAM = torch.cuda.Stream()
+
+ assert cache_strategy in ["lru", "lru-s"]
+
+ if cache_strategy == "lru-s":
+ self.calc_block_score = True
+ else:
+ self.calc_block_score = False
+
+ def remove_lru_blocks(
+ self, u, num_remove: Optional[int] = None, ignore_blocks=None
+ ):
+ if num_remove is None:
+ num_remove = len(self.cached_blocks[u]) - self.max_cached_block
+
+ if num_remove <= 0:
+ return
+
+ lst = list(self.cached_blocks[u].items())
+ lst.sort(key=lambda x: x[1])
+
+ removed = 0
+ for i in range(len(lst)):
+ idx = lst[i][0]
+ if ignore_blocks is None or (idx not in ignore_blocks):
+ self.global_blocks[u][idx].offload()
+ self.cached_blocks[u].pop(idx)
+ removed += 1
+
+ if removed >= num_remove:
+ return
+
+ def get_block_k(self, k, score):
+ assert isinstance(score, torch.Tensor)
+ assert k.dim() >= 2
+ k = self.from_group_kv(k)
+ assert k.shape[:-1] == score.shape
+ assert k.shape[-2] == self.block_size
+ score_topk = score.topk(self.repr_topk, dim=-1).indices
+ assert score_topk.shape == (self.num_units, self.unit_size, self.repr_topk)
+ ret = torch.gather(
+ k,
+ -2,
+ score_topk[:, :, :, None].expand(
+ self.num_units, self.unit_size, self.repr_topk, self.dim_head
+ ),
+ )
+ return ret
+
+ def from_group_kv(self, tensor):
+ assert tensor.dim() == 4
+ assert tensor.size(1) == self.num_heads_kv
+ if self.num_heads == self.num_heads_kv:
+ return tensor
+ _, _, length, dim_head = tensor.shape
+ num_group = self.num_heads // self.num_heads_kv
+ tensor = tensor.view((self.num_units, self.unit_size_kv, 1, length, dim_head))
+ tensor = tensor.expand(
+ (self.num_units, self.unit_size_kv, num_group, length, dim_head)
+ ).reshape((self.num_units, self.num_heads, length, dim_head))
+ return tensor
+
+ def init(self, local_q, local_k, local_v, global_q, global_k, global_v):
+ assert local_q.dim() == 4
+ batch_size, num_heads, len_q, dim_head = local_q.shape
+ num_heads_kv = local_k.size(1)
+
+ for _t in [local_q, local_k, local_v, global_q, global_k, global_v]:
+ assert _t.size(0) == batch_size
+ assert _t.size(1) == num_heads or _t.size(1) == num_heads_kv
+ assert _t.size(2) == len_q
+ assert _t.size(3) == dim_head
+ assert _t.is_cuda
+
+ self.batch_size = batch_size
+ self.num_heads = num_heads
+ self.num_heads_kv = num_heads_kv
+ self.dim_head = dim_head
+ self.num_units = batch_size
+ self.unit_size = num_heads
+ self.unit_size_kv = num_heads_kv
+
+ self.global_blocks = [[] for _ in range(self.num_units)] # [[memory_unit]]
+ self.cached_blocks = [
+ {} for _ in range(self.num_units)
+ ] # [{block_id: block_score}]
+ self.num_global_block = 0
+
+ if self.faiss:
+ self.block_k = [
+ Faiss(dim_head * self.unit_size, global_k.dtype)
+ for _ in range(self.num_units)
+ ]
+ else:
+ self.block_k = [
+ VectorTensor(dim_head * self.unit_size, global_k.dtype)
+ for _ in range(self.num_units)
+ ]
+
+ self.local_k = torch.empty(
+ (self.num_units, self.unit_size_kv, 0, dim_head),
+ dtype=local_k.dtype,
+ device=local_k.device,
+ )
+ self.local_v = torch.empty(
+ (self.num_units, self.unit_size_kv, 0, dim_head),
+ dtype=local_v.dtype,
+ device=local_v.device,
+ )
+
+ if self.dense_decoding:
+ self.dense_k = torch.empty(
+ (self.num_units, self.unit_size_kv, 0, dim_head),
+ dtype=local_k.dtype,
+ device=local_k.device,
+ )
+ self.dense_v = torch.empty(
+ (self.num_units, self.unit_size_kv, 0, dim_head),
+ dtype=local_v.dtype,
+ device=local_v.device,
+ )
+
+ self.global_remainder = (
+ torch.empty(
+ (self.num_units, self.unit_size_kv, 0, dim_head),
+ dtype=global_k.dtype,
+ device=global_k.device,
+ ),
+ torch.empty(
+ (self.num_units, self.unit_size_kv, 0, dim_head),
+ dtype=global_v.dtype,
+ device=global_v.device,
+ ),
+ )
+
+ self.global_remainder_local_score = torch.empty(
+ (self.num_units, self.unit_size, 0),
+ dtype=global_k.dtype,
+ device=global_k.device,
+ )
+
+ self.init_k = torch.empty(
+ (self.num_units, self.unit_size_kv, 0, dim_head),
+ dtype=global_k.dtype,
+ device=global_k.device,
+ )
+ self.init_v = torch.empty(
+ (self.num_units, self.unit_size_kv, 0, dim_head),
+ dtype=global_k.dtype,
+ device=global_k.device,
+ )
+ self.init_exc = False
+ self.dtype = local_q.dtype
+ self.position_embedding._update_cos_sin_tables_len(
+ self.n_local + self.exc_block_size + 1, local_k.device, local_k.dim()
+ )
+
+ buffer_len = (
+ self.topk * self.block_size
+ + self.exc_block_size
+ + self.block_size
+ + self.n_init
+ )
+ self.global_buffer = torch.zeros(
+ (2, self.num_units, self.unit_size_kv, buffer_len, dim_head),
+ dtype=global_k.dtype,
+ device=global_k.device,
+ )
+ self.global_buffer_block_id_list = [
+ [-1] * self.topk for _ in range(self.num_units)
+ ]
+ self.global_buffer_init_st = 0
+ self.global_buffer_init_ed = 0
+ self.cuda_cache = CudaCache(
+ self.max_cached_block * self.num_units,
+ self.unit_size_kv * self.block_size * dim_head * 2,
+ local_k.dtype,
+ )
+
+ self.initialized = True
+
+ def calc_block_topk(self, global_h_q):
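+ # Pick the top-k global blocks for the current queries via inner product with each block's
+ # representative key; fall back to all blocks when only a few exist.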
+ if not self._use_chunk_topk:
+ if self.num_global_block <= self.topk:
+ return [
+ list(range(len(self.global_blocks[0])))
+ for _ in range(self.num_units)
+ ]
+
+ global_h_q = global_h_q.mean(dim=2, keepdim=False)
+ assert global_h_q.shape == (self.num_units, self.unit_size, self.dim_head)
+ global_h_q = global_h_q.reshape(
+ self.num_units, self.dim_head * self.unit_size
+ )
+ ret = []
+ for u in range(self.num_units):
+ ret.append(self.block_k[u].get_topk(global_h_q[u], self.topk))
+
+ else:
+ return self._cached_topk[self._topk_cur]
+
+ return ret
+
+ def get_global_hidden_and_mask(self, len_q, block_topk):
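+ # Assemble the global K/V for this chunk: load the selected blocks into the shared buffer,
+ # then append the initial tokens and the not-yet-blocked remainder.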
+ assert len(block_topk) == self.num_units
+ global_block_map = [[] for _ in range(self.num_units)]
+ global_remainder_len = max(
+ self._global_remainder_ed
+ - self._global_remainder_st
+ + len_q
+ - self.n_local,
+ 0,
+ )
+ init_len = self.init_k.size(-2)
+ sliding_window = None
+
+ global_h_k = self.global_buffer[0]
+ global_h_v = self.global_buffer[1]
+
+ block_num = len(block_topk[0])
+ for u in range(self.num_units):
+ assert len(block_topk[u]) == block_num
+
+ block_topk[u].sort()
+ global_block_map[u] = deepcopy(self.global_buffer_block_id_list[u])
+ for b_idx in block_topk[u]:
+ if b_idx in global_block_map[u]:
+ continue
+
+ st = -1
+ ed = -1
+ for j in range(self.topk):
+ if (
+ global_block_map[u][j] == -1
+ or global_block_map[u][j] not in block_topk[u]
+ ):
+ st = j * self.block_size
+ ed = st + self.block_size
+ global_block_map[u][j] = b_idx
+ break
+
+ assert b_idx in self.cached_blocks[u]
+ self.global_blocks[u][b_idx].load(
+ (global_h_k[u, :, st:ed, :], global_h_v[u, :, st:ed, :])
+ )
+
+ init_st = block_num * self.block_size
+ init_ed = init_st + init_len
+ if (
+ self.global_buffer_init_st != init_st
+ or self.global_buffer_init_ed != init_ed
+ ):
+ global_h_k[:, :, init_st:init_ed, :].copy_(self.init_k, non_blocking=True)
+ global_h_v[:, :, init_st:init_ed, :].copy_(self.init_v, non_blocking=True)
+
+ ed = init_ed
+
+ rmd_st = init_ed
+ rmd_ed = rmd_st + global_remainder_len
+ ed = rmd_ed
+ global_h_k[:, :, rmd_st:rmd_ed, :].copy_(
+ self.global_remainder[0][
+ :,
+ :,
+ self._global_remainder_st : self._global_remainder_st
+ + global_remainder_len,
+ :,
+ ],
+ non_blocking=True,
+ )
+ global_h_v[:, :, rmd_st:rmd_ed, :].copy_(
+ self.global_remainder[1][
+ :,
+ :,
+ self._global_remainder_st : self._global_remainder_st
+ + global_remainder_len,
+ :,
+ ],
+ non_blocking=True,
+ )
+
+ sliding_window = (self.global_remainder[0].size(-2) + rmd_st, self.n_local)
+
+ self.global_buffer_block_id_list = deepcopy(global_block_map)
+ self.global_buffer_init_st = init_st
+ self.global_buffer_init_ed = init_ed
+
+ for u in range(self.num_units):
+ assert max(global_block_map[u][block_num:] + [-1]) == -1
+ assert min(global_block_map[u][:block_num] + [0]) > -1
+ global_block_map[u] = list(global_block_map[u][:block_num])
+
+ global_h_k = global_h_k[:, :, :ed, :]
+ global_h_v = global_h_v[:, :, :ed, :]
+ return global_h_k, global_h_v, sliding_window, global_block_map, block_num
+
+ def update_block_score(
+ self, global_score: torch.FloatTensor, global_block_map, global_block_num
+ ):
+ if global_score is not None:
+ global_score = global_score[:, :, : global_block_num * self.block_size]
+ assert global_score.shape == (
+ self.num_units,
+ self.unit_size,
+ global_block_num * self.block_size,
+ )
+ global_score = global_score.view(
+ self.num_units, self.unit_size, global_block_num, self.block_size
+ )
+ global_score = global_score.sum(dim=-1).sum(dim=1)
+ assert global_score.shape == (self.num_units, global_block_num)
+ global_score = global_score.to(
+ device="cpu", non_blocking=False
+ ) # (num_units, global_block_num)
+ for u in range(self.num_units):
+ for k, v in self.cached_blocks[u].items():
+ self.cached_blocks[u][k] = v * self.score_decay
+ score = global_score[u].tolist()
+ assert len(score) >= len(global_block_map[u])
+ for s, i in zip(score, global_block_map[u]):
+ self.cached_blocks[u][i] += s
+
+ def _append(self, local_q, local_k, local_v, global_q):
+ # get local_h_q, local_h_k, local_h_v
+ local_h_q, local_h_k = self.position_embedding(local_q, local_k)
+ local_h_v = local_v
+
+ # calc local result first to overlap host-device communication
+ attn = self.Attn(local_h_q.shape, local_h_q.dtype, local_h_q.device)
+ attn.append(
+ local_h_q, local_h_k, local_h_v, get_score=True, sliding_window=self.n_local
+ )
+
+ # calc topk global repr k and load cache
+ with torch.cuda.stream(GLOBAL_STREAM):
+ block_topk = self.calc_block_topk(global_q)
+
+ for u in range(self.num_units):
+ num_remove = len(self.cached_blocks[u]) - self.max_cached_block
+ for bidx in block_topk[u]:
+ if bidx not in self.cached_blocks[u]:
+ num_remove += 1
+
+ # update cache
+ self.remove_lru_blocks(u, num_remove, block_topk[u])
+
+ if self.cache_strategy == "lru":
+ self.load_count += 1
+ for u in range(self.num_units):
+ for bidx in block_topk[u]:
+ self.cached_blocks[u][bidx] = self.load_count
+
+ elif self.cache_strategy == "lru-s":
+ for u in range(self.num_units):
+ for bidx in block_topk[u]:
+ self.cached_blocks[u][bidx] = 0
+ else:
+ raise ValueError
+
+ # get global_h_k, global_h_v, global_mask
+ # Because exc_block_size <= n_local, no global_k / global_v are used in the global part
+ global_h_q = global_q
+ (
+ global_h_k,
+ global_h_v,
+ global_sliding_window,
+ global_block_map,
+ global_block_num,
+ ) = self.get_global_hidden_and_mask(local_h_q.size(-2), block_topk)
+
+ if self.async_global_stream:
+ torch.cuda.current_stream().wait_stream(GLOBAL_STREAM)
+
+ # calc global result
+ attn.append(
+ global_h_q,
+ global_h_k,
+ global_h_v,
+ end=True,
+ get_score=self.calc_block_score,
+ sliding_window=global_sliding_window,
+ complement_sliding_window=True,
+ )
+
+ o, score_list = attn.get_result()
+ loc_score = score_list[0]
+ glb_score = score_list[1]
+
+ if self.async_global_stream:
+ GLOBAL_STREAM.wait_stream(torch.cuda.current_stream())
+
+ # update global score
+ with torch.cuda.stream(GLOBAL_STREAM):
+ self.update_block_score(glb_score, global_block_map, global_block_num)
+
+ return o.view((self.batch_size, self.num_heads, -1, self.dim_head)), loc_score
+
+ def get_batched_topk(self, global_q):
+ length = global_q.shape[2]
+ exc_num = (length + self.exc_block_size - 1) // self.exc_block_size
+ exc_block_num = length // self.exc_block_size
+ ret = []
+ if self.num_global_block <= self.topk:
+ for _ in range(exc_num):
+ ret.append(
+ [
+ list(range(len(self.global_blocks[0])))
+ for _ in range(self.num_units)
+ ]
+ )
+ return ret
+
+ global_h_q = global_q
+ assert global_h_q.dim() == 4
+ assert global_h_q.shape[:2] == (self.num_units, self.unit_size)
+ assert global_h_q.shape[3] == self.dim_head
+
+ block_k = torch.cat(
+ [self.block_k[u].get_data()[None, :, :] for u in range(self.num_units)],
+ dim=0,
+ )
+ assert block_k.shape == (
+ self.num_units,
+ self.num_global_block,
+ self.dim_head * self.unit_size,
+ )
+ block_k = (
+ block_k.reshape(
+ self.num_units, self.num_global_block, self.unit_size, self.dim_head
+ )
+ .permute(0, 2, 1, 3)
+ .contiguous()
+ )
+
+ if exc_block_num > 0:
+ tmp_global_h_q = (
+ global_h_q[:, :, : exc_block_num * self.exc_block_size, :]
+ .reshape(
+ self.num_units,
+ self.unit_size,
+ exc_block_num,
+ self.exc_block_size,
+ self.dim_head,
+ )
+ .mean(dim=-2)
+ )
+ assert tmp_global_h_q.shape == (
+ self.num_units,
+ self.unit_size,
+ exc_block_num,
+ self.dim_head,
+ )
+ block_score = torch.matmul(tmp_global_h_q, block_k.transpose(-1, -2)).mean(
+ dim=1
+ ) # (num_units, exc_block_num, num_global_block)
+ assert block_score.shape == (
+ self.num_units,
+ exc_block_num,
+ self.num_global_block,
+ )
+
+ indices = block_score.topk(self.topk, dim=-1).indices.cpu()
+ for b in range(exc_block_num):
+ tmp = []
+ for u in range(self.num_units):
+ tmp.append(indices[u, b].tolist())
+ assert len(tmp[-1]) == self.topk
+
+ ret.append(tmp)
+
+ if exc_block_num != exc_num:
+ tmp_global_h_q = (
+ global_h_q[:, :, exc_block_num * self.exc_block_size :, :]
+ .reshape(
+ self.num_units,
+ self.unit_size,
+ length - exc_block_num * self.exc_block_size,
+ self.dim_head,
+ )
+ .mean(dim=-2, keepdim=True)
+ )
+ assert tmp_global_h_q.shape == (
+ self.num_units,
+ self.unit_size,
+ 1,
+ self.dim_head,
+ )
+ block_score = torch.matmul(tmp_global_h_q, block_k.transpose(-1, -2))
+ assert block_score.shape == (
+ self.num_units,
+ self.unit_size,
+ 1,
+ self.num_global_block,
+ )
+ block_score = block_score.squeeze(dim=2).mean(dim=1)
+ assert block_score.shape == (self.num_units, self.num_global_block)
+ indices = block_score.topk(self.topk, dim=-1).indices.cpu()
+ tmp = []
+ for u in range(self.num_units):
+ tmp.append(indices[u].tolist())
+ assert len(tmp[-1]) == self.topk
+
+ ret.append(tmp)
+
+ return ret
+
+ def append_global(self, exc_length, kv_length, local_score):
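+ # Fold freshly processed tokens into global memory: fill the initial-token buffer first, then
+ # carve full blocks out of the remainder, storing each as a MemoryUnit plus its representative key.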
+ global_remainder_ed = self._global_remainder_ed + exc_length
+ global_remainder_st = self._global_remainder_st
+
+ global_remainder_len = global_remainder_ed - global_remainder_st
+
+ assert local_score.shape[:3] == (self.num_units, self.unit_size, kv_length)
+ local_score = local_score[:, :, -exc_length - self.n_local :]
+ self.global_remainder_local_score[
+ :, :, global_remainder_ed - local_score.size(-1) : global_remainder_ed
+ ].add_(local_score)
+
+ if not self.init_exc and global_remainder_len > self.n_local:
+ global_k = self.global_remainder[0]
+ global_v = self.global_remainder[1]
+
+ append_init_len = min(
+ self.n_init - self.init_k.size(-2), global_remainder_len - self.n_local
+ )
+ self.init_k = torch.cat(
+ (
+ self.init_k,
+ global_k[
+ :,
+ :,
+ global_remainder_st : global_remainder_st + append_init_len,
+ :,
+ ],
+ ),
+ dim=-2,
+ )
+ self.init_v = torch.cat(
+ (
+ self.init_v,
+ global_v[
+ :,
+ :,
+ global_remainder_st : global_remainder_st + append_init_len,
+ :,
+ ],
+ ),
+ dim=-2,
+ )
+ global_remainder_st += append_init_len
+ global_remainder_len -= append_init_len
+
+ if self.init_k.size(-2) == self.n_init:
+ self.init_exc = True
+
+ while global_remainder_len - self.block_size >= self.n_local:
+ global_remainder_len -= self.block_size
+ for u in range(self.num_units):
+ self.global_blocks[u].append(
+ (
+ MemoryUnit(
+ (
+ self.global_remainder[0][
+ u,
+ :,
+ global_remainder_st : global_remainder_st
+ + self.block_size,
+ :,
+ ],
+ self.global_remainder[1][
+ u,
+ :,
+ global_remainder_st : global_remainder_st
+ + self.block_size,
+ :,
+ ],
+ ),
+ self.cuda_cache,
+ False,
+ self.pin_memory,
+ )
+ )
+ )
+
+ global_block_k = self.get_block_k(
+ self.global_remainder[0][
+ :, :, global_remainder_st : global_remainder_st + self.block_size, :
+ ],
+ self.global_remainder_local_score[
+ :, :, global_remainder_st : global_remainder_st + self.block_size
+ ],
+ )
+ assert global_block_k.shape == (
+ self.num_units,
+ self.unit_size,
+ self.repr_topk,
+ self.dim_head,
+ )
+ global_block_k = global_block_k.mean(dim=-2, keepdim=False)
+ global_block_k = global_block_k.reshape(
+ self.num_units, self.unit_size * self.dim_head
+ )
+ global_block_k = global_block_k[:, None, :]
+
+ self.num_global_block += 1
+ for u in range(self.num_units):
+ self.block_k[u].append(global_block_k[u])
+ global_remainder_st += self.block_size
+
+ self._global_remainder_ed = global_remainder_ed
+ self._global_remainder_st = global_remainder_st
+
+ def append(
+ self,
+ local_q,
+ local_k,
+ local_v,
+ global_q,
+ global_k,
+ global_v,
+ ):
+ batch_size = local_q.size(0)
+ input_length = local_q.size(-2)
+
+ if self.perhead:
+ num_heads = local_q.size(1)
+ num_heads_kv = local_v.size(1)
+
+ def repeat_kv(t):
+ t = t.view(batch_size, num_heads_kv, 1, input_length, -1)
+ t = t.expand(
+ batch_size,
+ num_heads_kv,
+ num_heads // num_heads_kv,
+ input_length,
+ -1,
+ )
+ t = t.reshape(batch_size * num_heads, 1, input_length, -1)
+ return t
+
+ local_q = local_q.view(batch_size * num_heads, 1, input_length, -1)
+ local_k = repeat_kv(local_k)
+ local_v = repeat_kv(local_v)
+ global_q = global_q.view(batch_size * num_heads, 1, input_length, -1)
+ global_k = repeat_kv(global_k)
+ global_v = repeat_kv(global_v)
+
+ if not self.initialized:
+ self.init(local_q, local_k, local_v, global_q, global_k, global_v)
+
+ input_length = local_q.size(-2)
+
+ if self.async_global_stream:
+ GLOBAL_STREAM.wait_stream(torch.cuda.current_stream())
+
+ # append local and global tensor
+ self.local_k = torch.cat((self.local_k, local_k), dim=-2)
+ self.local_v = torch.cat((self.local_v, local_v), dim=-2)
+ kv_length = self.local_k.size(-2)
+
+ if self.dense_decoding:
+ self.dense_k = torch.cat((self.dense_k, local_k), dim=-2)
+ self.dense_v = torch.cat((self.dense_v, local_v), dim=-2)
+
+ # append global remainder
+ with torch.cuda.stream(GLOBAL_STREAM):
+ self._global_remainder_st = 0
+ self._global_remainder_ed = self.global_remainder[0].size(-2)
+
+ self.global_remainder = (
+ torch.cat((self.global_remainder[0], global_k), dim=-2),
+ torch.cat((self.global_remainder[1], global_v), dim=-2),
+ )
+
+ self.global_remainder_local_score = torch.cat(
+ (
+ self.global_remainder_local_score,
+ torch.zeros(
+ (self.num_units, self.unit_size, global_k.size(-2)),
+ dtype=global_k.dtype,
+ device=global_k.device,
+ ),
+ ),
+ dim=-1,
+ )
+
+ with torch.cuda.stream(GLOBAL_STREAM):
+ global_q = self.position_embedding.apply_rotary_pos_emb_one_angle(
+ global_q, self.n_local
+ )
+
+ use_chunk_topk = self.chunk_topk_calc is not None and input_length > 1
+ self._use_chunk_topk = use_chunk_topk
+ if use_chunk_topk:
+ exc_block_num = input_length // self.exc_block_size
+ exc_block_per_topk_chunk = self.chunk_topk_calc // self.exc_block_size
+ calc_cur_list = [
+ i * self.exc_block_size
+ for i in range(0, exc_block_num + 1, exc_block_per_topk_chunk)
+ ]
+ if calc_cur_list[-1] < input_length:
+ calc_cur_list.append(input_length)
+ self._topk_cur = 0
+ self._topk_calc_cur = -1
+
+ o_list = []
+
+ for st in range(0, input_length, self.exc_block_size):
+ ed = min(st + self.exc_block_size, input_length)
+ if use_chunk_topk and calc_cur_list[self._topk_calc_cur + 1] < ed:
+ # calculate topk and sync with host here
+ assert ed <= calc_cur_list[self._topk_calc_cur + 2]
+ self._topk_calc_cur += 1
+ with torch.cuda.stream(GLOBAL_STREAM):
+ self._cached_topk = self.get_batched_topk(
+ global_q[
+ :,
+ :,
+ calc_cur_list[self._topk_calc_cur] : calc_cur_list[
+ self._topk_calc_cur + 1
+ ],
+ :,
+ ]
+ )
+ self._topk_cur = 0
+
+ kv_st = max(kv_length + st - input_length - self.n_local, 0)
+ kv_ed = kv_length + ed - input_length
+ chunk_o, local_score = self._append(
+ local_q[:, :, st:ed, :],
+ self.local_k[:, :, kv_st:kv_ed, :],
+ self.local_v[:, :, kv_st:kv_ed, :],
+ global_q[:, :, st:ed, :],
+ )
+ o_list.append(chunk_o)
+
+ # append global
+ with torch.cuda.stream(GLOBAL_STREAM):
+ self.append_global(ed - st, kv_ed - kv_st, local_score)
+
+ if self.async_global_stream:
+ torch.cuda.current_stream().wait_stream(GLOBAL_STREAM)
+
+ if use_chunk_topk:
+ self._topk_cur += 1
+
+ self.length += input_length
+
+ # update local and global tensor
+ if self.local_k.size(-2) >= self.n_local:
+ self.local_k = self.local_k[:, :, -self.n_local :, :]
+ self.local_v = self.local_v[:, :, -self.n_local :, :]
+
+ assert self._global_remainder_ed == self.global_remainder[0].size(-2)
+ with torch.cuda.stream(GLOBAL_STREAM):
+ self.global_remainder = (
+ self.global_remainder[0][:, :, self._global_remainder_st :, :],
+ self.global_remainder[1][:, :, self._global_remainder_st :, :],
+ )
+ self.global_remainder_local_score = self.global_remainder_local_score[
+ :, :, self._global_remainder_st :
+ ]
+
+ ret = torch.cat(o_list, dim=-2)
+
+ if self.perhead:
+ ret = ret.view(batch_size, num_heads, input_length, -1)
+
+ return ret
+
+ def size(self, *args, **kwargs):
+ return self.length
+
+
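+# Factory returning the InfLLM attention forward: chunked attention over the local window plus
+# retrieved global blocks, with optional dense decoding for single-token steps.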
+def inf_llm_forward(
+ n_local,
+ n_init,
+ topk,
+ block_size,
+ max_cached_block,
+ exc_block_size,
+ repr_topk: int = 1,
+ cache_strategy="lru",
+ score_decay=None,
+ chunk_topk_calc=None,
+ async_global_stream=True,
+ pin_memory=False,
+ faiss=False,
+ perhead=False,
+ dense_decoding=False,
+ *args,
+ **kwargs
+):
+ def forward(
+ self,
+ query: torch.Tensor,
+ key_value: torch.Tensor,
+ position_bias: Optional[torch.Tensor],
+ use_cache: bool,
+ past_key_value,
+ project_q,
+ project_k,
+ project_v,
+ attention_out,
+ dim_head,
+ num_heads,
+ num_heads_kv,
+ ):
+ batch_size = query.size(0)
+ len_q = query.size(1)
+ len_k = key_value.size(1)
+
+ # assert use_cache
+
+ h_q = project_q(query) # (batch, len_q, num_heads * dim_head)
+ h_k = project_k(key_value) # (batch, len_k, num_heads * dim_head)
+ h_v = project_v(key_value) # (batch, len_k, num_heads * dim_head)
+
+ h_q = (
+ h_q.view(batch_size, len_q, num_heads, dim_head)
+ .permute(0, 2, 1, 3)
+ .contiguous()
+ ) # (batch, num_heads, len_q, dim_head)
+ h_k = (
+ h_k.view(batch_size, len_k, num_heads_kv, dim_head)
+ .permute(0, 2, 1, 3)
+ .contiguous()
+ ) # (batch, num_heads_kv, len_k, dim_head)
+ h_v = (
+ h_v.view(batch_size, len_k, num_heads_kv, dim_head)
+ .permute(0, 2, 1, 3)
+ .contiguous()
+ ) # (batch, num_heads_kv, len_k, dim_head)
+
+ if len_q == 1 and dense_decoding:
+ past_k = past_key_value.dense_k
+ past_v = past_key_value.dense_v
+
+ h_k = torch.cat((past_k, h_k), dim=-2)
+ h_v = torch.cat((past_v, h_v), dim=-2)
+
+ past_key_value.dense_k = h_k
+ past_key_value.dense_v = h_v
+
+ h_q, h_k = position_bias(h_q, h_k)
+
+ # (batch_size, seqlen, nheads, headdim)
+ h_q = h_q.transpose(1, 2)
+ h_k = h_k.transpose(1, 2)
+ h_v = h_v.transpose(1, 2)
+
+ # (batch_size, seqlen, nheads, headdim)
+ o = flash_attn_func(h_q, h_k, h_v, causal=True)
+
+ o = o.reshape(batch_size, len_q, dim_head * num_heads)
+ o = attention_out(o)
+
+ if use_cache:
+ return o, past_key_value
+ else:
+ return o
+
+ if past_key_value is None:
+ past_key_value = ContextManager(
+ position_bias,
+ n_init,
+ n_local,
+ block_size,
+ max_cached_block,
+ topk,
+ exc_block_size,
+ score_decay,
+ repr_topk,
+ cache_strategy,
+ chunk_topk_calc,
+ async_global_stream,
+ pin_memory,
+ faiss,
+ perhead,
+ dense_decoding=dense_decoding,
+ )
+
+ local_q, local_k, local_v = h_q, h_k, h_v
+ global_q, global_k, global_v = h_q, h_k, h_v
+
+ o = past_key_value.append(
+ local_q,
+ local_k,
+ local_v,
+ global_q,
+ global_k,
+ global_v,
+ )
+
+ o = o.view(batch_size, num_heads, len_q, dim_head).permute(0, 2, 1, 3)
+ o = o.reshape(batch_size, len_q, dim_head * num_heads)
+ o = attention_out(o)
+
+ if use_cache:
+ return o, past_key_value
+ else:
+ return o
+
+ return forward
+
+
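+# Minimal greedy decoder used with the InfLLM cache: chunked prefill followed by token-by-token decoding.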
+class GreedySearch:
+ def __init__(self, model, tokenizer):
+ model.eval()
+ self.device = model.device
+ self.model = model
+ self.tokenizer = tokenizer
+ self.past_kv = None
+
+ def clear(self):
+ self.past_kv = None
+
+ def _process_texts(self, input_text):
+ model_inputs = {}
+ input_ids = self.tokenizer.encode(input_text)
+
+ model_inputs["input_ids"] = input_ids
+ model_inputs["attention_mask"] = [1] * len(model_inputs["input_ids"])
+
+ for key in model_inputs:
+ model_inputs[key] = (
+ torch.tensor(model_inputs[key]).int().unsqueeze(0).cuda()
+ )
+
+ return model_inputs
+
+ def generate(self, text=None, input_ids=None, **kwargs):
+ if input_ids is None:
+ model_inputs = self._process_texts(text)
+ input_ids = model_inputs["input_ids"]
+
+ with torch.inference_mode():
+ result = self._decode(input_ids, **kwargs)
+
+ self.clear()
+ return result
+
+ def _decode(
+ self,
+ input_ids,
+ max_length=100,
+ extra_end_token_ids=[],
+ chunk_size: int = 4096,
+ output=False,
+ ):
+ if input_ids.dim() == 1:
+ input_ids = input_ids[None, :]
+ input_ids = input_ids.cuda()
+ attention_mask = torch.ones_like(input_ids)
+ assert input_ids.size(0) == 1
+ length = input_ids.size(1)
+ end_token_ids = extra_end_token_ids + [self.tokenizer.eos_token_id]
+ logits = None
+ past_key_values = self.past_kv
+ if output:
+ output_text = ""
+
+ for i in range(max_length + 1):
+ if i == 0:
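+ # Prefill: consume the prompt in chunks, then run the last prompt token to get the first next-token logits.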
+ if chunk_size is None:
+ chunk_size = input_ids.size(1)
+ for st in range(0, input_ids.size(1) - 1, chunk_size):
+ ed = min(input_ids.size(1) - 1, st + chunk_size)
+ out = self.model(
+ input_ids=input_ids[:, st:ed],
+ attention_mask=attention_mask[:, :ed],
+ use_cache=True,
+ return_dict=True,
+ past_key_values=past_key_values,
+ )
+ logits, past_key_values = out.logits, out.past_key_values
+
+ out = self.model(
+ input_ids=input_ids[:, -1:],
+ attention_mask=attention_mask,
+ use_cache=True,
+ return_dict=True,
+ past_key_values=past_key_values,
+ )
+ logits, past_key_values = out.logits, out.past_key_values
+ else:
+ out = self.model(
+ input_ids=input_ids[:, -1:],
+ attention_mask=attention_mask,
+ past_key_values=past_key_values,
+ use_cache=True,
+ return_dict=True,
+ )
+ logits, past_key_values = out.logits, out.past_key_values
+
+ logits = logits[:, -1, :]
+ word = logits.argmax(dim=-1)
+ if word.item() in end_token_ids or i == max_length:
+ break
+
+ input_ids = torch.cat((input_ids, word.view(1, 1)), dim=-1)
+ attention_mask = torch.cat(
+ (
+ attention_mask,
+ torch.ones(
+ (attention_mask.size(0), 1),
+ dtype=torch.int,
+ device=attention_mask.device,
+ ),
+ ),
+ dim=-1,
+ )
+ if output:
+ tmp = self.tokenizer.decode(input_ids.squeeze(0)[length:])
+ if len(tmp) > len(output_text):
+ sys.stdout.write(tmp[len(output_text) :])
+ sys.stdout.flush()
+ output_text = tmp
+
+ self.past_kv = past_key_values
+
+ if output:
+ sys.stdout.write("\n")
+ sys.stdout.flush()
+
+ # return [self.tokenizer.decode(input_ids.squeeze(0)[length:])]
+ return input_ids
+
+
+class InfLLMGenerator(GreedySearch):
+ def generate(
+ self,
+ input_ids=None,
+ generation_config=None,
+ pad_token_id=None,
+ max_new_tokens=None,
+ ):
+ if max_new_tokens is None:
+ max_new_tokens = generation_config.max_new_tokens
+ return super().generate(
+ text=None,
+ input_ids=input_ids,
+ max_length=max_new_tokens,
+ chunk_size=8192,
+ extra_end_token_ids=[pad_token_id] if pad_token_id is not None else [],
+ )
+
+ @torch.no_grad()
+ def __call__(self, input_ids=None, *args, **kwargs):
+ # chunked forward
+ chunk_size = 8192
+ all_logits = torch.empty(0, dtype=torch.bfloat16).to(input_ids.device)
+ for st in range(0, input_ids.size(1), chunk_size):
+ torch.cuda.empty_cache()
+ ed = min(input_ids.size(1), st + chunk_size)
+ out = self.model(
+ input_ids=input_ids[:, st:ed],
+ )
+ logits = out.logits.to(torch.bfloat16)
+ all_logits = torch.cat((all_logits, logits), dim=1)
+
+ return CausalLMOutput(logits=all_logits)
diff --git a/minference/modules/minference_forward.py b/minference/modules/minference_forward.py
new file mode 100644
index 0000000..eb0d612
--- /dev/null
+++ b/minference/modules/minference_forward.py
@@ -0,0 +1,918 @@
+# Copyright (c) 2024 Microsoft
+# Licensed under The MIT License [see LICENSE for details]
+
+import inspect
+import json
+import os
+from importlib import import_module
+
+from transformers.models.llama.modeling_llama import *
+from transformers.utils.import_utils import _is_package_available
+
+if _is_package_available("vllm"):
+ from vllm.attention.backends.flash_attn import *
+
+from ..ops.block_sparse_flash_attention import block_sparse_attention
+from ..ops.pit_sparse_flash_attention_v2 import vertical_slash_sparse_attention
+from ..ops.streaming_kernel import streaming_forward, streaming_forward2
+from .snap_kv import *
+
+last_q = 64
+arange = torch.arange(last_q, device="cuda")
+LAST_Q_MASK = arange[None, None, :, None] >= arange[None, None, None, :]
+ROPE_TYPE = None
+SEARCH_MASK = None
+
+def set_rope_type(self):
+ global ROPE_TYPE
+ if ROPE_TYPE is not None:
+ return
+ if "seq_len" in inspect.signature(self.rotary_emb.forward).parameters:
+ ROPE_TYPE = "seq_len"
+ elif "max_seq_len" in inspect.signature(self.rotary_emb.forward).parameters:
+ ROPE_TYPE = "max_seq_len"
+ else:
+ ROPE_TYPE = "position_ids"
+
+def get_cos_sin(self, value_states, kv_seq_len, position_ids):
+ if ROPE_TYPE == "seq_len":
+ cos, sin = self.rotary_emb(value_states, seq_len=kv_seq_len)
+ elif ROPE_TYPE == "max_seq_len":
+ cos = self.rotary_emb(kv_seq_len)
+ if position_ids is not None:
+ cos = cos[position_ids]
+ else:
+ cos = cos[None, :kv_seq_len]
+ sin = None
+ else:
+ cos, sin = self.rotary_emb(value_states, position_ids)
+ return cos, sin
+
+
+def init_minference_parameters(self):
+ config = self.config.to_dict()
+ self.starting_layer = config.get("starting_layer", 0)
+ self.is_search = config.get("is_search", False)
+
+ self.ne_inf = None
+ self.config_path = config.get("config_path", "")
+ if (
+ self.config_path is not None and
+ os.path.exists(self.config_path) and
+ self.layer_idx < len(json.load(open(self.config_path)))
+ ):
+ self.best_pattern = {int(ii): jj for ii, jj in json.load(open(self.config_path))[self.layer_idx].items()}
+ else:
+ self.best_pattern = {}
+ self.vertical, self.slash = None, None
+
+ # import apply_rotary_pos_emb
+ if "apply_rotary_pos_emb" not in self.__dict__:
+ global apply_rotary_pos_emb
+ model_path = self.rotary_emb.__class__.__module__
+ apply_rotary_pos_emb = getattr(import_module(model_path), "apply_rotary_pos_emb")
+ self.apply_rotary_pos_emb = True
+
+def sum_all_diagonal_matrix(mat: torch.tensor):
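+ # Sum every diagonal of each (n, m) score matrix by zero-padding and re-striding; used to score
+ # slash (diagonal) sparsity patterns.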
+ b, h, n, m = mat.shape
+ zero_mat = torch.zeros((b, h, n, n)).to(mat.device) # Zero matrix used for padding
+ mat_padded = torch.cat((zero_mat, mat, zero_mat), -1) # pads the matrix on left and right
+ mat_strided = mat_padded.as_strided((1, 1, n, n + m), (1, n * (2 * n + m), 2 * n + m + 1, 1)) # Change the strides
+ sum_diags = torch.sum(mat_strided, 2) # Sums the resulting matrix's columns
+ return sum_diags[:,:,1:]
+
+def gather(t, dim, i):
+ """A broadcasting version of torch.gather."""
+ dim += (dim < 0) * t.ndim
+ return t.gather(dim, i.expand(*t.shape[:dim], i.shape[dim], *t.shape[dim + 1 :]))
+
+def gather_qkv(q, k, v, attention_mask):
+ attn_weights = torch.matmul(q, k.transpose(2, 3)) / math.sqrt(q.size(-1)) + attention_mask
+ attn_weights = nn.functional.softmax(attn_weights, dim=-1, dtype=torch.float32).to(q.dtype)
+ attn_output = torch.matmul(attn_weights, v)
+ return attn_output
+
+def search_pattern(q, k, head):
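+ # Offline pattern search: score stream_llm / vertical_and_slash / block_sparse approximations of the
+ # full attention for this head and return the best (type, vertical_size, slash_size) setting.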
+ q_len = q.shape[2]
+ head_dim = q.shape[-1]
+
+ def vertical_and_slash(vertical_size, slash_size):
+ last_q = 64
+ q_len = q.shape[2]
+ qk_idxs = [ii + q_len for ii in list(range(-last_q, 0, 1))]
+ qk = torch.matmul(q[:,:,qk_idxs,:], k.transpose(2, 3))/ math.sqrt(head_dim) + attention_mask[:,:,qk_idxs]
+ qk = torch.nn.functional.softmax(qk, dim=-1, dtype=torch.float32)
+ vertical = qk.sum(-2, keepdim=True)
+ vertical[...,:30] = torch.inf
+ vertical_topk = torch.topk(-vertical, q_len - vertical_size, -1).indices
+
+ slash = sum_all_diagonal_matrix(qk)[...,:-last_q + 1]
+ slash[...,-30:] = torch.inf
+ slash_topk = slash
+ slash = torch.topk(slash, slash_size, -1).indices - (q_len - 1)
+ slash = torch.stack([torch.sparse.spdiags(torch.ones(slash_size, q_len), slash.cpu()[0][_], (q_len, q_len)).to_dense() for _ in range(1)]).to(q.device)
+
+ est_attn = torch.ones_like(attn_weights)
+ dim = 3
+ est_attn = est_attn.scatter(3, vertical_topk.expand(*est_attn.shape[:dim], vertical_topk.shape[dim], *est_attn.shape[dim + 1 :]), 0)
+ est_attn = est_attn + slash
+
+ est_attn = (est_attn > 0).float()
+ est_attn = torch.tril(est_attn)
+ attn_weights_x = attn_weights * est_attn
+ res3 = attn_weights_x[:,:,2500:].sum(-1).mean(-1).squeeze().float().detach().cpu().numpy()
+ return res3
+
+ def stream_llm(vertical_size, slash_size):
+ q_len = q.shape[2]
+
+ mask = torch.triu(torch.tril(torch.ones(q_len, q_len), 0), -slash_size).to(q)
+ mask[:,:vertical_size] = 1
+ mask = mask.unsqueeze(0).unsqueeze(1)
+
+ est_attn = torch.tril(mask)
+ attn_weights_x = attn_weights * est_attn
+ res3 = attn_weights_x[:,:,2500:].sum(-1).mean(-1).squeeze().float().detach().cpu().numpy()
+ return res3
+
+ def block_sparse(topk_ratio, slash_size=None):
+ block_num = (q_len -1) // 32 + 1
+ block_q = torch.zeros(1,1,block_num * 32,head_dim).to(q)
+ block_q[:,:,:q_len] = q
+ block_q = block_q.reshape(1,1,block_num,32,-1).mean(-2)
+ block_k = torch.zeros(1,1,block_num * 32,head_dim).to(k)
+ block_k[:,:,:q_len] = k
+ block_k = block_k.reshape(1,1,block_num,32,-1).mean(-2)
+
+ qk = torch.matmul(block_q, block_k.transpose(2, 3)) + attention_mask[:,:,:block_num,:block_num]
+ est_attn = torch.ones_like(qk)
+ block_topk = torch.topk(-qk, block_num - block_num//topk_ratio, -1).indices
+
+ dim = 3
+ est_attn = est_attn.scatter(3, block_topk.expand(*est_attn.shape[:dim], block_topk.shape[dim], *est_attn.shape[dim + 1 :]), 0)
+ est_attn = est_attn.unsqueeze(3).unsqueeze(-1).repeat(1,1,1,32,1,32).reshape(1,1,block_num * 32, block_num * 32)[...,:q_len,:q_len]
+ est_attn = torch.tril(est_attn)
+
+ attn_weights_x = attn_weights * est_attn
+ res2 = attn_weights_x[:,:,2500:].sum(-1).mean(-1).squeeze().float().detach().cpu().numpy()
+ return res2
+
+ global SEARCH_MASK
+ if SEARCH_MASK is None:
+ attention_mask = torch.full((q_len, q_len), torch.finfo(q.dtype).min, device="cuda")
+ mask_cond = torch.arange(attention_mask.size(-1), device="cuda")
+ attention_mask.masked_fill_(mask_cond < (mask_cond + 1).view(attention_mask.size(-1), 1), 0)
+ attention_mask = attention_mask[None, None, :]
+ SEARCH_MASK = attention_mask
+ else:
+ attention_mask = SEARCH_MASK
+ attn_weights = torch.matmul(q, k.transpose(2, 3)) / math.sqrt(head_dim) + attention_mask
+ attn_weights = torch.nn.functional.softmax(attn_weights, dim=-1, dtype=torch.float32).to(q.dtype)
+ best_s, best_v, best_score, best_ty = 0, 0, 0, ""
+ all_info = []
+ for ty, fc in [("stream_llm", stream_llm), ("vertical_and_slash", vertical_and_slash), ("block_sparse", block_sparse)]:
+ if ty == "stream_llm":
+ vs_list = [(100, 800)]
+ elif ty == "vertical_and_slash":
+ vs_list = [(30, 800), (100, 750), (500, 700), (3500, 100)]
+ else:
+ vs_list = [(8, 1)]
+ for v_size, s_size in vs_list:
+ score = fc(v_size, s_size)
+ score = score.item()
+ all_info.append([ty, v_size, s_size, score])
+ if score > best_score:
+ best_score = score
+ best_s, best_v = s_size, v_size
+ best_ty = ty
+ if best_ty == "stream_llm":
+ best_ty = "vertical_and_slash"
+ if best_ty == "block_sparse":
+ best_ty, best_v, best_s = "vertical_and_slash", 1000, 6096
+ print(head, best_ty, best_v, best_s, best_score)
+ return (best_ty, best_v, best_s, best_score)
+
+def search_pattern_v2(q, k, v, head):
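+ # Kernel-level pattern search: run each sparse kernel and count output elements that deviate from the
+ # dense FlashAttention reference by more than 5e-3; a smaller count is better.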
+ q_len = q.shape[2]
+ head_dim = q.shape[-1]
+ def vertical_and_slash_kernel(q, k, v, vertical_size, slash_size):
+ vertical_size, slash_size = min(q_len, max(vertical_size, 30)), min(q_len, max(slash_size, 50))
+ last_q = 64
+ qk = torch.einsum(f'bhmk, bhnk -> bhmn', q[:,:,-last_q:,:], k)
+ qk[:, :, :, -last_q:] = torch.where(LAST_Q_MASK, qk[:, :, :, -last_q:], -torch.inf)
+ qk = torch.nn.functional.softmax(qk, dim=-1, dtype=torch.float32)
+ vertical = qk.sum(-2, keepdim=True)
+ vertical[...,:30] = torch.inf
+ vertical_topk = torch.topk(vertical, vertical_size, -1).indices
+
+ slash = sum_all_diagonal_matrix(qk)[...,:-last_q + 1]
+ slash[...,-30:] = torch.inf
+ slash_topk = slash
+ slash = (q_len - 1) - torch.topk(slash, slash_size, -1).indices
+
+ return vertical_slash_sparse_attention(q, k, v, vertical_topk, slash)
+ def dense(q, k, v, vertical_size=None, slash_size=None):
+ return flash_attn_func(q.transpose(1, 2), k.transpose(1, 2), v.transpose(1,2), 0.0, softmax_scale=None, causal=q_len != 1).view(bsz, 1, q_len, head_dim)
+ def block_sparse_kernel(q, k, v, vertical_size=None, slash_size=None):
+ topk = 100
+ return block_sparse_attention(q, k, v, topk)
+
+ best_s, best_v, best_score, best_ty = 0, 0, float("inf"), ""
+ bsz = q.shape[0]
+ all_info = []
+ ref = dense(q, k, v)
+ for ty, fc in [("stream_llm", streaming_forward), ("vertical_and_slash", vertical_and_slash_kernel), ("block_sparse", block_sparse_kernel)]:
+ if ty == "stream_llm":
+ vs_list = [(100, 800)]
+ elif ty == "vertical_and_slash":
+ vs_list = [(30, 800), (100, 800), (100, 750), (500, 700), (3500, 100), (1000, 4096)]
+ else:
+ vs_list = [(10, 1)]
+ for v_size, s_size in vs_list:
+ score = fc(q, k, v, v_size, s_size)
+ # delta = (ref - score).abs().sum()
+ delta = ((ref - score).abs() > 5e-3).sum()
+ score = delta.item()
+ all_info.append([ty, v_size, s_size, score])
+ if score < best_score:
+ best_score = score
+ best_s, best_v = s_size, v_size
+ best_ty = ty
+ print(head, best_ty, best_v, best_s, best_score)
+ return all_info
+
+def shift_matrix(mat):
+ b, h, _, n = mat.shape
+ zero_mat = torch.zeros((b, h, n, n)).to(mat.device) # Zero matrix used for padding
+ mat_padded = torch.cat((zero_mat, mat, zero_mat), -1) # pads the matrix on left and right
+ mat_strided = mat_padded.as_strided((1, 1, n, n + 2 * n), (1, n * (2 * n + n), 2 * n + n - 1, 1)) # Change the strides
+ return mat_strided[...,2 * n-1:-1]
+
+def repeat(self, q, k, v, attention_mask):
+ q_len = q.shape[2]
+ if q_len == 1:
+ return gather_qkv(q, k, v, attention_mask)
+ qk = torch.matmul(q[:,:,-1:,:], k.transpose(2, 3)) / math.sqrt(self.head_dim)
+ qk = qk.repeat(1,1,q_len, 1)
+ qk = shift_matrix(qk) + attention_mask
+ attn_weights = nn.functional.softmax(qk, dim=-1, dtype=torch.float32).to(q.dtype)
+ attn_output = torch.matmul(attn_weights, v)
+ return attn_output
+
+def gather_last_q_vertical_slash_topk_v4(self, q, k, v, head_id):
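+ # Dispatch the per-head sparse attention kernel according to the searched pattern; falls back to
+ # dense FlashAttention for single-token decoding.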
+ kv_seq_len = k.size(2)
+
+ def vertical_and_slash(attn_weights, vertical_size, slash_size):
+ last_q = 64
+ q_len = q.shape[2]
+ vertical_size, slash_size = min(q_len, max(vertical_size, 30)), min(q_len, max(slash_size, 50))
+ qk_idxs = [ii + q_len for ii in list(range(-last_q, 0, 1))]
+ qk = torch.matmul(q[:,:,qk_idxs,:], k.transpose(2, 3))/ math.sqrt(self.head_dim) + attention_mask[:,:,qk_idxs]
+ qk = torch.nn.functional.softmax(qk, dim=-1, dtype=torch.float32)
+ vertical = qk.sum(-2, keepdim=True)
+ vertical[...,:30] = -self.ne_inf
+ vertical_topk = torch.topk(-vertical, q_len - vertical_size, -1).indices
+
+ slash = sum_all_diagonal_matrix(qk)[...,:-last_q + 1]
+ slash[...,-30:] = -self.ne_inf
+ slash_topk = slash
+ slash = torch.topk(slash, slash_size, -1).indices - (q_len - 1)
+ slash = torch.stack([torch.sparse.spdiags(torch.ones(slash_size, q_len), slash.cpu()[0][_], (q_len, q_len)).to_dense() for _ in range(1)]).to(q.device)
+
+ est_attn = torch.ones_like(attn_weights)
+ dim = 3
+ est_attn = est_attn.scatter(3, vertical_topk.expand(*est_attn.shape[:dim], vertical_topk.shape[dim], *est_attn.shape[dim + 1 :]), 0)
+ est_attn = est_attn + slash
+
+ est_attn = (est_attn > 0).float()
+ est_attn = torch.tril(est_attn)
+ est_attn = (est_attn == 0).int() * self.ne_inf
+ attn_weights = attn_weights + est_attn
+ if self.kv_cache_compressed_v4:
+ self.vertical = torch.topk(vertical, vertical_size * 4, -1).indices
+ self.slash = (torch.topk(slash_topk, slash_size * 4, -1).indices - (q_len - 1)).unsqueeze(2)
+ return attn_weights
+
+ def stream_llm(attn_weights, vertical_size, slash_size):
+ q_len = q.shape[2]
+ vertical_size, slash_size = min(q_len, max(vertical_size, 30)), min(q_len, max(slash_size, 50))
+ mask = torch.triu(torch.tril(torch.ones(q_len, q_len), 0), -slash_size).to(q)
+ mask[:,:vertical_size] = 1
+ mask = mask.unsqueeze(0).unsqueeze(1)
+
+ est_attn = torch.tril(mask)
+ est_attn = (est_attn == 0).int() * self.ne_inf
+ attn_weights = attn_weights + est_attn
+ if self.kv_cache_compressed_v4:
+ self.vertical = torch.Tensor(list(range(vertical_size * 4))).long().to(q.device).unsqueeze(0).unsqueeze(0).unsqueeze(0)
+ self.slash = torch.Tensor(list(range(-slash_size * 4, 1))).long().to(q.device).unsqueeze(0).unsqueeze(0).unsqueeze(0)
+ return attn_weights
+
+ def block_sparse(attn_weights, topk_ratio, slash_size=None, block_size=8):
+ block_num = (q_len -1) // block_size + 1
+ block_q = torch.zeros(1,1,block_num * block_size,head_dim).to(q)
+ block_q[:,:,:q_len] = q
+ block_q = block_q.reshape(1,1,block_num,block_size,-1).mean(-2)
+ block_k = torch.zeros(1,1,block_num * block_size,head_dim).to(k)
+ block_k[:,:,:q_len] = k
+ block_k = block_k.reshape(1,1,block_num,block_size,-1).mean(-2)
+
+ qk = torch.matmul(block_q, block_k.transpose(2, 3)) + attention_mask[:,:,:block_num,:block_num]
+ est_attn = torch.ones_like(qk)
+ block_topk = torch.topk(-qk, block_num - block_num//topk_ratio, -1).indices
+
+ dim = 3
+ est_attn = est_attn.scatter(3, block_topk.expand(*est_attn.shape[:dim], block_topk.shape[dim], *est_attn.shape[dim + 1 :]), 0)
+ est_attn = est_attn.unsqueeze(3).unsqueeze(-1).repeat(1,1,1,block_size,1,block_size).reshape(1,1,block_num * block_size, block_num * block_size)[...,:q_len,:q_len]
+ est_attn = torch.tril(est_attn)
+ est_attn = (est_attn == 0).int()
+ attn_weights = attn_weights + est_attn
+ return attn_weights
+
+ def dilated(q, k, v, type):
+ q_len = q.shape[2]
+ n_init = min(1024, q_len)
+ vertical_topk = torch.arange(0, n_init, device=q.device)[None, None, None, :]
+
+ slash = torch.arange(0, q_len, device=q.device)
+ if type == 'dilated1':
+ # 8k local with 1 interval
+ slash = slash[-8192::2][None, None, :]
+ elif type == 'dilated2':
+ # 2k dense local + 4k local with 1 interval
+ slash = torch.cat([slash[-2048:], slash[-6144:-2048:2]], 0)[None, None, :]
+
+ slash = (q_len - 1) - slash
+ return vertical_slash_sparse_attention(q, k, v, vertical_topk, slash)
+
+ def vertical_and_slash_kernel(q, k, v, vertical_size, slash_size):
+ vertical_size, slash_size = min(q_len, max(vertical_size, 30)), min(q_len, max(slash_size, 50))
+ last_q = min(64, q_len)
+ qk = torch.einsum(f'bhmk, bhnk -> bhmn', q[:,:,-last_q:,:], k)
+ qk[:, :, :, -last_q:] = torch.where(LAST_Q_MASK[...,-last_q:,-last_q:].to(q.device), qk[:, :, :, -last_q:], -torch.inf)
+ qk = torch.nn.functional.softmax(qk, dim=-1, dtype=torch.float32)
+ vertical = qk.sum(-2, keepdim=True)
+ vertical[...,:30] = torch.inf
+ vertical_topk = torch.topk(vertical, vertical_size, -1).indices
+
+ slash = sum_all_diagonal_matrix(qk)[...,:-last_q + 1]
+ slash[...,-100:] = torch.inf
+ slash_topk = slash
+ slash = (q_len - 1) - torch.topk(slash, slash_size, -1).indices
+
+ return vertical_slash_sparse_attention(q, k, v, vertical_topk, slash)
+
+ def vertical_and_slash_kernel_extend(q, k, v, vertical_size, slash_size):
+ vertical_size, slash_size = min(q_len, max(vertical_size + 100, 30)), min(q_len, max(slash_size, 50))
+ last_q = min(64, q_len)
+ last_start = 100
+ qk = torch.einsum(f'bhmk, bhnk -> bhmn', q[:,:,-last_q-last_start:-last_start,:], k)
+ qk[:, :, :, -last_start:] = -torch.inf
+ qk[:, :, :, -last_q-last_start:-last_start] = torch.where(LAST_Q_MASK[...,-last_q:,-last_q:].to(q.device), qk[:, :, :, -last_q-last_start:-last_start], -torch.inf)
+ qk = torch.nn.functional.softmax(qk, dim=-1, dtype=torch.float32)
+ vertical = qk.sum(-2, keepdim=True)
+ vertical[...,:30] = torch.inf
+ vertical[...,-100:] = torch.inf
+ vertical_topk = torch.topk(vertical, vertical_size, -1).indices
+
+ slash = sum_all_diagonal_matrix(qk)[...,:-last_q + 1]
+ slash[...,-100:] = torch.inf
+ slash_topk = slash
+ slash = (q_len - 1) - torch.topk(slash, slash_size, -1).indices
+
+ return vertical_slash_sparse_attention(q, k, v, vertical_topk, slash)
+
+ def vertical_and_slash_kernel_static(q, k, v, vertical_size, slash_size):
+ if "vs" in self.__dict__:
+ vertical_topk, slash = self.vs
+ else:
+ vertical_size, slash_size = min(q_len, max(vertical_size, 30)), min(q_len, max(slash_size, 50))
+ last_q = 64
+ qk = torch.einsum(f'bhmk, bhnk -> bhmn', q[:,:,-last_q:,:], k)
+ qk[:, :, :, -last_q:] = torch.where(LAST_Q_MASK, qk[:, :, :, -last_q:], -torch.inf)
+ qk = torch.nn.functional.softmax(qk, dim=-1, dtype=torch.float32)
+ vertical = qk.sum(-2, keepdim=True)
+ vertical[...,:30] = torch.inf
+ vertical_topk = torch.topk(vertical, vertical_size, -1).indices
+
+ slash = sum_all_diagonal_matrix(qk)[...,:-last_q + 1]
+ slash[...,-30:] = torch.inf
+ slash_topk = slash
+ slash = (q_len - 1) - torch.topk(slash, slash_size, -1).indices
+ self.vs = vertical_topk, slash
+
+ return vertical_slash_sparse_attention(q, k, v, vertical_topk, slash)
+ def dense(q, k, v, vertical_size=None, slash_size=None):
+ return flash_attn_func(q.transpose(1, 2), k.transpose(1, 2), v.transpose(1,2), 0.0, softmax_scale=None, causal=q_len != 1).view(bsz, 1, q_len, self.head_dim)
+ def block_sparse_kernel(q, k, v, vertical_size=None, slash_size=None):
+ topk = 100
+ return block_sparse_attention(q, k, v, topk)
+
+ q_len = q.shape[2]
+ bsz = q.shape[0]
+
+ if q_len == 1:
+ return dense(q, k, v)
+
+ if self.config.to_dict().get("dilated1", False):
+ return dialted(q, k, v, 'dilated1')
+ if self.config.to_dict().get("dilated2", False):
+ return dialted(q, k, v, 'dilated2')
+ if self.config.to_dict().get("dense", False):
+ return dense(q, k, v)
+ if self.config.to_dict().get("streaming", False):
+ return streaming_forward(q, k, v, self.config.streaming_kwargs["n_init"], self.config.streaming_kwargs["n_local"])
+
+ ty, vertical_size, slash_size, _ = self.best_pattern.get(head_id, ("vertical_and_slash", 1000, 6096, 1))
+
+ if self.config.to_dict().get("static_pattern", False):
+ return vertical_and_slash_kernel_static(q, k, v, vertical_size, slash_size)
+ if self.config.to_dict().get("vs_only", False):
+ return vertical_and_slash_kernel(q, k, v, vertical_size, slash_size)
+
+ fc = {
+ "stream_llm": streaming_forward,
+ "vertical_and_slash": vertical_and_slash_kernel,
+ "block_sparse": block_sparse_kernel,
+ }[ty]
+ return fc(q, k, v, vertical_size, slash_size)
+
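+# Apply RoPE to a single tensor (query or key) when cos/sin are already gathered for the target positions.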
+def apply_rotary_pos_emb_single(q, cos, sin, position_ids, unsqueeze_dim=1):
+ # cos = cos[position_ids].unsqueeze(unsqueeze_dim)
+ # sin = sin[position_ids].unsqueeze(unsqueeze_dim)
+ cos = cos.unsqueeze(unsqueeze_dim)
+ sin = sin.unsqueeze(unsqueeze_dim)
+ return (q * cos) + (rotate_half(q) * sin)
+
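+# Returns a drop-in HF attention `forward`: during prefill (q_len > 1), heads in layers >= `starting_layer` use the per-head dynamic sparse pattern (or, in search mode, dump the searched pattern to `config_path`); decoding falls back to dense FlashAttention.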
+def minference_forward():
+ def forward(
+ self,
+ hidden_states,
+ attention_mask,
+ position_ids,
+ past_key_value,
+ output_attentions,
+ use_cache,
+ **kwargs,
+ ):
+ self.init_minference_parameters()
+ self.ne_inf = torch.finfo(hidden_states.dtype).min
+
+ bsz, q_len, _ = hidden_states.size()
+
+ if "q_proj" in self.__dict__["_modules"]:
+ query_states = self.q_proj(hidden_states)
+ key_states = self.k_proj(hidden_states)
+ value_states = self.v_proj(hidden_states)
+ else:
+ qkv = self.qkv_proj(hidden_states)
+ query_pos = self.num_heads * self.head_dim
+ key_value_pos = query_pos // self.num_key_value_groups
+ query_states, key_states, value_states = torch.split(qkv, [query_pos, key_value_pos, key_value_pos], -1)
+
+ query_states = query_states.view(bsz, q_len, self.num_heads, self.head_dim).transpose(1, 2)
+ key_states = key_states.view(bsz, q_len, self.num_key_value_heads, self.head_dim).transpose(1, 2)
+ value_states = value_states.view(bsz, q_len, self.num_key_value_heads, self.head_dim).transpose(1, 2)
+
+ kv_seq_len = key_states.shape[-2]
+ if past_key_value is not None:
+ if self.layer_idx is None:
+ raise ValueError(
+ f"The cache structure has changed since version v4.36. If you are using {self.__class__.__name__} "
+ "for auto-regressive decoding with k/v caching, please make sure to initialize the attention class "
+ "with a layer index."
+ )
+ kv_seq_len += past_key_value.get_usable_length(kv_seq_len, self.layer_idx)
+ set_rope_type(self)
+ cos, sin = get_cos_sin(self, value_states, kv_seq_len, position_ids)
+ if ROPE_TYPE == "max_seq_len":
+ query_states = apply_rotary_pos_emb(query_states, cos)
+ key_states = apply_rotary_pos_emb(key_states, cos)
+ else:
+ query_states, key_states = apply_rotary_pos_emb(query_states, key_states, cos, sin, position_ids)
+
+ if past_key_value is not None:
+ cache_kwargs = {"sin": sin, "cos": cos} # Specific to RoPE models
+ key_states, value_states = past_key_value.update(key_states, value_states, self.layer_idx, cache_kwargs)
+
+ key_states = repeat_kv(key_states, self.num_key_value_groups)
+ value_states = repeat_kv(value_states, self.num_key_value_groups)
+ if self.is_search:
+ if os.path.exists(self.config_path):
+ config_list = json.load(open(self.config_path))
+ if self.layer_idx < len(config_list):
+ assert False
+ else:
+ config_list = []
+ config = {}
+ print("Layer", self.layer_idx)
+ if q_len != 1:
+ output = torch.empty_like(query_states)
+ for head in range(query_states.size(1)):
+ q = query_states[:, head, :, :].unsqueeze(1)
+ k = key_states[:, head, :, :].unsqueeze(1)
+ v = value_states[:, head, :, :].unsqueeze(1)
+ if self.is_search:
+ config[head] = search_pattern(q, k, head)
+ if self.layer_idx >= self.starting_layer and not self.is_search:
+ attn_output = self.gather_last_q_vertical_slash_topk_v4(q, k, v, head)
+ elif is_flash_attn_2_available():
+ attn_output = flash_attn_func(q.transpose(1, 2), k.transpose(1, 2), v.transpose(1,2), 0.0, softmax_scale=None, causal=q_len != 1).view(bsz, 1, q_len, self.head_dim)
+ else:
+ attn_output = gather_qkv(q, k, v, attention_mask)
+ output[:, head:head + 1] = attn_output
+ if self.is_search:
+ config_list.append(config)
+ with open(self.config_path, 'w') as json_file:
+ json.dump(config_list, json_file)
+ else:
+ output = flash_attn_func(query_states.transpose(1, 2), key_states.transpose(1, 2), value_states.transpose(1,2), 0.0, softmax_scale=None, causal=q_len != 1).view(bsz, query_states.size(1), q_len, self.head_dim)
+ attn_output = output.transpose(1, 2).contiguous()
+ attn_output = attn_output.reshape(bsz, q_len, self.hidden_size)
+ attn_output = self.o_proj(attn_output)
+
+ return attn_output, None, past_key_value
+
+ return forward
+
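+# Memory-bounded variant: Q/K/V projections are recomputed one head at a time and the new KV states are kept on `kv_cache_cpu_device` (CPU by default), so only a single head's tensors stay on GPU during long-context prefill.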
+def minference_kv_cache_cpu_forward():
+ def forward(
+ self,
+ hidden_states,
+ attention_mask,
+ position_ids,
+ past_key_value,
+ output_attentions,
+ use_cache,
+ **kwargs,
+ ):
+ self.init_minference_parameters()
+ self.ne_inf = torch.finfo(hidden_states.dtype).min
+
+ bsz, q_len, hidden_dim = hidden_states.size()
+ kv_seq_len = q_len
+ if use_cache and past_key_value is not None:
+ kv_seq_len += past_key_value.get_usable_length(kv_seq_len, self.layer_idx)
+
+ set_rope_type(self)
+ cos, sin = get_cos_sin(self, hidden_states, kv_seq_len, position_ids)
+ cache_kwargs = {"sin": sin, "cos": cos}
+ kv_cache_cpu_device = self.config.to_dict().get("kv_cache_cpu_device", "cpu")
+
+ attn_out = torch.empty_like(hidden_states).view(bsz, q_len, self.num_heads, self.head_dim)
+ act_num_heads = self.num_heads // self.num_key_value_groups
+ if use_cache:
+ k = torch.zeros(bsz, act_num_heads, q_len, self.head_dim).to(hidden_states.dtype).to(kv_cache_cpu_device)
+ v = torch.zeros(bsz, act_num_heads, q_len, self.head_dim).to(hidden_states.dtype).to(kv_cache_cpu_device)
+ part_k, part_v = None, None
+ for head in range(self.num_heads):
+ if "q_proj" in self.__dict__["_modules"]:
+ part_q = F.linear(hidden_states, self.q_proj.weight.view(self.num_heads, self.head_dim, hidden_dim)[head]).unsqueeze(2)
+ if self.q_proj.bias is not None:
+ part_q += self.q_proj.bias.view(self.num_heads, self.head_dim)[head]
+ else:
+ query_pos = self.num_heads * self.head_dim
+ part_q = F.linear(hidden_states, self.qkv_proj.weight[:query_pos].view(self.num_heads, self.head_dim, hidden_dim)[head]).unsqueeze(2)
+ if self.qkv_proj.bias is not None:
+ part_q += self.qkv_proj.bias[:query_pos].view(self.num_heads, self.head_dim)[head]
+
+ if ROPE_TYPE == "max_seq_len":
+ part_q = apply_rotary_pos_emb(part_q.transpose(1, 2), cos)
+ else:
+ part_q = apply_rotary_pos_emb_single(part_q.transpose(1, 2), cos, sin, position_ids)
+
+ if head % self.num_key_value_groups == 0:
+ if "q_proj" in self.__dict__["_modules"]:
+ part_k = F.linear(hidden_states, self.k_proj.weight.view(act_num_heads, self.head_dim, hidden_dim)[head // self.num_key_value_groups]).unsqueeze(2)
+ part_v = F.linear(hidden_states, self.v_proj.weight.view(act_num_heads, self.head_dim, hidden_dim)[head // self.num_key_value_groups]).unsqueeze(2).transpose(1, 2)
+ if self.k_proj.bias is not None:
+ part_k += self.k_proj.bias.view(act_num_heads, self.head_dim)[head // self.num_key_value_groups]
+ if self.v_proj.bias is not None:
+ part_v += self.v_proj.bias.view(act_num_heads, self.head_dim)[head // self.num_key_value_groups]
+ else:
+ query_pos = self.num_heads * self.head_dim
+ part_k = F.linear(hidden_states, self.qkv_proj.weight[query_pos:].view(2, act_num_heads, self.head_dim, hidden_dim)[0][head // self.num_key_value_groups]).unsqueeze(2)
+ part_v = F.linear(hidden_states, self.qkv_proj.weight[query_pos:].view(2, act_num_heads, self.head_dim, hidden_dim)[1][head // self.num_key_value_groups]).unsqueeze(2).transpose(1, 2)
+ if self.qkv_proj.bias is not None:
+ part_k += self.qkv_proj.bias[query_pos:].view(2, act_num_heads, self.head_dim)[0][head // self.num_key_value_groups]
+ part_v += self.qkv_proj.bias[query_pos:].view(2, act_num_heads, self.head_dim)[1][head // self.num_key_value_groups]
+
+ if ROPE_TYPE == "max_seq_len":
+ part_k = apply_rotary_pos_emb(part_k.transpose(1, 2), cos)
+ else:
+ part_k = apply_rotary_pos_emb_single(part_k.transpose(1, 2), cos, sin, position_ids)
+ if use_cache and past_key_value is not None:
+ k[:,head // self.num_key_value_groups] = part_k.to(kv_cache_cpu_device)
+ v[:,head // self.num_key_value_groups] = part_v.to(kv_cache_cpu_device)
+ part_k, part_v = past_key_value.get(part_k, part_v, self.layer_idx, head // self.num_key_value_groups, cache_kwargs)
+
+ if self.layer_idx >= self.starting_layer:
+ part_o = self.gather_last_q_vertical_slash_topk_v4(part_q, part_k, part_v, head)
+ else:
+ part_o = flash_attn_func(part_q, part_k, part_v.transpose(1, 2), 0.0, softmax_scale=None, causal=True).view(bsz, part_q.shape[1], self.head_dim)
+ attn_out[:, :, head, :] = part_o
+
+ if use_cache and past_key_value is not None:
+ past_key_value.update(k, v, self.layer_idx, cache_kwargs)
+ torch.matmul(attn_out.view(bsz, q_len, hidden_dim), self.o_proj.weight.T, out=hidden_states)
+ torch.cuda.empty_cache()
+ return (hidden_states, None, past_key_value)
+
+ return forward
+
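+# Variant that combines the MInference sparse prefill with SnapKV: at prefill time the KV cache is compressed via `self.kv_cluster.update_kv` before being stored.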
+def minference_with_snapkv_forward():
+ def forward(
+ self,
+ hidden_states,
+ attention_mask,
+ position_ids,
+ past_key_value,
+ output_attentions,
+ use_cache,
+ **kwargs,
+ ):
+ self.init_minference_parameters()
+ self.ne_inf = torch.finfo(hidden_states.dtype).min
+
+ init_snapkv(self)
+
+ bsz, q_len, _ = hidden_states.size()
+
+ query_states = self.q_proj(hidden_states)
+ key_states = self.k_proj(hidden_states)
+ value_states = self.v_proj(hidden_states)
+
+ query_states = query_states.view(bsz, q_len, self.num_heads, self.head_dim).transpose(1, 2)
+ key_states = key_states.view(bsz, q_len, self.num_key_value_heads, self.head_dim).transpose(1, 2)
+ value_states = value_states.view(bsz, q_len, self.num_key_value_heads, self.head_dim).transpose(1, 2)
+
+ kv_seq_len = key_states.shape[-2]
+ if past_key_value is not None:
+ if self.layer_idx is None:
+ raise ValueError(
+ f"The cache structure has changed since version v4.36. If you are using {self.__class__.__name__} "
+ "for auto-regressive decoding with k/v caching, please make sure to initialize the attention class "
+ "with a layer index."
+ )
+
+ if hasattr(self, "kv_seq_len"): #[SnapKV] add kv_seq_len
+ if self.kv_seq_len != 0:
+ kv_seq_len += self.kv_seq_len
+ else:
+ kv_seq_len += past_key_value.get_usable_length(kv_seq_len, self.layer_idx)
+ else:
+ kv_seq_len += past_key_value.get_usable_length(kv_seq_len, self.layer_idx)
+ set_rope_type(self)
+ cos, sin = get_cos_sin(self, value_states, kv_seq_len, position_ids)
+ if ROPE_TYPE == "max_seq_len":
+ query_states = apply_rotary_pos_emb(query_states, cos)
+ key_states = apply_rotary_pos_emb(key_states, cos)
+ else:
+ query_states, key_states = apply_rotary_pos_emb(query_states, key_states, cos, sin, position_ids)
+ key_states = repeat_kv(key_states, self.num_key_value_groups)
+ value_states = repeat_kv(value_states, self.num_key_value_groups)
+
+ if past_key_value is not None:
+ cache_kwargs = {"sin": sin, "cos": cos} # Specific to RoPE models
+ if key_states.shape[-2] == kv_seq_len: # [SnapKV] add kv_cluster
+ self.kv_seq_len = kv_seq_len # [SnapKV] register kv_seq_len
+ key_states_compress, value_states_compress = self.kv_cluster.update_kv(key_states, query_states, value_states, attention_mask, self.num_key_value_groups)
+ past_key_value.update(key_states_compress, value_states_compress, self.layer_idx, cache_kwargs)
+ else:
+ self.kv_seq_len += q_len
+ key_states, value_states = past_key_value.update(key_states, value_states, self.layer_idx, cache_kwargs)
+
+ if self.layer_idx >= self.starting_layer:
+ assert query_states.size(1) == key_states.size(1) == value_states.size(1)
+ output = torch.empty_like(query_states)
+ for head in range(query_states.size(1)):
+ q = query_states[:, head, :, :].unsqueeze(1)
+ k = key_states[:, head, :, :].unsqueeze(1)
+ v = value_states[:, head, :, :].unsqueeze(1)
+ output[:, head:head + 1] = self.gather_last_q_vertical_slash_topk_v4(q, k, v, head)
+
+ attn_output = output.transpose(1, 2).contiguous()
+ attn_output = attn_output.reshape(bsz, q_len, self.hidden_size)
+ attn_output = self.o_proj(attn_output)
+ return attn_output, None, past_key_value
+
+ else:
+ output = torch.empty_like(query_states)
+ for head in range(query_states.size(1)):
+ q = query_states[:, head, :, :].unsqueeze(1)
+ k = key_states[:, head, :, :].unsqueeze(1)
+ v = value_states[:, head, :, :].unsqueeze(1)
+ if is_flash_attn_2_available():
+ attn_output = flash_attn_func(q.transpose(1, 2), k.transpose(1, 2), v.transpose(1,2), 0.0, softmax_scale=None, causal=q_len != 1).view(bsz, 1, q.shape[2], self.head_dim)
+ else:
+ attn_output = gather_qkv(q, k, v, attention_mask)
+ output[:, head:head + 1] = attn_output
+ attn_output = output.transpose(1, 2).contiguous()
+ attn_output = attn_output.reshape(bsz, q_len, self.hidden_size)
+ attn_output = self.o_proj(attn_output)
+
+ return attn_output, None, past_key_value
+
+ return forward
+
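+# Per-head pattern dispatch used by the vLLM integration: look up the offline-searched pattern for `head_id` and route to the matching sparse kernel (dense FlashAttention for single-token decode).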
+def gather_last_q_vertical_slash_topk_vllm(self, q, k, v, head_id):
+ kv_seq_len = k.size(2)
+ head_dim = q.size(-1)
+
+ def vertical_and_slash_kernel(q, k, v, vertical_size, slash_size):
+ vertical_size, slash_size = min(q_len, max(vertical_size, 30)), min(q_len, max(slash_size, 50))
+ last_q = min(64, q_len)
+ qk = torch.einsum(f'bhmk, bhnk -> bhmn', q[:,:,-last_q:,:], k)
+
+ qk[:, :, :, -last_q:] = torch.where(LAST_Q_MASK[...,-last_q:,-last_q:], qk[:, :, :, -last_q:], -torch.inf)
+ qk = torch.nn.functional.softmax(qk, dim=-1, dtype=torch.float32)
+ vertical = qk.sum(-2, keepdim=True)
+ vertical[...,:30] = torch.inf
+ vertical_topk = torch.topk(vertical, vertical_size, -1).indices
+
+ slash = sum_all_diagonal_matrix(qk)[...,:-last_q + 1]
+ slash[...,-100:] = torch.inf
+ slash_topk = slash
+ slash = (q_len - 1) - torch.topk(slash, slash_size, -1).indices
+
+ return vertical_slash_sparse_attention(q, k, v, vertical_topk, slash)
+
+ def block_sparse_kernel(q, k, v, vertical_size=None, slash_size=None):
+ topk = 100
+ return block_sparse_attention(q, k, v, topk)
+
+ def dense(q, k, v, vertical_size=None, slash_size=None):
+ return flash_attn_func(q.transpose(1, 2), k.transpose(1, 2), v.transpose(1,2), 0.0, softmax_scale=None, causal=q_len != 1).view(bsz, 1, q_len, head_dim)
+
+ q_len = q.shape[2]
+ bsz = q.shape[0]
+
+ ty, vertical_size, slash_size, _ = self.best_pattern[head_id]
+
+ if q_len == 1:
+ return dense(q, k, v)
+
+ fc = {
+ "stream_llm": streaming_forward,
+ "vertical_and_slash": vertical_and_slash_kernel,
+ "block_sparse": block_sparse_kernel,
+ }[ty]
+ return fc(q, k, v, vertical_size, slash_size)
+
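+# Builds a vLLM attention forward: prefill tokens go through the head-by-head MInference sparse attention above, decode tokens go through PagedAttention, with per-layer patterns taken from `pattern_config`.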
+def minference_vllm_forward(
+ pattern_config
+):
+ def forward(
+ self,
+ query: torch.Tensor,
+ key: torch.Tensor,
+ value: torch.Tensor,
+ kv_cache: torch.Tensor,
+ attn_metadata: AttentionMetadata[FlashAttentionMetadata],
+ kv_scale: float,
+ layer_idx: int,
+ ) -> torch.Tensor:
+ """Forward pass with FlashAttention and PagedAttention.
+
+ Args:
+ query: shape = [num_tokens, num_heads * head_size]
+ key: shape = [num_tokens, num_kv_heads * head_size]
+ value: shape = [num_tokens, num_kv_heads * head_size]
+ kv_cache: shape = [2, num_blocks, block_size * num_kv_heads * head_size]
+ attn_metadata: Metadata for attention.
+ kv_scale: Scaling factor applied when writing to / reading from the KV cache.
+ layer_idx: Decoder-layer index used to look up the per-head sparse pattern.
+ Returns:
+ shape = [num_tokens, num_heads * head_size]
+ """
+ self.best_pattern = {int(ii): jj for ii, jj in pattern_config[layer_idx].items()}
+ def repeat_kv(hidden_states, n_rep):
+ sqlen, num_head, head_dim = hidden_states.shape
+ if n_rep == 1:
+ return hidden_states
+ hidden_states = hidden_states[:, :, None, :].expand(sqlen, num_head, n_rep, head_dim)
+ return hidden_states.reshape(sqlen, num_head * n_rep, head_dim)
+
+ def minference_prefill_func(
+ q, k, v,
+ ):
+ # (seq_len, num_heads, head_size)
+ if q.size(-2) != k.size(-2):
+ k = repeat_kv(k, q.size(-2) // k.size(-2))
+ v = repeat_kv(v, q.size(-2) // v.size(-2))
+
+ output = torch.empty_like(q)
+ for head in range(q.size(-2)):
+ q_head = q[:, head, :].unsqueeze(1)
+ k_head = k[:, head, :].unsqueeze(1)
+ v_head = v[:, head, :].unsqueeze(1)
+
+ # (1, seq_len, num_heads, head_size)
+ q_head = q_head[None, ...]
+ k_head = k_head[None, ...]
+ v_head = v_head[None, ...]
+
+ q_head = q_head.transpose(1, 2)
+ k_head = k_head.transpose(1, 2)
+ v_head = v_head.transpose(1, 2)
+
+ out = self.gather_last_q_vertical_slash_topk_vllm(q_head, k_head, v_head, head)
+
+ out = out.transpose(1, 2).squeeze(0).contiguous()
+ output[:, head:head+1, :] = out
+ return output
+
+ num_tokens, hidden_size = query.shape
+ # Reshape the query, key, and value tensors.
+ query = query.view(-1, self.num_heads, self.head_size)
+ key = key.view(-1, self.num_kv_heads, self.head_size)
+ value = value.view(-1, self.num_kv_heads, self.head_size)
+
+ if kv_cache is not None:
+ key_cache, value_cache = PagedAttention.split_kv_cache(
+ kv_cache, self.num_kv_heads, self.head_size)
+
+ # Reshape the input keys and values and store them in the cache.
+ # If kv_cache is not provided, the new key and value tensors are
+ # not cached. This happens during the initial memory profiling run.
+ PagedAttention.write_to_paged_cache(key, value, key_cache,
+ value_cache,
+ attn_metadata.slot_mapping,
+ attn_metadata.kv_cache_dtype,
+ kv_scale)
+
+ num_prefill_tokens = attn_metadata.num_prefill_tokens
+ num_decode_tokens = attn_metadata.num_decode_tokens
+ assert key.shape[0] == num_prefill_tokens + num_decode_tokens
+ assert value.shape[0] == num_prefill_tokens + num_decode_tokens
+
+ output = torch.empty_like(query)
+ # Query for decode. KV is not needed because it is already cached.
+ decode_query = query[num_prefill_tokens:]
+ # QKV for prefill.
+ query = query[:num_prefill_tokens]
+ key = key[:num_prefill_tokens]
+ value = value[:num_prefill_tokens]
+
+ assert query.shape[0] == num_prefill_tokens
+ assert decode_query.shape[0] == num_decode_tokens
+
+ if prefill_meta := attn_metadata.prefill_metadata:
+ # Prompt run.
+ if kv_cache is None or prefill_meta.block_tables.numel() == 0:
+ # normal attention
+ # When block_tables are not filled, it means q and k are the
+ # prompt, and they have the same length.
+ # (seq_len, num_heads, head_size)
+ # out = flash_attn_varlen_func(
+ # q=query,
+ # k=key,
+ # v=value,
+ # cu_seqlens_q=prefill_meta.seq_start_loc,
+ # cu_seqlens_k=prefill_meta.seq_start_loc,
+ # max_seqlen_q=prefill_meta.max_prompt_len,
+ # max_seqlen_k=prefill_meta.max_prompt_len,
+ # softmax_scale=self.scale,
+ # causal=True,
+ # window_size=self.sliding_window,
+ # alibi_slopes=self.alibi_slopes,
+ # )
+ out = minference_prefill_func(query, key, value)
+ assert output[:num_prefill_tokens].shape == out.shape
+ output[:num_prefill_tokens] = out
+ else:
+ # prefix-enabled attention
+ # TODO(Hai) this triton kernel has regression issue (broke) to
+ # deal with different data types between KV and FP8 KV cache,
+ # to be addressed separately.
+ output[:num_prefill_tokens] = PagedAttention.forward_prefix(
+ query,
+ key,
+ value,
+ key_cache,
+ value_cache,
+ prefill_meta.block_tables,
+ prefill_meta.subquery_start_loc,
+ prefill_meta.prompt_lens_tensor,
+ prefill_meta.context_lens,
+ prefill_meta.max_subquery_len,
+ self.alibi_slopes,
+ )
+ if decode_meta := attn_metadata.decode_metadata:
+ # Decoding run.
+ output[num_prefill_tokens:] = PagedAttention.forward_decode(
+ decode_query,
+ key_cache,
+ value_cache,
+ decode_meta.block_tables,
+ decode_meta.context_lens,
+ decode_meta.max_context_len,
+ attn_metadata.kv_cache_dtype,
+ self.num_kv_heads,
+ self.scale,
+ self.alibi_slopes,
+ kv_scale,
+ )
+
+ # Reshape the output tensor.
+ return output.view(num_tokens, hidden_size)
+
+ return forward
diff --git a/minference/modules/snap_kv.py b/minference/modules/snap_kv.py
new file mode 100644
index 0000000..d6cf9e8
--- /dev/null
+++ b/minference/modules/snap_kv.py
@@ -0,0 +1,426 @@
+# Copyright (c) 2024 Microsoft
+# Licensed under The MIT License [see LICENSE for details]
+# Refer to the code in https://github.com/FasterDecoding/SnapKV/blob/main/snapkv/monkeypatch/snapkv_utils.py
+
+import math
+import time
+import warnings
+from importlib.metadata import version
+from typing import List, Optional, Tuple, Union
+
+import torch
+import torch.nn as nn
+import torch.nn.functional as F
+import transformers
+from transformers.cache_utils import Cache, DynamicCache
+from transformers.models.llama.modeling_llama import apply_rotary_pos_emb, repeat_kv
+from transformers.utils import logging
+
+logger = logging.get_logger(__name__)
+
+
+# https://github.com/huggingface/transformers/blob/v4.37-release/src/transformers/models/llama/modeling_llama.py
+def llama_flash_attn2_forward(
+ self,
+ hidden_states: torch.Tensor,
+ attention_mask: Optional[torch.LongTensor] = None,
+ position_ids: Optional[torch.LongTensor] = None,
+ past_key_value: Optional[Cache] = None,
+ output_attentions: bool = False,
+ use_cache: bool = False,
+ **kwargs,
+) -> Tuple[torch.Tensor, Optional[torch.Tensor], Optional[Tuple[torch.Tensor]]]:
+ # [SnapKV] register kv_cluster
+ init_snapkv(self)
+ # LlamaFlashAttention2 attention does not support output_attentions
+ if "padding_mask" in kwargs:
+ warnings.warn(
+ "Passing `padding_mask` is deprecated and will be removed in v4.37. Please make sure use `attention_mask` instead.`"
+ )
+
+ # overwrite attention_mask with padding_mask
+ attention_mask = kwargs.pop("padding_mask")
+
+ output_attentions = False
+
+ bsz, q_len, _ = hidden_states.size()
+
+ query_states = self.q_proj(hidden_states)
+ key_states = self.k_proj(hidden_states)
+ value_states = self.v_proj(hidden_states)
+
+ # Flash attention requires the input to have the shape
+ # batch_size x seq_length x head_dim x hidden_dim
+ # therefore we just need to keep the original shape
+ query_states = query_states.view(
+ bsz, q_len, self.num_heads, self.head_dim
+ ).transpose(1, 2)
+ key_states = key_states.view(
+ bsz, q_len, self.num_key_value_heads, self.head_dim
+ ).transpose(1, 2)
+ value_states = value_states.view(
+ bsz, q_len, self.num_key_value_heads, self.head_dim
+ ).transpose(1, 2)
+
+ kv_seq_len = key_states.shape[-2]
+ # if past_key_value is not None:
+ # kv_seq_len += past_key_value.get_usable_length(kv_seq_len, self.layer_idx)
+ if past_key_value is not None:
+ if self.layer_idx is None:
+ raise ValueError(
+ f"The cache structure has changed since version v4.36. If you are using {self.__class__.__name__} "
+ "for auto-regressive decoding with k/v caching, please make sure to initialize the attention class "
+ "with a layer index."
+ )
+ if hasattr(self, "kv_seq_len"): # [SnapKV] add kv_seq_len
+ if self.kv_seq_len != 0:
+ kv_seq_len += self.kv_seq_len
+ else:
+ kv_seq_len += past_key_value.get_usable_length(
+ kv_seq_len, self.layer_idx
+ )
+ else:
+ kv_seq_len += past_key_value.get_usable_length(kv_seq_len, self.layer_idx)
+
+ cos, sin = self.rotary_emb(value_states, seq_len=kv_seq_len)
+ query_states, key_states = apply_rotary_pos_emb(
+ query_states, key_states, cos, sin, position_ids
+ )
+ # [SnapKV] move to ahead
+ key_states = repeat_kv(key_states, self.num_key_value_groups)
+ value_states = repeat_kv(value_states, self.num_key_value_groups)
+
+ if past_key_value is not None:
+ cache_kwargs = {"sin": sin, "cos": cos} # Specific to RoPE models
+ # key_states, value_states = past_key_value.update(key_states, value_states, self.layer_idx, cache_kwargs)
+ # print('kv_seq_len:', kv_seq_len)
+ # print('key_states.shape:', key_states.shape)
+ if key_states.shape[-2] == kv_seq_len: # [SnapKV] add kv_cluster
+ self.kv_seq_len = kv_seq_len # [SnapKV] register kv_seq_len
+ key_states_compress, value_states_compress = self.kv_cluster.update_kv(
+ key_states,
+ query_states,
+ value_states,
+ attention_mask,
+ self.num_key_value_groups,
+ )
+ past_key_value.update(
+ key_states_compress, value_states_compress, self.layer_idx, cache_kwargs
+ )
+ else:
+ self.kv_seq_len += q_len
+ key_states, value_states = past_key_value.update(
+ key_states, value_states, self.layer_idx, cache_kwargs
+ )
+
+ # TODO: These transpose are quite inefficient but Flash Attention requires the layout [batch_size, sequence_length, num_heads, head_dim]. We would need to refactor the KV cache
+ # to be able to avoid many of these transpose/reshape/view.
+ query_states = query_states.transpose(1, 2)
+ key_states = key_states.transpose(1, 2)
+ value_states = value_states.transpose(1, 2)
+
+ dropout_rate = self.attention_dropout if self.training else 0.0
+
+ # In PEFT, usually we cast the layer norms in float32 for training stability reasons
+ # therefore the input hidden states gets silently casted in float32. Hence, we need
+ # cast them back in the correct dtype just to be sure everything works as expected.
+ # This might slowdown training & inference so it is recommended to not cast the LayerNorms
+ # in fp32. (LlamaRMSNorm handles it correctly)
+
+ input_dtype = query_states.dtype
+ if input_dtype == torch.float32:
+ if torch.is_autocast_enabled():
+ target_dtype = torch.get_autocast_gpu_dtype()
+ # Handle the case where the model is quantized
+ elif hasattr(self.config, "_pre_quantization_dtype"):
+ target_dtype = self.config._pre_quantization_dtype
+ else:
+ target_dtype = self.q_proj.weight.dtype
+
+ logger.warning_once(
+ f"The input hidden states seems to be silently casted in float32, this might be related to"
+ f" the fact you have upcasted embedding or layer norm layers in float32. We will cast back the input in"
+ f" {target_dtype}."
+ )
+
+ query_states = query_states.to(target_dtype)
+ key_states = key_states.to(target_dtype)
+ value_states = value_states.to(target_dtype)
+
+ attn_output = self._flash_attention_forward(
+ query_states,
+ key_states,
+ value_states,
+ attention_mask,
+ q_len,
+ dropout=dropout_rate,
+ )
+
+ attn_output = attn_output.reshape(bsz, q_len, self.hidden_size).contiguous()
+ attn_output = self.o_proj(attn_output)
+
+ if not output_attentions:
+ attn_weights = None
+
+ return attn_output, attn_weights, past_key_value
+
+
+def prepare_inputs_for_generation_llama(
+ self,
+ input_ids,
+ past_key_values=None,
+ attention_mask=None,
+ inputs_embeds=None,
+ **kwargs,
+):
+ if past_key_values is None: # [SnapKV]
+ for layer in self.model.layers:
+ layer.self_attn.kv_seq_len = 0
+ if past_key_values is not None:
+ if isinstance(past_key_values, Cache):
+ cache_length = past_key_values.get_seq_length()
+ past_length = past_key_values.seen_tokens
+ max_cache_length = past_key_values.get_max_length()
+ else:
+ # cache_length = past_length = past_key_values[0][0].shape[2]
+ # max_cache_length = None
+ cache_length = past_length = self.model.layers[0].self_attn.kv_seq_len
+ max_cache_length = None
+ # Keep only the unprocessed tokens:
+ # 1 - If the length of the attention_mask exceeds the length of input_ids, then we are in a setting where
+ # some of the inputs are exclusively passed as part of the cache (e.g. when passing input_embeds as
+ # input)
+ if attention_mask is not None and attention_mask.shape[1] > input_ids.shape[1]:
+ input_ids = input_ids[:, -(attention_mask.shape[1] - past_length) :]
+ # 2 - If the past_length is smaller than input_ids', then input_ids holds all input tokens. We can discard
+ # input_ids based on the past_length.
+ elif past_length < input_ids.shape[1]:
+ input_ids = input_ids[:, past_length:]
+ # 3 - Otherwise (past_length >= input_ids.shape[1]), let's assume input_ids only has unprocessed tokens.
+
+ # If we are about to go beyond the maximum cache length, we need to crop the input attention mask.
+ if (
+ max_cache_length is not None
+ and attention_mask is not None
+ and cache_length + input_ids.shape[1] > max_cache_length
+ ):
+ attention_mask = attention_mask[:, -max_cache_length:]
+
+ position_ids = kwargs.get("position_ids", None)
+ if attention_mask is not None and position_ids is None:
+ # create position_ids on the fly for batch generation
+ position_ids = attention_mask.long().cumsum(-1) - 1
+ position_ids.masked_fill_(attention_mask == 0, 1)
+ if past_key_values:
+ position_ids = position_ids[:, -input_ids.shape[1] :]
+
+ # if `inputs_embeds` are passed, we only want to use them in the 1st generation step
+ if inputs_embeds is not None and past_key_values is None:
+ model_inputs = {"inputs_embeds": inputs_embeds}
+ else:
+ model_inputs = {"input_ids": input_ids}
+
+ model_inputs.update(
+ {
+ "position_ids": position_ids,
+ "past_key_values": past_key_values,
+ "use_cache": kwargs.get("use_cache"),
+ "attention_mask": attention_mask,
+ }
+ )
+ return model_inputs
+
+
+llama_flash_attn2_forward_4_37 = llama_flash_attn2_forward
+prepare_inputs_for_generation_llama_4_37 = prepare_inputs_for_generation_llama
+
+
+@torch.no_grad()
+def rope_forward(self, x, seq_len):
+ # x: [bs, num_attention_heads, seq_len, head_size]
+ position_ids = torch.arange(seq_len, device=x.device).unsqueeze(0)
+ inv_freq_expanded = (
+ self.inv_freq[None, :, None].float().expand(position_ids.shape[0], -1, 1)
+ )
+ position_ids_expanded = position_ids[:, None, :].float()
+ # Force float32 since bfloat16 loses precision on long contexts
+ # See https://github.com/huggingface/transformers/pull/29285
+ device_type = x.device.type
+ device_type = (
+ device_type if isinstance(device_type, str) and device_type != "mps" else "cpu"
+ )
+ with torch.autocast(device_type=device_type, enabled=False):
+ freqs = (inv_freq_expanded.float() @ position_ids_expanded.float()).transpose(
+ 1, 2
+ )
+ emb = torch.cat((freqs, freqs), dim=-1)
+ cos = emb.cos()
+ sin = emb.sin()
+ return cos.to(dtype=x.dtype), sin.to(dtype=x.dtype)
+
+
+##################
+
+# Perform the QK calculation over the observation window and select which KV
+# indices to keep; the selection is computed once at prefill and is not
+# updated during incremental decoding.
+
+
+# Copied from transformers.models.llama.modeling_llama.repeat_kv
+def repeat_kv(hidden_states: torch.Tensor, n_rep: int) -> torch.Tensor:
+ """
+ This is the equivalent of torch.repeat_interleave(x, dim=1, repeats=n_rep). The hidden states go from (batch,
+ num_key_value_heads, seqlen, head_dim) to (batch, num_attention_heads, seqlen, head_dim)
+ """
+ batch, num_key_value_heads, slen, head_dim = hidden_states.shape
+ if n_rep == 1:
+ return hidden_states
+ hidden_states = hidden_states[:, :, None, :, :].expand(
+ batch, num_key_value_heads, n_rep, slen, head_dim
+ )
+ return hidden_states.reshape(batch, num_key_value_heads * n_rep, slen, head_dim)
+
+
+class SnapKVCluster:
+ def __init__(
+ self,
+ window_size=64,
+ max_capacity_prompt=256 + 64,
+ kernel_size=5,
+ pooling="avgpool",
+ ):
+ self.window_size = window_size
+ self.max_capacity_prompt = max_capacity_prompt
+ assert self.max_capacity_prompt - self.window_size > 0
+ self.kernel_size = kernel_size
+ self.pooling = pooling
+
+ def reset(
+ self,
+ window_size=64,
+ max_capacity_prompt=256 + 64,
+ kernel_size=5,
+ pooling="avgpool",
+ ):
+ self.window_size = window_size
+ self.max_capacity_prompt = max_capacity_prompt
+ assert self.max_capacity_prompt - self.window_size > 0
+ self.kernel_size = kernel_size
+ self.pooling = pooling
+
+ def update_kv(
+ self,
+ key_states,
+ query_states,
+ value_states,
+ attention_mask,
+ num_key_value_groups,
+ ):
+ # check if prefix phase
+ assert key_states.shape[-2] == query_states.shape[-2]
+ bsz, num_heads, q_len, head_dim = query_states.shape
+ if q_len < self.max_capacity_prompt:
+ return key_states, value_states
+ else:
+ attn_weights = torch.matmul(
+ query_states[..., -self.window_size :, :], key_states.transpose(2, 3)
+ ) / math.sqrt(head_dim)
+ mask = torch.full(
+ (self.window_size, self.window_size),
+ torch.finfo(attn_weights.dtype).min,
+ device=attn_weights.device,
+ )
+ mask_cond = torch.arange(mask.size(-1), device=attn_weights.device)
+ mask.masked_fill_(mask_cond < (mask_cond + 1).view(mask.size(-1), 1), 0)
+ mask = mask.to(attn_weights.device)
+ attention_mask = mask[None, None, :, :]
+
+ attn_weights[
+ :, :, -self.window_size :, -self.window_size :
+ ] += attention_mask
+
+ attn_weights = nn.functional.softmax(
+ attn_weights, dim=-1, dtype=torch.float32
+ ).to(query_states.dtype)
+ attn_weights_sum = attn_weights[
+ :, :, -self.window_size :, : -self.window_size
+ ].sum(dim=-2)
+ if self.pooling == "avgpool":
+ attn_cache = F.avg_pool1d(
+ attn_weights_sum,
+ kernel_size=self.kernel_size,
+ padding=self.kernel_size // 2,
+ stride=1,
+ )
+ elif self.pooling == "maxpool":
+ attn_cache = F.max_pool1d(
+ attn_weights_sum,
+ kernel_size=self.kernel_size,
+ padding=self.kernel_size // 2,
+ stride=1,
+ )
+ else:
+ raise ValueError("Pooling method not supported")
+ indices = attn_cache.topk(
+ self.max_capacity_prompt - self.window_size, dim=-1
+ ).indices
+ indices = indices.unsqueeze(-1).expand(-1, -1, -1, head_dim)
+ k_past_compress = key_states[:, :, : -self.window_size, :].gather(
+ dim=2, index=indices
+ )
+ v_past_compress = value_states[:, :, : -self.window_size, :].gather(
+ dim=2, index=indices
+ )
+ k_cur = key_states[:, :, -self.window_size :, :]
+ v_cur = value_states[:, :, -self.window_size :, :]
+ key_states = torch.cat([k_past_compress, k_cur], dim=2)
+ value_states = torch.cat([v_past_compress, v_cur], dim=2)
+ return key_states, value_states
+
+
+def init_snapkv(self):
+ if not hasattr(self, "kv_cluster"):
+ if not hasattr(self.config, "window_size"):
+ self.config.window_size = 64
+ if not hasattr(self.config, "max_capacity_prompt"):
+ self.config.max_capacity_prompt = 4096
+ if not hasattr(self.config, "kernel_size"):
+ self.config.kernel_size = 13
+ if not hasattr(self.config, "pooling"):
+ self.config.pooling = "avgpool"
+ self.kv_cluster = SnapKVCluster(
+ window_size=self.config.window_size,
+ max_capacity_prompt=self.config.max_capacity_prompt,
+ kernel_size=self.config.kernel_size,
+ pooling=self.config.pooling,
+ )
+
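+# A minimal usage sketch (illustrative; the patched attention modules call
+# `init_snapkv(self)` above rather than constructing the cluster directly).
+# Tensors are prefill-shaped [bsz, num_heads, q_len, head_dim]; the
+# `attention_mask` argument is rebuilt internally, so None is fine here:
+#   cluster = SnapKVCluster(window_size=64, max_capacity_prompt=4096)
+#   k_comp, v_comp = cluster.update_kv(key_states, query_states, value_states, None, num_key_value_groups)
+#   # keeps the top (max_capacity_prompt - window_size) prompt tokens per head,
+#   # scored by the last `window_size` queries, plus the last `window_size` tokens.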
+
+############
+
+
+def check_version():
+ try:
+ transformers_version = version("transformers")
+ except Exception as e:
+ print(f"Transformers not installed: {e}")
+ transformers_version = ""
+ return transformers_version
+
+
+def replace_llama():
+ transformers_version = check_version()
+ version_list = ["4.37"]
+ warning_flag = True
+ for supported_version in version_list:
+ if supported_version in transformers_version:
+ warning_flag = False
+ break
+ if warning_flag:
+ warnings.warn(
+ f"Transformers version {transformers_version} might not be compatible with SnapKV. SnapKV is tested with Transformers version {version_list}."
+ )
+ transformers.models.llama.modeling_llama.LlamaForCausalLM.prepare_inputs_for_generation = (
+ prepare_inputs_for_generation_llama_4_37
+ )
+ transformers.models.llama.modeling_llama.LlamaFlashAttention2.forward = (
+ llama_flash_attn2_forward_4_37
+ )
diff --git a/minference/ops/block_sparse_flash_attention.py b/minference/ops/block_sparse_flash_attention.py
new file mode 100644
index 0000000..4932932
--- /dev/null
+++ b/minference/ops/block_sparse_flash_attention.py
@@ -0,0 +1,185 @@
+# Copyright (c) 2024 Microsoft
+# Licensed under The MIT License [see LICENSE for details]
+
+import numpy as np
+import pycuda.autoprimaryctx
+import torch
+import triton
+import triton.language as tl
+from flash_attn import flash_attn_varlen_func
+from pycuda.compiler import SourceModule
+
+
+# @triton.autotune(
+# configs=[
+# triton.Config({}, num_stages=1, num_warps=4),
+# triton.Config({}, num_stages=1, num_warps=8),
+# triton.Config({}, num_stages=2, num_warps=4),
+# triton.Config({}, num_stages=2, num_warps=8),
+# triton.Config({}, num_stages=3, num_warps=4),
+# triton.Config({}, num_stages=3, num_warps=8),
+# triton.Config({}, num_stages=4, num_warps=4),
+# triton.Config({}, num_stages=4, num_warps=8),
+# triton.Config({}, num_stages=5, num_warps=4),
+# triton.Config({}, num_stages=5, num_warps=8),
+# ],
+# key=['N_CTX'],
+# )
+@triton.jit
+def _triton_block_sparse_attn_fwd_kernel(
+ Q, K, V, seqlens, sm_scale,
+ block_index,
+ Out,
+ stride_qz, stride_qh, stride_qm, stride_qk,
+ stride_kz, stride_kh, stride_kn, stride_kk,
+ stride_vz, stride_vh, stride_vn, stride_vk,
+ stride_oz, stride_oh, stride_om, stride_ok,
+ Z, H, N_CTX,
+ NUM_ROWS, MAX_BLOCKS_PRE_ROW,
+ BLOCK_M: tl.constexpr,
+ BLOCK_N: tl.constexpr,
+ BLOCK_DMODEL: tl.constexpr,
+ dtype: tl.constexpr,
+):
+ start_m = tl.program_id(0)
+ off_hz = tl.program_id(1)
+
+ seqlen = tl.load(seqlens + off_hz // H)
+ if start_m * BLOCK_M >= seqlen:
+ return
+
+ # initialize offsets
+ offs_m = start_m * BLOCK_M + tl.arange(0, BLOCK_M)
+ offs_n = tl.arange(0, BLOCK_N)
+ offs_d = tl.arange(0, BLOCK_DMODEL)
+
+ qo_offset = (off_hz // H) * stride_qz + (off_hz % H) * stride_qh
+ kv_offset = (off_hz // H) * stride_kz + (off_hz % H) * stride_kh
+
+ q_ptrs = Q + qo_offset + offs_m[:, None] * stride_qm + offs_d[None, :] * stride_qk
+ k_ptrs = K + kv_offset + offs_d[:, None] * stride_kk
+ v_ptrs = V + kv_offset + offs_d[None, :] * stride_vk
+ o_ptrs = Out + qo_offset + offs_m[:, None] * stride_om + offs_d[None, :] * stride_ok
+
+ blocks_ptr = block_index + (off_hz * NUM_ROWS + start_m) * MAX_BLOCKS_PRE_ROW
+
+ # initialize pointer to m and l
+ m_i = tl.zeros([BLOCK_M], dtype=tl.float32) - float("inf")
+ l_i = tl.zeros([BLOCK_M], dtype=tl.float32)
+ acc = tl.zeros([BLOCK_M, BLOCK_DMODEL], dtype=tl.float32)
+ # scale sm_scale by log_2(e) and use
+ # 2^x instead of exp in the loop because CSE and LICM
+ # don't work as expected with `exp` in the loop
+ qk_scale = sm_scale * 1.44269504
+ # load q: it will stay in SRAM throughout
+ q = tl.load(q_ptrs)
+ q = (q * qk_scale).to(dtype)
+
+ # loop over k, v and update accumulator
+ m_mask = offs_m[:, None] < seqlen
+ block_count = tl.minimum((start_m + 1) * BLOCK_M // BLOCK_N, MAX_BLOCKS_PRE_ROW)
+
+ for sparse_block_idx in range(block_count):
+ real_block_idx = tl.load(blocks_ptr + sparse_block_idx)
+ start_n = real_block_idx * BLOCK_N
+ cols = start_n + offs_n
+ # -- load k, v --
+ k = tl.load(k_ptrs + cols[None, :] * stride_kn)
+ v = tl.load(v_ptrs + cols[:, None] * stride_vn)
+ # -- compute qk --
+ qk = tl.zeros([BLOCK_M, BLOCK_N], dtype=tl.float32)
+ # if start_n + BLOCK_N < seqlen:
+ # qk = tl.where(m_mask, qk, float("-inf"))
+ # else:
+ causal_mask = cols[None, :] <= offs_m[:, None]
+ qk = tl.where(m_mask & causal_mask, qk, float("-inf"))
+ qk += tl.dot(q, k)
+ # -- compute scaling constant --
+ m_i_new = tl.maximum(m_i, tl.max(qk, 1))
+ alpha = tl.math.exp2(m_i - m_i_new)
+ p = tl.math.exp2(qk - m_i_new[:, None])
+ # -- scale and update acc --
+ acc_scale = l_i * 0 + alpha # workaround some compiler bug
+ acc *= acc_scale[:, None]
+ acc += tl.dot(p.to(dtype), v)
+ # -- update m_i and l_i --
+ l_i = l_i * alpha + tl.sum(p, 1)
+ m_i = m_i_new
+
+ # write back O
+ acc /= l_i[:, None]
+ tl.store(o_ptrs, acc.to(dtype), mask=m_mask)
+
+
+def _triton_block_sparse_attention(
+ q, # [BATCH, N_HEADS, N_CTX, D_HEAD]
+ k, # [BATCH, N_HEADS, N_CTX, D_HEAD]
+ v, # [BATCH, N_HEADS, N_CTX, D_HEAD]
+ seqlens, # [BATCH, ]
+ block_index, # [BATCH, N_HEADS, cdiv(N_CTX, BLOCK_SIZE_M), MAX_BLOCKS_PRE_ROW]
+ sm_scale,
+ block_size_M=64,
+ block_size_N=64,
+) -> torch.Tensor:
+ # shape constraints
+ Lq, Lk, Lv = q.shape[-1], k.shape[-1], v.shape[-1]
+ assert Lq == Lk and Lk == Lv
+ assert Lk in {16, 32, 64, 128}
+ o = torch.zeros_like(q)
+ grid = (triton.cdiv(q.shape[2], block_size_M), q.shape[0] * q.shape[1], 1)
+ dtype = tl.bfloat16 if q.dtype == torch.bfloat16 else tl.float16
+ _triton_block_sparse_attn_fwd_kernel[grid](
+ q, k, v, seqlens, sm_scale,
+ block_index,
+ o,
+ q.stride(0), q.stride(1), q.stride(2), q.stride(3),
+ k.stride(0), k.stride(1), k.stride(2), k.stride(3),
+ v.stride(0), v.stride(1), v.stride(2), v.stride(3),
+ o.stride(0), o.stride(1), o.stride(2), o.stride(3),
+ q.shape[0], q.shape[1], q.shape[2],
+ block_index.shape[-2], block_index.shape[-1],
+ BLOCK_M=block_size_M, BLOCK_N=block_size_N,
+ BLOCK_DMODEL=Lk,
+ dtype=dtype,
+ num_warps=4, num_stages=2,
+ )
+
+ return o
+
+
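+# Mean-pool Q and K per block, score block pairs under a causal constraint, and return the sorted top-k key-block indices for every query block.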
+def _build_block_index(
+ query: torch.Tensor, # [BATCH, N_HEADS, N_CTX, D_HEAD]
+ key: torch.Tensor, # [BATCH, N_HEADS, N_CTX, D_HEAD]
+ top_k: int,
+ block_size_M: int = 64,
+ block_size_N: int = 64,
+):
+ batch_size, num_heads, context_size, head_dim = query.shape
+ query_pool = query.reshape((batch_size, num_heads, -1, block_size_M, head_dim)).mean(dim=-2)
+ key_pool = key.reshape((batch_size, num_heads, -1, block_size_N, head_dim)).mean(dim=-2)
+ arange_M = torch.arange(query_pool.shape[-2], dtype=torch.int32, device=query.device) * block_size_M
+ arange_N = torch.arange(key_pool.shape[-2], dtype=torch.int32, device=key.device) * block_size_N
+ p_pool = torch.einsum(f'bhmk, bhnk -> bhmn', query_pool, key_pool)
+ p_pool = p_pool.where(arange_M[None, None, :, None] >= arange_N[None, None, None, :], -torch.inf)
+ top_k = min(top_k, context_size // block_size_N)
+ return torch.topk(p_pool, top_k, dim=-1).indices.to(torch.int32).sort(dim=-1).values
+
+
+def block_sparse_attention(
+ query: torch.Tensor, # [BATCH, N_HEADS, N_CTX, D_HEAD]
+ key: torch.Tensor, # [BATCH, N_HEADS, N_CTX, D_HEAD]
+ value: torch.Tensor, # [BATCH, N_HEADS, N_CTX, D_HEAD]
+ top_k: int,
+ block_size_M: int = 64,
+ block_size_N: int = 64,
+):
+ batch_size, num_heads, context_size, head_dim = query.shape
+ pad = block_size_M - (query.shape[2] & (block_size_M - 1))
+ query = torch.nn.functional.pad(query, [0, 0, 0, pad, 0, 0, 0, 0])
+ key = torch.nn.functional.pad(key, [0, 0, 0, pad, 0, 0, 0, 0])
+ value = torch.nn.functional.pad(value, [0, 0, 0, pad, 0, 0, 0, 0])
+ seqlens = torch.tensor([context_size], dtype=torch.int32, device=query.device)
+ sm_scale = head_dim ** -0.5
+ block_index = _build_block_index(query, key, top_k, block_size_M, block_size_N)
+ out = _triton_block_sparse_attention(query, key, value, seqlens, block_index, sm_scale, block_size_M, block_size_N)
+ return out[..., :context_size, :]
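+
+# Minimal usage sketch (illustrative shapes; assumes fp16/bf16 CUDA tensors):
+#   q = torch.randn(1, 32, 8192, 128, device="cuda", dtype=torch.float16)
+#   k, v = torch.randn_like(q), torch.randn_like(q)
+#   out = block_sparse_attention(q, k, v, top_k=100)  # -> [1, 32, 8192, 128]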
diff --git a/minference/ops/pit_sparse_flash_attention.py b/minference/ops/pit_sparse_flash_attention.py
new file mode 100644
index 0000000..4f09504
--- /dev/null
+++ b/minference/ops/pit_sparse_flash_attention.py
@@ -0,0 +1,743 @@
+# Copyright (c) 2024 Microsoft
+# Licensed under The MIT License [see LICENSE for details]
+
+import numpy as np
+import pycuda.autoprimaryctx
+import torch
+import triton
+import triton.language as tl
+from flash_attn import flash_attn_varlen_func
+from pycuda.compiler import SourceModule
+
+
+@triton.autotune(
+ configs=[
+ triton.Config({}, num_stages=1, num_warps=4),
+ triton.Config({}, num_stages=1, num_warps=8),
+ triton.Config({}, num_stages=2, num_warps=4),
+ triton.Config({}, num_stages=2, num_warps=8),
+ triton.Config({}, num_stages=3, num_warps=4),
+ triton.Config({}, num_stages=3, num_warps=8),
+ triton.Config({}, num_stages=4, num_warps=4),
+ triton.Config({}, num_stages=4, num_warps=8),
+ triton.Config({}, num_stages=5, num_warps=4),
+ triton.Config({}, num_stages=5, num_warps=8),
+ ],
+ key=['N_CTX'],
+)
+@triton.jit
+def triton_sparse_fwd_kernel(
+ Q, K, V, seqlens, sm_scale,
+ col_count, col_index,
+ Out,
+ stride_qz, stride_qh, stride_qm, stride_qk,
+ stride_kz, stride_kh, stride_kn, stride_kk,
+ stride_vz, stride_vh, stride_vn, stride_vk,
+ stride_oz, stride_oh, stride_om, stride_ok,
+ Z, H, N_CTX,
+ NUM_ROWS, MAX_COLS_PRE_ROW,
+ BLOCK_M: tl.constexpr,
+ BLOCK_N: tl.constexpr,
+ BLOCK_DMODEL: tl.constexpr,
+ dtype: tl.constexpr,
+):
+ start_m = tl.program_id(0)
+ off_hz = tl.program_id(1)
+
+ seqlen = tl.load(seqlens + off_hz // H)
+ if start_m * BLOCK_M >= seqlen:
+ return
+
+ # initialize offsets
+ offs_m = start_m * BLOCK_M + tl.arange(0, BLOCK_M)
+ offs_n = tl.arange(0, BLOCK_N)
+ offs_d = tl.arange(0, BLOCK_DMODEL)
+
+ qo_offset = (off_hz // H) * stride_qz + (off_hz % H) * stride_qh
+ kv_offset = (off_hz // H) * stride_kz + (off_hz % H) * stride_kh
+
+ q_ptrs = Q + qo_offset + offs_m[:, None] * stride_qm + offs_d[None, :] * stride_qk
+ k_ptrs = K + kv_offset + offs_d[:, None] * stride_kk
+ v_ptrs = V + kv_offset + offs_d[None, :] * stride_vk
+ o_ptrs = Out + qo_offset + offs_m[:, None] * stride_om + offs_d[None, :] * stride_ok
+
+ num_cols = tl.load(col_count + off_hz * NUM_ROWS + start_m)
+ cols_ptr = col_index + (off_hz * NUM_ROWS + start_m) * MAX_COLS_PRE_ROW
+
+ # initialize pointer to m and l
+ m_i = tl.zeros([BLOCK_M], dtype=tl.float32) - float("inf")
+ l_i = tl.zeros([BLOCK_M], dtype=tl.float32)
+ acc = tl.zeros([BLOCK_M, BLOCK_DMODEL], dtype=tl.float32)
+ # scale sm_scale by log_2(e) and use
+ # 2^x instead of exp in the loop because CSE and LICM
+ # don't work as expected with `exp` in the loop
+ qk_scale = sm_scale * 1.44269504
+ # load q: it will stay in SRAM throughout
+ q = tl.load(q_ptrs)
+ q = (q * qk_scale).to(dtype)
+
+ # loop over k, v and update accumulator
+ m_mask = offs_m[:, None] < seqlen
+ split = tl.maximum(num_cols - BLOCK_N, 0) & ~(BLOCK_N - 1)
+
+ for start_n in range(0, split, BLOCK_N):
+ cols = tl.load(cols_ptr + start_n + offs_n)
+ # -- load k, v --
+ k = tl.load(k_ptrs + cols[None, :] * stride_kn)
+ v = tl.load(v_ptrs + cols[:, None] * stride_vn)
+ # -- compute qk --
+ qk = tl.zeros([BLOCK_M, BLOCK_N], dtype=tl.float32)
+ qk = tl.where(m_mask, qk, float("-inf"))
+ qk += tl.dot(q, k)
+ # -- compute scaling constant --
+ m_i_new = tl.maximum(m_i, tl.max(qk, 1))
+ alpha = tl.math.exp2(m_i - m_i_new)
+ p = tl.math.exp2(qk - m_i_new[:, None])
+ # -- scale and update acc --
+ acc_scale = l_i * 0 + alpha # workaround some compiler bug
+ acc *= acc_scale[:, None]
+ acc += tl.dot(p.to(dtype), v)
+ # -- update m_i and l_i --
+ l_i = l_i * alpha + tl.sum(p, 1)
+ m_i = m_i_new
+
+ for start_n in range(split, num_cols, BLOCK_N):
+ n_mask = start_n + offs_n < num_cols
+ cols = tl.load(cols_ptr + start_n + offs_n, mask=n_mask, other=N_CTX - 1)
+ causal_mask = cols[None, :] <= offs_m[:, None]
+ # -- load k, v --
+ k = tl.load(k_ptrs + cols[None, :] * stride_kn)
+ v = tl.load(v_ptrs + cols[:, None] * stride_vn)
+ # -- compute qk --
+ qk = tl.zeros([BLOCK_M, BLOCK_N], dtype=tl.float32)
+ qk = tl.where(m_mask & causal_mask, qk, float("-inf"))
+ qk += tl.dot(q, k)
+ # -- compute scaling constant --
+ m_i_new = tl.maximum(m_i, tl.max(qk, 1))
+ alpha = tl.math.exp2(m_i - m_i_new)
+ p = tl.math.exp2(qk - m_i_new[:, None])
+ # -- scale and update acc --
+ acc_scale = l_i * 0 + alpha # workaround some compiler bug
+ acc *= acc_scale[:, None]
+ acc += tl.dot(p.to(dtype), v)
+ # -- update m_i and l_i --
+ l_i = l_i * alpha + tl.sum(p, 1)
+ m_i = m_i_new
+
+ # write back O
+ acc = tl.where(m_mask, acc / l_i[:, None], 0.0)
+ tl.store(o_ptrs, acc.to(dtype), mask=m_mask)
+
+
+def triton_sparse_forward(
+ q, # [BATCH, N_HEADS, N_CTX, D_HEAD]
+ k, # [BATCH, N_HEADS, N_CTX, D_HEAD]
+ v, # [BATCH, N_HEADS, N_CTX, D_HEAD]
+ seqlens, # [BATCH, ]
+ col_count, # [BATCH, N_HEADS, cdiv(N_CTX, BLOCK_SIZE_M)]
+ col_index, # [BATCH, N_HEADS, cdiv(N_CTX, BLOCK_SIZE_M), MAX_COLS_PRE_ROW]
+ sm_scale,
+ block_size_M=64,
+ block_size_N=64,
+) -> torch.Tensor:
+ # shape constraints
+ Lq, Lk, Lv = q.shape[-1], k.shape[-1], v.shape[-1]
+ assert Lq == Lk and Lk == Lv
+ assert Lk in {16, 32, 64, 128}
+ o = torch.zeros_like(q)
+ grid = (triton.cdiv(q.shape[2], block_size_M), q.shape[0] * q.shape[1], 1)
+ num_warps = 4 if (Lk <= 64 or block_size_M <= 64) else 8 # 4
+ dtype = tl.bfloat16 if q.dtype == torch.bfloat16 else tl.float16
+ triton_sparse_fwd_kernel[grid](
+ q, k, v, seqlens, sm_scale,
+ col_count, col_index,
+ o,
+ q.stride(0), q.stride(1), q.stride(2), q.stride(3),
+ k.stride(0), k.stride(1), k.stride(2), k.stride(3),
+ v.stride(0), v.stride(1), v.stride(2), v.stride(3),
+ o.stride(0), o.stride(1), o.stride(2), o.stride(3),
+ q.shape[0], q.shape[1], q.shape[2],
+ col_index.shape[-2], col_index.shape[-1],
+ BLOCK_M=block_size_M, BLOCK_N=block_size_N,
+ BLOCK_DMODEL=Lk,
+ dtype=dtype,
+ # num_warps=num_warps, num_stages=4,
+ )
+
+ return o
+
+
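+# Pure-PyTorch reference index builder: for each 64-row query block, expand the selected vertical/slash lines into the explicit list of KV columns to visit. The PyCUDA kernel further below implements the same logic on-device.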
+def torch_build_index(seqlens, vertical_indexes, slash_indexes, block_size_M=64):
+ max_cols_per_row = (seqlens.max().item() + 3) & (-4)
+ batch_size, num_heads, NNZ_S = slash_indexes.shape
+ NNZ_V = vertical_indexes.shape[-1]
+ num_rows = triton.cdiv(max_cols_per_row, block_size_M)
+ col_count = torch.zeros((batch_size, num_heads, num_rows), dtype=torch.int32)
+ col_index = torch.zeros((batch_size, num_heads, num_rows, max_cols_per_row), dtype=torch.int32)
+ for b in range(batch_size):
+ seqlen = seqlens[b]
+ for h in range(num_heads):
+ for m, start_m in enumerate(range(0, seqlen, block_size_M)):
+ end_m = start_m + block_size_M
+ tmp_col_count = 0
+ cursor, s, v = -1, 0, 0
+ v_idx = vertical_indexes[b, h, v].item()
+ while s < NNZ_S and slash_indexes[b, h, s] >= end_m:
+ s += 1
+ if s < NNZ_S:
+ s_idx = end_m - slash_indexes[b, h, s].item()
+ s_range = min(s_idx, block_size_M)
+ else:
+ s_idx = seqlen
+ s_range = 0
+ while s_idx <= end_m and v_idx < end_m:
+ if v_idx < s_idx:
+ if v_idx < s_idx - s_range:
+ col_index[b, h, m, tmp_col_count] = v_idx
+ tmp_col_count += 1
+ v += 1
+ if v < NNZ_V:
+ v_idx = vertical_indexes[b, h, v].item()
+ else:
+ break
+ else:
+ for idx in range(max(cursor, s_idx - s_range), min(s_idx, seqlen)):
+ col_index[b, h, m, tmp_col_count] = idx
+ tmp_col_count += 1
+ cursor = s_idx
+ s += 1
+ if s < NNZ_S:
+ s_idx = end_m - slash_indexes[b, h, s].item()
+ s_range = min(s_idx, block_size_M)
+ else:
+ break
+ while s_idx <= end_m and s < NNZ_S:
+ for idx in range(max(cursor, s_idx - s_range), min(s_idx, seqlen)):
+ col_index[b, h, m, tmp_col_count] = idx
+ tmp_col_count += 1
+ cursor = s_idx
+ s += 1
+ if s < NNZ_S:
+ s_idx = end_m - slash_indexes[b, h, s].item()
+ s_range = min(s_idx, block_size_M)
+ else:
+ break
+ while v_idx < end_m and v < NNZ_V:
+ if v_idx < s_idx - s_range:
+ col_index[b, h, m, tmp_col_count] = v_idx
+ tmp_col_count += 1
+ v += 1
+ if v < NNZ_V:
+ v_idx = vertical_indexes[b, h, v].item()
+ else:
+ break
+ col_count[b, h, m] = tmp_col_count
+ return col_count.to(seqlens.device), col_index.to(seqlens.device)
+
+
+
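+# CUDA source for the on-device index builder: each thread handles one query block of BLOCK_SIZE_M rows and merges the vertical and slash index streams into a column list; `save_list` writes contiguous runs with vectorized int4 stores when aligned.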
+PYCUDA_BUILD_INDEX_KERNEL_CODE = '''\
+__device__ int min(int x, int y) {
+ return x < y ? x : y;
+}
+
+__device__ int max(int x, int y) {
+ return x > y ? x : y;
+}
+
+__device__ void save_list(int* output, int loop_start, int loop_end, int& offset) {
+ if (loop_start + 4 >= loop_end) {
+ for (int idx = loop_start; idx < loop_end; idx++, offset++) {
+ output[offset] = idx;
+ }
+ return;
+ }
+ int4 tmp_int4;
+ int int4_start = ((offset + 3) & (-4)) - offset + loop_start;
+ int int4_end = ((offset + loop_end - loop_start) & (-4)) - offset + loop_start;
+ for (int idx = loop_start; idx < int4_start; idx++, offset++) {
+ output[offset] = idx;
+ }
+ for (int idx = int4_start; idx < int4_end; idx += 4, offset += 4) {
+ tmp_int4.x = idx + 0;
+ tmp_int4.y = idx + 1;
+ tmp_int4.z = idx + 2;
+ tmp_int4.w = idx + 3;
+ (reinterpret_cast(&output[offset]))[0] = tmp_int4;
+ }
+ for (int idx = int4_end; idx < loop_end; idx++, offset++) {
+ output[offset] = idx;
+ }
+}
+
+__global__ void PYCUDA_BUILD_INDEX_KERNEL(
+ const int* seqlens, // [BATCH, ]
+ const int* vertical_indexes, // [BATCH, N_HEADS, NNZ_V]
+ const int* slash_indexes, // [BATCH, N_HEADS, NNZ_S]
+ int* col_count, // [BATCH, N_HEADS, cdiv(N_CTX, BLOCK_SIZE_M)]
+ int* col_index, // [BATCH, N_HEADS, cdiv(N_CTX, BLOCK_SIZE_M), N_CTX]
+ int N_HEADS,
+ int N_CTX,
+ int BLOCK_SIZE_M,
+ int N_ROWS,
+ int NNZ_V,
+ int NNZ_S
+) {
+ const int batch_idx = blockIdx.y;
+ const int head_idx = blockIdx.x;
+ const int group_idx = blockIdx.z;
+
+ int seqlen = seqlens[batch_idx];
+ int block_idx_m = group_idx * blockDim.x + threadIdx.x;
+ int start_m = block_idx_m * BLOCK_SIZE_M;
+ if (start_m >= seqlen) {
+ return;
+ }
+ int end_m = start_m + BLOCK_SIZE_M;
+ vertical_indexes += (batch_idx * N_HEADS + head_idx) * NNZ_V;
+ slash_indexes += (batch_idx * N_HEADS + head_idx) * NNZ_S;
+ int row_offset = (batch_idx * N_HEADS + head_idx) * N_ROWS + block_idx_m;
+ col_count += row_offset;
+ col_index += row_offset * N_CTX;
+
+ int tmp_col_count = 0, cursor = -1, s = 0, v = 0;
+ int v_idx = vertical_indexes[v];
+ /*
+ int left = 0, right = NNZ_S - 1;
+ int tmp_s_idx = 0, target = end_m - 1;
+ s = (left + right) >> 1;
+ while (left + 1 < right) {
+ tmp_s_idx = slash_indexes[s];
+ if (tmp_s_idx > target) {
+ left = s;
+ } else if (tmp_s_idx < target) {
+ right = s;
+ } else {
+ break;
+ }
+ s = (left + right) >> 1;
+ }
+ */
+ while (s < NNZ_S && slash_indexes[s] >= end_m) s++;
+
+ int s_idx = (s < NNZ_S) ? (end_m - slash_indexes[s]) : seqlen;
+ int s_range = (s < NNZ_S) ? min(s_idx, BLOCK_SIZE_M) : 0;
+
+ while (s_idx <= end_m && v_idx < end_m) {
+ if (v_idx < s_idx) {
+ if (v_idx < s_idx - s_range) {
+ col_index[tmp_col_count] = v_idx;
+ tmp_col_count++;
+ }
+ v++;
+ if (v < NNZ_V) {
+ v_idx = vertical_indexes[v];
+ } else {
+ break;
+ }
+ } else {
+ save_list(col_index, max(cursor, s_idx - s_range), min(s_idx, seqlen), tmp_col_count);
+ cursor = s_idx;
+ s++;
+ if (s < NNZ_S) {
+ s_idx = end_m - slash_indexes[s];
+ s_range = min(s_idx, BLOCK_SIZE_M);
+ } else {
+ break;
+ }
+ }
+ }
+ while (s_idx <= end_m && s < NNZ_S) {
+ save_list(col_index, max(cursor, s_idx - s_range), min(s_idx, seqlen), tmp_col_count);
+ cursor = s_idx;
+ s++;
+ if (s < NNZ_S) {
+ s_idx = end_m - slash_indexes[s];
+ s_range = min(s_idx, BLOCK_SIZE_M);
+ } else {
+ break;
+ }
+ }
+ while (v_idx < end_m && v < NNZ_V) {
+ if (v_idx < s_idx - s_range) {
+ col_index[tmp_col_count] = v_idx;
+ tmp_col_count++;
+ }
+ v++;
+ if (v < NNZ_V) {
+ v_idx = vertical_indexes[v];
+ } else {
+ break;
+ }
+ }
+ col_count[0] = tmp_col_count;
+}
+'''
+PYCUDA_BUILD_INDEX_KERNEL = SourceModule(
+ PYCUDA_BUILD_INDEX_KERNEL_CODE,
+ options=['-std=c++14', '-O3'],
+).get_function(f'PYCUDA_BUILD_INDEX_KERNEL')
+
+
+def pycuda_build_index(seqlens, vertical_indexes, slash_indexes, block_size_M=64):
+ max_cols_per_row = (seqlens.max().item() + 3) & (-4)
+ batch_size, num_heads, NNZ_S = slash_indexes.shape
+ NNZ_V = vertical_indexes.shape[-1]
+ num_rows = triton.cdiv(max_cols_per_row, block_size_M)
+ col_count = torch.zeros((batch_size, num_heads, num_rows), dtype=torch.int32, device=seqlens.device)
+ col_index = torch.zeros((batch_size, num_heads, num_rows, max_cols_per_row), dtype=torch.int32, device=seqlens.device)
+ num_threads = 64
+ PYCUDA_BUILD_INDEX_KERNEL(
+ seqlens, vertical_indexes, slash_indexes,
+ col_count, col_index,
+ np.int32(num_heads), np.int32(max_cols_per_row), np.int32(block_size_M), np.int32(num_rows),
+ np.int32(NNZ_V), np.int32(NNZ_S),
+ # grid=(triton.cdiv(num_rows, num_threads), N_HEADS, BATCH),
+ grid=(num_heads, batch_size, triton.cdiv(num_rows, num_threads)),
+ block=(num_threads, 1, 1),
+ )
+ return col_count, col_index
+
+
+def make_causal_mask(seqlens, device, context_size):
+ batch_size = seqlens.shape[0]
+ arange = torch.arange(context_size, dtype=torch.int32, device=device)
+ causal_mask = arange[None, None, :, None] >= arange[None, None, None, :]
+ causal_mask = causal_mask.repeat((batch_size, 1, 1, 1))
+ for b, seqlen in enumerate(seqlens):
+ causal_mask[b, :, seqlen:, :] = False
+ causal_mask[b, :, :, seqlen:] = False
+ return causal_mask
+
+
+def make_finegrained_mask(vertical_indexes, slash_indexes, causal_mask, device):
+ batch_size, num_heads, _ = vertical_indexes.shape
+ context_size = causal_mask.shape[-1]
+ arange = torch.arange(context_size, dtype=torch.int32, device=device)
+ sparse_mask = torch.zeros((batch_size, num_heads, context_size, context_size), dtype=torch.bool, device=device)
+ for b in range(batch_size):
+ for h in range(num_heads):
+ for vertical_index in vertical_indexes[b, h]:
+ sparse_mask[b, h, :, vertical_index] = True
+ for slash_index in slash_indexes[b, h]:
+ sparse_mask[b, h].logical_or_(arange[:, None] - arange[None, :] == slash_index)
+ sparse_mask.logical_and_(causal_mask)
+ return sparse_mask
+
+
+def make_block_mask(col_count, col_index, seqlens, causal_mask, device, block_size_M=64):
+ batch_size, num_heads, _ = col_count.shape
+ context_size = causal_mask.shape[-1]
+ block_mask = torch.zeros((batch_size, num_heads, context_size, context_size), dtype=torch.bool, device=device)
+ for b in range(batch_size):
+ for h in range(num_heads):
+ for m, start_m in enumerate(range(0, seqlens[b], block_size_M)):
+ end_m = start_m + block_size_M
+ for c in range(col_count[b, h, m]):
+ block_mask[b, h, start_m:end_m, col_index[b, h, m, c]] = True
+ block_mask.logical_and_(causal_mask)
+ return block_mask
+
+
+def plot_mask(mask, name, batch=0, head=0):
+ import matplotlib.pyplot as plt
+ import seaborn as sns
+ plt.figure(figsize=(16, 12))
+ plt.clf()
+ mask = mask[batch, head].cpu().numpy()
+ sns.heatmap(mask)
+ plt.savefig(name)
+
+
+@triton.jit
+def triton_dense_fwd_kernel(
+ Q, K, V, seqlens, sm_scale,
+ Out,
+ stride_qz, stride_qh, stride_qm, stride_qk,
+ stride_kz, stride_kh, stride_kn, stride_kk,
+ stride_vz, stride_vh, stride_vn, stride_vk,
+ stride_oz, stride_oh, stride_om, stride_ok,
+ Z, H, N_CTX,
+ BLOCK_M: tl.constexpr, BLOCK_DMODEL: tl.constexpr,
+ BLOCK_N: tl.constexpr,
+ dtype: tl.constexpr,
+):
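+ # Dense causal FlashAttention-style forward (online softmax over K/V blocks),
+ # used as the dense baseline in test_flash_attention below.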
+ start_m = tl.program_id(0)
+ off_hz = tl.program_id(1)
+
+ seqlen = tl.load(seqlens + off_hz // H)
+ if start_m * BLOCK_M >= seqlen:
+ return
+
+ qo_offset = (off_hz // H) * stride_qz + (off_hz % H) * stride_qh
+ kv_offset = (off_hz // H) * stride_kz + (off_hz % H) * stride_kh
+ Q_block_ptr = tl.make_block_ptr(
+ base=Q + qo_offset,
+ shape=(N_CTX, BLOCK_DMODEL),
+ strides=(stride_qm, stride_qk),
+ offsets=(start_m * BLOCK_M, 0),
+ block_shape=(BLOCK_M, BLOCK_DMODEL),
+ order=(1, 0)
+ )
+ K_block_ptr = tl.make_block_ptr(
+ base=K + kv_offset,
+ shape=(BLOCK_DMODEL, N_CTX),
+ strides=(stride_kk, stride_kn),
+ offsets=(0, 0),
+ block_shape=(BLOCK_DMODEL, BLOCK_N),
+ order=(0, 1)
+ )
+ V_block_ptr = tl.make_block_ptr(
+ base=V + kv_offset,
+ shape=(N_CTX, BLOCK_DMODEL),
+ strides=(stride_vn, stride_vk),
+ offsets=(0, 0),
+ block_shape=(BLOCK_N, BLOCK_DMODEL),
+ order=(1, 0)
+ )
+ # initialize offsets
+ offs_m = start_m * BLOCK_M + tl.arange(0, BLOCK_M)
+ offs_n = tl.arange(0, BLOCK_N)
+ # initialize pointer to m and l
+ m_i = tl.zeros([BLOCK_M], dtype=tl.float32) - float("inf")
+ l_i = tl.zeros([BLOCK_M], dtype=tl.float32)
+ acc = tl.zeros([BLOCK_M, BLOCK_DMODEL], dtype=tl.float32)
+ # scale sm_scale by log_2(e) and use
+ # 2^x instead of exp in the loop because CSE and LICM
+ # don't work as expected with `exp` in the loop
+ qk_scale = sm_scale * 1.44269504
+ # load q: it will stay in SRAM throughout
+ q = tl.load(Q_block_ptr)
+ q = (q * qk_scale).to(dtype)
+ # loop over k, v and update accumulator
+ lo = 0
+ hi = (start_m + 1) * BLOCK_M
+ m_mask = offs_m[:, None] < seqlen
+
+ for start_n in range(lo, hi, BLOCK_N):
+ n_mask = (start_n + offs_n[None, :]) <= offs_m[:, None]
+ # -- load k, v --
+ k = tl.load(K_block_ptr)
+ v = tl.load(V_block_ptr)
+ # -- compute qk --
+ qk = tl.zeros([BLOCK_M, BLOCK_N], dtype=tl.float32)
+ qk = tl.where(m_mask & n_mask, qk, float("-inf"))
+ qk += tl.dot(q, k)
+ # -- compute scaling constant --
+ m_i_new = tl.maximum(m_i, tl.max(qk, 1))
+ alpha = tl.math.exp2(m_i - m_i_new)
+ p = tl.math.exp2(qk - m_i_new[:, None])
+ # -- scale and update acc --
+ acc_scale = l_i * 0 + alpha # workaround some compiler bug
+ acc *= acc_scale[:, None]
+ acc += tl.dot(p.to(dtype), v)
+ # -- update m_i and l_i --
+ l_i = l_i * alpha + tl.sum(p, 1)
+ m_i = m_i_new
+ # update pointers
+ K_block_ptr = tl.advance(K_block_ptr, (0, BLOCK_N))
+ V_block_ptr = tl.advance(V_block_ptr, (BLOCK_N, 0))
+ # write back O
+ acc = tl.where(m_mask, acc / l_i[:, None], 0.0)
+ O_block_ptr = tl.make_block_ptr(
+ base=Out + qo_offset,
+ shape=(N_CTX, BLOCK_DMODEL),
+ strides=(stride_om, stride_ok),
+ offsets=(start_m * BLOCK_M, 0),
+ block_shape=(BLOCK_M, BLOCK_DMODEL),
+ order=(1, 0)
+ )
+ tl.store(O_block_ptr, acc.to(dtype), mask=m_mask)
+
+
+def triton_dense_forward(q, k, v, seqlens, sm_scale, block_size_M=128, block_size_N=64) -> torch.Tensor:
+ # shape constraints
+ Lq, Lk, Lv = q.shape[-1], k.shape[-1], v.shape[-1]
+ assert Lq == Lk and Lk == Lv
+ assert Lk in {16, 32, 64, 128}
+ o = torch.zeros_like(q)
+ grid = (triton.cdiv(q.shape[2], block_size_M), q.shape[0] * q.shape[1], 1)
+ num_warps = 4 if Lk <= 64 else 8 # 4
+ dtype = tl.bfloat16 if q.dtype == torch.bfloat16 else tl.float16
+ triton_dense_fwd_kernel[grid](
+ q, k, v, seqlens, sm_scale,
+ o,
+ q.stride(0), q.stride(1), q.stride(2), q.stride(3),
+ k.stride(0), k.stride(1), k.stride(2), k.stride(3),
+ v.stride(0), v.stride(1), v.stride(2), v.stride(3),
+ o.stride(0), o.stride(1), o.stride(2), o.stride(3),
+ q.shape[0], q.shape[1], q.shape[2],
+ BLOCK_M=block_size_M, BLOCK_N=block_size_N,
+ BLOCK_DMODEL=Lk,
+ dtype=dtype,
+ num_warps=num_warps, num_stages=4,
+ )
+
+ return o
+
+
+def flash_attn_forward(q, k, v, seqlens, sm_scale, context_size) -> torch.Tensor:
+ return flash_attn_varlen_func(
+ q,
+ k,
+ v,
+ cu_seqlens_q=seqlens,
+ cu_seqlens_k=seqlens,
+ max_seqlen_q=context_size,
+ max_seqlen_k=context_size,
+ dropout_p=0.0,
+ softmax_scale=sm_scale,
+ causal=True,
+ )
+
+
+def torch_forward(
+ query: torch.Tensor,
+ key: torch.Tensor,
+ value: torch.Tensor,
+ mask: torch.Tensor,
+ sm_scale: float,
+) -> torch.Tensor:
+ p = torch.einsum('bhmk, bhnk -> bhmn', query, key) * sm_scale
+ p = p.where(mask, -torch.inf)
+ p_max = p.max(-1, keepdim=True).values
+ p_max = torch.where(p_max < 0, 0.0, p_max)
+ p_exp = torch.exp(p - p_max)
+ s = p_exp / (p_exp.sum(-1, keepdim=True) + 1e-6)
+ out = torch.einsum('bhmn, bhnk -> bhmk', s, value)
+ return out
+
+
+def profile(fn, total_flops, tag, warmup=25, rep=100):
+ ms = triton.testing.do_bench(fn, warmup=warmup, rep=rep)
+ tflops = total_flops / ms * 1e-9 # FLOP per ms * 1e-9 == TFLOP/s
+ print(f'{tag}: {ms:.3f} ms | {tflops:.3f} TFLOP/s')
+
+
+def test_flash_attention(
+ seqlens=None,
+ vertical_indexes=None,
+ slash_indexes=None,
+ dtype=torch.float16,
+ device="cuda",
+ torch_test=True,
+ batch_size=4,
+ num_heads=32,
+ context_size=1024,
+ head_dim=128,
+ sparsity=0.995,
+ block_size_M=64,
+ block_size_N=64,
+):
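+ # End-to-end check and benchmark: draw random vertical/slash indexes at the
+ # requested sparsity, verify the pycuda index builder against the torch
+ # reference, then compare and profile the dense/sparse Triton kernels and
+ # FlashAttention against a torch reference implementation.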
+ print('========================================')
+ print(f'BATCH={batch_size}, N_CTX={context_size}, N_HEADS={num_heads}, D_HEAD={head_dim}')
+ q = torch.randn((batch_size, num_heads, context_size, head_dim), dtype=dtype, device=device)
+ k = torch.randn((batch_size, num_heads, context_size, head_dim), dtype=dtype, device=device)
+ v = torch.randn((batch_size, num_heads, context_size, head_dim), dtype=dtype, device=device)
+ if seqlens is None:
+ seqlens = torch.randint(context_size // 2, context_size, (batch_size, ), dtype=torch.int32, device=device)
+ else:
+ seqlens = torch.tensor(seqlens, dtype=torch.int32, device=device)
+ dense_mask_nnz = seqlens.to(torch.float32).square().sum().item() * num_heads / 2
+ sm_scale = head_dim ** -0.5
+
+ causal_mask = make_causal_mask(seqlens, device, context_size)
+ if torch_test:
+ ref_o_dense = torch_forward(q, k, v, causal_mask, sm_scale)
+
+ if vertical_indexes is None or slash_indexes is None:
+ nnz = int((1 - sparsity) * context_size)
+ vertical_indexes = torch.stack([
+ torch.stack([
+ torch.randperm(seqlen, dtype=torch.int32, device=device)[:nnz].sort(descending=False)[0]
+ for _ in range(num_heads)
+ ])
+ for seqlen in seqlens
+ ])
+ slash_indexes = torch.concatenate([
+ torch.stack([
+ torch.stack([
+ torch.randperm(seqlen - 1, dtype=torch.int32, device=device)[:nnz].sort(descending=True)[0] + 1
+ for _ in range(num_heads)
+ ])
+ for seqlen in seqlens
+ ]),
+ torch.zeros((batch_size, num_heads, 1), dtype=torch.int32, device=device)
+ ], dim=-1)
+ col_count, col_index = pycuda_build_index(seqlens, vertical_indexes, slash_indexes, block_size_M)
+ if torch_test:
+ col_count_ref, col_index_ref = torch_build_index(seqlens, vertical_indexes, slash_indexes, block_size_M)
+ # import ipdb; ipdb.set_trace()
+ torch.testing.assert_close(col_count_ref, col_count)
+ torch.testing.assert_close(col_index_ref, col_index)
+ sparse_mask_nnz = col_count.to(torch.float32).sum().item() * block_size_M
+ print(f'block mask sparsity: {1 - sparse_mask_nnz / dense_mask_nnz}')
+ pycuda_build_index_fn = lambda: pycuda_build_index(seqlens, vertical_indexes, slash_indexes, block_size_M)
+ profile(pycuda_build_index_fn, 0., 'pycuda-index')
+
+ if torch_test:
+ finegrained_mask = make_finegrained_mask(vertical_indexes, slash_indexes, causal_mask, device)
+ block_mask = make_block_mask(col_count, col_index, seqlens, causal_mask, device, block_size_M)
+ # plot_mask(finegrained_mask, 'mask.png', 2, 26)
+ # plot_mask(block_mask, 'mask-1.png', 2, 26)
+ ref_o_sparse = torch_forward(q, k, v, block_mask, sm_scale)
+
+ triton_dense_fn = lambda: triton_dense_forward(q, k, v, seqlens, sm_scale)
+ output = triton_dense_fn()
+ if torch_test:
+ torch.testing.assert_close(output, ref_o_dense, atol=1e-2, rtol=0)
+ profile(triton_dense_fn, 2. * head_dim * dense_mask_nnz, 'triton-dense')
+
+ triton_sparse_fn = lambda: triton_sparse_forward(q, k, v, seqlens, col_count, col_index, sm_scale, block_size_M, block_size_N)
+ output = triton_sparse_fn()
+ if torch_test:
+ torch.testing.assert_close(output, ref_o_sparse, atol=1e-2, rtol=0)
+ profile(triton_sparse_fn, 2. * head_dim * sparse_mask_nnz, 'triton-sparse')
+
+ q = q.swapaxes(1, 2).contiguous()
+ k = k.swapaxes(1, 2).contiguous()
+ v = v.swapaxes(1, 2).contiguous()
+ q = torch.concatenate([q[i, :seqlen, :, :] for i, seqlen in enumerate(seqlens)])
+ k = torch.concatenate([k[i, :seqlen, :, :] for i, seqlen in enumerate(seqlens)])
+ v = torch.concatenate([v[i, :seqlen, :, :] for i, seqlen in enumerate(seqlens)])
+ seqlens = torch.nn.functional.pad(torch.cumsum(seqlens, dim=0, dtype=torch.int32), (1, 0))
+
+ flash_fn = lambda: flash_attn_forward(q, k, v, seqlens, sm_scale, context_size)
+ output = flash_fn()
+ output = torch.stack([
+ torch.nn.functional.pad(
+ output[seqlens[i]:seqlens[i + 1], :, :],
+ (0, 0, 0, 0, 0, context_size + seqlens[i] - seqlens[i + 1])
+ )
+ for i in range(batch_size)
+ ]).swapaxes(1, 2).contiguous()
+ if torch_test:
+ torch.testing.assert_close(output, ref_o_dense, atol=1e-2, rtol=0)
+ profile(flash_fn, 2. * head_dim * dense_mask_nnz, 'flash-dense')
+ print('========================================\n')
+
+
+def pit_sparse_flash_attention_forward(
+ query: torch.Tensor, # [BATCH, N_HEADS, N_CTX, D_HEAD]
+ key: torch.Tensor, # [BATCH, N_HEADS, N_CTX, D_HEAD]
+ value: torch.Tensor, # [BATCH, N_HEADS, N_CTX, D_HEAD]
+ v_idx: torch.Tensor, # [BATCH, N_HEADS, NNZ_V]
+ s_idx: torch.Tensor, # [BATCH, N_HEADS, NNZ_S]
+ block_size_M: int = 64,
+ block_size_N: int = 64,
+):
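+ # Pad the sequence length up to a multiple of block_size_M, sort the vertical
+ # indexes ascending and the slash indexes descending (the order the index
+ # builder walks them in), build the per-block column index on the GPU, run the
+ # sparse Triton kernel, and trim the output back to the original query length.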
+ q_len = query.shape[2]
+ pad = block_size_M - (query.shape[2] & (block_size_M - 1))
+ query = torch.nn.functional.pad(query, [0, 0, 0, pad, 0, 0, 0, 0])
+ key = torch.nn.functional.pad(key, [0, 0, 0, pad, 0, 0, 0, 0])
+ value = torch.nn.functional.pad(value, [0, 0, 0, pad, 0, 0, 0, 0])
+ batch_size, num_heads, context_size, head_dim = query.shape
+ v_idx = v_idx.to(torch.int32).reshape((batch_size, num_heads, -1)).sort(dim=-1, descending=False)[0]
+ s_idx = s_idx.to(torch.int32).reshape((batch_size, num_heads, -1)).sort(dim=-1, descending=True)[0]
+ seqlens = torch.tensor([context_size], dtype=torch.int32, device=query.device)
+ sm_scale = head_dim ** -0.5
+ col_count, col_index = pycuda_build_index(seqlens, v_idx, s_idx, block_size_M)
+ out = triton_sparse_forward(query, key, value, seqlens, col_count, col_index, sm_scale, block_size_M, block_size_N)[...,:q_len,:]
+ return out
diff --git a/minference/ops/pit_sparse_flash_attention_v2.py b/minference/ops/pit_sparse_flash_attention_v2.py
new file mode 100644
index 0000000..730809d
--- /dev/null
+++ b/minference/ops/pit_sparse_flash_attention_v2.py
@@ -0,0 +1,206 @@
+# Copyright (c) 2024 Microsoft
+# Licensed under The MIT License [see LICENSE for details]
+
+import math
+
+import torch
+import triton
+import triton.language as tl
+
+from ..cuda import convert_vertical_slash_indexes
+
+
+# @triton.autotune(
+# configs=[
+# triton.Config({}, num_stages=1, num_warps=4),
+# triton.Config({}, num_stages=1, num_warps=8),
+# triton.Config({}, num_stages=2, num_warps=4),
+# triton.Config({}, num_stages=2, num_warps=8),
+# triton.Config({}, num_stages=3, num_warps=4),
+# triton.Config({}, num_stages=3, num_warps=8),
+# triton.Config({}, num_stages=4, num_warps=4),
+# triton.Config({}, num_stages=4, num_warps=8),
+# triton.Config({}, num_stages=5, num_warps=4),
+# triton.Config({}, num_stages=5, num_warps=8),
+# ],
+# key=['N_CTX'],
+# )
+@triton.jit
+def _triton_mixed_sparse_attn_fwd_kernel(
+ Q, K, V, seqlens, sm_scale,
+ block_count, block_offset, column_count, column_index,
+ Out,
+ stride_qz, stride_qh, stride_qm, stride_qk,
+ stride_kz, stride_kh, stride_kn, stride_kk,
+ stride_vz, stride_vh, stride_vn, stride_vk,
+ stride_oz, stride_oh, stride_om, stride_ok,
+ Z, H, N_CTX,
+ NUM_ROWS, NNZ_S, NNZ_V,
+ BLOCK_M: tl.constexpr,
+ BLOCK_N: tl.constexpr,
+ BLOCK_DMODEL: tl.constexpr,
+ dtype: tl.constexpr,
+):
+ start_m = tl.program_id(0)
+ off_hz = tl.program_id(1)
+
+ seqlen = tl.load(seqlens + off_hz // H)
+ if start_m * BLOCK_M >= seqlen:
+ return
+
+ # initialize offsets
+ offs_m = start_m * BLOCK_M + tl.arange(0, BLOCK_M)
+ offs_n = tl.arange(0, BLOCK_N)
+ offs_d = tl.arange(0, BLOCK_DMODEL)
+
+ qo_offset = (off_hz // H) * stride_qz + (off_hz % H) * stride_qh
+ kv_offset = (off_hz // H) * stride_kz + (off_hz % H) * stride_kh
+
+ q_ptrs = Q + qo_offset + offs_m[:, None] * stride_qm + offs_d[None, :] * stride_qk
+ k_ptrs = K + kv_offset + offs_d[:, None] * stride_kk
+ v_ptrs = V + kv_offset + offs_d[None, :] * stride_vk
+ o_ptrs = Out + qo_offset + offs_m[:, None] * stride_om + offs_d[None, :] * stride_ok
+
+ num_blks = tl.load(block_count + off_hz * NUM_ROWS + start_m)
+ blks_ptr = block_offset + (off_hz * NUM_ROWS + start_m) * NNZ_S
+ num_cols = tl.load(column_count + off_hz * NUM_ROWS + start_m)
+ cols_ptr = column_index + (off_hz * NUM_ROWS + start_m) * NNZ_V
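+ # The sparse pattern for this row block comes in two parts: block_count /
+ # block_offset list whole BLOCK_N-sized key blocks (derived from the slash
+ # diagonals) and are handled in the first loop below; column_count /
+ # column_index list individual key columns (derived from the vertical lines)
+ # and are handled in the second loop.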
+
+ # initialize pointer to m and l
+ m_i = tl.zeros([BLOCK_M], dtype=tl.float32) - float("inf")
+ l_i = tl.zeros([BLOCK_M], dtype=tl.float32)
+ acc = tl.zeros([BLOCK_M, BLOCK_DMODEL], dtype=tl.float32)
+ # scale sm_scale by log_2(e) and use
+ # 2^x instead of exp in the loop because CSE and LICM
+ # don't work as expected with `exp` in the loop
+ qk_scale = sm_scale * 1.44269504
+ # load q: it will stay in SRAM throughout
+ q = tl.load(q_ptrs)
+ q = (q * qk_scale).to(dtype)
+
+ # loop over k, v and update accumulator
+ m_mask = offs_m[:, None] < seqlen
+
+ for block_index in range(num_blks):
+ start_n = tl.load(blks_ptr + block_index)
+ cols = start_n + offs_n
+ n_mask = cols < seqlen
+ # -- load k, v --
+ k = tl.load(k_ptrs + cols[None, :] * stride_kn, mask=n_mask[None, :], other=0.0)
+ v = tl.load(v_ptrs + cols[:, None] * stride_vn, mask=n_mask[:, None], other=0.0)
+ # -- compute qk --
+ qk = tl.zeros([BLOCK_M, BLOCK_N], dtype=tl.float32)
+ causal_mask = cols[None, :] <= offs_m[:, None]
+ qk = tl.where(m_mask & causal_mask, qk, float("-inf"))
+ qk += tl.dot(q, k)
+ # -- compute scaling constant --
+ m_i_new = tl.maximum(m_i, tl.max(qk, 1))
+ alpha = tl.math.exp2(m_i - m_i_new)
+ p = tl.math.exp2(qk - m_i_new[:, None])
+ # -- scale and update acc --
+ acc_scale = l_i * 0 + alpha # workaround some compiler bug
+ acc *= acc_scale[:, None]
+ acc += tl.dot(p.to(dtype), v)
+ # -- update m_i and l_i --
+ l_i = l_i * alpha + tl.sum(p, 1)
+ m_i = m_i_new
+
+ for start_n in range(0, num_cols, BLOCK_N):
+ n_mask = start_n + offs_n < num_cols
+ cols = tl.load(cols_ptr + start_n + offs_n, mask=n_mask, other=0)
+ # -- load k, v --
+ k = tl.load(k_ptrs + cols[None, :] * stride_kn, mask=n_mask[None, :], other=0.0)
+ v = tl.load(v_ptrs + cols[:, None] * stride_vn, mask=n_mask[:, None], other=0.0)
+ # -- compute qk --
+ qk = tl.zeros([BLOCK_M, BLOCK_N], dtype=tl.float32)
+ qk = tl.where(m_mask & n_mask, qk, float("-inf"))
+ qk += tl.dot(q, k)
+ # -- compute scaling constant --
+ m_i_new = tl.maximum(m_i, tl.max(qk, 1))
+ alpha = tl.math.exp2(m_i - m_i_new)
+ p = tl.math.exp2(qk - m_i_new[:, None])
+ # -- scale and update acc --
+ acc_scale = l_i * 0 + alpha # workaround some compiler bug
+ acc *= acc_scale[:, None]
+ acc += tl.dot(p.to(dtype), v)
+ # -- update m_i and l_i --
+ l_i = l_i * alpha + tl.sum(p, 1)
+ m_i = m_i_new
+
+ # write back O
+ acc /= l_i[:, None]
+ # acc = tl.where(m_mask, acc / l_i[:, None], 0.0)
+ tl.store(o_ptrs, acc.to(dtype), mask=m_mask)
+
+
+def _triton_mixed_sparse_attention(
+ q: torch.Tensor, # [BATCH, N_HEADS, N_CTX, D_HEAD]
+ k: torch.Tensor, # [BATCH, N_HEADS, N_CTX, D_HEAD]
+ v: torch.Tensor, # [BATCH, N_HEADS, N_CTX, D_HEAD]
+ seqlens: torch.Tensor, # [BATCH, ]
+ block_count: torch.Tensor, # [BATCH, N_HEADS, cdiv(N_CTX, BLOCK_SIZE_M)]
+ block_offset: torch.Tensor, # [BATCH, N_HEADS, cdiv(N_CTX, BLOCK_SIZE_M), NNZ_S]
+ column_count: torch.Tensor, # [BATCH, N_HEADS, cdiv(N_CTX, BLOCK_SIZE_M)]
+ column_index: torch.Tensor, # [BATCH, N_HEADS, cdiv(N_CTX, BLOCK_SIZE_M), NNZ_V]
+ sm_scale: float,
+ block_size_M: int = 64,
+ block_size_N: int = 64,
+) -> torch.Tensor:
+ # shape constraints
+ Lq, Lk, Lv = q.shape[-1], k.shape[-1], v.shape[-1]
+ assert Lq == Lk and Lk == Lv
+ assert Lk in {16, 32, 64, 128}
+ o = torch.zeros_like(q)
+ grid = (triton.cdiv(q.shape[2], block_size_M), q.shape[0] * q.shape[1], 1)
+ dtype = tl.bfloat16 if q.dtype == torch.bfloat16 else tl.float16
+ _triton_mixed_sparse_attn_fwd_kernel[grid](
+ q, k, v, seqlens, sm_scale,
+ block_count, block_offset, column_count, column_index,
+ o,
+ q.stride(0), q.stride(1), q.stride(2), q.stride(3),
+ k.stride(0), k.stride(1), k.stride(2), k.stride(3),
+ v.stride(0), v.stride(1), v.stride(2), v.stride(3),
+ o.stride(0), o.stride(1), o.stride(2), o.stride(3),
+ q.shape[0], q.shape[1], q.shape[2],
+ block_count.shape[-1], block_offset.shape[-1], column_index.shape[-1],
+ BLOCK_M=block_size_M, BLOCK_N=block_size_N,
+ BLOCK_DMODEL=Lk,
+ dtype=dtype,
+ num_warps=4, num_stages=2,
+ )
+
+ return o
+
+
+def vertical_slash_sparse_attention(
+ query: torch.Tensor, # [BATCH, N_HEADS, N_CTX, D_HEAD]
+ key: torch.Tensor, # [BATCH, N_HEADS, N_CTX, D_HEAD]
+ value: torch.Tensor, # [BATCH, N_HEADS, N_CTX, D_HEAD]
+ v_idx: torch.Tensor, # [BATCH, N_HEADS, NNZ_V]
+ s_idx: torch.Tensor, # [BATCH, N_HEADS, NNZ_S]
+ block_size_M: int = 64,
+ block_size_N: int = 64,
+):
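+ # Pipeline: pad the sequence length to a multiple of block_size_M, pad head_dim
+ # up to a power of two if it is not already a supported size, sort the vertical
+ # indexes ascending and the slash indexes descending, convert them into
+ # block/column sparse indexes via the CUDA extension, run the mixed sparse
+ # Triton kernel, and finally trim the output back to context_size and head_dim.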
+ batch_size, num_heads, context_size, head_dim = query.shape
+ pad = block_size_M - (context_size & (block_size_M - 1))
+ query = torch.nn.functional.pad(query, [0, 0, 0, pad, 0, 0, 0, 0])
+ key = torch.nn.functional.pad(key, [0, 0, 0, pad, 0, 0, 0, 0])
+ value = torch.nn.functional.pad(value, [0, 0, 0, pad, 0, 0, 0, 0])
+
+ if head_dim not in [16, 32, 64, 128, 256, 512]:
+ target_dim = 2 ** math.ceil(math.log2(head_dim)) - head_dim
+ query = torch.nn.functional.pad(query, [0, target_dim, 0, 0, 0, 0, 0, 0])
+ key = torch.nn.functional.pad(key, [0, target_dim, 0, 0, 0, 0, 0, 0])
+ value = torch.nn.functional.pad(value, [0, target_dim, 0, 0, 0, 0, 0, 0])
+
+ v_idx = v_idx.to(torch.int32).reshape((batch_size, num_heads, -1)).sort(dim=-1, descending=False)[0]
+ s_idx = s_idx.to(torch.int32).reshape((batch_size, num_heads, -1)).sort(dim=-1, descending=True)[0]
+ seqlens = torch.tensor([context_size], dtype=torch.int32, device=query.device)
+ sm_scale = head_dim ** -0.5
+ block_count, block_offset, column_count, column_index = convert_vertical_slash_indexes(
+ seqlens, v_idx, s_idx, context_size, block_size_M, block_size_N,
+ )
+ out = _triton_mixed_sparse_attention(
+ query, key, value, seqlens,
+ block_count, block_offset, column_count, column_index,
+ sm_scale, block_size_M, block_size_N,
+ )
+ return out[..., :context_size, :head_dim]
diff --git a/minference/ops/streaming_kernel.py b/minference/ops/streaming_kernel.py
new file mode 100644
index 0000000..8297b16
--- /dev/null
+++ b/minference/ops/streaming_kernel.py
@@ -0,0 +1,766 @@
+# Copyright (c) 2024 Microsoft
+# Licensed under The MIT License [see LICENSE for details]
+
+"""
+Fused Attention
+===============
+
+This is a Triton implementation of the Flash Attention v2 algorithm from Tri Dao (https://tridao.me/publications/flash2/flash2.pdf)
+Credits: OpenAI kernel team
+
+Extra Credits:
+- Original flash attention paper (https://arxiv.org/abs/2205.14135)
+- Rabe and Staats (https://arxiv.org/pdf/2112.05682v2.pdf)
+
+"""
+
+import math
+
+import torch
+import triton
+import triton.language as tl
+
+_BLOCK_N = 64
+_BLOCK_M = 64
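+# Default Triton tile sizes; they are halved and the launch retried if a kernel
+# below raises triton.OutOfResources (see the try/except blocks in get_score and
+# _forward).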
+
+@triton.jit
+def _attn_fwd_inner(acc, l_i, m_i, q,
+ K_block_ptr, V_block_ptr,
+ start_m, qk_scale, N_CTX,
+ sliding_window_offset, sliding_window_size,
+ BLOCK_M: tl.constexpr, BLOCK_DMODEL: tl.constexpr, BLOCK_N: tl.constexpr, SLIDING_WINDOW: tl.constexpr,
+ IS_EVEN_M: tl.constexpr, IS_EVEN_N: tl.constexpr, COMPLEMENT_SLIDING_WINDOW: tl.constexpr
+ ):
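+ # dist = query_pos - key_pos + sliding_window_offset. The regular sliding
+ # window keeps keys with 0 <= dist < sliding_window_size; the complement
+ # window keeps dist >= sliding_window_size (everything outside the local
+ # window). Only the regular window narrows the [lo, hi) loop range here; the
+ # complement case falls through to the full [0, N_CTX) range.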
+ # range of values handled by this stage
+ if SLIDING_WINDOW and not COMPLEMENT_SLIDING_WINDOW:
+ if COMPLEMENT_SLIDING_WINDOW:
+ lo = 0
+ hi = (((start_m + 1) * BLOCK_M + sliding_window_offset - sliding_window_size + BLOCK_N - 1) // BLOCK_N) * BLOCK_N
+ else:
+ lo = ((start_m * BLOCK_M + sliding_window_offset - sliding_window_size + 1) // BLOCK_N) * BLOCK_N
+ hi = ((((start_m + 1) * BLOCK_M - 1) + sliding_window_offset + BLOCK_N) // BLOCK_N) * BLOCK_N
+ if lo < 0:
+ lo = 0
+ if hi > N_CTX:
+ hi = N_CTX
+
+ # lo = 0
+ # hi = N_CTX
+ lo = tl.multiple_of(lo, BLOCK_N)
+ K_block_ptr = tl.advance(K_block_ptr, (0, lo))
+ V_block_ptr = tl.advance(V_block_ptr, (lo, 0))
+ else:
+ lo, hi = 0, N_CTX
+
+ # loop over k, v and update accumulator
+ for start_n in range(lo, hi, BLOCK_N):
+ start_n = tl.multiple_of(start_n, BLOCK_N)
+ # -- compute qk ----
+ if IS_EVEN_N:
+ k = tl.load(K_block_ptr)
+ else:
+ k = tl.load(K_block_ptr, boundary_check=(0, 1), padding_option="zero")
+
+ qk = tl.zeros([BLOCK_M, BLOCK_N], dtype=tl.float32)
+ qk += tl.dot(q, k)
+ qk = qk * qk_scale
+
+ if SLIDING_WINDOW:
+ dist = tl.arange(0, BLOCK_M)[:, None] - tl.arange(0, BLOCK_N)[None, :] \
+ + start_m * BLOCK_M - start_n + sliding_window_offset
+
+ if COMPLEMENT_SLIDING_WINDOW:
+ mask = (dist >= sliding_window_size)
+ else:
+ mask = (dist >= 0) & (dist < sliding_window_size)
+
+ qk = tl.where(mask, qk, float("-inf"))
+
+ if not IS_EVEN_N:
+ qk = tl.where(((tl.arange(0, BLOCK_N) + start_n) < N_CTX)[None, :], qk, float("-inf"))
+
+ m_ij = tl.maximum(m_i, tl.max(qk, 1))
+ qk = qk - m_ij[:, None]
+ p = tl.math.exp2(qk)
+
+ if SLIDING_WINDOW:
+ p = tl.where(mask, p, 0)
+
+ if not IS_EVEN_N:
+ p = tl.where(((tl.arange(0, BLOCK_N) + start_n) < N_CTX)[None, :], p, 0)
+
+ l_ij = tl.sum(p, 1)
+ # -- update m_i and l_i
+ tmp = m_i - m_ij
+ alpha_mask = (tmp != tmp) # check nan
+ alpha = tl.math.exp2(tmp)
+ alpha = tl.where(alpha_mask, 1., alpha)
+ l_i = l_i * alpha + l_ij
+ # -- update output accumulator --
+ acc = acc * alpha[:, None]
+ # update acc
+ if IS_EVEN_N:
+ v = tl.load(V_block_ptr)
+ else:
+ v = tl.load(V_block_ptr, boundary_check=(0, 1), padding_option="zero")
+
+ acc += tl.dot(p.to(v.dtype), v)
+ # update m_i and l_i
+ m_i = m_ij
+ V_block_ptr = tl.advance(V_block_ptr, (BLOCK_N, 0))
+ K_block_ptr = tl.advance(K_block_ptr, (0, BLOCK_N))
+
+ return acc, l_i, m_i
+
+
+@triton.heuristics(
+ {
+ "IS_EVEN_M": lambda args: args["N_CTX"] % args["BLOCK_M"] == 0,
+ "IS_EVEN_N": lambda args: args["NKV_CTX"] % args["BLOCK_N"] == 0,
+ }
+)
+@triton.jit
+def _attn_fwd(Q, K, V, sm_scale, M, Out, L,#
+ stride_qz, stride_qh, stride_qm, stride_qk, #
+ stride_kz, stride_kh, stride_kn, stride_kk, #
+ stride_vz, stride_vh, stride_vk, stride_vn, #
+ stride_oz, stride_oh, stride_om, stride_on, #
+ Z, H, H_KV, #
+ N_CTX, #
+ ROUND_CTX,
+ NKV_CTX,
+ sliding_window_offset,
+ sliding_window_size,
+ IS_EVEN_M: tl.constexpr,
+ IS_EVEN_N: tl.constexpr,
+ BLOCK_M: tl.constexpr, #
+ BLOCK_DMODEL: tl.constexpr, #
+ BLOCK_N: tl.constexpr, #
+ END: tl.constexpr,
+ INIT: tl.constexpr,
+ SLIDING_WINDOW: tl.constexpr,
+ COMPLEMENT_SLIDING_WINDOW: tl.constexpr
+ ):
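+ # One stage of a multi-pass attention: M and L persist the running row-wise
+ # max and softmax denominator across calls, so the same output tile can be
+ # accumulated over several K/V segments. INIT starts a fresh accumulator; END
+ # divides by L to finalize the output and folds log2(L) into M, which
+ # _score_kernel later reuses to recover normalized attention weights.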
+
+ start_m = tl.program_id(0)
+ off_hz = tl.program_id(1)
+ off_z = off_hz // H
+ off_h = off_hz % H
+ off_hkv = off_h // (H//H_KV)
+ q_offset = off_z.to(tl.int64) * stride_qz + off_h.to(tl.int64) * stride_qh
+ k_offset = off_z.to(tl.int64) * stride_kz + off_hkv.to(tl.int64) * stride_kh
+ v_offset = off_z.to(tl.int64) * stride_vz + off_hkv.to(tl.int64) * stride_vh
+ o_offset = off_z.to(tl.int64) * stride_oz + off_h.to(tl.int64) * stride_oh
+
+ # block pointers
+ Q_block_ptr = tl.make_block_ptr(
+ base=Q + q_offset,
+ shape=(N_CTX, BLOCK_DMODEL),
+ strides=(stride_qm, stride_qk),
+ offsets=(start_m * BLOCK_M, 0),
+ block_shape=(BLOCK_M, BLOCK_DMODEL),
+ order=(1, 0),
+ )
+ V_block_ptr = tl.make_block_ptr(
+ base=V + v_offset,
+ shape=(NKV_CTX, BLOCK_DMODEL),
+ strides=(stride_vk, stride_vn),
+ offsets=(0, 0),
+ block_shape=(BLOCK_N, BLOCK_DMODEL),
+ order=(1, 0),
+ )
+ K_block_ptr = tl.make_block_ptr(
+ base=K + k_offset,
+ shape=(BLOCK_DMODEL, NKV_CTX),
+ strides=(stride_kk, stride_kn),
+ offsets=(0, 0),
+ block_shape=(BLOCK_DMODEL, BLOCK_N),
+ order=(0, 1),
+ )
+ O_block_ptr = tl.make_block_ptr(
+ base=Out + o_offset,
+ shape=(ROUND_CTX, BLOCK_DMODEL),
+ strides=(stride_om, stride_on),
+ offsets=(start_m * BLOCK_M, 0),
+ block_shape=(BLOCK_M, BLOCK_DMODEL),
+ order=(1, 0),
+ )
+ # initialize offsets
+ offs_m = start_m * BLOCK_M + tl.arange(0, BLOCK_M)
+ # initialize pointer to m and l
+ m_ptrs = M + off_hz * ROUND_CTX + offs_m
+ l_ptrs = L + off_hz * ROUND_CTX + offs_m
+ if INIT:
+ m_i = tl.zeros([BLOCK_M], dtype=tl.float32) - float("inf")
+ l_i = tl.zeros([BLOCK_M], dtype=tl.float32) + 1.0
+ acc = tl.zeros([BLOCK_M, BLOCK_DMODEL], dtype=tl.float32)
+ else:
+ # don't have to check boundary for q len
+ m_i = tl.load(m_ptrs).to(tl.float32)
+ l_i = tl.load(l_ptrs).to(tl.float32)
+ acc = tl.load(O_block_ptr).to(tl.float32)
+
+ qk_scale = sm_scale
+ qk_scale *= 1.4426950408889634 # log2(e), so exp2 can replace exp below
+ # load q: it will stay in SRAM throughout
+ if IS_EVEN_M:
+ q = tl.load(Q_block_ptr)
+ else:
+ q = tl.load(Q_block_ptr, boundary_check=(0, 1), padding_option="zero")
+
+ acc, l_i, m_i = _attn_fwd_inner(acc, l_i, m_i, q, K_block_ptr, V_block_ptr, #
+ start_m, qk_scale, NKV_CTX, #
+ sliding_window_offset, sliding_window_size,
+ BLOCK_M, BLOCK_DMODEL, BLOCK_N, SLIDING_WINDOW, IS_EVEN_M, IS_EVEN_N,
+ COMPLEMENT_SLIDING_WINDOW)
+ # epilogue
+ if (END):
+ m_i += tl.math.log2(l_i)
+ acc = acc / l_i[:, None]
+ else:
+ tl.store(l_ptrs, l_i)
+
+ tl.store(m_ptrs, m_i)
+ tl.store(O_block_ptr, acc.to(Out.type.element_ty))
+
+
+@triton.heuristics(
+ {
+ "IS_EVEN_M": lambda args: args["N_CTX"] % args["BLOCK_M"] == 0,
+ "IS_EVEN_N": lambda args: args["NKV_CTX"] % args["BLOCK_N"] == 0,
+ }
+)
+@triton.jit
+def _score_kernel(
+ Q, K, M, sm_scale, Out,
+ stride_qz, stride_qh, stride_qm, stride_qk, #
+ stride_kz, stride_kh, stride_kn, stride_kk, #
+ stride_oz, stride_oh, stride_on,
+ Z, H, H_KV, #
+ N_CTX, #
+ ROUND_CTX,
+ NKV_CTX,
+ sliding_window_offset,
+ sliding_window_size,
+ SLIDING_WINDOW: tl.constexpr,
+ COMPLEMENT_SLIDING_WINDOW: tl.constexpr,
+ IS_EVEN_M: tl.constexpr,
+ IS_EVEN_N: tl.constexpr,
+ BLOCK_M: tl.constexpr, #
+ BLOCK_DMODEL: tl.constexpr, #
+ BLOCK_N: tl.constexpr, #
+):
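+ # For one BLOCK_N slice of key positions, accumulate the total attention
+ # probability each key receives from all query rows, reusing the per-row
+ # statistics M written by _attn_fwd (after the END stage, M holds
+ # max + log2(sum), so exp2(qk * qk_scale - M) is the normalized weight).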
+ start_n = tl.program_id(0)
+ off_hz = tl.program_id(1)
+ off_z = off_hz // H
+ off_h = off_hz % H
+ off_hkv = off_h // (H//H_KV)
+ q_offset = off_z.to(tl.int64) * stride_qz + off_h.to(tl.int64) * stride_qh
+ k_offset = off_z.to(tl.int64) * stride_kz + off_hkv.to(tl.int64) * stride_kh
+ m_ptrs = M + off_hz * ROUND_CTX + tl.arange(0, BLOCK_M)
+ o = tl.zeros([BLOCK_M], dtype=tl.float32)
+
+ Q_block_ptr = tl.make_block_ptr(
+ base=Q + q_offset,
+ shape=(N_CTX, BLOCK_DMODEL),
+ strides=(stride_qm, stride_qk),
+ offsets=(0, 0),
+ block_shape=(BLOCK_M, BLOCK_DMODEL),
+ order=(1, 0),
+ )
+ K_block_ptr = tl.make_block_ptr(
+ base=K + k_offset,
+ shape=(BLOCK_DMODEL, NKV_CTX),
+ strides=(stride_kk, stride_kn),
+ offsets=(0, start_n * BLOCK_N),
+ block_shape=(BLOCK_DMODEL, BLOCK_N),
+ order=(0, 1),
+ )
+
+ if IS_EVEN_N:
+ k = tl.load(K_block_ptr)
+ else:
+ k = tl.load(K_block_ptr, boundary_check=(0, 1), padding_option="zero")
+
+
+ lo = 0
+ hi = ROUND_CTX
+ qk_scale = sm_scale
+ qk_scale *= 1.4426950408889634 # log2(e), so exp2 can replace exp below
+
+ for start_m in range(lo, hi, BLOCK_M):
+ start_m = tl.multiple_of(start_m, BLOCK_M)
+ if IS_EVEN_M:
+ q = tl.load(Q_block_ptr)
+ else:
+ q = tl.load(Q_block_ptr, boundary_check=(0,1), padding_option="zero")
+
+ m = tl.load(m_ptrs)
+
+ # calc qk
+ qk = tl.zeros([BLOCK_M, BLOCK_N], dtype=tl.float32)
+ qk += tl.dot(q, k)
+ qk = qk * qk_scale
+
+ if SLIDING_WINDOW:
+ # dist = tl.arange(start_m, start_m + BLOCK_M)[:, None] \
+ # - tl.arange(start_n * BLOCK_N, (start_n + 1) + BLOCK_N)[None, :] + sliding_window_offset
+ dist = tl.arange(0, BLOCK_M)[:, None] - tl.arange(0, BLOCK_N)[None, :] \
+ + start_m - start_n * BLOCK_N + sliding_window_offset
+
+ if COMPLEMENT_SLIDING_WINDOW:
+ mask = (dist >= sliding_window_size)
+ else:
+ mask = (dist >= 0) & (dist < sliding_window_size)
+
+ qk = qk - m[:, None]
+ p = tl.math.exp2(qk) # (BLOCK_M, BLOCK_N)
+
+ if SLIDING_WINDOW:
+ p = tl.where(mask, p, 0)
+
+ if not IS_EVEN_N:
+ p = tl.where(
+ ((tl.arange(0, BLOCK_M) + start_m) < N_CTX)[:, None],
+ p, 0
+ )
+
+ o += tl.sum(p, axis=0)
+
+
+ Q_block_ptr = tl.advance(Q_block_ptr, offsets=(BLOCK_M, 0))
+ m_ptrs = m_ptrs + BLOCK_M
+
+ o_offset = off_z.to(tl.int64) * stride_oz + off_h.to(tl.int64) * stride_oh
+ o_range = tl.arange(0, BLOCK_N) + start_n * BLOCK_N # key positions covered by this program
+ o_ptrs = Out + o_offset + o_range
+ tl.store(o_ptrs, o.to(Out.type.element_ty), mask = o_range < NKV_CTX)
+
+def get_score(q, k, m, sliding_window, complement_sliding_window):
+ assert q.dim() == 4
+ assert k.dim() == 4
+ assert m.dim() == 3
+ assert q.shape[:2] == m.shape[:2]
+ N_CTX = q.size(-2)
+ NKV_CTX = k.size(-2)
+ ROUND_CTX = m.size(-1)
+ ret = torch.zeros(
+ (q.size(0), q.size(1), k.size(2)),
+ dtype=k.dtype, device=k.device
+ )
+ if sliding_window is not None:
+ sliding_window_offset, sliding_window_size = sliding_window
+ else:
+ sliding_window_offset, sliding_window_size = None, None
+
+
+ grid = lambda META: (
+ triton.cdiv(k.shape[2], META["BLOCK_N"]),
+ q.shape[0] * q.shape[1]
+ )
+ sm_scale = 1 / math.sqrt(q.size(-1))
+
+ global _BLOCK_N
+ global _BLOCK_M
+
+ try:
+ _score_kernel[grid](
+ q, k, m, sm_scale, ret,
+ q.stride(0), q.stride(1), q.stride(2), q.stride(3),
+ k.stride(0), k.stride(1), k.stride(2), k.stride(3),
+ ret.stride(0), ret.stride(1), ret.stride(2),
+ q.size(0), q.size(1), k.size(1),
+ N_CTX, ROUND_CTX, NKV_CTX,
+ sliding_window_offset,
+ sliding_window_size,
+ SLIDING_WINDOW=(sliding_window is not None),
+ COMPLEMENT_SLIDING_WINDOW=complement_sliding_window,
+ BLOCK_M=_BLOCK_M,
+ BLOCK_N=_BLOCK_N,
+ BLOCK_DMODEL=q.size(-1)
+ )
+ except triton.OutOfResources as E:
+ from warnings import warn
+ _BLOCK_N = _BLOCK_N // 2
+ _BLOCK_M = _BLOCK_M // 2
+ warn(f"Triton attention ran out of resources: {E}\nRetrying with smaller block size {_BLOCK_N}.")
+ _score_kernel[grid](
+ q, k, m, sm_scale, ret,
+ q.stride(0), q.stride(1), q.stride(2), q.stride(3),
+ k.stride(0), k.stride(1), k.stride(2), k.stride(3),
+ ret.stride(0), ret.stride(1), ret.stride(2),
+ q.size(0), q.size(1), k.size(1),
+ N_CTX, ROUND_CTX, NKV_CTX,
+ sliding_window_offset,
+ sliding_window_size,
+ SLIDING_WINDOW=(sliding_window is not None),
+ COMPLEMENT_SLIDING_WINDOW=complement_sliding_window,
+ BLOCK_M=_BLOCK_M,
+ BLOCK_N=_BLOCK_N,
+ BLOCK_DMODEL=q.size(-1)
+ )
+
+ return ret
+
+def _forward(
+ q, k, v, sm_scale,
+ o = None, m = None, l = None, end = False,
+ sliding_window=None, init=False,
+ complement_sliding_window=False
+):
+ Lq, Lk, Lv = q.shape[-1], k.shape[-1], v.shape[-1]
+
+ assert Lq == Lk and Lk == Lv
+ assert Lk in {16, 32, 64, 128}
+
+ q_round_len = math.ceil(q.shape[2] / 64) * 64
+
+ if sliding_window is not None:
+ sliding_window_offset, sliding_window_size = sliding_window
+ else:
+ sliding_window_offset, sliding_window_size = None, None
+
+ grid = lambda META: (
+ triton.cdiv(q.shape[2], META["BLOCK_M"]),
+ q.shape[0] * q.shape[1],
+ )
+
+ global _BLOCK_N
+ global _BLOCK_M
+
+ try:
+ _attn_fwd[grid](
+ q, k, v, sm_scale, m, o, l, #
+ q.stride(0), q.stride(1), q.stride(2), q.stride(3), #
+ k.stride(0), k.stride(1), k.stride(2), k.stride(3), #
+ v.stride(0), v.stride(1), v.stride(2), v.stride(3), #
+ o.stride(0), o.stride(1), o.stride(2), o.stride(3), #
+ q.shape[0], q.shape[1], k.shape[1], #
+ q.shape[2], #
+ q_round_len,
+ k.shape[2],
+ sliding_window_offset,
+ sliding_window_size,
+ BLOCK_DMODEL=Lk, #
+ END=end,
+ INIT=init,
+ BLOCK_M=_BLOCK_M,
+ BLOCK_N=_BLOCK_N,
+ SLIDING_WINDOW=(sliding_window is not None),
+ COMPLEMENT_SLIDING_WINDOW=complement_sliding_window,
+ num_warps=4,
+ num_stages=4
+ )
+ except triton.OutOfResources as E:
+ _BLOCK_N = _BLOCK_N // 2
+ _BLOCK_M = _BLOCK_M // 2
+ from warnings import warn
+ warn(f"Triton attention ran out of resources: {E}\nRetrying with smaller block size {_BLOCK_N}.")
+ _attn_fwd[grid](
+ q, k, v, sm_scale, m, o, l, #
+ q.stride(0), q.stride(1), q.stride(2), q.stride(3), #
+ k.stride(0), k.stride(1), k.stride(2), k.stride(3), #
+ v.stride(0), v.stride(1), v.stride(2), v.stride(3), #
+ o.stride(0), o.stride(1), o.stride(2), o.stride(3), #
+ q.shape[0], q.shape[1], k.shape[1], #
+ q.shape[2], #
+ q_round_len,
+ k.shape[2],
+ sliding_window_offset,
+ sliding_window_size,
+ BLOCK_DMODEL=Lk, #
+ END=end,
+ INIT=init,
+ BLOCK_M=_BLOCK_M,
+ BLOCK_N=_BLOCK_N,
+ SLIDING_WINDOW=(sliding_window is not None),
+ COMPLEMENT_SLIDING_WINDOW=complement_sliding_window,
+ num_warps=4,
+ num_stages=4
+ )
+
+
+ if end:
+ o = o[:, :, :q.shape[2], :].contiguous().to(q.dtype)
+
+ return o, m, l
+
+class MultiStageDotProductionAttention:
+ def __init__(
+ self,
+ q_shape,
+ dtype,
+ device,
+ ):
+ self.q_shape = q_shape
+ self.dtype = dtype
+ self.device = device
+ self.end = False
+ self.ret = torch.zeros(
+ q_shape, dtype=dtype, device=device
+ )
+ self.score_list = []
+
+ def append(
+ self,
+ q: torch.Tensor, k: torch.Tensor, v: torch.Tensor,
+ sliding_window=None, complement_sliding_window: bool = False,
+ end=False, get_score=False,
+ *args, **kwargs
+ ):
+ raise NotImplementedError
+
+
+ def get_result(self):
+ return self.ret, self.score_list
+
+
+class TritonMultiStageDotProductionAttention(MultiStageDotProductionAttention):
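+ # Accumulates attention over several appended (q, k, v) segments using the
+ # kernels above: each append() updates the shared o/m/l buffers, and the final
+ # call (end=True) normalizes the output and computes per-key attention scores
+ # for any segment that was appended with get_score=True.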
+ def __init__(self, q_shape, dtype, device):
+ self.q_shape = q_shape
+ self.dtype = dtype
+ self.device = device
+ q_round_len = math.ceil(q_shape[2] / 64) * 64
+ o_shape = (q_shape[0], q_shape[1], q_round_len, q_shape[3])
+ m_shape = (q_shape[0], q_shape[1], q_round_len)
+ l_shape = (q_shape[0], q_shape[1], q_round_len)
+
+ self.o = torch.empty(o_shape, device=device, dtype=torch.float32)
+ self.m = torch.empty(m_shape, device=device, dtype=torch.float32)
+ self.l = torch.empty(l_shape, device=device, dtype=torch.float32)
+ self.q_list = []
+ self.k_list = []
+ self.sliding_window_list = []
+ self.complement_sliding_window_list = []
+ self.score_list = []
+ self.end = False
+ self.init = False
+
+ def finalize(self):
+ self.end = True
+ for q, k, sliding_window, comp in zip(self.q_list, self.k_list, self.sliding_window_list, self.complement_sliding_window_list):
+ if q is not None:
+ score = get_score(q, k, self.m, sliding_window, comp)
+ self.score_list.append(score)
+ else:
+ self.score_list.append(None)
+
+ self.ret = self.o
+
+ def append(self, q: torch.Tensor, k: torch.Tensor, v: torch.Tensor, end=False, get_score=False, sliding_window = None, complement_sliding_window: bool = False):
+ assert q.shape == self.q_shape
+
+ if isinstance(sliding_window, int):
+ sliding_window = (
+ k.shape[2] - q.shape[2], sliding_window
+ )
+
+ q = q.contiguous()
+ k = k.contiguous()
+ v = v.contiguous()
+
+ sm_scale = 1 / math.sqrt(q.shape[-1])
+ o, m, l = _forward(
+ q, k, v, sm_scale, self.o, self.m, self.l,
+ sliding_window=sliding_window, end=end, init=not self.init,
+ complement_sliding_window=complement_sliding_window
+ )
+ self.init = True
+ self.o = o
+ self.m = m
+ self.l = l
+ if get_score:
+ self.q_list.append(q)
+ self.k_list.append(k)
+ self.sliding_window_list.append(sliding_window)
+ self.complement_sliding_window_list.append(complement_sliding_window)
+ else:
+ self.q_list.append(None)
+ self.k_list.append(None)
+ self.sliding_window_list.append(None)
+ self.complement_sliding_window_list.append(None)
+
+ if end:
+ assert not self.end
+ self.finalize()
+
+def repeat_kv(hidden_states: torch.Tensor, n_rep: int) -> torch.Tensor:
+ """
+ This is the equivalent of torch.repeat_interleave(x, dim=1, repeats=n_rep). The hidden states go from (batch,
+ num_key_value_heads, seqlen, head_dim) to (batch, num_attention_heads, seqlen, head_dim)
+ """
+ batch, num_key_value_heads, slen, head_dim = hidden_states.shape
+ if n_rep == 1:
+ return hidden_states
+ hidden_states = hidden_states[:, :, None, :, :].expand(batch, num_key_value_heads, n_rep, slen, head_dim)
+ return hidden_states.reshape(batch, num_key_value_heads * n_rep, slen, head_dim)
+
+def streaming_forward(
+ q, k, v,
+ n_init, n_local,
+):
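+ # Streaming attention in two passes: a sliding-window pass over the most
+ # recent n_local keys plus, when the key length exceeds n_local, a
+ # complement-window pass over the first n_init keys (the initial "sink"
+ # tokens in StreamingLLM-style caches).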
+ # q,k,v should be tensors already equipped with RoPE
+ # k,v should already repeated to align with q.shape
+
+ assert q.dim() == 4 # (bsz, num_heads, seqlen, head_dim)
+ assert q.shape == k.shape == v.shape
+
+ head_dim = q.shape[-1]
+ if head_dim not in [16, 32, 64, 128, 256, 512]:
+ target_dim = 2 ** math.ceil(math.log2(head_dim)) - head_dim
+ q = torch.nn.functional.pad(q, [0, target_dim, 0, 0, 0, 0, 0, 0])
+ k = torch.nn.functional.pad(k, [0, target_dim, 0, 0, 0, 0, 0, 0])
+ v = torch.nn.functional.pad(v, [0, target_dim, 0, 0, 0, 0, 0, 0])
+
+ q_len = q.size(2)
+ k_len = k.size(2)
+
+ attn = TritonMultiStageDotProductionAttention(q.shape, q.dtype, q.device)
+
+ if k_len > n_local:
+ init_k = k[:, :, :n_init, :].contiguous()
+ init_v = v[:, :, :n_init, :].contiguous()
+
+ attn.append(q, k, v, sliding_window=n_local)
+ attn.append(
+ q, init_k, init_v, end=True,
+ sliding_window=(k_len - q_len, n_local), complement_sliding_window=True
+ )
+ else:
+ attn.append(q, k, v, sliding_window=n_local, end=True)
+
+ score, _ = attn.get_result()
+ return score[...,:head_dim]
+
+def streaming_forward2(
+ q, k, v,
+ n_init, n_local,
+):
+ q_len = q.size(2)
+ k_len = k.size(2)
+
+ attn = TritonMultiStageDotProductionAttention(q.shape, q.dtype, q.device)
+
+ if k_len > n_local:
+ init_k = k[:, :, :n_init, :].contiguous()
+ init_v = v[:, :, :n_init, :].contiguous()
+
+ else:
+ init_k = torch.empty(
+ (k.size(0), k.size(1), 0, k.size(3)),
+ dtype=k.dtype, device=k.device
+ )
+ init_v = torch.empty(
+ (v.size(0), v.size(1), 0, v.size(3)),
+ dtype=v.dtype, device=v.device
+ )
+
+ attn.append(q, k, v, sliding_window=n_local)
+ attn.append(
+ q, init_k, init_v, end=True,
+ sliding_window=(k_len - q_len, n_local), complement_sliding_window=True
+ )
+
+ score, _ = attn.get_result()
+ return score
+
+def stream_llm_forward(n_local, n_init, *args, **kwargs):
+ Attn = TritonMultiStageDotProductionAttention
+ def forward(self, query : torch.Tensor,
+ key_value : torch.Tensor,
+ position_bias : torch.Tensor,
+ use_cache: bool,
+ past_key_value,
+ project_q, project_k, project_v, attention_out,
+ dim_head, num_heads, num_heads_kv
+ ):
+
+ batch_size = query.size(0)
+ len_q = query.size(1)
+ len_k = key_value.size(1)
+
+ h_q = project_q(query) # (batch, len_q, num_heads * dim_head)
+ h_k = project_k(key_value) # (batch, len_k, num_heads * dim_head)
+ h_v = project_v(key_value) # (batch, len_k, num_heads * dim_head)
+
+ h_q = h_q.view(batch_size, len_q, num_heads, dim_head).permute(0, 2, 1, 3) # (batch, num_heads, len_q, dim_head)
+ h_k = h_k.view(batch_size, len_k, num_heads_kv, dim_head).permute(0, 2, 1, 3) # (batch, num_heads_kv, len_k, dim_head)
+ h_v = h_v.view(batch_size, len_k, num_heads_kv, dim_head).permute(0, 2, 1, 3) # (batch, num_heads_kv, len_k, dim_head)
+
+ h_q = h_q.contiguous() # (batch, num_heads, len_q, dim_head)
+ h_k = h_k.contiguous() # (batch, num_heads_kv, len_k, dim_head)
+ h_v = h_v.contiguous() # (batch, num_heads_kv, len_k, dim_head)
+
+ if past_key_value is not None:
+ h_k = torch.cat([past_key_value[0], h_k], dim=-2)
+ h_v = torch.cat([past_key_value[1], h_v], dim=-2)
+
+ len_k += past_key_value[2]
+
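+ # Cache layout: once the key length exceeds n_local + n_init, keep only the
+ # first n_init and the most recent n_local key/value positions, together with
+ # the running key length len_k.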
+ if use_cache:
+ if len_k <= n_local + n_init:
+ h_k_cache = h_k
+ h_v_cache = h_v
+ else:
+ h_k_cache = torch.cat([h_k[:,:, :n_init, :], h_k[:, :, max(0, h_k.size(-2) - n_local):, :]], dim=2)
+ h_v_cache = torch.cat([h_v[:,:, :n_init, :], h_v[:, :, max(0, h_v.size(-2) - n_local):, :]], dim=2)
+
+ current_key_value = (h_k_cache, h_v_cache, len_k)
+
+ else:
+ current_key_value = None
+
+ h_q_ = h_q
+ h_k_ = h_k
+ h_v_ = h_v
+
+ if len_q + n_local < h_k_.size(-2):
+ h_k_ = h_k_[:, :, h_k_.size(-2) - len_q - n_local:, :].contiguous().clone()
+ h_v_ = h_v_[:, :, h_v_.size(-2) - len_q - n_local:, :].contiguous().clone()
+
+ local_h_q, local_h_k = position_bias(h_q_, h_k_)
+ local_h_v = h_v_
+
+ if len_k > n_local:
+ init_h_q = position_bias.apply_rotary_pos_emb_one_angle(
+ h_q, n_local + n_init
+ )
+ init_h_k = position_bias.apply_rotary_pos_emb(
+ h_k[:, :, :n_init, :].contiguous(),
+ n_init, n_init, position_bias._cos_cached, position_bias._sin_cached
+ )
+ init_h_v = h_v[:, :, :n_init, :].contiguous()
+
+ else:
+ init_h_q = h_q
+ init_h_k = torch.empty(
+ (batch_size, num_heads_kv, 0, dim_head),
+ device=h_k.device,
+ dtype=h_k.dtype
+ )
+ init_h_v = torch.empty(
+ (batch_size, num_heads_kv, 0, dim_head),
+ device=h_v.device,
+ dtype=h_v.dtype
+ )
+
+ attn = Attn(local_h_q.shape, local_h_q.dtype, local_h_q.device)
+ attn.append(local_h_q, local_h_k, local_h_v, sliding_window=n_local)
+ attn.append(
+ init_h_q, init_h_k, init_h_v, end=True,
+ sliding_window=(len_k - len_q, n_local),
+ complement_sliding_window=True
+ )
+ score, _ = attn.get_result()
+
+ score = score.view(batch_size, num_heads, len_q, dim_head).permute(0, 2, 1, 3).contiguous() # (batch, len_q, num_heads, dim_head)
+ score = score.reshape(batch_size, len_q, num_heads * dim_head) # (batch, len_q, num_heads * dim_head)
+
+ score = attention_out(score)
+
+ if use_cache:
+ return score, current_key_value
+ else:
+ return score
+
+ return forward
diff --git a/minference/patch.py b/minference/patch.py
new file mode 100644
index 0000000..211b166
--- /dev/null
+++ b/minference/patch.py
@@ -0,0 +1,1401 @@
+# Copyright (c) 2024 Microsoft
+# Licensed under The MIT License [see LICENSE for details]
+
+import json
+
+import torch
+import transformers
+from transformers.cache_utils import *
+from transformers.models.llama.modeling_llama import *
+
+from .modules.inf_llm import InfLLMGenerator, inf_llm_forward
+from .modules.minference_forward import (
+ gather_last_q_vertical_slash_topk_v4,
+ gather_last_q_vertical_slash_topk_vllm,
+ init_minference_parameters,
+ minference_forward,
+ minference_kv_cache_cpu_forward,
+ minference_vllm_forward,
+ minference_with_snapkv_forward,
+ search_pattern,
+ sum_all_diagonal_matrix,
+)
+from .ops.streaming_kernel import stream_llm_forward
+from .utils import patch_glm_4_1m
+
+KV_CACHE_CPU_DEVICE = "cpu"
+
+
+class RotaryEmbeddingESM(torch.nn.Module):
+ """
+ Rotary position embeddings based on those in
+ [RoFormer](https://huggingface.co/docs/transformers/model_doc/roformer). Query and keys are transformed by rotation
+ matrices which depend on their relative positions.
+ """
+
+ def __init__(
+ self,
+ dim: int,
+ base: Union[int, float] = 10000,
+ distance_scale: Union[int, float] = 1,
+ is_glm4: bool = False,
+ ):
+ super().__init__()
+ self.base = base
+ self.distance_scale = distance_scale
+
+ # Generate and save the inverse frequency buffer (non-trainable)
+ inv_freq = 1.0 / (
+ base ** (torch.arange(0, dim, 2, device="cuda", dtype=torch.float32) / dim)
+ )
+ self.register_buffer("inv_freq", inv_freq, persistent=False)
+
+ self._seq_len_cached = -1
+ self._cos_cached = None
+ self._sin_cached = None
+ self.is_glm4 = is_glm4
+ self.dim = dim
+
+ def rotate_half(self, x):
+ x1, x2 = x.chunk(2, dim=-1)
+ return torch.cat((-x2, x1), dim=-1)
+
+ def apply_rotary_pos_emb(self, x, length, right, cos, sin):
+ dtype = x.dtype
+ if self.is_glm4:
+ cos = cos[right - length : right, ...]
+ return apply_rotary_pos_emb_glm4(x, cos)
+ if cos.dim() == 2:
+ cos = cos[right - length : right, :]
+ sin = sin[right - length : right, :]
+ elif cos.dim() == 3:
+ cos = cos[:, right - length : right, :]
+ sin = sin[:, right - length : right, :]
+ elif cos.dim() == 4:
+ cos = cos[:, :, right - length : right, :]
+ sin = sin[:, :, right - length : right, :]
+
+ return ((x.float() * cos) + (self.rotate_half(x).float() * sin)).to(dtype)
+
+ def _update_cos_sin_tables(self, x, seq_dim):
+ seq_len = x.size(seq_dim)
+ if seq_len > self._seq_len_cached:
+ self._seq_len_cached = seq_len
+ t = torch.arange(seq_len, device=x.device).type_as(self.inv_freq)
+ freqs = torch.outer(t * self.distance_scale, self.inv_freq).float()
+ if self.is_glm4:
+ cache = torch.stack(
+ [torch.cos(freqs), torch.sin(freqs)], dim=-1
+ ).bfloat16()
+ self._cos_cached, self._sin_cached = cache, None
+ else:
+ emb = torch.cat((freqs, freqs), dim=-1)
+ if x.dim() == 2:
+ self._cos_cached = emb.cos()
+ self._sin_cached = emb.sin()
+ elif x.dim() == 3:
+ self._cos_cached = emb.cos()[None, :, :]
+ self._sin_cached = emb.sin()[None, :, :]
+ elif x.dim() == 4:
+ self._cos_cached = emb.cos()[None, None, :, :]
+ self._sin_cached = emb.sin()[None, None, :, :]
+ return self._cos_cached, self._sin_cached
+
+ def _update_cos_sin_tables_len(self, seq_len, device, dim=None):
+ if seq_len > self._seq_len_cached:
+ if dim is None:
+ assert self._cos_cached is not None
+ dim = self._cos_cached.dim()
+
+ self._seq_len_cached = seq_len
+ t = torch.arange(seq_len, device=device).type_as(self.inv_freq)
+ freqs = torch.outer(t * self.distance_scale, self.inv_freq)
+ if self.is_glm4:
+ cache = torch.stack(
+ [torch.cos(freqs), torch.sin(freqs)], dim=-1
+ ).bfloat16()
+ self._cos_cached, self._sin_cached = cache, None
+ else:
+ emb = torch.cat((freqs, freqs), dim=-1)
+ if dim == 2:
+ self._cos_cached = emb.cos()
+ self._sin_cached = emb.sin()
+ elif dim == 3:
+ self._cos_cached = emb.cos()[None, :, :]
+ self._sin_cached = emb.sin()[None, :, :]
+ elif dim == 4:
+ self._cos_cached = emb.cos()[None, None, :, :]
+ self._sin_cached = emb.sin()[None, None, :, :]
+
+ return self._cos_cached, self._sin_cached
+
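+ # Applies the rotation of a single absolute position (index - 1) to every
+ # position of x; the streaming path calls this with n_local + n_init so the
+ # whole query block is rotated to one fixed position relative to the initial
+ # tokens.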
+ def apply_rotary_pos_emb_one_angle(self, x: torch.Tensor, index):
+ dtype = x.dtype
+ cos, sin = self._update_cos_sin_tables_len(max(index, x.shape[-2]), x.device)
+ if self.is_glm4:
+ return apply_rotary_pos_emb_glm4(x, cos)
+ if cos.dim() == 2:
+ cos = cos[index - 1 : index, :]
+ sin = sin[index - 1 : index, :]
+ elif cos.dim() == 3:
+ cos = cos[:, index - 1 : index, :]
+ sin = sin[:, index - 1 : index, :]
+ elif cos.dim() == 4:
+ cos = cos[:, :, index - 1 : index, :]
+ sin = sin[:, :, index - 1 : index, :]
+
+ return ((x.float() * cos) + (self.rotate_half(x).float() * sin)).to(dtype)
+
+ def forward(
+ self, q: torch.Tensor, k: torch.Tensor, seq_dim=-2
+ ) -> Tuple[torch.Tensor, torch.Tensor]:
+ self._cos_cached, self._sin_cached = self._update_cos_sin_tables(
+ k, seq_dim=seq_dim
+ )
+ return (
+ self.apply_rotary_pos_emb(
+ q, q.size(seq_dim), k.size(seq_dim), self._cos_cached, self._sin_cached
+ ),
+ self.apply_rotary_pos_emb(
+ k, k.size(seq_dim), k.size(seq_dim), self._cos_cached, self._sin_cached
+ ),
+ )
+
+
+@torch.jit.script
+def apply_rotary_pos_emb_glm4(
+ x: torch.Tensor, rope_cache: torch.Tensor
+) -> torch.Tensor:
+ # x: [b, np, sq, hn]
+ b, np, sq, hn = x.size(0), x.size(1), x.size(2), x.size(3)
+ rot_dim = rope_cache.shape[-2] * 2
+ x, x_pass = x[..., :rot_dim], x[..., rot_dim:]
+ # truncate to support variable sizes
+ # import ipdb;ipdb.set_trace()
+ rope_cache = rope_cache[:sq]
+ xshaped = x.reshape(b, np, sq, rot_dim // 2, 2)
+ rope_cache = rope_cache.view(-1, 1, sq, xshaped.size(3), 2)
+ x_out2 = torch.stack(
+ [
+ xshaped[..., 0] * rope_cache[..., 0] - xshaped[..., 1] * rope_cache[..., 1],
+ xshaped[..., 1] * rope_cache[..., 0] + xshaped[..., 0] * rope_cache[..., 1],
+ ],
+ -1,
+ )
+ x_out2 = x_out2.flatten(3)
+ return torch.cat((x_out2, x_pass), dim=-1)
+
+
+ATTN_FORWRAD = {
+ "streaming": stream_llm_forward,
+ "minference": minference_forward,
+ "inf_llm": inf_llm_forward,
+}
+
+
+def huggingface_forward(forward):
+ def hf_forward(
+ self,
+ hidden_states: torch.Tensor,
+ attention_mask=None,
+ position_ids=None,
+ past_key_value=None,
+ output_attentions: bool = False,
+ use_cache: bool = False,
+ **kwargs,
+ ):
+ assert not output_attentions
+
+ # for GLM-4: lazily split the fused qkv_proj into separate q_proj/k_proj/v_proj
+ if "q_proj" not in self.__dict__["_modules"]:
+ query_pos = self.num_heads * self.head_dim
+ key_value_pos = query_pos // self.num_key_value_groups
+ self.q_proj = torch.nn.Linear(
+ hidden_states.size(-1),
+ query_pos,
+ device=hidden_states.device,
+ dtype=hidden_states.dtype,
+ )
+ self.k_proj = torch.nn.Linear(
+ hidden_states.size(-1),
+ key_value_pos,
+ device=hidden_states.device,
+ dtype=hidden_states.dtype,
+ )
+ self.v_proj = torch.nn.Linear(
+ hidden_states.size(-1),
+ key_value_pos,
+ device=hidden_states.device,
+ dtype=hidden_states.dtype,
+ )
+
+ self.q_proj.weight.copy_(self.qkv_proj.weight[:query_pos, :])
+ self.k_proj.weight.copy_(
+ self.qkv_proj.weight[query_pos : query_pos + key_value_pos, :]
+ )
+ self.v_proj.weight.copy_(
+ self.qkv_proj.weight[query_pos + key_value_pos :, :]
+ )
+
+ self.q_proj.bias.copy_(self.qkv_proj.bias[:query_pos])
+ self.k_proj.bias.copy_(
+ self.qkv_proj.bias[query_pos : query_pos + key_value_pos]
+ )
+ self.v_proj.bias.copy_(self.qkv_proj.bias[query_pos + key_value_pos :])
+
+ del self.qkv_proj
+
+ ret = forward(
+ self,
+ hidden_states,
+ hidden_states,
+ position_ids,
+ use_cache,
+ past_key_value,
+ self.q_proj,
+ self.k_proj,
+ self.v_proj,
+ self.o_proj,
+ self.head_dim,
+ self.num_heads,
+ self.num_key_value_heads,
+ )
+ if use_cache:
+ o, pkv = ret
+ else:
+ o = ret
+ pkv = None
+
+ return o, None, pkv
+
+ return hf_forward
+
+
+def hf_437_prepare_inputs_for_generation(
+ self,
+ input_ids,
+ past_key_values=None,
+ attention_mask=None,
+ inputs_embeds=None,
+ **kwargs,
+):
+ if past_key_values is not None:
+ if isinstance(past_key_values, transformers.cache_utils.Cache):
+ cache_length = past_key_values.get_seq_length()
+ past_length = past_key_values.seen_tokens
+ max_cache_length = past_key_values.get_max_length()
+ else:
+ cache_length = past_length = past_key_values[0][0].shape[2]
+ max_cache_length = None
+
+ # Keep only the unprocessed tokens:
+ # 1 - If the length of the attention_mask exceeds the length of input_ids, then we are in a setting where
+ # some of the inputs are exclusively passed as part of the cache (e.g. when passing input_embeds as
+ # input)
+ if attention_mask is not None and attention_mask.shape[1] > input_ids.shape[1]:
+ input_ids = input_ids[:, -(attention_mask.shape[1] - past_length) :]
+ # 2 - If the past_length is smaller than input_ids', then input_ids holds all input tokens. We can discard
+ # input_ids based on the past_length.
+ elif past_length < input_ids.shape[1]:
+ input_ids = input_ids[:, past_length:]
+ # 3 - Otherwise (past_length >= input_ids.shape[1]), let's assume input_ids only has unprocessed tokens.
+
+ # If we are about to go beyond the maximum cache length, we need to crop the input attention mask.
+ if (
+ max_cache_length is not None
+ and attention_mask is not None
+ and cache_length + input_ids.shape[1] > max_cache_length
+ ):
+ attention_mask = attention_mask[:, -max_cache_length:]
+
+ position_ids = kwargs.get("position_ids", None)
+ if attention_mask is not None and position_ids is None:
+ # create position_ids on the fly for batch generation
+ position_ids = attention_mask.long().cumsum(-1) - 1
+ position_ids.masked_fill_(attention_mask == 0, 1)
+ if past_key_values:
+ position_ids = position_ids[:, -input_ids.shape[1] :]
+
+ # if `inputs_embeds` are passed, we only want to use them in the 1st generation step
+ if inputs_embeds is not None and past_key_values is None:
+ model_inputs = {"inputs_embeds": inputs_embeds}
+ else:
+ model_inputs = {"input_ids": input_ids}
+
+ model_inputs.update(
+ {
+ "position_ids": position_ids,
+ "past_key_values": past_key_values,
+ "use_cache": kwargs.get("use_cache"),
+ "attention_mask": attention_mask,
+ }
+ )
+ return model_inputs
+
+
+def prepare_inputs_for_generation(
+ self,
+ input_ids,
+ past_key_values=None,
+ attention_mask=None,
+ inputs_embeds=None,
+ cache_position=None,
+ **kwargs,
+):
+ # With static cache, the `past_key_values` is None
+ # TODO joao: standardize interface for the different Cache classes and remove this if
+ has_static_cache = False
+ if past_key_values is None:
+ past_key_values = getattr(
+ getattr(self.model.layers[0], "self_attn", {}), "past_key_value", None
+ )
+ has_static_cache = past_key_values is not None
+
+ past_length = 0
+ if past_key_values is not None:
+ if isinstance(past_key_values, transformers.cache_utils.Cache):
+ past_length = (
+ cache_position[0]
+ if cache_position is not None
+ else past_key_values.get_seq_length()
+ )
+ max_cache_length = (
+ torch.tensor(past_key_values.get_max_length(), device=input_ids.device)
+ if past_key_values.get_max_length() is not None
+ else None
+ )
+ cache_length = (
+ past_length
+ if max_cache_length is None
+ else torch.min(max_cache_length, past_length)
+ )
+ # TODO joao: remove this `else` after `generate` prioritizes `Cache` objects
+ else:
+ # cache_length = past_length = past_key_values[0][0].shape[2]
+ cache_length = past_length = cache_position[0]
+ max_cache_length = None
+
+ # Keep only the unprocessed tokens:
+ # 1 - If the length of the attention_mask exceeds the length of input_ids, then we are in a setting where
+ # some of the inputs are exclusively passed as part of the cache (e.g. when passing input_embeds as
+ # input)
+ if attention_mask is not None and attention_mask.shape[1] > input_ids.shape[1]:
+ input_ids = input_ids[:, -(attention_mask.shape[1] - past_length) :]
+ # 2 - If the past_length is smaller than input_ids', then input_ids holds all input tokens. We can discard
+ # input_ids based on the past_length.
+ elif past_length < input_ids.shape[1]:
+ input_ids = input_ids[:, past_length:]
+ # 3 - Otherwise (past_length >= input_ids.shape[1]), let's assume input_ids only has unprocessed tokens.
+
+ # If we are about to go beyond the maximum cache length, we need to crop the input attention mask.
+ if (
+ max_cache_length is not None
+ and attention_mask is not None
+ and cache_length + input_ids.shape[1] > max_cache_length
+ ):
+ attention_mask = attention_mask[:, -max_cache_length:]
+
+ position_ids = kwargs.get("position_ids", None)
+ if attention_mask is not None and position_ids is None:
+ # create position_ids on the fly for batch generation
+ position_ids = attention_mask.long().cumsum(-1) - 1
+ position_ids.masked_fill_(attention_mask == 0, 1)
+ if past_key_values:
+ position_ids = position_ids[:, -input_ids.shape[1] :]
+
+ # if `inputs_embeds` are passed, we only want to use them in the 1st generation step
+ if inputs_embeds is not None and past_key_values is None:
+ model_inputs = {"inputs_embeds": inputs_embeds}
+ else:
+ # The `contiguous()` here is necessary to have a static stride during decoding. torchdynamo otherwise
+ # recompiles graphs as the stride of the inputs is a guard. Ref: https://github.com/huggingface/transformers/pull/29114
+ # TODO: use `next_tokens` directly instead.
+ model_inputs = {"input_ids": input_ids.contiguous()}
+
+ input_length = (
+ position_ids.shape[-1] if position_ids is not None else input_ids.shape[-1]
+ )
+ if cache_position is None:
+ cache_position = torch.arange(
+ past_length, past_length + input_length, device=input_ids.device
+ )
+ else:
+ cache_position = cache_position[-input_length:]
+
+ if has_static_cache:
+ past_key_values = None
+
+ model_inputs.update(
+ {
+ "position_ids": position_ids,
+ "cache_position": cache_position,
+ "past_key_values": past_key_values,
+ "use_cache": kwargs.get("use_cache"),
+ "attention_mask": attention_mask,
+ }
+ )
+ return model_inputs
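
The patched `prepare_inputs_for_generation` trims `input_ids` down to the tokens the cache has not yet processed and derives `cache_position` from `past_length`. A minimal standalone sketch of that trimming for a single decode step (the tensors are illustrative, not taken from the patch):

```python
import torch

# Illustrative tensors: the cache already holds the first four tokens,
# so only the newest token is fed to the model on this decode step.
input_ids = torch.tensor([[11, 12, 13, 14, 15]])
past_length = 4
trimmed = input_ids[:, past_length:]  # tensor([[15]])
cache_position = torch.arange(past_length, past_length + trimmed.shape[-1])
assert trimmed.tolist() == [[15]]
assert cache_position.tolist() == [4]
```
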
+
+
+def prepare_inputs_for_generation_snapkv(
+ self,
+ input_ids,
+ past_key_values=None,
+ attention_mask=None,
+ inputs_embeds=None,
+ **kwargs,
+):
+ if past_key_values is None: # [SnapKV]
+ for layer in self.model.layers:
+ layer.self_attn.kv_seq_len = 0
+ if past_key_values is not None:
+ if isinstance(past_key_values, Cache):
+ cache_length = past_key_values.get_seq_length()
+ past_length = past_key_values.seen_tokens
+ max_cache_length = past_key_values.get_max_length()
+ else:
+ # cache_length = past_length = past_key_values[0][0].shape[2]
+ # max_cache_length = None
+ cache_length = past_length = self.model.layers[0].self_attn.kv_seq_len
+ max_cache_length = None
+ # Keep only the unprocessed tokens:
+ # 1 - If the length of the attention_mask exceeds the length of input_ids, then we are in a setting where
+ # some of the inputs are exclusively passed as part of the cache (e.g. when passing input_embeds as
+ # input)
+ if attention_mask is not None and attention_mask.shape[1] > input_ids.shape[1]:
+ input_ids = input_ids[:, -(attention_mask.shape[1] - past_length) :]
+ # 2 - If the past_length is smaller than input_ids', then input_ids holds all input tokens. We can discard
+ # input_ids based on the past_length.
+ elif past_length < input_ids.shape[1]:
+ input_ids = input_ids[:, past_length:]
+ # 3 - Otherwise (past_length >= input_ids.shape[1]), let's assume input_ids only has unprocessed tokens.
+
+ # If we are about to go beyond the maximum cache length, we need to crop the input attention mask.
+ if (
+ max_cache_length is not None
+ and attention_mask is not None
+ and cache_length + input_ids.shape[1] > max_cache_length
+ ):
+ attention_mask = attention_mask[:, -max_cache_length:]
+
+ position_ids = kwargs.get("position_ids", None)
+ if attention_mask is not None and position_ids is None:
+ # create position_ids on the fly for batch generation
+ position_ids = attention_mask.long().cumsum(-1) - 1
+ position_ids.masked_fill_(attention_mask == 0, 1)
+ if past_key_values:
+ position_ids = position_ids[:, -input_ids.shape[1] :]
+
+ # if `inputs_embeds` are passed, we only want to use them in the 1st generation step
+ if inputs_embeds is not None and past_key_values is None:
+ model_inputs = {"inputs_embeds": inputs_embeds}
+ else:
+ model_inputs = {"input_ids": input_ids}
+
+ model_inputs.update(
+ {
+ "position_ids": position_ids,
+ "past_key_values": past_key_values,
+ "use_cache": kwargs.get("use_cache"),
+ "attention_mask": attention_mask,
+ }
+ )
+ return model_inputs
+
+
+def _prepare_decoder_attention_mask_inference(
+ self, attention_mask, input_shape, inputs_embeds, past_key_values_length
+):
+ # [bsz, seq_len]
+ if past_key_values_length > 0 and attention_mask is not None:
+ attention_mask = torch.cat(
+ (
+ torch.full(
+ (input_shape[0], past_key_values_length),
+ True,
+ dtype=attention_mask.dtype,
+ device=attention_mask.device,
+ ),
+ attention_mask,
+ ),
+ dim=-1,
+ )
+
+ if attention_mask is not None and torch.all(attention_mask):
+ return None # This uses the faster call when training with full samples
+
+ return attention_mask
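
`_prepare_decoder_attention_mask_inference` left-pads the 2-D mask with `True` entries for the cached tokens and returns `None` whenever every position is attended, letting the attention code take its dense fast path. A small self-contained sketch of those two behaviours (illustrative tensors only):

```python
import torch

# 1) A mask with padding is extended on the left by `past_len` True entries
#    covering the tokens already in the KV cache.
attention_mask = torch.tensor([[1, 1, 1, 0]], dtype=torch.bool)
past_len = 2
padded = torch.cat(
    (torch.ones(1, past_len, dtype=torch.bool), attention_mask), dim=-1
)
assert padded.shape[-1] == attention_mask.shape[-1] + past_len

# 2) A fully-attended mask collapses to None so attention can skip masking.
full_mask = torch.ones(1, 4, dtype=torch.bool)
assert bool(torch.all(full_mask))  # the patched helper returns None in this case
```
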
+
+
+def forward_llama_decoder_layer(
+ self,
+ hidden_states: torch.Tensor,
+ attention_mask: Optional[torch.Tensor] = None,
+ position_ids: Optional[torch.LongTensor] = None,
+ past_key_value: Optional[Tuple[torch.Tensor]] = None,
+ output_attentions: Optional[bool] = False,
+ use_cache: Optional[bool] = False,
+ padding_mask: Optional[torch.LongTensor] = None,
+) -> Tuple[torch.FloatTensor, Optional[Tuple[torch.FloatTensor, torch.FloatTensor]]]:
+ """
+ Args:
+ hidden_states (`torch.FloatTensor`): input to the layer of shape `(batch, seq_len, embed_dim)`
+ attention_mask (`torch.FloatTensor`, *optional*): attention mask of size
+ `(batch, 1, tgt_len, src_len)` where padding elements are indicated by very large negative values.
+ output_attentions (`bool`, *optional*):
+ Whether or not to return the attentions tensors of all attention layers. See `attentions` under
+ returned tensors for more detail.
+ use_cache (`bool`, *optional*):
+ If set to `True`, `past_key_values` key value states are returned and can be used to speed up decoding
+ (see `past_key_values`).
+ past_key_value (`Tuple(torch.FloatTensor)`, *optional*): cached past key and value projection states
+ """
+
+ residual = hidden_states.clone()
+ batch, seq_len, embed_dim = hidden_states.shape
+
+ for start_idx in range(0, seq_len, 32000):
+ end_idx = min(seq_len, start_idx + 32000)
+ hidden_states[:, start_idx:end_idx, :] = self.input_layernorm(
+ hidden_states[:, start_idx:end_idx, :]
+ )
+
+ # Self Attention
+ hidden_states, self_attn_weights, present_key_value = self.self_attn(
+ hidden_states=hidden_states,
+ attention_mask=attention_mask,
+ position_ids=position_ids,
+ past_key_value=past_key_value,
+ output_attentions=output_attentions,
+ use_cache=use_cache,
+ padding_mask=padding_mask,
+ )
+ hidden_states = residual + hidden_states
+
+ # Fully Connected
+ for start_idx in range(0, seq_len, 32000):
+ end_idx = min(seq_len, start_idx + 32000)
+ part_hidden_states = hidden_states[:, start_idx:end_idx, :].clone()
+ part_hidden_states = self.post_attention_layernorm(part_hidden_states)
+ part_hidden_states = self.mlp(part_hidden_states)
+ hidden_states[:, start_idx:end_idx, :] += part_hidden_states
+
+ outputs = (hidden_states,)
+
+ if output_attentions:
+ outputs += (self_attn_weights,)
+
+ if use_cache:
+ outputs += (present_key_value,)
+
+ return outputs
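
The chunked LayerNorm and MLP calls above bound peak activation memory during long-context pre-filling: only one 32,000-token slice is materialised at a time, which is safe because both operations are applied per token. A standalone sketch of the same chunking pattern; the `apply_chunked` helper, the chunk size, and the `torch.nn.LayerNorm` module are illustrative stand-ins, not part of the patch:

```python
import torch

def apply_chunked(module, hidden_states, chunk_size=32000):
    """Apply `module` to `hidden_states` in slices along the sequence dimension,
    mirroring the chunked LayerNorm/MLP calls in the patched decoder layer."""
    _, seq_len, _ = hidden_states.shape
    for start_idx in range(0, seq_len, chunk_size):
        end_idx = min(seq_len, start_idx + chunk_size)
        hidden_states[:, start_idx:end_idx, :] = module(
            hidden_states[:, start_idx:end_idx, :]
        )
    return hidden_states

x = torch.randn(1, 70000, 64)
norm = torch.nn.LayerNorm(64)
y = apply_chunked(norm, x.clone(), chunk_size=32000)
# Chunked application matches the single-shot result because LayerNorm is per-token.
assert torch.allclose(y, norm(x), atol=1e-5)
```
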
+
+
+def forward_llama_model(
+ self,
+ input_ids: torch.LongTensor = None,
+ attention_mask: Optional[torch.Tensor] = None,
+ position_ids: Optional[torch.LongTensor] = None,
+ past_key_values: Optional[List[torch.FloatTensor]] = None,
+ inputs_embeds: Optional[torch.FloatTensor] = None,
+ use_cache: Optional[bool] = None,
+ output_attentions: Optional[bool] = None,
+ output_hidden_states: Optional[bool] = None,
+ return_dict: Optional[bool] = None,
+) -> Union[Tuple, BaseModelOutputWithPast]:
+ output_attentions = (
+ output_attentions
+ if output_attentions is not None
+ else self.config.output_attentions
+ )
+ output_hidden_states = (
+ output_hidden_states
+ if output_hidden_states is not None
+ else self.config.output_hidden_states
+ )
+ use_cache = use_cache if use_cache is not None else self.config.use_cache
+
+ return_dict = (
+ return_dict if return_dict is not None else self.config.use_return_dict
+ )
+
+ # retrieve input_ids and inputs_embeds
+ if input_ids is not None and inputs_embeds is not None:
+ raise ValueError(
+ "You cannot specify both input_ids and inputs_embeds at the same time"
+ )
+ elif input_ids is not None:
+ batch_size, seq_length = input_ids.shape[:2]
+ elif inputs_embeds is not None:
+ batch_size, seq_length = inputs_embeds.shape[:2]
+ else:
+ raise ValueError("You have to specify either input_ids or inputs_embeds")
+
+ if self.gradient_checkpointing and self.training:
+ if use_cache:
+ logger.warning_once(
+ "`use_cache=True` is incompatible with gradient checkpointing. Setting `use_cache=False`..."
+ )
+ use_cache = False
+
+ seq_length_with_past = seq_length
+ past_key_values_length = 0
+
+ if use_cache:
+ use_legacy_cache = not isinstance(past_key_values, Cache)
+ if use_legacy_cache:
+ past_key_values = DynamicCache.from_legacy_cache(past_key_values)
+ past_key_values_length = past_key_values.get_usable_length(seq_length)
+ seq_length_with_past = seq_length_with_past + past_key_values_length
+
+ if position_ids is None:
+ device = input_ids.device if input_ids is not None else inputs_embeds.device
+ position_ids = torch.arange(
+ past_key_values_length,
+ seq_length + past_key_values_length,
+ dtype=torch.long,
+ device=device,
+ )
+ position_ids = position_ids.unsqueeze(0)
+
+ if inputs_embeds is None:
+ inputs_embeds = self.embed_tokens(input_ids)
+
+ if attention_mask is None:
+ attention_mask = torch.ones(
+ (batch_size, seq_length_with_past),
+ dtype=torch.bool,
+ device=inputs_embeds.device,
+ )
+ padding_mask = None
+ else:
+ if 0 in attention_mask:
+ padding_mask = attention_mask
+ else:
+ padding_mask = None
+
+ attention_mask = self._prepare_decoder_attention_mask(
+ attention_mask, (batch_size, seq_length), inputs_embeds, past_key_values_length
+ )
+
+ # embed positions
+ hidden_states = inputs_embeds
+
+ # decoder layers
+ all_hidden_states = () if output_hidden_states else None
+ all_self_attns = () if output_attentions else None
+ next_decoder_cache = None
+
+ for decoder_layer in self.layers:
+ if output_hidden_states:
+ all_hidden_states += (hidden_states,)
+
+ if self.gradient_checkpointing and self.training:
+ layer_outputs = self._gradient_checkpointing_func(
+ decoder_layer.__call__,
+ hidden_states,
+ attention_mask,
+ position_ids,
+ past_key_values,
+ output_attentions,
+ use_cache,
+ )
+ else:
+ layer_outputs = decoder_layer(
+ hidden_states,
+ attention_mask=attention_mask,
+ position_ids=position_ids,
+ past_key_value=past_key_values,
+ output_attentions=output_attentions,
+ use_cache=use_cache,
+ )
+
+ hidden_states = layer_outputs[0]
+
+ if use_cache:
+ next_decoder_cache = layer_outputs[2 if output_attentions else 1]
+
+ if output_attentions:
+ all_self_attns += (layer_outputs[1],)
+
+ batch, seq_len, embed_dim = hidden_states.shape
+ for start_idx in range(0, seq_len, 32000):
+ end_idx = min(seq_len, start_idx + 32000)
+ hidden_states[:, start_idx:end_idx, :] = self.norm(
+ hidden_states[:, start_idx:end_idx, :]
+ )
+
+ # add hidden states from the last decoder layer
+ if output_hidden_states:
+ all_hidden_states += (hidden_states,)
+
+ next_cache = None
+ if use_cache:
+ next_cache = (
+ next_decoder_cache.to_legacy_cache()
+ if use_legacy_cache
+ else next_decoder_cache
+ )
+ if not return_dict:
+ return tuple(
+ v
+ for v in [hidden_states, next_cache, all_hidden_states, all_self_attns]
+ if v is not None
+ )
+ return BaseModelOutputWithPast(
+ last_hidden_state=hidden_states,
+ past_key_values=next_cache,
+ hidden_states=all_hidden_states,
+ attentions=all_self_attns,
+ )
+
+
+def forward_llama_for_causal_lm(
+ self,
+ input_ids: torch.LongTensor = None,
+ attention_mask: Optional[torch.Tensor] = None,
+ position_ids: Optional[torch.LongTensor] = None,
+ past_key_values: Optional[List[torch.FloatTensor]] = None,
+ inputs_embeds: Optional[torch.FloatTensor] = None,
+ labels: Optional[torch.LongTensor] = None,
+ use_cache: Optional[bool] = None,
+ output_attentions: Optional[bool] = None,
+ output_hidden_states: Optional[bool] = None,
+ return_dict: Optional[bool] = None,
+) -> Union[Tuple, CausalLMOutputWithPast]:
+ # assert labels is not None
+ output_attentions = (
+ output_attentions
+ if output_attentions is not None
+ else self.config.output_attentions
+ )
+ output_hidden_states = (
+ output_hidden_states
+ if output_hidden_states is not None
+ else self.config.output_hidden_states
+ )
+ return_dict = (
+ return_dict if return_dict is not None else self.config.use_return_dict
+ )
+
+ # decoder outputs consists of (dec_features, layer_state, dec_hidden, dec_attn)
+ outputs = self.model(
+ input_ids=input_ids,
+ attention_mask=attention_mask,
+ position_ids=position_ids,
+ past_key_values=past_key_values,
+ inputs_embeds=inputs_embeds,
+ use_cache=use_cache,
+ output_attentions=output_attentions,
+ output_hidden_states=output_hidden_states,
+ return_dict=return_dict,
+ )
+ torch.cuda.empty_cache()
+
+ hidden_states = outputs[0]
+ if labels is not None:
+ loss_fct = CrossEntropyLoss(reduction="sum")
+ valid_seq_len = input_ids.shape[-1] - 1
+ valid_seq_len_slide_win = torch.sum(labels[:, 1:] >= 0).item()
+ # print("valid_seq_len_slide_win", valid_seq_len)
+ loss = 0.0
+
+ for start_idx in range(0, valid_seq_len, 32000):
+ end_idx = min(start_idx + 32000, valid_seq_len)
+ shift_logits = self.lm_head(
+ hidden_states[..., start_idx:end_idx, :]
+ ).float()
+ shift_labels = labels[..., start_idx + 1 : end_idx + 1].contiguous()
+ # Flatten the tokens
+ shift_logits = shift_logits.view(-1, self.config.vocab_size)
+ shift_labels = shift_labels.view(-1)
+ # Enable model parallelism
+ shift_labels = shift_labels.to(shift_logits.device)
+ loss += loss_fct(shift_logits, shift_labels)
+
+ loss /= valid_seq_len_slide_win
+ logits = None
+ else:
+ if self.config.to_dict().get("is_ppl", False):
+ logits = self.lm_head(hidden_states)
+ else:
+ logits = self.lm_head(hidden_states[:, -1:]).float()
+ loss = None
+
+ return CausalLMOutputWithPast(
+ loss=loss,
+ logits=logits,
+ past_key_values=outputs.past_key_values,
+ )
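
When `labels` are supplied, the loss is likewise computed in 32,000-token windows: each window's logits come from the LM head, the labels are shifted by one, a sum-reduced cross entropy is accumulated, and the total is normalised by the number of supervised tokens. A small self-contained sketch of that windowed shift-and-sum scheme (vocab size, hidden size, and window length are illustrative):

```python
import torch
from torch.nn import CrossEntropyLoss

vocab, window = 128, 8
hidden = torch.randn(1, 20, 16)             # stand-in for the final hidden states
lm_head = torch.nn.Linear(16, vocab, bias=False)
labels = torch.randint(0, vocab, (1, 20))

loss_fct = CrossEntropyLoss(reduction="sum")
valid_len = labels.shape[-1] - 1
total = 0.0
for start in range(0, valid_len, window):
    end = min(start + window, valid_len)
    logits = lm_head(hidden[..., start:end, :]).float()
    shift_labels = labels[..., start + 1 : end + 1].contiguous()
    total += loss_fct(logits.view(-1, vocab), shift_labels.view(-1))
loss = total / (labels[:, 1:] >= 0).sum()   # normalise by supervised tokens
```
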
+
+
+def minference_patch(model, config):
+ from transformers import LlamaForCausalLM
+
+ if config.kv_cache_cpu:
+ global KV_CACHE_CPU_DEVICE
+ KV_CACHE_CPU_DEVICE = config.kv_cache_cpu_device
+ model.config.kv_cache_cpu_device = config.kv_cache_cpu_device
+ return minference_patch_kv_cache_cpu(model)
+ if config.use_snapkv:
+ return minference_patch_with_snapkv(model)
+
+ model = patch_glm_4_1m(model)
+
+ Attention = model.model.layers[0].self_attn.__class__
+ Model = model.model.__class__
+ DecoderLayer = model.model.layers[0].__class__
+
+ forward = minference_forward()
+
+ def update_module(m):
+ if isinstance(m, Attention):
+ m.init_minference_parameters = init_minference_parameters.__get__(
+ m, Attention
+ )
+ m.gather_last_q_vertical_slash_topk_v4 = (
+ gather_last_q_vertical_slash_topk_v4.__get__(m, Attention)
+ )
+ m.forward = forward.__get__(m, Attention)
+ if isinstance(m, DecoderLayer):
+ m.forward = forward_llama_decoder_layer.__get__(m, DecoderLayer)
+
+ model.apply(update_module)
+ model.prepare_inputs_for_generation = hf_437_prepare_inputs_for_generation.__get__(
+ model, model.__class__
+ )
+ model.model._use_sdpa = False
+
+ model.model._prepare_decoder_attention_mask = (
+ _prepare_decoder_attention_mask_inference.__get__(
+ model.model, model.model.__class__
+ )
+ )
+ model.model.forward = forward_llama_model.__get__(
+ model.model, model.model.__class__
+ )
+ model.forward = forward_llama_for_causal_lm.__get__(model, model.__class__)
+
+ print("Patched model for minference..")
+ return model
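
`minference_patch` expects a Hugging Face causal LM plus a config object that exposes at least `kv_cache_cpu`, `kv_cache_cpu_device`, and `use_snapkv`. A hedged usage sketch; the checkpoint name and the `SimpleNamespace` stand-in for the MInference config are illustrative only, and the real config object may carry more fields:

```python
# Illustrative only: SimpleNamespace stands in for the MInference config object;
# minference_patch reads kv_cache_cpu, kv_cache_cpu_device, and use_snapkv from it.
from types import SimpleNamespace

from transformers import AutoModelForCausalLM

model = AutoModelForCausalLM.from_pretrained(
    "gradientai/Llama-3-8B-Instruct-262k",  # example long-context checkpoint
    torch_dtype="auto",
    device_map="auto",
)
config = SimpleNamespace(kv_cache_cpu=False, kv_cache_cpu_device="cpu", use_snapkv=False)
model = minference_patch(model, config)  # installs the sparse-attention forwards
```
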
+
+
+def minference_patch_kv_cache_cpu(model):
+ from transformers import LlamaForCausalLM
+
+ transformers.cache_utils.DynamicCache.update = cpu_cache_update
+ transformers.cache_utils.DynamicCache.get = cpu_cache_get
+
+ model = patch_glm_4_1m(model)
+
+ Attention = model.model.layers[0].self_attn.__class__
+ Model = model.model.__class__
+ DecoderLayer = model.model.layers[0].__class__
+
+ forward = minference_kv_cache_cpu_forward()
+
+ def update_module(m):
+ if isinstance(m, Attention):
+ m.init_minference_parameters = init_minference_parameters.__get__(
+ m, Attention
+ )
+ m.gather_last_q_vertical_slash_topk_v4 = (
+ gather_last_q_vertical_slash_topk_v4.__get__(m, Attention)
+ )
+ m.forward = forward.__get__(m, Attention)
+ if isinstance(m, DecoderLayer):
+ m.forward = forward_llama_decoder_layer.__get__(m, DecoderLayer)
+
+ model.apply(update_module)
+ model.prepare_inputs_for_generation = hf_437_prepare_inputs_for_generation.__get__(
+ model, model.__class__
+ )
+ model.model._use_sdpa = False
+
+ model.model._prepare_decoder_attention_mask = (
+ _prepare_decoder_attention_mask_inference.__get__(
+ model.model, model.model.__class__
+ )
+ )
+ model.model.forward = forward_llama_model.__get__(
+ model.model, model.model.__class__
+ )
+ model.forward = forward_llama_for_causal_lm.__get__(model, model.__class__)
+
+ print("Patched model for MInference load KV Cache to CPU.")
+ return model
+
+
+def minference_patch_with_snapkv(model):
+ from transformers import LlamaForCausalLM
+
+ model = patch_glm_4_1m(model)
+
+ Attention = model.model.layers[0].self_attn.__class__
+ Model = model.model.__class__
+ DecoderLayer = model.model.layers[0].__class__
+
+ forward = minference_with_snapkv_forward()
+
+ def update_module(m):
+ if isinstance(m, Attention):
+ m.init_minference_parameters = init_minference_parameters.__get__(
+ m, Attention
+ )
+ m.gather_last_q_vertical_slash_topk_v4 = (
+ gather_last_q_vertical_slash_topk_v4.__get__(m, Attention)
+ )
+ m.forward = forward.__get__(m, Attention)
+ if isinstance(m, DecoderLayer):
+ m.forward = forward_llama_decoder_layer.__get__(m, DecoderLayer)
+
+ model.apply(update_module)
+ model.prepare_inputs_for_generation = prepare_inputs_for_generation_snapkv.__get__(
+ model, model.__class__
+ )
+ model.model._use_sdpa = False
+
+ model.model._prepare_decoder_attention_mask = (
+ _prepare_decoder_attention_mask_inference.__get__(
+ model.model, model.model.__class__
+ )
+ )
+ model.model.forward = forward_llama_model.__get__(
+ model.model, model.model.__class__
+ )
+ model.forward = forward_llama_for_causal_lm.__get__(model, model.__class__)
+
+ print("Patched model for minference with SanpKV..")
+ return model
+
+
+def llama_model_forward_vllm(
+ self,
+ input_ids: Optional[torch.Tensor],
+ positions: torch.Tensor,
+ kv_caches: List[torch.Tensor],
+ attn_metadata,
+ inputs_embeds: Optional[torch.Tensor] = None,
+) -> torch.Tensor:
+ if inputs_embeds is not None:
+ hidden_states = inputs_embeds
+ else:
+ hidden_states = self.get_input_embeddings(input_ids)
+ residual = None
+ for i in range(len(self.layers)):
+ layer = self.layers[i]
+ hidden_states, residual = layer(
+ positions,
+ hidden_states,
+ kv_caches[i],
+ attn_metadata,
+ residual,
+ layer_idx=i,
+ )
+ hidden_states, _ = self.norm(hidden_states, residual)
+ return hidden_states
+
+
+def llama_layer_forward_vllm(
+ self,
+ positions: torch.Tensor,
+ hidden_states: torch.Tensor,
+ kv_cache: torch.Tensor,
+ attn_metadata,
+ residual: Optional[torch.Tensor],
+ layer_idx: int,
+) -> Tuple[torch.Tensor, torch.Tensor]:
+ # Self Attention
+ if residual is None:
+ residual = hidden_states
+ hidden_states = self.input_layernorm(hidden_states)
+ else:
+ hidden_states, residual = self.input_layernorm(hidden_states, residual)
+ hidden_states = self.self_attn(
+ positions=positions,
+ hidden_states=hidden_states,
+ kv_cache=kv_cache,
+ attn_metadata=attn_metadata,
+ layer_idx=layer_idx,
+ )
+
+ # Fully Connected
+ hidden_states, residual = self.post_attention_layernorm(hidden_states, residual)
+ hidden_states = self.mlp(hidden_states)
+ return hidden_states, residual
+
+
+def llama_attn_forward_vllm(
+ self,
+ positions: torch.Tensor,
+ hidden_states: torch.Tensor,
+ kv_cache: torch.Tensor,
+ attn_metadata,
+ layer_idx: int,
+) -> torch.Tensor:
+ qkv, _ = self.qkv_proj(hidden_states)
+ q, k, v = qkv.split([self.q_size, self.kv_size, self.kv_size], dim=-1)
+ q, k = self.rotary_emb(positions, q, k)
+ attn_output = self.attn(q, k, v, kv_cache, attn_metadata, self.kv_scale, layer_idx)
+ output, _ = self.o_proj(attn_output)
+ return output
+
+
+def vllm_attn_forward(
+ self,
+ query: torch.Tensor,
+ key: torch.Tensor,
+ value: torch.Tensor,
+ kv_cache: Optional[torch.Tensor],
+ attn_metadata,
+ kv_scale: float = 1.0,
+ layer_idx: int = 0,
+) -> torch.Tensor:
+ return self.impl.forward(
+ query, key, value, kv_cache, attn_metadata, kv_scale, layer_idx
+ )
+
+
+def minference_patch_vllm(
+ llm,
+ config_file,
+):
+ from vllm.attention import Attention
+ from vllm.model_executor.models.llama import (
+ LlamaAttention,
+ LlamaDecoderLayer,
+ LlamaForCausalLM,
+ LlamaModel,
+ )
+
+ config = json.load(open(config_file))
+ attn_forward = minference_vllm_forward(config)
+
+ def update_module(m):
+ if isinstance(m, Attention):
+ m.forward = vllm_attn_forward.__get__(m, Attention)
+
+ m = m.impl
+ m_cls = m.__class__
+ m.gather_last_q_vertical_slash_topk_vllm = (
+ gather_last_q_vertical_slash_topk_vllm.__get__(m, m_cls)
+ )
+ m.forward = attn_forward.__get__(m, m_cls)
+ if isinstance(m, LlamaDecoderLayer):
+ m.forward = llama_layer_forward_vllm.__get__(m, LlamaDecoderLayer)
+ if isinstance(m, LlamaModel):
+ m.forward = llama_model_forward_vllm.__get__(m, LlamaModel)
+ if isinstance(m, LlamaAttention):
+ m.forward = llama_attn_forward_vllm.__get__(m, LlamaAttention)
+
+ llm.llm_engine.model_executor.driver_worker.model_runner.model.apply(update_module)
+
+ print("Patched model for minference with vLLM..")
+ return llm
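
`minference_patch_vllm` takes an already constructed `vllm.LLM` plus a JSON file of per-head sparse-attention settings and swaps the attention, decoder-layer, and model forwards on the engine's driver worker. A hedged usage sketch; the checkpoint and config path below are placeholders:

```python
# Hedged sketch: checkpoint and config path are placeholders; enforce_eager
# disables CUDA-graph capture, a conservative choice for monkey-patched forwards.
from vllm import LLM

llm = LLM(
    model="gradientai/Llama-3-8B-Instruct-262k",
    max_num_seqs=1,
    enforce_eager=True,
)
llm = minference_patch_vllm(llm, "path/to/minference_sparse_config.json")
outputs = llm.generate(["Summarize the following document: ..."])
```
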
+
+
+def patch_hf(
+ model,
+ attn_type: str = "inf_llm",
+ attn_kwargs: dict = {},
+ base=None,
+ distance_scale=None,
+ **kwargs,
+):
+ attn_kwargs.update(kwargs)
+ # This approach lacks scalability and will be refactored.
+ from transformers import LlamaForCausalLM, MistralForCausalLM, Qwen2ForCausalLM
+ from transformers.models.llama.modeling_llama import (
+ BaseModelOutputWithPast,
+ LlamaAttention,
+ LlamaModel,
+ )
+ from transformers.models.mistral.modeling_mistral import (
+ MistralAttention,
+ MistralModel,
+ )
+ from transformers.models.qwen2.modeling_qwen2 import Qwen2Attention, Qwen2Model
+
+ def model_forward(
+ self,
+ input_ids: torch.LongTensor = None,
+ attention_mask=None,
+ position_ids=None,
+ past_key_values=None,
+ inputs_embeds=None,
+ use_cache=None,
+ output_attentions=None,
+ output_hidden_states=None,
+ return_dict=None,
+ *args,
+ **kwargs,
+ ):
+ output_attentions = (
+ output_attentions
+ if output_attentions is not None
+ else self.config.output_attentions
+ )
+ output_hidden_states = (
+ output_hidden_states
+ if output_hidden_states is not None
+ else self.config.output_hidden_states
+ )
+ use_cache = use_cache if use_cache is not None else self.config.use_cache
+
+ return_dict = (
+ return_dict if return_dict is not None else self.config.use_return_dict
+ )
+
+ # retrieve input_ids and inputs_embeds
+ if input_ids is not None and inputs_embeds is not None:
+ raise ValueError(
+ "You cannot specify both decoder_input_ids and decoder_inputs_embeds at the same time"
+ )
+ elif input_ids is not None:
+ batch_size, seq_length = input_ids.shape
+ elif inputs_embeds is not None:
+ batch_size, seq_length, _ = inputs_embeds.shape
+ else:
+ raise ValueError(
+ "You have to specify either decoder_input_ids or decoder_inputs_embeds"
+ )
+
+ if inputs_embeds is None:
+ inputs_embeds = self.embed_tokens(input_ids)
+ if hasattr(self, "config") and hasattr(self.config, "scale_emb"):
+ inputs_embeds = inputs_embeds * self.config.scale_emb
+
+ if use_cache:
+ pkv = tuple()
+
+ else:
+ pkv = None
+
+ hidden_states = inputs_embeds
+
+ # decoder layers
+ all_hidden_states = () if output_hidden_states else None
+ all_self_attns = () if output_attentions else None
+
+ for i, decoder_layer in enumerate(self.layers):
+ if output_hidden_states:
+ all_hidden_states += (hidden_states,)
+
+ layer_outputs = decoder_layer(
+ hidden_states,
+ attention_mask=attention_mask,
+ position_ids=self.position_bias,
+ past_key_value=(
+ past_key_values[i] if past_key_values is not None else None
+ ),
+ output_attentions=output_attentions,
+ use_cache=use_cache,
+ )
+
+ hidden_states = layer_outputs[0]
+
+ if use_cache:
+ _cache = layer_outputs[2 if output_attentions else 1]
+ pkv = pkv + (_cache,)
+
+ if output_attentions:
+ all_self_attns += (layer_outputs[1],)
+
+ # hidden_states = self.norm(hidden_states)
+ for start_idx in range(0, hidden_states.size(1), 32000):
+ end_idx = min(hidden_states.size(1), start_idx + 32000)
+ hidden_states[:, start_idx:end_idx, :] = self.norm(
+ hidden_states[:, start_idx:end_idx, :]
+ )
+
+ # add hidden states from the last decoder layer
+ if output_hidden_states:
+ all_hidden_states += (hidden_states,)
+
+ if not return_dict:
+ return tuple(
+ v
+ for v in [hidden_states, pkv, all_hidden_states, all_self_attns]
+ if v is not None
+ )
+ return BaseModelOutputWithPast(
+ last_hidden_state=hidden_states,
+ past_key_values=pkv,
+ hidden_states=all_hidden_states,
+ attentions=all_self_attns,
+ )
+
+ forward = huggingface_forward(ATTN_FORWRAD[attn_type](**attn_kwargs))
+ model = patch_glm_4_1m(model)
+
+ is_glm4 = False
+ if isinstance(model, LlamaForCausalLM):
+ Attention = model.model.layers[0].self_attn.__class__
+ Model = model.model.__class__
+ elif isinstance(model, MistralForCausalLM):
+ Attention = model.model.layers[0].self_attn.__class__
+ Model = model.model.__class__
+ elif isinstance(model, Qwen2ForCausalLM):
+ Attention = model.model.layers[0].self_attn.__class__
+ Model = model.model.__class__
+ elif model.__class__.__name__ == "MiniCPMForCausalLM":
+ Attention = model.model.layers[0].self_attn.__class__
+ Model = model.model.__class__
+ elif model.__class__.__name__ == "Phi3ForCausalLM":
+ Attention = model.model.layers[0].self_attn.__class__
+ Model = model.model.__class__
+ elif model.__class__.__name__ == "ChatGLMForConditionalGeneration":
+ Attention = model.model.layers[0].self_attn.__class__
+ Model = model.model.__class__
+ base = model.model.layers[0].self_attn.rotary_emb.rope_ratio * 10000
+ is_glm4 = True
+ else:
+ raise ValueError(
+ "Only LLaMA, Mistral, Qwen2, MiniCPM, Phi-3, and GLM-4 (ChatGLM) models are supported."
+ )
+
+ hf_rope = model.model.layers[0].self_attn.rotary_emb
+ base = base if base is not None else hf_rope.base
+ distance_scale = distance_scale if distance_scale is not None else 1.0
+ rope = RotaryEmbeddingESM(hf_rope.dim, base, distance_scale, is_glm4=is_glm4)
+ model.model.position_bias = rope
+ model.model.hf_position_bias = hf_rope
+ DecoderLayer = model.model.layers[0].__class__
+
+ def set_forward(m):
+ if isinstance(m, Attention):
+ m._old_forward = m.forward
+ m.forward = forward.__get__(m, Attention)
+ if isinstance(m, DecoderLayer):
+ m.forward = forward_llama_decoder_layer.__get__(m, DecoderLayer)
+
+ model.apply(set_forward)
+
+ model._old_prepare_inputs_for_generation = model.prepare_inputs_for_generation
+ model.prepare_inputs_for_generation = prepare_inputs_for_generation.__get__(
+ model, model.__class__
+ )
+ model.model._old_forward = model.model.forward
+ model.model.forward = model_forward.__get__(model.model, Model)
+ model.forward = forward_llama_for_causal_lm.__get__(model, model.__class__)
+
+ if attn_type == "inf_llm":
+ tokenizer = transformers.AutoTokenizer.from_pretrained(
+ model.config._name_or_path, trust_remote_code=True
+ )
+ model = InfLLMGenerator(model, tokenizer)
+
+ print("Patched model ...")
+ return model
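
`patch_hf` installs a streaming/InfLLM-style attention forward selected by `attn_type` (a key of the `ATTN_FORWRAD` table defined earlier in this file) and swaps in `RotaryEmbeddingESM` for position encoding. A hedged usage sketch; the checkpoint is illustrative, and in practice `attn_kwargs` is filled from an MInference attention config rather than left empty:

```python
# Hedged sketch: "inf_llm" is the default attn_type and indexes ATTN_FORWRAD;
# real runs pass block/window sizes through attn_kwargs (omitted here).
from transformers import AutoModelForCausalLM

model = AutoModelForCausalLM.from_pretrained(
    "meta-llama/Meta-Llama-3-8B-Instruct",  # illustrative checkpoint
    torch_dtype="auto",
    device_map="auto",
)
model = patch_hf(model, attn_type="inf_llm", attn_kwargs={})
# With attn_type="inf_llm", the returned object is an InfLLMGenerator wrapping the model.
```
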
+
+
+def fp8_cache_update(
+ self,
+ key_states: torch.Tensor,
+ value_states: torch.Tensor,
+ layer_idx: int,
+ cache_kwargs: Optional[Dict[str, Any]] = None,
+) -> Tuple[torch.Tensor, torch.Tensor]:
+ """
+ Updates the cache with the new `key_states` and `value_states` for the layer `layer_idx`.
+
+ Parameters:
+ key_states (`torch.Tensor`):
+ The new key states to cache.
+ value_states (`torch.Tensor`):
+ The new value states to cache.
+ layer_idx (`int`):
+ The index of the layer to cache the states for.
+ cache_kwargs (`Dict[str, Any]`, `optional`):
+ Additional arguments for the cache subclass. No additional arguments are used in `DynamicCache`.
+
+ Return:
+ A tuple containing the updated key and value states.
+ """
+ # Update the number of seen tokens
+ if layer_idx == 0:
+ self.seen_tokens += key_states.shape[-2]
+
+ # Update the cache
+ if len(self.key_cache) <= layer_idx:
+ self.key_cache.append(key_states.to(torch.float8_e5m2))
+ self.value_cache.append(value_states.to(torch.float8_e5m2))
+ else:
+ self.key_cache[layer_idx] = torch.cat(
+ [self.key_cache[layer_idx], key_states.to(torch.float8_e5m2)], dim=-2
+ )
+ self.value_cache[layer_idx] = torch.cat(
+ [self.value_cache[layer_idx], value_states.to(torch.float8_e5m2)], dim=-2
+ )
+
+ return self.key_cache[layer_idx].to(key_states.dtype), self.value_cache[
+ layer_idx
+ ].to(key_states.dtype)
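
Like the CPU-offload path patched in `minference_patch_kv_cache_cpu`, `fp8_cache_update` is meant to be monkey-patched over `DynamicCache.update`, storing keys and values as `float8_e5m2` and casting back to the activation dtype on read. A hedged sketch of wiring it in, mirroring how the CPU variant is installed above:

```python
# Hedged sketch, mirroring the cpu_cache_update installation above; note that
# fp8_cache_update uses `seen_tokens`, so it targets transformers versions where
# DynamicCache still exposes that attribute, and it requires float8_e5m2 support.
import transformers

transformers.cache_utils.DynamicCache.update = fp8_cache_update
```
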
+
+
+def cpu_cache_update(
+ self,
+ key_states: torch.Tensor,
+ value_states: torch.Tensor,
+ layer_idx: int,
+ cache_kwargs: Optional[Dict[str, Any]] = None,
+) -> Tuple[torch.Tensor, torch.Tensor]:
+ if layer_idx == 0:
+ if "_seen_tokens" in self.__dict__:
+ self._seen_tokens += key_states.shape[-2]
+ else:
+ self.seen_tokens += key_states.shape[-2]
+
+ # Update the cache
+ if len(self.key_cache) <= layer_idx:
+ self.key_cache.append(key_states.to(KV_CACHE_CPU_DEVICE))
+ self.value_cache.append(value_states.to(KV_CACHE_CPU_DEVICE))
+ else:
+ self.key_cache[layer_idx] = torch.cat(
+ [self.key_cache[layer_idx], key_states.to(KV_CACHE_CPU_DEVICE)], dim=-2
+ )
+ self.value_cache[layer_idx] = torch.cat(
+ [self.value_cache[layer_idx], value_states.to(KV_CACHE_CPU_DEVICE)], dim=-2
+ )
+
+
+def cpu_cache_get(
+ self,
+ key_states: torch.Tensor,
+ value_states: torch.Tensor,
+ layer_idx: int,
+ head_idx: int,
+ cache_kwargs: Optional[Dict[str, Any]] = None,
+) -> Tuple[torch.Tensor, torch.Tensor]:
+ if layer_idx == 0:
+ if "_seen_tokens" in self.__dict__:
+ self._seen_tokens += key_states.shape[-2]
+ else:
+ self.seen_tokens += key_states.shape[-2]
+
+ # Update the cache
+ if len(self.key_cache) <= layer_idx:
+ return key_states, value_states
+ elif KV_CACHE_CPU_DEVICE == "cpu":
+ key_states = torch.cat(
+ [self.key_cache[layer_idx][:, head_idx : head_idx + 1].cuda(), key_states],
+ dim=-2,
+ )
+ value_states = torch.cat(
+ [
+ self.value_cache[layer_idx][:, head_idx : head_idx + 1].cuda(),
+ value_states,
+ ],
+ dim=-2,
+ )
+ return key_states, value_states
+ key_states = torch.cat(
+ [self.key_cache[layer_idx][:, head_idx : head_idx + 1], key_states],
+ dim=-2,
+ )
+ value_states = torch.cat(
+ [
+ self.value_cache[layer_idx][:, head_idx : head_idx + 1],
+ value_states,
+ ],
+ dim=-2,
+ )
+ return key_states, value_states
diff --git a/minference/utils.py b/minference/utils.py
new file mode 100644
index 0000000..e71c252
--- /dev/null
+++ b/minference/utils.py
@@ -0,0 +1,62 @@
+# Copyright (c) 2024 Microsoft
+# Licensed under The MIT License [see LICENSE for details]
+
+import torch
+
+
+def patch_glm_4_1m(model):
+ # Support THUDM/glm-4-9b-chat-1m
+ if model.__class__.__name__ == "ChatGLMForConditionalGeneration":
+ model.model = model.transformer
+ del model.transformer
+ model.model.embed_tokens = model.model.embedding.word_embeddings
+ del model.model.embedding
+ model.lm_head = model.model.output_layer
+ del model.model.output_layer
+ model.model.norm = model.model.encoder.final_layernorm
+ del model.model.encoder.final_layernorm
+ model.model.layers = model.model.encoder.layers
+ del model.model.encoder
+ for layer_idx in range(0, len(model.model.layers)):
+ model.model.layers[layer_idx].self_attn = model.model.layers[
+ layer_idx
+ ].self_attention
+ del model.model.layers[layer_idx].self_attention
+ model.model.layers[layer_idx].self_attn.qkv_proj = model.model.layers[
+ layer_idx
+ ].self_attn.query_key_value
+ del model.model.layers[layer_idx].self_attn.query_key_value
+ model.model.layers[layer_idx].self_attn.o_proj = model.model.layers[
+ layer_idx
+ ].self_attn.dense
+ del model.model.layers[layer_idx].self_attn.dense
+ model.model.layers[
+ layer_idx
+ ].self_attn.rotary_emb = model.model.rotary_pos_emb
+ model.model.layers[layer_idx].self_attn.config = model.config
+ model.model.layers[layer_idx].self_attn.layer_idx = layer_idx
+ config = model.config
+ model.model.layers[
+ layer_idx
+ ].self_attn.attention_dropout = config.attention_dropout
+ model.model.layers[layer_idx].self_attn.hidden_size = config.hidden_size
+ model.model.layers[
+ layer_idx
+ ].self_attn.num_heads = config.num_attention_heads
+ model.model.layers[layer_idx].self_attn.head_dim = (
+ config.hidden_size // config.num_attention_heads
+ )
+ model.model.layers[
+ layer_idx
+ ].self_attn.num_key_value_heads = config.multi_query_group_num
+ model.model.layers[layer_idx].self_attn.num_key_value_groups = (
+ config.num_attention_heads // config.multi_query_group_num
+ )
+ model.model.layers[
+ layer_idx
+ ].self_attn.max_position_embeddings = config.seq_length
+ model.model.layers[layer_idx].self_attn.rope_theta = config.rope_ratio
+ model.model.layers[layer_idx].self_attn.is_causal = True
+ model.model.gradient_checkpointing = False
+ torch.cuda.empty_cache()
+ return model
diff --git a/minference/version.py b/minference/version.py
new file mode 100644
index 0000000..b6245f9
--- /dev/null
+++ b/minference/version.py
@@ -0,0 +1,14 @@
+# Copyright (c) 2024 Microsoft
+# Licensed under The MIT License [see LICENSE for details]
+
+_MAJOR = "0"
+_MINOR = "1"
+# On master and in a nightly release the patch should be one ahead of the last
+# released build.
+_PATCH = "0"
+# This is mainly for nightly builds which have the suffix ".dev$DATE". See
+# https://semver.org/#is-v123-a-semantic-version for the semantics.
+_SUFFIX = ""
+
+VERSION_SHORT = "{0}.{1}".format(_MAJOR, _MINOR)
+VERSION = "{0}.{1}.{2}{3}".format(_MAJOR, _MINOR, _PATCH, _SUFFIX)
diff --git a/setup.cfg b/setup.cfg
new file mode 100644
index 0000000..c3ddf04
--- /dev/null
+++ b/setup.cfg
@@ -0,0 +1,15 @@
+[black]
+line-length = 88
+target-version = ['py38']
+include = '\.pyi?$'
+
+[isort]
+atomic = True
+profile = black
+line_length = 88
+skip_gitignore = True
+known_first_party = ["minference"]
+
+[flake8]
+ignore = E203, E501, E741, W503, W605
+max-line-length = 88
diff --git a/setup.py b/setup.py
new file mode 100644
index 0000000..e5147c5
--- /dev/null
+++ b/setup.py
@@ -0,0 +1,140 @@
+# Copyright (c) 2024 Microsoft
+# Licensed under The MIT License [see LICENSE for details]
+
+import os
+import subprocess
+
+import torch
+from packaging.version import Version, parse
+from setuptools import find_packages, setup
+from torch.utils.cpp_extension import CUDA_HOME, BuildExtension, CUDAExtension
+
+# PEP0440 compatible formatted version, see:
+# https://www.python.org/dev/peps/pep-0440/
+#
+# release markers:
+# X.Y
+# X.Y.Z # For bugfix releases
+#
+# pre-release markers:
+# X.YaN # Alpha release
+# X.YbN # Beta release
+# X.YrcN # Release Candidate
+# X.Y # Final release
+
+# version.py defines the VERSION and VERSION_SHORT variables.
+# We use exec here so we don't import minference while setting up.
+VERSION = {} # type: ignore
+with open("minference/version.py", "r") as version_file:
+ exec(version_file.read(), VERSION)
+
+INSTALL_REQUIRES = [
+ "transformers>=4.37.0",
+ "accelerate",
+ "torch",
+ "triton",
+ "flash_attn",
+ "pycuda==2023.1",
+]
+QUALITY_REQUIRES = [
+ "black==21.4b0",
+ "flake8>=3.8.3",
+ "isort>=5.5.4",
+ "pre-commit",
+ "pytest",
+ "pytest-xdist",
+]
+DEV_REQUIRES = INSTALL_REQUIRES + QUALITY_REQUIRES
+
+MAIN_CUDA_VERSION = "12.1"
+
+
+def _is_cuda() -> bool:
+ return torch.version.cuda is not None
+
+
+def get_nvcc_cuda_version() -> Version:
+ """Get the CUDA version from nvcc.
+
+ Adapted from https://github.com/NVIDIA/apex/blob/8b7a1ff183741dd8f9b87e7bafd04cfde99cea28/setup.py
+ """
+ assert CUDA_HOME is not None, "CUDA_HOME is not set"
+ nvcc_output = subprocess.check_output(
+ [CUDA_HOME + "/bin/nvcc", "-V"], universal_newlines=True
+ )
+ output = nvcc_output.split()
+ release_idx = output.index("release") + 1
+ nvcc_cuda_version = parse(output[release_idx].split(",")[0])
+ return nvcc_cuda_version
+
+
+def get_minference_version() -> str:
+ version = VERSION["VERSION"]
+
+ if _is_cuda():
+ cuda_version = str(get_nvcc_cuda_version())
+ if cuda_version != MAIN_CUDA_VERSION:
+ cuda_version_str = cuda_version.replace(".", "")[:3]
+ version += f"+cu{cuda_version_str}"
+ else:
+ raise RuntimeError("Unknown runtime environment")
+
+ return version
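
`get_minference_version` appends a `+cuXYZ` local-version suffix whenever the nvcc CUDA version differs from `MAIN_CUDA_VERSION`. An illustrative walk-through of that suffix logic with assumed version strings:

```python
# Illustrative only: with VERSION == "0.1.0" and nvcc reporting CUDA 11.8,
# the wheel version becomes "0.1.0+cu118"; with CUDA 12.1 it stays "0.1.0".
version, cuda_version, main_cuda = "0.1.0", "11.8", "12.1"
if cuda_version != main_cuda:
    version += "+cu" + cuda_version.replace(".", "")[:3]
assert version == "0.1.0+cu118"
```
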
+
+
+ext_modules = [
+ CUDAExtension(
+ name="minference.cuda",
+ sources=[
+ os.path.join("csrc", "kernels.cpp"),
+ os.path.join("csrc", "vertical_slash_index.cu"),
+ ],
+ extra_compile_args=["-std=c++17", "-O3"],
+ )
+]
+
+setup(
+ name="minference",
+ version=get_minference_version(),
+ author="The MInference team",
+ author_email="hjiang@microsoft.com",
+ description="To speed up Long-context LLMs' inference, approximate and dynamic sparse calculate the attention, which reduces inference latency by up to 10x for pre-filling on an A100 while maintaining accuracy.",
+ long_description=open("README.md", encoding="utf8").read(),
+ long_description_content_type="text/markdown",
+ keywords="LLMs Inference, Long-Context LLMs, Dynamic Sparse Attention, Efficient Inference",
+ license="MIT License",
+ url="https://github.com/microsoft/MInference",
+ classifiers=[
+ "Intended Audience :: Science/Research",
+ "Development Status :: 3 - Alpha",
+ "Programming Language :: Python :: 3",
+ "Topic :: Scientific/Engineering :: Artificial Intelligence",
+ ],
+ package_dir={"": "."},
+ packages=find_packages(
+ exclude=(
+ "csrc",
+ "dist",
+ "examples",
+ "experiments",
+ "images",
+ "test",
+ "minference.egg-info",
+ )
+ ),
+ extras_require={
+ "dev": DEV_REQUIRES,
+ "quality": QUANLITY_REQUIRES,
+ },
+ install_requires=INSTALL_REQUIRES,
+ setup_requires=[
+ "packaging",
+ "psutil",
+ "ninja",
+ ],
+ include_package_data=True,
+ python_requires=">=3.8.0",
+ zip_safe=False,
+ ext_modules=ext_modules,
+ cmdclass={"build_ext": BuildExtension},
+)