Skip to content
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
11 changes: 5 additions & 6 deletions CI/check_model.py
Original file line number Diff line number Diff line change
@@ -1,9 +1,9 @@
import argparse
import os

import akida
from cnn2snn import convert
from quantizeml import load_model
from compute_device import compute_min_device


def process_model(file_path):
try:
Expand All @@ -18,7 +18,7 @@ def process_model(file_path):
raise RuntimeError(f"❌ Error converting {file_path}: {e}") from e

try:
device = compute_min_device(model_ak, enable_hwpr=True)
device = akida.compute_min_device(model_ak, enable_hwpr=True)
result = len(device.mesh.nps) // 4
print(f"✅ {file_path}: needs {result} Akida nodes")
except Exception as e:
Expand All @@ -28,10 +28,9 @@ def process_model(file_path):
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument("--models", nargs="+", required=True,
help="Path to model files (.h5 or .onnx)")
help="Path to model files (.h5 or .onnx)")
args = parser.parse_args()

# Process each model
for model_file in args.models:
if os.path.exists(model_file):
process_model(model_file)
process_model(model_file)
257 changes: 0 additions & 257 deletions CI/compute_device.py

This file was deleted.

1 change: 1 addition & 0 deletions README.md
Original file line number Diff line number Diff line change
Expand Up @@ -24,6 +24,7 @@ In addition, some models can be evaluated directly through [Akida Cloud](https:/
| Vision | Classification | [MobileNetV2 1.0](vision/classification/mobilenetv2/imagenet) | 224 | ImageNet | 3.5M | 8 | 70.35% | | | 7 |
| Vision | Classification | [MobileNetV2 0.75](vision/classification/mobilenetv2/imagenet) | 160 | ImageNet | 2.6M | 8 | 62.85% | | | 4 ☁️ |
| Vision | Classification | [MobileNetV2 0.35](vision/classification/mobilenetv2/imagenet) | 96 | ImageNet | 1.2M | 8 | 43.47% | | | 2 ☁️ |
| Vision | Classification | [MobileNetV4 1.0](vision/classification/mobilenetv4/imagenet) | 224 | ImageNet | 3.77M | 8 | 71.86% | | | 8 |
| Vision | Classification | [MobileNetV2_1.0](vision/classification/mobilenetv2/cifar10) | 128 | CIFAR-10 | 2.25M | 8 | 93.96% | | | 5 ☁️ |
| Vision | Classification | [MobileNetV2_1.0](vision/classification/mobilenetv2/oxford_flowers) | 224 | Oxford_Flower | 2.4M | 8 | 91.97% | | | 7 |
| Vision | Classification | [MobileNetV4_1.0](vision/classification/mobilenetv4/cifar10) | 128 | CIFAR-10 | 2.5M | 8 | 94.72% | | | 7 |
Expand Down
19 changes: 19 additions & 0 deletions vision/classification/mobilenetv4/imagenet/README.md
Original file line number Diff line number Diff line change
@@ -0,0 +1,19 @@
# MobileNetV4-Conv-Small

## Source
https://huggingface.co/timm/mobilenetv4_conv_small.e2400_r224_in1k

## Environment
```
torch: 2.0
torchvision
timm
quantizeml: 1.1.1
```

## References
- **MobileNet-v4** Model from the paper [MobileNetV4 — Universal Models for the Mobile Ecosystem](https://arxiv.org/abs/2404.10518)
- Model Card [MobileNetV4-Conv-Small](https://huggingface.co/timm/mobilenetv4_conv_small.e2400_r224_in1k)

## License
Please refer to [https://huggingface.co/timm/mobilenetv4_conv_small.e2400_r224_in1k](https://huggingface.co/timm/mobilenetv4_conv_small.e2400_r224_in1k)
Git LFS file not shown
Git LFS file not shown