diff --git a/README.md b/README.md
index 5cdbe41..8a7ad38 100644
--- a/README.md
+++ b/README.md
@@ -5,7 +5,7 @@
Welcome to the [THOP](https://github.com/ultralytics/thop) repository, your comprehensive solution for profiling PyTorch models by computing the number of Multiply-Accumulate Operations (MACs) and parameters. This tool is essential for deep learning practitioners to evaluate model efficiency and performance.
-[![GitHub Actions](https://github.com/ultralytics/thop/actions/workflows/format.yml/badge.svg)](https://github.com/ultralytics/thop/actions/workflows/main.yml) [![PyPI version](https://badge.fury.io/py/ultralytics-thop.svg)](https://badge.fury.io/py/ultralytics-thop)
+[![GitHub Actions](https://github.com/ultralytics/thop/actions/workflows/format.yml/badge.svg)](https://github.com/ultralytics/thop/actions/workflows/format.yml)
## 📄 Description
@@ -15,6 +15,8 @@ THOP offers an intuitive API to profile PyTorch models by calculating the number
You can install THOP via pip:
+[![PyPI - Version](https://img.shields.io/pypi/v/ultralytics-thop?logo=pypi&logoColor=white)](https://pypi.org/project/ultralytics-thop/) [![Downloads](https://static.pepy.tech/badge/ultralytics-thop)](https://pepy.tech/project/ultralytics-thop) [![PyPI - Python Version](https://img.shields.io/pypi/pyversions/ultralytics-thop?logo=python&logoColor=gold)](https://pypi.org/project/ultralytics-thop/)
+
```bash
pip install ultralytics-thop
```
diff --git a/benchmark/evaluate_famous_models.py b/benchmark/evaluate_famous_models.py
index 5fb1cbf..451f97e 100644
--- a/benchmark/evaluate_famous_models.py
+++ b/benchmark/evaluate_famous_models.py
@@ -19,10 +19,13 @@
device = "cuda"
for name in model_names:
- model = models.__dict__[name]().to(device)
- dsize = (1, 3, 224, 224)
- if "inception" in name:
- dsize = (1, 3, 299, 299)
- inputs = torch.randn(dsize).to(device)
- total_ops, total_params = profile(model, (inputs,), verbose=False)
- print("%s | %.2f | %.2f" % (name, total_params / (1000**2), total_ops / (1000**3)))
+ try:
+ model = models.__dict__[name]().to(device)
+ dsize = (1, 3, 224, 224)
+ if "inception" in name:
+ dsize = (1, 3, 299, 299)
+ inputs = torch.randn(dsize).to(device)
+ total_ops, total_params = profile(model, (inputs,), verbose=False)
+ print("%s | %.2f | %.2f" % (name, total_params / (1000**2), total_ops / (1000**3)))
+ except Exception as e:
+        print(f"Warning: failed to process {name}: {e}")
diff --git a/thop/__init__.py b/thop/__init__.py
index 6e2189d..1f887d4 100644
--- a/thop/__init__.py
+++ b/thop/__init__.py
@@ -1,4 +1,4 @@
-__version__ = "0.2.5"
+__version__ = "0.2.6"
import torch