Commit e29faad
fix bechmark.py
1 parent f20f9a3 commit e29faad

4 files changed: +121 -128 lines


Diff for: BENCHMARKS.MD (+44 -0)
@@ -9,6 +9,50 @@ V2 -> V3, Added new options and a different video with more duplicated frames fo
 V1 -> V2, Longer video for more accurate results, More time in between runs to cool of the gpu, more options ( dedup )
 
 #### Testing Methodology V4
+# Benchmark Results
+
+## System Information
+
+- **CPU:** 13th Gen Intel(R) Core(TM) i7-13700K
+- **Total RAM:** 31.77 GB
+- **Graphics Card 0:** Intel(R) UHD Graphics 770
+- **Graphics Card 1:** NVIDIA GeForce RTX 3090
+
+**Version:** 2.0.0 ( Pre-Release )
+**Testing Methodology:** V4.4
+
+## Upscale Results
+
+| Model                       | Time (s) | FPS   |
+|-----------------------------|----------|-------|
+| shufflecugan                | 30.65    | 23.49 |
+| compact                     | 50.81    | 14.17 |
+| ultracompact                | 29.01    | 24.82 |
+| superultracompact           | 9.96     | 72.31 |
+| span                        | 57.32    | 12.56 |
+| compact-tensorrt            | 44.26    | 21.69 |
+| ultracompact-tensorrt       | 24.63    | 38.97 |
+| superultracompact-tensorrt  | 9.80     | 97.99 |
+| shufflecugan-tensorrt       | 23.42    | 40.98 |
+| span-tensorrt               | 36.82    | 26.08 |
+
+## Interpolate Results
+
+| Model                           | Time (s) | FPS    |
+|---------------------------------|----------|--------|
+| rife4.6                         | 16.46    | 145.78 |
+| rife4.6-ensemble                | 22.20    | 86.50  |
+| rife4.22                        | 23.34    | 102.82 |
+| rife4.22-ensemble               | 18.85    | 101.83 |
+| rife4.22-lite                   | 17.16    | 139.84 |
+| rife4.22-lite-ensemble          | 14.44    | 132.97 |
+| rife4.6-tensorrt                | 8.83     | 271.90 |
+| rife4.6-tensorrt-ensemble       | 12.38    | 155.06 |
+| rife4.22-tensorrt               | 15.50    | 154.86 |
+| rife4.22-tensorrt-ensemble      | 12.72    | 150.94 |
+| rife4.22-lite-tensorrt          | 11.55    | 207.73 |
+| rife4.22-lite-tensorrt-ensemble | 9.54     | 201.18 |
+
 # Toji
 ![benchmarkResults](https://github.com/user-attachments/assets/195e38da-b3d8-45af-868c-def2b3034e7f)
 

Diff for: CHANGELOG.MD (+2 -0)
@@ -25,6 +25,7 @@ and this project adheres to [Semantic Versioning](http://semver.org/).
 - Improved `Input/Output` handling which should result in a much more dynamic encoding selection for batch processes.
 - Slight adjustment to chained processes should yield in a small performance boost.
 - Lazifying TAS has improved start up times by up to `1s` in certain workloads, this should improve the responsiveness of TAS by a margin of `30%` on average.
+- Improved and streamlined TAS benchmarking.
 
 #### Removals:
 - Vidgear package
@@ -37,6 +38,7 @@ and this project adheres to [Semantic Versioning](http://semver.org/).
 - `16bit` workflow wouldn't work due to undeclared variable.
 - Fixed an issue where RIFE TRT would produce unwanted results on the 2nd frame of the output video when CPU usage was high.
 - Fixed an issue where RIFE TRT would display duplicated frames past `2x` interpolation.
+- Fixed `benchmark.py` issues caused by backend changes.
 
 #### Breaking Changes:
 - If you are updating from `TAS 1.9.8, 1.9.9, 1.9.10`, YOU will have to delete all `RIFE` related Engines and regenerate them.

Diff for: benchmark.py (+29 -25)
@@ -17,11 +17,11 @@
 if not os.path.exists(mainPath):
     os.makedirs(mainPath)
 
-ffmpegLogPath = os.path.join(mainPath, "ffmpegLog.txt")
-logTxtPath = os.path.join(mainPath, "log.txt")
+ffmpegLogPath = os.path.join(mainPath, "ffmpeg.log")
+tasLogPath = os.path.join(mainPath, "TAS.log")
 
 
-def runAllBenchmarks(executor, version, inputVideo=None):
+def runAllBenchmarks(executor, version, inputVideo=None, systemInfo=None):
     print(
         "Running all benchmarks. Depending on your system, this may take a while. Please be patient and keep the terminal in focus at all time."
     )
@@ -33,8 +33,6 @@ def runAllBenchmarks(executor, version, inputVideo=None):
         "Interpolate": runInterpolateBenchmark(inputVideo, executor),
     }
 
-    systemInfo = parseSystemInfo()
-
     with open("benchmarkResults.json", "w") as f:
         json.dump(
             {
@@ -80,7 +78,9 @@ def getClip(executor):
     outputPath = "output/test.mp4"
     cmd = executor + ["--input", CLIPURL, "--output", outputPath]
     subprocess.Popen(cmd, shell=False).wait()
-    return os.path.abspath(outputPath)
+    systemInfo = parseSystemInfo()
+
+    return os.path.abspath(outputPath), systemInfo
 
 
 def runUpscaleBenchmark(inputVideo, executor):
@@ -94,9 +94,10 @@ def runUpscaleBenchmark(inputVideo, executor):
             "--upscale",
             "--upscale_method",
             method,
+            "--static",
             "--benchmark",
             "--outpoint",
-            "16" if "-tensorrt" in method else "12",
+            "20" if "-tensorrt" in method else "15",
         ]
         print(f"Running command: {' '.join(cmd)}") # Debugging line
         subprocess.run(cmd, check=True, cwd=os.path.dirname(os.path.abspath(__file__)))
@@ -143,10 +144,13 @@ def runInterpolateBenchmark(inputVideo, executor):
             "--interpolate_method",
             method,
             "--benchmark",
-            "--ensemble",
+            # "--ensemble",
             "--outpoint",
             "20",
         ]
+
+        if method not in ["rife4.22", "rife4.22-lite"]:
+            cmd += ["--ensemble"]
         print(f"Running command: {' '.join(cmd)}") # Debugging line
         subprocess.run(cmd, cwd=os.path.dirname(os.path.abspath(__file__)))
 
@@ -158,24 +162,24 @@ def runInterpolateBenchmark(inputVideo, executor):
 
 
 def parseFPS():
-    with open(ffmpegLogPath, "r") as file:
-        output = file.read()
-    matches = re.findall(r"fps=\s*([\d.]+)", output)
-    # Filter out fps values that are 0.0 or 0
-    filtered = [float(fps) for fps in matches if float(fps) > 0]
-    if filtered:
-        highestFPS = max(filtered)
-        averageFPS = round(sum(filtered) / len(filtered), 2)
-        print("Highest FPS:", highestFPS, "Average FPS:", averageFPS)
-        return ("Highest FPS:", highestFPS, "Average FPS:", averageFPS)
-    else:
-        print("Couldn't identify FPS value. Skipping...")
-        return None
+    with open(tasLogPath, "r") as file:
+        for line in file:
+            if "Total Execution Time" in line:
+                match = re.search(
+                    r"Total Execution Time: ([\d.]+) seconds - FPS: ([\d.]+)", line
+                )
+                if match:
+                    total_execution_time = float(match.group(1))
+                    fps = float(match.group(2))
+                    print(f"Total Execution Time: {total_execution_time} seconds")
+                    print(f"FPS: {fps}")
+                    return total_execution_time, fps
+    return None, None
 
 
 def parseSystemInfo():
     systemInfo = {}
-    with open(logTxtPath, "r") as file:
+    with open(tasLogPath, "r") as file:
         lines = file.readlines()
         start = lines.index("============== System Checker ==============\n") + 1
         end = lines.index("============== Arguments Checker ==============\n")
@@ -189,7 +193,7 @@ def parseSystemInfo():
 if __name__ == "__main__":
     TIMESLEEP = 2
     CLIPURL = "https://www.youtube.com/watch?v=kpeUMAVJCig"
-    TESTINGVERSION = "V4.3"
+    TESTINGVERSION = "V4.4"
 
     upscaleMethods = [
         "shufflecugan",
@@ -224,7 +228,7 @@ def parseSystemInfo():
 
     currentTest = 0
     executor, version = getExe()
-    inputVideo = getClip(executor)
+    inputVideo, systemInfo = getClip(executor)
 
     # Define the questions
     questions = [
@@ -274,4 +278,4 @@ def parseSystemInfo():
     print(f"Using {' '.join(executor)} version {version}")
     print("Current working directory:", os.getcwd())
 
-    runAllBenchmarks(executor, version, inputVideo)
+    runAllBenchmarks(executor, version, inputVideo, systemInfo)
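
Note on the `parseFPS()` rewrite above: instead of scraping ffmpeg's incremental `fps=` progress samples from `ffmpegLog.txt` and averaging them, the function now reads the single summary line that TAS writes to `TAS.log` and returns a `(total execution time, FPS)` pair. A minimal sketch of that parsing, using the same regex as the diff; the sample log line below is illustrative, its exact wording is an assumption chosen to match what the regex expects:

```python
import re

# Illustrative TAS.log line; the exact wording is assumed, but it follows the
# "Total Execution Time: <seconds> seconds - FPS: <fps>" pattern that the new
# parseFPS() searches for.
sampleLine = "Total Execution Time: 9.80 seconds - FPS: 97.99"

match = re.search(r"Total Execution Time: ([\d.]+) seconds - FPS: ([\d.]+)", sampleLine)
if match:
    totalExecutionTime = float(match.group(1))  # 9.8
    fps = float(match.group(2))                 # 97.99
    print(totalExecutionTime, fps)
```

Because the summary line is written once per run, the old filtering and averaging of per-frame `fps=` samples is no longer needed, and the benchmark tables can report total time and FPS directly.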

Diff for: src/utils/jsonToTable.py (+46 -103)
@@ -1,104 +1,47 @@
 import json
-import pandas as pd
-import matplotlib.pyplot as plt
-import argparse
-
-from matplotlib.gridspec import GridSpec
-
-def jsonToTable(input: str) -> None:
-    with open(input, "r") as file:
-        data = json.load(file)
-
-    fig = plt.figure(figsize=(16, 12))
-    gs = GridSpec(2, 2, width_ratios=[1, 1.5], height_ratios=[1, 1])
-    fig.suptitle("Benchmark Results", fontsize=16)
-
-    ax1 = fig.add_subplot(gs[:, 0])
-    systemInfo = pd.DataFrame.from_dict(
-        data["System Info"], orient="index", columns=["Value"]
-    )
-    systemInfo.index.name = "Property"
-    systemInfo = systemInfo.reset_index()
-
-    versionInfo = pd.DataFrame(
-        {
-            "Property": ["Version", "Testing Methodology"],
-            "Value": [data["Version"], data["Testing Methodology"]],
-        }
-    )
-    systemInfo = pd.concat([versionInfo, systemInfo]).reset_index(drop=True)
-
-    ax1.axis("off")
-    ax1.set_title("System Information")
-    table1 = ax1.table(
-        cellText=systemInfo.values,
-        colLabels=systemInfo.columns,
-        cellLoc="left",
-        loc="center",
-    )
-    table1.auto_set_font_size(False)
-    table1.set_fontsize(9)
-    table1.scale(1, 1.5)
-
-    ax2 = fig.add_subplot(gs[0, 1])
-    upscaleData = data["Results"]["Upscale"]
-    upscaleDf = pd.DataFrame.from_dict(
-        upscaleData,
-        orient="index",
-        columns=["Highest FPS", "Highest FPS Value", "Average FPS", "Average FPS Value"],
-    )
-    upscaleDf = upscaleDf.drop(["Highest FPS", "Average FPS"], axis=1)
-    upscaleDf.index.name = "Model"
-    upscaleDf = upscaleDf.reset_index()
-    upscaleDf["Model"] = upscaleDf["Model"].str.title()
-
-    ax2.axis("off")
-    ax2.set_title("Upscale Results")
-    table2 = ax2.table(
-        cellText=upscaleDf.values, colLabels=upscaleDf.columns, cellLoc="left", loc="center"
-    )
-    table2.auto_set_font_size(False)
-    table2.set_fontsize(9)
-    table2.scale(1, 1.5)
-
-    ax3 = fig.add_subplot(gs[1, 1])
-    interpolateData = data["Results"]["Interpolate"]
-    interpolateDf = pd.DataFrame.from_dict(
-        interpolateData,
-        orient="index",
-        columns=["Highest FPS", "Highest FPS Value", "Average FPS", "Average FPS Value"],
-    )
-    interpolateDf = interpolateDf.drop(["Highest FPS", "Average FPS"], axis=1)
-    interpolateDf.index.name = "Model"
-    interpolateDf = interpolateDf.reset_index()
-    interpolateDf["Model"] = interpolateDf["Model"].str.title()
-
-    ax3.axis("off")
-    ax3.set_title("Interpolate Results")
-    table3 = ax3.table(
-        cellText=interpolateDf.values,
-        colLabels=interpolateDf.columns,
-        cellLoc="left",
-        loc="center",
-    )
-    table3.auto_set_font_size(False)
-    table3.set_fontsize(9)
-    table3.scale(1, 1.5)
-
-    plt.tight_layout()
-    plt.savefig("benchmarkResults.png", dpi=300, bbox_inches="tight")
-
-    plt.show()
-
-if __name__ == "__main__":
-
-    parser = argparse.ArgumentParser()
-    parser.add_argument(
-        "--input",
-        type=str,
-        help="Path to the JSON file containing the benchmark results",
-        required=True,
-    )
-
-    args = parser.parse_args()
-    jsonToTable(args.input)
+import random
+import string
+
+jsonPath = r"G:\TheAnimeScripter\benchmarkResults.json"
+
+
+def jsonToMarkdownTable(jsonData):
+    markdown = "# Benchmark Results\n\n"
+
+    markdown += "## System Information\n\n"
+    for key, value in jsonData["System Info"].items():
+        markdown += f"- **{key}:** {value}\n"
+    markdown += "\n"
+
+    markdown += f"**Version:** {jsonData['Version']} \n"
+    markdown += f"**Testing Methodology:** {jsonData['Testing Methodology']}\n\n"
+
+    for category, models in jsonData["Results"].items():
+        markdown += f"## {category} Results\n\n"
+        markdown += "| Model | Time (s) | FPS |\n"
+        markdown += "|-------|-----------|-----|\n"
+
+        for model, results in models.items():
+            timeMs, fps = results
+            markdown += f"| {model} | {timeMs:.2f} | {fps:.2f} |\n"
+
+        markdown += "\n"
+
+    return markdown
+
+
+def generateRandomFilename(extension="md"):
+    randomString = "".join(random.choices(string.ascii_lowercase + string.digits, k=8))
+    return f"{randomString}.{extension}"
+
+
+with open(jsonPath, "r") as file:
+    jsonData = json.load(file)
+
+markdownOutput = jsonToMarkdownTable(jsonData)
+
+randomFilename = generateRandomFilename()
+with open(randomFilename, "w") as file:
+    file.write(markdownOutput)
+
+print(f"{randomFilename} has been generated successfully.")
