add benchmarks (#62)
* add code to generate benchmarks

* add benchmark info in README
Sid-Bhatia-0 authored Nov 5, 2021
1 parent 22c3203 commit 8bc5f51
Showing 4 changed files with 239 additions and 0 deletions.
46 changes: 46 additions & 0 deletions README.md
@@ -11,6 +11,7 @@ This is a lightweight package that provides exact and efficient (for the most pa
- [API](#api)
- [Draw with bounds checking](#draw-with-bounds-checking)
- [Visualization](#visualization)
- [Benchmarks](#benchmarks)

[List of shapes](#list-of-shapes):

@@ -88,6 +89,51 @@ By default, the `draw!` function draws the clipped shape, that is, it draws only

The `visualize` function helps visualize a binary image inside the terminal, using Unicode block characters to represent pixels. It is a quick way to verify that your drawing algorithms are functioning as intended. It works well for low resolution images; for higher resolution images, you can maximize your terminal window and reduce the font size.
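For example, here is a minimal sketch (assuming `visualize` takes the image as its only argument, and using `true` as the "color" for a boolean image):

```julia
import SimpleDraw as SD

# draw a circle on a small boolean image and display it in the terminal
image = falses(32, 32)
SD.draw!(image, SD.Circle(SD.Point(16, 16), 10), true)
SD.visualize(image)
```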

### Benchmarks

To generate the benchmarks, clone this repository and start the Julia REPL inside the `/benchmark` directory, using the `Project.toml` and `Manifest.toml` files given there:

```
benchmark $ julia --project=.
```
Then execute the following (the second call is intentional, as explained below):
```julia-repl
julia> include("benchmark.jl");
julia> generate_benchmark_file();
julia> generate_benchmark_file();
```

The `generate_benchmark_file()` function produces a markdown file whose name is a timestamp, so that multiple calls don't overwrite the same file. I usually run `generate_benchmark_file()` twice and take the result of the second run, just to make sure that everything is already compiled before the second run starts.
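You can also pass an explicit file name via the `file_name` keyword argument of `generate_benchmark_file` (see `/benchmark/benchmark.jl`). For example, with a hypothetical name:

```julia-repl
julia> generate_benchmark_file(file_name = "benchmarks_v0_2_0.md");
```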

For details on exactly which shapes are drawn while generating these benchmarks, see `/benchmark/benchmark.jl`.

Here are the benchmarks for `v0.2.0`:

Date: 2021_11_04_17_10_38 (yyyy_mm_dd_HH_MM_SS)

**Note:** The time in benchmarks is the median time. The column headers (64, 256, 1024) are the side lengths of the square images being drawn on.

| |64|256|1024|
|:---:|:---:|:---:|:---:|
|Point|4.022 ns<br>0 bytes|4.151 ns<br>0 bytes|4.149 ns<br>0 bytes|
|Background|173.898 ns<br>0 bytes|6.716 μs<br>0 bytes|233.739 μs<br>0 bytes|
|Line|185.178 ns<br>0 bytes|841.583 ns<br>0 bytes|3.419 μs<br>0 bytes|
|VerticalLine|22.484 ns<br>0 bytes|37.138 ns<br>0 bytes|94.983 ns<br>0 bytes|
|HorizontalLine|43.455 ns<br>0 bytes|909.604 ns<br>0 bytes|4.702 μs<br>0 bytes|
|ThickLine|9.998 μs<br>0 bytes|223.935 μs<br>0 bytes|8.741 ms<br>0 bytes|
|Circle|170.603 ns<br>0 bytes|1.205 μs<br>0 bytes|9.209 μs<br>0 bytes|
|ThickCircle|1.621 μs<br>0 bytes|66.197 μs<br>0 bytes|1.376 ms<br>0 bytes|
|FilledCircle|1.101 μs<br>0 bytes|9.043 μs<br>0 bytes|199.065 μs<br>0 bytes|
|Rectangle|91.700 ns<br>0 bytes|1.748 μs<br>0 bytes|11.386 μs<br>0 bytes|
|ThickRectangle|3.374 μs<br>0 bytes|54.228 μs<br>0 bytes|891.390 μs<br>0 bytes|
|FilledRectangle|4.393 μs<br>0 bytes|72.216 μs<br>0 bytes|1.179 ms<br>0 bytes|
|Cross|52.212 ns<br>0 bytes|945.432 ns<br>0 bytes|5.289 μs<br>0 bytes|
|HollowCross|71.390 ns<br>0 bytes|926.110 ns<br>0 bytes|4.900 μs<br>0 bytes|

## List of drawables

1. ### `Point`
74 changes: 74 additions & 0 deletions benchmark/Manifest.toml
@@ -0,0 +1,74 @@
# This file is machine-generated - editing it directly is not advised

[[BenchmarkTools]]
deps = ["JSON", "Logging", "Printf", "Profile", "Statistics", "UUIDs"]
git-tree-sha1 = "61adeb0823084487000600ef8b1c00cc2474cd47"
uuid = "6e4b80f9-dd63-53aa-95a3-0cdb28fa8baf"
version = "1.2.0"

[[Dates]]
deps = ["Printf"]
uuid = "ade2ca70-3891-5945-98fb-dc099432e06a"

[[JSON]]
deps = ["Dates", "Mmap", "Parsers", "Unicode"]
git-tree-sha1 = "8076680b162ada2a031f707ac7b4953e30667a37"
uuid = "682c06a0-de6a-54ab-a142-c8b1cf79cde6"
version = "0.21.2"

[[Libdl]]
uuid = "8f399da3-3557-5675-b5ff-fb832c97cbdb"

[[LinearAlgebra]]
deps = ["Libdl"]
uuid = "37e2e46d-f89d-539d-b4ee-838fcccc9c8e"

[[Logging]]
uuid = "56ddb016-857b-54e1-b83d-db4d58db5568"

[[Mmap]]
uuid = "a63ad114-7e13-5084-954f-fe012c677804"

[[Parsers]]
deps = ["Dates"]
git-tree-sha1 = "98f59ff3639b3d9485a03a72f3ab35bab9465720"
uuid = "69de0a69-1ddd-5017-9359-2bf0b02dc9f0"
version = "2.0.6"

[[Printf]]
deps = ["Unicode"]
uuid = "de0858da-6303-5e67-8744-51eddeeeb8d7"

[[Profile]]
deps = ["Printf"]
uuid = "9abbd945-dff8-562f-b5e8-e1ebf5ef1b79"

[[Random]]
deps = ["Serialization"]
uuid = "9a3f8284-a2c9-5f02-9a11-845980a1fd5c"

[[SHA]]
uuid = "ea8e919c-243c-51af-8825-aaa63cd721ce"

[[Serialization]]
uuid = "9e88b42a-f829-5b0c-bbe9-9e923198166b"

[[SimpleDraw]]
path = ".."
uuid = "d1acf6f4-8553-480e-80ae-3c883f0a995a"
version = "0.1.0"

[[SparseArrays]]
deps = ["LinearAlgebra", "Random"]
uuid = "2f01184e-e22b-5df5-ae63-d93ebab69eaf"

[[Statistics]]
deps = ["LinearAlgebra", "SparseArrays"]
uuid = "10745b16-79ce-11e8-11f9-7d13ad32a3b2"

[[UUIDs]]
deps = ["Random", "SHA"]
uuid = "cf7118a7-6976-5b1a-9a39-7adc72f591a4"

[[Unicode]]
uuid = "4ec0a83e-493e-50e2-b9ac-8f72acf5a8f5"
4 changes: 4 additions & 0 deletions benchmark/Project.toml
@@ -0,0 +1,4 @@
[deps]
BenchmarkTools = "6e4b80f9-dd63-53aa-95a3-0cdb28fa8baf"
Dates = "ade2ca70-3891-5945-98fb-dc099432e06a"
SimpleDraw = "d1acf6f4-8553-480e-80ae-3c883f0a995a"
115 changes: 115 additions & 0 deletions benchmark/benchmark.jl
@@ -0,0 +1,115 @@
import BenchmarkTools as BT
import Dates
import SimpleDraw as SD

const SHAPES = [
    SD.Point,
    SD.Background,
    SD.Line,
    SD.VerticalLine,
    SD.HorizontalLine,
    SD.ThickLine,
    SD.Circle,
    SD.ThickCircle,
    SD.FilledCircle,
    SD.Rectangle,
    SD.ThickRectangle,
    SD.FilledRectangle,
    SD.Cross,
    SD.HollowCross,
]

const SIZES = [64, 256, 1024]

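# construct a representative shape of the given type that spans most of an n x n image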
get_shape(::Type{SD.Point}, n) = SD.Point(n ÷ 2, n ÷ 2)
get_shape(::Type{SD.Background}, n) = SD.Background()
get_shape(::Type{SD.VerticalLine}, n) = SD.VerticalLine(2, n - 1, n ÷ 2)
get_shape(::Type{SD.HorizontalLine}, n) = SD.HorizontalLine(n ÷ 2, 2, n - 1)
get_shape(::Type{SD.Line}, n) = SD.Line(SD.Point(3, 2), SD.Point(n - 2, n - 1))
get_shape(::Type{SD.ThickLine}, n) = SD.ThickLine(SD.Point(n ÷ 8, n ÷ 8), SD.Point(n - n ÷ 8 + 1, n - n ÷ 8 + 1), n ÷ 8)
get_shape(::Type{SD.Circle}, n) = SD.Circle(SD.Point(n ÷ 2, n ÷ 2), n ÷ 2 - 1)
get_shape(::Type{SD.ThickCircle}, n) = SD.ThickCircle(SD.Point(n ÷ 2, n ÷ 2), n ÷ 2 - 1, n ÷ 4)
get_shape(::Type{SD.FilledCircle}, n) = SD.FilledCircle(SD.Point(n ÷ 2, n ÷ 2), n ÷ 2 - 1)
get_shape(::Type{SD.Rectangle}, n) = SD.Rectangle(SD.Point(2, 2), n - 1, n - 1)
get_shape(::Type{SD.ThickRectangle}, n) = SD.ThickRectangle(SD.Point(2, 2), n - 1, n - 1, n ÷ 4)
get_shape(::Type{SD.FilledRectangle}, n) = SD.FilledRectangle(SD.Point(2, 2), n - 1, n - 1)
get_shape(::Type{SD.Cross}, n) = SD.Cross(SD.Point(n ÷ 2, n ÷ 2), n ÷ 2 - 1)
get_shape(::Type{SD.HollowCross}, n) = SD.HollowCross(SD.Point(n ÷ 2, n ÷ 2), n ÷ 2 - 1)

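# benchmark SD.draw! for every combination of shape type and image size and collect the summaries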
function get_benchmarks(shape_types, sizes)
    benchmarks = Dict()

    color = 0x00ffffff
    for n in sizes
        image = zeros(typeof(color), n, n)
        for Shape in shape_types
            shape = get_shape(Shape, n)
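            # interpolate the arguments via Ref so the benchmark measures draw! itself rather than global-variable access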
            benchmark = BT.@benchmark SD.draw!($(Ref(image))[], $(Ref(shape))[], $(Ref(color))[])
            benchmarks[(nameof(Shape), n)] = get_summary(benchmark)
            @info "(shape = $(nameof(Shape)), n = $(n)) benchmark complete"
        end
        fill!(image, zero(color))
    end

    return benchmarks
end

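# summarize a trial as pretty-printed (memory, median time) strings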
function get_summary(trial::BT.Trial)
    median_trial = BT.median(trial)
    memory = BT.prettymemory(median_trial.memory)
    median_time = BT.prettytime(median_trial.time)
    return memory, median_time
end

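# render the collected benchmark summaries as a markdown table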
function get_table(shape_types, sizes, benchmarks)
    table = "| |"
    for n in sizes
        table = table * "$(n)|"
    end
    table = table * "\n"

    table = table * "|"
    for _ in 1:length(sizes)+1
        table = table * ":---:|"
    end
    table = table * "\n"

    for Shape in shape_types
        shape_name = nameof(Shape)
        table = table * "|"
        table = table * "$(shape_name)|"

        for n in sizes
            memory, median_time = benchmarks[(shape_name, n)]
            table = table * "$(median_time)<br>$(memory)|"
        end
        table = table * "\n"
    end

    return table
end

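# write a timestamped markdown file containing the benchmark table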
function generate_benchmark_file(; shape_types = SHAPES, sizes = SIZES, file_name = nothing)
    date = Dates.format(Dates.now(), "yyyy_mm_dd_HH_MM_SS")

    if isnothing(file_name)
        file_name = date * ".md"
    end

    io = open(file_name, "w")

    println(io, "Date: $(date) (yyyy_mm_dd_HH_MM_SS)")
    println(io)
    println(io, "**Note:** The time in benchmarks is the median time.")
    println(io)

    benchmarks = get_benchmarks(shape_types, sizes)

    table = get_table(shape_types, sizes, benchmarks)

    println(io, table)

    close(io)

    return nothing
end
