Skip to content

Commit f43aba3

Browse files
committed
accumulator/btcacc: Use sync.Pool for byte slices in package common
There are functions in these packages that allocate and de-allocate byte slices numerous times per function call. Using a pool for byte slices helps reduce this allocation and speeds up the code.
1 parent f8046ea commit f43aba3

File tree

3 files changed

+50
-56
lines changed

3 files changed

+50
-56
lines changed

accumulator/types.go

+18-10
Original file line numberDiff line numberDiff line change
@@ -5,26 +5,28 @@ import (
55
"crypto/sha512"
66
"fmt"
77
"math/rand"
8+
9+
"github.com/mit-dci/utreexo/common"
810
)
911

10-
// Hash :
12+
// Hash is the 32 bytes of a sha256 hash
1113
type Hash [32]byte
1214

1315
// Prefix for printfs
1416
func (h Hash) Prefix() []byte {
1517
return h[:4]
1618
}
1719

18-
// Mini :
20+
// Mini takes the first 12 bytes of a hash and outputs a MiniHash
1921
func (h Hash) Mini() (m MiniHash) {
2022
copy(m[:], h[:12])
2123
return
2224
}
2325

24-
// MiniHash :
26+
// MiniHash is the first 12 bytes of a sha256 hash
2527
type MiniHash [12]byte
2628

27-
// HashFromString :
29+
// HashFromString takes a string and hashes with sha256
2830
func HashFromString(s string) Hash {
2931
return sha256.Sum256([]byte(s))
3032
}
@@ -35,7 +37,8 @@ type arrow struct {
3537
collapse bool
3638
}
3739

38-
// Node :
40+
// node is an element in the utreexo tree and is represented by a position
41+
// and a hash
3942
type node struct {
4043
Pos uint64
4144
Val Hash
@@ -53,14 +56,18 @@ type simLeaf struct {
5356
duration int32
5457
}
5558

56-
// Parent gets you the merkle parent. So far no committing to height.
57-
// if the left child is zero it should crash...
59+
// parentHash gets you the merkle parent of two children hashes.
60+
// TODO So far no committing to height.
5861
func parentHash(l, r Hash) Hash {
5962
var empty Hash
6063
if l == empty || r == empty {
6164
panic("got an empty leaf here. ")
6265
}
63-
return sha512.Sum512_256(append(l[:], r[:]...))
66+
buf := common.NewFreeBytes()
67+
defer buf.Free()
68+
buf.Bytes = append(buf.Bytes, l[:]...)
69+
buf.Bytes = append(buf.Bytes, r[:]...)
70+
return sha512.Sum512_256(buf.Bytes)
6471
}
6572

6673
// SimChain is for testing; it spits out "blocks" of adds and deletes
@@ -73,7 +80,7 @@ type SimChain struct {
7380
lookahead int32
7481
}
7582

76-
// NewSimChain :
83+
// NewSimChain initializes and returns a SimChain
7784
func NewSimChain(duration uint32) *SimChain {
7885
var s SimChain
7986
s.blockHeight = -1
@@ -119,7 +126,8 @@ func (s *SimChain) ttlString() string {
119126
return x
120127
}
121128

122-
// NextBlock :
129+
// NextBlock outputs a new simulation block given the additions for the block
130+
// to be outputed
123131
func (s *SimChain) NextBlock(numAdds uint32) ([]Leaf, []int32, []Hash) {
124132
s.blockHeight++
125133
fmt.Printf("blockHeight %d\n", s.blockHeight)

btcacc/leaf.go

+15-6
Original file line numberDiff line numberDiff line change
@@ -8,6 +8,8 @@ import (
88
"fmt"
99
"io"
1010
"strconv"
11+
12+
"github.com/mit-dci/utreexo/common"
1113
)
1214

1315
const HashSize = 32
@@ -71,14 +73,19 @@ func (l *LeafData) Serialize(w io.Writer) (err error) {
7173

7274
_, err = w.Write(l.BlockHash[:])
7375
_, err = w.Write(l.TxHash[:])
74-
err = binary.Write(w, binary.BigEndian, l.Index)
75-
err = binary.Write(w, binary.BigEndian, hcb)
76-
err = binary.Write(w, binary.BigEndian, l.Amt)
76+
77+
freeBytes := common.NewFreeBytes()
78+
defer freeBytes.Free()
79+
80+
err = freeBytes.PutUint32(w, binary.BigEndian, l.Index)
81+
err = freeBytes.PutUint32(w, binary.BigEndian, uint32(hcb))
82+
err = freeBytes.PutUint64(w, binary.BigEndian, uint64(l.Amt))
83+
7784
if len(l.PkScript) > 10000 {
7885
err = fmt.Errorf("pksize too long")
7986
return
8087
}
81-
err = binary.Write(w, binary.BigEndian, uint16(len(l.PkScript)))
88+
err = freeBytes.PutUint16(w, binary.BigEndian, uint16(len(l.PkScript)))
8289
_, err = w.Write(l.PkScript)
8390
return
8491
}
@@ -121,7 +128,9 @@ func (l *LeafData) Deserialize(r io.Reader) (err error) {
121128

122129
// LeafHash turns a LeafData into a LeafHash
123130
func (l *LeafData) LeafHash() [32]byte {
124-
var buf bytes.Buffer
125-
l.Serialize(&buf)
131+
freeBytes := common.NewFreeBytes()
132+
defer freeBytes.Free()
133+
buf := bytes.NewBuffer(freeBytes.Bytes)
134+
l.Serialize(buf)
126135
return sha512.Sum512_256(buf.Bytes())
127136
}

btcacc/udata.go

+17-40
Original file line numberDiff line numberDiff line change
@@ -7,8 +7,11 @@ import (
77
"io"
88

99
"github.com/mit-dci/utreexo/accumulator"
10+
"github.com/mit-dci/utreexo/common"
1011
)
1112

13+
// UData is all the data needed to verify the utreexo accumulator proof
14+
// for a given block
1215
type UData struct {
1316
Height int32
1417
AccProof accumulator.BatchProof
@@ -52,22 +55,20 @@ func (ud *UData) ProofSanity(nl uint64, h uint8) bool {
5255
return false
5356
}
5457
}
55-
// return to presorted target list
56-
// ud.AccProof.Targets = presort
58+
5759
return true
5860
}
5961

6062
// on disk
6163
// aaff aaff 0000 0014 0000 0001 0000 0001 0000 0000 0000 0000 0000 0000
6264
// magic | size | height | numttls | ttl0 | numTgts | ????
6365

64-
// ToBytes serializes UData into bytes.
66+
// Serialize serializes UData into bytes.
6567
// First, height, 4 bytes.
6668
// Then, number of TTL values (4 bytes, even though we only need 2)
67-
// Then a bunch of TTL values, (4B each) one for each txo in the associated block
68-
// batch proof
69-
// Bunch of LeafDatas
70-
69+
// Then a bunch of TTL values, (4B each) one for each txo in the
70+
// associated block batch proof
71+
// And the rest is a bunch of LeafDatas
7172
func (ud *UData) Serialize(w io.Writer) (err error) {
7273
err = binary.Write(w, binary.BigEndian, ud.Height)
7374
if err != nil { // ^ 4B block height
@@ -89,43 +90,28 @@ func (ud *UData) Serialize(w io.Writer) (err error) {
8990
return
9091
}
9192

92-
// fmt.Printf("accproof %d bytes\n", ud.AccProof.SerializeSize())
93-
9493
// write all the leafdatas
9594
for _, ld := range ud.Stxos {
96-
// fmt.Printf("writing ld %d %s\n", i, ld.ToString())
9795
err = ld.Serialize(w)
9896
if err != nil {
9997
return
10098
}
101-
// fmt.Printf("h %d leaf %d %s len %d\n",
102-
// ud.Height, i, ld.Outpoint.String(), len(ld.PkScript))
10399
}
104100

105101
return
106102
}
107103

108-
//
104+
// SerializeSize outputs the size of the udata when it is serialized
109105
func (ud *UData) SerializeSize() int {
110106
var ldsize int
111-
var b bytes.Buffer
112-
113-
// TODO this is slow, can remove double checking once it works reliably
114-
for _, l := range ud.Stxos {
115-
ldsize += l.SerializeSize()
116-
b.Reset()
117-
l.Serialize(&b)
118-
if b.Len() != l.SerializeSize() {
119-
fmt.Printf(" b.Len() %d, l.SerializeSize() %d\n",
120-
b.Len(), l.SerializeSize())
121-
}
122-
}
107+
buf := common.NewFreeBytes()
108+
bufWriter := bytes.NewBuffer(buf.Bytes)
123109

124-
b.Reset()
125-
ud.AccProof.Serialize(&b)
126-
if b.Len() != ud.AccProof.SerializeSize() {
110+
bufWriter.Reset()
111+
ud.AccProof.Serialize(bufWriter)
112+
if bufWriter.Len() != ud.AccProof.SerializeSize() {
127113
fmt.Printf(" b.Len() %d, AccProof.SerializeSize() %d\n",
128-
b.Len(), ud.AccProof.SerializeSize())
114+
bufWriter.Len(), ud.AccProof.SerializeSize())
129115
}
130116

131117
guess := 8 + (4 * len(ud.TxoTTLs)) + ud.AccProof.SerializeSize() + ldsize
@@ -134,23 +120,20 @@ func (ud *UData) SerializeSize() int {
134120
return guess
135121
}
136122

123+
// Deserialize reads from the reader and deserializes the udata
137124
func (ud *UData) Deserialize(r io.Reader) (err error) {
138-
139125
err = binary.Read(r, binary.BigEndian, &ud.Height)
140126
if err != nil { // ^ 4B block height
141127
fmt.Printf("ud deser Height err %s\n", err.Error())
142128
return
143129
}
144-
// fmt.Printf("read height %d\n", ud.Height)
145130

146131
var numTTLs uint32
147132
err = binary.Read(r, binary.BigEndian, &numTTLs)
148133
if err != nil { // ^ 4B num ttls
149134
fmt.Printf("ud deser numTTLs err %s\n", err.Error())
150135
return
151136
}
152-
// fmt.Printf("read ttls %d\n", numTTLs)
153-
// fmt.Printf("UData deser read h %d - %d ttls ", ud.Height, numTTLs)
154137

155138
ud.TxoTTLs = make([]int32, numTTLs)
156139
for i, _ := range ud.TxoTTLs { // write all ttls
@@ -159,7 +142,6 @@ func (ud *UData) Deserialize(r io.Reader) (err error) {
159142
fmt.Printf("ud deser LeafTTLs[%d] err %s\n", i, err.Error())
160143
return
161144
}
162-
// fmt.Printf("read ttl[%d] %d\n", i, ud.TxoTTLs[i])
163145
}
164146

165147
err = ud.AccProof.Deserialize(r)
@@ -168,8 +150,6 @@ func (ud *UData) Deserialize(r io.Reader) (err error) {
168150
return
169151
}
170152

171-
// fmt.Printf("%d byte accproof, read %d targets\n",
172-
// ud.AccProof.SerializeSize(), len(ud.AccProof.Targets))
173153
// we've already gotten targets. 1 leafdata per target
174154
ud.Stxos = make([]LeafData, len(ud.AccProof.Targets))
175155
for i, _ := range ud.Stxos {
@@ -180,9 +160,6 @@ func (ud *UData) Deserialize(r io.Reader) (err error) {
180160
ud.Height, numTTLs, len(ud.AccProof.Targets), i, err.Error())
181161
return
182162
}
183-
// fmt.Printf("h %d leaf %d %s len %d\n",
184-
// ud.Height, i, ud.Stxos[i].Outpoint.String(), len(ud.Stxos[i].PkScript))
185-
186163
}
187164

188165
return
@@ -212,6 +189,7 @@ func GenUData(delLeaves []LeafData, forest *accumulator.Forest, height int32) (
212189

213190
ud.Height = height
214191
ud.Stxos = delLeaves
192+
215193
// make slice of hashes from leafdata
216194
delHashes := make([]accumulator.Hash, len(ud.Stxos))
217195
for i, _ := range ud.Stxos {
@@ -233,6 +211,5 @@ func GenUData(delLeaves []LeafData, forest *accumulator.Forest, height int32) (
233211
return
234212
}
235213

236-
// fmt.Printf(ud.AccProof.ToString())
237214
return
238215
}

0 commit comments

Comments
 (0)