cryptocore: add urandom + randprefetch benchmarks

The benchmark that supported the decision for 512-byte
prefetching previously lived outside the repo.

Let's add it where it belongs so it cannot get lost.
This commit is contained in:
Jakob Unterwurzacher 2017-08-16 18:33:00 +02:00
parent 838bf883df
commit 312ea32bb7
3 changed files with 51 additions and 14 deletions

View File

@ -6,20 +6,9 @@ import (
"sync"
)
/*
Number of bytes to prefetch.
512 looks like a good compromise between throughput and latency:
Benchmark16-2 3000000 567 ns/op 28.18 MB/s
Benchmark64-2 5000000 293 ns/op 54.51 MB/s
Benchmark128-2 10000000 220 ns/op 72.48 MB/s
Benchmark256-2 10000000 210 ns/op 76.17 MB/s
Benchmark512-2 10000000 191 ns/op 83.75 MB/s
Benchmark1024-2 10000000 171 ns/op 93.48 MB/s
Benchmark2048-2 10000000 165 ns/op 96.45 MB/s
Benchmark4096-2 10000000 165 ns/op 96.58 MB/s
Benchmark40960-2 10000000 147 ns/op 108.82 MB/s
*/
// prefetchN is the number of bytes to prefetch from the system RNG per read.
// 512 looks like a good compromise between throughput and latency - see
// randsize_test.go for numbers.
const prefetchN = 512
func init() {

View File

@ -38,3 +38,11 @@ func TestRandPrefetch(t *testing.T) {
t.Errorf("random data should be incompressible, but: in=%d compressed=%d\n", p*l*l, b.Len())
}
}
// BenchmarkRandPrefetch measures prefetcher throughput when pulling
// 16-byte chunks (the default nonce size since gocryptfs v0.7).
func BenchmarkRandPrefetch(b *testing.B) {
	const nonceLen = 16
	b.SetBytes(nonceLen)
	for n := 0; n < b.N; n++ {
		randPrefetcher.read(nonceLen)
	}
}

View File

@ -0,0 +1,40 @@
// +build go1.7
// ^^^^^^^^^^^^ we use the "sub-benchmark" feature that was added in Go 1.7
package cryptocore
import (
"fmt"
"testing"
)
/*
The throughput we get from /dev/urandom / getentropy depends a lot on the
block size used. Results on my Pentium G630 running Linux 4.11:
BenchmarkRandSize/16-2 3000000 571 ns/op 27.98 MB/s
BenchmarkRandSize/32-2 3000000 585 ns/op 54.66 MB/s
BenchmarkRandSize/64-2 2000000 860 ns/op 74.36 MB/s
BenchmarkRandSize/128-2 1000000 1197 ns/op 106.90 MB/s
BenchmarkRandSize/256-2 1000000 1867 ns/op 137.06 MB/s
BenchmarkRandSize/512-2 500000 3187 ns/op 160.61 MB/s
BenchmarkRandSize/1024-2 200000 5888 ns/op 173.91 MB/s
BenchmarkRandSize/2048-2 100000 11554 ns/op 177.25 MB/s
BenchmarkRandSize/4096-2 100000 22523 ns/op 181.86 MB/s
BenchmarkRandSize/8192-2 30000 43111 ns/op 190.02 MB/s
Results are similar when testing with dd, so this is not due to Go allocation
overhead: dd if=/dev/urandom bs=16 count=100000 of=/dev/null
*/
// BenchmarkUrandomBlocksize runs one sub-benchmark per power-of-two block
// size from 16 to 8192 bytes, measuring RandBytes throughput at each size.
func BenchmarkUrandomBlocksize(b *testing.B) {
	for size := 16; size <= 8192; size *= 2 {
		size := size // pin for the closure below
		name := fmt.Sprintf("%d", size)
		b.Run(name, func(b *testing.B) {
			b.SetBytes(int64(size))
			for n := 0; n < b.N; n++ {
				RandBytes(size)
			}
		})
	}
}