fusefrontend: coalesce 4kB writes
This improves performance on HDDs running ext4, and improves streaming
write performance on HDDs running btrfs. Tar extract slows down on
btrfs for some reason.

See https://github.com/rfjakob/gocryptfs/issues/63

Benchmarks:

encfs v1.9.1
============

$ ./benchmark.bash -encfs /mnt/hdd-ext4
Testing EncFS at /mnt/hdd-ext4/benchmark.bash.u0g
WRITE: 131072000 bytes (131 MB, 125 MiB) copied, 1,48354 s, 88,4 MB/s
UNTAR: 20.79
LS:    3.04
RM:    6.62

$ ./benchmark.bash -encfs /mnt/hdd-btrfs
Testing EncFS at /mnt/hdd-btrfs/benchmark.bash.h40
WRITE: 131072000 bytes (131 MB, 125 MiB) copied, 1,52552 s, 85,9 MB/s
UNTAR: 24.51
LS:    2.73
RM:    5.32

gocryptfs v1.1.1-26-g4a7f8ef
============================

$ ./benchmark.bash /mnt/hdd-ext4
Testing gocryptfs at /mnt/hdd-ext4/benchmark.bash.1KG
WRITE: 131072000 bytes (131 MB, 125 MiB) copied, 1,55782 s, 84,1 MB/s
UNTAR: 22.23
LS:    1.47
RM:    4.17

$ ./benchmark.bash /mnt/hdd-btrfs
Testing gocryptfs at /mnt/hdd-btrfs/benchmark.bash.2t8
WRITE: 131072000 bytes (131 MB, 125 MiB) copied, 6,87206 s, 19,1 MB/s
UNTAR: 69.87
LS:    1.52
RM:    5.33

gocryptfs v1.1.1-32
===================

$ ./benchmark.bash /mnt/hdd-ext4
Testing gocryptfs at /mnt/hdd-ext4/benchmark.bash.Qt3
WRITE: 131072000 bytes (131 MB, 125 MiB) copied, 1,22577 s, 107 MB/s
UNTAR: 23.46
LS:    1.46
RM:    4.67

$ ./benchmark.bash /mnt/hdd-btrfs/
Testing gocryptfs at /mnt/hdd-btrfs//benchmark.bash.XVk
WRITE: 131072000 bytes (131 MB, 125 MiB) copied, 3,68735 s, 35,5 MB/s
UNTAR: 116.87
LS:    1.84
RM:    6.34
This commit is contained in:
parent 80c50b9dbc
commit 024511d9c7
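For context, a back-of-the-envelope sketch (not part of the commit) of what the coalescing buys: gocryptfs encrypts each 4096-byte plaintext block into a 4128-byte ciphertext block (16-byte nonce + 16-byte auth tag per block) behind an 18-byte file header, per the gocryptfs on-disk format documentation; the 128 kiB write size below is an assumption about the FUSE write granularity. Under those assumptions, one large write that previously caused one preallocation and one WriteAt per 4 kiB block now causes a single preallocation and a single WriteAt.

package main

import "fmt"

func main() {
    const (
        plainBS   = 4096       // plaintext block size
        overhead  = 32         // 16-byte nonce + 16-byte auth tag per block
        cipherBS  = plainBS + overhead
        headerLen = 18         // file header (version + file ID) before block #0
        writeSize = 128 * 1024 // assumed size of one large FUSE write
    )
    nBlocks := writeSize / plainBS
    fmt.Println("blocks per write:             ", nBlocks)          // 32
    fmt.Println("ciphertext bytes written:     ", nBlocks*cipherBS) // 132096
    fmt.Println("ciphertext offset of block #0:", headerLen)
    // Before this commit: nBlocks prealloc+WriteAt pairs per FUSE write.
    // After: one prealloc and one WriteAt covering the whole buffer.
}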
@@ -255,15 +255,14 @@ func (f *file) doWrite(data []byte, off int64) (uint32, fuse.Status) {
     }
     fileID := f.fileTableEntry.ID
     defer f.fileTableEntry.IDLock.RUnlock()

-    var written uint32
     // Handle payload data
-    status := fuse.OK
     dataBuf := bytes.NewBuffer(data)
     blocks := f.contentEnc.ExplodePlainRange(uint64(off), uint64(len(data)))
-    for _, b := range blocks {
+    writeChain := make([][]byte, len(blocks))
+    var numOutBytes int
+    for i, b := range blocks {
         blockData := dataBuf.Next(int(b.Length))
-
         // Incomplete block -> Read-Modify-Write
         if b.IsPartial() {
             // Read
@@ -272,38 +271,41 @@ func (f *file) doWrite(data []byte, off int64) (uint32, fuse.Status) {
             oldData, status = f.doRead(o, f.contentEnc.PlainBS())
             if status != fuse.OK {
                 tlog.Warn.Printf("ino%d fh%d: RMW read failed: %s", f.devIno.ino, f.intFd(), status.String())
-                return written, status
+                return 0, status
             }
             // Modify
             blockData = f.contentEnc.MergeBlocks(oldData, blockData, int(b.Skip))
             tlog.Debug.Printf("len(oldData)=%d len(blockData)=%d", len(oldData), len(blockData))
         }
         // Encrypt
-        blockOffset := b.BlockCipherOff()
         blockData = f.contentEnc.EncryptBlock(blockData, b.BlockNo, fileID)
         tlog.Debug.Printf("ino%d: Writing %d bytes to block #%d",
             f.devIno.ino, uint64(len(blockData))-f.contentEnc.BlockOverhead(), b.BlockNo)
-        // Prevent partially written (=corrupt) blocks by preallocating the space beforehand
-        err := syscallcompat.EnospcPrealloc(int(f.fd.Fd()), int64(blockOffset), int64(len(blockData)))
+        // Store output data in the writeChain
+        writeChain[i] = blockData
+        numOutBytes += len(blockData)
+    }
+    // Concatenate all elements in the writeChain into one contiguous buffer
+    tmp := make([]byte, numOutBytes)
+    writeBuf := bytes.NewBuffer(tmp[:0])
+    for _, w := range writeChain {
+        writeBuf.Write(w)
+    }
+    // Preallocate so we cannot run out of space in the middle of the write.
+    // This prevents partially written (=corrupt) blocks.
+    cOff := blocks[0].BlockCipherOff()
+    err := syscallcompat.EnospcPrealloc(int(f.fd.Fd()), int64(cOff), int64(writeBuf.Len()))
     if err != nil {
         tlog.Warn.Printf("ino%d fh%d: doWrite: prealloc failed: %s", f.devIno.ino, f.intFd(), err.Error())
-        status = fuse.ToStatus(err)
-        break
+        return 0, fuse.ToStatus(err)
     }

     // Write
-    _, err = f.fd.WriteAt(blockData, int64(blockOffset))
+    _, err = f.fd.WriteAt(writeBuf.Bytes(), int64(cOff))
     if err != nil {
         tlog.Warn.Printf("doWrite: Write failed: %s", err.Error())
-        status = fuse.ToStatus(err)
-        break
+        return 0, fuse.ToStatus(err)
     }
-        written += uint32(b.Length)
-    }
-    return written, status
+    return uint32(len(data)), fuse.OK
 }

 // isConsecutiveWrite returns true if the current write
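As a standalone illustration of the pattern the new code follows (encrypt each block into a writeChain, copy the chain into a buffer preallocated via bytes.NewBuffer(tmp[:0]) so the concatenation never reallocates, then issue one WriteAt), here is a minimal sketch. It is not code from this commit: encryptBlock is a hypothetical stand-in for contentEnc.EncryptBlock, and the 18-byte offset merely mimics the file header.

package main

import (
    "bytes"
    "fmt"
    "os"
)

// encryptBlock is a placeholder: real code would prepend a random nonce and
// append an authentication tag produced by AES-GCM.
func encryptBlock(plain []byte) []byte {
    out := make([]byte, 0, len(plain)+32)
    out = append(out, make([]byte, 16)...) // fake nonce
    out = append(out, plain...)
    return append(out, make([]byte, 16)...) // fake tag
}

func main() {
    plaintextBlocks := [][]byte{
        bytes.Repeat([]byte{'a'}, 4096),
        bytes.Repeat([]byte{'b'}, 4096),
        bytes.Repeat([]byte{'c'}, 100), // partial last block
    }
    // Encrypt each block and remember the ciphertext in the write chain.
    writeChain := make([][]byte, len(plaintextBlocks))
    var numOutBytes int
    for i, p := range plaintextBlocks {
        c := encryptBlock(p)
        writeChain[i] = c
        numOutBytes += len(c)
    }
    // tmp[:0] hands the buffer a zero-length slice that already has
    // numOutBytes of capacity, so the Write calls below never reallocate.
    tmp := make([]byte, numOutBytes)
    writeBuf := bytes.NewBuffer(tmp[:0])
    for _, w := range writeChain {
        writeBuf.Write(w)
    }
    // One contiguous write instead of one write per 4 kiB block.
    f, err := os.CreateTemp("", "coalesce")
    if err != nil {
        panic(err)
    }
    defer os.Remove(f.Name())
    defer f.Close()
    if _, err := f.WriteAt(writeBuf.Bytes(), 18); err != nil { // 18 = header length
        panic(err)
    }
    fmt.Printf("wrote %d bytes in a single WriteAt\n", writeBuf.Len())
}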