package fusefrontend

// FUSE operations on file handles

import (
	"bytes"
	"fmt"
	"io"
	"log"
	"os"
	"sync"
	"syscall"
	"time"

	"github.com/hanwen/go-fuse/fuse"
	"github.com/hanwen/go-fuse/fuse/nodefs"

	"github.com/rfjakob/gocryptfs/internal/contentenc"
	"github.com/rfjakob/gocryptfs/internal/openfiletable"
	"github.com/rfjakob/gocryptfs/internal/serialize_reads"
	"github.com/rfjakob/gocryptfs/internal/stupidgcm"
	"github.com/rfjakob/gocryptfs/internal/syscallcompat"
	"github.com/rfjakob/gocryptfs/internal/tlog"
)

var _ nodefs.File = &File{} // Verify that interface is implemented.

// File - based on loopbackFile in go-fuse/fuse/nodefs/files.go
type File struct {
	fd *os.File
	// Has Release() already been called on this file? This also means that the
	// open file table entry has been freed, so let's not crash trying to access it.
	// Due to concurrency, Release can overtake other operations. These will
	// return EBADF in that case.
	released bool
	// fdLock prevents the fd from being closed while we are in the middle of
	// an operation.
	// Every FUSE entrypoint should RLock(). The only user of Lock() is
	// Release(), which closes the fd and sets "released" to true.
	fdLock sync.RWMutex
	// Content encryption helper
	contentEnc *contentenc.ContentEnc
	// Device and inode number uniquely identify the backing file
	qIno openfiletable.QIno
	// Entry in the open file table
	fileTableEntry *openfiletable.Entry
	// go-fuse nodefs.loopbackFile
	loopbackFile nodefs.File
	// Store where the last byte was written
	lastWrittenOffset int64
	// The opCount is used to judge whether "lastWrittenOffset" is still
	// guaranteed to be correct.
	lastOpCount uint64
	// Parent filesystem
	fs *FS
	// We embed a nodefs.NewDefaultFile() that returns ENOSYS for every operation we
	// have not implemented. This prevents build breakage when the go-fuse library
	// adds new methods to the nodefs.File interface.
	nodefs.File
}
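
// Locking overview, as used in this file: every FUSE entrypoint takes
// fdLock.RLock() first (only Release() takes fdLock.Lock()). Read additionally
// takes fileTableEntry.ContentLock.RLock() and Write takes ContentLock.Lock(),
// so content readers may run in parallel while writers are exclusive.
// doRead takes fileTableEntry.IDLock only to read or populate the cached file ID.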

// NewFile returns a new go-fuse File instance.
func NewFile(fd *os.File, fs *FS) (*File, fuse.Status) {
	var st syscall.Stat_t
	err := syscall.Fstat(int(fd.Fd()), &st)
	if err != nil {
		tlog.Warn.Printf("NewFile: Fstat on fd %d failed: %v\n", fd.Fd(), err)
		return nil, fuse.ToStatus(err)
	}
	qi := openfiletable.QInoFromStat(&st)
	e := openfiletable.Register(qi)

	return &File{
		fd:             fd,
		contentEnc:     fs.contentEnc,
		qIno:           qi,
		fileTableEntry: e,
		loopbackFile:   nodefs.NewLoopbackFile(fd),
		fs:             fs,
		File:           nodefs.NewDefaultFile(),
	}, fuse.OK
}
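
// Note: openfiletable.Register() in NewFile and openfiletable.Unregister() in
// Release() bracket the lifetime of the shared open file table entry that
// caches the file ID and holds the content/ID locks for this inode.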

// intFd - return the backing file descriptor as an integer. Used for debug
// messages.
func (f *File) intFd() int {
	return int(f.fd.Fd())
}

// readFileID loads the file header from disk and extracts the file ID.
// Returns io.EOF if the file is empty.
func (f *File) readFileID() ([]byte, error) {
	// We read +1 byte to determine if the file has actual content
	// and not only the header. A header-only file will be considered empty.
	// This makes File ID poisoning more difficult.
	readLen := contentenc.HeaderLen + 1
	buf := make([]byte, readLen)
	n, err := f.fd.ReadAt(buf, 0)
	if err != nil {
		if err == io.EOF && n != 0 {
			tlog.Warn.Printf("readFileID %d: incomplete file, got %d instead of %d bytes",
				f.qIno.Ino, n, readLen)
			f.fs.reportMitigatedCorruption(fmt.Sprint(f.qIno.Ino))
		}
		return nil, err
	}
	buf = buf[:contentenc.HeaderLen]
	h, err := contentenc.ParseHeader(buf)
	if err != nil {
		return nil, err
	}
	return h.ID, nil
}
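
// Note on the header: the first contentenc.HeaderLen bytes of the backing file
// hold a format version followed by the random per-file ID that readFileID()
// extracts and createHeader() generates; see the contentenc package for the
// exact layout.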

// createHeader creates a new random header and writes it to disk.
// Returns the new file ID.
// The caller must exclusively lock fileTableEntry.ContentLock.
func (f *File) createHeader() (fileID []byte, err error) {
	h := contentenc.RandomHeader()
	buf := h.Pack()
	// Prevent a partially written (=corrupt) header by preallocating the space beforehand
	if !f.fs.args.NoPrealloc {
		err = syscallcompat.EnospcPrealloc(int(f.fd.Fd()), 0, contentenc.HeaderLen)
		if err != nil {
			if !syscallcompat.IsENOSPC(err) {
				tlog.Warn.Printf("ino%d: createHeader: prealloc failed: %s\n", f.qIno.Ino, err.Error())
			}
			return nil, err
		}
	}
	// Actually write the header
	_, err = f.fd.WriteAt(buf, 0)
	if err != nil {
		return nil, err
	}
	return h.ID, err
}
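
// Note: ENOSPC from the preallocation above simply means the filesystem is
// full and is returned without a warning; other prealloc errors are unexpected
// and therefore get logged.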

// doRead - read "length" plaintext bytes from plaintext offset "off" and append
// to "dst".
// Arguments "length" and "off" do not have to be block-aligned.
//
// doRead reads the corresponding ciphertext blocks from disk, decrypts them and
// returns the requested part of the plaintext.
//
// Called by Read() for normal reading,
// by Write() and Truncate() via doWrite() for Read-Modify-Write.
func (f *File) doRead(dst []byte, off uint64, length uint64) ([]byte, fuse.Status) {
	// Get the file ID, either from the open file table, or from disk.
	var fileID []byte
	f.fileTableEntry.IDLock.Lock()
	if f.fileTableEntry.ID != nil {
		// Use the cached value in the file table
		fileID = f.fileTableEntry.ID
	} else {
		// Not cached, we have to read it from disk.
		var err error
		fileID, err = f.readFileID()
		if err != nil {
			f.fileTableEntry.IDLock.Unlock()
			if err == io.EOF {
				// Empty file
				return nil, fuse.OK
			}
			tlog.Warn.Printf("doRead %d: corrupt header: %v", f.qIno.Ino, err)
			return nil, fuse.EIO
		}
		// Save into the file table
		f.fileTableEntry.ID = fileID
	}
	f.fileTableEntry.IDLock.Unlock()
	if fileID == nil {
		log.Panicf("fileID=%v", fileID)
	}
	// Read the backing ciphertext in one go
	blocks := f.contentEnc.ExplodePlainRange(off, length)
	alignedOffset, alignedLength := blocks[0].JointCiphertextRange(blocks)
	skip := blocks[0].Skip
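	// Example (assuming the default 4096-byte plaintext blocks of the
	// contentenc package): a request for off=10000, length=100 lies entirely
	// inside plaintext block #2 (plaintext bytes 8192..12287), so blocks has a
	// single entry with Skip=1808, and alignedOffset/alignedLength select
	// exactly that one ciphertext block.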
	tlog.Debug.Printf("doRead: off=%d len=%d -> off=%d len=%d skip=%d\n",
		off, length, alignedOffset, alignedLength, skip)

	ciphertext := f.fs.contentEnc.CReqPool.Get()
	ciphertext = ciphertext[:int(alignedLength)]
	n, err := f.fd.ReadAt(ciphertext, int64(alignedOffset))
	if err != nil && err != io.EOF {
		tlog.Warn.Printf("read: ReadAt: %s", err.Error())
		return nil, fuse.ToStatus(err)
	}
	// The ReadAt came back empty. We can skip all the decryption and return early.
	if n == 0 {
		f.fs.contentEnc.CReqPool.Put(ciphertext)
		return dst, fuse.OK
	}
	// Truncate the ciphertext buffer down to the bytes we actually read
	ciphertext = ciphertext[0:n]

	firstBlockNo := blocks[0].BlockNo
	tlog.Debug.Printf("ReadAt offset=%d, first block #%d, want=%d bytes, got=%d", alignedOffset, firstBlockNo, alignedLength, n)

	// Decrypt it
	plaintext, err := f.contentEnc.DecryptBlocks(ciphertext, firstBlockNo, fileID)
	f.fs.contentEnc.CReqPool.Put(ciphertext)
	if err != nil {
		if f.fs.args.ForceDecode && err == stupidgcm.ErrAuth {
			// We no longer know which block was corrupt at this point,
			// but DecryptBlocks() has already logged it anyway.
			tlog.Warn.Printf("doRead %d: off=%d len=%d: returning corrupt data due to forcedecode",
				f.qIno.Ino, off, length)
		} else {
			corruptBlockNo := firstBlockNo + f.contentEnc.PlainOffToBlockNo(uint64(len(plaintext)))
			tlog.Warn.Printf("doRead %d: corrupt block #%d: %v", f.qIno.Ino, corruptBlockNo, err)
			return nil, fuse.EIO
		}
	}

	// Crop down to the relevant part
	var out []byte
	lenHave := len(plaintext)
	lenWant := int(skip + length)
	if lenHave > lenWant {
		out = plaintext[skip:lenWant]
	} else if lenHave > int(skip) {
		out = plaintext[skip:lenHave]
	}
	// else: out stays empty, the file was smaller than the requested offset
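	// The plaintext buffer comes from a memory pool, so append() below copies
	// the relevant slice into dst before the buffer is handed back to the pool.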

	out = append(dst, out...)
	f.fs.contentEnc.PReqPool.Put(plaintext)

	return out, fuse.OK
}

// Read - FUSE call
func (f *File) Read(buf []byte, off int64) (resultData fuse.ReadResult, code fuse.Status) {
	if len(buf) > fuse.MAX_KERNEL_WRITE {
		// This would crash us due to our fixed-size buffer pool
		tlog.Warn.Printf("Read: rejecting oversized request with EMSGSIZE, len=%d", len(buf))
		return nil, fuse.Status(syscall.EMSGSIZE)
	}
	f.fdLock.RLock()
	defer f.fdLock.RUnlock()

	f.fileTableEntry.ContentLock.RLock()
	defer f.fileTableEntry.ContentLock.RUnlock()

	tlog.Debug.Printf("ino%d: FUSE Read: offset=%d length=%d", f.qIno.Ino, off, len(buf))
	if f.fs.args.SerializeReads {
		serialize_reads.Wait(off, len(buf))
	}
	out, status := f.doRead(buf[:0], uint64(off), uint64(len(buf)))
	if f.fs.args.SerializeReads {
		serialize_reads.Done()
	}
	if status != fuse.OK {
		return nil, status
	}
	tlog.Debug.Printf("ino%d: Read: status %v, returning %d bytes", f.qIno.Ino, status, len(out))
	return fuse.ReadResultData(out), status
}

// doWrite - encrypt "data" and write it to plaintext offset "off"
//
// Arguments do not have to be block-aligned, read-modify-write is
// performed internally as necessary.
//
// Called by Write() for normal writing,
// and by Truncate() to rewrite the last file block.
//
// Empty writes do nothing and are allowed.
func (f *File) doWrite(data []byte, off int64) (uint32, fuse.Status) {
	fileWasEmpty := false
	// Get the file ID, create a new one if it does not exist yet.
	var fileID []byte
	// The caller has exclusively locked ContentLock, which blocks all other
	// readers and writers. No need to take IDLock.
	if f.fileTableEntry.ID != nil {
		fileID = f.fileTableEntry.ID
	} else {
		// If the file ID is not cached, read it from disk
		var err error
		fileID, err = f.readFileID()
		// Write a new file header if the file is empty
		if err == io.EOF {
			fileID, err = f.createHeader()
			fileWasEmpty = true
		}
		if err != nil {
			return 0, fuse.ToStatus(err)
		}
		f.fileTableEntry.ID = fileID
	}
	// Handle payload data
	dataBuf := bytes.NewBuffer(data)
	blocks := f.contentEnc.ExplodePlainRange(uint64(off), uint64(len(data)))
	toEncrypt := make([][]byte, len(blocks))
	for i, b := range blocks {
		blockData := dataBuf.Next(int(b.Length))
		// Incomplete block -> Read-Modify-Write
		if b.IsPartial() {
			// Read
			oldData, status := f.doRead(nil, b.BlockPlainOff(), f.contentEnc.PlainBS())
			if status != fuse.OK {
				tlog.Warn.Printf("ino%d fh%d: RMW read failed: %s", f.qIno.Ino, f.intFd(), status.String())
				return 0, status
			}
			// Modify
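			// Example: writing 100 bytes at offset 50 inside a block means
			// oldData is the block's current plaintext, b.Skip is 50, and
			// MergeBlocks() below overlays the new bytes on top of oldData
			// starting at that offset.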
			blockData = f.contentEnc.MergeBlocks(oldData, blockData, int(b.Skip))
			tlog.Debug.Printf("len(oldData)=%d len(blockData)=%d", len(oldData), len(blockData))
		}
		tlog.Debug.Printf("ino%d: Writing %d bytes to block #%d",
			f.qIno.Ino, uint64(len(blockData))-f.contentEnc.BlockOverhead(), b.BlockNo)
		// Write into the to-encrypt list
		toEncrypt[i] = blockData
	}
	// Encrypt all blocks
	ciphertext := f.contentEnc.EncryptBlocks(toEncrypt, blocks[0].BlockNo, f.fileTableEntry.ID)
	// Preallocate so we cannot run out of space in the middle of the write.
	// This prevents partially written (=corrupt) blocks.
	var err error
	cOff := int64(blocks[0].BlockCipherOff())
	if !f.fs.args.NoPrealloc {
		err = syscallcompat.EnospcPrealloc(int(f.fd.Fd()), cOff, int64(len(ciphertext)))
		if err != nil {
			if !syscallcompat.IsENOSPC(err) {
				tlog.Warn.Printf("ino%d fh%d: doWrite: prealloc failed: %v", f.qIno.Ino, f.intFd(), err)
			}
			if fileWasEmpty {
				// Kill the file header again
				f.fileTableEntry.ID = nil
				err2 := syscall.Ftruncate(int(f.fd.Fd()), 0)
				if err2 != nil {
					tlog.Warn.Printf("ino%d fh%d: doWrite: rollback failed: %v", f.qIno.Ino, f.intFd(), err2)
				}
			}
			return 0, fuse.ToStatus(err)
		}
	}
	// Write
	_, err = f.fd.WriteAt(ciphertext, cOff)
	// Return memory to CReqPool
	f.fs.contentEnc.CReqPool.Put(ciphertext)
	if err != nil {
		tlog.Warn.Printf("ino%d fh%d: doWrite: WriteAt off=%d len=%d failed: %v",
			f.qIno.Ino, f.intFd(), cOff, len(ciphertext), err)
		return 0, fuse.ToStatus(err)
	}
	return uint32(len(data)), fuse.OK
}

// isConsecutiveWrite returns true if the current write
// directly (in time and space) follows the last write.
// This is an optimisation for streaming writes on NFS where a
// Stat() call is very expensive.
// The caller must hold fileTableEntry.ContentLock, otherwise this check would be racy.
func (f *File) isConsecutiveWrite(off int64) bool {
	opCount := openfiletable.WriteOpCount()
	return opCount == f.lastOpCount+1 && off == f.lastWrittenOffset+1
}
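
// Example: a streaming writer that has just written bytes 0..4095 and, as the
// very next write operation, continues at offset 4096 is "consecutive", so
// Write() skips the hole check and its Stat() call for that request.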

// Write - FUSE call
//
// If the write creates a hole, pads the file to the next block boundary.
func (f *File) Write(data []byte, off int64) (uint32, fuse.Status) {
	if len(data) > fuse.MAX_KERNEL_WRITE {
		// This would crash us due to our fixed-size buffer pool
		tlog.Warn.Printf("Write: rejecting oversized request with EMSGSIZE, len=%d", len(data))
		return 0, fuse.Status(syscall.EMSGSIZE)
	}
	f.fdLock.RLock()
	defer f.fdLock.RUnlock()
	if f.released {
		// The file descriptor has been closed concurrently
		tlog.Warn.Printf("ino%d fh%d: Write on released file", f.qIno.Ino, f.intFd())
		return 0, fuse.EBADF
	}
	f.fileTableEntry.ContentLock.Lock()
	defer f.fileTableEntry.ContentLock.Unlock()
	tlog.Debug.Printf("ino%d: FUSE Write: offset=%d length=%d", f.qIno.Ino, off, len(data))
	// If the write creates a file hole, we have to zero-pad the last block.
	// But if the write directly follows an earlier write, it cannot create a
	// hole, and we can save one Stat() call.
	if !f.isConsecutiveWrite(off) {
		status := f.writePadHole(off)
		if !status.Ok() {
			return 0, status
		}
	}
	n, status := f.doWrite(data, off)
	if status.Ok() {
		f.lastOpCount = openfiletable.WriteOpCount()
		f.lastWrittenOffset = off + int64(len(data)) - 1
	}
	return n, status
}

// Release - FUSE call, close file
func (f *File) Release() {
	f.fdLock.Lock()
	if f.released {
		log.Panicf("ino%d fh%d: double release", f.qIno.Ino, f.intFd())
	}
	f.fd.Close()
	f.released = true
	f.fdLock.Unlock()

	openfiletable.Unregister(f.qIno)
}

// Flush - FUSE call
func (f *File) Flush() fuse.Status {
	f.fdLock.RLock()
	defer f.fdLock.RUnlock()

	// Since Flush() may be called for each dup'd fd, we don't
	// want to really close the file, we just want to flush. This
	// is achieved by closing a dup'd fd.
	newFd, err := syscall.Dup(int(f.fd.Fd()))
	if err != nil {
		return fuse.ToStatus(err)
	}
	err = syscall.Close(newFd)
	return fuse.ToStatus(err)
}

// Fsync FUSE call
func (f *File) Fsync(flags int) (code fuse.Status) {
	f.fdLock.RLock()
	defer f.fdLock.RUnlock()

	return fuse.ToStatus(syscall.Fsync(int(f.fd.Fd())))
}

// Chmod FUSE call
func (f *File) Chmod(mode uint32) fuse.Status {
	f.fdLock.RLock()
	defer f.fdLock.RUnlock()

	// os.File.Chmod goes through the "syscallMode" translation function that messes
	// up the suid and sgid bits. So use syscall.Fchmod directly.
	err := syscall.Fchmod(f.intFd(), mode)
	return fuse.ToStatus(err)
}

// Chown FUSE call
func (f *File) Chown(uid uint32, gid uint32) fuse.Status {
	f.fdLock.RLock()
	defer f.fdLock.RUnlock()

	return fuse.ToStatus(f.fd.Chown(int(uid), int(gid)))
}

// GetAttr FUSE call (like stat)
func (f *File) GetAttr(a *fuse.Attr) fuse.Status {
	f.fdLock.RLock()
	defer f.fdLock.RUnlock()

	tlog.Debug.Printf("file.GetAttr()")
	st := syscall.Stat_t{}
	err := syscall.Fstat(int(f.fd.Fd()), &st)
	if err != nil {
		return fuse.ToStatus(err)
	}
	a.FromStat(&st)
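	// The kernel sees plaintext sizes, so the ciphertext size reported by
	// Fstat (file header plus per-block overhead) is translated back to the
	// plaintext size below. For example, with the default 4 KiB plaintext
	// blocks, a ciphertext of header + 2 full blocks maps back to 8192
	// plaintext bytes.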
	a.Size = f.contentEnc.CipherSizeToPlainSize(a.Size)
	if f.fs.args.ForceOwner != nil {
		a.Owner = *f.fs.args.ForceOwner
	}

	return fuse.OK
}

// Utimens FUSE call
func (f *File) Utimens(a *time.Time, m *time.Time) fuse.Status {
	f.fdLock.RLock()
	defer f.fdLock.RUnlock()
	return f.loopbackFile.Utimens(a, m)
}