fusefrontend: clamp oversized reads
Our byte cache pools are sized according to MAX_KERNEL_WRITE, but the running kernel may have a higher limit set. Clamp read requests to the size we can handle. Fixes a panic on a Synology NAS, reported at https://github.com/rfjakob/gocryptfs/issues/145
This commit is contained in:
parent
64e5906ffa
commit
3009ec9852
|
@ -194,6 +194,8 @@ func (be *ContentEnc) DecryptBlock(ciphertext []byte, blockNo uint64, fileID []b
|
||||||
const encryptMaxSplit = 2
|
const encryptMaxSplit = 2
|
||||||
|
|
||||||
// EncryptBlocks is like EncryptBlock but takes multiple plaintext blocks.
|
// EncryptBlocks is like EncryptBlock but takes multiple plaintext blocks.
|
||||||
|
// Returns a byte slice from CReqPool - so don't forget to return it
|
||||||
|
// to the pool.
|
||||||
func (be *ContentEnc) EncryptBlocks(plaintextBlocks [][]byte, firstBlockNo uint64, fileID []byte) []byte {
|
func (be *ContentEnc) EncryptBlocks(plaintextBlocks [][]byte, firstBlockNo uint64, fileID []byte) []byte {
|
||||||
ciphertextBlocks := make([][]byte, len(plaintextBlocks))
|
ciphertextBlocks := make([][]byte, len(plaintextBlocks))
|
||||||
// For large writes, we parallelize encryption.
|
// For large writes, we parallelize encryption.
|
||||||
|
|
|
@ -132,6 +132,8 @@ func (f *file) createHeader() (fileID []byte, err error) {
|
||||||
return h.ID, err
|
return h.ID, err
|
||||||
}
|
}
|
||||||
|
|
||||||
|
var oversizedReadWarn sync.Once
|
||||||
|
|
||||||
// doRead - read "length" plaintext bytes from plaintext offset "off" and append
|
// doRead - read "length" plaintext bytes from plaintext offset "off" and append
|
||||||
// to "dst".
|
// to "dst".
|
||||||
// Arguments "length" and "off" do not have to be block-aligned.
|
// Arguments "length" and "off" do not have to be block-aligned.
|
||||||
|
@ -142,6 +144,16 @@ func (f *file) createHeader() (fileID []byte, err error) {
|
||||||
// Called by Read() for normal reading,
|
// Called by Read() for normal reading,
|
||||||
// by Write() and Truncate() for Read-Modify-Write
|
// by Write() and Truncate() for Read-Modify-Write
|
||||||
func (f *file) doRead(dst []byte, off uint64, length uint64) ([]byte, fuse.Status) {
|
func (f *file) doRead(dst []byte, off uint64, length uint64) ([]byte, fuse.Status) {
|
||||||
|
// Our byte cache pools are sized acc. to MAX_KERNEL_WRITE, but the
|
||||||
|
// running kernel may have a higher limit set. Clamp to what we can
|
||||||
|
// handle.
|
||||||
|
if length > fuse.MAX_KERNEL_WRITE {
|
||||||
|
oversizedReadWarn.Do(func() {
|
||||||
|
tlog.Warn.Printf("doRead: truncating oversized read: %d to %d bytes",
|
||||||
|
length, fuse.MAX_KERNEL_WRITE)
|
||||||
|
})
|
||||||
|
length = fuse.MAX_KERNEL_WRITE
|
||||||
|
}
|
||||||
// Make sure we have the file ID.
|
// Make sure we have the file ID.
|
||||||
f.fileTableEntry.HeaderLock.RLock()
|
f.fileTableEntry.HeaderLock.RLock()
|
||||||
if f.fileTableEntry.ID == nil {
|
if f.fileTableEntry.ID == nil {
|
||||||
|
@ -170,7 +182,8 @@ func (f *file) doRead(dst []byte, off uint64, length uint64) ([]byte, fuse.Statu
|
||||||
blocks := f.contentEnc.ExplodePlainRange(off, length)
|
blocks := f.contentEnc.ExplodePlainRange(off, length)
|
||||||
alignedOffset, alignedLength := blocks[0].JointCiphertextRange(blocks)
|
alignedOffset, alignedLength := blocks[0].JointCiphertextRange(blocks)
|
||||||
skip := blocks[0].Skip
|
skip := blocks[0].Skip
|
||||||
tlog.Debug.Printf("JointCiphertextRange(%d, %d) -> %d, %d, %d", off, length, alignedOffset, alignedLength, skip)
|
tlog.Debug.Printf("doRead: off=%d len=%d -> off=%d len=%d skip=%d\n",
|
||||||
|
off, length, alignedOffset, alignedLength, skip)
|
||||||
|
|
||||||
ciphertext := f.fs.contentEnc.CReqPool.Get()
|
ciphertext := f.fs.contentEnc.CReqPool.Get()
|
||||||
ciphertext = ciphertext[:int(alignedLength)]
|
ciphertext = ciphertext[:int(alignedLength)]
|
||||||
|
|
Loading…
Reference in New Issue