cleanup and calculate tg chunk size dynamically

divyam234 2023-12-08 16:34:20 +05:30
parent d40b0a825a
commit 7dda80d8dc
3 changed files with 40 additions and 94 deletions

View file

@@ -27,21 +27,11 @@ const (
)
var (
ErrorBadDecryptUTF8 = errors.New("bad decryption - utf-8 invalid")
ErrorBadDecryptControlChar = errors.New("bad decryption - contains control chars")
ErrorNotAMultipleOfBlocksize = errors.New("not a multiple of blocksize")
ErrorTooShortAfterDecode = errors.New("too short after base32 decode")
ErrorTooLongAfterDecode = errors.New("too long after base32 decode")
ErrorEncryptedFileTooShort = errors.New("file is too short to be encrypted")
ErrorEncryptedFileBadHeader = errors.New("file has truncated block header")
ErrorEncryptedBadMagic = errors.New("not an encrypted file - bad magic string")
ErrorEncryptedBadBlock = errors.New("failed to authenticate decrypted block - bad password?")
ErrorBadBase32Encoding = errors.New("bad base32 filename encoding")
ErrorFileClosed = errors.New("file already closed")
ErrorNotAnEncryptedFile = errors.New("not an encrypted file - does not match suffix")
ErrorBadSeek = errors.New("Seek beyond end of file")
ErrorSuffixMissingDot = errors.New("suffix config setting should include a '.'")
defaultSalt = []byte{0xA8, 0x0D, 0xF4, 0x3A, 0x8F, 0xBD, 0x03, 0x08, 0xA7, 0xCA, 0xB8, 0x3E, 0x58, 0x1F, 0x86, 0xB1}
ErrorEncryptedFileTooShort = errors.New("file is too short to be encrypted")
ErrorEncryptedFileBadHeader = errors.New("file has truncated block header")
ErrorEncryptedBadMagic = errors.New("not an encrypted file - bad magic string")
ErrorFileClosed = errors.New("file already closed")
ErrorBadSeek = errors.New("Seek beyond end of file")
)
var (
@@ -90,19 +80,12 @@ func NewCipher(password, salt string) (*Cipher, error) {
func (c *Cipher) Key(password, salt string) (err error) {
const keySize = len(c.dataKey) + len(c.nameKey) + len(c.nameTweak)
var saltBytes = defaultSalt
if salt != "" {
saltBytes = []byte(salt)
}
var key []byte
if password == "" {
key = make([]byte, keySize)
} else {
key, err = scrypt.Key([]byte(password), saltBytes, 16384, 8, 1, keySize)
if err != nil {
return err
}
saltBytes := []byte(salt)
key, err := scrypt.Key([]byte(password), saltBytes, 16384, 8, 1, keySize)
if err != nil {
return err
}
copy(c.dataKey[:], key)
copy(c.nameKey[:], key[len(c.dataKey):])
copy(c.nameTweak[:], key[len(c.dataKey)+len(c.nameKey):])
@@ -133,11 +116,13 @@ func (n *nonce) fromReader(in io.Reader) error {
return nil
}
func (n *nonce) fromBuf(buf []byte) {
func (n *nonce) fromBuf(buf []byte) error {
read := copy((*n)[:], buf)
if read != fileNonceSize {
panic("buffer to short to read nonce")
return errors.New("buffer to short to read nonce")
}
return nil
}
func (n *nonce) carry(i int) {
@@ -289,7 +274,10 @@ func (c *Cipher) newDecrypter(rc io.ReadCloser) (*decrypter, error) {
return nil, fh.finishAndClose(ErrorEncryptedBadMagic)
}
fh.nonce.fromBuf(readBuf[fileMagicSize:])
err = fh.nonce.fromBuf(readBuf[fileMagicSize:])
if err != nil {
return nil, err
}
fh.initialNonce = fh.nonce
return fh, nil
}

View file

@@ -1,52 +0,0 @@
package pkcs7
import "errors"
var (
ErrorPaddingNotFound = errors.New("bad PKCS#7 padding - not padded")
ErrorPaddingNotAMultiple = errors.New("bad PKCS#7 padding - not a multiple of blocksize")
ErrorPaddingTooLong = errors.New("bad PKCS#7 padding - too long")
ErrorPaddingTooShort = errors.New("bad PKCS#7 padding - too short")
ErrorPaddingNotAllTheSame = errors.New("bad PKCS#7 padding - not all the same")
)
func Pad(n int, buf []byte) []byte {
if n <= 1 || n >= 256 {
panic("bad multiple")
}
length := len(buf)
padding := n - (length % n)
for i := 0; i < padding; i++ {
buf = append(buf, byte(padding))
}
if (len(buf) % n) != 0 {
panic("padding failed")
}
return buf
}
func Unpad(n int, buf []byte) ([]byte, error) {
if n <= 1 || n >= 256 {
panic("bad multiple")
}
length := len(buf)
if length == 0 {
return nil, ErrorPaddingNotFound
}
if (length % n) != 0 {
return nil, ErrorPaddingNotAMultiple
}
padding := int(buf[length-1])
if padding > n {
return nil, ErrorPaddingTooLong
}
if padding == 0 {
return nil, ErrorPaddingTooShort
}
for i := 0; i < padding; i++ {
if buf[length-1-i] != byte(padding) {
return nil, ErrorPaddingNotAllTheSame
}
}
return buf[:length-padding], nil
}

View file

@@ -23,6 +23,16 @@ type tgReader struct {
i int64
}
func calculateChunkSize(start, end int64) int64 {
chunkSize := int64(1024 * 1024)
for chunkSize > 1024 && chunkSize > (end-start) {
chunkSize /= 2
}
return chunkSize
}
func NewTGReader(
ctx context.Context,
client *telegram.Client,
@@ -36,7 +46,7 @@ func NewTGReader(
client: client,
start: part.Start,
end: part.End,
chunkSize: int64(1024 * 1024),
chunkSize: calculateChunkSize(part.Start, part.End),
}
r.next = r.partStream()
return r, nil
@@ -79,6 +89,7 @@ func (r *tgReader) chunk(offset int64, limit int64) ([]byte, error) {
Offset: offset,
Limit: int(limit),
Location: r.location,
Precise: true,
}
res, err := r.client.API().UploadGetFile(r.ctx, req)
@@ -101,13 +112,13 @@ func (r *tgReader) partStream() func() ([]byte, error) {
end := r.end
offset := start - (start % r.chunkSize)
firstPartCut := start - offset
lastPartCut := (end % r.chunkSize) + 1
partCount := int((end - offset + r.chunkSize) / r.chunkSize)
leftCut := start - offset
rightCut := (end % r.chunkSize) + 1
totalParts := int((end - offset + r.chunkSize) / r.chunkSize)
currentPart := 1
readData := func() ([]byte, error) {
if currentPart > partCount {
return func() ([]byte, error) {
if currentPart > totalParts {
return make([]byte, 0), nil
}
res, err := r.chunk(offset, r.chunkSize)
@@ -116,17 +127,16 @@ func (r *tgReader) partStream() func() ([]byte, error) {
}
if len(res) == 0 {
return res, nil
} else if partCount == 1 {
res = res[firstPartCut:lastPartCut]
} else if totalParts == 1 {
res = res[leftCut:rightCut]
} else if currentPart == 1 {
res = res[firstPartCut:]
} else if currentPart == partCount {
res = res[:lastPartCut]
res = res[leftCut:]
} else if currentPart == totalParts {
res = res[:rightCut]
}
currentPart++
offset += r.chunkSize
return res, nil
}
return readData
}
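
For context on the chunk-size change above: the new calculateChunkSize helper starts from the old fixed 1 MiB chunk and halves it until it no longer exceeds the requested byte range, stopping at a 1 KiB floor, so short range requests no longer download a full 1 MiB part. The result stays a power of two of at least 1024, and the read offset remains a multiple of the chunk size, which keeps the requests aligned for upload.getFile and is presumably why the request now also sets Precise: true. A minimal standalone sketch of the behaviour (the function body is copied from the diff; package main, the sample ranges, and the printed values are illustrative only):

package main

import "fmt"

// calculateChunkSize mirrors the helper added in this commit: start at 1 MiB
// and halve until the chunk fits the requested range, never going below 1 KiB.
func calculateChunkSize(start, end int64) int64 {
    chunkSize := int64(1024 * 1024)
    for chunkSize > 1024 && chunkSize > (end-start) {
        chunkSize /= 2
    }
    return chunkSize
}

func main() {
    fmt.Println(calculateChunkSize(0, 10*1024))     // 8192: halved down to the largest power of two not above 10 KiB
    fmt.Println(calculateChunkSize(0, 600*1024))    // 524288: one halving is enough for a ~600 KiB range
    fmt.Println(calculateChunkSize(0, 5*1024*1024)) // 1048576: ranges over 1 MiB keep the full chunk
    fmt.Println(calculateChunkSize(100, 150))       // 1024: tiny ranges bottom out at the 1 KiB floor
}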