package s3store

import (
	"io"
	"io/ioutil"
	"os"
)

// s3PartProducer converts a stream of bytes from the reader into a stream of files on disk.
type s3PartProducer struct {
	tmpDir string
	files  chan fileChunk
	done   chan struct{}
	err    error
	r      io.Reader
}

type fileChunk struct {
	file *os.File
	size int64
}

func newS3PartProducer(source io.Reader, backlog int64, tmpDir string) (s3PartProducer, <-chan fileChunk) {
	fileChan := make(chan fileChunk, backlog)
	doneChan := make(chan struct{})

	partProducer := s3PartProducer{
		tmpDir: tmpDir,
		done:   doneChan,
		files:  fileChan,
		r:      source,
	}

	return partProducer, fileChan
}
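
// Usage sketch (illustrative only; reader and partSize are assumed to come
// from the caller and are not defined in this file). The producer runs in its
// own goroutine while the consumer drains the channel and finally calls stop:
//
//	producer, fileChan := newS3PartProducer(reader, 0, "")
//	go producer.produce(partSize)
//	for chunk := range fileChan {
//		// upload chunk.size bytes from chunk.file, then clean the file up
//	}
//	producer.stop()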

// stop should always be called by the consumer to ensure that the channels
// are properly closed and emptied.
func (spp *s3PartProducer) stop() {
	close(spp.done)

	// If we return while there are still files in the channel, then
	// we may leak file descriptors. Let's ensure that those are cleaned up.
	for fileChunk := range spp.files {
		cleanUpTempFile(fileChunk.file)
	}
}

func (spp *s3PartProducer) produce(partSize int64) {
outerloop:
	for {
		file, ok, err := spp.nextPart(partSize)
		if err != nil {
			// An error occurred. Stop producing.
			spp.err = err
			break
		}
		if !ok {
			// The source was fully read. Stop producing.
			break
		}
		select {
		case spp.files <- file:
		case <-spp.done:
			// We were told to stop producing. Stop producing.
			break outerloop
		}
	}

	close(spp.files)
}
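
// Note on shutdown and errors: if the consumer calls stop() while produce is
// blocked on `spp.files <- file`, the closed done channel unblocks the select
// and produce exits; stop() then drains and removes any chunks still buffered
// in the channel. Any error from nextPart is stored in spp.err, which the
// consumer is expected to inspect after the files channel has been closed.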

func (spp *s3PartProducer) nextPart(size int64) (fileChunk, bool, error) {
	// Create a temporary file to store the part.
	file, err := ioutil.TempFile(spp.tmpDir, "tusd-s3-tmp-")
	if err != nil {
		return fileChunk{}, false, err
	}

	// Copy at most size bytes from the source into the temporary file.
	limitedReader := io.LimitReader(spp.r, size)
	n, err := io.Copy(file, limitedReader)
	if err != nil {
		return fileChunk{}, false, err
	}

	// If the entire request body is read and no more data is available,
	// io.Copy returns 0 since it is unable to read any bytes. In that
	// case, we can close the s3PartProducer.
	if n == 0 {
		cleanUpTempFile(file)
		return fileChunk{}, false, nil
	}

	// Seek to the beginning of the file
	file.Seek(0, io.SeekStart)

	return fileChunk{
		file: file,
		size: n,
	}, true, nil
}
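
// Temp-file lifecycle note: the fileChunk returned by nextPart owns the
// temporary file; whoever receives it (the consumer, or stop() when draining)
// is responsible for passing it to cleanUpTempFile, which is assumed here to
// close and remove the file (it is defined elsewhere in this package).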