core: Add context to DataStores
Closes https://github.com/tus/tusd/issues/288
This commit is contained in:
parent 819ffb10ab
commit 485c21d72e
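The diff below threads a context.Context through every store-facing call: the DataStore and Upload interfaces in the handler package, the filestore and gcsstore implementations and their tests, the CLI hook data store, the generated gomock mocks, and the handler tests all gain a leading ctx parameter. Trimmed to just the signatures this commit touches (the full diff below has the surrounding documentation comments), the handler interfaces now read roughly as follows:

type DataStore interface {
	NewUpload(ctx context.Context, info FileInfo) (upload Upload, err error)
	GetUpload(ctx context.Context, id string) (upload Upload, err error)
}

type Upload interface {
	WriteChunk(ctx context.Context, offset int64, src io.Reader) (int64, error)
	GetInfo(ctx context.Context) (FileInfo, error)
	GetReader(ctx context.Context) (io.Reader, error)
	FinishUpload(ctx context.Context) error
}

type TerminatableUpload interface {
	Terminate(ctx context.Context) error
}

type ConcaterDataStore interface {
	ConcatUploads(ctx context.Context, destination string, partialUploads []string) error
}

type LengthDeclarableUpload interface {
	DeclareLength(ctx context.Context, length int64) error
}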
@@ -1,6 +1,7 @@
 package cli
 
 import (
+"context"
 "fmt"
 "strconv"
 
@@ -23,7 +24,7 @@ type hookDataStore struct {
 handler.DataStore
 }
 
-func (store hookDataStore) NewUpload(info handler.FileInfo) (handler.Upload, error) {
+func (store hookDataStore) NewUpload(ctx context.Context, info handler.FileInfo) (handler.Upload, error) {
 if output, err := invokeHookSync(hooks.HookPreCreate, info, true); err != nil {
 if hookErr, ok := err.(hooks.HookError); ok {
 return nil, hooks.NewHookError(
@@ -34,7 +35,7 @@ func (store hookDataStore) NewUpload(info handler.FileInfo) (handler.Upload, err
 }
 return nil, fmt.Errorf("pre-create hook failed: %s\n%s", err, string(output))
 }
-return store.DataStore.NewUpload(info)
+return store.DataStore.NewUpload(ctx, info)
 }
 
 func SetupHookMetrics() {

@@ -9,6 +9,7 @@
 package filestore
 
 import (
+"context"
 "encoding/json"
 "fmt"
 "io"
@@ -47,7 +48,7 @@ func (store FileStore) UseIn(composer *handler.StoreComposer) {
 composer.UseLengthDeferrer(store)
 }
 
-func (store FileStore) NewUpload(info handler.FileInfo) (handler.Upload, error) {
+func (store FileStore) NewUpload(ctx context.Context, info handler.FileInfo) (handler.Upload, error) {
 id := uid.Uid()
 binPath := store.binPath(id)
 info.ID = id
@@ -81,7 +82,7 @@ func (store FileStore) NewUpload(info handler.FileInfo) (handler.Upload, error)
 return upload, nil
 }
 
-func (store FileStore) GetUpload(id string) (handler.Upload, error) {
+func (store FileStore) GetUpload(ctx context.Context, id string) (handler.Upload, error) {
 info := handler.FileInfo{}
 data, err := ioutil.ReadFile(store.infoPath(id))
 if err != nil {
@@ -134,11 +135,11 @@ type fileUpload struct {
 binPath string
 }
 
-func (upload *fileUpload) GetInfo() (handler.FileInfo, error) {
+func (upload *fileUpload) GetInfo(ctx context.Context) (handler.FileInfo, error) {
 return upload.info, nil
 }
 
-func (upload *fileUpload) WriteChunk(offset int64, src io.Reader) (int64, error) {
+func (upload *fileUpload) WriteChunk(ctx context.Context, offset int64, src io.Reader) (int64, error) {
 file, err := os.OpenFile(upload.binPath, os.O_WRONLY|os.O_APPEND, defaultFilePerm)
 if err != nil {
 return 0, err
@@ -160,11 +161,11 @@ func (upload *fileUpload) WriteChunk(offset int64, src io.Reader) (int64, error)
 return n, err
 }
 
-func (upload *fileUpload) GetReader() (io.Reader, error) {
+func (upload *fileUpload) GetReader(ctx context.Context) (io.Reader, error) {
 return os.Open(upload.binPath)
 }
 
-func (upload *fileUpload) Terminate() error {
+func (upload *fileUpload) Terminate(ctx context.Context) error {
 if err := os.Remove(upload.infoPath); err != nil {
 return err
 }
@@ -174,7 +175,7 @@ func (upload *fileUpload) Terminate() error {
 return nil
 }
 
-func (store FileStore) ConcatUploads(dest string, uploads []string) (err error) {
+func (store FileStore) ConcatUploads(ctx context.Context, dest string, uploads []string) (err error) {
 file, err := os.OpenFile(store.binPath(dest), os.O_WRONLY|os.O_APPEND, defaultFilePerm)
 if err != nil {
 return err
@@ -195,7 +196,7 @@ func (store FileStore) ConcatUploads(dest string, uploads []string) (err error)
 return
 }
 
-func (upload *fileUpload) DeclareLength(length int64) error {
+func (upload *fileUpload) DeclareLength(ctx context.Context, length int64) error {
 upload.info.Size = length
 upload.info.SizeIsDeferred = false
 return upload.writeInfo()
@@ -210,6 +211,6 @@ func (upload *fileUpload) writeInfo() error {
 return ioutil.WriteFile(upload.infoPath, data, defaultFilePerm)
 }
 
-func (upload *fileUpload) FinishUpload() error {
+func (upload *fileUpload) FinishUpload(ctx context.Context) error {
 return nil
 }

@@ -1,6 +1,7 @@
 package filestore
 
 import (
+"context"
 "io"
 "io/ioutil"
 "os"
@@ -25,9 +26,10 @@ func TestFilestore(t *testing.T) {
 a.NoError(err)
 
 store := FileStore{tmp}
+ctx := context.Background()
 
 // Create new upload
-upload, err := store.NewUpload(handler.FileInfo{
+upload, err := store.NewUpload(ctx, handler.FileInfo{
 Size: 42,
 MetaData: map[string]string{
 "hello": "world",
@@ -37,7 +39,7 @@ func TestFilestore(t *testing.T) {
 a.NotEqual(nil, upload)
 
 // Check info without writing
-info, err := upload.GetInfo()
+info, err := upload.GetInfo(ctx)
 a.NoError(err)
 a.EqualValues(42, info.Size)
 a.EqualValues(0, info.Offset)
@@ -47,18 +49,18 @@ func TestFilestore(t *testing.T) {
 a.Equal(filepath.Join(tmp, info.ID), info.Storage["Path"])
 
 // Write data to upload
-bytesWritten, err := upload.WriteChunk(0, strings.NewReader("hello world"))
+bytesWritten, err := upload.WriteChunk(ctx, 0, strings.NewReader("hello world"))
 a.NoError(err)
 a.EqualValues(len("hello world"), bytesWritten)
 
 // Check new offset
-info, err = upload.GetInfo()
+info, err = upload.GetInfo(ctx)
 a.NoError(err)
 a.EqualValues(42, info.Size)
 a.EqualValues(11, info.Offset)
 
 // Read content
-reader, err := upload.GetReader()
+reader, err := upload.GetReader(ctx)
 a.NoError(err)
 
 content, err := ioutil.ReadAll(reader)
@@ -67,10 +69,10 @@ func TestFilestore(t *testing.T) {
 reader.(io.Closer).Close()
 
 // Terminate upload
-a.NoError(store.AsTerminatableUpload(upload).Terminate())
+a.NoError(store.AsTerminatableUpload(upload).Terminate(ctx))
 
 // Test if upload is deleted
-upload, err = store.GetUpload(info.ID)
+upload, err = store.GetUpload(ctx, info.ID)
 a.Equal(nil, upload)
 a.True(os.IsNotExist(err))
 }
@@ -79,8 +81,9 @@ func TestMissingPath(t *testing.T) {
 a := assert.New(t)
 
 store := FileStore{"./path-that-does-not-exist"}
+ctx := context.Background()
 
-upload, err := store.NewUpload(handler.FileInfo{})
+upload, err := store.NewUpload(ctx, handler.FileInfo{})
 a.Error(err)
 a.Equal("upload directory does not exist: ./path-that-does-not-exist", err.Error())
 a.Equal(nil, upload)
@@ -93,13 +96,14 @@ func TestConcatUploads(t *testing.T) {
 a.NoError(err)
 
 store := FileStore{tmp}
+ctx := context.Background()
 
 // Create new upload to hold concatenated upload
-finUpload, err := store.NewUpload(handler.FileInfo{Size: 9})
+finUpload, err := store.NewUpload(ctx, handler.FileInfo{Size: 9})
 a.NoError(err)
 a.NotEqual(nil, finUpload)
 
-finInfo, err := finUpload.GetInfo()
+finInfo, err := finUpload.GetInfo(ctx)
 a.NoError(err)
 finId := finInfo.ID
 
@@ -111,33 +115,33 @@ func TestConcatUploads(t *testing.T) {
 "ghi",
 }
 for i := 0; i < 3; i++ {
-upload, err := store.NewUpload(handler.FileInfo{Size: 3})
+upload, err := store.NewUpload(ctx, handler.FileInfo{Size: 3})
 a.NoError(err)
 
-n, err := upload.WriteChunk(0, strings.NewReader(contents[i]))
+n, err := upload.WriteChunk(ctx, 0, strings.NewReader(contents[i]))
 a.NoError(err)
 a.EqualValues(3, n)
 
-info, err := upload.GetInfo()
+info, err := upload.GetInfo(ctx)
 a.NoError(err)
 
 ids[i] = info.ID
 }
 
-err = store.ConcatUploads(finId, ids)
+err = store.ConcatUploads(ctx, finId, ids)
 a.NoError(err)
 
 // Check offset
-finUpload, err = store.GetUpload(finId)
+finUpload, err = store.GetUpload(ctx, finId)
 a.NoError(err)
 
-info, err := finUpload.GetInfo()
+info, err := finUpload.GetInfo(ctx)
 a.NoError(err)
 a.EqualValues(9, info.Size)
 a.EqualValues(9, info.Offset)
 
 // Read content
-reader, err := finUpload.GetReader()
+reader, err := finUpload.GetReader(ctx)
 a.NoError(err)
 
 content, err := ioutil.ReadAll(reader)
@@ -153,23 +157,24 @@ func TestDeclareLength(t *testing.T) {
 a.NoError(err)
 
 store := FileStore{tmp}
+ctx := context.Background()
 
-upload, err := store.NewUpload(handler.FileInfo{
+upload, err := store.NewUpload(ctx, handler.FileInfo{
 Size: 0,
 SizeIsDeferred: true,
 })
 a.NoError(err)
 a.NotEqual(nil, upload)
 
-info, err := upload.GetInfo()
+info, err := upload.GetInfo(ctx)
 a.NoError(err)
 a.EqualValues(0, info.Size)
 a.Equal(true, info.SizeIsDeferred)
 
-err = store.AsLengthDeclarableUpload(upload).DeclareLength(100)
+err = store.AsLengthDeclarableUpload(upload).DeclareLength(ctx, 100)
 a.NoError(err)
 
-updatedInfo, err := upload.GetInfo()
+updatedInfo, err := upload.GetInfo(ctx)
 a.NoError(err)
 a.EqualValues(100, updatedInfo.Size)
 a.Equal(false, updatedInfo.SizeIsDeferred)

@@ -56,7 +56,7 @@ func (store GCSStore) UseIn(composer *handler.StoreComposer) {
 composer.UseTerminater(store)
 }
 
-func (store GCSStore) NewUpload(info handler.FileInfo) (handler.Upload, error) {
+func (store GCSStore) NewUpload(ctx context.Context, info handler.FileInfo) (handler.Upload, error) {
 if info.ID == "" {
 info.ID = uid.Uid()
 }
@@ -67,7 +67,6 @@ func (store GCSStore) NewUpload(info handler.FileInfo) (handler.Upload, error) {
 "Key": store.keyWithPrefix(info.ID),
 }
 
-ctx := context.Background()
 err := store.writeInfo(ctx, store.keyWithPrefix(info.ID), info)
 if err != nil {
 return &gcsUpload{info.ID, &store}, err
@@ -81,7 +80,7 @@ type gcsUpload struct {
 store *GCSStore
 }
 
-func (store GCSStore) GetUpload(id string) (handler.Upload, error) {
+func (store GCSStore) GetUpload(ctx context.Context, id string) (handler.Upload, error) {
 return &gcsUpload{id, &store}, nil
 }
 
@@ -89,7 +88,7 @@ func (store GCSStore) AsTerminatableUpload(upload handler.Upload) handler.Termin
 return upload.(*gcsUpload)
 }
 
-func (upload gcsUpload) WriteChunk(offset int64, src io.Reader) (int64, error) {
+func (upload gcsUpload) WriteChunk(ctx context.Context, offset int64, src io.Reader) (int64, error) {
 id := upload.id
 store := upload.store
 
@@ -99,7 +98,6 @@ func (upload gcsUpload) WriteChunk(offset int64, src io.Reader) (int64, error) {
 Prefix: prefix,
 }
 
-ctx := context.Background()
 names, err := store.Service.FilterObjects(ctx, filterParams)
 if err != nil {
 return 0, err
@@ -135,7 +133,7 @@ func (upload gcsUpload) WriteChunk(offset int64, src io.Reader) (int64, error) {
 
 const CONCURRENT_SIZE_REQUESTS = 32
 
-func (upload gcsUpload) GetInfo() (handler.FileInfo, error) {
+func (upload gcsUpload) GetInfo(ctx context.Context) (handler.FileInfo, error) {
 id := upload.id
 store := upload.store
 
@@ -147,7 +145,6 @@ func (upload gcsUpload) GetInfo() (handler.FileInfo, error) {
 ID: i,
 }
 
-ctx := context.Background()
 r, err := store.Service.ReadObject(ctx, params)
 if err != nil {
 if err == storage.ErrObjectNotExist {
@@ -258,7 +255,7 @@ func (store GCSStore) writeInfo(ctx context.Context, id string, info handler.Fil
 return nil
 }
 
-func (upload gcsUpload) FinishUpload() error {
+func (upload gcsUpload) FinishUpload(ctx context.Context) error {
 id := upload.id
 store := upload.store
 
@@ -268,7 +265,6 @@ func (upload gcsUpload) FinishUpload() error {
 Prefix: prefix,
 }
 
-ctx := context.Background()
 names, err := store.Service.FilterObjects(ctx, filterParams)
 if err != nil {
 return err
@@ -290,7 +286,7 @@ func (upload gcsUpload) FinishUpload() error {
 return err
 }
 
-info, err := upload.GetInfo()
+info, err := upload.GetInfo(ctx)
 if err != nil {
 return err
 }
@@ -308,7 +304,7 @@ func (upload gcsUpload) FinishUpload() error {
 return nil
 }
 
-func (upload gcsUpload) Terminate() error {
+func (upload gcsUpload) Terminate(ctx context.Context) error {
 id := upload.id
 store := upload.store
 
@@ -317,7 +313,6 @@ func (upload gcsUpload) Terminate() error {
 Prefix: store.keyWithPrefix(id),
 }
 
-ctx := context.Background()
 err := store.Service.DeleteObjectsWithFilter(ctx, filterParams)
 if err != nil {
 return err
@@ -326,7 +321,7 @@ func (upload gcsUpload) Terminate() error {
 return nil
 }
 
-func (upload gcsUpload) GetReader() (io.Reader, error) {
+func (upload gcsUpload) GetReader(ctx context.Context) (io.Reader, error) {
 id := upload.id
 store := upload.store
 
@@ -335,7 +330,6 @@ func (upload gcsUpload) GetReader() (io.Reader, error) {
 ID: store.keyWithPrefix(id),
 }
 
-ctx := context.Background()
 r, err := store.Service.ReadObject(ctx, params)
 if err != nil {
 return nil, err

@@ -64,7 +64,7 @@ func TestNewUpload(t *testing.T) {
 ctx := context.Background()
 service.EXPECT().WriteObject(ctx, params, r).Return(int64(r.Len()), nil)
 
-upload, err := store.NewUpload(mockTusdInfo)
+upload, err := store.NewUpload(context.Background(), mockTusdInfo)
 assert.Nil(err)
 assert.NotNil(upload)
 }
@@ -99,7 +99,7 @@ func TestNewUploadWithPrefix(t *testing.T) {
 ctx := context.Background()
 service.EXPECT().WriteObject(ctx, params, r).Return(int64(r.Len()), nil)
 
-upload, err := store.NewUpload(mockTusdInfo)
+upload, err := store.NewUpload(context.Background(), mockTusdInfo)
 assert.Nil(err)
 assert.NotNil(upload)
 }
@@ -185,10 +185,10 @@ func TestGetInfo(t *testing.T) {
 
 service.EXPECT().WriteObject(ctx, params, infoR).Return(int64(len(offsetInfoData)), nil).After(lastGetObjectSize)
 
-upload, err := store.GetUpload(mockID)
+upload, err := store.GetUpload(context.Background(), mockID)
 assert.Nil(err)
 
-info, err := upload.GetInfo()
+info, err := upload.GetInfo(context.Background())
 assert.Nil(err)
 assert.Equal(mockTusdInfo, info)
 
@@ -214,10 +214,10 @@ func TestGetInfoNotFound(t *testing.T) {
 service.EXPECT().ReadObject(ctx, params).Return(nil, storage.ErrObjectNotExist),
 )
 
-upload, err := store.GetUpload(mockID)
+upload, err := store.GetUpload(context.Background(), mockID)
 assert.Nil(err)
 
-_, err = upload.GetInfo()
+_, err = upload.GetInfo(context.Background())
 assert.Equal(handler.ErrNotFound, err)
 }
 
@@ -264,10 +264,10 @@ func TestGetReader(t *testing.T) {
 ctx := context.Background()
 service.EXPECT().ReadObject(ctx, params).Return(r, nil)
 
-upload, err := store.GetUpload(mockID)
+upload, err := store.GetUpload(context.Background(), mockID)
 assert.Nil(err)
 
-reader, err := upload.GetReader()
+reader, err := upload.GetReader(context.Background())
 assert.Nil(err)
 
 buf := make([]byte, len(mockReaderData))
@@ -295,10 +295,10 @@ func TestTerminate(t *testing.T) {
 ctx := context.Background()
 service.EXPECT().DeleteObjectsWithFilter(ctx, filterParams).Return(nil)
 
-upload, err := store.GetUpload(mockID)
+upload, err := store.GetUpload(context.Background(), mockID)
 assert.Nil(err)
 
-err = store.AsTerminatableUpload(upload).Terminate()
+err = store.AsTerminatableUpload(upload).Terminate(context.Background())
 assert.Nil(err)
 }
 
@@ -384,10 +384,10 @@ func TestFinishUpload(t *testing.T) {
 writeObject := service.EXPECT().WriteObject(ctx, infoParams, infoR).Return(int64(len(offsetInfoData)), nil).After(lastGetObjectSize)
 service.EXPECT().SetObjectMetadata(ctx, objectParams, metadata).Return(nil).After(writeObject)
 
-upload, err := store.GetUpload(mockID)
+upload, err := store.GetUpload(context.Background(), mockID)
 assert.Nil(err)
 
-err = upload.FinishUpload()
+err = upload.FinishUpload(context.Background())
 assert.Nil(err)
 
 // Cancel the context to avoid getting an error from `go vet`
@@ -463,10 +463,10 @@ func TestWriteChunk(t *testing.T) {
 var offset int64
 offset = mockSize / 3
 
-upload, err := store.GetUpload(mockID)
+upload, err := store.GetUpload(context.Background(), mockID)
 assert.Nil(err)
 
-_, err = upload.WriteChunk(offset, reader)
+_, err = upload.WriteChunk(context.Background(), offset, reader)
 assert.Nil(err)
 
 }

@@ -1,6 +1,7 @@
 package handler_test
 
 import (
+"context"
 "net/http"
 "strings"
 "testing"
@@ -37,14 +38,14 @@ func TestConcat(t *testing.T) {
 upload := NewMockFullUpload(ctrl)
 
 gomock.InOrder(
-store.EXPECT().NewUpload(FileInfo{
+store.EXPECT().NewUpload(context.Background(), FileInfo{
 Size: 300,
 IsPartial: true,
 IsFinal: false,
 PartialUploads: nil,
 MetaData: make(map[string]string),
 }).Return(upload, nil),
-upload.EXPECT().GetInfo().Return(FileInfo{
+upload.EXPECT().GetInfo(context.Background()).Return(FileInfo{
 ID: "foo",
 Size: 300,
 IsPartial: true,
@@ -76,8 +77,8 @@ func TestConcat(t *testing.T) {
 upload := NewMockFullUpload(ctrl)
 
 gomock.InOrder(
-store.EXPECT().GetUpload("foo").Return(upload, nil),
+store.EXPECT().GetUpload(context.Background(), "foo").Return(upload, nil),
-upload.EXPECT().GetInfo().Return(FileInfo{
+upload.EXPECT().GetInfo(context.Background()).Return(FileInfo{
 ID: "foo",
 IsPartial: true,
 }, nil),
@@ -113,26 +114,26 @@ func TestConcat(t *testing.T) {
 uploadC := NewMockFullUpload(ctrl)
 
 gomock.InOrder(
-store.EXPECT().GetUpload("a").Return(uploadA, nil),
+store.EXPECT().GetUpload(context.Background(), "a").Return(uploadA, nil),
-uploadA.EXPECT().GetInfo().Return(FileInfo{
+uploadA.EXPECT().GetInfo(context.Background()).Return(FileInfo{
 IsPartial: true,
 Size: 5,
 Offset: 5,
 }, nil),
-store.EXPECT().GetUpload("b").Return(uploadB, nil),
+store.EXPECT().GetUpload(context.Background(), "b").Return(uploadB, nil),
-uploadB.EXPECT().GetInfo().Return(FileInfo{
+uploadB.EXPECT().GetInfo(context.Background()).Return(FileInfo{
 IsPartial: true,
 Size: 5,
 Offset: 5,
 }, nil),
-store.EXPECT().NewUpload(FileInfo{
+store.EXPECT().NewUpload(context.Background(), FileInfo{
 Size: 10,
 IsPartial: false,
 IsFinal: true,
 PartialUploads: []string{"a", "b"},
 MetaData: make(map[string]string),
 }).Return(uploadC, nil),
-uploadC.EXPECT().GetInfo().Return(FileInfo{
+uploadC.EXPECT().GetInfo(context.Background()).Return(FileInfo{
 ID: "foo",
 Size: 10,
 IsPartial: false,
@@ -140,7 +141,7 @@ func TestConcat(t *testing.T) {
 PartialUploads: []string{"a", "b"},
 MetaData: make(map[string]string),
 }, nil),
-store.EXPECT().ConcatUploads("foo", []string{"a", "b"}).Return(nil),
+store.EXPECT().ConcatUploads(context.Background(), "foo", []string{"a", "b"}).Return(nil),
 )
 
 handler, _ := NewHandler(Config{
@@ -179,8 +180,8 @@ func TestConcat(t *testing.T) {
 upload := NewMockFullUpload(ctrl)
 
 gomock.InOrder(
-store.EXPECT().GetUpload("foo").Return(upload, nil),
+store.EXPECT().GetUpload(context.Background(), "foo").Return(upload, nil),
-upload.EXPECT().GetInfo().Return(FileInfo{
+upload.EXPECT().GetInfo(context.Background()).Return(FileInfo{
 ID: "foo",
 IsFinal: true,
 PartialUploads: []string{"a", "b"},
@@ -217,8 +218,8 @@ func TestConcat(t *testing.T) {
 // This upload is still unfinished (mismatching offset and size) and
 // will therefore cause the POST request to fail.
 gomock.InOrder(
-store.EXPECT().GetUpload("c").Return(upload, nil),
+store.EXPECT().GetUpload(context.Background(), "c").Return(upload, nil),
-upload.EXPECT().GetInfo().Return(FileInfo{
+upload.EXPECT().GetInfo(context.Background()).Return(FileInfo{
 ID: "c",
 IsPartial: true,
 Size: 5,
@@ -247,8 +248,8 @@ func TestConcat(t *testing.T) {
 upload := NewMockFullUpload(ctrl)
 
 gomock.InOrder(
-store.EXPECT().GetUpload("huge").Return(upload, nil),
+store.EXPECT().GetUpload(context.Background(), "huge").Return(upload, nil),
-upload.EXPECT().GetInfo().Return(FileInfo{
+upload.EXPECT().GetInfo(context.Background()).Return(FileInfo{
 ID: "huge",
 Size: 1000,
 Offset: 1000,
@@ -277,8 +278,8 @@ func TestConcat(t *testing.T) {
 upload := NewMockFullUpload(ctrl)
 
 gomock.InOrder(
-store.EXPECT().GetUpload("foo").Return(upload, nil),
+store.EXPECT().GetUpload(context.Background(), "foo").Return(upload, nil),
-upload.EXPECT().GetInfo().Return(FileInfo{
+upload.EXPECT().GetInfo(context.Background()).Return(FileInfo{
 ID: "foo",
 Size: 10,
 Offset: 0,

@@ -1,6 +1,7 @@
 package handler
 
 import (
+"context"
 "testing"
 
 "github.com/stretchr/testify/assert"
@@ -8,10 +9,10 @@ import (
 
 type zeroStore struct{}
 
-func (store zeroStore) NewUpload(info FileInfo) (Upload, error) {
+func (store zeroStore) NewUpload(ctx context.Context, info FileInfo) (Upload, error) {
 return nil, nil
 }
-func (store zeroStore) GetUpload(id string) (Upload, error) {
+func (store zeroStore) GetUpload(ctx context.Context, id string) (Upload, error) {
 return nil, nil
 }
 

@@ -55,11 +55,11 @@ type Upload interface {
 // It will also lock resources while they are written to ensure only one
 // write happens per time.
 // The function call must return the number of bytes written.
-WriteChunk(offset int64, src io.Reader) (int64, error)
+WriteChunk(ctx context.Context, offset int64, src io.Reader) (int64, error)
 // Read the fileinformation used to validate the offset and respond to HEAD
 // requests. It may return an os.ErrNotExist which will be interpreted as a
 // 404 Not Found.
-GetInfo() (FileInfo, error)
+GetInfo(ctx context.Context) (FileInfo, error)
 // GetReader returns a reader which allows iterating of the content of an
 // upload specified by its ID. It should attempt to provide a reader even if
 // the upload has not been finished yet but it's not required.
@@ -67,7 +67,7 @@ type Upload interface {
 // Close() method will be invoked once everything has been read.
 // If the given upload could not be found, the error tusd.ErrNotFound should
 // be returned.
-GetReader() (io.Reader, error)
+GetReader(ctx context.Context) (io.Reader, error)
 // FinisherDataStore is the interface which can be implemented by DataStores
 // which need to do additional operations once an entire upload has been
 // completed. These tasks may include but are not limited to freeing unused
@@ -75,7 +75,7 @@ type Upload interface {
 // interface for removing a temporary object.
 // FinishUpload executes additional operations for the finished upload which
 // is specified by its ID.
-FinishUpload() error
+FinishUpload(ctx context.Context) error
 }
 
 type DataStore interface {
@@ -83,15 +83,15 @@ type DataStore interface {
 // return an unique id which is used to identify the upload. If no backend
 // (e.g. Riak) specifes the id you may want to use the uid package to
 // generate one. The properties Size and MetaData will be filled.
-NewUpload(info FileInfo) (upload Upload, err error)
+NewUpload(ctx context.Context, info FileInfo) (upload Upload, err error)
 
-GetUpload(id string) (upload Upload, err error)
+GetUpload(ctx context.Context, id string) (upload Upload, err error)
 }
 
 type TerminatableUpload interface {
 // Terminate an upload so any further requests to the resource, both reading
 // and writing, must return os.ErrNotExist or similar.
-Terminate() error
+Terminate(ctx context.Context) error
 }
 
 // TerminaterDataStore is the interface which must be implemented by DataStores
@@ -111,7 +111,7 @@ type ConcaterDataStore interface {
 // destination upload has been created before with enough space to hold all
 // partial uploads. The order, in which the partial uploads are supplied,
 // must be respected during concatenation.
-ConcatUploads(destination string, partialUploads []string) error
+ConcatUploads(ctx context.Context, destination string, partialUploads []string) error
 }
 
 // LengthDeferrerDataStore is the interface that must be implemented if the
@@ -123,7 +123,7 @@ type LengthDeferrerDataStore interface {
 }
 
 type LengthDeclarableUpload interface {
-DeclareLength(length int64) error
+DeclareLength(ctx context.Context, length int64) error
 }
 
 // Locker is the interface required for custom lock persisting mechanisms.

@@ -1,6 +1,7 @@
 package handler_test
 
 import (
+"context"
 "net/http"
 "strings"
 "testing"
@@ -34,8 +35,8 @@ func TestGet(t *testing.T) {
 gomock.InOrder(
 locker.EXPECT().NewLock("yes").Return(lock, nil),
 lock.EXPECT().Lock().Return(nil),
-store.EXPECT().GetUpload("yes").Return(upload, nil),
+store.EXPECT().GetUpload(context.Background(), "yes").Return(upload, nil),
-upload.EXPECT().GetInfo().Return(FileInfo{
+upload.EXPECT().GetInfo(context.Background()).Return(FileInfo{
 Offset: 5,
 Size: 20,
 MetaData: map[string]string{
@@ -43,7 +44,7 @@ func TestGet(t *testing.T) {
 "filetype": "image/jpeg",
 },
 }, nil),
-upload.EXPECT().GetReader().Return(reader, nil),
+upload.EXPECT().GetReader(context.Background()).Return(reader, nil),
 lock.EXPECT().Unlock().Return(nil),
 )
 
@@ -78,8 +79,8 @@ func TestGet(t *testing.T) {
 upload := NewMockFullUpload(ctrl)
 
 gomock.InOrder(
-store.EXPECT().GetUpload("yes").Return(upload, nil),
+store.EXPECT().GetUpload(context.Background(), "yes").Return(upload, nil),
-upload.EXPECT().GetInfo().Return(FileInfo{
+upload.EXPECT().GetInfo(context.Background()).Return(FileInfo{
 Offset: 0,
 }, nil),
 )
@@ -106,8 +107,8 @@ func TestGet(t *testing.T) {
 upload := NewMockFullUpload(ctrl)
 
 gomock.InOrder(
-store.EXPECT().GetUpload("yes").Return(upload, nil),
+store.EXPECT().GetUpload(context.Background(), "yes").Return(upload, nil),
-upload.EXPECT().GetInfo().Return(FileInfo{
+upload.EXPECT().GetInfo(context.Background()).Return(FileInfo{
 Offset: 0,
 MetaData: map[string]string{
 "filetype": "non-a-valid-mime-type",
@@ -138,8 +139,8 @@ func TestGet(t *testing.T) {
 upload := NewMockFullUpload(ctrl)
 
 gomock.InOrder(
-store.EXPECT().GetUpload("yes").Return(upload, nil),
+store.EXPECT().GetUpload(context.Background(), "yes").Return(upload, nil),
-upload.EXPECT().GetInfo().Return(FileInfo{
+upload.EXPECT().GetInfo(context.Background()).Return(FileInfo{
 Offset: 0,
 MetaData: map[string]string{
 "filetype": "application/vnd.openxmlformats-officedocument.wordprocessingml.document",

@@ -5,6 +5,7 @@
 package handler_test
 
 import (
+context "context"
 gomock "github.com/golang/mock/gomock"
 handler "github.com/tus/tusd/pkg/handler"
 io "io"
@@ -35,33 +36,33 @@ func (m *MockFullDataStore) EXPECT() *MockFullDataStoreMockRecorder {
 }
 
 // NewUpload mocks base method
-func (m *MockFullDataStore) NewUpload(info handler.FileInfo) (handler.Upload, error) {
+func (m *MockFullDataStore) NewUpload(ctx context.Context, info handler.FileInfo) (handler.Upload, error) {
 m.ctrl.T.Helper()
-ret := m.ctrl.Call(m, "NewUpload", info)
+ret := m.ctrl.Call(m, "NewUpload", ctx, info)
 ret0, _ := ret[0].(handler.Upload)
 ret1, _ := ret[1].(error)
 return ret0, ret1
 }
 
 // NewUpload indicates an expected call of NewUpload
-func (mr *MockFullDataStoreMockRecorder) NewUpload(info interface{}) *gomock.Call {
+func (mr *MockFullDataStoreMockRecorder) NewUpload(ctx, info interface{}) *gomock.Call {
 mr.mock.ctrl.T.Helper()
-return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "NewUpload", reflect.TypeOf((*MockFullDataStore)(nil).NewUpload), info)
+return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "NewUpload", reflect.TypeOf((*MockFullDataStore)(nil).NewUpload), ctx, info)
 }
 
 // GetUpload mocks base method
-func (m *MockFullDataStore) GetUpload(id string) (handler.Upload, error) {
+func (m *MockFullDataStore) GetUpload(ctx context.Context, id string) (handler.Upload, error) {
 m.ctrl.T.Helper()
-ret := m.ctrl.Call(m, "GetUpload", id)
+ret := m.ctrl.Call(m, "GetUpload", ctx, id)
 ret0, _ := ret[0].(handler.Upload)
 ret1, _ := ret[1].(error)
 return ret0, ret1
 }
 
 // GetUpload indicates an expected call of GetUpload
-func (mr *MockFullDataStoreMockRecorder) GetUpload(id interface{}) *gomock.Call {
+func (mr *MockFullDataStoreMockRecorder) GetUpload(ctx, id interface{}) *gomock.Call {
 mr.mock.ctrl.T.Helper()
-return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetUpload", reflect.TypeOf((*MockFullDataStore)(nil).GetUpload), id)
+return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetUpload", reflect.TypeOf((*MockFullDataStore)(nil).GetUpload), ctx, id)
 }
 
 // AsTerminatableUpload mocks base method
@@ -79,17 +80,17 @@ func (mr *MockFullDataStoreMockRecorder) AsTerminatableUpload(upload interface{}
 }
 
 // ConcatUploads mocks base method
-func (m *MockFullDataStore) ConcatUploads(destination string, partialUploads []string) error {
+func (m *MockFullDataStore) ConcatUploads(ctx context.Context, destination string, partialUploads []string) error {
 m.ctrl.T.Helper()
-ret := m.ctrl.Call(m, "ConcatUploads", destination, partialUploads)
+ret := m.ctrl.Call(m, "ConcatUploads", ctx, destination, partialUploads)
 ret0, _ := ret[0].(error)
 return ret0
 }
 
 // ConcatUploads indicates an expected call of ConcatUploads
-func (mr *MockFullDataStoreMockRecorder) ConcatUploads(destination, partialUploads interface{}) *gomock.Call {
+func (mr *MockFullDataStoreMockRecorder) ConcatUploads(ctx, destination, partialUploads interface{}) *gomock.Call {
 mr.mock.ctrl.T.Helper()
-return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ConcatUploads", reflect.TypeOf((*MockFullDataStore)(nil).ConcatUploads), destination, partialUploads)
+return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ConcatUploads", reflect.TypeOf((*MockFullDataStore)(nil).ConcatUploads), ctx, destination, partialUploads)
 }
 
 // AsLengthDeclarableUpload mocks base method
@@ -130,90 +131,90 @@ func (m *MockFullUpload) EXPECT() *MockFullUploadMockRecorder {
 }
 
 // WriteChunk mocks base method
-func (m *MockFullUpload) WriteChunk(offset int64, src io.Reader) (int64, error) {
+func (m *MockFullUpload) WriteChunk(ctx context.Context, offset int64, src io.Reader) (int64, error) {
 m.ctrl.T.Helper()
-ret := m.ctrl.Call(m, "WriteChunk", offset, src)
+ret := m.ctrl.Call(m, "WriteChunk", ctx, offset, src)
 ret0, _ := ret[0].(int64)
 ret1, _ := ret[1].(error)
 return ret0, ret1
 }
 
 // WriteChunk indicates an expected call of WriteChunk
-func (mr *MockFullUploadMockRecorder) WriteChunk(offset, src interface{}) *gomock.Call {
+func (mr *MockFullUploadMockRecorder) WriteChunk(ctx, offset, src interface{}) *gomock.Call {
 mr.mock.ctrl.T.Helper()
-return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "WriteChunk", reflect.TypeOf((*MockFullUpload)(nil).WriteChunk), offset, src)
+return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "WriteChunk", reflect.TypeOf((*MockFullUpload)(nil).WriteChunk), ctx, offset, src)
 }
 
 // GetInfo mocks base method
-func (m *MockFullUpload) GetInfo() (handler.FileInfo, error) {
+func (m *MockFullUpload) GetInfo(ctx context.Context) (handler.FileInfo, error) {
 m.ctrl.T.Helper()
-ret := m.ctrl.Call(m, "GetInfo")
+ret := m.ctrl.Call(m, "GetInfo", ctx)
 ret0, _ := ret[0].(handler.FileInfo)
 ret1, _ := ret[1].(error)
 return ret0, ret1
 }
 
 // GetInfo indicates an expected call of GetInfo
-func (mr *MockFullUploadMockRecorder) GetInfo() *gomock.Call {
+func (mr *MockFullUploadMockRecorder) GetInfo(ctx interface{}) *gomock.Call {
 mr.mock.ctrl.T.Helper()
-return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetInfo", reflect.TypeOf((*MockFullUpload)(nil).GetInfo))
+return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetInfo", reflect.TypeOf((*MockFullUpload)(nil).GetInfo), ctx)
 }
 
 // GetReader mocks base method
-func (m *MockFullUpload) GetReader() (io.Reader, error) {
+func (m *MockFullUpload) GetReader(ctx context.Context) (io.Reader, error) {
 m.ctrl.T.Helper()
-ret := m.ctrl.Call(m, "GetReader")
+ret := m.ctrl.Call(m, "GetReader", ctx)
 ret0, _ := ret[0].(io.Reader)
 ret1, _ := ret[1].(error)
 return ret0, ret1
 }
 
 // GetReader indicates an expected call of GetReader
-func (mr *MockFullUploadMockRecorder) GetReader() *gomock.Call {
+func (mr *MockFullUploadMockRecorder) GetReader(ctx interface{}) *gomock.Call {
 mr.mock.ctrl.T.Helper()
-return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetReader", reflect.TypeOf((*MockFullUpload)(nil).GetReader))
+return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetReader", reflect.TypeOf((*MockFullUpload)(nil).GetReader), ctx)
 }
 
 // FinishUpload mocks base method
-func (m *MockFullUpload) FinishUpload() error {
+func (m *MockFullUpload) FinishUpload(ctx context.Context) error {
 m.ctrl.T.Helper()
-ret := m.ctrl.Call(m, "FinishUpload")
+ret := m.ctrl.Call(m, "FinishUpload", ctx)
 ret0, _ := ret[0].(error)
 return ret0
 }
 
 // FinishUpload indicates an expected call of FinishUpload
-func (mr *MockFullUploadMockRecorder) FinishUpload() *gomock.Call {
+func (mr *MockFullUploadMockRecorder) FinishUpload(ctx interface{}) *gomock.Call {
 mr.mock.ctrl.T.Helper()
-return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "FinishUpload", reflect.TypeOf((*MockFullUpload)(nil).FinishUpload))
+return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "FinishUpload", reflect.TypeOf((*MockFullUpload)(nil).FinishUpload), ctx)
 }
 
 // Terminate mocks base method
-func (m *MockFullUpload) Terminate() error {
+func (m *MockFullUpload) Terminate(ctx context.Context) error {
 m.ctrl.T.Helper()
-ret := m.ctrl.Call(m, "Terminate")
+ret := m.ctrl.Call(m, "Terminate", ctx)
 ret0, _ := ret[0].(error)
 return ret0
 }
 
 // Terminate indicates an expected call of Terminate
-func (mr *MockFullUploadMockRecorder) Terminate() *gomock.Call {
+func (mr *MockFullUploadMockRecorder) Terminate(ctx interface{}) *gomock.Call {
 mr.mock.ctrl.T.Helper()
-return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Terminate", reflect.TypeOf((*MockFullUpload)(nil).Terminate))
+return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Terminate", reflect.TypeOf((*MockFullUpload)(nil).Terminate), ctx)
 }
 
 // DeclareLength mocks base method
-func (m *MockFullUpload) DeclareLength(length int64) error {
+func (m *MockFullUpload) DeclareLength(ctx context.Context, length int64) error {
 m.ctrl.T.Helper()
-ret := m.ctrl.Call(m, "DeclareLength", length)
+ret := m.ctrl.Call(m, "DeclareLength", ctx, length)
 ret0, _ := ret[0].(error)
 return ret0
 }
 
 // DeclareLength indicates an expected call of DeclareLength
-func (mr *MockFullUploadMockRecorder) DeclareLength(length interface{}) *gomock.Call {
+func (mr *MockFullUploadMockRecorder) DeclareLength(ctx, length interface{}) *gomock.Call {
 mr.mock.ctrl.T.Helper()
-return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "DeclareLength", reflect.TypeOf((*MockFullUpload)(nil).DeclareLength), length)
+return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "DeclareLength", reflect.TypeOf((*MockFullUpload)(nil).DeclareLength), ctx, length)
 }
 
 // MockFullLocker is a mock of FullLocker interface

@@ -1,6 +1,7 @@
 package handler_test
 
 import (
+"context"
 "net/http"
 "os"
 "testing"
@@ -20,8 +21,8 @@ func TestHead(t *testing.T) {
 gomock.InOrder(
 locker.EXPECT().NewLock("yes").Return(lock, nil),
 lock.EXPECT().Lock().Return(nil),
-store.EXPECT().GetUpload("yes").Return(upload, nil),
+store.EXPECT().GetUpload(context.Background(), "yes").Return(upload, nil),
-upload.EXPECT().GetInfo().Return(FileInfo{
+upload.EXPECT().GetInfo(context.Background()).Return(FileInfo{
 Offset: 11,
 Size: 44,
 MetaData: map[string]string{
@@ -63,7 +64,7 @@ func TestHead(t *testing.T) {
 })
 
 SubTest(t, "UploadNotFoundFail", func(t *testing.T, store *MockFullDataStore, composer *StoreComposer) {
-store.EXPECT().GetUpload("no").Return(nil, os.ErrNotExist)
+store.EXPECT().GetUpload(context.Background(), "no").Return(nil, os.ErrNotExist)
 
 handler, _ := NewHandler(Config{
 StoreComposer: composer,
@@ -92,8 +93,8 @@ func TestHead(t *testing.T) {
 upload := NewMockFullUpload(ctrl)
 
 gomock.InOrder(
-store.EXPECT().GetUpload("yes").Return(upload, nil),
+store.EXPECT().GetUpload(context.Background(), "yes").Return(upload, nil),
-upload.EXPECT().GetInfo().Return(FileInfo{
+upload.EXPECT().GetInfo(context.Background()).Return(FileInfo{
 SizeIsDeferred: true,
 Size: 0,
 }, nil),
@@ -122,8 +123,8 @@ func TestHead(t *testing.T) {
 upload := NewMockFullUpload(ctrl)
 
 gomock.InOrder(
-store.EXPECT().GetUpload("yes").Return(upload, nil),
+store.EXPECT().GetUpload(context.Background(), "yes").Return(upload, nil),
-upload.EXPECT().GetInfo().Return(FileInfo{
+upload.EXPECT().GetInfo(context.Background()).Return(FileInfo{
 SizeIsDeferred: false,
 Size: 10,
 }, nil),
|
@ -1,6 +1,7 @@
|
||||||
package handler_test
|
package handler_test
|
||||||
|
|
||||||
import (
|
import (
|
||||||
|
"context"
|
||||||
"io"
|
"io"
|
||||||
"io/ioutil"
|
"io/ioutil"
|
||||||
"net/http"
|
"net/http"
|
||||||
|
@ -22,14 +23,14 @@ func TestPatch(t *testing.T) {
|
||||||
upload := NewMockFullUpload(ctrl)
|
upload := NewMockFullUpload(ctrl)
|
||||||
|
|
||||||
gomock.InOrder(
|
gomock.InOrder(
|
||||||
store.EXPECT().GetUpload("yes").Return(upload, nil),
|
store.EXPECT().GetUpload(context.Background(), "yes").Return(upload, nil),
|
||||||
upload.EXPECT().GetInfo().Return(FileInfo{
|
upload.EXPECT().GetInfo(context.Background()).Return(FileInfo{
|
||||||
ID: "yes",
|
ID: "yes",
|
||||||
Offset: 5,
|
Offset: 5,
|
||||||
Size: 10,
|
Size: 10,
|
||||||
}, nil),
|
}, nil),
|
||||||
upload.EXPECT().WriteChunk(int64(5), NewReaderMatcher("hello")).Return(int64(5), nil),
|
upload.EXPECT().WriteChunk(context.Background(), int64(5), NewReaderMatcher("hello")).Return(int64(5), nil),
|
||||||
upload.EXPECT().FinishUpload(),
|
upload.EXPECT().FinishUpload(context.Background()),
|
||||||
)
|
)
|
||||||
|
|
||||||
handler, _ := NewHandler(Config{
|
handler, _ := NewHandler(Config{
|
||||||
|
@ -68,14 +69,14 @@ func TestPatch(t *testing.T) {
|
||||||
upload := NewMockFullUpload(ctrl)
|
upload := NewMockFullUpload(ctrl)
|
||||||
|
|
||||||
gomock.InOrder(
|
gomock.InOrder(
|
||||||
store.EXPECT().GetUpload("yes").Return(upload, nil),
|
store.EXPECT().GetUpload(context.Background(), "yes").Return(upload, nil),
|
||||||
upload.EXPECT().GetInfo().Return(FileInfo{
|
upload.EXPECT().GetInfo(context.Background()).Return(FileInfo{
|
||||||
ID: "yes",
|
ID: "yes",
|
||||||
Offset: 5,
|
Offset: 5,
|
||||||
Size: 10,
|
Size: 10,
|
||||||
}, nil),
|
}, nil),
|
||||||
upload.EXPECT().WriteChunk(int64(5), NewReaderMatcher("hello")).Return(int64(5), nil),
|
upload.EXPECT().WriteChunk(context.Background(), int64(5), NewReaderMatcher("hello")).Return(int64(5), nil),
|
||||||
upload.EXPECT().FinishUpload(),
|
upload.EXPECT().FinishUpload(context.Background()),
|
||||||
)
|
)
|
||||||
|
|
||||||
handler, _ := NewHandler(Config{
|
handler, _ := NewHandler(Config{
|
||||||
|
@ -105,8 +106,8 @@ func TestPatch(t *testing.T) {
|
||||||
upload := NewMockFullUpload(ctrl)
|
upload := NewMockFullUpload(ctrl)
|
||||||
|
|
||||||
gomock.InOrder(
|
gomock.InOrder(
|
||||||
store.EXPECT().GetUpload("yes").Return(upload, nil),
|
store.EXPECT().GetUpload(context.Background(), "yes").Return(upload, nil),
|
||||||
upload.EXPECT().GetInfo().Return(FileInfo{
|
upload.EXPECT().GetInfo(context.Background()).Return(FileInfo{
|
||||||
ID: "yes",
|
ID: "yes",
|
||||||
Offset: 20,
|
Offset: 20,
|
||||||
Size: 20,
|
Size: 20,
|
||||||
|
@ -134,7 +135,7 @@ func TestPatch(t *testing.T) {
|
||||||
})
|
})
|
||||||
|
|
||||||
SubTest(t, "UploadNotFoundFail", func(t *testing.T, store *MockFullDataStore, composer *StoreComposer) {
|
SubTest(t, "UploadNotFoundFail", func(t *testing.T, store *MockFullDataStore, composer *StoreComposer) {
|
||||||
store.EXPECT().GetUpload("no").Return(nil, os.ErrNotExist)
|
store.EXPECT().GetUpload(context.Background(), "no").Return(nil, os.ErrNotExist)
|
||||||
|
|
||||||
handler, _ := NewHandler(Config{
|
handler, _ := NewHandler(Config{
|
||||||
StoreComposer: composer,
|
StoreComposer: composer,
|
||||||
|
@ -158,8 +159,8 @@ func TestPatch(t *testing.T) {
|
||||||
upload := NewMockFullUpload(ctrl)
|
upload := NewMockFullUpload(ctrl)
|
||||||
|
|
||||||
gomock.InOrder(
|
gomock.InOrder(
|
||||||
store.EXPECT().GetUpload("yes").Return(upload, nil),
|
store.EXPECT().GetUpload(context.Background(), "yes").Return(upload, nil),
|
||||||
upload.EXPECT().GetInfo().Return(FileInfo{
|
upload.EXPECT().GetInfo(context.Background()).Return(FileInfo{
|
||||||
ID: "yes",
|
ID: "yes",
|
||||||
Offset: 5,
|
Offset: 5,
|
||||||
}, nil),
|
}, nil),
|
||||||
|
@ -187,8 +188,8 @@ func TestPatch(t *testing.T) {
|
||||||
upload := NewMockFullUpload(ctrl)
|
upload := NewMockFullUpload(ctrl)
|
||||||
|
|
||||||
gomock.InOrder(
|
gomock.InOrder(
|
||||||
store.EXPECT().GetUpload("yes").Return(upload, nil),
|
store.EXPECT().GetUpload(context.Background(), "yes").Return(upload, nil),
|
||||||
upload.EXPECT().GetInfo().Return(FileInfo{
|
upload.EXPECT().GetInfo(context.Background()).Return(FileInfo{
|
||||||
ID: "yes",
|
ID: "yes",
|
||||||
Offset: 5,
|
Offset: 5,
|
||||||
Size: 10,
|
Size: 10,
|
||||||
|
@ -261,14 +262,14 @@ func TestPatch(t *testing.T) {
|
||||||
upload := NewMockFullUpload(ctrl)
|
upload := NewMockFullUpload(ctrl)
|
||||||
|
|
||||||
gomock.InOrder(
|
gomock.InOrder(
|
||||||
store.EXPECT().GetUpload("yes").Return(upload, nil),
|
store.EXPECT().GetUpload(context.Background(), "yes").Return(upload, nil),
|
||||||
upload.EXPECT().GetInfo().Return(FileInfo{
|
upload.EXPECT().GetInfo(context.Background()).Return(FileInfo{
|
||||||
ID: "yes",
|
ID: "yes",
|
||||||
Offset: 5,
|
Offset: 5,
|
||||||
Size: 20,
|
Size: 20,
|
||||||
}, nil),
|
}, nil),
|
||||||
upload.EXPECT().WriteChunk(int64(5), NewReaderMatcher("hellothisismore")).Return(int64(15), nil),
|
upload.EXPECT().WriteChunk(context.Background(), int64(5), NewReaderMatcher("hellothisismore")).Return(int64(15), nil),
|
||||||
upload.EXPECT().FinishUpload(),
|
upload.EXPECT().FinishUpload(context.Background()),
|
||||||
)
|
)
|
||||||
|
|
||||||
handler, _ := NewHandler(Config{
|
handler, _ := NewHandler(Config{
|
||||||
|
@ -303,17 +304,17 @@ func TestPatch(t *testing.T) {
|
||||||
upload := NewMockFullUpload(ctrl)
|
upload := NewMockFullUpload(ctrl)
|
||||||
|
|
||||||
gomock.InOrder(
|
gomock.InOrder(
|
||||||
store.EXPECT().GetUpload("yes").Return(upload, nil),
|
store.EXPECT().GetUpload(context.Background(), "yes").Return(upload, nil),
|
||||||
upload.EXPECT().GetInfo().Return(FileInfo{
|
upload.EXPECT().GetInfo(context.Background()).Return(FileInfo{
|
||||||
ID: "yes",
|
ID: "yes",
|
||||||
Offset: 5,
|
Offset: 5,
|
||||||
Size: 0,
|
Size: 0,
|
||||||
SizeIsDeferred: true,
|
SizeIsDeferred: true,
|
||||||
}, nil),
|
}, nil),
|
||||||
store.EXPECT().AsLengthDeclarableUpload(upload).Return(upload),
|
store.EXPECT().AsLengthDeclarableUpload(upload).Return(upload),
|
||||||
upload.EXPECT().DeclareLength(int64(20)),
|
upload.EXPECT().DeclareLength(context.Background(), int64(20)),
|
||||||
upload.EXPECT().WriteChunk(int64(5), NewReaderMatcher("hellothisismore")).Return(int64(15), nil),
|
upload.EXPECT().WriteChunk(context.Background(), int64(5), NewReaderMatcher("hellothisismore")).Return(int64(15), nil),
|
||||||
upload.EXPECT().FinishUpload(),
|
upload.EXPECT().FinishUpload(context.Background()),
|
||||||
)
|
)
|
||||||
|
|
||||||
handler, _ := NewHandler(Config{
|
handler, _ := NewHandler(Config{
|
||||||
|
@ -346,16 +347,16 @@ func TestPatch(t *testing.T) {
|
||||||
upload := NewMockFullUpload(ctrl)
|
upload := NewMockFullUpload(ctrl)
|
||||||
|
|
||||||
gomock.InOrder(
|
gomock.InOrder(
|
||||||
store.EXPECT().GetUpload("yes").Return(upload, nil),
|
store.EXPECT().GetUpload(context.Background(), "yes").Return(upload, nil),
|
||||||
upload.EXPECT().GetInfo().Return(FileInfo{
|
upload.EXPECT().GetInfo(context.Background()).Return(FileInfo{
|
||||||
ID: "yes",
|
ID: "yes",
|
||||||
Offset: 20,
|
Offset: 20,
|
||||||
Size: 0,
|
Size: 0,
|
||||||
SizeIsDeferred: true,
|
SizeIsDeferred: true,
|
||||||
}, nil),
|
}, nil),
|
||||||
store.EXPECT().AsLengthDeclarableUpload(upload).Return(upload),
|
store.EXPECT().AsLengthDeclarableUpload(upload).Return(upload),
|
||||||
upload.EXPECT().DeclareLength(int64(20)),
|
upload.EXPECT().DeclareLength(context.Background(), int64(20)),
|
||||||
upload.EXPECT().FinishUpload(),
|
upload.EXPECT().FinishUpload(context.Background()),
|
||||||
)
|
)
|
||||||
|
|
||||||
handler, _ := NewHandler(Config{
|
handler, _ := NewHandler(Config{
|
||||||
|
@ -385,26 +386,26 @@ func TestPatch(t *testing.T) {
|
||||||
upload2 := NewMockFullUpload(ctrl)
|
upload2 := NewMockFullUpload(ctrl)
|
||||||
|
|
||||||
gomock.InOrder(
|
gomock.InOrder(
|
||||||
store.EXPECT().GetUpload("yes").Return(upload1, nil),
|
store.EXPECT().GetUpload(context.Background(), "yes").Return(upload1, nil),
|
||||||
upload1.EXPECT().GetInfo().Return(FileInfo{
|
upload1.EXPECT().GetInfo(context.Background()).Return(FileInfo{
|
||||||
ID: "yes",
|
ID: "yes",
|
||||||
Offset: 5,
|
Offset: 5,
|
||||||
Size: 0,
|
Size: 0,
|
||||||
SizeIsDeferred: true,
|
SizeIsDeferred: true,
|
||||||
}, nil),
|
}, nil),
|
||||||
store.EXPECT().AsLengthDeclarableUpload(upload1).Return(upload1),
|
store.EXPECT().AsLengthDeclarableUpload(upload1).Return(upload1),
|
||||||
upload1.EXPECT().DeclareLength(int64(20)),
|
upload1.EXPECT().DeclareLength(context.Background(), int64(20)),
|
||||||
upload1.EXPECT().WriteChunk(int64(5), NewReaderMatcher("hello")).Return(int64(5), nil),
|
upload1.EXPECT().WriteChunk(context.Background(), int64(5), NewReaderMatcher("hello")).Return(int64(5), nil),
|
||||||
|
|
||||||
store.EXPECT().GetUpload("yes").Return(upload2, nil),
|
store.EXPECT().GetUpload(context.Background(), "yes").Return(upload2, nil),
|
||||||
upload2.EXPECT().GetInfo().Return(FileInfo{
|
upload2.EXPECT().GetInfo(context.Background()).Return(FileInfo{
|
||||||
ID: "yes",
|
ID: "yes",
|
||||||
Offset: 10,
|
Offset: 10,
|
||||||
Size: 20,
|
Size: 20,
|
||||||
SizeIsDeferred: false,
|
SizeIsDeferred: false,
|
||||||
}, nil),
|
}, nil),
|
||||||
upload2.EXPECT().WriteChunk(int64(10), NewReaderMatcher("thisismore")).Return(int64(10), nil),
|
upload2.EXPECT().WriteChunk(context.Background(), int64(10), NewReaderMatcher("thisismore")).Return(int64(10), nil),
|
||||||
upload2.EXPECT().FinishUpload(),
|
upload2.EXPECT().FinishUpload(context.Background()),
|
||||||
)
|
)
|
||||||
|
|
||||||
handler, _ := NewHandler(Config{
|
handler, _ := NewHandler(Config{
|
||||||
|
@ -454,13 +455,13 @@ func TestPatch(t *testing.T) {
|
||||||
gomock.InOrder(
|
gomock.InOrder(
|
||||||
locker.EXPECT().NewLock("yes").Return(lock, nil),
|
locker.EXPECT().NewLock("yes").Return(lock, nil),
|
||||||
lock.EXPECT().Lock().Return(nil),
|
lock.EXPECT().Lock().Return(nil),
|
||||||
store.EXPECT().GetUpload("yes").Return(upload, nil),
|
store.EXPECT().GetUpload(context.Background(), "yes").Return(upload, nil),
|
||||||
upload.EXPECT().GetInfo().Return(FileInfo{
|
upload.EXPECT().GetInfo(context.Background()).Return(FileInfo{
|
||||||
ID: "yes",
|
ID: "yes",
|
||||||
Offset: 0,
|
Offset: 0,
|
||||||
Size: 20,
|
Size: 20,
|
||||||
}, nil),
|
}, nil),
|
||||||
upload.EXPECT().WriteChunk(int64(0), NewReaderMatcher("hello")).Return(int64(5), nil),
|
upload.EXPECT().WriteChunk(context.Background(), int64(0), NewReaderMatcher("hello")).Return(int64(5), nil),
|
||||||
lock.EXPECT().Unlock().Return(nil),
|
lock.EXPECT().Unlock().Return(nil),
|
||||||
)
|
)
|
||||||
|
|
||||||
|
@ -491,13 +492,13 @@ func TestPatch(t *testing.T) {
|
||||||
upload := NewMockFullUpload(ctrl)
|
upload := NewMockFullUpload(ctrl)
|
||||||
|
|
||||||
gomock.InOrder(
|
gomock.InOrder(
|
||||||
store.EXPECT().GetUpload("yes").Return(upload, nil),
|
store.EXPECT().GetUpload(context.Background(), "yes").Return(upload, nil),
|
||||||
upload.EXPECT().GetInfo().Return(FileInfo{
|
upload.EXPECT().GetInfo(context.Background()).Return(FileInfo{
|
||||||
ID: "yes",
|
ID: "yes",
|
||||||
Offset: 0,
|
Offset: 0,
|
||||||
Size: 100,
|
Size: 100,
|
||||||
}, nil),
|
}, nil),
|
||||||
upload.EXPECT().WriteChunk(int64(0), NewReaderMatcher("first second third")).Return(int64(18), nil),
|
upload.EXPECT().WriteChunk(context.Background(), int64(0), NewReaderMatcher("first second third")).Return(int64(18), nil),
|
||||||
)
|
)
|
||||||
|
|
||||||
handler, _ := NewHandler(Config{
|
handler, _ := NewHandler(Config{
|
||||||
|
@ -563,15 +564,15 @@ func TestPatch(t *testing.T) {
|
||||||
upload := NewMockFullUpload(ctrl)
|
upload := NewMockFullUpload(ctrl)
|
||||||
|
|
||||||
gomock.InOrder(
|
gomock.InOrder(
|
||||||
store.EXPECT().GetUpload("yes").Return(upload, nil),
|
store.EXPECT().GetUpload(context.Background(), "yes").Return(upload, nil),
|
||||||
upload.EXPECT().GetInfo().Return(FileInfo{
|
upload.EXPECT().GetInfo(context.Background()).Return(FileInfo{
|
||||||
ID: "yes",
|
ID: "yes",
|
||||||
Offset: 0,
|
Offset: 0,
|
||||||
Size: 100,
|
Size: 100,
|
||||||
}, nil),
|
}, nil),
|
||||||
upload.EXPECT().WriteChunk(int64(0), NewReaderMatcher("first ")).Return(int64(6), http.ErrBodyReadAfterClose),
|
upload.EXPECT().WriteChunk(context.Background(), int64(0), NewReaderMatcher("first ")).Return(int64(6), http.ErrBodyReadAfterClose),
|
||||||
store.EXPECT().AsTerminatableUpload(upload).Return(upload),
|
store.EXPECT().AsTerminatableUpload(upload).Return(upload),
|
||||||
upload.EXPECT().Terminate(),
|
upload.EXPECT().Terminate(context.Background()),
|
||||||
)
|
)
|
||||||
|
|
||||||
handler, _ := NewHandler(Config{
|
handler, _ := NewHandler(Config{
|
||||||
|
|
|
@ -2,6 +2,7 @@ package handler_test
|
||||||
|
|
||||||
import (
|
import (
|
||||||
"bytes"
|
"bytes"
|
||||||
|
"context"
|
||||||
"net/http"
|
"net/http"
|
||||||
"strings"
|
"strings"
|
||||||
"testing"
|
"testing"
|
||||||
|
@ -19,14 +20,14 @@ func TestPost(t *testing.T) {
|
||||||
upload := NewMockFullUpload(ctrl)
|
upload := NewMockFullUpload(ctrl)
|
||||||
|
|
||||||
gomock.InOrder(
|
gomock.InOrder(
|
||||||
store.EXPECT().NewUpload(FileInfo{
|
store.EXPECT().NewUpload(context.Background(), FileInfo{
|
||||||
Size: 300,
|
Size: 300,
|
||||||
MetaData: map[string]string{
|
MetaData: map[string]string{
|
||||||
"foo": "hello",
|
"foo": "hello",
|
||||||
"bar": "world",
|
"bar": "world",
|
||||||
},
|
},
|
||||||
}).Return(upload, nil),
|
}).Return(upload, nil),
|
||||||
upload.EXPECT().GetInfo().Return(FileInfo{
|
upload.EXPECT().GetInfo(context.Background()).Return(FileInfo{
|
||||||
ID: "foo",
|
ID: "foo",
|
||||||
Size: 300,
|
Size: 300,
|
||||||
MetaData: map[string]string{
|
MetaData: map[string]string{
|
||||||
|
@ -72,16 +73,16 @@ func TestPost(t *testing.T) {
|
||||||
upload := NewMockFullUpload(ctrl)
|
upload := NewMockFullUpload(ctrl)
|
||||||
|
|
||||||
gomock.InOrder(
|
gomock.InOrder(
|
||||||
store.EXPECT().NewUpload(FileInfo{
|
store.EXPECT().NewUpload(context.Background(), FileInfo{
|
||||||
Size: 0,
|
Size: 0,
|
||||||
MetaData: map[string]string{},
|
MetaData: map[string]string{},
|
||||||
}).Return(upload, nil),
|
}).Return(upload, nil),
|
||||||
upload.EXPECT().GetInfo().Return(FileInfo{
|
upload.EXPECT().GetInfo(context.Background()).Return(FileInfo{
|
||||||
ID: "foo",
|
ID: "foo",
|
||||||
Size: 0,
|
Size: 0,
|
||||||
MetaData: map[string]string{},
|
MetaData: map[string]string{},
|
||||||
}, nil),
|
}, nil),
|
||||||
upload.EXPECT().FinishUpload().Return(nil),
|
upload.EXPECT().FinishUpload(context.Background()).Return(nil),
|
||||||
)
|
)
|
||||||
|
|
||||||
handler, _ := NewHandler(Config{
|
handler, _ := NewHandler(Config{
|
||||||
|
@ -202,11 +203,11 @@ func TestPost(t *testing.T) {
|
||||||
upload := NewMockFullUpload(ctrl)
|
upload := NewMockFullUpload(ctrl)
|
||||||
|
|
||||||
gomock.InOrder(
|
gomock.InOrder(
|
||||||
store.EXPECT().NewUpload(FileInfo{
|
store.EXPECT().NewUpload(context.Background(), FileInfo{
|
||||||
Size: 300,
|
Size: 300,
|
||||||
MetaData: map[string]string{},
|
MetaData: map[string]string{},
|
||||||
}).Return(upload, nil),
|
}).Return(upload, nil),
|
||||||
upload.EXPECT().GetInfo().Return(FileInfo{
|
upload.EXPECT().GetInfo(context.Background()).Return(FileInfo{
|
||||||
ID: "foo",
|
ID: "foo",
|
||||||
Size: 300,
|
Size: 300,
|
||||||
MetaData: map[string]string{},
|
MetaData: map[string]string{},
|
||||||
|
@ -239,11 +240,11 @@ func TestPost(t *testing.T) {
|
||||||
upload := NewMockFullUpload(ctrl)
|
upload := NewMockFullUpload(ctrl)
|
||||||
|
|
||||||
gomock.InOrder(
|
gomock.InOrder(
|
||||||
store.EXPECT().NewUpload(FileInfo{
|
store.EXPECT().NewUpload(context.Background(), FileInfo{
|
||||||
Size: 300,
|
Size: 300,
|
||||||
MetaData: map[string]string{},
|
MetaData: map[string]string{},
|
||||||
}).Return(upload, nil),
|
}).Return(upload, nil),
|
||||||
upload.EXPECT().GetInfo().Return(FileInfo{
|
upload.EXPECT().GetInfo(context.Background()).Return(FileInfo{
|
||||||
ID: "foo",
|
ID: "foo",
|
||||||
Size: 300,
|
Size: 300,
|
||||||
MetaData: map[string]string{},
|
MetaData: map[string]string{},
|
||||||
|
@ -277,11 +278,11 @@ func TestPost(t *testing.T) {
|
||||||
upload := NewMockFullUpload(ctrl)
|
upload := NewMockFullUpload(ctrl)
|
||||||
|
|
||||||
gomock.InOrder(
|
gomock.InOrder(
|
||||||
store.EXPECT().NewUpload(FileInfo{
|
store.EXPECT().NewUpload(context.Background(), FileInfo{
|
||||||
Size: 300,
|
Size: 300,
|
||||||
MetaData: map[string]string{},
|
MetaData: map[string]string{},
|
||||||
}).Return(upload, nil),
|
}).Return(upload, nil),
|
||||||
upload.EXPECT().GetInfo().Return(FileInfo{
|
upload.EXPECT().GetInfo(context.Background()).Return(FileInfo{
|
||||||
ID: "foo",
|
ID: "foo",
|
||||||
Size: 300,
|
Size: 300,
|
||||||
MetaData: map[string]string{},
|
MetaData: map[string]string{},
|
||||||
|
@ -316,11 +317,11 @@ func TestPost(t *testing.T) {
|
||||||
upload := NewMockFullUpload(ctrl)
|
upload := NewMockFullUpload(ctrl)
|
||||||
|
|
||||||
gomock.InOrder(
|
gomock.InOrder(
|
||||||
store.EXPECT().NewUpload(FileInfo{
|
store.EXPECT().NewUpload(context.Background(), FileInfo{
|
||||||
Size: 300,
|
Size: 300,
|
||||||
MetaData: map[string]string{},
|
MetaData: map[string]string{},
|
||||||
}).Return(upload, nil),
|
}).Return(upload, nil),
|
||||||
upload.EXPECT().GetInfo().Return(FileInfo{
|
upload.EXPECT().GetInfo(context.Background()).Return(FileInfo{
|
||||||
ID: "foo",
|
ID: "foo",
|
||||||
Size: 300,
|
Size: 300,
|
||||||
MetaData: map[string]string{},
|
MetaData: map[string]string{},
|
||||||
|
@ -358,14 +359,14 @@ func TestPost(t *testing.T) {
|
||||||
upload := NewMockFullUpload(ctrl)
|
upload := NewMockFullUpload(ctrl)
|
||||||
|
|
||||||
gomock.InOrder(
|
gomock.InOrder(
|
||||||
store.EXPECT().NewUpload(FileInfo{
|
store.EXPECT().NewUpload(context.Background(), FileInfo{
|
||||||
Size: 300,
|
Size: 300,
|
||||||
MetaData: map[string]string{
|
MetaData: map[string]string{
|
||||||
"foo": "hello",
|
"foo": "hello",
|
||||||
"bar": "world",
|
"bar": "world",
|
||||||
},
|
},
|
||||||
}).Return(upload, nil),
|
}).Return(upload, nil),
|
||||||
upload.EXPECT().GetInfo().Return(FileInfo{
|
upload.EXPECT().GetInfo(context.Background()).Return(FileInfo{
|
||||||
ID: "foo",
|
ID: "foo",
|
||||||
Size: 300,
|
Size: 300,
|
||||||
MetaData: map[string]string{
|
MetaData: map[string]string{
|
||||||
|
@ -375,7 +376,7 @@ func TestPost(t *testing.T) {
|
||||||
}, nil),
|
}, nil),
|
||||||
locker.EXPECT().NewLock("foo").Return(lock, nil),
|
locker.EXPECT().NewLock("foo").Return(lock, nil),
|
||||||
lock.EXPECT().Lock().Return(nil),
|
lock.EXPECT().Lock().Return(nil),
|
||||||
upload.EXPECT().WriteChunk(int64(0), NewReaderMatcher("hello")).Return(int64(5), nil),
|
upload.EXPECT().WriteChunk(context.Background(), int64(0), NewReaderMatcher("hello")).Return(int64(5), nil),
|
||||||
lock.EXPECT().Unlock().Return(nil),
|
lock.EXPECT().Unlock().Return(nil),
|
||||||
)
|
)
|
||||||
|
|
||||||
|
@ -411,11 +412,11 @@ func TestPost(t *testing.T) {
|
||||||
upload := NewMockFullUpload(ctrl)
|
upload := NewMockFullUpload(ctrl)
|
||||||
|
|
||||||
gomock.InOrder(
|
gomock.InOrder(
|
||||||
store.EXPECT().NewUpload(FileInfo{
|
store.EXPECT().NewUpload(context.Background(), FileInfo{
|
||||||
Size: 300,
|
Size: 300,
|
||||||
MetaData: map[string]string{},
|
MetaData: map[string]string{},
|
||||||
}).Return(upload, nil),
|
}).Return(upload, nil),
|
||||||
upload.EXPECT().GetInfo().Return(FileInfo{
|
upload.EXPECT().GetInfo(context.Background()).Return(FileInfo{
|
||||||
ID: "foo",
|
ID: "foo",
|
||||||
Size: 300,
|
Size: 300,
|
||||||
MetaData: map[string]string{},
|
MetaData: map[string]string{},
|
||||||
|
@ -445,11 +446,11 @@ func TestPost(t *testing.T) {
|
||||||
upload := NewMockFullUpload(ctrl)
|
upload := NewMockFullUpload(ctrl)
|
||||||
|
|
||||||
gomock.InOrder(
|
gomock.InOrder(
|
||||||
store.EXPECT().NewUpload(FileInfo{
|
store.EXPECT().NewUpload(context.Background(), FileInfo{
|
||||||
Size: 300,
|
Size: 300,
|
||||||
MetaData: map[string]string{},
|
MetaData: map[string]string{},
|
||||||
}).Return(upload, nil),
|
}).Return(upload, nil),
|
||||||
upload.EXPECT().GetInfo().Return(FileInfo{
|
upload.EXPECT().GetInfo(context.Background()).Return(FileInfo{
|
||||||
ID: "foo",
|
ID: "foo",
|
||||||
Size: 300,
|
Size: 300,
|
||||||
MetaData: map[string]string{},
|
MetaData: map[string]string{},
|
||||||
|
|
|
@ -1,6 +1,7 @@
|
||||||
package handler_test
|
package handler_test
|
||||||
|
|
||||||
import (
|
import (
|
||||||
|
"context"
|
||||||
"net/http"
|
"net/http"
|
||||||
"testing"
|
"testing"
|
||||||
|
|
||||||
|
@ -39,13 +40,13 @@ func TestTerminate(t *testing.T) {
|
||||||
gomock.InOrder(
|
gomock.InOrder(
|
||||||
locker.EXPECT().NewLock("foo").Return(lock, nil),
|
locker.EXPECT().NewLock("foo").Return(lock, nil),
|
||||||
lock.EXPECT().Lock().Return(nil),
|
lock.EXPECT().Lock().Return(nil),
|
||||||
store.EXPECT().GetUpload("foo").Return(upload, nil),
|
store.EXPECT().GetUpload(context.Background(), "foo").Return(upload, nil),
|
||||||
upload.EXPECT().GetInfo().Return(FileInfo{
|
upload.EXPECT().GetInfo(context.Background()).Return(FileInfo{
|
||||||
ID: "foo",
|
ID: "foo",
|
||||||
Size: 10,
|
Size: 10,
|
||||||
}, nil),
|
}, nil),
|
||||||
store.EXPECT().AsTerminatableUpload(upload).Return(upload),
|
store.EXPECT().AsTerminatableUpload(upload).Return(upload),
|
||||||
upload.EXPECT().Terminate().Return(nil),
|
upload.EXPECT().Terminate(context.Background()).Return(nil),
|
||||||
lock.EXPECT().Unlock().Return(nil),
|
lock.EXPECT().Unlock().Return(nil),
|
||||||
)
|
)
|
||||||
|
|
||||||
|
|
|
@ -240,6 +240,8 @@ func (handler *UnroutedHandler) Middleware(h http.Handler) http.Handler {
|
||||||
// PostFile creates a new file upload using the datastore after validating the
|
// PostFile creates a new file upload using the datastore after validating the
|
||||||
// length and parsing the metadata.
|
// length and parsing the metadata.
|
||||||
func (handler *UnroutedHandler) PostFile(w http.ResponseWriter, r *http.Request) {
|
func (handler *UnroutedHandler) PostFile(w http.ResponseWriter, r *http.Request) {
|
||||||
|
ctx := r.Context()
|
||||||
|
|
||||||
// Check for presence of application/offset+octet-stream. If another content
|
// Check for presence of application/offset+octet-stream. If another content
|
||||||
// type is defined, it will be ignored and treated as none was set because
|
// type is defined, it will be ignored and treated as none was set because
|
||||||
// some HTTP clients may enforce a default value for this header.
|
// some HTTP clients may enforce a default value for this header.
|
||||||
|
@ -271,7 +273,7 @@ func (handler *UnroutedHandler) PostFile(w http.ResponseWriter, r *http.Request)
|
||||||
return
|
return
|
||||||
}
|
}
|
||||||
|
|
||||||
size, err = handler.sizeOfUploads(partialUploads)
|
size, err = handler.sizeOfUploads(ctx, partialUploads)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
handler.sendError(w, r, err)
|
handler.sendError(w, r, err)
|
||||||
return
|
return
|
||||||
|
@ -304,13 +306,13 @@ func (handler *UnroutedHandler) PostFile(w http.ResponseWriter, r *http.Request)
|
||||||
PartialUploads: partialUploads,
|
PartialUploads: partialUploads,
|
||||||
}
|
}
|
||||||
|
|
||||||
upload, err := handler.composer.Core.NewUpload(info)
|
upload, err := handler.composer.Core.NewUpload(ctx, info)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
handler.sendError(w, r, err)
|
handler.sendError(w, r, err)
|
||||||
return
|
return
|
||||||
}
|
}
|
||||||
|
|
||||||
info, err = upload.GetInfo()
|
info, err = upload.GetInfo(ctx)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
handler.sendError(w, r, err)
|
handler.sendError(w, r, err)
|
||||||
return
|
return
|
||||||
|
@ -331,7 +333,7 @@ func (handler *UnroutedHandler) PostFile(w http.ResponseWriter, r *http.Request)
|
||||||
}
|
}
|
||||||
|
|
||||||
if isFinal {
|
if isFinal {
|
||||||
if err := handler.composer.Concater.ConcatUploads(id, partialUploads); err != nil {
|
if err := handler.composer.Concater.ConcatUploads(ctx, id, partialUploads); err != nil {
|
||||||
handler.sendError(w, r, err)
|
handler.sendError(w, r, err)
|
||||||
return
|
return
|
||||||
}
|
}
|
||||||
|
@ -361,7 +363,7 @@ func (handler *UnroutedHandler) PostFile(w http.ResponseWriter, r *http.Request)
|
||||||
// Directly finish the upload if the upload is empty (i.e. has a size of 0).
|
// Directly finish the upload if the upload is empty (i.e. has a size of 0).
|
||||||
// This statement is in an else-if block to avoid causing duplicate calls
|
// This statement is in an else-if block to avoid causing duplicate calls
|
||||||
// to finishUploadIfComplete if an upload is empty and contains a chunk.
|
// to finishUploadIfComplete if an upload is empty and contains a chunk.
|
||||||
handler.finishUploadIfComplete(upload, info)
|
handler.finishUploadIfComplete(ctx, upload, info)
|
||||||
}
|
}
|
||||||
|
|
||||||
handler.sendResp(w, r, http.StatusCreated)
|
handler.sendResp(w, r, http.StatusCreated)
|
||||||
|
@ -369,6 +371,7 @@ func (handler *UnroutedHandler) PostFile(w http.ResponseWriter, r *http.Request)
|
||||||
|
|
||||||
// HeadFile returns the length and offset for the HEAD request
|
// HeadFile returns the length and offset for the HEAD request
|
||||||
func (handler *UnroutedHandler) HeadFile(w http.ResponseWriter, r *http.Request) {
|
func (handler *UnroutedHandler) HeadFile(w http.ResponseWriter, r *http.Request) {
|
||||||
|
ctx := r.Context()
|
||||||
|
|
||||||
id, err := extractIDFromPath(r.URL.Path)
|
id, err := extractIDFromPath(r.URL.Path)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
|
@ -386,13 +389,13 @@ func (handler *UnroutedHandler) HeadFile(w http.ResponseWriter, r *http.Request)
|
||||||
defer lock.Unlock()
|
defer lock.Unlock()
|
||||||
}
|
}
|
||||||
|
|
||||||
upload, err := handler.composer.Core.GetUpload(id)
|
upload, err := handler.composer.Core.GetUpload(ctx, id)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
handler.sendError(w, r, err)
|
handler.sendError(w, r, err)
|
||||||
return
|
return
|
||||||
}
|
}
|
||||||
|
|
||||||
info, err := upload.GetInfo()
|
info, err := upload.GetInfo(ctx)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
handler.sendError(w, r, err)
|
handler.sendError(w, r, err)
|
||||||
return
|
return
|
||||||
|
@ -432,6 +435,7 @@ func (handler *UnroutedHandler) HeadFile(w http.ResponseWriter, r *http.Request)
|
||||||
// PatchFile adds a chunk to an upload. This operation is only allowed
|
// PatchFile adds a chunk to an upload. This operation is only allowed
|
||||||
// if enough space in the upload is left.
|
// if enough space in the upload is left.
|
||||||
func (handler *UnroutedHandler) PatchFile(w http.ResponseWriter, r *http.Request) {
|
func (handler *UnroutedHandler) PatchFile(w http.ResponseWriter, r *http.Request) {
|
||||||
|
ctx := r.Context()
|
||||||
|
|
||||||
// Check for presence of application/offset+octet-stream
|
// Check for presence of application/offset+octet-stream
|
||||||
if r.Header.Get("Content-Type") != "application/offset+octet-stream" {
|
if r.Header.Get("Content-Type") != "application/offset+octet-stream" {
|
||||||
|
@ -462,13 +466,13 @@ func (handler *UnroutedHandler) PatchFile(w http.ResponseWriter, r *http.Request
|
||||||
defer lock.Unlock()
|
defer lock.Unlock()
|
||||||
}
|
}
|
||||||
|
|
||||||
upload, err := handler.composer.Core.GetUpload(id)
|
upload, err := handler.composer.Core.GetUpload(ctx, id)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
handler.sendError(w, r, err)
|
handler.sendError(w, r, err)
|
||||||
return
|
return
|
||||||
}
|
}
|
||||||
|
|
||||||
info, err := upload.GetInfo()
|
info, err := upload.GetInfo(ctx)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
handler.sendError(w, r, err)
|
handler.sendError(w, r, err)
|
||||||
return
|
return
|
||||||
|
@ -508,7 +512,7 @@ func (handler *UnroutedHandler) PatchFile(w http.ResponseWriter, r *http.Request
|
||||||
}
|
}
|
||||||
|
|
||||||
lengthDeclarableUpload := handler.composer.LengthDeferrer.AsLengthDeclarableUpload(upload)
|
lengthDeclarableUpload := handler.composer.LengthDeferrer.AsLengthDeclarableUpload(upload)
|
||||||
if err := lengthDeclarableUpload.DeclareLength(uploadLength); err != nil {
|
if err := lengthDeclarableUpload.DeclareLength(ctx, uploadLength); err != nil {
|
||||||
handler.sendError(w, r, err)
|
handler.sendError(w, r, err)
|
||||||
return
|
return
|
||||||
}
|
}
|
||||||
|
@ -529,6 +533,8 @@ func (handler *UnroutedHandler) PatchFile(w http.ResponseWriter, r *http.Request
|
||||||
// with the corresponding id. Afterwards, it will set the necessary response
|
// with the corresponding id. Afterwards, it will set the necessary response
|
||||||
// headers but will not send the response.
|
// headers but will not send the response.
|
||||||
func (handler *UnroutedHandler) writeChunk(upload Upload, info FileInfo, w http.ResponseWriter, r *http.Request) error {
|
func (handler *UnroutedHandler) writeChunk(upload Upload, info FileInfo, w http.ResponseWriter, r *http.Request) error {
|
||||||
|
ctx := r.Context()
|
||||||
|
|
||||||
// Get Content-Length if possible
|
// Get Content-Length if possible
|
||||||
length := r.ContentLength
|
length := r.ContentLength
|
||||||
offset := info.Offset
|
offset := info.Offset
|
||||||
|
@ -589,9 +595,9 @@ func (handler *UnroutedHandler) writeChunk(upload Upload, info FileInfo, w http.
|
||||||
}
|
}
|
||||||
|
|
||||||
var err error
|
var err error
|
||||||
bytesWritten, err = upload.WriteChunk(offset, reader)
|
bytesWritten, err = upload.WriteChunk(ctx, offset, reader)
|
||||||
if terminateUpload && handler.composer.UsesTerminater {
|
if terminateUpload && handler.composer.UsesTerminater {
|
||||||
if terminateErr := handler.terminateUpload(upload, info); terminateErr != nil {
|
if terminateErr := handler.terminateUpload(ctx, upload, info); terminateErr != nil {
|
||||||
// We only log this error and not show it to the user since this
|
// We only log this error and not show it to the user since this
|
||||||
// termination error is not relevant to the uploading client
|
// termination error is not relevant to the uploading client
|
||||||
handler.log("UploadStopTerminateError", "id", id, "error", terminateErr.Error())
|
handler.log("UploadStopTerminateError", "id", id, "error", terminateErr.Error())
|
||||||
|
@ -618,17 +624,17 @@ func (handler *UnroutedHandler) writeChunk(upload Upload, info FileInfo, w http.
|
||||||
handler.Metrics.incBytesReceived(uint64(bytesWritten))
|
handler.Metrics.incBytesReceived(uint64(bytesWritten))
|
||||||
info.Offset = newOffset
|
info.Offset = newOffset
|
||||||
|
|
||||||
return handler.finishUploadIfComplete(upload, info)
|
return handler.finishUploadIfComplete(ctx, upload, info)
|
||||||
}
|
}
|
||||||
|
|
||||||
// finishUploadIfComplete checks whether an upload is completed (i.e. upload offset
|
// finishUploadIfComplete checks whether an upload is completed (i.e. upload offset
|
||||||
// matches upload size) and if so, it will call the data store's FinishUpload
|
// matches upload size) and if so, it will call the data store's FinishUpload
|
||||||
// function and send the necessary message on the CompleteUpload channel.
|
// function and send the necessary message on the CompleteUpload channel.
|
||||||
func (handler *UnroutedHandler) finishUploadIfComplete(upload Upload, info FileInfo) error {
|
func (handler *UnroutedHandler) finishUploadIfComplete(ctx context.Context, upload Upload, info FileInfo) error {
|
||||||
// If the upload is completed, ...
|
// If the upload is completed, ...
|
||||||
if !info.SizeIsDeferred && info.Offset == info.Size {
|
if !info.SizeIsDeferred && info.Offset == info.Size {
|
||||||
// ... allow custom mechanism to finish and cleanup the upload
|
// ... allow custom mechanism to finish and cleanup the upload
|
||||||
if err := upload.FinishUpload(); err != nil {
|
if err := upload.FinishUpload(ctx); err != nil {
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@ -646,6 +652,8 @@ func (handler *UnroutedHandler) finishUploadIfComplete(upload Upload, info FileI
|
||||||
// GetFile handles requests to download a file using a GET request. This is not
|
// GetFile handles requests to download a file using a GET request. This is not
|
||||||
// part of the specification.
|
// part of the specification.
|
||||||
func (handler *UnroutedHandler) GetFile(w http.ResponseWriter, r *http.Request) {
|
func (handler *UnroutedHandler) GetFile(w http.ResponseWriter, r *http.Request) {
|
||||||
|
ctx := r.Context()
|
||||||
|
|
||||||
id, err := extractIDFromPath(r.URL.Path)
|
id, err := extractIDFromPath(r.URL.Path)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
handler.sendError(w, r, err)
|
handler.sendError(w, r, err)
|
||||||
|
@ -662,13 +670,13 @@ func (handler *UnroutedHandler) GetFile(w http.ResponseWriter, r *http.Request)
|
||||||
defer lock.Unlock()
|
defer lock.Unlock()
|
||||||
}
|
}
|
||||||
|
|
||||||
upload, err := handler.composer.Core.GetUpload(id)
|
upload, err := handler.composer.Core.GetUpload(ctx, id)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
handler.sendError(w, r, err)
|
handler.sendError(w, r, err)
|
||||||
return
|
return
|
||||||
}
|
}
|
||||||
|
|
||||||
info, err := upload.GetInfo()
|
info, err := upload.GetInfo(ctx)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
handler.sendError(w, r, err)
|
handler.sendError(w, r, err)
|
||||||
return
|
return
|
||||||
|
@ -687,7 +695,7 @@ func (handler *UnroutedHandler) GetFile(w http.ResponseWriter, r *http.Request)
|
||||||
return
|
return
|
||||||
}
|
}
|
||||||
|
|
||||||
src, err := upload.GetReader()
|
src, err := upload.GetReader(ctx)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
handler.sendError(w, r, err)
|
handler.sendError(w, r, err)
|
||||||
return
|
return
|
||||||
|
@ -765,6 +773,8 @@ func filterContentType(info FileInfo) (contentType string, contentDisposition st
|
||||||
|
|
||||||
// DelFile terminates an upload permanently.
|
// DelFile terminates an upload permanently.
|
||||||
func (handler *UnroutedHandler) DelFile(w http.ResponseWriter, r *http.Request) {
|
func (handler *UnroutedHandler) DelFile(w http.ResponseWriter, r *http.Request) {
|
||||||
|
ctx := r.Context()
|
||||||
|
|
||||||
// Abort the request handling if the required interface is not implemented
|
// Abort the request handling if the required interface is not implemented
|
||||||
if !handler.composer.UsesTerminater {
|
if !handler.composer.UsesTerminater {
|
||||||
handler.sendError(w, r, ErrNotImplemented)
|
handler.sendError(w, r, ErrNotImplemented)
|
||||||
|
@ -787,7 +797,7 @@ func (handler *UnroutedHandler) DelFile(w http.ResponseWriter, r *http.Request)
|
||||||
defer lock.Unlock()
|
defer lock.Unlock()
|
||||||
}
|
}
|
||||||
|
|
||||||
upload, err := handler.composer.Core.GetUpload(id)
|
upload, err := handler.composer.Core.GetUpload(ctx, id)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
handler.sendError(w, r, err)
|
handler.sendError(w, r, err)
|
||||||
return
|
return
|
||||||
|
@ -795,14 +805,14 @@ func (handler *UnroutedHandler) DelFile(w http.ResponseWriter, r *http.Request)
|
||||||
|
|
||||||
var info FileInfo
|
var info FileInfo
|
||||||
if handler.config.NotifyTerminatedUploads {
|
if handler.config.NotifyTerminatedUploads {
|
||||||
info, err = upload.GetInfo()
|
info, err = upload.GetInfo(ctx)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
handler.sendError(w, r, err)
|
handler.sendError(w, r, err)
|
||||||
return
|
return
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
err = handler.terminateUpload(upload, info)
|
err = handler.terminateUpload(ctx, upload, info)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
handler.sendError(w, r, err)
|
handler.sendError(w, r, err)
|
||||||
return
|
return
|
||||||
|
@ -816,10 +826,10 @@ func (handler *UnroutedHandler) DelFile(w http.ResponseWriter, r *http.Request)
|
||||||
// and updates the statistics.
|
// and updates the statistics.
|
||||||
// Note the the info argument is only needed if the terminated uploads
|
// Note the the info argument is only needed if the terminated uploads
|
||||||
// notifications are enabled.
|
// notifications are enabled.
|
||||||
func (handler *UnroutedHandler) terminateUpload(upload Upload, info FileInfo) error {
|
func (handler *UnroutedHandler) terminateUpload(ctx context.Context, upload Upload, info FileInfo) error {
|
||||||
terminatableUpload := handler.composer.Terminater.AsTerminatableUpload(upload)
|
terminatableUpload := handler.composer.Terminater.AsTerminatableUpload(upload)
|
||||||
|
|
||||||
err := terminatableUpload.Terminate()
|
err := terminatableUpload.Terminate(ctx)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
|
@ -983,14 +993,14 @@ func getHostAndProtocol(r *http.Request, allowForwarded bool) (host, proto strin
|
||||||
// The get sum of all sizes for a list of upload ids while checking whether
|
// The get sum of all sizes for a list of upload ids while checking whether
|
||||||
// all of these uploads are finished yet. This is used to calculate the size
|
// all of these uploads are finished yet. This is used to calculate the size
|
||||||
// of a final resource.
|
// of a final resource.
|
||||||
func (handler *UnroutedHandler) sizeOfUploads(ids []string) (size int64, err error) {
|
func (handler *UnroutedHandler) sizeOfUploads(ctx context.Context, ids []string) (size int64, err error) {
|
||||||
for _, id := range ids {
|
for _, id := range ids {
|
||||||
upload, err := handler.composer.Core.GetUpload(id)
|
upload, err := handler.composer.Core.GetUpload(ctx, id)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return size, err
|
return size, err
|
||||||
}
|
}
|
||||||
|
|
||||||
info, err := upload.GetInfo()
|
info, err := upload.GetInfo(ctx)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return size, err
|
return size, err
|
||||||
}
|
}
|
||||||
|
|
|
@ -70,6 +70,7 @@ package s3store
|
||||||
|
|
||||||
import (
|
import (
|
||||||
"bytes"
|
"bytes"
|
||||||
|
"context"
|
||||||
"encoding/json"
|
"encoding/json"
|
||||||
"errors"
|
"errors"
|
||||||
"fmt"
|
"fmt"
|
||||||
|
@ -171,7 +172,7 @@ type s3Upload struct {
|
||||||
store *S3Store
|
store *S3Store
|
||||||
}
|
}
|
||||||
|
|
||||||
func (store S3Store) NewUpload(info handler.FileInfo) (handler.Upload, error) {
|
func (store S3Store) NewUpload(ctx context.Context, info handler.FileInfo) (handler.Upload, error) {
|
||||||
// an upload larger than MaxObjectSize must throw an error
|
// an upload larger than MaxObjectSize must throw an error
|
||||||
if info.Size > store.MaxObjectSize {
|
if info.Size > store.MaxObjectSize {
|
||||||
return nil, fmt.Errorf("s3store: upload size of %v bytes exceeds MaxObjectSize of %v bytes", info.Size, store.MaxObjectSize)
|
return nil, fmt.Errorf("s3store: upload size of %v bytes exceeds MaxObjectSize of %v bytes", info.Size, store.MaxObjectSize)
|
||||||
|
@ -221,7 +222,7 @@ func (store S3Store) NewUpload(info handler.FileInfo) (handler.Upload, error) {
|
||||||
return &s3Upload{id, &store}, nil
|
return &s3Upload{id, &store}, nil
|
||||||
}
|
}
|
||||||
|
|
||||||
func (store S3Store) GetUpload(id string) (handler.Upload, error) {
|
func (store S3Store) GetUpload(ctx context.Context, id string) (handler.Upload, error) {
|
||||||
return &s3Upload{id, &store}, nil
|
return &s3Upload{id, &store}, nil
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@ -250,14 +251,14 @@ func (store S3Store) writeInfo(uploadId string, info handler.FileInfo) error {
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
|
|
||||||
func (upload s3Upload) WriteChunk(offset int64, src io.Reader) (int64, error) {
|
func (upload s3Upload) WriteChunk(ctx context.Context, offset int64, src io.Reader) (int64, error) {
|
||||||
id := upload.id
|
id := upload.id
|
||||||
store := upload.store
|
store := upload.store
|
||||||
|
|
||||||
uploadId, multipartId := splitIds(id)
|
uploadId, multipartId := splitIds(id)
|
||||||
|
|
||||||
// Get the total size of the current upload
|
// Get the total size of the current upload
|
||||||
info, err := upload.GetInfo()
|
info, err := upload.GetInfo(ctx)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return 0, err
|
return 0, err
|
||||||
}
|
}
|
||||||
|
@ -354,7 +355,7 @@ func (upload s3Upload) WriteChunk(offset int64, src io.Reader) (int64, error) {
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
func (upload s3Upload) GetInfo() (info handler.FileInfo, err error) {
|
func (upload s3Upload) GetInfo(ctx context.Context) (info handler.FileInfo, err error) {
|
||||||
id := upload.id
|
id := upload.id
|
||||||
store := upload.store
|
store := upload.store
|
||||||
uploadId, _ := splitIds(id)
|
uploadId, _ := splitIds(id)
|
||||||
|
@ -411,7 +412,7 @@ func (upload s3Upload) GetInfo() (info handler.FileInfo, err error) {
|
||||||
return
|
return
|
||||||
}
|
}
|
||||||
|
|
||||||
func (upload s3Upload) GetReader() (io.Reader, error) {
|
func (upload s3Upload) GetReader(ctx context.Context) (io.Reader, error) {
|
||||||
id := upload.id
|
id := upload.id
|
||||||
store := upload.store
|
store := upload.store
|
||||||
uploadId, multipartId := splitIds(id)
|
uploadId, multipartId := splitIds(id)
|
||||||
|
@ -454,7 +455,7 @@ func (upload s3Upload) GetReader() (io.Reader, error) {
|
||||||
return nil, err
|
return nil, err
|
||||||
}
|
}
|
||||||
|
|
||||||
func (upload s3Upload) Terminate() error {
|
func (upload s3Upload) Terminate(ctx context.Context) error {
|
||||||
id := upload.id
|
id := upload.id
|
||||||
store := upload.store
|
store := upload.store
|
||||||
uploadId, multipartId := splitIds(id)
|
uploadId, multipartId := splitIds(id)
|
||||||
|
@ -519,7 +520,7 @@ func (upload s3Upload) Terminate() error {
|
||||||
return nil
|
return nil
|
||||||
}
|
}
|
||||||
|
|
||||||
func (upload s3Upload) FinishUpload() error {
|
func (upload s3Upload) FinishUpload(ctx context.Context) error {
|
||||||
id := upload.id
|
id := upload.id
|
||||||
store := upload.store
|
store := upload.store
|
||||||
uploadId, multipartId := splitIds(id)
|
uploadId, multipartId := splitIds(id)
|
||||||
|
@ -553,7 +554,7 @@ func (upload s3Upload) FinishUpload() error {
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
|
|
||||||
func (store S3Store) ConcatUploads(dest string, partialUploads []string) error {
|
func (store S3Store) ConcatUploads(ctx context.Context, dest string, partialUploads []string) error {
|
||||||
uploadId, multipartId := splitIds(dest)
|
uploadId, multipartId := splitIds(dest)
|
||||||
|
|
||||||
numPartialUploads := len(partialUploads)
|
numPartialUploads := len(partialUploads)
|
||||||
|
@ -590,18 +591,18 @@ func (store S3Store) ConcatUploads(dest string, partialUploads []string) error {
|
||||||
return newMultiError(errs)
|
return newMultiError(errs)
|
||||||
}
|
}
|
||||||
|
|
||||||
upload, err := store.GetUpload(dest)
|
upload, err := store.GetUpload(ctx, dest)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
return upload.FinishUpload()
|
return upload.FinishUpload(ctx)
|
||||||
}
|
}
|
||||||
|
|
||||||
func (upload s3Upload) DeclareLength(length int64) error {
|
func (upload s3Upload) DeclareLength(ctx context.Context, length int64) error {
|
||||||
id := upload.id
|
id := upload.id
|
||||||
store := upload.store
|
store := upload.store
|
||||||
uploadId, _ := splitIds(id)
|
uploadId, _ := splitIds(id)
|
||||||
info, err := upload.GetInfo()
|
info, err := upload.GetInfo(ctx)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
|
|
|
@ -2,6 +2,7 @@ package s3store
|
||||||
|
|
||||||
import (
|
import (
|
||||||
"bytes"
|
"bytes"
|
||||||
|
"context"
|
||||||
"fmt"
|
"fmt"
|
||||||
"io"
|
"io"
|
||||||
"io/ioutil"
|
"io/ioutil"
|
||||||
|
@ -66,7 +67,7 @@ func TestNewUpload(t *testing.T) {
|
||||||
},
|
},
|
||||||
}
|
}
|
||||||
|
|
||||||
upload, err := store.NewUpload(info)
|
upload, err := store.NewUpload(context.Background(), info)
|
||||||
assert.Nil(err)
|
assert.Nil(err)
|
||||||
assert.NotNil(upload)
|
assert.NotNil(upload)
|
||||||
}
|
}
|
||||||
|
@ -114,7 +115,7 @@ func TestNewUploadWithObjectPrefix(t *testing.T) {
|
||||||
},
|
},
|
||||||
}
|
}
|
||||||
|
|
||||||
upload, err := store.NewUpload(info)
|
upload, err := store.NewUpload(context.Background(), info)
|
||||||
assert.Nil(err)
|
assert.Nil(err)
|
||||||
assert.NotNil(upload)
|
assert.NotNil(upload)
|
||||||
}
|
}
|
||||||
|
@ -135,7 +136,7 @@ func TestNewUploadLargerMaxObjectSize(t *testing.T) {
|
||||||
Size: store.MaxObjectSize + 1,
|
Size: store.MaxObjectSize + 1,
|
||||||
}
|
}
|
||||||
|
|
||||||
upload, err := store.NewUpload(info)
|
upload, err := store.NewUpload(context.Background(), info)
|
||||||
assert.NotNil(err)
|
assert.NotNil(err)
|
||||||
assert.EqualError(err, fmt.Sprintf("s3store: upload size of %v bytes exceeds MaxObjectSize of %v bytes", info.Size, store.MaxObjectSize))
|
assert.EqualError(err, fmt.Sprintf("s3store: upload size of %v bytes exceeds MaxObjectSize of %v bytes", info.Size, store.MaxObjectSize))
|
||||||
assert.Nil(upload)
|
assert.Nil(upload)
|
||||||
|
@ -154,10 +155,10 @@ func TestGetInfoNotFound(t *testing.T) {
|
||||||
Key: aws.String("uploadId.info"),
|
Key: aws.String("uploadId.info"),
|
||||||
}).Return(nil, awserr.New("NoSuchKey", "The specified key does not exist.", nil))
|
}).Return(nil, awserr.New("NoSuchKey", "The specified key does not exist.", nil))
|
||||||
|
|
||||||
upload, err := store.GetUpload("uploadId+multipartId")
|
upload, err := store.GetUpload(context.Background(), "uploadId+multipartId")
|
||||||
assert.Nil(err)
|
assert.Nil(err)
|
||||||
|
|
||||||
_, err = upload.GetInfo()
|
_, err = upload.GetInfo(context.Background())
|
||||||
assert.Equal(handler.ErrNotFound, err)
|
assert.Equal(handler.ErrNotFound, err)
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@ -211,10 +212,10 @@ func TestGetInfo(t *testing.T) {
|
||||||
}).Return(&s3.GetObjectOutput{}, awserr.New("NoSuchKey", "Not found", nil)),
|
}).Return(&s3.GetObjectOutput{}, awserr.New("NoSuchKey", "Not found", nil)),
|
||||||
)
|
)
|
||||||
|
|
||||||
upload, err := store.GetUpload("uploadId+multipartId")
|
upload, err := store.GetUpload(context.Background(), "uploadId+multipartId")
|
||||||
assert.Nil(err)
|
assert.Nil(err)
|
||||||
|
|
||||||
info, err := upload.GetInfo()
|
info, err := upload.GetInfo(context.Background())
|
||||||
assert.Nil(err)
|
assert.Nil(err)
|
||||||
assert.Equal(int64(500), info.Size)
|
assert.Equal(int64(500), info.Size)
|
||||||
assert.Equal(int64(400), info.Offset)
|
assert.Equal(int64(400), info.Offset)
|
||||||
|
@ -256,10 +257,10 @@ func TestGetInfoWithIncompletePart(t *testing.T) {
|
||||||
}, nil),
|
}, nil),
|
||||||
)
|
)
|
||||||
|
|
||||||
upload, err := store.GetUpload("uploadId+multipartId")
|
upload, err := store.GetUpload(context.Background(), "uploadId+multipartId")
|
||||||
assert.Nil(err)
|
assert.Nil(err)
|
||||||
|
|
||||||
info, err := upload.GetInfo()
|
info, err := upload.GetInfo(context.Background())
|
||||||
assert.Nil(err)
|
assert.Nil(err)
|
||||||
assert.Equal(int64(10), info.Offset)
|
assert.Equal(int64(10), info.Offset)
|
||||||
assert.Equal("uploadId+multipartId", info.ID)
|
assert.Equal("uploadId+multipartId", info.ID)
|
||||||
|
@ -288,10 +289,10 @@ func TestGetInfoFinished(t *testing.T) {
|
||||||
}).Return(nil, awserr.New("NoSuchUpload", "The specified upload does not exist.", nil)),
|
}).Return(nil, awserr.New("NoSuchUpload", "The specified upload does not exist.", nil)),
|
||||||
)
|
)
|
||||||
|
|
||||||
upload, err := store.GetUpload("uploadId+multipartId")
|
upload, err := store.GetUpload(context.Background(), "uploadId+multipartId")
|
||||||
assert.Nil(err)
|
assert.Nil(err)
|
||||||
|
|
||||||
info, err := upload.GetInfo()
|
info, err := upload.GetInfo(context.Background())
|
||||||
assert.Nil(err)
|
assert.Nil(err)
|
||||||
assert.Equal(int64(500), info.Size)
|
assert.Equal(int64(500), info.Size)
|
||||||
assert.Equal(int64(500), info.Offset)
|
assert.Equal(int64(500), info.Offset)
|
||||||
|
@ -312,10 +313,10 @@ func TestGetReader(t *testing.T) {
|
||||||
Body: ioutil.NopCloser(bytes.NewReader([]byte(`hello world`))),
|
Body: ioutil.NopCloser(bytes.NewReader([]byte(`hello world`))),
|
||||||
}, nil)
|
}, nil)
|
||||||
|
|
||||||
upload, err := store.GetUpload("uploadId+multipartId")
|
upload, err := store.GetUpload(context.Background(), "uploadId+multipartId")
|
||||||
assert.Nil(err)
|
assert.Nil(err)
|
||||||
|
|
||||||
content, err := upload.GetReader()
|
content, err := upload.GetReader(context.Background())
|
||||||
assert.Nil(err)
|
assert.Nil(err)
|
||||||
assert.Equal(ioutil.NopCloser(bytes.NewReader([]byte(`hello world`))), content)
|
assert.Equal(ioutil.NopCloser(bytes.NewReader([]byte(`hello world`))), content)
|
||||||
}
|
}
|
||||||
|
@ -341,10 +342,10 @@ func TestGetReaderNotFound(t *testing.T) {
|
||||||
}).Return(nil, awserr.New("NoSuchUpload", "The specified upload does not exist.", nil)),
|
}).Return(nil, awserr.New("NoSuchUpload", "The specified upload does not exist.", nil)),
|
||||||
)
|
)
|
||||||
|
|
||||||
upload, err := store.GetUpload("uploadId+multipartId")
|
upload, err := store.GetUpload(context.Background(), "uploadId+multipartId")
|
||||||
assert.Nil(err)
|
assert.Nil(err)
|
||||||
|
|
||||||
content, err := upload.GetReader()
|
content, err := upload.GetReader(context.Background())
|
||||||
assert.Nil(content)
|
assert.Nil(content)
|
||||||
assert.Equal(handler.ErrNotFound, err)
|
assert.Equal(handler.ErrNotFound, err)
|
||||||
}
|
}
|
||||||
|
@ -372,10 +373,10 @@ func TestGetReaderNotFinished(t *testing.T) {
|
||||||
}, nil),
|
}, nil),
|
||||||
)
|
)
|
||||||
|
|
||||||
upload, err := store.GetUpload("uploadId+multipartId")
|
upload, err := store.GetUpload(context.Background(), "uploadId+multipartId")
|
||||||
assert.Nil(err)
|
assert.Nil(err)
|
||||||
|
|
||||||
content, err := upload.GetReader()
|
content, err := upload.GetReader(context.Background())
|
||||||
assert.Nil(content)
|
assert.Nil(content)
|
||||||
assert.Equal("cannot stream non-finished upload", err.Error())
|
assert.Equal("cannot stream non-finished upload", err.Error())
|
||||||
}
|
}
|
||||||
|
@ -415,10 +416,10 @@ func TestDeclareLength(t *testing.T) {
|
||||||
}),
|
}),
|
||||||
)
|
)
|
||||||
|
|
||||||
upload, err := store.GetUpload("uploadId+multipartId")
|
upload, err := store.GetUpload(context.Background(), "uploadId+multipartId")
|
||||||
assert.Nil(err)
|
assert.Nil(err)
|
||||||
|
|
||||||
err = store.AsLengthDeclarableUpload(upload).DeclareLength(500)
|
err = store.AsLengthDeclarableUpload(upload).DeclareLength(context.Background(), 500)
|
||||||
assert.Nil(err)
|
assert.Nil(err)
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@ -489,10 +490,10 @@ func TestFinishUpload(t *testing.T) {
|
||||||
}).Return(nil, nil),
|
}).Return(nil, nil),
|
||||||
)
|
)
|
||||||
|
|
||||||
upload, err := store.GetUpload("uploadId+multipartId")
|
upload, err := store.GetUpload(context.Background(), "uploadId+multipartId")
|
||||||
assert.Nil(err)
|
assert.Nil(err)
|
||||||
|
|
||||||
err = upload.FinishUpload()
|
err = upload.FinishUpload(context.Background())
|
||||||
assert.Nil(err)
|
assert.Nil(err)
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@ -581,10 +582,10 @@ func TestWriteChunk(t *testing.T) {
|
||||||
})).Return(nil, nil),
|
})).Return(nil, nil),
|
||||||
)
|
)
|
||||||
|
|
||||||
upload, err := store.GetUpload("uploadId+multipartId")
|
upload, err := store.GetUpload(context.Background(), "uploadId+multipartId")
|
||||||
assert.Nil(err)
|
assert.Nil(err)
|
||||||
|
|
||||||
bytesRead, err := upload.WriteChunk(300, bytes.NewReader([]byte("1234567890ABCD")))
|
bytesRead, err := upload.WriteChunk(context.Background(), 300, bytes.NewReader([]byte("1234567890ABCD")))
|
||||||
assert.Nil(err)
|
assert.Nil(err)
|
||||||
assert.Equal(int64(14), bytesRead)
|
assert.Equal(int64(14), bytesRead)
|
||||||
}
|
}
|
||||||
|
@ -663,10 +664,10 @@ func TestWriteChunkWithUnexpectedEOF(t *testing.T) {
|
||||||
writer.CloseWithError(io.ErrUnexpectedEOF)
|
writer.CloseWithError(io.ErrUnexpectedEOF)
|
||||||
}()
|
}()
|
||||||
|
|
||||||
upload, err := store.GetUpload("uploadId+multipartId")
|
upload, err := store.GetUpload(context.Background(), "uploadId+multipartId")
|
||||||
assert.Nil(err)
|
assert.Nil(err)
|
||||||
|
|
||||||
bytesRead, err := upload.WriteChunk(300, reader)
|
bytesRead, err := upload.WriteChunk(context.Background(), 300, reader)
|
||||||
assert.Nil(err)
|
assert.Nil(err)
|
||||||
assert.Equal(int64(14), bytesRead)
|
assert.Equal(int64(14), bytesRead)
|
||||||
}
|
}
|
||||||
|
@ -731,10 +732,10 @@ func TestWriteChunkWriteIncompletePartBecauseTooSmall(t *testing.T) {
|
||||||
})).Return(nil, nil),
|
})).Return(nil, nil),
|
||||||
)
|
)
|
||||||
|
|
||||||
upload, err := store.GetUpload("uploadId+multipartId")
|
upload, err := store.GetUpload(context.Background(), "uploadId+multipartId")
|
||||||
assert.Nil(err)
|
assert.Nil(err)
|
||||||
|
|
||||||
bytesRead, err := upload.WriteChunk(300, bytes.NewReader([]byte("1234567890")))
|
bytesRead, err := upload.WriteChunk(context.Background(), 300, bytes.NewReader([]byte("1234567890")))
|
||||||
assert.Nil(err)
|
assert.Nil(err)
|
||||||
assert.Equal(int64(10), bytesRead)
|
assert.Equal(int64(10), bytesRead)
|
||||||
}
|
}
|
||||||
|
@ -804,10 +805,10 @@ func TestWriteChunkPrependsIncompletePart(t *testing.T) {
|
||||||
})).Return(nil, nil),
|
})).Return(nil, nil),
|
||||||
)
|
)
|
||||||
|
|
||||||
upload, err := store.GetUpload("uploadId+multipartId")
|
upload, err := store.GetUpload(context.Background(), "uploadId+multipartId")
|
||||||
assert.Nil(err)
|
assert.Nil(err)
|
||||||
|
|
||||||
bytesRead, err := upload.WriteChunk(3, bytes.NewReader([]byte("45")))
|
bytesRead, err := upload.WriteChunk(context.Background(), 3, bytes.NewReader([]byte("45")))
|
||||||
assert.Nil(err)
|
assert.Nil(err)
|
||||||
assert.Equal(int64(2), bytesRead)
|
assert.Equal(int64(2), bytesRead)
|
||||||
}
|
}
|
||||||
|
@ -875,10 +876,10 @@ func TestWriteChunkPrependsIncompletePartAndWritesANewIncompletePart(t *testing.
|
||||||
})).Return(nil, nil),
|
})).Return(nil, nil),
|
||||||
)
|
)
|
||||||
|
|
||||||
upload, err := store.GetUpload("uploadId+multipartId")
|
upload, err := store.GetUpload(context.Background(), "uploadId+multipartId")
|
||||||
assert.Nil(err)
|
assert.Nil(err)
|
||||||
|
|
||||||
bytesRead, err := upload.WriteChunk(3, bytes.NewReader([]byte("45")))
|
bytesRead, err := upload.WriteChunk(context.Background(), 3, bytes.NewReader([]byte("45")))
|
||||||
assert.Nil(err)
|
assert.Nil(err)
|
||||||
assert.Equal(int64(2), bytesRead)
|
assert.Equal(int64(2), bytesRead)
|
||||||
}
|
}
|
||||||
|
@ -946,13 +947,13 @@ func TestWriteChunkAllowTooSmallLast(t *testing.T) {
|
||||||
})).Return(nil, nil),
|
})).Return(nil, nil),
|
||||||
)
|
)
|
||||||
|
|
||||||
upload, err := store.GetUpload("uploadId+multipartId")
|
upload, err := store.GetUpload(context.Background(), "uploadId+multipartId")
|
||||||
assert.Nil(err)
|
assert.Nil(err)
|
||||||
|
|
||||||
// 10 bytes are missing for the upload to be finished (offset at 490 for 500
|
// 10 bytes are missing for the upload to be finished (offset at 490 for 500
|
||||||
// bytes file) but the minimum chunk size is higher (20). The chunk is
|
// bytes file) but the minimum chunk size is higher (20). The chunk is
|
||||||
// still uploaded since the last part may be smaller than the minimum.
|
// still uploaded since the last part may be smaller than the minimum.
|
||||||
bytesRead, err := upload.WriteChunk(490, bytes.NewReader([]byte("1234567890")))
|
bytesRead, err := upload.WriteChunk(context.Background(), 490, bytes.NewReader([]byte("1234567890")))
|
||||||
assert.Nil(err)
|
assert.Nil(err)
|
||||||
assert.Equal(int64(10), bytesRead)
|
assert.Equal(int64(10), bytesRead)
|
||||||
}
|
}
|
||||||
|
@ -990,10 +991,10 @@ func TestTerminate(t *testing.T) {
|
||||||
},
|
},
|
||||||
}).Return(&s3.DeleteObjectsOutput{}, nil)
|
}).Return(&s3.DeleteObjectsOutput{}, nil)
|
||||||
|
|
||||||
upload, err := store.GetUpload("uploadId+multipartId")
|
upload, err := store.GetUpload(context.Background(), "uploadId+multipartId")
|
||||||
assert.Nil(err)
|
assert.Nil(err)
|
||||||
|
|
||||||
err = store.AsTerminatableUpload(upload).Terminate()
|
err = store.AsTerminatableUpload(upload).Terminate(context.Background())
|
||||||
assert.Nil(err)
|
assert.Nil(err)
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@ -1039,10 +1040,10 @@ func TestTerminateWithErrors(t *testing.T) {
|
||||||
},
|
},
|
||||||
}, nil)
|
}, nil)
|
||||||
|
|
||||||
upload, err := store.GetUpload("uploadId+multipartId")
|
upload, err := store.GetUpload(context.Background(), "uploadId+multipartId")
|
||||||
assert.Nil(err)
|
assert.Nil(err)
|
||||||
|
|
||||||
err = store.AsTerminatableUpload(upload).Terminate()
|
err = store.AsTerminatableUpload(upload).Terminate(context.Background())
|
||||||
assert.Equal("Multiple errors occurred:\n\tAWS S3 Error (hello) for object uploadId: it's me.\n", err.Error())
|
assert.Equal("Multiple errors occurred:\n\tAWS S3 Error (hello) for object uploadId: it's me.\n", err.Error())
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@ -1124,7 +1125,7 @@ func TestConcatUploads(t *testing.T) {
|
||||||
}).Return(nil, nil),
|
}).Return(nil, nil),
|
||||||
)
|
)
|
||||||
|
|
||||||
err := store.ConcatUploads("uploadId+multipartId", []string{
|
err := store.ConcatUploads(context.Background(), "uploadId+multipartId", []string{
|
||||||
"aaa+AAA",
|
"aaa+AAA",
|
||||||
"bbb+BBB",
|
"bbb+BBB",
|
||||||
"ccc+CCC",
|
"ccc+CCC",
|
||||||
|
|
Loading…
Reference in New Issue