core: Add context to DataStores

Closes https://github.com/tus/tusd/issues/288
Marius 2019-09-15 13:43:59 +02:00
parent 819ffb10ab
commit 485c21d72e
17 changed files with 309 additions and 288 deletions
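In short: every method on the core DataStore and Upload interfaces now takes a context.Context as its first argument, and the handler passes the request's context down instead of the stores creating their own. Condensed from the datastore.go hunks below:

    type DataStore interface {
        NewUpload(ctx context.Context, info FileInfo) (upload Upload, err error)
        GetUpload(ctx context.Context, id string) (upload Upload, err error)
    }

    type Upload interface {
        WriteChunk(ctx context.Context, offset int64, src io.Reader) (int64, error)
        GetInfo(ctx context.Context) (FileInfo, error)
        GetReader(ctx context.Context) (io.Reader, error)
        FinishUpload(ctx context.Context) error
    }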

View File

@ -1,6 +1,7 @@
package cli
import (
"context"
"fmt"
"strconv"
@ -23,7 +24,7 @@ type hookDataStore struct {
handler.DataStore
}
func (store hookDataStore) NewUpload(info handler.FileInfo) (handler.Upload, error) {
func (store hookDataStore) NewUpload(ctx context.Context, info handler.FileInfo) (handler.Upload, error) {
if output, err := invokeHookSync(hooks.HookPreCreate, info, true); err != nil {
if hookErr, ok := err.(hooks.HookError); ok {
return nil, hooks.NewHookError(
@ -34,7 +35,7 @@ func (store hookDataStore) NewUpload(info handler.FileInfo) (handler.Upload, err
}
return nil, fmt.Errorf("pre-create hook failed: %s\n%s", err, string(output))
}
return store.DataStore.NewUpload(info)
return store.DataStore.NewUpload(ctx, info)
}
func SetupHookMetrics() {

View File

@ -9,6 +9,7 @@
package filestore
import (
"context"
"encoding/json"
"fmt"
"io"
@ -47,7 +48,7 @@ func (store FileStore) UseIn(composer *handler.StoreComposer) {
composer.UseLengthDeferrer(store)
}
func (store FileStore) NewUpload(info handler.FileInfo) (handler.Upload, error) {
func (store FileStore) NewUpload(ctx context.Context, info handler.FileInfo) (handler.Upload, error) {
id := uid.Uid()
binPath := store.binPath(id)
info.ID = id
@ -81,7 +82,7 @@ func (store FileStore) NewUpload(info handler.FileInfo) (handler.Upload, error)
return upload, nil
}
func (store FileStore) GetUpload(id string) (handler.Upload, error) {
func (store FileStore) GetUpload(ctx context.Context, id string) (handler.Upload, error) {
info := handler.FileInfo{}
data, err := ioutil.ReadFile(store.infoPath(id))
if err != nil {
@ -134,11 +135,11 @@ type fileUpload struct {
binPath string
}
func (upload *fileUpload) GetInfo() (handler.FileInfo, error) {
func (upload *fileUpload) GetInfo(ctx context.Context) (handler.FileInfo, error) {
return upload.info, nil
}
func (upload *fileUpload) WriteChunk(offset int64, src io.Reader) (int64, error) {
func (upload *fileUpload) WriteChunk(ctx context.Context, offset int64, src io.Reader) (int64, error) {
file, err := os.OpenFile(upload.binPath, os.O_WRONLY|os.O_APPEND, defaultFilePerm)
if err != nil {
return 0, err
@ -160,11 +161,11 @@ func (upload *fileUpload) WriteChunk(offset int64, src io.Reader) (int64, error)
return n, err
}
func (upload *fileUpload) GetReader() (io.Reader, error) {
func (upload *fileUpload) GetReader(ctx context.Context) (io.Reader, error) {
return os.Open(upload.binPath)
}
func (upload *fileUpload) Terminate() error {
func (upload *fileUpload) Terminate(ctx context.Context) error {
if err := os.Remove(upload.infoPath); err != nil {
return err
}
@ -174,7 +175,7 @@ func (upload *fileUpload) Terminate() error {
return nil
}
func (store FileStore) ConcatUploads(dest string, uploads []string) (err error) {
func (store FileStore) ConcatUploads(ctx context.Context, dest string, uploads []string) (err error) {
file, err := os.OpenFile(store.binPath(dest), os.O_WRONLY|os.O_APPEND, defaultFilePerm)
if err != nil {
return err
@ -195,7 +196,7 @@ func (store FileStore) ConcatUploads(dest string, uploads []string) (err error)
return
}
func (upload *fileUpload) DeclareLength(length int64) error {
func (upload *fileUpload) DeclareLength(ctx context.Context, length int64) error {
upload.info.Size = length
upload.info.SizeIsDeferred = false
return upload.writeInfo()
@ -210,6 +211,6 @@ func (upload *fileUpload) writeInfo() error {
return ioutil.WriteFile(upload.infoPath, data, defaultFilePerm)
}
func (upload *fileUpload) FinishUpload() error {
func (upload *fileUpload) FinishUpload(ctx context.Context) error {
return nil
}
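For code that consumes a store, the visible change is simply that a context has to be threaded through every call. A minimal sketch of using the filestore with the new signatures (the filestore.New constructor and the pkg/filestore import path are assumed from the repository layout; the ./uploads directory is illustrative):

    package main

    import (
        "context"
        "fmt"
        "strings"

        "github.com/tus/tusd/pkg/filestore"
        "github.com/tus/tusd/pkg/handler"
    )

    func main() {
        store := filestore.New("./uploads") // directory must already exist
        ctx := context.Background()

        // Every store and upload call now takes the context first.
        upload, err := store.NewUpload(ctx, handler.FileInfo{Size: 11})
        if err != nil {
            panic(err)
        }
        if _, err := upload.WriteChunk(ctx, 0, strings.NewReader("hello world")); err != nil {
            panic(err)
        }
        info, err := upload.GetInfo(ctx)
        if err != nil {
            panic(err)
        }
        fmt.Println(info.Offset) // 11
    }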

View File

@ -1,6 +1,7 @@
package filestore
import (
"context"
"io"
"io/ioutil"
"os"
@ -25,9 +26,10 @@ func TestFilestore(t *testing.T) {
a.NoError(err)
store := FileStore{tmp}
ctx := context.Background()
// Create new upload
upload, err := store.NewUpload(handler.FileInfo{
upload, err := store.NewUpload(ctx, handler.FileInfo{
Size: 42,
MetaData: map[string]string{
"hello": "world",
@ -37,7 +39,7 @@ func TestFilestore(t *testing.T) {
a.NotEqual(nil, upload)
// Check info without writing
info, err := upload.GetInfo()
info, err := upload.GetInfo(ctx)
a.NoError(err)
a.EqualValues(42, info.Size)
a.EqualValues(0, info.Offset)
@ -47,18 +49,18 @@ func TestFilestore(t *testing.T) {
a.Equal(filepath.Join(tmp, info.ID), info.Storage["Path"])
// Write data to upload
bytesWritten, err := upload.WriteChunk(0, strings.NewReader("hello world"))
bytesWritten, err := upload.WriteChunk(ctx, 0, strings.NewReader("hello world"))
a.NoError(err)
a.EqualValues(len("hello world"), bytesWritten)
// Check new offset
info, err = upload.GetInfo()
info, err = upload.GetInfo(ctx)
a.NoError(err)
a.EqualValues(42, info.Size)
a.EqualValues(11, info.Offset)
// Read content
reader, err := upload.GetReader()
reader, err := upload.GetReader(ctx)
a.NoError(err)
content, err := ioutil.ReadAll(reader)
@ -67,10 +69,10 @@ func TestFilestore(t *testing.T) {
reader.(io.Closer).Close()
// Terminate upload
a.NoError(store.AsTerminatableUpload(upload).Terminate())
a.NoError(store.AsTerminatableUpload(upload).Terminate(ctx))
// Test if upload is deleted
upload, err = store.GetUpload(info.ID)
upload, err = store.GetUpload(ctx, info.ID)
a.Equal(nil, upload)
a.True(os.IsNotExist(err))
}
@ -79,8 +81,9 @@ func TestMissingPath(t *testing.T) {
a := assert.New(t)
store := FileStore{"./path-that-does-not-exist"}
ctx := context.Background()
upload, err := store.NewUpload(handler.FileInfo{})
upload, err := store.NewUpload(ctx, handler.FileInfo{})
a.Error(err)
a.Equal("upload directory does not exist: ./path-that-does-not-exist", err.Error())
a.Equal(nil, upload)
@ -93,13 +96,14 @@ func TestConcatUploads(t *testing.T) {
a.NoError(err)
store := FileStore{tmp}
ctx := context.Background()
// Create new upload to hold concatenated upload
finUpload, err := store.NewUpload(handler.FileInfo{Size: 9})
finUpload, err := store.NewUpload(ctx, handler.FileInfo{Size: 9})
a.NoError(err)
a.NotEqual(nil, finUpload)
finInfo, err := finUpload.GetInfo()
finInfo, err := finUpload.GetInfo(ctx)
a.NoError(err)
finId := finInfo.ID
@ -111,33 +115,33 @@ func TestConcatUploads(t *testing.T) {
"ghi",
}
for i := 0; i < 3; i++ {
upload, err := store.NewUpload(handler.FileInfo{Size: 3})
upload, err := store.NewUpload(ctx, handler.FileInfo{Size: 3})
a.NoError(err)
n, err := upload.WriteChunk(0, strings.NewReader(contents[i]))
n, err := upload.WriteChunk(ctx, 0, strings.NewReader(contents[i]))
a.NoError(err)
a.EqualValues(3, n)
info, err := upload.GetInfo()
info, err := upload.GetInfo(ctx)
a.NoError(err)
ids[i] = info.ID
}
err = store.ConcatUploads(finId, ids)
err = store.ConcatUploads(ctx, finId, ids)
a.NoError(err)
// Check offset
finUpload, err = store.GetUpload(finId)
finUpload, err = store.GetUpload(ctx, finId)
a.NoError(err)
info, err := finUpload.GetInfo()
info, err := finUpload.GetInfo(ctx)
a.NoError(err)
a.EqualValues(9, info.Size)
a.EqualValues(9, info.Offset)
// Read content
reader, err := finUpload.GetReader()
reader, err := finUpload.GetReader(ctx)
a.NoError(err)
content, err := ioutil.ReadAll(reader)
@ -153,23 +157,24 @@ func TestDeclareLength(t *testing.T) {
a.NoError(err)
store := FileStore{tmp}
ctx := context.Background()
upload, err := store.NewUpload(handler.FileInfo{
upload, err := store.NewUpload(ctx, handler.FileInfo{
Size: 0,
SizeIsDeferred: true,
})
a.NoError(err)
a.NotEqual(nil, upload)
info, err := upload.GetInfo()
info, err := upload.GetInfo(ctx)
a.NoError(err)
a.EqualValues(0, info.Size)
a.Equal(true, info.SizeIsDeferred)
err = store.AsLengthDeclarableUpload(upload).DeclareLength(100)
err = store.AsLengthDeclarableUpload(upload).DeclareLength(ctx, 100)
a.NoError(err)
updatedInfo, err := upload.GetInfo()
updatedInfo, err := upload.GetInfo(ctx)
a.NoError(err)
a.EqualValues(100, updatedInfo.Size)
a.Equal(false, updatedInfo.SizeIsDeferred)

View File

@ -56,7 +56,7 @@ func (store GCSStore) UseIn(composer *handler.StoreComposer) {
composer.UseTerminater(store)
}
func (store GCSStore) NewUpload(info handler.FileInfo) (handler.Upload, error) {
func (store GCSStore) NewUpload(ctx context.Context, info handler.FileInfo) (handler.Upload, error) {
if info.ID == "" {
info.ID = uid.Uid()
}
@ -67,7 +67,6 @@ func (store GCSStore) NewUpload(info handler.FileInfo) (handler.Upload, error) {
"Key": store.keyWithPrefix(info.ID),
}
ctx := context.Background()
err := store.writeInfo(ctx, store.keyWithPrefix(info.ID), info)
if err != nil {
return &gcsUpload{info.ID, &store}, err
@ -81,7 +80,7 @@ type gcsUpload struct {
store *GCSStore
}
func (store GCSStore) GetUpload(id string) (handler.Upload, error) {
func (store GCSStore) GetUpload(ctx context.Context, id string) (handler.Upload, error) {
return &gcsUpload{id, &store}, nil
}
@ -89,7 +88,7 @@ func (store GCSStore) AsTerminatableUpload(upload handler.Upload) handler.Termin
return upload.(*gcsUpload)
}
func (upload gcsUpload) WriteChunk(offset int64, src io.Reader) (int64, error) {
func (upload gcsUpload) WriteChunk(ctx context.Context, offset int64, src io.Reader) (int64, error) {
id := upload.id
store := upload.store
@ -99,7 +98,6 @@ func (upload gcsUpload) WriteChunk(offset int64, src io.Reader) (int64, error) {
Prefix: prefix,
}
ctx := context.Background()
names, err := store.Service.FilterObjects(ctx, filterParams)
if err != nil {
return 0, err
@ -135,7 +133,7 @@ func (upload gcsUpload) WriteChunk(offset int64, src io.Reader) (int64, error) {
const CONCURRENT_SIZE_REQUESTS = 32
func (upload gcsUpload) GetInfo() (handler.FileInfo, error) {
func (upload gcsUpload) GetInfo(ctx context.Context) (handler.FileInfo, error) {
id := upload.id
store := upload.store
@ -147,7 +145,6 @@ func (upload gcsUpload) GetInfo() (handler.FileInfo, error) {
ID: i,
}
ctx := context.Background()
r, err := store.Service.ReadObject(ctx, params)
if err != nil {
if err == storage.ErrObjectNotExist {
@ -258,7 +255,7 @@ func (store GCSStore) writeInfo(ctx context.Context, id string, info handler.Fil
return nil
}
func (upload gcsUpload) FinishUpload() error {
func (upload gcsUpload) FinishUpload(ctx context.Context) error {
id := upload.id
store := upload.store
@ -268,7 +265,6 @@ func (upload gcsUpload) FinishUpload() error {
Prefix: prefix,
}
ctx := context.Background()
names, err := store.Service.FilterObjects(ctx, filterParams)
if err != nil {
return err
@ -290,7 +286,7 @@ func (upload gcsUpload) FinishUpload() error {
return err
}
info, err := upload.GetInfo()
info, err := upload.GetInfo(ctx)
if err != nil {
return err
}
@ -308,7 +304,7 @@ func (upload gcsUpload) FinishUpload() error {
return nil
}
func (upload gcsUpload) Terminate() error {
func (upload gcsUpload) Terminate(ctx context.Context) error {
id := upload.id
store := upload.store
@ -317,7 +313,6 @@ func (upload gcsUpload) Terminate() error {
Prefix: store.keyWithPrefix(id),
}
ctx := context.Background()
err := store.Service.DeleteObjectsWithFilter(ctx, filterParams)
if err != nil {
return err
@ -326,7 +321,7 @@ func (upload gcsUpload) Terminate() error {
return nil
}
func (upload gcsUpload) GetReader() (io.Reader, error) {
func (upload gcsUpload) GetReader(ctx context.Context) (io.Reader, error) {
id := upload.id
store := upload.store
@ -335,7 +330,6 @@ func (upload gcsUpload) GetReader() (io.Reader, error) {
ID: store.keyWithPrefix(id),
}
ctx := context.Background()
r, err := store.Service.ReadObject(ctx, params)
if err != nil {
return nil, err
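Worth noting in the GCS hunks above: the ctx := context.Background() lines that each method used to create are deleted, and the context handed in by the handler (ultimately r.Context(), see the unrouted_handler changes further down) is used instead. That is what lets a cancelled or timed-out request reach the storage layer. A generic, self-contained illustration of the benefit (not GCS-specific; deleteObjects is only a stand-in for a call such as DeleteObjectsWithFilter):

    package main

    import (
        "context"
        "errors"
        "fmt"
        "time"
    )

    // deleteObjects honours the caller's context instead of creating its own
    // background context, so it can stop as soon as the request is cancelled.
    func deleteObjects(ctx context.Context, keys []string) error {
        for _, key := range keys {
            select {
            case <-ctx.Done():
                return ctx.Err()
            case <-time.After(10 * time.Millisecond): // simulated storage call
                fmt.Println("deleted", key)
            }
        }
        return nil
    }

    func main() {
        ctx, cancel := context.WithTimeout(context.Background(), 25*time.Millisecond)
        defer cancel()

        err := deleteObjects(ctx, []string{"a", "b", "c", "d"})
        if errors.Is(err, context.DeadlineExceeded) {
            fmt.Println("aborted early:", err)
        }
    }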

View File

@ -64,7 +64,7 @@ func TestNewUpload(t *testing.T) {
ctx := context.Background()
service.EXPECT().WriteObject(ctx, params, r).Return(int64(r.Len()), nil)
upload, err := store.NewUpload(mockTusdInfo)
upload, err := store.NewUpload(context.Background(), mockTusdInfo)
assert.Nil(err)
assert.NotNil(upload)
}
@ -99,7 +99,7 @@ func TestNewUploadWithPrefix(t *testing.T) {
ctx := context.Background()
service.EXPECT().WriteObject(ctx, params, r).Return(int64(r.Len()), nil)
upload, err := store.NewUpload(mockTusdInfo)
upload, err := store.NewUpload(context.Background(), mockTusdInfo)
assert.Nil(err)
assert.NotNil(upload)
}
@ -185,10 +185,10 @@ func TestGetInfo(t *testing.T) {
service.EXPECT().WriteObject(ctx, params, infoR).Return(int64(len(offsetInfoData)), nil).After(lastGetObjectSize)
upload, err := store.GetUpload(mockID)
upload, err := store.GetUpload(context.Background(), mockID)
assert.Nil(err)
info, err := upload.GetInfo()
info, err := upload.GetInfo(context.Background())
assert.Nil(err)
assert.Equal(mockTusdInfo, info)
@ -214,10 +214,10 @@ func TestGetInfoNotFound(t *testing.T) {
service.EXPECT().ReadObject(ctx, params).Return(nil, storage.ErrObjectNotExist),
)
upload, err := store.GetUpload(mockID)
upload, err := store.GetUpload(context.Background(), mockID)
assert.Nil(err)
_, err = upload.GetInfo()
_, err = upload.GetInfo(context.Background())
assert.Equal(handler.ErrNotFound, err)
}
@ -264,10 +264,10 @@ func TestGetReader(t *testing.T) {
ctx := context.Background()
service.EXPECT().ReadObject(ctx, params).Return(r, nil)
upload, err := store.GetUpload(mockID)
upload, err := store.GetUpload(context.Background(), mockID)
assert.Nil(err)
reader, err := upload.GetReader()
reader, err := upload.GetReader(context.Background())
assert.Nil(err)
buf := make([]byte, len(mockReaderData))
@ -295,10 +295,10 @@ func TestTerminate(t *testing.T) {
ctx := context.Background()
service.EXPECT().DeleteObjectsWithFilter(ctx, filterParams).Return(nil)
upload, err := store.GetUpload(mockID)
upload, err := store.GetUpload(context.Background(), mockID)
assert.Nil(err)
err = store.AsTerminatableUpload(upload).Terminate()
err = store.AsTerminatableUpload(upload).Terminate(context.Background())
assert.Nil(err)
}
@ -384,10 +384,10 @@ func TestFinishUpload(t *testing.T) {
writeObject := service.EXPECT().WriteObject(ctx, infoParams, infoR).Return(int64(len(offsetInfoData)), nil).After(lastGetObjectSize)
service.EXPECT().SetObjectMetadata(ctx, objectParams, metadata).Return(nil).After(writeObject)
upload, err := store.GetUpload(mockID)
upload, err := store.GetUpload(context.Background(), mockID)
assert.Nil(err)
err = upload.FinishUpload()
err = upload.FinishUpload(context.Background())
assert.Nil(err)
// Cancel the context to avoid getting an error from `go vet`
@ -463,10 +463,10 @@ func TestWriteChunk(t *testing.T) {
var offset int64
offset = mockSize / 3
upload, err := store.GetUpload(mockID)
upload, err := store.GetUpload(context.Background(), mockID)
assert.Nil(err)
_, err = upload.WriteChunk(offset, reader)
_, err = upload.WriteChunk(context.Background(), offset, reader)
assert.Nil(err)
}

View File

@ -1,6 +1,7 @@
package handler_test
import (
"context"
"net/http"
"strings"
"testing"
@ -37,14 +38,14 @@ func TestConcat(t *testing.T) {
upload := NewMockFullUpload(ctrl)
gomock.InOrder(
store.EXPECT().NewUpload(FileInfo{
store.EXPECT().NewUpload(context.Background(), FileInfo{
Size: 300,
IsPartial: true,
IsFinal: false,
PartialUploads: nil,
MetaData: make(map[string]string),
}).Return(upload, nil),
upload.EXPECT().GetInfo().Return(FileInfo{
upload.EXPECT().GetInfo(context.Background()).Return(FileInfo{
ID: "foo",
Size: 300,
IsPartial: true,
@ -76,8 +77,8 @@ func TestConcat(t *testing.T) {
upload := NewMockFullUpload(ctrl)
gomock.InOrder(
store.EXPECT().GetUpload("foo").Return(upload, nil),
upload.EXPECT().GetInfo().Return(FileInfo{
store.EXPECT().GetUpload(context.Background(), "foo").Return(upload, nil),
upload.EXPECT().GetInfo(context.Background()).Return(FileInfo{
ID: "foo",
IsPartial: true,
}, nil),
@ -113,26 +114,26 @@ func TestConcat(t *testing.T) {
uploadC := NewMockFullUpload(ctrl)
gomock.InOrder(
store.EXPECT().GetUpload("a").Return(uploadA, nil),
uploadA.EXPECT().GetInfo().Return(FileInfo{
store.EXPECT().GetUpload(context.Background(), "a").Return(uploadA, nil),
uploadA.EXPECT().GetInfo(context.Background()).Return(FileInfo{
IsPartial: true,
Size: 5,
Offset: 5,
}, nil),
store.EXPECT().GetUpload("b").Return(uploadB, nil),
uploadB.EXPECT().GetInfo().Return(FileInfo{
store.EXPECT().GetUpload(context.Background(), "b").Return(uploadB, nil),
uploadB.EXPECT().GetInfo(context.Background()).Return(FileInfo{
IsPartial: true,
Size: 5,
Offset: 5,
}, nil),
store.EXPECT().NewUpload(FileInfo{
store.EXPECT().NewUpload(context.Background(), FileInfo{
Size: 10,
IsPartial: false,
IsFinal: true,
PartialUploads: []string{"a", "b"},
MetaData: make(map[string]string),
}).Return(uploadC, nil),
uploadC.EXPECT().GetInfo().Return(FileInfo{
uploadC.EXPECT().GetInfo(context.Background()).Return(FileInfo{
ID: "foo",
Size: 10,
IsPartial: false,
@ -140,7 +141,7 @@ func TestConcat(t *testing.T) {
PartialUploads: []string{"a", "b"},
MetaData: make(map[string]string),
}, nil),
store.EXPECT().ConcatUploads("foo", []string{"a", "b"}).Return(nil),
store.EXPECT().ConcatUploads(context.Background(), "foo", []string{"a", "b"}).Return(nil),
)
handler, _ := NewHandler(Config{
@ -179,8 +180,8 @@ func TestConcat(t *testing.T) {
upload := NewMockFullUpload(ctrl)
gomock.InOrder(
store.EXPECT().GetUpload("foo").Return(upload, nil),
upload.EXPECT().GetInfo().Return(FileInfo{
store.EXPECT().GetUpload(context.Background(), "foo").Return(upload, nil),
upload.EXPECT().GetInfo(context.Background()).Return(FileInfo{
ID: "foo",
IsFinal: true,
PartialUploads: []string{"a", "b"},
@ -217,8 +218,8 @@ func TestConcat(t *testing.T) {
// This upload is still unfinished (mismatching offset and size) and
// will therefore cause the POST request to fail.
gomock.InOrder(
store.EXPECT().GetUpload("c").Return(upload, nil),
upload.EXPECT().GetInfo().Return(FileInfo{
store.EXPECT().GetUpload(context.Background(), "c").Return(upload, nil),
upload.EXPECT().GetInfo(context.Background()).Return(FileInfo{
ID: "c",
IsPartial: true,
Size: 5,
@ -247,8 +248,8 @@ func TestConcat(t *testing.T) {
upload := NewMockFullUpload(ctrl)
gomock.InOrder(
store.EXPECT().GetUpload("huge").Return(upload, nil),
upload.EXPECT().GetInfo().Return(FileInfo{
store.EXPECT().GetUpload(context.Background(), "huge").Return(upload, nil),
upload.EXPECT().GetInfo(context.Background()).Return(FileInfo{
ID: "huge",
Size: 1000,
Offset: 1000,
@ -277,8 +278,8 @@ func TestConcat(t *testing.T) {
upload := NewMockFullUpload(ctrl)
gomock.InOrder(
store.EXPECT().GetUpload("foo").Return(upload, nil),
upload.EXPECT().GetInfo().Return(FileInfo{
store.EXPECT().GetUpload(context.Background(), "foo").Return(upload, nil),
upload.EXPECT().GetInfo(context.Background()).Return(FileInfo{
ID: "foo",
Size: 10,
Offset: 0,

View File

@ -1,6 +1,7 @@
package handler
import (
"context"
"testing"
"github.com/stretchr/testify/assert"
@ -8,10 +9,10 @@ import (
type zeroStore struct{}
func (store zeroStore) NewUpload(info FileInfo) (Upload, error) {
func (store zeroStore) NewUpload(ctx context.Context, info FileInfo) (Upload, error) {
return nil, nil
}
func (store zeroStore) GetUpload(id string) (Upload, error) {
func (store zeroStore) GetUpload(ctx context.Context, id string) (Upload, error) {
return nil, nil
}

View File

@ -55,11 +55,11 @@ type Upload interface {
// It will also lock resources while they are written to ensure only one
// write happens at a time.
// The function call must return the number of bytes written.
WriteChunk(offset int64, src io.Reader) (int64, error)
WriteChunk(ctx context.Context, offset int64, src io.Reader) (int64, error)
// Read the file information used to validate the offset and respond to HEAD
// requests. It may return an os.ErrNotExist which will be interpreted as a
// 404 Not Found.
GetInfo() (FileInfo, error)
GetInfo(ctx context.Context) (FileInfo, error)
// GetReader returns a reader which allows iterating over the content of an
// upload specified by its ID. It should attempt to provide a reader even if
// the upload has not been finished yet but it's not required.
@ -67,7 +67,7 @@ type Upload interface {
// Close() method will be invoked once everything has been read.
// If the given upload could not be found, the error tusd.ErrNotFound should
// be returned.
GetReader() (io.Reader, error)
GetReader(ctx context.Context) (io.Reader, error)
// FinisherDataStore is the interface which can be implemented by DataStores
// which need to do additional operations once an entire upload has been
// completed. These tasks may include but are not limited to freeing unused
@ -75,7 +75,7 @@ type Upload interface {
// interface for removing a temporary object.
// FinishUpload executes additional operations for the finished upload which
// is specified by its ID.
FinishUpload() error
FinishUpload(ctx context.Context) error
}
type DataStore interface {
@ -83,15 +83,15 @@ type DataStore interface {
// return a unique id which is used to identify the upload. If no backend
// (e.g. Riak) specifies the id, you may want to use the uid package to
// generate one. The properties Size and MetaData will be filled.
NewUpload(info FileInfo) (upload Upload, err error)
NewUpload(ctx context.Context, info FileInfo) (upload Upload, err error)
GetUpload(id string) (upload Upload, err error)
GetUpload(ctx context.Context, id string) (upload Upload, err error)
}
type TerminatableUpload interface {
// Terminate an upload so any further requests to the resource, both reading
// and writing, must return os.ErrNotExist or similar.
Terminate() error
Terminate(ctx context.Context) error
}
// TerminaterDataStore is the interface which must be implemented by DataStores
@ -111,7 +111,7 @@ type ConcaterDataStore interface {
// destination upload has been created before with enough space to hold all
// partial uploads. The order in which the partial uploads are supplied
// must be respected during concatenation.
ConcatUploads(destination string, partialUploads []string) error
ConcatUploads(ctx context.Context, destination string, partialUploads []string) error
}
// LengthDeferrerDataStore is the interface that must be implemented if the
@ -123,7 +123,7 @@ type LengthDeferrerDataStore interface {
}
type LengthDeclarableUpload interface {
DeclareLength(length int64) error
DeclareLength(ctx context.Context, length int64) error
}
// Locker is the interface required for custom lock persisting mechanisms.
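To see what the new contract asks of a store author, here is a minimal in-memory sketch that satisfies the core DataStore and Upload interfaces above. It is illustrative only (no offset validation, a hard-coded id, nothing persisted) and not part of this commit:

    package memorystore

    import (
        "bytes"
        "context"
        "io"
        "sync"

        "github.com/tus/tusd/pkg/handler"
    )

    // MemoryStore keeps uploads in RAM purely to demonstrate the new signatures.
    type MemoryStore struct {
        mu      sync.Mutex
        uploads map[string]*memoryUpload
    }

    func New() *MemoryStore {
        return &MemoryStore{uploads: map[string]*memoryUpload{}}
    }

    func (s *MemoryStore) NewUpload(ctx context.Context, info handler.FileInfo) (handler.Upload, error) {
        s.mu.Lock()
        defer s.mu.Unlock()
        if info.ID == "" {
            info.ID = "upload-1" // a real store must generate a unique id
        }
        u := &memoryUpload{info: info}
        s.uploads[info.ID] = u
        return u, nil
    }

    func (s *MemoryStore) GetUpload(ctx context.Context, id string) (handler.Upload, error) {
        s.mu.Lock()
        defer s.mu.Unlock()
        u, ok := s.uploads[id]
        if !ok {
            return nil, handler.ErrNotFound
        }
        return u, nil
    }

    type memoryUpload struct {
        info handler.FileInfo
        data bytes.Buffer
    }

    func (u *memoryUpload) WriteChunk(ctx context.Context, offset int64, src io.Reader) (int64, error) {
        n, err := io.Copy(&u.data, src)
        u.info.Offset += n
        return n, err
    }

    func (u *memoryUpload) GetInfo(ctx context.Context) (handler.FileInfo, error) {
        return u.info, nil
    }

    func (u *memoryUpload) GetReader(ctx context.Context) (io.Reader, error) {
        return bytes.NewReader(u.data.Bytes()), nil
    }

    func (u *memoryUpload) FinishUpload(ctx context.Context) error {
        return nil
    }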

View File

@ -1,6 +1,7 @@
package handler_test
import (
"context"
"net/http"
"strings"
"testing"
@ -34,8 +35,8 @@ func TestGet(t *testing.T) {
gomock.InOrder(
locker.EXPECT().NewLock("yes").Return(lock, nil),
lock.EXPECT().Lock().Return(nil),
store.EXPECT().GetUpload("yes").Return(upload, nil),
upload.EXPECT().GetInfo().Return(FileInfo{
store.EXPECT().GetUpload(context.Background(), "yes").Return(upload, nil),
upload.EXPECT().GetInfo(context.Background()).Return(FileInfo{
Offset: 5,
Size: 20,
MetaData: map[string]string{
@ -43,7 +44,7 @@ func TestGet(t *testing.T) {
"filetype": "image/jpeg",
},
}, nil),
upload.EXPECT().GetReader().Return(reader, nil),
upload.EXPECT().GetReader(context.Background()).Return(reader, nil),
lock.EXPECT().Unlock().Return(nil),
)
@ -78,8 +79,8 @@ func TestGet(t *testing.T) {
upload := NewMockFullUpload(ctrl)
gomock.InOrder(
store.EXPECT().GetUpload("yes").Return(upload, nil),
upload.EXPECT().GetInfo().Return(FileInfo{
store.EXPECT().GetUpload(context.Background(), "yes").Return(upload, nil),
upload.EXPECT().GetInfo(context.Background()).Return(FileInfo{
Offset: 0,
}, nil),
)
@ -106,8 +107,8 @@ func TestGet(t *testing.T) {
upload := NewMockFullUpload(ctrl)
gomock.InOrder(
store.EXPECT().GetUpload("yes").Return(upload, nil),
upload.EXPECT().GetInfo().Return(FileInfo{
store.EXPECT().GetUpload(context.Background(), "yes").Return(upload, nil),
upload.EXPECT().GetInfo(context.Background()).Return(FileInfo{
Offset: 0,
MetaData: map[string]string{
"filetype": "non-a-valid-mime-type",
@ -138,8 +139,8 @@ func TestGet(t *testing.T) {
upload := NewMockFullUpload(ctrl)
gomock.InOrder(
store.EXPECT().GetUpload("yes").Return(upload, nil),
upload.EXPECT().GetInfo().Return(FileInfo{
store.EXPECT().GetUpload(context.Background(), "yes").Return(upload, nil),
upload.EXPECT().GetInfo(context.Background()).Return(FileInfo{
Offset: 0,
MetaData: map[string]string{
"filetype": "application/vnd.openxmlformats-officedocument.wordprocessingml.document",

View File

@ -5,6 +5,7 @@
package handler_test
import (
context "context"
gomock "github.com/golang/mock/gomock"
handler "github.com/tus/tusd/pkg/handler"
io "io"
@ -35,33 +36,33 @@ func (m *MockFullDataStore) EXPECT() *MockFullDataStoreMockRecorder {
}
// NewUpload mocks base method
func (m *MockFullDataStore) NewUpload(info handler.FileInfo) (handler.Upload, error) {
func (m *MockFullDataStore) NewUpload(ctx context.Context, info handler.FileInfo) (handler.Upload, error) {
m.ctrl.T.Helper()
ret := m.ctrl.Call(m, "NewUpload", info)
ret := m.ctrl.Call(m, "NewUpload", ctx, info)
ret0, _ := ret[0].(handler.Upload)
ret1, _ := ret[1].(error)
return ret0, ret1
}
// NewUpload indicates an expected call of NewUpload
func (mr *MockFullDataStoreMockRecorder) NewUpload(info interface{}) *gomock.Call {
func (mr *MockFullDataStoreMockRecorder) NewUpload(ctx, info interface{}) *gomock.Call {
mr.mock.ctrl.T.Helper()
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "NewUpload", reflect.TypeOf((*MockFullDataStore)(nil).NewUpload), info)
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "NewUpload", reflect.TypeOf((*MockFullDataStore)(nil).NewUpload), ctx, info)
}
// GetUpload mocks base method
func (m *MockFullDataStore) GetUpload(id string) (handler.Upload, error) {
func (m *MockFullDataStore) GetUpload(ctx context.Context, id string) (handler.Upload, error) {
m.ctrl.T.Helper()
ret := m.ctrl.Call(m, "GetUpload", id)
ret := m.ctrl.Call(m, "GetUpload", ctx, id)
ret0, _ := ret[0].(handler.Upload)
ret1, _ := ret[1].(error)
return ret0, ret1
}
// GetUpload indicates an expected call of GetUpload
func (mr *MockFullDataStoreMockRecorder) GetUpload(id interface{}) *gomock.Call {
func (mr *MockFullDataStoreMockRecorder) GetUpload(ctx, id interface{}) *gomock.Call {
mr.mock.ctrl.T.Helper()
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetUpload", reflect.TypeOf((*MockFullDataStore)(nil).GetUpload), id)
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetUpload", reflect.TypeOf((*MockFullDataStore)(nil).GetUpload), ctx, id)
}
// AsTerminatableUpload mocks base method
@ -79,17 +80,17 @@ func (mr *MockFullDataStoreMockRecorder) AsTerminatableUpload(upload interface{}
}
// ConcatUploads mocks base method
func (m *MockFullDataStore) ConcatUploads(destination string, partialUploads []string) error {
func (m *MockFullDataStore) ConcatUploads(ctx context.Context, destination string, partialUploads []string) error {
m.ctrl.T.Helper()
ret := m.ctrl.Call(m, "ConcatUploads", destination, partialUploads)
ret := m.ctrl.Call(m, "ConcatUploads", ctx, destination, partialUploads)
ret0, _ := ret[0].(error)
return ret0
}
// ConcatUploads indicates an expected call of ConcatUploads
func (mr *MockFullDataStoreMockRecorder) ConcatUploads(destination, partialUploads interface{}) *gomock.Call {
func (mr *MockFullDataStoreMockRecorder) ConcatUploads(ctx, destination, partialUploads interface{}) *gomock.Call {
mr.mock.ctrl.T.Helper()
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ConcatUploads", reflect.TypeOf((*MockFullDataStore)(nil).ConcatUploads), destination, partialUploads)
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ConcatUploads", reflect.TypeOf((*MockFullDataStore)(nil).ConcatUploads), ctx, destination, partialUploads)
}
// AsLengthDeclarableUpload mocks base method
@ -130,90 +131,90 @@ func (m *MockFullUpload) EXPECT() *MockFullUploadMockRecorder {
}
// WriteChunk mocks base method
func (m *MockFullUpload) WriteChunk(offset int64, src io.Reader) (int64, error) {
func (m *MockFullUpload) WriteChunk(ctx context.Context, offset int64, src io.Reader) (int64, error) {
m.ctrl.T.Helper()
ret := m.ctrl.Call(m, "WriteChunk", offset, src)
ret := m.ctrl.Call(m, "WriteChunk", ctx, offset, src)
ret0, _ := ret[0].(int64)
ret1, _ := ret[1].(error)
return ret0, ret1
}
// WriteChunk indicates an expected call of WriteChunk
func (mr *MockFullUploadMockRecorder) WriteChunk(offset, src interface{}) *gomock.Call {
func (mr *MockFullUploadMockRecorder) WriteChunk(ctx, offset, src interface{}) *gomock.Call {
mr.mock.ctrl.T.Helper()
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "WriteChunk", reflect.TypeOf((*MockFullUpload)(nil).WriteChunk), offset, src)
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "WriteChunk", reflect.TypeOf((*MockFullUpload)(nil).WriteChunk), ctx, offset, src)
}
// GetInfo mocks base method
func (m *MockFullUpload) GetInfo() (handler.FileInfo, error) {
func (m *MockFullUpload) GetInfo(ctx context.Context) (handler.FileInfo, error) {
m.ctrl.T.Helper()
ret := m.ctrl.Call(m, "GetInfo")
ret := m.ctrl.Call(m, "GetInfo", ctx)
ret0, _ := ret[0].(handler.FileInfo)
ret1, _ := ret[1].(error)
return ret0, ret1
}
// GetInfo indicates an expected call of GetInfo
func (mr *MockFullUploadMockRecorder) GetInfo() *gomock.Call {
func (mr *MockFullUploadMockRecorder) GetInfo(ctx interface{}) *gomock.Call {
mr.mock.ctrl.T.Helper()
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetInfo", reflect.TypeOf((*MockFullUpload)(nil).GetInfo))
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetInfo", reflect.TypeOf((*MockFullUpload)(nil).GetInfo), ctx)
}
// GetReader mocks base method
func (m *MockFullUpload) GetReader() (io.Reader, error) {
func (m *MockFullUpload) GetReader(ctx context.Context) (io.Reader, error) {
m.ctrl.T.Helper()
ret := m.ctrl.Call(m, "GetReader")
ret := m.ctrl.Call(m, "GetReader", ctx)
ret0, _ := ret[0].(io.Reader)
ret1, _ := ret[1].(error)
return ret0, ret1
}
// GetReader indicates an expected call of GetReader
func (mr *MockFullUploadMockRecorder) GetReader() *gomock.Call {
func (mr *MockFullUploadMockRecorder) GetReader(ctx interface{}) *gomock.Call {
mr.mock.ctrl.T.Helper()
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetReader", reflect.TypeOf((*MockFullUpload)(nil).GetReader))
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetReader", reflect.TypeOf((*MockFullUpload)(nil).GetReader), ctx)
}
// FinishUpload mocks base method
func (m *MockFullUpload) FinishUpload() error {
func (m *MockFullUpload) FinishUpload(ctx context.Context) error {
m.ctrl.T.Helper()
ret := m.ctrl.Call(m, "FinishUpload")
ret := m.ctrl.Call(m, "FinishUpload", ctx)
ret0, _ := ret[0].(error)
return ret0
}
// FinishUpload indicates an expected call of FinishUpload
func (mr *MockFullUploadMockRecorder) FinishUpload() *gomock.Call {
func (mr *MockFullUploadMockRecorder) FinishUpload(ctx interface{}) *gomock.Call {
mr.mock.ctrl.T.Helper()
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "FinishUpload", reflect.TypeOf((*MockFullUpload)(nil).FinishUpload))
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "FinishUpload", reflect.TypeOf((*MockFullUpload)(nil).FinishUpload), ctx)
}
// Terminate mocks base method
func (m *MockFullUpload) Terminate() error {
func (m *MockFullUpload) Terminate(ctx context.Context) error {
m.ctrl.T.Helper()
ret := m.ctrl.Call(m, "Terminate")
ret := m.ctrl.Call(m, "Terminate", ctx)
ret0, _ := ret[0].(error)
return ret0
}
// Terminate indicates an expected call of Terminate
func (mr *MockFullUploadMockRecorder) Terminate() *gomock.Call {
func (mr *MockFullUploadMockRecorder) Terminate(ctx interface{}) *gomock.Call {
mr.mock.ctrl.T.Helper()
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Terminate", reflect.TypeOf((*MockFullUpload)(nil).Terminate))
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Terminate", reflect.TypeOf((*MockFullUpload)(nil).Terminate), ctx)
}
// DeclareLength mocks base method
func (m *MockFullUpload) DeclareLength(length int64) error {
func (m *MockFullUpload) DeclareLength(ctx context.Context, length int64) error {
m.ctrl.T.Helper()
ret := m.ctrl.Call(m, "DeclareLength", length)
ret := m.ctrl.Call(m, "DeclareLength", ctx, length)
ret0, _ := ret[0].(error)
return ret0
}
// DeclareLength indicates an expected call of DeclareLength
func (mr *MockFullUploadMockRecorder) DeclareLength(length interface{}) *gomock.Call {
func (mr *MockFullUploadMockRecorder) DeclareLength(ctx, length interface{}) *gomock.Call {
mr.mock.ctrl.T.Helper()
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "DeclareLength", reflect.TypeOf((*MockFullUpload)(nil).DeclareLength), length)
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "DeclareLength", reflect.TypeOf((*MockFullUpload)(nil).DeclareLength), ctx, length)
}
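Because the regenerated mocks above record the context as a call argument, every expectation has to include it; the updated handler tests in this commit do so with context.Background(). When the exact context value is irrelevant, gomock's Any matcher can stand in for it. A small example in the style of the tests in this diff, meant to sit alongside them in the handler_test package (the test name and upload id are illustrative):

    func TestGetUploadNotFound(t *testing.T) {
        ctrl := gomock.NewController(t)
        defer ctrl.Finish()

        store := NewMockFullDataStore(ctrl)

        // gomock.Any() matches whichever context the caller passes in.
        store.EXPECT().
            GetUpload(gomock.Any(), "foo").
            Return(nil, os.ErrNotExist)

        _, err := store.GetUpload(context.Background(), "foo")
        if !os.IsNotExist(err) {
            t.Fatalf("expected a not-exist error, got %v", err)
        }
    }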
// MockFullLocker is a mock of FullLocker interface

View File

@ -1,6 +1,7 @@
package handler_test
import (
"context"
"net/http"
"os"
"testing"
@ -20,8 +21,8 @@ func TestHead(t *testing.T) {
gomock.InOrder(
locker.EXPECT().NewLock("yes").Return(lock, nil),
lock.EXPECT().Lock().Return(nil),
store.EXPECT().GetUpload("yes").Return(upload, nil),
upload.EXPECT().GetInfo().Return(FileInfo{
store.EXPECT().GetUpload(context.Background(), "yes").Return(upload, nil),
upload.EXPECT().GetInfo(context.Background()).Return(FileInfo{
Offset: 11,
Size: 44,
MetaData: map[string]string{
@ -63,7 +64,7 @@ func TestHead(t *testing.T) {
})
SubTest(t, "UploadNotFoundFail", func(t *testing.T, store *MockFullDataStore, composer *StoreComposer) {
store.EXPECT().GetUpload("no").Return(nil, os.ErrNotExist)
store.EXPECT().GetUpload(context.Background(), "no").Return(nil, os.ErrNotExist)
handler, _ := NewHandler(Config{
StoreComposer: composer,
@ -92,8 +93,8 @@ func TestHead(t *testing.T) {
upload := NewMockFullUpload(ctrl)
gomock.InOrder(
store.EXPECT().GetUpload("yes").Return(upload, nil),
upload.EXPECT().GetInfo().Return(FileInfo{
store.EXPECT().GetUpload(context.Background(), "yes").Return(upload, nil),
upload.EXPECT().GetInfo(context.Background()).Return(FileInfo{
SizeIsDeferred: true,
Size: 0,
}, nil),
@ -122,8 +123,8 @@ func TestHead(t *testing.T) {
upload := NewMockFullUpload(ctrl)
gomock.InOrder(
store.EXPECT().GetUpload("yes").Return(upload, nil),
upload.EXPECT().GetInfo().Return(FileInfo{
store.EXPECT().GetUpload(context.Background(), "yes").Return(upload, nil),
upload.EXPECT().GetInfo(context.Background()).Return(FileInfo{
SizeIsDeferred: false,
Size: 10,
}, nil),

View File

@ -1,6 +1,7 @@
package handler_test
import (
"context"
"io"
"io/ioutil"
"net/http"
@ -22,14 +23,14 @@ func TestPatch(t *testing.T) {
upload := NewMockFullUpload(ctrl)
gomock.InOrder(
store.EXPECT().GetUpload("yes").Return(upload, nil),
upload.EXPECT().GetInfo().Return(FileInfo{
store.EXPECT().GetUpload(context.Background(), "yes").Return(upload, nil),
upload.EXPECT().GetInfo(context.Background()).Return(FileInfo{
ID: "yes",
Offset: 5,
Size: 10,
}, nil),
upload.EXPECT().WriteChunk(int64(5), NewReaderMatcher("hello")).Return(int64(5), nil),
upload.EXPECT().FinishUpload(),
upload.EXPECT().WriteChunk(context.Background(), int64(5), NewReaderMatcher("hello")).Return(int64(5), nil),
upload.EXPECT().FinishUpload(context.Background()),
)
handler, _ := NewHandler(Config{
@ -68,14 +69,14 @@ func TestPatch(t *testing.T) {
upload := NewMockFullUpload(ctrl)
gomock.InOrder(
store.EXPECT().GetUpload("yes").Return(upload, nil),
upload.EXPECT().GetInfo().Return(FileInfo{
store.EXPECT().GetUpload(context.Background(), "yes").Return(upload, nil),
upload.EXPECT().GetInfo(context.Background()).Return(FileInfo{
ID: "yes",
Offset: 5,
Size: 10,
}, nil),
upload.EXPECT().WriteChunk(int64(5), NewReaderMatcher("hello")).Return(int64(5), nil),
upload.EXPECT().FinishUpload(),
upload.EXPECT().WriteChunk(context.Background(), int64(5), NewReaderMatcher("hello")).Return(int64(5), nil),
upload.EXPECT().FinishUpload(context.Background()),
)
handler, _ := NewHandler(Config{
@ -105,8 +106,8 @@ func TestPatch(t *testing.T) {
upload := NewMockFullUpload(ctrl)
gomock.InOrder(
store.EXPECT().GetUpload("yes").Return(upload, nil),
upload.EXPECT().GetInfo().Return(FileInfo{
store.EXPECT().GetUpload(context.Background(), "yes").Return(upload, nil),
upload.EXPECT().GetInfo(context.Background()).Return(FileInfo{
ID: "yes",
Offset: 20,
Size: 20,
@ -134,7 +135,7 @@ func TestPatch(t *testing.T) {
})
SubTest(t, "UploadNotFoundFail", func(t *testing.T, store *MockFullDataStore, composer *StoreComposer) {
store.EXPECT().GetUpload("no").Return(nil, os.ErrNotExist)
store.EXPECT().GetUpload(context.Background(), "no").Return(nil, os.ErrNotExist)
handler, _ := NewHandler(Config{
StoreComposer: composer,
@ -158,8 +159,8 @@ func TestPatch(t *testing.T) {
upload := NewMockFullUpload(ctrl)
gomock.InOrder(
store.EXPECT().GetUpload("yes").Return(upload, nil),
upload.EXPECT().GetInfo().Return(FileInfo{
store.EXPECT().GetUpload(context.Background(), "yes").Return(upload, nil),
upload.EXPECT().GetInfo(context.Background()).Return(FileInfo{
ID: "yes",
Offset: 5,
}, nil),
@ -187,8 +188,8 @@ func TestPatch(t *testing.T) {
upload := NewMockFullUpload(ctrl)
gomock.InOrder(
store.EXPECT().GetUpload("yes").Return(upload, nil),
upload.EXPECT().GetInfo().Return(FileInfo{
store.EXPECT().GetUpload(context.Background(), "yes").Return(upload, nil),
upload.EXPECT().GetInfo(context.Background()).Return(FileInfo{
ID: "yes",
Offset: 5,
Size: 10,
@ -261,14 +262,14 @@ func TestPatch(t *testing.T) {
upload := NewMockFullUpload(ctrl)
gomock.InOrder(
store.EXPECT().GetUpload("yes").Return(upload, nil),
upload.EXPECT().GetInfo().Return(FileInfo{
store.EXPECT().GetUpload(context.Background(), "yes").Return(upload, nil),
upload.EXPECT().GetInfo(context.Background()).Return(FileInfo{
ID: "yes",
Offset: 5,
Size: 20,
}, nil),
upload.EXPECT().WriteChunk(int64(5), NewReaderMatcher("hellothisismore")).Return(int64(15), nil),
upload.EXPECT().FinishUpload(),
upload.EXPECT().WriteChunk(context.Background(), int64(5), NewReaderMatcher("hellothisismore")).Return(int64(15), nil),
upload.EXPECT().FinishUpload(context.Background()),
)
handler, _ := NewHandler(Config{
@ -303,17 +304,17 @@ func TestPatch(t *testing.T) {
upload := NewMockFullUpload(ctrl)
gomock.InOrder(
store.EXPECT().GetUpload("yes").Return(upload, nil),
upload.EXPECT().GetInfo().Return(FileInfo{
store.EXPECT().GetUpload(context.Background(), "yes").Return(upload, nil),
upload.EXPECT().GetInfo(context.Background()).Return(FileInfo{
ID: "yes",
Offset: 5,
Size: 0,
SizeIsDeferred: true,
}, nil),
store.EXPECT().AsLengthDeclarableUpload(upload).Return(upload),
upload.EXPECT().DeclareLength(int64(20)),
upload.EXPECT().WriteChunk(int64(5), NewReaderMatcher("hellothisismore")).Return(int64(15), nil),
upload.EXPECT().FinishUpload(),
upload.EXPECT().DeclareLength(context.Background(), int64(20)),
upload.EXPECT().WriteChunk(context.Background(), int64(5), NewReaderMatcher("hellothisismore")).Return(int64(15), nil),
upload.EXPECT().FinishUpload(context.Background()),
)
handler, _ := NewHandler(Config{
@ -346,16 +347,16 @@ func TestPatch(t *testing.T) {
upload := NewMockFullUpload(ctrl)
gomock.InOrder(
store.EXPECT().GetUpload("yes").Return(upload, nil),
upload.EXPECT().GetInfo().Return(FileInfo{
store.EXPECT().GetUpload(context.Background(), "yes").Return(upload, nil),
upload.EXPECT().GetInfo(context.Background()).Return(FileInfo{
ID: "yes",
Offset: 20,
Size: 0,
SizeIsDeferred: true,
}, nil),
store.EXPECT().AsLengthDeclarableUpload(upload).Return(upload),
upload.EXPECT().DeclareLength(int64(20)),
upload.EXPECT().FinishUpload(),
upload.EXPECT().DeclareLength(context.Background(), int64(20)),
upload.EXPECT().FinishUpload(context.Background()),
)
handler, _ := NewHandler(Config{
@ -385,26 +386,26 @@ func TestPatch(t *testing.T) {
upload2 := NewMockFullUpload(ctrl)
gomock.InOrder(
store.EXPECT().GetUpload("yes").Return(upload1, nil),
upload1.EXPECT().GetInfo().Return(FileInfo{
store.EXPECT().GetUpload(context.Background(), "yes").Return(upload1, nil),
upload1.EXPECT().GetInfo(context.Background()).Return(FileInfo{
ID: "yes",
Offset: 5,
Size: 0,
SizeIsDeferred: true,
}, nil),
store.EXPECT().AsLengthDeclarableUpload(upload1).Return(upload1),
upload1.EXPECT().DeclareLength(int64(20)),
upload1.EXPECT().WriteChunk(int64(5), NewReaderMatcher("hello")).Return(int64(5), nil),
upload1.EXPECT().DeclareLength(context.Background(), int64(20)),
upload1.EXPECT().WriteChunk(context.Background(), int64(5), NewReaderMatcher("hello")).Return(int64(5), nil),
store.EXPECT().GetUpload("yes").Return(upload2, nil),
upload2.EXPECT().GetInfo().Return(FileInfo{
store.EXPECT().GetUpload(context.Background(), "yes").Return(upload2, nil),
upload2.EXPECT().GetInfo(context.Background()).Return(FileInfo{
ID: "yes",
Offset: 10,
Size: 20,
SizeIsDeferred: false,
}, nil),
upload2.EXPECT().WriteChunk(int64(10), NewReaderMatcher("thisismore")).Return(int64(10), nil),
upload2.EXPECT().FinishUpload(),
upload2.EXPECT().WriteChunk(context.Background(), int64(10), NewReaderMatcher("thisismore")).Return(int64(10), nil),
upload2.EXPECT().FinishUpload(context.Background()),
)
handler, _ := NewHandler(Config{
@ -454,13 +455,13 @@ func TestPatch(t *testing.T) {
gomock.InOrder(
locker.EXPECT().NewLock("yes").Return(lock, nil),
lock.EXPECT().Lock().Return(nil),
store.EXPECT().GetUpload("yes").Return(upload, nil),
upload.EXPECT().GetInfo().Return(FileInfo{
store.EXPECT().GetUpload(context.Background(), "yes").Return(upload, nil),
upload.EXPECT().GetInfo(context.Background()).Return(FileInfo{
ID: "yes",
Offset: 0,
Size: 20,
}, nil),
upload.EXPECT().WriteChunk(int64(0), NewReaderMatcher("hello")).Return(int64(5), nil),
upload.EXPECT().WriteChunk(context.Background(), int64(0), NewReaderMatcher("hello")).Return(int64(5), nil),
lock.EXPECT().Unlock().Return(nil),
)
@ -491,13 +492,13 @@ func TestPatch(t *testing.T) {
upload := NewMockFullUpload(ctrl)
gomock.InOrder(
store.EXPECT().GetUpload("yes").Return(upload, nil),
upload.EXPECT().GetInfo().Return(FileInfo{
store.EXPECT().GetUpload(context.Background(), "yes").Return(upload, nil),
upload.EXPECT().GetInfo(context.Background()).Return(FileInfo{
ID: "yes",
Offset: 0,
Size: 100,
}, nil),
upload.EXPECT().WriteChunk(int64(0), NewReaderMatcher("first second third")).Return(int64(18), nil),
upload.EXPECT().WriteChunk(context.Background(), int64(0), NewReaderMatcher("first second third")).Return(int64(18), nil),
)
handler, _ := NewHandler(Config{
@ -563,15 +564,15 @@ func TestPatch(t *testing.T) {
upload := NewMockFullUpload(ctrl)
gomock.InOrder(
store.EXPECT().GetUpload("yes").Return(upload, nil),
upload.EXPECT().GetInfo().Return(FileInfo{
store.EXPECT().GetUpload(context.Background(), "yes").Return(upload, nil),
upload.EXPECT().GetInfo(context.Background()).Return(FileInfo{
ID: "yes",
Offset: 0,
Size: 100,
}, nil),
upload.EXPECT().WriteChunk(int64(0), NewReaderMatcher("first ")).Return(int64(6), http.ErrBodyReadAfterClose),
upload.EXPECT().WriteChunk(context.Background(), int64(0), NewReaderMatcher("first ")).Return(int64(6), http.ErrBodyReadAfterClose),
store.EXPECT().AsTerminatableUpload(upload).Return(upload),
upload.EXPECT().Terminate(),
upload.EXPECT().Terminate(context.Background()),
)
handler, _ := NewHandler(Config{

View File

@ -2,6 +2,7 @@ package handler_test
import (
"bytes"
"context"
"net/http"
"strings"
"testing"
@ -19,14 +20,14 @@ func TestPost(t *testing.T) {
upload := NewMockFullUpload(ctrl)
gomock.InOrder(
store.EXPECT().NewUpload(FileInfo{
store.EXPECT().NewUpload(context.Background(), FileInfo{
Size: 300,
MetaData: map[string]string{
"foo": "hello",
"bar": "world",
},
}).Return(upload, nil),
upload.EXPECT().GetInfo().Return(FileInfo{
upload.EXPECT().GetInfo(context.Background()).Return(FileInfo{
ID: "foo",
Size: 300,
MetaData: map[string]string{
@ -72,16 +73,16 @@ func TestPost(t *testing.T) {
upload := NewMockFullUpload(ctrl)
gomock.InOrder(
store.EXPECT().NewUpload(FileInfo{
store.EXPECT().NewUpload(context.Background(), FileInfo{
Size: 0,
MetaData: map[string]string{},
}).Return(upload, nil),
upload.EXPECT().GetInfo().Return(FileInfo{
upload.EXPECT().GetInfo(context.Background()).Return(FileInfo{
ID: "foo",
Size: 0,
MetaData: map[string]string{},
}, nil),
upload.EXPECT().FinishUpload().Return(nil),
upload.EXPECT().FinishUpload(context.Background()).Return(nil),
)
handler, _ := NewHandler(Config{
@ -202,11 +203,11 @@ func TestPost(t *testing.T) {
upload := NewMockFullUpload(ctrl)
gomock.InOrder(
store.EXPECT().NewUpload(FileInfo{
store.EXPECT().NewUpload(context.Background(), FileInfo{
Size: 300,
MetaData: map[string]string{},
}).Return(upload, nil),
upload.EXPECT().GetInfo().Return(FileInfo{
upload.EXPECT().GetInfo(context.Background()).Return(FileInfo{
ID: "foo",
Size: 300,
MetaData: map[string]string{},
@ -239,11 +240,11 @@ func TestPost(t *testing.T) {
upload := NewMockFullUpload(ctrl)
gomock.InOrder(
store.EXPECT().NewUpload(FileInfo{
store.EXPECT().NewUpload(context.Background(), FileInfo{
Size: 300,
MetaData: map[string]string{},
}).Return(upload, nil),
upload.EXPECT().GetInfo().Return(FileInfo{
upload.EXPECT().GetInfo(context.Background()).Return(FileInfo{
ID: "foo",
Size: 300,
MetaData: map[string]string{},
@ -277,11 +278,11 @@ func TestPost(t *testing.T) {
upload := NewMockFullUpload(ctrl)
gomock.InOrder(
store.EXPECT().NewUpload(FileInfo{
store.EXPECT().NewUpload(context.Background(), FileInfo{
Size: 300,
MetaData: map[string]string{},
}).Return(upload, nil),
upload.EXPECT().GetInfo().Return(FileInfo{
upload.EXPECT().GetInfo(context.Background()).Return(FileInfo{
ID: "foo",
Size: 300,
MetaData: map[string]string{},
@ -316,11 +317,11 @@ func TestPost(t *testing.T) {
upload := NewMockFullUpload(ctrl)
gomock.InOrder(
store.EXPECT().NewUpload(FileInfo{
store.EXPECT().NewUpload(context.Background(), FileInfo{
Size: 300,
MetaData: map[string]string{},
}).Return(upload, nil),
upload.EXPECT().GetInfo().Return(FileInfo{
upload.EXPECT().GetInfo(context.Background()).Return(FileInfo{
ID: "foo",
Size: 300,
MetaData: map[string]string{},
@ -358,14 +359,14 @@ func TestPost(t *testing.T) {
upload := NewMockFullUpload(ctrl)
gomock.InOrder(
store.EXPECT().NewUpload(FileInfo{
store.EXPECT().NewUpload(context.Background(), FileInfo{
Size: 300,
MetaData: map[string]string{
"foo": "hello",
"bar": "world",
},
}).Return(upload, nil),
upload.EXPECT().GetInfo().Return(FileInfo{
upload.EXPECT().GetInfo(context.Background()).Return(FileInfo{
ID: "foo",
Size: 300,
MetaData: map[string]string{
@ -375,7 +376,7 @@ func TestPost(t *testing.T) {
}, nil),
locker.EXPECT().NewLock("foo").Return(lock, nil),
lock.EXPECT().Lock().Return(nil),
upload.EXPECT().WriteChunk(int64(0), NewReaderMatcher("hello")).Return(int64(5), nil),
upload.EXPECT().WriteChunk(context.Background(), int64(0), NewReaderMatcher("hello")).Return(int64(5), nil),
lock.EXPECT().Unlock().Return(nil),
)
@ -411,11 +412,11 @@ func TestPost(t *testing.T) {
upload := NewMockFullUpload(ctrl)
gomock.InOrder(
store.EXPECT().NewUpload(FileInfo{
store.EXPECT().NewUpload(context.Background(), FileInfo{
Size: 300,
MetaData: map[string]string{},
}).Return(upload, nil),
upload.EXPECT().GetInfo().Return(FileInfo{
upload.EXPECT().GetInfo(context.Background()).Return(FileInfo{
ID: "foo",
Size: 300,
MetaData: map[string]string{},
@ -445,11 +446,11 @@ func TestPost(t *testing.T) {
upload := NewMockFullUpload(ctrl)
gomock.InOrder(
store.EXPECT().NewUpload(FileInfo{
store.EXPECT().NewUpload(context.Background(), FileInfo{
Size: 300,
MetaData: map[string]string{},
}).Return(upload, nil),
upload.EXPECT().GetInfo().Return(FileInfo{
upload.EXPECT().GetInfo(context.Background()).Return(FileInfo{
ID: "foo",
Size: 300,
MetaData: map[string]string{},

View File

@ -1,6 +1,7 @@
package handler_test
import (
"context"
"net/http"
"testing"
@ -39,13 +40,13 @@ func TestTerminate(t *testing.T) {
gomock.InOrder(
locker.EXPECT().NewLock("foo").Return(lock, nil),
lock.EXPECT().Lock().Return(nil),
store.EXPECT().GetUpload("foo").Return(upload, nil),
upload.EXPECT().GetInfo().Return(FileInfo{
store.EXPECT().GetUpload(context.Background(), "foo").Return(upload, nil),
upload.EXPECT().GetInfo(context.Background()).Return(FileInfo{
ID: "foo",
Size: 10,
}, nil),
store.EXPECT().AsTerminatableUpload(upload).Return(upload),
upload.EXPECT().Terminate().Return(nil),
upload.EXPECT().Terminate(context.Background()).Return(nil),
lock.EXPECT().Unlock().Return(nil),
)

View File

@ -240,6 +240,8 @@ func (handler *UnroutedHandler) Middleware(h http.Handler) http.Handler {
// PostFile creates a new file upload using the datastore after validating the
// length and parsing the metadata.
func (handler *UnroutedHandler) PostFile(w http.ResponseWriter, r *http.Request) {
ctx := r.Context()
// Check for presence of application/offset+octet-stream. If another content
// type is defined, it will be ignored and treated as none was set because
// some HTTP clients may enforce a default value for this header.
@ -271,7 +273,7 @@ func (handler *UnroutedHandler) PostFile(w http.ResponseWriter, r *http.Request)
return
}
size, err = handler.sizeOfUploads(partialUploads)
size, err = handler.sizeOfUploads(ctx, partialUploads)
if err != nil {
handler.sendError(w, r, err)
return
@ -304,13 +306,13 @@ func (handler *UnroutedHandler) PostFile(w http.ResponseWriter, r *http.Request)
PartialUploads: partialUploads,
}
upload, err := handler.composer.Core.NewUpload(info)
upload, err := handler.composer.Core.NewUpload(ctx, info)
if err != nil {
handler.sendError(w, r, err)
return
}
info, err = upload.GetInfo()
info, err = upload.GetInfo(ctx)
if err != nil {
handler.sendError(w, r, err)
return
@ -331,7 +333,7 @@ func (handler *UnroutedHandler) PostFile(w http.ResponseWriter, r *http.Request)
}
if isFinal {
if err := handler.composer.Concater.ConcatUploads(id, partialUploads); err != nil {
if err := handler.composer.Concater.ConcatUploads(ctx, id, partialUploads); err != nil {
handler.sendError(w, r, err)
return
}
@ -361,7 +363,7 @@ func (handler *UnroutedHandler) PostFile(w http.ResponseWriter, r *http.Request)
// Directly finish the upload if the upload is empty (i.e. has a size of 0).
// This statement is in an else-if block to avoid causing duplicate calls
// to finishUploadIfComplete if an upload is empty and contains a chunk.
handler.finishUploadIfComplete(upload, info)
handler.finishUploadIfComplete(ctx, upload, info)
}
handler.sendResp(w, r, http.StatusCreated)
@ -369,6 +371,7 @@ func (handler *UnroutedHandler) PostFile(w http.ResponseWriter, r *http.Request)
// HeadFile returns the length and offset for the HEAD request
func (handler *UnroutedHandler) HeadFile(w http.ResponseWriter, r *http.Request) {
ctx := r.Context()
id, err := extractIDFromPath(r.URL.Path)
if err != nil {
@ -386,13 +389,13 @@ func (handler *UnroutedHandler) HeadFile(w http.ResponseWriter, r *http.Request)
defer lock.Unlock()
}
upload, err := handler.composer.Core.GetUpload(id)
upload, err := handler.composer.Core.GetUpload(ctx, id)
if err != nil {
handler.sendError(w, r, err)
return
}
info, err := upload.GetInfo()
info, err := upload.GetInfo(ctx)
if err != nil {
handler.sendError(w, r, err)
return
@ -432,6 +435,7 @@ func (handler *UnroutedHandler) HeadFile(w http.ResponseWriter, r *http.Request)
// PatchFile adds a chunk to an upload. This operation is only allowed
// if enough space in the upload is left.
func (handler *UnroutedHandler) PatchFile(w http.ResponseWriter, r *http.Request) {
ctx := r.Context()
// Check for presence of application/offset+octet-stream
if r.Header.Get("Content-Type") != "application/offset+octet-stream" {
@ -462,13 +466,13 @@ func (handler *UnroutedHandler) PatchFile(w http.ResponseWriter, r *http.Request
defer lock.Unlock()
}
upload, err := handler.composer.Core.GetUpload(id)
upload, err := handler.composer.Core.GetUpload(ctx, id)
if err != nil {
handler.sendError(w, r, err)
return
}
info, err := upload.GetInfo()
info, err := upload.GetInfo(ctx)
if err != nil {
handler.sendError(w, r, err)
return
@ -508,7 +512,7 @@ func (handler *UnroutedHandler) PatchFile(w http.ResponseWriter, r *http.Request
}
lengthDeclarableUpload := handler.composer.LengthDeferrer.AsLengthDeclarableUpload(upload)
if err := lengthDeclarableUpload.DeclareLength(uploadLength); err != nil {
if err := lengthDeclarableUpload.DeclareLength(ctx, uploadLength); err != nil {
handler.sendError(w, r, err)
return
}
@ -529,6 +533,8 @@ func (handler *UnroutedHandler) PatchFile(w http.ResponseWriter, r *http.Request
// with the corresponding id. Afterwards, it will set the necessary response
// headers but will not send the response.
func (handler *UnroutedHandler) writeChunk(upload Upload, info FileInfo, w http.ResponseWriter, r *http.Request) error {
ctx := r.Context()
// Get Content-Length if possible
length := r.ContentLength
offset := info.Offset
@ -589,9 +595,9 @@ func (handler *UnroutedHandler) writeChunk(upload Upload, info FileInfo, w http.
}
var err error
bytesWritten, err = upload.WriteChunk(offset, reader)
bytesWritten, err = upload.WriteChunk(ctx, offset, reader)
if terminateUpload && handler.composer.UsesTerminater {
if terminateErr := handler.terminateUpload(upload, info); terminateErr != nil {
if terminateErr := handler.terminateUpload(ctx, upload, info); terminateErr != nil {
// We only log this error and not show it to the user since this
// termination error is not relevant to the uploading client
handler.log("UploadStopTerminateError", "id", id, "error", terminateErr.Error())
@ -618,17 +624,17 @@ func (handler *UnroutedHandler) writeChunk(upload Upload, info FileInfo, w http.
handler.Metrics.incBytesReceived(uint64(bytesWritten))
info.Offset = newOffset
return handler.finishUploadIfComplete(upload, info)
return handler.finishUploadIfComplete(ctx, upload, info)
}
// finishUploadIfComplete checks whether an upload is completed (i.e. upload offset
// matches upload size) and if so, it will call the data store's FinishUpload
// function and send the necessary message on the CompleteUpload channel.
func (handler *UnroutedHandler) finishUploadIfComplete(upload Upload, info FileInfo) error {
func (handler *UnroutedHandler) finishUploadIfComplete(ctx context.Context, upload Upload, info FileInfo) error {
// If the upload is completed, ...
if !info.SizeIsDeferred && info.Offset == info.Size {
// ... allow custom mechanism to finish and cleanup the upload
if err := upload.FinishUpload(); err != nil {
if err := upload.FinishUpload(ctx); err != nil {
return err
}
@ -646,6 +652,8 @@ func (handler *UnroutedHandler) finishUploadIfComplete(upload Upload, info FileI
// GetFile handles requests to download a file using a GET request. This is not
// part of the specification.
func (handler *UnroutedHandler) GetFile(w http.ResponseWriter, r *http.Request) {
ctx := r.Context()
id, err := extractIDFromPath(r.URL.Path)
if err != nil {
handler.sendError(w, r, err)
@ -662,13 +670,13 @@ func (handler *UnroutedHandler) GetFile(w http.ResponseWriter, r *http.Request)
defer lock.Unlock()
}
upload, err := handler.composer.Core.GetUpload(id)
upload, err := handler.composer.Core.GetUpload(ctx, id)
if err != nil {
handler.sendError(w, r, err)
return
}
info, err := upload.GetInfo()
info, err := upload.GetInfo(ctx)
if err != nil {
handler.sendError(w, r, err)
return
@ -687,7 +695,7 @@ func (handler *UnroutedHandler) GetFile(w http.ResponseWriter, r *http.Request)
return
}
src, err := upload.GetReader()
src, err := upload.GetReader(ctx)
if err != nil {
handler.sendError(w, r, err)
return
@ -765,6 +773,8 @@ func filterContentType(info FileInfo) (contentType string, contentDisposition st
// DelFile terminates an upload permanently.
func (handler *UnroutedHandler) DelFile(w http.ResponseWriter, r *http.Request) {
ctx := r.Context()
// Abort the request handling if the required interface is not implemented
if !handler.composer.UsesTerminater {
handler.sendError(w, r, ErrNotImplemented)
@ -787,7 +797,7 @@ func (handler *UnroutedHandler) DelFile(w http.ResponseWriter, r *http.Request)
defer lock.Unlock()
}
upload, err := handler.composer.Core.GetUpload(id)
upload, err := handler.composer.Core.GetUpload(ctx, id)
if err != nil {
handler.sendError(w, r, err)
return
@ -795,14 +805,14 @@ func (handler *UnroutedHandler) DelFile(w http.ResponseWriter, r *http.Request)
var info FileInfo
if handler.config.NotifyTerminatedUploads {
info, err = upload.GetInfo()
info, err = upload.GetInfo(ctx)
if err != nil {
handler.sendError(w, r, err)
return
}
}
err = handler.terminateUpload(upload, info)
err = handler.terminateUpload(ctx, upload, info)
if err != nil {
handler.sendError(w, r, err)
return
@ -816,10 +826,10 @@ func (handler *UnroutedHandler) DelFile(w http.ResponseWriter, r *http.Request)
// and updates the statistics.
// Note that the info argument is only needed if the terminated uploads
// notifications are enabled.
func (handler *UnroutedHandler) terminateUpload(upload Upload, info FileInfo) error {
func (handler *UnroutedHandler) terminateUpload(ctx context.Context, upload Upload, info FileInfo) error {
terminatableUpload := handler.composer.Terminater.AsTerminatableUpload(upload)
err := terminatableUpload.Terminate()
err := terminatableUpload.Terminate(ctx)
if err != nil {
return err
}
@ -983,14 +993,14 @@ func getHostAndProtocol(r *http.Request, allowForwarded bool) (host, proto strin
// Get the sum of all sizes for a list of upload ids while checking whether
// all of these uploads are finished yet. This is used to calculate the size
// of a final resource.
func (handler *UnroutedHandler) sizeOfUploads(ids []string) (size int64, err error) {
func (handler *UnroutedHandler) sizeOfUploads(ctx context.Context, ids []string) (size int64, err error) {
for _, id := range ids {
upload, err := handler.composer.Core.GetUpload(id)
upload, err := handler.composer.Core.GetUpload(ctx, id)
if err != nil {
return size, err
}
info, err := upload.GetInfo()
info, err := upload.GetInfo(ctx)
if err != nil {
return size, err
}
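A simplified, standalone illustration of the same pattern (not part of tusd, and it leaves out the finished-upload check that sizeOfUploads performs): every store call in the loop receives the one shared, request-scoped context, so a cancelled request stops the walk at the next call.

package example

import (
	"context"

	"github.com/tus/tusd/pkg/handler"
)

// sumSizes adds up the sizes of the given uploads, passing the same
// context to each GetUpload and GetInfo call.
func sumSizes(ctx context.Context, store handler.DataStore, ids []string) (int64, error) {
	var total int64
	for _, id := range ids {
		upload, err := store.GetUpload(ctx, id)
		if err != nil {
			return total, err
		}
		info, err := upload.GetInfo(ctx)
		if err != nil {
			return total, err
		}
		total += info.Size
	}
	return total, nil
}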

View File

@ -70,6 +70,7 @@ package s3store
import (
"bytes"
"context"
"encoding/json"
"errors"
"fmt"
@ -171,7 +172,7 @@ type s3Upload struct {
store *S3Store
}
func (store S3Store) NewUpload(info handler.FileInfo) (handler.Upload, error) {
func (store S3Store) NewUpload(ctx context.Context, info handler.FileInfo) (handler.Upload, error) {
// An upload larger than MaxObjectSize must return an error
if info.Size > store.MaxObjectSize {
return nil, fmt.Errorf("s3store: upload size of %v bytes exceeds MaxObjectSize of %v bytes", info.Size, store.MaxObjectSize)
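The size check happens before any request is sent to S3, so it can be exercised without network access. A hedged sketch of hitting it through the new signature (bucket, region, and the 5 GiB cap are all made up for the example):

package main

import (
	"context"
	"fmt"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/s3"

	"github.com/tus/tusd/pkg/handler"
	"github.com/tus/tusd/pkg/s3store"
)

func main() {
	sess := session.Must(session.NewSession(aws.NewConfig().WithRegion("eu-west-1")))
	store := s3store.New("my-bucket", s3.New(sess))
	store.MaxObjectSize = 5 * 1024 * 1024 * 1024 // hypothetical 5 GiB cap

	// NewUpload now takes a context and rejects uploads above the cap
	// before talking to S3.
	_, err := store.NewUpload(context.Background(), handler.FileInfo{
		Size: store.MaxObjectSize + 1,
	})
	fmt.Println(err) // s3store: upload size of ... exceeds MaxObjectSize of ...
}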
@ -221,7 +222,7 @@ func (store S3Store) NewUpload(info handler.FileInfo) (handler.Upload, error) {
return &s3Upload{id, &store}, nil
}
func (store S3Store) GetUpload(id string) (handler.Upload, error) {
func (store S3Store) GetUpload(ctx context.Context, id string) (handler.Upload, error) {
return &s3Upload{id, &store}, nil
}
@ -250,14 +251,14 @@ func (store S3Store) writeInfo(uploadId string, info handler.FileInfo) error {
return err
}
func (upload s3Upload) WriteChunk(offset int64, src io.Reader) (int64, error) {
func (upload s3Upload) WriteChunk(ctx context.Context, offset int64, src io.Reader) (int64, error) {
id := upload.id
store := upload.store
uploadId, multipartId := splitIds(id)
// Get the total size of the current upload
info, err := upload.GetInfo()
info, err := upload.GetInfo(ctx)
if err != nil {
return 0, err
}
@ -354,7 +355,7 @@ func (upload s3Upload) WriteChunk(offset int64, src io.Reader) (int64, error) {
}
}
func (upload s3Upload) GetInfo() (info handler.FileInfo, err error) {
func (upload s3Upload) GetInfo(ctx context.Context) (info handler.FileInfo, err error) {
id := upload.id
store := upload.store
uploadId, _ := splitIds(id)
@ -411,7 +412,7 @@ func (upload s3Upload) GetInfo() (info handler.FileInfo, err error) {
return
}
func (upload s3Upload) GetReader() (io.Reader, error) {
func (upload s3Upload) GetReader(ctx context.Context) (io.Reader, error) {
id := upload.id
store := upload.store
uploadId, multipartId := splitIds(id)
@ -454,7 +455,7 @@ func (upload s3Upload) GetReader() (io.Reader, error) {
return nil, err
}
func (upload s3Upload) Terminate() error {
func (upload s3Upload) Terminate(ctx context.Context) error {
id := upload.id
store := upload.store
uploadId, multipartId := splitIds(id)
@ -519,7 +520,7 @@ func (upload s3Upload) Terminate() error {
return nil
}
func (upload s3Upload) FinishUpload() error {
func (upload s3Upload) FinishUpload(ctx context.Context) error {
id := upload.id
store := upload.store
uploadId, multipartId := splitIds(id)
@ -553,7 +554,7 @@ func (upload s3Upload) FinishUpload() error {
return err
}
func (store S3Store) ConcatUploads(dest string, partialUploads []string) error {
func (store S3Store) ConcatUploads(ctx context.Context, dest string, partialUploads []string) error {
uploadId, multipartId := splitIds(dest)
numPartialUploads := len(partialUploads)
@ -590,18 +591,18 @@ func (store S3Store) ConcatUploads(dest string, partialUploads []string) error {
return newMultiError(errs)
}
upload, err := store.GetUpload(dest)
upload, err := store.GetUpload(ctx, dest)
if err != nil {
return err
}
return upload.FinishUpload()
return upload.FinishUpload(ctx)
}
func (upload s3Upload) DeclareLength(length int64) error {
func (upload s3Upload) DeclareLength(ctx context.Context, length int64) error {
id := upload.id
store := upload.store
uploadId, _ := splitIds(id)
info, err := upload.GetInfo()
info, err := upload.GetInfo(ctx)
if err != nil {
return err
}
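Likewise, a caller driving concatenation directly can bound the whole operation with one context, which ConcatUploads now reuses for its internal GetUpload and FinishUpload calls. This is only a sketch; the destination and partial-upload IDs are placeholders, and whether the S3 requests themselves get cancelled depends on how far the store forwards the context:

package example

import (
	"context"
	"time"

	"github.com/tus/tusd/pkg/s3store"
)

// concatWithTimeout concatenates the partial uploads into dest, giving the
// whole operation at most one minute.
func concatWithTimeout(store s3store.S3Store, dest string, partials []string) error {
	ctx, cancel := context.WithTimeout(context.Background(), time.Minute)
	defer cancel()
	return store.ConcatUploads(ctx, dest, partials)
}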

View File

@ -2,6 +2,7 @@ package s3store
import (
"bytes"
"context"
"fmt"
"io"
"io/ioutil"
@ -66,7 +67,7 @@ func TestNewUpload(t *testing.T) {
},
}
upload, err := store.NewUpload(info)
upload, err := store.NewUpload(context.Background(), info)
assert.Nil(err)
assert.NotNil(upload)
}
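The tests simply pass context.Background(), which is fine because the mocked S3 client never blocks. If one wanted the suite to fail fast should a store method start ignoring cancellation, a small helper like the following could be used instead; it is purely illustrative and not part of this change:

package s3store_test

import (
	"context"
	"time"
)

// newTestContext returns a context with a short deadline so that a store
// method which hangs cannot stall the test run indefinitely.
func newTestContext() (context.Context, context.CancelFunc) {
	return context.WithTimeout(context.Background(), 5*time.Second)
}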
@ -114,7 +115,7 @@ func TestNewUploadWithObjectPrefix(t *testing.T) {
},
}
upload, err := store.NewUpload(info)
upload, err := store.NewUpload(context.Background(), info)
assert.Nil(err)
assert.NotNil(upload)
}
@ -135,7 +136,7 @@ func TestNewUploadLargerMaxObjectSize(t *testing.T) {
Size: store.MaxObjectSize + 1,
}
upload, err := store.NewUpload(info)
upload, err := store.NewUpload(context.Background(), info)
assert.NotNil(err)
assert.EqualError(err, fmt.Sprintf("s3store: upload size of %v bytes exceeds MaxObjectSize of %v bytes", info.Size, store.MaxObjectSize))
assert.Nil(upload)
@ -154,10 +155,10 @@ func TestGetInfoNotFound(t *testing.T) {
Key: aws.String("uploadId.info"),
}).Return(nil, awserr.New("NoSuchKey", "The specified key does not exist.", nil))
upload, err := store.GetUpload("uploadId+multipartId")
upload, err := store.GetUpload(context.Background(), "uploadId+multipartId")
assert.Nil(err)
_, err = upload.GetInfo()
_, err = upload.GetInfo(context.Background())
assert.Equal(handler.ErrNotFound, err)
}
@ -211,10 +212,10 @@ func TestGetInfo(t *testing.T) {
}).Return(&s3.GetObjectOutput{}, awserr.New("NoSuchKey", "Not found", nil)),
)
upload, err := store.GetUpload("uploadId+multipartId")
upload, err := store.GetUpload(context.Background(), "uploadId+multipartId")
assert.Nil(err)
info, err := upload.GetInfo()
info, err := upload.GetInfo(context.Background())
assert.Nil(err)
assert.Equal(int64(500), info.Size)
assert.Equal(int64(400), info.Offset)
@ -256,10 +257,10 @@ func TestGetInfoWithIncompletePart(t *testing.T) {
}, nil),
)
upload, err := store.GetUpload("uploadId+multipartId")
upload, err := store.GetUpload(context.Background(), "uploadId+multipartId")
assert.Nil(err)
info, err := upload.GetInfo()
info, err := upload.GetInfo(context.Background())
assert.Nil(err)
assert.Equal(int64(10), info.Offset)
assert.Equal("uploadId+multipartId", info.ID)
@ -288,10 +289,10 @@ func TestGetInfoFinished(t *testing.T) {
}).Return(nil, awserr.New("NoSuchUpload", "The specified upload does not exist.", nil)),
)
upload, err := store.GetUpload("uploadId+multipartId")
upload, err := store.GetUpload(context.Background(), "uploadId+multipartId")
assert.Nil(err)
info, err := upload.GetInfo()
info, err := upload.GetInfo(context.Background())
assert.Nil(err)
assert.Equal(int64(500), info.Size)
assert.Equal(int64(500), info.Offset)
@ -312,10 +313,10 @@ func TestGetReader(t *testing.T) {
Body: ioutil.NopCloser(bytes.NewReader([]byte(`hello world`))),
}, nil)
upload, err := store.GetUpload("uploadId+multipartId")
upload, err := store.GetUpload(context.Background(), "uploadId+multipartId")
assert.Nil(err)
content, err := upload.GetReader()
content, err := upload.GetReader(context.Background())
assert.Nil(err)
assert.Equal(ioutil.NopCloser(bytes.NewReader([]byte(`hello world`))), content)
}
@ -341,10 +342,10 @@ func TestGetReaderNotFound(t *testing.T) {
}).Return(nil, awserr.New("NoSuchUpload", "The specified upload does not exist.", nil)),
)
upload, err := store.GetUpload("uploadId+multipartId")
upload, err := store.GetUpload(context.Background(), "uploadId+multipartId")
assert.Nil(err)
content, err := upload.GetReader()
content, err := upload.GetReader(context.Background())
assert.Nil(content)
assert.Equal(handler.ErrNotFound, err)
}
@ -372,10 +373,10 @@ func TestGetReaderNotFinished(t *testing.T) {
}, nil),
)
upload, err := store.GetUpload("uploadId+multipartId")
upload, err := store.GetUpload(context.Background(), "uploadId+multipartId")
assert.Nil(err)
content, err := upload.GetReader()
content, err := upload.GetReader(context.Background())
assert.Nil(content)
assert.Equal("cannot stream non-finished upload", err.Error())
}
@ -415,10 +416,10 @@ func TestDeclareLength(t *testing.T) {
}),
)
upload, err := store.GetUpload("uploadId+multipartId")
upload, err := store.GetUpload(context.Background(), "uploadId+multipartId")
assert.Nil(err)
err = store.AsLengthDeclarableUpload(upload).DeclareLength(500)
err = store.AsLengthDeclarableUpload(upload).DeclareLength(context.Background(), 500)
assert.Nil(err)
}
@ -489,10 +490,10 @@ func TestFinishUpload(t *testing.T) {
}).Return(nil, nil),
)
upload, err := store.GetUpload("uploadId+multipartId")
upload, err := store.GetUpload(context.Background(), "uploadId+multipartId")
assert.Nil(err)
err = upload.FinishUpload()
err = upload.FinishUpload(context.Background())
assert.Nil(err)
}
@ -581,10 +582,10 @@ func TestWriteChunk(t *testing.T) {
})).Return(nil, nil),
)
upload, err := store.GetUpload("uploadId+multipartId")
upload, err := store.GetUpload(context.Background(), "uploadId+multipartId")
assert.Nil(err)
bytesRead, err := upload.WriteChunk(300, bytes.NewReader([]byte("1234567890ABCD")))
bytesRead, err := upload.WriteChunk(context.Background(), 300, bytes.NewReader([]byte("1234567890ABCD")))
assert.Nil(err)
assert.Equal(int64(14), bytesRead)
}
@ -663,10 +664,10 @@ func TestWriteChunkWithUnexpectedEOF(t *testing.T) {
writer.CloseWithError(io.ErrUnexpectedEOF)
}()
upload, err := store.GetUpload("uploadId+multipartId")
upload, err := store.GetUpload(context.Background(), "uploadId+multipartId")
assert.Nil(err)
bytesRead, err := upload.WriteChunk(300, reader)
bytesRead, err := upload.WriteChunk(context.Background(), 300, reader)
assert.Nil(err)
assert.Equal(int64(14), bytesRead)
}
@ -731,10 +732,10 @@ func TestWriteChunkWriteIncompletePartBecauseTooSmall(t *testing.T) {
})).Return(nil, nil),
)
upload, err := store.GetUpload("uploadId+multipartId")
upload, err := store.GetUpload(context.Background(), "uploadId+multipartId")
assert.Nil(err)
bytesRead, err := upload.WriteChunk(300, bytes.NewReader([]byte("1234567890")))
bytesRead, err := upload.WriteChunk(context.Background(), 300, bytes.NewReader([]byte("1234567890")))
assert.Nil(err)
assert.Equal(int64(10), bytesRead)
}
@ -804,10 +805,10 @@ func TestWriteChunkPrependsIncompletePart(t *testing.T) {
})).Return(nil, nil),
)
upload, err := store.GetUpload("uploadId+multipartId")
upload, err := store.GetUpload(context.Background(), "uploadId+multipartId")
assert.Nil(err)
bytesRead, err := upload.WriteChunk(3, bytes.NewReader([]byte("45")))
bytesRead, err := upload.WriteChunk(context.Background(), 3, bytes.NewReader([]byte("45")))
assert.Nil(err)
assert.Equal(int64(2), bytesRead)
}
@ -875,10 +876,10 @@ func TestWriteChunkPrependsIncompletePartAndWritesANewIncompletePart(t *testing.
})).Return(nil, nil),
)
upload, err := store.GetUpload("uploadId+multipartId")
upload, err := store.GetUpload(context.Background(), "uploadId+multipartId")
assert.Nil(err)
bytesRead, err := upload.WriteChunk(3, bytes.NewReader([]byte("45")))
bytesRead, err := upload.WriteChunk(context.Background(), 3, bytes.NewReader([]byte("45")))
assert.Nil(err)
assert.Equal(int64(2), bytesRead)
}
@ -946,13 +947,13 @@ func TestWriteChunkAllowTooSmallLast(t *testing.T) {
})).Return(nil, nil),
)
upload, err := store.GetUpload("uploadId+multipartId")
upload, err := store.GetUpload(context.Background(), "uploadId+multipartId")
assert.Nil(err)
// 10 bytes are missing for the upload to be finished (offset at 490 for a
// 500-byte file) but the minimum chunk size is higher (20). The chunk is
// still uploaded since the last part may be smaller than the minimum.
bytesRead, err := upload.WriteChunk(490, bytes.NewReader([]byte("1234567890")))
bytesRead, err := upload.WriteChunk(context.Background(), 490, bytes.NewReader([]byte("1234567890")))
assert.Nil(err)
assert.Equal(int64(10), bytesRead)
}
@ -990,10 +991,10 @@ func TestTerminate(t *testing.T) {
},
}).Return(&s3.DeleteObjectsOutput{}, nil)
upload, err := store.GetUpload("uploadId+multipartId")
upload, err := store.GetUpload(context.Background(), "uploadId+multipartId")
assert.Nil(err)
err = store.AsTerminatableUpload(upload).Terminate()
err = store.AsTerminatableUpload(upload).Terminate(context.Background())
assert.Nil(err)
}
@ -1039,10 +1040,10 @@ func TestTerminateWithErrors(t *testing.T) {
},
}, nil)
upload, err := store.GetUpload("uploadId+multipartId")
upload, err := store.GetUpload(context.Background(), "uploadId+multipartId")
assert.Nil(err)
err = store.AsTerminatableUpload(upload).Terminate()
err = store.AsTerminatableUpload(upload).Terminate(context.Background())
assert.Equal("Multiple errors occurred:\n\tAWS S3 Error (hello) for object uploadId: it's me.\n", err.Error())
}
@ -1124,7 +1125,7 @@ func TestConcatUploads(t *testing.T) {
}).Return(nil, nil),
)
err := store.ConcatUploads("uploadId+multipartId", []string{
err := store.ConcatUploads(context.Background(), "uploadId+multipartId", []string{
"aaa+AAA",
"bbb+BBB",
"ccc+CCC",