Squashed commit of the following:

commit 5a268dbafb9318b888142931ea27a1af10b9a8e7
Author: Marius <maerious@gmail.com>
Date:   Wed Jul 19 11:47:26 2017 +0200

    Remove manual assignment of upload ID in S3Store

commit a37e149090ee7fd5f170d24ccc33b8af9ae18fae
Author: Marius <maerious@gmail.com>
Date:   Wed Jul 19 11:42:00 2017 +0200

    Format Go code

commit 6643a9be62
Author: Markus Kienast <mark@rickkiste.at>
Date:   Sun Jul 16 17:08:24 2017 +0200

    fixed ID value in .info; adjusted tests; fixed assert(expected, received) swap
Authored by Markus Kienast, committed by Marius on 2017-07-19 11:54:26 +02:00
parent ba0f004df7
commit 028ba57206
2 changed files with 33 additions and 36 deletions
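The change below boils down to reordering NewUpload: the multipart upload is created first, the composite id ("uploadId" + "+" + multipart upload ID) is written into info.ID, and only then is the .info object stored, so the JSON on S3 already carries the final id. A condensed sketch of the resulting flow, pieced together from the hunks below (the CreateMultipartUpload input fields, metadata handling and its error message are abbreviated here, since those lines are unchanged and not shown in the diff):

    // Condensed from the diff below; not a verbatim copy of the file.
    func (store S3Store) NewUpload(info tusd.FileInfo) (id string, err error) {
        var uploadId string
        if info.ID == "" {
            uploadId = uid.Uid()
        } else {
            // certain tests set info.ID in advance
            uploadId = info.ID
        }

        // 1. Create the multipart upload first, so its ID is known.
        res, err := store.Service.CreateMultipartUpload(&s3.CreateMultipartUploadInput{
            Bucket: aws.String(store.Bucket),
            Key:    aws.String(uploadId),
            // metadata fields omitted in this sketch
        })
        if err != nil {
            return "", err
        }

        // 2. Build the composite id and store it in the info struct.
        id = uploadId + "+" + *res.UploadId
        info.ID = id

        // 3. Only now marshal and upload the .info object, so it already
        //    contains the final id.
        infoJson, err := json.Marshal(info)
        if err != nil {
            return "", err
        }
        _, err = store.Service.PutObject(&s3.PutObjectInput{
            Bucket:        aws.String(store.Bucket),
            Key:           aws.String(uploadId + ".info"),
            Body:          bytes.NewReader(infoJson),
            ContentLength: aws.Int64(int64(len(infoJson))),
        })
        if err != nil {
            return "", fmt.Errorf("s3store: unable to create info file:\n%s", err)
        }

        return id, nil
    }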

s3store/s3store.go

@@ -168,25 +168,10 @@ func (store S3Store) NewUpload(info tusd.FileInfo) (id string, err error) {
     if info.ID == "" {
         uploadId = uid.Uid()
     } else {
+        // certain tests set info.ID in advance
         uploadId = info.ID
     }
 
-    infoJson, err := json.Marshal(info)
-    if err != nil {
-        return "", err
-    }
-
-    // Create object on S3 containing information about the file
-    _, err = store.Service.PutObject(&s3.PutObjectInput{
-        Bucket:        aws.String(store.Bucket),
-        Key:           aws.String(uploadId + ".info"),
-        Body:          bytes.NewReader(infoJson),
-        ContentLength: aws.Int64(int64(len(infoJson))),
-    })
-    if err != nil {
-        return "", fmt.Errorf("s3store: unable to create info file:\n%s", err)
-    }
-
     // Convert meta data into a map of pointers for AWS Go SDK, sigh.
     metadata := make(map[string]*string, len(info.MetaData))
     for key, value := range info.MetaData {
@@ -207,8 +192,25 @@ func (store S3Store) NewUpload(info tusd.FileInfo) (id string, err error) {
     }
 
     id = uploadId + "+" + *res.UploadId
+    info.ID = id
 
-    return
+    infoJson, err := json.Marshal(info)
+    if err != nil {
+        return "", err
+    }
+
+    // Create object on S3 containing information about the file
+    _, err = store.Service.PutObject(&s3.PutObjectInput{
+        Bucket:        aws.String(store.Bucket),
+        Key:           aws.String(uploadId + ".info"),
+        Body:          bytes.NewReader(infoJson),
+        ContentLength: aws.Int64(int64(len(infoJson))),
+    })
+    if err != nil {
+        return "", fmt.Errorf("s3store: unable to create info file:\n%s", err)
+    }
+
+    return id, nil
 }
 
 func (store S3Store) WriteChunk(id string, offset int64, src io.Reader) (int64, error) {
@@ -305,11 +307,6 @@ func (store S3Store) GetInfo(id string) (info tusd.FileInfo, err error) {
         return info, err
     }
 
-    // The JSON object stored on S3 does not contain the proper upload ID because
-    // the ID has constructed after the storing happened. Therefore we set it
-    // manually.
-    info.ID = id
-
     // Get uploaded parts and their offset
     listPtr, err := store.Service.ListParts(&s3.ListPartsInput{
         Bucket: aws.String(store.Bucket),
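With the .info object now written after the multipart upload exists, the JSON it contains already carries the composite "uploadId+multipartId" id, so GetInfo can drop the manual info.ID = id override removed above. For orientation, the composite id is taken apart again by the other store methods; the helper is not part of this diff, so the following is only a sketch assumed from the "uploadId+multipartId" format (the name splitIds and its exact behaviour are assumptions):

    package s3store

    import "strings"

    // splitIds sketches how a composite id such as "uploadId+multipartId" can be
    // split back into its two halves; the actual helper in s3store.go is not
    // shown in this diff.
    func splitIds(id string) (uploadId, multipartId string) {
        index := strings.Index(id, "+")
        if index == -1 {
            return id, ""
        }
        return id[:index], id[index+1:]
    }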

s3store/s3store_test.go

@@ -32,19 +32,13 @@ func TestNewUpload(t *testing.T) {
     s3obj := NewMockS3API(mockCtrl)
     store := s3store.New("bucket", s3obj)
 
-    assert.Equal(store.Bucket, "bucket")
-    assert.Equal(store.Service, s3obj)
+    assert.Equal("bucket", store.Bucket)
+    assert.Equal(s3obj, store.Service)
 
     s1 := "hello"
     s2 := "men?"
 
     gomock.InOrder(
-        s3obj.EXPECT().PutObject(&s3.PutObjectInput{
-            Bucket:        aws.String("bucket"),
-            Key:           aws.String("uploadId.info"),
-            Body:          bytes.NewReader([]byte(`{"ID":"uploadId","Size":500,"Offset":0,"MetaData":{"bar":"menü","foo":"hello"},"IsPartial":false,"IsFinal":false,"PartialUploads":null}`)),
-            ContentLength: aws.Int64(int64(136)),
-        }),
         s3obj.EXPECT().CreateMultipartUpload(&s3.CreateMultipartUploadInput{
             Bucket: aws.String("bucket"),
             Key:    aws.String("uploadId"),
@@ -55,6 +49,12 @@ func TestNewUpload(t *testing.T) {
         }).Return(&s3.CreateMultipartUploadOutput{
             UploadId: aws.String("multipartId"),
         }, nil),
+        s3obj.EXPECT().PutObject(&s3.PutObjectInput{
+            Bucket:        aws.String("bucket"),
+            Key:           aws.String("uploadId.info"),
+            Body:          bytes.NewReader([]byte(`{"ID":"uploadId+multipartId","Size":500,"Offset":0,"MetaData":{"bar":"menü","foo":"hello"},"IsPartial":false,"IsFinal":false,"PartialUploads":null}`)),
+            ContentLength: aws.Int64(int64(148)),
+        }),
     )
 
     info := tusd.FileInfo{
@@ -68,7 +68,7 @@ func TestNewUpload(t *testing.T) {
 
     id, err := store.NewUpload(info)
     assert.Nil(err)
-    assert.Equal(id, "uploadId+multipartId")
+    assert.Equal("uploadId+multipartId", id)
 }
 
 func TestGetInfoNotFound(t *testing.T) {
@@ -85,7 +85,7 @@ func TestGetInfoNotFound(t *testing.T) {
     }).Return(nil, awserr.New("NoSuchKey", "The specified key does not exist.", nil))
 
     _, err := store.GetInfo("uploadId+multipartId")
-    assert.Equal(err, tusd.ErrNotFound)
+    assert.Equal(tusd.ErrNotFound, err)
 }
 
 func TestGetInfo(t *testing.T) {
@@ -101,7 +101,7 @@ func TestGetInfo(t *testing.T) {
             Bucket: aws.String("bucket"),
             Key:    aws.String("uploadId.info"),
         }).Return(&s3.GetObjectOutput{
-            Body: ioutil.NopCloser(bytes.NewReader([]byte(`{"ID":"uploadId","Size":500,"Offset":0,"MetaData":{"bar":"menü","foo":"hello"},"IsPartial":false,"IsFinal":false,"PartialUploads":null}`))),
+            Body: ioutil.NopCloser(bytes.NewReader([]byte(`{"ID":"uploadId+multipartId","Size":500,"Offset":0,"MetaData":{"bar":"menü","foo":"hello"},"IsPartial":false,"IsFinal":false,"PartialUploads":null}`))),
         }, nil),
         s3obj.EXPECT().ListParts(&s3.ListPartsInput{
             Bucket: aws.String("bucket"),
@@ -173,7 +173,7 @@ func TestGetReader(t *testing.T) {
 
     content, err := store.GetReader("uploadId+multipartId")
     assert.Nil(err)
-    assert.Equal(content, ioutil.NopCloser(bytes.NewReader([]byte(`hello world`))))
+    assert.Equal(ioutil.NopCloser(bytes.NewReader([]byte(`hello world`))), content)
 }
 
 func TestGetReaderNotFound(t *testing.T) {
@@ -199,7 +199,7 @@ func TestGetReaderNotFound(t *testing.T) {
 
     content, err := store.GetReader("uploadId+multipartId")
     assert.Nil(content)
-    assert.Equal(err, tusd.ErrNotFound)
+    assert.Equal(tusd.ErrNotFound, err)
 }
 
 func TestGetReaderNotFinished(t *testing.T) {
@@ -227,7 +227,7 @@ func TestGetReaderNotFinished(t *testing.T) {
 
     content, err := store.GetReader("uploadId+multipartId")
    assert.Nil(content)
-    assert.Equal(err.Error(), "cannot stream non-finished upload")
+    assert.Equal("cannot stream non-finished upload", err.Error())
 }
 
 func TestFinishUpload(t *testing.T) {
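The test changes mirror the reordering: inside gomock.InOrder the PutObject expectation now comes after CreateMultipartUpload, and the expected .info body carries "uploadId+multipartId" instead of "uploadId", which is also why ContentLength grows from 136 to 148 bytes ("+multipartId" adds 12 bytes). The remaining edits only swap assertion arguments into testify's expected-first order, per the "fixed assert(expected, received) swap" commit message. A minimal, hypothetical illustration of that argument order (not part of this diff):

    package example

    import (
        "testing"

        "github.com/stretchr/testify/assert"
    )

    func TestExpectedFirst(t *testing.T) {
        assert := assert.New(t)
        id := "uploadId" + "+" + "multipartId"
        // testify's assert.Equal takes the expected value first and the actual
        // value second, so failure messages label the two values correctly.
        assert.Equal("uploadId+multipartId", id)
    }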