add concatenation extension

Acconut 2015-02-17 14:19:56 +01:00
parent 0c16aedc29
commit 93eb701e14
4 changed files with 380 additions and 7 deletions

concat_test.go (new file, 223 lines)

@@ -0,0 +1,223 @@
package tusd
import (
"io"
"io/ioutil"
"net/http"
"net/http/httptest"
"reflect"
"strings"
"testing"
)
type concatPartialStore struct {
t *testing.T
zeroStore
}
func (s concatPartialStore) NewUpload(info FileInfo) (string, error) {
if !info.IsPartial {
s.t.Error("expected upload to be partial")
}
if info.IsFinal {
s.t.Error("expected upload to not be final")
}
if len(info.PartialUploads) != 0 {
s.t.Error("expected no partial uploads")
}
return "foo", nil
}
func (s concatPartialStore) GetInfo(id string) (FileInfo, error) {
return FileInfo{
IsPartial: true,
}, nil
}
func TestConcatPartial(t *testing.T) {
handler, _ := NewHandler(Config{
MaxSize: 400,
BasePath: "files",
DataStore: concatPartialStore{
t: t,
},
})
// Test successful POST request
req, _ := http.NewRequest("POST", "", nil)
req.Header.Set("TUS-Resumable", "1.0.0")
req.Header.Set("Entity-Length", "300")
req.Header.Set("Concat", "partial")
req.Host = "tus.io"
w := httptest.NewRecorder()
handler.ServeHTTP(w, req)
if w.Code != http.StatusCreated {
t.Errorf("Expected 201 Created (got %v)", w.Code)
}
// Test successful HEAD request
req, _ = http.NewRequest("HEAD", "foo", nil)
req.Header.Set("TUS-Resumable", "1.0.0")
req.Host = "tus.io"
w = httptest.NewRecorder()
handler.ServeHTTP(w, req)
if w.Code != http.StatusNoContent {
t.Errorf("Expected 204 No Content (got %v)", w.Code)
}
if w.HeaderMap.Get("Concat") != "partial" {
t.Errorf("Expect Concat header to be set")
}
}
type concatFinalStore struct {
t *testing.T
zeroStore
}
func (s concatFinalStore) NewUpload(info FileInfo) (string, error) {
if info.IsPartial {
s.t.Error("expected upload to not be partial")
}
if !info.IsFinal {
s.t.Error("expected upload to be final")
}
if !reflect.DeepEqual(info.PartialUploads, []string{"a", "b"}) {
s.t.Error("unexpected partial uploads")
}
return "foo", nil
}
func (s concatFinalStore) GetInfo(id string) (FileInfo, error) {
if id == "a" || id == "b" {
return FileInfo{
IsPartial: true,
Size: 5,
Offset: 5,
}, nil
}
if id == "c" {
return FileInfo{
IsPartial: true,
Size: 5,
Offset: 3,
}, nil
}
if id == "foo" {
return FileInfo{
IsFinal: true,
PartialUploads: []string{"a", "b"},
Size: 10,
Offset: 10,
}, nil
}
return FileInfo{}, ErrNotFound
}
func (s concatFinalStore) GetReader(id string) (io.Reader, error) {
if id == "a" {
return strings.NewReader("hello"), nil
}
if id == "b" {
return strings.NewReader("world"), nil
}
return nil, ErrNotFound
}
func (s concatFinalStore) WriteChunk(id string, offset int64, src io.Reader) error {
if id != "foo" {
s.t.Error("unexpected file id")
}
if offset != 0 {
s.t.Error("expected offset to be 0")
}
b, _ := ioutil.ReadAll(src)
if string(b) != "helloworld" {
s.t.Error("unexpected content")
}
return nil
}
func TestConcatFinal(t *testing.T) {
handler, _ := NewHandler(Config{
MaxSize: 400,
BasePath: "files",
DataStore: concatFinalStore{
t: t,
},
})
// Test successful POST request
req, _ := http.NewRequest("POST", "", nil)
req.Header.Set("TUS-Resumable", "1.0.0")
req.Header.Set("Concat", "final; http://tus.io/files/a /files/b/")
req.Host = "tus.io"
w := httptest.NewRecorder()
handler.ServeHTTP(w, req)
if w.Code != http.StatusCreated {
t.Errorf("Expected 201 Created (got %v)", w.Code)
}
// Test successful HEAD request
req, _ = http.NewRequest("HEAD", "foo", nil)
req.Header.Set("TUS-Resumable", "1.0.0")
req.Host = "tus.io"
w = httptest.NewRecorder()
handler.ServeHTTP(w, req)
if w.Code != http.StatusNoContent {
t.Errorf("Expected 204 No Content (got %v)", w.Code)
}
if w.HeaderMap.Get("Concat") != "final; http://tus.io/files/a http://tus.io/files/b" {
t.Errorf("Expect Concat header to be set")
}
if w.HeaderMap.Get("Entity-Length") != "10" {
t.Errorf("Expect Entity-Length header to be 10")
}
// Test concatenating a non-finished upload (id: c)
req, _ = http.NewRequest("POST", "", nil)
req.Header.Set("TUS-Resumable", "1.0.0")
req.Header.Set("Concat", "final; http://tus.io/files/c")
req.Host = "tus.io"
w = httptest.NewRecorder()
handler.ServeHTTP(w, req)
if w.Code != http.StatusBadRequest {
t.Errorf("Expected 201 Created (got %v)", w.Code)
}
// Test exceeding max. size
handler, _ = NewHandler(Config{
MaxSize: 9,
BasePath: "files",
DataStore: concatFinalStore{
t: t,
},
})
req, _ = http.NewRequest("POST", "", nil)
req.Header.Set("TUS-Resumable", "1.0.0")
req.Header.Set("Concat", "final; http://tus.io/files/a /files/b/")
req.Host = "tus.io"
w = httptest.NewRecorder()
handler.ServeHTTP(w, req)
if w.Code != http.StatusRequestEntityTooLarge {
t.Errorf("Expected 201 Created (got %v)", w.Code)
}
}


@@ -13,6 +13,16 @@ type FileInfo struct {
// Offset in bytes (zero-based)
Offset int64
MetaData MetaData
// Indicates that this is a partial upload which will later be used to form
// a final upload by concatenation. Partial uploads should not be processed
// when they are finished since they are only incomplete chunks of files.
IsPartial bool
// Indicates that this is a final upload
IsFinal bool
// If the upload is a final one (see IsFinal) this will be a non-empty,
// ordered slice containing the ids of the partial uploads from which the
// final upload is constructed by concatenation.
PartialUploads []string
}
type DataStore interface {

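For concreteness, a minimal sketch of the FileInfo values a data store would receive under this extension; the import path, ids, and sizes are illustrative assumptions (they mirror concat_test.go above), not part of this commit.

package main

import (
	"fmt"

	"github.com/tus/tusd" // assumed import path for the tusd package
)

func main() {
	// A partial upload created with "Concat: partial".
	partial := tusd.FileInfo{
		Size:      5,
		IsPartial: true,
	}

	// A final upload created with "Concat: final; .../files/a /files/b/".
	// Its Size is the sum of the partial uploads' sizes and PartialUploads
	// lists their ids in the order given in the Concat header.
	final := tusd.FileInfo{
		Size:           10,
		IsFinal:        true,
		PartialUploads: []string{"a", "b"},
	}

	fmt.Println(partial, final)
}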

@@ -8,6 +8,7 @@ import (
"net/http"
"net/url"
"os"
"regexp"
"strconv"
"strings"
@@ -16,6 +17,8 @@ import (
var logger = log.New(os.Stdout, "[tusd] ", 0)
var reExtractFileId = regexp.MustCompile(`([^/]+)\/?$`)
var (
ErrUnsupportedVersion = errors.New("unsupported version")
ErrMaxSizeExceeded = errors.New("maximum size exceeded")
@@ -26,6 +29,9 @@ var (
ErrIllegalOffset = errors.New("illegal offset")
ErrSizeExceeded = errors.New("resource's size exceeded")
ErrNotImplemented = errors.New("feature not implemented")
ErrUploadNotFinished = errors.New("one of the partial uploads is not finished")
ErrInvalidConcat = errors.New("invalid Concat header")
ErrModifyFinal = errors.New("modifying a final upload is not allowed")
)
// HTTP status codes sent in the response when the specific error is returned.
@@ -39,6 +45,9 @@ var ErrStatusCodes = map[error]int{
ErrIllegalOffset: http.StatusConflict,
ErrSizeExceeded: http.StatusRequestEntityTooLarge,
ErrNotImplemented: http.StatusNotImplemented,
ErrUploadNotFinished: http.StatusBadRequest,
ErrInvalidConcat: http.StatusBadRequest,
ErrModifyFinal: http.StatusForbidden,
}
type Config struct {
@@ -132,7 +141,7 @@ func (handler *Handler) ServeHTTP(w http.ResponseWriter, r *http.Request) {
}
header.Set("TUS-Version", "1.0.0")
header.Set("TUS-Extension", "file-creation,metadata")
header.Set("TUS-Extension", "file-creation,metadata,concatenation")
w.WriteHeader(http.StatusNoContent)
return
@@ -153,12 +162,31 @@ func (handler *Handler) ServeHTTP(w http.ResponseWriter, r *http.Request) {
// Create a new file upload using the datastore after validating the length
// and parsing the metadata.
func (handler *Handler) postFile(w http.ResponseWriter, r *http.Request) {
size, err := strconv.ParseInt(r.Header.Get("Entity-Length"), 10, 64)
if err != nil || size < 0 {
handler.sendError(w, ErrInvalidEntityLength)
// Parse Concat header
isPartial, isFinal, partialUploads, err := parseConcat(r.Header.Get("Concat"))
if err != nil {
handler.sendError(w, err)
return
}
// If the upload is a final upload created by concatenating multiple partial
// uploads, its size is the sum of the sizes of these uploads (no need for an
// Entity-Length header)
var size int64
if isFinal {
size, err = handler.sizeOfUploads(partialUploads)
if err != nil {
handler.sendError(w, err)
return
}
} else {
size, err = strconv.ParseInt(r.Header.Get("Entity-Length"), 10, 64)
if err != nil || size < 0 {
handler.sendError(w, ErrInvalidEntityLength)
return
}
}
// Test whether the size is still allowed
if handler.config.MaxSize > 0 && size > handler.config.MaxSize {
handler.sendError(w, ErrMaxSizeExceeded)
@@ -169,8 +197,11 @@ func (handler *Handler) postFile(w http.ResponseWriter, r *http.Request) {
meta := parseMeta(r.Header.Get("Metadata"))
info := FileInfo{
Size: size,
MetaData: meta,
Size: size,
MetaData: meta,
IsPartial: isPartial,
IsFinal: isFinal,
PartialUploads: partialUploads,
}
id, err := handler.dataStore.NewUpload(info)
@@ -179,6 +210,13 @@ func (handler *Handler) postFile(w http.ResponseWriter, r *http.Request) {
return
}
if isFinal {
if err := handler.fillFinalUpload(id, partialUploads); err != nil {
handler.sendError(w, err)
return
}
}
url := handler.absFileUrl(r, id)
w.Header().Set("Location", url)
w.WriteHeader(http.StatusCreated)
@@ -197,6 +235,19 @@ func (handler *Handler) headFile(w http.ResponseWriter, r *http.Request) {
return
}
// Add Concat header if possible
if info.IsPartial {
w.Header().Set("Concat", "partial")
}
if info.IsFinal {
v := "final;"
for _, uploadId := range info.PartialUploads {
v += " " + handler.absFileUrl(r, uploadId)
}
w.Header().Set("Concat", v)
}
w.Header().Set("Entity-Length", strconv.FormatInt(info.Size, 10))
w.Header().Set("Offset", strconv.FormatInt(info.Offset, 10))
w.WriteHeader(http.StatusNoContent)
@@ -230,6 +281,12 @@ func (handler *Handler) patchFile(w http.ResponseWriter, r *http.Request) {
return
}
// Modifying a final upload is not allowed
if info.IsFinal {
handler.sendError(w, ErrModifyFinal)
return
}
// Ensure the offsets match
offset, err := strconv.ParseInt(r.Header.Get("Offset"), 10, 64)
if err != nil {
@@ -343,6 +400,45 @@ func (handler *Handler) absFileUrl(r *http.Request, id string) string {
return url
}
// Get the sum of the sizes of a list of uploads while checking whether all
// of these uploads are finished. This is used to calculate the size of a
// final resource.
func (handler *Handler) sizeOfUploads(ids []string) (size int64, err error) {
for _, id := range ids {
info, err := handler.dataStore.GetInfo(id)
if err != nil {
return size, err
}
if info.Offset != info.Size {
err = ErrUploadNotFinished
return size, err
}
size += info.Size
}
return
}
// Fill an empty upload with the content of the uploads referenced by the
// given ids. The data will be written in the order in which the ids appear
// in the slice.
func (handler *Handler) fillFinalUpload(id string, uploads []string) error {
readers := make([]io.Reader, len(uploads))
for index, uploadId := range uploads {
reader, err := handler.dataStore.GetReader(uploadId)
if err != nil {
return err
}
readers[index] = reader
}
reader := io.MultiReader(readers...)
return handler.dataStore.WriteChunk(id, 0, reader)
}
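// Illustrative sketch, not part of this commit: io.MultiReader returns the
// readers' contents back to back, in the order they are passed in, which is
// why the final upload consists of the partial uploads concatenated in the
// order listed in the Concat header. For example:
//
//	r := io.MultiReader(strings.NewReader("hello"), strings.NewReader("world"))
//	b, _ := ioutil.ReadAll(r)
//	// string(b) == "helloworld"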
// Parse the metadata as defined in the Metadata extension.
// e.g. Metadata: key base64value, key2 base64value
func parseMeta(header string) map[string]string {
@@ -370,3 +466,47 @@ func parseMeta(header string) map[string]string {
return meta
}
// Parse the Concat header, e.g.
// Concat: partial
// Concat: final; http://tus.io/files/a /files/b/
func parseConcat(header string) (isPartial bool, isFinal bool, partialUploads []string, err error) {
if len(header) == 0 {
return
}
if header == "partial" {
isPartial = true
return
}
l := len("final; ")
if strings.HasPrefix(header, "final; ") && len(header) > l {
isFinal = true
list := strings.Split(header[l:], " ")
for _, value := range list {
value := strings.TrimSpace(value)
if value == "" {
continue
}
// Extract ids out of URL
result := reExtractFileId.FindStringSubmatch(value)
if len(result) != 2 {
err = ErrInvalidConcat
return
}
partialUploads = append(partialUploads, result[1])
}
}
// If no valid partial upload ids are extracted this is not a final upload.
if len(partialUploads) == 0 {
isFinal = false
err = ErrInvalidConcat
}
return
}
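A quick sketch of what parseConcat returns for the header values exercised in concat_test.go; this hypothetical test is not part of the commit and assumes it lives alongside concat_test.go in package tusd (using the reflect and testing imports).

func TestParseConcatExamples(t *testing.T) {
	isPartial, _, _, _ := parseConcat("partial")
	if !isPartial {
		t.Error("expected a partial upload")
	}

	_, isFinal, partialUploads, _ := parseConcat("final; http://tus.io/files/a /files/b/")
	if !isFinal || !reflect.DeepEqual(partialUploads, []string{"a", "b"}) {
		t.Error("expected a final upload consisting of the ids a and b")
	}

	// Without any extractable partial upload ids the header is rejected.
	if _, _, _, err := parseConcat("final;"); err != ErrInvalidConcat {
		t.Error("expected ErrInvalidConcat")
	}
}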


@@ -20,7 +20,7 @@ func TestOptions(t *testing.T) {
}
headers := map[string]string{
"TUS-Extension": "file-creation,metadata",
"TUS-Extension": "file-creation,metadata,concatenation",
"TUS-Version": "1.0.0",
"TUS-Resumable": "1.0.0",
"TUS-Max-Size": "400",