2015-02-01 13:57:57 +00:00
|
|
|
package tusd
|
|
|
|
|
|
|
|
import (
|
2015-02-03 18:01:35 +00:00
|
|
|
"encoding/base64"
|
2015-02-01 13:57:57 +00:00
|
|
|
"errors"
|
|
|
|
"io"
|
|
|
|
"log"
|
|
|
|
"net/http"
|
|
|
|
"net/url"
|
|
|
|
"os"
|
2015-02-17 13:19:56 +00:00
|
|
|
"regexp"
|
2015-02-01 13:57:57 +00:00
|
|
|
"strconv"
|
2015-02-03 18:01:35 +00:00
|
|
|
"strings"
|
2015-02-01 13:57:57 +00:00
|
|
|
|
|
|
|
"github.com/bmizerany/pat"
|
|
|
|
)
|
|
|
|
|
|
|
|
// logger writes one line per incoming request to stdout, prefixed with "[tusd] ".
var logger = log.New(os.Stdout, "[tusd] ", 0)
|
|
|
|
|
2015-02-17 13:19:56 +00:00
|
|
|
// reExtractFileId captures the last path segment of an upload URL (an
// optional trailing slash is ignored); used to extract upload ids from
// the Concat header.
var reExtractFileId = regexp.MustCompile(`([^/]+)\/?$`)
|
|
|
|
|
2015-02-01 13:57:57 +00:00
|
|
|
// Errors returned by the handler to the client. The HTTP status code
// sent for each of them is defined in ErrStatusCodes; any error missing
// there is answered with 500 Internal Server Error.
var (
	ErrUnsupportedVersion  = errors.New("unsupported version")
	ErrMaxSizeExceeded     = errors.New("maximum size exceeded")
	ErrInvalidEntityLength = errors.New("missing or invalid Entity-Length header")
	ErrInvalidOffset       = errors.New("missing or invalid Offset header")
	ErrNotFound            = errors.New("upload not found")
	ErrFileLocked          = errors.New("file currently locked")
	ErrIllegalOffset       = errors.New("illegal offset")
	ErrSizeExceeded        = errors.New("resource's size exceeded")
	ErrNotImplemented      = errors.New("feature not implemented")
	ErrUploadNotFinished   = errors.New("one of the partial uploads is not finished")
	ErrInvalidConcat       = errors.New("invalid Concat header")
	ErrModifyFinal         = errors.New("modifying a final upload is not allowed")
)
|
|
|
|
|
2015-02-01 15:17:56 +00:00
|
|
|
// ErrStatusCodes maps the handler errors to the HTTP status code sent in
// the response. Errors not present in this map are reported as
// 500 Internal Server Error (see sendError).
var ErrStatusCodes = map[error]int{
	ErrUnsupportedVersion:  http.StatusPreconditionFailed,
	ErrMaxSizeExceeded:     http.StatusRequestEntityTooLarge,
	ErrInvalidEntityLength: http.StatusBadRequest,
	ErrInvalidOffset:       http.StatusBadRequest,
	ErrNotFound:            http.StatusNotFound,
	ErrFileLocked:          423, // Locked (WebDAV) (RFC 4918); net/http has no constant for it
	ErrIllegalOffset:       http.StatusConflict,
	ErrSizeExceeded:        http.StatusRequestEntityTooLarge,
	ErrNotImplemented:      http.StatusNotImplemented,
	ErrUploadNotFinished:   http.StatusBadRequest,
	ErrInvalidConcat:       http.StatusBadRequest,
	ErrModifyFinal:         http.StatusForbidden,
}
|
|
|
|
|
|
|
|
// Config carries the options used by NewHandler to create a Handler.
type Config struct {
	// DataStore implementation used to store and retrieve the single uploads.
	// Must not be nil.
	DataStore DataStore
	// MaxSize defines how many bytes may be stored in one single upload. If its
	// value is 0 or smaller no limit will be enforced.
	MaxSize int64
	// BasePath defines the URL path used for handling uploads, e.g. "/files/".
	// If no trailing slash is presented it will be added. You may specify an
	// absolute URL containing a scheme, e.g. "http://tus.io"
	BasePath string
}
|
|
|
|
|
|
|
|
// Handler serves tus uploads over HTTP. Create instances with NewHandler.
type Handler struct {
	// config as passed to NewHandler.
	config Config
	// dataStore is a shortcut for config.DataStore.
	dataStore DataStore
	// isBasePathAbs reports whether basePath is an absolute URL (has a scheme).
	isBasePathAbs bool
	// basePath is the normalized base path; it ends in a slash when non-empty.
	basePath string
	// routeHandler dispatches requests to the per-method handler functions.
	routeHandler http.Handler
	// locks tracks the ids of uploads currently being written or read.
	// NOTE(review): this plain map is mutated from concurrent request
	// handlers (patchFile/getFile) without synchronization — a data race
	// under net/http's concurrent serving; it should be guarded by a
	// sync.Mutex.
	locks map[string]bool
}
|
|
|
|
|
2015-02-01 15:17:56 +00:00
|
|
|
// Create a new handler using the given configuration.
|
2015-02-01 13:57:57 +00:00
|
|
|
func NewHandler(config Config) (*Handler, error) {
|
|
|
|
base := config.BasePath
|
|
|
|
uri, err := url.Parse(base)
|
|
|
|
if err != nil {
|
|
|
|
return nil, err
|
|
|
|
}
|
|
|
|
|
|
|
|
// Ensure base path ends with slash to remove logic from absFileUrl
|
|
|
|
if base != "" && string(base[len(base)-1]) != "/" {
|
|
|
|
base += "/"
|
|
|
|
}
|
|
|
|
|
|
|
|
// Ensure base path begins with slash if not absolute (starts with scheme)
|
|
|
|
if !uri.IsAbs() && len(base) > 0 && string(base[0]) != "/" {
|
|
|
|
base = "/" + base
|
|
|
|
}
|
|
|
|
|
|
|
|
mux := pat.New()
|
|
|
|
|
|
|
|
handler := &Handler{
|
|
|
|
config: config,
|
|
|
|
dataStore: config.DataStore,
|
|
|
|
basePath: base,
|
|
|
|
isBasePathAbs: uri.IsAbs(),
|
|
|
|
routeHandler: mux,
|
|
|
|
locks: make(map[string]bool),
|
|
|
|
}
|
|
|
|
|
|
|
|
mux.Post("", http.HandlerFunc(handler.postFile))
|
|
|
|
mux.Head(":id", http.HandlerFunc(handler.headFile))
|
2015-02-06 21:05:33 +00:00
|
|
|
mux.Get(":id", http.HandlerFunc(handler.getFile))
|
2015-02-01 13:57:57 +00:00
|
|
|
mux.Add("PATCH", ":id", http.HandlerFunc(handler.patchFile))
|
|
|
|
|
|
|
|
return handler, nil
|
|
|
|
}
|
|
|
|
|
2015-02-01 15:17:56 +00:00
|
|
|
// Implement the http.Handler interface.
|
2015-02-01 13:57:57 +00:00
|
|
|
func (handler *Handler) ServeHTTP(w http.ResponseWriter, r *http.Request) {
|
|
|
|
go logger.Println(r.Method, r.URL.Path)
|
|
|
|
|
|
|
|
header := w.Header()
|
|
|
|
|
|
|
|
if origin := r.Header.Get("Origin"); origin != "" {
|
|
|
|
header.Set("Access-Control-Allow-Origin", origin)
|
|
|
|
|
|
|
|
if r.Method == "OPTIONS" {
|
|
|
|
// Preflight request
|
|
|
|
header.Set("Access-Control-Allow-Methods", "POST, HEAD, PATCH, OPTIONS")
|
|
|
|
header.Set("Access-Control-Allow-Headers", "Origin, X-Requested-With, Content-Type, Entity-Length, Offset, TUS-Resumable")
|
|
|
|
header.Set("Access-Control-Max-Age", "86400")
|
|
|
|
|
|
|
|
} else {
|
|
|
|
// Actual request
|
|
|
|
header.Set("Access-Control-Expose-Headers", "Offset, Location, Entity-Length, TUS-Version, TUS-Resumable, TUS-Max-Size, TUS-Extension")
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
// Set current version used by the server
|
|
|
|
header.Set("TUS-Resumable", "1.0.0")
|
|
|
|
|
|
|
|
// Set appropriated headers in case of OPTIONS method allowing protocol
|
|
|
|
// discovery and end with an 204 No Content
|
|
|
|
if r.Method == "OPTIONS" {
|
|
|
|
if handler.config.MaxSize > 0 {
|
|
|
|
header.Set("TUS-Max-Size", strconv.FormatInt(handler.config.MaxSize, 10))
|
|
|
|
}
|
|
|
|
|
|
|
|
header.Set("TUS-Version", "1.0.0")
|
2015-02-17 13:19:56 +00:00
|
|
|
header.Set("TUS-Extension", "file-creation,metadata,concatenation")
|
2015-02-01 13:57:57 +00:00
|
|
|
|
|
|
|
w.WriteHeader(http.StatusNoContent)
|
|
|
|
return
|
|
|
|
}
|
|
|
|
|
|
|
|
// Test if the version sent by the client is supported
|
2015-02-06 21:05:33 +00:00
|
|
|
// GET methods are not checked since a browser may visit this URL and does
|
|
|
|
// not include this header. This request is not part of the specification.
|
|
|
|
if r.Method != "GET" && r.Header.Get("TUS-Resumable") != "1.0.0" {
|
2015-02-01 13:57:57 +00:00
|
|
|
handler.sendError(w, ErrUnsupportedVersion)
|
|
|
|
return
|
|
|
|
}
|
|
|
|
|
|
|
|
// Proceed with routing the request
|
|
|
|
handler.routeHandler.ServeHTTP(w, r)
|
|
|
|
}
|
|
|
|
|
2015-02-01 15:17:56 +00:00
|
|
|
// Create a new file upload using the datastore after validating the length
|
|
|
|
// and parsing the metadata.
|
2015-02-01 13:57:57 +00:00
|
|
|
func (handler *Handler) postFile(w http.ResponseWriter, r *http.Request) {
|
2015-02-17 13:19:56 +00:00
|
|
|
// Parse Concat header
|
|
|
|
isPartial, isFinal, partialUploads, err := parseConcat(r.Header.Get("Concat"))
|
|
|
|
if err != nil {
|
|
|
|
handler.sendError(w, err)
|
2015-02-01 13:57:57 +00:00
|
|
|
return
|
|
|
|
}
|
|
|
|
|
2015-02-17 13:19:56 +00:00
|
|
|
// If the upload is a final upload created by concatenation multiple partial
|
|
|
|
// uploads the size is sum of all sizes of these files (no need for
|
|
|
|
// Entity-Length header)
|
|
|
|
var size int64
|
|
|
|
if isFinal {
|
|
|
|
size, err = handler.sizeOfUploads(partialUploads)
|
|
|
|
if err != nil {
|
|
|
|
handler.sendError(w, err)
|
|
|
|
return
|
|
|
|
}
|
|
|
|
} else {
|
|
|
|
size, err = strconv.ParseInt(r.Header.Get("Entity-Length"), 10, 64)
|
|
|
|
if err != nil || size < 0 {
|
|
|
|
handler.sendError(w, ErrInvalidEntityLength)
|
|
|
|
return
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2015-02-01 13:57:57 +00:00
|
|
|
// Test whether the size is still allowed
|
2015-02-05 17:25:38 +00:00
|
|
|
if handler.config.MaxSize > 0 && size > handler.config.MaxSize {
|
2015-02-01 13:57:57 +00:00
|
|
|
handler.sendError(w, ErrMaxSizeExceeded)
|
|
|
|
return
|
|
|
|
}
|
|
|
|
|
2015-02-03 18:01:35 +00:00
|
|
|
// Parse metadata
|
|
|
|
meta := parseMeta(r.Header.Get("Metadata"))
|
2015-02-01 13:57:57 +00:00
|
|
|
|
2015-02-16 16:53:50 +00:00
|
|
|
info := FileInfo{
|
2015-02-17 13:19:56 +00:00
|
|
|
Size: size,
|
|
|
|
MetaData: meta,
|
|
|
|
IsPartial: isPartial,
|
|
|
|
IsFinal: isFinal,
|
|
|
|
PartialUploads: partialUploads,
|
2015-02-16 16:53:50 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
id, err := handler.dataStore.NewUpload(info)
|
2015-02-01 13:57:57 +00:00
|
|
|
if err != nil {
|
|
|
|
handler.sendError(w, err)
|
|
|
|
return
|
|
|
|
}
|
|
|
|
|
2015-02-17 13:19:56 +00:00
|
|
|
if isFinal {
|
|
|
|
if err := handler.fillFinalUpload(id, partialUploads); err != nil {
|
|
|
|
handler.sendError(w, err)
|
|
|
|
return
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2015-02-01 13:57:57 +00:00
|
|
|
url := handler.absFileUrl(r, id)
|
|
|
|
w.Header().Set("Location", url)
|
|
|
|
w.WriteHeader(http.StatusCreated)
|
|
|
|
}
|
|
|
|
|
2015-02-01 15:17:56 +00:00
|
|
|
// Returns the length and offset for the HEAD request
|
2015-02-01 13:57:57 +00:00
|
|
|
func (handler *Handler) headFile(w http.ResponseWriter, r *http.Request) {
|
|
|
|
id := r.URL.Query().Get(":id")
|
|
|
|
info, err := handler.dataStore.GetInfo(id)
|
|
|
|
if err != nil {
|
|
|
|
// Interpret os.ErrNotExist as 404 Not Found
|
|
|
|
if os.IsNotExist(err) {
|
|
|
|
err = ErrNotFound
|
|
|
|
}
|
|
|
|
handler.sendError(w, err)
|
|
|
|
return
|
|
|
|
}
|
|
|
|
|
2015-02-17 13:19:56 +00:00
|
|
|
// Add Concat header if possible
|
|
|
|
if info.IsPartial {
|
|
|
|
w.Header().Set("Concat", "partial")
|
|
|
|
}
|
|
|
|
|
|
|
|
if info.IsFinal {
|
|
|
|
v := "final;"
|
|
|
|
for _, uploadId := range info.PartialUploads {
|
|
|
|
v += " " + handler.absFileUrl(r, uploadId)
|
|
|
|
}
|
|
|
|
w.Header().Set("Concat", v)
|
|
|
|
}
|
|
|
|
|
2015-02-01 13:57:57 +00:00
|
|
|
w.Header().Set("Entity-Length", strconv.FormatInt(info.Size, 10))
|
|
|
|
w.Header().Set("Offset", strconv.FormatInt(info.Offset, 10))
|
|
|
|
w.WriteHeader(http.StatusNoContent)
|
|
|
|
}
|
|
|
|
|
2015-02-01 15:17:56 +00:00
|
|
|
// Add a chunk to an upload. Only allowed if the upload is not locked and enough
|
|
|
|
// space is left.
|
2015-02-01 13:57:57 +00:00
|
|
|
func (handler *Handler) patchFile(w http.ResponseWriter, r *http.Request) {
|
|
|
|
id := r.URL.Query().Get(":id")
|
|
|
|
|
|
|
|
// Ensure file is not locked
|
|
|
|
if _, ok := handler.locks[id]; ok {
|
|
|
|
handler.sendError(w, ErrFileLocked)
|
|
|
|
return
|
|
|
|
}
|
|
|
|
|
|
|
|
// Lock file for further writes (heads are allowed)
|
|
|
|
handler.locks[id] = true
|
|
|
|
|
|
|
|
// File will be unlocked regardless of an error or success
|
|
|
|
defer func() {
|
|
|
|
delete(handler.locks, id)
|
|
|
|
}()
|
|
|
|
|
|
|
|
info, err := handler.dataStore.GetInfo(id)
|
|
|
|
if err != nil {
|
|
|
|
if os.IsNotExist(err) {
|
|
|
|
err = ErrNotFound
|
|
|
|
}
|
|
|
|
handler.sendError(w, err)
|
|
|
|
return
|
|
|
|
}
|
|
|
|
|
2015-02-17 13:19:56 +00:00
|
|
|
// Modifying a final upload is not allowed
|
|
|
|
if info.IsFinal {
|
|
|
|
handler.sendError(w, ErrModifyFinal)
|
|
|
|
return
|
|
|
|
}
|
|
|
|
|
2015-02-01 13:57:57 +00:00
|
|
|
// Ensure the offsets match
|
|
|
|
offset, err := strconv.ParseInt(r.Header.Get("Offset"), 10, 64)
|
|
|
|
if err != nil {
|
|
|
|
handler.sendError(w, ErrInvalidOffset)
|
|
|
|
return
|
|
|
|
}
|
|
|
|
|
|
|
|
if offset != info.Offset {
|
|
|
|
handler.sendError(w, ErrIllegalOffset)
|
|
|
|
return
|
|
|
|
}
|
|
|
|
|
|
|
|
// Get Content-Length if possible
|
|
|
|
length := r.ContentLength
|
|
|
|
|
|
|
|
// Test if this upload fits into the file's size
|
|
|
|
if offset+length > info.Size {
|
|
|
|
handler.sendError(w, ErrSizeExceeded)
|
|
|
|
return
|
|
|
|
}
|
|
|
|
|
|
|
|
maxSize := info.Size - offset
|
|
|
|
if length > 0 {
|
|
|
|
maxSize = length
|
|
|
|
}
|
|
|
|
|
|
|
|
// Limit the
|
|
|
|
reader := io.LimitReader(r.Body, maxSize)
|
|
|
|
|
|
|
|
err = handler.dataStore.WriteChunk(id, offset, reader)
|
|
|
|
if err != nil {
|
|
|
|
handler.sendError(w, err)
|
|
|
|
return
|
|
|
|
}
|
|
|
|
|
|
|
|
w.WriteHeader(http.StatusNoContent)
|
|
|
|
}
|
|
|
|
|
2015-02-06 21:05:33 +00:00
|
|
|
// Download a file using a GET request. This is not part of the specification.
|
|
|
|
func (handler *Handler) getFile(w http.ResponseWriter, r *http.Request) {
|
|
|
|
id := r.URL.Query().Get(":id")
|
|
|
|
|
|
|
|
// Ensure file is not locked
|
|
|
|
if _, ok := handler.locks[id]; ok {
|
|
|
|
handler.sendError(w, ErrFileLocked)
|
|
|
|
return
|
|
|
|
}
|
|
|
|
|
|
|
|
// Lock file for further writes (heads are allowed)
|
|
|
|
handler.locks[id] = true
|
|
|
|
|
|
|
|
// File will be unlocked regardless of an error or success
|
|
|
|
defer func() {
|
|
|
|
delete(handler.locks, id)
|
|
|
|
}()
|
|
|
|
|
|
|
|
info, err := handler.dataStore.GetInfo(id)
|
|
|
|
if err != nil {
|
|
|
|
if os.IsNotExist(err) {
|
|
|
|
err = ErrNotFound
|
|
|
|
}
|
|
|
|
handler.sendError(w, err)
|
|
|
|
return
|
|
|
|
}
|
|
|
|
|
|
|
|
// Do not do anything if no data is stored yet.
|
|
|
|
if info.Offset == 0 {
|
|
|
|
w.WriteHeader(http.StatusNoContent)
|
|
|
|
return
|
|
|
|
}
|
|
|
|
|
|
|
|
// Get reader
|
|
|
|
src, err := handler.dataStore.GetReader(id)
|
|
|
|
if err != nil {
|
|
|
|
handler.sendError(w, err)
|
|
|
|
return
|
|
|
|
}
|
|
|
|
|
|
|
|
w.Header().Set("Content-Length", strconv.FormatInt(info.Offset, 10))
|
|
|
|
w.WriteHeader(http.StatusOK)
|
|
|
|
io.Copy(w, src)
|
|
|
|
}
|
|
|
|
|
2015-02-01 15:17:56 +00:00
|
|
|
// Send the error in the response body. The status code will be looked up in
|
|
|
|
// ErrStatusCodes. If none is found 500 Internal Error will be used.
|
2015-02-01 13:57:57 +00:00
|
|
|
func (handler *Handler) sendError(w http.ResponseWriter, err error) {
|
|
|
|
status, ok := ErrStatusCodes[err]
|
|
|
|
if !ok {
|
|
|
|
status = 500
|
|
|
|
}
|
|
|
|
w.Header().Set("Content-Type", "text/plain")
|
|
|
|
w.WriteHeader(status)
|
|
|
|
w.Write([]byte(err.Error() + "\n"))
|
|
|
|
}
|
|
|
|
|
2015-02-01 15:17:56 +00:00
|
|
|
// Make an absolute URLs to the given upload id. If the base path is absolute
|
|
|
|
// it will be prepended else the host and protocol from the request is used.
|
2015-02-01 13:57:57 +00:00
|
|
|
func (handler *Handler) absFileUrl(r *http.Request, id string) string {
|
|
|
|
if handler.isBasePathAbs {
|
|
|
|
return handler.basePath + id
|
|
|
|
}
|
|
|
|
|
|
|
|
// Read origin and protocol from request
|
|
|
|
url := "http://"
|
|
|
|
if r.TLS != nil {
|
|
|
|
url = "https://"
|
|
|
|
}
|
|
|
|
|
|
|
|
url += r.Host + handler.basePath + id
|
|
|
|
|
|
|
|
return url
|
|
|
|
}
|
2015-02-03 18:01:35 +00:00
|
|
|
|
2015-02-17 13:19:56 +00:00
|
|
|
// The get sum of all sizes for a list of upload ids while checking whether
|
|
|
|
// all of these uploads are finished yet. This is used to calculate the size
|
|
|
|
// of a final resource.
|
|
|
|
func (handler *Handler) sizeOfUploads(ids []string) (size int64, err error) {
|
|
|
|
for _, id := range ids {
|
|
|
|
info, err := handler.dataStore.GetInfo(id)
|
|
|
|
if err != nil {
|
|
|
|
return size, err
|
|
|
|
}
|
|
|
|
|
|
|
|
if info.Offset != info.Size {
|
|
|
|
err = ErrUploadNotFinished
|
|
|
|
return size, err
|
|
|
|
}
|
|
|
|
|
|
|
|
size += info.Size
|
|
|
|
}
|
|
|
|
|
|
|
|
return
|
|
|
|
}
|
|
|
|
|
|
|
|
// Fill an empty upload with the content of the uploads by their ids. The data
|
|
|
|
// will be written in the order as they appear in the slice
|
|
|
|
func (handler *Handler) fillFinalUpload(id string, uploads []string) error {
|
|
|
|
readers := make([]io.Reader, len(uploads))
|
|
|
|
|
|
|
|
for index, uploadId := range uploads {
|
|
|
|
reader, err := handler.dataStore.GetReader(uploadId)
|
|
|
|
if err != nil {
|
|
|
|
return err
|
|
|
|
}
|
|
|
|
readers[index] = reader
|
|
|
|
}
|
|
|
|
|
|
|
|
reader := io.MultiReader(readers...)
|
|
|
|
|
|
|
|
return handler.dataStore.WriteChunk(id, 0, reader)
|
|
|
|
}
|
|
|
|
|
2015-02-03 18:01:35 +00:00
|
|
|
// Decode the Metadata header as defined by the metadata extension:
// comma-separated "key base64value" pairs, e.g.
//
//	Metadata: key base64value, key2 base64value
//
// Pairs without exactly one key and one value, and values that are not
// valid base64, are skipped silently.
func parseMeta(header string) map[string]string {
	meta := make(map[string]string)

	for _, pair := range strings.Split(header, ",") {
		fields := strings.Split(strings.TrimSpace(pair), " ")

		// A pair must consist of exactly a key and a value.
		if len(fields) != 2 {
			continue
		}

		// Drop this pair if its value is not valid base64.
		decoded, err := base64.StdEncoding.DecodeString(fields[1])
		if err != nil {
			continue
		}

		meta[fields[0]] = string(decoded)
	}

	return meta
}
|
2015-02-17 13:19:56 +00:00
|
|
|
|
|
|
|
// Parse the Concat header, e.g.
|
|
|
|
// Concat: partial
|
|
|
|
// Concat: final; http://tus.io/files/a /files/b/
|
|
|
|
func parseConcat(header string) (isPartial bool, isFinal bool, partialUploads []string, err error) {
|
|
|
|
if len(header) == 0 {
|
|
|
|
return
|
|
|
|
}
|
|
|
|
|
|
|
|
if header == "partial" {
|
|
|
|
isPartial = true
|
|
|
|
return
|
|
|
|
}
|
|
|
|
|
|
|
|
l := len("final; ")
|
|
|
|
if strings.HasPrefix(header, "final; ") && len(header) > l {
|
|
|
|
isFinal = true
|
|
|
|
|
|
|
|
list := strings.Split(header[l:], " ")
|
|
|
|
for _, value := range list {
|
|
|
|
value := strings.TrimSpace(value)
|
|
|
|
if value == "" {
|
|
|
|
continue
|
|
|
|
}
|
|
|
|
|
|
|
|
// Extract ids out of URL
|
|
|
|
result := reExtractFileId.FindStringSubmatch(value)
|
|
|
|
if len(result) != 2 {
|
|
|
|
err = ErrInvalidConcat
|
|
|
|
return
|
|
|
|
}
|
|
|
|
|
|
|
|
partialUploads = append(partialUploads, result[1])
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
// If no valid partial upload ids are extracted this is not a final upload.
|
|
|
|
if len(partialUploads) == 0 {
|
|
|
|
isFinal = false
|
|
|
|
err = ErrInvalidConcat
|
|
|
|
}
|
|
|
|
|
|
|
|
return
|
|
|
|
}
|