2019-06-11 16:23:20 +00:00
|
|
|
package handler
|
2015-11-29 01:33:55 +00:00
|
|
|
|
|
|
|
import (
|
2019-06-11 14:16:02 +00:00
|
|
|
"context"
|
2015-11-29 01:33:55 +00:00
|
|
|
"encoding/base64"
|
|
|
|
"errors"
|
|
|
|
"io"
|
|
|
|
"log"
|
2018-06-03 16:37:45 +00:00
|
|
|
"math"
|
2017-03-01 18:43:37 +00:00
|
|
|
"net"
|
2015-11-29 01:33:55 +00:00
|
|
|
"net/http"
|
|
|
|
"os"
|
|
|
|
"regexp"
|
|
|
|
"strconv"
|
|
|
|
"strings"
|
2017-01-19 20:02:48 +00:00
|
|
|
"sync/atomic"
|
|
|
|
"time"
|
2015-11-29 01:33:55 +00:00
|
|
|
)
|
|
|
|
|
2018-04-23 21:10:23 +00:00
|
|
|
// UploadLengthDeferred is the value of the Upload-Defer-Length header
// indicating that the upload's final length is not known at creation time.
const UploadLengthDeferred = "1"
|
|
|
|
|
2016-01-16 14:27:35 +00:00
|
|
|
var (
	// reExtractFileID matches the trailing path segment of a URL, which is
	// used as the file/upload ID.
	reExtractFileID = regexp.MustCompile(`([^/]+)\/?$`)
	// reForwardedHost extracts the host value from a Forwarded header.
	reForwardedHost = regexp.MustCompile(`host=([^,]+)`)
	// reForwardedProto extracts the protocol (http or https) from a
	// Forwarded header.
	reForwardedProto = regexp.MustCompile(`proto=(https?)`)
	// reMimeType matches a plain type/subtype MIME type, e.g. image/png.
	reMimeType = regexp.MustCompile(`^[a-z]+\/[a-z\-\+\.]+$`)
)
|
2015-11-29 01:33:55 +00:00
|
|
|
|
2019-09-29 18:03:18 +00:00
|
|
|
var (
	// requestTimeoutDuration is the deadline applied to request contexts
	// created with context.WithTimeout by the handler methods.
	// NOTE(review): package-level and mutable — presumably only overridden
	// in tests; confirm before relying on it being constant.
	requestTimeoutDuration = 3 * time.Second
)
|
|
|
|
|
2017-01-26 22:15:05 +00:00
|
|
|
// HTTPError represents an error with an additional status code attached
// which may be used when this error is sent in a HTTP response.
// See the net/http package for standardized status codes.
type HTTPError interface {
	error
	// StatusCode returns the HTTP status code to use for the response.
	StatusCode() int
	// Body returns the response body to send alongside the status code.
	Body() []byte
}
|
|
|
|
|
|
|
|
// httpError is the default HTTPError implementation: it embeds the wrapped
// error and carries a fixed HTTP status code.
type httpError struct {
	error
	statusCode int
}
|
2015-11-29 01:33:55 +00:00
|
|
|
|
2017-01-26 22:15:05 +00:00
|
|
|
func (err httpError) StatusCode() int {
|
|
|
|
return err.statusCode
|
2015-11-29 01:33:55 +00:00
|
|
|
}
|
|
|
|
|
2019-02-12 21:45:08 +00:00
|
|
|
func (err httpError) Body() []byte {
|
|
|
|
return []byte(err.Error())
|
|
|
|
}
|
|
|
|
|
2017-01-26 22:15:05 +00:00
|
|
|
// NewHTTPError adds the given status code to the provided error and returns
|
|
|
|
// the new error instance. The status code may be used in corresponding HTTP
|
|
|
|
// responses. See the net/http package for standardized status codes.
|
|
|
|
func NewHTTPError(err error, statusCode int) HTTPError {
|
|
|
|
return httpError{err, statusCode}
|
|
|
|
}
|
|
|
|
|
|
|
|
var (
|
2018-04-23 21:09:14 +00:00
|
|
|
ErrUnsupportedVersion = NewHTTPError(errors.New("unsupported version"), http.StatusPreconditionFailed)
|
|
|
|
ErrMaxSizeExceeded = NewHTTPError(errors.New("maximum size exceeded"), http.StatusRequestEntityTooLarge)
|
|
|
|
ErrInvalidContentType = NewHTTPError(errors.New("missing or invalid Content-Type header"), http.StatusBadRequest)
|
|
|
|
ErrInvalidUploadLength = NewHTTPError(errors.New("missing or invalid Upload-Length header"), http.StatusBadRequest)
|
|
|
|
ErrInvalidOffset = NewHTTPError(errors.New("missing or invalid Upload-Offset header"), http.StatusBadRequest)
|
|
|
|
ErrNotFound = NewHTTPError(errors.New("upload not found"), http.StatusNotFound)
|
|
|
|
ErrFileLocked = NewHTTPError(errors.New("file currently locked"), 423) // Locked (WebDAV) (RFC 4918)
|
|
|
|
ErrMismatchOffset = NewHTTPError(errors.New("mismatched offset"), http.StatusConflict)
|
|
|
|
ErrSizeExceeded = NewHTTPError(errors.New("resource's size exceeded"), http.StatusRequestEntityTooLarge)
|
|
|
|
ErrNotImplemented = NewHTTPError(errors.New("feature not implemented"), http.StatusNotImplemented)
|
|
|
|
ErrUploadNotFinished = NewHTTPError(errors.New("one of the partial uploads is not finished"), http.StatusBadRequest)
|
|
|
|
ErrInvalidConcat = NewHTTPError(errors.New("invalid Upload-Concat header"), http.StatusBadRequest)
|
|
|
|
ErrModifyFinal = NewHTTPError(errors.New("modifying a final upload is not allowed"), http.StatusForbidden)
|
|
|
|
ErrUploadLengthAndUploadDeferLength = NewHTTPError(errors.New("provided both Upload-Length and Upload-Defer-Length"), http.StatusBadRequest)
|
|
|
|
ErrInvalidUploadDeferLength = NewHTTPError(errors.New("invalid Upload-Defer-Length header"), http.StatusBadRequest)
|
2019-05-26 19:56:51 +00:00
|
|
|
ErrUploadStoppedByServer = NewHTTPError(errors.New("upload has been stopped by server"), http.StatusBadRequest)
|
2017-01-26 22:15:05 +00:00
|
|
|
)
|
|
|
|
|
2019-09-19 09:15:48 +00:00
|
|
|
// HTTPRequest contains basic details of an incoming HTTP request.
type HTTPRequest struct {
	// Method is the HTTP method, e.g. POST or PATCH
	Method string
	// URI is the full HTTP request URI, e.g. /files/fooo
	URI string
	// RemoteAddr contains the network address that sent the request
	RemoteAddr string
	// Header contains all HTTP headers as present in the HTTP request.
	// NOTE(review): this map may be shared with the underlying *http.Request
	// rather than copied — treat it as read-only. TODO confirm with callers.
	Header http.Header
}
|
|
|
|
|
|
|
|
// HookEvent represents an event from tusd which can be handled by the application.
type HookEvent struct {
	// Upload contains information about the upload that caused this hook
	// to be fired.
	Upload FileInfo
	// HTTPRequest contains details about the HTTP request that reached
	// tusd.
	HTTPRequest HTTPRequest
}
|
|
|
|
|
|
|
|
func newHookEvent(info FileInfo, r *http.Request) HookEvent {
|
|
|
|
return HookEvent{
|
|
|
|
Upload: info,
|
|
|
|
HTTPRequest: HTTPRequest{
|
|
|
|
Method: r.Method,
|
|
|
|
URI: r.RequestURI,
|
|
|
|
RemoteAddr: r.RemoteAddr,
|
|
|
|
Header: r.Header,
|
|
|
|
},
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2015-12-07 20:09:47 +00:00
|
|
|
// UnroutedHandler exposes methods to handle requests as part of the tus protocol,
// such as PostFile, HeadFile, PatchFile and DelFile. In addition the GetFile method
// is provided which is, however, not part of the specification.
type UnroutedHandler struct {
	// config holds the configuration this handler was created with.
	config Config
	// composer bundles the data store and its optional extension interfaces.
	composer *StoreComposer
	// isBasePathAbs reports whether basePath is an absolute URL.
	isBasePathAbs bool
	// basePath is the path (or URL) prefix under which uploads are reachable.
	basePath string
	// logger receives the handler's log output.
	logger *log.Logger
	// extensions is the comma-separated value sent in the Tus-Extension header.
	extensions string

	// CompleteUploads is used to send notifications whenever an upload is
	// completed by a user. The HookEvent will contain information about this
	// upload after it is completed. Sending to this channel will only
	// happen if the NotifyCompleteUploads field is set to true in the Config
	// structure. Notifications will also be sent for completions using the
	// Concatenation extension.
	CompleteUploads chan HookEvent
	// TerminatedUploads is used to send notifications whenever an upload is
	// terminated by a user. The HookEvent will contain information about this
	// upload gathered before the termination. Sending to this channel will only
	// happen if the NotifyTerminatedUploads field is set to true in the Config
	// structure.
	TerminatedUploads chan HookEvent
	// UploadProgress is used to send notifications about the progress of the
	// currently running uploads. For each open PATCH request, every second
	// a HookEvent instance will be send over this channel with the Offset field
	// being set to the number of bytes which have been transfered to the server.
	// Please be aware that this number may be higher than the number of bytes
	// which have been stored by the data store! Sending to this channel will only
	// happen if the NotifyUploadProgress field is set to true in the Config
	// structure.
	UploadProgress chan HookEvent
	// CreatedUploads is used to send notifications about the uploads having been
	// created. It triggers post creation and therefore has all the HookEvent incl.
	// the ID available already. It facilitates the post-create hook. Sending to
	// this channel will only happen if the NotifyCreatedUploads field is set to
	// true in the Config structure.
	CreatedUploads chan HookEvent
	// Metrics provides numbers of the usage for this handler.
	Metrics Metrics
}
|
|
|
|
|
|
|
|
// NewUnroutedHandler creates a new handler without routing using the given
|
|
|
|
// configuration. It exposes the http handlers which need to be combined with
|
|
|
|
// a router (aka mux) of your choice. If you are looking for preconfigured
|
|
|
|
// handler see NewHandler.
|
|
|
|
func NewUnroutedHandler(config Config) (*UnroutedHandler, error) {
|
2016-02-21 22:25:35 +00:00
|
|
|
if err := config.validate(); err != nil {
|
2015-11-29 01:33:55 +00:00
|
|
|
return nil, err
|
|
|
|
}
|
|
|
|
|
2015-12-26 23:44:02 +00:00
|
|
|
// Only promote extesions using the Tus-Extension header which are implemented
|
2018-06-03 16:15:50 +00:00
|
|
|
extensions := "creation,creation-with-upload"
|
2016-02-21 22:25:35 +00:00
|
|
|
if config.StoreComposer.UsesTerminater {
|
2015-12-26 23:44:02 +00:00
|
|
|
extensions += ",termination"
|
|
|
|
}
|
2016-02-21 22:25:35 +00:00
|
|
|
if config.StoreComposer.UsesConcater {
|
2016-01-20 14:33:17 +00:00
|
|
|
extensions += ",concatenation"
|
|
|
|
}
|
2018-06-03 16:15:50 +00:00
|
|
|
if config.StoreComposer.UsesLengthDeferrer {
|
|
|
|
extensions += ",creation-defer-length"
|
|
|
|
}
|
2015-12-26 23:44:02 +00:00
|
|
|
|
2015-11-29 01:33:55 +00:00
|
|
|
handler := &UnroutedHandler{
|
2016-03-12 21:24:57 +00:00
|
|
|
config: config,
|
|
|
|
composer: config.StoreComposer,
|
|
|
|
basePath: config.BasePath,
|
|
|
|
isBasePathAbs: config.isAbs,
|
2019-09-19 09:15:48 +00:00
|
|
|
CompleteUploads: make(chan HookEvent),
|
|
|
|
TerminatedUploads: make(chan HookEvent),
|
|
|
|
UploadProgress: make(chan HookEvent),
|
|
|
|
CreatedUploads: make(chan HookEvent),
|
2016-03-12 21:24:57 +00:00
|
|
|
logger: config.Logger,
|
|
|
|
extensions: extensions,
|
2016-05-24 15:04:28 +00:00
|
|
|
Metrics: newMetrics(),
|
2015-11-29 01:33:55 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
return handler, nil
|
|
|
|
}
|
|
|
|
|
2019-09-10 13:26:37 +00:00
|
|
|
// SupportedExtensions returns a comma-separated list of the supported tus extensions.
// The availability of an extension usually depends on whether the provided data store
// implements some additional interfaces. The returned string is suitable for use
// as a Tus-Extension header value.
func (handler *UnroutedHandler) SupportedExtensions() string {
	return handler.extensions
}
|
|
|
|
|
2015-12-07 20:10:02 +00:00
|
|
|
// Middleware checks various aspects of the request and ensures that it
// conforms with the spec. Also handles method overriding for clients which
// cannot make PATCH and DELETE requests. If you are using the tusd handlers
// directly you will need to wrap at least the POST and PATCH endpoints in
// this middleware.
func (handler *UnroutedHandler) Middleware(h http.Handler) http.Handler {
	return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		// Allow overriding the HTTP method. The reason for this is
		// that some libraries/environments do not support PATCH and
		// DELETE requests, e.g. Flash in a browser and parts of Java
		if newMethod := r.Header.Get("X-HTTP-Method-Override"); newMethod != "" {
			r.Method = newMethod
		}

		handler.log("RequestIncoming", "method", r.Method, "path", r.URL.Path)

		handler.Metrics.incRequestsTotal(r.Method)

		header := w.Header()

		// Mirror the Origin back for CORS; no allow-list is applied here.
		if origin := r.Header.Get("Origin"); origin != "" {
			header.Set("Access-Control-Allow-Origin", origin)

			if r.Method == "OPTIONS" {
				// Preflight request
				header.Add("Access-Control-Allow-Methods", "POST, GET, HEAD, PATCH, DELETE, OPTIONS")
				header.Add("Access-Control-Allow-Headers", "Origin, X-Requested-With, Content-Type, Upload-Length, Upload-Offset, Tus-Resumable, Upload-Metadata, Upload-Defer-Length, Upload-Concat")
				header.Set("Access-Control-Max-Age", "86400")

			} else {
				// Actual request
				header.Add("Access-Control-Expose-Headers", "Upload-Offset, Location, Upload-Length, Tus-Version, Tus-Resumable, Tus-Max-Size, Tus-Extension, Upload-Metadata, Upload-Defer-Length, Upload-Concat")
			}
		}

		// Set current version used by the server
		header.Set("Tus-Resumable", "1.0.0")

		// Add nosniff to all responses https://golang.org/src/net/http/server.go#L1429
		header.Set("X-Content-Type-Options", "nosniff")

		// Set appropriate headers in case of OPTIONS method allowing protocol
		// discovery and end with an 204 No Content
		if r.Method == "OPTIONS" {
			if handler.config.MaxSize > 0 {
				header.Set("Tus-Max-Size", strconv.FormatInt(handler.config.MaxSize, 10))
			}

			header.Set("Tus-Version", "1.0.0")
			header.Set("Tus-Extension", handler.extensions)

			// Although the 204 No Content status code is a better fit in this case,
			// since we do not have a response body included, we cannot use it here
			// as some browsers only accept 200 OK as successful response to a
			// preflight request. If we send them the 204 No Content the response
			// will be ignored or interpreted as a rejection.
			// For example, the Presto engine, which is used in older versions of
			// Opera, Opera Mobile and Opera Mini, handles CORS this way.
			handler.sendResp(w, r, http.StatusOK)
			return
		}

		// Test if the version sent by the client is supported
		// GET methods are not checked since a browser may visit this URL and does
		// not include this header. This request is not part of the specification.
		if r.Method != "GET" && r.Header.Get("Tus-Resumable") != "1.0.0" {
			handler.sendError(w, r, ErrUnsupportedVersion)
			return
		}

		// Proceed with routing the request
		h.ServeHTTP(w, r)
	})
}
|
|
|
|
|
|
|
|
// PostFile creates a new file upload using the datastore after validating the
// length and parsing the metadata. On success the response carries a Location
// header pointing at the new upload and a 201 Created status.
func (handler *UnroutedHandler) PostFile(w http.ResponseWriter, r *http.Request) {
	ctx := context.Background()

	// Check for presence of application/offset+octet-stream. If another content
	// type is defined, it will be ignored and treated as none was set because
	// some HTTP clients may enforce a default value for this header.
	containsChunk := r.Header.Get("Content-Type") == "application/offset+octet-stream"

	// Only use the proper Upload-Concat header if the concatenation extension
	// is even supported by the data store.
	var concatHeader string
	if handler.composer.UsesConcater {
		concatHeader = r.Header.Get("Upload-Concat")
	}

	// Parse Upload-Concat header
	isPartial, isFinal, partialUploadIDs, err := parseConcat(concatHeader)
	if err != nil {
		handler.sendError(w, r, err)
		return
	}

	// If the upload is a final upload created by concatenating multiple partial
	// uploads the size is sum of all sizes of these files (no need for
	// Upload-Length header)
	var size int64
	var sizeIsDeferred bool
	var partialUploads []Upload
	if isFinal {
		// A final upload must not contain a chunk within the creation request
		if containsChunk {
			handler.sendError(w, r, ErrModifyFinal)
			return
		}

		partialUploads, size, err = handler.sizeOfUploads(ctx, partialUploadIDs)
		if err != nil {
			handler.sendError(w, r, err)
			return
		}
	} else {
		// Non-final uploads take their size from Upload-Length, or defer it
		// via Upload-Defer-Length; exactly one of the two must be present.
		uploadLengthHeader := r.Header.Get("Upload-Length")
		uploadDeferLengthHeader := r.Header.Get("Upload-Defer-Length")
		size, sizeIsDeferred, err = handler.validateNewUploadLengthHeaders(uploadLengthHeader, uploadDeferLengthHeader)
		if err != nil {
			handler.sendError(w, r, err)
			return
		}
	}

	// Test whether the size is still allowed
	if handler.config.MaxSize > 0 && size > handler.config.MaxSize {
		handler.sendError(w, r, ErrMaxSizeExceeded)
		return
	}

	// Parse metadata
	meta := ParseMetadataHeader(r.Header.Get("Upload-Metadata"))

	info := FileInfo{
		Size:           size,
		SizeIsDeferred: sizeIsDeferred,
		MetaData:       meta,
		IsPartial:      isPartial,
		IsFinal:        isFinal,
		PartialUploads: partialUploadIDs,
	}

	// Give the application a chance to reject the upload before it is created.
	if handler.config.PreUploadCreateCallback != nil {
		if err := handler.config.PreUploadCreateCallback(newHookEvent(info, r)); err != nil {
			handler.sendError(w, r, err)
			return
		}
	}

	upload, err := handler.composer.Core.NewUpload(ctx, info)
	if err != nil {
		handler.sendError(w, r, err)
		return
	}

	// Re-fetch the info so that store-assigned fields (e.g. the ID) are set.
	info, err = upload.GetInfo(ctx)
	if err != nil {
		handler.sendError(w, r, err)
		return
	}

	id := info.ID

	// Add the Location header directly after creating the new resource to even
	// include it in cases of failure when an error is returned
	url := handler.absFileURL(r, id)
	w.Header().Set("Location", url)

	handler.Metrics.incUploadsCreated()
	handler.log("UploadCreated", "id", id, "size", i64toa(size), "url", url)

	if handler.config.NotifyCreatedUploads {
		handler.CreatedUploads <- newHookEvent(info, r)
	}

	if isFinal {
		concatableUpload := handler.composer.Concater.AsConcatableUpload(upload)
		if err := concatableUpload.ConcatUploads(ctx, partialUploads); err != nil {
			handler.sendError(w, r, err)
			return
		}
		info.Offset = size

		if handler.config.NotifyCompleteUploads {
			handler.CompleteUploads <- newHookEvent(info, r)
		}
	}

	if containsChunk {
		if handler.composer.UsesLocker {
			lock, err := handler.lockUpload(id)
			if err != nil {
				handler.sendError(w, r, err)
				return
			}

			defer lock.Unlock()
		}

		if err := handler.writeChunk(ctx, upload, info, w, r); err != nil {
			handler.sendError(w, r, err)
			return
		}
	} else if !sizeIsDeferred && size == 0 {
		// Directly finish the upload if the upload is empty (i.e. has a size of 0).
		// This statement is in an else-if block to avoid causing duplicate calls
		// to finishUploadIfComplete if an upload is empty and contains a chunk.
		// NOTE(review): the error from finishUploadIfComplete is discarded here,
		// so the client still receives 201 Created — confirm this is intended.
		handler.finishUploadIfComplete(ctx, upload, info, r)
	}

	handler.sendResp(w, r, http.StatusCreated)
}
|
|
|
|
|
|
|
|
// HeadFile returns the length and offset for the HEAD request.
// It also reports concatenation status, metadata and deferred-length state
// via the corresponding tus headers.
func (handler *UnroutedHandler) HeadFile(w http.ResponseWriter, r *http.Request) {
	// HEAD is a short metadata lookup, so bound it with the request timeout.
	ctx, cancel := context.WithTimeout(context.Background(), requestTimeoutDuration)
	defer cancel()

	id, err := extractIDFromPath(r.URL.Path)
	if err != nil {
		handler.sendError(w, r, err)
		return
	}

	if handler.composer.UsesLocker {
		lock, err := handler.lockUpload(id)
		if err != nil {
			handler.sendError(w, r, err)
			return
		}

		defer lock.Unlock()
	}

	upload, err := handler.composer.Core.GetUpload(ctx, id)
	if err != nil {
		handler.sendError(w, r, err)
		return
	}

	info, err := upload.GetInfo(ctx)
	if err != nil {
		handler.sendError(w, r, err)
		return
	}

	// Add Upload-Concat header if possible
	if info.IsPartial {
		w.Header().Set("Upload-Concat", "partial")
	}

	if info.IsFinal {
		// Build "final;<url> <url> ..." listing the partial uploads.
		v := "final;"
		for _, uploadID := range info.PartialUploads {
			v += handler.absFileURL(r, uploadID) + " "
		}
		// Remove trailing space
		v = v[:len(v)-1]

		w.Header().Set("Upload-Concat", v)
	}

	if len(info.MetaData) != 0 {
		w.Header().Set("Upload-Metadata", SerializeMetadataHeader(info.MetaData))
	}

	// Either the length is still deferred or it is known; never send both headers.
	if info.SizeIsDeferred {
		w.Header().Set("Upload-Defer-Length", UploadLengthDeferred)
	} else {
		w.Header().Set("Upload-Length", strconv.FormatInt(info.Size, 10))
	}

	w.Header().Set("Cache-Control", "no-store")
	w.Header().Set("Upload-Offset", strconv.FormatInt(info.Offset, 10))
	handler.sendResp(w, r, http.StatusOK)
}
|
|
|
|
|
2018-03-29 12:40:43 +00:00
|
|
|
// PatchFile adds a chunk to an upload. This operation is only allowed
// if enough space in the upload is left.
func (handler *UnroutedHandler) PatchFile(w http.ResponseWriter, r *http.Request) {
	// No timeout here: a PATCH may stream data for an arbitrary duration.
	ctx := context.Background()

	// Check for presence of application/offset+octet-stream
	if r.Header.Get("Content-Type") != "application/offset+octet-stream" {
		handler.sendError(w, r, ErrInvalidContentType)
		return
	}

	// Check for presence of a valid Upload-Offset header
	offset, err := strconv.ParseInt(r.Header.Get("Upload-Offset"), 10, 64)
	if err != nil || offset < 0 {
		handler.sendError(w, r, ErrInvalidOffset)
		return
	}

	id, err := extractIDFromPath(r.URL.Path)
	if err != nil {
		handler.sendError(w, r, err)
		return
	}

	if handler.composer.UsesLocker {
		lock, err := handler.lockUpload(id)
		if err != nil {
			handler.sendError(w, r, err)
			return
		}

		defer lock.Unlock()
	}

	upload, err := handler.composer.Core.GetUpload(ctx, id)
	if err != nil {
		handler.sendError(w, r, err)
		return
	}

	info, err := upload.GetInfo(ctx)
	if err != nil {
		handler.sendError(w, r, err)
		return
	}

	// Modifying a final upload is not allowed
	if info.IsFinal {
		handler.sendError(w, r, ErrModifyFinal)
		return
	}

	// The client's offset must match the server's view exactly.
	if offset != info.Offset {
		handler.sendError(w, r, ErrMismatchOffset)
		return
	}

	// Do not proxy the call to the data store if the upload is already completed
	if !info.SizeIsDeferred && info.Offset == info.Size {
		w.Header().Set("Upload-Offset", strconv.FormatInt(offset, 10))
		handler.sendResp(w, r, http.StatusNoContent)
		return
	}

	// An Upload-Length header on a PATCH declares the final size of an upload
	// whose length was deferred at creation (creation-defer-length extension).
	if r.Header.Get("Upload-Length") != "" {
		if !handler.composer.UsesLengthDeferrer {
			handler.sendError(w, r, ErrNotImplemented)
			return
		}
		if !info.SizeIsDeferred {
			handler.sendError(w, r, ErrInvalidUploadLength)
			return
		}
		uploadLength, err := strconv.ParseInt(r.Header.Get("Upload-Length"), 10, 64)
		// The declared length must be non-negative, not shrink below the data
		// already written, and respect the configured maximum size.
		if err != nil || uploadLength < 0 || uploadLength < info.Offset || (handler.config.MaxSize > 0 && uploadLength > handler.config.MaxSize) {
			handler.sendError(w, r, ErrInvalidUploadLength)
			return
		}

		lengthDeclarableUpload := handler.composer.LengthDeferrer.AsLengthDeclarableUpload(upload)
		if err := lengthDeclarableUpload.DeclareLength(ctx, uploadLength); err != nil {
			handler.sendError(w, r, err)
			return
		}

		info.Size = uploadLength
		info.SizeIsDeferred = false
	}

	if err := handler.writeChunk(ctx, upload, info, w, r); err != nil {
		handler.sendError(w, r, err)
		return
	}

	handler.sendResp(w, r, http.StatusNoContent)
}
|
|
|
|
|
2018-03-29 12:40:43 +00:00
|
|
|
// writeChunk reads the body from the requests r and appends it to the upload
// with the corresponding id. Afterwards, it will set the necessary response
// headers but will not send the response. Any returned error should be passed
// to sendError by the caller.
func (handler *UnroutedHandler) writeChunk(ctx context.Context, upload Upload, info FileInfo, w http.ResponseWriter, r *http.Request) error {
	// Get Content-Length if possible
	length := r.ContentLength
	offset := info.Offset
	id := info.ID

	// Test if this upload fits into the file's size
	if !info.SizeIsDeferred && offset+length > info.Size {
		return ErrSizeExceeded
	}

	maxSize := info.Size - offset
	// If the upload's length is deferred and the PATCH request does not contain the Content-Length
	// header (which is allowed if 'Transfer-Encoding: chunked' is used), we still need to set limits for
	// the body size.
	if info.SizeIsDeferred {
		if handler.config.MaxSize > 0 {
			// Ensure that the upload does not exceed the maximum upload size
			maxSize = handler.config.MaxSize - offset
		} else {
			// If no upload limit is given, we allow arbitrary sizes
			maxSize = math.MaxInt64
		}
	}
	// A declared Content-Length takes precedence as the read limit.
	if length > 0 {
		maxSize = length
	}

	handler.log("ChunkWriteStart", "id", id, "maxSize", i64toa(maxSize), "offset", i64toa(offset))

	var bytesWritten int64
	// Prevent a nil pointer dereference when accessing the body which may not be
	// available in the case of a malicious request.
	if r.Body != nil {
		// Limit the data read from the request's body to the allowed maximum
		reader := io.LimitReader(r.Body, maxSize)

		// We use a context object to allow the hook system to cancel an upload
		uploadCtx, stopUpload := context.WithCancel(context.Background())
		info.stopUpload = stopUpload
		// terminateUpload specifies whether the upload should be deleted after
		// the write has finished
		terminateUpload := false
		// Cancel the context when the function exits to ensure that the goroutine
		// is properly cleaned up
		defer stopUpload()

		go func() {
			// Interrupt the Read() call from the request body
			<-uploadCtx.Done()
			// NOTE(review): terminateUpload is written here and read below
			// without explicit synchronization; presumably the Body.Close()
			// / WriteChunk-return ordering makes this safe — confirm.
			terminateUpload = true
			r.Body.Close()
		}()

		if handler.config.NotifyUploadProgress {
			var stopProgressEvents chan<- struct{}
			reader, stopProgressEvents = handler.sendProgressMessages(newHookEvent(info, r), reader)
			defer close(stopProgressEvents)
		}

		var err error
		bytesWritten, err = upload.WriteChunk(ctx, offset, reader)
		if terminateUpload && handler.composer.UsesTerminater {
			if terminateErr := handler.terminateUpload(ctx, upload, info, r); terminateErr != nil {
				// We only log this error and not show it to the user since this
				// termination error is not relevant to the uploading client
				handler.log("UploadStopTerminateError", "id", id, "error", terminateErr.Error())
			}
		}

		// The error "http: invalid Read on closed Body" is returned if we stop the upload
		// while the data store is still reading. Since this is an implementation detail,
		// we replace this error with a message saying that the upload has been stopped.
		if err == http.ErrBodyReadAfterClose {
			err = ErrUploadStoppedByServer
		}

		if err != nil {
			return err
		}
	}

	handler.log("ChunkWriteComplete", "id", id, "bytesWritten", i64toa(bytesWritten))

	// Send new offset to client
	newOffset := offset + bytesWritten
	w.Header().Set("Upload-Offset", strconv.FormatInt(newOffset, 10))
	handler.Metrics.incBytesReceived(uint64(bytesWritten))
	info.Offset = newOffset

	return handler.finishUploadIfComplete(ctx, upload, info, r)
}
|
2015-11-29 01:33:55 +00:00
|
|
|
|
2018-03-29 12:40:43 +00:00
|
|
|
// finishUploadIfComplete checks whether an upload is completed (i.e. upload offset
|
|
|
|
// matches upload size) and if so, it will call the data store's FinishUpload
|
|
|
|
// function and send the necessary message on the CompleteUpload channel.
|
2019-09-19 09:15:48 +00:00
|
|
|
func (handler *UnroutedHandler) finishUploadIfComplete(ctx context.Context, upload Upload, info FileInfo, r *http.Request) error {
|
2015-12-08 21:08:54 +00:00
|
|
|
// If the upload is completed, ...
|
2018-04-23 21:16:28 +00:00
|
|
|
if !info.SizeIsDeferred && info.Offset == info.Size {
|
2015-12-08 21:08:54 +00:00
|
|
|
// ... allow custom mechanism to finish and cleanup the upload
|
2019-09-15 11:43:59 +00:00
|
|
|
if err := upload.FinishUpload(ctx); err != nil {
|
2019-08-24 13:14:51 +00:00
|
|
|
return err
|
2015-12-08 21:08:54 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
// ... send the info out to the channel
|
|
|
|
if handler.config.NotifyCompleteUploads {
|
2019-09-19 09:15:48 +00:00
|
|
|
handler.CompleteUploads <- newHookEvent(info, r)
|
2015-12-08 21:08:54 +00:00
|
|
|
}
|
2016-05-24 15:04:28 +00:00
|
|
|
|
2019-03-21 19:04:01 +00:00
|
|
|
handler.Metrics.incUploadsFinished()
|
2015-11-29 01:33:55 +00:00
|
|
|
}
|
|
|
|
|
2016-08-28 20:06:37 +00:00
|
|
|
return nil
|
2015-11-29 01:33:55 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
// GetFile handles requests to download a file using a GET request. This is not
|
|
|
|
// part of the specification.
|
|
|
|
func (handler *UnroutedHandler) GetFile(w http.ResponseWriter, r *http.Request) {
|
2019-09-29 18:03:18 +00:00
|
|
|
ctx := context.Background()
|
2019-09-15 11:43:59 +00:00
|
|
|
|
2015-12-07 20:37:34 +00:00
|
|
|
id, err := extractIDFromPath(r.URL.Path)
|
|
|
|
if err != nil {
|
|
|
|
handler.sendError(w, r, err)
|
|
|
|
return
|
|
|
|
}
|
2015-11-29 01:33:55 +00:00
|
|
|
|
2016-02-21 22:25:35 +00:00
|
|
|
if handler.composer.UsesLocker {
|
2019-09-12 10:37:43 +00:00
|
|
|
lock, err := handler.lockUpload(id)
|
|
|
|
if err != nil {
|
2015-12-26 20:23:09 +00:00
|
|
|
handler.sendError(w, r, err)
|
|
|
|
return
|
|
|
|
}
|
2015-11-29 01:33:55 +00:00
|
|
|
|
2019-09-12 10:37:43 +00:00
|
|
|
defer lock.Unlock()
|
2015-12-26 20:23:09 +00:00
|
|
|
}
|
2015-11-29 01:33:55 +00:00
|
|
|
|
2019-09-15 11:43:59 +00:00
|
|
|
upload, err := handler.composer.Core.GetUpload(ctx, id)
|
2019-08-24 13:14:51 +00:00
|
|
|
if err != nil {
|
|
|
|
handler.sendError(w, r, err)
|
|
|
|
return
|
|
|
|
}
|
|
|
|
|
2019-09-15 11:43:59 +00:00
|
|
|
info, err := upload.GetInfo(ctx)
|
2015-11-29 01:33:55 +00:00
|
|
|
if err != nil {
|
|
|
|
handler.sendError(w, r, err)
|
|
|
|
return
|
|
|
|
}
|
|
|
|
|
2016-10-13 16:38:43 +00:00
|
|
|
// Set headers before sending responses
|
|
|
|
w.Header().Set("Content-Length", strconv.FormatInt(info.Offset, 10))
|
2018-02-28 21:55:14 +00:00
|
|
|
|
|
|
|
contentType, contentDisposition := filterContentType(info)
|
|
|
|
w.Header().Set("Content-Type", contentType)
|
|
|
|
w.Header().Set("Content-Disposition", contentDisposition)
|
2016-10-13 16:38:43 +00:00
|
|
|
|
2018-02-19 17:28:42 +00:00
|
|
|
// If no data has been uploaded yet, respond with an empty "204 No Content" status.
|
2015-11-29 01:33:55 +00:00
|
|
|
if info.Offset == 0 {
|
2016-09-23 19:21:38 +00:00
|
|
|
handler.sendResp(w, r, http.StatusNoContent)
|
2015-11-29 01:33:55 +00:00
|
|
|
return
|
|
|
|
}
|
|
|
|
|
2019-09-15 11:43:59 +00:00
|
|
|
src, err := upload.GetReader(ctx)
|
2015-11-29 01:33:55 +00:00
|
|
|
if err != nil {
|
|
|
|
handler.sendError(w, r, err)
|
|
|
|
return
|
|
|
|
}
|
|
|
|
|
2016-09-23 19:21:38 +00:00
|
|
|
handler.sendResp(w, r, http.StatusOK)
|
2015-11-29 01:33:55 +00:00
|
|
|
io.Copy(w, src)
|
|
|
|
|
|
|
|
// Try to close the reader if the io.Closer interface is implemented
|
|
|
|
if closer, ok := src.(io.Closer); ok {
|
|
|
|
closer.Close()
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2018-02-28 21:55:14 +00:00
|
|
|
// mimeInlineBrowserWhitelist is a map containing MIME types which should be
// allowed to be rendered by browser inline, instead of being forced to be
// downloaded. For example, HTML or SVG files are not allowed, since they may
// contain malicious JavaScript. In a similar fashion PDF is not on this list
// as their parsers commonly contain vulnerabilities which can be exploited.
// The values of this map do not convey any meaning and are therefore just
// empty structs.
var mimeInlineBrowserWhitelist = map[string]struct{}{
	"text/plain": {},

	"image/png":  {},
	"image/jpeg": {},
	"image/gif":  {},
	"image/bmp":  {},
	"image/webp": {},

	"audio/wave":      {},
	"audio/wav":       {},
	"audio/x-wav":     {},
	"audio/x-pn-wav":  {},
	"audio/webm":      {},
	"video/webm":      {},
	"audio/ogg":       {},
	"video/ogg":       {},
	"application/ogg": {},
}
|
|
|
|
|
|
|
|
// filterContentType returns the values for the Content-Type and
|
|
|
|
// Content-Disposition headers for a given upload. These values should be used
|
|
|
|
// in responses for GET requests to ensure that only non-malicious file types
|
|
|
|
// are shown directly in the browser. It will extract the file name and type
|
|
|
|
// from the "fileame" and "filetype".
|
|
|
|
// See https://developer.mozilla.org/en-US/docs/Web/HTTP/Headers/Content-Disposition
|
|
|
|
func filterContentType(info FileInfo) (contentType string, contentDisposition string) {
|
|
|
|
filetype := info.MetaData["filetype"]
|
|
|
|
|
|
|
|
if reMimeType.MatchString(filetype) {
|
|
|
|
// If the filetype from metadata is well formed, we forward use this
|
|
|
|
// for the Content-Type header. However, only whitelisted mime types
|
|
|
|
// will be allowed to be shown inline in the browser
|
|
|
|
contentType = filetype
|
|
|
|
if _, isWhitelisted := mimeInlineBrowserWhitelist[filetype]; isWhitelisted {
|
|
|
|
contentDisposition = "inline"
|
|
|
|
} else {
|
|
|
|
contentDisposition = "attachment"
|
|
|
|
}
|
|
|
|
} else {
|
|
|
|
// If the filetype from the metadata is not well formed, we use a
|
|
|
|
// default type and force the browser to download the content.
|
|
|
|
contentType = "application/octet-stream"
|
|
|
|
contentDisposition = "attachment"
|
|
|
|
}
|
|
|
|
|
|
|
|
// Add a filename to Content-Disposition if one is available in the metadata
|
|
|
|
if filename, ok := info.MetaData["filename"]; ok {
|
|
|
|
contentDisposition += ";filename=" + strconv.Quote(filename)
|
|
|
|
}
|
|
|
|
|
|
|
|
return contentType, contentDisposition
|
|
|
|
}
|
|
|
|
|
2015-11-29 01:33:55 +00:00
|
|
|
// DelFile terminates an upload permanently.
|
|
|
|
func (handler *UnroutedHandler) DelFile(w http.ResponseWriter, r *http.Request) {
|
2019-09-29 18:03:18 +00:00
|
|
|
ctx, cancel := context.WithTimeout(context.Background(), requestTimeoutDuration)
|
|
|
|
defer cancel()
|
2019-09-15 11:43:59 +00:00
|
|
|
|
2015-12-26 23:44:02 +00:00
|
|
|
// Abort the request handling if the required interface is not implemented
|
2016-02-21 22:25:35 +00:00
|
|
|
if !handler.composer.UsesTerminater {
|
2015-12-26 23:44:02 +00:00
|
|
|
handler.sendError(w, r, ErrNotImplemented)
|
2015-12-07 20:37:34 +00:00
|
|
|
return
|
|
|
|
}
|
2015-11-29 01:33:55 +00:00
|
|
|
|
2015-12-07 20:37:34 +00:00
|
|
|
id, err := extractIDFromPath(r.URL.Path)
|
|
|
|
if err != nil {
|
|
|
|
handler.sendError(w, r, err)
|
2015-11-29 01:33:55 +00:00
|
|
|
return
|
|
|
|
}
|
|
|
|
|
2016-02-21 22:25:35 +00:00
|
|
|
if handler.composer.UsesLocker {
|
2019-09-12 10:37:43 +00:00
|
|
|
lock, err := handler.lockUpload(id)
|
|
|
|
if err != nil {
|
2015-12-26 20:23:09 +00:00
|
|
|
handler.sendError(w, r, err)
|
|
|
|
return
|
|
|
|
}
|
2015-11-29 01:33:55 +00:00
|
|
|
|
2019-09-12 10:37:43 +00:00
|
|
|
defer lock.Unlock()
|
2015-12-26 20:23:09 +00:00
|
|
|
}
|
2015-11-29 01:33:55 +00:00
|
|
|
|
2019-09-15 11:43:59 +00:00
|
|
|
upload, err := handler.composer.Core.GetUpload(ctx, id)
|
2019-08-24 13:14:51 +00:00
|
|
|
if err != nil {
|
|
|
|
handler.sendError(w, r, err)
|
|
|
|
return
|
|
|
|
}
|
|
|
|
|
2016-03-12 21:24:57 +00:00
|
|
|
var info FileInfo
|
|
|
|
if handler.config.NotifyTerminatedUploads {
|
2019-09-15 11:43:59 +00:00
|
|
|
info, err = upload.GetInfo(ctx)
|
2016-03-12 21:24:57 +00:00
|
|
|
if err != nil {
|
|
|
|
handler.sendError(w, r, err)
|
|
|
|
return
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2019-09-19 09:15:48 +00:00
|
|
|
err = handler.terminateUpload(ctx, upload, info, r)
|
2015-11-29 01:33:55 +00:00
|
|
|
if err != nil {
|
|
|
|
handler.sendError(w, r, err)
|
|
|
|
return
|
|
|
|
}
|
|
|
|
|
2016-09-23 19:21:38 +00:00
|
|
|
handler.sendResp(w, r, http.StatusNoContent)
|
2019-05-26 19:56:51 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
// terminateUpload passes a given upload to the DataStore's Terminater,
|
|
|
|
// send the corresponding upload info on the TerminatedUploads channnel
|
|
|
|
// and updates the statistics.
|
|
|
|
// Note the the info argument is only needed if the terminated uploads
|
|
|
|
// notifications are enabled.
|
2019-09-19 09:15:48 +00:00
|
|
|
func (handler *UnroutedHandler) terminateUpload(ctx context.Context, upload Upload, info FileInfo, r *http.Request) error {
|
2019-08-24 13:14:51 +00:00
|
|
|
terminatableUpload := handler.composer.Terminater.AsTerminatableUpload(upload)
|
|
|
|
|
2019-09-15 11:43:59 +00:00
|
|
|
err := terminatableUpload.Terminate(ctx)
|
2019-05-26 19:56:51 +00:00
|
|
|
if err != nil {
|
|
|
|
return err
|
|
|
|
}
|
2016-03-12 21:24:57 +00:00
|
|
|
|
|
|
|
if handler.config.NotifyTerminatedUploads {
|
2019-09-19 09:15:48 +00:00
|
|
|
handler.TerminatedUploads <- newHookEvent(info, r)
|
2016-03-12 21:24:57 +00:00
|
|
|
}
|
2016-05-24 15:04:28 +00:00
|
|
|
|
2019-03-21 19:04:01 +00:00
|
|
|
handler.Metrics.incUploadsTerminated()
|
2019-05-26 19:56:51 +00:00
|
|
|
|
|
|
|
return nil
|
2015-11-29 01:33:55 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
// Send the error in the response body. The status code will be looked up in
|
|
|
|
// ErrStatusCodes. If none is found 500 Internal Error will be used.
|
|
|
|
func (handler *UnroutedHandler) sendError(w http.ResponseWriter, r *http.Request, err error) {
|
|
|
|
// Interpret os.ErrNotExist as 404 Not Found
|
|
|
|
if os.IsNotExist(err) {
|
|
|
|
err = ErrNotFound
|
|
|
|
}
|
|
|
|
|
2017-03-01 18:43:37 +00:00
|
|
|
// Errors for read timeouts contain too much information which is not
|
|
|
|
// necessary for us and makes grouping for the metrics harder. The error
|
|
|
|
// message looks like: read tcp 127.0.0.1:1080->127.0.0.1:53673: i/o timeout
|
|
|
|
// Therefore, we use a common error message for all of them.
|
|
|
|
if netErr, ok := err.(net.Error); ok && netErr.Timeout() {
|
|
|
|
err = errors.New("read tcp: i/o timeout")
|
|
|
|
}
|
|
|
|
|
2019-07-21 20:40:41 +00:00
|
|
|
// Errors for connnection resets also contain TCP details, we don't need, e.g:
|
|
|
|
// read tcp 127.0.0.1:1080->127.0.0.1:10023: read: connection reset by peer
|
|
|
|
// Therefore, we also trim those down.
|
|
|
|
if strings.HasSuffix(err.Error(), "read: connection reset by peer") {
|
|
|
|
err = errors.New("read tcp: connection reset by peer")
|
|
|
|
}
|
|
|
|
|
2017-02-28 19:39:25 +00:00
|
|
|
statusErr, ok := err.(HTTPError)
|
|
|
|
if !ok {
|
|
|
|
statusErr = NewHTTPError(err, http.StatusInternalServerError)
|
2015-11-29 01:33:55 +00:00
|
|
|
}
|
|
|
|
|
2019-02-12 21:45:08 +00:00
|
|
|
reason := append(statusErr.Body(), '\n')
|
2015-11-29 01:33:55 +00:00
|
|
|
if r.Method == "HEAD" {
|
2019-02-12 21:45:08 +00:00
|
|
|
reason = nil
|
2015-11-29 01:33:55 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
w.Header().Set("Content-Type", "text/plain; charset=utf-8")
|
|
|
|
w.Header().Set("Content-Length", strconv.Itoa(len(reason)))
|
2017-02-28 19:39:25 +00:00
|
|
|
w.WriteHeader(statusErr.StatusCode())
|
2019-02-12 21:45:08 +00:00
|
|
|
w.Write(reason)
|
2016-05-24 15:04:28 +00:00
|
|
|
|
2017-02-28 19:39:25 +00:00
|
|
|
handler.log("ResponseOutgoing", "status", strconv.Itoa(statusErr.StatusCode()), "method", r.Method, "path", r.URL.Path, "error", err.Error())
|
2016-09-23 19:21:38 +00:00
|
|
|
|
2019-03-21 19:04:01 +00:00
|
|
|
handler.Metrics.incErrorsTotal(statusErr)
|
2015-11-29 01:33:55 +00:00
|
|
|
}
|
|
|
|
|
2016-09-23 19:21:38 +00:00
|
|
|
// sendResp writes the header to w with the specified status code and logs the
// outgoing response. It does not write a body.
func (handler *UnroutedHandler) sendResp(w http.ResponseWriter, r *http.Request, status int) {
	w.WriteHeader(status)

	handler.log("ResponseOutgoing", "status", strconv.Itoa(status), "method", r.Method, "path", r.URL.Path)
}
|
|
|
|
|
2015-11-29 01:33:55 +00:00
|
|
|
// Make an absolute URLs to the given upload id. If the base path is absolute
|
|
|
|
// it will be prepended else the host and protocol from the request is used.
|
|
|
|
func (handler *UnroutedHandler) absFileURL(r *http.Request, id string) string {
|
|
|
|
if handler.isBasePathAbs {
|
|
|
|
return handler.basePath + id
|
|
|
|
}
|
|
|
|
|
|
|
|
// Read origin and protocol from request
|
2016-01-16 14:27:35 +00:00
|
|
|
host, proto := getHostAndProtocol(r, handler.config.RespectForwardedHeaders)
|
|
|
|
|
|
|
|
url := proto + "://" + host + handler.basePath + id
|
|
|
|
|
|
|
|
return url
|
|
|
|
}
|
|
|
|
|
2017-01-19 20:02:48 +00:00
|
|
|
// progressWriter counts the bytes written to it using atomic operations, so
// the running total can be read concurrently from another goroutine. It is
// used to track how much of a request body has been consumed so far.
type progressWriter struct {
	Offset int64
}

// Write atomically adds len(b) to the offset. It never fails and never
// retains b.
func (w *progressWriter) Write(b []byte) (int, error) {
	n := len(b)
	atomic.AddInt64(&w.Offset, int64(n))
	return n, nil
}
|
|
|
|
|
2017-02-21 22:17:07 +00:00
|
|
|
// sendProgressMessage will send a notification over the UploadProgress channel
|
|
|
|
// every second, indicating how much data has been transfered to the server.
|
|
|
|
// It will stop sending these instances once the returned channel has been
|
|
|
|
// closed. The returned reader should be used to read the request body.
|
2019-09-19 09:15:48 +00:00
|
|
|
func (handler *UnroutedHandler) sendProgressMessages(hook HookEvent, reader io.Reader) (io.Reader, chan<- struct{}) {
|
2019-05-15 21:57:20 +00:00
|
|
|
previousOffset := int64(0)
|
2017-01-19 20:02:48 +00:00
|
|
|
progress := &progressWriter{
|
2019-09-19 09:15:48 +00:00
|
|
|
Offset: hook.Upload.Offset,
|
2017-01-19 20:02:48 +00:00
|
|
|
}
|
|
|
|
stop := make(chan struct{}, 1)
|
|
|
|
reader = io.TeeReader(reader, progress)
|
|
|
|
|
|
|
|
go func() {
|
|
|
|
for {
|
|
|
|
select {
|
|
|
|
case <-stop:
|
2019-09-19 09:15:48 +00:00
|
|
|
hook.Upload.Offset = atomic.LoadInt64(&progress.Offset)
|
|
|
|
if hook.Upload.Offset != previousOffset {
|
|
|
|
handler.UploadProgress <- hook
|
|
|
|
previousOffset = hook.Upload.Offset
|
2019-05-15 21:57:20 +00:00
|
|
|
}
|
2017-01-19 20:02:48 +00:00
|
|
|
return
|
|
|
|
case <-time.After(1 * time.Second):
|
2019-09-19 09:15:48 +00:00
|
|
|
hook.Upload.Offset = atomic.LoadInt64(&progress.Offset)
|
|
|
|
if hook.Upload.Offset != previousOffset {
|
|
|
|
handler.UploadProgress <- hook
|
|
|
|
previousOffset = hook.Upload.Offset
|
2019-05-15 21:57:20 +00:00
|
|
|
}
|
2017-01-19 20:02:48 +00:00
|
|
|
}
|
|
|
|
}
|
|
|
|
}()
|
|
|
|
|
|
|
|
return reader, stop
|
|
|
|
}
|
|
|
|
|
2016-01-16 14:27:35 +00:00
|
|
|
// getHostAndProtocol extracts the host and used protocol (either HTTP or HTTPS)
|
|
|
|
// from the given request. If `allowForwarded` is set, the X-Forwarded-Host,
|
|
|
|
// X-Forwarded-Proto and Forwarded headers will also be checked to
|
|
|
|
// support proxies.
|
|
|
|
func getHostAndProtocol(r *http.Request, allowForwarded bool) (host, proto string) {
|
2015-11-29 01:33:55 +00:00
|
|
|
if r.TLS != nil {
|
2016-01-16 14:27:35 +00:00
|
|
|
proto = "https"
|
|
|
|
} else {
|
|
|
|
proto = "http"
|
2015-11-29 01:33:55 +00:00
|
|
|
}
|
|
|
|
|
2016-01-16 14:27:35 +00:00
|
|
|
host = r.Host
|
2015-11-29 01:33:55 +00:00
|
|
|
|
2016-01-16 14:27:35 +00:00
|
|
|
if !allowForwarded {
|
|
|
|
return
|
|
|
|
}
|
|
|
|
|
|
|
|
if h := r.Header.Get("X-Forwarded-Host"); h != "" {
|
|
|
|
host = h
|
|
|
|
}
|
|
|
|
|
|
|
|
if h := r.Header.Get("X-Forwarded-Proto"); h == "http" || h == "https" {
|
|
|
|
proto = h
|
|
|
|
}
|
|
|
|
|
|
|
|
if h := r.Header.Get("Forwarded"); h != "" {
|
|
|
|
if r := reForwardedHost.FindStringSubmatch(h); len(r) == 2 {
|
|
|
|
host = r[1]
|
|
|
|
}
|
|
|
|
|
|
|
|
if r := reForwardedProto.FindStringSubmatch(h); len(r) == 2 {
|
|
|
|
proto = r[1]
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
return
|
2015-11-29 01:33:55 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
// The get sum of all sizes for a list of upload ids while checking whether
|
|
|
|
// all of these uploads are finished yet. This is used to calculate the size
|
|
|
|
// of a final resource.
|
2019-09-19 10:14:25 +00:00
|
|
|
func (handler *UnroutedHandler) sizeOfUploads(ctx context.Context, ids []string) (partialUploads []Upload, size int64, err error) {
|
|
|
|
partialUploads = make([]Upload, len(ids))
|
|
|
|
|
|
|
|
for i, id := range ids {
|
2019-09-15 11:43:59 +00:00
|
|
|
upload, err := handler.composer.Core.GetUpload(ctx, id)
|
2019-08-24 13:14:51 +00:00
|
|
|
if err != nil {
|
2019-09-19 10:14:25 +00:00
|
|
|
return nil, 0, err
|
2019-08-24 13:14:51 +00:00
|
|
|
}
|
|
|
|
|
2019-09-15 11:43:59 +00:00
|
|
|
info, err := upload.GetInfo(ctx)
|
2015-11-29 01:33:55 +00:00
|
|
|
if err != nil {
|
2019-09-19 10:14:25 +00:00
|
|
|
return nil, 0, err
|
2015-11-29 01:33:55 +00:00
|
|
|
}
|
|
|
|
|
2018-06-03 17:07:07 +00:00
|
|
|
if info.SizeIsDeferred || info.Offset != info.Size {
|
2015-11-29 01:33:55 +00:00
|
|
|
err = ErrUploadNotFinished
|
2019-09-19 10:14:25 +00:00
|
|
|
return nil, 0, err
|
2015-11-29 01:33:55 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
size += info.Size
|
2019-09-19 10:14:25 +00:00
|
|
|
partialUploads[i] = upload
|
2015-11-29 01:33:55 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
return
|
|
|
|
}
|
|
|
|
|
2018-04-23 21:10:23 +00:00
|
|
|
// Verify that the Upload-Length and Upload-Defer-Length headers are acceptable for creating a
|
|
|
|
// new upload
|
2018-05-05 19:20:26 +00:00
|
|
|
func (handler *UnroutedHandler) validateNewUploadLengthHeaders(uploadLengthHeader string, uploadDeferLengthHeader string) (uploadLength int64, uploadLengthDeferred bool, err error) {
|
2018-04-23 21:10:23 +00:00
|
|
|
haveBothLengthHeaders := uploadLengthHeader != "" && uploadDeferLengthHeader != ""
|
|
|
|
haveInvalidDeferHeader := uploadDeferLengthHeader != "" && uploadDeferLengthHeader != UploadLengthDeferred
|
|
|
|
lengthIsDeferred := uploadDeferLengthHeader == UploadLengthDeferred
|
|
|
|
|
2018-05-05 19:20:26 +00:00
|
|
|
if lengthIsDeferred && !handler.composer.UsesLengthDeferrer {
|
|
|
|
err = ErrNotImplemented
|
|
|
|
} else if haveBothLengthHeaders {
|
2018-04-23 21:10:23 +00:00
|
|
|
err = ErrUploadLengthAndUploadDeferLength
|
|
|
|
} else if haveInvalidDeferHeader {
|
|
|
|
err = ErrInvalidUploadDeferLength
|
|
|
|
} else if lengthIsDeferred {
|
|
|
|
uploadLengthDeferred = true
|
|
|
|
} else {
|
|
|
|
uploadLength, err = strconv.ParseInt(uploadLengthHeader, 10, 64)
|
|
|
|
if err != nil || uploadLength < 0 {
|
|
|
|
err = ErrInvalidUploadLength
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
return
|
|
|
|
}
|
|
|
|
|
2019-09-12 10:37:43 +00:00
|
|
|
// lockUpload creates a new lock for the given upload ID and attempts to lock it.
// The created lock is returned if it was acquired successfully.
func (handler *UnroutedHandler) lockUpload(id string) (Lock, error) {
	lock, err := handler.composer.Locker.NewLock(id)
	if err != nil {
		return nil, err
	}

	if err := lock.Lock(); err != nil {
		return nil, err
	}

	return lock, nil
}
|
|
|
|
|
2018-05-22 16:46:18 +00:00
|
|
|
// ParseMetadataHeader parses the Upload-Metadata header as defined in the
// File Creation extension.
// e.g. Upload-Metadata: name bHVucmpzLnBuZw==,type aW1hZ2UvcG5n
func ParseMetadataHeader(header string) map[string]string {
	meta := make(map[string]string)

	for _, element := range strings.Split(header, ",") {
		parts := strings.Split(strings.TrimSpace(element), " ")

		// Skip elements which do not consist of exactly one key and one value.
		if len(parts) != 2 {
			continue
		}

		// Skip the current element if the value is not valid base64.
		value, err := base64.StdEncoding.DecodeString(parts[1])
		if err != nil {
			continue
		}

		meta[parts[0]] = string(value)
	}

	return meta
}
|
|
|
|
|
2018-05-22 16:46:18 +00:00
|
|
|
// SerializeMetadataHeader serializes a map of strings into the Upload-Metadata
// header format used in the response for HEAD requests.
// e.g. Upload-Metadata: name bHVucmpzLnBuZw==,type aW1hZ2UvcG5n
func SerializeMetadataHeader(meta map[string]string) string {
	// Collect the "key base64value" elements and join them, instead of
	// concatenating with += and trimming a trailing comma afterwards.
	elements := make([]string, 0, len(meta))
	for key, value := range meta {
		valueBase64 := base64.StdEncoding.EncodeToString([]byte(value))
		elements = append(elements, key+" "+valueBase64)
	}

	return strings.Join(elements, ",")
}
|
|
|
|
|
|
|
|
// Parse the Upload-Concat header, e.g.
|
|
|
|
// Upload-Concat: partial
|
2017-01-31 15:58:31 +00:00
|
|
|
// Upload-Concat: final;http://tus.io/files/a /files/b/
|
2015-11-29 01:33:55 +00:00
|
|
|
func parseConcat(header string) (isPartial bool, isFinal bool, partialUploads []string, err error) {
|
|
|
|
if len(header) == 0 {
|
|
|
|
return
|
|
|
|
}
|
|
|
|
|
|
|
|
if header == "partial" {
|
|
|
|
isPartial = true
|
|
|
|
return
|
|
|
|
}
|
|
|
|
|
2017-01-31 15:58:31 +00:00
|
|
|
l := len("final;")
|
|
|
|
if strings.HasPrefix(header, "final;") && len(header) > l {
|
2015-11-29 01:33:55 +00:00
|
|
|
isFinal = true
|
|
|
|
|
|
|
|
list := strings.Split(header[l:], " ")
|
|
|
|
for _, value := range list {
|
|
|
|
value := strings.TrimSpace(value)
|
|
|
|
if value == "" {
|
|
|
|
continue
|
|
|
|
}
|
|
|
|
|
2015-12-07 20:37:34 +00:00
|
|
|
id, extractErr := extractIDFromPath(value)
|
|
|
|
if extractErr != nil {
|
|
|
|
err = extractErr
|
2015-11-29 01:33:55 +00:00
|
|
|
return
|
|
|
|
}
|
|
|
|
|
|
|
|
partialUploads = append(partialUploads, id)
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
// If no valid partial upload ids are extracted this is not a final upload.
|
|
|
|
if len(partialUploads) == 0 {
|
|
|
|
isFinal = false
|
|
|
|
err = ErrInvalidConcat
|
|
|
|
}
|
|
|
|
|
|
|
|
return
|
|
|
|
}
|
|
|
|
|
|
|
|
// extractIDFromPath pulls the last segment from the url provided
|
2015-12-07 20:37:34 +00:00
|
|
|
func extractIDFromPath(url string) (string, error) {
|
2015-11-29 01:33:55 +00:00
|
|
|
result := reExtractFileID.FindStringSubmatch(url)
|
|
|
|
if len(result) != 2 {
|
2015-12-07 20:37:34 +00:00
|
|
|
return "", ErrNotFound
|
2015-11-29 01:33:55 +00:00
|
|
|
}
|
2015-12-07 20:37:34 +00:00
|
|
|
return result[1], nil
|
2015-11-29 01:33:55 +00:00
|
|
|
}
|
2016-09-23 19:21:38 +00:00
|
|
|
|
|
|
|
// i64toa converts an int64 to its base-10 string representation.
func i64toa(num int64) string {
	return strconv.FormatInt(num, 10)
}
|