Store file size in meta info, avoid Truncate()
Gives a radical performance improvement for the initial POST request.
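In essence: instead of pre-allocating the upload file with file.Truncate(size) on creation, the declared size is written into the per-file append-only log as part of the meta entry and read back later. A minimal, self-contained Go sketch of that idea (the metaEntry/logEntry shapes mirror the diff below; everything else is illustrative, not the actual datastore code):

package main

import (
	"encoding/json"
	"fmt"
)

// These mirror the types introduced in the diff below.
type metaEntry struct {
	Size        int64
	ContentType string
}

type logEntry struct {
	Meta *metaEntry `json:",omitempty"`
}

func main() {
	// On the initial POST, record the declared size as a log entry instead of
	// calling file.Truncate(size) on the data file.
	entry := logEntry{Meta: &metaEntry{Size: 1 << 30, ContentType: "video/mp4"}}
	line, _ := json.Marshal(entry)
	fmt.Println(string(line)) // in the datastore this line is appended to the per-file .log

	// Later requests replay the log (GetFileMeta in the diff) to recover the size,
	// so the data file never has to be stat'd or pre-allocated.
	var replayed logEntry
	if err := json.Unmarshal(line, &replayed); err == nil {
		fmt.Println(replayed.Meta.Size)
	}
}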
commit e17edd2f71
parent e652e3f237
@@ -25,11 +25,7 @@ func (s *DataStore) CreateFile(id string, size int64, contentType string) error
     }
     defer file.Close()
 
-    if err := file.Truncate(size); err != nil {
-        return err
-    }
-
-    entry := logEntry{Meta: &metaEntry{ContentType: contentType}}
+    entry := logEntry{Meta: &metaEntry{Size: size, ContentType: contentType}}
     return s.appendFileLog(id, entry)
 }
 
@@ -57,7 +53,7 @@ func (s *DataStore) WriteFileChunk(id string, start int64, end int64, src io.Rea
     return s.appendFileLog(id, entry)
 }
 
-func (s *DataStore) GetFileChunks(id string) (chunkSet, error) {
+func (s *DataStore) GetFileMeta(id string) (*fileMeta, error) {
     // @TODO stream the file / limit log file size?
     data, err := ioutil.ReadFile(s.logPath(id))
     if err != nil {
@@ -67,7 +63,10 @@ func (s *DataStore) GetFileChunks(id string) (chunkSet, error) {
     // last line is always empty, lets skip it
     lines = lines[:len(lines)-1]
 
-    chunks := make(chunkSet, 0, len(lines))
+    meta := &fileMeta{
+        Chunks: make(chunkSet, 0, len(lines)),
+    }
+
     for _, line := range lines {
         entry := logEntry{}
         if err := json.Unmarshal([]byte(line), &entry); err != nil {
@@ -75,25 +74,25 @@ func (s *DataStore) GetFileChunks(id string) (chunkSet, error) {
         }
 
         if entry.Chunk != nil {
-            chunks.Add(chunk{Start: entry.Chunk.Start, End: entry.Chunk.End})
+            meta.Chunks.Add(chunk{Start: entry.Chunk.Start, End: entry.Chunk.End})
+        }
+
+        if entry.Meta != nil {
+            meta.ContentType = entry.Meta.ContentType
+            meta.Size = entry.Meta.Size
         }
     }
 
-    return chunks, nil
+    return meta, nil
 }
 
-func (s *DataStore) ReadFile(id string) (io.ReadCloser, int64, error) {
+func (s *DataStore) ReadFile(id string) (io.ReadCloser, error) {
     file, err := os.Open(s.filePath(id))
     if err != nil {
-        return nil, 0, err
+        return nil, err
     }
 
-    stat, err := file.Stat()
-    if err != nil {
-        return nil, 0, err
-    }
-
-    return file, stat.Size(), nil
+    return file, nil
 }
 
 func (s *DataStore) appendFileLog(id string, entry interface{}) error {
@@ -123,10 +122,21 @@ func (s *DataStore) logPath(id string) string {
     return s.filePath(id) + ".log"
 }
 
+type fileMeta struct {
+    ContentType string
+    Size        int64
+    Chunks      chunkSet
+}
+
 type logEntry struct {
     Chunk *chunkEntry `json:",omitempty"`
     Meta  *metaEntry  `json:",omitempty"`
 }
 
-type chunkEntry struct{ Start, End int64 }
-type metaEntry struct{ ContentType string }
+type chunkEntry struct {
+    Start, End int64
+}
+type metaEntry struct {
+    Size        int64
+    ContentType string
+}

@@ -125,7 +125,14 @@ func headFile(w http.ResponseWriter, r *http.Request, fileId string) {
 }
 
 func getFile(w http.ResponseWriter, r *http.Request, fileId string) {
-    data, size, err := dataStore.ReadFile(fileId)
+    meta, err := dataStore.GetFileMeta(fileId)
+    if err != nil {
+        // @TODO: Could be a 404 as well
+        reply(w, http.StatusInternalServerError, err.Error())
+        return
+    }
+
+    data, err := dataStore.ReadFile(fileId)
     if err != nil {
         // @TODO: Could be a 404 as well
         reply(w, http.StatusInternalServerError, err.Error())
@@ -134,9 +141,10 @@ func getFile(w http.ResponseWriter, r *http.Request, fileId string) {
     defer data.Close()
 
     setFileRangeHeader(w, fileId)
-    w.Header().Set("Content-Length", strconv.FormatInt(size, 10))
+    w.Header().Set("Content-Type", meta.ContentType)
+    w.Header().Set("Content-Length", strconv.FormatInt(meta.Size, 10))
 
-    if _, err := io.CopyN(w, data, size); err != nil {
+    if _, err := io.CopyN(w, data, meta.Size); err != nil {
         log.Printf("getFile: CopyN failed with: %s", err.Error())
         return
     }
@@ -179,16 +187,16 @@ func putFile(w http.ResponseWriter, r *http.Request, fileId string) {
 }
 
 func setFileRangeHeader(w http.ResponseWriter, fileId string) {
-    chunks, err := dataStore.GetFileChunks(fileId)
+    meta, err := dataStore.GetFileMeta(fileId)
     if err != nil {
         reply(w, http.StatusInternalServerError, err.Error())
         return
     }
 
     rangeHeader := ""
-    for i, chunk := range chunks {
+    for i, chunk := range meta.Chunks {
         rangeHeader += fmt.Sprintf("%d-%d", chunk.Start, chunk.End)
-        if i+1 < len(chunks) {
+        if i+1 < len(meta.Chunks) {
             rangeHeader += ","
         }
     }