feat: cache resized images
parent f2f914221c
commit 95bc92955f

cmd/root.go | 18
@@ -13,16 +13,17 @@ import (
 	"strings"
 	"syscall"
 
-	"github.com/filebrowser/filebrowser/v2/img"
-
 	homedir "github.com/mitchellh/go-homedir"
+	"github.com/spf13/afero"
 	"github.com/spf13/cobra"
 	"github.com/spf13/pflag"
 	v "github.com/spf13/viper"
 	lumberjack "gopkg.in/natefinch/lumberjack.v2"
 
 	"github.com/filebrowser/filebrowser/v2/auth"
+	"github.com/filebrowser/filebrowser/v2/diskcache"
 	fbhttp "github.com/filebrowser/filebrowser/v2/http"
+	"github.com/filebrowser/filebrowser/v2/img"
 	"github.com/filebrowser/filebrowser/v2/settings"
 	"github.com/filebrowser/filebrowser/v2/storage"
 	"github.com/filebrowser/filebrowser/v2/users"
@@ -58,6 +59,7 @@ func addServerFlags(flags *pflag.FlagSet) {
 	flags.StringP("root", "r", ".", "root to prepend to relative paths")
 	flags.String("socket", "", "socket to listen to (cannot be used with address, port, cert nor key flags)")
 	flags.StringP("baseurl", "b", "", "base url")
+	flags.String("cache-dir", "", "file cache directory (disabled if empty)")
 	flags.Int("img-processors", 4, "image processors count")
 	flags.Bool("disable-thumbnails", false, "disable image thumbnails")
 	flags.Bool("disable-preview-resize", false, "disable resize of image previews")
@@ -116,6 +118,16 @@ user created with the credentials from options "username" and "password".`,
 		}
 		imgSvc := img.New(workersCount)
 
+		var fileCache diskcache.Interface = diskcache.NewNoOp()
+		cacheDir, err := cmd.Flags().GetString("cache-dir")
+		checkErr(err)
+		if cacheDir != "" {
+			if err := os.MkdirAll(cacheDir, 0700); err != nil { //nolint:govet
+				log.Fatalf("can't make directory %s: %s", cacheDir, err)
+			}
+			fileCache = diskcache.New(afero.NewOsFs(), cacheDir)
+		}
+
 		server := getRunParams(cmd.Flags(), d.store)
 		setupLog(server.Log)
 
@@ -145,7 +157,7 @@ user created with the credentials from options "username" and "password".`,
 		signal.Notify(sigc, os.Interrupt, syscall.SIGTERM)
 		go cleanupHandler(listener, sigc)
 
-		handler, err := fbhttp.NewHandler(imgSvc, d.store, server)
+		handler, err := fbhttp.NewHandler(imgSvc, fileCache, d.store, server)
 		checkErr(err)
 
 		defer listener.Close()
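Note: the new --cache-dir flag above decides which diskcache implementation the server gets — an empty value keeps the no-op cache, a non-empty value creates the directory and a disk-backed cache on the real filesystem. A minimal standalone sketch of that selection logic (hypothetical helper, not part of this commit):

package main

import (
	"log"
	"os"

	"github.com/spf13/afero"

	"github.com/filebrowser/filebrowser/v2/diskcache"
)

// buildCache mirrors the flag handling in cmd/root.go: empty dir keeps the
// NoOp cache, otherwise a FileCache rooted at cacheDir on the OS filesystem.
func buildCache(cacheDir string) diskcache.Interface {
	var fileCache diskcache.Interface = diskcache.NewNoOp()
	if cacheDir != "" {
		if err := os.MkdirAll(cacheDir, 0700); err != nil {
			log.Fatalf("can't make directory %s: %s", cacheDir, err)
		}
		fileCache = diskcache.New(afero.NewOsFs(), cacheDir)
	}
	return fileCache
}

func main() {
	cache := buildCache(os.TempDir() + "/fb-cache") // hypothetical location
	log.Printf("using cache implementation %T", cache)
}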
@@ -0,0 +1,11 @@
+package diskcache
+
+import (
+	"context"
+)
+
+type Interface interface {
+	Store(ctx context.Context, key string, value []byte) error
+	Load(ctx context.Context, key string) (value []byte, exist bool, err error)
+	Delete(ctx context.Context, key string) error
+}
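Note: the interface is deliberately small — byte values under opaque string keys, with an existence flag on Load. Purely to illustrate the contract, a hypothetical map-backed implementation (not part of this commit; the commit ships the disk-backed FileCache and the NoOp below) could look like:

package diskcache

import (
	"context"
	"sync"
)

// memCache is a hypothetical in-memory implementation of Interface,
// shown only to illustrate the contract.
type memCache struct {
	mu    sync.RWMutex
	items map[string][]byte
}

func newMemCache() *memCache {
	return &memCache{items: map[string][]byte{}}
}

func (m *memCache) Store(_ context.Context, key string, value []byte) error {
	m.mu.Lock()
	defer m.mu.Unlock()
	m.items[key] = append([]byte(nil), value...) // copy so callers can't mutate the cache
	return nil
}

func (m *memCache) Load(_ context.Context, key string) ([]byte, bool, error) {
	m.mu.RLock()
	defer m.mu.RUnlock()
	v, ok := m.items[key]
	return v, ok, nil
}

func (m *memCache) Delete(_ context.Context, key string) error {
	m.mu.Lock()
	defer m.mu.Unlock()
	delete(m.items, key)
	return nil
}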
@@ -0,0 +1,110 @@
+package diskcache
+
+import (
+	"context"
+	"crypto/sha1" //nolint:gosec
+	"encoding/hex"
+	"errors"
+	"fmt"
+	"io/ioutil"
+	"os"
+	"path/filepath"
+	"sync"
+
+	"github.com/spf13/afero"
+)
+
+type FileCache struct {
+	fs afero.Fs
+
+	// granular locks
+	scopedLocks struct {
+		sync.Mutex
+		sync.Once
+		locks map[string]sync.Locker
+	}
+}
+
+func New(fs afero.Fs, root string) *FileCache {
+	return &FileCache{
+		fs: afero.NewBasePathFs(fs, root),
+	}
+}
+
+func (f *FileCache) Store(ctx context.Context, key string, value []byte) error {
+	mu := f.getScopedLocks(key)
+	mu.Lock()
+	defer mu.Unlock()
+
+	fileName := f.getFileName(key)
+	if err := f.fs.MkdirAll(filepath.Dir(fileName), 0700); err != nil {
+		return err
+	}
+
+	if err := afero.WriteFile(f.fs, fileName, value, 0700); err != nil {
+		return err
+	}
+
+	return nil
+}
+
+func (f *FileCache) Load(ctx context.Context, key string) (value []byte, exist bool, err error) {
+	r, ok, err := f.open(key)
+	if err != nil || !ok {
+		return nil, ok, err
+	}
+	defer r.Close()
+
+	value, err = ioutil.ReadAll(r)
+	if err != nil {
+		return nil, false, err
+	}
+	return value, true, nil
+}
+
+func (f *FileCache) Delete(ctx context.Context, key string) error {
+	mu := f.getScopedLocks(key)
+	mu.Lock()
+	defer mu.Unlock()
+
+	fileName := f.getFileName(key)
+	if err := f.fs.Remove(fileName); err != nil && err != os.ErrNotExist {
+		return err
+	}
+	return nil
+}
+
+func (f *FileCache) open(key string) (afero.File, bool, error) {
+	fileName := f.getFileName(key)
+	file, err := f.fs.Open(fileName)
+	if err != nil {
+		if errors.Is(err, os.ErrNotExist) {
+			return nil, false, nil
+		}
+		return nil, false, err
+	}
+
+	return file, true, nil
+}
+
+// getScopedLocks pull lock from the map if found or create a new one
+func (f *FileCache) getScopedLocks(key string) (lock sync.Locker) {
+	f.scopedLocks.Do(func() { f.scopedLocks.locks = map[string]sync.Locker{} })
+
+	f.scopedLocks.Lock()
+	lock, ok := f.scopedLocks.locks[key]
+	if !ok {
+		lock = &sync.Mutex{}
+		f.scopedLocks.locks[key] = lock
+	}
+	f.scopedLocks.Unlock()
+
+	return lock
+}
+
+func (f *FileCache) getFileName(key string) string {
+	hasher := sha1.New() //nolint:gosec
+	_, _ = hasher.Write([]byte(key))
+	hash := hex.EncodeToString(hasher.Sum(nil))
+	return fmt.Sprintf("%s/%s/%s", hash[:1], hash[1:3], hash)
+}
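Note: getFileName derives the on-disk path from a SHA-1 of the key, using the first hex character and the next two as directory shards so no single directory collects every cached file. A small standalone sketch of the same computation, assuming the key "key" (the same key the test below uses):

package main

import (
	"crypto/sha1" //nolint:gosec -- cache sharding, not security
	"encoding/hex"
	"fmt"
)

func main() {
	sum := sha1.Sum([]byte("key"))
	hash := hex.EncodeToString(sum[:])
	// Prints "a/62/a62f2225bf70bfaccbc7f1ef2a397836717377de",
	// matching the cachedFilePath constant in the test that follows.
	fmt.Printf("%s/%s/%s\n", hash[:1], hash[1:3], hash)
}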
@@ -0,0 +1,55 @@
+package diskcache
+
+import (
+	"context"
+	"path/filepath"
+	"testing"
+
+	"github.com/spf13/afero"
+	"github.com/stretchr/testify/require"
+)
+
+func TestFileCache(t *testing.T) {
+	ctx := context.Background()
+	const (
+		key            = "key"
+		value          = "some text"
+		newValue       = "new text"
+		cacheRoot      = "/cache"
+		cachedFilePath = "a/62/a62f2225bf70bfaccbc7f1ef2a397836717377de"
+	)
+
+	fs := afero.NewMemMapFs()
+	cache := New(fs, "/cache")
+
+	// store new key
+	err := cache.Store(ctx, key, []byte(value))
+	require.NoError(t, err)
+	checkValue(t, ctx, fs, filepath.Join(cacheRoot, cachedFilePath), cache, key, value)
+
+	// update existing key
+	err = cache.Store(ctx, key, []byte(newValue))
+	require.NoError(t, err)
+	checkValue(t, ctx, fs, filepath.Join(cacheRoot, cachedFilePath), cache, key, newValue)
+
+	// delete key
+	err = cache.Delete(ctx, key)
+	require.NoError(t, err)
+	exists, err := afero.Exists(fs, filepath.Join(cacheRoot, cachedFilePath))
+	require.NoError(t, err)
+	require.False(t, exists)
+}
+
+func checkValue(t *testing.T, ctx context.Context, fs afero.Fs, fileFullPath string, cache *FileCache, key, wantValue string) { //nolint:golint
+	t.Helper()
+	// check actual file content
+	b, err := afero.ReadFile(fs, fileFullPath)
+	require.NoError(t, err)
+	require.Equal(t, wantValue, string(b))
+
+	// check cache content
+	b, ok, err := cache.Load(ctx, key)
+	require.NoError(t, err)
+	require.True(t, ok)
+	require.Equal(t, wantValue, string(b))
+}
@@ -0,0 +1,24 @@
+package diskcache
+
+import (
+	"context"
+)
+
+type NoOp struct {
+}
+
+func NewNoOp() *NoOp {
+	return &NoOp{}
+}
+
+func (n *NoOp) Store(ctx context.Context, key string, value []byte) error {
+	return nil
+}
+
+func (n *NoOp) Load(ctx context.Context, key string) (value []byte, exist bool, err error) {
+	return nil, false, nil
+}
+
+func (n *NoOp) Delete(ctx context.Context, key string) error {
+	return nil
+}
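Note: both FileCache and NoOp are held behind the diskcache.Interface variable in cmd/root.go, so callers never branch on which one they received. A hypothetical compile-time guard (not part of this commit) that both types keep satisfying the interface:

package diskcache

// Hypothetical guard, not in the commit: the build fails if either
// implementation drifts from the Interface defined above.
var (
	_ Interface = (*FileCache)(nil)
	_ Interface = (*NoOp)(nil)
)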
@@ -14,7 +14,7 @@ type modifyRequest struct {
 	Which []string `json:"which"` // Answer to: which fields?
 }
 
-func NewHandler(imgSvc ImgService, store *storage.Storage, server *settings.Server) (http.Handler, error) {
+func NewHandler(imgSvc ImgService, fileCache FileCache, store *storage.Storage, server *settings.Server) (http.Handler, error) {
 	server.Clean()
 
 	r := mux.NewRouter()
@@ -60,7 +60,7 @@ func NewHandler(imgSvc ImgService, store *storage.Storage, server *settings.Serv
 
 	api.PathPrefix("/raw").Handler(monkey(rawHandler, "/api/raw")).Methods("GET")
 	api.PathPrefix("/preview/{size}/{path:.*}").
-		Handler(monkey(previewHandler(imgSvc, server.EnableThumbnails, server.ResizePreview), "/api/preview")).Methods("GET")
+		Handler(monkey(previewHandler(imgSvc, fileCache, server.EnableThumbnails, server.ResizePreview), "/api/preview")).Methods("GET")
 	api.PathPrefix("/command").Handler(monkey(commandsHandler, "/api/command")).Methods("GET")
 	api.PathPrefix("/search").Handler(monkey(searchHandler, "/api/search")).Methods("GET")
 
@@ -1,8 +1,8 @@
 package http
 
 import (
+	"bytes"
 	"context"
-	"errors"
 	"fmt"
 	"io"
 	"net/http"
@@ -23,7 +23,12 @@ type ImgService interface {
 	Resize(ctx context.Context, in io.Reader, width, height int, out io.Writer, options ...img.Option) error
 }
 
-func previewHandler(imgSvc ImgService, enableThumbnails, resizePreview bool) handleFunc {
+type FileCache interface {
+	Store(ctx context.Context, key string, value []byte) error
+	Load(ctx context.Context, key string) ([]byte, bool, error)
+}
+
+func previewHandler(imgSvc ImgService, fileCache FileCache, enableThumbnails, resizePreview bool) handleFunc {
 	return withUser(func(w http.ResponseWriter, r *http.Request, d *data) (int, error) {
 		if !d.user.Perm.Download {
 			return http.StatusAccepted, nil
@@ -49,14 +54,14 @@ func previewHandler(imgSvc ImgService, enableThumbnails, resizePreview bool) han
 
 		switch file.Type {
 		case "image":
-			return handleImagePreview(imgSvc, w, r, file, size, enableThumbnails, resizePreview)
+			return handleImagePreview(w, r, imgSvc, fileCache, file, size, enableThumbnails, resizePreview)
 		default:
 			return http.StatusNotImplemented, fmt.Errorf("can't create preview for %s type", file.Type)
 		}
 	})
 }
 
-func handleImagePreview(imgSvc ImgService, w http.ResponseWriter, r *http.Request,
+func handleImagePreview(w http.ResponseWriter, r *http.Request, imgSvc ImgService, fileCache FileCache,
 	file *files.FileInfo, size string, enableThumbnails, resizePreview bool) (int, error) {
 	format, err := imgSvc.FormatFromExtension(file.Extension)
 	if err != nil {
@@ -67,6 +72,16 @@ func handleImagePreview(imgSvc ImgService, w http.ResponseWriter, r *http.Reques
 		return errToStatus(err), err
 	}
 
+	cacheKey := file.Path + size
+	cachedFile, ok, err := fileCache.Load(r.Context(), cacheKey)
+	if err != nil {
+		return errToStatus(err), err
+	}
+	if ok {
+		_, _ = w.Write(cachedFile)
+		return 0, nil
+	}
+
 	fd, err := file.Fs.Open(file.Path)
 	if err != nil {
 		return errToStatus(err), err
@@ -95,12 +110,18 @@ func handleImagePreview(imgSvc ImgService, w http.ResponseWriter, r *http.Reques
 		return 0, nil
 	}
 
-	if err := imgSvc.Resize(r.Context(), fd, width, height, w, options...); err != nil {
-		switch {
-		case errors.Is(err, context.DeadlineExceeded), errors.Is(err, context.Canceled):
-		default:
-			return 0, err
-		}
-	}
+	buf := &bytes.Buffer{}
+	if err := imgSvc.Resize(context.Background(), fd, width, height, buf, options...); err != nil {
+		return 0, err
+	}
+
+	go func() {
+		if err := fileCache.Store(context.Background(), cacheKey, buf.Bytes()); err != nil {
+			fmt.Printf("failed to cache resized image: %v", err)
+		}
+	}()
+
+	_, _ = w.Write(buf.Bytes())
+
 	return 0, nil
 }
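Note: the preview handler now follows a read-through pattern — look the key up, serve the cached bytes on a hit, otherwise resize into a buffer, return the buffer to the client, and store it in the background. A generic sketch of that pattern against the FileCache interface (hypothetical helper, not part of this commit):

package http

import (
	"context"
	"log"
)

// loadOrCompute is a hypothetical read-through helper mirroring the flow in
// handleImagePreview: a cache hit wins, otherwise compute, store asynchronously,
// and return the fresh bytes either way.
func loadOrCompute(ctx context.Context, cache FileCache, key string, compute func() ([]byte, error)) ([]byte, error) {
	if v, ok, err := cache.Load(ctx, key); err != nil {
		return nil, err
	} else if ok {
		return v, nil
	}

	v, err := compute()
	if err != nil {
		return nil, err
	}

	go func() {
		// Detached from the request context, as the handler above does,
		// so a client disconnect doesn't abort the cache write.
		if err := cache.Store(context.Background(), key, v); err != nil {
			log.Printf("failed to cache value for %q: %v", key, err)
		}
	}()

	return v, nil
}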