mirror of https://github.com/k3s-io/k3s
Merge pull request #50404 from apelisse/http-cache
Automatic merge from submit-queue (batch tested with PRs 51480, 49616, 50123, 50846, 50404)

Kubectl to use http caching to cache openapi responses from the server

**What this PR does / why we need it**: This PR addresses the problems raised in #50254:

> * uses a disk-based cache that is not safe between processes (does not use atomic fs operations)
> * writes get/list responses to disk that should not be cached (like kubectl get secrets)
> * is vulnerable to partially written cache responses being used as responses to future requests
> * breaks uses of the client transport that make use of websockets
> * defaults to enabling the cache for any client builder using RecommendedConfigOverrideFlags or DefaultClientConfig, which affects more components than just kubectl

All of these points are addressed by this pull request:

1. It now uses atomic fs operations.
2. It doesn't cache by default, only if requested by the client (and only the openapi client requests it).
3. Fixed because of the atomic fs operations.
4. Found the reason for the bug: the cache wrapper couldn't be unwrapped. I implemented the `WrappedRoundTripper` interface.
5. Since 2. is fixed, that should be fine.

@smarterclayton @liggitt

**Which issue this PR fixes** *(optional, in `fixes #<issue number>(, fixes #<issue_number>, ...)` format, will close that issue when PR gets merged)*: fixes #50254

**Special notes for your reviewer**:

**Release note**:
```release-note
Allows kubectl to use the http caching mechanism for the OpenAPI schema. The cache directory can be configured through the `--cache-dir` command line flag to kubectl. If set to an empty string, caching will be disabled.
```
commit
0f2c2bd847
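Before the raw diff, here is a minimal sketch of the mechanism this commit wires up: an ETag-aware, disk-backed caching `http.RoundTripper` built from the vendored `gregjones/httpcache` and `peterbourgon/diskv` packages, mirroring the `NewCacheRoundTripper` added below. The URL and cache directory are illustrative, not taken from the diff.

```go
package main

import (
	"fmt"
	"net/http"
	"path/filepath"

	"github.com/gregjones/httpcache"
	"github.com/gregjones/httpcache/diskcache"
	"github.com/peterbourgon/diskv"
)

// newCachingTransport mirrors the NewCacheRoundTripper added by this PR:
// it wraps a base RoundTripper with an ETag-aware, disk-backed HTTP cache.
func newCachingTransport(cacheDir string, base http.RoundTripper) http.RoundTripper {
	d := diskv.New(diskv.Options{
		BasePath: cacheDir,
		TempDir:  filepath.Join(cacheDir, ".diskv-temp"),
	})
	t := httpcache.NewTransport(diskcache.NewWithDiskv(d))
	t.Transport = base
	return t
}

func main() {
	// Illustrative values; kubectl points the cache at ~/.kube/http-cache
	// by default via the new --cache-dir flag.
	client := &http.Client{Transport: newCachingTransport("/tmp/http-cache", http.DefaultTransport)}
	resp, err := client.Get("https://example.com/openapi/v2")
	if err != nil {
		panic(err)
	}
	defer resp.Body.Close()
	// httpcache marks replies served from the cache with the X-From-Cache header.
	fmt.Println("served from cache:", resp.Header.Get(httpcache.XFromCache) != "")
}
```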
@@ -1720,6 +1720,14 @@
"ImportPath": "github.com/gorilla/websocket",
"Rev": "6eb6ad425a89d9da7a5549bc6da8f79ba5c17844"
},
{
"ImportPath": "github.com/gregjones/httpcache",
"Rev": "787624de3eb7bd915c329cba748687a3b22666a6"
},
{
"ImportPath": "github.com/gregjones/httpcache/diskcache",
"Rev": "787624de3eb7bd915c329cba748687a3b22666a6"
},
{
"ImportPath": "github.com/grpc-ecosystem/go-grpc-prometheus",
"Comment": "v1.1-4-g2500245",

@@ -2253,6 +2261,11 @@
"Comment": "v0.3.5-10-g0049ab3",
"Rev": "0049ab3dc4c4c70a9eee23087437b69c0dde2130"
},
{
"ImportPath": "github.com/peterbourgon/diskv",
"Comment": "v2.0.1",
"Rev": "5f041e8faa004a95c88a202771f4cc3e991971e6"
},
{
"ImportPath": "github.com/pkg/errors",
"Comment": "v0.7.0-13-ga221380",

@@ -60060,6 +60060,34 @@ OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
================================================================================

================================================================================
= vendor/github.com/gregjones/httpcache licensed under: =

Copyright © 2012 Greg Jones (greg.jones@gmail.com)

Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the “Software”), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions:

The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software.

THE SOFTWARE IS PROVIDED “AS IS”, WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.

= vendor/github.com/gregjones/httpcache/LICENSE.txt 3cfef421226b2dacde78a4871380ac24 -
================================================================================

================================================================================
= vendor/github.com/gregjones/httpcache/diskcache licensed under: =

Copyright © 2012 Greg Jones (greg.jones@gmail.com)

Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the “Software”), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions:

The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software.

THE SOFTWARE IS PROVIDED “AS IS”, WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.

= vendor/github.com/gregjones/httpcache/LICENSE.txt 3cfef421226b2dacde78a4871380ac24 -
================================================================================

================================================================================
= vendor/github.com/grpc-ecosystem/go-grpc-prometheus licensed under: =

@@ -72193,6 +72221,33 @@ SOFTWARE.
================================================================================

================================================================================
= vendor/github.com/peterbourgon/diskv licensed under: =

Copyright (c) 2011-2012 Peter Bourgon

Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:

The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.

THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.

= vendor/github.com/peterbourgon/diskv/LICENSE f9f3e815fc84aa04c4f4db33c553eef9 -
================================================================================

================================================================================
= vendor/github.com/pkg/errors licensed under: =
@@ -138,7 +138,6 @@ func NewCmdGet(f cmdutil.Factory, out io.Writer, errOut io.Writer) *cobra.Comman
	usage := "identifying the resource to get from a server."
	cmdutil.AddFilenameOptionFlags(cmd, &options.FilenameOptions, usage)
	cmdutil.AddInclude3rdPartyFlags(cmd)
	cmdutil.AddOpenAPIFlags(cmd)
	cmd.Flags().StringVar(&options.Raw, "raw", options.Raw, "Raw URI to request from the server. Uses the transport specified by the kubeconfig file.")
	return cmd
}

@@ -457,7 +456,7 @@ func RunGet(f cmdutil.Factory, out, errOut io.Writer, cmd *cobra.Command, args [
	// if cmd does not specify output format and useOpenAPIPrintColumnFlagLabel flag is true,
	// then get the default output options for this mapping from OpenAPI schema.
	if !cmdSpecifiesOutputFmt(cmd) && useOpenAPIPrintColumns {
		outputOpts, _ = outputOptsForMappingFromOpenAPI(f, cmdutil.GetOpenAPICacheDir(cmd), mapping)
		outputOpts, _ = outputOptsForMappingFromOpenAPI(f, mapping)
	}

	printer, err = f.PrinterForMapping(cmd, false, outputOpts, mapping, allNamespaces)

@@ -556,11 +555,11 @@ func cmdSpecifiesOutputFmt(cmd *cobra.Command) bool {

// outputOptsForMappingFromOpenAPI looks for the output format metatadata in the
// openapi schema and returns the output options for the mapping if found.
func outputOptsForMappingFromOpenAPI(f cmdutil.Factory, openAPIcacheDir string, mapping *meta.RESTMapping) (*printers.OutputOptions, bool) {
func outputOptsForMappingFromOpenAPI(f cmdutil.Factory, mapping *meta.RESTMapping) (*printers.OutputOptions, bool) {

	// user has not specified any output format, check if OpenAPI has
	// default specification to print this resource type
	api, err := f.OpenAPISchema(openAPIcacheDir)
	api, err := f.OpenAPISchema()
	if err != nil {
		// Error getting schema
		return nil, false
@@ -424,7 +424,7 @@ func (f *FakeFactory) SwaggerSchema(schema.GroupVersionKind) (*swagger.ApiDeclar
	return nil, nil
}

func (f *FakeFactory) OpenAPISchema(cacheDir string) (openapi.Resources, error) {
func (f *FakeFactory) OpenAPISchema() (openapi.Resources, error) {
	return nil, nil
}

@@ -796,7 +796,7 @@ func (f *fakeAPIFactory) SwaggerSchema(schema.GroupVersionKind) (*swagger.ApiDec
	return nil, nil
}

func (f *fakeAPIFactory) OpenAPISchema(cacheDir string) (openapi.Resources, error) {
func (f *fakeAPIFactory) OpenAPISchema() (openapi.Resources, error) {
	if f.tf.OpenAPISchemaFunc != nil {
		return f.tf.OpenAPISchemaFunc()
	}
@@ -63,6 +63,10 @@ const (
	FlagMatchBinaryVersion = "match-server-version"
)

var (
	FlagHTTPCacheDir = "cache-dir"
)

// Factory provides abstractions that allow the Kubectl command to be extended across multiple types
// of resources and different API sets.
// The rings are here for a reason. In order for composers to be able to provide alternative factory implementations

@@ -82,13 +86,17 @@ type Factory interface {
type DiscoveryClientFactory interface {
	// Returns a discovery client
	DiscoveryClient() (discovery.CachedDiscoveryInterface, error)

	// BindFlags adds any discovery flags that are common to all kubectl sub commands.
	BindFlags(flags *pflag.FlagSet)
}

// ClientAccessFactory holds the first level of factory methods.
// Generally provides discovery, negotiation, and no-dep calls.
// TODO The polymorphic calls probably deserve their own interface.
type ClientAccessFactory interface {
	DiscoveryClientFactory
	// Returns a discovery client
	DiscoveryClient() (discovery.CachedDiscoveryInterface, error)

	// ClientSet gives you back an internal, generated clientset
	ClientSet() (internalclientset.Interface, error)

@@ -229,7 +237,7 @@ type ObjectMappingFactory interface {
	// SwaggerSchema returns the schema declaration for the provided group version kind.
	SwaggerSchema(schema.GroupVersionKind) (*swagger.ApiDeclaration, error)
	// OpenAPISchema returns the schema openapi schema definiton
	OpenAPISchema(cacheDir string) (openapi.Resources, error)
	OpenAPISchema() (openapi.Resources, error)
}

// BuilderFactory holds the second level of factory methods. These functions depend upon ObjectMappingFactory and ClientAccessFactory methods.
@@ -90,6 +90,7 @@ func NewClientAccessFactoryFromDiscovery(flags *pflag.FlagSet, clientConfig clie

type discoveryFactory struct {
	clientConfig clientcmd.ClientConfig
	cacheDir     string
}

func (f *discoveryFactory) DiscoveryClient() (discovery.CachedDiscoveryInterface, error) {

@@ -97,6 +98,9 @@ func (f *discoveryFactory) DiscoveryClient() (discovery.CachedDiscoveryInterface
	if err != nil {
		return nil, err
	}

	cfg.CacheDir = f.cacheDir

	discoveryClient, err := discovery.NewDiscoveryClientForConfig(cfg)
	if err != nil {
		return nil, err

@@ -105,6 +109,11 @@ func (f *discoveryFactory) DiscoveryClient() (discovery.CachedDiscoveryInterface
	return NewCachedDiscoveryClient(discoveryClient, cacheDir, time.Duration(10*time.Minute)), nil
}

func (f *discoveryFactory) BindFlags(flags *pflag.FlagSet) {
	defaultCacheDir := filepath.Join(homedir.HomeDir(), ".kube", "http-cache")
	flags.StringVar(&f.cacheDir, FlagHTTPCacheDir, defaultCacheDir, "Default HTTP cache directory")
}

// DefaultClientConfig creates a clientcmd.ClientConfig with the following hierarchy:
// 1. Use the kubeconfig builder. The number of merges and overrides here gets a little crazy. Stay with me.
// 1. Merge the kubeconfig itself. This is done with the following hierarchy rules:

@@ -377,6 +386,8 @@ func (f *ring0Factory) BindFlags(flags *pflag.FlagSet) {
	// to do that automatically for every subcommand.
	flags.BoolVar(&f.clientCache.matchVersion, FlagMatchBinaryVersion, false, "Require server version to match client version")

	f.discoveryFactory.BindFlags(flags)

	// Normalize all flags that are coming from other packages or pre-configurations
	// a.k.a. change all "_" to "-". e.g. glog package
	flags.SetNormalizeFunc(utilflag.WordSepNormalizeFunc)
@@ -407,7 +407,7 @@ func (f *ring1Factory) AttachablePodForObject(object runtime.Object, timeout tim
func (f *ring1Factory) Validator(validate, openapi bool, cacheDir string) (validation.Schema, error) {
	if validate {
		if openapi {
			resources, err := f.OpenAPISchema(cacheDir)
			resources, err := f.OpenAPISchema()
			if err == nil {
				return validation.ConjunctiveSchema{
					openapivalidation.NewSchemaValidation(resources),

@@ -453,13 +453,7 @@ func (f *ring1Factory) SwaggerSchema(gvk schema.GroupVersionKind) (*swagger.ApiD
}

// OpenAPISchema returns metadata and structural information about Kubernetes object definitions.
// Will try to cache the data to a local file. Cache is written and read from a
// file created with ioutil.TempFile and obeys the expiration semantics of that file.
// The cache location is a function of the client and server versions so that the open API
// schema will be cached separately for different client / server combinations.
// Note, the cache will not be invalidated if the server changes its open API schema without
// changing the server version.
func (f *ring1Factory) OpenAPISchema(cacheDir string) (openapi.Resources, error) {
func (f *ring1Factory) OpenAPISchema() (openapi.Resources, error) {
	discovery, err := f.clientAccessFactory.DiscoveryClient()
	if err != nil {
		return nil, err

@@ -467,23 +461,8 @@ func (f *ring1Factory) OpenAPISchema(cacheDir string) (openapi.Resources, error)

	// Lazily initialize the OpenAPIGetter once
	f.openAPIGetter.once.Do(func() {
		// Get the server version for caching the openapi spec
		versionString := ""
		version, err := discovery.ServerVersion()
		if err != nil {
			// Cache the result under the server version
			versionString = version.String()
		}

		// Get the cache directory for caching the openapi spec
		cacheDir, err = substituteUserHome(cacheDir)
		if err != nil {
			// Don't cache the result if we couldn't substitute the home directory
			cacheDir = ""
		}

		// Create the caching OpenAPIGetter
		f.openAPIGetter.getter = openapi.NewOpenAPIGetter(cacheDir, versionString, discovery)
		f.openAPIGetter.getter = openapi.NewOpenAPIGetter(discovery)
	})

	// Delegate to the OpenAPIGetter
@@ -408,16 +408,6 @@ func AddValidateOptionFlags(cmd *cobra.Command, options *ValidateOptions) {

func AddOpenAPIFlags(cmd *cobra.Command) {
	cmd.Flags().Bool("openapi-validation", false, "If true, use openapi rather than swagger for validation")
	cmd.Flags().String("schema-cache-dir",
		fmt.Sprintf("~/%s/%s", clientcmd.RecommendedHomeDir, clientcmd.RecommendedSchemaName),
		fmt.Sprintf("If non-empty, load/store cached API schemas in this directory, default is '$HOME/%s/%s'",
			clientcmd.RecommendedHomeDir, clientcmd.RecommendedSchemaName),
	)
	cmd.MarkFlagFilename("schema-cache-dir")
}

func GetOpenAPICacheDir(cmd *cobra.Command) string {
	return GetFlagString(cmd, "schema-cache-dir")
}

func AddFilenameOptionFlags(cmd *cobra.Command, options *resource.FilenameOptions, usage string) {
@@ -13,14 +13,10 @@ go_library(
        "document.go",
        "extensions.go",
        "openapi.go",
        "openapi_cache.go",
        "openapi_getter.go",
    ],
    deps = [
        "//pkg/version:go_default_library",
        "//vendor/github.com/go-openapi/spec:go_default_library",
        "//vendor/github.com/golang/glog:go_default_library",
        "//vendor/github.com/golang/protobuf/proto:go_default_library",
        "//vendor/github.com/googleapis/gnostic/OpenAPIv2:go_default_library",
        "//vendor/gopkg.in/yaml.v2:go_default_library",
        "//vendor/k8s.io/apimachinery/pkg/runtime/schema:go_default_library",

@@ -32,7 +28,6 @@ go_test(
    name = "go_default_xtest",
    size = "small",
    srcs = [
        "openapi_cache_test.go",
        "openapi_getter_test.go",
        "openapi_suite_test.go",
        "openapi_test.go",
@@ -1,163 +0,0 @@
/*
Copyright 2017 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package openapi

import (
	"bytes"
	"fmt"
	"io"
	"io/ioutil"
	"os"
	"path/filepath"

	"github.com/golang/glog"
	"github.com/golang/protobuf/proto"
	openapi_v2 "github.com/googleapis/gnostic/OpenAPIv2"

	"k8s.io/client-go/discovery"
	"k8s.io/kubernetes/pkg/version"
)

const openapiFileName = "openapi_cache"

type CachingOpenAPIClient struct {
	version      string
	client       discovery.OpenAPISchemaInterface
	cacheDirName string
}

// NewCachingOpenAPIClient returns a new discovery.OpenAPISchemaInterface
// that will read the openapi spec from a local cache if it exists, and
// if not will then fetch an openapi spec using a client.
// client: used to fetch a new openapi spec if a local cache is not found
// version: the server version and used as part of the cache file location
// cacheDir: the directory under which the cache file will be written
func NewCachingOpenAPIClient(client discovery.OpenAPISchemaInterface, version, cacheDir string) *CachingOpenAPIClient {
	return &CachingOpenAPIClient{
		client:       client,
		version:      version,
		cacheDirName: cacheDir,
	}
}

// OpenAPIData returns an openapi spec.
// It will first attempt to read the spec from a local cache
// If it cannot read a local cache, it will read the file
// using the client and then write the cache.
func (c *CachingOpenAPIClient) OpenAPIData() (Resources, error) {
	// Try to use the cached version
	if c.useCache() {
		doc, err := c.readOpenAPICache()
		if err == nil {
			return NewOpenAPIData(doc)
		}
	}

	// No cached version found, download from server
	s, err := c.client.OpenAPISchema()
	if err != nil {
		glog.V(2).Infof("Failed to download openapi data %v", err)
		return nil, err
	}

	oa, err := NewOpenAPIData(s)
	if err != nil {
		glog.V(2).Infof("Failed to parse openapi data %v", err)
		return nil, err
	}

	// Try to cache the openapi spec
	if c.useCache() {
		err = c.writeToCache(s)
		if err != nil {
			// Just log an message, no need to fail the command since we got the data we need
			glog.V(2).Infof("Unable to cache openapi spec %v", err)
		}
	}

	// Return the parsed data
	return oa, nil
}

// useCache returns true if the client should try to use the cache file
func (c *CachingOpenAPIClient) useCache() bool {
	return len(c.version) > 0 && len(c.cacheDirName) > 0
}

// readOpenAPICache tries to read the openapi spec from the local file cache
func (c *CachingOpenAPIClient) readOpenAPICache() (*openapi_v2.Document, error) {
	// Get the filename to read
	filename := c.openAPICacheFilename()

	// Read the cached file
	data, err := ioutil.ReadFile(filename)
	if err != nil {
		return nil, err
	}

	doc := &openapi_v2.Document{}
	return doc, proto.Unmarshal(data, doc)
}

// writeToCache tries to write the openapi spec to the local file cache.
// writes the data to a new tempfile, and then links the cache file and the tempfile
func (c *CachingOpenAPIClient) writeToCache(doc *openapi_v2.Document) error {
	// Get the constant filename used to read the cache.
	cacheFile := c.openAPICacheFilename()

	// Binary encode the spec. This is 10x as fast as using json encoding. (60ms vs 600ms)
	b, err := proto.Marshal(doc)
	if err != nil {
		return fmt.Errorf("Could not binary encode openapi spec: %v", err)
	}

	// Create a new temp file for the cached openapi spec.
	cacheDir := filepath.Dir(cacheFile)
	if err := os.MkdirAll(cacheDir, 0755); err != nil {
		return fmt.Errorf("Could not create directory: %v %v", cacheDir, err)
	}
	tmpFile, err := ioutil.TempFile(cacheDir, "openapi")
	if err != nil {
		return fmt.Errorf("Could not create temp cache file: %v %v", cacheFile, err)
	}

	// Write the binary encoded openapi spec to the temp file
	if _, err := io.Copy(tmpFile, bytes.NewBuffer(b)); err != nil {
		return fmt.Errorf("Could not write temp cache file: %v", err)
	}

	// Link the temp cache file to the constant cache filepath
	return linkFiles(tmpFile.Name(), cacheFile)
}

// openAPICacheFilename returns the filename to read the cache from
func (c *CachingOpenAPIClient) openAPICacheFilename() string {
	// Cache using the client and server versions
	return filepath.Join(c.cacheDirName, c.version, version.Get().GitVersion, openapiFileName)
}

// linkFiles links the old file to the new file
func linkFiles(old, new string) error {
	if err := os.Link(old, new); err != nil {
		// If we can't write due to file existing, or permission problems, keep going.
		if os.IsExist(err) || os.IsPermission(err) {
			return nil
		}
		return err
	}
	return nil
}
@@ -1,268 +0,0 @@
/*
Copyright 2017 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package openapi_test

import (
	"fmt"
	"io/ioutil"
	"os"
	"path/filepath"
	"sync"

	"gopkg.in/yaml.v2"

	"github.com/googleapis/gnostic/OpenAPIv2"
	"github.com/googleapis/gnostic/compiler"
	. "github.com/onsi/ginkgo"
	. "github.com/onsi/gomega"

	"k8s.io/kubernetes/pkg/kubectl/cmd/util/openapi"
)

var _ = Describe("When reading openAPIData", func() {
	var tmpDir string
	var err error
	var client *fakeOpenAPIClient
	var instance *openapi.CachingOpenAPIClient
	var expectedData openapi.Resources

	BeforeEach(func() {
		tmpDir, err = ioutil.TempDir("", "openapi_cache_test")
		Expect(err).To(BeNil())
		client = &fakeOpenAPIClient{}
		instance = openapi.NewCachingOpenAPIClient(client, "v1.6", tmpDir)

		d, err := data.OpenAPISchema()
		Expect(err).To(BeNil())

		expectedData, err = openapi.NewOpenAPIData(d)
		Expect(err).To(BeNil())
	})

	AfterEach(func() {
		os.RemoveAll(tmpDir)
	})

	It("should write to the cache", func() {
		By("getting the live openapi spec from the server")
		result, err := instance.OpenAPIData()
		Expect(err).To(BeNil())
		Expect(result).To(Equal(expectedData))
		Expect(client.calls).To(Equal(1))

		By("writing the live openapi spec to a local cache file")
		names, err := getFilenames(tmpDir)
		Expect(err).To(BeNil())
		Expect(names).To(ConsistOf("v1.6"))

		names, err = getFilenames(filepath.Join(tmpDir, "v1.6"))
		Expect(err).To(BeNil())
		Expect(names).To(HaveLen(1))
		clientVersion := names[0]

		names, err = getFilenames(filepath.Join(tmpDir, "v1.6", clientVersion))
		Expect(err).To(BeNil())
		Expect(names).To(ContainElement("openapi_cache"))
	})

	It("should read from the cache", func() {
		// First call should use the client
		result, err := instance.OpenAPIData()
		Expect(err).To(BeNil())
		Expect(result).To(Equal(expectedData))
		Expect(client.calls).To(Equal(1))

		// Second call shouldn't use the client
		result, err = instance.OpenAPIData()
		Expect(err).To(BeNil())
		Expect(result).To(Equal(expectedData))
		Expect(client.calls).To(Equal(1))

		names, err := getFilenames(tmpDir)
		Expect(err).To(BeNil())
		Expect(names).To(ConsistOf("v1.6"))
	})

	It("propagate errors that are encountered", func() {
		// Expect an error
		client.err = fmt.Errorf("expected error")
		result, err := instance.OpenAPIData()
		Expect(err.Error()).To(Equal(client.err.Error()))
		Expect(result).To(BeNil())
		Expect(client.calls).To(Equal(1))

		// No cache file is written
		files, err := ioutil.ReadDir(tmpDir)
		Expect(err).To(BeNil())
		Expect(files).To(HaveLen(0))

		// Client error is not cached
		result, err = instance.OpenAPIData()
		Expect(err.Error()).To(Equal(client.err.Error()))
		Expect(result).To(BeNil())
		Expect(client.calls).To(Equal(2))
	})
})

var _ = Describe("Reading openAPIData", func() {
	var tmpDir string
	var serverVersion string
	var cacheDir string

	BeforeEach(func() {
		var err error
		tmpDir, err = ioutil.TempDir("", "openapi_cache_test")
		Expect(err).To(BeNil())
	})

	AfterEach(func() {
		os.RemoveAll(tmpDir)
	})

	// Set the serverVersion to empty
	Context("when the server version is empty", func() {
		BeforeEach(func() {
			serverVersion = ""
			cacheDir = tmpDir
		})
		It("should not cache the result", func() {
			client := &fakeOpenAPIClient{}

			instance := openapi.NewCachingOpenAPIClient(client, serverVersion, cacheDir)

			d, err := data.OpenAPISchema()
			Expect(err).To(BeNil())

			expectedData, err := openapi.NewOpenAPIData(d)
			Expect(err).To(BeNil())

			By("getting the live openapi schema")
			result, err := instance.OpenAPIData()
			Expect(err).To(BeNil())
			Expect(result).To(Equal(expectedData))
			Expect(client.calls).To(Equal(1))

			files, err := ioutil.ReadDir(tmpDir)
			Expect(err).To(BeNil())
			Expect(files).To(HaveLen(0))
		})
	})

	Context("when the cache directory is empty", func() {
		BeforeEach(func() {
			serverVersion = "v1.6"
			cacheDir = ""
		})
		It("should not cache the result", func() {
			client := &fakeOpenAPIClient{}

			instance := openapi.NewCachingOpenAPIClient(client, serverVersion, cacheDir)

			d, err := data.OpenAPISchema()
			Expect(err).To(BeNil())

			expectedData, err := openapi.NewOpenAPIData(d)
			Expect(err).To(BeNil())

			By("getting the live openapi schema")
			result, err := instance.OpenAPIData()
			Expect(err).To(BeNil())
			Expect(result).To(Equal(expectedData))
			Expect(client.calls).To(Equal(1))

			files, err := ioutil.ReadDir(tmpDir)
			Expect(err).To(BeNil())
			Expect(files).To(HaveLen(0))
		})
	})
})

// Test Utils
func getFilenames(path string) ([]string, error) {
	files, err := ioutil.ReadDir(path)
	if err != nil {
		return nil, err
	}
	result := []string{}
	for _, n := range files {
		result = append(result, n.Name())
	}
	return result, nil
}

type fakeOpenAPIClient struct {
	calls int
	err   error
}

func (f *fakeOpenAPIClient) OpenAPISchema() (*openapi_v2.Document, error) {
	f.calls = f.calls + 1

	if f.err != nil {
		return nil, f.err
	}

	return data.OpenAPISchema()
}

// Test utils
var data apiData

type apiData struct {
	sync.Once
	data *openapi_v2.Document
	err  error
}

func (d *apiData) OpenAPISchema() (*openapi_v2.Document, error) {
	d.Do(func() {
		// Get the path to the swagger.json file
		wd, err := os.Getwd()
		if err != nil {
			d.err = err
			return
		}

		abs, err := filepath.Abs(wd)
		if err != nil {
			d.err = err
			return
		}

		root := filepath.Dir(filepath.Dir(filepath.Dir(filepath.Dir(filepath.Dir(abs)))))
		specpath := filepath.Join(root, "api", "openapi-spec", "swagger.json")
		_, err = os.Stat(specpath)
		if err != nil {
			d.err = err
			return
		}
		spec, err := ioutil.ReadFile(specpath)
		if err != nil {
			d.err = err
			return
		}
		var info yaml.MapSlice
		err = yaml.Unmarshal(spec, &info)
		if err != nil {
			d.err = err
			return
		}
		d.data, d.err = openapi_v2.NewDocument(info, compiler.NewContext("$root", nil))
	})

	return d.data, d.err
}
@@ -29,8 +29,6 @@ type synchronizedOpenAPIGetter struct {
	openAPISchema Resources
	err           error

	serverVersion string
	cacheDir      string
	openAPIClient discovery.OpenAPISchemaInterface
}

@@ -42,12 +40,10 @@ type Getter interface {
	Get() (Resources, error)
}

// NewOpenAPIGetter returns an object to return OpenAPIDatas which either read from a
// local file cache or read from a server, and then stored in memory for subsequent invocations
func NewOpenAPIGetter(cacheDir, serverVersion string, openAPIClient discovery.OpenAPISchemaInterface) Getter {
// NewOpenAPIGetter returns an object to return OpenAPIDatas which reads
// from a server, and then stores in memory for subsequent invocations
func NewOpenAPIGetter(openAPIClient discovery.OpenAPISchemaInterface) Getter {
	return &synchronizedOpenAPIGetter{
		serverVersion: serverVersion,
		cacheDir:      cacheDir,
		openAPIClient: openAPIClient,
	}
}

@@ -55,15 +51,13 @@ func NewOpenAPIGetter(cacheDir, serverVersion string, openAPIClient discovery.Op
// Resources implements Getter
func (g *synchronizedOpenAPIGetter) Get() (Resources, error) {
	g.Do(func() {
		client := NewCachingOpenAPIClient(g.openAPIClient, g.serverVersion, g.cacheDir)
		result, err := client.OpenAPIData()
		s, err := g.openAPIClient.OpenAPISchema()
		if err != nil {
			g.err = err
			return
		}

		// Save the result
		g.openAPISchema = result
		g.openAPISchema, g.err = NewOpenAPIData(s)
	})

	// Return the save result
@@ -18,7 +18,15 @@ package openapi_test

import (
	"fmt"
	"io/ioutil"
	"os"
	"path/filepath"
	"sync"

	"gopkg.in/yaml.v2"

	"github.com/googleapis/gnostic/OpenAPIv2"
	"github.com/googleapis/gnostic/compiler"
	. "github.com/onsi/ginkgo"
	. "github.com/onsi/gomega"

@@ -26,6 +34,68 @@ import (
	tst "k8s.io/kubernetes/pkg/kubectl/cmd/util/openapi/testing"
)

// Test utils
var data apiData

type apiData struct {
	sync.Once
	data *openapi_v2.Document
	err  error
}

func (d *apiData) OpenAPISchema() (*openapi_v2.Document, error) {
	d.Do(func() {
		// Get the path to the swagger.json file
		wd, err := os.Getwd()
		if err != nil {
			d.err = err
			return
		}

		abs, err := filepath.Abs(wd)
		if err != nil {
			d.err = err
			return
		}

		root := filepath.Dir(filepath.Dir(filepath.Dir(filepath.Dir(filepath.Dir(abs)))))
		specpath := filepath.Join(root, "api", "openapi-spec", "swagger.json")
		_, err = os.Stat(specpath)
		if err != nil {
			d.err = err
			return
		}
		spec, err := ioutil.ReadFile(specpath)
		if err != nil {
			d.err = err
			return
		}
		var info yaml.MapSlice
		err = yaml.Unmarshal(spec, &info)
		if err != nil {
			d.err = err
			return
		}
		d.data, d.err = openapi_v2.NewDocument(info, compiler.NewContext("$root", nil))
	})
	return d.data, d.err
}

type fakeOpenAPIClient struct {
	calls int
	err   error
}

func (f *fakeOpenAPIClient) OpenAPISchema() (*openapi_v2.Document, error) {
	f.calls = f.calls + 1

	if f.err != nil {
		return nil, f.err
	}

	return data.OpenAPISchema()
}

var _ = Describe("Getting the Resources", func() {
	var client *tst.FakeClient
	var expectedData openapi.Resources

@@ -39,7 +109,7 @@ var _ = Describe("Getting the Resources", func() {
		expectedData, err = openapi.NewOpenAPIData(d)
		Expect(err).To(BeNil())

		instance = openapi.NewOpenAPIGetter("", "", client)
		instance = openapi.NewOpenAPIGetter(client)
	})

	Context("when the server returns a successful result", func() {
@@ -190,6 +190,10 @@
"ImportPath": "github.com/golang/protobuf/ptypes/timestamp",
"Rev": "4bd1920723d7b7c925de087aa32e2187708897f7"
},
{
"ImportPath": "github.com/google/btree",
"Rev": "7d79101e329e5a3adf994758c578dab82b90c017"
},
{
"ImportPath": "github.com/google/gofuzz",
"Rev": "44d81051d367757e1c7c6a5a86423ece9afcf63c"

@@ -206,6 +210,14 @@
"ImportPath": "github.com/googleapis/gnostic/extensions",
"Rev": "0c5108395e2debce0d731cf0287ddf7242066aba"
},
{
"ImportPath": "github.com/gregjones/httpcache",
"Rev": "787624de3eb7bd915c329cba748687a3b22666a6"
},
{
"ImportPath": "github.com/gregjones/httpcache/diskcache",
"Rev": "787624de3eb7bd915c329cba748687a3b22666a6"
},
{
"ImportPath": "github.com/grpc-ecosystem/go-grpc-prometheus",
"Rev": "2500245aa6110c562d17020fb31a2c133d737799"

@@ -270,6 +282,10 @@
"ImportPath": "github.com/pborman/uuid",
"Rev": "ca53cad383cad2479bbba7f7a1a05797ec1386e4"
},
{
"ImportPath": "github.com/peterbourgon/diskv",
"Rev": "5f041e8faa004a95c88a202771f4cc3e991971e6"
},
{
"ImportPath": "github.com/pkg/errors",
"Rev": "a22138067af1c4942683050411a841ade67fe1eb"
@@ -430,6 +430,14 @@
"ImportPath": "github.com/gophercloud/gophercloud/pagination",
"Rev": "c0406a133c4a74a838baf0ddff3c2fab21155fba"
},
{
"ImportPath": "github.com/gregjones/httpcache",
"Rev": "787624de3eb7bd915c329cba748687a3b22666a6"
},
{
"ImportPath": "github.com/gregjones/httpcache/diskcache",
"Rev": "787624de3eb7bd915c329cba748687a3b22666a6"
},
{
"ImportPath": "github.com/grpc-ecosystem/go-grpc-prometheus",
"Rev": "2500245aa6110c562d17020fb31a2c133d737799"

@@ -498,6 +506,10 @@
"ImportPath": "github.com/pborman/uuid",
"Rev": "ca53cad383cad2479bbba7f7a1a05797ec1386e4"
},
{
"ImportPath": "github.com/peterbourgon/diskv",
"Rev": "5f041e8faa004a95c88a202771f4cc3e991971e6"
},
{
"ImportPath": "github.com/pkg/errors",
"Rev": "a22138067af1c4942683050411a841ade67fe1eb"
@@ -154,6 +154,10 @@
"ImportPath": "github.com/golang/protobuf/ptypes/timestamp",
"Rev": "4bd1920723d7b7c925de087aa32e2187708897f7"
},
{
"ImportPath": "github.com/google/btree",
"Rev": "7d79101e329e5a3adf994758c578dab82b90c017"
},
{
"ImportPath": "github.com/google/gofuzz",
"Rev": "44d81051d367757e1c7c6a5a86423ece9afcf63c"

@@ -198,6 +202,14 @@
"ImportPath": "github.com/gophercloud/gophercloud/pagination",
"Rev": "c0406a133c4a74a838baf0ddff3c2fab21155fba"
},
{
"ImportPath": "github.com/gregjones/httpcache",
"Rev": "787624de3eb7bd915c329cba748687a3b22666a6"
},
{
"ImportPath": "github.com/gregjones/httpcache/diskcache",
"Rev": "787624de3eb7bd915c329cba748687a3b22666a6"
},
{
"ImportPath": "github.com/hashicorp/golang-lru",
"Rev": "a0d98a5f288019575c6d1f4bb1573fef2d1fcdc4"

@@ -234,6 +246,10 @@
"ImportPath": "github.com/mailru/easyjson/jwriter",
"Rev": "d5b7844b561a7bc640052f1b935f7b800330d7e0"
},
{
"ImportPath": "github.com/peterbourgon/diskv",
"Rev": "5f041e8faa004a95c88a202771f4cc3e991971e6"
},
{
"ImportPath": "github.com/pmezard/go-difflib/difflib",
"Rev": "d8ed2627bdf02c080bf22230dbb337003b7aba2d"
@@ -71,6 +71,10 @@ type Config struct {
	// TODO: demonstrate an OAuth2 compatible client.
	BearerToken string

	// CacheDir is the directory where we'll store HTTP cached responses.
	// If set to empty string, no caching mechanism will be used.
	CacheDir string

	// Impersonate is the configuration that RESTClient will use for impersonation.
	Impersonate ImpersonationConfig
@@ -249,6 +249,7 @@ func TestAnonymousConfig(t *testing.T) {
	expected.BearerToken = ""
	expected.Username = ""
	expected.Password = ""
	expected.CacheDir = ""
	expected.AuthProvider = nil
	expected.AuthConfigPersister = nil
	expected.TLSClientConfig.CertData = nil
@@ -89,6 +89,7 @@ func (c *Config) TransportConfig() (*transport.Config, error) {
		},
		Username:    c.Username,
		Password:    c.Password,
		CacheDir:    c.CacheDir,
		BearerToken: c.BearerToken,
		Impersonate: transport.ImpersonationConfig{
			UserName: c.Impersonate.UserName,
@@ -26,6 +26,9 @@ go_library(
    ],
    deps = [
        "//vendor/github.com/golang/glog:go_default_library",
        "//vendor/github.com/gregjones/httpcache:go_default_library",
        "//vendor/github.com/gregjones/httpcache/diskcache:go_default_library",
        "//vendor/github.com/peterbourgon/diskv:go_default_library",
        "//vendor/k8s.io/apimachinery/pkg/util/net:go_default_library",
    ],
)
@@ -34,6 +34,10 @@ type Config struct {
	// Bearer token for authentication
	BearerToken string

	// CacheDir is the directory where we'll store HTTP cached responses.
	// If set to empty string, no caching mechanism will be used.
	CacheDir string

	// Impersonate is the config that this Config will impersonate using
	Impersonate ImpersonationConfig
@@ -19,10 +19,14 @@ package transport
import (
	"fmt"
	"net/http"
	"path/filepath"
	"strings"
	"time"

	"github.com/golang/glog"
	"github.com/gregjones/httpcache"
	"github.com/gregjones/httpcache/diskcache"
	"github.com/peterbourgon/diskv"

	utilnet "k8s.io/apimachinery/pkg/util/net"
)

@@ -56,6 +60,9 @@ func HTTPWrappersForConfig(config *Config, rt http.RoundTripper) (http.RoundTrip
		len(config.Impersonate.Extra) > 0 {
		rt = NewImpersonatingRoundTripper(config.Impersonate, rt)
	}
	if len(config.CacheDir) > 0 {
		rt = NewCacheRoundTripper(config.CacheDir, rt)
	}
	return rt, nil
}

@@ -79,6 +86,30 @@ type requestCanceler interface {
	CancelRequest(*http.Request)
}

type cacheRoundTripper struct {
	rt *httpcache.Transport
}

// NewCacheRoundTripper creates a roundtripper that reads the ETag on
// response headers and send the If-None-Match header on subsequent
// corresponding requests.
func NewCacheRoundTripper(cacheDir string, rt http.RoundTripper) http.RoundTripper {
	d := diskv.New(diskv.Options{
		BasePath: cacheDir,
		TempDir:  filepath.Join(cacheDir, ".diskv-temp"),
	})
	t := httpcache.NewTransport(diskcache.NewWithDiskv(d))
	t.Transport = rt

	return &cacheRoundTripper{rt: t}
}

func (rt *cacheRoundTripper) RoundTrip(req *http.Request) (*http.Response, error) {
	return rt.rt.RoundTrip(req)
}

func (rt *cacheRoundTripper) WrappedRoundTripper() http.RoundTripper { return rt.rt.Transport }

type authProxyRoundTripper struct {
	username string
	groups   []string
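For callers going through client-go, the same wrapping is reached declaratively: with the fields added in this PR, setting CacheDir on the transport Config makes HTTPWrappersForConfig (see the hunk above) append the cache round tripper to the wrapping chain. A hedged sketch against the API as it stands at this commit; the directory is illustrative:

```go
package main

import (
	"net/http"

	"k8s.io/client-go/transport"
)

func main() {
	// A Config with a non-empty CacheDir is all it takes; HTTPWrappersForConfig
	// installs NewCacheRoundTripper around the base transport when it is set.
	cfg := &transport.Config{CacheDir: "/tmp/kubectl-http-cache"}

	rt, err := transport.HTTPWrappersForConfig(cfg, http.DefaultTransport)
	if err != nil {
		panic(err)
	}
	_ = &http.Client{Transport: rt}
}
```

The test added in the next hunk exercises exactly this path: a 200 response with an ETag is cached, and a later 304 Not Modified is answered with the cached body.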
@@ -17,7 +17,11 @@ limitations under the License.
package transport

import (
	"bytes"
	"io/ioutil"
	"net/http"
	"net/url"
	"os"
	"reflect"
	"strings"
	"testing"

@@ -216,3 +220,60 @@ func TestAuthProxyRoundTripper(t *testing.T) {
		}
	}
}

func TestCacheRoundTripper(t *testing.T) {
	rt := &testRoundTripper{}
	cacheDir, err := ioutil.TempDir("", "cache-rt")
	defer os.RemoveAll(cacheDir)
	if err != nil {
		t.Fatal(err)
	}
	cache := NewCacheRoundTripper(cacheDir, rt)

	// First call, caches the response
	req := &http.Request{
		Method: http.MethodGet,
		URL:    &url.URL{Host: "localhost"},
	}
	rt.Response = &http.Response{
		Header:     http.Header{"ETag": []string{`"123456"`}},
		Body:       ioutil.NopCloser(bytes.NewReader([]byte("Content"))),
		StatusCode: http.StatusOK,
	}
	resp, err := cache.RoundTrip(req)
	if err != nil {
		t.Fatal(err)
	}
	content, err := ioutil.ReadAll(resp.Body)
	if err != nil {
		t.Fatal(err)
	}
	if string(content) != "Content" {
		t.Errorf(`Expected Body to be "Content", got %q`, string(content))
	}

	// Second call, returns cached response
	req = &http.Request{
		Method: http.MethodGet,
		URL:    &url.URL{Host: "localhost"},
	}
	rt.Response = &http.Response{
		StatusCode: http.StatusNotModified,
		Body:       ioutil.NopCloser(bytes.NewReader([]byte("Other Content"))),
	}

	resp, err = cache.RoundTrip(req)
	if err != nil {
		t.Fatal(err)
	}

	// Read body and make sure we have the initial content
	content, err = ioutil.ReadAll(resp.Body)
	resp.Body.Close()
	if err != nil {
		t.Fatal(err)
	}
	if string(content) != "Content" {
		t.Errorf("Invalid content read from cache %q", string(content))
	}
}
@@ -174,6 +174,10 @@
"ImportPath": "github.com/golang/protobuf/ptypes/timestamp",
"Rev": "4bd1920723d7b7c925de087aa32e2187708897f7"
},
{
"ImportPath": "github.com/google/btree",
"Rev": "7d79101e329e5a3adf994758c578dab82b90c017"
},
{
"ImportPath": "github.com/google/gofuzz",
"Rev": "44d81051d367757e1c7c6a5a86423ece9afcf63c"

@@ -190,6 +194,14 @@
"ImportPath": "github.com/googleapis/gnostic/extensions",
"Rev": "0c5108395e2debce0d731cf0287ddf7242066aba"
},
{
"ImportPath": "github.com/gregjones/httpcache",
"Rev": "787624de3eb7bd915c329cba748687a3b22666a6"
},
{
"ImportPath": "github.com/gregjones/httpcache/diskcache",
"Rev": "787624de3eb7bd915c329cba748687a3b22666a6"
},
{
"ImportPath": "github.com/hashicorp/golang-lru",
"Rev": "a0d98a5f288019575c6d1f4bb1573fef2d1fcdc4"

@@ -214,6 +226,10 @@
"ImportPath": "github.com/mailru/easyjson/jwriter",
"Rev": "d5b7844b561a7bc640052f1b935f7b800330d7e0"
},
{
"ImportPath": "github.com/peterbourgon/diskv",
"Rev": "5f041e8faa004a95c88a202771f4cc3e991971e6"
},
{
"ImportPath": "github.com/spf13/pflag",
"Rev": "9ff6c6923cfffbcd502984b8e0c80539a94968b7"
@@ -170,6 +170,10 @@
"ImportPath": "github.com/golang/protobuf/ptypes/timestamp",
"Rev": "4bd1920723d7b7c925de087aa32e2187708897f7"
},
{
"ImportPath": "github.com/google/btree",
"Rev": "7d79101e329e5a3adf994758c578dab82b90c017"
},
{
"ImportPath": "github.com/google/gofuzz",
"Rev": "44d81051d367757e1c7c6a5a86423ece9afcf63c"

@@ -186,6 +190,14 @@
"ImportPath": "github.com/googleapis/gnostic/extensions",
"Rev": "0c5108395e2debce0d731cf0287ddf7242066aba"
},
{
"ImportPath": "github.com/gregjones/httpcache",
"Rev": "787624de3eb7bd915c329cba748687a3b22666a6"
},
{
"ImportPath": "github.com/gregjones/httpcache/diskcache",
"Rev": "787624de3eb7bd915c329cba748687a3b22666a6"
},
{
"ImportPath": "github.com/grpc-ecosystem/go-grpc-prometheus",
"Rev": "2500245aa6110c562d17020fb31a2c133d737799"

@@ -250,6 +262,10 @@
"ImportPath": "github.com/pborman/uuid",
"Rev": "ca53cad383cad2479bbba7f7a1a05797ec1386e4"
},
{
"ImportPath": "github.com/peterbourgon/diskv",
"Rev": "5f041e8faa004a95c88a202771f4cc3e991971e6"
},
{
"ImportPath": "github.com/pkg/errors",
"Rev": "a22138067af1c4942683050411a841ade67fe1eb"
@@ -82,6 +82,10 @@
"ImportPath": "github.com/golang/protobuf/ptypes/timestamp",
"Rev": "4bd1920723d7b7c925de087aa32e2187708897f7"
},
{
"ImportPath": "github.com/google/btree",
"Rev": "7d79101e329e5a3adf994758c578dab82b90c017"
},
{
"ImportPath": "github.com/google/gofuzz",
"Rev": "44d81051d367757e1c7c6a5a86423ece9afcf63c"

@@ -98,6 +102,14 @@
"ImportPath": "github.com/googleapis/gnostic/extensions",
"Rev": "0c5108395e2debce0d731cf0287ddf7242066aba"
},
{
"ImportPath": "github.com/gregjones/httpcache",
"Rev": "787624de3eb7bd915c329cba748687a3b22666a6"
},
{
"ImportPath": "github.com/gregjones/httpcache/diskcache",
"Rev": "787624de3eb7bd915c329cba748687a3b22666a6"
},
{
"ImportPath": "github.com/juju/ratelimit",
"Rev": "5b9ff866471762aa2ab2dced63c9fb6f53921342"

@@ -114,6 +126,10 @@
"ImportPath": "github.com/mailru/easyjson/jwriter",
"Rev": "d5b7844b561a7bc640052f1b935f7b800330d7e0"
},
{
"ImportPath": "github.com/peterbourgon/diskv",
"Rev": "5f041e8faa004a95c88a202771f4cc3e991971e6"
},
{
"ImportPath": "github.com/spf13/pflag",
"Rev": "9ff6c6923cfffbcd502984b8e0c80539a94968b7"
@@ -162,6 +162,10 @@
"ImportPath": "github.com/golang/protobuf/ptypes/timestamp",
"Rev": "4bd1920723d7b7c925de087aa32e2187708897f7"
},
{
"ImportPath": "github.com/google/btree",
"Rev": "7d79101e329e5a3adf994758c578dab82b90c017"
},
{
"ImportPath": "github.com/google/gofuzz",
"Rev": "44d81051d367757e1c7c6a5a86423ece9afcf63c"

@@ -178,6 +182,14 @@
"ImportPath": "github.com/googleapis/gnostic/extensions",
"Rev": "0c5108395e2debce0d731cf0287ddf7242066aba"
},
{
"ImportPath": "github.com/gregjones/httpcache",
"Rev": "787624de3eb7bd915c329cba748687a3b22666a6"
},
{
"ImportPath": "github.com/gregjones/httpcache/diskcache",
"Rev": "787624de3eb7bd915c329cba748687a3b22666a6"
},
{
"ImportPath": "github.com/grpc-ecosystem/go-grpc-prometheus",
"Rev": "2500245aa6110c562d17020fb31a2c133d737799"

@@ -242,6 +254,10 @@
"ImportPath": "github.com/pborman/uuid",
"Rev": "ca53cad383cad2479bbba7f7a1a05797ec1386e4"
},
{
"ImportPath": "github.com/peterbourgon/diskv",
"Rev": "5f041e8faa004a95c88a202771f4cc3e991971e6"
},
{
"ImportPath": "github.com/pkg/errors",
"Rev": "a22138067af1c4942683050411a841ade67fe1eb"
@@ -228,6 +228,7 @@ filegroup(
        "//vendor/github.com/gorilla/context:all-srcs",
        "//vendor/github.com/gorilla/mux:all-srcs",
        "//vendor/github.com/gorilla/websocket:all-srcs",
        "//vendor/github.com/gregjones/httpcache:all-srcs",
        "//vendor/github.com/grpc-ecosystem/go-grpc-prometheus:all-srcs",
        "//vendor/github.com/grpc-ecosystem/grpc-gateway/runtime:all-srcs",
        "//vendor/github.com/grpc-ecosystem/grpc-gateway/utilities:all-srcs",

@@ -274,6 +275,7 @@ filegroup(
        "//vendor/github.com/pborman/uuid:all-srcs",
        "//vendor/github.com/pelletier/go-buffruneio:all-srcs",
        "//vendor/github.com/pelletier/go-toml:all-srcs",
        "//vendor/github.com/peterbourgon/diskv:all-srcs",
        "//vendor/github.com/pkg/errors:all-srcs",
        "//vendor/github.com/pkg/sftp:all-srcs",
        "//vendor/github.com/pmezard/go-difflib/difflib:all-srcs",
@@ -0,0 +1,18 @@
sudo: false
language: go
go:
  - 1.6.x
  - 1.7.x
  - 1.8.x
  - master
matrix:
  allow_failures:
    - go: master
  fast_finish: true
install:
  - # Do nothing. This is needed to prevent default install action "go get -t -v ./..." from happening here (we want it to happen inside script step).
script:
  - go get -t -v ./...
  - diff -u <(echo -n) <(gofmt -d .)
  - go tool vet .
  - go test -v -race ./...
@@ -0,0 +1,24 @@
load("@io_bazel_rules_go//go:def.bzl", "go_library")

go_library(
    name = "go_default_library",
    srcs = ["httpcache.go"],
    visibility = ["//visibility:public"],
)

filegroup(
    name = "package-srcs",
    srcs = glob(["**"]),
    tags = ["automanaged"],
    visibility = ["//visibility:private"],
)

filegroup(
    name = "all-srcs",
    srcs = [
        ":package-srcs",
        "//vendor/github.com/gregjones/httpcache/diskcache:all-srcs",
    ],
    tags = ["automanaged"],
    visibility = ["//visibility:public"],
)
@@ -0,0 +1,7 @@
Copyright © 2012 Greg Jones (greg.jones@gmail.com)

Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the “Software”), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions:

The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software.

THE SOFTWARE IS PROVIDED “AS IS”, WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
@@ -0,0 +1,24 @@
httpcache
=========

[![Build Status](https://travis-ci.org/gregjones/httpcache.svg?branch=master)](https://travis-ci.org/gregjones/httpcache) [![GoDoc](https://godoc.org/github.com/gregjones/httpcache?status.svg)](https://godoc.org/github.com/gregjones/httpcache)

Package httpcache provides a http.RoundTripper implementation that works as a mostly RFC-compliant cache for http responses.

It is only suitable for use as a 'private' cache (i.e. for a web-browser or an API-client and not for a shared proxy).

Cache Backends
--------------

- The built-in 'memory' cache stores responses in an in-memory map.
- [`github.com/gregjones/httpcache/diskcache`](https://github.com/gregjones/httpcache/tree/master/diskcache) provides a filesystem-backed cache using the [diskv](https://github.com/peterbourgon/diskv) library.
- [`github.com/gregjones/httpcache/memcache`](https://github.com/gregjones/httpcache/tree/master/memcache) provides memcache implementations, for both App Engine and 'normal' memcache servers.
- [`sourcegraph.com/sourcegraph/s3cache`](https://sourcegraph.com/github.com/sourcegraph/s3cache) uses Amazon S3 for storage.
- [`github.com/gregjones/httpcache/leveldbcache`](https://github.com/gregjones/httpcache/tree/master/leveldbcache) provides a filesystem-backed cache using [leveldb](https://github.com/syndtr/goleveldb/leveldb).
- [`github.com/die-net/lrucache`](https://github.com/die-net/lrucache) provides an in-memory cache that will evict least-recently used entries.
- [`github.com/die-net/lrucache/twotier`](https://github.com/die-net/lrucache/tree/master/twotier) allows caches to be combined, for example to use lrucache above with a persistent disk-cache.

License
-------

- [MIT License](LICENSE.txt)
@ -0,0 +1,22 @@
load("@io_bazel_rules_go//go:def.bzl", "go_library")

go_library(
    name = "go_default_library",
    srcs = ["diskcache.go"],
    visibility = ["//visibility:public"],
    deps = ["//vendor/github.com/peterbourgon/diskv:go_default_library"],
)

filegroup(
    name = "package-srcs",
    srcs = glob(["**"]),
    tags = ["automanaged"],
    visibility = ["//visibility:private"],
)

filegroup(
    name = "all-srcs",
    srcs = [":package-srcs"],
    tags = ["automanaged"],
    visibility = ["//visibility:public"],
)
@ -0,0 +1,61 @@
// Package diskcache provides an implementation of httpcache.Cache that uses the diskv package
// to supplement an in-memory map with persistent storage.
package diskcache

import (
    "bytes"
    "crypto/md5"
    "encoding/hex"
    "io"

    "github.com/peterbourgon/diskv"
)

// Cache is an implementation of httpcache.Cache that supplements the in-memory map with persistent storage
type Cache struct {
    d *diskv.Diskv
}

// Get returns the response corresponding to key if present
func (c *Cache) Get(key string) (resp []byte, ok bool) {
    key = keyToFilename(key)
    resp, err := c.d.Read(key)
    if err != nil {
        return []byte{}, false
    }
    return resp, true
}

// Set saves a response to the cache as key
func (c *Cache) Set(key string, resp []byte) {
    key = keyToFilename(key)
    c.d.WriteStream(key, bytes.NewReader(resp), true)
}

// Delete removes the response with key from the cache
func (c *Cache) Delete(key string) {
    key = keyToFilename(key)
    c.d.Erase(key)
}

// keyToFilename hashes the key so it is always a safe, flat filename.
func keyToFilename(key string) string {
    h := md5.New()
    io.WriteString(h, key)
    return hex.EncodeToString(h.Sum(nil))
}

// New returns a new Cache that will store files in basePath
func New(basePath string) *Cache {
    return &Cache{
        d: diskv.New(diskv.Options{
            BasePath:     basePath,
            CacheSizeMax: 100 * 1024 * 1024, // 100MB
        }),
    }
}

// NewWithDiskv returns a new Cache using the provided Diskv as underlying
// storage.
func NewWithDiskv(d *diskv.Diskv) *Cache {
    return &Cache{d}
}
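
A sketch of wiring this backend into httpcache (the helper name and directory path are illustrative, not part of either package):

```go
package main

import (
    "net/http"

    "github.com/gregjones/httpcache"
    "github.com/gregjones/httpcache/diskcache"
)

// newDiskCachingClient returns an http.Client whose cacheable responses are
// persisted on disk under dir.
func newDiskCachingClient(dir string) *http.Client {
    return httpcache.NewTransport(diskcache.New(dir)).Client()
}

func main() {
    client := newDiskCachingClient("/tmp/httpcache") // illustrative path
    if resp, err := client.Get("https://example.com/"); err == nil {
        resp.Body.Close()
    }
}
```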
@ -0,0 +1,553 @@
// Package httpcache provides an http.RoundTripper implementation that works as a
// mostly RFC-compliant cache for http responses.
//
// It is only suitable for use as a 'private' cache (i.e. for a web-browser or an API-client
// and not for a shared proxy).
//
package httpcache

import (
    "bufio"
    "bytes"
    "errors"
    "fmt"
    "io"
    "io/ioutil"
    "net/http"
    "net/http/httputil"
    "strings"
    "sync"
    "time"
)

const (
    stale = iota
    fresh
    transparent
    // XFromCache is the header added to responses that are returned from the cache
    XFromCache = "X-From-Cache"
)

// A Cache interface is used by the Transport to store and retrieve responses.
type Cache interface {
    // Get returns the []byte representation of a cached response and a bool
    // set to true if the value isn't empty
    Get(key string) (responseBytes []byte, ok bool)
    // Set stores the []byte representation of a response against a key
    Set(key string, responseBytes []byte)
    // Delete removes the value associated with the key
    Delete(key string)
}

// cacheKey returns the cache key for req.
func cacheKey(req *http.Request) string {
    return req.URL.String()
}

// CachedResponse returns the cached http.Response for req if present, and nil
// otherwise.
func CachedResponse(c Cache, req *http.Request) (resp *http.Response, err error) {
    cachedVal, ok := c.Get(cacheKey(req))
    if !ok {
        return
    }

    b := bytes.NewBuffer(cachedVal)
    return http.ReadResponse(bufio.NewReader(b), req)
}

// MemoryCache is an implementation of Cache that stores responses in an in-memory map.
type MemoryCache struct {
    mu    sync.RWMutex
    items map[string][]byte
}

// Get returns the []byte representation of the response and true if present, false if not
func (c *MemoryCache) Get(key string) (resp []byte, ok bool) {
    c.mu.RLock()
    resp, ok = c.items[key]
    c.mu.RUnlock()
    return resp, ok
}

// Set saves response resp to the cache with key
func (c *MemoryCache) Set(key string, resp []byte) {
    c.mu.Lock()
    c.items[key] = resp
    c.mu.Unlock()
}

// Delete removes key from the cache
func (c *MemoryCache) Delete(key string) {
    c.mu.Lock()
    delete(c.items, key)
    c.mu.Unlock()
}

// NewMemoryCache returns a new Cache that will store items in an in-memory map
func NewMemoryCache() *MemoryCache {
    c := &MemoryCache{items: map[string][]byte{}}
    return c
}

// Transport is an implementation of http.RoundTripper that will return values from a cache
// where possible (avoiding a network request) and will additionally add validators (etag/if-modified-since)
// to repeated requests allowing servers to return 304 / Not Modified
type Transport struct {
    // The RoundTripper interface actually used to make requests
    // If nil, http.DefaultTransport is used
    Transport http.RoundTripper
    Cache     Cache
    // If true, responses returned from the cache will be given an extra header, X-From-Cache
    MarkCachedResponses bool
}

// NewTransport returns a new Transport with the
// provided Cache implementation and MarkCachedResponses set to true
func NewTransport(c Cache) *Transport {
    return &Transport{Cache: c, MarkCachedResponses: true}
}

// Client returns an *http.Client that caches responses.
func (t *Transport) Client() *http.Client {
    return &http.Client{Transport: t}
}

// varyMatches will return false unless all of the cached values for the headers listed in Vary
// match the new request
func varyMatches(cachedResp *http.Response, req *http.Request) bool {
    for _, header := range headerAllCommaSepValues(cachedResp.Header, "vary") {
        header = http.CanonicalHeaderKey(header)
        if header != "" && req.Header.Get(header) != cachedResp.Header.Get("X-Varied-"+header) {
            return false
        }
    }
    return true
}

// RoundTrip takes a Request and returns a Response
//
// If there is a fresh Response already in cache, then it will be returned without connecting to
// the server.
//
// If there is a stale Response, then any validators it contains will be set on the new request
// to give the server a chance to respond with NotModified. If this happens, then the cached Response
// will be returned.
func (t *Transport) RoundTrip(req *http.Request) (resp *http.Response, err error) {
    cacheKey := cacheKey(req)
    cacheable := (req.Method == "GET" || req.Method == "HEAD") && req.Header.Get("range") == ""
    var cachedResp *http.Response
    if cacheable {
        cachedResp, err = CachedResponse(t.Cache, req)
    } else {
        // Need to invalidate an existing value
        t.Cache.Delete(cacheKey)
    }

    transport := t.Transport
    if transport == nil {
        transport = http.DefaultTransport
    }

    if cacheable && cachedResp != nil && err == nil {
        if t.MarkCachedResponses {
            cachedResp.Header.Set(XFromCache, "1")
        }

        if varyMatches(cachedResp, req) {
            // Can only use cached value if the new request doesn't Vary significantly
            freshness := getFreshness(cachedResp.Header, req.Header)
            if freshness == fresh {
                return cachedResp, nil
            }

            if freshness == stale {
                var req2 *http.Request
                // Add validators if caller hasn't already done so
                etag := cachedResp.Header.Get("etag")
                if etag != "" && req.Header.Get("etag") == "" {
                    req2 = cloneRequest(req)
                    req2.Header.Set("if-none-match", etag)
                }
                lastModified := cachedResp.Header.Get("last-modified")
                if lastModified != "" && req.Header.Get("last-modified") == "" {
                    if req2 == nil {
                        req2 = cloneRequest(req)
                    }
                    req2.Header.Set("if-modified-since", lastModified)
                }
                if req2 != nil {
                    req = req2
                }
            }
        }

        resp, err = transport.RoundTrip(req)
        if err == nil && req.Method == "GET" && resp.StatusCode == http.StatusNotModified {
            // Replace the 304 response with the one from cache, but update with some new headers
            endToEndHeaders := getEndToEndHeaders(resp.Header)
            for _, header := range endToEndHeaders {
                cachedResp.Header[header] = resp.Header[header]
            }
            cachedResp.Status = fmt.Sprintf("%d %s", http.StatusOK, http.StatusText(http.StatusOK))
            cachedResp.StatusCode = http.StatusOK

            resp = cachedResp
        } else if (err != nil || (cachedResp != nil && resp.StatusCode >= 500)) &&
            req.Method == "GET" && canStaleOnError(cachedResp.Header, req.Header) {
            // In case of transport failure and stale-if-error activated, returns cached content
            // when available
            cachedResp.Status = fmt.Sprintf("%d %s", http.StatusOK, http.StatusText(http.StatusOK))
            cachedResp.StatusCode = http.StatusOK
            return cachedResp, nil
        } else {
            if err != nil || resp.StatusCode != http.StatusOK {
                t.Cache.Delete(cacheKey)
            }
            if err != nil {
                return nil, err
            }
        }
    } else {
        reqCacheControl := parseCacheControl(req.Header)
        if _, ok := reqCacheControl["only-if-cached"]; ok {
            resp = newGatewayTimeoutResponse(req)
        } else {
            resp, err = transport.RoundTrip(req)
            if err != nil {
                return nil, err
            }
        }
    }

    if cacheable && canStore(parseCacheControl(req.Header), parseCacheControl(resp.Header)) {
        for _, varyKey := range headerAllCommaSepValues(resp.Header, "vary") {
            varyKey = http.CanonicalHeaderKey(varyKey)
            fakeHeader := "X-Varied-" + varyKey
            reqValue := req.Header.Get(varyKey)
            if reqValue != "" {
                resp.Header.Set(fakeHeader, reqValue)
            }
        }
        switch req.Method {
        case "GET":
            // Delay caching until EOF is reached.
            resp.Body = &cachingReadCloser{
                R: resp.Body,
                OnEOF: func(r io.Reader) {
                    resp := *resp
                    resp.Body = ioutil.NopCloser(r)
                    respBytes, err := httputil.DumpResponse(&resp, true)
                    if err == nil {
                        t.Cache.Set(cacheKey, respBytes)
                    }
                },
            }
        default:
            respBytes, err := httputil.DumpResponse(resp, true)
            if err == nil {
                t.Cache.Set(cacheKey, respBytes)
            }
        }
    } else {
        t.Cache.Delete(cacheKey)
    }
    return resp, nil
}

// ErrNoDateHeader indicates that the HTTP headers contained no Date header.
var ErrNoDateHeader = errors.New("no Date header")

// Date parses and returns the value of the Date header.
func Date(respHeaders http.Header) (date time.Time, err error) {
    dateHeader := respHeaders.Get("date")
    if dateHeader == "" {
        err = ErrNoDateHeader
        return
    }

    return time.Parse(time.RFC1123, dateHeader)
}

type realClock struct{}

func (c *realClock) since(d time.Time) time.Duration {
    return time.Since(d)
}

type timer interface {
    since(d time.Time) time.Duration
}

var clock timer = &realClock{}

// getFreshness will return one of fresh/stale/transparent based on the cache-control
// values of the request and the response
//
// fresh indicates the response can be returned
// stale indicates that the response needs validating before it is returned
// transparent indicates the response should not be used to fulfil the request
//
// Because this is only a private cache, 'public' and 'private' in cache-control aren't
// significant. Similarly, smax-age isn't used.
func getFreshness(respHeaders, reqHeaders http.Header) (freshness int) {
    respCacheControl := parseCacheControl(respHeaders)
    reqCacheControl := parseCacheControl(reqHeaders)
    if _, ok := reqCacheControl["no-cache"]; ok {
        return transparent
    }
    if _, ok := respCacheControl["no-cache"]; ok {
        return stale
    }
    if _, ok := reqCacheControl["only-if-cached"]; ok {
        return fresh
    }

    date, err := Date(respHeaders)
    if err != nil {
        return stale
    }
    currentAge := clock.since(date)

    var lifetime time.Duration
    var zeroDuration time.Duration

    // If a response includes both an Expires header and a max-age directive,
    // the max-age directive overrides the Expires header, even if the Expires header is more restrictive.
    if maxAge, ok := respCacheControl["max-age"]; ok {
        lifetime, err = time.ParseDuration(maxAge + "s")
        if err != nil {
            lifetime = zeroDuration
        }
    } else {
        expiresHeader := respHeaders.Get("Expires")
        if expiresHeader != "" {
            expires, err := time.Parse(time.RFC1123, expiresHeader)
            if err != nil {
                lifetime = zeroDuration
            } else {
                lifetime = expires.Sub(date)
            }
        }
    }

    if maxAge, ok := reqCacheControl["max-age"]; ok {
        // the client is willing to accept a response whose age is no greater than the specified time in seconds
        lifetime, err = time.ParseDuration(maxAge + "s")
        if err != nil {
            lifetime = zeroDuration
        }
    }
    if minfresh, ok := reqCacheControl["min-fresh"]; ok {
        // the client wants a response that will still be fresh for at least the specified number of seconds.
        minfreshDuration, err := time.ParseDuration(minfresh + "s")
        if err == nil {
            currentAge = time.Duration(currentAge + minfreshDuration)
        }
    }

    if maxstale, ok := reqCacheControl["max-stale"]; ok {
        // Indicates that the client is willing to accept a response that has exceeded its expiration time.
        // If max-stale is assigned a value, then the client is willing to accept a response that has exceeded
        // its expiration time by no more than the specified number of seconds.
        // If no value is assigned to max-stale, then the client is willing to accept a stale response of any age.
        //
        // Responses served only because of a max-stale value are supposed to have a Warning header added to them,
        // but that seems like a hassle, and is it actually useful? If so, then there needs to be a different
        // return-value available here.
        if maxstale == "" {
            return fresh
        }
        maxstaleDuration, err := time.ParseDuration(maxstale + "s")
        if err == nil {
            currentAge = time.Duration(currentAge - maxstaleDuration)
        }
    }

    if lifetime > currentAge {
        return fresh
    }

    return stale
}

// canStaleOnError returns true if either the request or the response includes the
// stale-if-error cache control extension: https://tools.ietf.org/html/rfc5861
func canStaleOnError(respHeaders, reqHeaders http.Header) bool {
    respCacheControl := parseCacheControl(respHeaders)
    reqCacheControl := parseCacheControl(reqHeaders)

    var err error
    lifetime := time.Duration(-1)

    if staleMaxAge, ok := respCacheControl["stale-if-error"]; ok {
        if staleMaxAge != "" {
            lifetime, err = time.ParseDuration(staleMaxAge + "s")
            if err != nil {
                return false
            }
        } else {
            return true
        }
    }
    if staleMaxAge, ok := reqCacheControl["stale-if-error"]; ok {
        if staleMaxAge != "" {
            lifetime, err = time.ParseDuration(staleMaxAge + "s")
            if err != nil {
                return false
            }
        } else {
            return true
        }
    }

    if lifetime >= 0 {
        date, err := Date(respHeaders)
        if err != nil {
            return false
        }
        currentAge := clock.since(date)
        if lifetime > currentAge {
            return true
        }
    }

    return false
}

func getEndToEndHeaders(respHeaders http.Header) []string {
    // These headers are always hop-by-hop
    hopByHopHeaders := map[string]struct{}{
        "Connection":          struct{}{},
        "Keep-Alive":          struct{}{},
        "Proxy-Authenticate":  struct{}{},
        "Proxy-Authorization": struct{}{},
        "Te":                  struct{}{},
        "Trailers":            struct{}{},
        "Transfer-Encoding":   struct{}{},
        "Upgrade":             struct{}{},
    }

    for _, extra := range strings.Split(respHeaders.Get("connection"), ",") {
        // any header listed in connection, if present, is also considered hop-by-hop
        if strings.Trim(extra, " ") != "" {
            hopByHopHeaders[http.CanonicalHeaderKey(extra)] = struct{}{}
        }
    }
    endToEndHeaders := []string{}
    for respHeader := range respHeaders {
        if _, ok := hopByHopHeaders[respHeader]; !ok {
            endToEndHeaders = append(endToEndHeaders, respHeader)
        }
    }
    return endToEndHeaders
}

func canStore(reqCacheControl, respCacheControl cacheControl) (canStore bool) {
    if _, ok := respCacheControl["no-store"]; ok {
        return false
    }
    if _, ok := reqCacheControl["no-store"]; ok {
        return false
    }
    return true
}

func newGatewayTimeoutResponse(req *http.Request) *http.Response {
    var braw bytes.Buffer
    braw.WriteString("HTTP/1.1 504 Gateway Timeout\r\n\r\n")
    resp, err := http.ReadResponse(bufio.NewReader(&braw), req)
    if err != nil {
        panic(err)
    }
    return resp
}

// cloneRequest returns a clone of the provided *http.Request.
// The clone is a shallow copy of the struct and its Header map.
// (This function copyright goauth2 authors: https://code.google.com/p/goauth2)
func cloneRequest(r *http.Request) *http.Request {
    // shallow copy of the struct
    r2 := new(http.Request)
    *r2 = *r
    // deep copy of the Header
    r2.Header = make(http.Header)
    for k, s := range r.Header {
        r2.Header[k] = s
    }
    return r2
}

type cacheControl map[string]string

func parseCacheControl(headers http.Header) cacheControl {
    cc := cacheControl{}
    ccHeader := headers.Get("Cache-Control")
    for _, part := range strings.Split(ccHeader, ",") {
        part = strings.Trim(part, " ")
        if part == "" {
            continue
        }
        if strings.ContainsRune(part, '=') {
            keyval := strings.Split(part, "=")
            cc[strings.Trim(keyval[0], " ")] = strings.Trim(keyval[1], ",")
        } else {
            cc[part] = ""
        }
    }
    return cc
}

// headerAllCommaSepValues returns all comma-separated values (each
// with whitespace trimmed) for header name in headers. According to
// Section 4.2 of the HTTP/1.1 spec
// (http://www.w3.org/Protocols/rfc2616/rfc2616-sec4.html#sec4.2),
// values from multiple occurrences of a header should be concatenated, if
// the header's value is a comma-separated list.
func headerAllCommaSepValues(headers http.Header, name string) []string {
    var vals []string
    for _, val := range headers[http.CanonicalHeaderKey(name)] {
        fields := strings.Split(val, ",")
        for i, f := range fields {
            fields[i] = strings.TrimSpace(f)
        }
        vals = append(vals, fields...)
    }
    return vals
}

// cachingReadCloser is a wrapper around ReadCloser R that calls OnEOF
// handler with a full copy of the content read from R when EOF is
// reached.
type cachingReadCloser struct {
    // Underlying ReadCloser.
    R io.ReadCloser
    // OnEOF is called with a copy of the content of R when EOF is reached.
    OnEOF func(io.Reader)

    buf bytes.Buffer // buf stores a copy of the content of R.
}

// Read reads the next len(p) bytes from R or until R is drained. The
// return value n is the number of bytes read. If R has no data to
// return, err is io.EOF and OnEOF is called with a full copy of what
// has been read so far.
func (r *cachingReadCloser) Read(p []byte) (n int, err error) {
    n, err = r.R.Read(p)
    r.buf.Write(p[:n])
    if err == io.EOF {
        r.OnEOF(bytes.NewReader(r.buf.Bytes()))
    }
    return n, err
}

func (r *cachingReadCloser) Close() error {
    return r.R.Close()
}

// NewMemoryCacheTransport returns a new Transport using the in-memory cache implementation
func NewMemoryCacheTransport() *Transport {
    c := NewMemoryCache()
    t := NewTransport(c)
    return t
}
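
A sketch of the validator flow implemented by RoundTrip above, using a throwaway httptest server (the ETag value and body are illustrative). On the second request, the stale cached entry is revalidated with If-None-Match, the server answers 304, and the cached body comes back with X-From-Cache set:

```go
package main

import (
    "fmt"
    "io/ioutil"
    "net/http"
    "net/http/httptest"

    "github.com/gregjones/httpcache"
)

func main() {
    // Throwaway server: serves a stable ETag and honors If-None-Match with a 304.
    ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
        if r.Header.Get("If-None-Match") == `"v1"` {
            w.WriteHeader(http.StatusNotModified)
            return
        }
        w.Header().Set("Etag", `"v1"`)
        fmt.Fprint(w, "hello")
    }))
    defer ts.Close()

    client := httpcache.NewMemoryCacheTransport().Client()

    for i := 0; i < 2; i++ {
        resp, err := client.Get(ts.URL)
        if err != nil {
            panic(err)
        }
        body, _ := ioutil.ReadAll(resp.Body) // reading to EOF triggers caching
        resp.Body.Close()
        fmt.Printf("%s from-cache=%q\n", body, resp.Header.Get(httpcache.XFromCache))
    }
}
```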
@ -0,0 +1,26 @@
load("@io_bazel_rules_go//go:def.bzl", "go_library")

go_library(
    name = "go_default_library",
    srcs = [
        "compression.go",
        "diskv.go",
        "index.go",
    ],
    visibility = ["//visibility:public"],
    deps = ["//vendor/github.com/google/btree:go_default_library"],
)

filegroup(
    name = "package-srcs",
    srcs = glob(["**"]),
    tags = ["automanaged"],
    visibility = ["//visibility:private"],
)

filegroup(
    name = "all-srcs",
    srcs = [":package-srcs"],
    tags = ["automanaged"],
    visibility = ["//visibility:public"],
)
@ -0,0 +1,19 @@
Copyright (c) 2011-2012 Peter Bourgon

Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:

The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.

THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.
@ -0,0 +1,141 @@
# What is diskv?

Diskv (disk-vee) is a simple, persistent key-value store written in the Go
language. It starts with an incredibly simple API for storing arbitrary data on
a filesystem by key, and builds several layers of performance-enhancing
abstraction on top. The end result is a conceptually simple, but highly
performant, disk-backed storage system.

[![Build Status][1]][2]

[1]: https://drone.io/github.com/peterbourgon/diskv/status.png
[2]: https://drone.io/github.com/peterbourgon/diskv/latest


# Installing

Install [Go 1][3], either [from source][4] or [with a prepackaged binary][5].
Then,

```bash
$ go get github.com/peterbourgon/diskv
```

[3]: http://golang.org
[4]: http://golang.org/doc/install/source
[5]: http://golang.org/doc/install


# Usage

```go
package main

import (
    "fmt"
    "github.com/peterbourgon/diskv"
)

func main() {
    // Simplest transform function: put all the data files into the base dir.
    flatTransform := func(s string) []string { return []string{} }

    // Initialize a new diskv store, rooted at "my-data-dir", with a 1MB cache.
    d := diskv.New(diskv.Options{
        BasePath:     "my-data-dir",
        Transform:    flatTransform,
        CacheSizeMax: 1024 * 1024,
    })

    // Write three bytes to the key "alpha".
    key := "alpha"
    d.Write(key, []byte{'1', '2', '3'})

    // Read the value back out of the store.
    value, _ := d.Read(key)
    fmt.Printf("%v\n", value)

    // Erase the key+value from the store (and the disk).
    d.Erase(key)
}
```

More complex examples can be found in the "examples" subdirectory.


# Theory

## Basic idea

At its core, diskv is a map of a key (`string`) to arbitrary data (`[]byte`).
The data is written to a single file on disk, with the same name as the key.
The key determines where that file will be stored, via a user-provided
`TransformFunc`, which takes a key and returns a slice (`[]string`)
corresponding to a path list where the key file will be stored. The simplest
TransformFunc,

```go
func SimpleTransform(key string) []string {
    return []string{}
}
```

will place all keys in the same, base directory. The design is inspired by
[Redis diskstore][6]; a TransformFunc which emulates the default diskstore
behavior is available in the content-addressable-storage example.

[6]: http://groups.google.com/group/redis-db/browse_thread/thread/d444bc786689bde9?pli=1

**Note** that your TransformFunc should ensure that one valid key doesn't
transform to a subset of another valid key. That is, it shouldn't be possible
to construct valid keys that resolve to directory names. As a concrete example,
if your TransformFunc splits on every 3 characters, then

```go
d.Write("abcabc", val) // OK: written to <base>/abc/abc/abcabc
d.Write("abc", val)    // Error: attempted write to <base>/abc/abc, but it's a directory
```

This will be addressed in an upcoming version of diskv.

Probably the most important design principle behind diskv is that your data is
always flatly available on the disk. diskv will never do anything that would
prevent you from accessing, copying, backing up, or otherwise interacting with
your data via common UNIX commandline tools.

## Adding a cache

An in-memory caching layer is provided by combining the BasicStore
functionality with a simple map structure, and keeping it up-to-date as
appropriate. Since the map structure in Go is not threadsafe, it's combined
with a RWMutex to provide safe concurrent access.

## Adding order

diskv is a key-value store and therefore inherently unordered. An ordering
system can be injected into the store by passing something which satisfies the
diskv.Index interface. (A default implementation, using Google's
[btree][7] package, is provided.) Basically, diskv keeps an ordered (by a
user-provided Less function) index of the keys, which can be queried.

[7]: https://github.com/google/btree
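
For instance, a sketch of an ordered store using the provided BTreeIndex (the base path and keys are illustrative):

```go
package main

import (
    "fmt"

    "github.com/peterbourgon/diskv"
)

func main() {
    // Inject a btree-backed index, ordered lexicographically.
    d := diskv.New(diskv.Options{
        BasePath:  "ordered-data",
        Index:     &diskv.BTreeIndex{},
        IndexLess: func(a, b string) bool { return a < b },
    })

    d.Write("beta", []byte{'2'})
    d.Write("alpha", []byte{'1'})

    // Query the first ten keys, in order.
    fmt.Println(d.Index.Keys("", 10)) // [alpha beta]
}
```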

## Adding compression

Something which implements the diskv.Compression interface may be passed
during store creation, so that all Writes and Reads are filtered through
a compression/decompression pipeline. Several default implementations,
using stdlib compression algorithms, are provided. Note that data is cached
compressed; the cost of decompression is borne with each Read.
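
For example, a gzip-compressed store might be constructed like this (a sketch; the base path is illustrative):

```go
d := diskv.New(diskv.Options{
    BasePath:    "compressed-data",
    Compression: diskv.NewGzipCompression(),
})
```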

## Streaming

diskv also now provides ReadStream and WriteStream methods, to allow very large
data to be handled efficiently.
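
A sketch of streaming a large file through a store `d` (as constructed in the usage example above) without buffering it fully in memory; file names are illustrative and errors are elided:

```go
src, _ := os.Open("huge-input.bin")
defer src.Close()
d.WriteStream("huge", src, false) // stream to disk; pass true to fsync on write

rc, _ := d.ReadStream("huge", true) // direct read, bypassing the cache
defer rc.Close()
io.Copy(os.Stdout, rc)
```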

# Future plans

* Needs plenty of robust testing: huge datasets, etc...
* More thorough benchmarking
* Your suggestions for use-cases I haven't thought of
@ -0,0 +1,64 @@
package diskv

import (
    "compress/flate"
    "compress/gzip"
    "compress/zlib"
    "io"
)

// Compression is an interface that Diskv uses to implement compression of
// data. Writer takes a destination io.Writer and returns a WriteCloser that
// compresses all data written through it. Reader takes a source io.Reader and
// returns a ReadCloser that decompresses all data read through it. You may
// define these methods on your own type, or use one of the NewCompression
// helpers.
type Compression interface {
    Writer(dst io.Writer) (io.WriteCloser, error)
    Reader(src io.Reader) (io.ReadCloser, error)
}

// NewGzipCompression returns a Gzip-based Compression.
func NewGzipCompression() Compression {
    return NewGzipCompressionLevel(flate.DefaultCompression)
}

// NewGzipCompressionLevel returns a Gzip-based Compression with the given level.
func NewGzipCompressionLevel(level int) Compression {
    return &genericCompression{
        wf: func(w io.Writer) (io.WriteCloser, error) { return gzip.NewWriterLevel(w, level) },
        rf: func(r io.Reader) (io.ReadCloser, error) { return gzip.NewReader(r) },
    }
}

// NewZlibCompression returns a Zlib-based Compression.
func NewZlibCompression() Compression {
    return NewZlibCompressionLevel(flate.DefaultCompression)
}

// NewZlibCompressionLevel returns a Zlib-based Compression with the given level.
func NewZlibCompressionLevel(level int) Compression {
    return NewZlibCompressionLevelDict(level, nil)
}

// NewZlibCompressionLevelDict returns a Zlib-based Compression with the given
// level, based on the given dictionary.
func NewZlibCompressionLevelDict(level int, dict []byte) Compression {
    return &genericCompression{
        func(w io.Writer) (io.WriteCloser, error) { return zlib.NewWriterLevelDict(w, level, dict) },
        func(r io.Reader) (io.ReadCloser, error) { return zlib.NewReaderDict(r, dict) },
    }
}

type genericCompression struct {
    wf func(w io.Writer) (io.WriteCloser, error)
    rf func(r io.Reader) (io.ReadCloser, error)
}

func (g *genericCompression) Writer(dst io.Writer) (io.WriteCloser, error) {
    return g.wf(dst)
}

func (g *genericCompression) Reader(src io.Reader) (io.ReadCloser, error) {
    return g.rf(src)
}
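
The Compression interface above can also be satisfied by a user-defined type. A sketch using the stdlib DEFLATE codec (the package and type names are illustrative, not part of diskv):

```go
package mystore

import (
    "compress/flate"
    "io"
)

// flateCompression satisfies diskv.Compression using the stdlib flate codec.
type flateCompression struct{ level int }

func (c flateCompression) Writer(dst io.Writer) (io.WriteCloser, error) {
    return flate.NewWriter(dst, c.level)
}

func (c flateCompression) Reader(src io.Reader) (io.ReadCloser, error) {
    return flate.NewReader(src), nil
}
```

It would then be passed as `diskv.Options{Compression: flateCompression{flate.BestSpeed}}`.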
@ -0,0 +1,624 @@
// Diskv (disk-vee) is a simple, persistent, key-value store.
// It stores all data flatly on the filesystem.
package diskv

import (
    "bytes"
    "errors"
    "fmt"
    "io"
    "io/ioutil"
    "os"
    "path/filepath"
    "strings"
    "sync"
    "syscall"
)

const (
    defaultBasePath             = "diskv"
    defaultFilePerm os.FileMode = 0666
    defaultPathPerm os.FileMode = 0777
)

var (
    defaultTransform   = func(s string) []string { return []string{} }
    errCanceled        = errors.New("canceled")
    errEmptyKey        = errors.New("empty key")
    errBadKey          = errors.New("bad key")
    errImportDirectory = errors.New("can't import a directory")
)

// TransformFunction transforms a key into a slice of strings, with each
// element in the slice representing a directory in the file path where the
// key's entry will eventually be stored.
//
// For example, if TransformFunc transforms "abcdef" to ["ab", "cde", "f"],
// the final location of the data file will be <basedir>/ab/cde/f/abcdef
type TransformFunction func(s string) []string
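
// An illustrative TransformFunction (a sketch, not part of this package's
// API) that shards a key two characters at a time, so "abcdef" would be
// stored at <basedir>/ab/cd/ef/abcdef:
//
//	func pairTransform(key string) []string {
//		var path []string
//		for i := 0; i+2 <= len(key); i += 2 {
//			path = append(path, key[i:i+2])
//		}
//		return path
//	}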

// Options define a set of properties that dictate Diskv behavior.
// All values are optional.
type Options struct {
    BasePath     string
    Transform    TransformFunction
    CacheSizeMax uint64 // bytes
    PathPerm     os.FileMode
    FilePerm     os.FileMode
    // If TempDir is set, it will enable filesystem atomic writes by
    // writing temporary files to that location before being moved
    // to BasePath.
    // Note that TempDir MUST be on the same device/partition as
    // BasePath.
    TempDir string

    Index     Index
    IndexLess LessFunction

    Compression Compression
}
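
// A sketch of enabling atomic writes via TempDir (the paths are illustrative;
// both must live on the same partition so the final rename is atomic):
//
//	d := diskv.New(diskv.Options{
//		BasePath: "/var/lib/app/data",
//		TempDir:  "/var/lib/app/tmp",
//	})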

// Diskv is a disk-backed key-value store. You shouldn't construct Diskv
// structures directly; instead, use the New constructor.
type Diskv struct {
    Options
    mu        sync.RWMutex
    cache     map[string][]byte
    cacheSize uint64
}

// New returns an initialized Diskv structure, ready to use.
// If the path identified by baseDir already contains data,
// it will be accessible, but not yet cached.
func New(o Options) *Diskv {
    if o.BasePath == "" {
        o.BasePath = defaultBasePath
    }
    if o.Transform == nil {
        o.Transform = defaultTransform
    }
    if o.PathPerm == 0 {
        o.PathPerm = defaultPathPerm
    }
    if o.FilePerm == 0 {
        o.FilePerm = defaultFilePerm
    }

    d := &Diskv{
        Options:   o,
        cache:     map[string][]byte{},
        cacheSize: 0,
    }

    if d.Index != nil && d.IndexLess != nil {
        d.Index.Initialize(d.IndexLess, d.Keys(nil))
    }

    return d
}

// Write synchronously writes the key-value pair to disk, making it immediately
// available for reads. Write relies on the filesystem to perform an eventual
// sync to physical media. If you need stronger guarantees, see WriteStream.
func (d *Diskv) Write(key string, val []byte) error {
    return d.WriteStream(key, bytes.NewBuffer(val), false)
}

// WriteStream writes the data represented by the io.Reader to the disk, under
// the provided key. If sync is true, WriteStream performs an explicit sync on
// the file as soon as it's written.
//
// bytes.Buffer provides io.Reader semantics for basic data types.
func (d *Diskv) WriteStream(key string, r io.Reader, sync bool) error {
    if len(key) <= 0 {
        return errEmptyKey
    }

    d.mu.Lock()
    defer d.mu.Unlock()

    return d.writeStreamWithLock(key, r, sync)
}

// createKeyFileWithLock either creates the key file directly, or
// creates a temporary file in TempDir if it is set.
func (d *Diskv) createKeyFileWithLock(key string) (*os.File, error) {
    if d.TempDir != "" {
        if err := os.MkdirAll(d.TempDir, d.PathPerm); err != nil {
            return nil, fmt.Errorf("temp mkdir: %s", err)
        }
        f, err := ioutil.TempFile(d.TempDir, "")
        if err != nil {
            return nil, fmt.Errorf("temp file: %s", err)
        }

        if err := f.Chmod(d.FilePerm); err != nil {
            f.Close()           // error deliberately ignored
            os.Remove(f.Name()) // error deliberately ignored
            return nil, fmt.Errorf("chmod: %s", err)
        }
        return f, nil
    }

    mode := os.O_WRONLY | os.O_CREATE | os.O_TRUNC // overwrite if exists
    f, err := os.OpenFile(d.completeFilename(key), mode, d.FilePerm)
    if err != nil {
        return nil, fmt.Errorf("open file: %s", err)
    }
    return f, nil
}

// writeStreamWithLock does no input validation checking.
func (d *Diskv) writeStreamWithLock(key string, r io.Reader, sync bool) error {
    if err := d.ensurePathWithLock(key); err != nil {
        return fmt.Errorf("ensure path: %s", err)
    }

    f, err := d.createKeyFileWithLock(key)
    if err != nil {
        return fmt.Errorf("create key file: %s", err)
    }

    wc := io.WriteCloser(&nopWriteCloser{f})
    if d.Compression != nil {
        wc, err = d.Compression.Writer(f)
        if err != nil {
            f.Close()           // error deliberately ignored
            os.Remove(f.Name()) // error deliberately ignored
            return fmt.Errorf("compression writer: %s", err)
        }
    }

    if _, err := io.Copy(wc, r); err != nil {
        f.Close()           // error deliberately ignored
        os.Remove(f.Name()) // error deliberately ignored
        return fmt.Errorf("i/o copy: %s", err)
    }

    if err := wc.Close(); err != nil {
        f.Close()           // error deliberately ignored
        os.Remove(f.Name()) // error deliberately ignored
        return fmt.Errorf("compression close: %s", err)
    }

    if sync {
        if err := f.Sync(); err != nil {
            f.Close()           // error deliberately ignored
            os.Remove(f.Name()) // error deliberately ignored
            return fmt.Errorf("file sync: %s", err)
        }
    }

    if err := f.Close(); err != nil {
        return fmt.Errorf("file close: %s", err)
    }

    if f.Name() != d.completeFilename(key) {
        // The file was staged in TempDir; atomically move it into place.
        if err := os.Rename(f.Name(), d.completeFilename(key)); err != nil {
            os.Remove(f.Name()) // error deliberately ignored
            return fmt.Errorf("rename: %s", err)
        }
    }

    if d.Index != nil {
        d.Index.Insert(key)
    }

    d.bustCacheWithLock(key) // cache only on read

    return nil
}

// Import imports the source file into diskv under the destination key. If the
// destination key already exists, it's overwritten. If move is true, the
// source file is removed after a successful import.
func (d *Diskv) Import(srcFilename, dstKey string, move bool) (err error) {
    if dstKey == "" {
        return errEmptyKey
    }

    if fi, err := os.Stat(srcFilename); err != nil {
        return err
    } else if fi.IsDir() {
        return errImportDirectory
    }

    d.mu.Lock()
    defer d.mu.Unlock()

    if err := d.ensurePathWithLock(dstKey); err != nil {
        return fmt.Errorf("ensure path: %s", err)
    }

    if move {
        if err := syscall.Rename(srcFilename, d.completeFilename(dstKey)); err == nil {
            d.bustCacheWithLock(dstKey)
            return nil
        } else if err != syscall.EXDEV {
            // Rename failed for a reason other than the source being on a
            // different device; give up. (EXDEV falls through to a copy.)
            return err
        }
    }

    f, err := os.Open(srcFilename)
    if err != nil {
        return err
    }
    defer f.Close()
    err = d.writeStreamWithLock(dstKey, f, false)
    if err == nil && move {
        err = os.Remove(srcFilename)
    }
    return err
}

// Read reads the key and returns the value.
// If the key is available in the cache, Read won't touch the disk.
// If the key is not in the cache, Read will have the side-effect of
// lazily caching the value.
func (d *Diskv) Read(key string) ([]byte, error) {
    rc, err := d.ReadStream(key, false)
    if err != nil {
        return []byte{}, err
    }
    defer rc.Close()
    return ioutil.ReadAll(rc)
}

// ReadStream reads the key and returns the value (data) as an io.ReadCloser.
// If the value is cached from a previous read, and direct is false,
// ReadStream will use the cached value. Otherwise, it will return a handle to
// the file on disk, and cache the data on read.
//
// If direct is true, ReadStream will lazily delete any cached value for the
// key, and return a direct handle to the file on disk.
//
// If compression is enabled, ReadStream taps into the io.Reader stream prior
// to decompression, and caches the compressed data.
func (d *Diskv) ReadStream(key string, direct bool) (io.ReadCloser, error) {
    d.mu.RLock()
    defer d.mu.RUnlock()

    if val, ok := d.cache[key]; ok {
        if !direct {
            buf := bytes.NewBuffer(val)
            if d.Compression != nil {
                return d.Compression.Reader(buf)
            }
            return ioutil.NopCloser(buf), nil
        }

        go func() {
            d.mu.Lock()
            defer d.mu.Unlock()
            d.uncacheWithLock(key, uint64(len(val)))
        }()
    }

    return d.readWithRLock(key)
}

// readWithRLock ignores the cache, and returns an io.ReadCloser representing
// the decompressed data for the given key, streamed from the disk. Clients
// should acquire a read lock on the Diskv and check the cache themselves
// before calling readWithRLock.
func (d *Diskv) readWithRLock(key string) (io.ReadCloser, error) {
    filename := d.completeFilename(key)

    fi, err := os.Stat(filename)
    if err != nil {
        return nil, err
    }
    if fi.IsDir() {
        return nil, os.ErrNotExist
    }

    f, err := os.Open(filename)
    if err != nil {
        return nil, err
    }

    var r io.Reader
    if d.CacheSizeMax > 0 {
        r = newSiphon(f, d, key)
    } else {
        r = &closingReader{f}
    }

    var rc = io.ReadCloser(ioutil.NopCloser(r))
    if d.Compression != nil {
        rc, err = d.Compression.Reader(r)
        if err != nil {
            return nil, err
        }
    }

    return rc, nil
}

// closingReader provides a Reader that automatically closes the
// embedded ReadCloser when it reaches EOF
type closingReader struct {
    rc io.ReadCloser
}

func (cr closingReader) Read(p []byte) (int, error) {
    n, err := cr.rc.Read(p)
    if err == io.EOF {
        if closeErr := cr.rc.Close(); closeErr != nil {
            return n, closeErr // close must succeed for Read to succeed
        }
    }
    return n, err
}

// siphon is like a TeeReader: it copies all data read through it to an
// internal buffer, and moves that buffer to the cache at EOF.
type siphon struct {
    f   *os.File
    d   *Diskv
    key string
    buf *bytes.Buffer
}

// newSiphon constructs a siphoning reader that represents the passed file.
// When a successful series of reads ends in an EOF, the siphon will write
// the buffered data to Diskv's cache under the given key.
func newSiphon(f *os.File, d *Diskv, key string) io.Reader {
    return &siphon{
        f:   f,
        d:   d,
        key: key,
        buf: &bytes.Buffer{},
    }
}

// Read implements the io.Reader interface for siphon.
func (s *siphon) Read(p []byte) (int, error) {
    n, err := s.f.Read(p)

    if err == nil {
        return s.buf.Write(p[0:n]) // Write must succeed for Read to succeed
    }

    if err == io.EOF {
        s.d.cacheWithoutLock(s.key, s.buf.Bytes()) // cache may fail
        if closeErr := s.f.Close(); closeErr != nil {
            return n, closeErr // close must succeed for Read to succeed
        }
        return n, err
    }

    return n, err
}

// Erase synchronously erases the given key from the disk and the cache.
func (d *Diskv) Erase(key string) error {
    d.mu.Lock()
    defer d.mu.Unlock()

    d.bustCacheWithLock(key)

    // erase from index
    if d.Index != nil {
        d.Index.Delete(key)
    }

    // erase from disk
    filename := d.completeFilename(key)
    if s, err := os.Stat(filename); err == nil {
        if s.IsDir() {
            return errBadKey
        }
        if err = os.Remove(filename); err != nil {
            return err
        }
    } else {
        // Return err as-is so caller can do os.IsNotExist(err).
        return err
    }

    // clean up and return
    d.pruneDirsWithLock(key)
    return nil
}

// EraseAll will delete all of the data from the store, both in the cache and on
// the disk. Note that EraseAll doesn't distinguish diskv-related data from non-
// diskv-related data. Care should be taken to always specify a diskv base
// directory that is exclusively for diskv data.
func (d *Diskv) EraseAll() error {
    d.mu.Lock()
    defer d.mu.Unlock()
    d.cache = make(map[string][]byte)
    d.cacheSize = 0
    if d.TempDir != "" {
        os.RemoveAll(d.TempDir) // errors ignored
    }
    return os.RemoveAll(d.BasePath)
}

// Has returns true if the given key exists.
func (d *Diskv) Has(key string) bool {
    d.mu.Lock()
    defer d.mu.Unlock()

    if _, ok := d.cache[key]; ok {
        return true
    }

    filename := d.completeFilename(key)
    s, err := os.Stat(filename)
    if err != nil {
        return false
    }
    if s.IsDir() {
        return false
    }

    return true
}

// Keys returns a channel that will yield every key accessible by the store,
// in undefined order. If a cancel channel is provided, closing it will
// terminate and close the keys channel.
func (d *Diskv) Keys(cancel <-chan struct{}) <-chan string {
    return d.KeysPrefix("", cancel)
}

// KeysPrefix returns a channel that will yield every key accessible by the
// store with the given prefix, in undefined order. If a cancel channel is
// provided, closing it will terminate and close the keys channel. If the
// provided prefix is the empty string, all keys will be yielded.
func (d *Diskv) KeysPrefix(prefix string, cancel <-chan struct{}) <-chan string {
    var prepath string
    if prefix == "" {
        prepath = d.BasePath
    } else {
        prepath = d.pathFor(prefix)
    }
    c := make(chan string)
    go func() {
        filepath.Walk(prepath, walker(c, prefix, cancel))
        close(c)
    }()
    return c
}

// walker returns a function which satisfies the filepath.WalkFunc interface.
// It sends every non-directory file entry down the channel c.
func walker(c chan<- string, prefix string, cancel <-chan struct{}) filepath.WalkFunc {
    return func(path string, info os.FileInfo, err error) error {
        if err != nil {
            return err
        }

        if info.IsDir() || !strings.HasPrefix(info.Name(), prefix) {
            return nil // "pass"
        }

        select {
        case c <- info.Name():
        case <-cancel:
            return errCanceled
        }

        return nil
    }
}

// pathFor returns the absolute path for location on the filesystem where the
// data for the given key will be stored.
func (d *Diskv) pathFor(key string) string {
    return filepath.Join(d.BasePath, filepath.Join(d.Transform(key)...))
}

// ensurePathWithLock is a helper function that generates all necessary
// directories on the filesystem for the given key.
func (d *Diskv) ensurePathWithLock(key string) error {
    return os.MkdirAll(d.pathFor(key), d.PathPerm)
}

// completeFilename returns the absolute path to the file for the given key.
func (d *Diskv) completeFilename(key string) string {
    return filepath.Join(d.pathFor(key), key)
}

// cacheWithLock attempts to cache the given key-value pair in the store's
// cache. It can fail if the value is larger than the cache's maximum size.
func (d *Diskv) cacheWithLock(key string, val []byte) error {
    valueSize := uint64(len(val))
    if err := d.ensureCacheSpaceWithLock(valueSize); err != nil {
        return fmt.Errorf("%s; not caching", err)
    }

    // be very strict about memory guarantees
    if (d.cacheSize + valueSize) > d.CacheSizeMax {
        panic(fmt.Sprintf("failed to make room for value (%d/%d)", valueSize, d.CacheSizeMax))
    }

    d.cache[key] = val
    d.cacheSize += valueSize
    return nil
}

// cacheWithoutLock acquires the store's (write) mutex and calls cacheWithLock.
func (d *Diskv) cacheWithoutLock(key string, val []byte) error {
    d.mu.Lock()
    defer d.mu.Unlock()
    return d.cacheWithLock(key, val)
}

func (d *Diskv) bustCacheWithLock(key string) {
    if val, ok := d.cache[key]; ok {
        d.uncacheWithLock(key, uint64(len(val)))
    }
}

func (d *Diskv) uncacheWithLock(key string, sz uint64) {
    d.cacheSize -= sz
    delete(d.cache, key)
}

// pruneDirsWithLock deletes empty directories in the path walk leading to the
// key k. Typically this function is called after an Erase is made.
func (d *Diskv) pruneDirsWithLock(key string) error {
    pathlist := d.Transform(key)
    for i := range pathlist {
        dir := filepath.Join(d.BasePath, filepath.Join(pathlist[:len(pathlist)-i]...))

        // thanks to Steven Blenkinsop for this snippet
        switch fi, err := os.Stat(dir); true {
        case err != nil:
            return err
        case !fi.IsDir():
            panic(fmt.Sprintf("corrupt dirstate at %s", dir))
        }

        nlinks, err := filepath.Glob(filepath.Join(dir, "*"))
        if err != nil {
            return err
        } else if len(nlinks) > 0 {
            return nil // has subdirs -- do not prune
        }
        if err = os.Remove(dir); err != nil {
            return err
        }
    }

    return nil
}

// ensureCacheSpaceWithLock deletes entries from the cache in arbitrary order
// until the cache has at least valueSize bytes available.
func (d *Diskv) ensureCacheSpaceWithLock(valueSize uint64) error {
    if valueSize > d.CacheSizeMax {
        return fmt.Errorf("value size (%d bytes) too large for cache (%d bytes)", valueSize, d.CacheSizeMax)
    }

    safe := func() bool { return (d.cacheSize + valueSize) <= d.CacheSizeMax }

    for key, val := range d.cache {
        if safe() {
            break
        }

        d.uncacheWithLock(key, uint64(len(val)))
    }

    if !safe() {
        panic(fmt.Sprintf("%d bytes still won't fit in the cache! (max %d bytes)", valueSize, d.CacheSizeMax))
    }

    return nil
}

// nopWriteCloser wraps an io.Writer and provides a no-op Close method to
// satisfy the io.WriteCloser interface.
type nopWriteCloser struct {
    io.Writer
}

func (wc *nopWriteCloser) Write(p []byte) (int, error) { return wc.Writer.Write(p) }
func (wc *nopWriteCloser) Close() error                { return nil }
@ -0,0 +1,115 @@
package diskv

import (
    "sync"

    "github.com/google/btree"
)

// Index is a generic interface for things that can
// provide an ordered list of keys.
type Index interface {
    Initialize(less LessFunction, keys <-chan string)
    Insert(key string)
    Delete(key string)
    Keys(from string, n int) []string
}

// LessFunction is used to initialize an Index of keys in a specific order.
type LessFunction func(string, string) bool

// btreeString is a custom data type that satisfies the BTree Less interface,
// making the strings it wraps sortable by the BTree package.
type btreeString struct {
    s string
    l LessFunction
}

// Less satisfies the BTree.Less interface using the btreeString's LessFunction.
func (s btreeString) Less(i btree.Item) bool {
    return s.l(s.s, i.(btreeString).s)
}

// BTreeIndex is an implementation of the Index interface using google/btree.
type BTreeIndex struct {
    sync.RWMutex
    LessFunction
    *btree.BTree
}

// Initialize populates the BTree tree with data from the keys channel,
// according to the passed less function. It's destructive to the BTreeIndex.
func (i *BTreeIndex) Initialize(less LessFunction, keys <-chan string) {
    i.Lock()
    defer i.Unlock()
    i.LessFunction = less
    i.BTree = rebuild(less, keys)
}

// Insert inserts the given key (only) into the BTree tree.
func (i *BTreeIndex) Insert(key string) {
    i.Lock()
    defer i.Unlock()
    if i.BTree == nil || i.LessFunction == nil {
        panic("uninitialized index")
    }
    i.BTree.ReplaceOrInsert(btreeString{s: key, l: i.LessFunction})
}

// Delete removes the given key (only) from the BTree tree.
func (i *BTreeIndex) Delete(key string) {
    i.Lock()
    defer i.Unlock()
    if i.BTree == nil || i.LessFunction == nil {
        panic("uninitialized index")
    }
    i.BTree.Delete(btreeString{s: key, l: i.LessFunction})
}

// Keys yields a maximum of n keys in order. If the passed 'from' key is empty,
// Keys will return the first n keys. If the passed 'from' key is non-empty, the
// first key in the returned slice will be the key that immediately follows the
// passed key, in key order.
func (i *BTreeIndex) Keys(from string, n int) []string {
    i.RLock()
    defer i.RUnlock()

    if i.BTree == nil || i.LessFunction == nil {
        panic("uninitialized index")
    }

    if i.BTree.Len() <= 0 {
        return []string{}
    }

    btreeFrom := btreeString{s: from, l: i.LessFunction}
    skipFirst := true
    if len(from) <= 0 || !i.BTree.Has(btreeFrom) {
        // no such key, so fabricate an always-smallest item
        btreeFrom = btreeString{s: "", l: func(string, string) bool { return true }}
        skipFirst = false
    }

    keys := []string{}
    iterator := func(i btree.Item) bool {
        keys = append(keys, i.(btreeString).s)
        return len(keys) < n
    }
    i.BTree.AscendGreaterOrEqual(btreeFrom, iterator)

    if skipFirst && len(keys) > 0 {
        keys = keys[1:]
    }

    return keys
}
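
// A sketch (not part of this file) of paging through an initialized index
// with Keys, using the last key of each batch as the cursor for the next:
//
//	for page := idx.Keys("", 100); len(page) > 0; page = idx.Keys(page[len(page)-1], 100) {
//		// process(page)
//	}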

// rebuild does the work of regenerating the index
// with the given keys.
func rebuild(less LessFunction, keys <-chan string) *btree.BTree {
    tree := btree.New(2)
    for key := range keys {
        tree.ReplaceOrInsert(btreeString{s: key, l: less})
    }
    return tree
}