mirror of https://github.com/prometheus/prometheus
commit 2b78147417
@@ -42,13 +42,12 @@ crossbuild:
     - linux/arm
     - linux/arm64
     - freebsd/arm
+    - openbsd/arm
     # Temporarily deactivated as golang.org/x/sys does not have syscalls
     # implemented for that os/platform combination.
-    #- openbsd/arm
     #- linux/mips64
     #- linux/mips64le
     - netbsd/arm
     - linux/ppc64
     - linux/ppc64le
     - linux/s390x
CHANGELOG.md

@@ -1,3 +1,33 @@
## 2.11.0-rc.0 / 2019-07-04

* [CHANGE] Remove `max_retries` from queue_config (it has been unused since rewriting remote-write to utilize the write-ahead log). #5649
* [CHANGE] The meta file `BlockStats` no longer holds size information. This is now dynamically calculated and kept in memory. It also includes the meta file size, which was not included before. tsdb#637
* [CHANGE] Renamed metric from `prometheus_tsdb_wal_reader_corruption_errors` to `prometheus_tsdb_wal_reader_corruption_errors_total`. tsdb#622
* [FEATURE] Add option to use Alertmanager API v2. #5482
* [FEATURE] Added `humanizePercentage` function for templates. #5670
* [FEATURE] Include InitContainers in Kubernetes Service Discovery. #5598
* [FEATURE] Provide option to compress WAL records using Snappy. [#609](https://github.com/prometheus/tsdb/pull/609)
* [ENHANCEMENT] Create new clean segment when starting the WAL. tsdb#608
* [ENHANCEMENT] Reduce allocations in PromQL aggregations. #5641
* [ENHANCEMENT] Add storage warnings to LabelValues and LabelNames API results. #5673
* [ENHANCEMENT] Add `prometheus_http_requests_total` metric. #5640
* [ENHANCEMENT] Enable openbsd/arm build. #5696
* [ENHANCEMENT] Remote-write allocation improvements. #5614
* [ENHANCEMENT] Query performance improvement: efficient iteration and search in HashForLabels and HashWithoutLabels. #5707
* [ENHANCEMENT] Allow injection of arbitrary headers in promtool. #4389
* [ENHANCEMENT] Allow passing `external_labels` in alert unit test groups. #5608
* [ENHANCEMENT] Allow globs for rules when unit testing. #5595
* [ENHANCEMENT] Improved postings intersection matching. tsdb#616
* [ENHANCEMENT] Reduced disk usage for WAL for small setups. tsdb#605
* [ENHANCEMENT] Optimize queries using regexp for set lookups. tsdb#602
* [BUGFIX] Resolve race condition in maxGauge. #5647
* [BUGFIX] Fix ZooKeeper connection leak. #5675
* [BUGFIX] Improved atomicity of .tmp block replacement during compaction for the usual case. tsdb#636
* [BUGFIX] Fix "unknown series references" after clean shutdown. tsdb#623
* [BUGFIX] Re-calculate block size when calling `block.Delete`. tsdb#637
* [BUGFIX] Fix unsafe snapshots with head block. tsdb#641
* [BUGFIX] `prometheus_tsdb_compactions_failed_total` is now incremented on any compaction failure. tsdb#613

## 2.10.0 / 2019-05-25

* [CHANGE/BUGFIX] API: Encode alert values as string to correctly represent Inf/NaN. #5582
@@ -40,7 +40,7 @@ go build ./cmd/prometheus/
make test         # Make sure all the tests pass before you commit and push :)
```

-We use `golangci-lint`[https://github.com/golangci/golangci-lint] for linting the code. If it reports an issue and you think that the warning needs to be disregarded or is a false-positive, you can add a special comment `//nolint:linter1[,linter2,...]` before the offending line. Use this sparingly though, fixing the code to comply with the linter's recommendation is in general the preferred course of action.
+We use [`golangci-lint`](https://github.com/golangci/golangci-lint) for linting the code. If it reports an issue and you think that the warning needs to be disregarded or is a false-positive, you can add a special comment `//nolint:linter1[,linter2,...]` before the offending line. Use this sparingly though; fixing the code to comply with the linter's recommendation is in general the preferred course of action.

All our issues are regularly tagged so that you can also filter down the issues involving the components you want to work on. For our labeling policy, refer to [the wiki page](https://github.com/prometheus/prometheus/wiki/Label-Names-and-Descriptions).
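To make the `//nolint` escape hatch concrete, here is a minimal, self-contained sketch; the function and the choice of `errcheck` as the silenced linter are illustrative, not taken from the Prometheus codebase:

```go
package main

import "fmt"

// doWork returns an error that main deliberately discards below;
// errcheck would normally flag the discarded error.
func doWork() error { return nil }

func main() {
	//nolint:errcheck // The error is intentionally ignored in this example.
	doWork()
	fmt.Println("done")
}
```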
@@ -69,7 +69,7 @@ else
GO_BUILD_PLATFORM ?= $(GOHOSTOS)-$(GOHOSTARCH)
endif

-PROMU_VERSION ?= 0.4.0
+PROMU_VERSION ?= 0.5.0
PROMU_URL := https://github.com/prometheus/promu/releases/download/v$(PROMU_VERSION)/promu-$(PROMU_VERSION).$(GO_BUILD_PLATFORM).tar.gz

GOLANGCI_LINT :=

@@ -86,7 +86,8 @@ endif
PREFIX ?= $(shell pwd)
BIN_DIR ?= $(shell pwd)
DOCKER_IMAGE_TAG ?= $(subst /,-,$(shell git rev-parse --abbrev-ref HEAD))
-DOCKERFILE_PATH ?= ./
+DOCKERFILE_PATH ?= ./Dockerfile
+DOCKERBUILD_CONTEXT ?= ./
DOCKER_REPO ?= prom

DOCKER_ARCHS ?= amd64

@@ -211,9 +212,10 @@ common-tarball: promu
common-docker: $(BUILD_DOCKER_ARCHS)
$(BUILD_DOCKER_ARCHS): common-docker-%:
	docker build -t "$(DOCKER_REPO)/$(DOCKER_IMAGE_NAME)-linux-$*:$(DOCKER_IMAGE_TAG)" \
+		-f $(DOCKERFILE_PATH) \
		--build-arg ARCH="$*" \
		--build-arg OS="linux" \
-		$(DOCKERFILE_PATH)
+		$(DOCKERBUILD_CONTEXT)

.PHONY: common-docker-publish $(PUBLISH_DOCKER_ARCHS)
common-docker-publish: $(PUBLISH_DOCKER_ARCHS)
RELEASE.md

@@ -80,22 +80,17 @@ $ git push --tags

Signing a tag with a GPG key is appreciated, but in case you can't add a GPG key to your Github account using the following [procedure](https://help.github.com/articles/generating-a-gpg-key/), you can replace the `-s` flag of the `git tag` command with the `-a` flag to only annotate the tag without signing.

-Once a tag is created, the release process through CircleCI will be triggered for this tag.
-You must create a Github Release using the UI for this tag, as otherwise CircleCI will not be able to upload tarballs for this tag. __Also, you must create the Github Release using a Github user that has granted access rights to CircleCI.__ If you did not or cannot grant those rights to your personal account, you can log in as `prombot` in an anonymous browser tab. (This will, however, prevent verified releases signed with your GPG key. For verified releases, the signing identity must be the same as the one creating the release.)
+Once a tag is created, the release process through CircleCI will be triggered for this tag and CircleCI will draft the GitHub release using the `prombot` account.

-Go to the releases page of the project, click on the _Draft a new release_ button and select the tag you just pushed. The title of the release is formatted `x.y.z / YYYY-MM-DD`. Add the relevant part of `CHANGELOG.md` as description. Click _Save draft_ rather than _Publish release_ at this time. (This will prevent the release being visible before it has got the binaries attached to it.)
-
-You can also create the tag and the Github release in one go through the Github UI by going to the releases page, clicking on the `Draft a new release` button and entering your tag version.
-
-Now all you can do is to wait for tarballs to be uploaded to the Github release and Docker images to be pushed to the Docker Hub and Quay.io. Once that has happened, click _Publish release_, which will make the release publicly visible and create a GitHub notification.
+Now all you can do is to wait for tarballs to be uploaded to the Github release and the container images to be pushed to the Docker Hub and Quay.io. Once that has happened, click _Publish release_, which will make the release publicly visible and create a GitHub notification.

### Wrapping up

If the release has happened in the latest release branch, merge the changes into master.

-To update the docs, a PR needs to be created to `prometheus/docs`. See [this PR](https://github.com/prometheus/docs/pull/952/files) for inspiration.
+To update the docs, a PR needs to be created to `prometheus/docs`. See [this PR](https://github.com/prometheus/docs/pull/952/files) for inspiration (note: only actually merge this for final releases, not for pre-releases like a release candidate).

Once the binaries have been uploaded, announce the release on `prometheus-announce@googlegroups.com`. (Please do not use `prometheus-users@googlegroups.com` for announcements anymore.) Check out previous announcement mails for inspiration.

### Pre-releases

@@ -104,4 +99,5 @@ The following changes to the above procedures apply:

* In line with [Semantic Versioning](https://semver.org/), append something like `-rc.0` to the version (with the corresponding changes to the tag name, the release name etc.).
* Tick the _This is a pre-release_ box when drafting the release in the Github UI.
* Still update `CHANGELOG.md`, but when you cut the final release later, merge all the changes from the pre-releases into the one final update.
* Run the benchmark for 3 days using the `/benchmark x.y.z` command, `x.y.z` being the latest stable patch release of the previous minor release series.
@@ -123,6 +123,10 @@ func main() {
		notifier: notifier.Options{
			Registerer: prometheus.DefaultRegisterer,
		},
+		web: web.Options{
+			Registerer: prometheus.DefaultRegisterer,
+			Gatherer:   prometheus.DefaultGatherer,
+		},
		promlogConfig: promlog.Config{},
	}

@@ -203,6 +207,9 @@ func main() {
	a.Flag("storage.tsdb.allow-overlapping-blocks", "[EXPERIMENTAL] Allow overlapping blocks, which in turn enables vertical compaction and vertical query merge.").
		Default("false").BoolVar(&cfg.tsdb.AllowOverlappingBlocks)

+	a.Flag("storage.tsdb.wal-compression", "Compress the tsdb WAL.").
+		Default("false").BoolVar(&cfg.tsdb.WALCompression)
+
	a.Flag("storage.remote.flush-deadline", "How long to wait flushing sample on shutdown or config reload.").
		Default("1m").PlaceHolder("<duration>").SetValue(&cfg.RemoteFlushDeadline)

@@ -667,6 +674,7 @@ func main() {
		"RetentionDuration", cfg.tsdb.RetentionDuration,
		"WALSegmentSize", cfg.tsdb.WALSegmentSize,
		"AllowOverlappingBlocks", cfg.tsdb.AllowOverlappingBlocks,
+		"WALCompression", cfg.tsdb.WALCompression,
	)

	startTimeMargin := int64(2 * time.Duration(cfg.tsdb.MinBlockDuration).Seconds() * 1000)
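The new flag wires straight through to the embedded TSDB's options. As a hedged sketch of enabling the same behavior when embedding the tsdb library directly (assumes the prometheus/tsdb v0.9.x API that this commit pins; the `data` directory is a placeholder):

```go
package main

import (
	"log"

	"github.com/prometheus/tsdb"
)

func main() {
	// Start from the library defaults, then opt in to Snappy-compressed
	// WAL records — the behavior --storage.tsdb.wal-compression toggles.
	opts := *tsdb.DefaultOptions
	opts.WALCompression = true

	db, err := tsdb.Open("data", nil, nil, &opts) // "data" is a placeholder directory
	if err != nil {
		log.Fatal(err)
	}
	defer db.Close()
}
```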
@@ -19,6 +19,7 @@ import (
	"encoding/json"
	"fmt"
	"math"
+	"net/http"
	"net/url"
	"os"
	"path/filepath"

@@ -30,6 +31,7 @@ import (
	"github.com/pkg/errors"
	"github.com/prometheus/client_golang/api"
	v1 "github.com/prometheus/client_golang/api/prometheus/v1"
+	"github.com/prometheus/client_golang/prometheus/promhttp"
	config_util "github.com/prometheus/common/config"
	"github.com/prometheus/common/model"
	"github.com/prometheus/common/version"

@@ -70,6 +72,7 @@ func main() {
	queryRangeCmd := queryCmd.Command("range", "Run range query.")
	queryRangeServer := queryRangeCmd.Arg("server", "Prometheus server to query.").Required().String()
	queryRangeExpr := queryRangeCmd.Arg("expr", "PromQL query expression.").Required().String()
+	queryRangeHeaders := queryRangeCmd.Flag("header", "Extra headers to send to server.").StringMap()
	queryRangeBegin := queryRangeCmd.Flag("start", "Query range start time (RFC3339 or Unix timestamp).").String()
	queryRangeEnd := queryRangeCmd.Flag("end", "Query range end time (RFC3339 or Unix timestamp).").String()
	queryRangeStep := queryRangeCmd.Flag("step", "Query step size (duration).").Duration()

@@ -123,7 +126,7 @@ func main() {
		os.Exit(QueryInstant(*queryServer, *queryExpr, p))

	case queryRangeCmd.FullCommand():
-		os.Exit(QueryRange(*queryRangeServer, *queryRangeExpr, *queryRangeBegin, *queryRangeEnd, *queryRangeStep, p))
+		os.Exit(QueryRange(*queryRangeServer, *queryRangeHeaders, *queryRangeExpr, *queryRangeBegin, *queryRangeEnd, *queryRangeStep, p))

	case querySeriesCmd.FullCommand():
		os.Exit(QuerySeries(*querySeriesServer, *querySeriesMatch, *querySeriesBegin, *querySeriesEnd, p))

@@ -143,7 +146,6 @@ func main() {
	case testRulesCmd.FullCommand():
		os.Exit(RulesUnitTest(*testRulesFiles...))
	}
}

// CheckConfig validates configuration files.

@@ -348,7 +350,7 @@ func QueryInstant(url, query string, p printer) int {
	api := v1.NewAPI(c)

	ctx, cancel := context.WithTimeout(context.Background(), 2*time.Minute)
-	val, err := api.Query(ctx, query, time.Now())
+	val, _, err := api.Query(ctx, query, time.Now()) // Ignoring warnings for now.
	cancel()
	if err != nil {
		fmt.Fprintln(os.Stderr, "query error:", err)

@@ -361,11 +363,20 @@ func QueryInstant(url, query string, p printer) int {
}

// QueryRange performs a range query against a Prometheus server.
-func QueryRange(url, query, start, end string, step time.Duration, p printer) int {
+func QueryRange(url string, headers map[string]string, query, start, end string, step time.Duration, p printer) int {
	config := api.Config{
		Address: url,
	}

+	if len(headers) > 0 {
+		config.RoundTripper = promhttp.RoundTripperFunc(func(req *http.Request) (*http.Response, error) {
+			for key, value := range headers {
+				req.Header.Add(key, value)
+			}
+			return http.DefaultTransport.RoundTrip(req)
+		})
+	}
+
	// Create new client.
	c, err := api.NewClient(config)
	if err != nil {

@@ -408,7 +419,7 @@ func QueryRange(url, query, start, end string, step time.Duration, p printer) int {
	api := v1.NewAPI(c)
	r := v1.Range{Start: stime, End: etime, Step: step}
	ctx, cancel := context.WithTimeout(context.Background(), 2*time.Minute)
-	val, err := api.QueryRange(ctx, query, r)
+	val, _, err := api.QueryRange(ctx, query, r) // Ignoring warnings for now.
	cancel()

	if err != nil {

@@ -462,7 +473,7 @@ func QuerySeries(url *url.URL, matchers []string, start, end string, p printer) int {
	// Run query against client.
	api := v1.NewAPI(c)
	ctx, cancel := context.WithTimeout(context.Background(), 2*time.Minute)
-	val, err := api.Series(ctx, matchers, stime, etime)
+	val, _, err := api.Series(ctx, matchers, stime, etime) // Ignoring warnings for now.
	cancel()

	if err != nil {
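The header-injection pattern above is small enough to reuse outside promtool. A minimal sketch (the server address and header are placeholders; unlike promtool, it surfaces the warnings that the v1 client API returns instead of discarding them):

```go
package main

import (
	"context"
	"fmt"
	"log"
	"net/http"
	"time"

	"github.com/prometheus/client_golang/api"
	v1 "github.com/prometheus/client_golang/api/prometheus/v1"
	"github.com/prometheus/client_golang/prometheus/promhttp"
)

func main() {
	headers := map[string]string{"X-Scope-OrgID": "demo"} // placeholder header

	cfg := api.Config{Address: "http://localhost:9090"} // placeholder server
	// Wrap the default transport so every request carries the extra headers,
	// mirroring what promtool does when --header flags are given.
	cfg.RoundTripper = promhttp.RoundTripperFunc(func(req *http.Request) (*http.Response, error) {
		for k, v := range headers {
			req.Header.Add(k, v)
		}
		return http.DefaultTransport.RoundTrip(req)
	})

	c, err := api.NewClient(cfg)
	if err != nil {
		log.Fatal(err)
	}

	ctx, cancel := context.WithTimeout(context.Background(), 2*time.Minute)
	defer cancel()

	// The v1 API now returns warnings alongside the value; promtool ignores
	// them for now, but callers can log them instead.
	val, warnings, err := v1.NewAPI(c).Query(ctx, "up", time.Now())
	if err != nil {
		log.Fatal(err)
	}
	for _, w := range warnings {
		log.Println("warning:", w)
	}
	fmt.Println(val)
}
```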
@@ -26,7 +26,7 @@ func TestQueryRange(t *testing.T) {
	defer s.Close()

	p := &promqlPrinter{}
-	exitCode := QueryRange(s.URL, "up", "0", "300", 0, p)
+	exitCode := QueryRange(s.URL, map[string]string{}, "up", "0", "300", 0, p)
	expectedPath := "/api/v1/query_range"
	gotPath := getRequest().URL.Path
	if gotPath != expectedPath {

@@ -45,7 +45,7 @@ func TestQueryRange(t *testing.T) {
		t.Error()
	}

-	exitCode = QueryRange(s.URL, "up", "0", "300", 10*time.Millisecond, p)
+	exitCode = QueryRange(s.URL, map[string]string{}, "up", "0", "300", 10*time.Millisecond, p)
	gotPath = getRequest().URL.Path
	if gotPath != expectedPath {
		t.Errorf("unexpected URL path %s (wanted %s)", gotPath, expectedPath)
@@ -157,6 +157,7 @@ type testGroup struct {
	InputSeries     []series          `yaml:"input_series"`
	AlertRuleTests  []alertTestCase   `yaml:"alert_rule_test,omitempty"`
	PromqlExprTests []promqlTestCase  `yaml:"promql_expr_test,omitempty"`
+	ExternalLabels  labels.Labels     `yaml:"external_labels,omitempty"`
}

// test performs the unit tests.

@@ -177,8 +178,7 @@ func (tg *testGroup) test(mint, maxt time.Time, evalInterval time.Duration, grou
		Logger: log.NewNopLogger(),
	}
	m := rules.NewManager(opts)
-	// TODO(beorn7): Provide a way to pass in external labels.
-	groupsMap, ers := m.LoadGroups(tg.Interval, nil, ruleFiles...)
+	groupsMap, ers := m.LoadGroups(tg.Interval, tg.ExternalLabels, ruleFiles...)
	if ers != nil {
		return ers
	}
@@ -92,8 +92,9 @@ var (

	// DefaultAlertmanagerConfig is the default alertmanager configuration.
	DefaultAlertmanagerConfig = AlertmanagerConfig{
-		Scheme:  "http",
-		Timeout: model.Duration(10 * time.Second),
+		Scheme:     "http",
+		Timeout:    model.Duration(10 * time.Second),
+		APIVersion: AlertmanagerAPIVersionV1,
	}

	// DefaultRemoteWriteConfig is the default remote write configuration.

@@ -116,8 +117,7 @@ var (
		Capacity:          10,
		BatchSendDeadline: model.Duration(5 * time.Second),

-		// Max number of times to retry a batch on recoverable errors.
-		MaxRetries: 3,
		// Backoff times for retrying a batch of samples on recoverable errors.
		MinBackoff: model.Duration(30 * time.Millisecond),
		MaxBackoff: model.Duration(100 * time.Millisecond),
	}

@@ -454,6 +454,40 @@ func (c *AlertingConfig) UnmarshalYAML(unmarshal func(interface{}) error) error
	return nil
}

+// AlertmanagerAPIVersion represents a version of the
+// github.com/prometheus/alertmanager/api, e.g. 'v1' or 'v2'.
+type AlertmanagerAPIVersion string
+
+// UnmarshalYAML implements the yaml.Unmarshaler interface.
+func (v *AlertmanagerAPIVersion) UnmarshalYAML(unmarshal func(interface{}) error) error {
+	*v = AlertmanagerAPIVersion("")
+	type plain AlertmanagerAPIVersion
+	if err := unmarshal((*plain)(v)); err != nil {
+		return err
+	}
+
+	for _, supportedVersion := range SupportedAlertmanagerAPIVersions {
+		if *v == supportedVersion {
+			return nil
+		}
+	}
+
+	return fmt.Errorf("expected Alertmanager api version to be one of %v but got %v", SupportedAlertmanagerAPIVersions, *v)
+}
+
+const (
+	// AlertmanagerAPIVersionV1 represents
+	// github.com/prometheus/alertmanager/api/v1.
+	AlertmanagerAPIVersionV1 AlertmanagerAPIVersion = "v1"
+	// AlertmanagerAPIVersionV2 represents
+	// github.com/prometheus/alertmanager/api/v2.
+	AlertmanagerAPIVersionV2 AlertmanagerAPIVersion = "v2"
+)
+
+var SupportedAlertmanagerAPIVersions = []AlertmanagerAPIVersion{
+	AlertmanagerAPIVersionV1, AlertmanagerAPIVersionV2,
+}
+
// AlertmanagerConfig configures how Alertmanagers can be discovered and communicated with.
type AlertmanagerConfig struct {
	// We cannot do proper Go type embedding below as the parser will then parse

@@ -469,6 +503,9 @@ type AlertmanagerConfig struct {
	// The timeout used when sending alerts.
	Timeout model.Duration `yaml:"timeout,omitempty"`

+	// The api version of Alertmanager.
+	APIVersion AlertmanagerAPIVersion `yaml:"api_version"`
+
	// List of Alertmanager relabel configurations.
	RelabelConfigs []*relabel.Config `yaml:"relabel_configs,omitempty"`
}

@@ -594,9 +631,6 @@ type QueueConfig struct {
	// Maximum time sample will wait in buffer.
	BatchSendDeadline model.Duration `yaml:"batch_send_deadline,omitempty"`

-	// Max number of times to retry a batch on recoverable errors.
-	MaxRetries int `yaml:"max_retries,omitempty"`
-
	// On recoverable errors, backoff exponentially.
	MinBackoff model.Duration `yaml:"min_backoff,omitempty"`
	MaxBackoff model.Duration `yaml:"max_backoff,omitempty"`
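To see what the new `UnmarshalYAML` guard does, a small sketch that unmarshals an Alertmanager API version with `gopkg.in/yaml.v2` (assuming the `config` package as of this commit; `v3` is a deliberately unsupported value):

```go
package main

import (
	"fmt"

	"github.com/prometheus/prometheus/config"
	yaml "gopkg.in/yaml.v2"
)

func main() {
	var v config.AlertmanagerAPIVersion

	// Supported versions ("v1", "v2") unmarshal cleanly.
	if err := yaml.Unmarshal([]byte("v2"), &v); err != nil {
		fmt.Println("unexpected:", err)
	}
	fmt.Println("parsed:", v) // parsed: v2

	// Anything else is rejected by the custom UnmarshalYAML shown above.
	err := yaml.Unmarshal([]byte("v3"), &v)
	fmt.Println("error:", err)
	// error: expected Alertmanager api version to be one of [v1 v2] but got v3
}
```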
@@ -631,8 +631,9 @@ var expectedConf = &Config{
	AlertingConfig: AlertingConfig{
		AlertmanagerConfigs: []*AlertmanagerConfig{
			{
-				Scheme:  "https",
-				Timeout: model.Duration(10 * time.Second),
+				Scheme:     "https",
+				Timeout:    model.Duration(10 * time.Second),
+				APIVersion: AlertmanagerAPIVersionV1,
				ServiceDiscoveryConfig: sd_config.ServiceDiscoveryConfig{
					StaticConfigs: []*targetgroup.Group{
						{
@@ -36,6 +36,7 @@ var PATH_PREFIX = "{{ pathPrefix }}";
{{ define "humanizeNoSmallPrefix" }}{{ if and (lt . 1.0) (gt . -1.0) }}{{ printf "%.3g" . }}{{ else }}{{ humanize . }}{{ end }}{{ end }}
{{ define "humanize1024" }}{{ humanize1024 . }}{{ end }}
{{ define "humanizeDuration" }}{{ humanizeDuration . }}{{ end }}
+{{ define "humanizePercentage" }}{{ humanizePercentage . }}{{ end }}
{{ define "humanizeTimestamp" }}{{ humanizeTimestamp . }}{{ end }}
{{ define "printf.1f" }}{{ printf "%.1f" . }}{{ end }}
{{ define "printf.3g" }}{{ printf "%.3g" . }}{{ end }}

@@ -50,10 +50,10 @@
      <tr>
        <th colspan="2">HTTP Server</th>
      </tr>
-     {{ range printf "http_request_duration_microseconds_count{job='prometheus',instance='%s',handler=~'^(query.*|federate|consoles)$'}" .Params.instance | query | sortByLabel "handler" }}
+     {{ range printf "prometheus_http_request_duration_seconds_count{job='prometheus',instance='%s'}" .Params.instance | query | sortByLabel "handler" }}
      <tr>
        <td>{{ .Labels.handler }}</td>
-       <td>{{ template "prom_query_drilldown" (args (printf "irate(http_request_duration_microseconds_count{job='prometheus',instance='%s',handler='%s'}[5m])" .Labels.instance .Labels.handler) "/s" "humanizeNoSmallPrefix") }}</td>
+       <td>{{ template "prom_query_drilldown" (args (printf "irate(prometheus_http_request_duration_seconds_count{job='prometheus',instance='%s',handler='%s'}[5m])" .Labels.instance .Labels.handler) "/s" "humanizeNoSmallPrefix") }}</td>
      </tr>
      {{ end }}

@@ -82,7 +82,7 @@
  <script>
  new PromConsole.Graph({
    node: document.querySelector("#serverGraph"),
-   expr: "irate(http_request_duration_microseconds_count{job='prometheus',instance='{{ .Params.instance }}',handler=~'^(query.*|federate|consoles)$'}[5m])",
+   expr: "irate(prometheus_http_request_duration_seconds_count{job='prometheus',instance='{{ .Params.instance }}'}[5m])",
    name: '[[handler]]',
    yAxisFormatter: PromConsole.NumberFormatter.humanizeNoSmallPrefix,
    yHoverFormatter: PromConsole.NumberFormatter.humanizeNoSmallPrefix,
@@ -127,8 +127,11 @@ job_name: <job_name>
# If honor_labels is set to "false", label conflicts are resolved by renaming
# conflicting labels in the scraped data to "exported_<original-label>" (for
# example "exported_instance", "exported_job") and then attaching server-side
-# labels. This is useful for use cases such as federation, where all labels
-# specified in the target should be preserved.
+# labels.
+#
+# Setting honor_labels to "true" is useful for use cases such as federation and
+# scraping the Pushgateway, where all labels specified in the target should be
+# preserved.
#
# Note that any globally configured "external_labels" are unaffected by this
# setting. In communication with external systems, they are always applied only

@@ -439,8 +442,8 @@ See below for the configuration options for EC2 discovery:

```yaml
# The information to access the EC2 API.

-# The AWS Region.
-region: <string>
+# The AWS region. If blank, the region from the instance metadata is used.
+[ region: <string> ]

# Custom endpoint to be used.
[ endpoint: <string> ]

@@ -1125,8 +1128,8 @@ anchored on both ends. To un-anchor the regex, use `.*<regex>.*`.
* `labelkeep`: Match `regex` against all label names. Any label that does not match will be
  removed from the set of labels.

-Care must be taken with `labeldrop` and `labelkeep` to ensure that metrics are still uniquely labeled
-once the labels are removed.
+Care must be taken with `labeldrop` and `labelkeep` to ensure that metrics are
+still uniquely labeled once the labels are removed.

### `<metric_relabel_configs>`

@@ -1147,8 +1150,9 @@ external labels send identical alerts.

### `<alertmanager_config>`

-An `alertmanager_config` section specifies Alertmanager instances the Prometheus server sends
-alerts to. It also provides parameters to configure how to communicate with these Alertmanagers.
+An `alertmanager_config` section specifies Alertmanager instances the Prometheus
+server sends alerts to. It also provides parameters to configure how to
+communicate with these Alertmanagers.

Alertmanagers may be statically configured via the `static_configs` parameter or
dynamically discovered using one of the supported service-discovery mechanisms.

@@ -1161,6 +1165,9 @@ through the `__alerts_path__` label.
# Per-target Alertmanager timeout when pushing alerts.
[ timeout: <duration> | default = 10s ]

+# The API version of Alertmanager.
+[ api_version: <version> | default = v1 ]
+
# Prefix for the HTTP path alerts are pushed to.
[ path_prefix: <path> | default = / ]

@@ -1288,8 +1295,8 @@ tls_config:

# Configures the queue used to write to remote storage.
queue_config:
-  # Number of samples to buffer per shard before we start dropping them.
-  [ capacity: <int> | default = 10000 ]
+  # Number of samples to buffer per shard before we block reading of more samples from the WAL.
+  [ capacity: <int> | default = 10 ]
  # Maximum number of shards, i.e. amount of concurrency.
  [ max_shards: <int> | default = 1000 ]
  # Minimum number of shards, i.e. amount of concurrency.

@@ -1298,8 +1305,6 @@ queue_config:
  [ max_samples_per_send: <int> | default = 100 ]
  # Maximum time a sample will wait in buffer.
  [ batch_send_deadline: <duration> | default = 5s ]
-  # Maximum number of times to retry a batch on recoverable errors.
-  [ max_retries: <int> | default = 3 ]
  # Initial retry delay. Gets doubled for every retry.
  [ min_backoff: <duration> | default = 30ms ]
  # Maximum retry delay.
@@ -56,6 +56,7 @@ If functions are used in a pipeline, the pipeline value is passed as the last argument.

| humanize | number | string | Converts a number to a more readable format, using [metric prefixes](https://en.wikipedia.org/wiki/Metric_prefix). |
| humanize1024 | number | string | Like `humanize`, but uses 1024 as the base rather than 1000. |
| humanizeDuration | number | string | Converts a duration in seconds to a more readable format. |
+| humanizePercentage | number | string | Converts a ratio value to a fraction of 100. |
| humanizeTimestamp | number | string | Converts a Unix timestamp in seconds to a more readable format. |

Humanizing functions are intended to produce reasonable output for consumption
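For intuition about the new `humanizePercentage` row: it renders a 0–1 ratio as a percentage string. A standalone Go sketch of roughly equivalent formatting (the exact precision Prometheus uses may differ; `%.4g` here is an assumption):

```go
package main

import "fmt"

// humanizePercentage-style formatting: a ratio such as 0.1234567 becomes "12.35%".
func humanizePercentage(ratio float64) string {
	return fmt.Sprintf("%.4g%%", ratio*100)
}

func main() {
	fmt.Println(humanizePercentage(0.1234567)) // 12.35%
	fmt.Println(humanizePercentage(1))         // 100%
}
```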
@@ -53,6 +53,10 @@ alert_rule_test:

# Unit tests for PromQL expressions.
promql_expr_test:
  [ - <promql_test_case> ]

+# External labels accessible to the alert template.
+external_labels:
+  [ <labelname>: <string> ... ]
```

### `<series>`
@@ -20,7 +20,7 @@ the respective repository.

All Prometheus services are available as Docker images on
[Quay.io](https://quay.io/repository/prometheus/prometheus) or
-[Docker Hub](https://hub.docker.com/u/prom/).
+[Docker Hub](https://hub.docker.com/r/prom/prometheus/).

Running Prometheus on Docker is as simple as `docker run -p 9090:9090
prom/prometheus`. This starts Prometheus with a sample
@@ -0,0 +1,50 @@
---
title: Management API
sort_rank: 7
---

# Management API

Prometheus provides a set of management APIs to ease automation and integration.


### Health check

```
GET /-/healthy
```

This endpoint always returns 200 and should be used to check Prometheus health.


### Readiness check

```
GET /-/ready
```

This endpoint returns 200 when Prometheus is ready to serve traffic (i.e. respond to queries).


### Reload

```
PUT /-/reload
POST /-/reload
```

This endpoint triggers a reload of the Prometheus configuration and rule files. It's disabled by default and can be enabled via the `--web.enable-lifecycle` flag.

An alternative way to trigger a configuration reload is by sending a `SIGHUP` to the Prometheus process.


### Quit

```
PUT /-/quit
POST /-/quit
```

This endpoint triggers a graceful shutdown of Prometheus. It's disabled by default and can be enabled via the `--web.enable-lifecycle` flag.

An alternative way to trigger a graceful shutdown is by sending a `SIGTERM` to the Prometheus process.
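As an illustration of driving these endpoints programmatically, a minimal Go sketch (it assumes a Prometheus listening on localhost:9090 that was started with `--web.enable-lifecycle`; the address is a placeholder):

```go
package main

import (
	"fmt"
	"log"
	"net/http"
)

func main() {
	base := "http://localhost:9090" // placeholder address

	// Health and readiness are plain GETs that return 200.
	for _, path := range []string{"/-/healthy", "/-/ready"} {
		resp, err := http.Get(base + path)
		if err != nil {
			log.Fatal(err)
		}
		resp.Body.Close()
		fmt.Println(path, "->", resp.StatusCode)
	}

	// Reload requires --web.enable-lifecycle; POST (or PUT) triggers it.
	resp, err := http.Post(base+"/-/reload", "", nil)
	if err != nil {
		log.Fatal(err)
	}
	resp.Body.Close()
	fmt.Println("/-/reload ->", resp.StatusCode)
}
```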
@@ -1,6 +1,6 @@
---
title: Migration
-sort_rank: 7
+sort_rank: 8
---

# Prometheus 2.0 migration guide

@@ -1,6 +1,6 @@
---
title: API Stability
-sort_rank: 8
+sort_rank: 9
---

# API Stability Guarantees
@@ -0,0 +1,4 @@
*.yaml
dashboards_out
vendor
jsonnetfile.lock.json
@@ -0,0 +1,25 @@
JSONNET_FMT := jsonnetfmt -n 2 --max-blank-lines 2 --string-style s --comment-style s

all: fmt prometheus_alerts.yaml dashboards_out lint

fmt:
	find . -name 'vendor' -prune -o -name '*.libsonnet' -print -o -name '*.jsonnet' -print | \
		xargs -n 1 -- $(JSONNET_FMT) -i

prometheus_alerts.yaml: mixin.libsonnet config.libsonnet alerts.libsonnet
	jsonnet -S alerts.jsonnet > $@

dashboards_out: mixin.libsonnet config.libsonnet dashboards.libsonnet
	@mkdir -p dashboards_out
	jsonnet -J vendor -m dashboards_out dashboards.jsonnet

lint: prometheus_alerts.yaml
	find . -name 'vendor' -prune -o -name '*.libsonnet' -print -o -name '*.jsonnet' -print | \
		while read f; do \
			$(JSONNET_FMT) "$$f" | diff -u "$$f" -; \
		done

	promtool check rules prometheus_alerts.yaml

clean:
	rm -rf dashboards_out prometheus_alerts.yaml
@@ -0,0 +1,36 @@
# Prometheus Mixin

_This is work in progress. We aim for it to become a good role model for alerts
and dashboards eventually, but it is not quite there yet._

The Prometheus Mixin is a set of configurable, reusable, and extensible alerts
and dashboards for Prometheus.

To use them, you need to have `jsonnet` (v0.13+) and `jb` installed. If you
have a working Go development environment, it's easiest to run the following:
```bash
$ go get github.com/google/go-jsonnet/cmd/jsonnet
$ go get github.com/jsonnet-bundler/jsonnet-bundler/cmd/jb
```

_Note: The make targets `lint` and `fmt` need the `jsonnetfmt` binary, which is
currently not included in the Go implementation of `jsonnet`. For the time
being, you have to install the [C++ version of
jsonnetfmt](https://github.com/google/jsonnet) if you want to use `make lint`
or `make fmt`._

Next, install the dependencies by running the following command in this
directory:
```bash
$ jb install
```

You can then build a `prometheus_alerts.yaml` with the alerts and a directory
`dashboards_out` with the Grafana dashboard JSON files:
```bash
$ make prometheus_alerts.yaml
$ make dashboards_out
```

For more advanced uses of mixins, see https://github.com/monitoring-mixins/docs.
@@ -0,0 +1 @@
std.manifestYamlDoc((import 'mixin.libsonnet').prometheusAlerts)
@@ -0,0 +1,260 @@
{
  prometheusAlerts+:: {
    groups+: [
      {
        name: 'prometheus',
        rules: [
          {
            alert: 'PrometheusBadConfig',
            expr: |||
              # Without max_over_time, failed scrapes could create false negatives, see
              # https://www.robustperception.io/alerting-on-gauges-in-prometheus-2-0 for details.
              max_over_time(prometheus_config_last_reload_successful{%(prometheusSelector)s}[5m]) == 0
            ||| % $._config,
            'for': '10m',
            labels: {
              severity: 'critical',
            },
            annotations: {
              summary: 'Failed Prometheus configuration reload.',
              description: 'Prometheus %(prometheusName)s has failed to reload its configuration.' % $._config,
            },
          },
          {
            alert: 'PrometheusNotificationQueueRunningFull',
            expr: |||
              # Without min_over_time, failed scrapes could create false negatives, see
              # https://www.robustperception.io/alerting-on-gauges-in-prometheus-2-0 for details.
              (
                predict_linear(prometheus_notifications_queue_length{%(prometheusSelector)s}[5m], 60 * 30)
                >
                min_over_time(prometheus_notifications_queue_capacity{%(prometheusSelector)s}[5m])
              )
            ||| % $._config,
            'for': '15m',
            labels: {
              severity: 'warning',
            },
            annotations: {
              summary: 'Prometheus alert notification queue predicted to run full in less than 30m.',
              description: 'Alert notification queue of Prometheus %(prometheusName)s is running full.' % $._config,
            },
          },
          {
            alert: 'PrometheusErrorSendingAlertsToSomeAlertmanagers',
            expr: |||
              (
                rate(prometheus_notifications_errors_total{%(prometheusSelector)s}[5m])
                /
                rate(prometheus_notifications_sent_total{%(prometheusSelector)s}[5m])
              )
              * 100
              > 1
            ||| % $._config,
            'for': '15m',
            labels: {
              severity: 'warning',
            },
            annotations: {
              summary: 'Prometheus has encountered more than 1% errors sending alerts to a specific Alertmanager.',
              description: '{{ printf "%%.1f" $value }}%% errors while sending alerts from Prometheus %(prometheusName)s to Alertmanager {{$labels.alertmanager}}.' % $._config,
            },
          },
          {
            alert: 'PrometheusErrorSendingAlertsToAnyAlertmanager',
            expr: |||
              min without(alertmanager) (
                rate(prometheus_notifications_errors_total{%(prometheusSelector)s}[5m])
                /
                rate(prometheus_notifications_sent_total{%(prometheusSelector)s}[5m])
              )
              * 100
              > 3
            ||| % $._config,
            'for': '15m',
            labels: {
              severity: 'critical',
            },
            annotations: {
              summary: 'Prometheus encounters more than 3% errors sending alerts to any Alertmanager.',
              description: '{{ printf "%%.1f" $value }}%% minimum errors while sending alerts from Prometheus %(prometheusName)s to any Alertmanager.' % $._config,
            },
          },
          {
            alert: 'PrometheusNotConnectedToAlertmanagers',
            expr: |||
              # Without max_over_time, failed scrapes could create false negatives, see
              # https://www.robustperception.io/alerting-on-gauges-in-prometheus-2-0 for details.
              max_over_time(prometheus_notifications_alertmanagers_discovered{%(prometheusSelector)s}[5m]) < 1
            ||| % $._config,
            'for': '10m',
            labels: {
              severity: 'warning',
            },
            annotations: {
              summary: 'Prometheus is not connected to any Alertmanagers.',
              description: 'Prometheus %(prometheusName)s is not connected to any Alertmanagers.' % $._config,
            },
          },
          {
            alert: 'PrometheusTSDBReloadsFailing',
            expr: |||
              increase(prometheus_tsdb_reloads_failures_total{%(prometheusSelector)s}[3h]) > 0
            ||| % $._config,
            'for': '4h',
            labels: {
              severity: 'warning',
            },
            annotations: {
              summary: 'Prometheus has issues reloading blocks from disk.',
              description: 'Prometheus %(prometheusName)s has detected {{$value | humanize}} reload failures over the last 3h.' % $._config,
            },
          },
          {
            alert: 'PrometheusTSDBCompactionsFailing',
            expr: |||
              increase(prometheus_tsdb_compactions_failed_total{%(prometheusSelector)s}[3h]) > 0
            ||| % $._config,
            'for': '4h',
            labels: {
              severity: 'warning',
            },
            annotations: {
              summary: 'Prometheus has issues compacting blocks.',
              description: 'Prometheus %(prometheusName)s has detected {{$value | humanize}} compaction failures over the last 3h.' % $._config,
            },
          },
          {
            alert: 'PrometheusTSDBWALCorruptions',
            expr: |||
              increase(tsdb_wal_corruptions_total{%(prometheusSelector)s}[3h]) > 0
            ||| % $._config,
            'for': '4h',
            labels: {
              severity: 'warning',
            },
            annotations: {
              summary: 'Prometheus is detecting WAL corruptions.',
              description: 'Prometheus %(prometheusName)s has detected {{$value | humanize}} corruptions of the write-ahead log (WAL) over the last 3h.' % $._config,
            },
          },
          {
            alert: 'PrometheusNotIngestingSamples',
            expr: |||
              rate(prometheus_tsdb_head_samples_appended_total{%(prometheusSelector)s}[5m]) <= 0
            ||| % $._config,
            'for': '10m',
            labels: {
              severity: 'warning',
            },
            annotations: {
              summary: 'Prometheus is not ingesting samples.',
              description: 'Prometheus %(prometheusName)s is not ingesting samples.' % $._config,
            },
          },
          {
            alert: 'PrometheusDuplicateTimestamps',
            expr: |||
              rate(prometheus_target_scrapes_sample_duplicate_timestamp_total{%(prometheusSelector)s}[5m]) > 0
            ||| % $._config,
            'for': '10m',
            labels: {
              severity: 'warning',
            },
            annotations: {
              summary: 'Prometheus is dropping samples with duplicate timestamps.',
              description: 'Prometheus %(prometheusName)s is dropping {{$value | humanize}} samples/s with different values but duplicated timestamp.' % $._config,
            },
          },
          {
            alert: 'PrometheusOutOfOrderTimestamps',
            expr: |||
              rate(prometheus_target_scrapes_sample_out_of_order_total{%(prometheusSelector)s}[5m]) > 0
            ||| % $._config,
            'for': '10m',
            labels: {
              severity: 'warning',
            },
            annotations: {
              summary: 'Prometheus drops samples with out-of-order timestamps.',
              description: 'Prometheus %(prometheusName)s is dropping {{$value | humanize}} samples/s with timestamps arriving out of order.' % $._config,
            },
          },
          {
            alert: 'PrometheusRemoteStorageFailures',
            expr: |||
              (
                rate(prometheus_remote_storage_failed_samples_total{%(prometheusSelector)s}[5m])
                /
                (
                  rate(prometheus_remote_storage_failed_samples_total{%(prometheusSelector)s}[5m])
                  +
                  rate(prometheus_remote_storage_succeeded_samples_total{%(prometheusSelector)s}[5m])
                )
              )
              * 100
              > 1
            ||| % $._config,
            'for': '15m',
            labels: {
              severity: 'critical',
            },
            annotations: {
              summary: 'Prometheus fails to send samples to remote storage.',
              description: 'Prometheus %(prometheusName)s failed to send {{ printf "%%.1f" $value }}%% of the samples to queue {{$labels.queue}}.' % $._config,
            },
          },
          {
            alert: 'PrometheusRemoteWriteBehind',
            expr: |||
              # Without max_over_time, failed scrapes could create false negatives, see
              # https://www.robustperception.io/alerting-on-gauges-in-prometheus-2-0 for details.
              (
                max_over_time(prometheus_remote_storage_highest_timestamp_in_seconds{%(prometheusSelector)s}[5m])
                - on(job, instance) group_right
                max_over_time(prometheus_remote_storage_queue_highest_sent_timestamp_seconds{%(prometheusSelector)s}[5m])
              )
              > 120
            ||| % $._config,
            'for': '15m',
            labels: {
              severity: 'critical',
            },
            annotations: {
              summary: 'Prometheus remote write is behind.',
              description: 'Prometheus %(prometheusName)s remote write is {{ printf "%%.1f" $value }}s behind for queue {{$labels.queue}}.' % $._config,
            },
          },
          {
            alert: 'PrometheusRuleFailures',
            expr: |||
              increase(prometheus_rule_evaluation_failures_total{%(prometheusSelector)s}[5m]) > 0
            ||| % $._config,
            'for': '15m',
            labels: {
              severity: 'critical',
            },
            annotations: {
              summary: 'Prometheus is failing rule evaluations.',
              description: 'Prometheus %(prometheusName)s has failed to evaluate {{ printf "%%.0f" $value }} rules in the last 5m.' % $._config,
            },
          },
          {
            alert: 'PrometheusMissingRuleEvaluations',
            expr: |||
              increase(prometheus_rule_group_iterations_missed_total{%(prometheusSelector)s}[5m]) > 0
            ||| % $._config,
            'for': '15m',
            labels: {
              severity: 'warning',
            },
            annotations: {
              summary: 'Prometheus is missing rule evaluations due to slow rule group evaluation.',
              description: 'Prometheus %(prometheusName)s has missed {{ printf "%%.0f" $value }} rule group evaluations in the last 5m.' % $._config,
            },
          },
        ],
      },
    ],
  },
}
@@ -0,0 +1,16 @@
{
  _config+:: {
    // prometheusSelector is inserted as part of the label selector in
    // PromQL queries to identify metrics collected from Prometheus
    // servers.
    prometheusSelector: 'job="prometheus"',

    // prometheusName is inserted into annotations to name the Prometheus
    // instance affected by the alert.
    prometheusName: '{{$labels.instance}}',
    // If you run Prometheus on Kubernetes with the Prometheus
    // Operator, you can make use of the configured target labels for
    // nicer naming:
    // prometheusNameTemplate: '{{$labels.namespace}}/{{$labels.pod}}'
  },
}
@@ -0,0 +1,6 @@
local dashboards = (import 'mixin.libsonnet').dashboards;

{
  [name]: dashboards[name]
  for name in std.objectFields(dashboards)
}
@@ -0,0 +1,148 @@
local g = import 'grafana-builder/grafana.libsonnet';

{
  dashboards+: {
    'prometheus.json':
      g.dashboard('Prometheus')
      .addMultiTemplate('job', 'prometheus_build_info', 'job')
      .addMultiTemplate('instance', 'prometheus_build_info', 'instance')
      .addRow(
        g.row('Prometheus Stats')
        .addPanel(
          g.panel('Prometheus Stats') +
          g.tablePanel([
            'count by (job, instance, version) (prometheus_build_info{job=~"$job", instance=~"$instance"})',
            'max by (job, instance) (time() - process_start_time_seconds{job=~"$job", instance=~"$instance"})',
          ], {
            job: { alias: 'Job' },
            instance: { alias: 'Instance' },
            version: { alias: 'Version' },
            'Value #A': { alias: 'Count', type: 'hidden' },
            'Value #B': { alias: 'Uptime' },
          })
        )
      )
      .addRow(
        g.row('Discovery')
        .addPanel(
          g.panel('Target Sync') +
          g.queryPanel('sum(rate(prometheus_target_sync_length_seconds_sum{job=~"$job",instance=~"$instance"}[5m])) by (scrape_job) * 1e3', '{{scrape_job}}') +
          { yaxes: g.yaxes('ms') }
        )
        .addPanel(
          g.panel('Targets') +
          g.queryPanel('sum(prometheus_sd_discovered_targets{job=~"$job",instance=~"$instance"})', 'Targets') +
          g.stack
        )
      )
      .addRow(
        g.row('Retrieval')
        .addPanel(
          g.panel('Average Scrape Interval Duration') +
          g.queryPanel('rate(prometheus_target_interval_length_seconds_sum{job=~"$job",instance=~"$instance"}[5m]) / rate(prometheus_target_interval_length_seconds_count{job=~"$job",instance=~"$instance"}[5m]) * 1e3', '{{interval}} configured') +
          { yaxes: g.yaxes('ms') }
        )
        .addPanel(
          g.panel('Scrape failures') +
          g.queryPanel([
            'sum by (job) (rate(prometheus_target_scrapes_exceeded_sample_limit_total[1m]))',
            'sum by (job) (rate(prometheus_target_scrapes_sample_duplicate_timestamp_total[1m]))',
            'sum by (job) (rate(prometheus_target_scrapes_sample_out_of_bounds_total[1m]))',
            'sum by (job) (rate(prometheus_target_scrapes_sample_out_of_order_total[1m]))',
          ], [
            'exceeded sample limit: {{job}}',
            'duplicate timestamp: {{job}}',
            'out of bounds: {{job}}',
            'out of order: {{job}}',
          ]) +
          g.stack
        )
        .addPanel(
          g.panel('Appended Samples') +
          g.queryPanel('rate(prometheus_tsdb_head_samples_appended_total{job=~"$job",instance=~"$instance"}[5m])', '{{job}} {{instance}}') +
          g.stack
        )
      )
      .addRow(
        g.row('Storage')
        .addPanel(
          g.panel('Head Series') +
          g.queryPanel('prometheus_tsdb_head_series{job=~"$job",instance=~"$instance"}', '{{job}} {{instance}} head series') +
          g.stack
        )
        .addPanel(
          g.panel('Head Chunks') +
          g.queryPanel('prometheus_tsdb_head_chunks{job=~"$job",instance=~"$instance"}', '{{job}} {{instance}} head chunks') +
          g.stack
        )
      )
      .addRow(
        g.row('Query')
        .addPanel(
          g.panel('Query Rate') +
          g.queryPanel('rate(prometheus_engine_query_duration_seconds_count{job=~"$job",instance=~"$instance",slice="inner_eval"}[5m])', '{{job}} {{instance}}') +
          g.stack,
        )
        .addPanel(
          g.panel('Stage Duration') +
          g.queryPanel('max by (slice) (prometheus_engine_query_duration_seconds{quantile="0.9",job=~"$job",instance=~"$instance"}) * 1e3', '{{slice}}') +
          { yaxes: g.yaxes('ms') } +
          g.stack,
        )
      ),
    // Remote write specific dashboard.
    'prometheus-remote-write.json':
      g.dashboard('Prometheus Remote Write')
      .addMultiTemplate('instance', 'prometheus_build_info', 'instance')
      .addMultiTemplate('cluster', 'kube_pod_container_info{image=~".*prometheus.*"}', 'cluster')
      .addRow(
        g.row('Timestamps')
        .addPanel(
          g.panel('Highest Timestamp In vs. Highest Timestamp Sent') +
          g.queryPanel('prometheus_remote_storage_highest_timestamp_in_seconds{cluster=~"$cluster", instance=~"$instance"} - ignoring(queue) group_right(instance) prometheus_remote_storage_queue_highest_sent_timestamp_seconds{cluster=~"$cluster", instance=~"$instance"}', '{{cluster}}:{{instance}}-{{queue}}') +
          { yaxes: g.yaxes('s') }
        )
        .addPanel(
          g.panel('Rate[5m]') +
          g.queryPanel('rate(prometheus_remote_storage_highest_timestamp_in_seconds{cluster=~"$cluster", instance=~"$instance"}[5m]) - ignoring (queue) group_right(instance) rate(prometheus_remote_storage_queue_highest_sent_timestamp_seconds{cluster=~"$cluster", instance=~"$instance"}[5m])', '{{cluster}}:{{instance}}-{{queue}}')
        )
      )
      .addRow(
        g.row('Samples')
        .addPanel(
          g.panel('Rate, in vs. succeeded or dropped [5m]') +
          g.queryPanel('rate(prometheus_remote_storage_samples_in_total{cluster=~"$cluster", instance=~"$instance"}[5m]) - ignoring(queue) group_right(instance) rate(prometheus_remote_storage_succeeded_samples_total{cluster=~"$cluster", instance=~"$instance"}[5m]) - rate(prometheus_remote_storage_dropped_samples_total{cluster=~"$cluster", instance=~"$instance"}[5m])', '{{cluster}}:{{instance}}-{{queue}}')
        )
      )
      .addRow(
        g.row('Shards')
        .addPanel(
          g.panel('Num. Shards') +
          g.queryPanel('prometheus_remote_storage_shards{cluster=~"$cluster", instance=~"$instance"}', '{{cluster}}:{{instance}}-{{queue}}')
        )
        .addPanel(
          g.panel('Capacity') +
          g.queryPanel('prometheus_remote_storage_shard_capacity{cluster=~"$cluster", instance=~"$instance"}', '{{cluster}}:{{instance}}-{{queue}}')
        )
      )
      .addRow(
        g.row('Misc Rates.')
        .addPanel(
          g.panel('Dropped Samples') +
          g.queryPanel('rate(prometheus_remote_storage_dropped_samples_total{cluster=~"$cluster", instance=~"$instance"}[5m])', '{{cluster}}:{{instance}}-{{queue}}')
        )
        .addPanel(
          g.panel('Failed Samples') +
          g.queryPanel('rate(prometheus_remote_storage_failed_samples_total{cluster=~"$cluster", instance=~"$instance"}[5m])', '{{cluster}}:{{instance}}-{{queue}}')
        )
        .addPanel(
          g.panel('Retried Samples') +
          g.queryPanel('rate(prometheus_remote_storage_retried_samples_total{cluster=~"$cluster", instance=~"$instance"}[5m])', '{{cluster}}:{{instance}}-{{queue}}')
        )
        .addPanel(
          g.panel('Enqueue Retries') +
          g.queryPanel('rate(prometheus_remote_storage_enqueue_retries_total{cluster=~"$cluster", instance=~"$instance"}[5m])', '{{cluster}}:{{instance}}-{{queue}}')
        )
      ),
  },
}
@@ -0,0 +1,14 @@
{
  "dependencies": [
    {
      "name": "grafana-builder",
      "source": {
        "git": {
          "remote": "https://github.com/grafana/jsonnet-libs",
          "subdir": "grafana-builder"
        }
      },
      "version": "master"
    }
  ]
}
@@ -0,0 +1,3 @@
(import 'config.libsonnet') +
(import 'dashboards.libsonnet') +
(import 'alerts.libsonnet')
go.mod
@@ -5,31 +5,14 @@ require (
	github.com/Azure/azure-sdk-for-go v23.2.0+incompatible
	github.com/Azure/go-autorest v11.2.8+incompatible
	github.com/OneOfOne/xxhash v1.2.5 // indirect
	github.com/StackExchange/wmi v0.0.0-20180725035823-b12b22c5341f // indirect
	github.com/VividCortex/ewma v1.1.1 // indirect
	github.com/alecthomas/units v0.0.0-20151022065526-2efee857e7cf
	github.com/aws/aws-sdk-go v1.15.24
	github.com/biogo/store v0.0.0-20160505134755-913427a1d5e8 // indirect
	github.com/cenk/backoff v2.0.0+incompatible // indirect
	github.com/certifi/gocertifi v0.0.0-20180905225744-ee1a9a0726d2 // indirect
	github.com/cespare/xxhash v1.1.0
	github.com/cockroachdb/apd v1.1.0 // indirect
	github.com/cockroachdb/cmux v0.0.0-20170110192607-30d10be49292 // indirect
	github.com/cockroachdb/cockroach v0.0.0-20170608034007-84bc9597164f
	github.com/cockroachdb/cockroach-go v0.0.0-20181001143604-e0a95dfd547c // indirect
	github.com/codahale/hdrhistogram v0.0.0-20161010025455-3a0bb77429bd // indirect
	github.com/coreos/etcd v3.3.12+incompatible // indirect
	github.com/dgrijalva/jwt-go v3.2.0+incompatible // indirect
	github.com/dustin/go-humanize v1.0.0 // indirect
	github.com/elastic/gosigar v0.9.0 // indirect
	github.com/elazarl/go-bindata-assetfs v1.0.0 // indirect
	github.com/evanphx/json-patch v4.1.0+incompatible // indirect
	github.com/facebookgo/clock v0.0.0-20150410010913-600d898af40a // indirect
	github.com/getsentry/raven-go v0.1.2 // indirect
	github.com/go-kit/kit v0.8.0
	github.com/go-logfmt/logfmt v0.4.0
	github.com/go-ole/go-ole v1.2.4 // indirect
	github.com/go-sql-driver/mysql v1.4.1 // indirect
	github.com/go-openapi/strfmt v0.19.0
	github.com/gogo/protobuf v1.2.1
	github.com/golang/groupcache v0.0.0-20180924190550-6f2cf27854a4 // indirect
	github.com/golang/snappy v0.0.1

@@ -39,47 +22,27 @@ require (
	github.com/gophercloud/gophercloud v0.0.0-20190301152420-fca40860790e
	github.com/gopherjs/gopherjs v0.0.0-20181017120253-0766667cb4d1 // indirect
	github.com/grpc-ecosystem/grpc-gateway v1.8.5
	github.com/grpc-ecosystem/grpc-opentracing v0.0.0-20180507213350-8e809c8a8645 // indirect
	github.com/hashicorp/consul/api v1.1.0
	github.com/hashicorp/go-msgpack v0.5.4 // indirect
	github.com/hashicorp/go-sockaddr v1.0.2 // indirect
	github.com/hashicorp/golang-lru v0.5.1 // indirect
	github.com/influxdata/influxdb v0.0.0-20170331210902-15e594fc09f1
	github.com/jackc/fake v0.0.0-20150926172116-812a484cc733 // indirect
	github.com/jackc/pgx v3.2.0+incompatible // indirect
	github.com/jmespath/go-jmespath v0.0.0-20160803190731-bd40a432e4c7 // indirect
-	github.com/json-iterator/go v1.1.5
+	github.com/json-iterator/go v1.1.6
	github.com/jtolds/gls v4.2.1+incompatible // indirect
	github.com/knz/strtime v0.0.0-20181018220328-af2256ee352c // indirect
	github.com/lib/pq v1.0.0 // indirect
	github.com/lightstep/lightstep-tracer-go v0.15.6 // indirect
	github.com/mattn/go-runewidth v0.0.4 // indirect
	github.com/miekg/dns v1.1.10
	github.com/mitchellh/reflectwalk v1.0.1 // indirect
	github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd // indirect
	github.com/modern-go/reflect2 v1.0.1 // indirect
	github.com/montanaflynn/stats v0.0.0-20180911141734-db72e6cae808 // indirect
	github.com/mwitkow/go-conntrack v0.0.0-20161129095857-cc309e4a2223
	github.com/oklog/run v1.0.0
	github.com/olekukonko/tablewriter v0.0.1 // indirect
	github.com/opentracing-contrib/go-stdlib v0.0.0-20170113013457-1de4cc2120e7
	github.com/opentracing/basictracer-go v1.0.0 // indirect
	github.com/opentracing/opentracing-go v1.0.2
	github.com/peterbourgon/g2s v0.0.0-20170223122336-d4e7ad98afea // indirect
	github.com/petermattis/goid v0.0.0-20170504144140-0ded85884ba5 // indirect
	github.com/pkg/errors v0.8.1
-	github.com/prometheus/client_golang v0.9.3
+	github.com/prometheus/alertmanager v0.17.0
+	github.com/prometheus/client_golang v1.0.0
	github.com/prometheus/client_model v0.0.0-20190129233127-fd36f4220a90
-	github.com/prometheus/common v0.4.0
-	github.com/prometheus/tsdb v0.8.0
-	github.com/rlmcpherson/s3gof3r v0.5.0 // indirect
-	github.com/rubyist/circuitbreaker v2.2.1+incompatible // indirect
+	github.com/prometheus/common v0.4.1
+	github.com/prometheus/tsdb v0.9.1
	github.com/samuel/go-zookeeper v0.0.0-20161028232340-1d7be4effb13
	github.com/sasha-s/go-deadlock v0.0.0-20161201235124-341000892f3d // indirect
	github.com/satori/go.uuid v1.2.0 // indirect
	github.com/shopspring/decimal v0.0.0-20180709203117-cd690d0c9e24 // indirect
	github.com/shurcooL/httpfs v0.0.0-20171119174359-809beceb2371
-	github.com/shurcooL/vfsgen v0.0.0-20180711163814-62bca832be04
+	github.com/shurcooL/vfsgen v0.0.0-20180825020608-02ddb050ef6b
	github.com/smartystreets/assertions v0.0.0-20180927180507-b2de0cb4f26d // indirect
	github.com/smartystreets/goconvey v0.0.0-20180222194500-ef6db91d284a // indirect
	github.com/soheilhy/cmux v0.1.4
165
go.sum
165
go.sum
|
@@ -13,12 +13,12 @@ github.com/OneOfOne/xxhash v1.2.2 h1:KMrpdQIwFcEqXDklaen+P1axHaj9BSKzvpUUfnHldSE
github.com/OneOfOne/xxhash v1.2.2/go.mod h1:HSdplMjZKSmBqAxg5vPj2TmRDmfkzw+cTzAElWljhcU=
github.com/OneOfOne/xxhash v1.2.5 h1:zl/OfRA6nftbBK9qTohYBJ5xvw6C/oNKizR7cZGl3cI=
github.com/OneOfOne/xxhash v1.2.5/go.mod h1:eZbhyaAYD41SGSSsnmcpxVoRiQ/MPUTjUdIIOT9Um7Q=
github.com/PuerkitoBio/purell v1.1.0 h1:rmGxhojJlM0tuKtfdvliR84CFHljx9ag64t2xmVkjK4=
github.com/PuerkitoBio/purell v1.1.0/go.mod h1:c11w/QuzBsJSee3cPx9rAFu61PvFxuPbtSwDGJws/X0=
github.com/PuerkitoBio/urlesc v0.0.0-20170810143723-de5bf2ad4578 h1:d+Bc7a5rLufV/sSk/8dngufqelfh6jnri85riMAaF/M=
github.com/PuerkitoBio/urlesc v0.0.0-20170810143723-de5bf2ad4578/go.mod h1:uGdkoq3SwY9Y+13GIhn11/XLaGBb4BfwItxLd5jeuXE=
github.com/Shopify/sarama v1.19.0/go.mod h1:FVkBWblsNy7DGZRfXLU0O9RCGt5g3g3yEuWXgklEdEo=
github.com/Shopify/toxiproxy v2.1.4+incompatible/go.mod h1:OXgGpZ6Cli1/URJOF1DMxUHB2q5Ap20/P/eIdh4G0pI=
github.com/StackExchange/wmi v0.0.0-20180725035823-b12b22c5341f h1:5ZfJxyXo8KyX8DgGXC5B7ILL8y51fci/qYz2B4j8iLY=
github.com/StackExchange/wmi v0.0.0-20180725035823-b12b22c5341f/go.mod h1:3eOhrUMpNV+6aFIbp5/iudMxNCF27Vw2OZgy4xEx0Fg=
github.com/VividCortex/ewma v1.1.1 h1:MnEK4VOv6n0RSY4vtRe3h11qjxL3+t0B8yOL8iMXdcM=
github.com/VividCortex/ewma v1.1.1/go.mod h1:2Tkkvm3sRDVXaiyucHiACn4cqf7DpdyLvmxzcbUokwA=
github.com/alecthomas/template v0.0.0-20160405071501-a0175ee3bccc h1:cAKDfWh5VpdgMhJosfJnn5/FoN2SRZ4p7fJNX58YPaU=
github.com/alecthomas/template v0.0.0-20160405071501-a0175ee3bccc/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc=
github.com/alecthomas/units v0.0.0-20151022065526-2efee857e7cf h1:qet1QNfXsQxTZqLG4oE62mJzwPIB8+Tee4RNCL9ulrY=
@@ -28,6 +28,8 @@ github.com/armon/circbuf v0.0.0-20150827004946-bbbad097214e/go.mod h1:3U/XgcO3hC
github.com/armon/go-metrics v0.0.0-20180917152333-f0300d1749da h1:8GUt8eRujhVEGZFFEjBj46YV4rDjvGrNxb0KMWYkL2I=
github.com/armon/go-metrics v0.0.0-20180917152333-f0300d1749da/go.mod h1:Q73ZrmVTwzkszR9V5SSuryQ31EELlFMUz1kKyl939pY=
github.com/armon/go-radix v0.0.0-20180808171621-7fddfc383310/go.mod h1:ufUuZ+zHj4x4TnLV4JWEpy2hxWSpsRywHrMgIH9cCH8=
github.com/asaskevich/govalidator v0.0.0-20180720115003-f9ffefc3facf h1:eg0MeVzsP1G42dRafH3vf+al2vQIJU0YHX+1Tw87oco=
github.com/asaskevich/govalidator v0.0.0-20180720115003-f9ffefc3facf/go.mod h1:lB+ZfQJz7igIIfQNfa7Ml4HSf2uFQQRzpGGRXenZAgY=
github.com/aws/aws-sdk-go v1.15.24 h1:xLAdTA/ore6xdPAljzZRed7IGqQgC+nY+ERS5vaj4Ro=
github.com/aws/aws-sdk-go v1.15.24/go.mod h1:mFuSZ37Z9YOHbQEwBWztmVzqXrEkub65tZoCYDt7FT0=
github.com/beorn7/perks v0.0.0-20180321164747-3a771d992973 h1:xJ4a3vCFaGF/jqvzLMYoU8P317H5OQ+Via4RmuPwCS0=
@@ -35,55 +37,33 @@ github.com/beorn7/perks v0.0.0-20180321164747-3a771d992973/go.mod h1:Dwedo/Wpr24
github.com/beorn7/perks v1.0.0 h1:HWo1m869IqiPhD389kmkxeTalrjNbbJTC8LXupb+sl0=
github.com/beorn7/perks v1.0.0/go.mod h1:KWe93zE9D1o94FZ5RNwFwVgaQK1VOXiVxmqh+CedLV8=
github.com/bgentry/speakeasy v0.1.0/go.mod h1:+zsyZBPWlz7T6j88CTgSN5bM796AkVf0kBD4zp0CCIs=
github.com/biogo/store v0.0.0-20160505134755-913427a1d5e8 h1:tYoz1OeRpx3dJZlh9T4dQt4kAndcmpl+VNdzbSgFC/0=
github.com/biogo/store v0.0.0-20160505134755-913427a1d5e8/go.mod h1:Iev9Q3MErcn+w3UOJD/DkEzllvugfdx7bGcMOFhvr/4=
github.com/cenk/backoff v2.0.0+incompatible h1:7vXVw3g7XE+Vnj0A9TmFGtMeP4oZQ5ZzpPvKhLFa80E=
github.com/cenk/backoff v2.0.0+incompatible/go.mod h1:7FtoeaSnHoZnmZzz47cM35Y9nSW7tNyaidugnHTaFDE=
github.com/cenkalti/backoff v0.0.0-20181003080854-62661b46c409/go.mod h1:90ReRw6GdpyfrHakVjL/QHaoyV4aDUVVkXQJJJ3NXXM=
github.com/census-instrumentation/opencensus-proto v0.2.0 h1:LzQXZOgg4CQfE6bFvXGM30YZL1WW/M337pXml+GrcZ4=
github.com/census-instrumentation/opencensus-proto v0.2.0/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU=
github.com/certifi/gocertifi v0.0.0-20180905225744-ee1a9a0726d2 h1:MmeatFT1pTPSVb4nkPmBFN/LRZ97vPjsFKsZrU3KKTs=
github.com/certifi/gocertifi v0.0.0-20180905225744-ee1a9a0726d2/go.mod h1:GJKEexRPVJrBSOjoqN5VNOIKJ5Q3RViH6eu3puDRwx4=
github.com/cespare/xxhash v0.0.0-20181017004759-096ff4a8a059/go.mod h1:XrSqR1VqqWfGrhpAt58auRo0WTKS1nRRg3ghfAqPWnc=
github.com/cespare/xxhash v1.1.0 h1:a6HrQnmkObjyL+Gs60czilIUGqrzKutQD6XZog3p+ko=
github.com/cespare/xxhash v1.1.0/go.mod h1:XrSqR1VqqWfGrhpAt58auRo0WTKS1nRRg3ghfAqPWnc=
github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw=
github.com/cockroachdb/apd v1.1.0 h1:3LFP3629v+1aKXU5Q37mxmRxX/pIu1nijXydLShEq5I=
github.com/cockroachdb/apd v1.1.0/go.mod h1:8Sl8LxpKi29FqWXR16WEFZRNSz3SoPzUzeMeY4+DwBQ=
github.com/cockroachdb/cmux v0.0.0-20170110192607-30d10be49292 h1:dzj1/xcivGjNPwwifh/dWTczkwcuqsXXFHY1X/TZMtw=
github.com/cockroachdb/cmux v0.0.0-20170110192607-30d10be49292/go.mod h1:qRiX68mZX1lGBkTWyp3CLcenw9I94W2dLeRvMzcn9N4=
github.com/cockroachdb/cockroach v0.0.0-20170608034007-84bc9597164f h1:0FHGBrsIyDci8tF7zujQkHdMTJdCTSIV9esrni2fKQI=
github.com/cockroachdb/cockroach v0.0.0-20170608034007-84bc9597164f/go.mod h1:xeT/CQ0qZHangbYbWShlCGAx31aV4AjGswDUjhKS6HQ=
github.com/cockroachdb/cockroach-go v0.0.0-20181001143604-e0a95dfd547c h1:2zRrJWIt/f9c9HhNHAgrRgq0San5gRRUJTBXLkchal0=
github.com/cockroachdb/cockroach-go v0.0.0-20181001143604-e0a95dfd547c/go.mod h1:XGLbWH/ujMcbPbhZq52Nv6UrCghb1yGn//133kEsvDk=
github.com/codahale/hdrhistogram v0.0.0-20161010025455-3a0bb77429bd h1:qMd81Ts1T2OTKmB4acZcyKaMtRnY5Y44NuXGX2GFJ1w=
github.com/codahale/hdrhistogram v0.0.0-20161010025455-3a0bb77429bd/go.mod h1:sE/e/2PUdi/liOCUjSTXgM1o87ZssimdTWN964YiIeI=
github.com/coreos/etcd v3.3.12+incompatible h1:pAWNwdf7QiT1zfaWyqCtNZQWCLByQyA3JrSQyuYAqnQ=
github.com/coreos/etcd v3.3.12+incompatible/go.mod h1:uF7uidLiAD3TWHmW31ZFd/JWoc32PjwdhPthX9715RE=
github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c=
github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
github.com/dgrijalva/jwt-go v3.2.0+incompatible h1:7qlOGliEKZXTDg6OTjfoBKDXWrumCAMpl/TFQ4/5kLM=
github.com/dgrijalva/jwt-go v3.2.0+incompatible/go.mod h1:E3ru+11k8xSBh+hMPgOLZmtrrCbhqsmaPHjLKYnJCaQ=
github.com/dgryski/go-sip13 v0.0.0-20181026042036-e10d5fee7954/go.mod h1:vAd38F8PWV+bWy6jNmig1y/TA+kYO4g3RSRF0IAv0no=
github.com/dustin/go-humanize v1.0.0 h1:VSnTsYCnlFHaM2/igO1h6X3HA71jcobQuxemgkq4zYo=
github.com/dustin/go-humanize v1.0.0/go.mod h1:HtrtbFcZ19U5GC7JDqmcUSB87Iq5E25KnS6fMYU6eOk=
github.com/docker/go-units v0.3.3/go.mod h1:fgPhTUdO+D/Jk86RDLlptpiXQzgHJF7gydDDbaIK4Dk=
github.com/eapache/go-resiliency v1.1.0/go.mod h1:kFI+JgMyC7bLPUVY133qvEBtVayf5mFgVsvEsIPBvNs=
github.com/eapache/go-xerial-snappy v0.0.0-20180814174437-776d5712da21/go.mod h1:+020luEh2TKB4/GOp8oxxtq0Daoen/Cii55CzbTV6DU=
github.com/eapache/queue v1.1.0/go.mod h1:6eCeP0CKFpHLu8blIFXhExK/dRa7WDZfr6jVFPTqq+I=
github.com/elastic/gosigar v0.9.0 h1:ehdJWCzrtTHhYDmUAO6Zpu+uez4UB/dhH0oJSQ/o1Pk=
github.com/elastic/gosigar v0.9.0/go.mod h1:cdorVVzy1fhmEqmtgqkoE3bYtCfSCkVyjTyCIo22xvs=
github.com/elazarl/go-bindata-assetfs v1.0.0 h1:G/bYguwHIzWq9ZoyUQqrjTmJbbYn3j3CKKpKinvZLFk=
github.com/elazarl/go-bindata-assetfs v1.0.0/go.mod h1:v+YaWX3bdea5J/mo8dSETolEo7R71Vk1u8bnjau5yw4=
github.com/evanphx/json-patch v4.1.0+incompatible h1:K1MDoo4AZ4wU0GIU/fPmtZg7VpzLjCxu+UwBD1FvwOc=
github.com/evanphx/json-patch v4.1.0+incompatible/go.mod h1:50XU6AFN0ol/bzJsmQLiYLvXMP4fmwYFNcr97nuDLSk=
github.com/facebookgo/clock v0.0.0-20150410010913-600d898af40a h1:yDWHCSQ40h88yih2JAcL6Ls/kVkSE8GFACTGVnMPruw=
github.com/facebookgo/clock v0.0.0-20150410010913-600d898af40a/go.mod h1:7Ga40egUymuWXxAe151lTNnCv97MddSOVsjpPPkityA=
github.com/fatih/color v1.7.0/go.mod h1:Zm6kSWBoL9eyXnKyktHP6abPY2pDugNf5KwzbycvMj4=
github.com/fsnotify/fsnotify v1.4.7 h1:IXs+QLmnXW2CcXuY+8Mzv/fWEsPGWxqefPtCP5CnV9I=
github.com/fsnotify/fsnotify v1.4.7/go.mod h1:jwhsz4b93w/PPRr/qN1Yymfu8t87LnFCMoQvtojpjFo=
github.com/getsentry/raven-go v0.1.2 h1:4V0z512S5mZXiBvmW2RbuZBSIY1sEdMNsPjpx2zwtSE=
github.com/getsentry/raven-go v0.1.2/go.mod h1:KungGk8q33+aIAZUIVWZDr2OfAEBsO49PX4NzFV5kcQ=
github.com/ghodss/yaml v1.0.0 h1:wQHKEahhL6wmXdzwWG11gIVCkOv05bNOh+Rxn0yngAk=
github.com/ghodss/yaml v1.0.0/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04=
github.com/globalsign/mgo v0.0.0-20180905125535-1ca0a4f7cbcb/go.mod h1:xkRDCp4j0OGD1HRkm4kmhM+pmpv3AKq5SU7GMg4oO/Q=
github.com/globalsign/mgo v0.0.0-20181015135952-eeefdecb41b8 h1:DujepqpGd1hyOd7aW59XpK7Qymp8iy83xq74fLr21is=
github.com/globalsign/mgo v0.0.0-20181015135952-eeefdecb41b8/go.mod h1:xkRDCp4j0OGD1HRkm4kmhM+pmpv3AKq5SU7GMg4oO/Q=
github.com/go-ini/ini v1.25.4 h1:Mujh4R/dH6YL8bxuISne3xX2+qcQ9p0IxKAP6ExWoUo=
github.com/go-ini/ini v1.25.4/go.mod h1:ByCAeIL28uOIIG0E3PJtZPDL8WnHpFKFOtgjp+3Ies8=
github.com/go-kit/kit v0.8.0 h1:Wz+5lgoB0kkuqLEc6NVmwRknTKP6dTGbSqvhZtBI/j0=
@@ -91,10 +71,37 @@ github.com/go-kit/kit v0.8.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2
github.com/go-logfmt/logfmt v0.3.0/go.mod h1:Qt1PoO58o5twSAckw1HlFXLmHsOX5/0LbT9GBnD5lWE=
github.com/go-logfmt/logfmt v0.4.0 h1:MP4Eh7ZCb31lleYCFuwm0oe4/YGak+5l1vA2NOE80nA=
github.com/go-logfmt/logfmt v0.4.0/go.mod h1:3RMwSq7FuexP4Kalkev3ejPJsZTpXXBr9+V4qmtdjCk=
github.com/go-ole/go-ole v1.2.4 h1:nNBDSCOigTSiarFpYE9J/KtEA1IOW4CNeqT9TQDqCxI=
github.com/go-ole/go-ole v1.2.4/go.mod h1:XCwSNxSkXRo4vlyPy93sltvi/qJq0jqQhjqQNIwKuxM=
github.com/go-sql-driver/mysql v1.4.1 h1:g24URVg0OFbNUTx9qqY1IRZ9D9z3iPyi5zKhQZpNwpA=
github.com/go-sql-driver/mysql v1.4.1/go.mod h1:zAC/RDZ24gD3HViQzih4MyKcchzm+sOG5ZlKdlhCg5w=
github.com/go-openapi/analysis v0.0.0-20180825180245-b006789cd277/go.mod h1:k70tL6pCuVxPJOHXQ+wIac1FUrvNkHolPie/cLEU6hI=
github.com/go-openapi/analysis v0.17.0/go.mod h1:IowGgpVeD0vNm45So8nr+IcQ3pxVtpRoBWb8PVZO0ik=
github.com/go-openapi/analysis v0.17.2 h1:eYp14J1o8TTSCzndHBtsNuckikV1PfZOSnx4BcBeu0c=
github.com/go-openapi/analysis v0.17.2/go.mod h1:IowGgpVeD0vNm45So8nr+IcQ3pxVtpRoBWb8PVZO0ik=
github.com/go-openapi/errors v0.17.0/go.mod h1:LcZQpmvG4wyF5j4IhA73wkLFQg+QJXOQHVjmcZxhka0=
github.com/go-openapi/errors v0.17.2 h1:azEQ8Fnx0jmtFF2fxsnmd6I0x6rsweUF63qqSO1NmKk=
github.com/go-openapi/errors v0.17.2/go.mod h1:LcZQpmvG4wyF5j4IhA73wkLFQg+QJXOQHVjmcZxhka0=
github.com/go-openapi/jsonpointer v0.17.0/go.mod h1:cOnomiV+CVVwFLk0A/MExoFMjwdsUdVpsRhURCKh+3M=
github.com/go-openapi/jsonpointer v0.17.2 h1:3ekBy41gar/iJi2KSh/au/PrC2vpLr85upF/UZmm3W0=
github.com/go-openapi/jsonpointer v0.17.2/go.mod h1:cOnomiV+CVVwFLk0A/MExoFMjwdsUdVpsRhURCKh+3M=
github.com/go-openapi/jsonreference v0.17.0/go.mod h1:g4xxGn04lDIRh0GJb5QlpE3HfopLOL6uZrK/VgnsK9I=
github.com/go-openapi/jsonreference v0.17.2 h1:lF3z7AH8dd0IKXc1zEBi1dj0B4XgVb5cVjn39dCK3Ls=
github.com/go-openapi/jsonreference v0.17.2/go.mod h1:g4xxGn04lDIRh0GJb5QlpE3HfopLOL6uZrK/VgnsK9I=
github.com/go-openapi/loads v0.17.0/go.mod h1:72tmFy5wsWx89uEVddd0RjRWPZm92WRLhf7AC+0+OOU=
github.com/go-openapi/loads v0.17.2 h1:tEXYu6Xc0pevpzzQx5ghrMN9F7IVpN/+u4iD3rkYE5o=
github.com/go-openapi/loads v0.17.2/go.mod h1:72tmFy5wsWx89uEVddd0RjRWPZm92WRLhf7AC+0+OOU=
github.com/go-openapi/runtime v0.0.0-20180920151709-4f900dc2ade9/go.mod h1:6v9a6LTXWQCdL8k1AO3cvqx5OtZY/Y9wKTgaoP6YRfA=
github.com/go-openapi/runtime v0.18.0 h1:ddoL4Uo/729XbNAS9UIsG7Oqa8R8l2edBe6Pq/i8AHM=
github.com/go-openapi/runtime v0.18.0/go.mod h1:uI6pHuxWYTy94zZxgcwJkUWa9wbIlhteGfloI10GD4U=
github.com/go-openapi/spec v0.17.0/go.mod h1:XkF/MOi14NmjsfZ8VtAKf8pIlbZzyoTvZsdfssdxcBI=
github.com/go-openapi/spec v0.17.2 h1:eb2NbuCnoe8cWAxhtK6CfMWUYmiFEZJ9Hx3Z2WRwJ5M=
github.com/go-openapi/spec v0.17.2/go.mod h1:XkF/MOi14NmjsfZ8VtAKf8pIlbZzyoTvZsdfssdxcBI=
github.com/go-openapi/strfmt v0.17.0/go.mod h1:P82hnJI0CXkErkXi8IKjPbNBM6lV6+5pLP5l494TcyU=
github.com/go-openapi/strfmt v0.17.2/go.mod h1:P82hnJI0CXkErkXi8IKjPbNBM6lV6+5pLP5l494TcyU=
github.com/go-openapi/strfmt v0.19.0 h1:0Dn9qy1G9+UJfRU7TR8bmdGxb4uifB7HNrJjOnV0yPk=
github.com/go-openapi/strfmt v0.19.0/go.mod h1:+uW+93UVvGGq2qGaZxdDeJqSAqBqBdl+ZPMF/cC8nDY=
github.com/go-openapi/swag v0.17.0/go.mod h1:AByQ+nYG6gQg71GINrmuDXCPWdL640yX49/kXLo40Tg=
github.com/go-openapi/swag v0.17.2 h1:K/ycE/XTUDFltNHSO32cGRUhrVGJD64o8WgAIZNyc3k=
github.com/go-openapi/swag v0.17.2/go.mod h1:AByQ+nYG6gQg71GINrmuDXCPWdL640yX49/kXLo40Tg=
github.com/go-openapi/validate v0.17.2 h1:lwFfiS4sv5DvOrsYDsYq4N7UU8ghXiYtPJ+VcQnC3Xg=
github.com/go-openapi/validate v0.17.2/go.mod h1:Uh4HdOzKt19xGIGm1qHf/ofbX1YQ4Y+MYsct2VUrAJ4=
github.com/go-stack/stack v1.8.0 h1:5SgMzNM5HxrEjV0ww2lTmX6E2Izsfxas4+YHWRs3Lsk=
github.com/go-stack/stack v1.8.0/go.mod h1:v0f6uXyyMGvRgIKkXu+yp6POWl0qKG85gN/melR3HDY=
github.com/gogo/protobuf v1.1.1/go.mod h1:r8qH/GZQm5c6nD/R0oafs1akxWv10x8SbQlK7atdtwQ=
@@ -121,6 +128,10 @@ github.com/google/gofuzz v0.0.0-20170612174753-24818f796faf h1:+RRA9JqSOZFfKrOeq
github.com/google/gofuzz v0.0.0-20170612174753-24818f796faf/go.mod h1:HP5RmnzzSNb993RKQDq4+1A4ia9nllfqcQFTQJedwGI=
github.com/google/pprof v0.0.0-20180605153948-8b03ce837f34 h1:mGdRet4qWdrDnNidFrlgpa8iNWM/RAwRDEMsLRICCac=
github.com/google/pprof v0.0.0-20180605153948-8b03ce837f34/go.mod h1:zfwlbNMJ+OItoe0UupaVj+oy1omPYYDuagoSzA8v9mc=
github.com/google/uuid v1.0.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
github.com/google/uuid v1.1.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
github.com/google/uuid v1.1.1 h1:Gkbcsh/GbpXz7lPftLA3P6TYMwjCLYm83jiFQZF/3gY=
github.com/google/uuid v1.1.1/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
github.com/googleapis/gnostic v0.2.0 h1:l6N3VoaVzTncYYW+9yOz2LJJammFZGBO13sqgEhpy9g=
github.com/googleapis/gnostic v0.2.0/go.mod h1:sJBsCZ4ayReDTBIg8b9dl28c5xFWyhBTVRp3pOg5EKY=
github.com/gophercloud/gophercloud v0.0.0-20190301152420-fca40860790e h1:hQpY0g0UGsLKLDs8UJ6xpA2gNCkEdEbvxSPqLItXCpI=
@@ -131,8 +142,6 @@ github.com/gorilla/context v1.1.1/go.mod h1:kBGZzfjB9CEq2AlWe17Uuf7NDRt0dE0s8S51
github.com/gorilla/mux v1.6.2/go.mod h1:1lud6UwP+6orDFRuTfBEV8e9/aOM/c4fVVCaMa2zaAs=
github.com/grpc-ecosystem/grpc-gateway v1.8.5 h1:2+KSC78XiO6Qy0hIjfc1OD9H+hsaJdJlb8Kqsd41CTE=
github.com/grpc-ecosystem/grpc-gateway v1.8.5/go.mod h1:vNeuVxBJEsws4ogUvrchl83t/GYV9WGTSLVdBhOQFDY=
github.com/grpc-ecosystem/grpc-opentracing v0.0.0-20180507213350-8e809c8a8645 h1:MJG/KsmcqMwFAkh8mTnAwhyKoB+sTAnY4CACC110tbU=
github.com/grpc-ecosystem/grpc-opentracing v0.0.0-20180507213350-8e809c8a8645/go.mod h1:6iZfnjpejD4L/4DwD7NryNaJyCQdzwWwH2MWhCA90Kw=
github.com/hashicorp/consul/api v1.1.0 h1:BNQPM9ytxj6jbjjdRPioQ94T6YXriSopn0i8COv6SRA=
github.com/hashicorp/consul/api v1.1.0/go.mod h1:VmuI/Lkw1nC05EYQWNKwWGbkg+FbDBtguAZLlVdkD9Q=
github.com/hashicorp/consul/sdk v0.1.1 h1:LnuDWGNsoajlhGyHJvuWW6FVqRl8JOTPqS6CPTsYjhY=
@@ -174,23 +183,18 @@ github.com/hpcloud/tail v1.0.0 h1:nfCOvKYfkgYP8hkirhJocXT2+zOD8yUNjXaWfTlyFKI=
github.com/hpcloud/tail v1.0.0/go.mod h1:ab1qPbhIpdTxEkNHXyeSf5vhxWSCs/tWer42PpOxQnU=
github.com/influxdata/influxdb v0.0.0-20170331210902-15e594fc09f1 h1:O08dwjOwv9CYlJJEUZKAazSoQDKlsN34Bq3dnhqhyVI=
github.com/influxdata/influxdb v0.0.0-20170331210902-15e594fc09f1/go.mod h1:qZna6X/4elxqT3yI9iZYdZrWWdeFOOprn86kgg4+IzY=
github.com/jackc/fake v0.0.0-20150926172116-812a484cc733 h1:vr3AYkKovP8uR8AvSGGUK1IDqRa5lAAvEkZG1LKaCRc=
github.com/jackc/fake v0.0.0-20150926172116-812a484cc733/go.mod h1:WrMFNQdiFJ80sQsxDoMokWK1W5TQtxBFNpzWTD84ibQ=
github.com/jackc/pgx v3.2.0+incompatible h1:0Vihzu20St42/UDsvZGdNE6jak7oi/UOeMzwMPHkgFY=
github.com/jackc/pgx v3.2.0+incompatible/go.mod h1:0ZGrqGqkRlliWnWB4zKnWtjbSWbGkVEFm4TeybAXq+I=
github.com/jessevdk/go-flags v0.0.0-20180331124232-1c38ed7ad0cc/go.mod h1:4FA24M0QyGHXBuZZK/XkWh8h0e1EYbRYJSGM75WSRxI=
github.com/jmespath/go-jmespath v0.0.0-20160202185014-0b12d6b521d8/go.mod h1:Nht3zPeWKUH0NzdCt2Blrr5ys8VGpn0CEB0cQHVjt7k=
github.com/jmespath/go-jmespath v0.0.0-20160803190731-bd40a432e4c7 h1:SMvOWPJCES2GdFracYbBQh93GXac8fq7HeN6JnpduB8=
github.com/jmespath/go-jmespath v0.0.0-20160803190731-bd40a432e4c7/go.mod h1:Nht3zPeWKUH0NzdCt2Blrr5ys8VGpn0CEB0cQHVjt7k=
github.com/json-iterator/go v1.1.5 h1:gL2yXlmiIo4+t+y32d4WGwOjKGYcGOuyrg46vadswDE=
github.com/json-iterator/go v1.1.5/go.mod h1:+SdeFBvtyEkXs7REEP0seUULqWtbJapLOCVDaaPEHmU=
github.com/json-iterator/go v1.1.6 h1:MrUvLMLTMxbqFJ9kzlvat/rYZqZnW3u4wkLzWTaFwKs=
github.com/json-iterator/go v1.1.6/go.mod h1:+SdeFBvtyEkXs7REEP0seUULqWtbJapLOCVDaaPEHmU=
github.com/jtolds/gls v4.2.1+incompatible h1:fSuqC+Gmlu6l/ZYAoZzx2pyucC8Xza35fpRVWLVmUEE=
github.com/jtolds/gls v4.2.1+incompatible/go.mod h1:QJZ7F/aHp+rZTRtaJ1ow/lLfFfVYBRgL+9YlvaHOwJU=
github.com/julienschmidt/httprouter v1.2.0 h1:TDTW5Yz1mjftljbcKqRcrYhd4XeOoI98t+9HbQbYf7g=
github.com/julienschmidt/httprouter v1.2.0/go.mod h1:SYymIcj16QtmaHHD7aYtjjsJG7VTCxuUUipMqKk8s4w=
github.com/kisielk/errcheck v1.1.0/go.mod h1:EZBBE59ingxPouuu3KfxchcWSUPOHkagtvWXihfKN4Q=
github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck=
github.com/knz/strtime v0.0.0-20181018220328-af2256ee352c h1:45aLE1GlZRKxNfTMkok85BUKAJNLdHr5GAm3h8Fqoww=
github.com/knz/strtime v0.0.0-20181018220328-af2256ee352c/go.mod h1:4ZxfWkxwtc7dBeifERVVWRy9F9rTU9p0yCDgeCtlius=
github.com/konsorten/go-windows-terminal-sequences v1.0.1/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ=
github.com/kr/logfmt v0.0.0-20140226030751-b84e30acd515 h1:T+h1c/A9Gawja4Y9mFVWj2vyii2bbUNDw3kt9VxK2EY=
github.com/kr/logfmt v0.0.0-20140226030751-b84e30acd515/go.mod h1:+0opPa2QZZtGFBFZlji/RkVcI2GknAs/DXo4wKdlNEc=
@@ -199,14 +203,11 @@ github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORN
github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ=
github.com/kr/text v0.1.0 h1:45sCR5RtlFHMR4UwH9sdQ5TC8v0qDQCHnXt+kaKSTVE=
github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI=
github.com/lib/pq v1.0.0 h1:X5PMW56eZitiTeO7tKzZxFCSpbFZJtkMMooicw2us9A=
github.com/lib/pq v1.0.0/go.mod h1:5WUZQaWbwv1U+lTReE5YruASi9Al49XbQIvNi/34Woo=
github.com/lightstep/lightstep-tracer-go v0.15.6 h1:D0GGa7afJ7GcQvu5as6ssLEEKYXvRgKI5d5cevtz8r4=
github.com/lightstep/lightstep-tracer-go v0.15.6/go.mod h1:6AMpwZpsyCFwSovxzM78e+AsYxE8sGwiM6C3TytaWeI=
github.com/kylelemons/godebug v0.0.0-20160406211939-eadb3ce320cb/go.mod h1:B69LEHPfb2qLo0BaaOLcbitczOKLWTsrBG9LczfCD4k=
github.com/mailru/easyjson v0.0.0-20180823135443-60711f1a8329 h1:2gxZ0XQIU/5z3Z3bUBu+FXuk2pFbkN6tcwi/pjyaDic=
github.com/mailru/easyjson v0.0.0-20180823135443-60711f1a8329/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc=
github.com/mattn/go-colorable v0.0.9/go.mod h1:9vuHe8Xs5qXnSaW/c/ABM9alt+Vo+STaOChaDxuIBZU=
github.com/mattn/go-isatty v0.0.3/go.mod h1:M+lRXTBqGeGNdLjl/ufCoiOlB5xdOkqRJdNxMWT7Zi4=
github.com/mattn/go-runewidth v0.0.4 h1:2BvfKmzob6Bmd4YsL0zygOqfdFnK7GR4QL06Do4/p7Y=
github.com/mattn/go-runewidth v0.0.4/go.mod h1:LwmH8dsx7+W8Uxz3IHJYH5QSwggIsqBzpuz5H//U1FU=
github.com/matttproud/golang_protobuf_extensions v1.0.1 h1:4hp9jkHxhMHkqkrB3Ix0jegS5sx/RkqARlsWZ6pIwiU=
github.com/matttproud/golang_protobuf_extensions v1.0.1/go.mod h1:D8He9yQNgCq6Z5Ld7szi9bcBfOoFv/3dc6xSMkL2PC0=
github.com/miekg/dns v1.0.14/go.mod h1:W1PPwlIAgtquWBMBEV9nkV9Cazfe8ScdGz/Lj7v3Nrg=
@@ -223,22 +224,17 @@ github.com/mitchellh/iochan v1.0.0/go.mod h1:JwYml1nuB7xOzsp52dPpHFffvOCDupsG0Qu
github.com/mitchellh/mapstructure v0.0.0-20160808181253-ca63d7c062ee/go.mod h1:FVVH3fgwuzCH5S8UJGiWEs2h04kUh9fWfEaFds41c1Y=
github.com/mitchellh/mapstructure v1.1.2 h1:fmNYVwqnSfB9mZU6OS2O6GsXM+wcskZDuKQzvN1EDeE=
github.com/mitchellh/mapstructure v1.1.2/go.mod h1:FVVH3fgwuzCH5S8UJGiWEs2h04kUh9fWfEaFds41c1Y=
github.com/mitchellh/reflectwalk v1.0.1 h1:FVzMWA5RllMAKIdUSC8mdWo3XtwoecrH79BY70sEEpE=
github.com/mitchellh/reflectwalk v1.0.1/go.mod h1:mSTlrgnPZtwu0c4WaC2kGObEpuNDbx0jmZXqmk4esnw=
github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd h1:TRLaZ9cD/w8PVh93nsPXa1VrQ6jlwL5oN8l14QlcNfg=
github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q=
github.com/modern-go/reflect2 v1.0.1 h1:9f412s+6RmYXLWZSEzVVgPGK7C2PphHj5RJrvfx9AWI=
github.com/modern-go/reflect2 v1.0.1/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0=
github.com/montanaflynn/stats v0.0.0-20180911141734-db72e6cae808 h1:pmpDGKLw4n82EtrNiLqB+xSz/JQwFOaZuMALYUHwX5s=
github.com/montanaflynn/stats v0.0.0-20180911141734-db72e6cae808/go.mod h1:wL8QJuTMNUDYhXwkmfOly8iTdp5TEcJFWZD2D7SIkUc=
github.com/mwitkow/go-conntrack v0.0.0-20161129095857-cc309e4a2223 h1:F9x/1yl3T2AeKLr2AMdilSD8+f9bvMnNN8VS5iDtovc=
github.com/mwitkow/go-conntrack v0.0.0-20161129095857-cc309e4a2223/go.mod h1:qRWi+5nqEBWmkhHvq77mSJWrCKwh8bxhgT7d/eI7P4U=
github.com/oklog/run v1.0.0 h1:Ru7dDtJNOyC66gQ5dQmaCa0qIsAUFY3sFpK1Xk8igrw=
github.com/oklog/run v1.0.0/go.mod h1:dlhp/R75TPv97u0XWUtDeV/lRKWPKSdTuV0TZvrmrQA=
github.com/oklog/ulid v0.0.0-20170117200651-66bb6560562f/go.mod h1:CirwcVhetQ6Lv90oh/F+FBtV6XMibvdAFo93nm5qn4U=
github.com/oklog/ulid v1.3.1 h1:EGfNDEx6MqHz8B3uNV6QAib1UR2Lm97sHi3ocA6ESJ4=
github.com/oklog/ulid v1.3.1/go.mod h1:CirwcVhetQ6Lv90oh/F+FBtV6XMibvdAFo93nm5qn4U=
github.com/olekukonko/tablewriter v0.0.1 h1:b3iUnf1v+ppJiOfNX4yxxqfWKMQPZR5yoh8urCTFX88=
github.com/olekukonko/tablewriter v0.0.1/go.mod h1:vsDQFd/mU46D+Z4whnwzcISnGGzXWMclvtLoiIKAKIo=
github.com/onsi/ginkgo v1.6.0 h1:Ix8l273rp3QzYgXSR+c8d1fTG7UPgYkOSELPhiY/YGw=
github.com/onsi/ginkgo v1.6.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE=
github.com/onsi/ginkgo v1.7.0 h1:WSHQ+IS43OoUrWtD1/bbclrwK8TTH5hzp+umCiuxHgs=
@@ -247,17 +243,12 @@ github.com/onsi/gomega v1.4.3 h1:RE1xgDvH7imwFD45h+u2SgIfERHlS2yNG4DObb5BSKU=
github.com/onsi/gomega v1.4.3/go.mod h1:ex+gbHU/CVuBBDIJjb2X0qEXbFg53c61hWP/1CpauHY=
github.com/opentracing-contrib/go-stdlib v0.0.0-20170113013457-1de4cc2120e7 h1:8KbikWulLUcMM96hBxjgoo6gTmCkG6HYSDohv/WygYU=
github.com/opentracing-contrib/go-stdlib v0.0.0-20170113013457-1de4cc2120e7/go.mod h1:PLldrQSroqzH70Xl+1DQcGnefIbqsKR7UDaiux3zV+w=
github.com/opentracing/basictracer-go v1.0.0 h1:YyUAhaEfjoWXclZVJ9sGoNct7j4TVk7lZWlQw5UXuoo=
github.com/opentracing/basictracer-go v1.0.0/go.mod h1:QfBfYuafItcjQuMwinw9GhYKwFXS9KnPs5lxoYwgW74=
github.com/opentracing/opentracing-go v1.0.2 h1:3jA2P6O1F9UOrWVpwrIo17pu01KWvNWg4X946/Y5Zwg=
github.com/opentracing/opentracing-go v1.0.2/go.mod h1:UkNAQd3GIcIGf0SeVgPpRdFStlNbqXla1AfSYxPUl2o=
github.com/openzipkin/zipkin-go v0.1.6/go.mod h1:QgAqvLzwWbR/WpD4A3cGpPtJrZXNIiJc5AZX7/PBEpw=
github.com/pascaldekloe/goe v0.0.0-20180627143212-57f6aae5913c h1:Lgl0gzECD8GnQ5QCWA8o6BtfL6mDH5rQgM4/fX3avOs=
github.com/pascaldekloe/goe v0.0.0-20180627143212-57f6aae5913c/go.mod h1:lzWF7FIEvWOWxwDKqyGYQf6ZUaNfKdP144TG7ZOy1lc=
github.com/peterbourgon/g2s v0.0.0-20170223122336-d4e7ad98afea h1:sKwxy1H95npauwu8vtF95vG/syrL0p8fSZo/XlDg5gk=
github.com/peterbourgon/g2s v0.0.0-20170223122336-d4e7ad98afea/go.mod h1:1VcHEd3ro4QMoHfiNl/j7Jkln9+KQuorp0PItHMJYNg=
github.com/petermattis/goid v0.0.0-20170504144140-0ded85884ba5 h1:rUMC+oZ89Om6l9wvUNjzI0ZrKrSnXzV+opsgAohYUNc=
github.com/petermattis/goid v0.0.0-20170504144140-0ded85884ba5/go.mod h1:jvVRKCrJTQWu0XVbaOlby/2lO20uSCHEMzzplHXte1o=
github.com/pborman/uuid v1.2.0/go.mod h1:X/NO0urCmaxf9VXbdlT7C2Yzkj2IKimNn4k+gtPdI/k=
github.com/pierrec/lz4 v2.0.5+incompatible/go.mod h1:pdkljMzZIN41W+lC3N2tnIh5sFi+IEE17M5jbnwPHcY=
github.com/pkg/errors v0.8.0 h1:WdK/asTD0HN+q6hsWO3/vpuAkAr+tw6aNJNDFFf0+qw=
github.com/pkg/errors v0.8.0/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
@@ -266,53 +257,48 @@ github.com/pkg/errors v0.8.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINE
github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM=
github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
github.com/posener/complete v1.1.1/go.mod h1:em0nMJCgc9GFtwrmVmEMR/ZL6WyhyjMBndrE9hABlRI=
github.com/prometheus/alertmanager v0.17.0 h1:h4EqB7nSCb0zNl8prrb9kX9nO2ZQh//aQkCiemLCw3Q=
github.com/prometheus/alertmanager v0.17.0/go.mod h1:3/vUuD9sDlkVuB2KLczjrlG7aqT09pyK0jfTp/itWS0=
github.com/prometheus/client_golang v0.9.1/go.mod h1:7SWBe2y4D6OKWSNQJUaRYU/AaXPKyh/dDVn+NZz0KFw=
github.com/prometheus/client_golang v0.9.2/go.mod h1:OsXs2jCmiKlQ1lTBmv21f2mNfw4xf/QclQDMrYNZzcM=
github.com/prometheus/client_golang v0.9.3-0.20190127221311-3c4408c8b829 h1:D+CiwcpGTW6pL6bv6KI3KbyEyCKyS+1JWS2h8PNDnGA=
github.com/prometheus/client_golang v0.9.3-0.20190127221311-3c4408c8b829/go.mod h1:p2iRAGwDERtqlqzRXnrOVns+ignqQo//hLXqYxZYVNs=
github.com/prometheus/client_golang v0.9.3 h1:9iH4JKXLzFbOAdtqv/a+j8aewx2Y8lAjAydhbaScPF8=
github.com/prometheus/client_golang v0.9.3/go.mod h1:/TN21ttK/J9q6uSwhBd54HahCDft0ttaMvbicHlPoso=
github.com/prometheus/client_golang v1.0.0 h1:vrDKnkGzuGvhNAL56c7DBz29ZL+KxnoR0x7enabFceM=
github.com/prometheus/client_golang v1.0.0/go.mod h1:db9x61etRT2tGnBNRi70OPL5FsnadC4Ky3P0J6CfImo=
github.com/prometheus/client_model v0.0.0-20180712105110-5c3871d89910 h1:idejC8f05m9MGOsuEi1ATq9shN03HrxNkD/luQvxCv8=
github.com/prometheus/client_model v0.0.0-20180712105110-5c3871d89910/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo=
github.com/prometheus/client_model v0.0.0-20190115171406-56726106282f h1:BVwpUVJDADN2ufcGik7W992pyps0wZ888b/y9GXcLTU=
github.com/prometheus/client_model v0.0.0-20190115171406-56726106282f/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo=
github.com/prometheus/client_model v0.0.0-20190129233127-fd36f4220a90 h1:S/YWwWx/RA8rT8tKFRuGUZhuA90OyIBpPCXkcbwU8DE=
github.com/prometheus/client_model v0.0.0-20190129233127-fd36f4220a90/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA=
github.com/prometheus/common v0.0.0-20181113130724-41aa239b4cce/go.mod h1:daVV7qP5qjZbuso7PdcryaAu0sAZbrN9i7WWcTMWvro=
github.com/prometheus/common v0.0.0-20181126121408-4724e9255275/go.mod h1:daVV7qP5qjZbuso7PdcryaAu0sAZbrN9i7WWcTMWvro=
github.com/prometheus/common v0.2.0/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y86RQel1bk4=
github.com/prometheus/common v0.4.0 h1:7etb9YClo3a6HjLzfl6rIQaU+FDfi0VSX39io3aQ+DM=
github.com/prometheus/common v0.4.0/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y86RQel1bk4=
github.com/prometheus/common v0.4.1 h1:K0MGApIoQvMw27RTdJkPbr3JZ7DNbtxQNyi5STVM6Kw=
github.com/prometheus/common v0.4.1/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y86RQel1bk4=
github.com/prometheus/procfs v0.0.0-20181005140218-185b4288413d/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk=
github.com/prometheus/procfs v0.0.0-20181204211112-1dc9a6cbc91a/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk=
github.com/prometheus/procfs v0.0.0-20190117184657-bf6a532e95b1 h1:/K3IL0Z1quvmJ7X0A1AwNEK7CRkVK3YwfOU/QAL4WGg=
github.com/prometheus/procfs v0.0.0-20190117184657-bf6a532e95b1/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk=
github.com/prometheus/procfs v0.0.0-20190507164030-5867b95ac084 h1:sofwID9zm4tzrgykg80hfFph1mryUeLRsUfoocVVmRY=
github.com/prometheus/procfs v0.0.0-20190507164030-5867b95ac084/go.mod h1:TjEm7ze935MbeOT/UhFTIMYKhuLP4wbCsTZCD3I8kEA=
github.com/prometheus/tsdb v0.7.1 h1:YZcsG11NqnK4czYLrWd9mpEuAJIHVQLwdrleYfszMAA=
github.com/prometheus/tsdb v0.7.1/go.mod h1:qhTCs0VvXwvX/y3TZrWD7rabWM+ijKTux40TwIPHuXU=
github.com/prometheus/tsdb v0.8.0 h1:w1tAGxsBMLkuGrFMhqgcCeBkM5d1YI24udArs+aASuQ=
github.com/prometheus/tsdb v0.8.0/go.mod h1:fSI0j+IUQrDd7+ZtR9WKIGtoYAYAJUKcKhYLG25tN4g=
github.com/prometheus/procfs v0.0.2 h1:6LJUbpNm42llc4HRCuvApCSWB/WfhuNo9K98Q9sNGfs=
github.com/prometheus/procfs v0.0.2/go.mod h1:TjEm7ze935MbeOT/UhFTIMYKhuLP4wbCsTZCD3I8kEA=
github.com/prometheus/prometheus v0.0.0-20180315085919-58e2a31db8de/go.mod h1:oAIUtOny2rjMX0OWN5vPR5/q/twIROJvdqnQKDdil/s=
github.com/prometheus/tsdb v0.9.1 h1:IWaAmWkYlgG7/S4iw4IpAQt5Y35QaZM6/GsZ7GsjAuk=
github.com/prometheus/tsdb v0.9.1/go.mod h1:oi49uRhEe9dPUTlS3JRZOwJuVi6tmh10QSgwXEyGCt4=
github.com/rcrowley/go-metrics v0.0.0-20181016184325-3113b8401b8a h1:9ZKAASQSHhDYGoxY8uLVpewe1GDZ2vu2Tr/vTdVAkFQ=
github.com/rcrowley/go-metrics v0.0.0-20181016184325-3113b8401b8a/go.mod h1:bCqnVzQkZxMG4s8nGwiZ5l3QUCyqpo9Y+/ZMZ9VjZe4=
github.com/rlmcpherson/s3gof3r v0.5.0 h1:1izOJpTiohSibfOHuNyEA/yQnAirh05enzEdmhez43k=
github.com/rlmcpherson/s3gof3r v0.5.0/go.mod h1:s7vv7SMDPInkitQMuZzH615G7yWHdrU2r/Go7Bo71Rs=
github.com/rogpeppe/fastuuid v0.0.0-20150106093220-6724a57986af/go.mod h1:XWv6SoW27p1b0cqNHllgS5HIMJraePCO15w5zCzIWYg=
github.com/rubyist/circuitbreaker v2.2.1+incompatible h1:KUKd/pV8Geg77+8LNDwdow6rVCAYOp8+kHUyFvL6Mhk=
github.com/rubyist/circuitbreaker v2.2.1+incompatible/go.mod h1:Ycs3JgJADPuzJDwffe12k6BZT8hxVi6lFK+gWYJLN4A=
github.com/rs/cors v1.6.0/go.mod h1:gFx+x8UowdsKA9AchylcLynDq+nNFfI8FkUZdN/jGCU=
github.com/ryanuber/columnize v0.0.0-20160712163229-9b3edd62028f/go.mod h1:sm1tb6uqfes/u+d4ooFouqFdy9/2g9QGwK3SQygK0Ts=
github.com/ryanuber/columnize v2.1.0+incompatible/go.mod h1:sm1tb6uqfes/u+d4ooFouqFdy9/2g9QGwK3SQygK0Ts=
github.com/samuel/go-zookeeper v0.0.0-20161028232340-1d7be4effb13 h1:4AQBn5RJY4WH8t8TLEMZUsWeXHAUcoao42TCAfpEJJE=
github.com/samuel/go-zookeeper v0.0.0-20161028232340-1d7be4effb13/go.mod h1:gi+0XIa01GRL2eRQVjQkKGqKF3SF9vZR/HnPullcV2E=
github.com/sasha-s/go-deadlock v0.0.0-20161201235124-341000892f3d h1:yVBZEAirqhDYAc7xftf/swe8eHcg63jqfwdqN8KSoR8=
github.com/sasha-s/go-deadlock v0.0.0-20161201235124-341000892f3d/go.mod h1:StQn567HiB1fF2yJ44N9au7wOhrPS3iZqiDbRupzT10=
github.com/satori/go.uuid v1.2.0 h1:0uYX9dsZ2yD7q2RtLRtPSdGDWzjeM3TbMJP9utgA0ww=
github.com/satori/go.uuid v1.2.0/go.mod h1:dA0hQrYB0VpLJoorglMZABFdXlWrHn1NEOzdhQKdks0=
github.com/satori/go.uuid v0.0.0-20160603004225-b111a074d5ef/go.mod h1:dA0hQrYB0VpLJoorglMZABFdXlWrHn1NEOzdhQKdks0=
github.com/sean-/seed v0.0.0-20170313163322-e2103e2c3529 h1:nn5Wsu0esKSJiIVhscUtVbo7ada43DJhG55ua/hjS5I=
github.com/sean-/seed v0.0.0-20170313163322-e2103e2c3529/go.mod h1:DxrIzT+xaE7yg65j358z/aeFdxmN0P9QXhEzd20vsDc=
github.com/shopspring/decimal v0.0.0-20180709203117-cd690d0c9e24 h1:pntxY8Ary0t43dCZ5dqY4YTJCObLY1kIXl0uzMv+7DE=
github.com/shopspring/decimal v0.0.0-20180709203117-cd690d0c9e24/go.mod h1:M+9NzErvs504Cn4c5DxATwIqPbtswREoFCre64PpcG4=
github.com/shurcooL/httpfs v0.0.0-20171119174359-809beceb2371 h1:SWV2fHctRpRrp49VXJ6UZja7gU9QLHwRpIPBN89SKEo=
github.com/shurcooL/httpfs v0.0.0-20171119174359-809beceb2371/go.mod h1:ZY1cvUeJuFPAdZ/B6v7RHavJWZn2YPVFQ1OSXhCGOkg=
github.com/shurcooL/vfsgen v0.0.0-20180711163814-62bca832be04 h1:y0cMJ0qjii33BnD6tMGcF/+gHYsoKQ6tbwQpy233OII=
github.com/shurcooL/vfsgen v0.0.0-20180711163814-62bca832be04/go.mod h1:TrYk7fJVaAttu97ZZKrO9UbRa8izdowaMIZcxYMbVaw=
github.com/shurcooL/vfsgen v0.0.0-20180825020608-02ddb050ef6b h1:rKVW5h3pEu8gGxD+ZlOmBvFYAxXLCYeQv/eg+t6QvLQ=
github.com/shurcooL/vfsgen v0.0.0-20180825020608-02ddb050ef6b/go.mod h1:TrYk7fJVaAttu97ZZKrO9UbRa8izdowaMIZcxYMbVaw=
github.com/simonpasquier/klog-gokit v0.1.0 h1:l3GGzgwlUF4vC1ApCOEsMsV+6nJPM01VoVCUCZgOIUw=
github.com/simonpasquier/klog-gokit v0.1.0/go.mod h1:4lorAA0CyDox4KO34BrvNAJk8J2Ma/M9Q2BDkR38vSI=
github.com/sirupsen/logrus v1.2.0/go.mod h1:LxeOpSwHxABJmUn/MG1IvRgCAasNZTLOkJPxbbu5VWo=
@@ -331,6 +317,7 @@ github.com/stretchr/objx v0.1.1/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+
github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs=
github.com/stretchr/testify v1.3.0 h1:TivCn/peBQ7UY8ooIcPgZFpTNSz0Q2U6UrFlUfqbe0Q=
github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI=
github.com/xlab/treeprint v0.0.0-20180616005107-d6fb6747feb6/go.mod h1:ce1O1j6UtZfjr22oyGxGLbauSBp2YVXpARAosm7dHBg=
go.opencensus.io v0.20.1/go.mod h1:6WKK9ahsWS3RSO+PY9ZHZUfv2irvY6gN279GOPZjmmk=
go.opencensus.io v0.20.2 h1:NAfh7zF0/3/HqtMvJNZ/RFrSlCE6ZTlHmKfhL/Dm1Jk=
go.opencensus.io v0.20.2/go.mod h1:6WKK9ahsWS3RSO+PY9ZHZUfv2irvY6gN279GOPZjmmk=
@@ -349,6 +336,7 @@ golang.org/x/lint v0.0.0-20190301231843-5614ed5bae6f/go.mod h1:UVdnD1Gm6xHRNCYTk
golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
golang.org/x/net v0.0.0-20180906233101-161cd47e91fd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
golang.org/x/net v0.0.0-20181005035420-146acd28ed58/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
golang.org/x/net v0.0.0-20181023162649-9b4f9f5ad519/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
golang.org/x/net v0.0.0-20181114220301-adae6a3d119a/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
golang.org/x/net v0.0.0-20181201002055-351d144fa1fc/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
@@ -386,12 +374,15 @@ golang.org/x/sys v0.0.0-20190403152447-81d4e9dc473e h1:nFYrTHrdrAOpShe27kaFHjsqY
golang.org/x/sys v0.0.0-20190403152447-81d4e9dc473e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/text v0.3.0 h1:g61tztE5qeGQ89tm6NTjjM9VPIm088od1l6aSorWRWg=
golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
golang.org/x/text v0.3.1-0.20180805044716-cb6730876b98 h1:Cf5h/jCzhiiL0W8VrlJhOm+8+YYZPMHXcHsruWXnD40=
golang.org/x/text v0.3.1-0.20180805044716-cb6730876b98/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
golang.org/x/time v0.0.0-20180412165947-fbb02b2291d2 h1:+DCIGbF/swA92ohVg0//6X2IVY3KZs6p9mix0ziNYJM=
golang.org/x/time v0.0.0-20180412165947-fbb02b2291d2/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
golang.org/x/tools v0.0.0-20180221164845-07fd8470d635/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
golang.org/x/tools v0.0.0-20180828015842-6cd1fcedba52/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
golang.org/x/tools v0.0.0-20190114222345-bf090417da8b h1:qMK98NmNCRVDIYFycQ5yVRkvgDUFfdP8Ip4KqmDEB7g=
golang.org/x/tools v0.0.0-20190114222345-bf090417da8b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
golang.org/x/tools v0.0.0-20190118193359-16909d206f00/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
golang.org/x/tools v0.0.0-20190226205152-f727befe758c/go.mod h1:9Yl7xja0Znq3iFh3HoIrodX9oNMXvdceNzlUR8zjMvY=
golang.org/x/tools v0.0.0-20190312170243-e65039ee4138 h1:H3uGjxCR/6Ds0Mjgyp7LMK81+LvmbvWWEnJhzk1Pi9E=
golang.org/x/tools v0.0.0-20190312170243-e65039ee4138/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs=
@@ -32,12 +32,14 @@ import (

	"github.com/go-kit/kit/log"
	"github.com/go-kit/kit/log/level"
	"github.com/go-openapi/strfmt"
	"github.com/pkg/errors"
	"github.com/prometheus/client_golang/prometheus"
	config_util "github.com/prometheus/common/config"
	"github.com/prometheus/common/model"
	"github.com/prometheus/common/version"

	"github.com/prometheus/alertmanager/api/v2/models"
	"github.com/prometheus/prometheus/config"
	"github.com/prometheus/prometheus/discovery/targetgroup"
	"github.com/prometheus/prometheus/pkg/labels"
@@ -45,8 +47,7 @@ import (
)

const (
	alertPushEndpoint = "/api/v1/alerts"
	contentTypeJSON   = "application/json"
	contentTypeJSON = "application/json"
)

// String constants for instrumentation.
@@ -458,11 +459,10 @@ func (n *Manager) DroppedAlertmanagers() []*url.URL {
func (n *Manager) sendAll(alerts ...*Alert) bool {
	begin := time.Now()

	b, err := json.Marshal(alerts)
	if err != nil {
		level.Error(n.logger).Log("msg", "Encoding alerts failed", "err", err)
		return false
	}
	// v1Payload and v2Payload represent 'alerts' marshaled for Alertmanager API
	// v1 or v2. Marshaling happens below. Reference here is for caching between
	// for loop iterations.
	var v1Payload, v2Payload []byte

	n.mtx.RLock()
	amSets := n.alertmanagers
@@ -473,36 +473,106 @@ func (n *Manager) sendAll(alerts ...*Alert) bool {
		numSuccess uint64
	)
	for _, ams := range amSets {
		var (
			payload []byte
			err     error
		)

		ams.mtx.RLock()

		switch ams.cfg.APIVersion {
		case config.AlertmanagerAPIVersionV1:
			{
				if v1Payload == nil {
					v1Payload, err = json.Marshal(alerts)
					if err != nil {
						level.Error(n.logger).Log("msg", "Encoding alerts for Alertmanager API v1 failed", "err", err)
						return false
					}
				}

				payload = v1Payload
			}
		case config.AlertmanagerAPIVersionV2:
			{
				if v2Payload == nil {
					openAPIAlerts := alertsToOpenAPIAlerts(alerts)

					v2Payload, err = json.Marshal(openAPIAlerts)
					if err != nil {
						level.Error(n.logger).Log("msg", "Encoding alerts for Alertmanager API v2 failed", "err", err)
						return false
					}
				}

				payload = v2Payload
			}
		default:
			{
				level.Error(n.logger).Log(
					"msg", fmt.Sprintf("Invalid Alertmanager API version '%v', expected one of '%v'", ams.cfg.APIVersion, config.SupportedAlertmanagerAPIVersions),
					"err", err,
				)
				return false
			}
		}

		for _, am := range ams.ams {
			wg.Add(1)

			ctx, cancel := context.WithTimeout(n.ctx, time.Duration(ams.cfg.Timeout))
			defer cancel()

			go func(ams *alertmanagerSet, am alertmanager) {
				u := am.url().String()

				if err := n.sendOne(ctx, ams.client, u, b); err != nil {
					level.Error(n.logger).Log("alertmanager", u, "count", len(alerts), "msg", "Error sending alert", "err", err)
					n.metrics.errors.WithLabelValues(u).Inc()
			go func(client *http.Client, url string) {
				if err := n.sendOne(ctx, client, url, payload); err != nil {
					level.Error(n.logger).Log("alertmanager", url, "count", len(alerts), "msg", "Error sending alert", "err", err)
					n.metrics.errors.WithLabelValues(url).Inc()
				} else {
					atomic.AddUint64(&numSuccess, 1)
				}
				n.metrics.latency.WithLabelValues(u).Observe(time.Since(begin).Seconds())
				n.metrics.sent.WithLabelValues(u).Add(float64(len(alerts)))
				n.metrics.latency.WithLabelValues(url).Observe(time.Since(begin).Seconds())
				n.metrics.sent.WithLabelValues(url).Add(float64(len(alerts)))

				wg.Done()
			}(ams, am)
			}(ams.client, am.url().String())
		}

		ams.mtx.RUnlock()
	}

	wg.Wait()

	return numSuccess > 0
}

func alertsToOpenAPIAlerts(alerts []*Alert) models.PostableAlerts {
	openAPIAlerts := models.PostableAlerts{}
	for _, a := range alerts {
		start := strfmt.DateTime(a.StartsAt)
		end := strfmt.DateTime(a.EndsAt)
		openAPIAlerts = append(openAPIAlerts, &models.PostableAlert{
			Annotations: labelsToOpenAPILabelSet(a.Annotations),
			EndsAt:      end,
			StartsAt:    start,
			Alert: models.Alert{
				GeneratorURL: strfmt.URI(a.GeneratorURL),
				Labels:       labelsToOpenAPILabelSet(a.Labels),
			},
		})
	}

	return openAPIAlerts
}

func labelsToOpenAPILabelSet(modelLabelSet labels.Labels) models.LabelSet {
	apiLabelSet := models.LabelSet{}
	for _, label := range modelLabelSet {
		apiLabelSet[label.Name] = string(label.Value)
	}

	return apiLabelSet
}

func (n *Manager) sendOne(ctx context.Context, c *http.Client, url string, b []byte) error {
	req, err := http.NewRequest("POST", url, bytes.NewReader(b))
	if err != nil {
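The switch above marshals the alert batch at most once per API version and reuses the cached bytes for every Alertmanager set in the loop. A minimal standalone sketch of that lazy, per-version payload cache (the `alert` type and version strings here are illustrative stand-ins, not the notifier's real types):

```go
package main

import (
	"encoding/json"
	"fmt"
)

type alert struct {
	Labels map[string]string `json:"labels"`
}

// payloadFor lazily marshals alerts once per API version and caches the
// result, so later sets that share a version reuse the same bytes.
func payloadFor(version string, alerts []alert, cache map[string][]byte) ([]byte, error) {
	if b, ok := cache[version]; ok {
		return b, nil
	}
	// A real v2 path would first convert the alerts to OpenAPI models.
	b, err := json.Marshal(alerts)
	if err != nil {
		return nil, err
	}
	cache[version] = b
	return b, nil
}

func main() {
	cache := map[string][]byte{}
	alerts := []alert{{Labels: map[string]string{"alertname": "Up"}}}
	for _, v := range []string{"v1", "v2", "v1"} {
		b, _ := payloadFor(v, alerts, cache) // third call hits the cache
		fmt.Println(v, len(b))
	}
}
```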
@@ -523,6 +593,7 @@ func (n *Manager) sendOne(ctx context.Context, c *http.Client, url string, b []b
	if resp.StatusCode/100 != 2 {
		return errors.Errorf("bad response status %s", resp.Status)
	}

	return err
}
@@ -615,7 +686,8 @@ func (s *alertmanagerSet) sync(tgs []*targetgroup.Group) {
	}
}

func postPath(pre string) string {
func postPath(pre string, v config.AlertmanagerAPIVersion) string {
	alertPushEndpoint := fmt.Sprintf("/api/%v/alerts", string(v))
	return path.Join("/", pre, alertPushEndpoint)
}

@@ -633,7 +705,7 @@ func alertmanagerFromGroup(tg *targetgroup.Group, cfg *config.AlertmanagerConfig
	}
	// Set configured scheme as the initial scheme label for overwrite.
	lbls = append(lbls, labels.Label{Name: model.SchemeLabel, Value: cfg.Scheme})
	lbls = append(lbls, labels.Label{Name: pathLabel, Value: postPath(cfg.PathPrefix)})
	lbls = append(lbls, labels.Label{Name: pathLabel, Value: postPath(cfg.PathPrefix, cfg.APIVersion)})

	// Combine target labels with target group labels.
	for ln, lv := range tg.Labels {
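With the version now part of the push path, the same prefix yields a different endpoint per configured API version. A small sketch of the resulting paths (plain strings stand in for `config.AlertmanagerAPIVersion`):

```go
package main

import (
	"fmt"
	"path"
)

// postPath mirrors the version-aware helper above: the API version is
// interpolated into the alert push endpoint before joining the prefix.
func postPath(pre, version string) string {
	return path.Join("/", pre, fmt.Sprintf("/api/%s/alerts", version))
}

func main() {
	fmt.Println(postPath("", "v1"))              // /api/v1/alerts
	fmt.Println(postPath("/alertmanager", "v2")) // /alertmanager/api/v2/alerts
}
```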
@@ -64,7 +64,7 @@ func TestPostPath(t *testing.T) {
		},
	}
	for _, c := range cases {
		testutil.Equals(t, c.out, postPath(c.in))
		testutil.Equals(t, c.out, postPath(c.in, config.AlertmanagerAPIVersionV1))
	}
}
@@ -165,15 +165,19 @@ func TestHandlerSendAll(t *testing.T) {

	h.alertmanagers = make(map[string]*alertmanagerSet)

	am1Cfg := config.DefaultAlertmanagerConfig
	am1Cfg.Timeout = model.Duration(time.Second)

	am2Cfg := config.DefaultAlertmanagerConfig
	am2Cfg.Timeout = model.Duration(time.Second)

	h.alertmanagers["1"] = &alertmanagerSet{
		ams: []alertmanager{
			alertmanagerMock{
				urlf: func() string { return server1.URL },
			},
		},
		cfg: &config.AlertmanagerConfig{
			Timeout: model.Duration(time.Second),
		},
		cfg:    &am1Cfg,
		client: authClient,
	}

@@ -183,9 +187,7 @@ func TestHandlerSendAll(t *testing.T) {
				urlf: func() string { return server2.URL },
			},
		},
		cfg: &config.AlertmanagerConfig{
			Timeout: model.Duration(time.Second),
		},
		cfg: &am2Cfg,
	}

	for i := range make([]struct{}, maxBatchSize) {
@@ -332,15 +334,16 @@ func TestHandlerQueueing(t *testing.T) {

	h.alertmanagers = make(map[string]*alertmanagerSet)

	am1Cfg := config.DefaultAlertmanagerConfig
	am1Cfg.Timeout = model.Duration(time.Second)

	h.alertmanagers["1"] = &alertmanagerSet{
		ams: []alertmanager{
			alertmanagerMock{
				urlf: func() string { return server.URL },
			},
		},
		cfg: &config.AlertmanagerConfig{
			Timeout: model.Duration(time.Second),
		},
		cfg: &am1Cfg,
	}

	var alerts []*Alert
@@ -131,44 +131,46 @@ func (ls Labels) Hash() uint64 {
}

// HashForLabels returns a hash value for the labels matching the provided names.
func (ls Labels) HashForLabels(names ...string) uint64 {
	b := make([]byte, 0, 1024)

	for _, v := range ls {
		for _, n := range names {
			if v.Name == n {
				b = append(b, v.Name...)
				b = append(b, sep)
				b = append(b, v.Value...)
				b = append(b, sep)
				break
			}
		}
// 'names' have to be sorted in ascending order.
func (ls Labels) HashForLabels(b []byte, names ...string) (uint64, []byte) {
	b = b[:0]
	i, j := 0, 0
	for i < len(ls) && j < len(names) {
		if names[j] < ls[i].Name {
			j++
		} else if ls[i].Name < names[j] {
			i++
		} else {
			b = append(b, ls[i].Name...)
			b = append(b, sep)
			b = append(b, ls[i].Value...)
			b = append(b, sep)
			i++
			j++
		}
	}
	return xxhash.Sum64(b)
	return xxhash.Sum64(b), b
}

// HashWithoutLabels returns a hash value for all labels except those matching
// the provided names.
func (ls Labels) HashWithoutLabels(names ...string) uint64 {
	b := make([]byte, 0, 1024)

Outer:
	for _, v := range ls {
		if v.Name == MetricName {
// 'names' have to be sorted in ascending order.
func (ls Labels) HashWithoutLabels(b []byte, names ...string) (uint64, []byte) {
	b = b[:0]
	j := 0
	for i := range ls {
		for j < len(names) && names[j] < ls[i].Name {
			j++
		}
		if ls[i].Name == MetricName || (j < len(names) && ls[i].Name == names[j]) {
			continue
		}
		for _, n := range names {
			if v.Name == n {
				continue Outer
			}
		}
		b = append(b, v.Name...)
		b = append(b, ls[i].Name...)
		b = append(b, sep)
		b = append(b, v.Value...)
		b = append(b, ls[i].Value...)
		b = append(b, sep)
	}
	return xxhash.Sum64(b)
	return xxhash.Sum64(b), b
}

// Copy returns a copy of the labels.
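Both the label set and the `names` argument are sorted, so the new code can replace the nested scan with a single merge-style pass. A standalone sketch of that two-pointer intersection over sorted string slices:

```go
package main

import "fmt"

// intersectSorted walks two ascending slices with one pointer each, emitting
// common elements in O(len(a)+len(b)) - the same merge-style scan the new
// HashForLabels performs over sorted label names.
func intersectSorted(a, b []string) []string {
	var out []string
	i, j := 0, 0
	for i < len(a) && j < len(b) {
		switch {
		case a[i] < b[j]:
			i++
		case b[j] < a[i]:
			j++
		default:
			out = append(out, a[i])
			i++
			j++
		}
	}
	return out
}

func main() {
	fmt.Println(intersectSorted(
		[]string{"instance", "job", "le"},
		[]string{"job", "le", "zone"},
	)) // [job le]
}
```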
@@ -1523,13 +1523,17 @@ func (ev *evaluator) VectorBinop(op ItemType, lhs, rhs Vector, matching *VectorM
// signatureFunc returns a function that calculates the signature for a metric
// ignoring the provided labels. If on, then the given labels are only used instead.
func signatureFunc(on bool, names ...string) func(labels.Labels) uint64 {
	// TODO(fabxc): ensure names are sorted and then use that and sortedness
	// of labels by names to speed up the operations below.
	// Alternatively, inline the hashing and don't build new label sets.
	sort.Strings(names)
	if on {
		return func(lset labels.Labels) uint64 { return lset.HashForLabels(names...) }
		return func(lset labels.Labels) uint64 {
			h, _ := lset.HashForLabels(make([]byte, 0, 1024), names...)
			return h
		}
	}
	return func(lset labels.Labels) uint64 {
		h, _ := lset.HashWithoutLabels(make([]byte, 0, 1024), names...)
		return h
	}
	return func(lset labels.Labels) uint64 { return lset.HashWithoutLabels(names...) }
}

// resultMetric returns the metric for the given sample(s) based on the Vector
@@ -1722,8 +1726,9 @@ func (ev *evaluator) aggregation(op ItemType, grouping []string, without bool, p
		}
	}

	sort.Strings(grouping)
	lb := labels.NewBuilder(nil)

	buf := make([]byte, 0, 1024)
	for _, s := range vec {
		metric := s.Metric

@@ -1737,9 +1742,9 @@ func (ev *evaluator) aggregation(op ItemType, grouping []string, without bool, p
			groupingKey uint64
		)
		if without {
			groupingKey = metric.HashWithoutLabels(grouping...)
			groupingKey, buf = metric.HashWithoutLabels(buf, grouping...)
		} else {
			groupingKey = metric.HashForLabels(grouping...)
			groupingKey, buf = metric.HashForLabels(buf, grouping...)
		}

		group, ok := result[groupingKey]
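The aggregation loop now allocates one scratch buffer up front and threads it through every hash call, so the per-series allocation disappears. A hedged sketch of the same buffer-reuse pattern using `github.com/cespare/xxhash` (the separator byte here is illustrative):

```go
package main

import (
	"fmt"

	"github.com/cespare/xxhash"
)

// hashJoined hashes the strings into buf and returns the hash along with the
// (possibly grown) buffer, so the caller can pass it back in next iteration.
func hashJoined(buf []byte, parts ...string) (uint64, []byte) {
	buf = buf[:0] // reuse the backing array, keep its capacity
	for _, p := range parts {
		buf = append(buf, p...)
		buf = append(buf, 0xff) // out-of-band separator byte
	}
	return xxhash.Sum64(buf), buf
}

func main() {
	buf := make([]byte, 0, 1024) // one allocation for the whole loop
	var h uint64
	for _, series := range [][]string{{"job", "api"}, {"job", "db"}} {
		h, buf = hashJoined(buf, series...)
		fmt.Println(h)
	}
}
```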
@@ -173,9 +173,9 @@ type errQuerier struct {
func (q *errQuerier) Select(*storage.SelectParams, ...*labels.Matcher) (storage.SeriesSet, storage.Warnings, error) {
	return errSeriesSet{err: q.err}, nil, q.err
}
func (*errQuerier) LabelValues(name string) ([]string, error) { return nil, nil }
func (*errQuerier) LabelNames() ([]string, error)             { return nil, nil }
func (*errQuerier) Close() error                              { return nil }
func (*errQuerier) LabelValues(name string) ([]string, storage.Warnings, error) { return nil, nil, nil }
func (*errQuerier) LabelNames() ([]string, storage.Warnings, error)             { return nil, nil, nil }
func (*errQuerier) Close() error                                                { return nil }

// errSeriesSet implements storage.SeriesSet which always returns error.
type errSeriesSet struct {
@@ -242,9 +242,11 @@ func (q *paramCheckerQuerier) Select(sp *storage.SelectParams, _ ...*labels.Matc

	return errSeriesSet{err: nil}, nil, nil
}
func (*paramCheckerQuerier) LabelValues(name string) ([]string, error) { return nil, nil }
func (*paramCheckerQuerier) LabelNames() ([]string, error)             { return nil, nil }
func (*paramCheckerQuerier) Close() error                              { return nil }
func (*paramCheckerQuerier) LabelValues(name string) ([]string, storage.Warnings, error) {
	return nil, nil, nil
}
func (*paramCheckerQuerier) LabelNames() ([]string, storage.Warnings, error) { return nil, nil, nil }
func (*paramCheckerQuerier) Close() error                                    { return nil }

func TestParamsSetCorrectly(t *testing.T) {
	opts := EngineOpts{
@@ -253,16 +253,28 @@ func (q *mergeQuerier) Select(params *SelectParams, matchers ...*labels.Matcher)
}

// LabelValues returns all potential values for a label name.
func (q *mergeQuerier) LabelValues(name string) ([]string, error) {
func (q *mergeQuerier) LabelValues(name string) ([]string, Warnings, error) {
	var results [][]string
	var warnings Warnings
	for _, querier := range q.queriers {
		values, err := querier.LabelValues(name)
		values, wrn, err := querier.LabelValues(name)

		if wrn != nil {
			warnings = append(warnings, wrn...)
		}
		if err != nil {
			return nil, err
			q.failedQueriers[querier] = struct{}{}
			// If the error source isn't the primary querier, return the error as a warning and continue.
			if querier != q.primaryQuerier {
				warnings = append(warnings, err)
				continue
			} else {
				return nil, nil, err
			}
		}
		results = append(results, values)
	}
	return mergeStringSlices(results), nil
	return mergeStringSlices(results), warnings, nil
}

func (q *mergeQuerier) IsFailedSet(set SeriesSet) bool {
@@ -310,13 +322,25 @@ func mergeTwoStringSlices(a, b []string) []string {
}

// LabelNames returns all the unique label names present in the block in sorted order.
func (q *mergeQuerier) LabelNames() ([]string, error) {
func (q *mergeQuerier) LabelNames() ([]string, Warnings, error) {
	labelNamesMap := make(map[string]struct{})
	var warnings Warnings
	for _, b := range q.queriers {
		names, err := b.LabelNames()
		if err != nil {
			return nil, errors.Wrap(err, "LabelNames() from Querier")
		names, wrn, err := b.LabelNames()
		if wrn != nil {
			warnings = append(warnings, wrn...)
		}

		if err != nil {
			// If the error source isn't the primary querier, return the error as a warning and continue.
			if b != q.primaryQuerier {
				warnings = append(warnings, err)
				continue
			} else {
				return nil, nil, errors.Wrap(err, "LabelNames() from Querier")
			}
		}

		for _, name := range names {
			labelNamesMap[name] = struct{}{}
		}
@@ -328,7 +352,7 @@ func (q *mergeQuerier) LabelNames() ([]string, error) {
	}
	sort.Strings(labelNames)

	return labelNames, nil
	return labelNames, warnings, nil
}

// Close releases the resources of the Querier.
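The merge querier now distinguishes the primary querier from secondaries: a secondary failure becomes a warning, and only a primary failure aborts the call. A compact sketch of that policy, with plain functions standing in for queriers:

```go
package main

import (
	"errors"
	"fmt"
)

// gather collects results from several sources; an error from the primary
// source aborts the call, while secondary errors are demoted to warnings.
func gather(primary func() ([]string, error), secondaries ...func() ([]string, error)) ([]string, []error, error) {
	out, err := primary()
	if err != nil {
		return nil, nil, err // primary failure is fatal
	}
	var warnings []error
	for _, f := range secondaries {
		vals, err := f()
		if err != nil {
			warnings = append(warnings, err) // secondary failure: warn and continue
			continue
		}
		out = append(out, vals...)
	}
	return out, warnings, nil
}

func main() {
	local := func() ([]string, error) { return []string{"job"}, nil }
	remote := func() ([]string, error) { return nil, errors.New("remote read down") }
	vals, warns, err := gather(local, remote)
	fmt.Println(vals, warns, err) // [job] [remote read down] <nil>
}
```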
@@ -55,10 +55,10 @@ type Querier interface {
	Select(*SelectParams, ...*labels.Matcher) (SeriesSet, Warnings, error)

	// LabelValues returns all potential values for a label name.
	LabelValues(name string) ([]string, error)
	LabelValues(name string) ([]string, Warnings, error)

	// LabelNames returns all the unique label names present in the block in sorted order.
	LabelNames() ([]string, error)
	LabelNames() ([]string, Warnings, error)

	// Close releases the resources of the Querier.
	Close() error
@@ -30,12 +30,12 @@ func (noopQuerier) Select(*SelectParams, ...*labels.Matcher) (SeriesSet, Warning
	return NoopSeriesSet(), nil, nil
}

func (noopQuerier) LabelValues(name string) ([]string, error) {
	return nil, nil
func (noopQuerier) LabelValues(name string) ([]string, Warnings, error) {
	return nil, nil, nil
}

func (noopQuerier) LabelNames() ([]string, error) {
	return nil, nil
func (noopQuerier) LabelNames() ([]string, Warnings, error) {
	return nil, nil, nil
}

func (noopQuerier) Close() error {
@@ -166,7 +166,6 @@ type QueueManager struct {
	client  StorageClient
	watcher *WALWatcher

	seriesMtx            sync.Mutex
	seriesLabels         map[uint64][]prompb.Label
	seriesSegmentIndexes map[uint64]int
	droppedSeries        map[uint64]struct{}
@@ -231,16 +230,10 @@ func NewQueueManager(logger log.Logger, walDir string, samplesIn *ewmaRate, cfg
// Append queues a sample to be sent to the remote storage. Blocks until all samples are
// enqueued on their shards or a shutdown signal is received.
func (t *QueueManager) Append(s []tsdb.RefSample) bool {
	type enqueuable struct {
		ts  prompb.TimeSeries
		ref uint64
	}

	tempSamples := make([]enqueuable, 0, len(s))
	t.seriesMtx.Lock()
outer:
	for _, sample := range s {
		// If we have no labels for the series, due to relabelling or otherwise, don't send the sample.
		if _, ok := t.seriesLabels[sample.Ref]; !ok {
		lbls, ok := t.seriesLabels[sample.Ref]
		if !ok {
			t.droppedSamplesTotal.Inc()
			t.samplesDropped.incr(1)
			if _, ok := t.droppedSeries[sample.Ref]; !ok {
@@ -248,23 +241,6 @@ func (t *QueueManager) Append(s []tsdb.RefSample) bool {
			}
			continue
		}
		tempSamples = append(tempSamples, enqueuable{
			ts: prompb.TimeSeries{
				Labels: t.seriesLabels[sample.Ref],
				Samples: []prompb.Sample{
					prompb.Sample{
						Value:     float64(sample.V),
						Timestamp: sample.T,
					},
				},
			},
			ref: sample.Ref,
		})
	}
	t.seriesMtx.Unlock()

outer:
	for _, sample := range tempSamples {
		// This will only loop if the queues are being resharded.
		backoff := t.cfg.MinBackoff
		for {
@@ -274,7 +250,16 @@ outer:
			default:
			}

			if t.shards.enqueue(sample.ref, sample.ts) {
			ts := prompb.TimeSeries{
				Labels: lbls,
				Samples: []prompb.Sample{
					prompb.Sample{
						Value:     float64(sample.V),
						Timestamp: sample.T,
					},
				},
			}
			if t.shards.enqueue(sample.Ref, ts) {
				continue outer
			}
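The enqueue loop only spins while the shards are being resharded, pausing with a growing backoff between attempts. A toy version of that retry loop (the channel and durations are stand-ins for the real shard queues and queue config):

```go
package main

import (
	"fmt"
	"time"
)

// enqueueWithBackoff keeps retrying a bounded queue, doubling the pause up
// to maxBackoff - the same shape as Append's loop while shards reshard.
func enqueueWithBackoff(q chan int, v int, minBackoff, maxBackoff time.Duration) {
	backoff := minBackoff
	for {
		select {
		case q <- v:
			return // enqueued successfully
		default:
		}
		time.Sleep(backoff)
		backoff *= 2
		if backoff > maxBackoff {
			backoff = maxBackoff
		}
	}
}

func main() {
	q := make(chan int, 1)
	enqueueWithBackoff(q, 42, 30*time.Millisecond, 100*time.Millisecond)
	fmt.Println(<-q)
}
```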
@ -336,8 +321,6 @@ func (t *QueueManager) Stop() {
|
|||
t.watcher.Stop()
|
||||
|
||||
// On shutdown, release the strings in the labels from the intern pool.
|
||||
t.seriesMtx.Lock()
|
||||
defer t.seriesMtx.Unlock()
|
||||
for _, labels := range t.seriesLabels {
|
||||
release(labels)
|
||||
}
|
||||
|
@ -357,11 +340,6 @@ func (t *QueueManager) Stop() {
|
|||
|
||||
// StoreSeries keeps track of which series we know about for lookups when sending samples to remote.
|
||||
func (t *QueueManager) StoreSeries(series []tsdb.RefSeries, index int) {
|
||||
// Lock before any calls to labelsToLabels proto, as that's where string interning is done.
|
||||
t.seriesMtx.Lock()
|
||||
defer t.seriesMtx.Unlock()
|
||||
|
||||
temp := make(map[uint64][]prompb.Label, len(series))
|
||||
for _, s := range series {
|
||||
ls := processExternalLabels(s.Labels, t.externalLabels)
|
||||
rl := relabel.Process(ls, t.relabelConfigs...)
|
||||
|
@@ -369,19 +347,16 @@ func (t *QueueManager) StoreSeries(series []tsdb.RefSeries, index int) {
            t.droppedSeries[s.Ref] = struct{}{}
            continue
        }
-        temp[s.Ref] = labelsToLabelsProto(rl)
-    }
-
-    for ref, labels := range temp {
-        t.seriesSegmentIndexes[ref] = index
+        t.seriesSegmentIndexes[s.Ref] = index
+        labels := labelsToLabelsProto(rl)

        // We should not ever be replacing a series labels in the map, but just
        // in case we do we need to ensure we do not leak the replaced interned
        // strings.
-        if orig, ok := t.seriesLabels[ref]; ok {
+        if orig, ok := t.seriesLabels[s.Ref]; ok {
            release(orig)
        }
-        t.seriesLabels[ref] = labels
+        t.seriesLabels[s.Ref] = labels
    }
}
@@ -389,9 +364,6 @@ func (t *QueueManager) StoreSeries(series []tsdb.RefSeries, index int) {
// stored series records with the checkpoints index number, so we can now
// delete any ref ID's lower than that # from the two maps.
func (t *QueueManager) SeriesReset(index int) {
    t.seriesMtx.Lock()
    defer t.seriesMtx.Unlock()

    // Check for series that are in segments older than the checkpoint
    // that were not also present in the checkpoint.
    for k, v := range t.seriesSegmentIndexes {
@@ -661,9 +633,10 @@ func (s *shards) runShard(ctx context.Context, i int, queue chan prompb.TimeSeries) {
    // Send batches of at most MaxSamplesPerSend samples to the remote storage.
    // If we have fewer samples than that, flush them out after a deadline
    // anyways.
-    pendingSamples := []prompb.TimeSeries{}
+    max := s.qm.cfg.MaxSamplesPerSend
+    pendingSamples := make([]prompb.TimeSeries, 0, max)
+    var buf []byte

    timer := time.NewTimer(time.Duration(s.qm.cfg.BatchSendDeadline))
    stop := func() {
        if !timer.Stop() {
@@ -684,7 +657,7 @@ func (s *shards) runShard(ctx context.Context, i int, queue chan prompb.TimeSeries) {
            if !ok {
                if len(pendingSamples) > 0 {
                    level.Debug(s.qm.logger).Log("msg", "Flushing samples to remote storage...", "count", len(pendingSamples))
-                    s.sendSamples(ctx, pendingSamples)
+                    s.sendSamples(ctx, pendingSamples, &buf)
                    s.qm.pendingSamplesMetric.Sub(float64(len(pendingSamples)))
                    level.Debug(s.qm.logger).Log("msg", "Done flushing.")
                }
@@ -698,8 +671,8 @@ func (s *shards) runShard(ctx context.Context, i int, queue chan prompb.TimeSeries) {
            s.qm.pendingSamplesMetric.Inc()

            if len(pendingSamples) >= max {
-                s.sendSamples(ctx, pendingSamples[:max])
-                pendingSamples = pendingSamples[max:]
+                s.sendSamples(ctx, pendingSamples[:max], &buf)
+                pendingSamples = append(pendingSamples[:0], pendingSamples[max:]...)
                s.qm.pendingSamplesMetric.Sub(float64(max))

                stop()
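The batching change above also swaps `pendingSamples = pendingSamples[max:]` for `append(pendingSamples[:0], pendingSamples[max:]...)`. Re-slicing keeps moving the slice's start forward, so the buffer's capacity is gradually lost; shifting the unsent tail back to index zero keeps the whole backing array reusable. A minimal, self-contained sketch of the idiom (the names below are illustrative, not from the patch):

package main

import "fmt"

func sendBatch(batch []int) { fmt.Println("sent", batch) }

func main() {
    pending := make([]int, 0, 8)
    pending = append(pending, 1, 2, 3, 4, 5)
    max := 3

    sendBatch(pending[:max])
    // Shift the unsent tail to the front of the same backing array.
    pending = append(pending[:0], pending[max:]...)

    fmt.Println(pending, cap(pending)) // [4 5] 8: capacity is preserved
}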
@@ -707,10 +680,10 @@ func (s *shards) runShard(ctx context.Context, i int, queue chan prompb.TimeSeries) {
            }

        case <-timer.C:
-            if len(pendingSamples) > 0 {
-                level.Debug(s.qm.logger).Log("msg", "runShard timer ticked, sending samples", "samples", len(pendingSamples), "shard", shardNum)
-                n := len(pendingSamples)
-                s.sendSamples(ctx, pendingSamples)
+            n := len(pendingSamples)
+            if n > 0 {
+                level.Debug(s.qm.logger).Log("msg", "runShard timer ticked, sending samples", "samples", n, "shard", shardNum)
+                s.sendSamples(ctx, pendingSamples, &buf)
                pendingSamples = pendingSamples[:0]
                s.qm.pendingSamplesMetric.Sub(float64(n))
            }
@@ -719,9 +692,9 @@ func (s *shards) runShard(ctx context.Context, i int, queue chan prompb.TimeSeries) {
    }
}

-func (s *shards) sendSamples(ctx context.Context, samples []prompb.TimeSeries) {
+func (s *shards) sendSamples(ctx context.Context, samples []prompb.TimeSeries, buf *[]byte) {
    begin := time.Now()
-    err := s.sendSamplesWithBackoff(ctx, samples)
+    err := s.sendSamplesWithBackoff(ctx, samples, buf)
    if err != nil {
        level.Error(s.qm.logger).Log("msg", "non-recoverable error", "count", len(samples), "err", err)
        s.qm.failedSamplesTotal.Add(float64(len(samples)))
@@ -734,9 +707,10 @@ func (s *shards) sendSamples(ctx context.Context, samples []prompb.TimeSeries) {
}

// sendSamples to the remote storage with backoff for recoverable errors.
-func (s *shards) sendSamplesWithBackoff(ctx context.Context, samples []prompb.TimeSeries) error {
+func (s *shards) sendSamplesWithBackoff(ctx context.Context, samples []prompb.TimeSeries, buf *[]byte) error {
    backoff := s.qm.cfg.MinBackoff
-    req, highest, err := buildWriteRequest(samples)
+    req, highest, err := buildWriteRequest(samples, *buf)
+    *buf = req
    if err != nil {
        // Failing to build the write request is non-recoverable, since it will
        // only error if marshaling the proto to bytes fails.
@@ -774,7 +748,7 @@ func (s *shards) sendSamplesWithBackoff(ctx context.Context, samples []prompb.TimeSeries, buf *[]byte) error {
    }
}

-func buildWriteRequest(samples []prompb.TimeSeries) ([]byte, int64, error) {
+func buildWriteRequest(samples []prompb.TimeSeries, buf []byte) ([]byte, int64, error) {
    var highest int64
    for _, ts := range samples {
        // At the moment we only ever append a TimeSeries with a single sample in it.
@@ -791,6 +765,11 @@ func buildWriteRequest(samples []prompb.TimeSeries, buf []byte) ([]byte, int64, error) {
        return nil, highest, err
    }

-    compressed := snappy.Encode(nil, data)
+    // snappy uses len() to see if it needs to allocate a new slice. Make the
+    // buffer as long as possible.
+    if buf != nil {
+        buf = buf[0:cap(buf)]
+    }
+    compressed := snappy.Encode(buf, data)
    return compressed, highest, nil
}
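The buildWriteRequest change reuses one scratch buffer across sends: the encoded request is handed back through *buf, and the `buf = buf[0:cap(buf)]` step matters because snappy.Encode decides whether to allocate based on len(dst), not cap(dst). A standalone sketch of the same pattern, assuming only github.com/golang/snappy (the helper name is illustrative):

package main

import (
    "fmt"

    "github.com/golang/snappy"
)

// encodeReusing compresses data into buf, allocating a fresh slice only
// when the compressed output cannot fit into buf's capacity.
func encodeReusing(buf, data []byte) []byte {
    // Expose the buffer's full capacity; snappy.Encode checks len(dst).
    if buf != nil {
        buf = buf[:cap(buf)]
    }
    return snappy.Encode(buf, data)
}

func main() {
    var buf []byte
    for _, payload := range [][]byte{[]byte("hello"), []byte("world")} {
        buf = encodeReusing(buf, payload)
        fmt.Println(len(buf)) // compressed length; the backing array is reused when possible
    }
}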
@@ -260,18 +260,8 @@ func TestReshardRaceWithStop(t *testing.T) {

func TestReleaseNoninternedString(t *testing.T) {
    c := NewTestStorageClient()
-    var m *QueueManager
-    h := sync.Mutex{}
-
-    h.Lock()
-
-    m = NewQueueManager(nil, "", newEWMARate(ewmaWeight, shardUpdateDuration), config.DefaultQueueConfig, nil, nil, c, defaultFlushDeadline)
+    m := NewQueueManager(nil, "", newEWMARate(ewmaWeight, shardUpdateDuration), config.DefaultQueueConfig, nil, nil, c, defaultFlushDeadline)
    m.Start()
-    go func() {
-        for {
-            m.SeriesReset(1)
-        }
-    }()

    for i := 1; i < 1000; i++ {
        m.StoreSeries([]tsdb.RefSeries{
@@ -285,6 +275,7 @@ func TestReleaseNoninternedString(t *testing.T) {
                },
            },
        }, 0)
+        m.SeriesReset(1)
    }

    metric := client_testutil.ToFloat64(noReferenceReleases)
@@ -323,6 +314,7 @@ type TestStorageClient struct {
    expectedSamples map[string][]prompb.Sample
    wg              sync.WaitGroup
    mtx             sync.Mutex
+    buf             []byte
}

func NewTestStorageClient() *TestStorageClient {
@@ -349,21 +341,36 @@ func (c *TestStorageClient) expectSamples(ss []tsdb.RefSample, series []tsdb.RefSeries) {
    c.wg.Add(len(ss))
}

-func (c *TestStorageClient) waitForExpectedSamples(t *testing.T) {
+func (c *TestStorageClient) waitForExpectedSamples(tb testing.TB) {
    c.wg.Wait()
    c.mtx.Lock()
    defer c.mtx.Unlock()
    for ts, expectedSamples := range c.expectedSamples {
        if !reflect.DeepEqual(expectedSamples, c.receivedSamples[ts]) {
-            t.Fatalf("%s: Expected %v, got %v", ts, expectedSamples, c.receivedSamples[ts])
+            tb.Fatalf("%s: Expected %v, got %v", ts, expectedSamples, c.receivedSamples[ts])
        }
    }
}

+func (c *TestStorageClient) expectSampleCount(ss []tsdb.RefSample) {
+    c.mtx.Lock()
+    defer c.mtx.Unlock()
+    c.wg.Add(len(ss))
+}
+
+func (c *TestStorageClient) waitForExpectedSampleCount() {
+    c.wg.Wait()
+}
+
func (c *TestStorageClient) Store(_ context.Context, req []byte) error {
    c.mtx.Lock()
    defer c.mtx.Unlock()
-    reqBuf, err := snappy.Decode(nil, req)
+    // nil buffers are ok for snappy, ignore cast error.
+    if c.buf != nil {
+        c.buf = c.buf[:cap(c.buf)]
+    }
+    reqBuf, err := snappy.Decode(c.buf, req)
+    c.buf = reqBuf
    if err != nil {
        return err
    }
@@ -421,6 +428,39 @@ func (c *TestBlockingStorageClient) Name() string {
    return "testblockingstorageclient"
}

+func BenchmarkSampleDelivery(b *testing.B) {
+    // Let's create an even number of send batches so we don't run into the
+    // batch timeout case.
+    n := config.DefaultQueueConfig.MaxSamplesPerSend * 10
+    samples, series := createTimeseries(n)
+
+    c := NewTestStorageClient()
+
+    cfg := config.DefaultQueueConfig
+    cfg.BatchSendDeadline = model.Duration(100 * time.Millisecond)
+    cfg.MaxShards = 1
+
+    dir, err := ioutil.TempDir("", "BenchmarkSampleDelivery")
+    testutil.Ok(b, err)
+    defer os.RemoveAll(dir)
+
+    m := NewQueueManager(nil, dir, newEWMARate(ewmaWeight, shardUpdateDuration), cfg, nil, nil, c, defaultFlushDeadline)
+    m.StoreSeries(series, 0)
+
+    // These should be received by the client.
+    m.Start()
+    defer m.Stop()
+
+    b.ResetTimer()
+    for i := 0; i < b.N; i++ {
+        c.expectSampleCount(samples)
+        m.Append(samples)
+        c.waitForExpectedSampleCount()
+    }
+    // Do not include shutdown
+    b.StopTimer()
+}
+
func BenchmarkStartup(b *testing.B) {
    dir := os.Getenv("WALDIR")
    if dir == "" {
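The new benchmark above can be run on its own with the standard Go tooling, e.g. `go test -run='^$' -bench=BenchmarkSampleDelivery` in the package holding the queue manager tests; `-run='^$'` skips the unit tests so only the benchmark executes.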
@@ -77,15 +77,15 @@ func (q *querier) Select(p *storage.SelectParams, matchers ...*labels.Matcher) (storage.SeriesSet, storage.Warnings, error) {
}

// LabelValues implements storage.Querier and is a noop.
-func (q *querier) LabelValues(name string) ([]string, error) {
+func (q *querier) LabelValues(name string) ([]string, storage.Warnings, error) {
    // TODO implement?
-    return nil, nil
+    return nil, nil, nil
}

// LabelNames implements storage.Querier and is a noop.
-func (q *querier) LabelNames() ([]string, error) {
+func (q *querier) LabelNames() ([]string, storage.Warnings, error) {
    // TODO implement?
-    return nil, nil
+    return nil, nil, nil
}

// Close implements storage.Querier and is a noop.
@@ -15,13 +15,10 @@ package remote

import (
    "context"
-    "crypto/md5"
-    "encoding/json"
    "sync"
    "time"

    "github.com/go-kit/kit/log"
-    "github.com/go-kit/kit/log/level"

    "github.com/prometheus/client_golang/prometheus"
    "github.com/prometheus/common/model"
@@ -40,13 +37,7 @@ type Storage struct {
    logger log.Logger
    mtx    sync.Mutex

-    configHash [16]byte
-
    // For writes
-    walDir        string
-    queues        []*QueueManager
-    samplesIn     *ewmaRate
-    flushDeadline time.Duration
+    rws *WriteStorage

    // For reads
    queryables []storage.Queryable
@@ -61,28 +52,17 @@ func NewStorage(l log.Logger, reg prometheus.Registerer, stCallback startTimeCallback, walDir string, flushDeadline time.Duration) *Storage {
    s := &Storage{
        logger:                 logging.Dedupe(l, 1*time.Minute),
        localStartTimeCallback: stCallback,
-        flushDeadline:          flushDeadline,
-        samplesIn:              newEWMARate(ewmaWeight, shardUpdateDuration),
-        walDir:                 walDir,
+        rws:                    NewWriteStorage(l, walDir, flushDeadline),
    }
-    go s.run()
    return s
}

-func (s *Storage) run() {
-    ticker := time.NewTicker(shardUpdateDuration)
-    defer ticker.Stop()
-    for range ticker.C {
-        s.samplesIn.tick()
-    }
-}
-
// ApplyConfig updates the state as the new config requires.
func (s *Storage) ApplyConfig(conf *config.Config) error {
    s.mtx.Lock()
    defer s.mtx.Unlock()

-    if err := s.applyRemoteWriteConfig(conf); err != nil {
+    if err := s.rws.ApplyConfig(conf); err != nil {
        return err
    }

@@ -113,66 +93,6 @@ func (s *Storage) ApplyConfig(conf *config.Config) error {
    return nil
}

-// applyRemoteWriteConfig applies the remote write config only if the config has changed.
-// The caller must hold the lock on s.mtx.
-func (s *Storage) applyRemoteWriteConfig(conf *config.Config) error {
-    // Remote write queues only need to change if the remote write config or
-    // external labels change. Hash these together and only reload if the hash
-    // changes.
-    cfgBytes, err := json.Marshal(conf.RemoteWriteConfigs)
-    if err != nil {
-        return err
-    }
-    externalLabelBytes, err := json.Marshal(conf.GlobalConfig.ExternalLabels)
-    if err != nil {
-        return err
-    }
-
-    hash := md5.Sum(append(cfgBytes, externalLabelBytes...))
-    if hash == s.configHash {
-        level.Debug(s.logger).Log("msg", "remote write config has not changed, no need to restart QueueManagers")
-        return nil
-    }
-
-    s.configHash = hash
-
-    // Update write queues
-    newQueues := []*QueueManager{}
-    // TODO: we should only stop & recreate queues which have changes,
-    // as this can be quite disruptive.
-    for i, rwConf := range conf.RemoteWriteConfigs {
-        c, err := NewClient(i, &ClientConfig{
-            URL:              rwConf.URL,
-            Timeout:          rwConf.RemoteTimeout,
-            HTTPClientConfig: rwConf.HTTPClientConfig,
-        })
-        if err != nil {
-            return err
-        }
-        newQueues = append(newQueues, NewQueueManager(
-            s.logger,
-            s.walDir,
-            s.samplesIn,
-            rwConf.QueueConfig,
-            conf.GlobalConfig.ExternalLabels,
-            rwConf.WriteRelabelConfigs,
-            c,
-            s.flushDeadline,
-        ))
-    }
-
-    for _, q := range s.queues {
-        q.Stop()
-    }
-
-    s.queues = newQueues
-    for _, q := range s.queues {
-        q.Start()
-    }
-
-    return nil
-}
-
// StartTime implements the Storage interface.
func (s *Storage) StartTime() (int64, error) {
    return int64(model.Latest), nil
@@ -196,16 +116,16 @@ func (s *Storage) Querier(ctx context.Context, mint, maxt int64) (storage.Querier, error) {
    return storage.NewMergeQuerier(nil, queriers), nil
}

+// Appender implements storage.Storage.
+func (s *Storage) Appender() (storage.Appender, error) {
+    return s.rws.Appender()
+}
+
// Close the background processing of the storage queues.
func (s *Storage) Close() error {
    s.mtx.Lock()
    defer s.mtx.Unlock()
-
-    for _, q := range s.queues {
-        q.Stop()
-    }
-
-    return nil
+    return s.rws.Close()
}

func labelsToEqualityMatchers(ls model.LabelSet) []*labels.Matcher {
@@ -20,7 +20,6 @@ import (

    "github.com/prometheus/client_golang/prometheus"
    "github.com/prometheus/prometheus/config"
-    "github.com/prometheus/prometheus/pkg/labels"
    "github.com/prometheus/prometheus/util/testutil"
)

@@ -42,7 +41,7 @@ func TestStorageLifecycle(t *testing.T) {
    s.ApplyConfig(conf)

    // make sure remote write has a queue.
-    testutil.Equals(t, 1, len(s.queues))
+    testutil.Equals(t, 1, len(s.rws.queues))

    // make sure remote read has a queryable.
    testutil.Equals(t, 1, len(s.queryables))
@@ -51,33 +50,6 @@ func TestStorageLifecycle(t *testing.T) {
    testutil.Ok(t, err)
}

-func TestUpdateExternalLabels(t *testing.T) {
-    dir, err := ioutil.TempDir("", "TestUpdateExternalLabels")
-    testutil.Ok(t, err)
-    defer os.RemoveAll(dir)
-
-    s := NewStorage(nil, prometheus.DefaultRegisterer, nil, dir, defaultFlushDeadline)
-
-    externalLabels := labels.FromStrings("external", "true")
-    conf := &config.Config{
-        GlobalConfig: config.GlobalConfig{},
-        RemoteWriteConfigs: []*config.RemoteWriteConfig{
-            &config.DefaultRemoteWriteConfig,
-        },
-    }
-    s.ApplyConfig(conf)
-    testutil.Equals(t, 1, len(s.queues))
-    testutil.Equals(t, labels.Labels(nil), s.queues[0].externalLabels)
-
-    conf.GlobalConfig.ExternalLabels = externalLabels
-    s.ApplyConfig(conf)
-    testutil.Equals(t, 1, len(s.queues))
-    testutil.Equals(t, externalLabels, s.queues[0].externalLabels)
-
-    err = s.Close()
-    testutil.Ok(t, err)
-}
-
func TestUpdateRemoteReadConfigs(t *testing.T) {
    dir, err := ioutil.TempDir("", "TestUpdateRemoteReadConfigs")
    testutil.Ok(t, err)
@@ -100,31 +72,3 @@ func TestUpdateRemoteReadConfigs(t *testing.T) {
    err = s.Close()
    testutil.Ok(t, err)
}
-
-func TestUpdateRemoteWriteConfigsNoop(t *testing.T) {
-    dir, err := ioutil.TempDir("", "TestUpdateRemoteWriteConfigsNoop")
-    testutil.Ok(t, err)
-    defer os.RemoveAll(dir)
-
-    s := NewStorage(nil, prometheus.DefaultRegisterer, nil, dir, defaultFlushDeadline)
-
-    conf := &config.Config{
-        GlobalConfig: config.GlobalConfig{},
-        RemoteWriteConfigs: []*config.RemoteWriteConfig{
-            &config.DefaultRemoteWriteConfig,
-        },
-    }
-    s.ApplyConfig(conf)
-    testutil.Equals(t, 1, len(s.queues))
-    queue := s.queues[0]
-
-    conf.RemoteReadConfigs = []*config.RemoteReadConfig{
-        &config.DefaultRemoteReadConfig,
-    }
-    s.ApplyConfig(conf)
-    testutil.Equals(t, 1, len(s.queues))
-    testutil.Assert(t, queue == s.queues[0], "Queue pointer should have remained the same")
-
-    err = s.Close()
-    testutil.Ok(t, err)
-}
@@ -78,6 +78,7 @@ var (
        },
        []string{queue},
    )
+    liveReaderMetrics = wal.NewLiveReaderMetrics(prometheus.DefaultRegisterer)
)

func init() {
@@ -293,7 +294,7 @@ func (w *WALWatcher) watch(segmentNum int, tail bool) error {
    }
    defer segment.Close()

-    reader := wal.NewLiveReader(w.logger, segment)
+    reader := wal.NewLiveReader(w.logger, liveReaderMetrics, segment)

    readTicker := time.NewTicker(readPeriod)
    defer readTicker.Stop()
@@ -418,6 +419,7 @@ func (w *WALWatcher) readSegment(r *wal.LiveReader, segmentNum int, tail bool) error {
        dec     tsdb.RecordDecoder
        series  []tsdb.RefSeries
        samples []tsdb.RefSample
+        send    []tsdb.RefSample
    )

    for r.Next() && !isClosed(w.quit) {
@@ -444,7 +446,6 @@ func (w *WALWatcher) readSegment(r *wal.LiveReader, segmentNum int, tail bool) error {
                w.recordDecodeFailsMetric.Inc()
                return err
            }
-            var send []tsdb.RefSample
            for _, s := range samples {
                if s.T > w.startTime {
                    send = append(send, s)
@@ -453,6 +454,7 @@ func (w *WALWatcher) readSegment(r *wal.LiveReader, segmentNum int, tail bool) error {
            if len(send) > 0 {
                // Blocks until the sample is sent to all remote write endpoints or closed (because enqueue blocks).
                w.writer.Append(send)
+                send = send[:0]
            }

        case tsdb.RecordTombstones:
@@ -508,7 +510,7 @@ func (w *WALWatcher) readCheckpoint(checkpointDir string) error {
    }
    defer sr.Close()

-    r := wal.NewLiveReader(w.logger, sr)
+    r := wal.NewLiveReader(w.logger, liveReaderMetrics, sr)
    if err := w.readSegment(r, index, false); err != io.EOF && err != nil {
        return errors.Wrap(err, "readSegment")
    }
@@ -92,71 +92,75 @@ func TestTailSamples(t *testing.T) {
    pageSize := 32 * 1024
    const seriesCount = 10
    const samplesCount = 250
-    now := time.Now()
+    for _, compress := range []bool{false, true} {
+        t.Run(fmt.Sprintf("compress=%t", compress), func(t *testing.T) {
+            now := time.Now()

            dir, err := ioutil.TempDir("", "readCheckpoint")
            testutil.Ok(t, err)
            defer os.RemoveAll(dir)

            wdir := path.Join(dir, "wal")
            err = os.Mkdir(wdir, 0777)
            testutil.Ok(t, err)

            enc := tsdb.RecordEncoder{}
-            w, err := wal.NewSize(nil, nil, wdir, 128*pageSize)
+            w, err := wal.NewSize(nil, nil, wdir, 128*pageSize, compress)
            testutil.Ok(t, err)

            // Write to the initial segment then checkpoint.
            for i := 0; i < seriesCount; i++ {
                ref := i + 100
                series := enc.Series([]tsdb.RefSeries{
                    tsdb.RefSeries{
                        Ref:    uint64(ref),
                        Labels: labels.Labels{labels.Label{Name: "__name__", Value: fmt.Sprintf("metric_%d", i)}},
                    },
                }, nil)
                testutil.Ok(t, w.Log(series))

                for j := 0; j < samplesCount; j++ {
                    inner := rand.Intn(ref + 1)
                    sample := enc.Samples([]tsdb.RefSample{
                        tsdb.RefSample{
                            Ref: uint64(inner),
                            T:   int64(now.UnixNano()) + 1,
                            V:   float64(i),
                        },
                    }, nil)
                    testutil.Ok(t, w.Log(sample))
                }
            }

            // Start read after checkpoint, no more data written.
            first, last, err := w.Segments()
            testutil.Ok(t, err)

            wt := newWriteToMock()
            watcher := NewWALWatcher(nil, "", wt, dir)
            watcher.startTime = now.UnixNano()

            // Set the Watcher's metrics so they're not nil pointers.
            watcher.setMetrics()
            for i := first; i <= last; i++ {
                segment, err := wal.OpenReadSegment(wal.SegmentName(watcher.walDir, i))
                testutil.Ok(t, err)
                defer segment.Close()

-                reader := wal.NewLiveReader(nil, segment)
+                reader := wal.NewLiveReader(nil, liveReaderMetrics, segment)
                // Use tail true so we can ensure we got the right number of samples.
                watcher.readSegment(reader, i, true)
            }

            expectedSeries := seriesCount
            expectedSamples := seriesCount * samplesCount
            retry(t, defaultRetryInterval, defaultRetries, func() bool {
                return wt.checkNumLabels() >= expectedSeries
            })
            testutil.Equals(t, expectedSeries, wt.checkNumLabels())
            testutil.Equals(t, expectedSamples, wt.samplesAppended)
+        })
+    }
}

func TestReadToEndNoCheckpoint(t *testing.T) {
@@ -164,61 +168,65 @@ func TestReadToEndNoCheckpoint(t *testing.T) {
    const seriesCount = 10
    const samplesCount = 250

+    for _, compress := range []bool{false, true} {
+        t.Run(fmt.Sprintf("compress=%t", compress), func(t *testing.T) {
            dir, err := ioutil.TempDir("", "readToEnd_noCheckpoint")
            testutil.Ok(t, err)
            defer os.RemoveAll(dir)
            wdir := path.Join(dir, "wal")
            err = os.Mkdir(wdir, 0777)
            testutil.Ok(t, err)

-            w, err := wal.NewSize(nil, nil, wdir, 128*pageSize)
+            w, err := wal.NewSize(nil, nil, wdir, 128*pageSize, compress)
            testutil.Ok(t, err)

            var recs [][]byte

            enc := tsdb.RecordEncoder{}

            for i := 0; i < seriesCount; i++ {
                series := enc.Series([]tsdb.RefSeries{
                    tsdb.RefSeries{
                        Ref:    uint64(i),
                        Labels: labels.Labels{labels.Label{Name: "__name__", Value: fmt.Sprintf("metric_%d", i)}},
                    },
                }, nil)
                recs = append(recs, series)
                for j := 0; j < samplesCount; j++ {
                    sample := enc.Samples([]tsdb.RefSample{
                        tsdb.RefSample{
                            Ref: uint64(j),
                            T:   int64(i),
                            V:   float64(i),
                        },
                    }, nil)

                    recs = append(recs, sample)

                    // Randomly batch up records.
                    if rand.Intn(4) < 3 {
                        testutil.Ok(t, w.Log(recs...))
                        recs = recs[:0]
                    }
                }
            }
            testutil.Ok(t, w.Log(recs...))

            _, _, err = w.Segments()
            testutil.Ok(t, err)

            wt := newWriteToMock()
            watcher := NewWALWatcher(nil, "", wt, dir)
            go watcher.Start()

            expected := seriesCount
            retry(t, defaultRetryInterval, defaultRetries, func() bool {
                return wt.checkNumLabels() >= expected
            })
            watcher.Stop()
            testutil.Equals(t, expected, wt.checkNumLabels())
+        })
+    }
}

func TestReadToEndWithCheckpoint(t *testing.T) {
@@ -228,79 +236,83 @@ func TestReadToEndWithCheckpoint(t *testing.T) {
    const seriesCount = 10
    const samplesCount = 250

+    for _, compress := range []bool{false, true} {
+        t.Run(fmt.Sprintf("compress=%t", compress), func(t *testing.T) {
            dir, err := ioutil.TempDir("", "readToEnd_withCheckpoint")
            testutil.Ok(t, err)
            defer os.RemoveAll(dir)

            wdir := path.Join(dir, "wal")
            err = os.Mkdir(wdir, 0777)
            testutil.Ok(t, err)

            enc := tsdb.RecordEncoder{}
-            w, err := wal.NewSize(nil, nil, wdir, segmentSize)
+            w, err := wal.NewSize(nil, nil, wdir, segmentSize, compress)
            testutil.Ok(t, err)

            // Write to the initial segment then checkpoint.
            for i := 0; i < seriesCount; i++ {
                ref := i + 100
                series := enc.Series([]tsdb.RefSeries{
                    tsdb.RefSeries{
                        Ref:    uint64(ref),
                        Labels: labels.Labels{labels.Label{Name: "__name__", Value: fmt.Sprintf("metric_%d", i)}},
                    },
                }, nil)
                testutil.Ok(t, w.Log(series))

                for j := 0; j < samplesCount; j++ {
                    inner := rand.Intn(ref + 1)
                    sample := enc.Samples([]tsdb.RefSample{
                        tsdb.RefSample{
                            Ref: uint64(inner),
                            T:   int64(i),
                            V:   float64(i),
                        },
                    }, nil)
                    testutil.Ok(t, w.Log(sample))
                }
            }

            tsdb.Checkpoint(w, 0, 1, func(x uint64) bool { return true }, 0)
            w.Truncate(1)

            // Write more records after checkpointing.
            for i := 0; i < seriesCount; i++ {
                series := enc.Series([]tsdb.RefSeries{
                    tsdb.RefSeries{
                        Ref:    uint64(i),
                        Labels: labels.Labels{labels.Label{Name: "__name__", Value: fmt.Sprintf("metric_%d", i)}},
                    },
                }, nil)
                testutil.Ok(t, w.Log(series))

                for j := 0; j < samplesCount; j++ {
                    sample := enc.Samples([]tsdb.RefSample{
                        tsdb.RefSample{
                            Ref: uint64(j),
                            T:   int64(i),
                            V:   float64(i),
                        },
                    }, nil)
                    testutil.Ok(t, w.Log(sample))
                }
            }

            _, _, err = w.Segments()
            testutil.Ok(t, err)
            wt := newWriteToMock()
            watcher := NewWALWatcher(nil, "", wt, dir)
            go watcher.Start()

            expected := seriesCount * 2
            retry(t, defaultRetryInterval, defaultRetries, func() bool {
                return wt.checkNumLabels() >= expected
            })
            watcher.Stop()
            testutil.Equals(t, expected, wt.checkNumLabels())
+        })
+    }
}

func TestReadCheckpoint(t *testing.T) {
@@ -308,61 +320,65 @@ func TestReadCheckpoint(t *testing.T) {
    const seriesCount = 10
    const samplesCount = 250

+    for _, compress := range []bool{false, true} {
+        t.Run(fmt.Sprintf("compress=%t", compress), func(t *testing.T) {
            dir, err := ioutil.TempDir("", "readCheckpoint")
            testutil.Ok(t, err)
            defer os.RemoveAll(dir)

            wdir := path.Join(dir, "wal")
            err = os.Mkdir(wdir, 0777)
            testutil.Ok(t, err)

            os.Create(wal.SegmentName(wdir, 30))

            enc := tsdb.RecordEncoder{}
-            w, err := wal.NewSize(nil, nil, wdir, 128*pageSize)
+            w, err := wal.NewSize(nil, nil, wdir, 128*pageSize, compress)
            testutil.Ok(t, err)

            // Write to the initial segment then checkpoint.
            for i := 0; i < seriesCount; i++ {
                ref := i + 100
                series := enc.Series([]tsdb.RefSeries{
                    tsdb.RefSeries{
                        Ref:    uint64(ref),
                        Labels: labels.Labels{labels.Label{Name: "__name__", Value: fmt.Sprintf("metric_%d", i)}},
                    },
                }, nil)
                testutil.Ok(t, w.Log(series))

                for j := 0; j < samplesCount; j++ {
                    inner := rand.Intn(ref + 1)
                    sample := enc.Samples([]tsdb.RefSample{
                        tsdb.RefSample{
                            Ref: uint64(inner),
                            T:   int64(i),
                            V:   float64(i),
                        },
                    }, nil)
                    testutil.Ok(t, w.Log(sample))
                }
            }
            tsdb.Checkpoint(w, 30, 31, func(x uint64) bool { return true }, 0)
            w.Truncate(32)

            // Start read after checkpoint, no more data written.
            _, _, err = w.Segments()
            testutil.Ok(t, err)

            wt := newWriteToMock()
            watcher := NewWALWatcher(nil, "", wt, dir)
            // watcher.
            go watcher.Start()

            expectedSeries := seriesCount
            retry(t, defaultRetryInterval, defaultRetries, func() bool {
                return wt.checkNumLabels() >= expectedSeries
            })
            watcher.Stop()
            testutil.Equals(t, expectedSeries, wt.checkNumLabels())
+        })
+    }
}

func TestReadCheckpointMultipleSegments(t *testing.T) {
@@ -372,65 +388,69 @@ func TestReadCheckpointMultipleSegments(t *testing.T) {
    const seriesCount = 20
    const samplesCount = 300

+    for _, compress := range []bool{false, true} {
+        t.Run(fmt.Sprintf("compress=%t", compress), func(t *testing.T) {
            dir, err := ioutil.TempDir("", "readCheckpoint")
            testutil.Ok(t, err)
            defer os.RemoveAll(dir)

            wdir := path.Join(dir, "wal")
            err = os.Mkdir(wdir, 0777)
            testutil.Ok(t, err)

            enc := tsdb.RecordEncoder{}
-            w, err := wal.NewSize(nil, nil, wdir, pageSize)
+            w, err := wal.NewSize(nil, nil, wdir, pageSize, compress)
            testutil.Ok(t, err)

            // Write a bunch of data.
            for i := 0; i < segments; i++ {
                for j := 0; j < seriesCount; j++ {
                    ref := j + (i * 100)
                    series := enc.Series([]tsdb.RefSeries{
                        tsdb.RefSeries{
                            Ref:    uint64(ref),
                            Labels: labels.Labels{labels.Label{Name: "__name__", Value: fmt.Sprintf("metric_%d", j)}},
                        },
                    }, nil)
                    testutil.Ok(t, w.Log(series))

                    for k := 0; k < samplesCount; k++ {
                        inner := rand.Intn(ref + 1)
                        sample := enc.Samples([]tsdb.RefSample{
                            tsdb.RefSample{
                                Ref: uint64(inner),
                                T:   int64(i),
                                V:   float64(i),
                            },
                        }, nil)
                        testutil.Ok(t, w.Log(sample))
                    }
                }
            }

            // At this point we should have at least 6 segments, lets create a checkpoint dir of the first 5.
            checkpointDir := dir + "/wal/checkpoint.000004"
            err = os.Mkdir(checkpointDir, 0777)
            testutil.Ok(t, err)
            for i := 0; i <= 4; i++ {
                err := os.Rename(wal.SegmentName(dir+"/wal", i), wal.SegmentName(checkpointDir, i))
                testutil.Ok(t, err)
            }

            wt := newWriteToMock()
            watcher := NewWALWatcher(nil, "", wt, dir)
            watcher.maxSegment = -1

            // Set the Watcher's metrics so they're not nil pointers.
            watcher.setMetrics()

            lastCheckpoint, _, err := tsdb.LastCheckpoint(watcher.walDir)
            testutil.Ok(t, err)

            err = watcher.readCheckpoint(lastCheckpoint)
            testutil.Ok(t, err)
+        })
+    }
}

func TestCheckpointSeriesReset(t *testing.T) {
@@ -439,71 +459,82 @@ func TestCheckpointSeriesReset(t *testing.T) {
    // in order to get enough segments for us to checkpoint.
    const seriesCount = 20
    const samplesCount = 350
+    testCases := []struct {
+        compress bool
+        segments int
+    }{
+        {compress: false, segments: 14},
+        {compress: true, segments: 13},
+    }

+    for _, tc := range testCases {
+        t.Run(fmt.Sprintf("compress=%t", tc.compress), func(t *testing.T) {
            dir, err := ioutil.TempDir("", "seriesReset")
            testutil.Ok(t, err)
            defer os.RemoveAll(dir)

            wdir := path.Join(dir, "wal")
            err = os.Mkdir(wdir, 0777)
            testutil.Ok(t, err)

            enc := tsdb.RecordEncoder{}
-            w, err := wal.NewSize(nil, nil, wdir, segmentSize)
+            w, err := wal.NewSize(nil, nil, wdir, segmentSize, tc.compress)
            testutil.Ok(t, err)

            // Write to the initial segment, then checkpoint later.
            for i := 0; i < seriesCount; i++ {
                ref := i + 100
                series := enc.Series([]tsdb.RefSeries{
                    tsdb.RefSeries{
                        Ref:    uint64(ref),
                        Labels: labels.Labels{labels.Label{Name: "__name__", Value: fmt.Sprintf("metric_%d", i)}},
                    },
                }, nil)
                testutil.Ok(t, w.Log(series))

                for j := 0; j < samplesCount; j++ {
                    inner := rand.Intn(ref + 1)
                    sample := enc.Samples([]tsdb.RefSample{
                        tsdb.RefSample{
                            Ref: uint64(inner),
                            T:   int64(i),
                            V:   float64(i),
                        },
                    }, nil)
                    testutil.Ok(t, w.Log(sample))
                }
            }

            _, _, err = w.Segments()
            testutil.Ok(t, err)

            wt := newWriteToMock()
            watcher := NewWALWatcher(nil, "", wt, dir)
            watcher.maxSegment = -1
            go watcher.Start()

            expected := seriesCount
            retry(t, defaultRetryInterval, defaultRetries, func() bool {
                return wt.checkNumLabels() >= expected
            })
            testutil.Equals(t, seriesCount, wt.checkNumLabels())

            _, err = tsdb.Checkpoint(w, 2, 4, func(x uint64) bool { return true }, 0)
            testutil.Ok(t, err)

            err = w.Truncate(5)
            testutil.Ok(t, err)

            _, cpi, err := tsdb.LastCheckpoint(path.Join(dir, "wal"))
            testutil.Ok(t, err)
            err = watcher.garbageCollectSeries(cpi + 1)
            testutil.Ok(t, err)

            watcher.Stop()
            // If you modify the checkpoint and truncate segment #'s run the test to see how
            // many series records you end up with and change the last Equals check accordingly
            // or modify the Equals to Assert(len(wt.seriesLabels) < seriesCount*10)
-            testutil.Equals(t, 14, wt.checkNumLabels())
+            testutil.Equals(t, tc.segments, wt.checkNumLabels())
+        })
+    }
}
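A compact round trip of what these tests exercise, written as a hypothetical standalone program. It assumes the vendored github.com/prometheus/tsdb/wal API at this commit, where NewSize gained a compress flag and NewLiveReader gained a metrics argument; "waldir" and the record payloads are placeholders:

package main

import (
    "fmt"
    "log"

    "github.com/prometheus/client_golang/prometheus"
    "github.com/prometheus/tsdb/wal"
)

func main() {
    // Write a few records into a Snappy-compressed WAL.
    w, err := wal.NewSize(nil, nil, "waldir", 32*1024, true)
    if err != nil {
        log.Fatal(err)
    }
    if err := w.Log([]byte("record-1"), []byte("record-2")); err != nil {
        log.Fatal(err)
    }
    first, last, err := w.Segments()
    if err != nil {
        log.Fatal(err)
    }
    w.Close()

    // Read the segments back with a LiveReader, which decompresses transparently.
    metrics := wal.NewLiveReaderMetrics(prometheus.DefaultRegisterer)
    for i := first; i <= last; i++ {
        seg, err := wal.OpenReadSegment(wal.SegmentName("waldir", i))
        if err != nil {
            log.Fatal(err)
        }
        r := wal.NewLiveReader(nil, metrics, seg)
        for r.Next() {
            fmt.Printf("%s\n", r.Record())
        }
        seg.Close()
    }
}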
@@ -14,8 +14,16 @@
package remote

import (
+    "crypto/md5"
+    "encoding/json"
+    "sync"
+    "time"
+
    "github.com/go-kit/kit/log"
+    "github.com/go-kit/kit/log/level"
    "github.com/prometheus/client_golang/prometheus"
    "github.com/prometheus/client_golang/prometheus/promauto"
    "github.com/prometheus/prometheus/config"
    "github.com/prometheus/prometheus/pkg/labels"
    "github.com/prometheus/prometheus/storage"
)
@@ -37,15 +45,121 @@ var (
    }
)

-// Appender implements scrape.Appendable.
-func (s *Storage) Appender() (storage.Appender, error) {
+// WriteStorage represents all the remote write storage.
+type WriteStorage struct {
+    logger log.Logger
+    mtx    sync.Mutex
+
+    configHash    [16]byte
+    walDir        string
+    queues        []*QueueManager
+    samplesIn     *ewmaRate
+    flushDeadline time.Duration
+}
+
+// NewWriteStorage creates and runs a WriteStorage.
+func NewWriteStorage(logger log.Logger, walDir string, flushDeadline time.Duration) *WriteStorage {
+    if logger == nil {
+        logger = log.NewNopLogger()
+    }
+    rws := &WriteStorage{
+        logger:        logger,
+        flushDeadline: flushDeadline,
+        samplesIn:     newEWMARate(ewmaWeight, shardUpdateDuration),
+        walDir:        walDir,
+    }
+    go rws.run()
+    return rws
+}
+
+func (rws *WriteStorage) run() {
+    ticker := time.NewTicker(shardUpdateDuration)
+    defer ticker.Stop()
+    for range ticker.C {
+        rws.samplesIn.tick()
+    }
+}
+
+// ApplyConfig updates the state as the new config requires.
+func (rws *WriteStorage) ApplyConfig(conf *config.Config) error {
+    rws.mtx.Lock()
+    defer rws.mtx.Unlock()
+
+    // Remote write queues only need to change if the remote write config or
+    // external labels change. Hash these together and only reload if the hash
+    // changes.
+    cfgBytes, err := json.Marshal(conf.RemoteWriteConfigs)
+    if err != nil {
+        return err
+    }
+    externalLabelBytes, err := json.Marshal(conf.GlobalConfig.ExternalLabels)
+    if err != nil {
+        return err
+    }
+
+    hash := md5.Sum(append(cfgBytes, externalLabelBytes...))
+    if hash == rws.configHash {
+        level.Debug(rws.logger).Log("msg", "remote write config has not changed, no need to restart QueueManagers")
+        return nil
+    }
+
+    rws.configHash = hash
+
+    // Update write queues
+    newQueues := []*QueueManager{}
+    // TODO: we should only stop & recreate queues which have changes,
+    // as this can be quite disruptive.
+    for i, rwConf := range conf.RemoteWriteConfigs {
+        c, err := NewClient(i, &ClientConfig{
+            URL:              rwConf.URL,
+            Timeout:          rwConf.RemoteTimeout,
+            HTTPClientConfig: rwConf.HTTPClientConfig,
+        })
+        if err != nil {
+            return err
+        }
+        newQueues = append(newQueues, NewQueueManager(
+            rws.logger,
+            rws.walDir,
+            rws.samplesIn,
+            rwConf.QueueConfig,
+            conf.GlobalConfig.ExternalLabels,
+            rwConf.WriteRelabelConfigs,
+            c,
+            rws.flushDeadline,
+        ))
+    }
+
+    for _, q := range rws.queues {
+        q.Stop()
+    }
+
+    rws.queues = newQueues
+    for _, q := range rws.queues {
+        q.Start()
+    }
+    return nil
+}
+
+// Appender implements storage.Storage.
+func (rws *WriteStorage) Appender() (storage.Appender, error) {
    return &timestampTracker{
-        storage: s,
+        writeStorage: rws,
    }, nil
}

+// Close closes the WriteStorage.
+func (rws *WriteStorage) Close() error {
+    rws.mtx.Lock()
+    defer rws.mtx.Unlock()
+    for _, q := range rws.queues {
+        q.Stop()
+    }
+    return nil
+}
+
type timestampTracker struct {
-    storage          *Storage
+    writeStorage     *WriteStorage
    samples          int64
    highestTimestamp int64
}
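The hash check in ApplyConfig is a small, general pattern: marshal the parts of the configuration that matter, hash them, and skip the disruptive reload when the digest is unchanged. A self-contained sketch (the Config type here is a stand-in, not the Prometheus one):

package main

import (
    "crypto/md5"
    "encoding/json"
    "fmt"
)

type Config struct {
    URL string `json:"url"`
}

// configHash mirrors the pattern above: hash the marshaled config so a
// reload can be skipped when nothing relevant changed.
func configHash(c Config) ([16]byte, error) {
    b, err := json.Marshal(c)
    if err != nil {
        return [16]byte{}, err
    }
    return md5.Sum(b), nil
}

func main() {
    var last [16]byte
    for _, c := range []Config{{URL: "a"}, {URL: "a"}, {URL: "b"}} {
        h, _ := configHash(c)
        fmt.Println("changed:", h != last) // true, false, true
        last = h
    }
}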
@@ -67,7 +181,7 @@ func (t *timestampTracker) AddFast(l labels.Labels, _ uint64, ts int64, v float64) error {

// Commit implements storage.Appender.
func (t *timestampTracker) Commit() error {
-    t.storage.samplesIn.incr(t.samples)
+    t.writeStorage.samplesIn.incr(t.samples)

    samplesIn.Add(float64(t.samples))
    highestTimestamp.Set(float64(t.highestTimestamp / 1000))
@@ -0,0 +1,95 @@
// Copyright 2017 The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package remote

import (
    "io/ioutil"
    "os"
    "testing"

    "github.com/prometheus/prometheus/config"
    "github.com/prometheus/prometheus/pkg/labels"
    "github.com/prometheus/prometheus/util/testutil"
)

func TestWriteStorageLifecycle(t *testing.T) {
    dir, err := ioutil.TempDir("", "TestWriteStorageLifecycle")
    testutil.Ok(t, err)
    defer os.RemoveAll(dir)

    s := NewWriteStorage(nil, dir, defaultFlushDeadline)
    conf := &config.Config{
        GlobalConfig: config.DefaultGlobalConfig,
        RemoteWriteConfigs: []*config.RemoteWriteConfig{
            &config.DefaultRemoteWriteConfig,
        },
    }
    s.ApplyConfig(conf)
    testutil.Equals(t, 1, len(s.queues))

    err = s.Close()
    testutil.Ok(t, err)
}

func TestUpdateExternalLabels(t *testing.T) {
    dir, err := ioutil.TempDir("", "TestUpdateExternalLabels")
    testutil.Ok(t, err)
    defer os.RemoveAll(dir)

    s := NewWriteStorage(nil, dir, defaultFlushDeadline)

    externalLabels := labels.FromStrings("external", "true")
    conf := &config.Config{
        GlobalConfig: config.GlobalConfig{},
        RemoteWriteConfigs: []*config.RemoteWriteConfig{
            &config.DefaultRemoteWriteConfig,
        },
    }
    s.ApplyConfig(conf)
    testutil.Equals(t, 1, len(s.queues))
    testutil.Equals(t, labels.Labels(nil), s.queues[0].externalLabels)

    conf.GlobalConfig.ExternalLabels = externalLabels
    s.ApplyConfig(conf)
    testutil.Equals(t, 1, len(s.queues))
    testutil.Equals(t, externalLabels, s.queues[0].externalLabels)

    err = s.Close()
    testutil.Ok(t, err)
}

func TestWriteStorageApplyConfigsIdempotent(t *testing.T) {
    dir, err := ioutil.TempDir("", "TestWriteStorageApplyConfigsIdempotent")
    testutil.Ok(t, err)
    defer os.RemoveAll(dir)

    s := NewWriteStorage(nil, dir, defaultFlushDeadline)

    conf := &config.Config{
        GlobalConfig: config.GlobalConfig{},
        RemoteWriteConfigs: []*config.RemoteWriteConfig{
            &config.DefaultRemoteWriteConfig,
        },
    }
    s.ApplyConfig(conf)
    testutil.Equals(t, 1, len(s.queues))
    queue := s.queues[0]

    s.ApplyConfig(conf)
    testutil.Equals(t, 1, len(s.queues))
    testutil.Assert(t, queue == s.queues[0], "Queue pointer should have remained the same")

    err = s.Close()
    testutil.Ok(t, err)
}
@@ -130,6 +130,9 @@ type Options struct {
    // When true it disables the overlapping blocks check.
    // This in-turn enables vertical compaction and vertical query merge.
    AllowOverlappingBlocks bool
+
+    // When true records in the WAL will be compressed.
+    WALCompression bool
}

var (
@@ -195,6 +198,7 @@ func Open(path string, l log.Logger, r prometheus.Registerer, opts *Options) (*tsdb.DB, error) {
        BlockRanges:            rngs,
        NoLockfile:             opts.NoLockfile,
        AllowOverlappingBlocks: opts.AllowOverlappingBlocks,
+        WALCompression:         opts.WALCompression,
    })
    if err != nil {
        return nil, err
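With the option plumbed through, enabling WAL compression from an embedding program is one field. A minimal sketch, assuming the vendored github.com/prometheus/tsdb package at this commit and a local "data" directory:

package main

import (
    "log"

    "github.com/prometheus/tsdb"
)

func main() {
    opts := *tsdb.DefaultOptions
    opts.WALCompression = true // compress WAL records with Snappy

    db, err := tsdb.Open("data", nil, nil, &opts)
    if err != nil {
        log.Fatal(err)
    }
    defer db.Close()
}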
@@ -253,9 +257,15 @@ func (q querier) Select(_ *storage.SelectParams, oms ...*labels.Matcher) (storage.SeriesSet, storage.Warnings, error) {
    return seriesSet{set: set}, nil, nil
}

-func (q querier) LabelValues(name string) ([]string, error) { return q.q.LabelValues(name) }
-func (q querier) LabelNames() ([]string, error)             { return q.q.LabelNames() }
-func (q querier) Close() error                              { return q.q.Close() }
+func (q querier) LabelValues(name string) ([]string, storage.Warnings, error) {
+    v, err := q.q.LabelValues(name)
+    return v, nil, err
+}
+func (q querier) LabelNames() ([]string, storage.Warnings, error) {
+    v, err := q.q.LabelNames()
+    return v, nil, err
+}
+func (q querier) Close() error { return q.q.Close() }

type seriesSet struct {
    set tsdb.SeriesSet
@@ -243,6 +243,9 @@ func NewTemplateExpander(
        }
        return fmt.Sprintf("%.4g%ss", v, prefix)
    },
+    "humanizePercentage": func(v float64) string {
+        return fmt.Sprintf("%.4g%%", v*100)
+    },
    "humanizeTimestamp": func(v float64) string {
        if math.IsNaN(v) || math.IsInf(v, 0) {
            return fmt.Sprintf("%.4g", v)
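Since humanizePercentage is an ordinary template function, its behaviour is easy to reproduce outside Prometheus. A standalone sketch using text/template (the FuncMap wiring here is illustrative; Prometheus registers the function through its own template expander):

package main

import (
    "fmt"
    "os"
    "text/template"
)

func main() {
    funcs := template.FuncMap{
        "humanizePercentage": func(v float64) string {
            return fmt.Sprintf("%.4g%%", v*100)
        },
    }
    t := template.Must(template.New("t").Funcs(funcs).Parse(`{{ 0.1234567 | humanizePercentage }}`))
    t.Execute(os.Stdout, nil) // prints 12.35%
}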
|
@ -197,6 +197,11 @@ func TestTemplateExpansion(t *testing.T) {
|
|||
input: []float64{math.Inf(1), math.Inf(-1), math.NaN()},
|
||||
output: "+Inf:+Inf:+Inf:+Inf:-Inf:-Inf:-Inf:-Inf:NaN:NaN:NaN:NaN:",
|
||||
},
|
||||
{
|
||||
// HumanizePercentage - model.SampleValue input.
|
||||
text: "{{ -0.22222 | humanizePercentage }}:{{ 0.0 | humanizePercentage }}:{{ 0.1234567 | humanizePercentage }}:{{ 1.23456 | humanizePercentage }}",
|
||||
output: "-22.22%:0%:12.35%:123.5%",
|
||||
},
|
||||
{
|
||||
// HumanizeTimestamp - model.SampleValue input.
|
||||
text: "{{ 1435065584.128 | humanizeTimestamp }}",
|
||||
|
|
|
@@ -17,6 +17,7 @@ import (
    "bytes"
    "fmt"
    "strings"
+    "sync"
    "time"

    "github.com/go-kit/kit/log"
@@ -68,6 +69,7 @@ type ZookeeperTreeCache struct {
    prefix string
    events chan ZookeeperTreeCacheEvent
    stop   chan struct{}
+    wg     *sync.WaitGroup
    head   *zookeeperTreeCacheNode

    logger log.Logger
@@ -94,14 +96,17 @@ func NewZookeeperTreeCache(conn *zk.Conn, path string, events chan ZookeeperTreeCacheEvent, logger log.Logger) *ZookeeperTreeCache {
        prefix: path,
        events: events,
        stop:   make(chan struct{}),
+        wg:     &sync.WaitGroup{},

        logger: logger,
    }
    tc.head = &zookeeperTreeCacheNode{
        events:   make(chan zk.Event),
        children: map[string]*zookeeperTreeCacheNode{},
-        stopped:  true,
+        done:     make(chan struct{}, 1),
+        stopped:  true, // Set head's stop to be true so that recursiveDelete will not stop the head node.
    }
+    tc.wg.Add(1)
    go tc.loop(path)
    return tc
}
@@ -109,9 +114,23 @@ func NewZookeeperTreeCache(conn *zk.Conn, path string, events chan ZookeeperTreeCacheEvent, logger log.Logger) *ZookeeperTreeCache {
// Stop stops the tree cache.
func (tc *ZookeeperTreeCache) Stop() {
    tc.stop <- struct{}{}
+    go func() {
+        // Drain tc.head.events so that go routines can make progress and exit.
+        for range tc.head.events {
+        }
+    }()
    go func() {
+        tc.wg.Wait()
+        // Close the tc.head.events after all members of the wait group have exited.
+        // This makes the go routine above exit.
+        close(tc.head.events)
        close(tc.events)
    }()
}

func (tc *ZookeeperTreeCache) loop(path string) {
+    defer tc.wg.Done()

    failureMode := false
    retryChan := make(chan struct{})
@ -185,6 +204,8 @@ func (tc *ZookeeperTreeCache) loop(path string) {
|
|||
failureMode = false
|
||||
}
|
||||
case <-tc.stop:
|
||||
// Stop head as well.
|
||||
tc.head.done <- struct{}{}
|
||||
tc.recursiveStop(tc.head)
|
||||
return
|
||||
}
|
||||
|
@ -243,6 +264,7 @@ func (tc *ZookeeperTreeCache) recursiveNodeUpdate(path string, node *zookeeperTr
|
|||
}
|
||||
}
|
||||
|
||||
tc.wg.Add(1)
|
||||
go func() {
|
||||
numWatchers.Inc()
|
||||
// Pass up zookeeper events, until the node is deleted.
|
||||
|
@ -254,6 +276,7 @@ func (tc *ZookeeperTreeCache) recursiveNodeUpdate(path string, node *zookeeperTr
|
|||
case <-node.done:
|
||||
}
|
||||
numWatchers.Dec()
|
||||
tc.wg.Done()
|
||||
}()
|
||||
return nil
|
||||
}
|
||||
|
|
|
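The Stop change above addresses the ZooKeeper connection leak (#5675) with a drain-then-close handoff: one goroutine drains the events channel so blocked senders can finish, and another closes the channel only once the wait group is empty. A minimal, self-contained sketch of that pattern (the names are illustrative, not taken from the diff):

```go
package main

import (
	"fmt"
	"sync"
)

// drainAndClose drains events so producers never block on send, then
// closes the channel once every producer has exited via the WaitGroup.
func drainAndClose(events chan int, wg *sync.WaitGroup, done chan struct{}) {
	go func() {
		for range events { // drain until the channel is closed
		}
		close(done)
	}()
	go func() {
		wg.Wait()     // all producers have finished
		close(events) // lets the draining goroutine exit
	}()
}

func main() {
	events := make(chan int)
	done := make(chan struct{})
	var wg sync.WaitGroup
	wg.Add(1)
	go func() {
		defer wg.Done()
		for i := 0; i < 3; i++ {
			events <- i // would block forever without a drainer
		}
	}()
	drainAndClose(events, &wg, done)
	<-done
	fmt.Println("shut down cleanly")
}
```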
@@ -0,0 +1,5 @@
*.sublime-*
.DS_Store
*.swp
*.swo
tags
@@ -0,0 +1,7 @@
language: go

go:
  - 1.4
  - 1.5
  - 1.6
  - tip
@@ -0,0 +1,12 @@
Copyright (c) 2012, Martin Angers
All rights reserved.

Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met:

* Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer.

* Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution.

* Neither the name of the author nor the names of its contributors may be used to endorse or promote products derived from this software without specific prior written permission.

THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
@@ -0,0 +1,187 @@
# Purell

Purell is a tiny Go library to normalize URLs. It returns a pure URL. Pure-ell. Sanitizer and all. Yeah, I know...

Based on the [Wikipedia article][wiki] and the [RFC 3986 document][rfc].

[![build status](https://secure.travis-ci.org/PuerkitoBio/purell.png)](http://travis-ci.org/PuerkitoBio/purell)

## Install

`go get github.com/PuerkitoBio/purell`

## Changelog

*   **2016-11-14 (v1.1.0)**: IDN: Conform to RFC 5895: Fold character width (thanks to @beeker1121).
*   **2016-07-27 (v1.0.0)**: Normalize IDN to ASCII (thanks to @zenovich).
*   **2015-02-08**: Add fix for relative paths issue ([PR #5][pr5]) and add fix for unnecessary encoding of reserved characters ([see issue #7][iss7]).
*   **v0.2.0**: Add benchmarks, attempt IDN support.
*   **v0.1.0**: Initial release.

## Examples

From `example_test.go` (note that in your code, you would import "github.com/PuerkitoBio/purell", and would prefix references to its functions and constants with "purell."):

```go
package purell

import (
	"fmt"
	"net/url"
)

func ExampleNormalizeURLString() {
	if normalized, err := NormalizeURLString("hTTp://someWEBsite.com:80/Amazing%3f/url/",
		FlagLowercaseScheme|FlagLowercaseHost|FlagUppercaseEscapes); err != nil {
		panic(err)
	} else {
		fmt.Print(normalized)
	}
	// Output: http://somewebsite.com:80/Amazing%3F/url/
}

func ExampleMustNormalizeURLString() {
	normalized := MustNormalizeURLString("hTTpS://someWEBsite.com:443/Amazing%fa/url/",
		FlagsUnsafeGreedy)
	fmt.Print(normalized)

	// Output: http://somewebsite.com/Amazing%FA/url
}

func ExampleNormalizeURL() {
	if u, err := url.Parse("Http://SomeUrl.com:8080/a/b/.././c///g?c=3&a=1&b=9&c=0#target"); err != nil {
		panic(err)
	} else {
		normalized := NormalizeURL(u, FlagsUsuallySafeGreedy|FlagRemoveDuplicateSlashes|FlagRemoveFragment)
		fmt.Print(normalized)
	}

	// Output: http://someurl.com:8080/a/c/g?c=3&a=1&b=9&c=0
}
```

## API

As seen in the examples above, purell offers three functions, `NormalizeURLString(string, NormalizationFlags) (string, error)`, `MustNormalizeURLString(string, NormalizationFlags) string` and `NormalizeURL(*url.URL, NormalizationFlags) string`. They all normalize the provided URL based on the specified flags. Here are the available flags:

```go
const (
	// Safe normalizations
	FlagLowercaseScheme           NormalizationFlags = 1 << iota // HTTP://host -> http://host, applied by default in Go1.1
	FlagLowercaseHost                                            // http://HOST -> http://host
	FlagUppercaseEscapes                                         // http://host/t%ef -> http://host/t%EF
	FlagDecodeUnnecessaryEscapes                                 // http://host/t%41 -> http://host/tA
	FlagEncodeNecessaryEscapes                                   // http://host/!"#$ -> http://host/%21%22#$
	FlagRemoveDefaultPort                                        // http://host:80 -> http://host
	FlagRemoveEmptyQuerySeparator                                // http://host/path? -> http://host/path

	// Usually safe normalizations
	FlagRemoveTrailingSlash // http://host/path/ -> http://host/path
	FlagAddTrailingSlash    // http://host/path -> http://host/path/ (should choose only one of these add/remove trailing slash flags)
	FlagRemoveDotSegments   // http://host/path/./a/b/../c -> http://host/path/a/c

	// Unsafe normalizations
	FlagRemoveDirectoryIndex   // http://host/path/index.html -> http://host/path/
	FlagRemoveFragment         // http://host/path#fragment -> http://host/path
	FlagForceHTTP              // https://host -> http://host
	FlagRemoveDuplicateSlashes // http://host/path//a///b -> http://host/path/a/b
	FlagRemoveWWW              // http://www.host/ -> http://host/
	FlagAddWWW                 // http://host/ -> http://www.host/ (should choose only one of these add/remove WWW flags)
	FlagSortQuery              // http://host/path?c=3&b=2&a=1&b=1 -> http://host/path?a=1&b=1&b=2&c=3

	// Normalizations not in the wikipedia article, required to cover test cases
	// submitted by jehiah
	FlagDecodeDWORDHost           // http://1113982867 -> http://66.102.7.147
	FlagDecodeOctalHost           // http://0102.0146.07.0223 -> http://66.102.7.147
	FlagDecodeHexHost             // http://0x42660793 -> http://66.102.7.147
	FlagRemoveUnnecessaryHostDots // http://.host../path -> http://host/path
	FlagRemoveEmptyPortSeparator  // http://host:/path -> http://host/path

	// Convenience set of safe normalizations
	FlagsSafe NormalizationFlags = FlagLowercaseHost | FlagLowercaseScheme | FlagUppercaseEscapes | FlagDecodeUnnecessaryEscapes | FlagEncodeNecessaryEscapes | FlagRemoveDefaultPort | FlagRemoveEmptyQuerySeparator

	// For convenience sets, "greedy" uses the "remove trailing slash" and "remove www. prefix" flags,
	// while "non-greedy" uses the "add (or keep) the trailing slash" and "add www. prefix".

	// Convenience set of usually safe normalizations (includes FlagsSafe)
	FlagsUsuallySafeGreedy    NormalizationFlags = FlagsSafe | FlagRemoveTrailingSlash | FlagRemoveDotSegments
	FlagsUsuallySafeNonGreedy NormalizationFlags = FlagsSafe | FlagAddTrailingSlash | FlagRemoveDotSegments

	// Convenience set of unsafe normalizations (includes FlagsUsuallySafe)
	FlagsUnsafeGreedy    NormalizationFlags = FlagsUsuallySafeGreedy | FlagRemoveDirectoryIndex | FlagRemoveFragment | FlagForceHTTP | FlagRemoveDuplicateSlashes | FlagRemoveWWW | FlagSortQuery
	FlagsUnsafeNonGreedy NormalizationFlags = FlagsUsuallySafeNonGreedy | FlagRemoveDirectoryIndex | FlagRemoveFragment | FlagForceHTTP | FlagRemoveDuplicateSlashes | FlagAddWWW | FlagSortQuery

	// Convenience set of all available flags
	FlagsAllGreedy    = FlagsUnsafeGreedy | FlagDecodeDWORDHost | FlagDecodeOctalHost | FlagDecodeHexHost | FlagRemoveUnnecessaryHostDots | FlagRemoveEmptyPortSeparator
	FlagsAllNonGreedy = FlagsUnsafeNonGreedy | FlagDecodeDWORDHost | FlagDecodeOctalHost | FlagDecodeHexHost | FlagRemoveUnnecessaryHostDots | FlagRemoveEmptyPortSeparator
)
```

For convenience, the sets of flags `FlagsSafe`, `FlagsUsuallySafe[Greedy|NonGreedy]`, `FlagsUnsafe[Greedy|NonGreedy]` and `FlagsAll[Greedy|NonGreedy]` are provided for the similarly grouped normalizations on [Wikipedia's URL normalization page][wiki]. You can add (using the bitwise OR `|` operator) or remove (using the bitwise AND NOT `&^` operator) individual flags from the sets if required, to build your own custom set, as sketched below.
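For instance, a custom set can be derived from the provided ones; a minimal sketch using only flags documented above (the input URL is made up for illustration):

```go
package main

import (
	"fmt"

	"github.com/PuerkitoBio/purell"
)

func main() {
	// Start from the safe set, add dot-segment and fragment removal,
	// then opt out of host lowercasing with the AND NOT operator.
	custom := purell.FlagsSafe | purell.FlagRemoveDotSegments | purell.FlagRemoveFragment
	custom &^= purell.FlagLowercaseHost

	normalized, err := purell.NormalizeURLString("HTTP://Example.com/a/./b#frag", custom)
	if err != nil {
		panic(err)
	}
	fmt.Println(normalized)
}
```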
The [full godoc reference is available on gopkgdoc][godoc].

Some things to note:

*   `FlagDecodeUnnecessaryEscapes`, `FlagEncodeNecessaryEscapes`, `FlagUppercaseEscapes` and `FlagRemoveEmptyQuerySeparator` are always implicitly set, because internally, the URL string is parsed as a URL object, which automatically decodes unnecessary escapes, uppercases and encodes necessary ones, and removes empty query separators (an unnecessary `?` at the end of the URL). So this operation cannot **not** be done. For this reason, `FlagRemoveEmptyQuerySeparator` (as well as the other three) has been included in the `FlagsSafe` convenience set, instead of `FlagsUnsafe`, where Wikipedia puts it.

*   The `FlagDecodeUnnecessaryEscapes` flag decodes the following escapes (*from -> to*):
    - %24 -> $
    - %26 -> &
    - %2B-%3B -> +,-./0123456789:;
    - %3D -> =
    - %40-%5A -> @ABCDEFGHIJKLMNOPQRSTUVWXYZ
    - %5F -> _
    - %61-%7A -> abcdefghijklmnopqrstuvwxyz
    - %7E -> ~

*   When the `NormalizeURL` function is used (passing a URL object), the source URL object is modified (that is, after the call, the URL object will reflect the normalization).

*   The *replace IP with domain name* normalization (`http://208.77.188.166/ → http://www.example.com/`) is obviously not possible for a library without making network requests. It is not implemented in purell.

*   The *remove unused query string parameters* and *remove default query parameters* normalizations are also not implemented, since they are very case-specific, and quite trivial to do with a URL object.

### Safe vs Usually Safe vs Unsafe

Purell allows you to control the level of risk you take while normalizing a URL. You can aggressively normalize, play it totally safe, or anything in between.

Consider the following URL:

`HTTPS://www.RooT.com/toto/t%45%1f///a/./b/../c/?z=3&w=2&a=4&w=1#invalid`

Normalizing with `FlagsSafe` gives:

`https://www.root.com/toto/tE%1F///a/./b/../c/?z=3&w=2&a=4&w=1#invalid`

With `FlagsUsuallySafeGreedy`:

`https://www.root.com/toto/tE%1F///a/c?z=3&w=2&a=4&w=1#invalid`

And with `FlagsUnsafeGreedy`:

`http://root.com/toto/tE%1F/a/c?a=4&w=1&w=2&z=3`
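That comparison can be reproduced directly with the convenience sets; a small runnable sketch, which should print the three results above in order:

```go
package main

import (
	"fmt"

	"github.com/PuerkitoBio/purell"
)

func main() {
	const raw = `HTTPS://www.RooT.com/toto/t%45%1f///a/./b/../c/?z=3&w=2&a=4&w=1#invalid`
	// Run the same URL through the three convenience sets documented above.
	for _, f := range []purell.NormalizationFlags{
		purell.FlagsSafe,
		purell.FlagsUsuallySafeGreedy,
		purell.FlagsUnsafeGreedy,
	} {
		fmt.Println(purell.MustNormalizeURLString(raw, f))
	}
}
```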
## TODOs

*   Add a class/default instance to allow specifying custom directory index names? At the moment, removing the directory index removes `(^|/)((?:default|index)\.\w{1,4})$`.

## Thanks / Contributions

@rogpeppe
@jehiah
@opennota
@pchristopher1275
@zenovich
@beeker1121

## License

The [BSD 3-Clause license][bsd].

[bsd]: http://opensource.org/licenses/BSD-3-Clause
[wiki]: http://en.wikipedia.org/wiki/URL_normalization
[rfc]: http://tools.ietf.org/html/rfc3986#section-6
[godoc]: http://go.pkgdoc.org/github.com/PuerkitoBio/purell
[pr5]: https://github.com/PuerkitoBio/purell/pull/5
[iss7]: https://github.com/PuerkitoBio/purell/issues/7
@@ -0,0 +1,379 @@
/*
Package purell offers URL normalization as described on the wikipedia page:
http://en.wikipedia.org/wiki/URL_normalization
*/
package purell

import (
	"bytes"
	"fmt"
	"net/url"
	"regexp"
	"sort"
	"strconv"
	"strings"

	"github.com/PuerkitoBio/urlesc"
	"golang.org/x/net/idna"
	"golang.org/x/text/unicode/norm"
	"golang.org/x/text/width"
)

// A set of normalization flags determines how a URL will
// be normalized.
type NormalizationFlags uint

const (
	// Safe normalizations
	FlagLowercaseScheme           NormalizationFlags = 1 << iota // HTTP://host -> http://host, applied by default in Go1.1
	FlagLowercaseHost                                            // http://HOST -> http://host
	FlagUppercaseEscapes                                         // http://host/t%ef -> http://host/t%EF
	FlagDecodeUnnecessaryEscapes                                 // http://host/t%41 -> http://host/tA
	FlagEncodeNecessaryEscapes                                   // http://host/!"#$ -> http://host/%21%22#$
	FlagRemoveDefaultPort                                        // http://host:80 -> http://host
	FlagRemoveEmptyQuerySeparator                                // http://host/path? -> http://host/path

	// Usually safe normalizations
	FlagRemoveTrailingSlash // http://host/path/ -> http://host/path
	FlagAddTrailingSlash    // http://host/path -> http://host/path/ (should choose only one of these add/remove trailing slash flags)
	FlagRemoveDotSegments   // http://host/path/./a/b/../c -> http://host/path/a/c

	// Unsafe normalizations
	FlagRemoveDirectoryIndex   // http://host/path/index.html -> http://host/path/
	FlagRemoveFragment         // http://host/path#fragment -> http://host/path
	FlagForceHTTP              // https://host -> http://host
	FlagRemoveDuplicateSlashes // http://host/path//a///b -> http://host/path/a/b
	FlagRemoveWWW              // http://www.host/ -> http://host/
	FlagAddWWW                 // http://host/ -> http://www.host/ (should choose only one of these add/remove WWW flags)
	FlagSortQuery              // http://host/path?c=3&b=2&a=1&b=1 -> http://host/path?a=1&b=1&b=2&c=3

	// Normalizations not in the wikipedia article, required to cover test cases
	// submitted by jehiah
	FlagDecodeDWORDHost           // http://1113982867 -> http://66.102.7.147
	FlagDecodeOctalHost           // http://0102.0146.07.0223 -> http://66.102.7.147
	FlagDecodeHexHost             // http://0x42660793 -> http://66.102.7.147
	FlagRemoveUnnecessaryHostDots // http://.host../path -> http://host/path
	FlagRemoveEmptyPortSeparator  // http://host:/path -> http://host/path

	// Convenience set of safe normalizations
	FlagsSafe NormalizationFlags = FlagLowercaseHost | FlagLowercaseScheme | FlagUppercaseEscapes | FlagDecodeUnnecessaryEscapes | FlagEncodeNecessaryEscapes | FlagRemoveDefaultPort | FlagRemoveEmptyQuerySeparator

	// For convenience sets, "greedy" uses the "remove trailing slash" and "remove www. prefix" flags,
	// while "non-greedy" uses the "add (or keep) the trailing slash" and "add www. prefix".

	// Convenience set of usually safe normalizations (includes FlagsSafe)
	FlagsUsuallySafeGreedy    NormalizationFlags = FlagsSafe | FlagRemoveTrailingSlash | FlagRemoveDotSegments
	FlagsUsuallySafeNonGreedy NormalizationFlags = FlagsSafe | FlagAddTrailingSlash | FlagRemoveDotSegments

	// Convenience set of unsafe normalizations (includes FlagsUsuallySafe)
	FlagsUnsafeGreedy    NormalizationFlags = FlagsUsuallySafeGreedy | FlagRemoveDirectoryIndex | FlagRemoveFragment | FlagForceHTTP | FlagRemoveDuplicateSlashes | FlagRemoveWWW | FlagSortQuery
	FlagsUnsafeNonGreedy NormalizationFlags = FlagsUsuallySafeNonGreedy | FlagRemoveDirectoryIndex | FlagRemoveFragment | FlagForceHTTP | FlagRemoveDuplicateSlashes | FlagAddWWW | FlagSortQuery

	// Convenience set of all available flags
	FlagsAllGreedy    = FlagsUnsafeGreedy | FlagDecodeDWORDHost | FlagDecodeOctalHost | FlagDecodeHexHost | FlagRemoveUnnecessaryHostDots | FlagRemoveEmptyPortSeparator
	FlagsAllNonGreedy = FlagsUnsafeNonGreedy | FlagDecodeDWORDHost | FlagDecodeOctalHost | FlagDecodeHexHost | FlagRemoveUnnecessaryHostDots | FlagRemoveEmptyPortSeparator
)

const (
	defaultHttpPort  = ":80"
	defaultHttpsPort = ":443"
)

// Regular expressions used by the normalizations
var rxPort = regexp.MustCompile(`(:\d+)/?$`)
var rxDirIndex = regexp.MustCompile(`(^|/)((?:default|index)\.\w{1,4})$`)
var rxDupSlashes = regexp.MustCompile(`/{2,}`)
var rxDWORDHost = regexp.MustCompile(`^(\d+)((?:\.+)?(?:\:\d*)?)$`)
var rxOctalHost = regexp.MustCompile(`^(0\d*)\.(0\d*)\.(0\d*)\.(0\d*)((?:\.+)?(?:\:\d*)?)$`)
var rxHexHost = regexp.MustCompile(`^0x([0-9A-Fa-f]+)((?:\.+)?(?:\:\d*)?)$`)
var rxHostDots = regexp.MustCompile(`^(.+?)(:\d+)?$`)
var rxEmptyPort = regexp.MustCompile(`:+$`)

// Map of flags to implementation function.
// FlagDecodeUnnecessaryEscapes has no action, since it is done automatically
// by parsing the string as a URL. Same for FlagUppercaseEscapes and FlagRemoveEmptyQuerySeparator.

// Since maps have undefined traversing order, make a slice of ordered keys
var flagsOrder = []NormalizationFlags{
	FlagLowercaseScheme,
	FlagLowercaseHost,
	FlagRemoveDefaultPort,
	FlagRemoveDirectoryIndex,
	FlagRemoveDotSegments,
	FlagRemoveFragment,
	FlagForceHTTP, // Must be after remove default port (because https=443/http=80)
	FlagRemoveDuplicateSlashes,
	FlagRemoveWWW,
	FlagAddWWW,
	FlagSortQuery,
	FlagDecodeDWORDHost,
	FlagDecodeOctalHost,
	FlagDecodeHexHost,
	FlagRemoveUnnecessaryHostDots,
	FlagRemoveEmptyPortSeparator,
	FlagRemoveTrailingSlash, // These two (add/remove trailing slash) must be last
	FlagAddTrailingSlash,
}

// ... and then the map, where order is unimportant
var flags = map[NormalizationFlags]func(*url.URL){
	FlagLowercaseScheme:           lowercaseScheme,
	FlagLowercaseHost:             lowercaseHost,
	FlagRemoveDefaultPort:         removeDefaultPort,
	FlagRemoveDirectoryIndex:      removeDirectoryIndex,
	FlagRemoveDotSegments:         removeDotSegments,
	FlagRemoveFragment:            removeFragment,
	FlagForceHTTP:                 forceHTTP,
	FlagRemoveDuplicateSlashes:    removeDuplicateSlashes,
	FlagRemoveWWW:                 removeWWW,
	FlagAddWWW:                    addWWW,
	FlagSortQuery:                 sortQuery,
	FlagDecodeDWORDHost:           decodeDWORDHost,
	FlagDecodeOctalHost:           decodeOctalHost,
	FlagDecodeHexHost:             decodeHexHost,
	FlagRemoveUnnecessaryHostDots: removeUnncessaryHostDots,
	FlagRemoveEmptyPortSeparator:  removeEmptyPortSeparator,
	FlagRemoveTrailingSlash:       removeTrailingSlash,
	FlagAddTrailingSlash:          addTrailingSlash,
}

// MustNormalizeURLString returns the normalized string, and panics if an error occurs.
// It takes a URL string as input, as well as the normalization flags.
func MustNormalizeURLString(u string, f NormalizationFlags) string {
	result, e := NormalizeURLString(u, f)
	if e != nil {
		panic(e)
	}
	return result
}

// NormalizeURLString returns the normalized string, or an error if it can't be parsed into a URL object.
// It takes a URL string as input, as well as the normalization flags.
func NormalizeURLString(u string, f NormalizationFlags) (string, error) {
	parsed, err := url.Parse(u)
	if err != nil {
		return "", err
	}

	if f&FlagLowercaseHost == FlagLowercaseHost {
		parsed.Host = strings.ToLower(parsed.Host)
	}

	// The idna package doesn't fully conform to RFC 5895
	// (https://tools.ietf.org/html/rfc5895), so we do it here.
	// Taken from Go 1.8 cycle source, courtesy of bradfitz.
	// TODO: Remove when (if?) idna package conforms to RFC 5895.
	parsed.Host = width.Fold.String(parsed.Host)
	parsed.Host = norm.NFC.String(parsed.Host)
	if parsed.Host, err = idna.ToASCII(parsed.Host); err != nil {
		return "", err
	}

	return NormalizeURL(parsed, f), nil
}

// NormalizeURL returns the normalized string.
// It takes a parsed URL object as input, as well as the normalization flags.
func NormalizeURL(u *url.URL, f NormalizationFlags) string {
	for _, k := range flagsOrder {
		if f&k == k {
			flags[k](u)
		}
	}
	return urlesc.Escape(u)
}

func lowercaseScheme(u *url.URL) {
	if len(u.Scheme) > 0 {
		u.Scheme = strings.ToLower(u.Scheme)
	}
}

func lowercaseHost(u *url.URL) {
	if len(u.Host) > 0 {
		u.Host = strings.ToLower(u.Host)
	}
}

func removeDefaultPort(u *url.URL) {
	if len(u.Host) > 0 {
		scheme := strings.ToLower(u.Scheme)
		u.Host = rxPort.ReplaceAllStringFunc(u.Host, func(val string) string {
			if (scheme == "http" && val == defaultHttpPort) || (scheme == "https" && val == defaultHttpsPort) {
				return ""
			}
			return val
		})
	}
}

func removeTrailingSlash(u *url.URL) {
	if l := len(u.Path); l > 0 {
		if strings.HasSuffix(u.Path, "/") {
			u.Path = u.Path[:l-1]
		}
	} else if l = len(u.Host); l > 0 {
		if strings.HasSuffix(u.Host, "/") {
			u.Host = u.Host[:l-1]
		}
	}
}

func addTrailingSlash(u *url.URL) {
	if l := len(u.Path); l > 0 {
		if !strings.HasSuffix(u.Path, "/") {
			u.Path += "/"
		}
	} else if l = len(u.Host); l > 0 {
		if !strings.HasSuffix(u.Host, "/") {
			u.Host += "/"
		}
	}
}

func removeDotSegments(u *url.URL) {
	if len(u.Path) > 0 {
		var dotFree []string
		var lastIsDot bool

		sections := strings.Split(u.Path, "/")
		for _, s := range sections {
			if s == ".." {
				if len(dotFree) > 0 {
					dotFree = dotFree[:len(dotFree)-1]
				}
			} else if s != "." {
				dotFree = append(dotFree, s)
			}
			lastIsDot = (s == "." || s == "..")
		}
		// Special case if host does not end with / and new path does not begin with /
		u.Path = strings.Join(dotFree, "/")
		if u.Host != "" && !strings.HasSuffix(u.Host, "/") && !strings.HasPrefix(u.Path, "/") {
			u.Path = "/" + u.Path
		}
		// Special case if the last segment was a dot, make sure the path ends with a slash
		if lastIsDot && !strings.HasSuffix(u.Path, "/") {
			u.Path += "/"
		}
	}
}

func removeDirectoryIndex(u *url.URL) {
	if len(u.Path) > 0 {
		u.Path = rxDirIndex.ReplaceAllString(u.Path, "$1")
	}
}

func removeFragment(u *url.URL) {
	u.Fragment = ""
}

func forceHTTP(u *url.URL) {
	if strings.ToLower(u.Scheme) == "https" {
		u.Scheme = "http"
	}
}

func removeDuplicateSlashes(u *url.URL) {
	if len(u.Path) > 0 {
		u.Path = rxDupSlashes.ReplaceAllString(u.Path, "/")
	}
}

func removeWWW(u *url.URL) {
	if len(u.Host) > 0 && strings.HasPrefix(strings.ToLower(u.Host), "www.") {
		u.Host = u.Host[4:]
	}
}

func addWWW(u *url.URL) {
	if len(u.Host) > 0 && !strings.HasPrefix(strings.ToLower(u.Host), "www.") {
		u.Host = "www." + u.Host
	}
}

func sortQuery(u *url.URL) {
	q := u.Query()

	if len(q) > 0 {
		arKeys := make([]string, len(q))
		i := 0
		for k := range q {
			arKeys[i] = k
			i++
		}
		sort.Strings(arKeys)
		buf := new(bytes.Buffer)
		for _, k := range arKeys {
			sort.Strings(q[k])
			for _, v := range q[k] {
				if buf.Len() > 0 {
					buf.WriteRune('&')
				}
				buf.WriteString(fmt.Sprintf("%s=%s", k, urlesc.QueryEscape(v)))
			}
		}

		// Rebuild the raw query string
		u.RawQuery = buf.String()
	}
}

func decodeDWORDHost(u *url.URL) {
	if len(u.Host) > 0 {
		if matches := rxDWORDHost.FindStringSubmatch(u.Host); len(matches) > 2 {
			var parts [4]int64

			dword, _ := strconv.ParseInt(matches[1], 10, 0)
			for i, shift := range []uint{24, 16, 8, 0} {
				parts[i] = dword >> shift & 0xFF
			}
			u.Host = fmt.Sprintf("%d.%d.%d.%d%s", parts[0], parts[1], parts[2], parts[3], matches[2])
		}
	}
}

func decodeOctalHost(u *url.URL) {
	if len(u.Host) > 0 {
		if matches := rxOctalHost.FindStringSubmatch(u.Host); len(matches) > 5 {
			var parts [4]int64

			for i := 1; i <= 4; i++ {
				parts[i-1], _ = strconv.ParseInt(matches[i], 8, 0)
			}
			u.Host = fmt.Sprintf("%d.%d.%d.%d%s", parts[0], parts[1], parts[2], parts[3], matches[5])
		}
	}
}

func decodeHexHost(u *url.URL) {
	if len(u.Host) > 0 {
		if matches := rxHexHost.FindStringSubmatch(u.Host); len(matches) > 2 {
			// Conversion is safe because of regex validation
			parsed, _ := strconv.ParseInt(matches[1], 16, 0)
			// Set host as DWORD (base 10) encoded host
			u.Host = fmt.Sprintf("%d%s", parsed, matches[2])
			// The rest is the same as decoding a DWORD host
			decodeDWORDHost(u)
		}
	}
}

func removeUnncessaryHostDots(u *url.URL) {
	if len(u.Host) > 0 {
		if matches := rxHostDots.FindStringSubmatch(u.Host); len(matches) > 1 {
			// Trim the leading and trailing dots
			u.Host = strings.Trim(matches[1], ".")
			if len(matches) > 2 {
				u.Host += matches[2]
			}
		}
	}
}

func removeEmptyPortSeparator(u *url.URL) {
	if len(u.Host) > 0 {
		u.Host = rxEmptyPort.ReplaceAllString(u.Host, "")
	}
}
@@ -1,10 +1,15 @@
language: go

go:
  - 1.3.x
  - 1.4.x
  - 1.5.x
  - 1.6.x
  - 1.7.x
  - 1.8.x
  - master
  - tip

install:
  - go build .

script:
  - go test -v
@@ -0,0 +1,27 @@
Copyright (c) 2012 The Go Authors. All rights reserved.

Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are
met:

   * Redistributions of source code must retain the above copyright
notice, this list of conditions and the following disclaimer.
   * Redistributions in binary form must reproduce the above
copyright notice, this list of conditions and the following disclaimer
in the documentation and/or other materials provided with the
distribution.
   * Neither the name of Google Inc. nor the names of its
contributors may be used to endorse or promote products derived from
this software without specific prior written permission.

THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
@@ -0,0 +1,16 @@
urlesc [![Build Status](https://travis-ci.org/PuerkitoBio/urlesc.svg?branch=master)](https://travis-ci.org/PuerkitoBio/urlesc) [![GoDoc](http://godoc.org/github.com/PuerkitoBio/urlesc?status.svg)](http://godoc.org/github.com/PuerkitoBio/urlesc)
======

Package urlesc implements query escaping as per RFC 3986.

It contains some parts of the net/url package, modified so as to allow
some reserved characters incorrectly escaped by net/url (see [issue 5684](https://github.com/golang/go/issues/5684)).

## Install

    go get github.com/PuerkitoBio/urlesc

## License

Go license (BSD-3-Clause)
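Before the source itself, a quick usage sketch of the two exported helpers shown in the file below, `QueryEscape` and `Escape` (the expected outputs in the comments are our reading of the escape rules, not taken from the package docs):

```go
package main

import (
	"fmt"
	"net/url"

	"github.com/PuerkitoBio/urlesc"
)

func main() {
	// QueryEscape escapes a query component per RFC 3986.
	fmt.Println(urlesc.QueryEscape("a b&c")) // expected: a+b%26c

	// Escape reassembles a parsed URL; RFC 3986 sub-delims such as
	// '(' and ')' stay unescaped in the path where net/url would escape them.
	u, err := url.Parse("http://example.com/a (b)/")
	if err != nil {
		panic(err)
	}
	fmt.Println(urlesc.Escape(u)) // expected: http://example.com/a%20(b)/
}
```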
@@ -0,0 +1,180 @@
// Copyright 2009 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

// Package urlesc implements query escaping as per RFC 3986.
// It contains some parts of the net/url package, modified so as to allow
// some reserved characters incorrectly escaped by net/url.
// See https://github.com/golang/go/issues/5684
package urlesc

import (
	"bytes"
	"net/url"
	"strings"
)

type encoding int

const (
	encodePath encoding = 1 + iota
	encodeUserPassword
	encodeQueryComponent
	encodeFragment
)

// Return true if the specified character should be escaped when
// appearing in a URL string, according to RFC 3986.
func shouldEscape(c byte, mode encoding) bool {
	// §2.3 Unreserved characters (alphanum)
	if 'A' <= c && c <= 'Z' || 'a' <= c && c <= 'z' || '0' <= c && c <= '9' {
		return false
	}

	switch c {
	case '-', '.', '_', '~': // §2.3 Unreserved characters (mark)
		return false

	// §2.2 Reserved characters (reserved)
	case ':', '/', '?', '#', '[', ']', '@', // gen-delims
		'!', '$', '&', '\'', '(', ')', '*', '+', ',', ';', '=': // sub-delims
		// Different sections of the URL allow a few of
		// the reserved characters to appear unescaped.
		switch mode {
		case encodePath: // §3.3
			// The RFC allows sub-delims and : @.
			// '/', '[' and ']' can be used to assign meaning to individual path
			// segments. This package only manipulates the path as a whole,
			// so we allow those as well. That leaves only ? and # to escape.
			return c == '?' || c == '#'

		case encodeUserPassword: // §3.2.1
			// The RFC allows : and sub-delims in
			// userinfo. The parsing of userinfo treats ':' as special so we must escape
			// all the gen-delims.
			return c == ':' || c == '/' || c == '?' || c == '#' || c == '[' || c == ']' || c == '@'

		case encodeQueryComponent: // §3.4
			// The RFC allows / and ?.
			return c != '/' && c != '?'

		case encodeFragment: // §4.1
			// The RFC text is silent but the grammar allows
			// everything, so escape nothing but #
			return c == '#'
		}
	}

	// Everything else must be escaped.
	return true
}

// QueryEscape escapes the string so it can be safely placed
// inside a URL query.
func QueryEscape(s string) string {
	return escape(s, encodeQueryComponent)
}

func escape(s string, mode encoding) string {
	spaceCount, hexCount := 0, 0
	for i := 0; i < len(s); i++ {
		c := s[i]
		if shouldEscape(c, mode) {
			if c == ' ' && mode == encodeQueryComponent {
				spaceCount++
			} else {
				hexCount++
			}
		}
	}

	if spaceCount == 0 && hexCount == 0 {
		return s
	}

	t := make([]byte, len(s)+2*hexCount)
	j := 0
	for i := 0; i < len(s); i++ {
		switch c := s[i]; {
		case c == ' ' && mode == encodeQueryComponent:
			t[j] = '+'
			j++
		case shouldEscape(c, mode):
			t[j] = '%'
			t[j+1] = "0123456789ABCDEF"[c>>4]
			t[j+2] = "0123456789ABCDEF"[c&15]
			j += 3
		default:
			t[j] = s[i]
			j++
		}
	}
	return string(t)
}

var uiReplacer = strings.NewReplacer(
	"%21", "!",
	"%27", "'",
	"%28", "(",
	"%29", ")",
	"%2A", "*",
)

// unescapeUserinfo unescapes some characters that need not be escaped, as per RFC 3986.
func unescapeUserinfo(s string) string {
	return uiReplacer.Replace(s)
}

// Escape reassembles the URL into a valid URL string.
// The general form of the result is one of:
//
//	scheme:opaque
//	scheme://userinfo@host/path?query#fragment
//
// If u.Opaque is non-empty, String uses the first form;
// otherwise it uses the second form.
//
// In the second form, the following rules apply:
//	- if u.Scheme is empty, scheme: is omitted.
//	- if u.User is nil, userinfo@ is omitted.
//	- if u.Host is empty, host/ is omitted.
//	- if u.Scheme and u.Host are empty and u.User is nil,
//	  the entire scheme://userinfo@host/ is omitted.
//	- if u.Host is non-empty and u.Path begins with a /,
//	  the form host/path does not add its own /.
//	- if u.RawQuery is empty, ?query is omitted.
//	- if u.Fragment is empty, #fragment is omitted.
func Escape(u *url.URL) string {
	var buf bytes.Buffer
	if u.Scheme != "" {
		buf.WriteString(u.Scheme)
		buf.WriteByte(':')
	}
	if u.Opaque != "" {
		buf.WriteString(u.Opaque)
	} else {
		if u.Scheme != "" || u.Host != "" || u.User != nil {
			buf.WriteString("//")
			if ui := u.User; ui != nil {
				buf.WriteString(unescapeUserinfo(ui.String()))
				buf.WriteByte('@')
			}
			if h := u.Host; h != "" {
				buf.WriteString(h)
			}
		}
		if u.Path != "" && u.Path[0] != '/' && u.Host != "" {
			buf.WriteByte('/')
		}
		buf.WriteString(escape(u.Path, encodePath))
	}
	if u.RawQuery != "" {
		buf.WriteByte('?')
		buf.WriteString(u.RawQuery)
	}
	if u.Fragment != "" {
		buf.WriteByte('#')
		buf.WriteString(escape(u.Fragment, encodeFragment))
	}
	return buf.String()
}
@@ -0,0 +1,14 @@
language: go

go:
  - 1.1
  - 1.2
  - 1.3
  - 1.4
  - 1.5
  - 1.6
  - tip

notifications:
  email:
    - bwatas@gmail.com
@@ -0,0 +1,63 @@
#### Support
If you have a contribution to the package, feel free to create a Pull Request or an Issue.

#### What to contribute
If you don't know what to do, here are some features and functions that need to be done:

- [ ] Refactor code
- [ ] Edit docs and [README](https://github.com/asaskevich/govalidator/README.md): spellcheck, grammar and typo check
- [ ] Create an actual list of contributors and projects that are currently using this package
- [ ] Resolve [issues and bugs](https://github.com/asaskevich/govalidator/issues)
- [ ] Update the actual [list of functions](https://github.com/asaskevich/govalidator#list-of-functions)
- [ ] Update the [list of validators](https://github.com/asaskevich/govalidator#validatestruct-2) that are available for `ValidateStruct` and add new ones
- [ ] Implement new validators: `IsFQDN`, `IsIMEI`, `IsPostalCode`, `IsISIN`, `IsISRC` etc.
- [ ] Implement [validation by maps](https://github.com/asaskevich/govalidator/issues/224)
- [ ] Implement fuzzing testing
- [ ] Implement some struct/map/array utilities
- [ ] Implement map/array validation
- [ ] Implement benchmarking
- [ ] Implement batch of examples
- [ ] Look at forks for new features and fixes

#### Advice
Feel free to create what you want, but keep in mind when you implement new features:
- Code must be clear and readable, and names of variables/constants must clearly describe what they do
- Public functions must be documented and described in the source file and added to README.md's list of available functions
- There must be unit tests for any new functions and improvements

## Financial contributions

We also welcome financial contributions in full transparency on our [open collective](https://opencollective.com/govalidator).
Anyone can file an expense. If the expense makes sense for the development of the community, it will be "merged" in the ledger of our open collective by the core contributors and the person who filed the expense will be reimbursed.


## Credits


### Contributors

Thank you to all the people who have already contributed to govalidator!
<a href="graphs/contributors"><img src="https://opencollective.com/govalidator/contributors.svg?width=890" /></a>


### Backers

Thank you to all our backers! [[Become a backer](https://opencollective.com/govalidator#backer)]

<a href="https://opencollective.com/govalidator#backers" target="_blank"><img src="https://opencollective.com/govalidator/backers.svg?width=890"></a>


### Sponsors

Thank you to all our sponsors! (Please ask your company to also support this open source project by [becoming a sponsor](https://opencollective.com/govalidator#sponsor).)

<a href="https://opencollective.com/govalidator/sponsor/0/website" target="_blank"><img src="https://opencollective.com/govalidator/sponsor/0/avatar.svg"></a>
<a href="https://opencollective.com/govalidator/sponsor/1/website" target="_blank"><img src="https://opencollective.com/govalidator/sponsor/1/avatar.svg"></a>
<a href="https://opencollective.com/govalidator/sponsor/2/website" target="_blank"><img src="https://opencollective.com/govalidator/sponsor/2/avatar.svg"></a>
<a href="https://opencollective.com/govalidator/sponsor/3/website" target="_blank"><img src="https://opencollective.com/govalidator/sponsor/3/avatar.svg"></a>
<a href="https://opencollective.com/govalidator/sponsor/4/website" target="_blank"><img src="https://opencollective.com/govalidator/sponsor/4/avatar.svg"></a>
<a href="https://opencollective.com/govalidator/sponsor/5/website" target="_blank"><img src="https://opencollective.com/govalidator/sponsor/5/avatar.svg"></a>
<a href="https://opencollective.com/govalidator/sponsor/6/website" target="_blank"><img src="https://opencollective.com/govalidator/sponsor/6/avatar.svg"></a>
<a href="https://opencollective.com/govalidator/sponsor/7/website" target="_blank"><img src="https://opencollective.com/govalidator/sponsor/7/avatar.svg"></a>
<a href="https://opencollective.com/govalidator/sponsor/8/website" target="_blank"><img src="https://opencollective.com/govalidator/sponsor/8/avatar.svg"></a>
<a href="https://opencollective.com/govalidator/sponsor/9/website" target="_blank"><img src="https://opencollective.com/govalidator/sponsor/9/avatar.svg"></a>
@@ -0,0 +1,21 @@
The MIT License (MIT)

Copyright (c) 2014 Alex Saskevich

Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:

The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.

THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
@@ -0,0 +1,496 @@
govalidator
===========
[![Gitter](https://badges.gitter.im/Join%20Chat.svg)](https://gitter.im/asaskevich/govalidator?utm_source=badge&utm_medium=badge&utm_campaign=pr-badge) [![GoDoc](https://godoc.org/github.com/asaskevich/govalidator?status.png)](https://godoc.org/github.com/asaskevich/govalidator) [![Coverage Status](https://img.shields.io/coveralls/asaskevich/govalidator.svg)](https://coveralls.io/r/asaskevich/govalidator?branch=master) [![wercker status](https://app.wercker.com/status/1ec990b09ea86c910d5f08b0e02c6043/s "wercker status")](https://app.wercker.com/project/bykey/1ec990b09ea86c910d5f08b0e02c6043)
[![Build Status](https://travis-ci.org/asaskevich/govalidator.svg?branch=master)](https://travis-ci.org/asaskevich/govalidator) [![Go Report Card](https://goreportcard.com/badge/github.com/asaskevich/govalidator)](https://goreportcard.com/report/github.com/asaskevich/govalidator) [![GoSearch](http://go-search.org/badge?id=github.com%2Fasaskevich%2Fgovalidator)](http://go-search.org/view?id=github.com%2Fasaskevich%2Fgovalidator) [![Backers on Open Collective](https://opencollective.com/govalidator/backers/badge.svg)](#backers) [![Sponsors on Open Collective](https://opencollective.com/govalidator/sponsors/badge.svg)](#sponsors) [![FOSSA Status](https://app.fossa.io/api/projects/git%2Bgithub.com%2Fasaskevich%2Fgovalidator.svg?type=shield)](https://app.fossa.io/projects/git%2Bgithub.com%2Fasaskevich%2Fgovalidator?ref=badge_shield)

A package of validators and sanitizers for strings, structs and collections. Based on [validator.js](https://github.com/chriso/validator.js).

#### Installation
Make sure that Go is installed on your computer.
Type the following command in your terminal:

	go get github.com/asaskevich/govalidator

or you can get a specific release of the package with `gopkg.in`:

	go get gopkg.in/asaskevich/govalidator.v4

After that, the package is ready to use.


#### Import package in your project
Add the following line to your `*.go` file:
```go
import "github.com/asaskevich/govalidator"
```
If you don't want to type the long `govalidator` name, you can alias it like this:
```go
import (
  valid "github.com/asaskevich/govalidator"
)
```

#### Activate behavior to require all fields have a validation tag by default
`SetFieldsRequiredByDefault` causes validation to fail when struct fields do not include validations or are not explicitly marked as exempt (using `valid:"-"` or `valid:"email,optional"`). A good place to activate this is a package init function or the main() function.

`SetNilPtrAllowedByRequired` causes validation to pass when struct fields marked by `required` are set to nil. This is disabled by default for consistency, but some packages that need to be able to distinguish between `nil` and `zero value` state can use this. If disabled, both `nil` and `zero` values cause validation errors. A companion sketch follows the code block below.

```go
import "github.com/asaskevich/govalidator"

func init() {
  govalidator.SetFieldsRequiredByDefault(true)
}
```
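And the companion sketch for `SetNilPtrAllowedByRequired` (assuming the same init-time placement; the function is the one described in the note above):

```go
import "github.com/asaskevich/govalidator"

func init() {
  // Let `required` pass for nil pointers while still rejecting
  // zero values, as described above.
  govalidator.SetNilPtrAllowedByRequired(true)
}
```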
||||
|
||||
Here's some code to explain it:
|
||||
```go
|
||||
// this struct definition will fail govalidator.ValidateStruct() (and the field values do not matter):
|
||||
type exampleStruct struct {
|
||||
Name string ``
|
||||
Email string `valid:"email"`
|
||||
}
|
||||
|
||||
// this, however, will only fail when Email is empty or an invalid email address:
|
||||
type exampleStruct2 struct {
|
||||
Name string `valid:"-"`
|
||||
Email string `valid:"email"`
|
||||
}
|
||||
|
||||
// lastly, this will only fail when Email is an invalid email address but not when it's empty:
|
||||
type exampleStruct2 struct {
|
||||
Name string `valid:"-"`
|
||||
Email string `valid:"email,optional"`
|
||||
}
|
||||
```
|
||||
|
||||
#### Recent breaking changes (see [#123](https://github.com/asaskevich/govalidator/pull/123))
|
||||
##### Custom validator function signature
|
||||
A context was added as the second parameter, for structs this is the object being validated – this makes dependent validation possible.
|
||||
```go
|
||||
import "github.com/asaskevich/govalidator"
|
||||
|
||||
// old signature
|
||||
func(i interface{}) bool
|
||||
|
||||
// new signature
|
||||
func(i interface{}, o interface{}) bool
|
||||
```
|
||||
|
||||
##### Adding a custom validator
|
||||
This was changed to prevent data races when accessing custom validators.
|
||||
```go
|
||||
import "github.com/asaskevich/govalidator"
|
||||
|
||||
// before
|
||||
govalidator.CustomTypeTagMap["customByteArrayValidator"] = CustomTypeValidator(func(i interface{}, o interface{}) bool {
|
||||
// ...
|
||||
})
|
||||
|
||||
// after
|
||||
govalidator.CustomTypeTagMap.Set("customByteArrayValidator", CustomTypeValidator(func(i interface{}, o interface{}) bool {
|
||||
// ...
|
||||
}))
|
||||
```
|
||||
|
||||
#### List of functions:
|
||||
```go
|
||||
func Abs(value float64) float64
|
||||
func BlackList(str, chars string) string
|
||||
func ByteLength(str string, params ...string) bool
|
||||
func CamelCaseToUnderscore(str string) string
|
||||
func Contains(str, substring string) bool
|
||||
func Count(array []interface{}, iterator ConditionIterator) int
|
||||
func Each(array []interface{}, iterator Iterator)
|
||||
func ErrorByField(e error, field string) string
|
||||
func ErrorsByField(e error) map[string]string
|
||||
func Filter(array []interface{}, iterator ConditionIterator) []interface{}
|
||||
func Find(array []interface{}, iterator ConditionIterator) interface{}
|
||||
func GetLine(s string, index int) (string, error)
|
||||
func GetLines(s string) []string
|
||||
func InRange(value, left, right float64) bool
|
||||
func IsASCII(str string) bool
|
||||
func IsAlpha(str string) bool
|
||||
func IsAlphanumeric(str string) bool
|
||||
func IsBase64(str string) bool
|
||||
func IsByteLength(str string, min, max int) bool
|
||||
func IsCIDR(str string) bool
|
||||
func IsCreditCard(str string) bool
|
||||
func IsDNSName(str string) bool
|
||||
func IsDataURI(str string) bool
|
||||
func IsDialString(str string) bool
|
||||
func IsDivisibleBy(str, num string) bool
|
||||
func IsEmail(str string) bool
|
||||
func IsFilePath(str string) (bool, int)
|
||||
func IsFloat(str string) bool
|
||||
func IsFullWidth(str string) bool
|
||||
func IsHalfWidth(str string) bool
|
||||
func IsHexadecimal(str string) bool
|
||||
func IsHexcolor(str string) bool
|
||||
func IsHost(str string) bool
|
||||
func IsIP(str string) bool
|
||||
func IsIPv4(str string) bool
|
||||
func IsIPv6(str string) bool
|
||||
func IsISBN(str string, version int) bool
|
||||
func IsISBN10(str string) bool
|
||||
func IsISBN13(str string) bool
|
||||
func IsISO3166Alpha2(str string) bool
|
||||
func IsISO3166Alpha3(str string) bool
|
||||
func IsISO693Alpha2(str string) bool
|
||||
func IsISO693Alpha3b(str string) bool
|
||||
func IsISO4217(str string) bool
|
||||
func IsIn(str string, params ...string) bool
|
||||
func IsInt(str string) bool
|
||||
func IsJSON(str string) bool
|
||||
func IsLatitude(str string) bool
|
||||
func IsLongitude(str string) bool
|
||||
func IsLowerCase(str string) bool
|
||||
func IsMAC(str string) bool
|
||||
func IsMongoID(str string) bool
|
||||
func IsMultibyte(str string) bool
|
||||
func IsNatural(value float64) bool
|
||||
func IsNegative(value float64) bool
|
||||
func IsNonNegative(value float64) bool
|
||||
func IsNonPositive(value float64) bool
|
||||
func IsNull(str string) bool
|
||||
func IsNumeric(str string) bool
|
||||
func IsPort(str string) bool
|
||||
func IsPositive(value float64) bool
|
||||
func IsPrintableASCII(str string) bool
|
||||
func IsRFC3339(str string) bool
|
||||
func IsRFC3339WithoutZone(str string) bool
|
||||
func IsRGBcolor(str string) bool
|
||||
func IsRequestURI(rawurl string) bool
|
||||
func IsRequestURL(rawurl string) bool
|
||||
func IsSSN(str string) bool
|
||||
func IsSemver(str string) bool
|
||||
func IsTime(str string, format string) bool
|
||||
func IsURL(str string) bool
|
||||
func IsUTFDigit(str string) bool
|
||||
func IsUTFLetter(str string) bool
|
||||
func IsUTFLetterNumeric(str string) bool
|
||||
func IsUTFNumeric(str string) bool
|
||||
func IsUUID(str string) bool
|
||||
func IsUUIDv3(str string) bool
|
||||
func IsUUIDv4(str string) bool
|
||||
func IsUUIDv5(str string) bool
|
||||
func IsUpperCase(str string) bool
|
||||
func IsVariableWidth(str string) bool
|
||||
func IsWhole(value float64) bool
|
||||
func LeftTrim(str, chars string) string
|
||||
func Map(array []interface{}, iterator ResultIterator) []interface{}
|
||||
func Matches(str, pattern string) bool
|
||||
func NormalizeEmail(str string) (string, error)
|
||||
func PadBoth(str string, padStr string, padLen int) string
|
||||
func PadLeft(str string, padStr string, padLen int) string
|
||||
func PadRight(str string, padStr string, padLen int) string
|
||||
func Range(str string, params ...string) bool
|
||||
func RemoveTags(s string) string
|
||||
func ReplacePattern(str, pattern, replace string) string
|
||||
func Reverse(s string) string
|
||||
func RightTrim(str, chars string) string
|
||||
func RuneLength(str string, params ...string) bool
|
||||
func SafeFileName(str string) string
|
||||
func SetFieldsRequiredByDefault(value bool)
|
||||
func Sign(value float64) float64
|
||||
func StringLength(str string, params ...string) bool
|
||||
func StringMatches(s string, params ...string) bool
|
||||
func StripLow(str string, keepNewLines bool) string
|
||||
func ToBoolean(str string) (bool, error)
|
||||
func ToFloat(str string) (float64, error)
|
||||
func ToInt(str string) (int64, error)
|
||||
func ToJSON(obj interface{}) (string, error)
|
||||
func ToString(obj interface{}) string
|
||||
func Trim(str, chars string) string
|
||||
func Truncate(str string, length int, ending string) string
|
||||
func UnderscoreToCamelCase(s string) string
|
||||
func ValidateStruct(s interface{}) (bool, error)
|
||||
func WhiteList(str, chars string) string
|
||||
type ConditionIterator
|
||||
type CustomTypeValidator
|
||||
type Error
|
||||
func (e Error) Error() string
|
||||
type Errors
|
||||
func (es Errors) Error() string
|
||||
func (es Errors) Errors() []error
|
||||
type ISO3166Entry
|
||||
type Iterator
|
||||
type ParamValidator
|
||||
type ResultIterator
|
||||
type UnsupportedTypeError
|
||||
func (e *UnsupportedTypeError) Error() string
|
||||
type Validator
|
||||
```
|
||||
|
||||
#### Examples
|
||||
###### IsURL
|
||||
```go
|
||||
println(govalidator.IsURL(`http://user@pass:domain.com/path/page`))
|
||||
```
|
||||
###### ToString
|
||||
```go
|
||||
type User struct {
|
||||
FirstName string
|
||||
LastName string
|
||||
}
|
||||
|
||||
str := govalidator.ToString(&User{"John", "Juan"})
|
||||
println(str)
|
||||
```
|
||||
###### Each, Map, Filter, Count for slices
|
||||
Each iterates over the slice/array and calls Iterator for every item
|
||||
```go
|
||||
data := []interface{}{1, 2, 3, 4, 5}
|
||||
var fn govalidator.Iterator = func(value interface{}, index int) {
|
||||
println(value.(int))
|
||||
}
|
||||
govalidator.Each(data, fn)
|
||||
```
|
||||
```go
|
||||
data := []interface{}{1, 2, 3, 4, 5}
|
||||
var fn govalidator.ResultIterator = func(value interface{}, index int) interface{} {
|
||||
return value.(int) * 3
|
||||
}
|
||||
_ = govalidator.Map(data, fn) // result = []interface{}{1, 6, 9, 12, 15}
|
||||
```
|
||||
```go
|
||||
data := []interface{}{1, 2, 3, 4, 5, 6, 7, 8, 9, 10}
|
||||
var fn govalidator.ConditionIterator = func(value interface{}, index int) bool {
|
||||
return value.(int)%2 == 0
|
||||
}
|
||||
_ = govalidator.Filter(data, fn) // result = []interface{}{2, 4, 6, 8, 10}
|
||||
_ = govalidator.Count(data, fn) // result = 5
|
||||
```
|
||||
###### ValidateStruct [#2](https://github.com/asaskevich/govalidator/pull/2)
|
||||
If you want to validate structs, you can use tag `valid` for any field in your structure. All validators used with this field in one tag are separated by comma. If you want to skip validation, place `-` in your tag. If you need a validator that is not on the list below, you can add it like this:
|
||||
```go
|
||||
govalidator.TagMap["duck"] = govalidator.Validator(func(str string) bool {
|
||||
return str == "duck"
|
||||
})
|
||||
```
|
||||
For completely custom validators (interface-based), see below.
|
||||
|
||||
Here is a list of available validators for struct fields (validator - used function):
|
||||
```go
|
||||
"email": IsEmail,
|
||||
"url": IsURL,
|
||||
"dialstring": IsDialString,
|
||||
"requrl": IsRequestURL,
|
||||
"requri": IsRequestURI,
|
||||
"alpha": IsAlpha,
|
||||
"utfletter": IsUTFLetter,
|
||||
"alphanum": IsAlphanumeric,
|
||||
"utfletternum": IsUTFLetterNumeric,
|
||||
"numeric": IsNumeric,
|
||||
"utfnumeric": IsUTFNumeric,
|
||||
"utfdigit": IsUTFDigit,
|
||||
"hexadecimal": IsHexadecimal,
|
||||
"hexcolor": IsHexcolor,
|
||||
"rgbcolor": IsRGBcolor,
|
||||
"lowercase": IsLowerCase,
|
||||
"uppercase": IsUpperCase,
|
||||
"int": IsInt,
|
||||
"float": IsFloat,
|
||||
"null": IsNull,
|
||||
"uuid": IsUUID,
|
||||
"uuidv3": IsUUIDv3,
|
||||
"uuidv4": IsUUIDv4,
|
||||
"uuidv5": IsUUIDv5,
|
||||
"creditcard": IsCreditCard,
|
||||
"isbn10": IsISBN10,
|
||||
"isbn13": IsISBN13,
|
||||
"json": IsJSON,
|
||||
"multibyte": IsMultibyte,
|
||||
"ascii": IsASCII,
|
||||
"printableascii": IsPrintableASCII,
|
||||
"fullwidth": IsFullWidth,
|
||||
"halfwidth": IsHalfWidth,
|
||||
"variablewidth": IsVariableWidth,
|
||||
"base64": IsBase64,
|
||||
"datauri": IsDataURI,
|
||||
"ip": IsIP,
|
||||
"port": IsPort,
|
||||
"ipv4": IsIPv4,
|
||||
"ipv6": IsIPv6,
|
||||
"dns": IsDNSName,
|
||||
"host": IsHost,
|
||||
"mac": IsMAC,
|
||||
"latitude": IsLatitude,
|
||||
"longitude": IsLongitude,
|
||||
"ssn": IsSSN,
|
||||
"semver": IsSemver,
|
||||
"rfc3339": IsRFC3339,
|
||||
"rfc3339WithoutZone": IsRFC3339WithoutZone,
|
||||
"ISO3166Alpha2": IsISO3166Alpha2,
|
||||
"ISO3166Alpha3": IsISO3166Alpha3,
|
||||
```
|
||||
Validators with parameters
|
||||
|
||||
```go
|
||||
"range(min|max)": Range,
|
||||
"length(min|max)": ByteLength,
|
||||
"runelength(min|max)": RuneLength,
|
||||
"matches(pattern)": StringMatches,
|
||||
"in(string1|string2|...|stringN)": IsIn,
|
||||
```

And here is a small example of usage:
```go
type Post struct {
	Title    string `valid:"alphanum,required"`
	Message  string `valid:"duck,ascii"`
	AuthorIP string `valid:"ipv4"`
	Date     string `valid:"-"`
}

post := &Post{
	Title:    "MyExamplePost", // "alphanum" allows no spaces
	Message:  "duck",
	AuthorIP: "123.234.54.3",
}

// Add your own struct validation tags
govalidator.TagMap["duck"] = govalidator.Validator(func(str string) bool {
	return str == "duck"
})

result, err := govalidator.ValidateStruct(post)
if err != nil {
	println("error: " + err.Error())
}
println(result)
```

###### WhiteList
```go
// Remove all characters from the string that do not match the whitelist pattern "a-z"
println(govalidator.WhiteList("a3a43a5a4a3a2a23a4a5a4a3a4", "a-z") == "aaaaaaaaaaaa")
```

###### Custom validation functions
Custom validation using your own domain-specific validators is also available - here's an example of how to use it:
```go
import "github.com/asaskevich/govalidator"

type CustomByteArray [6]byte // custom types are supported and can be validated

type StructWithCustomByteArray struct {
	ID              CustomByteArray `valid:"customByteArrayValidator,customMinLengthValidator"` // multiple custom validators are possible as well and will be evaluated in sequence
	Email           string          `valid:"email"`
	CustomMinLength int             `valid:"-"`
}

govalidator.CustomTypeTagMap.Set("customByteArrayValidator", govalidator.CustomTypeValidator(func(i interface{}, context interface{}) bool {
	switch context.(type) { // you can type switch on the context interface being validated
	case StructWithCustomByteArray:
		// you can check and validate against some other field in the context,
		// return early or not validate against the context at all – your choice
	case SomeOtherType:
		// ...
	default:
		// expecting some other type? Throw/panic here or continue
	}

	switch v := i.(type) { // type switch on the struct field being validated
	case CustomByteArray:
		for _, e := range v { // this validator checks that the byte array is not empty, i.e. not all zeroes
			if e != 0 {
				return true
			}
		}
	}
	return false
}))
govalidator.CustomTypeTagMap.Set("customMinLengthValidator", govalidator.CustomTypeValidator(func(i interface{}, context interface{}) bool {
	switch v := context.(type) { // this validates a field against the value in another field, i.e. dependent validation
	case StructWithCustomByteArray:
		return len(v.ID) >= v.CustomMinLength
	}
	return false
}))
```

###### Custom error messages
Custom error messages are supported via annotations by adding the `~` separator - here's an example of how to use it:
```go
type Ticket struct {
	Id        int64  `json:"id"`
	FirstName string `json:"firstname" valid:"required~First name is blank"`
}
```
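
A minimal sketch of how the custom message surfaces (our own illustration, not from the original README): when the `required` check fails, the text after the `~` is returned instead of the generated message.

```go
ticket := Ticket{Id: 1}
if _, err := govalidator.ValidateStruct(ticket); err != nil {
	println(err.Error()) // prints "First name is blank" for the failing field
}
```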

#### Notes
Documentation is available here: [godoc.org](https://godoc.org/github.com/asaskevich/govalidator).
Full information about code coverage is also available here: [govalidator on gocover.io](http://gocover.io/github.com/asaskevich/govalidator).

#### Support
If you have a contribution for the package, feel free to create a Pull Request or an Issue.

#### What to contribute
If you don't know what to do, there are some features and functions that still need to be done:

- [ ] Refactor code
- [ ] Edit docs and [README](https://github.com/asaskevich/govalidator/README.md): spellcheck, grammar and typo check
- [ ] Create an actual list of contributors and projects that are currently using this package
- [ ] Resolve [issues and bugs](https://github.com/asaskevich/govalidator/issues)
- [ ] Update the actual [list of functions](https://github.com/asaskevich/govalidator#list-of-functions)
- [ ] Update the [list of validators](https://github.com/asaskevich/govalidator#validatestruct-2) that are available for `ValidateStruct` and add new ones
- [ ] Implement new validators: `IsFQDN`, `IsIMEI`, `IsPostalCode`, `IsISIN`, `IsISRC` etc.
- [ ] Implement [validation by maps](https://github.com/asaskevich/govalidator/issues/224)
- [ ] Implement fuzz testing
- [ ] Implement some struct/map/array utilities
- [ ] Implement map/array validation
- [ ] Implement benchmarking
- [ ] Implement a batch of examples
- [ ] Look at forks for new features and fixes

#### Advice
Feel free to create what you want, but keep in mind when you implement new features:
- Code must be clear and readable, and names of variables/constants must clearly describe what they do
- Public functions must be documented in the source file and added to the list of available functions in README.md
- There must be unit tests for any new functions and improvements

## Credits
### Contributors

This project exists thanks to all the people who contribute. [[Contribute](CONTRIBUTING.md)].

#### Special thanks to [contributors](https://github.com/asaskevich/govalidator/graphs/contributors)
* [Daniel Lohse](https://github.com/annismckenzie)
* [Attila Oláh](https://github.com/attilaolah)
* [Daniel Korner](https://github.com/Dadie)
* [Steven Wilkin](https://github.com/stevenwilkin)
* [Deiwin Sarjas](https://github.com/deiwin)
* [Noah Shibley](https://github.com/slugmobile)
* [Nathan Davies](https://github.com/nathj07)
* [Matt Sanford](https://github.com/mzsanford)
* [Simon ccl1115](https://github.com/ccl1115)

<a href="graphs/contributors"><img src="https://opencollective.com/govalidator/contributors.svg?width=890" /></a>

### Backers

Thank you to all our backers! 🙏 [[Become a backer](https://opencollective.com/govalidator#backer)]

<a href="https://opencollective.com/govalidator#backers" target="_blank"><img src="https://opencollective.com/govalidator/backers.svg?width=890"></a>

### Sponsors

Support this project by becoming a sponsor. Your logo will show up here with a link to your website. [[Become a sponsor](https://opencollective.com/govalidator#sponsor)]

<a href="https://opencollective.com/govalidator/sponsor/0/website" target="_blank"><img src="https://opencollective.com/govalidator/sponsor/0/avatar.svg"></a>
<a href="https://opencollective.com/govalidator/sponsor/1/website" target="_blank"><img src="https://opencollective.com/govalidator/sponsor/1/avatar.svg"></a>
<a href="https://opencollective.com/govalidator/sponsor/2/website" target="_blank"><img src="https://opencollective.com/govalidator/sponsor/2/avatar.svg"></a>
<a href="https://opencollective.com/govalidator/sponsor/3/website" target="_blank"><img src="https://opencollective.com/govalidator/sponsor/3/avatar.svg"></a>
<a href="https://opencollective.com/govalidator/sponsor/4/website" target="_blank"><img src="https://opencollective.com/govalidator/sponsor/4/avatar.svg"></a>
<a href="https://opencollective.com/govalidator/sponsor/5/website" target="_blank"><img src="https://opencollective.com/govalidator/sponsor/5/avatar.svg"></a>
<a href="https://opencollective.com/govalidator/sponsor/6/website" target="_blank"><img src="https://opencollective.com/govalidator/sponsor/6/avatar.svg"></a>
<a href="https://opencollective.com/govalidator/sponsor/7/website" target="_blank"><img src="https://opencollective.com/govalidator/sponsor/7/avatar.svg"></a>
<a href="https://opencollective.com/govalidator/sponsor/8/website" target="_blank"><img src="https://opencollective.com/govalidator/sponsor/8/avatar.svg"></a>
<a href="https://opencollective.com/govalidator/sponsor/9/website" target="_blank"><img src="https://opencollective.com/govalidator/sponsor/9/avatar.svg"></a>

## License
[![FOSSA Status](https://app.fossa.io/api/projects/git%2Bgithub.com%2Fasaskevich%2Fgovalidator.svg?type=large)](https://app.fossa.io/projects/git%2Bgithub.com%2Fasaskevich%2Fgovalidator?ref=badge_large)

@ -0,0 +1,58 @@
package govalidator

// Iterator is the function that accepts an element of a slice/array and its index.
type Iterator func(interface{}, int)

// ResultIterator is the function that accepts an element of a slice/array and its index and returns any result.
type ResultIterator func(interface{}, int) interface{}

// ConditionIterator is the function that accepts an element of a slice/array and its index and returns a boolean.
type ConditionIterator func(interface{}, int) bool

// Each iterates over the slice and applies Iterator to every item.
func Each(array []interface{}, iterator Iterator) {
	for index, data := range array {
		iterator(data, index)
	}
}

// Map iterates over the slice and applies ResultIterator to every item. Returns a new slice as a result.
func Map(array []interface{}, iterator ResultIterator) []interface{} {
	result := make([]interface{}, len(array))
	for index, data := range array {
		result[index] = iterator(data, index)
	}
	return result
}

// Find iterates over the slice and applies ConditionIterator to every item. Returns the first item that meets ConditionIterator, or nil otherwise.
func Find(array []interface{}, iterator ConditionIterator) interface{} {
	for index, data := range array {
		if iterator(data, index) {
			return data
		}
	}
	return nil
}
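
// Example (editor's illustration, not part of the original source):
//
//	first := Find([]interface{}{1, 2, 3, 4}, func(v interface{}, i int) bool {
//		return v.(int) > 2
//	})
//	// first == 3; Find returns nil when no element matches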

// Filter iterates over the slice and applies ConditionIterator to every item. Returns a new slice.
func Filter(array []interface{}, iterator ConditionIterator) []interface{} {
	result := make([]interface{}, 0)
	for index, data := range array {
		if iterator(data, index) {
			result = append(result, data)
		}
	}
	return result
}

// Count iterates over the slice and applies ConditionIterator to every item. Returns the count of items that meet ConditionIterator.
func Count(array []interface{}, iterator ConditionIterator) int {
	count := 0
	for index, data := range array {
		if iterator(data, index) {
			count++
		}
	}
	return count
}

@ -0,0 +1,64 @@
package govalidator

import (
	"encoding/json"
	"fmt"
	"reflect"
	"strconv"
)

// ToString converts the input to a string.
func ToString(obj interface{}) string {
	return fmt.Sprintf("%v", obj)
}

// ToJSON converts the input to a valid JSON string.
func ToJSON(obj interface{}) (string, error) {
	res, err := json.Marshal(obj)
	if err != nil {
		res = []byte("")
	}
	return string(res), err
}

// ToFloat converts the input string to a float64, or 0.0 if the input is not a float.
func ToFloat(str string) (float64, error) {
	res, err := strconv.ParseFloat(str, 64)
	if err != nil {
		res = 0.0
	}
	return res, err
}

// ToInt converts the input string or any int type to an int64, or 0 if the input is not an integer.
func ToInt(value interface{}) (res int64, err error) {
	val := reflect.ValueOf(value)

	switch value.(type) {
	case int, int8, int16, int32, int64:
		res = val.Int()
	case uint, uint8, uint16, uint32, uint64:
		res = int64(val.Uint())
	case string:
		if IsInt(val.String()) {
			res, err = strconv.ParseInt(val.String(), 0, 64)
			if err != nil {
				res = 0
			}
		} else {
			err = fmt.Errorf("ToInt: invalid numeric format %v", value)
			res = 0
		}
	default:
		err = fmt.Errorf("ToInt: unknown interface type %T", value)
		res = 0
	}

	return
}
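
// Example (editor's illustration, not part of the original source):
//
//	n, _ := ToInt("42")     // n == 42
//	m, _ := ToInt(uint8(7)) // m == 7
//	_, err := ToInt("4.2")  // err != nil: "4.2" is not an integer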

// ToBoolean converts the input string to a boolean.
func ToBoolean(str string) (bool, error) {
	return strconv.ParseBool(str)
}

@ -0,0 +1,43 @@
package govalidator

import "strings"

// Errors is an array of multiple errors and conforms to the error interface.
type Errors []error

// Errors returns itself.
func (es Errors) Errors() []error {
	return es
}

func (es Errors) Error() string {
	var errs []string
	for _, e := range es {
		errs = append(errs, e.Error())
	}
	return strings.Join(errs, ";")
}

// Error encapsulates a name, an error and whether there's a custom error message or not.
type Error struct {
	Name                     string
	Err                      error
	CustomErrorMessageExists bool

	// Validator indicates the name of the validator that failed
	Validator string
	Path      []string
}

func (e Error) Error() string {
	if e.CustomErrorMessageExists {
		return e.Err.Error()
	}

	errName := e.Name
	if len(e.Path) > 0 {
		errName = strings.Join(append(e.Path, e.Name), ".")
	}

	return errName + ": " + e.Err.Error()
}
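
// Example (editor's illustration, not part of the original source): for a
// nested field, Path is joined with the name using dots, so
//
//	e := Error{Name: "Email", Err: errors.New("does not validate as email"), Path: []string{"User"}}
//
// yields e.Error() == "User.Email: does not validate as email".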

@ -0,0 +1,97 @@
package govalidator

import (
	"math"
	"reflect"
)

// Abs returns the absolute value of the number.
func Abs(value float64) float64 {
	return math.Abs(value)
}

// Sign returns the signum of the number: 1 if value > 0, -1 if value < 0, 0 otherwise.
func Sign(value float64) float64 {
	if value > 0 {
		return 1
	} else if value < 0 {
		return -1
	} else {
		return 0
	}
}

// IsNegative returns true if value < 0
func IsNegative(value float64) bool {
	return value < 0
}

// IsPositive returns true if value > 0
func IsPositive(value float64) bool {
	return value > 0
}

// IsNonNegative returns true if value >= 0
func IsNonNegative(value float64) bool {
	return value >= 0
}

// IsNonPositive returns true if value <= 0
func IsNonPositive(value float64) bool {
	return value <= 0
}

// InRangeInt returns true if value lies between the left and right border.
func InRangeInt(value, left, right interface{}) bool {
	value64, _ := ToInt(value)
	left64, _ := ToInt(left)
	right64, _ := ToInt(right)
	if left64 > right64 {
		left64, right64 = right64, left64
	}
	return value64 >= left64 && value64 <= right64
}

// InRangeFloat32 returns true if value lies between the left and right border.
func InRangeFloat32(value, left, right float32) bool {
	if left > right {
		left, right = right, left
	}
	return value >= left && value <= right
}

// InRangeFloat64 returns true if value lies between the left and right border.
func InRangeFloat64(value, left, right float64) bool {
	if left > right {
		left, right = right, left
	}
	return value >= left && value <= right
}

// InRange returns true if value lies between the left and right border. It is generic enough to handle int, float32 and float64, but all three arguments must be of the same type.
func InRange(value interface{}, left interface{}, right interface{}) bool {

	reflectValue := reflect.TypeOf(value).Kind()
	reflectLeft := reflect.TypeOf(left).Kind()
	reflectRight := reflect.TypeOf(right).Kind()

	if reflectValue == reflect.Int && reflectLeft == reflect.Int && reflectRight == reflect.Int {
		return InRangeInt(value.(int), left.(int), right.(int))
	} else if reflectValue == reflect.Float32 && reflectLeft == reflect.Float32 && reflectRight == reflect.Float32 {
		return InRangeFloat32(value.(float32), left.(float32), right.(float32))
	} else if reflectValue == reflect.Float64 && reflectLeft == reflect.Float64 && reflectRight == reflect.Float64 {
		return InRangeFloat64(value.(float64), left.(float64), right.(float64))
	} else {
		return false
	}
}
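
// Example (editor's illustration, not part of the original source): all three
// arguments must share one concrete type, otherwise InRange returns false.
//
//	InRange(5, 1, 10)       // true  (all int)
//	InRange(5.0, 1.0, 10.0) // true  (all float64)
//	InRange(5, 1.0, 10)     // false (mixed int and float64)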

// IsWhole returns true if value is a whole number.
func IsWhole(value float64) bool {
	return math.Remainder(value, 1) == 0
}

// IsNatural returns true if value is a natural number (positive and whole).
func IsNatural(value float64) bool {
	return IsWhole(value) && IsPositive(value)
}

@ -0,0 +1,101 @@
package govalidator

import "regexp"

// Basic regular expressions for validating strings
const (
	Email string = "^(((([a-zA-Z]|\\d|[!#\\$%&'\\*\\+\\-\\/=\\?\\^_`{\\|}~]|[\\x{00A0}-\\x{D7FF}\\x{F900}-\\x{FDCF}\\x{FDF0}-\\x{FFEF}])+(\\.([a-zA-Z]|\\d|[!#\\$%&'\\*\\+\\-\\/=\\?\\^_`{\\|}~]|[\\x{00A0}-\\x{D7FF}\\x{F900}-\\x{FDCF}\\x{FDF0}-\\x{FFEF}])+)*)|((\\x22)((((\\x20|\\x09)*(\\x0d\\x0a))?(\\x20|\\x09)+)?(([\\x01-\\x08\\x0b\\x0c\\x0e-\\x1f\\x7f]|\\x21|[\\x23-\\x5b]|[\\x5d-\\x7e]|[\\x{00A0}-\\x{D7FF}\\x{F900}-\\x{FDCF}\\x{FDF0}-\\x{FFEF}])|(\\([\\x01-\\x09\\x0b\\x0c\\x0d-\\x7f]|[\\x{00A0}-\\x{D7FF}\\x{F900}-\\x{FDCF}\\x{FDF0}-\\x{FFEF}]))))*(((\\x20|\\x09)*(\\x0d\\x0a))?(\\x20|\\x09)+)?(\\x22)))@((([a-zA-Z]|\\d|[\\x{00A0}-\\x{D7FF}\\x{F900}-\\x{FDCF}\\x{FDF0}-\\x{FFEF}])|(([a-zA-Z]|\\d|[\\x{00A0}-\\x{D7FF}\\x{F900}-\\x{FDCF}\\x{FDF0}-\\x{FFEF}])([a-zA-Z]|\\d|-|\\.|_|~|[\\x{00A0}-\\x{D7FF}\\x{F900}-\\x{FDCF}\\x{FDF0}-\\x{FFEF}])*([a-zA-Z]|\\d|[\\x{00A0}-\\x{D7FF}\\x{F900}-\\x{FDCF}\\x{FDF0}-\\x{FFEF}])))\\.)+(([a-zA-Z]|[\\x{00A0}-\\x{D7FF}\\x{F900}-\\x{FDCF}\\x{FDF0}-\\x{FFEF}])|(([a-zA-Z]|[\\x{00A0}-\\x{D7FF}\\x{F900}-\\x{FDCF}\\x{FDF0}-\\x{FFEF}])([a-zA-Z]|\\d|-|_|~|[\\x{00A0}-\\x{D7FF}\\x{F900}-\\x{FDCF}\\x{FDF0}-\\x{FFEF}])*([a-zA-Z]|[\\x{00A0}-\\x{D7FF}\\x{F900}-\\x{FDCF}\\x{FDF0}-\\x{FFEF}])))\\.?$"
	CreditCard string = "^(?:4[0-9]{12}(?:[0-9]{3})?|5[1-5][0-9]{14}|6(?:011|5[0-9][0-9])[0-9]{12}|3[47][0-9]{13}|3(?:0[0-5]|[68][0-9])[0-9]{11}|(?:2131|1800|35\\d{3})\\d{11})$"
	ISBN10 string = "^(?:[0-9]{9}X|[0-9]{10})$"
	ISBN13 string = "^(?:[0-9]{13})$"
	UUID3 string = "^[0-9a-f]{8}-[0-9a-f]{4}-3[0-9a-f]{3}-[0-9a-f]{4}-[0-9a-f]{12}$"
	UUID4 string = "^[0-9a-f]{8}-[0-9a-f]{4}-4[0-9a-f]{3}-[89ab][0-9a-f]{3}-[0-9a-f]{12}$"
	UUID5 string = "^[0-9a-f]{8}-[0-9a-f]{4}-5[0-9a-f]{3}-[89ab][0-9a-f]{3}-[0-9a-f]{12}$"
	UUID string = "^[0-9a-f]{8}-[0-9a-f]{4}-[0-9a-f]{4}-[0-9a-f]{4}-[0-9a-f]{12}$"
	Alpha string = "^[a-zA-Z]+$"
	Alphanumeric string = "^[a-zA-Z0-9]+$"
	Numeric string = "^[0-9]+$"
	Int string = "^(?:[-+]?(?:0|[1-9][0-9]*))$"
	Float string = "^(?:[-+]?(?:[0-9]+))?(?:\\.[0-9]*)?(?:[eE][\\+\\-]?(?:[0-9]+))?$"
	Hexadecimal string = "^[0-9a-fA-F]+$"
	Hexcolor string = "^#?([0-9a-fA-F]{3}|[0-9a-fA-F]{6})$"
	RGBcolor string = "^rgb\\(\\s*(0|[1-9]\\d?|1\\d\\d?|2[0-4]\\d|25[0-5])\\s*,\\s*(0|[1-9]\\d?|1\\d\\d?|2[0-4]\\d|25[0-5])\\s*,\\s*(0|[1-9]\\d?|1\\d\\d?|2[0-4]\\d|25[0-5])\\s*\\)$"
	ASCII string = "^[\x00-\x7F]+$"
	Multibyte string = "[^\x00-\x7F]"
	FullWidth string = "[^\u0020-\u007E\uFF61-\uFF9F\uFFA0-\uFFDC\uFFE8-\uFFEE0-9a-zA-Z]"
	HalfWidth string = "[\u0020-\u007E\uFF61-\uFF9F\uFFA0-\uFFDC\uFFE8-\uFFEE0-9a-zA-Z]"
	Base64 string = "^(?:[A-Za-z0-9+\\/]{4})*(?:[A-Za-z0-9+\\/]{2}==|[A-Za-z0-9+\\/]{3}=|[A-Za-z0-9+\\/]{4})$"
	PrintableASCII string = "^[\x20-\x7E]+$"
	DataURI string = "^data:.+\\/(.+);base64$"
	Latitude string = "^[-+]?([1-8]?\\d(\\.\\d+)?|90(\\.0+)?)$"
	Longitude string = "^[-+]?(180(\\.0+)?|((1[0-7]\\d)|([1-9]?\\d))(\\.\\d+)?)$"
	DNSName string = `^([a-zA-Z0-9_]{1}[a-zA-Z0-9_-]{0,62}){1}(\.[a-zA-Z0-9_]{1}[a-zA-Z0-9_-]{0,62})*[\._]?$`
	IP string = `(([0-9a-fA-F]{1,4}:){7,7}[0-9a-fA-F]{1,4}|([0-9a-fA-F]{1,4}:){1,7}:|([0-9a-fA-F]{1,4}:){1,6}:[0-9a-fA-F]{1,4}|([0-9a-fA-F]{1,4}:){1,5}(:[0-9a-fA-F]{1,4}){1,2}|([0-9a-fA-F]{1,4}:){1,4}(:[0-9a-fA-F]{1,4}){1,3}|([0-9a-fA-F]{1,4}:){1,3}(:[0-9a-fA-F]{1,4}){1,4}|([0-9a-fA-F]{1,4}:){1,2}(:[0-9a-fA-F]{1,4}){1,5}|[0-9a-fA-F]{1,4}:((:[0-9a-fA-F]{1,4}){1,6})|:((:[0-9a-fA-F]{1,4}){1,7}|:)|fe80:(:[0-9a-fA-F]{0,4}){0,4}%[0-9a-zA-Z]{1,}|::(ffff(:0{1,4}){0,1}:){0,1}((25[0-5]|(2[0-4]|1{0,1}[0-9]){0,1}[0-9])\.){3,3}(25[0-5]|(2[0-4]|1{0,1}[0-9]){0,1}[0-9])|([0-9a-fA-F]{1,4}:){1,4}:((25[0-5]|(2[0-4]|1{0,1}[0-9]){0,1}[0-9])\.){3,3}(25[0-5]|(2[0-4]|1{0,1}[0-9]){0,1}[0-9]))`
	URLSchema string = `((ftp|tcp|udp|wss?|https?):\/\/)`
	URLUsername string = `(\S+(:\S*)?@)`
	URLPath string = `((\/|\?|#)[^\s]*)`
	URLPort string = `(:(\d{1,5}))`
	URLIP string = `([1-9]\d?|1\d\d|2[01]\d|22[0-3])(\.(1?\d{1,2}|2[0-4]\d|25[0-5])){2}(?:\.([0-9]\d?|1\d\d|2[0-4]\d|25[0-4]))`
	URLSubdomain string = `((www\.)|([a-zA-Z0-9]+([-_\.]?[a-zA-Z0-9])*[a-zA-Z0-9]\.[a-zA-Z0-9]+))`
	URL string = `^` + URLSchema + `?` + URLUsername + `?` + `((` + URLIP + `|(\[` + IP + `\])|(([a-zA-Z0-9]([a-zA-Z0-9-_]+)?[a-zA-Z0-9]([-\.][a-zA-Z0-9]+)*)|(` + URLSubdomain + `?))?(([a-zA-Z\x{00a1}-\x{ffff}0-9]+-?-?)*[a-zA-Z\x{00a1}-\x{ffff}0-9]+)(?:\.([a-zA-Z\x{00a1}-\x{ffff}]{1,}))?))\.?` + URLPort + `?` + URLPath + `?$`
	SSN string = `^\d{3}[- ]?\d{2}[- ]?\d{4}$`
	WinPath string = `^[a-zA-Z]:\\(?:[^\\/:*?"<>|\r\n]+\\)*[^\\/:*?"<>|\r\n]*$`
	UnixPath string = `^(/[^/\x00]*)+/?$`
	Semver string = "^v?(?:0|[1-9]\\d*)\\.(?:0|[1-9]\\d*)\\.(?:0|[1-9]\\d*)(-(0|[1-9]\\d*|\\d*[a-zA-Z-][0-9a-zA-Z-]*)(\\.(0|[1-9]\\d*|\\d*[a-zA-Z-][0-9a-zA-Z-]*))*)?(\\+[0-9a-zA-Z-]+(\\.[0-9a-zA-Z-]+)*)?$"
	tagName string = "valid"
	hasLowerCase string = ".*[[:lower:]]"
	hasUpperCase string = ".*[[:upper:]]"
	hasWhitespace string = ".*[[:space:]]"
	hasWhitespaceOnly string = "^[[:space:]]+$"
)

// Used by IsFilePath func
const (
	// Unknown is unresolved OS type
	Unknown = iota
	// Win is Windows type
	Win
	// Unix is *nix OS types
	Unix
)

var (
	userRegexp = regexp.MustCompile("^[a-zA-Z0-9!#$%&'*+/=?^_`{|}~.-]+$")
	hostRegexp = regexp.MustCompile("^[^\\s]+\\.[^\\s]+$")
	userDotRegexp = regexp.MustCompile("(^[.]{1})|([.]{1}$)|([.]{2,})")
	rxEmail = regexp.MustCompile(Email)
	rxCreditCard = regexp.MustCompile(CreditCard)
	rxISBN10 = regexp.MustCompile(ISBN10)
	rxISBN13 = regexp.MustCompile(ISBN13)
	rxUUID3 = regexp.MustCompile(UUID3)
	rxUUID4 = regexp.MustCompile(UUID4)
	rxUUID5 = regexp.MustCompile(UUID5)
	rxUUID = regexp.MustCompile(UUID)
	rxAlpha = regexp.MustCompile(Alpha)
	rxAlphanumeric = regexp.MustCompile(Alphanumeric)
	rxNumeric = regexp.MustCompile(Numeric)
	rxInt = regexp.MustCompile(Int)
	rxFloat = regexp.MustCompile(Float)
	rxHexadecimal = regexp.MustCompile(Hexadecimal)
	rxHexcolor = regexp.MustCompile(Hexcolor)
	rxRGBcolor = regexp.MustCompile(RGBcolor)
	rxASCII = regexp.MustCompile(ASCII)
	rxPrintableASCII = regexp.MustCompile(PrintableASCII)
	rxMultibyte = regexp.MustCompile(Multibyte)
	rxFullWidth = regexp.MustCompile(FullWidth)
	rxHalfWidth = regexp.MustCompile(HalfWidth)
	rxBase64 = regexp.MustCompile(Base64)
	rxDataURI = regexp.MustCompile(DataURI)
	rxLatitude = regexp.MustCompile(Latitude)
	rxLongitude = regexp.MustCompile(Longitude)
	rxDNSName = regexp.MustCompile(DNSName)
	rxURL = regexp.MustCompile(URL)
	rxSSN = regexp.MustCompile(SSN)
	rxWinPath = regexp.MustCompile(WinPath)
	rxUnixPath = regexp.MustCompile(UnixPath)
	rxSemver = regexp.MustCompile(Semver)
	rxHasLowerCase = regexp.MustCompile(hasLowerCase)
	rxHasUpperCase = regexp.MustCompile(hasUpperCase)
	rxHasWhitespace = regexp.MustCompile(hasWhitespace)
	rxHasWhitespaceOnly = regexp.MustCompile(hasWhitespaceOnly)
)

@ -0,0 +1,636 @@
package govalidator

import (
	"reflect"
	"regexp"
	"sort"
	"sync"
)

// Validator is a wrapper for a validator function that returns bool and accepts string.
type Validator func(str string) bool

// CustomTypeValidator is a wrapper for validator functions that return bool and accept any type.
// The second parameter should be the context (in the case of validating a struct: the whole object being validated).
type CustomTypeValidator func(i interface{}, o interface{}) bool

// ParamValidator is a wrapper for validator functions that accept additional parameters.
type ParamValidator func(str string, params ...string) bool
type tagOptionsMap map[string]tagOption

func (t tagOptionsMap) orderedKeys() []string {
	var keys []string
	for k := range t {
		keys = append(keys, k)
	}

	sort.Slice(keys, func(a, b int) bool {
		return t[keys[a]].order < t[keys[b]].order
	})

	return keys
}

type tagOption struct {
	name string
	customErrorMessage string
	order int
}

// UnsupportedTypeError is a wrapper for reflect.Type
type UnsupportedTypeError struct {
	Type reflect.Type
}

// stringValues is a slice of reflect.Value holding *reflect.StringValue.
// It implements the methods to sort by string.
type stringValues []reflect.Value

// ParamTagMap is a map of functions that accept additional parameters
var ParamTagMap = map[string]ParamValidator{
	"length": ByteLength,
	"range": Range,
	"runelength": RuneLength,
	"stringlength": StringLength,
	"matches": StringMatches,
	"in": isInRaw,
	"rsapub": IsRsaPub,
}

// ParamTagRegexMap maps param tags to their respective regexes.
var ParamTagRegexMap = map[string]*regexp.Regexp{
	"range": regexp.MustCompile("^range\\((\\d+)\\|(\\d+)\\)$"),
	"length": regexp.MustCompile("^length\\((\\d+)\\|(\\d+)\\)$"),
	"runelength": regexp.MustCompile("^runelength\\((\\d+)\\|(\\d+)\\)$"),
	"stringlength": regexp.MustCompile("^stringlength\\((\\d+)\\|(\\d+)\\)$"),
	"in": regexp.MustCompile(`^in\((.*)\)`),
	"matches": regexp.MustCompile(`^matches\((.+)\)$`),
	"rsapub": regexp.MustCompile("^rsapub\\((\\d+)\\)$"),
}

type customTypeTagMap struct {
	validators map[string]CustomTypeValidator

	sync.RWMutex
}

func (tm *customTypeTagMap) Get(name string) (CustomTypeValidator, bool) {
	tm.RLock()
	defer tm.RUnlock()
	v, ok := tm.validators[name]
	return v, ok
}

func (tm *customTypeTagMap) Set(name string, ctv CustomTypeValidator) {
	tm.Lock()
	defer tm.Unlock()
	tm.validators[name] = ctv
}

// CustomTypeTagMap is a map of functions that can be used as tags for the ValidateStruct function.
// Use this to validate compound or custom types that need to be handled as a whole, e.g.
// `type UUID [16]byte` (this would be handled as an array of bytes).
var CustomTypeTagMap = &customTypeTagMap{validators: make(map[string]CustomTypeValidator)}

// TagMap is a map of functions that can be used as tags for the ValidateStruct function.
var TagMap = map[string]Validator{
	"email": IsEmail,
	"url": IsURL,
	"dialstring": IsDialString,
	"requrl": IsRequestURL,
	"requri": IsRequestURI,
	"alpha": IsAlpha,
	"utfletter": IsUTFLetter,
	"alphanum": IsAlphanumeric,
	"utfletternum": IsUTFLetterNumeric,
	"numeric": IsNumeric,
	"utfnumeric": IsUTFNumeric,
	"utfdigit": IsUTFDigit,
	"hexadecimal": IsHexadecimal,
	"hexcolor": IsHexcolor,
	"rgbcolor": IsRGBcolor,
	"lowercase": IsLowerCase,
	"uppercase": IsUpperCase,
	"int": IsInt,
	"float": IsFloat,
	"null": IsNull,
	"uuid": IsUUID,
	"uuidv3": IsUUIDv3,
	"uuidv4": IsUUIDv4,
	"uuidv5": IsUUIDv5,
	"creditcard": IsCreditCard,
	"isbn10": IsISBN10,
	"isbn13": IsISBN13,
	"json": IsJSON,
	"multibyte": IsMultibyte,
	"ascii": IsASCII,
	"printableascii": IsPrintableASCII,
	"fullwidth": IsFullWidth,
	"halfwidth": IsHalfWidth,
	"variablewidth": IsVariableWidth,
	"base64": IsBase64,
	"datauri": IsDataURI,
	"ip": IsIP,
	"port": IsPort,
	"ipv4": IsIPv4,
	"ipv6": IsIPv6,
	"dns": IsDNSName,
	"host": IsHost,
	"mac": IsMAC,
	"latitude": IsLatitude,
	"longitude": IsLongitude,
	"ssn": IsSSN,
	"semver": IsSemver,
	"rfc3339": IsRFC3339,
	"rfc3339WithoutZone": IsRFC3339WithoutZone,
	"ISO3166Alpha2": IsISO3166Alpha2,
	"ISO3166Alpha3": IsISO3166Alpha3,
	"ISO4217": IsISO4217,
}

// ISO3166Entry stores country codes
type ISO3166Entry struct {
	EnglishShortName string
	FrenchShortName string
	Alpha2Code string
	Alpha3Code string
	Numeric string
}

// ISO3166List is based on https://www.iso.org/obp/ui/#search/code/ Code Type "Officially Assigned Codes"
var ISO3166List = []ISO3166Entry{
	{"Afghanistan", "Afghanistan (l')", "AF", "AFG", "004"},
	{"Albania", "Albanie (l')", "AL", "ALB", "008"},
	{"Antarctica", "Antarctique (l')", "AQ", "ATA", "010"},
	{"Algeria", "Algérie (l')", "DZ", "DZA", "012"},
	{"American Samoa", "Samoa américaines (les)", "AS", "ASM", "016"},
	{"Andorra", "Andorre (l')", "AD", "AND", "020"},
	{"Angola", "Angola (l')", "AO", "AGO", "024"},
	{"Antigua and Barbuda", "Antigua-et-Barbuda", "AG", "ATG", "028"},
	{"Azerbaijan", "Azerbaïdjan (l')", "AZ", "AZE", "031"},
	{"Argentina", "Argentine (l')", "AR", "ARG", "032"},
	{"Australia", "Australie (l')", "AU", "AUS", "036"},
	{"Austria", "Autriche (l')", "AT", "AUT", "040"},
	{"Bahamas (the)", "Bahamas (les)", "BS", "BHS", "044"},
	{"Bahrain", "Bahreïn", "BH", "BHR", "048"},
	{"Bangladesh", "Bangladesh (le)", "BD", "BGD", "050"},
	{"Armenia", "Arménie (l')", "AM", "ARM", "051"},
	{"Barbados", "Barbade (la)", "BB", "BRB", "052"},
	{"Belgium", "Belgique (la)", "BE", "BEL", "056"},
	{"Bermuda", "Bermudes (les)", "BM", "BMU", "060"},
	{"Bhutan", "Bhoutan (le)", "BT", "BTN", "064"},
	{"Bolivia (Plurinational State of)", "Bolivie (État plurinational de)", "BO", "BOL", "068"},
	{"Bosnia and Herzegovina", "Bosnie-Herzégovine (la)", "BA", "BIH", "070"},
	{"Botswana", "Botswana (le)", "BW", "BWA", "072"},
	{"Bouvet Island", "Bouvet (l'Île)", "BV", "BVT", "074"},
	{"Brazil", "Brésil (le)", "BR", "BRA", "076"},
	{"Belize", "Belize (le)", "BZ", "BLZ", "084"},
	{"British Indian Ocean Territory (the)", "Indien (le Territoire britannique de l'océan)", "IO", "IOT", "086"},
	{"Solomon Islands", "Salomon (Îles)", "SB", "SLB", "090"},
	{"Virgin Islands (British)", "Vierges britanniques (les Îles)", "VG", "VGB", "092"},
	{"Brunei Darussalam", "Brunéi Darussalam (le)", "BN", "BRN", "096"},
	{"Bulgaria", "Bulgarie (la)", "BG", "BGR", "100"},
	{"Myanmar", "Myanmar (le)", "MM", "MMR", "104"},
	{"Burundi", "Burundi (le)", "BI", "BDI", "108"},
	{"Belarus", "Bélarus (le)", "BY", "BLR", "112"},
	{"Cambodia", "Cambodge (le)", "KH", "KHM", "116"},
	{"Cameroon", "Cameroun (le)", "CM", "CMR", "120"},
	{"Canada", "Canada (le)", "CA", "CAN", "124"},
	{"Cabo Verde", "Cabo Verde", "CV", "CPV", "132"},
	{"Cayman Islands (the)", "Caïmans (les Îles)", "KY", "CYM", "136"},
	{"Central African Republic (the)", "République centrafricaine (la)", "CF", "CAF", "140"},
	{"Sri Lanka", "Sri Lanka", "LK", "LKA", "144"},
	{"Chad", "Tchad (le)", "TD", "TCD", "148"},
	{"Chile", "Chili (le)", "CL", "CHL", "152"},
	{"China", "Chine (la)", "CN", "CHN", "156"},
	{"Taiwan (Province of China)", "Taïwan (Province de Chine)", "TW", "TWN", "158"},
	{"Christmas Island", "Christmas (l'Île)", "CX", "CXR", "162"},
	{"Cocos (Keeling) Islands (the)", "Cocos (les Îles)/ Keeling (les Îles)", "CC", "CCK", "166"},
	{"Colombia", "Colombie (la)", "CO", "COL", "170"},
	{"Comoros (the)", "Comores (les)", "KM", "COM", "174"},
	{"Mayotte", "Mayotte", "YT", "MYT", "175"},
	{"Congo (the)", "Congo (le)", "CG", "COG", "178"},
	{"Congo (the Democratic Republic of the)", "Congo (la République démocratique du)", "CD", "COD", "180"},
	{"Cook Islands (the)", "Cook (les Îles)", "CK", "COK", "184"},
	{"Costa Rica", "Costa Rica (le)", "CR", "CRI", "188"},
	{"Croatia", "Croatie (la)", "HR", "HRV", "191"},
	{"Cuba", "Cuba", "CU", "CUB", "192"},
	{"Cyprus", "Chypre", "CY", "CYP", "196"},
	{"Czech Republic (the)", "tchèque (la République)", "CZ", "CZE", "203"},
	{"Benin", "Bénin (le)", "BJ", "BEN", "204"},
	{"Denmark", "Danemark (le)", "DK", "DNK", "208"},
	{"Dominica", "Dominique (la)", "DM", "DMA", "212"},
	{"Dominican Republic (the)", "dominicaine (la République)", "DO", "DOM", "214"},
	{"Ecuador", "Équateur (l')", "EC", "ECU", "218"},
	{"El Salvador", "El Salvador", "SV", "SLV", "222"},
	{"Equatorial Guinea", "Guinée équatoriale (la)", "GQ", "GNQ", "226"},
	{"Ethiopia", "Éthiopie (l')", "ET", "ETH", "231"},
	{"Eritrea", "Érythrée (l')", "ER", "ERI", "232"},
	{"Estonia", "Estonie (l')", "EE", "EST", "233"},
	{"Faroe Islands (the)", "Féroé (les Îles)", "FO", "FRO", "234"},
	{"Falkland Islands (the) [Malvinas]", "Falkland (les Îles)/Malouines (les Îles)", "FK", "FLK", "238"},
	{"South Georgia and the South Sandwich Islands", "Géorgie du Sud-et-les Îles Sandwich du Sud (la)", "GS", "SGS", "239"},
	{"Fiji", "Fidji (les)", "FJ", "FJI", "242"},
	{"Finland", "Finlande (la)", "FI", "FIN", "246"},
	{"Åland Islands", "Åland(les Îles)", "AX", "ALA", "248"},
	{"France", "France (la)", "FR", "FRA", "250"},
	{"French Guiana", "Guyane française (la )", "GF", "GUF", "254"},
	{"French Polynesia", "Polynésie française (la)", "PF", "PYF", "258"},
	{"French Southern Territories (the)", "Terres australes françaises (les)", "TF", "ATF", "260"},
	{"Djibouti", "Djibouti", "DJ", "DJI", "262"},
	{"Gabon", "Gabon (le)", "GA", "GAB", "266"},
	{"Georgia", "Géorgie (la)", "GE", "GEO", "268"},
	{"Gambia (the)", "Gambie (la)", "GM", "GMB", "270"},
	{"Palestine, State of", "Palestine, État de", "PS", "PSE", "275"},
	{"Germany", "Allemagne (l')", "DE", "DEU", "276"},
	{"Ghana", "Ghana (le)", "GH", "GHA", "288"},
	{"Gibraltar", "Gibraltar", "GI", "GIB", "292"},
	{"Kiribati", "Kiribati", "KI", "KIR", "296"},
	{"Greece", "Grèce (la)", "GR", "GRC", "300"},
	{"Greenland", "Groenland (le)", "GL", "GRL", "304"},
	{"Grenada", "Grenade (la)", "GD", "GRD", "308"},
	{"Guadeloupe", "Guadeloupe (la)", "GP", "GLP", "312"},
	{"Guam", "Guam", "GU", "GUM", "316"},
	{"Guatemala", "Guatemala (le)", "GT", "GTM", "320"},
	{"Guinea", "Guinée (la)", "GN", "GIN", "324"},
	{"Guyana", "Guyana (le)", "GY", "GUY", "328"},
	{"Haiti", "Haïti", "HT", "HTI", "332"},
	{"Heard Island and McDonald Islands", "Heard-et-Îles MacDonald (l'Île)", "HM", "HMD", "334"},
	{"Holy See (the)", "Saint-Siège (le)", "VA", "VAT", "336"},
	{"Honduras", "Honduras (le)", "HN", "HND", "340"},
	{"Hong Kong", "Hong Kong", "HK", "HKG", "344"},
	{"Hungary", "Hongrie (la)", "HU", "HUN", "348"},
	{"Iceland", "Islande (l')", "IS", "ISL", "352"},
	{"India", "Inde (l')", "IN", "IND", "356"},
	{"Indonesia", "Indonésie (l')", "ID", "IDN", "360"},
	{"Iran (Islamic Republic of)", "Iran (République Islamique d')", "IR", "IRN", "364"},
	{"Iraq", "Iraq (l')", "IQ", "IRQ", "368"},
	{"Ireland", "Irlande (l')", "IE", "IRL", "372"},
	{"Israel", "Israël", "IL", "ISR", "376"},
	{"Italy", "Italie (l')", "IT", "ITA", "380"},
	{"Côte d'Ivoire", "Côte d'Ivoire (la)", "CI", "CIV", "384"},
	{"Jamaica", "Jamaïque (la)", "JM", "JAM", "388"},
	{"Japan", "Japon (le)", "JP", "JPN", "392"},
	{"Kazakhstan", "Kazakhstan (le)", "KZ", "KAZ", "398"},
	{"Jordan", "Jordanie (la)", "JO", "JOR", "400"},
	{"Kenya", "Kenya (le)", "KE", "KEN", "404"},
	{"Korea (the Democratic People's Republic of)", "Corée (la République populaire démocratique de)", "KP", "PRK", "408"},
	{"Korea (the Republic of)", "Corée (la République de)", "KR", "KOR", "410"},
	{"Kuwait", "Koweït (le)", "KW", "KWT", "414"},
	{"Kyrgyzstan", "Kirghizistan (le)", "KG", "KGZ", "417"},
	{"Lao People's Democratic Republic (the)", "Lao, République démocratique populaire", "LA", "LAO", "418"},
	{"Lebanon", "Liban (le)", "LB", "LBN", "422"},
	{"Lesotho", "Lesotho (le)", "LS", "LSO", "426"},
	{"Latvia", "Lettonie (la)", "LV", "LVA", "428"},
	{"Liberia", "Libéria (le)", "LR", "LBR", "430"},
	{"Libya", "Libye (la)", "LY", "LBY", "434"},
	{"Liechtenstein", "Liechtenstein (le)", "LI", "LIE", "438"},
	{"Lithuania", "Lituanie (la)", "LT", "LTU", "440"},
	{"Luxembourg", "Luxembourg (le)", "LU", "LUX", "442"},
	{"Macao", "Macao", "MO", "MAC", "446"},
	{"Madagascar", "Madagascar", "MG", "MDG", "450"},
	{"Malawi", "Malawi (le)", "MW", "MWI", "454"},
	{"Malaysia", "Malaisie (la)", "MY", "MYS", "458"},
	{"Maldives", "Maldives (les)", "MV", "MDV", "462"},
	{"Mali", "Mali (le)", "ML", "MLI", "466"},
	{"Malta", "Malte", "MT", "MLT", "470"},
	{"Martinique", "Martinique (la)", "MQ", "MTQ", "474"},
	{"Mauritania", "Mauritanie (la)", "MR", "MRT", "478"},
	{"Mauritius", "Maurice", "MU", "MUS", "480"},
	{"Mexico", "Mexique (le)", "MX", "MEX", "484"},
	{"Monaco", "Monaco", "MC", "MCO", "492"},
	{"Mongolia", "Mongolie (la)", "MN", "MNG", "496"},
	{"Moldova (the Republic of)", "Moldova , République de", "MD", "MDA", "498"},
	{"Montenegro", "Monténégro (le)", "ME", "MNE", "499"},
	{"Montserrat", "Montserrat", "MS", "MSR", "500"},
	{"Morocco", "Maroc (le)", "MA", "MAR", "504"},
	{"Mozambique", "Mozambique (le)", "MZ", "MOZ", "508"},
	{"Oman", "Oman", "OM", "OMN", "512"},
	{"Namibia", "Namibie (la)", "NA", "NAM", "516"},
	{"Nauru", "Nauru", "NR", "NRU", "520"},
	{"Nepal", "Népal (le)", "NP", "NPL", "524"},
	{"Netherlands (the)", "Pays-Bas (les)", "NL", "NLD", "528"},
	{"Curaçao", "Curaçao", "CW", "CUW", "531"},
	{"Aruba", "Aruba", "AW", "ABW", "533"},
	{"Sint Maarten (Dutch part)", "Saint-Martin (partie néerlandaise)", "SX", "SXM", "534"},
	{"Bonaire, Sint Eustatius and Saba", "Bonaire, Saint-Eustache et Saba", "BQ", "BES", "535"},
	{"New Caledonia", "Nouvelle-Calédonie (la)", "NC", "NCL", "540"},
	{"Vanuatu", "Vanuatu (le)", "VU", "VUT", "548"},
	{"New Zealand", "Nouvelle-Zélande (la)", "NZ", "NZL", "554"},
	{"Nicaragua", "Nicaragua (le)", "NI", "NIC", "558"},
	{"Niger (the)", "Niger (le)", "NE", "NER", "562"},
	{"Nigeria", "Nigéria (le)", "NG", "NGA", "566"},
	{"Niue", "Niue", "NU", "NIU", "570"},
	{"Norfolk Island", "Norfolk (l'Île)", "NF", "NFK", "574"},
	{"Norway", "Norvège (la)", "NO", "NOR", "578"},
	{"Northern Mariana Islands (the)", "Mariannes du Nord (les Îles)", "MP", "MNP", "580"},
	{"United States Minor Outlying Islands (the)", "Îles mineures éloignées des États-Unis (les)", "UM", "UMI", "581"},
	{"Micronesia (Federated States of)", "Micronésie (États fédérés de)", "FM", "FSM", "583"},
	{"Marshall Islands (the)", "Marshall (Îles)", "MH", "MHL", "584"},
	{"Palau", "Palaos (les)", "PW", "PLW", "585"},
	{"Pakistan", "Pakistan (le)", "PK", "PAK", "586"},
	{"Panama", "Panama (le)", "PA", "PAN", "591"},
	{"Papua New Guinea", "Papouasie-Nouvelle-Guinée (la)", "PG", "PNG", "598"},
	{"Paraguay", "Paraguay (le)", "PY", "PRY", "600"},
	{"Peru", "Pérou (le)", "PE", "PER", "604"},
	{"Philippines (the)", "Philippines (les)", "PH", "PHL", "608"},
	{"Pitcairn", "Pitcairn", "PN", "PCN", "612"},
	{"Poland", "Pologne (la)", "PL", "POL", "616"},
	{"Portugal", "Portugal (le)", "PT", "PRT", "620"},
	{"Guinea-Bissau", "Guinée-Bissau (la)", "GW", "GNB", "624"},
	{"Timor-Leste", "Timor-Leste (le)", "TL", "TLS", "626"},
	{"Puerto Rico", "Porto Rico", "PR", "PRI", "630"},
	{"Qatar", "Qatar (le)", "QA", "QAT", "634"},
	{"Réunion", "Réunion (La)", "RE", "REU", "638"},
	{"Romania", "Roumanie (la)", "RO", "ROU", "642"},
	{"Russian Federation (the)", "Russie (la Fédération de)", "RU", "RUS", "643"},
	{"Rwanda", "Rwanda (le)", "RW", "RWA", "646"},
	{"Saint Barthélemy", "Saint-Barthélemy", "BL", "BLM", "652"},
	{"Saint Helena, Ascension and Tristan da Cunha", "Sainte-Hélène, Ascension et Tristan da Cunha", "SH", "SHN", "654"},
	{"Saint Kitts and Nevis", "Saint-Kitts-et-Nevis", "KN", "KNA", "659"},
	{"Anguilla", "Anguilla", "AI", "AIA", "660"},
	{"Saint Lucia", "Sainte-Lucie", "LC", "LCA", "662"},
	{"Saint Martin (French part)", "Saint-Martin (partie française)", "MF", "MAF", "663"},
	{"Saint Pierre and Miquelon", "Saint-Pierre-et-Miquelon", "PM", "SPM", "666"},
	{"Saint Vincent and the Grenadines", "Saint-Vincent-et-les Grenadines", "VC", "VCT", "670"},
	{"San Marino", "Saint-Marin", "SM", "SMR", "674"},
	{"Sao Tome and Principe", "Sao Tomé-et-Principe", "ST", "STP", "678"},
	{"Saudi Arabia", "Arabie saoudite (l')", "SA", "SAU", "682"},
	{"Senegal", "Sénégal (le)", "SN", "SEN", "686"},
	{"Serbia", "Serbie (la)", "RS", "SRB", "688"},
	{"Seychelles", "Seychelles (les)", "SC", "SYC", "690"},
	{"Sierra Leone", "Sierra Leone (la)", "SL", "SLE", "694"},
	{"Singapore", "Singapour", "SG", "SGP", "702"},
	{"Slovakia", "Slovaquie (la)", "SK", "SVK", "703"},
	{"Viet Nam", "Viet Nam (le)", "VN", "VNM", "704"},
	{"Slovenia", "Slovénie (la)", "SI", "SVN", "705"},
	{"Somalia", "Somalie (la)", "SO", "SOM", "706"},
	{"South Africa", "Afrique du Sud (l')", "ZA", "ZAF", "710"},
	{"Zimbabwe", "Zimbabwe (le)", "ZW", "ZWE", "716"},
	{"Spain", "Espagne (l')", "ES", "ESP", "724"},
	{"South Sudan", "Soudan du Sud (le)", "SS", "SSD", "728"},
	{"Sudan (the)", "Soudan (le)", "SD", "SDN", "729"},
	{"Western Sahara*", "Sahara occidental (le)*", "EH", "ESH", "732"},
	{"Suriname", "Suriname (le)", "SR", "SUR", "740"},
	{"Svalbard and Jan Mayen", "Svalbard et l'Île Jan Mayen (le)", "SJ", "SJM", "744"},
	{"Swaziland", "Swaziland (le)", "SZ", "SWZ", "748"},
	{"Sweden", "Suède (la)", "SE", "SWE", "752"},
	{"Switzerland", "Suisse (la)", "CH", "CHE", "756"},
	{"Syrian Arab Republic", "République arabe syrienne (la)", "SY", "SYR", "760"},
	{"Tajikistan", "Tadjikistan (le)", "TJ", "TJK", "762"},
	{"Thailand", "Thaïlande (la)", "TH", "THA", "764"},
	{"Togo", "Togo (le)", "TG", "TGO", "768"},
	{"Tokelau", "Tokelau (les)", "TK", "TKL", "772"},
	{"Tonga", "Tonga (les)", "TO", "TON", "776"},
	{"Trinidad and Tobago", "Trinité-et-Tobago (la)", "TT", "TTO", "780"},
	{"United Arab Emirates (the)", "Émirats arabes unis (les)", "AE", "ARE", "784"},
	{"Tunisia", "Tunisie (la)", "TN", "TUN", "788"},
	{"Turkey", "Turquie (la)", "TR", "TUR", "792"},
	{"Turkmenistan", "Turkménistan (le)", "TM", "TKM", "795"},
	{"Turks and Caicos Islands (the)", "Turks-et-Caïcos (les Îles)", "TC", "TCA", "796"},
	{"Tuvalu", "Tuvalu (les)", "TV", "TUV", "798"},
	{"Uganda", "Ouganda (l')", "UG", "UGA", "800"},
	{"Ukraine", "Ukraine (l')", "UA", "UKR", "804"},
	{"Macedonia (the former Yugoslav Republic of)", "Macédoine (l'ex‑République yougoslave de)", "MK", "MKD", "807"},
	{"Egypt", "Égypte (l')", "EG", "EGY", "818"},
	{"United Kingdom of Great Britain and Northern Ireland (the)", "Royaume-Uni de Grande-Bretagne et d'Irlande du Nord (le)", "GB", "GBR", "826"},
	{"Guernsey", "Guernesey", "GG", "GGY", "831"},
	{"Jersey", "Jersey", "JE", "JEY", "832"},
	{"Isle of Man", "Île de Man", "IM", "IMN", "833"},
	{"Tanzania, United Republic of", "Tanzanie, République-Unie de", "TZ", "TZA", "834"},
	{"United States of America (the)", "États-Unis d'Amérique (les)", "US", "USA", "840"},
	{"Virgin Islands (U.S.)", "Vierges des États-Unis (les Îles)", "VI", "VIR", "850"},
	{"Burkina Faso", "Burkina Faso (le)", "BF", "BFA", "854"},
	{"Uruguay", "Uruguay (l')", "UY", "URY", "858"},
	{"Uzbekistan", "Ouzbékistan (l')", "UZ", "UZB", "860"},
	{"Venezuela (Bolivarian Republic of)", "Venezuela (République bolivarienne du)", "VE", "VEN", "862"},
	{"Wallis and Futuna", "Wallis-et-Futuna", "WF", "WLF", "876"},
	{"Samoa", "Samoa (le)", "WS", "WSM", "882"},
	{"Yemen", "Yémen (le)", "YE", "YEM", "887"},
	{"Zambia", "Zambie (la)", "ZM", "ZMB", "894"},
}

// ISO4217List is the list of ISO currency codes
var ISO4217List = []string{
	"AED", "AFN", "ALL", "AMD", "ANG", "AOA", "ARS", "AUD", "AWG", "AZN",
	"BAM", "BBD", "BDT", "BGN", "BHD", "BIF", "BMD", "BND", "BOB", "BOV", "BRL", "BSD", "BTN", "BWP", "BYN", "BZD",
	"CAD", "CDF", "CHE", "CHF", "CHW", "CLF", "CLP", "CNY", "COP", "COU", "CRC", "CUC", "CUP", "CVE", "CZK",
	"DJF", "DKK", "DOP", "DZD",
	"EGP", "ERN", "ETB", "EUR",
	"FJD", "FKP",
	"GBP", "GEL", "GHS", "GIP", "GMD", "GNF", "GTQ", "GYD",
	"HKD", "HNL", "HRK", "HTG", "HUF",
	"IDR", "ILS", "INR", "IQD", "IRR", "ISK",
	"JMD", "JOD", "JPY",
	"KES", "KGS", "KHR", "KMF", "KPW", "KRW", "KWD", "KYD", "KZT",
	"LAK", "LBP", "LKR", "LRD", "LSL", "LYD",
	"MAD", "MDL", "MGA", "MKD", "MMK", "MNT", "MOP", "MRO", "MUR", "MVR", "MWK", "MXN", "MXV", "MYR", "MZN",
	"NAD", "NGN", "NIO", "NOK", "NPR", "NZD",
	"OMR",
	"PAB", "PEN", "PGK", "PHP", "PKR", "PLN", "PYG",
	"QAR",
	"RON", "RSD", "RUB", "RWF",
	"SAR", "SBD", "SCR", "SDG", "SEK", "SGD", "SHP", "SLL", "SOS", "SRD", "SSP", "STD", "SVC", "SYP", "SZL",
	"THB", "TJS", "TMT", "TND", "TOP", "TRY", "TTD", "TWD", "TZS",
	"UAH", "UGX", "USD", "USN", "UYI", "UYU", "UZS",
	"VEF", "VND", "VUV",
	"WST",
	"XAF", "XAG", "XAU", "XBA", "XBB", "XBC", "XBD", "XCD", "XDR", "XOF", "XPD", "XPF", "XPT", "XSU", "XTS", "XUA", "XXX",
	"YER",
	"ZAR", "ZMW", "ZWL",
}

// ISO693Entry stores ISO language codes
type ISO693Entry struct {
	Alpha3bCode string
	Alpha2Code string
	English string
}

// ISO693List is based on http://data.okfn.org/data/core/language-codes/r/language-codes-3b2.json
var ISO693List = []ISO693Entry{
	{Alpha3bCode: "aar", Alpha2Code: "aa", English: "Afar"},
	{Alpha3bCode: "abk", Alpha2Code: "ab", English: "Abkhazian"},
	{Alpha3bCode: "afr", Alpha2Code: "af", English: "Afrikaans"},
	{Alpha3bCode: "aka", Alpha2Code: "ak", English: "Akan"},
	{Alpha3bCode: "alb", Alpha2Code: "sq", English: "Albanian"},
	{Alpha3bCode: "amh", Alpha2Code: "am", English: "Amharic"},
	{Alpha3bCode: "ara", Alpha2Code: "ar", English: "Arabic"},
	{Alpha3bCode: "arg", Alpha2Code: "an", English: "Aragonese"},
	{Alpha3bCode: "arm", Alpha2Code: "hy", English: "Armenian"},
	{Alpha3bCode: "asm", Alpha2Code: "as", English: "Assamese"},
	{Alpha3bCode: "ava", Alpha2Code: "av", English: "Avaric"},
	{Alpha3bCode: "ave", Alpha2Code: "ae", English: "Avestan"},
	{Alpha3bCode: "aym", Alpha2Code: "ay", English: "Aymara"},
	{Alpha3bCode: "aze", Alpha2Code: "az", English: "Azerbaijani"},
	{Alpha3bCode: "bak", Alpha2Code: "ba", English: "Bashkir"},
	{Alpha3bCode: "bam", Alpha2Code: "bm", English: "Bambara"},
	{Alpha3bCode: "baq", Alpha2Code: "eu", English: "Basque"},
	{Alpha3bCode: "bel", Alpha2Code: "be", English: "Belarusian"},
	{Alpha3bCode: "ben", Alpha2Code: "bn", English: "Bengali"},
	{Alpha3bCode: "bih", Alpha2Code: "bh", English: "Bihari languages"},
	{Alpha3bCode: "bis", Alpha2Code: "bi", English: "Bislama"},
	{Alpha3bCode: "bos", Alpha2Code: "bs", English: "Bosnian"},
	{Alpha3bCode: "bre", Alpha2Code: "br", English: "Breton"},
	{Alpha3bCode: "bul", Alpha2Code: "bg", English: "Bulgarian"},
	{Alpha3bCode: "bur", Alpha2Code: "my", English: "Burmese"},
	{Alpha3bCode: "cat", Alpha2Code: "ca", English: "Catalan; Valencian"},
	{Alpha3bCode: "cha", Alpha2Code: "ch", English: "Chamorro"},
	{Alpha3bCode: "che", Alpha2Code: "ce", English: "Chechen"},
	{Alpha3bCode: "chi", Alpha2Code: "zh", English: "Chinese"},
	{Alpha3bCode: "chu", Alpha2Code: "cu", English: "Church Slavic; Old Slavonic; Church Slavonic; Old Bulgarian; Old Church Slavonic"},
	{Alpha3bCode: "chv", Alpha2Code: "cv", English: "Chuvash"},
	{Alpha3bCode: "cor", Alpha2Code: "kw", English: "Cornish"},
	{Alpha3bCode: "cos", Alpha2Code: "co", English: "Corsican"},
	{Alpha3bCode: "cre", Alpha2Code: "cr", English: "Cree"},
	{Alpha3bCode: "cze", Alpha2Code: "cs", English: "Czech"},
	{Alpha3bCode: "dan", Alpha2Code: "da", English: "Danish"},
	{Alpha3bCode: "div", Alpha2Code: "dv", English: "Divehi; Dhivehi; Maldivian"},
	{Alpha3bCode: "dut", Alpha2Code: "nl", English: "Dutch; Flemish"},
	{Alpha3bCode: "dzo", Alpha2Code: "dz", English: "Dzongkha"},
	{Alpha3bCode: "eng", Alpha2Code: "en", English: "English"},
	{Alpha3bCode: "epo", Alpha2Code: "eo", English: "Esperanto"},
	{Alpha3bCode: "est", Alpha2Code: "et", English: "Estonian"},
	{Alpha3bCode: "ewe", Alpha2Code: "ee", English: "Ewe"},
	{Alpha3bCode: "fao", Alpha2Code: "fo", English: "Faroese"},
	{Alpha3bCode: "fij", Alpha2Code: "fj", English: "Fijian"},
	{Alpha3bCode: "fin", Alpha2Code: "fi", English: "Finnish"},
	{Alpha3bCode: "fre", Alpha2Code: "fr", English: "French"},
	{Alpha3bCode: "fry", Alpha2Code: "fy", English: "Western Frisian"},
	{Alpha3bCode: "ful", Alpha2Code: "ff", English: "Fulah"},
	{Alpha3bCode: "geo", Alpha2Code: "ka", English: "Georgian"},
	{Alpha3bCode: "ger", Alpha2Code: "de", English: "German"},
	{Alpha3bCode: "gla", Alpha2Code: "gd", English: "Gaelic; Scottish Gaelic"},
	{Alpha3bCode: "gle", Alpha2Code: "ga", English: "Irish"},
	{Alpha3bCode: "glg", Alpha2Code: "gl", English: "Galician"},
	{Alpha3bCode: "glv", Alpha2Code: "gv", English: "Manx"},
	{Alpha3bCode: "gre", Alpha2Code: "el", English: "Greek, Modern (1453-)"},
	{Alpha3bCode: "grn", Alpha2Code: "gn", English: "Guarani"},
	{Alpha3bCode: "guj", Alpha2Code: "gu", English: "Gujarati"},
	{Alpha3bCode: "hat", Alpha2Code: "ht", English: "Haitian; Haitian Creole"},
	{Alpha3bCode: "hau", Alpha2Code: "ha", English: "Hausa"},
	{Alpha3bCode: "heb", Alpha2Code: "he", English: "Hebrew"},
	{Alpha3bCode: "her", Alpha2Code: "hz", English: "Herero"},
	{Alpha3bCode: "hin", Alpha2Code: "hi", English: "Hindi"},
	{Alpha3bCode: "hmo", Alpha2Code: "ho", English: "Hiri Motu"},
	{Alpha3bCode: "hrv", Alpha2Code: "hr", English: "Croatian"},
	{Alpha3bCode: "hun", Alpha2Code: "hu", English: "Hungarian"},
	{Alpha3bCode: "ibo", Alpha2Code: "ig", English: "Igbo"},
	{Alpha3bCode: "ice", Alpha2Code: "is", English: "Icelandic"},
	{Alpha3bCode: "ido", Alpha2Code: "io", English: "Ido"},
	{Alpha3bCode: "iii", Alpha2Code: "ii", English: "Sichuan Yi; Nuosu"},
	{Alpha3bCode: "iku", Alpha2Code: "iu", English: "Inuktitut"},
	{Alpha3bCode: "ile", Alpha2Code: "ie", English: "Interlingue; Occidental"},
	{Alpha3bCode: "ina", Alpha2Code: "ia", English: "Interlingua (International Auxiliary Language Association)"},
	{Alpha3bCode: "ind", Alpha2Code: "id", English: "Indonesian"},
	{Alpha3bCode: "ipk", Alpha2Code: "ik", English: "Inupiaq"},
	{Alpha3bCode: "ita", Alpha2Code: "it", English: "Italian"},
	{Alpha3bCode: "jav", Alpha2Code: "jv", English: "Javanese"},
	{Alpha3bCode: "jpn", Alpha2Code: "ja", English: "Japanese"},
	{Alpha3bCode: "kal", Alpha2Code: "kl", English: "Kalaallisut; Greenlandic"},
	{Alpha3bCode: "kan", Alpha2Code: "kn", English: "Kannada"},
	{Alpha3bCode: "kas", Alpha2Code: "ks", English: "Kashmiri"},
	{Alpha3bCode: "kau", Alpha2Code: "kr", English: "Kanuri"},
	{Alpha3bCode: "kaz", Alpha2Code: "kk", English: "Kazakh"},
	{Alpha3bCode: "khm", Alpha2Code: "km", English: "Central Khmer"},
	{Alpha3bCode: "kik", Alpha2Code: "ki", English: "Kikuyu; Gikuyu"},
	{Alpha3bCode: "kin", Alpha2Code: "rw", English: "Kinyarwanda"},
	{Alpha3bCode: "kir", Alpha2Code: "ky", English: "Kirghiz; Kyrgyz"},
	{Alpha3bCode: "kom", Alpha2Code: "kv", English: "Komi"},
	{Alpha3bCode: "kon", Alpha2Code: "kg", English: "Kongo"},
	{Alpha3bCode: "kor", Alpha2Code: "ko", English: "Korean"},
	{Alpha3bCode: "kua", Alpha2Code: "kj", English: "Kuanyama; Kwanyama"},
	{Alpha3bCode: "kur", Alpha2Code: "ku", English: "Kurdish"},
	{Alpha3bCode: "lao", Alpha2Code: "lo", English: "Lao"},
	{Alpha3bCode: "lat", Alpha2Code: "la", English: "Latin"},
	{Alpha3bCode: "lav", Alpha2Code: "lv", English: "Latvian"},
	{Alpha3bCode: "lim", Alpha2Code: "li", English: "Limburgan; Limburger; Limburgish"},
	{Alpha3bCode: "lin", Alpha2Code: "ln", English: "Lingala"},
	{Alpha3bCode: "lit", Alpha2Code: "lt", English: "Lithuanian"},
	{Alpha3bCode: "ltz", Alpha2Code: "lb", English: "Luxembourgish; Letzeburgesch"},
	{Alpha3bCode: "lub", Alpha2Code: "lu", English: "Luba-Katanga"},
	{Alpha3bCode: "lug", Alpha2Code: "lg", English: "Ganda"},
	{Alpha3bCode: "mac", Alpha2Code: "mk", English: "Macedonian"},
	{Alpha3bCode: "mah", Alpha2Code: "mh", English: "Marshallese"},
	{Alpha3bCode: "mal", Alpha2Code: "ml", English: "Malayalam"},
	{Alpha3bCode: "mao", Alpha2Code: "mi", English: "Maori"},
	{Alpha3bCode: "mar", Alpha2Code: "mr", English: "Marathi"},
	{Alpha3bCode: "may", Alpha2Code: "ms", English: "Malay"},
	{Alpha3bCode: "mlg", Alpha2Code: "mg", English: "Malagasy"},
	{Alpha3bCode: "mlt", Alpha2Code: "mt", English: "Maltese"},
	{Alpha3bCode: "mon", Alpha2Code: "mn", English: "Mongolian"},
	{Alpha3bCode: "nau", Alpha2Code: "na", English: "Nauru"},
	{Alpha3bCode: "nav", Alpha2Code: "nv", English: "Navajo; Navaho"},
	{Alpha3bCode: "nbl", Alpha2Code: "nr", English: "Ndebele, South; South Ndebele"},
	{Alpha3bCode: "nde", Alpha2Code: "nd", English: "Ndebele, North; North Ndebele"},
	{Alpha3bCode: "ndo", Alpha2Code: "ng", English: "Ndonga"},
	{Alpha3bCode: "nep", Alpha2Code: "ne", English: "Nepali"},
	{Alpha3bCode: "nno", Alpha2Code: "nn", English: "Norwegian Nynorsk; Nynorsk, Norwegian"},
	{Alpha3bCode: "nob", Alpha2Code: "nb", English: "Bokmål, Norwegian; Norwegian Bokmål"},
	{Alpha3bCode: "nor", Alpha2Code: "no", English: "Norwegian"},
	{Alpha3bCode: "nya", Alpha2Code: "ny", English: "Chichewa; Chewa; Nyanja"},
	{Alpha3bCode: "oci", Alpha2Code: "oc", English: "Occitan (post 1500); Provençal"},
	{Alpha3bCode: "oji", Alpha2Code: "oj", English: "Ojibwa"},
	{Alpha3bCode: "ori", Alpha2Code: "or", English: "Oriya"},
	{Alpha3bCode: "orm", Alpha2Code: "om", English: "Oromo"},
	{Alpha3bCode: "oss", Alpha2Code: "os", English: "Ossetian; Ossetic"},
	{Alpha3bCode: "pan", Alpha2Code: "pa", English: "Panjabi; Punjabi"},
	{Alpha3bCode: "per", Alpha2Code: "fa", English: "Persian"},
	{Alpha3bCode: "pli", Alpha2Code: "pi", English: "Pali"},
	{Alpha3bCode: "pol", Alpha2Code: "pl", English: "Polish"},
	{Alpha3bCode: "por", Alpha2Code: "pt", English: "Portuguese"},
	{Alpha3bCode: "pus", Alpha2Code: "ps", English: "Pushto; Pashto"},
	{Alpha3bCode: "que", Alpha2Code: "qu", English: "Quechua"},
	{Alpha3bCode: "roh", Alpha2Code: "rm", English: "Romansh"},
	{Alpha3bCode: "rum", Alpha2Code: "ro", English: "Romanian; Moldavian; Moldovan"},
	{Alpha3bCode: "run", Alpha2Code: "rn", English: "Rundi"},
	{Alpha3bCode: "rus", Alpha2Code: "ru", English: "Russian"},
	{Alpha3bCode: "sag", Alpha2Code: "sg", English: "Sango"},
	{Alpha3bCode: "san", Alpha2Code: "sa", English: "Sanskrit"},
	{Alpha3bCode: "sin", Alpha2Code: "si", English: "Sinhala; Sinhalese"},
	{Alpha3bCode: "slo", Alpha2Code: "sk", English: "Slovak"},
	{Alpha3bCode: "slv", Alpha2Code: "sl", English: "Slovenian"},
	{Alpha3bCode: "sme", Alpha2Code: "se", English: "Northern Sami"},
	{Alpha3bCode: "smo", Alpha2Code: "sm", English: "Samoan"},
	{Alpha3bCode: "sna", Alpha2Code: "sn", English: "Shona"},
	{Alpha3bCode: "snd", Alpha2Code: "sd", English: "Sindhi"},
	{Alpha3bCode: "som", Alpha2Code: "so", English: "Somali"},
	{Alpha3bCode: "sot", Alpha2Code: "st", English: "Sotho, Southern"},
	{Alpha3bCode: "spa", Alpha2Code: "es", English: "Spanish; Castilian"},
	{Alpha3bCode: "srd", Alpha2Code: "sc", English: "Sardinian"},
	{Alpha3bCode: "srp", Alpha2Code: "sr", English: "Serbian"},
	{Alpha3bCode: "ssw", Alpha2Code: "ss", English: "Swati"},
	{Alpha3bCode: "sun", Alpha2Code: "su", English: "Sundanese"},
	{Alpha3bCode: "swa", Alpha2Code: "sw", English: "Swahili"},
	{Alpha3bCode: "swe", Alpha2Code: "sv", English: "Swedish"},
	{Alpha3bCode: "tah", Alpha2Code: "ty", English: "Tahitian"},
	{Alpha3bCode: "tam", Alpha2Code: "ta", English: "Tamil"},
	{Alpha3bCode: "tat", Alpha2Code: "tt", English: "Tatar"},
	{Alpha3bCode: "tel", Alpha2Code: "te", English: "Telugu"},
	{Alpha3bCode: "tgk", Alpha2Code: "tg", English: "Tajik"},
	{Alpha3bCode: "tgl", Alpha2Code: "tl", English: "Tagalog"},
	{Alpha3bCode: "tha", Alpha2Code: "th", English: "Thai"},
	{Alpha3bCode: "tib", Alpha2Code: "bo", English: "Tibetan"},
	{Alpha3bCode: "tir", Alpha2Code: "ti", English: "Tigrinya"},
	{Alpha3bCode: "ton", Alpha2Code: "to", English: "Tonga (Tonga Islands)"},
	{Alpha3bCode: "tsn", Alpha2Code: "tn", English: "Tswana"},
	{Alpha3bCode: "tso", Alpha2Code: "ts", English: "Tsonga"},
	{Alpha3bCode: "tuk", Alpha2Code: "tk", English: "Turkmen"},
	{Alpha3bCode: "tur", Alpha2Code: "tr", English: "Turkish"},
	{Alpha3bCode: "twi", Alpha2Code: "tw", English: "Twi"},
	{Alpha3bCode: "uig", Alpha2Code: "ug", English: "Uighur; Uyghur"},
	{Alpha3bCode: "ukr", Alpha2Code: "uk", English: "Ukrainian"},
	{Alpha3bCode: "urd", Alpha2Code: "ur", English: "Urdu"},
	{Alpha3bCode: "uzb", Alpha2Code: "uz", English: "Uzbek"},
	{Alpha3bCode: "ven", Alpha2Code: "ve", English: "Venda"},
	{Alpha3bCode: "vie", Alpha2Code: "vi", English: "Vietnamese"},
	{Alpha3bCode: "vol", Alpha2Code: "vo", English: "Volapük"},
	{Alpha3bCode: "wel", Alpha2Code: "cy", English: "Welsh"},
	{Alpha3bCode: "wln", Alpha2Code: "wa", English: "Walloon"},
	{Alpha3bCode: "wol", Alpha2Code: "wo", English: "Wolof"},
	{Alpha3bCode: "xho", Alpha2Code: "xh", English: "Xhosa"},
	{Alpha3bCode: "yid", Alpha2Code: "yi", English: "Yiddish"},
|
||||
{Alpha3bCode: "yor", Alpha2Code: "yo", English: "Yoruba"},
|
||||
{Alpha3bCode: "zha", Alpha2Code: "za", English: "Zhuang; Chuang"},
|
||||
{Alpha3bCode: "zul", Alpha2Code: "zu", English: "Zulu"},
|
||||
}
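Each entry above maps an ISO 639-2/B three-letter code to its ISO 639-1 two-letter equivalent. A minimal, self-contained sketch of how such a table is typically consumed; the type and variable names here are illustrative, not necessarily the package's own identifiers:

```go
package main

import "fmt"

// iso693Entry mirrors the shape of the entries above (illustrative only).
type iso693Entry struct {
	Alpha3bCode string
	Alpha2Code  string
	English     string
}

// A tiny excerpt of the table, enough to demonstrate a lookup.
var iso693List = []iso693Entry{
	{Alpha3bCode: "kor", Alpha2Code: "ko", English: "Korean"},
	{Alpha3bCode: "zul", Alpha2Code: "zu", English: "Zulu"},
}

// alpha2For returns the two-letter code for a three-letter (B) code.
func alpha2For(alpha3b string) (string, bool) {
	for _, e := range iso693List {
		if e.Alpha3bCode == alpha3b {
			return e.Alpha2Code, true
		}
	}
	return "", false
}

func main() {
	if code, ok := alpha2For("kor"); ok {
		fmt.Println(code) // ko
	}
}
```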

@@ -0,0 +1,270 @@
package govalidator

import (
	"errors"
	"fmt"
	"html"
	"math"
	"path"
	"regexp"
	"strings"
	"unicode"
	"unicode/utf8"
)

// Contains checks if the string contains the substring.
func Contains(str, substring string) bool {
	return strings.Contains(str, substring)
}

// Matches checks if the string matches the pattern (pattern is a regular expression).
// In case of error it returns false.
func Matches(str, pattern string) bool {
	match, _ := regexp.MatchString(pattern, str)
	return match
}

// LeftTrim trims characters from the left side of the input.
// If the second argument is empty, leading spaces are removed.
func LeftTrim(str, chars string) string {
	if chars == "" {
		return strings.TrimLeftFunc(str, unicode.IsSpace)
	}
	r, _ := regexp.Compile("^[" + chars + "]+")
	return r.ReplaceAllString(str, "")
}

// RightTrim trims characters from the right side of the input.
// If the second argument is empty, trailing spaces are removed.
func RightTrim(str, chars string) string {
	if chars == "" {
		return strings.TrimRightFunc(str, unicode.IsSpace)
	}
	r, _ := regexp.Compile("[" + chars + "]+$")
	return r.ReplaceAllString(str, "")
}

// Trim trims characters from both sides of the input.
// If the second argument is empty, surrounding spaces are removed.
func Trim(str, chars string) string {
	return LeftTrim(RightTrim(str, chars), chars)
}

// WhiteList removes characters that do not appear in the whitelist.
func WhiteList(str, chars string) string {
	pattern := "[^" + chars + "]+"
	r, _ := regexp.Compile(pattern)
	return r.ReplaceAllString(str, "")
}

// BlackList removes characters that appear in the blacklist.
func BlackList(str, chars string) string {
	pattern := "[" + chars + "]+"
	r, _ := regexp.Compile(pattern)
	return r.ReplaceAllString(str, "")
}

// StripLow removes characters with a numerical value < 32 and 127, mostly control characters.
// If keepNewLines is true, newline characters are preserved (\n and \r, hex 0xA and 0xD).
func StripLow(str string, keepNewLines bool) string {
	chars := ""
	if keepNewLines {
		chars = "\x00-\x09\x0B\x0C\x0E-\x1F\x7F"
	} else {
		chars = "\x00-\x1F\x7F"
	}
	return BlackList(str, chars)
}
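The sanitizers above compose naturally. A short usage sketch, assuming this file is vendored from the upstream package github.com/asaskevich/govalidator:

```go
package main

import (
	"fmt"

	"github.com/asaskevich/govalidator"
)

func main() {
	// Keep only digits.
	fmt.Println(govalidator.WhiteList("a1b2c3", "0-9")) // 123

	// Drop vowels.
	fmt.Println(govalidator.BlackList("foobar", "aeiou")) // fbr

	// Strip control characters but keep the trailing newline.
	fmt.Println(govalidator.StripLow("foo\x00bar\n", true)) // foobar

	// Trim custom characters from both sides.
	fmt.Println(govalidator.Trim("--hello--", "-")) // hello
}
```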

// ReplacePattern replaces a regular expression pattern in the string.
func ReplacePattern(str, pattern, replace string) string {
	r, _ := regexp.Compile(pattern)
	return r.ReplaceAllString(str, replace)
}

// Escape replaces <, >, & and " with HTML entities.
var Escape = html.EscapeString

func addSegment(inrune, segment []rune) []rune {
	if len(segment) == 0 {
		return inrune
	}
	if len(inrune) != 0 {
		inrune = append(inrune, '_')
	}
	inrune = append(inrune, segment...)
	return inrune
}

// UnderscoreToCamelCase converts from underscore-separated form to camel case form.
// Ex.: my_func => MyFunc
func UnderscoreToCamelCase(s string) string {
	return strings.Replace(strings.Title(strings.Replace(strings.ToLower(s), "_", " ", -1)), " ", "", -1)
}

// CamelCaseToUnderscore converts from camel case form to underscore-separated form.
// Ex.: MyFunc => my_func
func CamelCaseToUnderscore(str string) string {
	var output []rune
	var segment []rune
	for _, r := range str {

		// Numbers are not treated as the start of a separate segment.
		if !unicode.IsLower(r) && string(r) != "_" && !unicode.IsNumber(r) {
			output = addSegment(output, segment)
			segment = nil
		}
		segment = append(segment, unicode.ToLower(r))
	}
	output = addSegment(output, segment)
	return string(output)
}
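A quick round trip through the two case converters (same import-path assumption as above); note that digits stay attached to the preceding segment, while each consecutive upper-case letter starts a new one:

```go
package main

import (
	"fmt"

	"github.com/asaskevich/govalidator"
)

func main() {
	fmt.Println(govalidator.UnderscoreToCamelCase("remote_write_config")) // RemoteWriteConfig
	fmt.Println(govalidator.CamelCaseToUnderscore("RemoteWriteConfig"))   // remote_write_config

	// Runs of capitals split letter by letter; digits do not.
	fmt.Println(govalidator.CamelCaseToUnderscore("HTTP2Server")) // h_t_t_p2_server
}
```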

// Reverse returns the reversed string.
func Reverse(s string) string {
	r := []rune(s)
	for i, j := 0, len(r)-1; i < j; i, j = i+1, j-1 {
		r[i], r[j] = r[j], r[i]
	}
	return string(r)
}

// GetLines splits the string by "\n" and returns an array of lines.
func GetLines(s string) []string {
	return strings.Split(s, "\n")
}

// GetLine returns the specified line of a multiline string.
func GetLine(s string, index int) (string, error) {
	lines := GetLines(s)
	if index < 0 || index >= len(lines) {
		return "", errors.New("line index out of bounds")
	}
	return lines[index], nil
}

// RemoveTags removes all tags from an HTML string.
func RemoveTags(s string) string {
	return ReplacePattern(s, "<[^>]*>", "")
}

// SafeFileName returns a safe string that can be used in file names.
func SafeFileName(str string) string {
	name := strings.ToLower(str)
	name = path.Clean(path.Base(name))
	name = strings.Trim(name, " ")
	separators, err := regexp.Compile(`[ &_=+:]`)
	if err == nil {
		name = separators.ReplaceAllString(name, "-")
	}
	legal, err := regexp.Compile(`[^[:alnum:]-.]`)
	if err == nil {
		name = legal.ReplaceAllString(name, "")
	}
	for strings.Contains(name, "--") {
		name = strings.Replace(name, "--", "-", -1)
	}
	return name
}
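SafeFileName lowercases the input, strips any directory component, turns common separators into hyphens, and drops every other illegal character before collapsing hyphen runs. For example (same import-path assumption):

```go
package main

import (
	"fmt"

	"github.com/asaskevich/govalidator"
)

func main() {
	// Directory components are stripped, separators become hyphens,
	// and illegal characters disappear.
	fmt.Println(govalidator.SafeFileName("/tmp/My Report: Q3_2019.PDF"))
	// Output: my-report-q3-2019.pdf
}
```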

// NormalizeEmail canonicalizes an email address: the hostname is always
// lowercased, and the local part is lowercased for all domains.
// Normalization follows special rules for known providers: currently, GMail
// addresses have dots removed in the local part and are stripped of tags
// (e.g. some.one+tag@gmail.com becomes someone@gmail.com), and all
// @googlemail.com addresses are normalized to @gmail.com.
func NormalizeEmail(str string) (string, error) {
	if !IsEmail(str) {
		return "", fmt.Errorf("%s is not an email", str)
	}
	parts := strings.Split(str, "@")
	parts[0] = strings.ToLower(parts[0])
	parts[1] = strings.ToLower(parts[1])
	if parts[1] == "gmail.com" || parts[1] == "googlemail.com" {
		parts[1] = "gmail.com"
		parts[0] = strings.Split(ReplacePattern(parts[0], `\.`, ""), "+")[0]
	}
	return strings.Join(parts, "@"), nil
}
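A short sketch of the GMail-specific normalization (same import-path assumption):

```go
package main

import (
	"fmt"

	"github.com/asaskevich/govalidator"
)

func main() {
	// Dots and +tags are dropped for GMail, and googlemail.com
	// is folded into gmail.com.
	norm, err := govalidator.NormalizeEmail("Some.One+news@GoogleMail.com")
	if err != nil {
		panic(err)
	}
	fmt.Println(norm) // someone@gmail.com
}
```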

// Truncate truncates a string to the closest length without breaking words.
func Truncate(str string, length int, ending string) string {
	var aftstr, befstr string
	if len(str) > length {
		words := strings.Fields(str)
		before, present := 0, 0
		for i := range words {
			befstr = aftstr
			before = present
			aftstr = aftstr + words[i] + " "
			present = len(aftstr)
			if present > length && i != 0 {
				if (length - before) < (present - length) {
					return Trim(befstr, " /\\.,\"'#!?&@+-") + ending
				}
				return Trim(aftstr, " /\\.,\"'#!?&@+-") + ending
			}
		}
	}

	return str
}
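Truncate walks the string word by word and cuts at whichever word boundary lands closest to the requested length, so the result can be slightly shorter or longer than `length`. For example (same import-path assumption):

```go
package main

import (
	"fmt"

	"github.com/asaskevich/govalidator"
)

func main() {
	s := "Prometheus collects and stores its metrics as time series data"
	// Cut near 30 characters, on the nearest word boundary.
	fmt.Println(govalidator.Truncate(s, 30, "..."))
	// Output: Prometheus collects and stores...
}
```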

// PadLeft pads the left side of the string if its size is less than the indicated pad length.
func PadLeft(str string, padStr string, padLen int) string {
	return buildPadStr(str, padStr, padLen, true, false)
}

// PadRight pads the right side of the string if its size is less than the indicated pad length.
func PadRight(str string, padStr string, padLen int) string {
	return buildPadStr(str, padStr, padLen, false, true)
}

// PadBoth pads both sides of the string if its size is less than the indicated pad length.
func PadBoth(str string, padStr string, padLen int) string {
	return buildPadStr(str, padStr, padLen, true, true)
}

// buildPadStr pads a string on the left, right or both sides; note that the
// padding string may be unicode and more than one character.
func buildPadStr(str string, padStr string, padLen int, padLeft bool, padRight bool) string {

	// When the padded length is less than the current string size.
	if padLen < utf8.RuneCountInString(str) {
		return str
	}

	padLen -= utf8.RuneCountInString(str)

	targetLen := padLen

	targetLenLeft := targetLen
	targetLenRight := targetLen
	if padLeft && padRight {
		targetLenLeft = padLen / 2
		targetLenRight = padLen - targetLenLeft
	}

	strToRepeatLen := utf8.RuneCountInString(padStr)

	repeatTimes := int(math.Ceil(float64(targetLen) / float64(strToRepeatLen)))
	repeatedString := strings.Repeat(padStr, repeatTimes)

	leftSide := ""
	if padLeft {
		leftSide = repeatedString[0:targetLenLeft]
	}

	rightSide := ""
	if padRight {
		rightSide = repeatedString[0:targetLenRight]
	}

	return leftSide + str + rightSide
}
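Usage of the three padding helpers (same import-path assumption). One caveat worth knowing: the final slicing in buildPadStr is byte-based while the lengths are counted in runes, so a multi-byte pad string can be cut mid-rune; with ASCII padding it behaves as expected:

```go
package main

import (
	"fmt"

	"github.com/asaskevich/govalidator"
)

func main() {
	fmt.Println(govalidator.PadLeft("7", "0", 3))   // 007
	fmt.Println(govalidator.PadRight("ab", ".", 5)) // ab...
	fmt.Println(govalidator.PadBoth("cpu", "-", 9)) // ---cpu---
}
```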

// TruncatingErrorf works like fmt.Errorf, but drops any trailing args that
// have no matching verb in the format string. Note that only %s verbs are
// counted.
func TruncatingErrorf(str string, args ...interface{}) error {
	n := strings.Count(str, "%s")
	return fmt.Errorf(str, args[:n]...)
}
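TruncatingErrorf is a safety valve for callers that may pass more arguments than the format string consumes. A small sketch (same import-path assumption):

```go
package main

import (
	"fmt"

	"github.com/asaskevich/govalidator"
)

func main() {
	// Only the first two args are used; the third is dropped
	// because the format string contains two %s verbs.
	err := govalidator.TruncatingErrorf("open %s: %s", "config.yml", "denied", "ignored")
	fmt.Println(err) // open config.yml: denied
}
```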

File diff suppressed because it is too large

@@ -0,0 +1,15 @@
box: golang
build:
  steps:
    - setup-go-workspace

    - script:
        name: go get
        code: |
          go version
          go get -t ./...

    - script:
        name: go test
        code: |
          go test -race ./...

@@ -1,419 +0,0 @@
CockroachDB Community License Agreement

Please read this CockroachDB Community License Agreement (the "Agreement")
carefully before using CockroachDB (as defined below), which is offered by
Cockroach Labs, Inc. or its affiliated Legal Entities ("Cockroach Labs").

By downloading CockroachDB or using it in any manner, You agree that You have
read and agree to be bound by the terms of this Agreement. If You are
accessing CockroachDB on behalf of a Legal Entity, You represent and warrant
that You have the authority to agree to these terms on its behalf and the
right to bind that Legal Entity to this Agreement. Use of CockroachDB is
expressly conditioned upon Your assent to all the terms of this Agreement, to
the exclusion of all other terms.

1. Definitions. In addition to other terms defined elsewhere in this
Agreement, the terms below have the following meanings.

(a) "CockroachDB" shall mean the SQL database software provided by Cockroach
Labs, including both CockroachDB Community and CockroachDB Enterprise
editions, as defined below.

(b) "CockroachDB Community Edition" shall mean the open source version of
CockroachDB, available free of charge at

https://github.com/cockroachdb/cockroach

(c) "CockroachDB Enterprise Edition" shall mean the additional features made
available by Cockroach Labs, the use of which is subject to additional
terms set out below.

(d) "Contribution" shall mean any work of authorship, including the original
version of the Work and any modifications or additions to that Work or
Derivative Works thereof, that is intentionally submitted to Cockroach Labs
for inclusion in the Work by the copyright owner or by an individual or
Legal Entity authorized to submit on behalf of the copyright owner. For
the purposes of this definition, "submitted" means any form of
electronic, verbal, or written communication sent to Cockroach Labs or
its representatives, including but not limited to communication on
electronic mailing lists, source code control systems, and issue
tracking systems that are managed by, or on behalf of, Cockroach Labs
for the purpose of discussing and improving the Work, but excluding
communication that is conspicuously marked or otherwise designated in
writing by the copyright owner as "Not a Contribution."

(e) "Contributor" shall mean any copyright owner or individual or Legal
Entity authorized by the copyright owner, other than Cockroach Labs,
from whom Cockroach Labs receives a Contribution that Cockroach Labs
subsequently incorporates within the Work.

(f) "Derivative Works" shall mean any work, whether in Source or Object
form, that is based on (or derived from) the Work, such as a
translation, abridgement, condensation, or any other recasting,
transformation, or adaptation for which the editorial revisions,
annotations, elaborations, or other modifications represent, as a whole,
an original work of authorship. For the purposes of this License,
Derivative Works shall not include works that remain separable from, or
merely link (or bind by name) to the interfaces of, the Work and
Derivative Works thereof.

(g) "Legal Entity" shall mean the union of the acting entity and all other
entities that control, are controlled by, or are under common control
with that entity. For the purposes of this definition, "control" means
(i) the power, direct or indirect, to cause the direction or management
of such entity, whether by contract or otherwise, or (ii) ownership of
fifty percent (50%) or more of the outstanding shares, or (iii)
beneficial ownership of such entity.

(h) "License" shall mean the terms and conditions for use, reproduction, and
distribution of a Work as defined by this Agreement.

(i) "Licensor" shall mean Cockroach Labs or a Contributor, as applicable.

(j) "Object" form shall mean any form resulting from mechanical
transformation or translation of a Source form, including but not
limited to compiled object code, generated documentation, and
conversions to other media types.

(k) "Source" form shall mean the preferred form for making modifications,
including but not limited to software source code, documentation source,
and configuration files.

(l) "Third Party Works" shall mean Works, including Contributions, and other
technology owned by a person or Legal Entity other than Cockroach Labs,
as indicated by a copyright notice that is included in or attached to
such Works or technology.

(m) "Work" shall mean the work of authorship, whether in Source or Object
form, made available under a License, as indicated by a copyright notice
that is included in or attached to the work.

(n) "You" (or "Your") shall mean an individual or Legal Entity exercising
permissions granted by this License.

2. Licenses.

(a) License to CockroachDB Community Edition. The License for CockroachDB
Community Edition is the Apache License, Version 2.0 ("Apache License").
The Apache License includes a grant of patent license, as well as
redistribution rights that are contingent on several requirements.
Please see

http://www.apache.org/licenses/LICENSE-2.0

for full terms. CockroachDB Community Edition is a no-cost, entry-level
license and as such, contains the following disclaimers: NOTWITHSTANDING
ANYTHING TO THE CONTRARY HEREIN, COCKROACHDB COMMUNITY EDITION IS
PROVIDED "AS IS" AND "AS AVAILABLE", AND ALL EXPRESS OR IMPLIED
WARRANTIES ARE EXCLUDED AND DISCLAIMED, INCLUDING WITHOUT LIMITATION THE
IMPLIED WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE,
NON-INFRINGEMENT, AND ANY WARRANTIES ARISING BY STATUTE OR OTHERWISE IN
LAW OR FROM COURSE OF DEALING, COURSE OF PERFORMANCE, OR USE IN TRADE.
For clarity, the terms of this Agreement, other than the relevant
definitions in Section 1 and this Section 2(a) do not apply to
CockroachDB Community Edition.

(b) License to CockroachDB Enterprise Edition.

i Grant of Copyright License: Subject to the terms of this Agreement,
Licensor hereby grants to You a worldwide, non-exclusive,
non-transferable limited license to reproduce, prepare Enterprise
Derivative Works (as defined below) of, publicly display, publicly
perform, sublicense, and distribute CockroachDB Enterprise Edition
for Your business purposes, for so long as You are not in violation
of this Section 2(b) and are current on all payments required by
Section 4 below.

ii Grant of Patent License: Subject to the terms of this Agreement,
Licensor hereby grants to You a worldwide, non-exclusive,
non-transferable limited patent license to make, have made, use,
offer to sell, sell, import, and otherwise transfer CockroachDB
Enterprise Edition, where such license applies only to those patent
claims licensable by Licensor that are necessarily infringed by
their Contribution(s) alone or by combination of their
Contribution(s) with the Work to which such Contribution(s) was
submitted. If You institute patent litigation against any entity
(including a cross-claim or counterclaim in a lawsuit) alleging that
the Work or a Contribution incorporated within the Work constitutes
direct or contributory patent infringement, then any patent licenses
granted to You under this License for that Work shall terminate as
of the date such litigation is filed.

iii License to Third Party Works: From time to time Cockroach Labs may
use, or provide You access to, Third Party Works in connection with
CockroachDB Enterprise Edition. You acknowledge and agree that in
addition to this Agreement, Your use of Third Party Works is subject
to all other terms and conditions set forth in the License provided
with or contained in such Third Party Works. Some Third Party Works
may be licensed to You solely for use with CockroachDB Enterprise
Edition under the terms of a third party License, or as otherwise
notified by Cockroach Labs, and not under the terms of this
Agreement. You agree that the owners and third party licensors of
Third Party Works are intended third party beneficiaries to this
Agreement.

3. Support. From time to time, in its sole discretion, Cockroach Labs may
offer professional services or support for CockroachDB, which may now or in
the future be subject to additional fees.

4. Fees for CockroachDB Enterprise Edition or CockroachDB Support.

(a) Fees. The License to CockroachDB Enterprise Edition is conditioned upon
Your payment of the fees specified on

https://cockroachlabs.com/pricing

which You agree to pay to Cockroach Labs in accordance with the payment
terms set out on that page. Any professional services or support for
CockroachDB may also be subject to Your payment of fees, which will be
specified by Cockroach Labs when you sign up to receive such
professional services or support. Cockroach Labs reserves the right to
change the fees at any time with prior written notice; for recurring
fees, any such adjustments will take effect as of the next pay period.

(b) Overdue Payments and Taxes. Overdue payments are subject to a service
charge equal to the lesser of 1.5% per month or the maximum legal
interest rate allowed by law, and You shall pay all Cockroach Labs’
reasonable costs of collection, including court costs and attorneys’
fees. Fees are stated and payable in U.S. dollars and are exclusive of
all sales, use, value added and similar taxes, duties, withholdings and
other governmental assessments (but excluding taxes based on Cockroach
Labs’ income) that may be levied on the transactions contemplated by
this Agreement in any jurisdiction, all of which are Your responsibility
unless you have provided Cockroach Labs with a valid tax-exempt
certificate.

(c) Record-keeping and Audit. If fees for CockroachDB Enterprise Edition
are based on the number of cores or servers running on CockroachDB
Enterprise Edition or another use-based unit of measurement, You must
maintain complete and accurate records with respect to Your use of
CockroachDB Enterprise Edition and will provide such records to
Cockroach Labs for inspection or audit upon Cockroach Labs’ reasonable
request. If an inspection or audit uncovers additional usage by You for
which fees are owed under this Agreement, then You shall pay for such
additional usage at Cockroach Labs’ then-current rates.

5. Trial License. If You have signed up for a trial or evaluation of
CockroachDB Enterprise Edition, Your License to CockroachDB Enterprise
Edition is granted without charge for the trial or evaluation period
specified when You signed up, or if no term was specified, for thirty (30)
calendar days, provided that Your License is granted solely for purposes of
Your internal evaluation of CockroachDB Enterprise Edition during the trial
or evaluation period (a "Trial License"). You may not use CockroachDB
Enterprise Edition under a Trial License more than once in any twelve (12)
month period. Cockroach Labs may revoke a Trial License at any time and
for any reason. Sections 3, 4, 9 and 11 of this Agreement do not apply to
Trial Licenses.

6. Redistribution. You may reproduce and distribute copies of the Work or
Derivative Works thereof in any medium, with or without modifications, and
in Source or Object form, provided that You meet the following conditions:

(a) You must give any other recipients of the Work or Derivative Works a
copy of this License; and

(b) You must cause any modified files to carry prominent notices stating
that You changed the files; and

(c) You must retain, in the Source form of any Derivative Works that You
distribute, all copyright, patent, trademark, and attribution notices
from the Source form of the Work, excluding those notices that do not
pertain to any part of the Derivative Works; and

(d) If the Work includes a "NOTICE" text file as part of its distribution,
then any Derivative Works that You distribute must include a readable
copy of the attribution notices contained within such NOTICE file,
excluding those notices that do not pertain to any part of the
Derivative Works, in at least one of the following places: within a
NOTICE text file distributed as part of the Derivative Works; within the
Source form or documentation, if provided along with the Derivative
Works; or, within a display generated by the Derivative Works, if and
wherever such third-party notices normally appear. The contents of the
NOTICE file are for informational purposes only and do not modify the
License. You may add Your own attribution notices within Derivative
Works that You distribute, alongside or as an addendum to the NOTICE
text from the Work, provided that such additional attribution notices
cannot be construed as modifying the License.

You may add Your own copyright statement to Your modifications and may
provide additional or different license terms and conditions for use,
reproduction, or distribution of Your modifications, or for any such
Derivative Works as a whole, provided Your use, reproduction, and
distribution of the Work otherwise complies with the conditions stated
in this License.

(e) Enterprise Derivative Works: Derivative Works of CockroachDB Enterprise
Edition ("Enterprise Derivative Works") may be made, reproduced and
distributed in any medium, with or without modifications, in Source or
Object form, provided that each Enterprise Derivative Work will be
considered to include a License to CockroachDB Enterprise Edition and
thus will be subject to the payment of fees to Cockroach Labs by any
user of the Enterprise Derivative Work.

7. Submission of Contributions. Unless You explicitly state otherwise, any
Contribution intentionally submitted for inclusion in CockroachDB by You to
Cockroach Labs shall be under the terms and conditions of

https://cla-assistant.io/cockroachdb/cockroach

(which is based off of the Apache License), without any additional terms or
conditions, payments of royalties or otherwise to Your benefit.
Notwithstanding the above, nothing herein shall supersede or modify the
terms of any separate license agreement You may have executed with
Cockroach Labs regarding such Contributions.

8. Trademarks. This License does not grant permission to use the trade names,
trademarks, service marks, or product names of Licensor, except as required
for reasonable and customary use in describing the origin of the Work and
reproducing the content of the NOTICE file.

9. Limited Warranty.

(a) Warranties. Cockroach Labs warrants to You that: (i) CockroachDB
Enterprise Edition will materially perform in accordance with the
applicable documentation for ninety (90) days after initial delivery to
You; and (ii) any professional services performed by Cockroach Labs
under this Agreement will be performed in a workmanlike manner, in
accordance with general industry standards.

(b) Exclusions. Cockroach Labs’ warranties in this Section 9 do not extend
to problems that result from: (i) Your failure to implement updates
issued by Cockroach Labs during the warranty period; (ii) any
alterations or additions (including Enterprise Derivative Works and
Contributions) to CockroachDB not performed by or at the direction of
Cockroach Labs; (iii) failures that are not reproducible by Cockroach
Labs; (iv) operation of CockroachDB Enterprise Edition in violation of
this Agreement or not in accordance with its documentation; (v) failures
caused by software, hardware or products not licensed or provided by
Cockroach Labs hereunder; or (vi) Third Party Works.

(c) Remedies. In the event of a breach of a warranty under this Section 9,
Cockroach Labs will, at its discretion and cost, either repair, replace
or re-perform the applicable Works or services or refund a portion of
fees previously paid to Cockroach Labs that are associated with the
defective Works or services. This is Your exclusive remedy, and
Cockroach Labs’ sole liability, arising in connection with the limited
warranties herein.

10. Disclaimer of Warranty. Except as set out in Section 9, unless required
by applicable law, Licensor provides the Work (and each Contributor
provides its Contributions) on an "AS IS" BASIS, WITHOUT WARRANTIES OR
CONDITIONS OF ANY KIND, either express or implied, arising out of course
of dealing, course of performance, or usage in trade, including, without
limitation, any warranties or conditions of TITLE, NON-INFRINGEMENT,
MERCHANTABILITY, CORRECTNESS, RELIABILITY, or FITNESS FOR A PARTICULAR
PURPOSE, all of which are hereby disclaimed. You are solely responsible
for determining the appropriateness of using or redistributing Works and
assume any risks associated with Your exercise of permissions under the
applicable License for such Works.

11. Limited Indemnity.

(a) Indemnity. Cockroach Labs will defend, indemnify and hold You harmless
against any third party claims, liabilities or expenses incurred
(including reasonable attorneys’ fees), as well as amounts finally
awarded in a settlement or a non-appealable judgement by a court
("Losses"), to the extent arising from any claim or allegation by a
third party that CockroachDB Enterprise Edition infringes or
misappropriates a valid United States patent, copyright or trade secret
right of a third party; provided that You give Cockroach Labs: (i)
prompt written notice of any such claim or allegation; (ii) sole control
of the defense and settlement thereof; and (iii) reasonable cooperation
and assistance in such defense or settlement. If any Work within
CockroachDB Enterprise Edition becomes or, in Cockroach Labs’ opinion,
is likely to become, the subject of an injunction, Cockroach Labs may,
at its option, (A) procure for You the right to continue using such
Work, (B) replace or modify such Work so that it becomes non-infringing
without substantially compromising its functionality, or, if (A) and (B)
are not commercially practicable, then (C) terminate Your license to the
allegedly infringing Work and refund to You a prorated portion of the
prepaid and unearned fees for such infringing Work. The foregoing
states the entire liability of Cockroach Labs with respect to
infringement of patents, copyrights, trade secrets or other intellectual
property rights.

(b) Exclusions. The foregoing obligations shall not apply to: (i) Works
modified by any party other than Cockroach Labs (including Enterprise
Derivative Works and Contributions), if the alleged infringement relates
to such modification, (ii) Works combined or bundled with any products,
processes or materials not provided by Cockroach Labs where the alleged
infringement relates to such combination, (iii) use of a version of
CockroachDB Enterprise Edition other than the version that was current
at the time of such use, as long as a non-infringing version had been
released, (iv) any Works created to Your specifications, (v)
infringement or misappropriation of any proprietary right in which You
have an interest, or (vi) Third Party Works. You will defend, indemnify
and hold Cockroach Labs harmless against any Losses arising from any
such claim or allegation, subject to conditions reciprocal to those in
Section 11(a).

12. Limitation of Liability. In no event and under no legal or equitable
theory, whether in tort (including negligence), contract, or otherwise,
unless required by applicable law (such as deliberate and grossly
negligent acts), and notwithstanding anything in this Agreement to the
contrary, shall Licensor or any Contributor be liable to You for (i) any
amounts in excess, in the aggregate, of the fees paid by You to Cockroach
Labs under this Agreement in the twelve (12) months preceding the date the
first cause of liability arose, or (ii) any indirect, special,
incidental, punitive, exemplary, reliance, or consequential damages of any
character arising as a result of this Agreement or out of the use or
inability to use the Work (including but not limited to damages for loss
of goodwill, profits, data or data use, work stoppage, computer failure or
malfunction, cost of procurement of substitute goods, technology or
services, or any and all other commercial damages or losses), even if such
Licensor or Contributor has been advised of the possibility of such
damages. THESE LIMITATIONS SHALL APPLY NOTWITHSTANDING THE FAILURE OF THE
ESSENTIAL PURPOSE OF ANY LIMITED REMEDY.

13. Accepting Warranty or Additional Liability. While redistributing Works or
Derivative Works thereof, and without limiting your obligations under
Section 6, You may choose to offer, and charge a fee for, acceptance of
support, warranty, indemnity, or other liability obligations and/or rights
consistent with this License. However, in accepting such obligations, You
may act only on Your own behalf and on Your sole responsibility, not on
behalf of any other Contributor, and only if You agree to indemnify,
defend, and hold Cockroach Labs and each other Contributor harmless for
any liability incurred by, or claims asserted against, such Contributor by
reason of your accepting any such warranty or additional liability.

14. General.

(a) Relationship of Parties. You and Cockroach Labs are independent
contractors, and nothing herein shall be deemed to constitute either
party as the agent or representative of the other or both parties as
joint venturers or partners for any purpose.

(b) Export Control. You shall comply with the U.S. Foreign Corrupt
Practices Act and all applicable export laws, restrictions and
regulations of the U.S. Department of Commerce, and any other applicable
U.S. and foreign authority.

(c) Assignment. This Agreement and the rights and obligations herein may
not be assigned or transferred, in whole or in part, by You without the
prior written consent of Cockroach Labs. Any assignment in violation of
this provision is void. This Agreement shall be binding upon, and inure
to the benefit of, the successors and permitted assigns of the parties.

(d) Governing Law. This Agreement shall be governed by and construed under
the laws of the State of New York and the United States without regard
to conflicts of laws provisions thereof, and without regard to the
Uniform Computer Information Transactions Act.

(e) Attorneys’ Fees. In any action or proceeding to enforce rights under
this Agreement, the prevailing party shall be entitled to recover its
costs, expenses and attorneys’ fees.

(f) Severability. If any provision of this Agreement is held to be invalid,
illegal or unenforceable in any respect, that provision shall be limited
or eliminated to the minimum extent necessary so that this Agreement
otherwise remains in full force and effect and enforceable.

(g) Entire Agreement; Waivers; Modification. This Agreement constitutes the
entire agreement between the parties relating to the subject matter
hereof and supersedes all proposals, understandings, or discussions,
whether written or oral, relating to the subject matter of this
Agreement and all past dealing or industry custom. The failure of either
party to enforce its rights under this Agreement at any time for any
period shall not be construed as a waiver of such rights. No changes,
modifications or waivers to this Agreement will be effective unless in
writing and signed by both parties.

@@ -1,95 +0,0 @@
// Copyright 2014 The Cockroach Authors.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
// implied. See the License for the specific language governing
// permissions and limitations under the License.
//
// Author: Spencer Kimball (spencer.kimball@gmail.com)

package httputil

import (
	"bytes"
	"io/ioutil"
	"net/http"
	"strconv"

	"github.com/gogo/protobuf/jsonpb"
	"github.com/gogo/protobuf/proto"
	"github.com/pkg/errors"
)

const (
	// AcceptHeader is the canonical header name for accept.
	AcceptHeader = "Accept"
	// AcceptEncodingHeader is the canonical header name for accept encoding.
	AcceptEncodingHeader = "Accept-Encoding"
	// ContentEncodingHeader is the canonical header name for content encoding.
	ContentEncodingHeader = "Content-Encoding"
	// ContentTypeHeader is the canonical header name for content type.
	ContentTypeHeader = "Content-Type"
	// JSONContentType is the JSON content type.
	JSONContentType = "application/json"
	// AltJSONContentType is the alternate JSON content type.
	AltJSONContentType = "application/x-json"
	// ProtoContentType is the protobuf content type.
	ProtoContentType = "application/x-protobuf"
	// AltProtoContentType is the alternate protobuf content type.
	AltProtoContentType = "application/x-google-protobuf"
	// PlaintextContentType is the plaintext content type.
	PlaintextContentType = "text/plain"
	// GzipEncoding is the gzip encoding.
	GzipEncoding = "gzip"
)

// GetJSON uses the supplied client to GET the URL specified by the parameters
// and unmarshals the result into response.
func GetJSON(httpClient http.Client, path string, response proto.Message) error {
	req, err := http.NewRequest("GET", path, nil)
	if err != nil {
		return err
	}
	return doJSONRequest(httpClient, req, response)
}

// PostJSON uses the supplied client to POST the request to the URL specified
// by the parameters and unmarshals the result into response.
func PostJSON(httpClient http.Client, path string, request, response proto.Message) error {
	// Hack to avoid upsetting TestProtoMarshal().
	marshalFn := (&jsonpb.Marshaler{}).Marshal

	var buf bytes.Buffer
	if err := marshalFn(&buf, request); err != nil {
		return err
	}
	req, err := http.NewRequest("POST", path, &buf)
	if err != nil {
		return err
	}
	return doJSONRequest(httpClient, req, response)
}

func doJSONRequest(httpClient http.Client, req *http.Request, response proto.Message) error {
	if timeout := httpClient.Timeout; timeout > 0 {
		req.Header.Set("Grpc-Timeout", strconv.FormatInt(timeout.Nanoseconds(), 10)+"n")
	}
	req.Header.Set(AcceptHeader, JSONContentType)
	resp, err := httpClient.Do(req)
	if err != nil {
		return err
	}
	defer resp.Body.Close()
	if contentType := resp.Header.Get(ContentTypeHeader); !(resp.StatusCode == http.StatusOK && contentType == JSONContentType) {
		b, err := ioutil.ReadAll(resp.Body)
		return errors.Errorf("status: %s, content-type: %s, body: %s, error: %v", resp.Status, contentType, b, err)
	}
	return jsonpb.Unmarshal(resp.Body, response)
}
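A hedged sketch of a caller, using gogo's types.Empty as a stand-in for whatever proto.Message the endpoint actually returns; the URL here is hypothetical. Note that the client's Timeout also becomes a Grpc-Timeout header:

```go
package main

import (
	"fmt"
	"net/http"
	"time"

	"github.com/gogo/protobuf/types"

	"github.com/cockroachdb/cockroach/pkg/util/httputil"
)

func main() {
	// A real caller would pass the response type the endpoint returns.
	var resp types.Empty
	client := http.Client{Timeout: 5 * time.Second}
	if err := httputil.GetJSON(client, "http://localhost:8080/_status/details", &resp); err != nil {
		fmt.Println("request failed:", err)
	}
}
```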

@@ -1,117 +0,0 @@
// Copyright 2016 The Cockroach Authors.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
// implied. See the License for the specific language governing
// permissions and limitations under the License.
//
// Author: Tamir Duberstein (tamird@gmail.com)

package protoutil

import (
	"fmt"
	"reflect"

	"github.com/cockroachdb/cockroach/pkg/util/syncutil"
	"github.com/gogo/protobuf/proto"
)

var verbotenKinds = [...]reflect.Kind{
	reflect.Array,
}

type typeKey struct {
	typ      reflect.Type
	verboten reflect.Kind
}

var types struct {
	syncutil.Mutex
	known map[typeKey]reflect.Type
}

func init() {
	types.known = make(map[typeKey]reflect.Type)
}

// Clone uses proto.Clone to return a deep copy of pb. It panics if pb
// recursively contains any instances of types which are known to be
// unsupported by proto.Clone.
//
// This function and its associated lint (see build/style_test.go) exist to
// ensure we do not attempt to proto.Clone types which are not supported by
// proto.Clone. This hackery is necessary because proto.Clone gives no direct
// indication that it has incompletely cloned a type; it merely logs to standard
// output (see
// https://github.com/golang/protobuf/blob/89238a3/proto/clone.go#L204).
//
// The concrete case against which this is currently guarding may be resolved
// upstream, see https://github.com/gogo/protobuf/issues/147.
func Clone(pb proto.Message) proto.Message {
	for _, verbotenKind := range verbotenKinds {
		if t := typeIsOrContainsVerboten(reflect.TypeOf(pb), verbotenKind); t != nil {
			panic(fmt.Sprintf("attempt to clone %T, which contains uncloneable field of type %s", pb, t))
		}
	}

	return proto.Clone(pb)
}

func typeIsOrContainsVerboten(t reflect.Type, verboten reflect.Kind) reflect.Type {
	types.Lock()
	defer types.Unlock()

	return typeIsOrContainsVerbotenLocked(t, verboten)
}

func typeIsOrContainsVerbotenLocked(t reflect.Type, verboten reflect.Kind) reflect.Type {
	key := typeKey{t, verboten}
	knownTypeIsOrContainsVerboten, ok := types.known[key]
	if !ok {
		knownTypeIsOrContainsVerboten = typeIsOrContainsVerbotenImpl(t, verboten)
		types.known[key] = knownTypeIsOrContainsVerboten
	}
	return knownTypeIsOrContainsVerboten
}

func typeIsOrContainsVerbotenImpl(t reflect.Type, verboten reflect.Kind) reflect.Type {
	switch t.Kind() {
	case verboten:
		return t

	case reflect.Map:
		if key := typeIsOrContainsVerbotenLocked(t.Key(), verboten); key != nil {
			return key
		}
		if value := typeIsOrContainsVerbotenLocked(t.Elem(), verboten); value != nil {
			return value
		}

	case reflect.Array, reflect.Ptr, reflect.Slice:
		if value := typeIsOrContainsVerbotenLocked(t.Elem(), verboten); value != nil {
			return value
		}

	case reflect.Struct:
		for i := 0; i < t.NumField(); i++ {
			if field := typeIsOrContainsVerbotenLocked(t.Field(i).Type, verboten); field != nil {
				return field
			}
		}

	case reflect.Chan, reflect.Func:
		// Not strictly correct, but cloning these kinds is not allowed.
		return t

	}

	return nil
}
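The recursive walk above generalizes beyond proto cloning. A stripped-down, self-contained sketch of the same technique, without the memoization table or locking:

```go
package main

import (
	"fmt"
	"reflect"
)

// containsKind reports whether t is, or recursively contains, a field,
// element, or key of the given kind. This mirrors the walk above, minus
// the cache.
func containsKind(t reflect.Type, kind reflect.Kind) bool {
	if t.Kind() == kind {
		return true
	}
	switch t.Kind() {
	case reflect.Map:
		return containsKind(t.Key(), kind) || containsKind(t.Elem(), kind)
	case reflect.Array, reflect.Ptr, reflect.Slice:
		return containsKind(t.Elem(), kind)
	case reflect.Struct:
		for i := 0; i < t.NumField(); i++ {
			if containsKind(t.Field(i).Type, kind) {
				return true
			}
		}
	}
	return false
}

type inner struct{ buf [16]byte }
type outer struct{ in *inner }

func main() {
	// outer reaches a fixed-size array through a pointer, so it would
	// trip the Clone guard above.
	fmt.Println(containsKind(reflect.TypeOf(outer{}), reflect.Array)) // true
}
```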

@@ -1,128 +0,0 @@
// Copyright 2016 The Cockroach Authors.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
// implied. See the License for the specific language governing
// permissions and limitations under the License.
//
// Author: Tamir Duberstein (tamird@gmail.com)

package protoutil

import (
	"bytes"
	"encoding/json"
	"fmt"
	"io"
	"reflect"

	"github.com/gogo/protobuf/jsonpb"
	"github.com/gogo/protobuf/proto"
	gwruntime "github.com/grpc-ecosystem/grpc-gateway/runtime"
	"github.com/pkg/errors"

	"github.com/cockroachdb/cockroach/pkg/util/httputil"
)

var _ gwruntime.Marshaler = (*JSONPb)(nil)

var typeProtoMessage = reflect.TypeOf((*proto.Message)(nil)).Elem()

// JSONPb is a gwruntime.Marshaler that uses github.com/gogo/protobuf/jsonpb.
type JSONPb jsonpb.Marshaler

// ContentType implements gwruntime.Marshaler.
func (*JSONPb) ContentType() string {
	return httputil.JSONContentType
}

// Marshal implements gwruntime.Marshaler.
func (j *JSONPb) Marshal(v interface{}) ([]byte, error) {
	return j.marshal(v)
}

// a lower-case version of marshal to allow for a call from
// marshalNonProtoField without upsetting TestProtoMarshal().
func (j *JSONPb) marshal(v interface{}) ([]byte, error) {
	if pb, ok := v.(proto.Message); ok {
		var buf bytes.Buffer
		marshalFn := (*jsonpb.Marshaler)(j).Marshal
		if err := marshalFn(&buf, pb); err != nil {
			return nil, err
		}
		return buf.Bytes(), nil
	}
	return j.marshalNonProtoField(v)
}

// Cribbed verbatim from grpc-gateway.
type protoEnum interface {
	fmt.Stringer
	EnumDescriptor() ([]byte, []int)
}

// Cribbed verbatim from grpc-gateway.
func (j *JSONPb) marshalNonProtoField(v interface{}) ([]byte, error) {
	rv := reflect.ValueOf(v)
	for rv.Kind() == reflect.Ptr {
		if rv.IsNil() {
			return []byte("null"), nil
		}
		rv = rv.Elem()
	}

	if rv.Kind() == reflect.Map {
		m := make(map[string]*json.RawMessage)
		for _, k := range rv.MapKeys() {
			buf, err := j.marshal(rv.MapIndex(k).Interface())
			if err != nil {
				return nil, err
			}
			m[fmt.Sprintf("%v", k.Interface())] = (*json.RawMessage)(&buf)
		}
		if j.Indent != "" {
			return json.MarshalIndent(m, "", j.Indent)
		}
		return json.Marshal(m)
	}
	if enum, ok := rv.Interface().(protoEnum); ok && !j.EnumsAsInts {
		return json.Marshal(enum.String())
	}
	return json.Marshal(rv.Interface())
}

// Unmarshal implements gwruntime.Marshaler.
func (j *JSONPb) Unmarshal(data []byte, v interface{}) error {
	if pb, ok := v.(proto.Message); ok {
		return jsonpb.Unmarshal(bytes.NewReader(data), pb)
	}
	return errors.Errorf("unexpected type %T does not implement %s", v, typeProtoMessage)
}

// NewDecoder implements gwruntime.Marshaler.
func (j *JSONPb) NewDecoder(r io.Reader) gwruntime.Decoder {
	return gwruntime.DecoderFunc(func(v interface{}) error {
		if pb, ok := v.(proto.Message); ok {
			return jsonpb.Unmarshal(r, pb)
		}
		return errors.Errorf("unexpected type %T does not implement %s", v, typeProtoMessage)
	})
}

// NewEncoder implements gwruntime.Marshaler.
func (j *JSONPb) NewEncoder(w io.Writer) gwruntime.Encoder {
	return gwruntime.EncoderFunc(func(v interface{}) error {
		if pb, ok := v.(proto.Message); ok {
			marshalFn := (*jsonpb.Marshaler)(j).Marshal
			return marshalFn(w, pb)
		}
		return errors.Errorf("unexpected type %T does not implement %s", v, typeProtoMessage)
	})
}
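JSONPb exists to plug gogo/protobuf-generated messages into grpc-gateway. A hedged sketch of wiring it into a gateway mux; the protoutil import path is inferred from the sibling httputil import above, and WithMarshalerOption is the grpc-gateway v1 runtime option:

```go
package main

import (
	gwruntime "github.com/grpc-ecosystem/grpc-gateway/runtime"

	"github.com/cockroachdb/cockroach/pkg/util/protoutil"
)

func main() {
	// Serve all responses through the gogo/protobuf-aware marshaler,
	// emitting enums as their string names.
	mux := gwruntime.NewServeMux(
		gwruntime.WithMarshalerOption(gwruntime.MIMEWildcard,
			&protoutil.JSONPb{OrigName: true, EnumsAsInts: false}),
	)
	_ = mux // register generated gateway handlers on mux here
}
```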

@@ -1,31 +0,0 @@
// Copyright 2016 The Cockroach Authors.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
// implied. See the License for the specific language governing
// permissions and limitations under the License.
//
// Author: Tamir Duberstein (tamird@gmail.com)

package protoutil

import "github.com/gogo/protobuf/proto"

// Interceptor will be called with every proto before it is marshalled.
// Interceptor is not safe to modify concurrently with calls to Marshal.
var Interceptor = func(_ proto.Message) {}

// Marshal uses proto.Marshal to encode pb into the wire format. It is used in
// some tests to intercept calls to proto.Marshal.
func Marshal(pb proto.Message) ([]byte, error) {
	Interceptor(pb)

	return proto.Marshal(pb)
}
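Interceptor is a test seam: a test can swap it out to observe every message passed through Marshal. A hedged sketch of such a test, using gogo's types.Empty as a convenient message:

```go
package protoutil_test

import (
	"testing"

	"github.com/gogo/protobuf/proto"
	"github.com/gogo/protobuf/types"

	"github.com/cockroachdb/cockroach/pkg/util/protoutil"
)

func TestMarshalIsIntercepted(t *testing.T) {
	var calls int
	// Count every message that goes through protoutil.Marshal.
	protoutil.Interceptor = func(_ proto.Message) { calls++ }
	defer func() { protoutil.Interceptor = func(_ proto.Message) {} }()

	if _, err := protoutil.Marshal(&types.Empty{}); err != nil {
		t.Fatal(err)
	}
	if calls != 1 {
		t.Fatalf("expected 1 intercepted call, got %d", calls)
	}
}
```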

@@ -1,96 +0,0 @@
// Copyright 2016 The Cockroach Authors.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
// implied. See the License for the specific language governing
// permissions and limitations under the License.
//
// Author: Tamir Duberstein (tamird@gmail.com)

package protoutil

import (
	"io"
	"io/ioutil"

	"github.com/gogo/protobuf/proto"
	gwruntime "github.com/grpc-ecosystem/grpc-gateway/runtime"
	"github.com/pkg/errors"

	"github.com/cockroachdb/cockroach/pkg/util/httputil"
)

var _ gwruntime.Marshaler = (*ProtoPb)(nil)

// ProtoPb is a gwruntime.Marshaler that uses github.com/gogo/protobuf/proto.
type ProtoPb struct{}

// ContentType implements gwruntime.Marshaler.
func (*ProtoPb) ContentType() string {
	return httputil.ProtoContentType
}

// Marshal implements gwruntime.Marshaler.
func (*ProtoPb) Marshal(v interface{}) ([]byte, error) {
	if p, ok := v.(proto.Message); ok {
		return Marshal(p)
	}
	return nil, errors.Errorf("unexpected type %T does not implement %s", v, typeProtoMessage)
}

// Unmarshal implements gwruntime.Marshaler.
func (*ProtoPb) Unmarshal(data []byte, v interface{}) error {
	if p, ok := v.(proto.Message); ok {
		return proto.Unmarshal(data, p)
	}
	return errors.Errorf("unexpected type %T does not implement %s", v, typeProtoMessage)
}

type protoDecoder struct {
	r io.Reader
}

// NewDecoder implements gwruntime.Marshaler.
func (*ProtoPb) NewDecoder(r io.Reader) gwruntime.Decoder {
	return &protoDecoder{r: r}
}

// Decode implements gwruntime.Decoder.
func (d *protoDecoder) Decode(v interface{}) error {
	if p, ok := v.(proto.Message); ok {
		bytes, err := ioutil.ReadAll(d.r)
		if err == nil {
			err = proto.Unmarshal(bytes, p)
		}
		return err
	}
	return errors.Errorf("unexpected type %T does not implement %s", v, typeProtoMessage)
}

type protoEncoder struct {
	w io.Writer
}

// NewEncoder implements gwruntime.Marshaler.
func (*ProtoPb) NewEncoder(w io.Writer) gwruntime.Encoder {
	return &protoEncoder{w: w}
}

// Encode implements gwruntime.Encoder.
func (e *protoEncoder) Encode(v interface{}) error {
	if p, ok := v.(proto.Message); ok {
		bytes, err := Marshal(p)
		if err == nil {
			_, err = e.w.Write(bytes)
		}
		return err
	}
	return errors.Errorf("unexpected type %T does not implement %s", v, typeProtoMessage)
}
|
|
@ -1,47 +0,0 @@
// Copyright 2016 The Cockroach Authors.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
// implied. See the License for the specific language governing
// permissions and limitations under the License.
//
// Author: Tamir Duberstein (tamird@gmail.com)

// +build deadlock

package syncutil

import (
	"time"

	deadlock "github.com/sasha-s/go-deadlock"
)

func init() {
	deadlock.Opts.DeadlockTimeout = 5 * time.Minute
}

// A Mutex is a mutual exclusion lock.
type Mutex struct {
	deadlock.Mutex
}

// AssertHeld is a no-op for deadlock mutexes.
func (m *Mutex) AssertHeld() {
}

// An RWMutex is a reader/writer mutual exclusion lock.
type RWMutex struct {
	deadlock.RWMutex
}

// AssertHeld is a no-op for deadlock mutexes.
func (m *RWMutex) AssertHeld() {
}
@ -1,92 +0,0 @@
// Copyright 2016 The Cockroach Authors.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
// implied. See the License for the specific language governing
// permissions and limitations under the License.
//
// Author: Tamir Duberstein (tamird@gmail.com)

// +build !deadlock

package syncutil

import (
	"sync"
	"sync/atomic"
)

// A Mutex is a mutual exclusion lock.
type Mutex struct {
	mu       sync.Mutex
	isLocked int32 // updated atomically
}

// Lock implements sync.Locker.
func (m *Mutex) Lock() {
	m.mu.Lock()
	atomic.StoreInt32(&m.isLocked, 1)
}

// Unlock implements sync.Locker.
func (m *Mutex) Unlock() {
	atomic.StoreInt32(&m.isLocked, 0)
	m.mu.Unlock()
}

// AssertHeld may panic if the mutex is not locked (but it is not required to
// do so). Functions which require that their callers hold a particular lock
// may use this to enforce this requirement more directly than relying on the
// race detector.
//
// Note that we do not require the lock to be held by any particular thread,
// just that some thread holds the lock. This is both more efficient and allows
// for rare cases where a mutex is locked in one thread and used in another.
func (m *Mutex) AssertHeld() {
	if atomic.LoadInt32(&m.isLocked) == 0 {
		panic("mutex is not locked")
	}
}

// TODO(pmattis): Mutex.AssertHeld is neither used nor tested. Silence unused
// warning.
var _ = (*Mutex).AssertHeld

// An RWMutex is a reader/writer mutual exclusion lock.
type RWMutex struct {
	sync.RWMutex
	isLocked int32 // updated atomically
}

// Lock implements sync.Locker.
func (m *RWMutex) Lock() {
	m.RWMutex.Lock()
	atomic.StoreInt32(&m.isLocked, 1)
}

// Unlock implements sync.Locker.
func (m *RWMutex) Unlock() {
	atomic.StoreInt32(&m.isLocked, 0)
	m.RWMutex.Unlock()
}

// AssertHeld may panic if the mutex is not locked for writing (but it is not
// required to do so). Functions which require that their callers hold a
// particular lock may use this to enforce this requirement more directly than
// relying on the race detector.
//
// Note that we do not require the lock to be held by any particular thread,
// just that some thread holds the lock. This is both more efficient and allows
// for rare cases where a mutex is locked in one thread and used in another.
func (m *RWMutex) AssertHeld() {
	if atomic.LoadInt32(&m.isLocked) == 0 {
		panic("mutex is not locked")
	}
}
@ -0,0 +1,25 @@
mgo - MongoDB driver for Go

Copyright (c) 2010-2013 - Gustavo Niemeyer <gustavo@niemeyer.net>

All rights reserved.

Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:

1. Redistributions of source code must retain the above copyright notice, this
   list of conditions and the following disclaimer.
2. Redistributions in binary form must reproduce the above copyright notice,
   this list of conditions and the following disclaimer in the documentation
   and/or other materials provided with the distribution.

THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
@ -0,0 +1,25 @@
BSON library for Go

Copyright (c) 2010-2012 - Gustavo Niemeyer <gustavo@niemeyer.net>

All rights reserved.

Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:

1. Redistributions of source code must retain the above copyright notice, this
   list of conditions and the following disclaimer.
2. Redistributions in binary form must reproduce the above copyright notice,
   this list of conditions and the following disclaimer in the documentation
   and/or other materials provided with the distribution.

THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
@ -0,0 +1,12 @@
[![GoDoc](https://godoc.org/github.com/globalsign/mgo/bson?status.svg)](https://godoc.org/github.com/globalsign/mgo/bson)

An Implementation of BSON for Go
--------------------------------

Package bson is an implementation of the [BSON specification](http://bsonspec.org) for Go.

While the BSON package implements the BSON spec as faithfully as possible, there
is some MongoDB specific behaviour (such as map keys `$in`, `$all`, etc) in the
`bson` package. The priority is backwards compatibility for the `mgo`
driver, though fixes for obviously buggy behaviour are welcome (and features, etc
behind feature flags).
@ -0,0 +1,836 @@
// BSON library for Go
//
// Copyright (c) 2010-2012 - Gustavo Niemeyer <gustavo@niemeyer.net>
//
// All rights reserved.
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are met:
//
// 1. Redistributions of source code must retain the above copyright notice, this
//    list of conditions and the following disclaimer.
// 2. Redistributions in binary form must reproduce the above copyright notice,
//    this list of conditions and the following disclaimer in the documentation
//    and/or other materials provided with the distribution.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
// ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
// WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
// DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
// ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
// (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
// LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
// ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

// Package bson is an implementation of the BSON specification for Go:
//
//     http://bsonspec.org
//
// It was created as part of the mgo MongoDB driver for Go, but is standalone
// and may be used on its own without the driver.
package bson

import (
	"bytes"
	"crypto/md5"
	"crypto/rand"
	"encoding/binary"
	"encoding/hex"
	"encoding/json"
	"errors"
	"fmt"
	"io"
	"math"
	"os"
	"reflect"
	"runtime"
	"strings"
	"sync"
	"sync/atomic"
	"time"
)

//go:generate go run bson_corpus_spec_test_generator.go

// --------------------------------------------------------------------------
// The public API.

// Element types constants from BSON specification.
const (
	ElementFloat64                byte = 0x01
	ElementString                 byte = 0x02
	ElementDocument               byte = 0x03
	ElementArray                  byte = 0x04
	ElementBinary                 byte = 0x05
	Element06                     byte = 0x06
	ElementObjectId               byte = 0x07
	ElementBool                   byte = 0x08
	ElementDatetime               byte = 0x09
	ElementNil                    byte = 0x0A
	ElementRegEx                  byte = 0x0B
	ElementDBPointer              byte = 0x0C
	ElementJavaScriptWithoutScope byte = 0x0D
	ElementSymbol                 byte = 0x0E
	ElementJavaScriptWithScope    byte = 0x0F
	ElementInt32                  byte = 0x10
	ElementTimestamp              byte = 0x11
	ElementInt64                  byte = 0x12
	ElementDecimal128             byte = 0x13
	ElementMinKey                 byte = 0xFF
	ElementMaxKey                 byte = 0x7F

	BinaryGeneric     byte = 0x00
	BinaryFunction    byte = 0x01
	BinaryBinaryOld   byte = 0x02
	BinaryUUIDOld     byte = 0x03
	BinaryUUID        byte = 0x04
	BinaryMD5         byte = 0x05
	BinaryUserDefined byte = 0x80
)

// Getter interface: a value implementing the bson.Getter interface will have its GetBSON
// method called when the given value has to be marshalled, and the result
// of this method will be marshaled in place of the actual object.
//
// If GetBSON returns a non-nil error, the marshalling procedure
// will stop and error out with the provided value.
type Getter interface {
	GetBSON() (interface{}, error)
}

// Setter interface: a value implementing the bson.Setter interface will receive the BSON
// value via the SetBSON method during unmarshaling, and the object
// itself will not be changed as usual.
//
// If setting the value works, the method should return nil or alternatively
// bson.ErrSetZero to set the respective field to its zero value (nil for
// pointer types). If SetBSON returns a value of type bson.TypeError, the
// BSON value will be omitted from a map or slice being decoded and the
// unmarshalling will continue. If it returns any other non-nil error, the
// unmarshalling procedure will stop and error out with the provided value.
//
// This interface is generally useful in pointer receivers, since the method
// will want to change the receiver. A type field that implements the Setter
// interface doesn't have to be a pointer, though.
//
// Unlike the usual behavior, unmarshalling onto a value that implements a
// Setter interface will NOT reset the value to its zero state. This allows
// the value to decide by itself how to be unmarshalled.
//
// For example:
//
//     type MyString string
//
//     func (s *MyString) SetBSON(raw bson.Raw) error {
//         return raw.Unmarshal(s)
//     }
//
type Setter interface {
	SetBSON(raw Raw) error
}

// ErrSetZero may be returned from a SetBSON method to have the value set to
// its respective zero value. When used in pointer values, this will set the
// field to nil rather than to the pre-allocated value.
var ErrSetZero = errors.New("set to zero")

// M is a convenient alias for a map[string]interface{} map, useful for
// dealing with BSON in a native way. For instance:
//
//     bson.M{"a": 1, "b": true}
//
// There's no special handling for this type in addition to what's done anyway
// for an equivalent map type. Elements in the map will be dumped in an
// undefined order. See also the bson.D type for an ordered alternative.
type M map[string]interface{}

// D represents a BSON document containing ordered elements. For example:
//
//     bson.D{{"a", 1}, {"b", true}}
//
// In some situations, such as when creating indexes for MongoDB, the order in
// which the elements are defined is important. If the order is not important,
// using a map is generally more comfortable. See bson.M and bson.RawD.
type D []DocElem

// DocElem is an element of the bson.D document representation.
type DocElem struct {
	Name  string
	Value interface{}
}

// Map returns a map out of the ordered element name/value pairs in d.
func (d D) Map() (m M) {
	m = make(M, len(d))
	for _, item := range d {
		m[item.Name] = item.Value
	}
	return m
}
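// A minimal sketch (editor's illustration, not part of the vendored source):
// bson.D keeps elements in the order they were written, which matters e.g.
// for index definitions, while Map() flattens the pairs into an unordered
// bson.M once ordering no longer matters:
//
//	doc := bson.D{{"a", 1}, {"b", true}}
//	m := doc.Map() // bson.M{"a": 1, "b": true}; ordering information is lost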
// The Raw type represents raw unprocessed BSON documents and elements.
// Kind is the kind of element as defined per the BSON specification, and
// Data is the raw unprocessed data for the respective element.
// Using this type it is possible to unmarshal or marshal values partially.
//
// Relevant documentation:
//
//     http://bsonspec.org/#/specification
//
type Raw struct {
	Kind byte
	Data []byte
}

// RawD represents a BSON document containing raw unprocessed elements.
// This low-level representation may be useful when lazily processing
// documents of uncertain content, or when manipulating the raw content
// documents in general.
type RawD []RawDocElem

// RawDocElem is an element of the RawD document representation.
type RawDocElem struct {
	Name  string
	Value Raw
}

// ObjectId is a unique ID identifying a BSON value. It must be exactly 12 bytes
// long. MongoDB objects by default have such a property set in their "_id"
// property.
//
// http://www.mongodb.org/display/DOCS/Object+Ids
type ObjectId string

// ObjectIdHex returns an ObjectId from the provided hex representation.
// Calling this function with an invalid hex representation will
// cause a runtime panic. See the IsObjectIdHex function.
func ObjectIdHex(s string) ObjectId {
	d, err := hex.DecodeString(s)
	if err != nil || len(d) != 12 {
		panic(fmt.Sprintf("invalid input to ObjectIdHex: %q", s))
	}
	return ObjectId(d)
}

// IsObjectIdHex returns whether s is a valid hex representation of
// an ObjectId. See the ObjectIdHex function.
func IsObjectIdHex(s string) bool {
	if len(s) != 24 {
		return false
	}
	_, err := hex.DecodeString(s)
	return err == nil
}
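// A minimal sketch (editor's illustration, not part of the vendored source):
// pairing IsObjectIdHex with ObjectIdHex avoids the panic on malformed input:
//
//	s := "4d88e15b60f486e428412dc9" // 24 hex characters = 12 bytes
//	if bson.IsObjectIdHex(s) {
//		id := bson.ObjectIdHex(s)
//		fmt.Println(id.Hex()) // prints s back
//	}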
// objectIdCounter is atomically incremented when generating a new ObjectId
// using NewObjectId() function. It's used as a counter part of an id.
var objectIdCounter = readRandomUint32()

// readRandomUint32 returns a random objectIdCounter.
func readRandomUint32() uint32 {
	var b [4]byte
	_, err := io.ReadFull(rand.Reader, b[:])
	if err != nil {
		panic(fmt.Errorf("cannot read random object id: %v", err))
	}
	return uint32((uint32(b[0]) << 0) | (uint32(b[1]) << 8) | (uint32(b[2]) << 16) | (uint32(b[3]) << 24))
}

// machineId stores machine id generated once and used in subsequent calls
// to NewObjectId function.
var machineId = readMachineId()
var processId = os.Getpid()

// readMachineId generates and returns a machine id.
// If this function fails to get the hostname it will cause a runtime error.
func readMachineId() []byte {
	var sum [3]byte
	id := sum[:]
	hostname, err1 := os.Hostname()
	if err1 != nil {
		_, err2 := io.ReadFull(rand.Reader, id)
		if err2 != nil {
			panic(fmt.Errorf("cannot get hostname: %v; %v", err1, err2))
		}
		return id
	}
	hw := md5.New()
	hw.Write([]byte(hostname))
	copy(id, hw.Sum(nil))
	return id
}

// NewObjectId returns a new unique ObjectId.
func NewObjectId() ObjectId {
	var b [12]byte
	// Timestamp, 4 bytes, big endian
	binary.BigEndian.PutUint32(b[:], uint32(time.Now().Unix()))
	// Machine, first 3 bytes of md5(hostname)
	b[4] = machineId[0]
	b[5] = machineId[1]
	b[6] = machineId[2]
	// Pid, 2 bytes, specs don't specify endianness, but we use big endian.
	b[7] = byte(processId >> 8)
	b[8] = byte(processId)
	// Increment, 3 bytes, big endian
	i := atomic.AddUint32(&objectIdCounter, 1)
	b[9] = byte(i >> 16)
	b[10] = byte(i >> 8)
	b[11] = byte(i)
	return ObjectId(b[:])
}
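// A minimal sketch (editor's illustration, not part of the vendored source):
// a freshly generated id embeds its creation time, which Time() (defined
// below) recovers from the leading 4 bytes:
//
//	id := bson.NewObjectId()
//	fmt.Println(id.Valid())       // true: exactly 12 bytes
//	fmt.Println(id.Time().Unix()) // roughly time.Now().Unix()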
// NewObjectIdWithTime returns a dummy ObjectId with the timestamp part filled
// with the provided number of seconds from epoch UTC, and all other parts
// filled with zeroes. It's not safe to insert a document with an id generated
// by this method; it is useful only for queries to find documents with ids
// generated before or after the specified timestamp.
func NewObjectIdWithTime(t time.Time) ObjectId {
	var b [12]byte
	binary.BigEndian.PutUint32(b[:4], uint32(t.Unix()))
	return ObjectId(string(b[:]))
}

// String returns a hex string representation of the id.
// Example: ObjectIdHex("4d88e15b60f486e428412dc9").
func (id ObjectId) String() string {
	return fmt.Sprintf(`ObjectIdHex("%x")`, string(id))
}

// Hex returns a hex representation of the ObjectId.
func (id ObjectId) Hex() string {
	return hex.EncodeToString([]byte(id))
}

// MarshalJSON turns a bson.ObjectId into a json.Marshaller.
func (id ObjectId) MarshalJSON() ([]byte, error) {
	return []byte(fmt.Sprintf(`"%x"`, string(id))), nil
}

var nullBytes = []byte("null")

// UnmarshalJSON turns *bson.ObjectId into a json.Unmarshaller.
func (id *ObjectId) UnmarshalJSON(data []byte) error {
	if len(data) > 0 && (data[0] == '{' || data[0] == 'O') {
		var v struct {
			Id   json.RawMessage `json:"$oid"`
			Func struct {
				Id json.RawMessage
			} `json:"$oidFunc"`
		}
		err := jdec(data, &v)
		if err == nil {
			if len(v.Id) > 0 {
				data = []byte(v.Id)
			} else {
				data = []byte(v.Func.Id)
			}
		}
	}
	if len(data) == 2 && data[0] == '"' && data[1] == '"' || bytes.Equal(data, nullBytes) {
		*id = ""
		return nil
	}
	if len(data) != 26 || data[0] != '"' || data[25] != '"' {
		return fmt.Errorf("invalid ObjectId in JSON: %s", string(data))
	}
	var buf [12]byte
	_, err := hex.Decode(buf[:], data[1:25])
	if err != nil {
		return fmt.Errorf("invalid ObjectId in JSON: %s (%s)", string(data), err)
	}
	*id = ObjectId(string(buf[:]))
	return nil
}

// MarshalText turns bson.ObjectId into an encoding.TextMarshaler.
func (id ObjectId) MarshalText() ([]byte, error) {
	return []byte(fmt.Sprintf("%x", string(id))), nil
}

// UnmarshalText turns *bson.ObjectId into an encoding.TextUnmarshaler.
func (id *ObjectId) UnmarshalText(data []byte) error {
	if len(data) == 1 && data[0] == ' ' || len(data) == 0 {
		*id = ""
		return nil
	}
	if len(data) != 24 {
		return fmt.Errorf("invalid ObjectId: %s", data)
	}
	var buf [12]byte
	_, err := hex.Decode(buf[:], data[:])
	if err != nil {
		return fmt.Errorf("invalid ObjectId: %s (%s)", data, err)
	}
	*id = ObjectId(string(buf[:]))
	return nil
}

// Valid returns true if id is valid. A valid id must contain exactly 12 bytes.
func (id ObjectId) Valid() bool {
	return len(id) == 12
}

// byteSlice returns byte slice of id from start to end.
// Calling this function with an invalid id will cause a runtime panic.
func (id ObjectId) byteSlice(start, end int) []byte {
	if len(id) != 12 {
		panic(fmt.Sprintf("invalid ObjectId: %q", string(id)))
	}
	return []byte(string(id)[start:end])
}

// Time returns the timestamp part of the id.
// It's a runtime error to call this method with an invalid id.
func (id ObjectId) Time() time.Time {
	// First 4 bytes of ObjectId is 32-bit big-endian seconds from epoch.
	secs := int64(binary.BigEndian.Uint32(id.byteSlice(0, 4)))
	return time.Unix(secs, 0)
}

// Machine returns the 3-byte machine id part of the id.
// It's a runtime error to call this method with an invalid id.
func (id ObjectId) Machine() []byte {
	return id.byteSlice(4, 7)
}

// Pid returns the process id part of the id.
// It's a runtime error to call this method with an invalid id.
func (id ObjectId) Pid() uint16 {
	return binary.BigEndian.Uint16(id.byteSlice(7, 9))
}

// Counter returns the incrementing value part of the id.
// It's a runtime error to call this method with an invalid id.
func (id ObjectId) Counter() int32 {
	b := id.byteSlice(9, 12)
	// Counter is stored as big-endian 3-byte value
	return int32(uint32(b[0])<<16 | uint32(b[1])<<8 | uint32(b[2]))
}

// The Symbol type is similar to a string and is used in languages with a
// distinct symbol type.
type Symbol string

// Now returns the current time with millisecond precision. MongoDB stores
// timestamps with the same precision, so a Time returned from this method
// will not change after a roundtrip to the database. That's the only reason
// why this function exists. Using the time.Now function also works fine
// otherwise.
func Now() time.Time {
	return time.Unix(0, time.Now().UnixNano()/1e6*1e6)
}

// MongoTimestamp is a special internal type used by MongoDB that for some
// strange reason has its own datatype defined in BSON.
type MongoTimestamp int64

// Time returns the time part of ts which is stored with second precision.
func (ts MongoTimestamp) Time() time.Time {
	return time.Unix(int64(uint64(ts)>>32), 0)
}

// Counter returns the counter part of ts.
func (ts MongoTimestamp) Counter() uint32 {
	return uint32(ts)
}

// NewMongoTimestamp creates a timestamp using the given
// date `t` (with second precision) and counter `c` (unique for `t`).
//
// Returns an error if time `t` is not between 1970-01-01T00:00:00Z
// and 2106-02-07T06:28:15Z (inclusive).
//
// Note that two MongoTimestamps should never have the same (time, counter) combination:
// the caller must ensure the counter `c` is increased if creating multiple MongoTimestamp
// values for the same time `t` (ignoring fractions of seconds).
func NewMongoTimestamp(t time.Time, c uint32) (MongoTimestamp, error) {
	u := t.Unix()
	if u < 0 || u > math.MaxUint32 {
		return -1, errors.New("invalid value for time")
	}

	i := int64(u<<32 | int64(c))

	return MongoTimestamp(i), nil
}
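// A minimal sketch (editor's illustration, not part of the vendored source):
// callers are responsible for bumping the counter when issuing several
// timestamps within the same second:
//
//	now := time.Now()
//	ts1, _ := bson.NewMongoTimestamp(now, 1)
//	ts2, _ := bson.NewMongoTimestamp(now, 2) // same second, distinct counter
//	_ = ts1.Time().Equal(ts2.Time())         // true; only Counter() differs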
type orderKey int64

// MaxKey is a special value that compares higher than all other possible BSON
// values in a MongoDB database.
var MaxKey = orderKey(1<<63 - 1)

// MinKey is a special value that compares lower than all other possible BSON
// values in a MongoDB database.
var MinKey = orderKey(-1 << 63)

type undefined struct{}

// Undefined represents the undefined BSON value.
var Undefined undefined

// Binary is a representation for non-standard binary values. Any kind should
// work, but the following are known as of this writing:
//
//     0x00 - Generic. This is decoded as []byte(data), not Binary{0x00, data}.
//     0x01 - Function (!?)
//     0x02 - Obsolete generic.
//     0x03 - UUID
//     0x05 - MD5
//     0x80 - User defined.
//
type Binary struct {
	Kind byte
	Data []byte
}

// RegEx represents a regular expression. The Options field may contain
// individual characters defining the way in which the pattern should be
// applied, and must be sorted. Valid options as of this writing are 'i' for
// case insensitive matching, 'm' for multi-line matching, 'x' for verbose
// mode, 'l' to make \w, \W, and similar be locale-dependent, 's' for dot-all
// mode (a '.' matches everything), and 'u' to make \w, \W, and similar match
// unicode. The value of the Options parameter is not verified before being
// marshaled into the BSON format.
type RegEx struct {
	Pattern string
	Options string
}

// JavaScript is a type that holds JavaScript code. If Scope is non-nil, it
// will be marshaled as a mapping from identifiers to values that may be
// used when evaluating the provided Code.
type JavaScript struct {
	Code  string
	Scope interface{}
}

// DBPointer refers to a document id in a namespace.
//
// This type is deprecated in the BSON specification and should not be used
// except for backwards compatibility with ancient applications.
type DBPointer struct {
	Namespace string
	Id        ObjectId
}

const initialBufferSize = 64

func handleErr(err *error) {
	if r := recover(); r != nil {
		if _, ok := r.(runtime.Error); ok {
			panic(r)
		} else if _, ok := r.(externalPanic); ok {
			panic(r)
		} else if s, ok := r.(string); ok {
			*err = errors.New(s)
		} else if e, ok := r.(error); ok {
			*err = e
		} else {
			panic(r)
		}
	}
}

// Marshal serializes the in value, which may be a map or a struct value.
// In the case of struct values, only exported fields will be serialized,
// and the order of serialized fields will match that of the struct itself.
// The lowercased field name is used as the key for each exported field,
// but this behavior may be changed using the respective field tag.
// The tag may also contain flags to tweak the marshalling behavior for
// the field. The tag formats accepted are:
//
//     "[<key>][,<flag1>[,<flag2>]]"
//
//     `(...) bson:"[<key>][,<flag1>[,<flag2>]]" (...)`
//
// The following flags are currently supported:
//
//     omitempty  Only include the field if it's not set to the zero
//                value for the type or to empty slices or maps.
//
//     minsize    Marshal an int64 value as an int32, if that's feasible
//                while preserving the numeric value.
//
//     inline     Inline the field, which must be a struct or a map,
//                causing all of its fields or keys to be processed as if
//                they were part of the outer struct. For maps, keys must
//                not conflict with the bson keys of other struct fields.
//
// Some examples:
//
//     type T struct {
//         A bool
//         B int    "myb"
//         C string "myc,omitempty"
//         D string `bson:",omitempty" json:"jsonkey"`
//         E int64  ",minsize"
//         F int64  "myf,omitempty,minsize"
//     }
//
func Marshal(in interface{}) (out []byte, err error) {
	return MarshalBuffer(in, make([]byte, 0, initialBufferSize))
}

// MarshalBuffer behaves the same way as Marshal, except that instead of
// allocating a new byte slice it tries to use the received byte slice and
// only allocates more memory if necessary to fit the marshaled value.
func MarshalBuffer(in interface{}, buf []byte) (out []byte, err error) {
	defer handleErr(&err)
	e := &encoder{buf}
	e.addDoc(reflect.ValueOf(in))
	return e.out, nil
}
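// A minimal sketch (editor's illustration, not part of the vendored source)
// of the tag flags documented above in action; `Person` is a hypothetical type:
//
//	type Person struct {
//		Name  string `bson:"name"`
//		Email string `bson:"email,omitempty"` // dropped when empty
//	}
//	data, err := bson.Marshal(Person{Name: "Ada"})
//	// data now encodes {"name": "Ada"}; the empty email field was omitted.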
// Unmarshal deserializes data from in into the out value. The out value
// must be a map, a pointer to a struct, or a pointer to a bson.D value.
// In the case of struct values, only exported fields will be deserialized.
// The lowercased field name is used as the key for each exported field,
// but this behavior may be changed using the respective field tag.
// The tag may also contain flags to tweak the marshalling behavior for
// the field. The tag formats accepted are:
//
//     "[<key>][,<flag1>[,<flag2>]]"
//
//     `(...) bson:"[<key>][,<flag1>[,<flag2>]]" (...)`
//
// The following flags are currently supported during unmarshal (see the
// Marshal method for other flags):
//
//     inline     Inline the field, which must be a struct or a map.
//                Inlined structs are handled as if its fields were part
//                of the outer struct. An inlined map causes keys that do
//                not match any other struct field to be inserted in the
//                map rather than being discarded as usual.
//
// The target field or element types of out may not necessarily match
// the BSON values of the provided data. The following conversions are
// made automatically:
//
//     - Numeric types are converted if at least the integer part of the
//       value would be preserved correctly
//     - Bools are converted to numeric types as 1 or 0
//     - Numeric types are converted to bools as true if not 0 or false otherwise
//     - Binary and string BSON data is converted to a string, array or byte slice
//
// If the value would not fit the type and cannot be converted, it's
// silently skipped.
//
// Pointer values are initialized when necessary.
func Unmarshal(in []byte, out interface{}) (err error) {
	if raw, ok := out.(*Raw); ok {
		raw.Kind = 3
		raw.Data = in
		return nil
	}
	defer handleErr(&err)
	v := reflect.ValueOf(out)
	switch v.Kind() {
	case reflect.Ptr:
		fallthrough
	case reflect.Map:
		d := newDecoder(in)
		d.readDocTo(v)
		if d.i < len(d.in) {
			return errors.New("document is corrupted")
		}
	case reflect.Struct:
		return errors.New("unmarshal can't deal with struct values. Use a pointer")
	default:
		return errors.New("unmarshal needs a map or a pointer to a struct")
	}
	return nil
}

// Unmarshal deserializes raw into the out value. If the out value type
// is not compatible with raw, a *bson.TypeError is returned.
//
// See the Unmarshal function documentation for more details on the
// unmarshalling process.
func (raw Raw) Unmarshal(out interface{}) (err error) {
	defer handleErr(&err)
	v := reflect.ValueOf(out)
	switch v.Kind() {
	case reflect.Ptr:
		v = v.Elem()
		fallthrough
	case reflect.Map:
		d := newDecoder(raw.Data)
		good := d.readElemTo(v, raw.Kind)
		if !good {
			return &TypeError{v.Type(), raw.Kind}
		}
	case reflect.Struct:
		return errors.New("raw Unmarshal can't deal with struct values. Use a pointer")
	default:
		return errors.New("raw Unmarshal needs a map or a valid pointer")
	}
	return nil
}
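// A minimal sketch (editor's illustration, not part of the vendored source):
// bson.Raw defers decoding of a field until its concrete type is known; the
// envelope layout here is an assumption for illustration:
//
//	var envelope struct {
//		Kind    string   `bson:"kind"`
//		Payload bson.Raw `bson:"payload"`
//	}
//	if err := bson.Unmarshal(data, &envelope); err == nil && envelope.Kind == "point" {
//		var p struct{ X, Y int }
//		err = envelope.Payload.Unmarshal(&p)
//	}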
// TypeError stores details for a type error occurring
// during unmarshaling.
type TypeError struct {
	Type reflect.Type
	Kind byte
}

func (e *TypeError) Error() string {
	return fmt.Sprintf("BSON kind 0x%02x isn't compatible with type %s", e.Kind, e.Type.String())
}

// --------------------------------------------------------------------------
// Maintain a mapping of keys to structure field indexes

type structInfo struct {
	FieldsMap  map[string]fieldInfo
	FieldsList []fieldInfo
	InlineMap  int
	Zero       reflect.Value
}

type fieldInfo struct {
	Key       string
	Num       int
	OmitEmpty bool
	MinSize   bool
	Inline    []int
}

var structMap = make(map[reflect.Type]*structInfo)
var structMapMutex sync.RWMutex

type externalPanic string

func (e externalPanic) String() string {
	return string(e)
}

func getStructInfo(st reflect.Type) (*structInfo, error) {
	structMapMutex.RLock()
	sinfo, found := structMap[st]
	structMapMutex.RUnlock()
	if found {
		return sinfo, nil
	}
	n := st.NumField()
	fieldsMap := make(map[string]fieldInfo)
	fieldsList := make([]fieldInfo, 0, n)
	inlineMap := -1
	for i := 0; i != n; i++ {
		field := st.Field(i)
		if field.PkgPath != "" && !field.Anonymous {
			continue // Private field
		}

		info := fieldInfo{Num: i}

		tag := field.Tag.Get("bson")

		// Fall-back to JSON struct tag, if feature flag is set.
		if tag == "" && useJSONTagFallback {
			tag = field.Tag.Get("json")
		}

		// If there's no bson/json tag available.
		if tag == "" {
			// If there's no tag, and also no tag: value splits (i.e. no colon)
			// then assume the entire tag is the value
			if strings.Index(string(field.Tag), ":") < 0 {
				tag = string(field.Tag)
			}
		}

		if tag == "-" {
			continue
		}

		inline := false
		fields := strings.Split(tag, ",")
		if len(fields) > 1 {
			for _, flag := range fields[1:] {
				switch flag {
				case "omitempty":
					info.OmitEmpty = true
				case "minsize":
					info.MinSize = true
				case "inline":
					inline = true
				default:
					msg := fmt.Sprintf("Unsupported flag %q in tag %q of type %s", flag, tag, st)
					panic(externalPanic(msg))
				}
			}
			tag = fields[0]
		}

		if inline {
			switch field.Type.Kind() {
			case reflect.Map:
				if inlineMap >= 0 {
					return nil, errors.New("Multiple ,inline maps in struct " + st.String())
				}
				if field.Type.Key() != reflect.TypeOf("") {
					return nil, errors.New("Option ,inline needs a map with string keys in struct " + st.String())
				}
				inlineMap = info.Num
			case reflect.Ptr:
				// allow only pointer to struct
				if kind := field.Type.Elem().Kind(); kind != reflect.Struct {
					return nil, errors.New("Option ,inline allows a pointer only to a struct, was given pointer to " + kind.String())
				}

				field.Type = field.Type.Elem()
				fallthrough
			case reflect.Struct:
				sinfo, err := getStructInfo(field.Type)
				if err != nil {
					return nil, err
				}
				for _, finfo := range sinfo.FieldsList {
					if _, found := fieldsMap[finfo.Key]; found {
						msg := "Duplicated key '" + finfo.Key + "' in struct " + st.String()
						return nil, errors.New(msg)
					}
					if finfo.Inline == nil {
						finfo.Inline = []int{i, finfo.Num}
					} else {
						finfo.Inline = append([]int{i}, finfo.Inline...)
					}
					fieldsMap[finfo.Key] = finfo
					fieldsList = append(fieldsList, finfo)
				}
			default:
				panic("Option ,inline needs a struct value or a pointer to a struct or map field")
			}
			continue
		}

		if tag != "" {
			info.Key = tag
		} else {
			info.Key = strings.ToLower(field.Name)
		}

		if _, found = fieldsMap[info.Key]; found {
			msg := "Duplicated key '" + info.Key + "' in struct " + st.String()
			return nil, errors.New(msg)
		}

		fieldsList = append(fieldsList, info)
		fieldsMap[info.Key] = info
	}
	sinfo = &structInfo{
		fieldsMap,
		fieldsList,
		inlineMap,
		reflect.New(st).Elem(),
	}
	structMapMutex.Lock()
	structMap[st] = sinfo
	structMapMutex.Unlock()
	return sinfo, nil
}
294 vendor/github.com/globalsign/mgo/bson/bson_corpus_spec_test_generator.go generated vendored Normal file
@ -0,0 +1,294 @@
// +build ignore

package main

import (
	"bytes"
	"fmt"
	"go/format"
	"html/template"
	"io/ioutil"
	"log"
	"path/filepath"
	"strings"

	"github.com/globalsign/mgo/internal/json"
)

func main() {
	log.SetFlags(0)
	log.SetPrefix(name + ": ")

	var g Generator

	fmt.Fprintf(&g, "// Code generated by \"%s.go\"; DO NOT EDIT\n\n", name)

	src := g.generate()

	err := ioutil.WriteFile(fmt.Sprintf("%s.go", strings.TrimSuffix(name, "_generator")), src, 0644)
	if err != nil {
		log.Fatalf("writing output: %s", err)
	}
}

// Generator holds the state of the analysis. Primarily used to buffer
// the output for format.Source.
type Generator struct {
	bytes.Buffer // Accumulated output.
}

// format returns the gofmt-ed contents of the Generator's buffer.
func (g *Generator) format() []byte {
	src, err := format.Source(g.Bytes())
	if err != nil {
		// Should never happen, but can arise when developing this code.
		// The user can compile the output to see the error.
		log.Printf("warning: internal error: invalid Go generated: %s", err)
		log.Printf("warning: compile the package to analyze the error")
		return g.Bytes()
	}
	return src
}

// EVERYTHING ABOVE IS CONSTANT BETWEEN THE GENERATORS

const name = "bson_corpus_spec_test_generator"

func (g *Generator) generate() []byte {

	testFiles, err := filepath.Glob("./specdata/specifications/source/bson-corpus/tests/*.json")
	if err != nil {
		log.Fatalf("error reading bson-corpus files: %s", err)
	}

	tests, err := g.loadTests(testFiles)
	if err != nil {
		log.Fatalf("error loading tests: %s", err)
	}

	tmpl, err := g.getTemplate()
	if err != nil {
		log.Fatalf("error loading template: %s", err)
	}

	tmpl.Execute(&g.Buffer, tests)

	return g.format()
}

func (g *Generator) loadTests(filenames []string) ([]*testDef, error) {
	var tests []*testDef
	for _, filename := range filenames {
		test, err := g.loadTest(filename)
		if err != nil {
			return nil, err
		}

		tests = append(tests, test)
	}

	return tests, nil
}

func (g *Generator) loadTest(filename string) (*testDef, error) {
	content, err := ioutil.ReadFile(filename)
	if err != nil {
		return nil, err
	}

	var testDef testDef
	err = json.Unmarshal(content, &testDef)
	if err != nil {
		return nil, err
	}

	names := make(map[string]struct{})

	for i := len(testDef.Valid) - 1; i >= 0; i-- {
		if testDef.BsonType == "0x05" && testDef.Valid[i].Description == "subtype 0x02" {
			testDef.Valid = append(testDef.Valid[:i], testDef.Valid[i+1:]...)
			continue
		}

		name := cleanupFuncName(testDef.Description + "_" + testDef.Valid[i].Description)
		nameIdx := name
		j := 1
		for {
			if _, ok := names[nameIdx]; !ok {
				break
			}

			nameIdx = fmt.Sprintf("%s_%d", name, j)
			j++
		}

		names[nameIdx] = struct{}{}

		testDef.Valid[i].TestDef = &testDef
		testDef.Valid[i].Name = nameIdx
		testDef.Valid[i].StructTest = testDef.TestKey != "" &&
			(testDef.BsonType != "0x05" || strings.Contains(testDef.Valid[i].Description, "0x00")) &&
			!testDef.Deprecated
	}

	for i := len(testDef.DecodeErrors) - 1; i >= 0; i-- {
		if strings.Contains(testDef.DecodeErrors[i].Description, "UTF-8") {
			testDef.DecodeErrors = append(testDef.DecodeErrors[:i], testDef.DecodeErrors[i+1:]...)
			continue
		}

		name := cleanupFuncName(testDef.Description + "_" + testDef.DecodeErrors[i].Description)
		nameIdx := name
		j := 1
		for {
			if _, ok := names[nameIdx]; !ok {
				break
			}

			nameIdx = fmt.Sprintf("%s_%d", name, j)
			j++
		}
		names[nameIdx] = struct{}{}

		testDef.DecodeErrors[i].Name = nameIdx
	}

	return &testDef, nil
}

func (g *Generator) getTemplate() (*template.Template, error) {
	content := `package bson_test

import (
	"encoding/hex"
	"time"

	. "gopkg.in/check.v1"
	"github.com/globalsign/mgo/bson"
)

func testValid(c *C, in []byte, expected []byte, result interface{}) {
	err := bson.Unmarshal(in, result)
	c.Assert(err, IsNil)

	out, err := bson.Marshal(result)
	c.Assert(err, IsNil)

	c.Assert(string(expected), Equals, string(out), Commentf("roundtrip failed for %T, expected '%x' but got '%x'", result, expected, out))
}

func testDecodeSkip(c *C, in []byte) {
	err := bson.Unmarshal(in, &struct{}{})
	c.Assert(err, IsNil)
}

func testDecodeError(c *C, in []byte, result interface{}) {
	err := bson.Unmarshal(in, result)
	c.Assert(err, Not(IsNil))
}

{{range .}}
{{range .Valid}}
func (s *S) Test{{.Name}}(c *C) {
	b, err := hex.DecodeString("{{.Bson}}")
	c.Assert(err, IsNil)

	{{if .CanonicalBson}}
	cb, err := hex.DecodeString("{{.CanonicalBson}}")
	c.Assert(err, IsNil)
	{{else}}
	cb := b
	{{end}}

	var resultD bson.D
	testValid(c, b, cb, &resultD)
	{{if .StructTest}}var resultS struct {
		Element {{.TestDef.GoType}} ` + "`bson:\"{{.TestDef.TestKey}}\"`" + `
	}
	testValid(c, b, cb, &resultS){{end}}

	testDecodeSkip(c, b)
}
{{end}}

{{range .DecodeErrors}}
func (s *S) Test{{.Name}}(c *C) {
	b, err := hex.DecodeString("{{.Bson}}")
	c.Assert(err, IsNil)

	var resultD bson.D
	testDecodeError(c, b, &resultD)
}
{{end}}
{{end}}
`
	tmpl, err := template.New("").Parse(content)
	if err != nil {
		return nil, err
	}
	return tmpl, nil
}

func cleanupFuncName(name string) string {
	return strings.Map(func(r rune) rune {
		if (r >= 48 && r <= 57) || (r >= 65 && r <= 90) || (r >= 97 && r <= 122) {
			return r
		}
		return '_'
	}, name)
}

type testDef struct {
	Description  string         `json:"description"`
	BsonType     string         `json:"bson_type"`
	TestKey      string         `json:"test_key"`
	Valid        []*valid       `json:"valid"`
	DecodeErrors []*decodeError `json:"decodeErrors"`
	Deprecated   bool           `json:"deprecated"`
}

func (t *testDef) GoType() string {
	switch t.BsonType {
	case "0x01":
		return "float64"
	case "0x02":
		return "string"
	case "0x03":
		return "bson.D"
	case "0x04":
		return "[]interface{}"
	case "0x05":
		return "[]byte"
	case "0x07":
		return "bson.ObjectId"
	case "0x08":
		return "bool"
	case "0x09":
		return "time.Time"
	case "0x0E":
		return "string"
	case "0x10":
		return "int32"
	case "0x12":
		return "int64"
	case "0x13":
		return "bson.Decimal"
	default:
		return "interface{}"
	}
}

type valid struct {
	Description   string `json:"description"`
	Bson          string `json:"bson"`
	CanonicalBson string `json:"canonical_bson"`

	Name       string
	StructTest bool
	TestDef    *testDef
}

type decodeError struct {
	Description string `json:"description"`
	Bson        string `json:"bson"`

	Name string
}
@ -0,0 +1,29 @@
package bson

// Current state of the JSON tag fallback option.
var useJSONTagFallback = false
var useRespectNilValues = false

// SetJSONTagFallback enables or disables the JSON-tag fallback for structure tagging. When this is enabled, structures
// without BSON tags on a field will fall-back to using the JSON tag (if present).
func SetJSONTagFallback(state bool) {
	useJSONTagFallback = state
}

// JSONTagFallbackState returns the current status of the JSON tag fallback compatibility option. See SetJSONTagFallback
// for more information.
func JSONTagFallbackState() bool {
	return useJSONTagFallback
}

// SetRespectNilValues enables or disables serializing nil slices or maps to `null` values.
// In other words it enables `encoding/json` compatible behaviour.
func SetRespectNilValues(state bool) {
	useRespectNilValues = state
}

// RespectNilValuesState returns the current status of the JSON nil slices and maps fallback compatibility option.
// See SetRespectNilValues for more information.
func RespectNilValuesState() bool {
	return useRespectNilValues
}
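// A minimal sketch (editor's illustration, not part of the vendored source):
// both switches are package-level state and are therefore best flipped once
// at program start, before any (un)marshalling happens:
//
//	func init() {
//		bson.SetJSONTagFallback(true)  // reuse existing `json:"..."` tags
//		bson.SetRespectNilValues(true) // nil slices/maps encode like encoding/json
//	}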
@ -0,0 +1,312 @@
// BSON library for Go
//
// Copyright (c) 2010-2012 - Gustavo Niemeyer <gustavo@niemeyer.net>
//
// All rights reserved.
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are met:
//
// 1. Redistributions of source code must retain the above copyright notice, this
//    list of conditions and the following disclaimer.
// 2. Redistributions in binary form must reproduce the above copyright notice,
//    this list of conditions and the following disclaimer in the documentation
//    and/or other materials provided with the distribution.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
// ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
// WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
// DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
// ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
// (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
// LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
// ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

package bson

import (
	"fmt"
	"strconv"
	"strings"
)

// Decimal128 holds decimal128 BSON values.
type Decimal128 struct {
	h, l uint64
}

func (d Decimal128) String() string {
	var pos int     // positive sign
	var e int       // exponent
	var h, l uint64 // significand high/low

	if d.h>>63&1 == 0 {
		pos = 1
	}

	switch d.h >> 58 & (1<<5 - 1) {
	case 0x1F:
		return "NaN"
	case 0x1E:
		return "-Inf"[pos:]
	}

	l = d.l
	if d.h>>61&3 == 3 {
		// Bits: 1*sign 2*ignored 14*exponent 111*significand.
		// Implicit 0b100 prefix in significand.
		e = int(d.h>>47&(1<<14-1)) - 6176
		//h = 4<<47 | d.h&(1<<47-1)
		// Spec says all of these values are out of range.
		h, l = 0, 0
	} else {
		// Bits: 1*sign 14*exponent 113*significand
		e = int(d.h>>49&(1<<14-1)) - 6176
		h = d.h & (1<<49 - 1)
	}

	// Would be handled by the logic below, but that's trivial and common.
	if h == 0 && l == 0 && e == 0 {
		return "-0"[pos:]
	}

	var repr [48]byte // Loop 5 times over 9 digits plus dot, negative sign, and leading zero.
	var last = len(repr)
	var i = len(repr)
	var dot = len(repr) + e
	var rem uint32
Loop:
	for d9 := 0; d9 < 5; d9++ {
		h, l, rem = divmod(h, l, 1e9)
		for d1 := 0; d1 < 9; d1++ {
			// Handle "-0.0", "0.00123400", "-1.00E-6", "1.050E+3", etc.
			if i < len(repr) && (dot == i || l == 0 && h == 0 && rem > 0 && rem < 10 && (dot < i-6 || e > 0)) {
				e += len(repr) - i
				i--
				repr[i] = '.'
				last = i - 1
				dot = len(repr) // Unmark.
			}
			c := '0' + byte(rem%10)
			rem /= 10
			i--
			repr[i] = c
			// Handle "0E+3", "1E+3", etc.
			if l == 0 && h == 0 && rem == 0 && i == len(repr)-1 && (dot < i-5 || e > 0) {
				last = i
				break Loop
			}
			if c != '0' {
				last = i
			}
			// Break early. Works without it, but why.
			if dot > i && l == 0 && h == 0 && rem == 0 {
				break Loop
			}
		}
	}
	repr[last-1] = '-'
	last--

	if e > 0 {
		return string(repr[last+pos:]) + "E+" + strconv.Itoa(e)
	}
	if e < 0 {
		return string(repr[last+pos:]) + "E" + strconv.Itoa(e)
	}
	return string(repr[last+pos:])
}

func divmod(h, l uint64, div uint32) (qh, ql uint64, rem uint32) {
	div64 := uint64(div)
	a := h >> 32
	aq := a / div64
	ar := a % div64
	b := ar<<32 + h&(1<<32-1)
	bq := b / div64
	br := b % div64
	c := br<<32 + l>>32
	cq := c / div64
	cr := c % div64
	d := cr<<32 + l&(1<<32-1)
	dq := d / div64
	dr := d % div64
	return (aq<<32 | bq), (cq<<32 | dq), uint32(dr)
}

var dNaN = Decimal128{0x1F << 58, 0}
var dPosInf = Decimal128{0x1E << 58, 0}
var dNegInf = Decimal128{0x3E << 58, 0}

func dErr(s string) (Decimal128, error) {
	return dNaN, fmt.Errorf("cannot parse %q as a decimal128", s)
}

// ParseDecimal128 parses a string and returns the corresponding value as
// a Decimal128.
func ParseDecimal128(s string) (Decimal128, error) {
	orig := s
	if s == "" {
		return dErr(orig)
	}
	neg := s[0] == '-'
	if neg || s[0] == '+' {
		s = s[1:]
	}

	if (len(s) == 3 || len(s) == 8) && (s[0] == 'N' || s[0] == 'n' || s[0] == 'I' || s[0] == 'i') {
		if s == "NaN" || s == "nan" || strings.EqualFold(s, "nan") {
			return dNaN, nil
		}
		if s == "Inf" || s == "inf" || strings.EqualFold(s, "inf") || strings.EqualFold(s, "infinity") {
			if neg {
				return dNegInf, nil
			}
			return dPosInf, nil
		}
		return dErr(orig)
	}

	var h, l uint64
	var e int

	var add, ovr uint32
	var mul uint32 = 1
	var dot = -1
	var digits = 0
	var i = 0
	for i < len(s) {
		c := s[i]
		if mul == 1e9 {
			h, l, ovr = muladd(h, l, mul, add)
			mul, add = 1, 0
			if ovr > 0 || h&((1<<15-1)<<49) > 0 {
				return dErr(orig)
			}
		}
		if c >= '0' && c <= '9' {
			i++
			if c > '0' || digits > 0 {
				digits++
			}
			if digits > 34 {
				if c == '0' {
					// Exact rounding.
					e++
					continue
				}
				return dErr(orig)
			}
			mul *= 10
			add *= 10
			add += uint32(c - '0')
			continue
		}
		if c == '.' {
			i++
			if dot >= 0 || i == 1 && len(s) == 1 {
				return dErr(orig)
			}
			if i == len(s) {
				break
			}
			if s[i] < '0' || s[i] > '9' || e > 0 {
				return dErr(orig)
			}
			dot = i
			continue
		}
		break
	}
	if i == 0 {
		return dErr(orig)
	}
	if mul > 1 {
		h, l, ovr = muladd(h, l, mul, add)
		if ovr > 0 || h&((1<<15-1)<<49) > 0 {
			return dErr(orig)
		}
	}
	if dot >= 0 {
		e += dot - i
	}
	if i+1 < len(s) && (s[i] == 'E' || s[i] == 'e') {
		i++
		eneg := s[i] == '-'
		if eneg || s[i] == '+' {
			i++
			if i == len(s) {
				return dErr(orig)
			}
		}
		n := 0
		for i < len(s) && n < 1e4 {
			c := s[i]
			i++
			if c < '0' || c > '9' {
				return dErr(orig)
			}
			n *= 10
			n += int(c - '0')
		}
		if eneg {
			n = -n
		}
		e += n
		for e < -6176 {
			// Subnormal.
			var div uint32 = 1
			for div < 1e9 && e < -6176 {
				div *= 10
				e++
			}
			var rem uint32
			h, l, rem = divmod(h, l, div)
			if rem > 0 {
				return dErr(orig)
			}
		}
		for e > 6111 {
			// Clamped.
			var mul uint32 = 1
			for mul < 1e9 && e > 6111 {
				mul *= 10
				e--
			}
			h, l, ovr = muladd(h, l, mul, 0)
			if ovr > 0 || h&((1<<15-1)<<49) > 0 {
				return dErr(orig)
			}
		}
		if e < -6176 || e > 6111 {
			return dErr(orig)
		}
	}

	if i < len(s) {
		return dErr(orig)
	}

	h |= uint64(e+6176) & uint64(1<<14-1) << 49
	if neg {
		h |= 1 << 63
	}
	return Decimal128{h, l}, nil
}
||||
|
||||
func muladd(h, l uint64, mul uint32, add uint32) (resh, resl uint64, overflow uint32) {
|
||||
mul64 := uint64(mul)
|
||||
a := mul64 * (l & (1<<32 - 1))
|
||||
b := a>>32 + mul64*(l>>32)
|
||||
c := b>>32 + mul64*(h&(1<<32-1))
|
||||
d := c>>32 + mul64*(h>>32)
|
||||
|
||||
a = a&(1<<32-1) + uint64(add)
|
||||
b = b&(1<<32-1) + a>>32
|
||||
c = c&(1<<32-1) + b>>32
|
||||
d = d&(1<<32-1) + c>>32
|
||||
|
||||
return (d<<32 | c&(1<<32-1)), (b<<32 | a&(1<<32-1)), uint32(d >> 32)
|
||||
}
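The parser above packs up to 34 significant digits into the 128-bit (h, l) pair via muladd, then folds the biased exponent into bits 49-62 of the high word. A minimal sketch of how it is exercised, assuming the vendored github.com/globalsign/mgo/bson package:

package main

import (
	"fmt"

	"github.com/globalsign/mgo/bson"
)

func main() {
	// Within 34 significant digits and exponents in [-6176, 6111],
	// parsing succeeds and String() renders the canonical form.
	d, err := bson.ParseDecimal128("-1.5E3")
	if err != nil {
		panic(err)
	}
	fmt.Println(d.String())

	// More than 34 significant digits cannot be represented exactly,
	// so the parser rejects the input instead of rounding it.
	if _, err := bson.ParseDecimal128("12345678901234567890123456789012345"); err != nil {
		fmt.Println(err) // cannot parse ... as a decimal128
	}
}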
File diff suppressed because it is too large
@ -0,0 +1,645 @@
// BSON library for Go
//
// Copyright (c) 2010-2012 - Gustavo Niemeyer <gustavo@niemeyer.net>
//
// All rights reserved.
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are met:
//
// 1. Redistributions of source code must retain the above copyright notice, this
//    list of conditions and the following disclaimer.
// 2. Redistributions in binary form must reproduce the above copyright notice,
//    this list of conditions and the following disclaimer in the documentation
//    and/or other materials provided with the distribution.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
// ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
// WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
// DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
// ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
// (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
// LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
// ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
// gobson - BSON library for Go.

package bson

import (
	"encoding/json"
	"fmt"
	"math"
	"net/url"
	"reflect"
	"sort"
	"strconv"
	"sync"
	"time"
)

// --------------------------------------------------------------------------
// Some internal infrastructure.

var (
	typeBinary         = reflect.TypeOf(Binary{})
	typeObjectId       = reflect.TypeOf(ObjectId(""))
	typeDBPointer      = reflect.TypeOf(DBPointer{"", ObjectId("")})
	typeSymbol         = reflect.TypeOf(Symbol(""))
	typeMongoTimestamp = reflect.TypeOf(MongoTimestamp(0))
	typeOrderKey       = reflect.TypeOf(MinKey)
	typeDocElem        = reflect.TypeOf(DocElem{})
	typeRawDocElem     = reflect.TypeOf(RawDocElem{})
	typeRaw            = reflect.TypeOf(Raw{})
	typeRawPtr         = reflect.PtrTo(reflect.TypeOf(Raw{}))
	typeURL            = reflect.TypeOf(url.URL{})
	typeTime           = reflect.TypeOf(time.Time{})
	typeString         = reflect.TypeOf("")
	typeJSONNumber     = reflect.TypeOf(json.Number(""))
	typeTimeDuration   = reflect.TypeOf(time.Duration(0))
)

var (
	// spec for []uint8 or []byte encoding
	arrayOps = map[string]bool{
		"$in":  true,
		"$nin": true,
		"$all": true,
	}
)

const itoaCacheSize = 32

const (
	getterUnknown = iota
	getterNone
	getterTypeVal
	getterTypePtr
	getterAddr
)

var itoaCache []string

var getterStyles map[reflect.Type]int
var getterIface reflect.Type
var getterMutex sync.RWMutex

func init() {
	itoaCache = make([]string, itoaCacheSize)
	for i := 0; i != itoaCacheSize; i++ {
		itoaCache[i] = strconv.Itoa(i)
	}
	var iface Getter
	getterIface = reflect.TypeOf(&iface).Elem()
	getterStyles = make(map[reflect.Type]int)
}

func itoa(i int) string {
	if i < itoaCacheSize {
		return itoaCache[i]
	}
	return strconv.Itoa(i)
}

func getterStyle(outt reflect.Type) int {
	getterMutex.RLock()
	style := getterStyles[outt]
	getterMutex.RUnlock()
	if style != getterUnknown {
		return style
	}

	getterMutex.Lock()
	defer getterMutex.Unlock()
	if outt.Implements(getterIface) {
		vt := outt
		for vt.Kind() == reflect.Ptr {
			vt = vt.Elem()
		}
		if vt.Implements(getterIface) {
			style = getterTypeVal
		} else {
			style = getterTypePtr
		}
	} else if reflect.PtrTo(outt).Implements(getterIface) {
		style = getterAddr
	} else {
		style = getterNone
	}
	getterStyles[outt] = style
	return style
}

func getGetter(outt reflect.Type, out reflect.Value) Getter {
	style := getterStyle(outt)
	if style == getterNone {
		return nil
	}
	if style == getterAddr {
		if !out.CanAddr() {
			return nil
		}
		return out.Addr().Interface().(Getter)
	}
	if style == getterTypeVal && out.Kind() == reflect.Ptr && out.IsNil() {
		return nil
	}
	return out.Interface().(Getter)
}

// --------------------------------------------------------------------------
// Marshaling of the document value itself.

type encoder struct {
	out []byte
}

func (e *encoder) addDoc(v reflect.Value) {
	for {
		if vi, ok := v.Interface().(Getter); ok {
			getv, err := vi.GetBSON()
			if err != nil {
				panic(err)
			}
			v = reflect.ValueOf(getv)
			continue
		}
		if v.Kind() == reflect.Ptr {
			v = v.Elem()
			continue
		}
		break
	}

	if v.Type() == typeRaw {
		raw := v.Interface().(Raw)
		if raw.Kind != 0x03 && raw.Kind != 0x00 {
			panic("Attempted to marshal Raw kind " + strconv.Itoa(int(raw.Kind)) + " as a document")
		}
		if len(raw.Data) == 0 {
			panic("Attempted to marshal empty Raw document")
		}
		e.addBytes(raw.Data...)
		return
	}

	start := e.reserveInt32()

	switch v.Kind() {
	case reflect.Map:
		e.addMap(v)
	case reflect.Struct:
		e.addStruct(v)
	case reflect.Array, reflect.Slice:
		e.addSlice(v)
	default:
		panic("Can't marshal " + v.Type().String() + " as a BSON document")
	}

	e.addBytes(0)
	e.setInt32(start, int32(len(e.out)-start))
}

func (e *encoder) addMap(v reflect.Value) {
	for _, k := range v.MapKeys() {
		e.addElem(fmt.Sprint(k), v.MapIndex(k), false)
	}
}

func (e *encoder) addStruct(v reflect.Value) {
	sinfo, err := getStructInfo(v.Type())
	if err != nil {
		panic(err)
	}
	var value reflect.Value
	if sinfo.InlineMap >= 0 {
		m := v.Field(sinfo.InlineMap)
		if m.Len() > 0 {
			for _, k := range m.MapKeys() {
				ks := k.String()
				if _, found := sinfo.FieldsMap[ks]; found {
					panic(fmt.Sprintf("Can't have key %q in inlined map; conflicts with struct field", ks))
				}
				e.addElem(ks, m.MapIndex(k), false)
			}
		}
	}
	for _, info := range sinfo.FieldsList {
		if info.Inline == nil {
			value = v.Field(info.Num)
		} else {
			// Since pointers to structs are allowed here, there is
			// no guarantee that a pointer won't be nil.
			//
			// This is expected, allowed behaviour: info.Inline may
			// hold the index of a nil pointer, which is why we go
			// through safeFieldByIndex and simply continue if
			// v.FieldByIndex panics.
			field, errField := safeFieldByIndex(v, info.Inline)
			if errField != nil {
				continue
			}

			value = field
		}
		if info.OmitEmpty && isZero(value) {
			continue
		}
		if useRespectNilValues &&
			(value.Kind() == reflect.Slice || value.Kind() == reflect.Map) &&
			value.IsNil() {
			e.addElem(info.Key, reflect.ValueOf(nil), info.MinSize)
			continue
		}
		e.addElem(info.Key, value, info.MinSize)
	}
}

func safeFieldByIndex(v reflect.Value, index []int) (result reflect.Value, err error) {
	defer func() {
		if recovered := recover(); recovered != nil {
			switch r := recovered.(type) {
			case string:
				err = fmt.Errorf("%s", r)
			case error:
				err = r
			}
		}
	}()

	result = v.FieldByIndex(index)
	return
}

func isZero(v reflect.Value) bool {
	switch v.Kind() {
	case reflect.String:
		return len(v.String()) == 0
	case reflect.Ptr, reflect.Interface:
		return v.IsNil()
	case reflect.Slice:
		return v.Len() == 0
	case reflect.Map:
		return v.Len() == 0
	case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
		return v.Int() == 0
	case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr:
		return v.Uint() == 0
	case reflect.Float32, reflect.Float64:
		return v.Float() == 0
	case reflect.Bool:
		return !v.Bool()
	case reflect.Struct:
		vt := v.Type()
		if vt == typeTime {
			return v.Interface().(time.Time).IsZero()
		}
		for i := 0; i < v.NumField(); i++ {
			if vt.Field(i).PkgPath != "" && !vt.Field(i).Anonymous {
				continue // Private field
			}
			if !isZero(v.Field(i)) {
				return false
			}
		}
		return true
	}
	return false
}

func (e *encoder) addSlice(v reflect.Value) {
	vi := v.Interface()
	if d, ok := vi.(D); ok {
		for _, elem := range d {
			e.addElem(elem.Name, reflect.ValueOf(elem.Value), false)
		}
		return
	}
	if d, ok := vi.(RawD); ok {
		for _, elem := range d {
			e.addElem(elem.Name, reflect.ValueOf(elem.Value), false)
		}
		return
	}
	l := v.Len()
	et := v.Type().Elem()
	if et == typeDocElem {
		for i := 0; i < l; i++ {
			elem := v.Index(i).Interface().(DocElem)
			e.addElem(elem.Name, reflect.ValueOf(elem.Value), false)
		}
		return
	}
	if et == typeRawDocElem {
		for i := 0; i < l; i++ {
			elem := v.Index(i).Interface().(RawDocElem)
			e.addElem(elem.Name, reflect.ValueOf(elem.Value), false)
		}
		return
	}
	for i := 0; i < l; i++ {
		e.addElem(itoa(i), v.Index(i), false)
	}
}

// --------------------------------------------------------------------------
// Marshaling of elements in a document.

func (e *encoder) addElemName(kind byte, name string) {
	e.addBytes(kind)
	e.addBytes([]byte(name)...)
	e.addBytes(0)
}

func (e *encoder) addElem(name string, v reflect.Value, minSize bool) {

	if !v.IsValid() {
		e.addElemName(0x0A, name)
		return
	}

	if getter := getGetter(v.Type(), v); getter != nil {
		getv, err := getter.GetBSON()
		if err != nil {
			panic(err)
		}
		e.addElem(name, reflect.ValueOf(getv), minSize)
		return
	}

	switch v.Kind() {

	case reflect.Interface:
		e.addElem(name, v.Elem(), minSize)

	case reflect.Ptr:
		e.addElem(name, v.Elem(), minSize)

	case reflect.String:
		s := v.String()
		switch v.Type() {
		case typeObjectId:
			if len(s) != 12 {
				panic("ObjectIDs must be exactly 12 bytes long (got " +
					strconv.Itoa(len(s)) + ")")
			}
			e.addElemName(0x07, name)
			e.addBytes([]byte(s)...)
		case typeSymbol:
			e.addElemName(0x0E, name)
			e.addStr(s)
		case typeJSONNumber:
			n := v.Interface().(json.Number)
			if i, err := n.Int64(); err == nil {
				e.addElemName(0x12, name)
				e.addInt64(i)
			} else if f, err := n.Float64(); err == nil {
				e.addElemName(0x01, name)
				e.addFloat64(f)
			} else {
				panic("failed to convert json.Number to a number: " + s)
			}
		default:
			e.addElemName(0x02, name)
			e.addStr(s)
		}

	case reflect.Float32, reflect.Float64:
		e.addElemName(0x01, name)
		e.addFloat64(v.Float())

	case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr:
		u := v.Uint()
		if int64(u) < 0 {
			panic("BSON has no uint64 type, and value is too large to fit correctly in an int64")
		} else if u <= math.MaxInt32 && (minSize || v.Kind() <= reflect.Uint32) {
			e.addElemName(0x10, name)
			e.addInt32(int32(u))
		} else {
			e.addElemName(0x12, name)
			e.addInt64(int64(u))
		}

	case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
		switch v.Type() {
		case typeMongoTimestamp:
			e.addElemName(0x11, name)
			e.addInt64(v.Int())

		case typeOrderKey:
			if v.Int() == int64(MaxKey) {
				e.addElemName(0x7F, name)
			} else {
				e.addElemName(0xFF, name)
			}
		case typeTimeDuration:
			// Stored as int64
			e.addElemName(0x12, name)

			e.addInt64(int64(v.Int() / 1e6))
		default:
			i := v.Int()
			if (minSize || v.Type().Kind() != reflect.Int64) && i >= math.MinInt32 && i <= math.MaxInt32 {
				// It fits into an int32, encode as such.
				e.addElemName(0x10, name)
				e.addInt32(int32(i))
			} else {
				e.addElemName(0x12, name)
				e.addInt64(i)
			}
		}

	case reflect.Bool:
		e.addElemName(0x08, name)
		if v.Bool() {
			e.addBytes(1)
		} else {
			e.addBytes(0)
		}

	case reflect.Map:
		e.addElemName(0x03, name)
		e.addDoc(v)

	case reflect.Slice:
		vt := v.Type()
		et := vt.Elem()
		if et.Kind() == reflect.Uint8 {
			if arrayOps[name] {
				e.addElemName(0x04, name)
				e.addDoc(v)
			} else {
				e.addElemName(0x05, name)
				e.addBinary(0x00, v.Bytes())
			}
		} else if et == typeDocElem || et == typeRawDocElem {
			e.addElemName(0x03, name)
			e.addDoc(v)
		} else {
			e.addElemName(0x04, name)
			e.addDoc(v)
		}

	case reflect.Array:
		et := v.Type().Elem()
		if et.Kind() == reflect.Uint8 {
			if arrayOps[name] {
				e.addElemName(0x04, name)
				e.addDoc(v)
			} else {
				e.addElemName(0x05, name)
				if v.CanAddr() {
					e.addBinary(0x00, v.Slice(0, v.Len()).Interface().([]byte))
				} else {
					n := v.Len()
					e.addInt32(int32(n))
					e.addBytes(0x00)
					for i := 0; i < n; i++ {
						el := v.Index(i)
						e.addBytes(byte(el.Uint()))
					}
				}
			}
		} else {
			e.addElemName(0x04, name)
			e.addDoc(v)
		}

	case reflect.Struct:
		switch s := v.Interface().(type) {

		case Raw:
			kind := s.Kind
			if kind == 0x00 {
				kind = 0x03
			}
			if len(s.Data) == 0 && kind != 0x06 && kind != 0x0A && kind != 0xFF && kind != 0x7F {
				panic("Attempted to marshal empty Raw document")
			}
			e.addElemName(kind, name)
			e.addBytes(s.Data...)

		case Binary:
			e.addElemName(0x05, name)
			e.addBinary(s.Kind, s.Data)

		case Decimal128:
			e.addElemName(0x13, name)
			e.addInt64(int64(s.l))
			e.addInt64(int64(s.h))

		case DBPointer:
			e.addElemName(0x0C, name)
			e.addStr(s.Namespace)
			if len(s.Id) != 12 {
				panic("ObjectIDs must be exactly 12 bytes long (got " +
					strconv.Itoa(len(s.Id)) + ")")
			}
			e.addBytes([]byte(s.Id)...)

		case RegEx:
			e.addElemName(0x0B, name)
			e.addCStr(s.Pattern)
			options := runes(s.Options)
			sort.Sort(options)
			e.addCStr(string(options))

		case JavaScript:
			if s.Scope == nil {
				e.addElemName(0x0D, name)
				e.addStr(s.Code)
			} else {
				e.addElemName(0x0F, name)
				start := e.reserveInt32()
				e.addStr(s.Code)
				e.addDoc(reflect.ValueOf(s.Scope))
				e.setInt32(start, int32(len(e.out)-start))
			}

		case time.Time:
			// MongoDB handles timestamps as milliseconds.
			e.addElemName(0x09, name)
			e.addInt64(s.Unix()*1000 + int64(s.Nanosecond()/1e6))

		case url.URL:
			e.addElemName(0x02, name)
			e.addStr(s.String())

		case undefined:
			e.addElemName(0x06, name)

		default:
			e.addElemName(0x03, name)
			e.addDoc(v)
		}

	default:
		panic("Can't marshal " + v.Type().String() + " in a BSON document")
	}
}

// -------------
// Helper method for sorting regex options
type runes []rune

func (a runes) Len() int           { return len(a) }
func (a runes) Swap(i, j int)      { a[i], a[j] = a[j], a[i] }
func (a runes) Less(i, j int) bool { return a[i] < a[j] }

// --------------------------------------------------------------------------
// Marshaling of base types.

func (e *encoder) addBinary(subtype byte, v []byte) {
	if subtype == 0x02 {
		// Wonder how that brilliant idea came to life. Obsolete, luckily.
		e.addInt32(int32(len(v) + 4))
		e.addBytes(subtype)
		e.addInt32(int32(len(v)))
	} else {
		e.addInt32(int32(len(v)))
		e.addBytes(subtype)
	}
	e.addBytes(v...)
}

func (e *encoder) addStr(v string) {
	e.addInt32(int32(len(v) + 1))
	e.addCStr(v)
}

func (e *encoder) addCStr(v string) {
	e.addBytes([]byte(v)...)
	e.addBytes(0)
}

func (e *encoder) reserveInt32() (pos int) {
	pos = len(e.out)
	e.addBytes(0, 0, 0, 0)
	return pos
}

func (e *encoder) setInt32(pos int, v int32) {
	e.out[pos+0] = byte(v)
	e.out[pos+1] = byte(v >> 8)
	e.out[pos+2] = byte(v >> 16)
	e.out[pos+3] = byte(v >> 24)
}

func (e *encoder) addInt32(v int32) {
	u := uint32(v)
	e.addBytes(byte(u), byte(u>>8), byte(u>>16), byte(u>>24))
}

func (e *encoder) addInt64(v int64) {
	u := uint64(v)
	e.addBytes(byte(u), byte(u>>8), byte(u>>16), byte(u>>24),
		byte(u>>32), byte(u>>40), byte(u>>48), byte(u>>56))
}

func (e *encoder) addFloat64(v float64) {
	e.addInt64(int64(math.Float64bits(v)))
}

func (e *encoder) addBytes(v ...byte) {
	e.out = append(e.out, v...)
}
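Since encoder and its add* methods are unexported, this file is normally driven through the package-level Marshal entry point, with struct tags selecting the omitempty and minsize behaviour seen in addStruct and addElem. A minimal sketch, assuming the vendored github.com/globalsign/mgo/bson package (the Event type is illustrative):

package main

import (
	"fmt"

	"github.com/globalsign/mgo/bson"
)

type Event struct {
	Id      bson.ObjectId `bson:"_id"`
	Name    string        `bson:"name"`
	Count   int64         `bson:"count,minsize"`     // minSize: stored as int32 when it fits
	Comment string        `bson:"comment,omitempty"` // skipped entirely when zero
}

func main() {
	data, err := bson.Marshal(Event{Id: bson.NewObjectId(), Name: "deploy", Count: 3})
	if err != nil {
		panic(err)
	}
	// The first four bytes are the little-endian document length that
	// reserveInt32/setInt32 back-patch once the document is complete.
	fmt.Printf("% x\n", data[:4])
}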
@ -0,0 +1,384 @@
package bson

import (
	"bytes"
	"encoding/base64"
	"fmt"
	"strconv"
	"strings"
	"time"

	"github.com/globalsign/mgo/internal/json"
)

// UnmarshalJSON unmarshals a JSON value that may hold non-standard
// syntax as defined in BSON's extended JSON specification.
func UnmarshalJSON(data []byte, value interface{}) error {
	d := json.NewDecoder(bytes.NewBuffer(data))
	d.Extend(&jsonExt)
	return d.Decode(value)
}

// MarshalJSON marshals a JSON value that may hold non-standard
// syntax as defined in BSON's extended JSON specification.
func MarshalJSON(value interface{}) ([]byte, error) {
	var buf bytes.Buffer
	e := json.NewEncoder(&buf)
	e.Extend(&jsonExt)
	err := e.Encode(value)
	if err != nil {
		return nil, err
	}
	return buf.Bytes(), nil
}

// jdec is used internally by the JSON decoding functions
// so they may unmarshal functions without getting into endless
// recursion due to keyed objects.
func jdec(data []byte, value interface{}) error {
	d := json.NewDecoder(bytes.NewBuffer(data))
	d.Extend(&funcExt)
	return d.Decode(value)
}

var jsonExt json.Extension
var funcExt json.Extension

// TODO
// - Shell regular expressions ("/regexp/opts")

func init() {
	jsonExt.DecodeUnquotedKeys(true)
	jsonExt.DecodeTrailingCommas(true)

	funcExt.DecodeFunc("BinData", "$binaryFunc", "$type", "$binary")
	jsonExt.DecodeKeyed("$binary", jdecBinary)
	jsonExt.DecodeKeyed("$binaryFunc", jdecBinary)
	jsonExt.EncodeType([]byte(nil), jencBinarySlice)
	jsonExt.EncodeType(Binary{}, jencBinaryType)

	funcExt.DecodeFunc("ISODate", "$dateFunc", "S")
	funcExt.DecodeFunc("new Date", "$dateFunc", "S")
	jsonExt.DecodeKeyed("$date", jdecDate)
	jsonExt.DecodeKeyed("$dateFunc", jdecDate)
	jsonExt.EncodeType(time.Time{}, jencDate)

	funcExt.DecodeFunc("Timestamp", "$timestamp", "t", "i")
	jsonExt.DecodeKeyed("$timestamp", jdecTimestamp)
	jsonExt.EncodeType(MongoTimestamp(0), jencTimestamp)

	funcExt.DecodeConst("undefined", Undefined)

	jsonExt.DecodeKeyed("$regex", jdecRegEx)
	jsonExt.EncodeType(RegEx{}, jencRegEx)

	funcExt.DecodeFunc("ObjectId", "$oidFunc", "Id")
	jsonExt.DecodeKeyed("$oid", jdecObjectId)
	jsonExt.DecodeKeyed("$oidFunc", jdecObjectId)
	jsonExt.EncodeType(ObjectId(""), jencObjectId)

	funcExt.DecodeFunc("DBRef", "$dbrefFunc", "$ref", "$id")
	jsonExt.DecodeKeyed("$dbrefFunc", jdecDBRef)

	funcExt.DecodeFunc("NumberLong", "$numberLongFunc", "N")
	jsonExt.DecodeKeyed("$numberLong", jdecNumberLong)
	jsonExt.DecodeKeyed("$numberLongFunc", jdecNumberLong)
	jsonExt.EncodeType(int64(0), jencNumberLong)
	jsonExt.EncodeType(int(0), jencInt)

	funcExt.DecodeConst("MinKey", MinKey)
	funcExt.DecodeConst("MaxKey", MaxKey)
	jsonExt.DecodeKeyed("$minKey", jdecMinKey)
	jsonExt.DecodeKeyed("$maxKey", jdecMaxKey)
	jsonExt.EncodeType(orderKey(0), jencMinMaxKey)

	jsonExt.DecodeKeyed("$undefined", jdecUndefined)
	jsonExt.EncodeType(Undefined, jencUndefined)

	jsonExt.Extend(&funcExt)
}

func fbytes(format string, args ...interface{}) []byte {
	var buf bytes.Buffer
	fmt.Fprintf(&buf, format, args...)
	return buf.Bytes()
}

func jdecBinary(data []byte) (interface{}, error) {
	var v struct {
		Binary []byte `json:"$binary"`
		Type   string `json:"$type"`
		Func   struct {
			Binary []byte `json:"$binary"`
			Type   int64  `json:"$type"`
		} `json:"$binaryFunc"`
	}
	err := jdec(data, &v)
	if err != nil {
		return nil, err
	}

	var binData []byte
	var binKind int64
	if v.Type == "" && v.Binary == nil {
		binData = v.Func.Binary
		binKind = v.Func.Type
	} else if v.Type == "" {
		return v.Binary, nil
	} else {
		binData = v.Binary
		binKind, err = strconv.ParseInt(v.Type, 0, 64)
		if err != nil {
			binKind = -1
		}
	}

	if binKind == 0 {
		return binData, nil
	}
	if binKind < 0 || binKind > 255 {
		return nil, fmt.Errorf("invalid type in binary object: %s", data)
	}

	return Binary{Kind: byte(binKind), Data: binData}, nil
}

func jencBinarySlice(v interface{}) ([]byte, error) {
	in := v.([]byte)
	out := make([]byte, base64.StdEncoding.EncodedLen(len(in)))
	base64.StdEncoding.Encode(out, in)
	return fbytes(`{"$binary":"%s","$type":"0x0"}`, out), nil
}

func jencBinaryType(v interface{}) ([]byte, error) {
	in := v.(Binary)
	out := make([]byte, base64.StdEncoding.EncodedLen(len(in.Data)))
	base64.StdEncoding.Encode(out, in.Data)
	return fbytes(`{"$binary":"%s","$type":"0x%x"}`, out, in.Kind), nil
}

const jdateFormat = "2006-01-02T15:04:05.999Z07:00"

func jdecDate(data []byte) (interface{}, error) {
	var v struct {
		S    string `json:"$date"`
		Func struct {
			S string
		} `json:"$dateFunc"`
	}
	_ = jdec(data, &v)
	if v.S == "" {
		v.S = v.Func.S
	}
	if v.S != "" {
		var errs []string
		for _, format := range []string{jdateFormat, "2006-01-02"} {
			t, err := time.Parse(format, v.S)
			if err == nil {
				return t, nil
			}
			errs = append(errs, err.Error())
		}
		return nil, fmt.Errorf("cannot parse date: %q [%s]", v.S, strings.Join(errs, ", "))
	}

	var vn struct {
		Date struct {
			N int64 `json:"$numberLong,string"`
		} `json:"$date"`
		Func struct {
			S int64
		} `json:"$dateFunc"`
	}
	err := jdec(data, &vn)
	if err != nil {
		return nil, fmt.Errorf("cannot parse date: %q", data)
	}
	n := vn.Date.N
	if n == 0 {
		n = vn.Func.S
	}
	return time.Unix(n/1000, n%1000*1e6).UTC(), nil
}

func jencDate(v interface{}) ([]byte, error) {
	t := v.(time.Time)
	return fbytes(`{"$date":%q}`, t.Format(jdateFormat)), nil
}

func jdecTimestamp(data []byte) (interface{}, error) {
	var v struct {
		Func struct {
			T int32 `json:"t"`
			I int32 `json:"i"`
		} `json:"$timestamp"`
	}
	err := jdec(data, &v)
	if err != nil {
		return nil, err
	}
	return MongoTimestamp(uint64(v.Func.T)<<32 | uint64(uint32(v.Func.I))), nil
}

func jencTimestamp(v interface{}) ([]byte, error) {
	ts := uint64(v.(MongoTimestamp))
	return fbytes(`{"$timestamp":{"t":%d,"i":%d}}`, ts>>32, uint32(ts)), nil
}

func jdecRegEx(data []byte) (interface{}, error) {
	var v struct {
		Regex   string `json:"$regex"`
		Options string `json:"$options"`
	}
	err := jdec(data, &v)
	if err != nil {
		return nil, err
	}
	return RegEx{v.Regex, v.Options}, nil
}

func jencRegEx(v interface{}) ([]byte, error) {
	re := v.(RegEx)
	type regex struct {
		Regex   string `json:"$regex"`
		Options string `json:"$options"`
	}
	return json.Marshal(regex{re.Pattern, re.Options})
}

func jdecObjectId(data []byte) (interface{}, error) {
	var v struct {
		Id   string `json:"$oid"`
		Func struct {
			Id string
		} `json:"$oidFunc"`
	}
	err := jdec(data, &v)
	if err != nil {
		return nil, err
	}
	if v.Id == "" {
		v.Id = v.Func.Id
	}
	return ObjectIdHex(v.Id), nil
}

func jencObjectId(v interface{}) ([]byte, error) {
	return fbytes(`{"$oid":"%s"}`, v.(ObjectId).Hex()), nil
}

func jdecDBRef(data []byte) (interface{}, error) {
	// TODO Support unmarshaling $ref and $id into the input value.
	var v struct {
		Obj map[string]interface{} `json:"$dbrefFunc"`
	}
	// TODO Fix this. Must not be required.
	v.Obj = make(map[string]interface{})
	err := jdec(data, &v)
	if err != nil {
		return nil, err
	}
	return v.Obj, nil
}

func jdecNumberLong(data []byte) (interface{}, error) {
	var v struct {
		N    int64 `json:"$numberLong,string"`
		Func struct {
			N int64 `json:",string"`
		} `json:"$numberLongFunc"`
	}
	var vn struct {
		N    int64 `json:"$numberLong"`
		Func struct {
			N int64
		} `json:"$numberLongFunc"`
	}
	err := jdec(data, &v)
	if err != nil {
		err = jdec(data, &vn)
		v.N = vn.N
		v.Func.N = vn.Func.N
	}
	if err != nil {
		return nil, err
	}
	if v.N != 0 {
		return v.N, nil
	}
	return v.Func.N, nil
}

func jencNumberLong(v interface{}) ([]byte, error) {
	n := v.(int64)
	f := `{"$numberLong":"%d"}`
	if n <= 1<<53 {
		f = `{"$numberLong":%d}`
	}
	return fbytes(f, n), nil
}

func jencInt(v interface{}) ([]byte, error) {
	n := v.(int)
	f := `{"$numberLong":"%d"}`
	if int64(n) <= 1<<53 {
		f = `%d`
	}
	return fbytes(f, n), nil
}

func jdecMinKey(data []byte) (interface{}, error) {
	var v struct {
		N int64 `json:"$minKey"`
	}
	err := jdec(data, &v)
	if err != nil {
		return nil, err
	}
	if v.N != 1 {
		return nil, fmt.Errorf("invalid $minKey object: %s", data)
	}
	return MinKey, nil
}

func jdecMaxKey(data []byte) (interface{}, error) {
	var v struct {
		N int64 `json:"$maxKey"`
	}
	err := jdec(data, &v)
	if err != nil {
		return nil, err
	}
	if v.N != 1 {
		return nil, fmt.Errorf("invalid $maxKey object: %s", data)
	}
	return MaxKey, nil
}

func jencMinMaxKey(v interface{}) ([]byte, error) {
	switch v.(orderKey) {
	case MinKey:
		return []byte(`{"$minKey":1}`), nil
	case MaxKey:
		return []byte(`{"$maxKey":1}`), nil
	}
	panic(fmt.Sprintf("invalid $minKey/$maxKey value: %d", v))
}

func jdecUndefined(data []byte) (interface{}, error) {
	var v struct {
		B bool `json:"$undefined"`
	}
	err := jdec(data, &v)
	if err != nil {
		return nil, err
	}
	if !v.B {
		return nil, fmt.Errorf("invalid $undefined object: %s", data)
	}
	return Undefined, nil
}

func jencUndefined(v interface{}) ([]byte, error) {
	return []byte(`{"$undefined":true}`), nil
}
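The registrations in init above mean that both the strict keyed forms ({"$oid": ...}, {"$date": ...}) and the mongo-shell function forms (ObjectId(...), ISODate(...), NumberLong(...)) decode to the same Go values, while encoding always emits the strict form. A rough round-trip sketch, assuming the vendored github.com/globalsign/mgo/bson package (the document literal and field names are made up):

package main

import (
	"fmt"
	"time"

	"github.com/globalsign/mgo/bson"
)

func main() {
	var doc struct {
		Id   bson.ObjectId `json:"_id"`
		When time.Time     `json:"when"`
	}
	// Shell-style input: function forms instead of strict keyed objects.
	in := `{"_id": ObjectId("5d2f42a7d6a4c1f3b2a19c01"), "when": ISODate("2019-07-04T12:00:00Z")}`
	if err := bson.UnmarshalJSON([]byte(in), &doc); err != nil {
		panic(err)
	}

	// Encoding always produces the strict {"$oid": ...} / {"$date": ...} form.
	out, err := bson.MarshalJSON(doc)
	if err != nil {
		panic(err)
	}
	fmt.Println(string(out))
}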
@ -0,0 +1,90 @@
package bson

import (
	"bytes"
	"encoding/binary"
	"fmt"
	"io"
)

const (
	// MinDocumentSize is the size of the smallest possible valid BSON document:
	// an int32 size header + 0x00 (end of document).
	MinDocumentSize = 5

	// MaxDocumentSize is the largest possible size for a BSON document allowed by MongoDB,
	// that is, 16 MiB (see https://docs.mongodb.com/manual/reference/limits/).
	MaxDocumentSize = 16777216
)

// ErrInvalidDocumentSize is an error returned when a BSON document's header
// contains a size smaller than MinDocumentSize or greater than MaxDocumentSize.
type ErrInvalidDocumentSize struct {
	DocumentSize int32
}

func (e ErrInvalidDocumentSize) Error() string {
	return fmt.Sprintf("invalid document size %d", e.DocumentSize)
}

// A Decoder reads and decodes BSON values from an input stream.
type Decoder struct {
	source io.Reader
}

// NewDecoder returns a new Decoder that reads from source.
// It does not add any extra buffering, and may not read data from source beyond the BSON values requested.
func NewDecoder(source io.Reader) *Decoder {
	return &Decoder{source: source}
}

// Decode reads the next BSON-encoded value from its input and stores it in the value pointed to by v.
// See the documentation for Unmarshal for details about the conversion of BSON into a Go value.
func (dec *Decoder) Decode(v interface{}) (err error) {
	// BSON documents start with their size as a *signed* int32.
	var docSize int32
	if err = binary.Read(dec.source, binary.LittleEndian, &docSize); err != nil {
		return
	}

	if docSize < MinDocumentSize || docSize > MaxDocumentSize {
		return ErrInvalidDocumentSize{DocumentSize: docSize}
	}

	docBuffer := bytes.NewBuffer(make([]byte, 0, docSize))
	if err = binary.Write(docBuffer, binary.LittleEndian, docSize); err != nil {
		return
	}

	// docSize is the *full* document's size (including the 4-byte size header,
	// which has already been read).
	if _, err = io.CopyN(docBuffer, dec.source, int64(docSize-4)); err != nil {
		return
	}

	// Let Unmarshal handle the rest.
	defer handleErr(&err)
	return Unmarshal(docBuffer.Bytes(), v)
}

// An Encoder encodes and writes BSON values to an output stream.
type Encoder struct {
	target io.Writer
}

// NewEncoder returns a new Encoder that writes to target.
func NewEncoder(target io.Writer) *Encoder {
	return &Encoder{target: target}
}

// Encode encodes v to BSON, and if successful writes it to the Encoder's output stream.
// See the documentation for Marshal for details about the conversion of Go values to BSON.
func (enc *Encoder) Encode(v interface{}) error {
	data, err := Marshal(v)
	if err != nil {
		return err
	}

	_, err = enc.target.Write(data)
	return err
}
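Because every BSON document carries its own length header, Decoder and Encoder can frame documents on a plain byte stream with no extra delimiters. A small round-trip sketch, assuming the vendored github.com/globalsign/mgo/bson package:

package main

import (
	"bytes"
	"fmt"
	"io"

	"github.com/globalsign/mgo/bson"
)

func main() {
	var stream bytes.Buffer

	// Concatenate several documents onto one stream.
	enc := bson.NewEncoder(&stream)
	for _, name := range []string{"a", "b", "c"} {
		if err := enc.Encode(bson.M{"name": name}); err != nil {
			panic(err)
		}
	}

	// Read them back one at a time; the size header delimits each one.
	dec := bson.NewDecoder(&stream)
	for {
		var doc bson.M
		err := dec.Decode(&doc)
		if err == io.EOF {
			break
		}
		if err != nil {
			panic(err)
		}
		fmt.Println(doc["name"])
	}
}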
@ -0,0 +1,27 @@
Copyright (c) 2012 The Go Authors. All rights reserved.

Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are
met:

   * Redistributions of source code must retain the above copyright
notice, this list of conditions and the following disclaimer.
   * Redistributions in binary form must reproduce the above
copyright notice, this list of conditions and the following disclaimer
in the documentation and/or other materials provided with the
distribution.
   * Neither the name of Google Inc. nor the names of its
contributors may be used to endorse or promote products derived from
this software without specific prior written permission.

THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
File diff suppressed because it is too large
File diff suppressed because it is too large
@ -0,0 +1,95 @@
package json

import (
	"reflect"
)

// Extension holds a set of additional rules to be used when unmarshaling
// strict JSON or JSON-like content.
type Extension struct {
	funcs  map[string]funcExt
	consts map[string]interface{}
	keyed  map[string]func([]byte) (interface{}, error)
	encode map[reflect.Type]func(v interface{}) ([]byte, error)

	unquotedKeys   bool
	trailingCommas bool
}

type funcExt struct {
	key  string
	args []string
}

// Extend changes the decoder behavior to consider the provided extension.
func (dec *Decoder) Extend(ext *Extension) { dec.d.ext = *ext }

// Extend changes the encoder behavior to consider the provided extension.
func (enc *Encoder) Extend(ext *Extension) { enc.ext = *ext }

// Extend includes in e the extensions defined in ext.
func (e *Extension) Extend(ext *Extension) {
	for name, fext := range ext.funcs {
		e.DecodeFunc(name, fext.key, fext.args...)
	}
	for name, value := range ext.consts {
		e.DecodeConst(name, value)
	}
	for key, decode := range ext.keyed {
		e.DecodeKeyed(key, decode)
	}
	for typ, encode := range ext.encode {
		if e.encode == nil {
			e.encode = make(map[reflect.Type]func(v interface{}) ([]byte, error))
		}
		e.encode[typ] = encode
	}
}

// DecodeFunc defines a function call that may be observed inside JSON content.
// A function with the provided name will be unmarshaled as the document
// {key: {args[0]: ..., args[N]: ...}}.
func (e *Extension) DecodeFunc(name string, key string, args ...string) {
	if e.funcs == nil {
		e.funcs = make(map[string]funcExt)
	}
	e.funcs[name] = funcExt{key, args}
}

// DecodeConst defines a constant name that may be observed inside JSON content
// and will be decoded with the provided value.
func (e *Extension) DecodeConst(name string, value interface{}) {
	if e.consts == nil {
		e.consts = make(map[string]interface{})
	}
	e.consts[name] = value
}

// DecodeKeyed defines a key that when observed as the first element inside a
// JSON document triggers the decoding of that document via the provided
// decode function.
func (e *Extension) DecodeKeyed(key string, decode func(data []byte) (interface{}, error)) {
	if e.keyed == nil {
		e.keyed = make(map[string]func([]byte) (interface{}, error))
	}
	e.keyed[key] = decode
}

// DecodeUnquotedKeys defines whether to accept map keys that are unquoted strings.
func (e *Extension) DecodeUnquotedKeys(accept bool) {
	e.unquotedKeys = accept
}

// DecodeTrailingCommas defines whether to accept trailing commas in maps and arrays.
func (e *Extension) DecodeTrailingCommas(accept bool) {
	e.trailingCommas = accept
}

// EncodeType registers a function to encode values with the same type of the
// provided sample.
func (e *Extension) EncodeType(sample interface{}, encode func(v interface{}) ([]byte, error)) {
	if e.encode == nil {
		e.encode = make(map[reflect.Type]func(v interface{}) ([]byte, error))
	}
	e.encode[reflect.TypeOf(sample)] = encode
}
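This package lives under internal/, so only mgo itself can register extensions; the init function in bson/json.go above is the real consumer. As an in-package illustration only, a hypothetical Point(x, y) function form could be wired as below (Point, $pointFunc, and jdecPoint are invented names, not part of the library):

// (inside package json, mirroring the wiring style of bson/json.go)
var pointExt Extension

func setupPointExt() {
	// Accept the shell-style call Point(1, 2) by rewriting it into the
	// keyed document {"$pointFunc": {"x": 1, "y": 2}} ...
	pointExt.DecodeFunc("Point", "$pointFunc", "x", "y")

	// ... and decode that keyed document into a concrete Go value.
	pointExt.DecodeKeyed("$pointFunc", jdecPoint)
}

func jdecPoint(data []byte) (interface{}, error) {
	var v struct {
		Func struct {
			X, Y float64
		} `json:"$pointFunc"`
	}
	// Plain Unmarshal is enough here; the keyed form is strict JSON.
	if err := Unmarshal(data, &v); err != nil {
		return nil, err
	}
	return [2]float64{v.Func.X, v.Func.Y}, nil
}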
@ -0,0 +1,143 @@
// Copyright 2013 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

package json

import (
	"bytes"
	"unicode/utf8"
)

const (
	caseMask     = ^byte(0x20) // Mask to ignore case in ASCII.
	kelvin       = '\u212a'
	smallLongEss = '\u017f'
)

// foldFunc returns one of four different case folding equivalence
// functions, from most general (and slow) to fastest:
//
// 1) bytes.EqualFold, if the key s contains any non-ASCII UTF-8
// 2) equalFoldRight, if s contains special folding ASCII ('k', 'K', 's', 'S')
// 3) asciiEqualFold, no special, but includes non-letters (including _)
// 4) simpleLetterEqualFold, no specials, no non-letters.
//
// The letters S and K are special because they map to 3 runes, not just 2:
//  * S maps to s and to U+017F 'ſ' Latin small letter long s
//  * k maps to K and to U+212A 'K' Kelvin sign
// See https://play.golang.org/p/tTxjOc0OGo
//
// The returned function is specialized for matching against s and
// should only be given s. It's not curried for performance reasons.
func foldFunc(s []byte) func(s, t []byte) bool {
	nonLetter := false
	special := false // special letter
	for _, b := range s {
		if b >= utf8.RuneSelf {
			return bytes.EqualFold
		}
		upper := b & caseMask
		if upper < 'A' || upper > 'Z' {
			nonLetter = true
		} else if upper == 'K' || upper == 'S' {
			// See above for why these letters are special.
			special = true
		}
	}
	if special {
		return equalFoldRight
	}
	if nonLetter {
		return asciiEqualFold
	}
	return simpleLetterEqualFold
}

// equalFoldRight is a specialization of bytes.EqualFold when s is
// known to be all ASCII (including punctuation), but contains an 's',
// 'S', 'k', or 'K', requiring a Unicode fold on the bytes in t.
// See comments on foldFunc.
func equalFoldRight(s, t []byte) bool {
	for _, sb := range s {
		if len(t) == 0 {
			return false
		}
		tb := t[0]
		if tb < utf8.RuneSelf {
			if sb != tb {
				sbUpper := sb & caseMask
				if 'A' <= sbUpper && sbUpper <= 'Z' {
					if sbUpper != tb&caseMask {
						return false
					}
				} else {
					return false
				}
			}
			t = t[1:]
			continue
		}
		// sb is ASCII and t is not. t must be either kelvin
		// sign or long s; sb must be s, S, k, or K.
		tr, size := utf8.DecodeRune(t)
		switch sb {
		case 's', 'S':
			if tr != smallLongEss {
				return false
			}
		case 'k', 'K':
			if tr != kelvin {
				return false
			}
		default:
			return false
		}
		t = t[size:]

	}
	if len(t) > 0 {
		return false
	}
	return true
}

// asciiEqualFold is a specialization of bytes.EqualFold for use when
// s is all ASCII (but may contain non-letters) and contains no
// special-folding letters.
// See comments on foldFunc.
func asciiEqualFold(s, t []byte) bool {
	if len(s) != len(t) {
		return false
	}
	for i, sb := range s {
		tb := t[i]
		if sb == tb {
			continue
		}
		if ('a' <= sb && sb <= 'z') || ('A' <= sb && sb <= 'Z') {
			if sb&caseMask != tb&caseMask {
				return false
			}
		} else {
			return false
		}
	}
	return true
}

// simpleLetterEqualFold is a specialization of bytes.EqualFold for
// use when s is all ASCII letters (no underscores, etc) and also
// doesn't contain 'k', 'K', 's', or 'S'.
// See comments on foldFunc.
func simpleLetterEqualFold(s, t []byte) bool {
	if len(s) != len(t) {
		return false
	}
	for i, b := range s {
		if b&caseMask != t[i]&caseMask {
			return false
		}
	}
	return true
}
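The fast paths above exist because only 's'/'S' and 'k'/'K' fold to runes outside ASCII. A quick demonstration of the equivalences involved, using the general bytes.EqualFold fallback that foldFunc returns for non-ASCII keys:

package main

import (
	"bytes"
	"fmt"
)

func main() {
	// The three-way folds that force the slower equalFoldRight path:
	// 'K' folds to both 'k' and U+212A (Kelvin sign), and 'S' folds to
	// both 's' and U+017F (long s).
	fmt.Println(bytes.EqualFold([]byte("K"), []byte("\u212a"))) // true
	fmt.Println(bytes.EqualFold([]byte("S"), []byte("\u017f"))) // true

	// A key made only of ASCII letters without 'k' or 's' would instead
	// be matched by the byte-wise simpleLetterEqualFold fast path.
	fmt.Println(bytes.EqualFold([]byte("name"), []byte("NAME"))) // true
}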
Some files were not shown because too many files have changed in this diff