diff --git a/.travis.yml b/.travis.yml
index 14b47a7e..4e12787a 100644
--- a/.travis.yml
+++ b/.travis.yml
@@ -2,16 +2,18 @@ sudo: false
language: go
go:
- - 1.5.2
- - tip
+- 1.5.3
+- tip
+
+env:
+- GO15VENDOREXPERIMENT=1
install:
- - go get -d
- - go get golang.org/x/tools/cmd/vet
+- go get golang.org/x/tools/cmd/vet
script:
- - '! gofmt -l **/*.go | read nothing'
- - go vet
- - go test -v ./...
- - go build
- - ./end-to-end-test.sh
+- "! gofmt -l $(find . -path ./vendor -prune -o -name '*.go' -print) | read nothing"
+- go vet
+- go test -v $(go list ./... | grep -v /vendor/)
+- go build
+- ./end-to-end-test.sh
diff --git a/Makefile.COMMON b/Makefile.COMMON
index 1b829bf3..f8aa1b3d 100644
--- a/Makefile.COMMON
+++ b/Makefile.COMMON
@@ -45,21 +45,32 @@ GOOS ?= $(shell uname | tr A-Z a-z)
GOARCH ?= $(subst x86_64,amd64,$(patsubst i%86,386,$(shell uname -m)))
GO_VERSION ?= 1.5.3
-GOURL ?= https://golang.org/dl
-GOPKG ?= go$(GO_VERSION).$(GOOS)-$(GOARCH).tar.gz
-GOPATH := $(CURDIR)/.build/gopath
# Check for the correct version of go in the path. If we find it, use it.
# Otherwise, prepare to build go locally.
ifeq ($(shell command -v "go" >/dev/null && go version | sed -e 's/^[^0-9.]*\([0-9.]*\).*/\1/'), $(GO_VERSION))
GOCC ?= $(shell command -v "go")
GOFMT ?= $(shell command -v "gofmt")
- GO ?= GOPATH=$(GOPATH) $(GOCC)
+ GO ?= $(GOCC)
else
+ GOURL ?= https://golang.org/dl
+ GOPKG ?= go$(GO_VERSION).$(GOOS)-$(GOARCH).tar.gz
GOROOT ?= $(CURDIR)/.build/go$(GO_VERSION)
GOCC ?= $(GOROOT)/bin/go
GOFMT ?= $(GOROOT)/bin/gofmt
- GO ?= GOROOT=$(GOROOT) GOPATH=$(GOPATH) $(GOCC)
+ GO ?= GOROOT=$(GOROOT) $(GOCC)
+endif
+
+# Use vendored dependencies if available. Otherwise try to download them.
+ifneq (,$(wildcard vendor))
+ DEPENDENCIES := $(shell find vendor/ -type f -iname '*.go')
+ GO := GO15VENDOREXPERIMENT=1 $(GO)
+else
+ GOPATH := $(CURDIR)/.build/gopath
+ ROOTPKG ?= github.com/prometheus/$(TARGET)
+ SELFLINK ?= $(GOPATH)/src/$(ROOTPKG)
+ DEPENDENCIES := dependencies-stamp
+ GO := GOPATH=$(GOPATH) $(GO)
endif
# Never honor GOBIN, should it be set at all.
@@ -68,11 +79,37 @@ unexport GOBIN
SUFFIX ?= $(GOOS)-$(GOARCH)
BINARY ?= $(TARGET)
ARCHIVE ?= $(TARGET)-$(VERSION).$(SUFFIX).tar.gz
-ROOTPKG ?= github.com/prometheus/$(TARGET)
-SELFLINK ?= $(GOPATH)/src/$(ROOTPKG)
default: $(BINARY)
+$(BINARY): $(GOCC) $(SRC) $(DEPENDENCIES) Makefile Makefile.COMMON
+ $(GO) build $(GOFLAGS) -o $@
+
+.PHONY: archive
+archive: $(ARCHIVE)
+
+$(ARCHIVE): $(BINARY)
+ tar -czf $@ $<
+
+.PHONY: tag
+tag:
+ git tag $(VERSION)
+ git push --tags
+
+.PHONY: test
+test: $(GOCC) $(DEPENDENCIES)
+ $(GO) test $$($(GO) list ./... | grep -v /vendor/)
+
+.PHONY: format
+format: $(GOCC)
+ find . -iname '*.go' | egrep -v "^\./\.build|./generated|\./vendor|\.(l|y)\.go" | xargs -n1 $(GOFMT) -w -s=true
+
+.PHONY: clean
+clean:
+ rm -rf $(BINARY) $(ARCHIVE) .build *-stamp
+
+
+
$(GOCC):
@echo Go version $(GO_VERSION) required but not found in PATH.
@echo About to download and install go$(GO_VERSION) to $(GOROOT)
@@ -89,32 +126,7 @@ $(SELFLINK):
mkdir -p $(dir $@)
ln -s $(CURDIR) $@
+# Download dependencies if project doesn't vendor them.
dependencies-stamp: $(GOCC) $(SRC) | $(SELFLINK)
$(GO) get -d
touch $@
-
-$(BINARY): $(GOCC) $(SRC) dependencies-stamp Makefile Makefile.COMMON
- $(GO) build $(GOFLAGS) -o $@
-
-.PHONY: archive
-archive: $(ARCHIVE)
-
-$(ARCHIVE): $(BINARY)
- tar -czf $@ $<
-
-.PHONY: tag
-tag:
- git tag $(VERSION)
- git push --tags
-
-.PHONY: test
-test: $(GOCC) dependencies-stamp
- $(GO) test ./...
-
-.PHONY: format
-format: $(GOCC)
- find . -iname '*.go' | egrep -v "^\./\.build|./generated|\./Godeps|\.(l|y)\.go" | xargs -n1 $(GOFMT) -w -s=true
-
-.PHONY: clean
-clean:
- rm -rf $(BINARY) $(ARCHIVE) .build *-stamp
diff --git a/vendor/github.com/Sirupsen/logrus/CHANGELOG.md b/vendor/github.com/Sirupsen/logrus/CHANGELOG.md
new file mode 100644
index 00000000..ecc84327
--- /dev/null
+++ b/vendor/github.com/Sirupsen/logrus/CHANGELOG.md
@@ -0,0 +1,55 @@
+# 0.9.0 (Unreleased)
+
+* logrus/text_formatter: don't emit empty msg
+* logrus/hooks/airbrake: move out of main repository
+* logrus/hooks/sentry: move out of main repository
+* logrus/hooks/papertrail: move out of main repository
+* logrus/hooks/bugsnag: move out of main repository
+
+# 0.8.7
+
+* logrus/core: fix possible race (#216)
+* logrus/doc: small typo fixes and doc improvements
+
+
+# 0.8.6
+
+* hooks/raven: allow passing an initialized client
+
+# 0.8.5
+
+* logrus/core: revert #208
+
+# 0.8.4
+
+* formatter/text: fix data race (#218)
+
+# 0.8.3
+
+* logrus/core: fix entry log level (#208)
+* logrus/core: improve performance of text formatter by 40%
+* logrus/core: expose `LevelHooks` type
+* logrus/core: add support for DragonflyBSD and NetBSD
+* formatter/text: print structs more verbosely
+
+# 0.8.2
+
+* logrus: fix more Fatal family functions
+
+# 0.8.1
+
+* logrus: fix not exiting on `Fatalf` and `Fatalln`
+
+# 0.8.0
+
+* logrus: defaults to stderr instead of stdout
+* hooks/sentry: add special field for `*http.Request`
+* formatter/text: ignore Windows for colors
+
+# 0.7.3
+
+* formatter/\*: allow configuration of timestamp layout
+
+# 0.7.2
+
+* formatter/text: Add configuration option for time format (#158)
diff --git a/vendor/github.com/Sirupsen/logrus/LICENSE b/vendor/github.com/Sirupsen/logrus/LICENSE
new file mode 100644
index 00000000..f090cb42
--- /dev/null
+++ b/vendor/github.com/Sirupsen/logrus/LICENSE
@@ -0,0 +1,21 @@
+The MIT License (MIT)
+
+Copyright (c) 2014 Simon Eskildsen
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+THE SOFTWARE.
diff --git a/vendor/github.com/Sirupsen/logrus/README.md b/vendor/github.com/Sirupsen/logrus/README.md
new file mode 100644
index 00000000..55d3a8d5
--- /dev/null
+++ b/vendor/github.com/Sirupsen/logrus/README.md
@@ -0,0 +1,365 @@
+# Logrus [](https://travis-ci.org/Sirupsen/logrus) [][godoc]
+
+Logrus is a structured logger for Go (golang), completely API compatible with
+the standard library logger. [Godoc][godoc]. **Please note the Logrus API is not
+yet stable (pre 1.0). Logrus itself is completely stable and has been used in
+many large deployments. The core API is unlikely to change much but please
+version control your Logrus to make sure you aren't fetching latest `master` on
+every build.**
+
+Nicely color-coded in development (when a TTY is attached, otherwise just
+plain text):
+
+
+
+With `log.Formatter = new(logrus.JSONFormatter)`, for easy parsing by logstash
+or Splunk:
+
+```json
+{"animal":"walrus","level":"info","msg":"A group of walrus emerges from the
+ocean","size":10,"time":"2014-03-10 19:57:38.562264131 -0400 EDT"}
+
+{"level":"warning","msg":"The group's number increased tremendously!",
+"number":122,"omg":true,"time":"2014-03-10 19:57:38.562471297 -0400 EDT"}
+
+{"animal":"walrus","level":"info","msg":"A giant walrus appears!",
+"size":10,"time":"2014-03-10 19:57:38.562500591 -0400 EDT"}
+
+{"animal":"walrus","level":"info","msg":"Tremendously sized cow enters the ocean.",
+"size":9,"time":"2014-03-10 19:57:38.562527896 -0400 EDT"}
+
+{"level":"fatal","msg":"The ice breaks!","number":100,"omg":true,
+"time":"2014-03-10 19:57:38.562543128 -0400 EDT"}
+```
+
+With the default `log.Formatter = new(log.TextFormatter)` when a TTY is not
+attached, the output is compatible with the
+[logfmt](http://godoc.org/github.com/kr/logfmt) format:
+
+```text
+time="2015-03-26T01:27:38-04:00" level=debug msg="Started observing beach" animal=walrus number=8
+time="2015-03-26T01:27:38-04:00" level=info msg="A group of walrus emerges from the ocean" animal=walrus size=10
+time="2015-03-26T01:27:38-04:00" level=warning msg="The group's number increased tremendously!" number=122 omg=true
+time="2015-03-26T01:27:38-04:00" level=debug msg="Temperature changes" temperature=-4
+time="2015-03-26T01:27:38-04:00" level=panic msg="It's over 9000!" animal=orca size=9009
+time="2015-03-26T01:27:38-04:00" level=fatal msg="The ice breaks!" err=&{0x2082280c0 map[animal:orca size:9009] 2015-03-26 01:27:38.441574009 -0400 EDT panic It's over 9000!} number=100 omg=true
+exit status 1
+```
+
+#### Example
+
+The simplest way to use Logrus is simply the package-level exported logger:
+
+```go
+package main
+
+import (
+ log "github.com/Sirupsen/logrus"
+)
+
+func main() {
+ log.WithFields(log.Fields{
+ "animal": "walrus",
+ }).Info("A walrus appears")
+}
+```
+
+Note that it's completely API-compatible with the stdlib logger, so you can
+replace your `log` imports everywhere with `log "github.com/Sirupsen/logrus"`
+and you'll now have the flexibility of Logrus. You can customize it all you
+want:
+
+```go
+package main
+
+import (
+ "os"
+ log "github.com/Sirupsen/logrus"
+)
+
+func init() {
+ // Log as JSON instead of the default ASCII formatter.
+ log.SetFormatter(&log.JSONFormatter{})
+
+ // Output to stderr instead of stdout, could also be a file.
+ log.SetOutput(os.Stderr)
+
+ // Only log the warning severity or above.
+ log.SetLevel(log.WarnLevel)
+}
+
+func main() {
+ log.WithFields(log.Fields{
+ "animal": "walrus",
+ "size": 10,
+ }).Info("A group of walrus emerges from the ocean")
+
+ log.WithFields(log.Fields{
+ "omg": true,
+ "number": 122,
+ }).Warn("The group's number increased tremendously!")
+
+ log.WithFields(log.Fields{
+ "omg": true,
+ "number": 100,
+ }).Fatal("The ice breaks!")
+
+ // A common pattern is to re-use fields between logging statements by re-using
+ // the logrus.Entry returned from WithFields()
+ contextLogger := log.WithFields(log.Fields{
+ "common": "this is a common field",
+ "other": "I also should be logged always",
+ })
+
+ contextLogger.Info("I'll be logged with common and other field")
+ contextLogger.Info("Me too")
+}
+```
+
+For more advanced usage such as logging to multiple locations from the same
+application, you can also create an instance of the `logrus` Logger:
+
+```go
+package main
+
+import (
+  "os"
+
+  "github.com/Sirupsen/logrus"
+)
+
+// Create a new instance of the logger. You can have any number of instances.
+var log = logrus.New()
+
+func main() {
+ // The API for setting attributes is a little different than the package level
+ // exported logger. See Godoc.
+ log.Out = os.Stderr
+
+ log.WithFields(logrus.Fields{
+ "animal": "walrus",
+ "size": 10,
+ }).Info("A group of walrus emerges from the ocean")
+}
+```
+
+#### Fields
+
+Logrus encourages careful, structured logging through logging fields instead of
+long, unparseable error messages. For example, instead of: `log.Fatalf("Failed
+to send event %s to topic %s with key %d")`, you should log the much more
+discoverable:
+
+```go
+log.WithFields(log.Fields{
+ "event": event,
+ "topic": topic,
+ "key": key,
+}).Fatal("Failed to send event")
+```
+
+We've found this API forces you to think about logging in a way that produces
+much more useful logging messages. We've been in countless situations where just
+a single added field to a log statement that was already there would've saved us
+hours. The `WithFields` call is optional.
+
+In general, with Logrus, using any of the `printf`-family functions should be
+seen as a hint that you should add a field. However, you can still use the
+`printf`-family functions with Logrus.
+
+#### Hooks
+
+You can add hooks for logging levels. For example, to send errors to an exception
+tracking service on `Error`, `Fatal` and `Panic`, send info to StatsD, or log to
+multiple places simultaneously, e.g. syslog.
+
+Logrus comes with [built-in hooks](hooks/). Add those, or your custom hook, in
+`init`:
+
+```go
+import (
+ log "github.com/Sirupsen/logrus"
+  "gopkg.in/gemnasium/logrus-airbrake-hook.v2" // the package is named "airbrake"
+ logrus_syslog "github.com/Sirupsen/logrus/hooks/syslog"
+ "log/syslog"
+)
+
+func init() {
+
+ // Use the Airbrake hook to report errors that have Error severity or above to
+ // an exception tracker. You can create custom hooks, see the Hooks section.
+ log.AddHook(airbrake.NewHook(123, "xyz", "production"))
+
+ hook, err := logrus_syslog.NewSyslogHook("udp", "localhost:514", syslog.LOG_INFO, "")
+ if err != nil {
+ log.Error("Unable to connect to local syslog daemon")
+ } else {
+ log.AddHook(hook)
+ }
+}
+```
+Note: the Syslog hook also supports connecting to a local syslog socket (e.g. "/dev/log", "/var/run/syslog" or "/var/run/log"). For details, please check the [syslog hook README](hooks/syslog/README.md).
+
+| Hook | Description |
+| ----- | ----------- |
+| [Airbrake](https://github.com/gemnasium/logrus-airbrake-hook) | Send errors to the Airbrake API V3. Uses the official [`gobrake`](https://github.com/airbrake/gobrake) behind the scenes. |
+| [Airbrake "legacy"](https://github.com/gemnasium/logrus-airbrake-legacy-hook) | Send errors to an exception tracking service compatible with the Airbrake API V2. Uses [`airbrake-go`](https://github.com/tobi/airbrake-go) behind the scenes. |
+| [Papertrail](https://github.com/polds/logrus-papertrail-hook) | Send errors to the [Papertrail](https://papertrailapp.com) hosted logging service via UDP. |
+| [Syslog](https://github.com/Sirupsen/logrus/blob/master/hooks/syslog/syslog.go) | Send errors to remote syslog server. Uses standard library `log/syslog` behind the scenes. |
+| [Bugsnag](https://github.com/Shopify/logrus-bugsnag/blob/master/bugsnag.go) | Send errors to the Bugsnag exception tracking service. |
+| [Sentry](https://github.com/evalphobia/logrus_sentry) | Send errors to the Sentry error logging and aggregation service. |
+| [Hiprus](https://github.com/nubo/hiprus) | Send errors to a channel in hipchat. |
+| [Logrusly](https://github.com/sebest/logrusly) | Send logs to [Loggly](https://www.loggly.com/) |
+| [Slackrus](https://github.com/johntdyer/slackrus) | Hook for Slack chat. |
+| [Journalhook](https://github.com/wercker/journalhook) | Hook for logging to `systemd-journald` |
+| [Graylog](https://github.com/gemnasium/logrus-graylog-hook) | Hook for logging to [Graylog](http://graylog2.org/) |
+| [Raygun](https://github.com/squirkle/logrus-raygun-hook) | Hook for logging to [Raygun.io](http://raygun.io/) |
+| [LFShook](https://github.com/rifflock/lfshook) | Hook for logging to the local filesystem |
+| [Honeybadger](https://github.com/agonzalezro/logrus_honeybadger) | Hook for sending exceptions to Honeybadger |
+| [Mail](https://github.com/zbindenren/logrus_mail) | Hook for sending exceptions via mail |
+| [Rollrus](https://github.com/heroku/rollrus) | Hook for sending errors to rollbar |
+| [Fluentd](https://github.com/evalphobia/logrus_fluent) | Hook for logging to fluentd |
+| [Mongodb](https://github.com/weekface/mgorus) | Hook for logging to mongodb |
+| [InfluxDB](https://github.com/Abramovic/logrus_influxdb) | Hook for logging to influxdb |
+| [Octokit](https://github.com/dorajistyle/logrus-octokit-hook) | Hook for logging to github via octokit |
+| [DeferPanic](https://github.com/deferpanic/dp-logrus) | Hook for logging to DeferPanic |
+
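+Writing your own hook only requires implementing the `Hook` interface, i.e. a
+`Levels() []Level` and a `Fire(*Entry) error` method. A minimal sketch; the
+`TagErrorsHook` name and the field it adds are illustrative, not part of Logrus:
+
+```go
+import (
+  log "github.com/Sirupsen/logrus"
+)
+
+// TagErrorsHook is a hypothetical hook that tags error-level entries.
+type TagErrorsHook struct{}
+
+// Levels reports the levels this hook fires for.
+func (h *TagErrorsHook) Levels() []log.Level {
+  return []log.Level{log.ErrorLevel, log.FatalLevel, log.PanicLevel}
+}
+
+// Fire is called with the entry before it is formatted and written.
+func (h *TagErrorsHook) Fire(entry *log.Entry) error {
+  entry.Data["alert"] = true // formatters will render this extra field
+  return nil
+}
+
+func init() {
+  log.AddHook(&TagErrorsHook{})
+}
+```
+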
+#### Level logging
+
+Logrus has six logging levels: Debug, Info, Warning, Error, Fatal and Panic.
+
+```go
+log.Debug("Useful debugging information.")
+log.Info("Something noteworthy happened!")
+log.Warn("You should probably take a look at this.")
+log.Error("Something failed but I'm not quitting.")
+// Calls os.Exit(1) after logging
+log.Fatal("Bye.")
+// Calls panic() after logging
+log.Panic("I'm bailing.")
+```
+
+You can set the logging level on a `Logger`; it will then only log entries with
+that severity or anything above it:
+
+```go
+// Will log anything that is info or above (warn, error, fatal, panic). Default.
+log.SetLevel(log.InfoLevel)
+```
+
+It may be useful to set `log.Level = logrus.DebugLevel` in a debug or verbose
+environment if your application has that.
+
+#### Entries
+
+Besides the fields added with `WithField` or `WithFields`, some fields are
+automatically added to all logging events (see the example below this list):
+
+1. `time`. The timestamp when the entry was created.
+2. `msg`. The logging message passed to `{Info,Warn,Error,Fatal,Panic}` after
+   the `WithFields` call. E.g. `Failed to send event.`
+3. `level`. The logging level. E.g. `info`.
+
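+For example, assuming the default `TextFormatter` with no TTY attached (the
+timestamp is for illustration only), one user field plus the automatic fields
+come out as logfmt:
+
+```go
+log.WithFields(log.Fields{"event": "send"}).Info("Failed to send event.")
+// Output:
+//   time="2015-09-07T08:48:33Z" level=info msg="Failed to send event." event=send
+```
+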
+#### Environments
+
+Logrus has no notion of environment.
+
+If you wish for hooks and formatters to only be used in specific environments,
+you should handle that yourself. For example, if your application has a global
+variable `Environment`, which is a string representation of the environment you
+could do:
+
+```go
+import (
+ log "github.com/Sirupsen/logrus"
+)
+
+func init() {
+ // do something here to set environment depending on an environment variable
+ // or command-line flag
+ if Environment == "production" {
+ log.SetFormatter(&log.JSONFormatter{})
+ } else {
+ // The TextFormatter is default, you don't actually have to do this.
+ log.SetFormatter(&log.TextFormatter{})
+ }
+}
+```
+
+This configuration is how `logrus` was intended to be used, but JSON in
+production is mostly only useful if you do log aggregation with tools like
+Splunk or Logstash.
+
+#### Formatters
+
+The built-in logging formatters are:
+
+* `logrus.TextFormatter`. Logs the event in colors if stdout is a tty, otherwise
+ without colors.
+ * *Note:* to force colored output when there is no TTY, set the `ForceColors`
+ field to `true`. To force no colored output even if there is a TTY set the
+ `DisableColors` field to `true`
+* `logrus.JSONFormatter`. Logs fields as JSON.
+* `logrus/formatters/logstash.LogstashFormatter`. Logs fields as [Logstash](http://logstash.net) Events.
+
+ ```go
+ logrus.SetFormatter(&logstash.LogstashFormatter{Type: "application_name"})
+ ```
+
+Third party logging formatters:
+
+* [`prefixed`](https://github.com/x-cray/logrus-prefixed-formatter). Displays log entry source along with alternative layout.
+* [`zalgo`](https://github.com/aybabtme/logzalgo). Invoking the P͉̫o̳̼̊w̖͈̰͎e̬͔̭͂r͚̼̹̲ ̫͓͉̳͈ō̠͕͖̚f̝͍̠ ͕̲̞͖͑Z̖̫̤̫ͪa͉̬͈̗l͖͎g̳̥o̰̥̅!̣͔̲̻͊̄ ̙̘̦̹̦.
+
+You can define your formatter by implementing the `Formatter` interface,
+requiring a `Format` method. `Format` takes an `*Entry`. `entry.Data` is a
+`Fields` type (`map[string]interface{}`) with all your fields as well as the
+default ones (see Entries section above):
+
+```go
+type MyJSONFormatter struct {
+}
+
+log.SetFormatter(new(MyJSONFormatter))
+
+func (f *MyJSONFormatter) Format(entry *Entry) ([]byte, error) {
+ // Note this doesn't include Time, Level and Message which are available on
+  // the Entry. Consult `godoc` for information about those fields or read the
+ // source of the official loggers.
+ serialized, err := json.Marshal(entry.Data)
+ if err != nil {
+ return nil, fmt.Errorf("Failed to marshal fields to JSON, %v", err)
+ }
+ return append(serialized, '\n'), nil
+}
+```
+
+#### Logger as an `io.Writer`
+
+Logrus can be transformed into an `io.Writer`. That writer is the end of an `io.Pipe` and it is your responsibility to close it.
+
+```go
+w := logger.Writer()
+defer w.Close()
+
+srv := http.Server{
+ // create a stdlib log.Logger that writes to
+ // logrus.Logger.
+ ErrorLog: log.New(w, "", 0),
+}
+```
+
+Each line written to that writer will be printed the usual way, using formatters
+and hooks. The level for those entries is `info`.
+
+#### Rotation
+
+Log rotation is not provided with Logrus. Log rotation should be done by an
+external program (like `logrotate(8)`) that can compress and delete old log
+entries. It should not be a feature of the application-level logger.
+
+#### Tools
+
+| Tool | Description |
+| ---- | ----------- |
+|[Logrus Mate](https://github.com/gogap/logrus_mate)|Logrus Mate is a tool for Logrus that manages loggers. You can set each logger's level, hooks and formatter from a config file, so the logger is generated with a different configuration in each environment.|
+
+[godoc]: https://godoc.org/github.com/Sirupsen/logrus
diff --git a/vendor/github.com/Sirupsen/logrus/doc.go b/vendor/github.com/Sirupsen/logrus/doc.go
new file mode 100644
index 00000000..dddd5f87
--- /dev/null
+++ b/vendor/github.com/Sirupsen/logrus/doc.go
@@ -0,0 +1,26 @@
+/*
+Package logrus is a structured logger for Go, completely API compatible with the standard library logger.
+
+
+The simplest way to use Logrus is simply the package-level exported logger:
+
+ package main
+
+ import (
+ log "github.com/Sirupsen/logrus"
+ )
+
+ func main() {
+ log.WithFields(log.Fields{
+ "animal": "walrus",
+ "number": 1,
+ "size": 10,
+ }).Info("A walrus appears")
+ }
+
+Output:
+ time="2015-09-07T08:48:33Z" level=info msg="A walrus appears" animal=walrus number=1 size=10
+
+For a full guide visit https://github.com/Sirupsen/logrus
+*/
+package logrus
diff --git a/vendor/github.com/Sirupsen/logrus/entry.go b/vendor/github.com/Sirupsen/logrus/entry.go
new file mode 100644
index 00000000..9ae900bc
--- /dev/null
+++ b/vendor/github.com/Sirupsen/logrus/entry.go
@@ -0,0 +1,264 @@
+package logrus
+
+import (
+ "bytes"
+ "fmt"
+ "io"
+ "os"
+ "time"
+)
+
+// Defines the key when adding errors using WithError.
+var ErrorKey = "error"
+
+// An entry is the final or intermediate Logrus logging entry. It contains all
+// the fields passed with WithField{,s}. It's finally logged when Debug, Info,
+// Warn, Error, Fatal or Panic is called on it. These objects can be reused and
+// passed around as much as you wish to avoid field duplication.
+type Entry struct {
+ Logger *Logger
+
+ // Contains all the fields set by the user.
+ Data Fields
+
+ // Time at which the log entry was created
+ Time time.Time
+
+ // Level the log entry was logged at: Debug, Info, Warn, Error, Fatal or Panic
+ Level Level
+
+ // Message passed to Debug, Info, Warn, Error, Fatal or Panic
+ Message string
+}
+
+func NewEntry(logger *Logger) *Entry {
+ return &Entry{
+ Logger: logger,
+ // Default is three fields, give a little extra room
+ Data: make(Fields, 5),
+ }
+}
+
+// Returns a reader for the entry, which is a proxy to the formatter.
+func (entry *Entry) Reader() (*bytes.Buffer, error) {
+ serialized, err := entry.Logger.Formatter.Format(entry)
+ return bytes.NewBuffer(serialized), err
+}
+
+// Returns the string representation from the reader and ultimately the
+// formatter.
+func (entry *Entry) String() (string, error) {
+ reader, err := entry.Reader()
+ if err != nil {
+ return "", err
+ }
+
+ return reader.String(), err
+}
+
+// Add an error as a single field (using the key defined in ErrorKey) to the Entry.
+func (entry *Entry) WithError(err error) *Entry {
+ return entry.WithField(ErrorKey, err)
+}
+
+// Add a single field to the Entry.
+func (entry *Entry) WithField(key string, value interface{}) *Entry {
+ return entry.WithFields(Fields{key: value})
+}
+
+// Add a map of fields to the Entry.
+func (entry *Entry) WithFields(fields Fields) *Entry {
+ data := Fields{}
+ for k, v := range entry.Data {
+ data[k] = v
+ }
+ for k, v := range fields {
+ data[k] = v
+ }
+ return &Entry{Logger: entry.Logger, Data: data}
+}
+
+// This function is not declared with a pointer value because otherwise
+// race conditions will occur when using multiple goroutines
+func (entry Entry) log(level Level, msg string) {
+ entry.Time = time.Now()
+ entry.Level = level
+ entry.Message = msg
+
+ if err := entry.Logger.Hooks.Fire(level, &entry); err != nil {
+ entry.Logger.mu.Lock()
+ fmt.Fprintf(os.Stderr, "Failed to fire hook: %v\n", err)
+ entry.Logger.mu.Unlock()
+ }
+
+ reader, err := entry.Reader()
+ if err != nil {
+ entry.Logger.mu.Lock()
+ fmt.Fprintf(os.Stderr, "Failed to obtain reader, %v\n", err)
+ entry.Logger.mu.Unlock()
+ }
+
+ entry.Logger.mu.Lock()
+ defer entry.Logger.mu.Unlock()
+
+ _, err = io.Copy(entry.Logger.Out, reader)
+ if err != nil {
+ fmt.Fprintf(os.Stderr, "Failed to write to log, %v\n", err)
+ }
+
+ // To avoid Entry#log() returning a value that only would make sense for
+ // panic() to use in Entry#Panic(), we avoid the allocation by checking
+ // directly here.
+ if level <= PanicLevel {
+ panic(&entry)
+ }
+}
+
+func (entry *Entry) Debug(args ...interface{}) {
+ if entry.Logger.Level >= DebugLevel {
+ entry.log(DebugLevel, fmt.Sprint(args...))
+ }
+}
+
+func (entry *Entry) Print(args ...interface{}) {
+ entry.Info(args...)
+}
+
+func (entry *Entry) Info(args ...interface{}) {
+ if entry.Logger.Level >= InfoLevel {
+ entry.log(InfoLevel, fmt.Sprint(args...))
+ }
+}
+
+func (entry *Entry) Warn(args ...interface{}) {
+ if entry.Logger.Level >= WarnLevel {
+ entry.log(WarnLevel, fmt.Sprint(args...))
+ }
+}
+
+func (entry *Entry) Warning(args ...interface{}) {
+ entry.Warn(args...)
+}
+
+func (entry *Entry) Error(args ...interface{}) {
+ if entry.Logger.Level >= ErrorLevel {
+ entry.log(ErrorLevel, fmt.Sprint(args...))
+ }
+}
+
+func (entry *Entry) Fatal(args ...interface{}) {
+ if entry.Logger.Level >= FatalLevel {
+ entry.log(FatalLevel, fmt.Sprint(args...))
+ }
+ os.Exit(1)
+}
+
+func (entry *Entry) Panic(args ...interface{}) {
+ if entry.Logger.Level >= PanicLevel {
+ entry.log(PanicLevel, fmt.Sprint(args...))
+ }
+ panic(fmt.Sprint(args...))
+}
+
+// Entry Printf family functions
+
+func (entry *Entry) Debugf(format string, args ...interface{}) {
+ if entry.Logger.Level >= DebugLevel {
+ entry.Debug(fmt.Sprintf(format, args...))
+ }
+}
+
+func (entry *Entry) Infof(format string, args ...interface{}) {
+ if entry.Logger.Level >= InfoLevel {
+ entry.Info(fmt.Sprintf(format, args...))
+ }
+}
+
+func (entry *Entry) Printf(format string, args ...interface{}) {
+ entry.Infof(format, args...)
+}
+
+func (entry *Entry) Warnf(format string, args ...interface{}) {
+ if entry.Logger.Level >= WarnLevel {
+ entry.Warn(fmt.Sprintf(format, args...))
+ }
+}
+
+func (entry *Entry) Warningf(format string, args ...interface{}) {
+ entry.Warnf(format, args...)
+}
+
+func (entry *Entry) Errorf(format string, args ...interface{}) {
+ if entry.Logger.Level >= ErrorLevel {
+ entry.Error(fmt.Sprintf(format, args...))
+ }
+}
+
+func (entry *Entry) Fatalf(format string, args ...interface{}) {
+ if entry.Logger.Level >= FatalLevel {
+ entry.Fatal(fmt.Sprintf(format, args...))
+ }
+ os.Exit(1)
+}
+
+func (entry *Entry) Panicf(format string, args ...interface{}) {
+ if entry.Logger.Level >= PanicLevel {
+ entry.Panic(fmt.Sprintf(format, args...))
+ }
+}
+
+// Entry Println family functions
+
+func (entry *Entry) Debugln(args ...interface{}) {
+ if entry.Logger.Level >= DebugLevel {
+ entry.Debug(entry.sprintlnn(args...))
+ }
+}
+
+func (entry *Entry) Infoln(args ...interface{}) {
+ if entry.Logger.Level >= InfoLevel {
+ entry.Info(entry.sprintlnn(args...))
+ }
+}
+
+func (entry *Entry) Println(args ...interface{}) {
+ entry.Infoln(args...)
+}
+
+func (entry *Entry) Warnln(args ...interface{}) {
+ if entry.Logger.Level >= WarnLevel {
+ entry.Warn(entry.sprintlnn(args...))
+ }
+}
+
+func (entry *Entry) Warningln(args ...interface{}) {
+ entry.Warnln(args...)
+}
+
+func (entry *Entry) Errorln(args ...interface{}) {
+ if entry.Logger.Level >= ErrorLevel {
+ entry.Error(entry.sprintlnn(args...))
+ }
+}
+
+func (entry *Entry) Fatalln(args ...interface{}) {
+ if entry.Logger.Level >= FatalLevel {
+ entry.Fatal(entry.sprintlnn(args...))
+ }
+ os.Exit(1)
+}
+
+func (entry *Entry) Panicln(args ...interface{}) {
+ if entry.Logger.Level >= PanicLevel {
+ entry.Panic(entry.sprintlnn(args...))
+ }
+}
+
+// Sprintlnn => Sprint no newline. This is to get the behavior of
+// fmt.Sprintln, where spaces are always added between operands, regardless of
+// their type. Instead of vendoring the Sprintln implementation to spare a
+// string allocation, we do the simplest thing.
+func (entry *Entry) sprintlnn(args ...interface{}) string {
+ msg := fmt.Sprintln(args...)
+ return msg[:len(msg)-1]
+}
diff --git a/vendor/github.com/Sirupsen/logrus/exported.go b/vendor/github.com/Sirupsen/logrus/exported.go
new file mode 100644
index 00000000..9a0120ac
--- /dev/null
+++ b/vendor/github.com/Sirupsen/logrus/exported.go
@@ -0,0 +1,193 @@
+package logrus
+
+import (
+ "io"
+)
+
+var (
+ // std is the name of the standard logger in stdlib `log`
+ std = New()
+)
+
+func StandardLogger() *Logger {
+ return std
+}
+
+// SetOutput sets the standard logger output.
+func SetOutput(out io.Writer) {
+ std.mu.Lock()
+ defer std.mu.Unlock()
+ std.Out = out
+}
+
+// SetFormatter sets the standard logger formatter.
+func SetFormatter(formatter Formatter) {
+ std.mu.Lock()
+ defer std.mu.Unlock()
+ std.Formatter = formatter
+}
+
+// SetLevel sets the standard logger level.
+func SetLevel(level Level) {
+ std.mu.Lock()
+ defer std.mu.Unlock()
+ std.Level = level
+}
+
+// GetLevel returns the standard logger level.
+func GetLevel() Level {
+ std.mu.Lock()
+ defer std.mu.Unlock()
+ return std.Level
+}
+
+// AddHook adds a hook to the standard logger hooks.
+func AddHook(hook Hook) {
+ std.mu.Lock()
+ defer std.mu.Unlock()
+ std.Hooks.Add(hook)
+}
+
+// WithError creates an entry from the standard logger and adds an error to it, using the value defined in ErrorKey as key.
+func WithError(err error) *Entry {
+ return std.WithField(ErrorKey, err)
+}
+
+// WithField creates an entry from the standard logger and adds a field to
+// it. If you want multiple fields, use `WithFields`.
+//
+// Note that it doesn't log until you call Debug, Print, Info, Warn, Fatal
+// or Panic on the Entry it returns.
+func WithField(key string, value interface{}) *Entry {
+ return std.WithField(key, value)
+}
+
+// WithFields creates an entry from the standard logger and adds multiple
+// fields to it. This is simply a helper for `WithField`, invoking it
+// once for each field.
+//
+// Note that it doesn't log until you call Debug, Print, Info, Warn, Fatal
+// or Panic on the Entry it returns.
+func WithFields(fields Fields) *Entry {
+ return std.WithFields(fields)
+}
+
+// Debug logs a message at level Debug on the standard logger.
+func Debug(args ...interface{}) {
+ std.Debug(args...)
+}
+
+// Print logs a message at level Info on the standard logger.
+func Print(args ...interface{}) {
+ std.Print(args...)
+}
+
+// Info logs a message at level Info on the standard logger.
+func Info(args ...interface{}) {
+ std.Info(args...)
+}
+
+// Warn logs a message at level Warn on the standard logger.
+func Warn(args ...interface{}) {
+ std.Warn(args...)
+}
+
+// Warning logs a message at level Warn on the standard logger.
+func Warning(args ...interface{}) {
+ std.Warning(args...)
+}
+
+// Error logs a message at level Error on the standard logger.
+func Error(args ...interface{}) {
+ std.Error(args...)
+}
+
+// Panic logs a message at level Panic on the standard logger.
+func Panic(args ...interface{}) {
+ std.Panic(args...)
+}
+
+// Fatal logs a message at level Fatal on the standard logger.
+func Fatal(args ...interface{}) {
+ std.Fatal(args...)
+}
+
+// Debugf logs a message at level Debug on the standard logger.
+func Debugf(format string, args ...interface{}) {
+ std.Debugf(format, args...)
+}
+
+// Printf logs a message at level Info on the standard logger.
+func Printf(format string, args ...interface{}) {
+ std.Printf(format, args...)
+}
+
+// Infof logs a message at level Info on the standard logger.
+func Infof(format string, args ...interface{}) {
+ std.Infof(format, args...)
+}
+
+// Warnf logs a message at level Warn on the standard logger.
+func Warnf(format string, args ...interface{}) {
+ std.Warnf(format, args...)
+}
+
+// Warningf logs a message at level Warn on the standard logger.
+func Warningf(format string, args ...interface{}) {
+ std.Warningf(format, args...)
+}
+
+// Errorf logs a message at level Error on the standard logger.
+func Errorf(format string, args ...interface{}) {
+ std.Errorf(format, args...)
+}
+
+// Panicf logs a message at level Panic on the standard logger.
+func Panicf(format string, args ...interface{}) {
+ std.Panicf(format, args...)
+}
+
+// Fatalf logs a message at level Fatal on the standard logger.
+func Fatalf(format string, args ...interface{}) {
+ std.Fatalf(format, args...)
+}
+
+// Debugln logs a message at level Debug on the standard logger.
+func Debugln(args ...interface{}) {
+ std.Debugln(args...)
+}
+
+// Println logs a message at level Info on the standard logger.
+func Println(args ...interface{}) {
+ std.Println(args...)
+}
+
+// Infoln logs a message at level Info on the standard logger.
+func Infoln(args ...interface{}) {
+ std.Infoln(args...)
+}
+
+// Warnln logs a message at level Warn on the standard logger.
+func Warnln(args ...interface{}) {
+ std.Warnln(args...)
+}
+
+// Warningln logs a message at level Warn on the standard logger.
+func Warningln(args ...interface{}) {
+ std.Warningln(args...)
+}
+
+// Errorln logs a message at level Error on the standard logger.
+func Errorln(args ...interface{}) {
+ std.Errorln(args...)
+}
+
+// Panicln logs a message at level Panic on the standard logger.
+func Panicln(args ...interface{}) {
+ std.Panicln(args...)
+}
+
+// Fatalln logs a message at level Fatal on the standard logger.
+func Fatalln(args ...interface{}) {
+ std.Fatalln(args...)
+}
diff --git a/vendor/github.com/Sirupsen/logrus/formatter.go b/vendor/github.com/Sirupsen/logrus/formatter.go
new file mode 100644
index 00000000..104d689f
--- /dev/null
+++ b/vendor/github.com/Sirupsen/logrus/formatter.go
@@ -0,0 +1,48 @@
+package logrus
+
+import "time"
+
+const DefaultTimestampFormat = time.RFC3339
+
+// The Formatter interface is used to implement a custom Formatter. It takes an
+// `Entry`. It exposes all the fields, including the default ones:
+//
+// * `entry.Data["msg"]`. The message passed from Info, Warn, Error, ...
+// * `entry.Data["time"]`. The timestamp.
+// * `entry.Data["level"]`. The level the entry was logged at.
+//
+// Any additional fields added with `WithField` or `WithFields` are also in
+// `entry.Data`. Format is expected to return an array of bytes which are then
+// logged to `logger.Out`.
+type Formatter interface {
+ Format(*Entry) ([]byte, error)
+}
+
+// This is to not silently overwrite `time`, `msg` and `level` fields when
+// dumping it. If this code wasn't there doing:
+//
+// logrus.WithField("level", 1).Info("hello")
+//
+// Would just silently drop the user-provided level. Instead, with this code
+// it'll be logged as:
+//
+// {"level": "info", "fields.level": 1, "msg": "hello", "time": "..."}
+//
+// It's not exported because it's still using Data in an opinionated way. It's to
+// avoid code duplication between the two default formatters.
+func prefixFieldClashes(data Fields) {
+ _, ok := data["time"]
+ if ok {
+ data["fields.time"] = data["time"]
+ }
+
+ _, ok = data["msg"]
+ if ok {
+ data["fields.msg"] = data["msg"]
+ }
+
+ _, ok = data["level"]
+ if ok {
+ data["fields.level"] = data["level"]
+ }
+}
diff --git a/vendor/github.com/Sirupsen/logrus/hooks.go b/vendor/github.com/Sirupsen/logrus/hooks.go
new file mode 100644
index 00000000..3f151cdc
--- /dev/null
+++ b/vendor/github.com/Sirupsen/logrus/hooks.go
@@ -0,0 +1,34 @@
+package logrus
+
+// A hook to be fired when logging on the logging levels returned from
+// `Levels()` on your implementation of the interface. Note that this is not
+// fired in a goroutine or a channel with workers; you should handle such
+// functionality yourself if your call is non-blocking and you don't wish for
+// the logging calls for levels returned from `Levels()` to block.
+type Hook interface {
+ Levels() []Level
+ Fire(*Entry) error
+}
+
+// Internal type for storing the hooks on a logger instance.
+type LevelHooks map[Level][]Hook
+
+// Add a hook to an instance of logger. This is called with
+// `log.Hooks.Add(new(MyHook))` where `MyHook` implements the `Hook` interface.
+func (hooks LevelHooks) Add(hook Hook) {
+ for _, level := range hook.Levels() {
+ hooks[level] = append(hooks[level], hook)
+ }
+}
+
+// Fire all the hooks for the passed level. Used by `entry.log` to fire
+// appropriate hooks for a log entry.
+func (hooks LevelHooks) Fire(level Level, entry *Entry) error {
+ for _, hook := range hooks[level] {
+ if err := hook.Fire(entry); err != nil {
+ return err
+ }
+ }
+
+ return nil
+}
diff --git a/vendor/github.com/Sirupsen/logrus/json_formatter.go b/vendor/github.com/Sirupsen/logrus/json_formatter.go
new file mode 100644
index 00000000..2ad6dc5c
--- /dev/null
+++ b/vendor/github.com/Sirupsen/logrus/json_formatter.go
@@ -0,0 +1,41 @@
+package logrus
+
+import (
+ "encoding/json"
+ "fmt"
+)
+
+type JSONFormatter struct {
+ // TimestampFormat sets the format used for marshaling timestamps.
+ TimestampFormat string
+}
+
+func (f *JSONFormatter) Format(entry *Entry) ([]byte, error) {
+ data := make(Fields, len(entry.Data)+3)
+ for k, v := range entry.Data {
+ switch v := v.(type) {
+ case error:
+ // Otherwise errors are ignored by `encoding/json`
+ // https://github.com/Sirupsen/logrus/issues/137
+ data[k] = v.Error()
+ default:
+ data[k] = v
+ }
+ }
+ prefixFieldClashes(data)
+
+ timestampFormat := f.TimestampFormat
+ if timestampFormat == "" {
+ timestampFormat = DefaultTimestampFormat
+ }
+
+ data["time"] = entry.Time.Format(timestampFormat)
+ data["msg"] = entry.Message
+ data["level"] = entry.Level.String()
+
+ serialized, err := json.Marshal(data)
+ if err != nil {
+ return nil, fmt.Errorf("Failed to marshal fields to JSON, %v", err)
+ }
+ return append(serialized, '\n'), nil
+}
diff --git a/vendor/github.com/Sirupsen/logrus/logger.go b/vendor/github.com/Sirupsen/logrus/logger.go
new file mode 100644
index 00000000..2fdb2317
--- /dev/null
+++ b/vendor/github.com/Sirupsen/logrus/logger.go
@@ -0,0 +1,212 @@
+package logrus
+
+import (
+ "io"
+ "os"
+ "sync"
+)
+
+type Logger struct {
+ // The logs are `io.Copy`'d to this in a mutex. It's common to set this to a
+ // file, or leave it default which is `os.Stderr`. You can also set this to
+	// something more adventurous, such as logging to Kafka.
+ Out io.Writer
+ // Hooks for the logger instance. These allow firing events based on logging
+ // levels and log entries. For example, to send errors to an error tracking
+ // service, log to StatsD or dump the core on fatal errors.
+ Hooks LevelHooks
+	// All log entries pass through the formatter before being logged to Out. The
+ // included formatters are `TextFormatter` and `JSONFormatter` for which
+ // TextFormatter is the default. In development (when a TTY is attached) it
+ // logs with colors, but to a file it wouldn't. You can easily implement your
+ // own that implements the `Formatter` interface, see the `README` or included
+ // formatters for examples.
+ Formatter Formatter
+ // The logging level the logger should log at. This is typically (and defaults
+ // to) `logrus.Info`, which allows Info(), Warn(), Error() and Fatal() to be
+	// logged. `logrus.Debug` is useful in a debugging or verbose environment.
+ Level Level
+ // Used to sync writing to the log.
+ mu sync.Mutex
+}
+
+// Creates a new logger. Configuration should be set by changing `Formatter`,
+// `Out` and `Hooks` directly on the default logger instance. You can also just
+// instantiate your own:
+//
+// var log = &Logger{
+// Out: os.Stderr,
+// Formatter: new(JSONFormatter),
+// Hooks: make(LevelHooks),
+// Level: logrus.DebugLevel,
+// }
+//
+// It's recommended to make this a global instance called `log`.
+func New() *Logger {
+ return &Logger{
+ Out: os.Stderr,
+ Formatter: new(TextFormatter),
+ Hooks: make(LevelHooks),
+ Level: InfoLevel,
+ }
+}
+
+// Adds a field to the log entry; note that it doesn't log until you call
+// Debug, Print, Info, Warn, Fatal or Panic. It only creates a log entry.
+// If you want multiple fields, use `WithFields`.
+func (logger *Logger) WithField(key string, value interface{}) *Entry {
+ return NewEntry(logger).WithField(key, value)
+}
+
+// Adds a struct of fields to the log entry. All it does is call `WithField` for
+// each `Field`.
+func (logger *Logger) WithFields(fields Fields) *Entry {
+ return NewEntry(logger).WithFields(fields)
+}
+
+// Add an error as a single field to the log entry. All it does is call
+// `WithError` for the given `error`.
+func (logger *Logger) WithError(err error) *Entry {
+ return NewEntry(logger).WithError(err)
+}
+
+func (logger *Logger) Debugf(format string, args ...interface{}) {
+ if logger.Level >= DebugLevel {
+ NewEntry(logger).Debugf(format, args...)
+ }
+}
+
+func (logger *Logger) Infof(format string, args ...interface{}) {
+ if logger.Level >= InfoLevel {
+ NewEntry(logger).Infof(format, args...)
+ }
+}
+
+func (logger *Logger) Printf(format string, args ...interface{}) {
+ NewEntry(logger).Printf(format, args...)
+}
+
+func (logger *Logger) Warnf(format string, args ...interface{}) {
+ if logger.Level >= WarnLevel {
+ NewEntry(logger).Warnf(format, args...)
+ }
+}
+
+func (logger *Logger) Warningf(format string, args ...interface{}) {
+ if logger.Level >= WarnLevel {
+ NewEntry(logger).Warnf(format, args...)
+ }
+}
+
+func (logger *Logger) Errorf(format string, args ...interface{}) {
+ if logger.Level >= ErrorLevel {
+ NewEntry(logger).Errorf(format, args...)
+ }
+}
+
+func (logger *Logger) Fatalf(format string, args ...interface{}) {
+ if logger.Level >= FatalLevel {
+ NewEntry(logger).Fatalf(format, args...)
+ }
+ os.Exit(1)
+}
+
+func (logger *Logger) Panicf(format string, args ...interface{}) {
+ if logger.Level >= PanicLevel {
+ NewEntry(logger).Panicf(format, args...)
+ }
+}
+
+func (logger *Logger) Debug(args ...interface{}) {
+ if logger.Level >= DebugLevel {
+ NewEntry(logger).Debug(args...)
+ }
+}
+
+func (logger *Logger) Info(args ...interface{}) {
+ if logger.Level >= InfoLevel {
+ NewEntry(logger).Info(args...)
+ }
+}
+
+func (logger *Logger) Print(args ...interface{}) {
+ NewEntry(logger).Info(args...)
+}
+
+func (logger *Logger) Warn(args ...interface{}) {
+ if logger.Level >= WarnLevel {
+ NewEntry(logger).Warn(args...)
+ }
+}
+
+func (logger *Logger) Warning(args ...interface{}) {
+ if logger.Level >= WarnLevel {
+ NewEntry(logger).Warn(args...)
+ }
+}
+
+func (logger *Logger) Error(args ...interface{}) {
+ if logger.Level >= ErrorLevel {
+ NewEntry(logger).Error(args...)
+ }
+}
+
+func (logger *Logger) Fatal(args ...interface{}) {
+ if logger.Level >= FatalLevel {
+ NewEntry(logger).Fatal(args...)
+ }
+ os.Exit(1)
+}
+
+func (logger *Logger) Panic(args ...interface{}) {
+ if logger.Level >= PanicLevel {
+ NewEntry(logger).Panic(args...)
+ }
+}
+
+func (logger *Logger) Debugln(args ...interface{}) {
+ if logger.Level >= DebugLevel {
+ NewEntry(logger).Debugln(args...)
+ }
+}
+
+func (logger *Logger) Infoln(args ...interface{}) {
+ if logger.Level >= InfoLevel {
+ NewEntry(logger).Infoln(args...)
+ }
+}
+
+func (logger *Logger) Println(args ...interface{}) {
+ NewEntry(logger).Println(args...)
+}
+
+func (logger *Logger) Warnln(args ...interface{}) {
+ if logger.Level >= WarnLevel {
+ NewEntry(logger).Warnln(args...)
+ }
+}
+
+func (logger *Logger) Warningln(args ...interface{}) {
+ if logger.Level >= WarnLevel {
+ NewEntry(logger).Warnln(args...)
+ }
+}
+
+func (logger *Logger) Errorln(args ...interface{}) {
+ if logger.Level >= ErrorLevel {
+ NewEntry(logger).Errorln(args...)
+ }
+}
+
+func (logger *Logger) Fatalln(args ...interface{}) {
+ if logger.Level >= FatalLevel {
+ NewEntry(logger).Fatalln(args...)
+ }
+ os.Exit(1)
+}
+
+func (logger *Logger) Panicln(args ...interface{}) {
+ if logger.Level >= PanicLevel {
+ NewEntry(logger).Panicln(args...)
+ }
+}
diff --git a/vendor/github.com/Sirupsen/logrus/logrus.go b/vendor/github.com/Sirupsen/logrus/logrus.go
new file mode 100644
index 00000000..0c09fbc2
--- /dev/null
+++ b/vendor/github.com/Sirupsen/logrus/logrus.go
@@ -0,0 +1,98 @@
+package logrus
+
+import (
+ "fmt"
+ "log"
+)
+
+// Fields type, used to pass to `WithFields`.
+type Fields map[string]interface{}
+
+// Level type
+type Level uint8
+
+// Convert the Level to a string. E.g. PanicLevel becomes "panic".
+func (level Level) String() string {
+ switch level {
+ case DebugLevel:
+ return "debug"
+ case InfoLevel:
+ return "info"
+ case WarnLevel:
+ return "warning"
+ case ErrorLevel:
+ return "error"
+ case FatalLevel:
+ return "fatal"
+ case PanicLevel:
+ return "panic"
+ }
+
+ return "unknown"
+}
+
+// ParseLevel takes a string level and returns the Logrus log level constant.
+func ParseLevel(lvl string) (Level, error) {
+ switch lvl {
+ case "panic":
+ return PanicLevel, nil
+ case "fatal":
+ return FatalLevel, nil
+ case "error":
+ return ErrorLevel, nil
+ case "warn", "warning":
+ return WarnLevel, nil
+ case "info":
+ return InfoLevel, nil
+ case "debug":
+ return DebugLevel, nil
+ }
+
+ var l Level
+ return l, fmt.Errorf("not a valid logrus Level: %q", lvl)
+}
+
+// These are the different logging levels. You can set the logging level to log
+// on your instance of logger, obtained with `logrus.New()`.
+const (
+ // PanicLevel level, highest level of severity. Logs and then calls panic with the
+ // message passed to Debug, Info, ...
+ PanicLevel Level = iota
+ // FatalLevel level. Logs and then calls `os.Exit(1)`. It will exit even if the
+ // logging level is set to Panic.
+ FatalLevel
+ // ErrorLevel level. Logs. Used for errors that should definitely be noted.
+ // Commonly used for hooks to send errors to an error tracking service.
+ ErrorLevel
+ // WarnLevel level. Non-critical entries that deserve eyes.
+ WarnLevel
+ // InfoLevel level. General operational entries about what's going on inside the
+ // application.
+ InfoLevel
+ // DebugLevel level. Usually only enabled when debugging. Very verbose logging.
+ DebugLevel
+)
+
+// Won't compile if StdLogger can't be realized by a log.Logger
+var (
+ _ StdLogger = &log.Logger{}
+ _ StdLogger = &Entry{}
+ _ StdLogger = &Logger{}
+)
+
+// StdLogger is what your logrus-enabled library should take; that way
+// it'll accept both a stdlib logger and a logrus logger. There's no standard
+// interface; this is the closest we get, unfortunately.
+type StdLogger interface {
+ Print(...interface{})
+ Printf(string, ...interface{})
+ Println(...interface{})
+
+ Fatal(...interface{})
+ Fatalf(string, ...interface{})
+ Fatalln(...interface{})
+
+ Panic(...interface{})
+ Panicf(string, ...interface{})
+ Panicln(...interface{})
+}
diff --git a/vendor/github.com/Sirupsen/logrus/terminal_bsd.go b/vendor/github.com/Sirupsen/logrus/terminal_bsd.go
new file mode 100644
index 00000000..71f8d67a
--- /dev/null
+++ b/vendor/github.com/Sirupsen/logrus/terminal_bsd.go
@@ -0,0 +1,9 @@
+// +build darwin freebsd openbsd netbsd dragonfly
+
+package logrus
+
+import "syscall"
+
+const ioctlReadTermios = syscall.TIOCGETA
+
+type Termios syscall.Termios
diff --git a/vendor/github.com/Sirupsen/logrus/terminal_linux.go b/vendor/github.com/Sirupsen/logrus/terminal_linux.go
new file mode 100644
index 00000000..a2c0b40d
--- /dev/null
+++ b/vendor/github.com/Sirupsen/logrus/terminal_linux.go
@@ -0,0 +1,12 @@
+// Based on ssh/terminal:
+// Copyright 2013 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package logrus
+
+import "syscall"
+
+const ioctlReadTermios = syscall.TCGETS
+
+type Termios syscall.Termios
diff --git a/vendor/github.com/Sirupsen/logrus/terminal_notwindows.go b/vendor/github.com/Sirupsen/logrus/terminal_notwindows.go
new file mode 100644
index 00000000..b343b3a3
--- /dev/null
+++ b/vendor/github.com/Sirupsen/logrus/terminal_notwindows.go
@@ -0,0 +1,21 @@
+// Based on ssh/terminal:
+// Copyright 2011 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// +build linux darwin freebsd openbsd netbsd dragonfly
+
+package logrus
+
+import (
+ "syscall"
+ "unsafe"
+)
+
+// IsTerminal returns true if stderr's file descriptor is a terminal.
+func IsTerminal() bool {
+ fd := syscall.Stderr
+ var termios Termios
+ _, _, err := syscall.Syscall6(syscall.SYS_IOCTL, uintptr(fd), ioctlReadTermios, uintptr(unsafe.Pointer(&termios)), 0, 0, 0)
+ return err == 0
+}
diff --git a/vendor/github.com/Sirupsen/logrus/terminal_solaris.go b/vendor/github.com/Sirupsen/logrus/terminal_solaris.go
new file mode 100644
index 00000000..3e70bf7b
--- /dev/null
+++ b/vendor/github.com/Sirupsen/logrus/terminal_solaris.go
@@ -0,0 +1,15 @@
+// +build solaris
+
+package logrus
+
+import (
+ "os"
+
+ "golang.org/x/sys/unix"
+)
+
+// IsTerminal returns true if stdout's file descriptor is a terminal.
+func IsTerminal() bool {
+ _, err := unix.IoctlGetTermios(int(os.Stdout.Fd()), unix.TCGETA)
+ return err == nil
+}
diff --git a/vendor/github.com/Sirupsen/logrus/terminal_windows.go b/vendor/github.com/Sirupsen/logrus/terminal_windows.go
new file mode 100644
index 00000000..0146845d
--- /dev/null
+++ b/vendor/github.com/Sirupsen/logrus/terminal_windows.go
@@ -0,0 +1,27 @@
+// Based on ssh/terminal:
+// Copyright 2011 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// +build windows
+
+package logrus
+
+import (
+ "syscall"
+ "unsafe"
+)
+
+var kernel32 = syscall.NewLazyDLL("kernel32.dll")
+
+var (
+ procGetConsoleMode = kernel32.NewProc("GetConsoleMode")
+)
+
+// IsTerminal returns true if stderr's file descriptor is a terminal.
+func IsTerminal() bool {
+ fd := syscall.Stderr
+ var st uint32
+ r, _, e := syscall.Syscall(procGetConsoleMode.Addr(), 2, uintptr(fd), uintptr(unsafe.Pointer(&st)), 0)
+ return r != 0 && e == 0
+}
diff --git a/vendor/github.com/Sirupsen/logrus/text_formatter.go b/vendor/github.com/Sirupsen/logrus/text_formatter.go
new file mode 100644
index 00000000..06ef2023
--- /dev/null
+++ b/vendor/github.com/Sirupsen/logrus/text_formatter.go
@@ -0,0 +1,161 @@
+package logrus
+
+import (
+ "bytes"
+ "fmt"
+ "runtime"
+ "sort"
+ "strings"
+ "time"
+)
+
+const (
+ nocolor = 0
+ red = 31
+ green = 32
+ yellow = 33
+ blue = 34
+ gray = 37
+)
+
+var (
+ baseTimestamp time.Time
+ isTerminal bool
+)
+
+func init() {
+ baseTimestamp = time.Now()
+ isTerminal = IsTerminal()
+}
+
+func miniTS() int {
+ return int(time.Since(baseTimestamp) / time.Second)
+}
+
+type TextFormatter struct {
+ // Set to true to bypass checking for a TTY before outputting colors.
+ ForceColors bool
+
+ // Force disabling colors.
+ DisableColors bool
+
+	// Disable timestamp logging. Useful when output is redirected to a logging
+	// system that already adds timestamps.
+ DisableTimestamp bool
+
+ // Enable logging the full timestamp when a TTY is attached instead of just
+ // the time passed since beginning of execution.
+ FullTimestamp bool
+
+ // TimestampFormat to use for display when a full timestamp is printed
+ TimestampFormat string
+
+ // The fields are sorted by default for a consistent output. For applications
+ // that log extremely frequently and don't use the JSON formatter this may not
+ // be desired.
+ DisableSorting bool
+}
+
+func (f *TextFormatter) Format(entry *Entry) ([]byte, error) {
+ var keys []string = make([]string, 0, len(entry.Data))
+ for k := range entry.Data {
+ keys = append(keys, k)
+ }
+
+ if !f.DisableSorting {
+ sort.Strings(keys)
+ }
+
+ b := &bytes.Buffer{}
+
+ prefixFieldClashes(entry.Data)
+
+ isColorTerminal := isTerminal && (runtime.GOOS != "windows")
+ isColored := (f.ForceColors || isColorTerminal) && !f.DisableColors
+
+ timestampFormat := f.TimestampFormat
+ if timestampFormat == "" {
+ timestampFormat = DefaultTimestampFormat
+ }
+ if isColored {
+ f.printColored(b, entry, keys, timestampFormat)
+ } else {
+ if !f.DisableTimestamp {
+ f.appendKeyValue(b, "time", entry.Time.Format(timestampFormat))
+ }
+ f.appendKeyValue(b, "level", entry.Level.String())
+ if entry.Message != "" {
+ f.appendKeyValue(b, "msg", entry.Message)
+ }
+ for _, key := range keys {
+ f.appendKeyValue(b, key, entry.Data[key])
+ }
+ }
+
+ b.WriteByte('\n')
+ return b.Bytes(), nil
+}
+
+func (f *TextFormatter) printColored(b *bytes.Buffer, entry *Entry, keys []string, timestampFormat string) {
+ var levelColor int
+ switch entry.Level {
+ case DebugLevel:
+ levelColor = gray
+ case WarnLevel:
+ levelColor = yellow
+ case ErrorLevel, FatalLevel, PanicLevel:
+ levelColor = red
+ default:
+ levelColor = blue
+ }
+
+ levelText := strings.ToUpper(entry.Level.String())[0:4]
+
+ if !f.FullTimestamp {
+ fmt.Fprintf(b, "\x1b[%dm%s\x1b[0m[%04d] %-44s ", levelColor, levelText, miniTS(), entry.Message)
+ } else {
+ fmt.Fprintf(b, "\x1b[%dm%s\x1b[0m[%s] %-44s ", levelColor, levelText, entry.Time.Format(timestampFormat), entry.Message)
+ }
+ for _, k := range keys {
+ v := entry.Data[k]
+ fmt.Fprintf(b, " \x1b[%dm%s\x1b[0m=%+v", levelColor, k, v)
+ }
+}
+
+func needsQuoting(text string) bool {
+ for _, ch := range text {
+ if !((ch >= 'a' && ch <= 'z') ||
+ (ch >= 'A' && ch <= 'Z') ||
+ (ch >= '0' && ch <= '9') ||
+ ch == '-' || ch == '.') {
+ return false
+ }
+ }
+ return true
+}
+
+func (f *TextFormatter) appendKeyValue(b *bytes.Buffer, key string, value interface{}) {
+
+ b.WriteString(key)
+ b.WriteByte('=')
+
+ switch value := value.(type) {
+ case string:
+ if needsQuoting(value) {
+ b.WriteString(value)
+ } else {
+ fmt.Fprintf(b, "%q", value)
+ }
+ case error:
+ errmsg := value.Error()
+ if needsQuoting(errmsg) {
+ b.WriteString(errmsg)
+ } else {
+ fmt.Fprintf(b, "%q", value)
+ }
+ default:
+ fmt.Fprint(b, value)
+ }
+
+ b.WriteByte(' ')
+}
diff --git a/vendor/github.com/Sirupsen/logrus/writer.go b/vendor/github.com/Sirupsen/logrus/writer.go
new file mode 100644
index 00000000..1e30b1c7
--- /dev/null
+++ b/vendor/github.com/Sirupsen/logrus/writer.go
@@ -0,0 +1,31 @@
+package logrus
+
+import (
+ "bufio"
+ "io"
+ "runtime"
+)
+
+func (logger *Logger) Writer() *io.PipeWriter {
+ reader, writer := io.Pipe()
+
+ go logger.writerScanner(reader)
+ runtime.SetFinalizer(writer, writerFinalizer)
+
+ return writer
+}
+
+func (logger *Logger) writerScanner(reader *io.PipeReader) {
+ scanner := bufio.NewScanner(reader)
+ for scanner.Scan() {
+ logger.Print(scanner.Text())
+ }
+ if err := scanner.Err(); err != nil {
+ logger.Errorf("Error while reading from Writer: %s", err)
+ }
+ reader.Close()
+}
+
+func writerFinalizer(writer *io.PipeWriter) {
+ writer.Close()
+}
diff --git a/vendor/github.com/beevik/ntp/LICENSE b/vendor/github.com/beevik/ntp/LICENSE
new file mode 100644
index 00000000..e14ad682
--- /dev/null
+++ b/vendor/github.com/beevik/ntp/LICENSE
@@ -0,0 +1,24 @@
+Copyright 2015 Brett Vickers. All rights reserved.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions
+are met:
+
+ 1. Redistributions of source code must retain the above copyright
+ notice, this list of conditions and the following disclaimer.
+
+ 2. Redistributions in binary form must reproduce the above copyright
+ notice, this list of conditions and the following disclaimer in the
+ documentation and/or other materials provided with the distribution.
+
+THIS SOFTWARE IS PROVIDED BY COPYRIGHT HOLDER ``AS IS'' AND ANY
+EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL COPYRIGHT HOLDER OR
+CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
diff --git a/vendor/github.com/beevik/ntp/README.md b/vendor/github.com/beevik/ntp/README.md
new file mode 100644
index 00000000..17457522
--- /dev/null
+++ b/vendor/github.com/beevik/ntp/README.md
@@ -0,0 +1,12 @@
+ntp
+===
+
+The ntp package is a small implementation of a limited NTP client. It
+requests the current time from a remote NTP server according to the
+selected version of the NTP protocol. The client uses version 4 of the NTP
+protocol (RFC 5905) by default.
+
+The approach was inspired by a post to the go-nuts mailing list by
+Michael Hofmann:
+
+https://groups.google.com/forum/?fromgroups#!topic/golang-nuts/FlcdMU5fkLQ
diff --git a/vendor/github.com/beevik/ntp/ntp.go b/vendor/github.com/beevik/ntp/ntp.go
new file mode 100644
index 00000000..e6c2efc0
--- /dev/null
+++ b/vendor/github.com/beevik/ntp/ntp.go
@@ -0,0 +1,113 @@
+// Copyright 2015 Brett Vickers.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Package ntp provides a simple mechanism for querying the current time
+// from a remote NTP server. This package only supports NTP client mode
+// behavior and version 4 of the NTP protocol. See RFC 5905.
+// Approach inspired by go-nuts post by Michael Hofmann:
+// https://groups.google.com/forum/?fromgroups#!topic/golang-nuts/FlcdMU5fkLQ
+package ntp
+
+import (
+ "encoding/binary"
+ "net"
+ "time"
+)
+
+type mode byte
+
+const (
+ reserved mode = 0 + iota
+ symmetricActive
+ symmetricPassive
+ client
+ server
+ broadcast
+ controlMessage
+ reservedPrivate
+)
+
+type ntpTime struct {
+ Seconds uint32
+ Fraction uint32
+}
+
+func (t ntpTime) UTC() time.Time {
+ nsec := uint64(t.Seconds)*1e9 + (uint64(t.Fraction) * 1e9 >> 32)
+ return time.Date(1900, 1, 1, 0, 0, 0, 0, time.UTC).Add(time.Duration(nsec))
+}
+
+type msg struct {
+ LiVnMode byte // Leap Indicator (2) + Version (3) + Mode (3)
+ Stratum byte
+ Poll byte
+ Precision byte
+ RootDelay uint32
+ RootDispersion uint32
+ ReferenceId uint32
+ ReferenceTime ntpTime
+ OriginTime ntpTime
+ ReceiveTime ntpTime
+ TransmitTime ntpTime
+}
+
+// SetVersion sets the NTP protocol version on the message.
+func (m *msg) SetVersion(v byte) {
+ m.LiVnMode = (m.LiVnMode & 0xc7) | v<<3
+}
+
+// SetMode sets the NTP protocol mode on the message.
+func (m *msg) SetMode(md mode) {
+ m.LiVnMode = (m.LiVnMode & 0xf8) | byte(md)
+}
+
+// getTime returns the "receive time" from the remote NTP server
+// specified as host, using NTP client mode and the requested version.
+func getTime(host string, version byte) (time.Time, error) {
+ if version < 2 || version > 4 {
+ panic("ntp: invalid version number")
+ }
+
+ raddr, err := net.ResolveUDPAddr("udp", host+":123")
+ if err != nil {
+ return time.Now(), err
+ }
+
+ con, err := net.DialUDP("udp", nil, raddr)
+ if err != nil {
+ return time.Now(), err
+ }
+ defer con.Close()
+ con.SetDeadline(time.Now().Add(5 * time.Second))
+
+ m := new(msg)
+ m.SetMode(client)
+ m.SetVersion(version)
+
+ err = binary.Write(con, binary.BigEndian, m)
+ if err != nil {
+ return time.Now(), err
+ }
+
+ err = binary.Read(con, binary.BigEndian, m)
+ if err != nil {
+ return time.Now(), err
+ }
+
+ t := m.ReceiveTime.UTC().Local()
+ return t, nil
+}
+
+// TimeV returns the "receive time" from the remote NTP server
+// specified as host, using NTP client mode with the requested
+// version number (2, 3, or 4).
+func TimeV(host string, version byte) (time.Time, error) {
+ return getTime(host, version)
+}
+
+// Time returns the "receive time" from the remote NTP server
+// specified as host. NTP client mode version 4 is used.
+func Time(host string) (time.Time, error) {
+ return getTime(host, 4)
+}
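
Usage of the package comes down to the two exported functions above. A short sketch, assuming network access and using a public NTP pool host purely as an example:

package main

import (
	"fmt"
	"log"

	"github.com/beevik/ntp"
)

func main() {
	// Time queries the host with NTP protocol version 4.
	t, err := ntp.Time("0.pool.ntp.org")
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println("receive time (v4):", t)

	// TimeV does the same with an explicit version (2, 3, or 4).
	t3, err := ntp.TimeV("0.pool.ntp.org", 3)
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println("receive time (v3):", t3)
}
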
diff --git a/vendor/github.com/beorn7/perks/quantile/exampledata.txt b/vendor/github.com/beorn7/perks/quantile/exampledata.txt
new file mode 100644
index 00000000..1602287d
--- /dev/null
+++ b/vendor/github.com/beorn7/perks/quantile/exampledata.txt
@@ -0,0 +1,2388 @@
+8
+5
+26
+12
+5
+235
+13
+6
+28
+30
+3
+3
+3
+3
+5
+2
+33
+7
+2
+4
+7
+12
+14
+5
+8
+3
+10
+4
+5
+3
+6
+6
+209
+20
+3
+10
+14
+3
+4
+6
+8
+5
+11
+7
+3
+2
+3
+3
+212
+5
+222
+4
+10
+10
+5
+6
+3
+8
+3
+10
+254
+220
+2
+3
+5
+24
+5
+4
+222
+7
+3
+3
+223
+8
+15
+12
+14
+14
+3
+2
+2
+3
+13
+3
+11
+4
+4
+6
+5
+7
+13
+5
+3
+5
+2
+5
+3
+5
+2
+7
+15
+17
+14
+3
+6
+6
+3
+17
+5
+4
+7
+6
+4
+4
+8
+6
+8
+3
+9
+3
+6
+3
+4
+5
+3
+3
+660
+4
+6
+10
+3
+6
+3
+2
+5
+13
+2
+4
+4
+10
+4
+8
+4
+3
+7
+9
+9
+3
+10
+37
+3
+13
+4
+12
+3
+6
+10
+8
+5
+21
+2
+3
+8
+3
+2
+3
+3
+4
+12
+2
+4
+8
+8
+4
+3
+2
+20
+1
+6
+32
+2
+11
+6
+18
+3
+8
+11
+3
+212
+3
+4
+2
+6
+7
+12
+11
+3
+2
+16
+10
+6
+4
+6
+3
+2
+7
+3
+2
+2
+2
+2
+5
+6
+4
+3
+10
+3
+4
+6
+5
+3
+4
+4
+5
+6
+4
+3
+4
+4
+5
+7
+5
+5
+3
+2
+7
+2
+4
+12
+4
+5
+6
+2
+4
+4
+8
+4
+15
+13
+7
+16
+5
+3
+23
+5
+5
+7
+3
+2
+9
+8
+7
+5
+8
+11
+4
+10
+76
+4
+47
+4
+3
+2
+7
+4
+2
+3
+37
+10
+4
+2
+20
+5
+4
+4
+10
+10
+4
+3
+7
+23
+240
+7
+13
+5
+5
+3
+3
+2
+5
+4
+2
+8
+7
+19
+2
+23
+8
+7
+2
+5
+3
+8
+3
+8
+13
+5
+5
+5
+2
+3
+23
+4
+9
+8
+4
+3
+3
+5
+220
+2
+3
+4
+6
+14
+3
+53
+6
+2
+5
+18
+6
+3
+219
+6
+5
+2
+5
+3
+6
+5
+15
+4
+3
+17
+3
+2
+4
+7
+2
+3
+3
+4
+4
+3
+2
+664
+6
+3
+23
+5
+5
+16
+5
+8
+2
+4
+2
+24
+12
+3
+2
+3
+5
+8
+3
+5
+4
+3
+14
+3
+5
+8
+2
+3
+7
+9
+4
+2
+3
+6
+8
+4
+3
+4
+6
+5
+3
+3
+6
+3
+19
+4
+4
+6
+3
+6
+3
+5
+22
+5
+4
+4
+3
+8
+11
+4
+9
+7
+6
+13
+4
+4
+4
+6
+17
+9
+3
+3
+3
+4
+3
+221
+5
+11
+3
+4
+2
+12
+6
+3
+5
+7
+5
+7
+4
+9
+7
+14
+37
+19
+217
+16
+3
+5
+2
+2
+7
+19
+7
+6
+7
+4
+24
+5
+11
+4
+7
+7
+9
+13
+3
+4
+3
+6
+28
+4
+4
+5
+5
+2
+5
+6
+4
+4
+6
+10
+5
+4
+3
+2
+3
+3
+6
+5
+5
+4
+3
+2
+3
+7
+4
+6
+18
+16
+8
+16
+4
+5
+8
+6
+9
+13
+1545
+6
+215
+6
+5
+6
+3
+45
+31
+5
+2
+2
+4
+3
+3
+2
+5
+4
+3
+5
+7
+7
+4
+5
+8
+5
+4
+749
+2
+31
+9
+11
+2
+11
+5
+4
+4
+7
+9
+11
+4
+5
+4
+7
+3
+4
+6
+2
+15
+3
+4
+3
+4
+3
+5
+2
+13
+5
+5
+3
+3
+23
+4
+4
+5
+7
+4
+13
+2
+4
+3
+4
+2
+6
+2
+7
+3
+5
+5
+3
+29
+5
+4
+4
+3
+10
+2
+3
+79
+16
+6
+6
+7
+7
+3
+5
+5
+7
+4
+3
+7
+9
+5
+6
+5
+9
+6
+3
+6
+4
+17
+2
+10
+9
+3
+6
+2
+3
+21
+22
+5
+11
+4
+2
+17
+2
+224
+2
+14
+3
+4
+4
+2
+4
+4
+4
+4
+5
+3
+4
+4
+10
+2
+6
+3
+3
+5
+7
+2
+7
+5
+6
+3
+218
+2
+2
+5
+2
+6
+3
+5
+222
+14
+6
+33
+3
+2
+5
+3
+3
+3
+9
+5
+3
+3
+2
+7
+4
+3
+4
+3
+5
+6
+5
+26
+4
+13
+9
+7
+3
+221
+3
+3
+4
+4
+4
+4
+2
+18
+5
+3
+7
+9
+6
+8
+3
+10
+3
+11
+9
+5
+4
+17
+5
+5
+6
+6
+3
+2
+4
+12
+17
+6
+7
+218
+4
+2
+4
+10
+3
+5
+15
+3
+9
+4
+3
+3
+6
+29
+3
+3
+4
+5
+5
+3
+8
+5
+6
+6
+7
+5
+3
+5
+3
+29
+2
+31
+5
+15
+24
+16
+5
+207
+4
+3
+3
+2
+15
+4
+4
+13
+5
+5
+4
+6
+10
+2
+7
+8
+4
+6
+20
+5
+3
+4
+3
+12
+12
+5
+17
+7
+3
+3
+3
+6
+10
+3
+5
+25
+80
+4
+9
+3
+2
+11
+3
+3
+2
+3
+8
+7
+5
+5
+19
+5
+3
+3
+12
+11
+2
+6
+5
+5
+5
+3
+3
+3
+4
+209
+14
+3
+2
+5
+19
+4
+4
+3
+4
+14
+5
+6
+4
+13
+9
+7
+4
+7
+10
+2
+9
+5
+7
+2
+8
+4
+6
+5
+5
+222
+8
+7
+12
+5
+216
+3
+4
+4
+6
+3
+14
+8
+7
+13
+4
+3
+3
+3
+3
+17
+5
+4
+3
+33
+6
+6
+33
+7
+5
+3
+8
+7
+5
+2
+9
+4
+2
+233
+24
+7
+4
+8
+10
+3
+4
+15
+2
+16
+3
+3
+13
+12
+7
+5
+4
+207
+4
+2
+4
+27
+15
+2
+5
+2
+25
+6
+5
+5
+6
+13
+6
+18
+6
+4
+12
+225
+10
+7
+5
+2
+2
+11
+4
+14
+21
+8
+10
+3
+5
+4
+232
+2
+5
+5
+3
+7
+17
+11
+6
+6
+23
+4
+6
+3
+5
+4
+2
+17
+3
+6
+5
+8
+3
+2
+2
+14
+9
+4
+4
+2
+5
+5
+3
+7
+6
+12
+6
+10
+3
+6
+2
+2
+19
+5
+4
+4
+9
+2
+4
+13
+3
+5
+6
+3
+6
+5
+4
+9
+6
+3
+5
+7
+3
+6
+6
+4
+3
+10
+6
+3
+221
+3
+5
+3
+6
+4
+8
+5
+3
+6
+4
+4
+2
+54
+5
+6
+11
+3
+3
+4
+4
+4
+3
+7
+3
+11
+11
+7
+10
+6
+13
+223
+213
+15
+231
+7
+3
+7
+228
+2
+3
+4
+4
+5
+6
+7
+4
+13
+3
+4
+5
+3
+6
+4
+6
+7
+2
+4
+3
+4
+3
+3
+6
+3
+7
+3
+5
+18
+5
+6
+8
+10
+3
+3
+3
+2
+4
+2
+4
+4
+5
+6
+6
+4
+10
+13
+3
+12
+5
+12
+16
+8
+4
+19
+11
+2
+4
+5
+6
+8
+5
+6
+4
+18
+10
+4
+2
+216
+6
+6
+6
+2
+4
+12
+8
+3
+11
+5
+6
+14
+5
+3
+13
+4
+5
+4
+5
+3
+28
+6
+3
+7
+219
+3
+9
+7
+3
+10
+6
+3
+4
+19
+5
+7
+11
+6
+15
+19
+4
+13
+11
+3
+7
+5
+10
+2
+8
+11
+2
+6
+4
+6
+24
+6
+3
+3
+3
+3
+6
+18
+4
+11
+4
+2
+5
+10
+8
+3
+9
+5
+3
+4
+5
+6
+2
+5
+7
+4
+4
+14
+6
+4
+4
+5
+5
+7
+2
+4
+3
+7
+3
+3
+6
+4
+5
+4
+4
+4
+3
+3
+3
+3
+8
+14
+2
+3
+5
+3
+2
+4
+5
+3
+7
+3
+3
+18
+3
+4
+4
+5
+7
+3
+3
+3
+13
+5
+4
+8
+211
+5
+5
+3
+5
+2
+5
+4
+2
+655
+6
+3
+5
+11
+2
+5
+3
+12
+9
+15
+11
+5
+12
+217
+2
+6
+17
+3
+3
+207
+5
+5
+4
+5
+9
+3
+2
+8
+5
+4
+3
+2
+5
+12
+4
+14
+5
+4
+2
+13
+5
+8
+4
+225
+4
+3
+4
+5
+4
+3
+3
+6
+23
+9
+2
+6
+7
+233
+4
+4
+6
+18
+3
+4
+6
+3
+4
+4
+2
+3
+7
+4
+13
+227
+4
+3
+5
+4
+2
+12
+9
+17
+3
+7
+14
+6
+4
+5
+21
+4
+8
+9
+2
+9
+25
+16
+3
+6
+4
+7
+8
+5
+2
+3
+5
+4
+3
+3
+5
+3
+3
+3
+2
+3
+19
+2
+4
+3
+4
+2
+3
+4
+4
+2
+4
+3
+3
+3
+2
+6
+3
+17
+5
+6
+4
+3
+13
+5
+3
+3
+3
+4
+9
+4
+2
+14
+12
+4
+5
+24
+4
+3
+37
+12
+11
+21
+3
+4
+3
+13
+4
+2
+3
+15
+4
+11
+4
+4
+3
+8
+3
+4
+4
+12
+8
+5
+3
+3
+4
+2
+220
+3
+5
+223
+3
+3
+3
+10
+3
+15
+4
+241
+9
+7
+3
+6
+6
+23
+4
+13
+7
+3
+4
+7
+4
+9
+3
+3
+4
+10
+5
+5
+1
+5
+24
+2
+4
+5
+5
+6
+14
+3
+8
+2
+3
+5
+13
+13
+3
+5
+2
+3
+15
+3
+4
+2
+10
+4
+4
+4
+5
+5
+3
+5
+3
+4
+7
+4
+27
+3
+6
+4
+15
+3
+5
+6
+6
+5
+4
+8
+3
+9
+2
+6
+3
+4
+3
+7
+4
+18
+3
+11
+3
+3
+8
+9
+7
+24
+3
+219
+7
+10
+4
+5
+9
+12
+2
+5
+4
+4
+4
+3
+3
+19
+5
+8
+16
+8
+6
+22
+3
+23
+3
+242
+9
+4
+3
+3
+5
+7
+3
+3
+5
+8
+3
+7
+5
+14
+8
+10
+3
+4
+3
+7
+4
+6
+7
+4
+10
+4
+3
+11
+3
+7
+10
+3
+13
+6
+8
+12
+10
+5
+7
+9
+3
+4
+7
+7
+10
+8
+30
+9
+19
+4
+3
+19
+15
+4
+13
+3
+215
+223
+4
+7
+4
+8
+17
+16
+3
+7
+6
+5
+5
+4
+12
+3
+7
+4
+4
+13
+4
+5
+2
+5
+6
+5
+6
+6
+7
+10
+18
+23
+9
+3
+3
+6
+5
+2
+4
+2
+7
+3
+3
+2
+5
+5
+14
+10
+224
+6
+3
+4
+3
+7
+5
+9
+3
+6
+4
+2
+5
+11
+4
+3
+3
+2
+8
+4
+7
+4
+10
+7
+3
+3
+18
+18
+17
+3
+3
+3
+4
+5
+3
+3
+4
+12
+7
+3
+11
+13
+5
+4
+7
+13
+5
+4
+11
+3
+12
+3
+6
+4
+4
+21
+4
+6
+9
+5
+3
+10
+8
+4
+6
+4
+4
+6
+5
+4
+8
+6
+4
+6
+4
+4
+5
+9
+6
+3
+4
+2
+9
+3
+18
+2
+4
+3
+13
+3
+6
+6
+8
+7
+9
+3
+2
+16
+3
+4
+6
+3
+2
+33
+22
+14
+4
+9
+12
+4
+5
+6
+3
+23
+9
+4
+3
+5
+5
+3
+4
+5
+3
+5
+3
+10
+4
+5
+5
+8
+4
+4
+6
+8
+5
+4
+3
+4
+6
+3
+3
+3
+5
+9
+12
+6
+5
+9
+3
+5
+3
+2
+2
+2
+18
+3
+2
+21
+2
+5
+4
+6
+4
+5
+10
+3
+9
+3
+2
+10
+7
+3
+6
+6
+4
+4
+8
+12
+7
+3
+7
+3
+3
+9
+3
+4
+5
+4
+4
+5
+5
+10
+15
+4
+4
+14
+6
+227
+3
+14
+5
+216
+22
+5
+4
+2
+2
+6
+3
+4
+2
+9
+9
+4
+3
+28
+13
+11
+4
+5
+3
+3
+2
+3
+3
+5
+3
+4
+3
+5
+23
+26
+3
+4
+5
+6
+4
+6
+3
+5
+5
+3
+4
+3
+2
+2
+2
+7
+14
+3
+6
+7
+17
+2
+2
+15
+14
+16
+4
+6
+7
+13
+6
+4
+5
+6
+16
+3
+3
+28
+3
+6
+15
+3
+9
+2
+4
+6
+3
+3
+22
+4
+12
+6
+7
+2
+5
+4
+10
+3
+16
+6
+9
+2
+5
+12
+7
+5
+5
+5
+5
+2
+11
+9
+17
+4
+3
+11
+7
+3
+5
+15
+4
+3
+4
+211
+8
+7
+5
+4
+7
+6
+7
+6
+3
+6
+5
+6
+5
+3
+4
+4
+26
+4
+6
+10
+4
+4
+3
+2
+3
+3
+4
+5
+9
+3
+9
+4
+4
+5
+5
+8
+2
+4
+2
+3
+8
+4
+11
+19
+5
+8
+6
+3
+5
+6
+12
+3
+2
+4
+16
+12
+3
+4
+4
+8
+6
+5
+6
+6
+219
+8
+222
+6
+16
+3
+13
+19
+5
+4
+3
+11
+6
+10
+4
+7
+7
+12
+5
+3
+3
+5
+6
+10
+3
+8
+2
+5
+4
+7
+2
+4
+4
+2
+12
+9
+6
+4
+2
+40
+2
+4
+10
+4
+223
+4
+2
+20
+6
+7
+24
+5
+4
+5
+2
+20
+16
+6
+5
+13
+2
+3
+3
+19
+3
+2
+4
+5
+6
+7
+11
+12
+5
+6
+7
+7
+3
+5
+3
+5
+3
+14
+3
+4
+4
+2
+11
+1
+7
+3
+9
+6
+11
+12
+5
+8
+6
+221
+4
+2
+12
+4
+3
+15
+4
+5
+226
+7
+218
+7
+5
+4
+5
+18
+4
+5
+9
+4
+4
+2
+9
+18
+18
+9
+5
+6
+6
+3
+3
+7
+3
+5
+4
+4
+4
+12
+3
+6
+31
+5
+4
+7
+3
+6
+5
+6
+5
+11
+2
+2
+11
+11
+6
+7
+5
+8
+7
+10
+5
+23
+7
+4
+3
+5
+34
+2
+5
+23
+7
+3
+6
+8
+4
+4
+4
+2
+5
+3
+8
+5
+4
+8
+25
+2
+3
+17
+8
+3
+4
+8
+7
+3
+15
+6
+5
+7
+21
+9
+5
+6
+6
+5
+3
+2
+3
+10
+3
+6
+3
+14
+7
+4
+4
+8
+7
+8
+2
+6
+12
+4
+213
+6
+5
+21
+8
+2
+5
+23
+3
+11
+2
+3
+6
+25
+2
+3
+6
+7
+6
+6
+4
+4
+6
+3
+17
+9
+7
+6
+4
+3
+10
+7
+2
+3
+3
+3
+11
+8
+3
+7
+6
+4
+14
+36
+3
+4
+3
+3
+22
+13
+21
+4
+2
+7
+4
+4
+17
+15
+3
+7
+11
+2
+4
+7
+6
+209
+6
+3
+2
+2
+24
+4
+9
+4
+3
+3
+3
+29
+2
+2
+4
+3
+3
+5
+4
+6
+3
+3
+2
+4
diff --git a/vendor/github.com/beorn7/perks/quantile/stream.go b/vendor/github.com/beorn7/perks/quantile/stream.go
new file mode 100644
index 00000000..587b1fc5
--- /dev/null
+++ b/vendor/github.com/beorn7/perks/quantile/stream.go
@@ -0,0 +1,292 @@
+// Package quantile computes approximate quantiles over an unbounded data
+// stream within low memory and CPU bounds.
+//
+// A small amount of accuracy is traded to achieve the above properties.
+//
+// Multiple streams can be merged before calling Query to generate a single set
+// of results. This is meaningful when the streams represent the same type of
+// data. See Merge and Samples.
+//
+// For more detailed information about the algorithm used, see:
+//
+// Effective Computation of Biased Quantiles over Data Streams
+//
+// http://www.cs.rutgers.edu/~muthu/bquant.pdf
+package quantile
+
+import (
+ "math"
+ "sort"
+)
+
+// Sample holds an observed value and meta information for compression. JSON
+// tags have been added for convenience.
+type Sample struct {
+ Value float64 `json:",string"`
+ Width float64 `json:",string"`
+ Delta float64 `json:",string"`
+}
+
+// Samples represents a slice of samples. It implements sort.Interface.
+type Samples []Sample
+
+func (a Samples) Len() int { return len(a) }
+func (a Samples) Less(i, j int) bool { return a[i].Value < a[j].Value }
+func (a Samples) Swap(i, j int) { a[i], a[j] = a[j], a[i] }
+
+type invariant func(s *stream, r float64) float64
+
+// NewLowBiased returns an initialized Stream for low-biased quantiles
+// (e.g. 0.01, 0.1, 0.5) where the needed quantiles are not known a priori, but
+// error guarantees can still be given even for the lower ranks of the data
+// distribution.
+//
+// The provided epsilon is a relative error, i.e. the true quantile of a value
+// returned by a query is guaranteed to be within (1±Epsilon)*Quantile.
+//
+// See http://www.cs.rutgers.edu/~muthu/bquant.pdf for time, space, and error
+// properties.
+func NewLowBiased(epsilon float64) *Stream {
+ ƒ := func(s *stream, r float64) float64 {
+ return 2 * epsilon * r
+ }
+ return newStream(ƒ)
+}
+
+// NewHighBiased returns an initialized Stream for high-biased quantiles
+// (e.g. 0.01, 0.1, 0.5) where the needed quantiles are not known a priori, but
+// error guarantees can still be given even for the higher ranks of the data
+// distribution.
+//
+// The provided epsilon is a relative error, i.e. the true quantile of a value
+// returned by a query is guaranteed to be within 1-(1±Epsilon)*(1-Quantile).
+//
+// See http://www.cs.rutgers.edu/~muthu/bquant.pdf for time, space, and error
+// properties.
+func NewHighBiased(epsilon float64) *Stream {
+ ƒ := func(s *stream, r float64) float64 {
+ return 2 * epsilon * (s.n - r)
+ }
+ return newStream(ƒ)
+}
+
+// NewTargeted returns an initialized Stream concerned with a particular set of
+// quantile values that are supplied a priori. Knowing these a priori reduces
+// space and computation time. The targets map maps the desired quantiles to
+// their absolute errors, i.e. the true quantile of a value returned by a query
+// is guaranteed to be within (Quantile±Epsilon).
+//
+// See http://www.cs.rutgers.edu/~muthu/bquant.pdf for time, space, and error properties.
+func NewTargeted(targets map[float64]float64) *Stream {
+ ƒ := func(s *stream, r float64) float64 {
+ var m = math.MaxFloat64
+ var f float64
+ for quantile, epsilon := range targets {
+ if quantile*s.n <= r {
+ f = (2 * epsilon * r) / quantile
+ } else {
+ f = (2 * epsilon * (s.n - r)) / (1 - quantile)
+ }
+ if f < m {
+ m = f
+ }
+ }
+ return m
+ }
+ return newStream(ƒ)
+}
+
+// Stream computes quantiles for a stream of float64s. It is not thread-safe by
+// design. Take care when using across multiple goroutines.
+type Stream struct {
+ *stream
+ b Samples
+ sorted bool
+}
+
+func newStream(ƒ invariant) *Stream {
+ x := &stream{ƒ: ƒ}
+ return &Stream{x, make(Samples, 0, 500), true}
+}
+
+// Insert inserts v into the stream.
+func (s *Stream) Insert(v float64) {
+ s.insert(Sample{Value: v, Width: 1})
+}
+
+func (s *Stream) insert(sample Sample) {
+ s.b = append(s.b, sample)
+ s.sorted = false
+ if len(s.b) == cap(s.b) {
+ s.flush()
+ }
+}
+
+// Query returns the computed qth quantile value. If s was created with
+// NewTargeted, and q is not in the set of quantiles provided a priori, Query
+// will return an unspecified result.
+func (s *Stream) Query(q float64) float64 {
+ if !s.flushed() {
+ // Fast path when there hasn't been enough data for a flush;
+ // this also yields better accuracy for small sets of data.
+ l := len(s.b)
+ if l == 0 {
+ return 0
+ }
+ i := int(float64(l) * q)
+ if i > 0 {
+ i -= 1
+ }
+ s.maybeSort()
+ return s.b[i].Value
+ }
+ s.flush()
+ return s.stream.query(q)
+}
+
+// Merge merges samples into the underlying stream's samples. This is handy when
+// merging multiple streams from separate threads, database shards, etc.
+//
+// ATTENTION: This method is broken and does not yield correct results. The
+// underlying algorithm is not capable of merging streams correctly.
+func (s *Stream) Merge(samples Samples) {
+ sort.Sort(samples)
+ s.stream.merge(samples)
+}
+
+// Reset reinitializes and clears the list reusing the samples buffer memory.
+func (s *Stream) Reset() {
+ s.stream.reset()
+ s.b = s.b[:0]
+}
+
+// Samples returns stream samples held by s.
+func (s *Stream) Samples() Samples {
+ if !s.flushed() {
+ return s.b
+ }
+ s.flush()
+ return s.stream.samples()
+}
+
+// Count returns the total number of samples observed in the stream
+// since initialization.
+func (s *Stream) Count() int {
+ return len(s.b) + s.stream.count()
+}
+
+func (s *Stream) flush() {
+ s.maybeSort()
+ s.stream.merge(s.b)
+ s.b = s.b[:0]
+}
+
+func (s *Stream) maybeSort() {
+ if !s.sorted {
+ s.sorted = true
+ sort.Sort(s.b)
+ }
+}
+
+func (s *Stream) flushed() bool {
+ return len(s.stream.l) > 0
+}
+
+type stream struct {
+ n float64
+ l []Sample
+ ƒ invariant
+}
+
+func (s *stream) reset() {
+ s.l = s.l[:0]
+ s.n = 0
+}
+
+func (s *stream) insert(v float64) {
+ s.merge(Samples{{v, 1, 0}})
+}
+
+func (s *stream) merge(samples Samples) {
+ // TODO(beorn7): This tries to merge not only individual samples, but
+ // whole summaries. The paper doesn't mention merging summaries at
+ // all. Unittests show that the merging is inaccurate. Find out how to
+ // do merges properly.
+ var r float64
+ i := 0
+ for _, sample := range samples {
+ for ; i < len(s.l); i++ {
+ c := s.l[i]
+ if c.Value > sample.Value {
+ // Insert at position i.
+ s.l = append(s.l, Sample{})
+ copy(s.l[i+1:], s.l[i:])
+ s.l[i] = Sample{
+ sample.Value,
+ sample.Width,
+ math.Max(sample.Delta, math.Floor(s.ƒ(s, r))-1),
+ // TODO(beorn7): How to calculate delta correctly?
+ }
+ i++
+ goto inserted
+ }
+ r += c.Width
+ }
+ s.l = append(s.l, Sample{sample.Value, sample.Width, 0})
+ i++
+ inserted:
+ s.n += sample.Width
+ r += sample.Width
+ }
+ s.compress()
+}
+
+func (s *stream) count() int {
+ return int(s.n)
+}
+
+func (s *stream) query(q float64) float64 {
+ t := math.Ceil(q * s.n)
+ t += math.Ceil(s.ƒ(s, t) / 2)
+ p := s.l[0]
+ var r float64
+ for _, c := range s.l[1:] {
+ r += p.Width
+ if r+c.Width+c.Delta > t {
+ return p.Value
+ }
+ p = c
+ }
+ return p.Value
+}
+
+func (s *stream) compress() {
+ if len(s.l) < 2 {
+ return
+ }
+ x := s.l[len(s.l)-1]
+ xi := len(s.l) - 1
+ r := s.n - 1 - x.Width
+
+ for i := len(s.l) - 2; i >= 0; i-- {
+ c := s.l[i]
+ if c.Width+x.Width+x.Delta <= s.ƒ(s, r) {
+ x.Width += c.Width
+ s.l[xi] = x
+ // Remove element at i.
+ copy(s.l[i:], s.l[i+1:])
+ s.l = s.l[:len(s.l)-1]
+ xi -= 1
+ } else {
+ x = c
+ xi = i
+ }
+ r -= c.Width
+ }
+}
+
+func (s *stream) samples() Samples {
+ samples := make(Samples, len(s.l))
+ copy(samples, s.l)
+ return samples
+}
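
The typical use of this package is the targeted-quantile flow documented above: construct a Stream with NewTargeted, feed it observations with Insert, then read estimates with Query. A minimal sketch (the error bounds and the random input are arbitrary choices for illustration):

package main

import (
	"fmt"
	"math/rand"

	"github.com/beorn7/perks/quantile"
)

func main() {
	// Track the median, 90th and 99th percentile, each with its own
	// absolute error bound as described on NewTargeted.
	q := quantile.NewTargeted(map[float64]float64{
		0.50: 0.005,
		0.90: 0.001,
		0.99: 0.0001,
	})

	for i := 0; i < 100000; i++ {
		q.Insert(rand.NormFloat64())
	}

	fmt.Println("observations:", q.Count())
	fmt.Println("p50:", q.Query(0.50))
	fmt.Println("p99:", q.Query(0.99))
}
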
diff --git a/vendor/github.com/coreos/go-systemd/LICENSE b/vendor/github.com/coreos/go-systemd/LICENSE
new file mode 100644
index 00000000..37ec93a1
--- /dev/null
+++ b/vendor/github.com/coreos/go-systemd/LICENSE
@@ -0,0 +1,191 @@
+Apache License
+Version 2.0, January 2004
+http://www.apache.org/licenses/
+
+TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+
+1. Definitions.
+
+"License" shall mean the terms and conditions for use, reproduction, and
+distribution as defined by Sections 1 through 9 of this document.
+
+"Licensor" shall mean the copyright owner or entity authorized by the copyright
+owner that is granting the License.
+
+"Legal Entity" shall mean the union of the acting entity and all other entities
+that control, are controlled by, or are under common control with that entity.
+For the purposes of this definition, "control" means (i) the power, direct or
+indirect, to cause the direction or management of such entity, whether by
+contract or otherwise, or (ii) ownership of fifty percent (50%) or more of the
+outstanding shares, or (iii) beneficial ownership of such entity.
+
+"You" (or "Your") shall mean an individual or Legal Entity exercising
+permissions granted by this License.
+
+"Source" form shall mean the preferred form for making modifications, including
+but not limited to software source code, documentation source, and configuration
+files.
+
+"Object" form shall mean any form resulting from mechanical transformation or
+translation of a Source form, including but not limited to compiled object code,
+generated documentation, and conversions to other media types.
+
+"Work" shall mean the work of authorship, whether in Source or Object form, made
+available under the License, as indicated by a copyright notice that is included
+in or attached to the work (an example is provided in the Appendix below).
+
+"Derivative Works" shall mean any work, whether in Source or Object form, that
+is based on (or derived from) the Work and for which the editorial revisions,
+annotations, elaborations, or other modifications represent, as a whole, an
+original work of authorship. For the purposes of this License, Derivative Works
+shall not include works that remain separable from, or merely link (or bind by
+name) to the interfaces of, the Work and Derivative Works thereof.
+
+"Contribution" shall mean any work of authorship, including the original version
+of the Work and any modifications or additions to that Work or Derivative Works
+thereof, that is intentionally submitted to Licensor for inclusion in the Work
+by the copyright owner or by an individual or Legal Entity authorized to submit
+on behalf of the copyright owner. For the purposes of this definition,
+"submitted" means any form of electronic, verbal, or written communication sent
+to the Licensor or its representatives, including but not limited to
+communication on electronic mailing lists, source code control systems, and
+issue tracking systems that are managed by, or on behalf of, the Licensor for
+the purpose of discussing and improving the Work, but excluding communication
+that is conspicuously marked or otherwise designated in writing by the copyright
+owner as "Not a Contribution."
+
+"Contributor" shall mean Licensor and any individual or Legal Entity on behalf
+of whom a Contribution has been received by Licensor and subsequently
+incorporated within the Work.
+
+2. Grant of Copyright License.
+
+Subject to the terms and conditions of this License, each Contributor hereby
+grants to You a perpetual, worldwide, non-exclusive, no-charge, royalty-free,
+irrevocable copyright license to reproduce, prepare Derivative Works of,
+publicly display, publicly perform, sublicense, and distribute the Work and such
+Derivative Works in Source or Object form.
+
+3. Grant of Patent License.
+
+Subject to the terms and conditions of this License, each Contributor hereby
+grants to You a perpetual, worldwide, non-exclusive, no-charge, royalty-free,
+irrevocable (except as stated in this section) patent license to make, have
+made, use, offer to sell, sell, import, and otherwise transfer the Work, where
+such license applies only to those patent claims licensable by such Contributor
+that are necessarily infringed by their Contribution(s) alone or by combination
+of their Contribution(s) with the Work to which such Contribution(s) was
+submitted. If You institute patent litigation against any entity (including a
+cross-claim or counterclaim in a lawsuit) alleging that the Work or a
+Contribution incorporated within the Work constitutes direct or contributory
+patent infringement, then any patent licenses granted to You under this License
+for that Work shall terminate as of the date such litigation is filed.
+
+4. Redistribution.
+
+You may reproduce and distribute copies of the Work or Derivative Works thereof
+in any medium, with or without modifications, and in Source or Object form,
+provided that You meet the following conditions:
+
+You must give any other recipients of the Work or Derivative Works a copy of
+this License; and
+You must cause any modified files to carry prominent notices stating that You
+changed the files; and
+You must retain, in the Source form of any Derivative Works that You distribute,
+all copyright, patent, trademark, and attribution notices from the Source form
+of the Work, excluding those notices that do not pertain to any part of the
+Derivative Works; and
+If the Work includes a "NOTICE" text file as part of its distribution, then any
+Derivative Works that You distribute must include a readable copy of the
+attribution notices contained within such NOTICE file, excluding those notices
+that do not pertain to any part of the Derivative Works, in at least one of the
+following places: within a NOTICE text file distributed as part of the
+Derivative Works; within the Source form or documentation, if provided along
+with the Derivative Works; or, within a display generated by the Derivative
+Works, if and wherever such third-party notices normally appear. The contents of
+the NOTICE file are for informational purposes only and do not modify the
+License. You may add Your own attribution notices within Derivative Works that
+You distribute, alongside or as an addendum to the NOTICE text from the Work,
+provided that such additional attribution notices cannot be construed as
+modifying the License.
+You may add Your own copyright statement to Your modifications and may provide
+additional or different license terms and conditions for use, reproduction, or
+distribution of Your modifications, or for any such Derivative Works as a whole,
+provided Your use, reproduction, and distribution of the Work otherwise complies
+with the conditions stated in this License.
+
+5. Submission of Contributions.
+
+Unless You explicitly state otherwise, any Contribution intentionally submitted
+for inclusion in the Work by You to the Licensor shall be under the terms and
+conditions of this License, without any additional terms or conditions.
+Notwithstanding the above, nothing herein shall supersede or modify the terms of
+any separate license agreement you may have executed with Licensor regarding
+such Contributions.
+
+6. Trademarks.
+
+This License does not grant permission to use the trade names, trademarks,
+service marks, or product names of the Licensor, except as required for
+reasonable and customary use in describing the origin of the Work and
+reproducing the content of the NOTICE file.
+
+7. Disclaimer of Warranty.
+
+Unless required by applicable law or agreed to in writing, Licensor provides the
+Work (and each Contributor provides its Contributions) on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied,
+including, without limitation, any warranties or conditions of TITLE,
+NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A PARTICULAR PURPOSE. You are
+solely responsible for determining the appropriateness of using or
+redistributing the Work and assume any risks associated with Your exercise of
+permissions under this License.
+
+8. Limitation of Liability.
+
+In no event and under no legal theory, whether in tort (including negligence),
+contract, or otherwise, unless required by applicable law (such as deliberate
+and grossly negligent acts) or agreed to in writing, shall any Contributor be
+liable to You for damages, including any direct, indirect, special, incidental,
+or consequential damages of any character arising as a result of this License or
+out of the use or inability to use the Work (including but not limited to
+damages for loss of goodwill, work stoppage, computer failure or malfunction, or
+any and all other commercial damages or losses), even if such Contributor has
+been advised of the possibility of such damages.
+
+9. Accepting Warranty or Additional Liability.
+
+While redistributing the Work or Derivative Works thereof, You may choose to
+offer, and charge a fee for, acceptance of support, warranty, indemnity, or
+other liability obligations and/or rights consistent with this License. However,
+in accepting such obligations, You may act only on Your own behalf and on Your
+sole responsibility, not on behalf of any other Contributor, and only if You
+agree to indemnify, defend, and hold each Contributor harmless for any liability
+incurred by, or claims asserted against, such Contributor by reason of your
+accepting any such warranty or additional liability.
+
+END OF TERMS AND CONDITIONS
+
+APPENDIX: How to apply the Apache License to your work
+
+To apply the Apache License to your work, attach the following boilerplate
+notice, with the fields enclosed by brackets "[]" replaced with your own
+identifying information. (Don't include the brackets!) The text should be
+enclosed in the appropriate comment syntax for the file format. We also
+recommend that a file or class name and description of purpose be included on
+the same "printed page" as the copyright notice for easier identification within
+third-party archives.
+
+ Copyright [yyyy] [name of copyright owner]
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
diff --git a/vendor/github.com/coreos/go-systemd/dbus/dbus.go b/vendor/github.com/coreos/go-systemd/dbus/dbus.go
new file mode 100644
index 00000000..94043347
--- /dev/null
+++ b/vendor/github.com/coreos/go-systemd/dbus/dbus.go
@@ -0,0 +1,198 @@
+// Copyright 2015 CoreOS, Inc.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// Package dbus provides integration with the systemd D-Bus API. See http://www.freedesktop.org/wiki/Software/systemd/dbus/
+package dbus
+
+import (
+ "fmt"
+ "os"
+ "strconv"
+ "strings"
+ "sync"
+
+ "github.com/godbus/dbus"
+)
+
+const (
+ alpha = `abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ`
+ num = `0123456789`
+ alphanum = alpha + num
+ signalBuffer = 100
+)
+
+// needsEscape checks whether a byte in a potential dbus ObjectPath needs to be escaped
+func needsEscape(i int, b byte) bool {
+ // Escape everything that is not a-z-A-Z-0-9
+ // Also escape 0-9 if it's the first character
+ return strings.IndexByte(alphanum, b) == -1 ||
+ (i == 0 && strings.IndexByte(num, b) != -1)
+}
+
+// PathBusEscape sanitizes a constituent string of a dbus ObjectPath using the
+// rules that systemd uses for serializing special characters.
+func PathBusEscape(path string) string {
+ // Special case the empty string
+ if len(path) == 0 {
+ return "_"
+ }
+ n := []byte{}
+ for i := 0; i < len(path); i++ {
+ c := path[i]
+ if needsEscape(i, c) {
+ e := fmt.Sprintf("_%x", c)
+ n = append(n, []byte(e)...)
+ } else {
+ n = append(n, c)
+ }
+ }
+ return string(n)
+}
+
+// Conn is a connection to systemd's dbus endpoint.
+type Conn struct {
+ // sysconn/sysobj are only used to call dbus methods
+ sysconn *dbus.Conn
+ sysobj dbus.BusObject
+
+ // sigconn/sigobj are only used to receive dbus signals
+ sigconn *dbus.Conn
+ sigobj dbus.BusObject
+
+ jobListener struct {
+ jobs map[dbus.ObjectPath]chan<- string
+ sync.Mutex
+ }
+ subscriber struct {
+ updateCh chan<- *SubStateUpdate
+ errCh chan<- error
+ sync.Mutex
+ ignore map[dbus.ObjectPath]int64
+ cleanIgnore int64
+ }
+}
+
+// New establishes a connection to the system bus and authenticates.
+// Callers should call Close() when done with the connection.
+func New() (*Conn, error) {
+ return newConnection(func() (*dbus.Conn, error) {
+ return dbusAuthHelloConnection(dbus.SystemBusPrivate)
+ })
+}
+
+// NewUserConnection establishes a connection to the session bus and
+// authenticates. This can be used to connect to systemd user instances.
+// Callers should call Close() when done with the connection.
+func NewUserConnection() (*Conn, error) {
+ return newConnection(func() (*dbus.Conn, error) {
+ return dbusAuthHelloConnection(dbus.SessionBusPrivate)
+ })
+}
+
+// NewSystemdConnection establishes a private, direct connection to systemd.
+// This can be used for communicating with systemd without a dbus daemon.
+// Callers should call Close() when done with the connection.
+func NewSystemdConnection() (*Conn, error) {
+ return newConnection(func() (*dbus.Conn, error) {
+ // We skip Hello when talking directly to systemd.
+ return dbusAuthConnection(func() (*dbus.Conn, error) {
+ return dbus.Dial("unix:path=/run/systemd/private")
+ })
+ })
+}
+
+// Close closes an established connection
+func (c *Conn) Close() {
+ c.sysconn.Close()
+ c.sigconn.Close()
+}
+
+func newConnection(createBus func() (*dbus.Conn, error)) (*Conn, error) {
+ sysconn, err := createBus()
+ if err != nil {
+ return nil, err
+ }
+
+ sigconn, err := createBus()
+ if err != nil {
+ sysconn.Close()
+ return nil, err
+ }
+
+ c := &Conn{
+ sysconn: sysconn,
+ sysobj: systemdObject(sysconn),
+ sigconn: sigconn,
+ sigobj: systemdObject(sigconn),
+ }
+
+ c.subscriber.ignore = make(map[dbus.ObjectPath]int64)
+ c.jobListener.jobs = make(map[dbus.ObjectPath]chan<- string)
+
+ // Setup the listeners on jobs so that we can get completions
+ c.sigconn.BusObject().Call("org.freedesktop.DBus.AddMatch", 0,
+ "type='signal', interface='org.freedesktop.systemd1.Manager', member='JobRemoved'")
+
+ c.dispatch()
+ return c, nil
+}
+
+// GetManagerProperty returns the value of a property on the org.freedesktop.systemd1.Manager
+// interface. The value is returned in its string representation, as defined at
+// https://developer.gnome.org/glib/unstable/gvariant-text.html
+func (c *Conn) GetManagerProperty(prop string) (string, error) {
+ variant, err := c.sysobj.GetProperty("org.freedesktop.systemd1.Manager." + prop)
+ if err != nil {
+ return "", err
+ }
+ return variant.String(), nil
+}
+
+func dbusAuthConnection(createBus func() (*dbus.Conn, error)) (*dbus.Conn, error) {
+ conn, err := createBus()
+ if err != nil {
+ return nil, err
+ }
+
+ // Only use EXTERNAL method, and hardcode the uid (not username)
+ // to avoid a username lookup (which requires a dynamically linked
+ // libc)
+ methods := []dbus.Auth{dbus.AuthExternal(strconv.Itoa(os.Getuid()))}
+
+ err = conn.Auth(methods)
+ if err != nil {
+ conn.Close()
+ return nil, err
+ }
+
+ return conn, nil
+}
+
+func dbusAuthHelloConnection(createBus func() (*dbus.Conn, error)) (*dbus.Conn, error) {
+ conn, err := dbusAuthConnection(createBus)
+ if err != nil {
+ return nil, err
+ }
+
+ if err = conn.Hello(); err != nil {
+ conn.Close()
+ return nil, err
+ }
+
+ return conn, nil
+}
+
+func systemdObject(conn *dbus.Conn) dbus.BusObject {
+ return conn.Object("org.freedesktop.systemd1", dbus.ObjectPath("/org/freedesktop/systemd1"))
+}
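
Connecting is the first step for everything in this package: New dials and authenticates against the system bus, and the connection should be closed when done. A sketch that reads a manager property; it assumes a host that is actually running systemd and sufficient D-Bus permissions, and "Version" is a standard org.freedesktop.systemd1.Manager property:

package main

import (
	"fmt"
	"log"

	"github.com/coreos/go-systemd/dbus"
)

func main() {
	conn, err := dbus.New()
	if err != nil {
		log.Fatal(err)
	}
	defer conn.Close()

	// GetManagerProperty returns the value in its GVariant text form,
	// so a string property comes back with surrounding quotes.
	version, err := conn.GetManagerProperty("Version")
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println("systemd version:", version)
}
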
diff --git a/vendor/github.com/coreos/go-systemd/dbus/methods.go b/vendor/github.com/coreos/go-systemd/dbus/methods.go
new file mode 100644
index 00000000..f9552a33
--- /dev/null
+++ b/vendor/github.com/coreos/go-systemd/dbus/methods.go
@@ -0,0 +1,442 @@
+// Copyright 2015 CoreOS, Inc.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package dbus
+
+import (
+ "errors"
+ "path"
+ "strconv"
+
+ "github.com/godbus/dbus"
+)
+
+func (c *Conn) jobComplete(signal *dbus.Signal) {
+ var id uint32
+ var job dbus.ObjectPath
+ var unit string
+ var result string
+ dbus.Store(signal.Body, &id, &job, &unit, &result)
+ c.jobListener.Lock()
+ out, ok := c.jobListener.jobs[job]
+ if ok {
+ out <- result
+ delete(c.jobListener.jobs, job)
+ }
+ c.jobListener.Unlock()
+}
+
+func (c *Conn) startJob(ch chan<- string, job string, args ...interface{}) (int, error) {
+ if ch != nil {
+ c.jobListener.Lock()
+ defer c.jobListener.Unlock()
+ }
+
+ var p dbus.ObjectPath
+ err := c.sysobj.Call(job, 0, args...).Store(&p)
+ if err != nil {
+ return 0, err
+ }
+
+ if ch != nil {
+ c.jobListener.jobs[p] = ch
+ }
+
+ // ignore error since 0 is fine if conversion fails
+ jobID, _ := strconv.Atoi(path.Base(string(p)))
+
+ return jobID, nil
+}
+
+// StartUnit enqueues a start job and depending jobs, if any (unless otherwise
+// specified by the mode string).
+//
+// Takes the unit to activate, plus a mode string. The mode needs to be one of
+// replace, fail, isolate, ignore-dependencies, ignore-requirements. If
+// "replace" the call will start the unit and its dependencies, possibly
+// replacing already queued jobs that conflict with this. If "fail" the call
+// will start the unit and its dependencies, but will fail if this would change
+// an already queued job. If "isolate" the call will start the unit in question
+// and terminate all units that aren't dependencies of it. If
+// "ignore-dependencies" it will start a unit but ignore all its dependencies.
+// If "ignore-requirements" it will start a unit but only ignore the
+// requirement dependencies. It is not recommended to make use of the latter
+// two options.
+//
+// If the provided channel is non-nil, a result string will be sent to it upon
+// job completion: one of done, canceled, timeout, failed, dependency, skipped.
+// done indicates successful execution of a job. canceled indicates that a job
+// has been canceled before it finished execution. timeout indicates that the
+// job timeout was reached. failed indicates that the job failed. dependency
+// indicates that a job this job has been depending on failed and the job hence
+// has been removed too. skipped indicates that a job was skipped because it
+// didn't apply to the unit's current state.
+//
+// If no error occurs, the ID of the underlying systemd job will be returned. There
+// does exist the possibility for no error to be returned, but for the returned job
+// ID to be 0. In this case, the actual underlying ID is not 0 and this datapoint
+// should not be considered authoritative.
+//
+// If an error does occur, it will be returned to the user alongside a job ID of 0.
+func (c *Conn) StartUnit(name string, mode string, ch chan<- string) (int, error) {
+ return c.startJob(ch, "org.freedesktop.systemd1.Manager.StartUnit", name, mode)
+}
+
+// StopUnit is similar to StartUnit but stops the specified unit rather
+// than starting it.
+func (c *Conn) StopUnit(name string, mode string, ch chan<- string) (int, error) {
+ return c.startJob(ch, "org.freedesktop.systemd1.Manager.StopUnit", name, mode)
+}
+
+// ReloadUnit reloads a unit. Reloading is done only if the unit is already running and fails otherwise.
+func (c *Conn) ReloadUnit(name string, mode string, ch chan<- string) (int, error) {
+ return c.startJob(ch, "org.freedesktop.systemd1.Manager.ReloadUnit", name, mode)
+}
+
+// RestartUnit restarts a service. If a service is restarted that isn't
+// running it will be started.
+func (c *Conn) RestartUnit(name string, mode string, ch chan<- string) (int, error) {
+ return c.startJob(ch, "org.freedesktop.systemd1.Manager.RestartUnit", name, mode)
+}
+
+// TryRestartUnit is like RestartUnit, except that a service that isn't running
+// is not affected by the restart.
+func (c *Conn) TryRestartUnit(name string, mode string, ch chan<- string) (int, error) {
+ return c.startJob(ch, "org.freedesktop.systemd1.Manager.TryRestartUnit", name, mode)
+}
+
+// ReloadOrRestartUnit attempts a reload if the unit supports it and uses a
+// restart otherwise.
+func (c *Conn) ReloadOrRestartUnit(name string, mode string, ch chan<- string) (int, error) {
+ return c.startJob(ch, "org.freedesktop.systemd1.Manager.ReloadOrRestartUnit", name, mode)
+}
+
+// ReloadOrTryRestartUnit attempts a reload if the unit supports it and uses a
+// "Try"-flavored restart otherwise.
+func (c *Conn) ReloadOrTryRestartUnit(name string, mode string, ch chan<- string) (int, error) {
+ return c.startJob(ch, "org.freedesktop.systemd1.Manager.ReloadOrTryRestartUnit", name, mode)
+}
+
+// StartTransientUnit() may be used to create and start a transient unit, which
+// will be released as soon as it is not running or referenced anymore or the
+// system is rebooted. name is the unit name including suffix, and must be
+// unique. mode is the same as in StartUnit(), properties contains properties
+// of the unit.
+func (c *Conn) StartTransientUnit(name string, mode string, properties []Property, ch chan<- string) (int, error) {
+ return c.startJob(ch, "org.freedesktop.systemd1.Manager.StartTransientUnit", name, mode, properties, make([]PropertyCollection, 0))
+}
+
+// KillUnit takes the unit name and a UNIX signal number to send. All of the unit's
+// processes are killed.
+func (c *Conn) KillUnit(name string, signal int32) {
+ c.sysobj.Call("org.freedesktop.systemd1.Manager.KillUnit", 0, name, "all", signal).Store()
+}
+
+// ResetFailedUnit resets the "failed" state of a specific unit.
+func (c *Conn) ResetFailedUnit(name string) error {
+ return c.sysobj.Call("org.freedesktop.systemd1.Manager.ResetFailedUnit", 0, name).Store()
+}
+
+// getProperties takes the unit name and returns all of its dbus object properties, for the given dbus interface
+func (c *Conn) getProperties(unit string, dbusInterface string) (map[string]interface{}, error) {
+ var err error
+ var props map[string]dbus.Variant
+
+ path := unitPath(unit)
+ if !path.IsValid() {
+ return nil, errors.New("invalid unit name: " + unit)
+ }
+
+ obj := c.sysconn.Object("org.freedesktop.systemd1", path)
+ err = obj.Call("org.freedesktop.DBus.Properties.GetAll", 0, dbusInterface).Store(&props)
+ if err != nil {
+ return nil, err
+ }
+
+ out := make(map[string]interface{}, len(props))
+ for k, v := range props {
+ out[k] = v.Value()
+ }
+
+ return out, nil
+}
+
+// GetUnitProperties takes the unit name and returns all of its dbus object properties.
+func (c *Conn) GetUnitProperties(unit string) (map[string]interface{}, error) {
+ return c.getProperties(unit, "org.freedesktop.systemd1.Unit")
+}
+
+func (c *Conn) getProperty(unit string, dbusInterface string, propertyName string) (*Property, error) {
+ var err error
+ var prop dbus.Variant
+
+ path := unitPath(unit)
+ if !path.IsValid() {
+ return nil, errors.New("invalid unit name: " + unit)
+ }
+
+ obj := c.sysconn.Object("org.freedesktop.systemd1", path)
+ err = obj.Call("org.freedesktop.DBus.Properties.Get", 0, dbusInterface, propertyName).Store(&prop)
+ if err != nil {
+ return nil, err
+ }
+
+ return &Property{Name: propertyName, Value: prop}, nil
+}
+
+func (c *Conn) GetUnitProperty(unit string, propertyName string) (*Property, error) {
+ return c.getProperty(unit, "org.freedesktop.systemd1.Unit", propertyName)
+}
+
+// GetUnitTypeProperties returns the extra properties for a unit, specific to the unit type.
+// Valid values for unitType: Service, Socket, Target, Device, Mount, Automount, Snapshot, Timer, Swap, Path, Slice, Scope
+// return "dbus.Error: Unknown interface" if the unitType is not the correct type of the unit
+func (c *Conn) GetUnitTypeProperties(unit string, unitType string) (map[string]interface{}, error) {
+ return c.getProperties(unit, "org.freedesktop.systemd1."+unitType)
+}
+
+// SetUnitProperties() may be used to modify certain unit properties at runtime.
+// Not all properties may be changed at runtime, but many resource management
+// settings (primarily those in systemd.cgroup(5)) may. The changes are applied
+// instantly, and stored on disk for future boots, unless runtime is true, in which
+// case the settings only apply until the next reboot. name is the name of the unit
+// to modify. properties are the settings to set, encoded as an array of property
+// name and value pairs.
+func (c *Conn) SetUnitProperties(name string, runtime bool, properties ...Property) error {
+ return c.sysobj.Call("org.freedesktop.systemd1.Manager.SetUnitProperties", 0, name, runtime, properties).Store()
+}
+
+func (c *Conn) GetUnitTypeProperty(unit string, unitType string, propertyName string) (*Property, error) {
+ return c.getProperty(unit, "org.freedesktop.systemd1."+unitType, propertyName)
+}
+
+type UnitStatus struct {
+ Name string // The primary unit name as string
+ Description string // The human readable description string
+ LoadState string // The load state (i.e. whether the unit file has been loaded successfully)
+ ActiveState string // The active state (i.e. whether the unit is currently started or not)
+ SubState string // The sub state (a more fine-grained version of the active state that is specific to the unit type, which the active state is not)
+ Followed string // A unit that is being followed in its state by this unit, if there is any, otherwise the empty string.
+ Path dbus.ObjectPath // The unit object path
+ JobId uint32 // If there is a job queued for the job unit the numeric job id, 0 otherwise
+ JobType string // The job type as string
+ JobPath dbus.ObjectPath // The job object path
+}
+
+// ListUnits returns an array with all currently loaded units. Note that
+// units may be known by multiple names at the same time, and hence there might
+// be more unit names loaded than actual units behind them.
+func (c *Conn) ListUnits() ([]UnitStatus, error) {
+ result := make([][]interface{}, 0)
+ err := c.sysobj.Call("org.freedesktop.systemd1.Manager.ListUnits", 0).Store(&result)
+ if err != nil {
+ return nil, err
+ }
+
+ resultInterface := make([]interface{}, len(result))
+ for i := range result {
+ resultInterface[i] = result[i]
+ }
+
+ status := make([]UnitStatus, len(result))
+ statusInterface := make([]interface{}, len(status))
+ for i := range status {
+ statusInterface[i] = &status[i]
+ }
+
+ err = dbus.Store(resultInterface, statusInterface...)
+ if err != nil {
+ return nil, err
+ }
+
+ return status, nil
+}
+
+type UnitFile struct {
+ Path string
+ Type string
+}
+
+// ListUnitFiles returns an array of all available units on disk.
+func (c *Conn) ListUnitFiles() ([]UnitFile, error) {
+ result := make([][]interface{}, 0)
+ err := c.sysobj.Call("org.freedesktop.systemd1.Manager.ListUnitFiles", 0).Store(&result)
+ if err != nil {
+ return nil, err
+ }
+
+ resultInterface := make([]interface{}, len(result))
+ for i := range result {
+ resultInterface[i] = result[i]
+ }
+
+ files := make([]UnitFile, len(result))
+ fileInterface := make([]interface{}, len(files))
+ for i := range files {
+ fileInterface[i] = &files[i]
+ }
+
+ err = dbus.Store(resultInterface, fileInterface...)
+ if err != nil {
+ return nil, err
+ }
+
+ return files, nil
+}
+
+type LinkUnitFileChange EnableUnitFileChange
+
+// LinkUnitFiles() links unit files (that are located outside of the
+// usual unit search paths) into the unit search path.
+//
+// It takes a list of absolute paths to unit files to link and two
+// booleans. The first boolean controls whether the unit shall be
+// enabled for runtime only (true, /run), or persistently (false,
+// /etc).
+// The second controls whether symlinks pointing to other units shall
+// be replaced if necessary.
+//
+// This call returns a list of the changes made. The list consists of
+// structures with three strings: the type of the change (one of symlink
+// or unlink), the file name of the symlink and the destination of the
+// symlink.
+func (c *Conn) LinkUnitFiles(files []string, runtime bool, force bool) ([]LinkUnitFileChange, error) {
+ result := make([][]interface{}, 0)
+ err := c.sysobj.Call("org.freedesktop.systemd1.Manager.LinkUnitFiles", 0, files, runtime, force).Store(&result)
+ if err != nil {
+ return nil, err
+ }
+
+ resultInterface := make([]interface{}, len(result))
+ for i := range result {
+ resultInterface[i] = result[i]
+ }
+
+ changes := make([]LinkUnitFileChange, len(result))
+ changesInterface := make([]interface{}, len(changes))
+ for i := range changes {
+ changesInterface[i] = &changes[i]
+ }
+
+ err = dbus.Store(resultInterface, changesInterface...)
+ if err != nil {
+ return nil, err
+ }
+
+ return changes, nil
+}
+
+// EnableUnitFiles() may be used to enable one or more units in the system (by
+// creating symlinks to them in /etc or /run).
+//
+// It takes a list of unit files to enable (either just file names or full
+// absolute paths if the unit files are residing outside the usual unit
+// search paths), and two booleans: the first controls whether the unit shall
+// be enabled for runtime only (true, /run), or persistently (false, /etc).
+// The second one controls whether symlinks pointing to other units shall
+// be replaced if necessary.
+//
+// This call returns one boolean and an array with the changes made. The
+// boolean signals whether the unit files contained any enablement
+// information (i.e. an [Install] section). The changes list consists of
+// structures with three strings: the type of the change (one of symlink
+// or unlink), the file name of the symlink and the destination of the
+// symlink.
+func (c *Conn) EnableUnitFiles(files []string, runtime bool, force bool) (bool, []EnableUnitFileChange, error) {
+ var carries_install_info bool
+
+ result := make([][]interface{}, 0)
+ err := c.sysobj.Call("org.freedesktop.systemd1.Manager.EnableUnitFiles", 0, files, runtime, force).Store(&carries_install_info, &result)
+ if err != nil {
+ return false, nil, err
+ }
+
+ resultInterface := make([]interface{}, len(result))
+ for i := range result {
+ resultInterface[i] = result[i]
+ }
+
+ changes := make([]EnableUnitFileChange, len(result))
+ changesInterface := make([]interface{}, len(changes))
+ for i := range changes {
+ changesInterface[i] = &changes[i]
+ }
+
+ err = dbus.Store(resultInterface, changesInterface...)
+ if err != nil {
+ return false, nil, err
+ }
+
+ return carries_install_info, changes, nil
+}
+
+type EnableUnitFileChange struct {
+ Type string // Type of the change (one of symlink or unlink)
+ Filename string // File name of the symlink
+ Destination string // Destination of the symlink
+}
+
+// DisableUnitFiles() may be used to disable one or more units in the system (by
+// removing symlinks to them from /etc or /run).
+//
+// It takes a list of unit files to disable (either just file names or full
+// absolute paths if the unit files are residing outside the usual unit
+// search paths), and one boolean: whether the unit was enabled for runtime
+// only (true, /run), or persistently (false, /etc).
+//
+// This call returns an array with the changes made. The changes list
+// consists of structures with three strings: the type of the change (one of
+// symlink or unlink), the file name of the symlink and the destination of the
+// symlink.
+func (c *Conn) DisableUnitFiles(files []string, runtime bool) ([]DisableUnitFileChange, error) {
+ result := make([][]interface{}, 0)
+ err := c.sysobj.Call("org.freedesktop.systemd1.Manager.DisableUnitFiles", 0, files, runtime).Store(&result)
+ if err != nil {
+ return nil, err
+ }
+
+ resultInterface := make([]interface{}, len(result))
+ for i := range result {
+ resultInterface[i] = result[i]
+ }
+
+ changes := make([]DisableUnitFileChange, len(result))
+ changesInterface := make([]interface{}, len(changes))
+ for i := range changes {
+ changesInterface[i] = &changes[i]
+ }
+
+ err = dbus.Store(resultInterface, changesInterface...)
+ if err != nil {
+ return nil, err
+ }
+
+ return changes, nil
+}
+
+type DisableUnitFileChange struct {
+ Type string // Type of the change (one of symlink or unlink)
+ Filename string // File name of the symlink
+ Destination string // Destination of the symlink
+}
+
+// Reload instructs systemd to scan for and reload unit files. This is
+// equivalent to a 'systemctl daemon-reload'.
+func (c *Conn) Reload() error {
+ return c.sysobj.Call("org.freedesktop.systemd1.Manager.Reload", 0).Store()
+}
+
+func unitPath(name string) dbus.ObjectPath {
+ return dbus.ObjectPath("/org/freedesktop/systemd1/unit/" + PathBusEscape(name))
+}
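
The job-based methods above all share the StartUnit pattern: pass an optional channel and wait for the one-word job result. A sketch using a hypothetical unit name ("example.service") and the "replace" mode; it needs a running systemd and appropriate privileges:

package main

import (
	"fmt"
	"log"

	"github.com/coreos/go-systemd/dbus"
)

func main() {
	conn, err := dbus.New()
	if err != nil {
		log.Fatal(err)
	}
	defer conn.Close()

	// The result sent on ch is one of: done, canceled, timeout, failed,
	// dependency, skipped (see the StartUnit documentation above).
	ch := make(chan string)
	if _, err := conn.StartUnit("example.service", "replace", ch); err != nil {
		log.Fatal(err)
	}
	fmt.Println("job result:", <-ch)

	units, err := conn.ListUnits()
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println("loaded units:", len(units))
}
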
diff --git a/vendor/github.com/coreos/go-systemd/dbus/properties.go b/vendor/github.com/coreos/go-systemd/dbus/properties.go
new file mode 100644
index 00000000..75200115
--- /dev/null
+++ b/vendor/github.com/coreos/go-systemd/dbus/properties.go
@@ -0,0 +1,218 @@
+// Copyright 2015 CoreOS, Inc.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package dbus
+
+import (
+ "github.com/godbus/dbus"
+)
+
+// From the systemd docs:
+//
+// The properties array of StartTransientUnit() may take many of the settings
+// that may also be configured in unit files. Not all parameters are currently
+// accepted though, but we plan to cover more properties with future release.
+// Currently you may set the Description, Slice and all dependency types of
+// units, as well as RemainAfterExit, ExecStart for service units,
+// TimeoutStopUSec and PIDs for scope units, and CPUAccounting, CPUShares,
+// BlockIOAccounting, BlockIOWeight, BlockIOReadBandwidth,
+// BlockIOWriteBandwidth, BlockIODeviceWeight, MemoryAccounting, MemoryLimit,
+// DevicePolicy, DeviceAllow for services/scopes/slices. These fields map
+// directly to their counterparts in unit files and as normal D-Bus object
+// properties. The exception here is the PIDs field of scope units which is
+// used for construction of the scope only and specifies the initial PIDs to
+// add to the scope object.
+
+type Property struct {
+ Name string
+ Value dbus.Variant
+}
+
+type PropertyCollection struct {
+ Name string
+ Properties []Property
+}
+
+type execStart struct {
+ Path string // the binary path to execute
+ Args []string // an array with all arguments to pass to the executed command, starting with argument 0
+ UncleanIsFailure bool // a boolean whether it should be considered a failure if the process exits uncleanly
+}
+
+// PropExecStart sets the ExecStart service property. The first argument is a
+// slice with the binary path to execute followed by the arguments to pass to
+// the executed command. See
+// http://www.freedesktop.org/software/systemd/man/systemd.service.html#ExecStart=
+func PropExecStart(command []string, uncleanIsFailure bool) Property {
+ execStarts := []execStart{
+ execStart{
+ Path: command[0],
+ Args: command,
+ UncleanIsFailure: uncleanIsFailure,
+ },
+ }
+
+ return Property{
+ Name: "ExecStart",
+ Value: dbus.MakeVariant(execStarts),
+ }
+}
+
+// PropRemainAfterExit sets the RemainAfterExit service property. See
+// http://www.freedesktop.org/software/systemd/man/systemd.service.html#RemainAfterExit=
+func PropRemainAfterExit(b bool) Property {
+ return Property{
+ Name: "RemainAfterExit",
+ Value: dbus.MakeVariant(b),
+ }
+}
+
+// PropDescription sets the Description unit property. See
+// http://www.freedesktop.org/software/systemd/man/systemd.unit#Description=
+func PropDescription(desc string) Property {
+ return Property{
+ Name: "Description",
+ Value: dbus.MakeVariant(desc),
+ }
+}
+
+func propDependency(name string, units []string) Property {
+ return Property{
+ Name: name,
+ Value: dbus.MakeVariant(units),
+ }
+}
+
+// PropRequires sets the Requires unit property. See
+// http://www.freedesktop.org/software/systemd/man/systemd.unit.html#Requires=
+func PropRequires(units ...string) Property {
+ return propDependency("Requires", units)
+}
+
+// PropRequiresOverridable sets the RequiresOverridable unit property. See
+// http://www.freedesktop.org/software/systemd/man/systemd.unit.html#RequiresOverridable=
+func PropRequiresOverridable(units ...string) Property {
+ return propDependency("RequiresOverridable", units)
+}
+
+// PropRequisite sets the Requisite unit property. See
+// http://www.freedesktop.org/software/systemd/man/systemd.unit.html#Requisite=
+func PropRequisite(units ...string) Property {
+ return propDependency("Requisite", units)
+}
+
+// PropRequisiteOverridable sets the RequisiteOverridable unit property. See
+// http://www.freedesktop.org/software/systemd/man/systemd.unit.html#RequisiteOverridable=
+func PropRequisiteOverridable(units ...string) Property {
+ return propDependency("RequisiteOverridable", units)
+}
+
+// PropWants sets the Wants unit property. See
+// http://www.freedesktop.org/software/systemd/man/systemd.unit.html#Wants=
+func PropWants(units ...string) Property {
+ return propDependency("Wants", units)
+}
+
+// PropBindsTo sets the BindsTo unit property. See
+// http://www.freedesktop.org/software/systemd/man/systemd.unit.html#BindsTo=
+func PropBindsTo(units ...string) Property {
+ return propDependency("BindsTo", units)
+}
+
+// PropRequiredBy sets the RequiredBy unit property. See
+// http://www.freedesktop.org/software/systemd/man/systemd.unit.html#RequiredBy=
+func PropRequiredBy(units ...string) Property {
+ return propDependency("RequiredBy", units)
+}
+
+// PropRequiredByOverridable sets the RequiredByOverridable unit property. See
+// http://www.freedesktop.org/software/systemd/man/systemd.unit.html#RequiredByOverridable=
+func PropRequiredByOverridable(units ...string) Property {
+ return propDependency("RequiredByOverridable", units)
+}
+
+// PropWantedBy sets the WantedBy unit property. See
+// http://www.freedesktop.org/software/systemd/man/systemd.unit.html#WantedBy=
+func PropWantedBy(units ...string) Property {
+ return propDependency("WantedBy", units)
+}
+
+// PropBoundBy sets the BoundBy unit property. See
+// http://www.freedesktop.org/software/systemd/man/systemd.unit.html#BoundBy=
+func PropBoundBy(units ...string) Property {
+ return propDependency("BoundBy", units)
+}
+
+// PropConflicts sets the Conflicts unit property. See
+// http://www.freedesktop.org/software/systemd/man/systemd.unit.html#Conflicts=
+func PropConflicts(units ...string) Property {
+ return propDependency("Conflicts", units)
+}
+
+// PropConflictedBy sets the ConflictedBy unit property. See
+// http://www.freedesktop.org/software/systemd/man/systemd.unit.html#ConflictedBy=
+func PropConflictedBy(units ...string) Property {
+ return propDependency("ConflictedBy", units)
+}
+
+// PropBefore sets the Before unit property. See
+// http://www.freedesktop.org/software/systemd/man/systemd.unit.html#Before=
+func PropBefore(units ...string) Property {
+ return propDependency("Before", units)
+}
+
+// PropAfter sets the After unit property. See
+// http://www.freedesktop.org/software/systemd/man/systemd.unit.html#After=
+func PropAfter(units ...string) Property {
+ return propDependency("After", units)
+}
+
+// PropOnFailure sets the OnFailure unit property. See
+// http://www.freedesktop.org/software/systemd/man/systemd.unit.html#OnFailure=
+func PropOnFailure(units ...string) Property {
+ return propDependency("OnFailure", units)
+}
+
+// PropTriggers sets the Triggers unit property. See
+// http://www.freedesktop.org/software/systemd/man/systemd.unit.html#Triggers=
+func PropTriggers(units ...string) Property {
+ return propDependency("Triggers", units)
+}
+
+// PropTriggeredBy sets the TriggeredBy unit property. See
+// http://www.freedesktop.org/software/systemd/man/systemd.unit.html#TriggeredBy=
+func PropTriggeredBy(units ...string) Property {
+ return propDependency("TriggeredBy", units)
+}
+
+// PropPropagatesReloadTo sets the PropagatesReloadTo unit property. See
+// http://www.freedesktop.org/software/systemd/man/systemd.unit.html#PropagatesReloadTo=
+func PropPropagatesReloadTo(units ...string) Property {
+ return propDependency("PropagatesReloadTo", units)
+}
+
+// PropRequiresMountsFor sets the RequiresMountsFor unit property. See
+// http://www.freedesktop.org/software/systemd/man/systemd.unit.html#RequiresMountsFor=
+func PropRequiresMountsFor(units ...string) Property {
+ return propDependency("RequiresMountsFor", units)
+}
+
+// PropSlice sets the Slice unit property. See
+// http://www.freedesktop.org/software/systemd/man/systemd.resource-control.html#Slice=
+func PropSlice(slice string) Property {
+ return Property{
+ Name: "Slice",
+ Value: dbus.MakeVariant(slice),
+ }
+}
diff --git a/vendor/github.com/coreos/go-systemd/dbus/set.go b/vendor/github.com/coreos/go-systemd/dbus/set.go
new file mode 100644
index 00000000..f92e6fbe
--- /dev/null
+++ b/vendor/github.com/coreos/go-systemd/dbus/set.go
@@ -0,0 +1,47 @@
+// Copyright 2015 CoreOS, Inc.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package dbus
+
+type set struct {
+ data map[string]bool
+}
+
+func (s *set) Add(value string) {
+ s.data[value] = true
+}
+
+func (s *set) Remove(value string) {
+ delete(s.data, value)
+}
+
+func (s *set) Contains(value string) (exists bool) {
+ _, exists = s.data[value]
+ return
+}
+
+func (s *set) Length() int {
+ return len(s.data)
+}
+
+func (s *set) Values() (values []string) {
+ for val, _ := range s.data {
+ values = append(values, val)
+ }
+ return
+}
+
+func newSet() *set {
+ return &set{make(map[string]bool)}
+}
diff --git a/vendor/github.com/coreos/go-systemd/dbus/subscription.go b/vendor/github.com/coreos/go-systemd/dbus/subscription.go
new file mode 100644
index 00000000..99645144
--- /dev/null
+++ b/vendor/github.com/coreos/go-systemd/dbus/subscription.go
@@ -0,0 +1,250 @@
+// Copyright 2015 CoreOS, Inc.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package dbus
+
+import (
+ "errors"
+ "time"
+
+ "github.com/godbus/dbus"
+)
+
+const (
+ cleanIgnoreInterval = int64(10 * time.Second)
+ ignoreInterval = int64(30 * time.Millisecond)
+)
+
+// Subscribe sets up this connection to subscribe to all systemd dbus events.
+// This is required before calling SubscribeUnits. When the connection closes,
+// systemd will automatically stop sending signals so there is no need to
+// explicitly call Unsubscribe().
+func (c *Conn) Subscribe() error {
+ c.sigconn.BusObject().Call("org.freedesktop.DBus.AddMatch", 0,
+ "type='signal',interface='org.freedesktop.systemd1.Manager',member='UnitNew'")
+ c.sigconn.BusObject().Call("org.freedesktop.DBus.AddMatch", 0,
+ "type='signal',interface='org.freedesktop.DBus.Properties',member='PropertiesChanged'")
+
+ err := c.sigobj.Call("org.freedesktop.systemd1.Manager.Subscribe", 0).Store()
+ if err != nil {
+ return err
+ }
+
+ return nil
+}
+
+// Unsubscribe this connection from systemd dbus events.
+func (c *Conn) Unsubscribe() error {
+ err := c.sigobj.Call("org.freedesktop.systemd1.Manager.Unsubscribe", 0).Store()
+ if err != nil {
+ return err
+ }
+
+ return nil
+}
+
+func (c *Conn) dispatch() {
+ ch := make(chan *dbus.Signal, signalBuffer)
+
+ c.sigconn.Signal(ch)
+
+ go func() {
+ for {
+ signal, ok := <-ch
+ if !ok {
+ return
+ }
+
+ if signal.Name == "org.freedesktop.systemd1.Manager.JobRemoved" {
+ c.jobComplete(signal)
+ }
+
+ if c.subscriber.updateCh == nil {
+ continue
+ }
+
+ var unitPath dbus.ObjectPath
+ switch signal.Name {
+ case "org.freedesktop.systemd1.Manager.JobRemoved":
+ unitName := signal.Body[2].(string)
+ c.sysobj.Call("org.freedesktop.systemd1.Manager.GetUnit", 0, unitName).Store(&unitPath)
+ case "org.freedesktop.systemd1.Manager.UnitNew":
+ unitPath = signal.Body[1].(dbus.ObjectPath)
+ case "org.freedesktop.DBus.Properties.PropertiesChanged":
+ if signal.Body[0].(string) == "org.freedesktop.systemd1.Unit" {
+ unitPath = signal.Path
+ }
+ }
+
+ if unitPath == dbus.ObjectPath("") {
+ continue
+ }
+
+ c.sendSubStateUpdate(unitPath)
+ }
+ }()
+}
+
+// SubscribeUnits returns two unbuffered channels which will receive all
+// changed units every interval. Deleted units are sent as nil.
+func (c *Conn) SubscribeUnits(interval time.Duration) (<-chan map[string]*UnitStatus, <-chan error) {
+ return c.SubscribeUnitsCustom(interval, 0, func(u1, u2 *UnitStatus) bool { return *u1 != *u2 }, nil)
+}
+
+// SubscribeUnitsCustom is like SubscribeUnits but lets you specify the buffer
+// size of the channels, the comparison function for detecting changes and a filter
+// function for cutting down on the noise that your channel receives.
+func (c *Conn) SubscribeUnitsCustom(interval time.Duration, buffer int, isChanged func(*UnitStatus, *UnitStatus) bool, filterUnit func(string) bool) (<-chan map[string]*UnitStatus, <-chan error) {
+ old := make(map[string]*UnitStatus)
+ statusChan := make(chan map[string]*UnitStatus, buffer)
+ errChan := make(chan error, buffer)
+
+ go func() {
+ for {
+ timerChan := time.After(interval)
+
+ units, err := c.ListUnits()
+ if err == nil {
+ cur := make(map[string]*UnitStatus)
+ for i := range units {
+ if filterUnit != nil && filterUnit(units[i].Name) {
+ continue
+ }
+ cur[units[i].Name] = &units[i]
+ }
+
+ // add all new or changed units
+ changed := make(map[string]*UnitStatus)
+ for n, u := range cur {
+ if oldU, ok := old[n]; !ok || isChanged(oldU, u) {
+ changed[n] = u
+ }
+ delete(old, n)
+ }
+
+ // add all deleted units
+ for oldN := range old {
+ changed[oldN] = nil
+ }
+
+ old = cur
+
+ if len(changed) != 0 {
+ statusChan <- changed
+ }
+ } else {
+ errChan <- err
+ }
+
+ <-timerChan
+ }
+ }()
+
+ return statusChan, errChan
+}
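+
+// Illustrative sketch (editorial addition, not part of the upstream file):
+// draining the channels returned by SubscribeUnits. Assumes c is an
+// established connection on which Subscribe has been called.
+func exampleWatchUnits(c *Conn) {
+	statusCh, errCh := c.SubscribeUnits(5 * time.Second)
+	for {
+		select {
+		case changed := <-statusCh:
+			for name, status := range changed {
+				if status == nil {
+					// unit was deleted since the previous interval
+					continue
+				}
+				_ = name // e.g. inspect status.ActiveState / status.SubState
+			}
+		case err := <-errCh:
+			_ = err // e.g. log the error and keep polling
+		}
+	}
+}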
+
+type SubStateUpdate struct {
+ UnitName string
+ SubState string
+}
+
+// SetSubStateSubscriber writes to updateCh when any unit's substate changes.
+// Although this writes to updateCh on every state change, the reported state
+// may be more recent than the change that generated it (due to an unavoidable
+// race in the systemd dbus interface). That is, this method provides a good
+// way to keep a current view of all units' states, but is not guaranteed to
+// show every state transition they go through. Furthermore, state changes
+// will only be written to the channel with non-blocking writes. If updateCh
+// is full, it attempts to write an error to errCh; if errCh is full, the error
+// passes silently.
+func (c *Conn) SetSubStateSubscriber(updateCh chan<- *SubStateUpdate, errCh chan<- error) {
+ c.subscriber.Lock()
+ defer c.subscriber.Unlock()
+ c.subscriber.updateCh = updateCh
+ c.subscriber.errCh = errCh
+}
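+
+// Illustrative sketch (editorial addition, not part of the upstream file):
+// registering a substate subscriber. The channels should be buffered because
+// updates and errors are delivered with non-blocking writes. Assumes
+// Subscribe has already been called on c.
+func exampleSubStateUpdates(c *Conn) {
+	updates := make(chan *SubStateUpdate, 256)
+	errs := make(chan error, 16)
+	c.SetSubStateSubscriber(updates, errs)
+	go func() {
+		for u := range updates {
+			_ = u.UnitName // e.g. react to the new u.SubState
+		}
+	}()
+}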
+
+func (c *Conn) sendSubStateUpdate(path dbus.ObjectPath) {
+ c.subscriber.Lock()
+ defer c.subscriber.Unlock()
+
+ if c.shouldIgnore(path) {
+ return
+ }
+
+	info, err := c.GetUnitProperties(string(path))
+	if err != nil {
+		select {
+		case c.subscriber.errCh <- err:
+		default:
+		}
+		// info is nil when GetUnitProperties fails, so return early instead
+		// of panicking on the type assertions below.
+		return
+	}
+
+ name := info["Id"].(string)
+ substate := info["SubState"].(string)
+
+ update := &SubStateUpdate{name, substate}
+ select {
+ case c.subscriber.updateCh <- update:
+ default:
+ select {
+ case c.subscriber.errCh <- errors.New("update channel full!"):
+ default:
+ }
+ }
+
+ c.updateIgnore(path, info)
+}
+
+// The ignore functions work around a wart in the systemd dbus interface.
+// Requesting the properties of an unloaded unit will cause systemd to send a
+// pair of UnitNew/UnitRemoved signals. Because we need to get a unit's
+// properties on UnitNew (as that's the only indication of a new unit coming up
+// for the first time), we would enter an infinite loop if we did not attempt
+// to detect and ignore these spurious signals. The signals themselves are
+// indistinguishable from relevant ones, so we (somewhat hackishly) ignore an
+// unloaded unit's signals for a short time after requesting its properties.
+// This means that we will miss e.g. a transient unit being restarted
+// *immediately* upon failure and also a transient unit being started
+// immediately after requesting its status (with systemctl status, for example,
+// because this causes a UnitNew signal to be sent which then causes us to fetch
+// the properties).
+
+func (c *Conn) shouldIgnore(path dbus.ObjectPath) bool {
+ t, ok := c.subscriber.ignore[path]
+ return ok && t >= time.Now().UnixNano()
+}
+
+func (c *Conn) updateIgnore(path dbus.ObjectPath, info map[string]interface{}) {
+ c.cleanIgnore()
+
+ // unit is unloaded - it will trigger bad systemd dbus behavior
+ if info["LoadState"].(string) == "not-found" {
+ c.subscriber.ignore[path] = time.Now().UnixNano() + ignoreInterval
+ }
+}
+
+// without this, ignore would grow unboundedly over time
+func (c *Conn) cleanIgnore() {
+ now := time.Now().UnixNano()
+ if c.subscriber.cleanIgnore < now {
+ c.subscriber.cleanIgnore = now + cleanIgnoreInterval
+
+ for p, t := range c.subscriber.ignore {
+ if t < now {
+ delete(c.subscriber.ignore, p)
+ }
+ }
+ }
+}
diff --git a/vendor/github.com/coreos/go-systemd/dbus/subscription_set.go b/vendor/github.com/coreos/go-systemd/dbus/subscription_set.go
new file mode 100644
index 00000000..5b408d58
--- /dev/null
+++ b/vendor/github.com/coreos/go-systemd/dbus/subscription_set.go
@@ -0,0 +1,57 @@
+// Copyright 2015 CoreOS, Inc.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package dbus
+
+import (
+ "time"
+)
+
+// SubscriptionSet is a set of units which, like conn.Subscribe, delivers unit
+// change events, but filters those events to the units in the set.
+type SubscriptionSet struct {
+ *set
+ conn *Conn
+}
+
+func (s *SubscriptionSet) filter(unit string) bool {
+ return !s.Contains(unit)
+}
+
+// Subscribe starts listening for dbus events for all of the units in the set.
+// Returns channels identical to conn.SubscribeUnits.
+func (s *SubscriptionSet) Subscribe() (<-chan map[string]*UnitStatus, <-chan error) {
+ // TODO: Make fully evented by using systemd 209 with properties changed values
+ return s.conn.SubscribeUnitsCustom(time.Second, 0,
+ mismatchUnitStatus,
+ func(unit string) bool { return s.filter(unit) },
+ )
+}
+
+// NewSubscriptionSet returns a new subscription set.
+func (conn *Conn) NewSubscriptionSet() *SubscriptionSet {
+ return &SubscriptionSet{newSet(), conn}
+}
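+
+// Illustrative sketch (editorial addition, not part of the upstream file):
+// watching a single unit through a SubscriptionSet. Assumes Subscribe has
+// already been called on conn; "sshd.service" is only an example name.
+func exampleWatchOneUnit(conn *Conn) {
+	units := conn.NewSubscriptionSet()
+	units.Add("sshd.service")
+	statusCh, errCh := units.Subscribe()
+	_ = statusCh // receives maps of changed UnitStatus for sshd.service only
+	_ = errCh    // receives errors from the underlying polling loop
+}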
+
+// mismatchUnitStatus returns true if the provided UnitStatus objects
+// are not equivalent. false is returned if the objects are equivalent.
+// Only the Name, Description and state-related fields are used in
+// the comparison.
+func mismatchUnitStatus(u1, u2 *UnitStatus) bool {
+ return u1.Name != u2.Name ||
+ u1.Description != u2.Description ||
+ u1.LoadState != u2.LoadState ||
+ u1.ActiveState != u2.ActiveState ||
+ u1.SubState != u2.SubState
+}
diff --git a/vendor/github.com/godbus/dbus/CONTRIBUTING.md b/vendor/github.com/godbus/dbus/CONTRIBUTING.md
new file mode 100644
index 00000000..c88f9b2b
--- /dev/null
+++ b/vendor/github.com/godbus/dbus/CONTRIBUTING.md
@@ -0,0 +1,50 @@
+# How to Contribute
+
+## Getting Started
+
+- Fork the repository on GitHub
+- Read the [README](README.markdown) for build and test instructions
+- Play with the project, submit bugs, submit patches!
+
+## Contribution Flow
+
+This is a rough outline of what a contributor's workflow looks like:
+
+- Create a topic branch from where you want to base your work (usually master).
+- Make commits of logical units.
+- Make sure your commit messages are in the proper format (see below).
+- Push your changes to a topic branch in your fork of the repository.
+- Make sure the tests pass, and add any new tests as appropriate.
+- Submit a pull request to the original repository.
+
+Thanks for your contributions!
+
+### Format of the Commit Message
+
+We follow a rough convention for commit messages that is designed to answer two
+questions: what changed and why. The subject line should feature the what and
+the body of the commit should describe the why.
+
+```
+scripts: add the test-cluster command
+
+this uses tmux to setup a test cluster that you can easily kill and
+start for debugging.
+
+Fixes #38
+```
+
+The format can be described more formally as follows:
+
+```
+<subsystem>: <what changed>
+<BLANK LINE>
+<why this change was made>
+<BLANK LINE>
+<footer>
+```
+
+The first line is the subject and should be no longer than 70 characters, the
+second line is always blank, and other lines should be wrapped at 80 characters.
+This allows the message to be easier to read on GitHub as well as in various
+git tools.
diff --git a/vendor/github.com/godbus/dbus/LICENSE b/vendor/github.com/godbus/dbus/LICENSE
new file mode 100644
index 00000000..670d88fc
--- /dev/null
+++ b/vendor/github.com/godbus/dbus/LICENSE
@@ -0,0 +1,25 @@
+Copyright (c) 2013, Georg Reinke (), Google
+All rights reserved.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions
+are met:
+
+1. Redistributions of source code must retain the above copyright notice,
+this list of conditions and the following disclaimer.
+
+2. Redistributions in binary form must reproduce the above copyright
+notice, this list of conditions and the following disclaimer in the
+documentation and/or other materials provided with the distribution.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED
+TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
+LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
+NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
diff --git a/vendor/github.com/godbus/dbus/MAINTAINERS b/vendor/github.com/godbus/dbus/MAINTAINERS
new file mode 100644
index 00000000..e8968ece
--- /dev/null
+++ b/vendor/github.com/godbus/dbus/MAINTAINERS
@@ -0,0 +1,2 @@
+Brandon Philips (@philips)
+Brian Waldon (@bcwaldon)
diff --git a/vendor/github.com/godbus/dbus/README.markdown b/vendor/github.com/godbus/dbus/README.markdown
new file mode 100644
index 00000000..0a6e7e5b
--- /dev/null
+++ b/vendor/github.com/godbus/dbus/README.markdown
@@ -0,0 +1,41 @@
+dbus
+----
+
+dbus is a simple library that implements native Go client bindings for the
+D-Bus message bus system.
+
+### Features
+
+* Complete native implementation of the D-Bus message protocol
+* Go-like API (channels for signals / asynchronous method calls, Goroutine-safe connections)
+* Subpackages that help with the introspection / property interfaces
+
+### Installation
+
+This package requires Go 1.1. If you have installed it and set up your GOPATH, just run:
+
+```
+go get github.com/godbus/dbus
+```
+
+If you want to use the subpackages, you can install them the same way.
+
+### Usage
+
+The complete package documentation and some simple examples are available at
+[godoc.org](http://godoc.org/github.com/godbus/dbus). Also, the
+[_examples](https://github.com/godbus/dbus/tree/master/_examples) directory
+gives a short overview of the basic usage.
+
+#### Projects using godbus
+- [notify](https://github.com/esiqveland/notify) provides a library for desktop notifications over dbus.
+
+Please note that the API is considered unstable for now and may change without
+further notice.
+
+### License
+
+go.dbus is available under the Simplified BSD License; see LICENSE for the full
+text.
+
+Nearly all of the credit for this library goes to github.com/guelfey/go.dbus.
diff --git a/vendor/github.com/godbus/dbus/auth.go b/vendor/github.com/godbus/dbus/auth.go
new file mode 100644
index 00000000..98017b69
--- /dev/null
+++ b/vendor/github.com/godbus/dbus/auth.go
@@ -0,0 +1,253 @@
+package dbus
+
+import (
+ "bufio"
+ "bytes"
+ "errors"
+ "io"
+ "os"
+ "strconv"
+)
+
+// AuthStatus represents the Status of an authentication mechanism.
+type AuthStatus byte
+
+const (
+ // AuthOk signals that authentication is finished; the next command
+ // from the server should be an OK.
+ AuthOk AuthStatus = iota
+
+ // AuthContinue signals that additional data is needed; the next command
+ // from the server should be a DATA.
+ AuthContinue
+
+ // AuthError signals an error; the server sent invalid data or some
+ // other unexpected thing happened and the current authentication
+ // process should be aborted.
+ AuthError
+)
+
+type authState byte
+
+const (
+ waitingForData authState = iota
+ waitingForOk
+ waitingForReject
+)
+
+// Auth defines the behaviour of an authentication mechanism.
+type Auth interface {
+	// Return the name of the mechanism, the argument to the first AUTH command
+ // and the next status.
+ FirstData() (name, resp []byte, status AuthStatus)
+
+ // Process the given DATA command, and return the argument to the DATA
+ // command and the next status. If len(resp) == 0, no DATA command is sent.
+ HandleData(data []byte) (resp []byte, status AuthStatus)
+}
+
+// Auth authenticates the connection, trying the given list of authentication
+// mechanisms (in that order). If nil is passed, the EXTERNAL and
+// DBUS_COOKIE_SHA1 mechanisms are tried for the current user. For private
+// connections, this method must be called before sending any messages to the
+// bus. Auth must not be called on shared connections.
+func (conn *Conn) Auth(methods []Auth) error {
+ if methods == nil {
+ uid := strconv.Itoa(os.Getuid())
+ methods = []Auth{AuthExternal(uid), AuthCookieSha1(uid, getHomeDir())}
+ }
+ in := bufio.NewReader(conn.transport)
+ err := conn.transport.SendNullByte()
+ if err != nil {
+ return err
+ }
+ err = authWriteLine(conn.transport, []byte("AUTH"))
+ if err != nil {
+ return err
+ }
+ s, err := authReadLine(in)
+ if err != nil {
+ return err
+ }
+ if len(s) < 2 || !bytes.Equal(s[0], []byte("REJECTED")) {
+ return errors.New("dbus: authentication protocol error")
+ }
+ s = s[1:]
+ for _, v := range s {
+ for _, m := range methods {
+ if name, data, status := m.FirstData(); bytes.Equal(v, name) {
+ var ok bool
+ err = authWriteLine(conn.transport, []byte("AUTH"), []byte(v), data)
+ if err != nil {
+ return err
+ }
+ switch status {
+ case AuthOk:
+ err, ok = conn.tryAuth(m, waitingForOk, in)
+ case AuthContinue:
+ err, ok = conn.tryAuth(m, waitingForData, in)
+ default:
+ panic("dbus: invalid authentication status")
+ }
+ if err != nil {
+ return err
+ }
+ if ok {
+ if conn.transport.SupportsUnixFDs() {
+ err = authWriteLine(conn, []byte("NEGOTIATE_UNIX_FD"))
+ if err != nil {
+ return err
+ }
+ line, err := authReadLine(in)
+ if err != nil {
+ return err
+ }
+ switch {
+ case bytes.Equal(line[0], []byte("AGREE_UNIX_FD")):
+ conn.EnableUnixFDs()
+ conn.unixFD = true
+ case bytes.Equal(line[0], []byte("ERROR")):
+ default:
+ return errors.New("dbus: authentication protocol error")
+ }
+ }
+ err = authWriteLine(conn.transport, []byte("BEGIN"))
+ if err != nil {
+ return err
+ }
+ go conn.inWorker()
+ go conn.outWorker()
+ return nil
+ }
+ }
+ }
+ }
+ return errors.New("dbus: authentication failed")
+}
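+
+// Illustrative sketch (editorial addition, not part of the upstream file):
+// the call sequence for a private connection, where Auth (and then Hello)
+// must be invoked explicitly before any other traffic.
+func exampleDialPrivate() (*Conn, error) {
+	conn, err := SystemBusPrivate()
+	if err != nil {
+		return nil, err
+	}
+	if err := conn.Auth(nil); err != nil {
+		conn.Close()
+		return nil, err
+	}
+	if err := conn.Hello(); err != nil {
+		conn.Close()
+		return nil, err
+	}
+	return conn, nil
+}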
+
+// tryAuth tries to authenticate with m as the mechanism, using state as the
+// initial authState and in for reading input. It returns (nil, true) on
+// success, (nil, false) on a REJECTED and (someErr, false) if some other
+// error occurred.
+func (conn *Conn) tryAuth(m Auth, state authState, in *bufio.Reader) (error, bool) {
+ for {
+ s, err := authReadLine(in)
+ if err != nil {
+ return err, false
+ }
+ switch {
+ case state == waitingForData && string(s[0]) == "DATA":
+ if len(s) != 2 {
+ err = authWriteLine(conn.transport, []byte("ERROR"))
+ if err != nil {
+ return err, false
+ }
+ continue
+ }
+ data, status := m.HandleData(s[1])
+ switch status {
+ case AuthOk, AuthContinue:
+ if len(data) != 0 {
+ err = authWriteLine(conn.transport, []byte("DATA"), data)
+ if err != nil {
+ return err, false
+ }
+ }
+ if status == AuthOk {
+ state = waitingForOk
+ }
+ case AuthError:
+ err = authWriteLine(conn.transport, []byte("ERROR"))
+ if err != nil {
+ return err, false
+ }
+ }
+ case state == waitingForData && string(s[0]) == "REJECTED":
+ return nil, false
+ case state == waitingForData && string(s[0]) == "ERROR":
+ err = authWriteLine(conn.transport, []byte("CANCEL"))
+ if err != nil {
+ return err, false
+ }
+ state = waitingForReject
+		case state == waitingForData && string(s[0]) == "OK":
+			if len(s) != 2 {
+				err = authWriteLine(conn.transport, []byte("CANCEL"))
+				if err != nil {
+					return err, false
+				}
+				state = waitingForReject
+				// s has no UUID field to read, so wait for the REJECTED reply.
+				continue
+			}
+			conn.uuid = string(s[1])
+			return nil, true
+ case state == waitingForData:
+ err = authWriteLine(conn.transport, []byte("ERROR"))
+ if err != nil {
+ return err, false
+ }
+		case state == waitingForOk && string(s[0]) == "OK":
+			if len(s) != 2 {
+				err = authWriteLine(conn.transport, []byte("CANCEL"))
+				if err != nil {
+					return err, false
+				}
+				state = waitingForReject
+				// s has no UUID field to read, so wait for the REJECTED reply.
+				continue
+			}
+			conn.uuid = string(s[1])
+			return nil, true
+ case state == waitingForOk && string(s[0]) == "REJECTED":
+ return nil, false
+ case state == waitingForOk && (string(s[0]) == "DATA" ||
+ string(s[0]) == "ERROR"):
+
+ err = authWriteLine(conn.transport, []byte("CANCEL"))
+ if err != nil {
+ return err, false
+ }
+ state = waitingForReject
+ case state == waitingForOk:
+ err = authWriteLine(conn.transport, []byte("ERROR"))
+ if err != nil {
+ return err, false
+ }
+ case state == waitingForReject && string(s[0]) == "REJECTED":
+ return nil, false
+ case state == waitingForReject:
+ return errors.New("dbus: authentication protocol error"), false
+ default:
+ panic("dbus: invalid auth state")
+ }
+ }
+}
+
+// authReadLine reads a line and separates it into its fields.
+func authReadLine(in *bufio.Reader) ([][]byte, error) {
+ data, err := in.ReadBytes('\n')
+ if err != nil {
+ return nil, err
+ }
+ data = bytes.TrimSuffix(data, []byte("\r\n"))
+ return bytes.Split(data, []byte{' '}), nil
+}
+
+// authWriteLine writes the given line in the authentication protocol format
+// (elements of data separated by a " " and terminated by "\r\n").
+func authWriteLine(out io.Writer, data ...[]byte) error {
+ buf := make([]byte, 0)
+ for i, v := range data {
+ buf = append(buf, v...)
+ if i != len(data)-1 {
+ buf = append(buf, ' ')
+ }
+ }
+ buf = append(buf, '\r')
+ buf = append(buf, '\n')
+ n, err := out.Write(buf)
+ if err != nil {
+ return err
+ }
+ if n != len(buf) {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
diff --git a/vendor/github.com/godbus/dbus/auth_external.go b/vendor/github.com/godbus/dbus/auth_external.go
new file mode 100644
index 00000000..7e376d3e
--- /dev/null
+++ b/vendor/github.com/godbus/dbus/auth_external.go
@@ -0,0 +1,26 @@
+package dbus
+
+import (
+ "encoding/hex"
+)
+
+// AuthExternal returns an Auth that authenticates as the given user with the
+// EXTERNAL mechanism.
+func AuthExternal(user string) Auth {
+ return authExternal{user}
+}
+
+// AuthExternal implements the EXTERNAL authentication mechanism.
+type authExternal struct {
+ user string
+}
+
+func (a authExternal) FirstData() ([]byte, []byte, AuthStatus) {
+ b := make([]byte, 2*len(a.user))
+ hex.Encode(b, []byte(a.user))
+ return []byte("EXTERNAL"), b, AuthOk
+}
+
+func (a authExternal) HandleData(b []byte) ([]byte, AuthStatus) {
+ return nil, AuthError
+}
diff --git a/vendor/github.com/godbus/dbus/auth_sha1.go b/vendor/github.com/godbus/dbus/auth_sha1.go
new file mode 100644
index 00000000..df15b461
--- /dev/null
+++ b/vendor/github.com/godbus/dbus/auth_sha1.go
@@ -0,0 +1,102 @@
+package dbus
+
+import (
+ "bufio"
+ "bytes"
+ "crypto/rand"
+ "crypto/sha1"
+ "encoding/hex"
+ "os"
+)
+
+// AuthCookieSha1 returns an Auth that authenticates as the given user with the
+// DBUS_COOKIE_SHA1 mechanism. The home parameter should specify the home
+// directory of the user.
+func AuthCookieSha1(user, home string) Auth {
+ return authCookieSha1{user, home}
+}
+
+type authCookieSha1 struct {
+ user, home string
+}
+
+func (a authCookieSha1) FirstData() ([]byte, []byte, AuthStatus) {
+ b := make([]byte, 2*len(a.user))
+ hex.Encode(b, []byte(a.user))
+ return []byte("DBUS_COOKIE_SHA1"), b, AuthContinue
+}
+
+func (a authCookieSha1) HandleData(data []byte) ([]byte, AuthStatus) {
+ challenge := make([]byte, len(data)/2)
+ _, err := hex.Decode(challenge, data)
+ if err != nil {
+ return nil, AuthError
+ }
+ b := bytes.Split(challenge, []byte{' '})
+ if len(b) != 3 {
+ return nil, AuthError
+ }
+ context := b[0]
+ id := b[1]
+ svchallenge := b[2]
+ cookie := a.getCookie(context, id)
+ if cookie == nil {
+ return nil, AuthError
+ }
+ clchallenge := a.generateChallenge()
+ if clchallenge == nil {
+ return nil, AuthError
+ }
+ hash := sha1.New()
+ hash.Write(bytes.Join([][]byte{svchallenge, clchallenge, cookie}, []byte{':'}))
+ hexhash := make([]byte, 2*hash.Size())
+ hex.Encode(hexhash, hash.Sum(nil))
+ data = append(clchallenge, ' ')
+ data = append(data, hexhash...)
+ resp := make([]byte, 2*len(data))
+ hex.Encode(resp, data)
+ return resp, AuthOk
+}
+
+// getCookie searches for the cookie identified by id in context and returns
+// the cookie content or nil. (Since HandleData can't return a specific error,
+// but only whether an error occurred, this function also doesn't bother to
+// return an error.)
+func (a authCookieSha1) getCookie(context, id []byte) []byte {
+ file, err := os.Open(a.home + "/.dbus-keyrings/" + string(context))
+ if err != nil {
+ return nil
+ }
+ defer file.Close()
+ rd := bufio.NewReader(file)
+ for {
+ line, err := rd.ReadBytes('\n')
+ if err != nil {
+ return nil
+ }
+ line = line[:len(line)-1]
+ b := bytes.Split(line, []byte{' '})
+ if len(b) != 3 {
+ return nil
+ }
+ if bytes.Equal(b[0], id) {
+ return b[2]
+ }
+ }
+}
+
+// generateChallenge returns a random, hex-encoded challenge, or nil on error
+// (see above).
+func (a authCookieSha1) generateChallenge() []byte {
+ b := make([]byte, 16)
+ n, err := rand.Read(b)
+ if err != nil {
+ return nil
+ }
+ if n != 16 {
+ return nil
+ }
+ enc := make([]byte, 32)
+ hex.Encode(enc, b)
+ return enc
+}
diff --git a/vendor/github.com/godbus/dbus/call.go b/vendor/github.com/godbus/dbus/call.go
new file mode 100644
index 00000000..ba6e73f6
--- /dev/null
+++ b/vendor/github.com/godbus/dbus/call.go
@@ -0,0 +1,36 @@
+package dbus
+
+import (
+ "errors"
+)
+
+// Call represents a pending or completed method call.
+type Call struct {
+ Destination string
+ Path ObjectPath
+ Method string
+ Args []interface{}
+
+ // Strobes when the call is complete.
+ Done chan *Call
+
+ // After completion, the error status. If this is non-nil, it may be an
+ // error message from the peer (with Error as its type) or some other error.
+ Err error
+
+ // Holds the response once the call is done.
+ Body []interface{}
+}
+
+var errSignature = errors.New("dbus: mismatched signature")
+
+// Store stores the body of the reply into the provided pointers. It returns
+// an error if the signatures of the body and retvalues don't match, or if
+// the error status is not nil.
+func (c *Call) Store(retvalues ...interface{}) error {
+ if c.Err != nil {
+ return c.Err
+ }
+
+ return Store(c.Body, retvalues...)
+}
diff --git a/vendor/github.com/godbus/dbus/conn.go b/vendor/github.com/godbus/dbus/conn.go
new file mode 100644
index 00000000..46f6914c
--- /dev/null
+++ b/vendor/github.com/godbus/dbus/conn.go
@@ -0,0 +1,636 @@
+package dbus
+
+import (
+ "errors"
+ "io"
+ "os"
+ "reflect"
+ "strings"
+ "sync"
+)
+
+const defaultSystemBusAddress = "unix:path=/var/run/dbus/system_bus_socket"
+
+var (
+ systemBus *Conn
+ systemBusLck sync.Mutex
+ sessionBus *Conn
+ sessionBusLck sync.Mutex
+)
+
+// ErrClosed is the error returned by calls on a closed connection.
+var ErrClosed = errors.New("dbus: connection closed by user")
+
+// Conn represents a connection to a message bus (usually, the system or
+// session bus).
+//
+// Connections are either shared or private. Shared connections
+// are shared between calls to the functions that return them. As a result,
+// the methods Close, Auth and Hello must not be called on them.
+//
+// Multiple goroutines may invoke methods on a connection simultaneously.
+type Conn struct {
+ transport
+
+ busObj BusObject
+ unixFD bool
+ uuid string
+
+ names []string
+ namesLck sync.RWMutex
+
+ serialLck sync.Mutex
+ nextSerial uint32
+ serialUsed map[uint32]bool
+
+ calls map[uint32]*Call
+ callsLck sync.RWMutex
+
+ handlers map[ObjectPath]map[string]exportWithMapping
+ handlersLck sync.RWMutex
+
+ out chan *Message
+ closed bool
+ outLck sync.RWMutex
+
+ signals []chan<- *Signal
+ signalsLck sync.Mutex
+
+ eavesdropped chan<- *Message
+ eavesdroppedLck sync.Mutex
+}
+
+// SessionBus returns a shared connection to the session bus, connecting to it
+// if not already done.
+func SessionBus() (conn *Conn, err error) {
+ sessionBusLck.Lock()
+ defer sessionBusLck.Unlock()
+ if sessionBus != nil {
+ return sessionBus, nil
+ }
+ defer func() {
+ if conn != nil {
+ sessionBus = conn
+ }
+ }()
+ conn, err = SessionBusPrivate()
+ if err != nil {
+ return
+ }
+ if err = conn.Auth(nil); err != nil {
+ conn.Close()
+ conn = nil
+ return
+ }
+ if err = conn.Hello(); err != nil {
+ conn.Close()
+ conn = nil
+ }
+ return
+}
+
+// SessionBusPrivate returns a new private connection to the session bus.
+func SessionBusPrivate() (*Conn, error) {
+ address := os.Getenv("DBUS_SESSION_BUS_ADDRESS")
+ if address != "" && address != "autolaunch:" {
+ return Dial(address)
+ }
+
+ return sessionBusPlatform()
+}
+
+// SystemBus returns a shared connection to the system bus, connecting to it if
+// not already done.
+func SystemBus() (conn *Conn, err error) {
+ systemBusLck.Lock()
+ defer systemBusLck.Unlock()
+ if systemBus != nil {
+ return systemBus, nil
+ }
+ defer func() {
+ if conn != nil {
+ systemBus = conn
+ }
+ }()
+ conn, err = SystemBusPrivate()
+ if err != nil {
+ return
+ }
+ if err = conn.Auth(nil); err != nil {
+ conn.Close()
+ conn = nil
+ return
+ }
+ if err = conn.Hello(); err != nil {
+ conn.Close()
+ conn = nil
+ }
+ return
+}
+
+// SystemBusPrivate returns a new private connection to the system bus.
+func SystemBusPrivate() (*Conn, error) {
+ address := os.Getenv("DBUS_SYSTEM_BUS_ADDRESS")
+ if address != "" {
+ return Dial(address)
+ }
+ return Dial(defaultSystemBusAddress)
+}
+
+// Dial establishes a new private connection to the message bus specified by address.
+func Dial(address string) (*Conn, error) {
+ tr, err := getTransport(address)
+ if err != nil {
+ return nil, err
+ }
+ return newConn(tr)
+}
+
+// NewConn creates a new private *Conn from an already established connection.
+func NewConn(conn io.ReadWriteCloser) (*Conn, error) {
+ return newConn(genericTransport{conn})
+}
+
+// newConn creates a new *Conn from a transport.
+func newConn(tr transport) (*Conn, error) {
+ conn := new(Conn)
+ conn.transport = tr
+ conn.calls = make(map[uint32]*Call)
+ conn.out = make(chan *Message, 10)
+ conn.handlers = make(map[ObjectPath]map[string]exportWithMapping)
+ conn.nextSerial = 1
+ conn.serialUsed = map[uint32]bool{0: true}
+ conn.busObj = conn.Object("org.freedesktop.DBus", "/org/freedesktop/DBus")
+ return conn, nil
+}
+
+// BusObject returns the object owned by the bus daemon which handles
+// administrative requests.
+func (conn *Conn) BusObject() BusObject {
+ return conn.busObj
+}
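+
+// Illustrative sketch (editorial addition, not part of the upstream file):
+// using the bus object to invoke the bus daemon's standard ListNames method
+// and store the reply into a Go slice.
+func exampleListNames(conn *Conn) ([]string, error) {
+	var names []string
+	err := conn.BusObject().Call("org.freedesktop.DBus.ListNames", 0).Store(&names)
+	return names, err
+}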
+
+// Close closes the connection. Any blocked operations will return with errors
+// and the channels passed to Eavesdrop and Signal are closed. This method must
+// not be called on shared connections.
+func (conn *Conn) Close() error {
+ conn.outLck.Lock()
+ if conn.closed {
+		// inWorker calls Close on read errors; such an error may itself be
+		// caused by another caller having already closed the connection.
+		// Guard against that double-close scenario here.
+ conn.outLck.Unlock()
+ return nil
+ }
+ close(conn.out)
+ conn.closed = true
+ conn.outLck.Unlock()
+ conn.signalsLck.Lock()
+ for _, ch := range conn.signals {
+ close(ch)
+ }
+ conn.signalsLck.Unlock()
+ conn.eavesdroppedLck.Lock()
+ if conn.eavesdropped != nil {
+ close(conn.eavesdropped)
+ }
+ conn.eavesdroppedLck.Unlock()
+ return conn.transport.Close()
+}
+
+// Eavesdrop causes conn to send all incoming messages to the given channel
+// without further processing. Method replies, errors and signals will not be
+// sent to the appropriate channels and method calls will not be handled. If nil
+// is passed, the normal behaviour is restored.
+//
+// The caller has to make sure that ch is sufficiently buffered;
+// if a message arrives when a write to ch is not possible, the message is
+// discarded.
+func (conn *Conn) Eavesdrop(ch chan<- *Message) {
+ conn.eavesdroppedLck.Lock()
+ conn.eavesdropped = ch
+ conn.eavesdroppedLck.Unlock()
+}
+
+// getSerial returns an unused serial.
+func (conn *Conn) getSerial() uint32 {
+ conn.serialLck.Lock()
+ defer conn.serialLck.Unlock()
+ n := conn.nextSerial
+ for conn.serialUsed[n] {
+ n++
+ }
+ conn.serialUsed[n] = true
+ conn.nextSerial = n + 1
+ return n
+}
+
+// Hello sends the initial org.freedesktop.DBus.Hello call. This method must be
+// called after authentication, but before sending any other messages to the
+// bus. Hello must not be called for shared connections.
+func (conn *Conn) Hello() error {
+ var s string
+ err := conn.busObj.Call("org.freedesktop.DBus.Hello", 0).Store(&s)
+ if err != nil {
+ return err
+ }
+ conn.namesLck.Lock()
+ conn.names = make([]string, 1)
+ conn.names[0] = s
+ conn.namesLck.Unlock()
+ return nil
+}
+
+// inWorker runs in its own goroutine, reading incoming messages from the
+// transport and dispatching them appropriately.
+func (conn *Conn) inWorker() {
+ for {
+ msg, err := conn.ReadMessage()
+ if err == nil {
+ conn.eavesdroppedLck.Lock()
+ if conn.eavesdropped != nil {
+ select {
+ case conn.eavesdropped <- msg:
+ default:
+ }
+ conn.eavesdroppedLck.Unlock()
+ continue
+ }
+ conn.eavesdroppedLck.Unlock()
+ dest, _ := msg.Headers[FieldDestination].value.(string)
+ found := false
+ if dest == "" {
+ found = true
+ } else {
+ conn.namesLck.RLock()
+ if len(conn.names) == 0 {
+ found = true
+ }
+ for _, v := range conn.names {
+ if dest == v {
+ found = true
+ break
+ }
+ }
+ conn.namesLck.RUnlock()
+ }
+ if !found {
+ // Eavesdropped a message, but no channel for it is registered.
+ // Ignore it.
+ continue
+ }
+ switch msg.Type {
+ case TypeMethodReply, TypeError:
+ serial := msg.Headers[FieldReplySerial].value.(uint32)
+ conn.callsLck.Lock()
+ if c, ok := conn.calls[serial]; ok {
+ if msg.Type == TypeError {
+ name, _ := msg.Headers[FieldErrorName].value.(string)
+ c.Err = Error{name, msg.Body}
+ } else {
+ c.Body = msg.Body
+ }
+ c.Done <- c
+ conn.serialLck.Lock()
+ delete(conn.serialUsed, serial)
+ conn.serialLck.Unlock()
+ delete(conn.calls, serial)
+ }
+ conn.callsLck.Unlock()
+ case TypeSignal:
+ iface := msg.Headers[FieldInterface].value.(string)
+ member := msg.Headers[FieldMember].value.(string)
+ // as per http://dbus.freedesktop.org/doc/dbus-specification.html ,
+ // sender is optional for signals.
+ sender, _ := msg.Headers[FieldSender].value.(string)
+ if iface == "org.freedesktop.DBus" && sender == "org.freedesktop.DBus" {
+ if member == "NameLost" {
+ // If we lost the name on the bus, remove it from our
+ // tracking list.
+ name, ok := msg.Body[0].(string)
+ if !ok {
+ panic("Unable to read the lost name")
+ }
+ conn.namesLck.Lock()
+ for i, v := range conn.names {
+ if v == name {
+ conn.names = append(conn.names[:i],
+ conn.names[i+1:]...)
+ }
+ }
+ conn.namesLck.Unlock()
+ } else if member == "NameAcquired" {
+ // If we acquired the name on the bus, add it to our
+ // tracking list.
+ name, ok := msg.Body[0].(string)
+ if !ok {
+ panic("Unable to read the acquired name")
+ }
+ conn.namesLck.Lock()
+ conn.names = append(conn.names, name)
+ conn.namesLck.Unlock()
+ }
+ }
+ signal := &Signal{
+ Sender: sender,
+ Path: msg.Headers[FieldPath].value.(ObjectPath),
+ Name: iface + "." + member,
+ Body: msg.Body,
+ }
+ conn.signalsLck.Lock()
+ for _, ch := range conn.signals {
+ ch <- signal
+ }
+ conn.signalsLck.Unlock()
+ case TypeMethodCall:
+ go conn.handleCall(msg)
+ }
+ } else if _, ok := err.(InvalidMessageError); !ok {
+			// Some read error occurred (usually EOF); we can't really do
+			// anything but shut everything down and return errors to all
+ // pending replies.
+ conn.Close()
+ conn.callsLck.RLock()
+ for _, v := range conn.calls {
+ v.Err = err
+ v.Done <- v
+ }
+ conn.callsLck.RUnlock()
+ return
+ }
+ // invalid messages are ignored
+ }
+}
+
+// Names returns the list of all names that are currently owned by this
+// connection. The slice is always at least one element long, the first element
+// being the unique name of the connection.
+func (conn *Conn) Names() []string {
+ conn.namesLck.RLock()
+ // copy the slice so it can't be modified
+ s := make([]string, len(conn.names))
+ copy(s, conn.names)
+ conn.namesLck.RUnlock()
+ return s
+}
+
+// Object returns the object identified by the given destination name and path.
+func (conn *Conn) Object(dest string, path ObjectPath) BusObject {
+ return &Object{conn, dest, path}
+}
+
+// outWorker runs in its own goroutine, encoding and sending messages that are
+// sent to conn.out.
+func (conn *Conn) outWorker() {
+ for msg := range conn.out {
+ err := conn.SendMessage(msg)
+ conn.callsLck.RLock()
+ if err != nil {
+ if c := conn.calls[msg.serial]; c != nil {
+ c.Err = err
+ c.Done <- c
+ }
+ conn.serialLck.Lock()
+ delete(conn.serialUsed, msg.serial)
+ conn.serialLck.Unlock()
+ } else if msg.Type != TypeMethodCall {
+ conn.serialLck.Lock()
+ delete(conn.serialUsed, msg.serial)
+ conn.serialLck.Unlock()
+ }
+ conn.callsLck.RUnlock()
+ }
+}
+
+// Send sends the given message to the message bus. You usually don't need to
+// use this; use the higher-level equivalents (Call / Go, Emit and Export)
+// instead. If msg is a method call and NoReplyExpected is not set, a non-nil
+// call is returned and the same value is sent to ch (which must be buffered)
+// once the call is complete. Otherwise, ch is ignored and a Call structure is
+// returned of which only the Err member is valid.
+func (conn *Conn) Send(msg *Message, ch chan *Call) *Call {
+ var call *Call
+
+ msg.serial = conn.getSerial()
+ if msg.Type == TypeMethodCall && msg.Flags&FlagNoReplyExpected == 0 {
+ if ch == nil {
+ ch = make(chan *Call, 5)
+ } else if cap(ch) == 0 {
+ panic("dbus: unbuffered channel passed to (*Conn).Send")
+ }
+ call = new(Call)
+ call.Destination, _ = msg.Headers[FieldDestination].value.(string)
+ call.Path, _ = msg.Headers[FieldPath].value.(ObjectPath)
+ iface, _ := msg.Headers[FieldInterface].value.(string)
+ member, _ := msg.Headers[FieldMember].value.(string)
+ call.Method = iface + "." + member
+ call.Args = msg.Body
+ call.Done = ch
+ conn.callsLck.Lock()
+ conn.calls[msg.serial] = call
+ conn.callsLck.Unlock()
+ conn.outLck.RLock()
+ if conn.closed {
+ call.Err = ErrClosed
+ call.Done <- call
+ } else {
+ conn.out <- msg
+ }
+ conn.outLck.RUnlock()
+ } else {
+ conn.outLck.RLock()
+ if conn.closed {
+ call = &Call{Err: ErrClosed}
+ } else {
+ conn.out <- msg
+ call = &Call{Err: nil}
+ }
+ conn.outLck.RUnlock()
+ }
+ return call
+}
+
+// sendError creates an error message corresponding to the parameters and sends
+// it to conn.out.
+func (conn *Conn) sendError(e Error, dest string, serial uint32) {
+ msg := new(Message)
+ msg.Type = TypeError
+ msg.serial = conn.getSerial()
+ msg.Headers = make(map[HeaderField]Variant)
+ if dest != "" {
+ msg.Headers[FieldDestination] = MakeVariant(dest)
+ }
+ msg.Headers[FieldErrorName] = MakeVariant(e.Name)
+ msg.Headers[FieldReplySerial] = MakeVariant(serial)
+ msg.Body = e.Body
+ if len(e.Body) > 0 {
+ msg.Headers[FieldSignature] = MakeVariant(SignatureOf(e.Body...))
+ }
+ conn.outLck.RLock()
+ if !conn.closed {
+ conn.out <- msg
+ }
+ conn.outLck.RUnlock()
+}
+
+// sendReply creates a method reply message corresponding to the parameters and
+// sends it to conn.out.
+func (conn *Conn) sendReply(dest string, serial uint32, values ...interface{}) {
+ msg := new(Message)
+ msg.Type = TypeMethodReply
+ msg.serial = conn.getSerial()
+ msg.Headers = make(map[HeaderField]Variant)
+ if dest != "" {
+ msg.Headers[FieldDestination] = MakeVariant(dest)
+ }
+ msg.Headers[FieldReplySerial] = MakeVariant(serial)
+ msg.Body = values
+ if len(values) > 0 {
+ msg.Headers[FieldSignature] = MakeVariant(SignatureOf(values...))
+ }
+ conn.outLck.RLock()
+ if !conn.closed {
+ conn.out <- msg
+ }
+ conn.outLck.RUnlock()
+}
+
+// Signal registers the given channel to be passed all received signal messages.
+// The caller has to make sure that ch is sufficiently buffered; if a message
+// arrives when a write to ch is not possible, it is discarded.
+//
+// Multiple of these channels can be registered at the same time.
+//
+// These channels are "overwritten" by Eavesdrop; i.e., if there currently is a
+// channel for eavesdropped messages, this channel receives all signals, and
+// none of the channels passed to Signal will receive any signals.
+func (conn *Conn) Signal(ch chan<- *Signal) {
+ conn.signalsLck.Lock()
+ conn.signals = append(conn.signals, ch)
+ conn.signalsLck.Unlock()
+}
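+
+// Illustrative sketch (editorial addition, not part of the upstream file):
+// receiving signals. A match rule is installed through the bus daemon's
+// standard AddMatch method, then a buffered channel is registered.
+func exampleSignals(conn *Conn) {
+	conn.BusObject().Call("org.freedesktop.DBus.AddMatch", 0,
+		"type='signal',interface='org.freedesktop.DBus',member='NameOwnerChanged'")
+	ch := make(chan *Signal, 32)
+	conn.Signal(ch)
+	go func() {
+		for sig := range ch {
+			_ = sig.Name // e.g. "org.freedesktop.DBus.NameOwnerChanged"
+		}
+	}()
+}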
+
+// RemoveSignal removes the given channel from the list of the registered channels.
+func (conn *Conn) RemoveSignal(ch chan<- *Signal) {
+ conn.signalsLck.Lock()
+ for i := len(conn.signals) - 1; i >= 0; i-- {
+ if ch == conn.signals[i] {
+ copy(conn.signals[i:], conn.signals[i+1:])
+ conn.signals[len(conn.signals)-1] = nil
+ conn.signals = conn.signals[:len(conn.signals)-1]
+ }
+ }
+ conn.signalsLck.Unlock()
+}
+
+// SupportsUnixFDs returns whether the underlying transport supports passing of
+// unix file descriptors. If this is false, method calls containing unix file
+// descriptors will return an error and emitted signals containing them will
+// not be sent.
+func (conn *Conn) SupportsUnixFDs() bool {
+ return conn.unixFD
+}
+
+// Error represents a D-Bus message of type Error.
+type Error struct {
+ Name string
+ Body []interface{}
+}
+
+func NewError(name string, body []interface{}) *Error {
+ return &Error{name, body}
+}
+
+func (e Error) Error() string {
+ if len(e.Body) >= 1 {
+ s, ok := e.Body[0].(string)
+ if ok {
+ return s
+ }
+ }
+ return e.Name
+}
+
+// Signal represents a D-Bus message of type Signal. The name member is given in
+// "interface.member" notation, e.g. org.freedesktop.D-Bus.NameLost.
+type Signal struct {
+ Sender string
+ Path ObjectPath
+ Name string
+ Body []interface{}
+}
+
+// transport is a D-Bus transport.
+type transport interface {
+ // Read and Write raw data (for example, for the authentication protocol).
+ io.ReadWriteCloser
+
+ // Send the initial null byte used for the EXTERNAL mechanism.
+ SendNullByte() error
+
+ // Returns whether this transport supports passing Unix FDs.
+ SupportsUnixFDs() bool
+
+ // Signal the transport that Unix FD passing is enabled for this connection.
+ EnableUnixFDs()
+
+ // Read / send a message, handling things like Unix FDs.
+ ReadMessage() (*Message, error)
+ SendMessage(*Message) error
+}
+
+var (
+ transports = make(map[string]func(string) (transport, error))
+)
+
+func getTransport(address string) (transport, error) {
+ var err error
+ var t transport
+
+ addresses := strings.Split(address, ";")
+ for _, v := range addresses {
+ i := strings.IndexRune(v, ':')
+ if i == -1 {
+ err = errors.New("dbus: invalid bus address (no transport)")
+ continue
+ }
+ f := transports[v[:i]]
+ if f == nil {
+ err = errors.New("dbus: invalid bus address (invalid or unsupported transport)")
+ continue
+ }
+ t, err = f(v[i+1:])
+ if err == nil {
+ return t, nil
+ }
+ }
+ return nil, err
+}
+
+// dereferenceAll returns a slice containing the values obtained by
+// dereferencing every element of vs, which is assumed to be a slice of
+// pointers of arbitrary types.
+func dereferenceAll(vs []interface{}) []interface{} {
+ for i := range vs {
+ v := reflect.ValueOf(vs[i])
+ v = v.Elem()
+ vs[i] = v.Interface()
+ }
+ return vs
+}
+
+// getKey gets the value for key from the comma-separated key=value list s.
+// It returns "" on error or if the key is not found.
+func getKey(s, key string) string {
+ i := strings.Index(s, key)
+ if i == -1 {
+ return ""
+ }
+ if i+len(key)+1 >= len(s) || s[i+len(key)] != '=' {
+ return ""
+ }
+ j := strings.Index(s, ",")
+ if j == -1 {
+ j = len(s)
+ }
+ return s[i+len(key)+1 : j]
+}
diff --git a/vendor/github.com/godbus/dbus/conn_darwin.go b/vendor/github.com/godbus/dbus/conn_darwin.go
new file mode 100644
index 00000000..b67bb1b8
--- /dev/null
+++ b/vendor/github.com/godbus/dbus/conn_darwin.go
@@ -0,0 +1,21 @@
+package dbus
+
+import (
+ "errors"
+ "os/exec"
+)
+
+func sessionBusPlatform() (*Conn, error) {
+ cmd := exec.Command("launchctl", "getenv", "DBUS_LAUNCHD_SESSION_BUS_SOCKET")
+ b, err := cmd.CombinedOutput()
+
+ if err != nil {
+ return nil, err
+ }
+
+ if len(b) == 0 {
+ return nil, errors.New("dbus: couldn't determine address of session bus")
+ }
+
+ return Dial("unix:path=" + string(b[:len(b)-1]))
+}
diff --git a/vendor/github.com/godbus/dbus/conn_other.go b/vendor/github.com/godbus/dbus/conn_other.go
new file mode 100644
index 00000000..f74b8758
--- /dev/null
+++ b/vendor/github.com/godbus/dbus/conn_other.go
@@ -0,0 +1,27 @@
+// +build !darwin
+
+package dbus
+
+import (
+ "bytes"
+ "errors"
+ "os/exec"
+)
+
+func sessionBusPlatform() (*Conn, error) {
+ cmd := exec.Command("dbus-launch")
+ b, err := cmd.CombinedOutput()
+
+ if err != nil {
+ return nil, err
+ }
+
+ i := bytes.IndexByte(b, '=')
+ j := bytes.IndexByte(b, '\n')
+
+ if i == -1 || j == -1 {
+ return nil, errors.New("dbus: couldn't determine address of session bus")
+ }
+
+ return Dial(string(b[i+1 : j]))
+}
diff --git a/vendor/github.com/godbus/dbus/dbus.go b/vendor/github.com/godbus/dbus/dbus.go
new file mode 100644
index 00000000..2ce68735
--- /dev/null
+++ b/vendor/github.com/godbus/dbus/dbus.go
@@ -0,0 +1,258 @@
+package dbus
+
+import (
+ "errors"
+ "reflect"
+ "strings"
+)
+
+var (
+ byteType = reflect.TypeOf(byte(0))
+ boolType = reflect.TypeOf(false)
+ uint8Type = reflect.TypeOf(uint8(0))
+ int16Type = reflect.TypeOf(int16(0))
+ uint16Type = reflect.TypeOf(uint16(0))
+ int32Type = reflect.TypeOf(int32(0))
+ uint32Type = reflect.TypeOf(uint32(0))
+ int64Type = reflect.TypeOf(int64(0))
+ uint64Type = reflect.TypeOf(uint64(0))
+ float64Type = reflect.TypeOf(float64(0))
+ stringType = reflect.TypeOf("")
+ signatureType = reflect.TypeOf(Signature{""})
+ objectPathType = reflect.TypeOf(ObjectPath(""))
+ variantType = reflect.TypeOf(Variant{Signature{""}, nil})
+ interfacesType = reflect.TypeOf([]interface{}{})
+ unixFDType = reflect.TypeOf(UnixFD(0))
+ unixFDIndexType = reflect.TypeOf(UnixFDIndex(0))
+)
+
+// An InvalidTypeError signals that a value which cannot be represented in the
+// D-Bus wire format was passed to a function.
+type InvalidTypeError struct {
+ Type reflect.Type
+}
+
+func (e InvalidTypeError) Error() string {
+ return "dbus: invalid type " + e.Type.String()
+}
+
+// Store copies the values contained in src to dest, which must be a slice of
+// pointers. It converts slices of interfaces from src to corresponding structs
+// in dest. An error is returned if the lengths of src and dest or the types of
+// their elements don't match.
+func Store(src []interface{}, dest ...interface{}) error {
+ if len(src) != len(dest) {
+ return errors.New("dbus.Store: length mismatch")
+ }
+
+ for i := range src {
+ if err := store(src[i], dest[i]); err != nil {
+ return err
+ }
+ }
+ return nil
+}
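+
+// Illustrative sketch (editorial addition, not part of the upstream file):
+// Store copying a reply body into typed variables.
+func exampleStore() error {
+	body := []interface{}{"hello", uint32(42)}
+	var s string
+	var n uint32
+	return Store(body, &s, &n)
+}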
+
+func store(src, dest interface{}) error {
+ if reflect.TypeOf(dest).Elem() == reflect.TypeOf(src) {
+ reflect.ValueOf(dest).Elem().Set(reflect.ValueOf(src))
+ return nil
+ } else if hasStruct(dest) {
+ rv := reflect.ValueOf(dest).Elem()
+ switch rv.Kind() {
+ case reflect.Struct:
+ vs, ok := src.([]interface{})
+ if !ok {
+ return errors.New("dbus.Store: type mismatch")
+ }
+ t := rv.Type()
+ ndest := make([]interface{}, 0, rv.NumField())
+ for i := 0; i < rv.NumField(); i++ {
+ field := t.Field(i)
+ if field.PkgPath == "" && field.Tag.Get("dbus") != "-" {
+ ndest = append(ndest, rv.Field(i).Addr().Interface())
+ }
+ }
+ if len(vs) != len(ndest) {
+ return errors.New("dbus.Store: type mismatch")
+ }
+ err := Store(vs, ndest...)
+ if err != nil {
+ return errors.New("dbus.Store: type mismatch")
+ }
+ case reflect.Slice:
+ sv := reflect.ValueOf(src)
+ if sv.Kind() != reflect.Slice {
+ return errors.New("dbus.Store: type mismatch")
+ }
+ rv.Set(reflect.MakeSlice(rv.Type(), sv.Len(), sv.Len()))
+ for i := 0; i < sv.Len(); i++ {
+ if err := store(sv.Index(i).Interface(), rv.Index(i).Addr().Interface()); err != nil {
+ return err
+ }
+ }
+ case reflect.Map:
+ sv := reflect.ValueOf(src)
+ if sv.Kind() != reflect.Map {
+ return errors.New("dbus.Store: type mismatch")
+ }
+ keys := sv.MapKeys()
+ rv.Set(reflect.MakeMap(sv.Type()))
+ for _, key := range keys {
+ v := reflect.New(sv.Type().Elem())
+				if err := store(sv.MapIndex(key).Interface(), v.Interface()); err != nil {
+ return err
+ }
+ rv.SetMapIndex(key, v.Elem())
+ }
+ default:
+ return errors.New("dbus.Store: type mismatch")
+ }
+ return nil
+ } else {
+ return errors.New("dbus.Store: type mismatch")
+ }
+}
+
+func hasStruct(v interface{}) bool {
+ t := reflect.TypeOf(v)
+ for {
+ switch t.Kind() {
+ case reflect.Struct:
+ return true
+ case reflect.Slice, reflect.Ptr, reflect.Map:
+ t = t.Elem()
+ default:
+ return false
+ }
+ }
+}
+
+// An ObjectPath is an object path as defined by the D-Bus spec.
+type ObjectPath string
+
+// IsValid returns whether the object path is valid.
+func (o ObjectPath) IsValid() bool {
+ s := string(o)
+ if len(s) == 0 {
+ return false
+ }
+ if s[0] != '/' {
+ return false
+ }
+ if s[len(s)-1] == '/' && len(s) != 1 {
+ return false
+ }
+ // probably not used, but technically possible
+ if s == "/" {
+ return true
+ }
+ split := strings.Split(s[1:], "/")
+ for _, v := range split {
+ if len(v) == 0 {
+ return false
+ }
+ for _, c := range v {
+ if !isMemberChar(c) {
+ return false
+ }
+ }
+ }
+ return true
+}
+
+// A UnixFD is a Unix file descriptor sent over the wire. See the package-level
+// documentation for more information about Unix file descriptor passing.
+type UnixFD int32
+
+// A UnixFDIndex is the representation of a Unix file descriptor in a message.
+type UnixFDIndex uint32
+
+// alignment returns the alignment of values of type t.
+func alignment(t reflect.Type) int {
+ switch t {
+ case variantType:
+ return 1
+ case objectPathType:
+ return 4
+ case signatureType:
+ return 1
+ case interfacesType: // sometimes used for structs
+ return 8
+ }
+ switch t.Kind() {
+ case reflect.Uint8:
+ return 1
+ case reflect.Uint16, reflect.Int16:
+ return 2
+ case reflect.Uint32, reflect.Int32, reflect.String, reflect.Array, reflect.Slice, reflect.Map:
+ return 4
+ case reflect.Uint64, reflect.Int64, reflect.Float64, reflect.Struct:
+ return 8
+ case reflect.Ptr:
+ return alignment(t.Elem())
+ }
+ return 1
+}
+
+// isKeyType returns whether t is a valid type for a D-Bus dict.
+func isKeyType(t reflect.Type) bool {
+ switch t.Kind() {
+ case reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64,
+ reflect.Int16, reflect.Int32, reflect.Int64, reflect.Float64,
+ reflect.String:
+
+ return true
+ }
+ return false
+}
+
+// isValidInterface returns whether s is a valid name for an interface.
+func isValidInterface(s string) bool {
+ if len(s) == 0 || len(s) > 255 || s[0] == '.' {
+ return false
+ }
+ elem := strings.Split(s, ".")
+ if len(elem) < 2 {
+ return false
+ }
+ for _, v := range elem {
+ if len(v) == 0 {
+ return false
+ }
+ if v[0] >= '0' && v[0] <= '9' {
+ return false
+ }
+ for _, c := range v {
+ if !isMemberChar(c) {
+ return false
+ }
+ }
+ }
+ return true
+}
+
+// isValidMember returns whether s is a valid name for a member.
+func isValidMember(s string) bool {
+ if len(s) == 0 || len(s) > 255 {
+ return false
+ }
+ i := strings.Index(s, ".")
+ if i != -1 {
+ return false
+ }
+ if s[0] >= '0' && s[0] <= '9' {
+ return false
+ }
+ for _, c := range s {
+ if !isMemberChar(c) {
+ return false
+ }
+ }
+ return true
+}
+
+func isMemberChar(c rune) bool {
+ return (c >= '0' && c <= '9') || (c >= 'A' && c <= 'Z') ||
+ (c >= 'a' && c <= 'z') || c == '_'
+}
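For reference, a minimal sketch of the path rules that IsValid above enforces (leading slash, no empty or trailing elements, only letters, digits and underscores). The import path assumes the package is resolved through the vendor directory added in this change:

    package main

    import (
        "fmt"

        "github.com/godbus/dbus"
    )

    func main() {
        // Leading slash, non-empty elements, only [A-Za-z0-9_] characters.
        fmt.Println(dbus.ObjectPath("/org/freedesktop/DBus").IsValid()) // true
        // A trailing slash on a non-root path is rejected.
        fmt.Println(dbus.ObjectPath("/org/freedesktop/").IsValid()) // false
        // '-' is not a valid element character.
        fmt.Println(dbus.ObjectPath("/org/free-desktop").IsValid()) // false
    }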
diff --git a/vendor/github.com/godbus/dbus/decoder.go b/vendor/github.com/godbus/dbus/decoder.go
new file mode 100644
index 00000000..ef50dcab
--- /dev/null
+++ b/vendor/github.com/godbus/dbus/decoder.go
@@ -0,0 +1,228 @@
+package dbus
+
+import (
+ "encoding/binary"
+ "io"
+ "reflect"
+)
+
+type decoder struct {
+ in io.Reader
+ order binary.ByteOrder
+ pos int
+}
+
+// newDecoder returns a new decoder that reads values from in. The input is
+// expected to be in the given byte order.
+func newDecoder(in io.Reader, order binary.ByteOrder) *decoder {
+ dec := new(decoder)
+ dec.in = in
+ dec.order = order
+ return dec
+}
+
+// align aligns the input to the given boundary and panics on error.
+func (dec *decoder) align(n int) {
+ if dec.pos%n != 0 {
+ newpos := (dec.pos + n - 1) & ^(n - 1)
+ empty := make([]byte, newpos-dec.pos)
+ if _, err := io.ReadFull(dec.in, empty); err != nil {
+ panic(err)
+ }
+ dec.pos = newpos
+ }
+}
+
+// Calls binary.Read(dec.in, dec.order, v) and panics on read errors.
+func (dec *decoder) binread(v interface{}) {
+ if err := binary.Read(dec.in, dec.order, v); err != nil {
+ panic(err)
+ }
+}
+
+func (dec *decoder) Decode(sig Signature) (vs []interface{}, err error) {
+ defer func() {
+ var ok bool
+ v := recover()
+ if err, ok = v.(error); ok {
+ if err == io.EOF || err == io.ErrUnexpectedEOF {
+ err = FormatError("unexpected EOF")
+ }
+ }
+ }()
+ vs = make([]interface{}, 0)
+ s := sig.str
+ for s != "" {
+ err, rem := validSingle(s, 0)
+ if err != nil {
+ return nil, err
+ }
+ v := dec.decode(s[:len(s)-len(rem)], 0)
+ vs = append(vs, v)
+ s = rem
+ }
+ return vs, nil
+}
+
+func (dec *decoder) decode(s string, depth int) interface{} {
+ dec.align(alignment(typeFor(s)))
+ switch s[0] {
+ case 'y':
+ var b [1]byte
+ if _, err := dec.in.Read(b[:]); err != nil {
+ panic(err)
+ }
+ dec.pos++
+ return b[0]
+ case 'b':
+ i := dec.decode("u", depth).(uint32)
+ switch {
+ case i == 0:
+ return false
+ case i == 1:
+ return true
+ default:
+ panic(FormatError("invalid value for boolean"))
+ }
+ case 'n':
+ var i int16
+ dec.binread(&i)
+ dec.pos += 2
+ return i
+ case 'i':
+ var i int32
+ dec.binread(&i)
+ dec.pos += 4
+ return i
+ case 'x':
+ var i int64
+ dec.binread(&i)
+ dec.pos += 8
+ return i
+ case 'q':
+ var i uint16
+ dec.binread(&i)
+ dec.pos += 2
+ return i
+ case 'u':
+ var i uint32
+ dec.binread(&i)
+ dec.pos += 4
+ return i
+ case 't':
+ var i uint64
+ dec.binread(&i)
+ dec.pos += 8
+ return i
+ case 'd':
+ var f float64
+ dec.binread(&f)
+ dec.pos += 8
+ return f
+ case 's':
+ length := dec.decode("u", depth).(uint32)
+ b := make([]byte, int(length)+1)
+ if _, err := io.ReadFull(dec.in, b); err != nil {
+ panic(err)
+ }
+ dec.pos += int(length) + 1
+ return string(b[:len(b)-1])
+ case 'o':
+ return ObjectPath(dec.decode("s", depth).(string))
+ case 'g':
+ length := dec.decode("y", depth).(byte)
+ b := make([]byte, int(length)+1)
+ if _, err := io.ReadFull(dec.in, b); err != nil {
+ panic(err)
+ }
+ dec.pos += int(length) + 1
+ sig, err := ParseSignature(string(b[:len(b)-1]))
+ if err != nil {
+ panic(err)
+ }
+ return sig
+ case 'v':
+ if depth >= 64 {
+ panic(FormatError("input exceeds container depth limit"))
+ }
+ var variant Variant
+ sig := dec.decode("g", depth).(Signature)
+ if len(sig.str) == 0 {
+ panic(FormatError("variant signature is empty"))
+ }
+ err, rem := validSingle(sig.str, 0)
+ if err != nil {
+ panic(err)
+ }
+ if rem != "" {
+ panic(FormatError("variant signature has multiple types"))
+ }
+ variant.sig = sig
+ variant.value = dec.decode(sig.str, depth+1)
+ return variant
+ case 'h':
+ return UnixFDIndex(dec.decode("u", depth).(uint32))
+ case 'a':
+ if len(s) > 1 && s[1] == '{' {
+ ksig := s[2:3]
+ vsig := s[3 : len(s)-1]
+ v := reflect.MakeMap(reflect.MapOf(typeFor(ksig), typeFor(vsig)))
+ if depth >= 63 {
+ panic(FormatError("input exceeds container depth limit"))
+ }
+ length := dec.decode("u", depth).(uint32)
+ // Even for empty maps, the correct padding must be included
+ dec.align(8)
+ spos := dec.pos
+ for dec.pos < spos+int(length) {
+ dec.align(8)
+ if !isKeyType(v.Type().Key()) {
+ panic(InvalidTypeError{v.Type()})
+ }
+ kv := dec.decode(ksig, depth+2)
+ vv := dec.decode(vsig, depth+2)
+ v.SetMapIndex(reflect.ValueOf(kv), reflect.ValueOf(vv))
+ }
+ return v.Interface()
+ }
+ if depth >= 64 {
+ panic(FormatError("input exceeds container depth limit"))
+ }
+ length := dec.decode("u", depth).(uint32)
+ v := reflect.MakeSlice(reflect.SliceOf(typeFor(s[1:])), 0, int(length))
+ // Even for empty arrays, the correct padding must be included
+ dec.align(alignment(typeFor(s[1:])))
+ spos := dec.pos
+ for dec.pos < spos+int(length) {
+ ev := dec.decode(s[1:], depth+1)
+ v = reflect.Append(v, reflect.ValueOf(ev))
+ }
+ return v.Interface()
+ case '(':
+ if depth >= 64 {
+ panic(FormatError("input exceeds container depth limit"))
+ }
+ dec.align(8)
+ v := make([]interface{}, 0)
+ s = s[1 : len(s)-1]
+ for s != "" {
+ err, rem := validSingle(s, 0)
+ if err != nil {
+ panic(err)
+ }
+ ev := dec.decode(s[:len(s)-len(rem)], depth+1)
+ v = append(v, ev)
+ s = rem
+ }
+ return v
+ default:
+ panic(SignatureError{Sig: s})
+ }
+}
+
+// A FormatError is an error in the wire format.
+type FormatError string
+
+func (e FormatError) Error() string {
+ return "dbus: wire format error: " + string(e)
+}
diff --git a/vendor/github.com/godbus/dbus/doc.go b/vendor/github.com/godbus/dbus/doc.go
new file mode 100644
index 00000000..deff554a
--- /dev/null
+++ b/vendor/github.com/godbus/dbus/doc.go
@@ -0,0 +1,63 @@
+/*
+Package dbus implements bindings to the D-Bus message bus system.
+
+To use the message bus API, you first need to connect to a bus (usually the
+session or system bus). The acquired connection can then be used to call methods
+on remote objects and emit or receive signals. Using the Export method, you can
+arrange D-Bus method calls to be directly translated to method calls on a Go
+value.
+
+Conversion Rules
+
+For outgoing messages, Go types are automatically converted to the
+corresponding D-Bus types. The following types are directly encoded as their
+respective D-Bus equivalents:
+
+ Go type | D-Bus type
+ ------------+-----------
+ byte | BYTE
+ bool | BOOLEAN
+ int16 | INT16
+ uint16 | UINT16
+ int32 | INT32
+ uint32 | UINT32
+ int64 | INT64
+ uint64 | UINT64
+ float64 | DOUBLE
+ string | STRING
+ ObjectPath | OBJECT_PATH
+ Signature | SIGNATURE
+ Variant | VARIANT
+ UnixFDIndex | UNIX_FD
+
+Slices and arrays encode as ARRAYs of their element type.
+
+Maps encode as DICTs, provided that their key type can be used as a key for
+a DICT.
+
+Structs other than Variant and Signature encode as a STRUCT containing their
+exported fields. Fields whose tags contain `dbus:"-"` and unexported fields will
+be skipped.
+
+Pointers encode as the value they're pointed to.
+
+Trying to encode any other type or a slice, map or struct containing an
+unsupported type will result in an InvalidTypeError.
+
+For incoming messages, the inverse of these rules is used, with the exception
+of STRUCTs. Incoming STRUCTs are represented as a slice of empty interfaces
+containing the struct fields in the correct order. The Store function can be
+used to convert such values to Go structs.
+
+Unix FD passing
+
+Handling Unix file descriptors deserves special mention. To use them, you should
+first check that they are supported on a connection by calling SupportsUnixFDs.
+If it returns true, all methods of Connection will translate messages containing
+UnixFDs to messages that are accompanied by the given file descriptors, with the
+UnixFD values being substituted by the correct indices. Similarly, the indices
+of incoming messages are automatically resolved. It shouldn't be necessary to use
+UnixFDIndex.
+
+*/
+package dbus
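The conversion rules above mean an incoming STRUCT arrives as a []interface{} and is turned into a Go struct with Store (defined in dbus.go of this vendored package). A minimal sketch, using a hypothetical two-field struct:

    package main

    import (
        "fmt"

        "github.com/godbus/dbus"
    )

    // credentials is a hypothetical struct matching a D-Bus STRUCT of type (si).
    type credentials struct {
        Name string
        ID   int32
    }

    func main() {
        // An incoming message body containing a single STRUCT value.
        body := []interface{}{
            []interface{}{"alice", int32(1000)},
        }

        var c credentials
        if err := dbus.Store(body, &c); err != nil {
            fmt.Println("store failed:", err)
            return
        }
        fmt.Printf("%+v\n", c) // {Name:alice ID:1000}
    }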
diff --git a/vendor/github.com/godbus/dbus/encoder.go b/vendor/github.com/godbus/dbus/encoder.go
new file mode 100644
index 00000000..9f0a9e89
--- /dev/null
+++ b/vendor/github.com/godbus/dbus/encoder.go
@@ -0,0 +1,208 @@
+package dbus
+
+import (
+ "bytes"
+ "encoding/binary"
+ "io"
+ "reflect"
+)
+
+// An encoder encodes values to the D-Bus wire format.
+type encoder struct {
+ out io.Writer
+ order binary.ByteOrder
+ pos int
+}
+
+// NewEncoder returns a new encoder that writes to out in the given byte order.
+func newEncoder(out io.Writer, order binary.ByteOrder) *encoder {
+ return newEncoderAtOffset(out, 0, order)
+}
+
+// newEncoderAtOffset returns a new encoder that writes to out in the given
+// byte order. Specify the offset to initialize pos for proper alignment
+// computation.
+func newEncoderAtOffset(out io.Writer, offset int, order binary.ByteOrder) *encoder {
+ enc := new(encoder)
+ enc.out = out
+ enc.order = order
+ enc.pos = offset
+ return enc
+}
+
+// Aligns the next output to be on a multiple of n. Panics on write errors.
+func (enc *encoder) align(n int) {
+ pad := enc.padding(0, n)
+ if pad > 0 {
+ empty := make([]byte, pad)
+ if _, err := enc.out.Write(empty); err != nil {
+ panic(err)
+ }
+ enc.pos += pad
+ }
+}
+
+// padding returns the number of padding bytes needed, based on the current
+// position, an additional offset and the required alignment.
+func (enc *encoder) padding(offset, algn int) int {
+ abs := enc.pos + offset
+ if abs%algn != 0 {
+ newabs := (abs + algn - 1) & ^(algn - 1)
+ return newabs - abs
+ }
+ return 0
+}
+
+// Calls binary.Write(enc.out, enc.order, v) and panics on write errors.
+func (enc *encoder) binwrite(v interface{}) {
+ if err := binary.Write(enc.out, enc.order, v); err != nil {
+ panic(err)
+ }
+}
+
+// Encode encodes the given values to the underlying writer. All written values
+// are aligned properly as required by the D-Bus spec.
+func (enc *encoder) Encode(vs ...interface{}) (err error) {
+ defer func() {
+ err, _ = recover().(error)
+ }()
+ for _, v := range vs {
+ enc.encode(reflect.ValueOf(v), 0)
+ }
+ return nil
+}
+
+// encode encodes the given value to the writer and panics on error. depth holds
+// the depth of the container nesting.
+func (enc *encoder) encode(v reflect.Value, depth int) {
+ enc.align(alignment(v.Type()))
+ switch v.Kind() {
+ case reflect.Uint8:
+ var b [1]byte
+ b[0] = byte(v.Uint())
+ if _, err := enc.out.Write(b[:]); err != nil {
+ panic(err)
+ }
+ enc.pos++
+ case reflect.Bool:
+ if v.Bool() {
+ enc.encode(reflect.ValueOf(uint32(1)), depth)
+ } else {
+ enc.encode(reflect.ValueOf(uint32(0)), depth)
+ }
+ case reflect.Int16:
+ enc.binwrite(int16(v.Int()))
+ enc.pos += 2
+ case reflect.Uint16:
+ enc.binwrite(uint16(v.Uint()))
+ enc.pos += 2
+ case reflect.Int32:
+ enc.binwrite(int32(v.Int()))
+ enc.pos += 4
+ case reflect.Uint32:
+ enc.binwrite(uint32(v.Uint()))
+ enc.pos += 4
+ case reflect.Int64:
+ enc.binwrite(v.Int())
+ enc.pos += 8
+ case reflect.Uint64:
+ enc.binwrite(v.Uint())
+ enc.pos += 8
+ case reflect.Float64:
+ enc.binwrite(v.Float())
+ enc.pos += 8
+ case reflect.String:
+ enc.encode(reflect.ValueOf(uint32(len(v.String()))), depth)
+ b := make([]byte, v.Len()+1)
+ copy(b, v.String())
+ b[len(b)-1] = 0
+ n, err := enc.out.Write(b)
+ if err != nil {
+ panic(err)
+ }
+ enc.pos += n
+ case reflect.Ptr:
+ enc.encode(v.Elem(), depth)
+ case reflect.Slice, reflect.Array:
+ if depth >= 64 {
+ panic(FormatError("input exceeds container depth limit"))
+ }
+ // Lookahead offset: 4 bytes for uint32 length (with alignment),
+ // plus alignment for elements.
+ n := enc.padding(0, 4) + 4
+ offset := enc.pos + n + enc.padding(n, alignment(v.Type().Elem()))
+
+ var buf bytes.Buffer
+ bufenc := newEncoderAtOffset(&buf, offset, enc.order)
+
+ for i := 0; i < v.Len(); i++ {
+ bufenc.encode(v.Index(i), depth+1)
+ }
+ enc.encode(reflect.ValueOf(uint32(buf.Len())), depth)
+ length := buf.Len()
+ enc.align(alignment(v.Type().Elem()))
+ if _, err := buf.WriteTo(enc.out); err != nil {
+ panic(err)
+ }
+ enc.pos += length
+ case reflect.Struct:
+ if depth >= 64 && v.Type() != signatureType {
+ panic(FormatError("input exceeds container depth limit"))
+ }
+ switch t := v.Type(); t {
+ case signatureType:
+ str := v.Field(0)
+ enc.encode(reflect.ValueOf(byte(str.Len())), depth+1)
+ b := make([]byte, str.Len()+1)
+ copy(b, str.String())
+ b[len(b)-1] = 0
+ n, err := enc.out.Write(b)
+ if err != nil {
+ panic(err)
+ }
+ enc.pos += n
+ case variantType:
+ variant := v.Interface().(Variant)
+ enc.encode(reflect.ValueOf(variant.sig), depth+1)
+ enc.encode(reflect.ValueOf(variant.value), depth+1)
+ default:
+ for i := 0; i < v.Type().NumField(); i++ {
+ field := t.Field(i)
+ if field.PkgPath == "" && field.Tag.Get("dbus") != "-" {
+ enc.encode(v.Field(i), depth+1)
+ }
+ }
+ }
+ case reflect.Map:
+ // Maps are arrays of structures, so they actually increase the depth by
+ // 2.
+ if depth >= 63 {
+ panic(FormatError("input exceeds container depth limit"))
+ }
+ if !isKeyType(v.Type().Key()) {
+ panic(InvalidTypeError{v.Type()})
+ }
+ keys := v.MapKeys()
+ // Lookahead offset: 4 bytes for uint32 length (with alignment),
+ // plus 8-byte alignment
+ n := enc.padding(0, 4) + 4
+ offset := enc.pos + n + enc.padding(n, 8)
+
+ var buf bytes.Buffer
+ bufenc := newEncoderAtOffset(&buf, offset, enc.order)
+ for _, k := range keys {
+ bufenc.align(8)
+ bufenc.encode(k, depth+2)
+ bufenc.encode(v.MapIndex(k), depth+2)
+ }
+ enc.encode(reflect.ValueOf(uint32(buf.Len())), depth)
+ length := buf.Len()
+ enc.align(8)
+ if _, err := buf.WriteTo(enc.out); err != nil {
+ panic(err)
+ }
+ enc.pos += length
+ default:
+ panic(InvalidTypeError{v.Type()})
+ }
+}
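The lookahead-offset comments above rely on the usual round-up-to-alignment arithmetic that padding() implements. A small standalone sketch of that formula, independent of the encoder type (alignments must be powers of two, as they are for all D-Bus types):

    package main

    import "fmt"

    // padding mirrors the arithmetic of encoder.padding above: round the absolute
    // position up to the next multiple of algn and return the difference.
    func padding(pos, offset, algn int) int {
        abs := pos + offset
        if abs%algn != 0 {
            return ((abs + algn - 1) & ^(algn - 1)) - abs
        }
        return 0
    }

    func main() {
        fmt.Println(padding(1, 0, 4)) // 3: position 1 needs 3 bytes to reach 4
        fmt.Println(padding(8, 0, 8)) // 0: already aligned
        fmt.Println(padding(5, 4, 8)) // 7: absolute position 9 rounds up to 16
    }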
diff --git a/vendor/github.com/godbus/dbus/export.go b/vendor/github.com/godbus/dbus/export.go
new file mode 100644
index 00000000..c6440a74
--- /dev/null
+++ b/vendor/github.com/godbus/dbus/export.go
@@ -0,0 +1,411 @@
+package dbus
+
+import (
+ "errors"
+ "fmt"
+ "reflect"
+ "strings"
+)
+
+var (
+ errmsgInvalidArg = Error{
+ "org.freedesktop.DBus.Error.InvalidArgs",
+ []interface{}{"Invalid type / number of args"},
+ }
+ errmsgNoObject = Error{
+ "org.freedesktop.DBus.Error.NoSuchObject",
+ []interface{}{"No such object"},
+ }
+ errmsgUnknownMethod = Error{
+ "org.freedesktop.DBus.Error.UnknownMethod",
+ []interface{}{"Unknown / invalid method"},
+ }
+)
+
+// exportWithMapping represents an exported struct along with a method name
+// mapping to allow for exporting lower-case methods, etc.
+type exportWithMapping struct {
+ export interface{}
+
+ // Method name mapping; key -> struct method, value -> dbus method.
+ mapping map[string]string
+
+ // Whether or not this export is for the entire subtree
+ includeSubtree bool
+}
+
+// Sender is a type which can be used in exported methods to receive the message
+// sender.
+type Sender string
+
+func exportedMethod(export exportWithMapping, name string) reflect.Value {
+ if export.export == nil {
+ return reflect.Value{}
+ }
+
+ // If a mapping was included in the export, check the map to see if we
+ // should be looking for a different method in the export.
+ if export.mapping != nil {
+ for key, value := range export.mapping {
+ if value == name {
+ name = key
+ break
+ }
+
+ // Catch the case where a method is aliased but the client is calling
+ // the original, e.g. the "Foo" method was exported as "foo", and the
+ // D-Bus client called the original "Foo".
+ if key == name {
+ return reflect.Value{}
+ }
+ }
+ }
+
+ value := reflect.ValueOf(export.export)
+ m := value.MethodByName(name)
+
+ // Catch the case of attempting to call an unexported method
+ method, ok := value.Type().MethodByName(name)
+
+ if !m.IsValid() || !ok || method.PkgPath != "" {
+ return reflect.Value{}
+ }
+ t := m.Type()
+ if t.NumOut() == 0 ||
+ t.Out(t.NumOut()-1) != reflect.TypeOf(&errmsgInvalidArg) {
+
+ return reflect.Value{}
+ }
+ return m
+}
+
+// searchHandlers looks through all registered handlers for one that handles
+// the given path. If a verbatim one isn't found, it will check for
+// a subtree registration for the path as well.
+func (conn *Conn) searchHandlers(path ObjectPath) (map[string]exportWithMapping, bool) {
+ conn.handlersLck.RLock()
+ defer conn.handlersLck.RUnlock()
+
+ handlers, ok := conn.handlers[path]
+ if ok {
+ return handlers, ok
+ }
+
+ // If handlers weren't found for this exact path, look for a matching subtree
+ // registration
+ handlers = make(map[string]exportWithMapping)
+ path = path[:strings.LastIndex(string(path), "/")]
+ for len(path) > 0 {
+ var subtreeHandlers map[string]exportWithMapping
+ subtreeHandlers, ok = conn.handlers[path]
+ if ok {
+ for iface, handler := range subtreeHandlers {
+ // Only include this handler if it registered for the subtree
+ if handler.includeSubtree {
+ handlers[iface] = handler
+ }
+ }
+
+ break
+ }
+
+ path = path[:strings.LastIndex(string(path), "/")]
+ }
+
+ return handlers, ok
+}
+
+// handleCall handles the given method call (i.e. checks whether it's one of the
+// pre-implemented ones and searches for a corresponding handler if not).
+func (conn *Conn) handleCall(msg *Message) {
+ name := msg.Headers[FieldMember].value.(string)
+ path := msg.Headers[FieldPath].value.(ObjectPath)
+ ifaceName, hasIface := msg.Headers[FieldInterface].value.(string)
+ sender, hasSender := msg.Headers[FieldSender].value.(string)
+ serial := msg.serial
+ if ifaceName == "org.freedesktop.DBus.Peer" {
+ switch name {
+ case "Ping":
+ conn.sendReply(sender, serial)
+ case "GetMachineId":
+ conn.sendReply(sender, serial, conn.uuid)
+ default:
+ conn.sendError(errmsgUnknownMethod, sender, serial)
+ }
+ return
+ }
+ if len(name) == 0 {
+ conn.sendError(errmsgUnknownMethod, sender, serial)
+ return
+ }
+
+ // Find the exported handler (if any) for this path
+ handlers, ok := conn.searchHandlers(path)
+ if !ok {
+ conn.sendError(errmsgNoObject, sender, serial)
+ return
+ }
+
+ var m reflect.Value
+ if hasIface {
+ iface := handlers[ifaceName]
+ m = exportedMethod(iface, name)
+ } else {
+ for _, v := range handlers {
+ m = exportedMethod(v, name)
+ if m.IsValid() {
+ break
+ }
+ }
+ }
+
+ if !m.IsValid() {
+ conn.sendError(errmsgUnknownMethod, sender, serial)
+ return
+ }
+
+ t := m.Type()
+ vs := msg.Body
+ pointers := make([]interface{}, t.NumIn())
+ decode := make([]interface{}, 0, len(vs))
+ for i := 0; i < t.NumIn(); i++ {
+ tp := t.In(i)
+ val := reflect.New(tp)
+ pointers[i] = val.Interface()
+ if tp == reflect.TypeOf((*Sender)(nil)).Elem() {
+ val.Elem().SetString(sender)
+ } else if tp == reflect.TypeOf((*Message)(nil)).Elem() {
+ val.Elem().Set(reflect.ValueOf(*msg))
+ } else {
+ decode = append(decode, pointers[i])
+ }
+ }
+
+ if len(decode) != len(vs) {
+ conn.sendError(errmsgInvalidArg, sender, serial)
+ return
+ }
+
+ if err := Store(vs, decode...); err != nil {
+ conn.sendError(errmsgInvalidArg, sender, serial)
+ return
+ }
+
+ // Extract parameters
+ params := make([]reflect.Value, len(pointers))
+ for i := 0; i < len(pointers); i++ {
+ params[i] = reflect.ValueOf(pointers[i]).Elem()
+ }
+
+ // Call method
+ ret := m.Call(params)
+ if em := ret[t.NumOut()-1].Interface().(*Error); em != nil {
+ conn.sendError(*em, sender, serial)
+ return
+ }
+
+ if msg.Flags&FlagNoReplyExpected == 0 {
+ reply := new(Message)
+ reply.Type = TypeMethodReply
+ reply.serial = conn.getSerial()
+ reply.Headers = make(map[HeaderField]Variant)
+ if hasSender {
+ reply.Headers[FieldDestination] = msg.Headers[FieldSender]
+ }
+ reply.Headers[FieldReplySerial] = MakeVariant(msg.serial)
+ reply.Body = make([]interface{}, len(ret)-1)
+ for i := 0; i < len(ret)-1; i++ {
+ reply.Body[i] = ret[i].Interface()
+ }
+ if len(ret) != 1 {
+ reply.Headers[FieldSignature] = MakeVariant(SignatureOf(reply.Body...))
+ }
+ conn.outLck.RLock()
+ if !conn.closed {
+ conn.out <- reply
+ }
+ conn.outLck.RUnlock()
+ }
+}
+
+// Emit emits the given signal on the message bus. The name parameter must be
+// formatted as "interface.member", e.g., "org.freedesktop.DBus.NameLost".
+func (conn *Conn) Emit(path ObjectPath, name string, values ...interface{}) error {
+ if !path.IsValid() {
+ return errors.New("dbus: invalid object path")
+ }
+ i := strings.LastIndex(name, ".")
+ if i == -1 {
+ return errors.New("dbus: invalid method name")
+ }
+ iface := name[:i]
+ member := name[i+1:]
+ if !isValidMember(member) {
+ return errors.New("dbus: invalid method name")
+ }
+ if !isValidInterface(iface) {
+ return errors.New("dbus: invalid interface name")
+ }
+ msg := new(Message)
+ msg.Type = TypeSignal
+ msg.serial = conn.getSerial()
+ msg.Headers = make(map[HeaderField]Variant)
+ msg.Headers[FieldInterface] = MakeVariant(iface)
+ msg.Headers[FieldMember] = MakeVariant(member)
+ msg.Headers[FieldPath] = MakeVariant(path)
+ msg.Body = values
+ if len(values) > 0 {
+ msg.Headers[FieldSignature] = MakeVariant(SignatureOf(values...))
+ }
+ conn.outLck.RLock()
+ defer conn.outLck.RUnlock()
+ if conn.closed {
+ return ErrClosed
+ }
+ conn.out <- msg
+ return nil
+}
+
+// Export registers the given value to be exported as an object on the
+// message bus.
+//
+// If a method call on the given path and interface is received, an exported
+// method with the same name is called with v as the receiver if the
+// parameters match and the last return value is of type *Error. If this
+// *Error is not nil, it is sent back to the caller as an error.
+// Otherwise, a method reply is sent with the other return values as its body.
+//
+// Any parameters with the special type Sender are set to the sender of the
+// dbus message when the method is called. Parameters of this type do not
+// contribute to the dbus signature of the method (i.e. the method is exposed
+// as if the parameters of type Sender were not there).
+//
+// Similarly, any parameters with the type Message are set to the raw message
+// received on the bus. Again, parameters of this type do not contribute to the
+// dbus signature of the method.
+//
+// Every method call is executed in a new goroutine, so the method may be called
+// in multiple goroutines at once.
+//
+// Method calls on the interface org.freedesktop.DBus.Peer will be automatically
+// handled for every object.
+//
+// Passing nil as the first parameter will cause conn to cease handling calls on
+// the given combination of path and interface.
+//
+// Export returns an error if path is not a valid path name.
+func (conn *Conn) Export(v interface{}, path ObjectPath, iface string) error {
+ return conn.ExportWithMap(v, nil, path, iface)
+}
+
+// ExportWithMap works exactly like Export but provides the ability to remap
+// method names (e.g. export a lower-case method).
+//
+// The keys in the map are the real method names (exported on the struct), and
+// the values are the method names to be exported on DBus.
+func (conn *Conn) ExportWithMap(v interface{}, mapping map[string]string, path ObjectPath, iface string) error {
+ return conn.exportWithMap(v, mapping, path, iface, false)
+}
+
+// ExportSubtree works exactly like Export but registers the given value for
+// an entire subtree rather than just the root path provided.
+//
+// In order to make this useful, one parameter in each of the value's exported
+// methods should be a Message, in which case it will contain the raw message
+// (allowing one to get access to the path that caused the method to be called).
+//
+// Note that more specific export paths take precedence over less specific. For
+// example, a method call using the ObjectPath /foo/bar/baz will call a method
+// exported on /foo/bar before a method exported on /foo.
+func (conn *Conn) ExportSubtree(v interface{}, path ObjectPath, iface string) error {
+ return conn.ExportSubtreeWithMap(v, nil, path, iface)
+}
+
+// ExportSubtreeWithMap works exactly like ExportSubtree but provides the
+// ability to remap method names (e.g. export a lower-case method).
+//
+// The keys in the map are the real method names (exported on the struct), and
+// the values are the method names to be exported on DBus.
+func (conn *Conn) ExportSubtreeWithMap(v interface{}, mapping map[string]string, path ObjectPath, iface string) error {
+ return conn.exportWithMap(v, mapping, path, iface, true)
+}
+
+// exportWithMap is the worker function for all exports/registrations.
+func (conn *Conn) exportWithMap(v interface{}, mapping map[string]string, path ObjectPath, iface string, includeSubtree bool) error {
+ if !path.IsValid() {
+ return fmt.Errorf(`dbus: Invalid path name: "%s"`, path)
+ }
+
+ conn.handlersLck.Lock()
+ defer conn.handlersLck.Unlock()
+
+ // Remove a previous export if the interface is nil
+ if v == nil {
+ if _, ok := conn.handlers[path]; ok {
+ delete(conn.handlers[path], iface)
+ if len(conn.handlers[path]) == 0 {
+ delete(conn.handlers, path)
+ }
+ }
+
+ return nil
+ }
+
+ // If this is the first handler for this path, make a new map to hold all
+ // handlers for this path.
+ if _, ok := conn.handlers[path]; !ok {
+ conn.handlers[path] = make(map[string]exportWithMapping)
+ }
+
+ // Finally, save this handler
+ conn.handlers[path][iface] = exportWithMapping{export: v, mapping: mapping, includeSubtree: includeSubtree}
+
+ return nil
+}
+
+// ReleaseName calls org.freedesktop.DBus.ReleaseName and awaits a response.
+func (conn *Conn) ReleaseName(name string) (ReleaseNameReply, error) {
+ var r uint32
+ err := conn.busObj.Call("org.freedesktop.DBus.ReleaseName", 0, name).Store(&r)
+ if err != nil {
+ return 0, err
+ }
+ return ReleaseNameReply(r), nil
+}
+
+// RequestName calls org.freedesktop.DBus.RequestName and awaits a response.
+func (conn *Conn) RequestName(name string, flags RequestNameFlags) (RequestNameReply, error) {
+ var r uint32
+ err := conn.busObj.Call("org.freedesktop.DBus.RequestName", 0, name, flags).Store(&r)
+ if err != nil {
+ return 0, err
+ }
+ return RequestNameReply(r), nil
+}
+
+// ReleaseNameReply is the reply to a ReleaseName call.
+type ReleaseNameReply uint32
+
+const (
+ ReleaseNameReplyReleased ReleaseNameReply = 1 + iota
+ ReleaseNameReplyNonExistent
+ ReleaseNameReplyNotOwner
+)
+
+// RequestNameFlags represents the possible flags for a RequestName call.
+type RequestNameFlags uint32
+
+const (
+ NameFlagAllowReplacement RequestNameFlags = 1 << iota
+ NameFlagReplaceExisting
+ NameFlagDoNotQueue
+)
+
+// RequestNameReply is the reply to a RequestName call.
+type RequestNameReply uint32
+
+const (
+ RequestNameReplyPrimaryOwner RequestNameReply = 1 + iota
+ RequestNameReplyInQueue
+ RequestNameReplyExists
+ RequestNameReplyAlreadyOwner
+)
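As documented above, an exported method must have *Error as its last return value, and parameters of type Sender are filled in from the message rather than the body. A minimal sketch under assumed names (com.example.Demo and the demo type are hypothetical); SessionBus is defined elsewhere in this vendored package:

    package main

    import (
        "fmt"

        "github.com/godbus/dbus"
    )

    // demo is a hypothetical receiver; the last return value must be *dbus.Error.
    type demo struct{}

    func (d demo) Hello(sender dbus.Sender, name string) (string, *dbus.Error) {
        return fmt.Sprintf("hello %s from %s", name, sender), nil
    }

    func main() {
        conn, err := dbus.SessionBus()
        if err != nil {
            panic(err)
        }
        // Export demo{} on /com/example/Demo under the interface com.example.Demo.
        if err := conn.Export(demo{}, "/com/example/Demo", "com.example.Demo"); err != nil {
            panic(err)
        }
        // Claim a well-known name so clients can find the object.
        reply, err := conn.RequestName("com.example.Demo", dbus.NameFlagDoNotQueue)
        if err != nil || reply != dbus.RequestNameReplyPrimaryOwner {
            panic("could not become primary owner of com.example.Demo")
        }
        select {} // serve forever
    }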
diff --git a/vendor/github.com/godbus/dbus/homedir.go b/vendor/github.com/godbus/dbus/homedir.go
new file mode 100644
index 00000000..0b745f93
--- /dev/null
+++ b/vendor/github.com/godbus/dbus/homedir.go
@@ -0,0 +1,28 @@
+package dbus
+
+import (
+ "os"
+ "sync"
+)
+
+var (
+ homeDir string
+ homeDirLock sync.Mutex
+)
+
+func getHomeDir() string {
+ homeDirLock.Lock()
+ defer homeDirLock.Unlock()
+
+ if homeDir != "" {
+ return homeDir
+ }
+
+ homeDir = os.Getenv("HOME")
+ if homeDir != "" {
+ return homeDir
+ }
+
+ homeDir = lookupHomeDir()
+ return homeDir
+}
diff --git a/vendor/github.com/godbus/dbus/homedir_dynamic.go b/vendor/github.com/godbus/dbus/homedir_dynamic.go
new file mode 100644
index 00000000..2732081e
--- /dev/null
+++ b/vendor/github.com/godbus/dbus/homedir_dynamic.go
@@ -0,0 +1,15 @@
+// +build !static_build
+
+package dbus
+
+import (
+ "os/user"
+)
+
+func lookupHomeDir() string {
+ u, err := user.Current()
+ if err != nil {
+ return "/"
+ }
+ return u.HomeDir
+}
diff --git a/vendor/github.com/godbus/dbus/homedir_static.go b/vendor/github.com/godbus/dbus/homedir_static.go
new file mode 100644
index 00000000..b9d9cb55
--- /dev/null
+++ b/vendor/github.com/godbus/dbus/homedir_static.go
@@ -0,0 +1,45 @@
+// +build static_build
+
+package dbus
+
+import (
+ "bufio"
+ "os"
+ "strconv"
+ "strings"
+)
+
+func lookupHomeDir() string {
+ myUid := os.Getuid()
+
+ f, err := os.Open("/etc/passwd")
+ if err != nil {
+ return "/"
+ }
+ defer f.Close()
+
+ s := bufio.NewScanner(f)
+
+ for s.Scan() {
+ if err := s.Err(); err != nil {
+ break
+ }
+
+ line := strings.TrimSpace(s.Text())
+ if line == "" {
+ continue
+ }
+
+ parts := strings.Split(line, ":")
+
+ if len(parts) >= 6 {
+ uid, err := strconv.Atoi(parts[2])
+ if err == nil && uid == myUid {
+ return parts[5]
+ }
+ }
+ }
+
+ // Default to / if we can't get a better value
+ return "/"
+}
diff --git a/vendor/github.com/godbus/dbus/message.go b/vendor/github.com/godbus/dbus/message.go
new file mode 100644
index 00000000..075d6e38
--- /dev/null
+++ b/vendor/github.com/godbus/dbus/message.go
@@ -0,0 +1,346 @@
+package dbus
+
+import (
+ "bytes"
+ "encoding/binary"
+ "errors"
+ "io"
+ "reflect"
+ "strconv"
+)
+
+const protoVersion byte = 1
+
+// Flags represents the possible flags of a D-Bus message.
+type Flags byte
+
+const (
+ // FlagNoReplyExpected signals that the message is not expected to generate
+ // a reply. If this flag is set on outgoing messages, any possible reply
+ // will be discarded.
+ FlagNoReplyExpected Flags = 1 << iota
+ // FlagNoAutoStart signals that the message bus should not automatically
+ // start an application when handling this message.
+ FlagNoAutoStart
+)
+
+// Type represents the possible types of a D-Bus message.
+type Type byte
+
+const (
+ TypeMethodCall Type = 1 + iota
+ TypeMethodReply
+ TypeError
+ TypeSignal
+ typeMax
+)
+
+func (t Type) String() string {
+ switch t {
+ case TypeMethodCall:
+ return "method call"
+ case TypeMethodReply:
+ return "reply"
+ case TypeError:
+ return "error"
+ case TypeSignal:
+ return "signal"
+ }
+ return "invalid"
+}
+
+// HeaderField represents the possible byte codes for the headers
+// of a D-Bus message.
+type HeaderField byte
+
+const (
+ FieldPath HeaderField = 1 + iota
+ FieldInterface
+ FieldMember
+ FieldErrorName
+ FieldReplySerial
+ FieldDestination
+ FieldSender
+ FieldSignature
+ FieldUnixFDs
+ fieldMax
+)
+
+// An InvalidMessageError describes the reason why a D-Bus message is regarded as
+// invalid.
+type InvalidMessageError string
+
+func (e InvalidMessageError) Error() string {
+ return "dbus: invalid message: " + string(e)
+}
+
+// fieldTypes are the types of the various header fields.
+var fieldTypes = [fieldMax]reflect.Type{
+ FieldPath: objectPathType,
+ FieldInterface: stringType,
+ FieldMember: stringType,
+ FieldErrorName: stringType,
+ FieldReplySerial: uint32Type,
+ FieldDestination: stringType,
+ FieldSender: stringType,
+ FieldSignature: signatureType,
+ FieldUnixFDs: uint32Type,
+}
+
+// requiredFields lists the header fields that are required by the different
+// message types.
+var requiredFields = [typeMax][]HeaderField{
+ TypeMethodCall: {FieldPath, FieldMember},
+ TypeMethodReply: {FieldReplySerial},
+ TypeError: {FieldErrorName, FieldReplySerial},
+ TypeSignal: {FieldPath, FieldInterface, FieldMember},
+}
+
+// Message represents a single D-Bus message.
+type Message struct {
+ Type
+ Flags
+ Headers map[HeaderField]Variant
+ Body []interface{}
+
+ serial uint32
+}
+
+type header struct {
+ Field byte
+ Variant
+}
+
+// DecodeMessage tries to decode a single message in the D-Bus wire format
+// from the given reader. The byte order is figured out from the first byte.
+// The possibly returned error can be an error of the underlying reader, an
+// InvalidMessageError or a FormatError.
+func DecodeMessage(rd io.Reader) (msg *Message, err error) {
+ var order binary.ByteOrder
+ var hlength, length uint32
+ var typ, flags, proto byte
+ var headers []header
+
+ b := make([]byte, 1)
+ _, err = rd.Read(b)
+ if err != nil {
+ return
+ }
+ switch b[0] {
+ case 'l':
+ order = binary.LittleEndian
+ case 'B':
+ order = binary.BigEndian
+ default:
+ return nil, InvalidMessageError("invalid byte order")
+ }
+
+ dec := newDecoder(rd, order)
+ dec.pos = 1
+
+ msg = new(Message)
+ vs, err := dec.Decode(Signature{"yyyuu"})
+ if err != nil {
+ return nil, err
+ }
+ if err = Store(vs, &typ, &flags, &proto, &length, &msg.serial); err != nil {
+ return nil, err
+ }
+ msg.Type = Type(typ)
+ msg.Flags = Flags(flags)
+
+ // get the header length separately because we need it later
+ b = make([]byte, 4)
+ _, err = io.ReadFull(rd, b)
+ if err != nil {
+ return nil, err
+ }
+ binary.Read(bytes.NewBuffer(b), order, &hlength)
+ if hlength+length+16 > 1<<27 {
+ return nil, InvalidMessageError("message is too long")
+ }
+ dec = newDecoder(io.MultiReader(bytes.NewBuffer(b), rd), order)
+ dec.pos = 12
+ vs, err = dec.Decode(Signature{"a(yv)"})
+ if err != nil {
+ return nil, err
+ }
+ if err = Store(vs, &headers); err != nil {
+ return nil, err
+ }
+
+ msg.Headers = make(map[HeaderField]Variant)
+ for _, v := range headers {
+ msg.Headers[HeaderField(v.Field)] = v.Variant
+ }
+
+ dec.align(8)
+ body := make([]byte, int(length))
+ if length != 0 {
+ _, err := io.ReadFull(rd, body)
+ if err != nil {
+ return nil, err
+ }
+ }
+
+ if err = msg.IsValid(); err != nil {
+ return nil, err
+ }
+ sig, _ := msg.Headers[FieldSignature].value.(Signature)
+ if sig.str != "" {
+ buf := bytes.NewBuffer(body)
+ dec = newDecoder(buf, order)
+ vs, err := dec.Decode(sig)
+ if err != nil {
+ return nil, err
+ }
+ msg.Body = vs
+ }
+
+ return
+}
+
+// EncodeTo encodes and sends a message to the given writer. The byte order must
+// be either binary.LittleEndian or binary.BigEndian. If the message is not
+// valid or an error occurs when writing, an error is returned.
+func (msg *Message) EncodeTo(out io.Writer, order binary.ByteOrder) error {
+ if err := msg.IsValid(); err != nil {
+ return err
+ }
+ var vs [7]interface{}
+ switch order {
+ case binary.LittleEndian:
+ vs[0] = byte('l')
+ case binary.BigEndian:
+ vs[0] = byte('B')
+ default:
+ return errors.New("dbus: invalid byte order")
+ }
+ body := new(bytes.Buffer)
+ enc := newEncoder(body, order)
+ if len(msg.Body) != 0 {
+ enc.Encode(msg.Body...)
+ }
+ vs[1] = msg.Type
+ vs[2] = msg.Flags
+ vs[3] = protoVersion
+ vs[4] = uint32(len(body.Bytes()))
+ vs[5] = msg.serial
+ headers := make([]header, 0, len(msg.Headers))
+ for k, v := range msg.Headers {
+ headers = append(headers, header{byte(k), v})
+ }
+ vs[6] = headers
+ var buf bytes.Buffer
+ enc = newEncoder(&buf, order)
+ enc.Encode(vs[:]...)
+ enc.align(8)
+ body.WriteTo(&buf)
+ if buf.Len() > 1<<27 {
+ return InvalidMessageError("message is too long")
+ }
+ if _, err := buf.WriteTo(out); err != nil {
+ return err
+ }
+ return nil
+}
+
+// IsValid checks whether msg is a valid message and returns an
+// InvalidMessageError if it is not.
+func (msg *Message) IsValid() error {
+ if msg.Flags & ^(FlagNoAutoStart|FlagNoReplyExpected) != 0 {
+ return InvalidMessageError("invalid flags")
+ }
+ if msg.Type == 0 || msg.Type >= typeMax {
+ return InvalidMessageError("invalid message type")
+ }
+ for k, v := range msg.Headers {
+ if k == 0 || k >= fieldMax {
+ return InvalidMessageError("invalid header")
+ }
+ if reflect.TypeOf(v.value) != fieldTypes[k] {
+ return InvalidMessageError("invalid type of header field")
+ }
+ }
+ for _, v := range requiredFields[msg.Type] {
+ if _, ok := msg.Headers[v]; !ok {
+ return InvalidMessageError("missing required header")
+ }
+ }
+ if path, ok := msg.Headers[FieldPath]; ok {
+ if !path.value.(ObjectPath).IsValid() {
+ return InvalidMessageError("invalid path name")
+ }
+ }
+ if iface, ok := msg.Headers[FieldInterface]; ok {
+ if !isValidInterface(iface.value.(string)) {
+ return InvalidMessageError("invalid interface name")
+ }
+ }
+ if member, ok := msg.Headers[FieldMember]; ok {
+ if !isValidMember(member.value.(string)) {
+ return InvalidMessageError("invalid member name")
+ }
+ }
+ if errname, ok := msg.Headers[FieldErrorName]; ok {
+ if !isValidInterface(errname.value.(string)) {
+ return InvalidMessageError("invalid error name")
+ }
+ }
+ if len(msg.Body) != 0 {
+ if _, ok := msg.Headers[FieldSignature]; !ok {
+ return InvalidMessageError("missing signature")
+ }
+ }
+ return nil
+}
+
+// Serial returns the message's serial number. The returned value is only valid
+// for messages received by eavesdropping.
+func (msg *Message) Serial() uint32 {
+ return msg.serial
+}
+
+// String returns a string representation of a message similar to the format of
+// dbus-monitor.
+func (msg *Message) String() string {
+ if err := msg.IsValid(); err != nil {
+ return ""
+ }
+ s := msg.Type.String()
+ if v, ok := msg.Headers[FieldSender]; ok {
+ s += " from " + v.value.(string)
+ }
+ if v, ok := msg.Headers[FieldDestination]; ok {
+ s += " to " + v.value.(string)
+ }
+ s += " serial " + strconv.FormatUint(uint64(msg.serial), 10)
+ if v, ok := msg.Headers[FieldReplySerial]; ok {
+ s += " reply_serial " + strconv.FormatUint(uint64(v.value.(uint32)), 10)
+ }
+ if v, ok := msg.Headers[FieldUnixFDs]; ok {
+ s += " unixfds " + strconv.FormatUint(uint64(v.value.(uint32)), 10)
+ }
+ if v, ok := msg.Headers[FieldPath]; ok {
+ s += " path " + string(v.value.(ObjectPath))
+ }
+ if v, ok := msg.Headers[FieldInterface]; ok {
+ s += " interface " + v.value.(string)
+ }
+ if v, ok := msg.Headers[FieldErrorName]; ok {
+ s += " error " + v.value.(string)
+ }
+ if v, ok := msg.Headers[FieldMember]; ok {
+ s += " member " + v.value.(string)
+ }
+ if len(msg.Body) != 0 {
+ s += "\n"
+ }
+ for i, v := range msg.Body {
+ s += " " + MakeVariant(v).String()
+ if i != len(msg.Body)-1 {
+ s += "\n"
+ }
+ }
+ return s
+}
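EncodeTo and DecodeMessage are symmetric, so any message that passes IsValid can be round-tripped through a buffer. A minimal sketch using a signal, which only needs the path, interface and member headers; the example path and interface names are made up:

    package main

    import (
        "bytes"
        "encoding/binary"
        "fmt"

        "github.com/godbus/dbus"
    )

    func main() {
        // Build a minimal signal message; TypeSignal requires path, interface
        // and member headers, and an empty body needs no signature header.
        msg := &dbus.Message{
            Type: dbus.TypeSignal,
            Headers: map[dbus.HeaderField]dbus.Variant{
                dbus.FieldPath:      dbus.MakeVariant(dbus.ObjectPath("/com/example/Obj")),
                dbus.FieldInterface: dbus.MakeVariant("com.example.Iface"),
                dbus.FieldMember:    dbus.MakeVariant("Changed"),
            },
        }

        var buf bytes.Buffer
        if err := msg.EncodeTo(&buf, binary.LittleEndian); err != nil {
            panic(err)
        }
        decoded, err := dbus.DecodeMessage(&buf)
        if err != nil {
            panic(err)
        }
        fmt.Println(decoded) // prints a dbus-monitor style line
    }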
diff --git a/vendor/github.com/godbus/dbus/object.go b/vendor/github.com/godbus/dbus/object.go
new file mode 100644
index 00000000..9573b709
--- /dev/null
+++ b/vendor/github.com/godbus/dbus/object.go
@@ -0,0 +1,136 @@
+package dbus
+
+import (
+ "errors"
+ "strings"
+)
+
+// BusObject is the interface of a remote object on which methods can be
+// invoked.
+type BusObject interface {
+ Call(method string, flags Flags, args ...interface{}) *Call
+ Go(method string, flags Flags, ch chan *Call, args ...interface{}) *Call
+ GetProperty(p string) (Variant, error)
+ Destination() string
+ Path() ObjectPath
+}
+
+// Object represents a remote object on which methods can be invoked.
+type Object struct {
+ conn *Conn
+ dest string
+ path ObjectPath
+}
+
+// Call calls a method with (*Object).Go and waits for its reply.
+func (o *Object) Call(method string, flags Flags, args ...interface{}) *Call {
+ return <-o.Go(method, flags, make(chan *Call, 1), args...).Done
+}
+
+// AddMatchSignal subscribes BusObject to signals from the specified interface
+// and member.
+func (o *Object) AddMatchSignal(iface, member string) *Call {
+ return o.Call(
+ "org.freedesktop.DBus.AddMatch",
+ 0,
+ "type='signal',interface='"+iface+"',member='"+member+"'",
+ )
+}
+
+// Go calls a method with the given arguments asynchronously. It returns a
+// Call structure representing this method call. The passed channel will
+// return the same value once the call is done. If ch is nil, a new channel
+// will be allocated. Otherwise, ch has to be buffered or Go will panic.
+//
+// If the flags include FlagNoReplyExpected, ch is ignored and a Call structure
+// is returned of which only the Err member is valid.
+//
+// If the method parameter contains a dot ('.'), the part before the last dot
+// specifies the interface on which the method is called.
+func (o *Object) Go(method string, flags Flags, ch chan *Call, args ...interface{}) *Call {
+ iface := ""
+ i := strings.LastIndex(method, ".")
+ if i != -1 {
+ iface = method[:i]
+ }
+ method = method[i+1:]
+ msg := new(Message)
+ msg.Type = TypeMethodCall
+ msg.serial = o.conn.getSerial()
+ msg.Flags = flags & (FlagNoAutoStart | FlagNoReplyExpected)
+ msg.Headers = make(map[HeaderField]Variant)
+ msg.Headers[FieldPath] = MakeVariant(o.path)
+ msg.Headers[FieldDestination] = MakeVariant(o.dest)
+ msg.Headers[FieldMember] = MakeVariant(method)
+ if iface != "" {
+ msg.Headers[FieldInterface] = MakeVariant(iface)
+ }
+ msg.Body = args
+ if len(args) > 0 {
+ msg.Headers[FieldSignature] = MakeVariant(SignatureOf(args...))
+ }
+ if msg.Flags&FlagNoReplyExpected == 0 {
+ if ch == nil {
+ ch = make(chan *Call, 10)
+ } else if cap(ch) == 0 {
+ panic("dbus: unbuffered channel passed to (*Object).Go")
+ }
+ call := &Call{
+ Destination: o.dest,
+ Path: o.path,
+ Method: method,
+ Args: args,
+ Done: ch,
+ }
+ o.conn.callsLck.Lock()
+ o.conn.calls[msg.serial] = call
+ o.conn.callsLck.Unlock()
+ o.conn.outLck.RLock()
+ if o.conn.closed {
+ call.Err = ErrClosed
+ call.Done <- call
+ } else {
+ o.conn.out <- msg
+ }
+ o.conn.outLck.RUnlock()
+ return call
+ }
+ o.conn.outLck.RLock()
+ defer o.conn.outLck.RUnlock()
+ if o.conn.closed {
+ return &Call{Err: ErrClosed}
+ }
+ o.conn.out <- msg
+ return &Call{Err: nil}
+}
+
+// GetProperty calls org.freedesktop.DBus.Properties.Get on the given
+// object. The property name must be given in interface.member notation.
+func (o *Object) GetProperty(p string) (Variant, error) {
+ idx := strings.LastIndex(p, ".")
+ if idx == -1 || idx+1 == len(p) {
+ return Variant{}, errors.New("dbus: invalid property " + p)
+ }
+
+ iface := p[:idx]
+ prop := p[idx+1:]
+
+ result := Variant{}
+ err := o.Call("org.freedesktop.DBus.Properties.Get", 0, iface, prop).Store(&result)
+
+ if err != nil {
+ return Variant{}, err
+ }
+
+ return result, nil
+}
+
+// Destination returns the destination that calls on o are sent to.
+func (o *Object) Destination() string {
+ return o.dest
+}
+
+// Path returns the path that calls on o are sent to.
+func (o *Object) Path() ObjectPath {
+ return o.path
+}
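The Call/Go/GetProperty API above is normally used against a connection obtained from SessionBus or SystemBus, with Conn.Object and Call.Store defined elsewhere in this vendored package. A minimal sketch against the bus daemon itself; the Features property is hedged because only newer bus daemons expose it:

    package main

    import (
        "fmt"

        "github.com/godbus/dbus"
    )

    func main() {
        conn, err := dbus.SessionBus()
        if err != nil {
            panic(err)
        }
        obj := conn.Object("org.freedesktop.DBus", "/org/freedesktop/DBus")

        // Call blocks until the reply arrives; Store unpacks the reply body.
        var names []string
        if err := obj.Call("org.freedesktop.DBus.ListNames", 0).Store(&names); err != nil {
            panic(err)
        }
        fmt.Println(len(names), "names on the session bus")

        // GetProperty uses interface.member notation; Features may be missing on
        // older bus daemons, so a failure here is simply ignored.
        if v, err := obj.GetProperty("org.freedesktop.DBus.Features"); err == nil {
            fmt.Println("features:", v)
        }
    }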
diff --git a/vendor/github.com/godbus/dbus/sig.go b/vendor/github.com/godbus/dbus/sig.go
new file mode 100644
index 00000000..f45b53ce
--- /dev/null
+++ b/vendor/github.com/godbus/dbus/sig.go
@@ -0,0 +1,257 @@
+package dbus
+
+import (
+ "fmt"
+ "reflect"
+ "strings"
+)
+
+var sigToType = map[byte]reflect.Type{
+ 'y': byteType,
+ 'b': boolType,
+ 'n': int16Type,
+ 'q': uint16Type,
+ 'i': int32Type,
+ 'u': uint32Type,
+ 'x': int64Type,
+ 't': uint64Type,
+ 'd': float64Type,
+ 's': stringType,
+ 'g': signatureType,
+ 'o': objectPathType,
+ 'v': variantType,
+ 'h': unixFDIndexType,
+}
+
+// Signature represents a correct type signature as specified by the D-Bus
+// specification. The zero value represents the empty signature, "".
+type Signature struct {
+ str string
+}
+
+// SignatureOf returns the concatenation of all the signatures of the given
+// values. It panics if one of them is not representable in D-Bus.
+func SignatureOf(vs ...interface{}) Signature {
+ var s string
+ for _, v := range vs {
+ s += getSignature(reflect.TypeOf(v))
+ }
+ return Signature{s}
+}
+
+// SignatureOfType returns the signature of the given type. It panics if the
+// type is not representable in D-Bus.
+func SignatureOfType(t reflect.Type) Signature {
+ return Signature{getSignature(t)}
+}
+
+// getSignature returns the signature of the given type and panics on unknown types.
+func getSignature(t reflect.Type) string {
+ // handle simple types first
+ switch t.Kind() {
+ case reflect.Uint8:
+ return "y"
+ case reflect.Bool:
+ return "b"
+ case reflect.Int16:
+ return "n"
+ case reflect.Uint16:
+ return "q"
+ case reflect.Int32:
+ if t == unixFDType {
+ return "h"
+ }
+ return "i"
+ case reflect.Uint32:
+ if t == unixFDIndexType {
+ return "h"
+ }
+ return "u"
+ case reflect.Int64:
+ return "x"
+ case reflect.Uint64:
+ return "t"
+ case reflect.Float64:
+ return "d"
+ case reflect.Ptr:
+ return getSignature(t.Elem())
+ case reflect.String:
+ if t == objectPathType {
+ return "o"
+ }
+ return "s"
+ case reflect.Struct:
+ if t == variantType {
+ return "v"
+ } else if t == signatureType {
+ return "g"
+ }
+ var s string
+ for i := 0; i < t.NumField(); i++ {
+ field := t.Field(i)
+ if field.PkgPath == "" && field.Tag.Get("dbus") != "-" {
+ s += getSignature(t.Field(i).Type)
+ }
+ }
+ return "(" + s + ")"
+ case reflect.Array, reflect.Slice:
+ return "a" + getSignature(t.Elem())
+ case reflect.Map:
+ if !isKeyType(t.Key()) {
+ panic(InvalidTypeError{t})
+ }
+ return "a{" + getSignature(t.Key()) + getSignature(t.Elem()) + "}"
+ }
+ panic(InvalidTypeError{t})
+}
+
+// ParseSignature returns the signature represented by this string, or a
+// SignatureError if the string is not a valid signature.
+func ParseSignature(s string) (sig Signature, err error) {
+ if len(s) == 0 {
+ return
+ }
+ if len(s) > 255 {
+ return Signature{""}, SignatureError{s, "too long"}
+ }
+ sig.str = s
+ for err == nil && len(s) != 0 {
+ err, s = validSingle(s, 0)
+ }
+ if err != nil {
+ sig = Signature{""}
+ }
+
+ return
+}
+
+// ParseSignatureMust behaves like ParseSignature, except that it panics if s
+// is not valid.
+func ParseSignatureMust(s string) Signature {
+ sig, err := ParseSignature(s)
+ if err != nil {
+ panic(err)
+ }
+ return sig
+}
+
+// Empty returns whether the signature is the empty signature.
+func (s Signature) Empty() bool {
+ return s.str == ""
+}
+
+// Single returns whether the signature represents a single, complete type.
+func (s Signature) Single() bool {
+ err, r := validSingle(s.str, 0)
+ return err == nil && r == ""
+}
+
+// String returns the signature's string representation.
+func (s Signature) String() string {
+ return s.str
+}
+
+// A SignatureError indicates that a signature passed to a function or received
+// on a connection is not a valid signature.
+type SignatureError struct {
+ Sig string
+ Reason string
+}
+
+func (e SignatureError) Error() string {
+ return fmt.Sprintf("dbus: invalid signature: %q (%s)", e.Sig, e.Reason)
+}
+
+// Try to read a single type from this string. If it was successful, err is nil
+// and rem is the remaining unparsed part. Otherwise, err is a non-nil
+// SignatureError and rem is "". depth is the current recursion depth which may
+// not be greater than 64 and should be given as 0 on the first call.
+func validSingle(s string, depth int) (err error, rem string) {
+ if s == "" {
+ return SignatureError{Sig: s, Reason: "empty signature"}, ""
+ }
+ if depth > 64 {
+ return SignatureError{Sig: s, Reason: "container nesting too deep"}, ""
+ }
+ switch s[0] {
+ case 'y', 'b', 'n', 'q', 'i', 'u', 'x', 't', 'd', 's', 'g', 'o', 'v', 'h':
+ return nil, s[1:]
+ case 'a':
+ if len(s) > 1 && s[1] == '{' {
+ i := findMatching(s[1:], '{', '}')
+ if i == -1 {
+ return SignatureError{Sig: s, Reason: "unmatched '{'"}, ""
+ }
+ i++
+ rem = s[i+1:]
+ s = s[2:i]
+ if err, _ = validSingle(s[:1], depth+1); err != nil {
+ return err, ""
+ }
+ err, nr := validSingle(s[1:], depth+1)
+ if err != nil {
+ return err, ""
+ }
+ if nr != "" {
+ return SignatureError{Sig: s, Reason: "too many types in dict"}, ""
+ }
+ return nil, rem
+ }
+ return validSingle(s[1:], depth+1)
+ case '(':
+ i := findMatching(s, '(', ')')
+ if i == -1 {
+ return SignatureError{Sig: s, Reason: "unmatched '('"}, ""
+ }
+ rem = s[i+1:]
+ s = s[1:i]
+ for err == nil && s != "" {
+ err, s = validSingle(s, depth+1)
+ }
+ if err != nil {
+ rem = ""
+ }
+ return
+ }
+ return SignatureError{Sig: s, Reason: "invalid type character"}, ""
+}
+
+func findMatching(s string, left, right rune) int {
+ n := 0
+ for i, v := range s {
+ if v == left {
+ n++
+ } else if v == right {
+ n--
+ }
+ if n == 0 {
+ return i
+ }
+ }
+ return -1
+}
+
+// typeFor returns the type of the given signature. It ignores any left over
+// characters and panics if s doesn't start with a valid type signature.
+func typeFor(s string) (t reflect.Type) {
+ err, _ := validSingle(s, 0)
+ if err != nil {
+ panic(err)
+ }
+
+ if t, ok := sigToType[s[0]]; ok {
+ return t
+ }
+ switch s[0] {
+ case 'a':
+ if s[1] == '{' {
+ i := strings.LastIndex(s, "}")
+ t = reflect.MapOf(sigToType[s[2]], typeFor(s[3:i]))
+ } else {
+ t = reflect.SliceOf(typeFor(s[1:]))
+ }
+ case '(':
+ t = interfacesType
+ }
+ return
+}
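SignatureOf and ParseSignature mirror each other: one derives a signature from Go values via getSignature above, the other validates a textual signature. A minimal sketch:

    package main

    import (
        "fmt"

        "github.com/godbus/dbus"
    )

    func main() {
        // Signatures derived from Go values, following the conversion rules in doc.go.
        fmt.Println(dbus.SignatureOf(uint32(0), "hi", []int32{1, 2})) // usai
        fmt.Println(dbus.SignatureOf(map[string]dbus.Variant{}))      // a{sv}

        // Parsing rejects malformed signatures, here an unterminated dict entry.
        if _, err := dbus.ParseSignature("a{s"); err != nil {
            fmt.Println("invalid:", err)
        }
    }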
diff --git a/vendor/github.com/godbus/dbus/transport_darwin.go b/vendor/github.com/godbus/dbus/transport_darwin.go
new file mode 100644
index 00000000..1bba0d6b
--- /dev/null
+++ b/vendor/github.com/godbus/dbus/transport_darwin.go
@@ -0,0 +1,6 @@
+package dbus
+
+func (t *unixTransport) SendNullByte() error {
+ _, err := t.Write([]byte{0})
+ return err
+}
diff --git a/vendor/github.com/godbus/dbus/transport_generic.go b/vendor/github.com/godbus/dbus/transport_generic.go
new file mode 100644
index 00000000..46f8f49d
--- /dev/null
+++ b/vendor/github.com/godbus/dbus/transport_generic.go
@@ -0,0 +1,35 @@
+package dbus
+
+import (
+ "encoding/binary"
+ "errors"
+ "io"
+)
+
+type genericTransport struct {
+ io.ReadWriteCloser
+}
+
+func (t genericTransport) SendNullByte() error {
+ _, err := t.Write([]byte{0})
+ return err
+}
+
+func (t genericTransport) SupportsUnixFDs() bool {
+ return false
+}
+
+func (t genericTransport) EnableUnixFDs() {}
+
+func (t genericTransport) ReadMessage() (*Message, error) {
+ return DecodeMessage(t)
+}
+
+func (t genericTransport) SendMessage(msg *Message) error {
+ for _, v := range msg.Body {
+ if _, ok := v.(UnixFD); ok {
+ return errors.New("dbus: unix fd passing not enabled")
+ }
+ }
+ return msg.EncodeTo(t, binary.LittleEndian)
+}
diff --git a/vendor/github.com/godbus/dbus/transport_unix.go b/vendor/github.com/godbus/dbus/transport_unix.go
new file mode 100644
index 00000000..3fafeabb
--- /dev/null
+++ b/vendor/github.com/godbus/dbus/transport_unix.go
@@ -0,0 +1,196 @@
+//+build !windows
+
+package dbus
+
+import (
+ "bytes"
+ "encoding/binary"
+ "errors"
+ "io"
+ "net"
+ "syscall"
+)
+
+type oobReader struct {
+ conn *net.UnixConn
+ oob []byte
+ buf [4096]byte
+}
+
+func (o *oobReader) Read(b []byte) (n int, err error) {
+ n, oobn, flags, _, err := o.conn.ReadMsgUnix(b, o.buf[:])
+ if err != nil {
+ return n, err
+ }
+ if flags&syscall.MSG_CTRUNC != 0 {
+ return n, errors.New("dbus: control data truncated (too many fds received)")
+ }
+ o.oob = append(o.oob, o.buf[:oobn]...)
+ return n, nil
+}
+
+type unixTransport struct {
+ *net.UnixConn
+ hasUnixFDs bool
+}
+
+func newUnixTransport(keys string) (transport, error) {
+ var err error
+
+ t := new(unixTransport)
+ abstract := getKey(keys, "abstract")
+ path := getKey(keys, "path")
+ switch {
+ case abstract == "" && path == "":
+ return nil, errors.New("dbus: invalid address (neither path nor abstract set)")
+ case abstract != "" && path == "":
+ t.UnixConn, err = net.DialUnix("unix", nil, &net.UnixAddr{Name: "@" + abstract, Net: "unix"})
+ if err != nil {
+ return nil, err
+ }
+ return t, nil
+ case abstract == "" && path != "":
+ t.UnixConn, err = net.DialUnix("unix", nil, &net.UnixAddr{Name: path, Net: "unix"})
+ if err != nil {
+ return nil, err
+ }
+ return t, nil
+ default:
+ return nil, errors.New("dbus: invalid address (both path and abstract set)")
+ }
+}
+
+func init() {
+ transports["unix"] = newUnixTransport
+}
+
+func (t *unixTransport) EnableUnixFDs() {
+ t.hasUnixFDs = true
+}
+
+func (t *unixTransport) ReadMessage() (*Message, error) {
+ var (
+ blen, hlen uint32
+ csheader [16]byte
+ headers []header
+ order binary.ByteOrder
+ unixfds uint32
+ )
+ // To be sure that all bytes of out-of-band data are read, we use a special
+ // reader that uses ReadMsgUnix on the underlying connection instead of Read
+ // and gathers the out-of-band data in a buffer.
+ rd := &oobReader{conn: t.UnixConn}
+ // read the first 16 bytes (the part of the header that has a constant size),
+ // from which we can figure out the length of the rest of the message
+ if _, err := io.ReadFull(rd, csheader[:]); err != nil {
+ return nil, err
+ }
+ switch csheader[0] {
+ case 'l':
+ order = binary.LittleEndian
+ case 'B':
+ order = binary.BigEndian
+ default:
+ return nil, InvalidMessageError("invalid byte order")
+ }
+ // csheader[4:8] -> length of message body, csheader[12:16] -> length of
+ // header fields (without alignment)
+ binary.Read(bytes.NewBuffer(csheader[4:8]), order, &blen)
+ binary.Read(bytes.NewBuffer(csheader[12:]), order, &hlen)
+ if hlen%8 != 0 {
+ hlen += 8 - (hlen % 8)
+ }
+
+ // decode headers and look for unix fds
+ headerdata := make([]byte, hlen+4)
+ copy(headerdata, csheader[12:])
+ if _, err := io.ReadFull(t, headerdata[4:]); err != nil {
+ return nil, err
+ }
+ dec := newDecoder(bytes.NewBuffer(headerdata), order)
+ dec.pos = 12
+ vs, err := dec.Decode(Signature{"a(yv)"})
+ if err != nil {
+ return nil, err
+ }
+ Store(vs, &headers)
+ for _, v := range headers {
+ if v.Field == byte(FieldUnixFDs) {
+ unixfds, _ = v.Variant.value.(uint32)
+ }
+ }
+ all := make([]byte, 16+hlen+blen)
+ copy(all, csheader[:])
+ copy(all[16:], headerdata[4:])
+ if _, err := io.ReadFull(rd, all[16+hlen:]); err != nil {
+ return nil, err
+ }
+ if unixfds != 0 {
+ if !t.hasUnixFDs {
+ return nil, errors.New("dbus: got unix fds on unsupported transport")
+ }
+ // read the fds from the OOB data
+ scms, err := syscall.ParseSocketControlMessage(rd.oob)
+ if err != nil {
+ return nil, err
+ }
+ if len(scms) != 1 {
+ return nil, errors.New("dbus: received more than one socket control message")
+ }
+ fds, err := syscall.ParseUnixRights(&scms[0])
+ if err != nil {
+ return nil, err
+ }
+ msg, err := DecodeMessage(bytes.NewBuffer(all))
+ if err != nil {
+ return nil, err
+ }
+ // substitute the values in the message body (which are indices into the
+ // array of file descriptors received via OOB) with the actual values
+ for i, v := range msg.Body {
+ if j, ok := v.(UnixFDIndex); ok {
+ if uint32(j) >= unixfds {
+ return nil, InvalidMessageError("invalid index for unix fd")
+ }
+ msg.Body[i] = UnixFD(fds[j])
+ }
+ }
+ return msg, nil
+ }
+ return DecodeMessage(bytes.NewBuffer(all))
+}
+
+func (t *unixTransport) SendMessage(msg *Message) error {
+ fds := make([]int, 0)
+ for i, v := range msg.Body {
+ if fd, ok := v.(UnixFD); ok {
+ msg.Body[i] = UnixFDIndex(len(fds))
+ fds = append(fds, int(fd))
+ }
+ }
+ if len(fds) != 0 {
+ if !t.hasUnixFDs {
+ return errors.New("dbus: unix fd passing not enabled")
+ }
+ msg.Headers[FieldUnixFDs] = MakeVariant(uint32(len(fds)))
+ oob := syscall.UnixRights(fds...)
+ buf := new(bytes.Buffer)
+ msg.EncodeTo(buf, binary.LittleEndian)
+ n, oobn, err := t.UnixConn.WriteMsgUnix(buf.Bytes(), oob, nil)
+ if err != nil {
+ return err
+ }
+ if n != buf.Len() || oobn != len(oob) {
+ return io.ErrShortWrite
+ }
+ } else {
+ if err := msg.EncodeTo(t, binary.LittleEndian); err != nil {
+ return err
+ }
+ }
+ return nil
+}
+
+func (t *unixTransport) SupportsUnixFDs() bool {
+ return true
+}
diff --git a/vendor/github.com/godbus/dbus/transport_unixcred_dragonfly.go b/vendor/github.com/godbus/dbus/transport_unixcred_dragonfly.go
new file mode 100644
index 00000000..a8cd3939
--- /dev/null
+++ b/vendor/github.com/godbus/dbus/transport_unixcred_dragonfly.go
@@ -0,0 +1,95 @@
+// The UnixCredentials system call is currently only implemented on Linux
+// http://golang.org/src/pkg/syscall/sockcmsg_linux.go
+// https://golang.org/s/go1.4-syscall
+// http://code.google.com/p/go/source/browse/unix/sockcmsg_linux.go?repo=sys
+
+// Local implementation of the UnixCredentials system call for DragonFly BSD
+
+package dbus
+
+/*
+#include <sys/ucred.h>
+*/
+import "C"
+
+import (
+ "io"
+ "os"
+ "syscall"
+ "unsafe"
+)
+
+// http://golang.org/src/pkg/syscall/ztypes_linux_amd64.go
+// http://golang.org/src/pkg/syscall/ztypes_dragonfly_amd64.go
+type Ucred struct {
+ Pid int32
+ Uid uint32
+ Gid uint32
+}
+
+// http://golang.org/src/pkg/syscall/types_linux.go
+// http://golang.org/src/pkg/syscall/types_dragonfly.go
+// https://github.com/DragonFlyBSD/DragonFlyBSD/blob/master/sys/sys/ucred.h
+const (
+ SizeofUcred = C.sizeof_struct_ucred
+)
+
+// http://golang.org/src/pkg/syscall/sockcmsg_unix.go
+func cmsgAlignOf(salen int) int {
+ // From http://golang.org/src/pkg/syscall/sockcmsg_unix.go
+ //salign := sizeofPtr
+ // NOTE: It seems like 64-bit Darwin and DragonFly BSD kernels
+ // still require 32-bit aligned access to network subsystem.
+ //if darwin64Bit || dragonfly64Bit {
+ // salign = 4
+ //}
+ salign := 4
+ return (salen + salign - 1) & ^(salign - 1)
+}
+
+// http://golang.org/src/pkg/syscall/sockcmsg_unix.go
+func cmsgData(h *syscall.Cmsghdr) unsafe.Pointer {
+ return unsafe.Pointer(uintptr(unsafe.Pointer(h)) + uintptr(cmsgAlignOf(syscall.SizeofCmsghdr)))
+}
+
+// http://golang.org/src/pkg/syscall/sockcmsg_linux.go
+// UnixCredentials encodes credentials into a socket control message
+// for sending to another process. This can be used for
+// authentication.
+func UnixCredentials(ucred *Ucred) []byte {
+ b := make([]byte, syscall.CmsgSpace(SizeofUcred))
+ h := (*syscall.Cmsghdr)(unsafe.Pointer(&b[0]))
+ h.Level = syscall.SOL_SOCKET
+ h.Type = syscall.SCM_CREDS
+ h.SetLen(syscall.CmsgLen(SizeofUcred))
+ *((*Ucred)(cmsgData(h))) = *ucred
+ return b
+}
+
+// http://golang.org/src/pkg/syscall/sockcmsg_linux.go
+// ParseUnixCredentials decodes a socket control message that contains
+// credentials in a Ucred structure. To receive such a message, the
+// SO_PASSCRED option must be enabled on the socket.
+func ParseUnixCredentials(m *syscall.SocketControlMessage) (*Ucred, error) {
+ if m.Header.Level != syscall.SOL_SOCKET {
+ return nil, syscall.EINVAL
+ }
+ if m.Header.Type != syscall.SCM_CREDS {
+ return nil, syscall.EINVAL
+ }
+ ucred := *(*Ucred)(unsafe.Pointer(&m.Data[0]))
+ return &ucred, nil
+}
+
+func (t *unixTransport) SendNullByte() error {
+ ucred := &Ucred{Pid: int32(os.Getpid()), Uid: uint32(os.Getuid()), Gid: uint32(os.Getgid())}
+ b := UnixCredentials(ucred)
+ _, oobn, err := t.UnixConn.WriteMsgUnix([]byte{0}, b, nil)
+ if err != nil {
+ return err
+ }
+ if oobn != len(b) {
+ return io.ErrShortWrite
+ }
+ return nil
+}
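
UnixCredentials and ParseUnixCredentials above are local stand-ins for the Linux-only helpers in package syscall, so they compose with syscall.ParseSocketControlMessage as usual. A minimal in-package round-trip sketch (it assumes this DragonFly-specific file is the one being compiled):

    func exampleCredentialsRoundTrip() (*Ucred, error) {
        // Encode our own credentials into an SCM_CREDS control message,
        // exactly as SendNullByte does before writing the null byte.
        in := &Ucred{Pid: int32(os.Getpid()), Uid: uint32(os.Getuid()), Gid: uint32(os.Getgid())}
        oob := UnixCredentials(in)

        // A receiver would get this as ancillary data; parse it back out.
        scms, err := syscall.ParseSocketControlMessage(oob)
        if err != nil {
            return nil, err
        }
        return ParseUnixCredentials(&scms[0])
    }
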
diff --git a/vendor/github.com/godbus/dbus/transport_unixcred_linux.go b/vendor/github.com/godbus/dbus/transport_unixcred_linux.go
new file mode 100644
index 00000000..d9dfdf69
--- /dev/null
+++ b/vendor/github.com/godbus/dbus/transport_unixcred_linux.go
@@ -0,0 +1,25 @@
+// The UnixCredentials system call is currently only implemented on Linux
+// http://golang.org/src/pkg/syscall/sockcmsg_linux.go
+// https://golang.org/s/go1.4-syscall
+// http://code.google.com/p/go/source/browse/unix/sockcmsg_linux.go?repo=sys
+
+package dbus
+
+import (
+ "io"
+ "os"
+ "syscall"
+)
+
+func (t *unixTransport) SendNullByte() error {
+ ucred := &syscall.Ucred{Pid: int32(os.Getpid()), Uid: uint32(os.Getuid()), Gid: uint32(os.Getgid())}
+ b := syscall.UnixCredentials(ucred)
+ _, oobn, err := t.UnixConn.WriteMsgUnix([]byte{0}, b, nil)
+ if err != nil {
+ return err
+ }
+ if oobn != len(b) {
+ return io.ErrShortWrite
+ }
+ return nil
+}
diff --git a/vendor/github.com/godbus/dbus/variant.go b/vendor/github.com/godbus/dbus/variant.go
new file mode 100644
index 00000000..b7b13ae9
--- /dev/null
+++ b/vendor/github.com/godbus/dbus/variant.go
@@ -0,0 +1,139 @@
+package dbus
+
+import (
+ "bytes"
+ "fmt"
+ "reflect"
+ "sort"
+ "strconv"
+)
+
+// Variant represents the D-Bus variant type.
+type Variant struct {
+ sig Signature
+ value interface{}
+}
+
+// MakeVariant converts the given value to a Variant. It panics if v cannot be
+// represented as a D-Bus type.
+func MakeVariant(v interface{}) Variant {
+ return Variant{SignatureOf(v), v}
+}
+
+// ParseVariant parses the given string as a variant as described at
+// https://developer.gnome.org/glib/unstable/gvariant-text.html. If sig is not
+// empty, it is taken to be the expected signature for the variant.
+func ParseVariant(s string, sig Signature) (Variant, error) {
+ tokens := varLex(s)
+ p := &varParser{tokens: tokens}
+ n, err := varMakeNode(p)
+ if err != nil {
+ return Variant{}, err
+ }
+ if sig.str == "" {
+ sig, err = varInfer(n)
+ if err != nil {
+ return Variant{}, err
+ }
+ }
+ v, err := n.Value(sig)
+ if err != nil {
+ return Variant{}, err
+ }
+ return MakeVariant(v), nil
+}
+
+// format returns a formatted version of v and whether this string can be parsed
+// unambiguously.
+func (v Variant) format() (string, bool) {
+ switch v.sig.str[0] {
+ case 'b', 'i':
+ return fmt.Sprint(v.value), true
+ case 'n', 'q', 'u', 'x', 't', 'd', 'h':
+ return fmt.Sprint(v.value), false
+ case 's':
+ return strconv.Quote(v.value.(string)), true
+ case 'o':
+ return strconv.Quote(string(v.value.(ObjectPath))), false
+ case 'g':
+ return strconv.Quote(v.value.(Signature).str), false
+ case 'v':
+ s, unamb := v.value.(Variant).format()
+ if !unamb {
+ return "<@" + v.value.(Variant).sig.str + " " + s + ">", true
+ }
+ return "<" + s + ">", true
+ case 'y':
+ return fmt.Sprintf("%#x", v.value.(byte)), false
+ }
+ rv := reflect.ValueOf(v.value)
+ switch rv.Kind() {
+ case reflect.Slice:
+ if rv.Len() == 0 {
+ return "[]", false
+ }
+ unamb := true
+ buf := bytes.NewBuffer([]byte("["))
+ for i := 0; i < rv.Len(); i++ {
+ // TODO: slooow
+ s, b := MakeVariant(rv.Index(i).Interface()).format()
+ unamb = unamb && b
+ buf.WriteString(s)
+ if i != rv.Len()-1 {
+ buf.WriteString(", ")
+ }
+ }
+ buf.WriteByte(']')
+ return buf.String(), unamb
+ case reflect.Map:
+ if rv.Len() == 0 {
+ return "{}", false
+ }
+ unamb := true
+ var buf bytes.Buffer
+ kvs := make([]string, rv.Len())
+ for i, k := range rv.MapKeys() {
+ s, b := MakeVariant(k.Interface()).format()
+ unamb = unamb && b
+ buf.Reset()
+ buf.WriteString(s)
+ buf.WriteString(": ")
+ s, b = MakeVariant(rv.MapIndex(k).Interface()).format()
+ unamb = unamb && b
+ buf.WriteString(s)
+ kvs[i] = buf.String()
+ }
+ buf.Reset()
+ buf.WriteByte('{')
+ sort.Strings(kvs)
+ for i, kv := range kvs {
+ if i > 0 {
+ buf.WriteString(", ")
+ }
+ buf.WriteString(kv)
+ }
+ buf.WriteByte('}')
+ return buf.String(), unamb
+ }
+ return `"INVALID"`, true
+}
+
+// Signature returns the D-Bus signature of the underlying value of v.
+func (v Variant) Signature() Signature {
+ return v.sig
+}
+
+// String returns the string representation of the underlying value of v as
+// described at https://developer.gnome.org/glib/unstable/gvariant-text.html.
+func (v Variant) String() string {
+ s, unamb := v.format()
+ if !unamb {
+ return "@" + v.sig.str + " " + s
+ }
+ return s
+}
+
+// Value returns the underlying value of v.
+func (v Variant) Value() interface{} {
+ return v.value
+}
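
MakeVariant, ParseVariant and String round-trip values through the GVariant text syntax linked above. A usage sketch against the exported API shown here (ParseSignature is defined elsewhere in the package; the output comments are expectations, not captured output):

    package main

    import (
        "fmt"

        "github.com/godbus/dbus"
    )

    func main() {
        // Infer the signature from the text form: "[1, 2, 3]" becomes "ai".
        v, err := dbus.ParseVariant("[1, 2, 3]", dbus.Signature{})
        if err != nil {
            panic(err)
        }
        fmt.Printf("%T %s\n", v.Value(), v.String()) // []int32 [1, 2, 3]

        // Or force a specific signature for the same text.
        sig, _ := dbus.ParseSignature("au")
        v, _ = dbus.ParseVariant("[1, 2, 3]", sig)
        fmt.Printf("%T\n", v.Value()) // []uint32

        // MakeVariant goes the other way; String() only adds an "@<sig>"
        // annotation when the text form alone would be ambiguous.
        fmt.Println(dbus.MakeVariant(map[string]int32{"answer": 42}).String())
    }
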
diff --git a/vendor/github.com/godbus/dbus/variant_lexer.go b/vendor/github.com/godbus/dbus/variant_lexer.go
new file mode 100644
index 00000000..332007d6
--- /dev/null
+++ b/vendor/github.com/godbus/dbus/variant_lexer.go
@@ -0,0 +1,284 @@
+package dbus
+
+import (
+ "fmt"
+ "strings"
+ "unicode"
+ "unicode/utf8"
+)
+
+// Heavily inspired by the lexer from text/template.
+
+type varToken struct {
+ typ varTokenType
+ val string
+}
+
+type varTokenType byte
+
+const (
+ tokEOF varTokenType = iota
+ tokError
+ tokNumber
+ tokString
+ tokBool
+ tokArrayStart
+ tokArrayEnd
+ tokDictStart
+ tokDictEnd
+ tokVariantStart
+ tokVariantEnd
+ tokComma
+ tokColon
+ tokType
+ tokByteString
+)
+
+type varLexer struct {
+ input string
+ start int
+ pos int
+ width int
+ tokens []varToken
+}
+
+type lexState func(*varLexer) lexState
+
+func varLex(s string) []varToken {
+ l := &varLexer{input: s}
+ l.run()
+ return l.tokens
+}
+
+func (l *varLexer) accept(valid string) bool {
+ if strings.IndexRune(valid, l.next()) >= 0 {
+ return true
+ }
+ l.backup()
+ return false
+}
+
+func (l *varLexer) backup() {
+ l.pos -= l.width
+}
+
+func (l *varLexer) emit(t varTokenType) {
+ l.tokens = append(l.tokens, varToken{t, l.input[l.start:l.pos]})
+ l.start = l.pos
+}
+
+func (l *varLexer) errorf(format string, v ...interface{}) lexState {
+ l.tokens = append(l.tokens, varToken{
+ tokError,
+ fmt.Sprintf(format, v...),
+ })
+ return nil
+}
+
+func (l *varLexer) ignore() {
+ l.start = l.pos
+}
+
+func (l *varLexer) next() rune {
+ var r rune
+
+ if l.pos >= len(l.input) {
+ l.width = 0
+ return -1
+ }
+ r, l.width = utf8.DecodeRuneInString(l.input[l.pos:])
+ l.pos += l.width
+ return r
+}
+
+func (l *varLexer) run() {
+ for state := varLexNormal; state != nil; {
+ state = state(l)
+ }
+}
+
+func (l *varLexer) peek() rune {
+ r := l.next()
+ l.backup()
+ return r
+}
+
+func varLexNormal(l *varLexer) lexState {
+ for {
+ r := l.next()
+ switch {
+ case r == -1:
+ l.emit(tokEOF)
+ return nil
+ case r == '[':
+ l.emit(tokArrayStart)
+ case r == ']':
+ l.emit(tokArrayEnd)
+ case r == '{':
+ l.emit(tokDictStart)
+ case r == '}':
+ l.emit(tokDictEnd)
+ case r == '<':
+ l.emit(tokVariantStart)
+ case r == '>':
+ l.emit(tokVariantEnd)
+ case r == ':':
+ l.emit(tokColon)
+ case r == ',':
+ l.emit(tokComma)
+ case r == '\'' || r == '"':
+ l.backup()
+ return varLexString
+ case r == '@':
+ l.backup()
+ return varLexType
+ case unicode.IsSpace(r):
+ l.ignore()
+ case unicode.IsNumber(r) || r == '+' || r == '-':
+ l.backup()
+ return varLexNumber
+ case r == 'b':
+ pos := l.start
+ if n := l.peek(); n == '"' || n == '\'' {
+ return varLexByteString
+ }
+ // not a byte string; try to parse it as a type or bool below
+ l.pos = pos + 1
+ l.width = 1
+ fallthrough
+ default:
+ // either a bool or a type. Try bools first.
+ l.backup()
+ if l.pos+4 <= len(l.input) {
+ if l.input[l.pos:l.pos+4] == "true" {
+ l.pos += 4
+ l.emit(tokBool)
+ continue
+ }
+ }
+ if l.pos+5 <= len(l.input) {
+ if l.input[l.pos:l.pos+5] == "false" {
+ l.pos += 5
+ l.emit(tokBool)
+ continue
+ }
+ }
+ // must be a type.
+ return varLexType
+ }
+ }
+}
+
+var varTypeMap = map[string]string{
+ "boolean": "b",
+ "byte": "y",
+ "int16": "n",
+ "uint16": "q",
+ "int32": "i",
+ "uint32": "u",
+ "int64": "x",
+ "uint64": "t",
+ "double": "d",
+ "string": "s",
+ "objectpath": "o",
+ "signature": "g",
+}
+
+func varLexByteString(l *varLexer) lexState {
+ q := l.next()
+Loop:
+ for {
+ switch l.next() {
+ case '\\':
+ if r := l.next(); r != -1 {
+ break
+ }
+ fallthrough
+ case -1:
+ return l.errorf("unterminated bytestring")
+ case q:
+ break Loop
+ }
+ }
+ l.emit(tokByteString)
+ return varLexNormal
+}
+
+func varLexNumber(l *varLexer) lexState {
+ l.accept("+-")
+ digits := "0123456789"
+ if l.accept("0") {
+ if l.accept("x") {
+ digits = "0123456789abcdefABCDEF"
+ } else {
+ digits = "01234567"
+ }
+ }
+ for strings.IndexRune(digits, l.next()) >= 0 {
+ }
+ l.backup()
+ if l.accept(".") {
+ for strings.IndexRune(digits, l.next()) >= 0 {
+ }
+ l.backup()
+ }
+ if l.accept("eE") {
+ l.accept("+-")
+ for strings.IndexRune("0123456789", l.next()) >= 0 {
+ }
+ l.backup()
+ }
+ if r := l.peek(); unicode.IsLetter(r) {
+ l.next()
+ return l.errorf("bad number syntax: %q", l.input[l.start:l.pos])
+ }
+ l.emit(tokNumber)
+ return varLexNormal
+}
+
+func varLexString(l *varLexer) lexState {
+ q := l.next()
+Loop:
+ for {
+ switch l.next() {
+ case '\\':
+ if r := l.next(); r != -1 {
+ break
+ }
+ fallthrough
+ case -1:
+ return l.errorf("unterminated string")
+ case q:
+ break Loop
+ }
+ }
+ l.emit(tokString)
+ return varLexNormal
+}
+
+func varLexType(l *varLexer) lexState {
+ at := l.accept("@")
+ for {
+ r := l.next()
+ if r == -1 {
+ break
+ }
+ if unicode.IsSpace(r) {
+ l.backup()
+ break
+ }
+ }
+ if at {
+ if _, err := ParseSignature(l.input[l.start+1 : l.pos]); err != nil {
+ return l.errorf("%s", err)
+ }
+ } else {
+ if _, ok := varTypeMap[l.input[l.start:l.pos]]; ok {
+ l.emit(tokType)
+ return varLexNormal
+ }
+ return l.errorf("unrecognized type %q", l.input[l.start:l.pos])
+ }
+ l.emit(tokType)
+ return varLexNormal
+}
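
The lexer is what gives ParseVariant its type annotations: an @-prefixed signature, a spelled-out keyword from varTypeMap, or a b-prefixed byte string. A few more lines for the body of the previous sketch's main, reusing its imports:

    // Explicit signature annotation: "@y 200" parses as a single byte.
    v, _ := dbus.ParseVariant("@y 200", dbus.Signature{})
    fmt.Printf("%T %v\n", v.Value(), v.Value()) // uint8 200

    // Keyword form from varTypeMap: "uint16 7".
    v, _ = dbus.ParseVariant("uint16 7", dbus.Signature{})
    fmt.Printf("%T\n", v.Value()) // uint16

    // Byte strings lex as tokByteString and decode to a NUL-terminated []byte.
    v, _ = dbus.ParseVariant(`b"hi"`, dbus.Signature{})
    fmt.Printf("%q\n", v.Value()) // "hi\x00"
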
diff --git a/vendor/github.com/godbus/dbus/variant_parser.go b/vendor/github.com/godbus/dbus/variant_parser.go
new file mode 100644
index 00000000..d20f5da6
--- /dev/null
+++ b/vendor/github.com/godbus/dbus/variant_parser.go
@@ -0,0 +1,817 @@
+package dbus
+
+import (
+ "bytes"
+ "errors"
+ "fmt"
+ "io"
+ "reflect"
+ "strconv"
+ "strings"
+ "unicode/utf8"
+)
+
+type varParser struct {
+ tokens []varToken
+ i int
+}
+
+func (p *varParser) backup() {
+ p.i--
+}
+
+func (p *varParser) next() varToken {
+ if p.i < len(p.tokens) {
+ t := p.tokens[p.i]
+ p.i++
+ return t
+ }
+ return varToken{typ: tokEOF}
+}
+
+type varNode interface {
+ Infer() (Signature, error)
+ String() string
+ Sigs() sigSet
+ Value(Signature) (interface{}, error)
+}
+
+func varMakeNode(p *varParser) (varNode, error) {
+ var sig Signature
+
+ for {
+ t := p.next()
+ switch t.typ {
+ case tokEOF:
+ return nil, io.ErrUnexpectedEOF
+ case tokError:
+ return nil, errors.New(t.val)
+ case tokNumber:
+ return varMakeNumNode(t, sig)
+ case tokString:
+ return varMakeStringNode(t, sig)
+ case tokBool:
+ if sig.str != "" && sig.str != "b" {
+ return nil, varTypeError{t.val, sig}
+ }
+ b, err := strconv.ParseBool(t.val)
+ if err != nil {
+ return nil, err
+ }
+ return boolNode(b), nil
+ case tokArrayStart:
+ return varMakeArrayNode(p, sig)
+ case tokVariantStart:
+ return varMakeVariantNode(p, sig)
+ case tokDictStart:
+ return varMakeDictNode(p, sig)
+ case tokType:
+ if sig.str != "" {
+ return nil, errors.New("unexpected type annotation")
+ }
+ if t.val[0] == '@' {
+ sig.str = t.val[1:]
+ } else {
+ sig.str = varTypeMap[t.val]
+ }
+ case tokByteString:
+ if sig.str != "" && sig.str != "ay" {
+ return nil, varTypeError{t.val, sig}
+ }
+ b, err := varParseByteString(t.val)
+ if err != nil {
+ return nil, err
+ }
+ return byteStringNode(b), nil
+ default:
+ return nil, fmt.Errorf("unexpected %q", t.val)
+ }
+ }
+}
+
+type varTypeError struct {
+ val string
+ sig Signature
+}
+
+func (e varTypeError) Error() string {
+ return fmt.Sprintf("dbus: can't parse %q as type %q", e.val, e.sig.str)
+}
+
+type sigSet map[Signature]bool
+
+func (s sigSet) Empty() bool {
+ return len(s) == 0
+}
+
+func (s sigSet) Intersect(s2 sigSet) sigSet {
+ r := make(sigSet)
+ for k := range s {
+ if s2[k] {
+ r[k] = true
+ }
+ }
+ return r
+}
+
+func (s sigSet) Single() (Signature, bool) {
+ if len(s) == 1 {
+ for k := range s {
+ return k, true
+ }
+ }
+ return Signature{}, false
+}
+
+func (s sigSet) ToArray() sigSet {
+ r := make(sigSet, len(s))
+ for k := range s {
+ r[Signature{"a" + k.str}] = true
+ }
+ return r
+}
+
+type numNode struct {
+ sig Signature
+ str string
+ val interface{}
+}
+
+var numSigSet = sigSet{
+ Signature{"y"}: true,
+ Signature{"n"}: true,
+ Signature{"q"}: true,
+ Signature{"i"}: true,
+ Signature{"u"}: true,
+ Signature{"x"}: true,
+ Signature{"t"}: true,
+ Signature{"d"}: true,
+}
+
+func (n numNode) Infer() (Signature, error) {
+ if strings.ContainsAny(n.str, ".e") {
+ return Signature{"d"}, nil
+ }
+ return Signature{"i"}, nil
+}
+
+func (n numNode) String() string {
+ return n.str
+}
+
+func (n numNode) Sigs() sigSet {
+ if n.sig.str != "" {
+ return sigSet{n.sig: true}
+ }
+ if strings.ContainsAny(n.str, ".e") {
+ return sigSet{Signature{"d"}: true}
+ }
+ return numSigSet
+}
+
+func (n numNode) Value(sig Signature) (interface{}, error) {
+ if n.sig.str != "" && n.sig != sig {
+ return nil, varTypeError{n.str, sig}
+ }
+ if n.val != nil {
+ return n.val, nil
+ }
+ return varNumAs(n.str, sig)
+}
+
+func varMakeNumNode(tok varToken, sig Signature) (varNode, error) {
+ if sig.str == "" {
+ return numNode{str: tok.val}, nil
+ }
+ num, err := varNumAs(tok.val, sig)
+ if err != nil {
+ return nil, err
+ }
+ return numNode{sig: sig, val: num}, nil
+}
+
+func varNumAs(s string, sig Signature) (interface{}, error) {
+ isUnsigned := false
+ size := 32
+ switch sig.str {
+ case "n":
+ size = 16
+ case "i":
+ case "x":
+ size = 64
+ case "y":
+ size = 8
+ isUnsigned = true
+ case "q":
+ size = 16
+ isUnsigned = true
+ case "u":
+ isUnsigned = true
+ case "t":
+ size = 64
+ isUnsigned = true
+ case "d":
+ d, err := strconv.ParseFloat(s, 64)
+ if err != nil {
+ return nil, err
+ }
+ return d, nil
+ default:
+ return nil, varTypeError{s, sig}
+ }
+ base := 10
+ if strings.HasPrefix(s, "0x") {
+ base = 16
+ s = s[2:]
+ }
+ if strings.HasPrefix(s, "0") && len(s) != 1 {
+ base = 8
+ s = s[1:]
+ }
+ if isUnsigned {
+ i, err := strconv.ParseUint(s, base, size)
+ if err != nil {
+ return nil, err
+ }
+ var v interface{} = i
+ switch sig.str {
+ case "y":
+ v = byte(i)
+ case "q":
+ v = uint16(i)
+ case "u":
+ v = uint32(i)
+ }
+ return v, nil
+ }
+ i, err := strconv.ParseInt(s, base, size)
+ if err != nil {
+ return nil, err
+ }
+ var v interface{} = i
+ switch sig.str {
+ case "n":
+ v = int16(i)
+ case "i":
+ v = int32(i)
+ }
+ return v, nil
+}
+
+type stringNode struct {
+ sig Signature
+ str string // parsed
+ val interface{} // has correct type
+}
+
+var stringSigSet = sigSet{
+ Signature{"s"}: true,
+ Signature{"g"}: true,
+ Signature{"o"}: true,
+}
+
+func (n stringNode) Infer() (Signature, error) {
+ return Signature{"s"}, nil
+}
+
+func (n stringNode) String() string {
+ return n.str
+}
+
+func (n stringNode) Sigs() sigSet {
+ if n.sig.str != "" {
+ return sigSet{n.sig: true}
+ }
+ return stringSigSet
+}
+
+func (n stringNode) Value(sig Signature) (interface{}, error) {
+ if n.sig.str != "" && n.sig != sig {
+ return nil, varTypeError{n.str, sig}
+ }
+ if n.val != nil {
+ return n.val, nil
+ }
+ switch {
+ case sig.str == "g":
+ return Signature{n.str}, nil
+ case sig.str == "o":
+ return ObjectPath(n.str), nil
+ case sig.str == "s":
+ return n.str, nil
+ default:
+ return nil, varTypeError{n.str, sig}
+ }
+}
+
+func varMakeStringNode(tok varToken, sig Signature) (varNode, error) {
+ if sig.str != "" && sig.str != "s" && sig.str != "g" && sig.str != "o" {
+ return nil, fmt.Errorf("invalid type %q for string", sig.str)
+ }
+ s, err := varParseString(tok.val)
+ if err != nil {
+ return nil, err
+ }
+ n := stringNode{str: s}
+ if sig.str == "" {
+ return stringNode{str: s}, nil
+ }
+ n.sig = sig
+ switch sig.str {
+ case "o":
+ n.val = ObjectPath(s)
+ case "g":
+ n.val = Signature{s}
+ case "s":
+ n.val = s
+ }
+ return n, nil
+}
+
+func varParseString(s string) (string, error) {
+ // quotes are guaranteed to be there
+ s = s[1 : len(s)-1]
+ buf := new(bytes.Buffer)
+ for len(s) != 0 {
+ r, size := utf8.DecodeRuneInString(s)
+ if r == utf8.RuneError && size == 1 {
+ return "", errors.New("invalid UTF-8")
+ }
+ s = s[size:]
+ if r != '\\' {
+ buf.WriteRune(r)
+ continue
+ }
+ r, size = utf8.DecodeRuneInString(s)
+ if r == utf8.RuneError && size == 1 {
+ return "", errors.New("invalid UTF-8")
+ }
+ s = s[size:]
+ switch r {
+ case 'a':
+ buf.WriteRune(0x7)
+ case 'b':
+ buf.WriteRune(0x8)
+ case 'f':
+ buf.WriteRune(0xc)
+ case 'n':
+ buf.WriteRune('\n')
+ case 'r':
+ buf.WriteRune('\r')
+ case 't':
+ buf.WriteRune('\t')
+ case '\n':
+ case 'u':
+ if len(s) < 4 {
+ return "", errors.New("short unicode escape")
+ }
+ r, err := strconv.ParseUint(s[:4], 16, 32)
+ if err != nil {
+ return "", err
+ }
+ buf.WriteRune(rune(r))
+ s = s[4:]
+ case 'U':
+ if len(s) < 8 {
+ return "", errors.New("short unicode escape")
+ }
+ r, err := strconv.ParseUint(s[:8], 16, 32)
+ if err != nil {
+ return "", err
+ }
+ buf.WriteRune(rune(r))
+ s = s[8:]
+ default:
+ buf.WriteRune(r)
+ }
+ }
+ return buf.String(), nil
+}
+
+var boolSigSet = sigSet{Signature{"b"}: true}
+
+type boolNode bool
+
+func (boolNode) Infer() (Signature, error) {
+ return Signature{"b"}, nil
+}
+
+func (b boolNode) String() string {
+ if b {
+ return "true"
+ }
+ return "false"
+}
+
+func (boolNode) Sigs() sigSet {
+ return boolSigSet
+}
+
+func (b boolNode) Value(sig Signature) (interface{}, error) {
+ if sig.str != "b" {
+ return nil, varTypeError{b.String(), sig}
+ }
+ return bool(b), nil
+}
+
+type arrayNode struct {
+ set sigSet
+ children []varNode
+ val interface{}
+}
+
+func (n arrayNode) Infer() (Signature, error) {
+ for _, v := range n.children {
+ csig, err := varInfer(v)
+ if err != nil {
+ continue
+ }
+ return Signature{"a" + csig.str}, nil
+ }
+ return Signature{}, fmt.Errorf("can't infer type for %q", n.String())
+}
+
+func (n arrayNode) String() string {
+ s := "["
+ for i, v := range n.children {
+ s += v.String()
+ if i != len(n.children)-1 {
+ s += ", "
+ }
+ }
+ return s + "]"
+}
+
+func (n arrayNode) Sigs() sigSet {
+ return n.set
+}
+
+func (n arrayNode) Value(sig Signature) (interface{}, error) {
+ if n.set.Empty() {
+ // no type information whatsoever, so this must be an empty slice
+ return reflect.MakeSlice(typeFor(sig.str), 0, 0).Interface(), nil
+ }
+ if !n.set[sig] {
+ return nil, varTypeError{n.String(), sig}
+ }
+ s := reflect.MakeSlice(typeFor(sig.str), len(n.children), len(n.children))
+ for i, v := range n.children {
+ rv, err := v.Value(Signature{sig.str[1:]})
+ if err != nil {
+ return nil, err
+ }
+ s.Index(i).Set(reflect.ValueOf(rv))
+ }
+ return s.Interface(), nil
+}
+
+func varMakeArrayNode(p *varParser, sig Signature) (varNode, error) {
+ var n arrayNode
+ if sig.str != "" {
+ n.set = sigSet{sig: true}
+ }
+ if t := p.next(); t.typ == tokArrayEnd {
+ return n, nil
+ } else {
+ p.backup()
+ }
+Loop:
+ for {
+ t := p.next()
+ switch t.typ {
+ case tokEOF:
+ return nil, io.ErrUnexpectedEOF
+ case tokError:
+ return nil, errors.New(t.val)
+ }
+ p.backup()
+ cn, err := varMakeNode(p)
+ if err != nil {
+ return nil, err
+ }
+ if cset := cn.Sigs(); !cset.Empty() {
+ if n.set.Empty() {
+ n.set = cset.ToArray()
+ } else {
+ nset := cset.ToArray().Intersect(n.set)
+ if nset.Empty() {
+ return nil, fmt.Errorf("can't parse %q with given type information", cn.String())
+ }
+ n.set = nset
+ }
+ }
+ n.children = append(n.children, cn)
+ switch t := p.next(); t.typ {
+ case tokEOF:
+ return nil, io.ErrUnexpectedEOF
+ case tokError:
+ return nil, errors.New(t.val)
+ case tokArrayEnd:
+ break Loop
+ case tokComma:
+ continue
+ default:
+ return nil, fmt.Errorf("unexpected %q", t.val)
+ }
+ }
+ return n, nil
+}
+
+type variantNode struct {
+ n varNode
+}
+
+var variantSet = sigSet{
+ Signature{"v"}: true,
+}
+
+func (variantNode) Infer() (Signature, error) {
+ return Signature{"v"}, nil
+}
+
+func (n variantNode) String() string {
+ return "<" + n.n.String() + ">"
+}
+
+func (variantNode) Sigs() sigSet {
+ return variantSet
+}
+
+func (n variantNode) Value(sig Signature) (interface{}, error) {
+ if sig.str != "v" {
+ return nil, varTypeError{n.String(), sig}
+ }
+ sig, err := varInfer(n.n)
+ if err != nil {
+ return nil, err
+ }
+ v, err := n.n.Value(sig)
+ if err != nil {
+ return nil, err
+ }
+ return MakeVariant(v), nil
+}
+
+func varMakeVariantNode(p *varParser, sig Signature) (varNode, error) {
+ n, err := varMakeNode(p)
+ if err != nil {
+ return nil, err
+ }
+ if t := p.next(); t.typ != tokVariantEnd {
+ return nil, fmt.Errorf("unexpected %q", t.val)
+ }
+ vn := variantNode{n}
+ if sig.str != "" && sig.str != "v" {
+ return nil, varTypeError{vn.String(), sig}
+ }
+ return variantNode{n}, nil
+}
+
+type dictEntry struct {
+ key, val varNode
+}
+
+type dictNode struct {
+ kset, vset sigSet
+ children []dictEntry
+ val interface{}
+}
+
+func (n dictNode) Infer() (Signature, error) {
+ for _, v := range n.children {
+ ksig, err := varInfer(v.key)
+ if err != nil {
+ continue
+ }
+ vsig, err := varInfer(v.val)
+ if err != nil {
+ continue
+ }
+ return Signature{"a{" + ksig.str + vsig.str + "}"}, nil
+ }
+ return Signature{}, fmt.Errorf("can't infer type for %q", n.String())
+}
+
+func (n dictNode) String() string {
+ s := "{"
+ for i, v := range n.children {
+ s += v.key.String() + ": " + v.val.String()
+ if i != len(n.children)-1 {
+ s += ", "
+ }
+ }
+ return s + "}"
+}
+
+func (n dictNode) Sigs() sigSet {
+ r := sigSet{}
+ for k := range n.kset {
+ for v := range n.vset {
+ sig := "a{" + k.str + v.str + "}"
+ r[Signature{sig}] = true
+ }
+ }
+ return r
+}
+
+func (n dictNode) Value(sig Signature) (interface{}, error) {
+ set := n.Sigs()
+ if set.Empty() {
+ // no type information -> empty dict
+ return reflect.MakeMap(typeFor(sig.str)).Interface(), nil
+ }
+ if !set[sig] {
+ return nil, varTypeError{n.String(), sig}
+ }
+ m := reflect.MakeMap(typeFor(sig.str))
+ ksig := Signature{sig.str[2:3]}
+ vsig := Signature{sig.str[3 : len(sig.str)-1]}
+ for _, v := range n.children {
+ kv, err := v.key.Value(ksig)
+ if err != nil {
+ return nil, err
+ }
+ vv, err := v.val.Value(vsig)
+ if err != nil {
+ return nil, err
+ }
+ m.SetMapIndex(reflect.ValueOf(kv), reflect.ValueOf(vv))
+ }
+ return m.Interface(), nil
+}
+
+func varMakeDictNode(p *varParser, sig Signature) (varNode, error) {
+ var n dictNode
+
+ if sig.str != "" {
+ if len(sig.str) < 5 {
+ return nil, fmt.Errorf("invalid signature %q for dict type", sig)
+ }
+ ksig := Signature{string(sig.str[2])}
+ vsig := Signature{sig.str[3 : len(sig.str)-1]}
+ n.kset = sigSet{ksig: true}
+ n.vset = sigSet{vsig: true}
+ }
+ if t := p.next(); t.typ == tokDictEnd {
+ return n, nil
+ } else {
+ p.backup()
+ }
+Loop:
+ for {
+ t := p.next()
+ switch t.typ {
+ case tokEOF:
+ return nil, io.ErrUnexpectedEOF
+ case tokError:
+ return nil, errors.New(t.val)
+ }
+ p.backup()
+ kn, err := varMakeNode(p)
+ if err != nil {
+ return nil, err
+ }
+ if kset := kn.Sigs(); !kset.Empty() {
+ if n.kset.Empty() {
+ n.kset = kset
+ } else {
+ n.kset = kset.Intersect(n.kset)
+ if n.kset.Empty() {
+ return nil, fmt.Errorf("can't parse %q with given type information", kn.String())
+ }
+ }
+ }
+ t = p.next()
+ switch t.typ {
+ case tokEOF:
+ return nil, io.ErrUnexpectedEOF
+ case tokError:
+ return nil, errors.New(t.val)
+ case tokColon:
+ default:
+ return nil, fmt.Errorf("unexpected %q", t.val)
+ }
+ t = p.next()
+ switch t.typ {
+ case tokEOF:
+ return nil, io.ErrUnexpectedEOF
+ case tokError:
+ return nil, errors.New(t.val)
+ }
+ p.backup()
+ vn, err := varMakeNode(p)
+ if err != nil {
+ return nil, err
+ }
+ if vset := vn.Sigs(); !vset.Empty() {
+ if n.vset.Empty() {
+ n.vset = vset
+ } else {
+ n.vset = n.vset.Intersect(vset)
+ if n.vset.Empty() {
+ return nil, fmt.Errorf("can't parse %q with given type information", vn.String())
+ }
+ }
+ }
+ n.children = append(n.children, dictEntry{kn, vn})
+ t = p.next()
+ switch t.typ {
+ case tokEOF:
+ return nil, io.ErrUnexpectedEOF
+ case tokError:
+ return nil, errors.New(t.val)
+ case tokDictEnd:
+ break Loop
+ case tokComma:
+ continue
+ default:
+ return nil, fmt.Errorf("unexpected %q", t.val)
+ }
+ }
+ return n, nil
+}
+
+type byteStringNode []byte
+
+var byteStringSet = sigSet{
+ Signature{"ay"}: true,
+}
+
+func (byteStringNode) Infer() (Signature, error) {
+ return Signature{"ay"}, nil
+}
+
+func (b byteStringNode) String() string {
+ return string(b)
+}
+
+func (b byteStringNode) Sigs() sigSet {
+ return byteStringSet
+}
+
+func (b byteStringNode) Value(sig Signature) (interface{}, error) {
+ if sig.str != "ay" {
+ return nil, varTypeError{b.String(), sig}
+ }
+ return []byte(b), nil
+}
+
+func varParseByteString(s string) ([]byte, error) {
+ // quotes and b at start are guaranteed to be there
+ b := make([]byte, 0, 1)
+ s = s[2 : len(s)-1]
+ for len(s) != 0 {
+ c := s[0]
+ s = s[1:]
+ if c != '\\' {
+ b = append(b, c)
+ continue
+ }
+ c = s[0]
+ s = s[1:]
+ switch c {
+ case 'a':
+ b = append(b, 0x7)
+ case 'b':
+ b = append(b, 0x8)
+ case 'f':
+ b = append(b, 0xc)
+ case 'n':
+ b = append(b, '\n')
+ case 'r':
+ b = append(b, '\r')
+ case 't':
+ b = append(b, '\t')
+ case 'x':
+ if len(s) < 2 {
+ return nil, errors.New("short escape")
+ }
+ n, err := strconv.ParseUint(s[:2], 16, 8)
+ if err != nil {
+ return nil, err
+ }
+ b = append(b, byte(n))
+ s = s[2:]
+ case '0':
+ if len(s) < 3 {
+ return nil, errors.New("short escape")
+ }
+ n, err := strconv.ParseUint(s[:3], 8, 8)
+ if err != nil {
+ return nil, err
+ }
+ b = append(b, byte(n))
+ s = s[3:]
+ default:
+ b = append(b, c)
+ }
+ }
+ return append(b, 0), nil
+}
+
+func varInfer(n varNode) (Signature, error) {
+ if sig, ok := n.Sigs().Single(); ok {
+ return sig, nil
+ }
+ return n.Infer()
+}
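
varMakeDictNode and the sigSet intersections above are what let a dictionary literal settle on a single concrete map type. The observable behaviour through ParseVariant, again reusing the imports from the earlier sketch:

    // Keys infer as "s" and values as "i", so the whole literal infers "a{si}".
    v, err := dbus.ParseVariant(`{"a": 1, "b": 2}`, dbus.Signature{})
    if err != nil {
        panic(err)
    }
    m := v.Value().(map[string]int32)
    fmt.Printf("%T %d %d\n", m, m["a"], m["b"]) // map[string]int32 1 2
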
diff --git a/vendor/github.com/golang/protobuf/LICENSE b/vendor/github.com/golang/protobuf/LICENSE
new file mode 100644
index 00000000..1b1b1921
--- /dev/null
+++ b/vendor/github.com/golang/protobuf/LICENSE
@@ -0,0 +1,31 @@
+Go support for Protocol Buffers - Google's data interchange format
+
+Copyright 2010 The Go Authors. All rights reserved.
+https://github.com/golang/protobuf
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are
+met:
+
+ * Redistributions of source code must retain the above copyright
+notice, this list of conditions and the following disclaimer.
+ * Redistributions in binary form must reproduce the above
+copyright notice, this list of conditions and the following disclaimer
+in the documentation and/or other materials provided with the
+distribution.
+ * Neither the name of Google Inc. nor the names of its
+contributors may be used to endorse or promote products derived from
+this software without specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
diff --git a/vendor/github.com/golang/protobuf/proto/Makefile b/vendor/github.com/golang/protobuf/proto/Makefile
new file mode 100644
index 00000000..f1f06564
--- /dev/null
+++ b/vendor/github.com/golang/protobuf/proto/Makefile
@@ -0,0 +1,43 @@
+# Go support for Protocol Buffers - Google's data interchange format
+#
+# Copyright 2010 The Go Authors. All rights reserved.
+# https://github.com/golang/protobuf
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are
+# met:
+#
+# * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# * Redistributions in binary form must reproduce the above
+# copyright notice, this list of conditions and the following disclaimer
+# in the documentation and/or other materials provided with the
+# distribution.
+# * Neither the name of Google Inc. nor the names of its
+# contributors may be used to endorse or promote products derived from
+# this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+install:
+ go install
+
+test: install generate-test-pbs
+ go test
+
+
+generate-test-pbs:
+ make install
+ make -C testdata
+ protoc --go_out=Mtestdata/test.proto=github.com/golang/protobuf/proto/testdata:. proto3_proto/proto3.proto
+ make
diff --git a/vendor/github.com/golang/protobuf/proto/clone.go b/vendor/github.com/golang/protobuf/proto/clone.go
new file mode 100644
index 00000000..e98ddec9
--- /dev/null
+++ b/vendor/github.com/golang/protobuf/proto/clone.go
@@ -0,0 +1,223 @@
+// Go support for Protocol Buffers - Google's data interchange format
+//
+// Copyright 2011 The Go Authors. All rights reserved.
+// https://github.com/golang/protobuf
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following disclaimer
+// in the documentation and/or other materials provided with the
+// distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+// Protocol buffer deep copy and merge.
+// TODO: RawMessage.
+
+package proto
+
+import (
+ "log"
+ "reflect"
+ "strings"
+)
+
+// Clone returns a deep copy of a protocol buffer.
+func Clone(pb Message) Message {
+ in := reflect.ValueOf(pb)
+ if in.IsNil() {
+ return pb
+ }
+
+ out := reflect.New(in.Type().Elem())
+ // out is empty so a merge is a deep copy.
+ mergeStruct(out.Elem(), in.Elem())
+ return out.Interface().(Message)
+}
+
+// Merge merges src into dst.
+// Required and optional fields that are set in src will be set to that value in dst.
+// Elements of repeated fields will be appended.
+// Merge panics if src and dst are not the same type, or if dst is nil.
+func Merge(dst, src Message) {
+ in := reflect.ValueOf(src)
+ out := reflect.ValueOf(dst)
+ if out.IsNil() {
+ panic("proto: nil destination")
+ }
+ if in.Type() != out.Type() {
+ // Explicit test prior to mergeStruct so that mistyped nils will fail
+ panic("proto: type mismatch")
+ }
+ if in.IsNil() {
+ // Merging nil into non-nil is a quiet no-op
+ return
+ }
+ mergeStruct(out.Elem(), in.Elem())
+}
+
+func mergeStruct(out, in reflect.Value) {
+ sprop := GetProperties(in.Type())
+ for i := 0; i < in.NumField(); i++ {
+ f := in.Type().Field(i)
+ if strings.HasPrefix(f.Name, "XXX_") {
+ continue
+ }
+ mergeAny(out.Field(i), in.Field(i), false, sprop.Prop[i])
+ }
+
+ if emIn, ok := in.Addr().Interface().(extendableProto); ok {
+ emOut := out.Addr().Interface().(extendableProto)
+ mergeExtension(emOut.ExtensionMap(), emIn.ExtensionMap())
+ }
+
+ uf := in.FieldByName("XXX_unrecognized")
+ if !uf.IsValid() {
+ return
+ }
+ uin := uf.Bytes()
+ if len(uin) > 0 {
+ out.FieldByName("XXX_unrecognized").SetBytes(append([]byte(nil), uin...))
+ }
+}
+
+// mergeAny performs a merge between two values of the same type.
+// viaPtr indicates whether the values were indirected through a pointer (implying proto2).
+// prop is set if this is a struct field (it may be nil).
+func mergeAny(out, in reflect.Value, viaPtr bool, prop *Properties) {
+ if in.Type() == protoMessageType {
+ if !in.IsNil() {
+ if out.IsNil() {
+ out.Set(reflect.ValueOf(Clone(in.Interface().(Message))))
+ } else {
+ Merge(out.Interface().(Message), in.Interface().(Message))
+ }
+ }
+ return
+ }
+ switch in.Kind() {
+ case reflect.Bool, reflect.Float32, reflect.Float64, reflect.Int32, reflect.Int64,
+ reflect.String, reflect.Uint32, reflect.Uint64:
+ if !viaPtr && isProto3Zero(in) {
+ return
+ }
+ out.Set(in)
+ case reflect.Interface:
+ // Probably a oneof field; copy non-nil values.
+ if in.IsNil() {
+ return
+ }
+ // Allocate destination if it is not set, or set to a different type.
+ // Otherwise we will merge as normal.
+ if out.IsNil() || out.Elem().Type() != in.Elem().Type() {
+ out.Set(reflect.New(in.Elem().Elem().Type())) // interface -> *T -> T -> new(T)
+ }
+ mergeAny(out.Elem(), in.Elem(), false, nil)
+ case reflect.Map:
+ if in.Len() == 0 {
+ return
+ }
+ if out.IsNil() {
+ out.Set(reflect.MakeMap(in.Type()))
+ }
+ // For maps with value types of *T or []byte we need to deep copy each value.
+ elemKind := in.Type().Elem().Kind()
+ for _, key := range in.MapKeys() {
+ var val reflect.Value
+ switch elemKind {
+ case reflect.Ptr:
+ val = reflect.New(in.Type().Elem().Elem())
+ mergeAny(val, in.MapIndex(key), false, nil)
+ case reflect.Slice:
+ val = in.MapIndex(key)
+ val = reflect.ValueOf(append([]byte{}, val.Bytes()...))
+ default:
+ val = in.MapIndex(key)
+ }
+ out.SetMapIndex(key, val)
+ }
+ case reflect.Ptr:
+ if in.IsNil() {
+ return
+ }
+ if out.IsNil() {
+ out.Set(reflect.New(in.Elem().Type()))
+ }
+ mergeAny(out.Elem(), in.Elem(), true, nil)
+ case reflect.Slice:
+ if in.IsNil() {
+ return
+ }
+ if in.Type().Elem().Kind() == reflect.Uint8 {
+ // []byte is a scalar bytes field, not a repeated field.
+
+ // Edge case: if this is in a proto3 message, a zero length
+ // bytes field is considered the zero value, and should not
+ // be merged.
+ if prop != nil && prop.proto3 && in.Len() == 0 {
+ return
+ }
+
+ // Make a deep copy.
+ // Append to []byte{} instead of []byte(nil) so that we never end up
+ // with a nil result.
+ out.SetBytes(append([]byte{}, in.Bytes()...))
+ return
+ }
+ n := in.Len()
+ if out.IsNil() {
+ out.Set(reflect.MakeSlice(in.Type(), 0, n))
+ }
+ switch in.Type().Elem().Kind() {
+ case reflect.Bool, reflect.Float32, reflect.Float64, reflect.Int32, reflect.Int64,
+ reflect.String, reflect.Uint32, reflect.Uint64:
+ out.Set(reflect.AppendSlice(out, in))
+ default:
+ for i := 0; i < n; i++ {
+ x := reflect.Indirect(reflect.New(in.Type().Elem()))
+ mergeAny(x, in.Index(i), false, nil)
+ out.Set(reflect.Append(out, x))
+ }
+ }
+ case reflect.Struct:
+ mergeStruct(out, in)
+ default:
+ // unknown type, so not a protocol buffer
+ log.Printf("proto: don't know how to copy %v", in)
+ }
+}
+
+func mergeExtension(out, in map[int32]Extension) {
+ for extNum, eIn := range in {
+ eOut := Extension{desc: eIn.desc}
+ if eIn.value != nil {
+ v := reflect.New(reflect.TypeOf(eIn.value)).Elem()
+ mergeAny(v, reflect.ValueOf(eIn.value), false, nil)
+ eOut.value = v.Interface()
+ }
+ if eIn.enc != nil {
+ eOut.enc = make([]byte, len(eIn.enc))
+ copy(eOut.enc, eIn.enc)
+ }
+
+ out[extNum] = eOut
+ }
+}
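
Clone and Merge operate on any generated Message. A hedged sketch with a hypothetical generated type (pb.Example and its Name and Tags fields are placeholders for generated code, not part of this change):

    // Deep copy: later changes to src do not leak into dst.
    src := &pb.Example{Name: proto.String("original"), Tags: []string{"a"}}
    dst := proto.Clone(src).(*pb.Example)

    // Merge appends repeated fields and copies set scalar fields.
    proto.Merge(dst, &pb.Example{Tags: []string{"b"}})
    // dst.Tags is now []string{"a", "b"}; dst.Name is still "original".
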
diff --git a/vendor/github.com/golang/protobuf/proto/decode.go b/vendor/github.com/golang/protobuf/proto/decode.go
new file mode 100644
index 00000000..5810782f
--- /dev/null
+++ b/vendor/github.com/golang/protobuf/proto/decode.go
@@ -0,0 +1,867 @@
+// Go support for Protocol Buffers - Google's data interchange format
+//
+// Copyright 2010 The Go Authors. All rights reserved.
+// https://github.com/golang/protobuf
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following disclaimer
+// in the documentation and/or other materials provided with the
+// distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+package proto
+
+/*
+ * Routines for decoding protocol buffer data to construct in-memory representations.
+ */
+
+import (
+ "errors"
+ "fmt"
+ "io"
+ "os"
+ "reflect"
+)
+
+// errOverflow is returned when an integer is too large to be represented.
+var errOverflow = errors.New("proto: integer overflow")
+
+// ErrInternalBadWireType is returned by generated code when an incorrect
+// wire type is encountered. It does not get returned to user code.
+var ErrInternalBadWireType = errors.New("proto: internal error: bad wiretype for oneof")
+
+// The fundamental decoders that interpret bytes on the wire.
+// Those that take integer types all return uint64 and are
+// therefore of type valueDecoder.
+
+// DecodeVarint reads a varint-encoded integer from the slice.
+// It returns the integer and the number of bytes consumed, or
+// zero if there is not enough.
+// This is the format for the
+// int32, int64, uint32, uint64, bool, and enum
+// protocol buffer types.
+func DecodeVarint(buf []byte) (x uint64, n int) {
+ // x, n already 0
+ for shift := uint(0); shift < 64; shift += 7 {
+ if n >= len(buf) {
+ return 0, 0
+ }
+ b := uint64(buf[n])
+ n++
+ x |= (b & 0x7F) << shift
+ if (b & 0x80) == 0 {
+ return x, n
+ }
+ }
+
+ // The number is too large to represent in a 64-bit value.
+ return 0, 0
+}
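+
+// Worked example of the wire format described above: 150 encodes as the two
+// bytes 0x96 0x01, because 150 = 0b1001_0110, whose low seven bits plus the
+// continuation bit form 0x96 and whose remaining high bit forms 0x01.
+// DecodeVarint([]byte{0x96, 0x01}) therefore returns (150, 2).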
+
+// DecodeVarint reads a varint-encoded integer from the Buffer.
+// This is the format for the
+// int32, int64, uint32, uint64, bool, and enum
+// protocol buffer types.
+func (p *Buffer) DecodeVarint() (x uint64, err error) {
+ // x, err already 0
+
+ i := p.index
+ l := len(p.buf)
+
+ for shift := uint(0); shift < 64; shift += 7 {
+ if i >= l {
+ err = io.ErrUnexpectedEOF
+ return
+ }
+ b := p.buf[i]
+ i++
+ x |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ p.index = i
+ return
+ }
+ }
+
+ // The number is too large to represent in a 64-bit value.
+ err = errOverflow
+ return
+}
+
+// DecodeFixed64 reads a 64-bit integer from the Buffer.
+// This is the format for the
+// fixed64, sfixed64, and double protocol buffer types.
+func (p *Buffer) DecodeFixed64() (x uint64, err error) {
+ // x, err already 0
+ i := p.index + 8
+ if i < 0 || i > len(p.buf) {
+ err = io.ErrUnexpectedEOF
+ return
+ }
+ p.index = i
+
+ x = uint64(p.buf[i-8])
+ x |= uint64(p.buf[i-7]) << 8
+ x |= uint64(p.buf[i-6]) << 16
+ x |= uint64(p.buf[i-5]) << 24
+ x |= uint64(p.buf[i-4]) << 32
+ x |= uint64(p.buf[i-3]) << 40
+ x |= uint64(p.buf[i-2]) << 48
+ x |= uint64(p.buf[i-1]) << 56
+ return
+}
+
+// DecodeFixed32 reads a 32-bit integer from the Buffer.
+// This is the format for the
+// fixed32, sfixed32, and float protocol buffer types.
+func (p *Buffer) DecodeFixed32() (x uint64, err error) {
+ // x, err already 0
+ i := p.index + 4
+ if i < 0 || i > len(p.buf) {
+ err = io.ErrUnexpectedEOF
+ return
+ }
+ p.index = i
+
+ x = uint64(p.buf[i-4])
+ x |= uint64(p.buf[i-3]) << 8
+ x |= uint64(p.buf[i-2]) << 16
+ x |= uint64(p.buf[i-1]) << 24
+ return
+}
+
+// DecodeZigzag64 reads a zigzag-encoded 64-bit integer
+// from the Buffer.
+// This is the format used for the sint64 protocol buffer type.
+func (p *Buffer) DecodeZigzag64() (x uint64, err error) {
+ x, err = p.DecodeVarint()
+ if err != nil {
+ return
+ }
+ x = (x >> 1) ^ uint64((int64(x&1)<<63)>>63)
+ return
+}
+
+// DecodeZigzag32 reads a zigzag-encoded 32-bit integer
+// from the Buffer.
+// This is the format used for the sint32 protocol buffer type.
+func (p *Buffer) DecodeZigzag32() (x uint64, err error) {
+ x, err = p.DecodeVarint()
+ if err != nil {
+ return
+ }
+ x = uint64((uint32(x) >> 1) ^ uint32((int32(x&1)<<31)>>31))
+ return
+}
+
+// These are not ValueDecoders: they produce an array of bytes or a string.
+// bytes, embedded messages
+
+// DecodeRawBytes reads a count-delimited byte buffer from the Buffer.
+// This is the format used for the bytes protocol buffer
+// type and for embedded messages.
+func (p *Buffer) DecodeRawBytes(alloc bool) (buf []byte, err error) {
+ n, err := p.DecodeVarint()
+ if err != nil {
+ return nil, err
+ }
+
+ nb := int(n)
+ if nb < 0 {
+ return nil, fmt.Errorf("proto: bad byte length %d", nb)
+ }
+ end := p.index + nb
+ if end < p.index || end > len(p.buf) {
+ return nil, io.ErrUnexpectedEOF
+ }
+
+ if !alloc {
+ // TODO: check whether we can get more uses of alloc=false
+ buf = p.buf[p.index:end]
+ p.index += nb
+ return
+ }
+
+ buf = make([]byte, nb)
+ copy(buf, p.buf[p.index:])
+ p.index += nb
+ return
+}
+
+// DecodeStringBytes reads an encoded string from the Buffer.
+// This is the format used for the proto2 string type.
+func (p *Buffer) DecodeStringBytes() (s string, err error) {
+ buf, err := p.DecodeRawBytes(false)
+ if err != nil {
+ return
+ }
+ return string(buf), nil
+}
+
+// Skip the next item in the buffer. Its wire type is decoded and presented as an argument.
+// If the protocol buffer has extensions, and the field matches, add it as an extension.
+// Otherwise, if the XXX_unrecognized field exists, append the skipped data there.
+func (o *Buffer) skipAndSave(t reflect.Type, tag, wire int, base structPointer, unrecField field) error {
+ oi := o.index
+
+ err := o.skip(t, tag, wire)
+ if err != nil {
+ return err
+ }
+
+ if !unrecField.IsValid() {
+ return nil
+ }
+
+ ptr := structPointer_Bytes(base, unrecField)
+
+ // Add the skipped field to struct field
+ obuf := o.buf
+
+ o.buf = *ptr
+ o.EncodeVarint(uint64(tag<<3 | wire))
+ *ptr = append(o.buf, obuf[oi:o.index]...)
+
+ o.buf = obuf
+
+ return nil
+}
+
+// Skip the next item in the buffer. Its wire type is decoded and presented as an argument.
+func (o *Buffer) skip(t reflect.Type, tag, wire int) error {
+
+ var u uint64
+ var err error
+
+ switch wire {
+ case WireVarint:
+ _, err = o.DecodeVarint()
+ case WireFixed64:
+ _, err = o.DecodeFixed64()
+ case WireBytes:
+ _, err = o.DecodeRawBytes(false)
+ case WireFixed32:
+ _, err = o.DecodeFixed32()
+ case WireStartGroup:
+ for {
+ u, err = o.DecodeVarint()
+ if err != nil {
+ break
+ }
+ fwire := int(u & 0x7)
+ if fwire == WireEndGroup {
+ break
+ }
+ ftag := int(u >> 3)
+ err = o.skip(t, ftag, fwire)
+ if err != nil {
+ break
+ }
+ }
+ default:
+ err = fmt.Errorf("proto: can't skip unknown wire type %d for %s", wire, t)
+ }
+ return err
+}
+
+// Unmarshaler is the interface representing objects that can
+// unmarshal themselves. The method should reset the receiver before
+// decoding starts. The argument points to data that may be
+// overwritten, so implementations should not keep references to the
+// buffer.
+type Unmarshaler interface {
+ Unmarshal([]byte) error
+}
+
+// Unmarshal parses the protocol buffer representation in buf and places the
+// decoded result in pb. If the struct underlying pb does not match
+// the data in buf, the results can be unpredictable.
+//
+// Unmarshal resets pb before starting to unmarshal, so any
+// existing data in pb is always removed. Use UnmarshalMerge
+// to preserve and append to existing data.
+func Unmarshal(buf []byte, pb Message) error {
+ pb.Reset()
+ return UnmarshalMerge(buf, pb)
+}
+
+// UnmarshalMerge parses the protocol buffer representation in buf and
+// writes the decoded result to pb. If the struct underlying pb does not match
+// the data in buf, the results can be unpredictable.
+//
+// UnmarshalMerge merges into existing data in pb.
+// Most code should use Unmarshal instead.
+func UnmarshalMerge(buf []byte, pb Message) error {
+ // If the object can unmarshal itself, let it.
+ if u, ok := pb.(Unmarshaler); ok {
+ return u.Unmarshal(buf)
+ }
+ return NewBuffer(buf).Unmarshal(pb)
+}
+
+// DecodeMessage reads a count-delimited message from the Buffer.
+func (p *Buffer) DecodeMessage(pb Message) error {
+ enc, err := p.DecodeRawBytes(false)
+ if err != nil {
+ return err
+ }
+ return NewBuffer(enc).Unmarshal(pb)
+}
+
+// DecodeGroup reads a tag-delimited group from the Buffer.
+func (p *Buffer) DecodeGroup(pb Message) error {
+ typ, base, err := getbase(pb)
+ if err != nil {
+ return err
+ }
+ return p.unmarshalType(typ.Elem(), GetProperties(typ.Elem()), true, base)
+}
+
+// Unmarshal parses the protocol buffer representation in the
+// Buffer and places the decoded result in pb. If the struct
+// underlying pb does not match the data in the buffer, the results can be
+// unpredictable.
+func (p *Buffer) Unmarshal(pb Message) error {
+ // If the object can unmarshal itself, let it.
+ if u, ok := pb.(Unmarshaler); ok {
+ err := u.Unmarshal(p.buf[p.index:])
+ p.index = len(p.buf)
+ return err
+ }
+
+ typ, base, err := getbase(pb)
+ if err != nil {
+ return err
+ }
+
+ err = p.unmarshalType(typ.Elem(), GetProperties(typ.Elem()), false, base)
+
+ if collectStats {
+ stats.Decode++
+ }
+
+ return err
+}
+
+// unmarshalType does the work of unmarshaling a structure.
+func (o *Buffer) unmarshalType(st reflect.Type, prop *StructProperties, is_group bool, base structPointer) error {
+ var state errorState
+ required, reqFields := prop.reqCount, uint64(0)
+
+ var err error
+ for err == nil && o.index < len(o.buf) {
+ oi := o.index
+ var u uint64
+ u, err = o.DecodeVarint()
+ if err != nil {
+ break
+ }
+ wire := int(u & 0x7)
+ if wire == WireEndGroup {
+ if is_group {
+ return nil // input is satisfied
+ }
+ return fmt.Errorf("proto: %s: wiretype end group for non-group", st)
+ }
+ tag := int(u >> 3)
+ if tag <= 0 {
+ return fmt.Errorf("proto: %s: illegal tag %d (wire type %d)", st, tag, wire)
+ }
+ fieldnum, ok := prop.decoderTags.get(tag)
+ if !ok {
+ // Maybe it's an extension?
+ if prop.extendable {
+ if e := structPointer_Interface(base, st).(extendableProto); isExtensionField(e, int32(tag)) {
+ if err = o.skip(st, tag, wire); err == nil {
+ ext := e.ExtensionMap()[int32(tag)] // may be missing
+ ext.enc = append(ext.enc, o.buf[oi:o.index]...)
+ e.ExtensionMap()[int32(tag)] = ext
+ }
+ continue
+ }
+ }
+ // Maybe it's a oneof?
+ if prop.oneofUnmarshaler != nil {
+ m := structPointer_Interface(base, st).(Message)
+ // First return value indicates whether tag is a oneof field.
+ ok, err = prop.oneofUnmarshaler(m, tag, wire, o)
+ if err == ErrInternalBadWireType {
+ // Map the error to something more descriptive.
+ // Do the formatting here to save generated code space.
+ err = fmt.Errorf("bad wiretype for oneof field in %T", m)
+ }
+ if ok {
+ continue
+ }
+ }
+ err = o.skipAndSave(st, tag, wire, base, prop.unrecField)
+ continue
+ }
+ p := prop.Prop[fieldnum]
+
+ if p.dec == nil {
+ fmt.Fprintf(os.Stderr, "proto: no protobuf decoder for %s.%s\n", st, st.Field(fieldnum).Name)
+ continue
+ }
+ dec := p.dec
+ if wire != WireStartGroup && wire != p.WireType {
+ if wire == WireBytes && p.packedDec != nil {
+ // a packable field
+ dec = p.packedDec
+ } else {
+ err = fmt.Errorf("proto: bad wiretype for field %s.%s: got wiretype %d, want %d", st, st.Field(fieldnum).Name, wire, p.WireType)
+ continue
+ }
+ }
+ decErr := dec(o, p, base)
+ if decErr != nil && !state.shouldContinue(decErr, p) {
+ err = decErr
+ }
+ if err == nil && p.Required {
+ // Successfully decoded a required field.
+ if tag <= 64 {
+ // use bitmap for fields 1-64 to catch field reuse.
+ var mask uint64 = 1 << uint64(tag-1)
+ if reqFields&mask == 0 {
+ // new required field
+ reqFields |= mask
+ required--
+ }
+ } else {
+ // This is imprecise. It can be fooled by a required field
+ // with a tag > 64 that is encoded twice; that's very rare.
+ // A fully correct implementation would require allocating
+ // a data structure, which we would like to avoid.
+ required--
+ }
+ }
+ }
+ if err == nil {
+ if is_group {
+ return io.ErrUnexpectedEOF
+ }
+ if state.err != nil {
+ return state.err
+ }
+ if required > 0 {
+ // Not enough information to determine the exact field. If we use extra
+ // CPU, we could determine the field only if the missing required field
+ // has a tag <= 64 and we check reqFields.
+ return &RequiredNotSetError{"{Unknown}"}
+ }
+ }
+ return err
+}
+
+// Individual type decoders
+// For each,
+// u is the decoded value,
+// v is a pointer to the field (pointer) in the struct
+
+// Sizes of the pools to allocate inside the Buffer.
+// The goal is modest amortization and allocation
+// on at least 16-byte boundaries.
+const (
+ boolPoolSize = 16
+ uint32PoolSize = 8
+ uint64PoolSize = 4
+)
+
+// Decode a bool.
+func (o *Buffer) dec_bool(p *Properties, base structPointer) error {
+ u, err := p.valDec(o)
+ if err != nil {
+ return err
+ }
+ if len(o.bools) == 0 {
+ o.bools = make([]bool, boolPoolSize)
+ }
+ o.bools[0] = u != 0
+ *structPointer_Bool(base, p.field) = &o.bools[0]
+ o.bools = o.bools[1:]
+ return nil
+}
+
+func (o *Buffer) dec_proto3_bool(p *Properties, base structPointer) error {
+ u, err := p.valDec(o)
+ if err != nil {
+ return err
+ }
+ *structPointer_BoolVal(base, p.field) = u != 0
+ return nil
+}
+
+// Decode an int32.
+func (o *Buffer) dec_int32(p *Properties, base structPointer) error {
+ u, err := p.valDec(o)
+ if err != nil {
+ return err
+ }
+ word32_Set(structPointer_Word32(base, p.field), o, uint32(u))
+ return nil
+}
+
+func (o *Buffer) dec_proto3_int32(p *Properties, base structPointer) error {
+ u, err := p.valDec(o)
+ if err != nil {
+ return err
+ }
+ word32Val_Set(structPointer_Word32Val(base, p.field), uint32(u))
+ return nil
+}
+
+// Decode an int64.
+func (o *Buffer) dec_int64(p *Properties, base structPointer) error {
+ u, err := p.valDec(o)
+ if err != nil {
+ return err
+ }
+ word64_Set(structPointer_Word64(base, p.field), o, u)
+ return nil
+}
+
+func (o *Buffer) dec_proto3_int64(p *Properties, base structPointer) error {
+ u, err := p.valDec(o)
+ if err != nil {
+ return err
+ }
+ word64Val_Set(structPointer_Word64Val(base, p.field), o, u)
+ return nil
+}
+
+// Decode a string.
+func (o *Buffer) dec_string(p *Properties, base structPointer) error {
+ s, err := o.DecodeStringBytes()
+ if err != nil {
+ return err
+ }
+ *structPointer_String(base, p.field) = &s
+ return nil
+}
+
+func (o *Buffer) dec_proto3_string(p *Properties, base structPointer) error {
+ s, err := o.DecodeStringBytes()
+ if err != nil {
+ return err
+ }
+ *structPointer_StringVal(base, p.field) = s
+ return nil
+}
+
+// Decode a slice of bytes ([]byte).
+func (o *Buffer) dec_slice_byte(p *Properties, base structPointer) error {
+ b, err := o.DecodeRawBytes(true)
+ if err != nil {
+ return err
+ }
+ *structPointer_Bytes(base, p.field) = b
+ return nil
+}
+
+// Decode a slice of bools ([]bool).
+func (o *Buffer) dec_slice_bool(p *Properties, base structPointer) error {
+ u, err := p.valDec(o)
+ if err != nil {
+ return err
+ }
+ v := structPointer_BoolSlice(base, p.field)
+ *v = append(*v, u != 0)
+ return nil
+}
+
+// Decode a slice of bools ([]bool) in packed format.
+func (o *Buffer) dec_slice_packed_bool(p *Properties, base structPointer) error {
+ v := structPointer_BoolSlice(base, p.field)
+
+ nn, err := o.DecodeVarint()
+ if err != nil {
+ return err
+ }
+ nb := int(nn) // number of bytes of encoded bools
+ fin := o.index + nb
+ if fin < o.index {
+ return errOverflow
+ }
+
+ y := *v
+ for o.index < fin {
+ u, err := p.valDec(o)
+ if err != nil {
+ return err
+ }
+ y = append(y, u != 0)
+ }
+
+ *v = y
+ return nil
+}
+
+// Decode a slice of int32s ([]int32).
+func (o *Buffer) dec_slice_int32(p *Properties, base structPointer) error {
+ u, err := p.valDec(o)
+ if err != nil {
+ return err
+ }
+ structPointer_Word32Slice(base, p.field).Append(uint32(u))
+ return nil
+}
+
+// Decode a slice of int32s ([]int32) in packed format.
+func (o *Buffer) dec_slice_packed_int32(p *Properties, base structPointer) error {
+ v := structPointer_Word32Slice(base, p.field)
+
+ nn, err := o.DecodeVarint()
+ if err != nil {
+ return err
+ }
+ nb := int(nn) // number of bytes of encoded int32s
+
+ fin := o.index + nb
+ if fin < o.index {
+ return errOverflow
+ }
+ for o.index < fin {
+ u, err := p.valDec(o)
+ if err != nil {
+ return err
+ }
+ v.Append(uint32(u))
+ }
+ return nil
+}
+
+// Decode a slice of int64s ([]int64).
+func (o *Buffer) dec_slice_int64(p *Properties, base structPointer) error {
+ u, err := p.valDec(o)
+ if err != nil {
+ return err
+ }
+
+ structPointer_Word64Slice(base, p.field).Append(u)
+ return nil
+}
+
+// Decode a slice of int64s ([]int64) in packed format.
+func (o *Buffer) dec_slice_packed_int64(p *Properties, base structPointer) error {
+ v := structPointer_Word64Slice(base, p.field)
+
+ nn, err := o.DecodeVarint()
+ if err != nil {
+ return err
+ }
+ nb := int(nn) // number of bytes of encoded int64s
+
+ fin := o.index + nb
+ if fin < o.index {
+ return errOverflow
+ }
+ for o.index < fin {
+ u, err := p.valDec(o)
+ if err != nil {
+ return err
+ }
+ v.Append(u)
+ }
+ return nil
+}
+
+// Decode a slice of strings ([]string).
+func (o *Buffer) dec_slice_string(p *Properties, base structPointer) error {
+ s, err := o.DecodeStringBytes()
+ if err != nil {
+ return err
+ }
+ v := structPointer_StringSlice(base, p.field)
+ *v = append(*v, s)
+ return nil
+}
+
+// Decode a slice of slice of bytes ([][]byte).
+func (o *Buffer) dec_slice_slice_byte(p *Properties, base structPointer) error {
+ b, err := o.DecodeRawBytes(true)
+ if err != nil {
+ return err
+ }
+ v := structPointer_BytesSlice(base, p.field)
+ *v = append(*v, b)
+ return nil
+}
+
+// Decode a map field.
+func (o *Buffer) dec_new_map(p *Properties, base structPointer) error {
+ raw, err := o.DecodeRawBytes(false)
+ if err != nil {
+ return err
+ }
+ oi := o.index // index at the end of this map entry
+ o.index -= len(raw) // move buffer back to start of map entry
+
+ mptr := structPointer_NewAt(base, p.field, p.mtype) // *map[K]V
+ if mptr.Elem().IsNil() {
+ mptr.Elem().Set(reflect.MakeMap(mptr.Type().Elem()))
+ }
+ v := mptr.Elem() // map[K]V
+
+ // Prepare addressable doubly-indirect placeholders for the key and value types.
+ // See enc_new_map for why.
+ keyptr := reflect.New(reflect.PtrTo(p.mtype.Key())).Elem() // addressable *K
+ keybase := toStructPointer(keyptr.Addr()) // **K
+
+ var valbase structPointer
+ var valptr reflect.Value
+ switch p.mtype.Elem().Kind() {
+ case reflect.Slice:
+ // []byte
+ var dummy []byte
+ valptr = reflect.ValueOf(&dummy) // *[]byte
+ valbase = toStructPointer(valptr) // *[]byte
+ case reflect.Ptr:
+ // message; valptr is **Msg; need to allocate the intermediate pointer
+ valptr = reflect.New(reflect.PtrTo(p.mtype.Elem())).Elem() // addressable *V
+ valptr.Set(reflect.New(valptr.Type().Elem()))
+ valbase = toStructPointer(valptr)
+ default:
+ // everything else
+ valptr = reflect.New(reflect.PtrTo(p.mtype.Elem())).Elem() // addressable *V
+ valbase = toStructPointer(valptr.Addr()) // **V
+ }
+
+ // Decode.
+ // This parses a restricted wire format, namely the encoding of a message
+ // with two fields. See enc_new_map for the format.
+ for o.index < oi {
+ // tagcode for key and value properties are always a single byte
+ // because they have tags 1 and 2.
+ tagcode := o.buf[o.index]
+ o.index++
+ switch tagcode {
+ case p.mkeyprop.tagcode[0]:
+ if err := p.mkeyprop.dec(o, p.mkeyprop, keybase); err != nil {
+ return err
+ }
+ case p.mvalprop.tagcode[0]:
+ if err := p.mvalprop.dec(o, p.mvalprop, valbase); err != nil {
+ return err
+ }
+ default:
+ // TODO: Should we silently skip this instead?
+ return fmt.Errorf("proto: bad map data tag %d", raw[0])
+ }
+ }
+ keyelem, valelem := keyptr.Elem(), valptr.Elem()
+ if !keyelem.IsValid() || !valelem.IsValid() {
+ // We did not decode the key or the value in the map entry.
+ // Either way, it's an invalid map entry.
+ return fmt.Errorf("proto: bad map data: missing key/val")
+ }
+
+ v.SetMapIndex(keyelem, valelem)
+ return nil
+}
+
+// Decode a group.
+func (o *Buffer) dec_struct_group(p *Properties, base structPointer) error {
+ bas := structPointer_GetStructPointer(base, p.field)
+ if structPointer_IsNil(bas) {
+ // allocate new nested message
+ bas = toStructPointer(reflect.New(p.stype))
+ structPointer_SetStructPointer(base, p.field, bas)
+ }
+ return o.unmarshalType(p.stype, p.sprop, true, bas)
+}
+
+// Decode an embedded message.
+func (o *Buffer) dec_struct_message(p *Properties, base structPointer) (err error) {
+ raw, e := o.DecodeRawBytes(false)
+ if e != nil {
+ return e
+ }
+
+ bas := structPointer_GetStructPointer(base, p.field)
+ if structPointer_IsNil(bas) {
+ // allocate new nested message
+ bas = toStructPointer(reflect.New(p.stype))
+ structPointer_SetStructPointer(base, p.field, bas)
+ }
+
+ // If the object can unmarshal itself, let it.
+ if p.isUnmarshaler {
+ iv := structPointer_Interface(bas, p.stype)
+ return iv.(Unmarshaler).Unmarshal(raw)
+ }
+
+ obuf := o.buf
+ oi := o.index
+ o.buf = raw
+ o.index = 0
+
+ err = o.unmarshalType(p.stype, p.sprop, false, bas)
+ o.buf = obuf
+ o.index = oi
+
+ return err
+}
+
+// Decode a slice of embedded messages.
+func (o *Buffer) dec_slice_struct_message(p *Properties, base structPointer) error {
+ return o.dec_slice_struct(p, false, base)
+}
+
+// Decode a slice of embedded groups.
+func (o *Buffer) dec_slice_struct_group(p *Properties, base structPointer) error {
+ return o.dec_slice_struct(p, true, base)
+}
+
+// Decode a slice of structs ([]*struct).
+func (o *Buffer) dec_slice_struct(p *Properties, is_group bool, base structPointer) error {
+ v := reflect.New(p.stype)
+ bas := toStructPointer(v)
+ structPointer_StructPointerSlice(base, p.field).Append(bas)
+
+ if is_group {
+ err := o.unmarshalType(p.stype, p.sprop, is_group, bas)
+ return err
+ }
+
+ raw, err := o.DecodeRawBytes(false)
+ if err != nil {
+ return err
+ }
+
+ // If the object can unmarshal itself, let it.
+ if p.isUnmarshaler {
+ iv := v.Interface()
+ return iv.(Unmarshaler).Unmarshal(raw)
+ }
+
+ obuf := o.buf
+ oi := o.index
+ o.buf = raw
+ o.index = 0
+
+ err = o.unmarshalType(p.stype, p.sprop, is_group, bas)
+
+ o.buf = obuf
+ o.index = oi
+
+ return err
+}
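The dec_* helpers above are all built on the wire-level primitives that proto.Buffer exports (DecodeVarint, DecodeStringBytes, DecodeRawBytes). Below is a minimal sketch of driving those exported primitives directly; the byte values are hand-assembled for illustration and are not part of the vendored file:

package main

import (
	"fmt"

	"github.com/golang/protobuf/proto"
)

func main() {
	// Hand-assembled wire bytes: the varint 300 (0xac 0x02) followed by a
	// length-delimited string "hi" (length 2, then the two bytes).
	raw := []byte{0xAC, 0x02, 0x02, 'h', 'i'}

	b := proto.NewBuffer(raw)

	n, err := b.DecodeVarint()
	if err != nil {
		panic(err)
	}
	s, err := b.DecodeStringBytes()
	if err != nil {
		panic(err)
	}
	fmt.Println(n, s) // 300 hi
}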
diff --git a/vendor/github.com/golang/protobuf/proto/encode.go b/vendor/github.com/golang/protobuf/proto/encode.go
new file mode 100644
index 00000000..231b0740
--- /dev/null
+++ b/vendor/github.com/golang/protobuf/proto/encode.go
@@ -0,0 +1,1325 @@
+// Go support for Protocol Buffers - Google's data interchange format
+//
+// Copyright 2010 The Go Authors. All rights reserved.
+// https://github.com/golang/protobuf
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following disclaimer
+// in the documentation and/or other materials provided with the
+// distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+package proto
+
+/*
+ * Routines for encoding data into the wire format for protocol buffers.
+ */
+
+import (
+ "errors"
+ "fmt"
+ "reflect"
+ "sort"
+)
+
+// RequiredNotSetError is the error returned if Marshal is called with
+// a protocol buffer struct whose required fields have not
+// all been initialized. It is also the error returned if Unmarshal is
+// called with an encoded protocol buffer that does not include all the
+// required fields.
+//
+// When printed, RequiredNotSetError reports the first unset required field in a
+// message. If the field cannot be precisely determined, it is reported as
+// "{Unknown}".
+type RequiredNotSetError struct {
+ field string
+}
+
+func (e *RequiredNotSetError) Error() string {
+ return fmt.Sprintf("proto: required field %q not set", e.field)
+}
+
+var (
+ // errRepeatedHasNil is the error returned if Marshal is called with
+ // a struct with a repeated field containing a nil element.
+ errRepeatedHasNil = errors.New("proto: repeated field has nil element")
+
+ // ErrNil is the error returned if Marshal is called with nil.
+ ErrNil = errors.New("proto: Marshal called with nil")
+)
+
+// The fundamental encoders that put bytes on the wire.
+// Those that take integer types all accept uint64 and are
+// therefore of type valueEncoder.
+
+const maxVarintBytes = 10 // maximum length of a varint
+
+// EncodeVarint returns the varint encoding of x.
+// This is the format for the
+// int32, int64, uint32, uint64, bool, and enum
+// protocol buffer types.
+// Not used by the package itself, but helpful to clients
+// wishing to use the same encoding.
+func EncodeVarint(x uint64) []byte {
+ var buf [maxVarintBytes]byte
+ var n int
+ for n = 0; x > 127; n++ {
+ buf[n] = 0x80 | uint8(x&0x7F)
+ x >>= 7
+ }
+ buf[n] = uint8(x)
+ n++
+ return buf[0:n]
+}
+
+// EncodeVarint writes a varint-encoded integer to the Buffer.
+// This is the format for the
+// int32, int64, uint32, uint64, bool, and enum
+// protocol buffer types.
+func (p *Buffer) EncodeVarint(x uint64) error {
+ for x >= 1<<7 {
+ p.buf = append(p.buf, uint8(x&0x7f|0x80))
+ x >>= 7
+ }
+ p.buf = append(p.buf, uint8(x))
+ return nil
+}
+
+// SizeVarint returns the varint encoding size of an integer.
+func SizeVarint(x uint64) int {
+ return sizeVarint(x)
+}
+
+func sizeVarint(x uint64) (n int) {
+ for {
+ n++
+ x >>= 7
+ if x == 0 {
+ break
+ }
+ }
+ return n
+}
+
+// EncodeFixed64 writes a 64-bit integer to the Buffer.
+// This is the format for the
+// fixed64, sfixed64, and double protocol buffer types.
+func (p *Buffer) EncodeFixed64(x uint64) error {
+ p.buf = append(p.buf,
+ uint8(x),
+ uint8(x>>8),
+ uint8(x>>16),
+ uint8(x>>24),
+ uint8(x>>32),
+ uint8(x>>40),
+ uint8(x>>48),
+ uint8(x>>56))
+ return nil
+}
+
+func sizeFixed64(x uint64) int {
+ return 8
+}
+
+// EncodeFixed32 writes a 32-bit integer to the Buffer.
+// This is the format for the
+// fixed32, sfixed32, and float protocol buffer types.
+func (p *Buffer) EncodeFixed32(x uint64) error {
+ p.buf = append(p.buf,
+ uint8(x),
+ uint8(x>>8),
+ uint8(x>>16),
+ uint8(x>>24))
+ return nil
+}
+
+func sizeFixed32(x uint64) int {
+ return 4
+}
+
+// EncodeZigzag64 writes a zigzag-encoded 64-bit integer
+// to the Buffer.
+// This is the format used for the sint64 protocol buffer type.
+func (p *Buffer) EncodeZigzag64(x uint64) error {
+ // use signed number to get arithmetic right shift.
+ return p.EncodeVarint(uint64((x << 1) ^ uint64((int64(x) >> 63))))
+}
+
+func sizeZigzag64(x uint64) int {
+ return sizeVarint(uint64((x << 1) ^ uint64((int64(x) >> 63))))
+}
+
+// EncodeZigzag32 writes a zigzag-encoded 32-bit integer
+// to the Buffer.
+// This is the format used for the sint32 protocol buffer type.
+func (p *Buffer) EncodeZigzag32(x uint64) error {
+ // use signed number to get arithmetic right shift.
+ return p.EncodeVarint(uint64((uint32(x) << 1) ^ uint32((int32(x) >> 31))))
+}
+
+func sizeZigzag32(x uint64) int {
+ return sizeVarint(uint64((uint32(x) << 1) ^ uint32((int32(x) >> 31))))
+}
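As a quick illustration of the varint and zigzag encoders above (a sketch against the exported API only, not part of the vendored file), zigzag is what keeps small negative sint values down to a single wire byte:

package main

import (
	"fmt"

	"github.com/golang/protobuf/proto"
)

func main() {
	fmt.Printf("% x\n", proto.EncodeVarint(300)) // ac 02

	// Zigzag maps -1 -> 1, 1 -> 2, -2 -> 3, ..., so a small negative
	// sint64 still encodes to one varint byte.
	b := proto.NewBuffer(nil)
	_ = b.EncodeZigzag64(uint64(int64(-1)))
	fmt.Printf("% x\n", b.Bytes()) // 01
}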
+
+// EncodeRawBytes writes a count-delimited byte buffer to the Buffer.
+// This is the format used for the bytes protocol buffer
+// type and for embedded messages.
+func (p *Buffer) EncodeRawBytes(b []byte) error {
+ p.EncodeVarint(uint64(len(b)))
+ p.buf = append(p.buf, b...)
+ return nil
+}
+
+func sizeRawBytes(b []byte) int {
+ return sizeVarint(uint64(len(b))) +
+ len(b)
+}
+
+// EncodeStringBytes writes an encoded string to the Buffer.
+// This is the format used for the proto2 string type.
+func (p *Buffer) EncodeStringBytes(s string) error {
+ p.EncodeVarint(uint64(len(s)))
+ p.buf = append(p.buf, s...)
+ return nil
+}
+
+func sizeStringBytes(s string) int {
+ return sizeVarint(uint64(len(s))) +
+ len(s)
+}
+
+// Marshaler is the interface representing objects that can marshal themselves.
+type Marshaler interface {
+ Marshal() ([]byte, error)
+}
+
+// Marshal takes the protocol buffer
+// and encodes it into the wire format, returning the data.
+func Marshal(pb Message) ([]byte, error) {
+ // Can the object marshal itself?
+ if m, ok := pb.(Marshaler); ok {
+ return m.Marshal()
+ }
+ p := NewBuffer(nil)
+ err := p.Marshal(pb)
+ var state errorState
+ if err != nil && !state.shouldContinue(err, nil) {
+ return nil, err
+ }
+ if p.buf == nil && err == nil {
+ // Return a non-nil slice on success.
+ return []byte{}, nil
+ }
+ return p.buf, err
+}
+
+// EncodeMessage writes the protocol buffer to the Buffer,
+// prefixed by a varint-encoded length.
+func (p *Buffer) EncodeMessage(pb Message) error {
+ t, base, err := getbase(pb)
+ if structPointer_IsNil(base) {
+ return ErrNil
+ }
+ if err == nil {
+ var state errorState
+ err = p.enc_len_struct(GetProperties(t.Elem()), base, &state)
+ }
+ return err
+}
+
+// Marshal takes the protocol buffer
+// and encodes it into the wire format, writing the result to the
+// Buffer.
+func (p *Buffer) Marshal(pb Message) error {
+ // Can the object marshal itself?
+ if m, ok := pb.(Marshaler); ok {
+ data, err := m.Marshal()
+ if err != nil {
+ return err
+ }
+ p.buf = append(p.buf, data...)
+ return nil
+ }
+
+ t, base, err := getbase(pb)
+ if structPointer_IsNil(base) {
+ return ErrNil
+ }
+ if err == nil {
+ err = p.enc_struct(GetProperties(t.Elem()), base)
+ }
+
+ if collectStats {
+ stats.Encode++
+ }
+
+ return err
+}
+
+// Size returns the encoded size of a protocol buffer.
+func Size(pb Message) (n int) {
+ // Can the object marshal itself? If so, Size is slow.
+ // TODO: add Size to Marshaler, or add a Sizer interface.
+ if m, ok := pb.(Marshaler); ok {
+ b, _ := m.Marshal()
+ return len(b)
+ }
+
+ t, base, err := getbase(pb)
+ if structPointer_IsNil(base) {
+ return 0
+ }
+ if err == nil {
+ n = size_struct(GetProperties(t.Elem()), base)
+ }
+
+ if collectStats {
+ stats.Size++
+ }
+
+ return
+}
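Marshal, Unmarshal and Size are the public entry points for the encoder above. The sketch below round-trips a hand-written message; the Example type and its field numbers are hypothetical stand-ins for what generated code would normally provide:

package main

import (
	"fmt"

	"github.com/golang/protobuf/proto"
)

// Example is a hand-written stand-in for a generated message; the protobuf
// struct tags are what drive the reflection-based encoder and decoder.
type Example struct {
	Id               *int64  `protobuf:"varint,1,opt,name=id"`
	Name             *string `protobuf:"bytes,2,opt,name=name"`
	XXX_unrecognized []byte  `json:"-"`
}

func (m *Example) Reset()         { *m = Example{} }
func (m *Example) String() string { return proto.CompactTextString(m) }
func (*Example) ProtoMessage()    {}

func main() {
	in := &Example{Id: proto.Int64(7), Name: proto.String("demo")}

	data, err := proto.Marshal(in)
	if err != nil {
		panic(err)
	}
	fmt.Println(proto.Size(in) == len(data)) // true

	out := &Example{}
	if err := proto.Unmarshal(data, out); err != nil {
		panic(err)
	}
	fmt.Println(*out.Id, *out.Name) // 7 demo
}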
+
+// Individual type encoders.
+
+// Encode a bool.
+func (o *Buffer) enc_bool(p *Properties, base structPointer) error {
+ v := *structPointer_Bool(base, p.field)
+ if v == nil {
+ return ErrNil
+ }
+ x := 0
+ if *v {
+ x = 1
+ }
+ o.buf = append(o.buf, p.tagcode...)
+ p.valEnc(o, uint64(x))
+ return nil
+}
+
+func (o *Buffer) enc_proto3_bool(p *Properties, base structPointer) error {
+ v := *structPointer_BoolVal(base, p.field)
+ if !v {
+ return ErrNil
+ }
+ o.buf = append(o.buf, p.tagcode...)
+ p.valEnc(o, 1)
+ return nil
+}
+
+func size_bool(p *Properties, base structPointer) int {
+ v := *structPointer_Bool(base, p.field)
+ if v == nil {
+ return 0
+ }
+ return len(p.tagcode) + 1 // each bool takes exactly one byte
+}
+
+func size_proto3_bool(p *Properties, base structPointer) int {
+ v := *structPointer_BoolVal(base, p.field)
+ if !v && !p.oneof {
+ return 0
+ }
+ return len(p.tagcode) + 1 // each bool takes exactly one byte
+}
+
+// Encode an int32.
+func (o *Buffer) enc_int32(p *Properties, base structPointer) error {
+ v := structPointer_Word32(base, p.field)
+ if word32_IsNil(v) {
+ return ErrNil
+ }
+ x := int32(word32_Get(v)) // permit sign extension to use full 64-bit range
+ o.buf = append(o.buf, p.tagcode...)
+ p.valEnc(o, uint64(x))
+ return nil
+}
+
+func (o *Buffer) enc_proto3_int32(p *Properties, base structPointer) error {
+ v := structPointer_Word32Val(base, p.field)
+ x := int32(word32Val_Get(v)) // permit sign extension to use full 64-bit range
+ if x == 0 {
+ return ErrNil
+ }
+ o.buf = append(o.buf, p.tagcode...)
+ p.valEnc(o, uint64(x))
+ return nil
+}
+
+func size_int32(p *Properties, base structPointer) (n int) {
+ v := structPointer_Word32(base, p.field)
+ if word32_IsNil(v) {
+ return 0
+ }
+ x := int32(word32_Get(v)) // permit sign extension to use full 64-bit range
+ n += len(p.tagcode)
+ n += p.valSize(uint64(x))
+ return
+}
+
+func size_proto3_int32(p *Properties, base structPointer) (n int) {
+ v := structPointer_Word32Val(base, p.field)
+ x := int32(word32Val_Get(v)) // permit sign extension to use full 64-bit range
+ if x == 0 && !p.oneof {
+ return 0
+ }
+ n += len(p.tagcode)
+ n += p.valSize(uint64(x))
+ return
+}
+
+// Encode a uint32.
+// Exactly the same as int32, except for no sign extension.
+func (o *Buffer) enc_uint32(p *Properties, base structPointer) error {
+ v := structPointer_Word32(base, p.field)
+ if word32_IsNil(v) {
+ return ErrNil
+ }
+ x := word32_Get(v)
+ o.buf = append(o.buf, p.tagcode...)
+ p.valEnc(o, uint64(x))
+ return nil
+}
+
+func (o *Buffer) enc_proto3_uint32(p *Properties, base structPointer) error {
+ v := structPointer_Word32Val(base, p.field)
+ x := word32Val_Get(v)
+ if x == 0 {
+ return ErrNil
+ }
+ o.buf = append(o.buf, p.tagcode...)
+ p.valEnc(o, uint64(x))
+ return nil
+}
+
+func size_uint32(p *Properties, base structPointer) (n int) {
+ v := structPointer_Word32(base, p.field)
+ if word32_IsNil(v) {
+ return 0
+ }
+ x := word32_Get(v)
+ n += len(p.tagcode)
+ n += p.valSize(uint64(x))
+ return
+}
+
+func size_proto3_uint32(p *Properties, base structPointer) (n int) {
+ v := structPointer_Word32Val(base, p.field)
+ x := word32Val_Get(v)
+ if x == 0 && !p.oneof {
+ return 0
+ }
+ n += len(p.tagcode)
+ n += p.valSize(uint64(x))
+ return
+}
+
+// Encode an int64.
+func (o *Buffer) enc_int64(p *Properties, base structPointer) error {
+ v := structPointer_Word64(base, p.field)
+ if word64_IsNil(v) {
+ return ErrNil
+ }
+ x := word64_Get(v)
+ o.buf = append(o.buf, p.tagcode...)
+ p.valEnc(o, x)
+ return nil
+}
+
+func (o *Buffer) enc_proto3_int64(p *Properties, base structPointer) error {
+ v := structPointer_Word64Val(base, p.field)
+ x := word64Val_Get(v)
+ if x == 0 {
+ return ErrNil
+ }
+ o.buf = append(o.buf, p.tagcode...)
+ p.valEnc(o, x)
+ return nil
+}
+
+func size_int64(p *Properties, base structPointer) (n int) {
+ v := structPointer_Word64(base, p.field)
+ if word64_IsNil(v) {
+ return 0
+ }
+ x := word64_Get(v)
+ n += len(p.tagcode)
+ n += p.valSize(x)
+ return
+}
+
+func size_proto3_int64(p *Properties, base structPointer) (n int) {
+ v := structPointer_Word64Val(base, p.field)
+ x := word64Val_Get(v)
+ if x == 0 && !p.oneof {
+ return 0
+ }
+ n += len(p.tagcode)
+ n += p.valSize(x)
+ return
+}
+
+// Encode a string.
+func (o *Buffer) enc_string(p *Properties, base structPointer) error {
+ v := *structPointer_String(base, p.field)
+ if v == nil {
+ return ErrNil
+ }
+ x := *v
+ o.buf = append(o.buf, p.tagcode...)
+ o.EncodeStringBytes(x)
+ return nil
+}
+
+func (o *Buffer) enc_proto3_string(p *Properties, base structPointer) error {
+ v := *structPointer_StringVal(base, p.field)
+ if v == "" {
+ return ErrNil
+ }
+ o.buf = append(o.buf, p.tagcode...)
+ o.EncodeStringBytes(v)
+ return nil
+}
+
+func size_string(p *Properties, base structPointer) (n int) {
+ v := *structPointer_String(base, p.field)
+ if v == nil {
+ return 0
+ }
+ x := *v
+ n += len(p.tagcode)
+ n += sizeStringBytes(x)
+ return
+}
+
+func size_proto3_string(p *Properties, base structPointer) (n int) {
+ v := *structPointer_StringVal(base, p.field)
+ if v == "" && !p.oneof {
+ return 0
+ }
+ n += len(p.tagcode)
+ n += sizeStringBytes(v)
+ return
+}
+
+// All protocol buffer fields are nillable, but be careful.
+func isNil(v reflect.Value) bool {
+ switch v.Kind() {
+ case reflect.Interface, reflect.Map, reflect.Ptr, reflect.Slice:
+ return v.IsNil()
+ }
+ return false
+}
+
+// Encode a message struct.
+func (o *Buffer) enc_struct_message(p *Properties, base structPointer) error {
+ var state errorState
+ structp := structPointer_GetStructPointer(base, p.field)
+ if structPointer_IsNil(structp) {
+ return ErrNil
+ }
+
+ // Can the object marshal itself?
+ if p.isMarshaler {
+ m := structPointer_Interface(structp, p.stype).(Marshaler)
+ data, err := m.Marshal()
+ if err != nil && !state.shouldContinue(err, nil) {
+ return err
+ }
+ o.buf = append(o.buf, p.tagcode...)
+ o.EncodeRawBytes(data)
+ return state.err
+ }
+
+ o.buf = append(o.buf, p.tagcode...)
+ return o.enc_len_struct(p.sprop, structp, &state)
+}
+
+func size_struct_message(p *Properties, base structPointer) int {
+ structp := structPointer_GetStructPointer(base, p.field)
+ if structPointer_IsNil(structp) {
+ return 0
+ }
+
+ // Can the object marshal itself?
+ if p.isMarshaler {
+ m := structPointer_Interface(structp, p.stype).(Marshaler)
+ data, _ := m.Marshal()
+ n0 := len(p.tagcode)
+ n1 := sizeRawBytes(data)
+ return n0 + n1
+ }
+
+ n0 := len(p.tagcode)
+ n1 := size_struct(p.sprop, structp)
+ n2 := sizeVarint(uint64(n1)) // size of encoded length
+ return n0 + n1 + n2
+}
+
+// Encode a group struct.
+func (o *Buffer) enc_struct_group(p *Properties, base structPointer) error {
+ var state errorState
+ b := structPointer_GetStructPointer(base, p.field)
+ if structPointer_IsNil(b) {
+ return ErrNil
+ }
+
+ o.EncodeVarint(uint64((p.Tag << 3) | WireStartGroup))
+ err := o.enc_struct(p.sprop, b)
+ if err != nil && !state.shouldContinue(err, nil) {
+ return err
+ }
+ o.EncodeVarint(uint64((p.Tag << 3) | WireEndGroup))
+ return state.err
+}
+
+func size_struct_group(p *Properties, base structPointer) (n int) {
+ b := structPointer_GetStructPointer(base, p.field)
+ if structPointer_IsNil(b) {
+ return 0
+ }
+
+ n += sizeVarint(uint64((p.Tag << 3) | WireStartGroup))
+ n += size_struct(p.sprop, b)
+ n += sizeVarint(uint64((p.Tag << 3) | WireEndGroup))
+ return
+}
+
+// Encode a slice of bools ([]bool).
+func (o *Buffer) enc_slice_bool(p *Properties, base structPointer) error {
+ s := *structPointer_BoolSlice(base, p.field)
+ l := len(s)
+ if l == 0 {
+ return ErrNil
+ }
+ for _, x := range s {
+ o.buf = append(o.buf, p.tagcode...)
+ v := uint64(0)
+ if x {
+ v = 1
+ }
+ p.valEnc(o, v)
+ }
+ return nil
+}
+
+func size_slice_bool(p *Properties, base structPointer) int {
+ s := *structPointer_BoolSlice(base, p.field)
+ l := len(s)
+ if l == 0 {
+ return 0
+ }
+ return l * (len(p.tagcode) + 1) // each bool takes exactly one byte
+}
+
+// Encode a slice of bools ([]bool) in packed format.
+func (o *Buffer) enc_slice_packed_bool(p *Properties, base structPointer) error {
+ s := *structPointer_BoolSlice(base, p.field)
+ l := len(s)
+ if l == 0 {
+ return ErrNil
+ }
+ o.buf = append(o.buf, p.tagcode...)
+ o.EncodeVarint(uint64(l)) // each bool takes exactly one byte
+ for _, x := range s {
+ v := uint64(0)
+ if x {
+ v = 1
+ }
+ p.valEnc(o, v)
+ }
+ return nil
+}
+
+func size_slice_packed_bool(p *Properties, base structPointer) (n int) {
+ s := *structPointer_BoolSlice(base, p.field)
+ l := len(s)
+ if l == 0 {
+ return 0
+ }
+ n += len(p.tagcode)
+ n += sizeVarint(uint64(l))
+ n += l // each bool takes exactly one byte
+ return
+}
+
+// Encode a slice of bytes ([]byte).
+func (o *Buffer) enc_slice_byte(p *Properties, base structPointer) error {
+ s := *structPointer_Bytes(base, p.field)
+ if s == nil {
+ return ErrNil
+ }
+ o.buf = append(o.buf, p.tagcode...)
+ o.EncodeRawBytes(s)
+ return nil
+}
+
+func (o *Buffer) enc_proto3_slice_byte(p *Properties, base structPointer) error {
+ s := *structPointer_Bytes(base, p.field)
+ if len(s) == 0 {
+ return ErrNil
+ }
+ o.buf = append(o.buf, p.tagcode...)
+ o.EncodeRawBytes(s)
+ return nil
+}
+
+func size_slice_byte(p *Properties, base structPointer) (n int) {
+ s := *structPointer_Bytes(base, p.field)
+ if s == nil && !p.oneof {
+ return 0
+ }
+ n += len(p.tagcode)
+ n += sizeRawBytes(s)
+ return
+}
+
+func size_proto3_slice_byte(p *Properties, base structPointer) (n int) {
+ s := *structPointer_Bytes(base, p.field)
+ if len(s) == 0 && !p.oneof {
+ return 0
+ }
+ n += len(p.tagcode)
+ n += sizeRawBytes(s)
+ return
+}
+
+// Encode a slice of int32s ([]int32).
+func (o *Buffer) enc_slice_int32(p *Properties, base structPointer) error {
+ s := structPointer_Word32Slice(base, p.field)
+ l := s.Len()
+ if l == 0 {
+ return ErrNil
+ }
+ for i := 0; i < l; i++ {
+ o.buf = append(o.buf, p.tagcode...)
+ x := int32(s.Index(i)) // permit sign extension to use full 64-bit range
+ p.valEnc(o, uint64(x))
+ }
+ return nil
+}
+
+func size_slice_int32(p *Properties, base structPointer) (n int) {
+ s := structPointer_Word32Slice(base, p.field)
+ l := s.Len()
+ if l == 0 {
+ return 0
+ }
+ for i := 0; i < l; i++ {
+ n += len(p.tagcode)
+ x := int32(s.Index(i)) // permit sign extension to use full 64-bit range
+ n += p.valSize(uint64(x))
+ }
+ return
+}
+
+// Encode a slice of int32s ([]int32) in packed format.
+func (o *Buffer) enc_slice_packed_int32(p *Properties, base structPointer) error {
+ s := structPointer_Word32Slice(base, p.field)
+ l := s.Len()
+ if l == 0 {
+ return ErrNil
+ }
+ // TODO: Reuse a Buffer.
+ buf := NewBuffer(nil)
+ for i := 0; i < l; i++ {
+ x := int32(s.Index(i)) // permit sign extension to use full 64-bit range
+ p.valEnc(buf, uint64(x))
+ }
+
+ o.buf = append(o.buf, p.tagcode...)
+ o.EncodeVarint(uint64(len(buf.buf)))
+ o.buf = append(o.buf, buf.buf...)
+ return nil
+}
+
+func size_slice_packed_int32(p *Properties, base structPointer) (n int) {
+ s := structPointer_Word32Slice(base, p.field)
+ l := s.Len()
+ if l == 0 {
+ return 0
+ }
+ var bufSize int
+ for i := 0; i < l; i++ {
+ x := int32(s.Index(i)) // permit sign extension to use full 64-bit range
+ bufSize += p.valSize(uint64(x))
+ }
+
+ n += len(p.tagcode)
+ n += sizeVarint(uint64(bufSize))
+ n += bufSize
+ return
+}
+
+// Encode a slice of uint32s ([]uint32).
+// Exactly the same as int32, except for no sign extension.
+func (o *Buffer) enc_slice_uint32(p *Properties, base structPointer) error {
+ s := structPointer_Word32Slice(base, p.field)
+ l := s.Len()
+ if l == 0 {
+ return ErrNil
+ }
+ for i := 0; i < l; i++ {
+ o.buf = append(o.buf, p.tagcode...)
+ x := s.Index(i)
+ p.valEnc(o, uint64(x))
+ }
+ return nil
+}
+
+func size_slice_uint32(p *Properties, base structPointer) (n int) {
+ s := structPointer_Word32Slice(base, p.field)
+ l := s.Len()
+ if l == 0 {
+ return 0
+ }
+ for i := 0; i < l; i++ {
+ n += len(p.tagcode)
+ x := s.Index(i)
+ n += p.valSize(uint64(x))
+ }
+ return
+}
+
+// Encode a slice of uint32s ([]uint32) in packed format.
+// Exactly the same as int32, except for no sign extension.
+func (o *Buffer) enc_slice_packed_uint32(p *Properties, base structPointer) error {
+ s := structPointer_Word32Slice(base, p.field)
+ l := s.Len()
+ if l == 0 {
+ return ErrNil
+ }
+ // TODO: Reuse a Buffer.
+ buf := NewBuffer(nil)
+ for i := 0; i < l; i++ {
+ p.valEnc(buf, uint64(s.Index(i)))
+ }
+
+ o.buf = append(o.buf, p.tagcode...)
+ o.EncodeVarint(uint64(len(buf.buf)))
+ o.buf = append(o.buf, buf.buf...)
+ return nil
+}
+
+func size_slice_packed_uint32(p *Properties, base structPointer) (n int) {
+ s := structPointer_Word32Slice(base, p.field)
+ l := s.Len()
+ if l == 0 {
+ return 0
+ }
+ var bufSize int
+ for i := 0; i < l; i++ {
+ bufSize += p.valSize(uint64(s.Index(i)))
+ }
+
+ n += len(p.tagcode)
+ n += sizeVarint(uint64(bufSize))
+ n += bufSize
+ return
+}
+
+// Encode a slice of int64s ([]int64).
+func (o *Buffer) enc_slice_int64(p *Properties, base structPointer) error {
+ s := structPointer_Word64Slice(base, p.field)
+ l := s.Len()
+ if l == 0 {
+ return ErrNil
+ }
+ for i := 0; i < l; i++ {
+ o.buf = append(o.buf, p.tagcode...)
+ p.valEnc(o, s.Index(i))
+ }
+ return nil
+}
+
+func size_slice_int64(p *Properties, base structPointer) (n int) {
+ s := structPointer_Word64Slice(base, p.field)
+ l := s.Len()
+ if l == 0 {
+ return 0
+ }
+ for i := 0; i < l; i++ {
+ n += len(p.tagcode)
+ n += p.valSize(s.Index(i))
+ }
+ return
+}
+
+// Encode a slice of int64s ([]int64) in packed format.
+func (o *Buffer) enc_slice_packed_int64(p *Properties, base structPointer) error {
+ s := structPointer_Word64Slice(base, p.field)
+ l := s.Len()
+ if l == 0 {
+ return ErrNil
+ }
+ // TODO: Reuse a Buffer.
+ buf := NewBuffer(nil)
+ for i := 0; i < l; i++ {
+ p.valEnc(buf, s.Index(i))
+ }
+
+ o.buf = append(o.buf, p.tagcode...)
+ o.EncodeVarint(uint64(len(buf.buf)))
+ o.buf = append(o.buf, buf.buf...)
+ return nil
+}
+
+func size_slice_packed_int64(p *Properties, base structPointer) (n int) {
+ s := structPointer_Word64Slice(base, p.field)
+ l := s.Len()
+ if l == 0 {
+ return 0
+ }
+ var bufSize int
+ for i := 0; i < l; i++ {
+ bufSize += p.valSize(s.Index(i))
+ }
+
+ n += len(p.tagcode)
+ n += sizeVarint(uint64(bufSize))
+ n += bufSize
+ return
+}
+
+// Encode a slice of slice of bytes ([][]byte).
+func (o *Buffer) enc_slice_slice_byte(p *Properties, base structPointer) error {
+ ss := *structPointer_BytesSlice(base, p.field)
+ l := len(ss)
+ if l == 0 {
+ return ErrNil
+ }
+ for i := 0; i < l; i++ {
+ o.buf = append(o.buf, p.tagcode...)
+ o.EncodeRawBytes(ss[i])
+ }
+ return nil
+}
+
+func size_slice_slice_byte(p *Properties, base structPointer) (n int) {
+ ss := *structPointer_BytesSlice(base, p.field)
+ l := len(ss)
+ if l == 0 {
+ return 0
+ }
+ n += l * len(p.tagcode)
+ for i := 0; i < l; i++ {
+ n += sizeRawBytes(ss[i])
+ }
+ return
+}
+
+// Encode a slice of strings ([]string).
+func (o *Buffer) enc_slice_string(p *Properties, base structPointer) error {
+ ss := *structPointer_StringSlice(base, p.field)
+ l := len(ss)
+ for i := 0; i < l; i++ {
+ o.buf = append(o.buf, p.tagcode...)
+ o.EncodeStringBytes(ss[i])
+ }
+ return nil
+}
+
+func size_slice_string(p *Properties, base structPointer) (n int) {
+ ss := *structPointer_StringSlice(base, p.field)
+ l := len(ss)
+ n += l * len(p.tagcode)
+ for i := 0; i < l; i++ {
+ n += sizeStringBytes(ss[i])
+ }
+ return
+}
+
+// Encode a slice of message structs ([]*struct).
+func (o *Buffer) enc_slice_struct_message(p *Properties, base structPointer) error {
+ var state errorState
+ s := structPointer_StructPointerSlice(base, p.field)
+ l := s.Len()
+
+ for i := 0; i < l; i++ {
+ structp := s.Index(i)
+ if structPointer_IsNil(structp) {
+ return errRepeatedHasNil
+ }
+
+ // Can the object marshal itself?
+ if p.isMarshaler {
+ m := structPointer_Interface(structp, p.stype).(Marshaler)
+ data, err := m.Marshal()
+ if err != nil && !state.shouldContinue(err, nil) {
+ return err
+ }
+ o.buf = append(o.buf, p.tagcode...)
+ o.EncodeRawBytes(data)
+ continue
+ }
+
+ o.buf = append(o.buf, p.tagcode...)
+ err := o.enc_len_struct(p.sprop, structp, &state)
+ if err != nil && !state.shouldContinue(err, nil) {
+ if err == ErrNil {
+ return errRepeatedHasNil
+ }
+ return err
+ }
+ }
+ return state.err
+}
+
+func size_slice_struct_message(p *Properties, base structPointer) (n int) {
+ s := structPointer_StructPointerSlice(base, p.field)
+ l := s.Len()
+ n += l * len(p.tagcode)
+ for i := 0; i < l; i++ {
+ structp := s.Index(i)
+ if structPointer_IsNil(structp) {
+ return // return the size up to this point
+ }
+
+ // Can the object marshal itself?
+ if p.isMarshaler {
+ m := structPointer_Interface(structp, p.stype).(Marshaler)
+ data, _ := m.Marshal()
+ n += len(p.tagcode)
+ n += sizeRawBytes(data)
+ continue
+ }
+
+ n0 := size_struct(p.sprop, structp)
+ n1 := sizeVarint(uint64(n0)) // size of encoded length
+ n += n0 + n1
+ }
+ return
+}
+
+// Encode a slice of group structs ([]*struct).
+func (o *Buffer) enc_slice_struct_group(p *Properties, base structPointer) error {
+ var state errorState
+ s := structPointer_StructPointerSlice(base, p.field)
+ l := s.Len()
+
+ for i := 0; i < l; i++ {
+ b := s.Index(i)
+ if structPointer_IsNil(b) {
+ return errRepeatedHasNil
+ }
+
+ o.EncodeVarint(uint64((p.Tag << 3) | WireStartGroup))
+
+ err := o.enc_struct(p.sprop, b)
+
+ if err != nil && !state.shouldContinue(err, nil) {
+ if err == ErrNil {
+ return errRepeatedHasNil
+ }
+ return err
+ }
+
+ o.EncodeVarint(uint64((p.Tag << 3) | WireEndGroup))
+ }
+ return state.err
+}
+
+func size_slice_struct_group(p *Properties, base structPointer) (n int) {
+ s := structPointer_StructPointerSlice(base, p.field)
+ l := s.Len()
+
+ n += l * sizeVarint(uint64((p.Tag<<3)|WireStartGroup))
+ n += l * sizeVarint(uint64((p.Tag<<3)|WireEndGroup))
+ for i := 0; i < l; i++ {
+ b := s.Index(i)
+ if structPointer_IsNil(b) {
+ return // return size up to this point
+ }
+
+ n += size_struct(p.sprop, b)
+ }
+ return
+}
+
+// Encode an extension map.
+func (o *Buffer) enc_map(p *Properties, base structPointer) error {
+ v := *structPointer_ExtMap(base, p.field)
+ if err := encodeExtensionMap(v); err != nil {
+ return err
+ }
+ // Fast-path for common cases: zero or one extensions.
+ if len(v) <= 1 {
+ for _, e := range v {
+ o.buf = append(o.buf, e.enc...)
+ }
+ return nil
+ }
+
+ // Sort keys to provide a deterministic encoding.
+ keys := make([]int, 0, len(v))
+ for k := range v {
+ keys = append(keys, int(k))
+ }
+ sort.Ints(keys)
+
+ for _, k := range keys {
+ o.buf = append(o.buf, v[int32(k)].enc...)
+ }
+ return nil
+}
+
+func size_map(p *Properties, base structPointer) int {
+ v := *structPointer_ExtMap(base, p.field)
+ return sizeExtensionMap(v)
+}
+
+// Encode a map field.
+func (o *Buffer) enc_new_map(p *Properties, base structPointer) error {
+ var state errorState // XXX: or do we need to plumb this through?
+
+ /*
+ A map defined as
+ map map_field = N;
+ is encoded in the same way as
+ message MapFieldEntry {
+ key_type key = 1;
+ value_type value = 2;
+ }
+ repeated MapFieldEntry map_field = N;
+ */
+
+ v := structPointer_NewAt(base, p.field, p.mtype).Elem() // map[K]V
+ if v.Len() == 0 {
+ return nil
+ }
+
+ keycopy, valcopy, keybase, valbase := mapEncodeScratch(p.mtype)
+
+ enc := func() error {
+ if err := p.mkeyprop.enc(o, p.mkeyprop, keybase); err != nil {
+ return err
+ }
+ if err := p.mvalprop.enc(o, p.mvalprop, valbase); err != nil {
+ return err
+ }
+ return nil
+ }
+
+ // Don't sort map keys. It is not required by the spec, and C++ doesn't do it.
+ for _, key := range v.MapKeys() {
+ val := v.MapIndex(key)
+
+ // The only illegal map entry values are nil message pointers.
+ if val.Kind() == reflect.Ptr && val.IsNil() {
+ return errors.New("proto: map has nil element")
+ }
+
+ keycopy.Set(key)
+ valcopy.Set(val)
+
+ o.buf = append(o.buf, p.tagcode...)
+ if err := o.enc_len_thing(enc, &state); err != nil {
+ return err
+ }
+ }
+ return nil
+}
+
+func size_new_map(p *Properties, base structPointer) int {
+ v := structPointer_NewAt(base, p.field, p.mtype).Elem() // map[K]V
+
+ keycopy, valcopy, keybase, valbase := mapEncodeScratch(p.mtype)
+
+ n := 0
+ for _, key := range v.MapKeys() {
+ val := v.MapIndex(key)
+ keycopy.Set(key)
+ valcopy.Set(val)
+
+ // Tag codes for key and val are the responsibility of the sub-sizer.
+ keysize := p.mkeyprop.size(p.mkeyprop, keybase)
+ valsize := p.mvalprop.size(p.mvalprop, valbase)
+ entry := keysize + valsize
+ // Add on tag code and length of map entry itself.
+ n += len(p.tagcode) + sizeVarint(uint64(entry)) + entry
+ }
+ return n
+}
+
+// mapEncodeScratch returns new reflect.Values matching the map's key and value
+// types, and structPointers suitable for passing to an encoder or sizer.
+func mapEncodeScratch(mapType reflect.Type) (keycopy, valcopy reflect.Value, keybase, valbase structPointer) {
+ // Prepare addressable doubly-indirect placeholders for the key and value types.
+ // This is needed because the element-type encoders expect **T, but the map iteration produces T.
+
+ keycopy = reflect.New(mapType.Key()).Elem() // addressable K
+ keyptr := reflect.New(reflect.PtrTo(keycopy.Type())).Elem() // addressable *K
+ keyptr.Set(keycopy.Addr()) //
+ keybase = toStructPointer(keyptr.Addr()) // **K
+
+ // Value types are more varied and require special handling.
+ switch mapType.Elem().Kind() {
+ case reflect.Slice:
+ // []byte
+ var dummy []byte
+ valcopy = reflect.ValueOf(&dummy).Elem() // addressable []byte
+ valbase = toStructPointer(valcopy.Addr())
+ case reflect.Ptr:
+ // message; the generated field type is map[K]*Msg (so V is *Msg),
+ // so we only need one level of indirection.
+ valcopy = reflect.New(mapType.Elem()).Elem() // addressable V
+ valbase = toStructPointer(valcopy.Addr())
+ default:
+ // everything else
+ valcopy = reflect.New(mapType.Elem()).Elem() // addressable V
+ valptr := reflect.New(reflect.PtrTo(valcopy.Type())).Elem() // addressable *V
+ valptr.Set(valcopy.Addr()) //
+ valbase = toStructPointer(valptr.Addr()) // **V
+ }
+ return
+}
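The MapFieldEntry comment inside enc_new_map is the whole wire contract for maps: each entry is a length-delimited pseudo-message carrying the key on field 1 and the value on field 2. The sketch below assembles one entry of a hypothetical map<int32, string> field by hand with the exported Buffer primitives and wire constants, purely for illustration:

package main

import (
	"fmt"

	"github.com/golang/protobuf/proto"
)

func main() {
	const mapFieldNum = 5 // hypothetical field number for the map field

	// Build one MapFieldEntry{key: 7, value: "x"}.
	entry := proto.NewBuffer(nil)
	_ = entry.EncodeVarint(uint64(1<<3 | proto.WireVarint)) // key, tag 1
	_ = entry.EncodeVarint(7)
	_ = entry.EncodeVarint(uint64(2<<3 | proto.WireBytes)) // value, tag 2
	_ = entry.EncodeStringBytes("x")

	// Emit it as a length-delimited field, which is exactly what
	// enc_new_map does for each key/value pair.
	out := proto.NewBuffer(nil)
	_ = out.EncodeVarint(uint64(mapFieldNum<<3 | proto.WireBytes))
	_ = out.EncodeRawBytes(entry.Bytes())

	fmt.Printf("% x\n", out.Bytes()) // 2a 05 08 07 12 01 78
}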
+
+// Encode a struct.
+func (o *Buffer) enc_struct(prop *StructProperties, base structPointer) error {
+ var state errorState
+ // Encode fields in tag order so that decoders may use optimizations
+ // that depend on the ordering.
+ // https://developers.google.com/protocol-buffers/docs/encoding#order
+ for _, i := range prop.order {
+ p := prop.Prop[i]
+ if p.enc != nil {
+ err := p.enc(o, p, base)
+ if err != nil {
+ if err == ErrNil {
+ if p.Required && state.err == nil {
+ state.err = &RequiredNotSetError{p.Name}
+ }
+ } else if err == errRepeatedHasNil {
+ // Give more context to nil values in repeated fields.
+ return errors.New("repeated field " + p.OrigName + " has nil element")
+ } else if !state.shouldContinue(err, p) {
+ return err
+ }
+ }
+ }
+ }
+
+ // Do oneof fields.
+ if prop.oneofMarshaler != nil {
+ m := structPointer_Interface(base, prop.stype).(Message)
+ if err := prop.oneofMarshaler(m, o); err != nil {
+ return err
+ }
+ }
+
+ // Add unrecognized fields at the end.
+ if prop.unrecField.IsValid() {
+ v := *structPointer_Bytes(base, prop.unrecField)
+ if len(v) > 0 {
+ o.buf = append(o.buf, v...)
+ }
+ }
+
+ return state.err
+}
+
+func size_struct(prop *StructProperties, base structPointer) (n int) {
+ for _, i := range prop.order {
+ p := prop.Prop[i]
+ if p.size != nil {
+ n += p.size(p, base)
+ }
+ }
+
+ // Add unrecognized fields at the end.
+ if prop.unrecField.IsValid() {
+ v := *structPointer_Bytes(base, prop.unrecField)
+ n += len(v)
+ }
+
+ // Factor in any oneof fields.
+ if prop.oneofSizer != nil {
+ m := structPointer_Interface(base, prop.stype).(Message)
+ n += prop.oneofSizer(m)
+ }
+
+ return
+}
+
+var zeroes [20]byte // longer than any conceivable sizeVarint
+
+// Encode a struct, preceded by its encoded length (as a varint).
+func (o *Buffer) enc_len_struct(prop *StructProperties, base structPointer, state *errorState) error {
+ return o.enc_len_thing(func() error { return o.enc_struct(prop, base) }, state)
+}
+
+// Encode something, preceded by its encoded length (as a varint).
+func (o *Buffer) enc_len_thing(enc func() error, state *errorState) error {
+ iLen := len(o.buf)
+ o.buf = append(o.buf, 0, 0, 0, 0) // reserve four bytes for length
+ iMsg := len(o.buf)
+ err := enc()
+ if err != nil && !state.shouldContinue(err, nil) {
+ return err
+ }
+ lMsg := len(o.buf) - iMsg
+ lLen := sizeVarint(uint64(lMsg))
+ switch x := lLen - (iMsg - iLen); {
+ case x > 0: // actual length is x bytes larger than the space we reserved
+ // Move msg x bytes right.
+ o.buf = append(o.buf, zeroes[:x]...)
+ copy(o.buf[iMsg+x:], o.buf[iMsg:iMsg+lMsg])
+ case x < 0: // actual length is x bytes smaller than the space we reserved
+ // Move msg x bytes left.
+ copy(o.buf[iMsg+x:], o.buf[iMsg:iMsg+lMsg])
+ o.buf = o.buf[:len(o.buf)+x] // x is negative
+ }
+ // Encode the length in the reserved space.
+ o.buf = o.buf[:iLen]
+ o.EncodeVarint(uint64(lMsg))
+ o.buf = o.buf[:len(o.buf)+lMsg]
+ return state.err
+}
+
+// errorState maintains the first error that occurs and updates that error
+// with additional context.
+type errorState struct {
+ err error
+}
+
+// shouldContinue reports whether encoding should continue upon encountering the
+// given error. If the error is RequiredNotSetError, shouldContinue returns true
+// and, if this is the first appearance of that error, remembers it for future
+// reporting.
+//
+// If prop is not nil, it may update any error with additional context about the
+// field with the error.
+func (s *errorState) shouldContinue(err error, prop *Properties) bool {
+ // Ignore unset required fields.
+ reqNotSet, ok := err.(*RequiredNotSetError)
+ if !ok {
+ return false
+ }
+ if s.err == nil {
+ if prop != nil {
+ err = &RequiredNotSetError{prop.Name + "." + reqNotSet.field}
+ }
+ s.err = err
+ }
+ return true
+}
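errorState is what lets encoding continue past unset required fields: Marshal still returns whatever bytes it produced, together with a *RequiredNotSetError. A sketch with a hypothetical hand-written message carrying one required field:

package main

import (
	"fmt"

	"github.com/golang/protobuf/proto"
)

// Strict has one required and one optional field (hypothetical type).
type Strict struct {
	Id   *int64  `protobuf:"varint,1,req,name=id"`
	Note *string `protobuf:"bytes,2,opt,name=note"`
}

func (m *Strict) Reset()         { *m = Strict{} }
func (m *Strict) String() string { return proto.CompactTextString(m) }
func (*Strict) ProtoMessage()    {}

func main() {
	data, err := proto.Marshal(&Strict{Note: proto.String("n")})

	// The optional field is still encoded even though the required one is
	// missing; the error reports which required field was unset.
	if _, ok := err.(*proto.RequiredNotSetError); ok {
		fmt.Printf("partial encoding (%d bytes): %v\n", len(data), err)
	}
}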
diff --git a/vendor/github.com/golang/protobuf/proto/equal.go b/vendor/github.com/golang/protobuf/proto/equal.go
new file mode 100644
index 00000000..f5db1def
--- /dev/null
+++ b/vendor/github.com/golang/protobuf/proto/equal.go
@@ -0,0 +1,276 @@
+// Go support for Protocol Buffers - Google's data interchange format
+//
+// Copyright 2011 The Go Authors. All rights reserved.
+// https://github.com/golang/protobuf
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following disclaimer
+// in the documentation and/or other materials provided with the
+// distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+// Protocol buffer comparison.
+
+package proto
+
+import (
+ "bytes"
+ "log"
+ "reflect"
+ "strings"
+)
+
+/*
+Equal returns true iff protocol buffers a and b are equal.
+The arguments must both be pointers to protocol buffer structs.
+
+Equality is defined in this way:
+ - Two messages are equal iff they are the same type,
+ corresponding fields are equal, unknown field sets
+ are equal, and extensions sets are equal.
+ - Two set scalar fields are equal iff their values are equal.
+ If the fields are of a floating-point type, remember that
+ NaN != x for all x, including NaN. If the message is defined
+ in a proto3 .proto file, fields are not "set"; specifically,
+ zero length proto3 "bytes" fields are equal (nil == {}).
+ - Two repeated fields are equal iff their lengths are the same,
+ and their corresponding elements are equal (a "bytes" field,
+ although represented by []byte, is not a repeated field)
+ - Two unset fields are equal.
+ - Two unknown field sets are equal if their current
+ encoded state is equal.
+ - Two extension sets are equal iff they have corresponding
+ elements that are pairwise equal.
+  - Every other combination of things is not equal.
+
+The return value is undefined if a and b are not protocol buffers.
+*/
+func Equal(a, b Message) bool {
+ if a == nil || b == nil {
+ return a == b
+ }
+ v1, v2 := reflect.ValueOf(a), reflect.ValueOf(b)
+ if v1.Type() != v2.Type() {
+ return false
+ }
+ if v1.Kind() == reflect.Ptr {
+ if v1.IsNil() {
+ return v2.IsNil()
+ }
+ if v2.IsNil() {
+ return false
+ }
+ v1, v2 = v1.Elem(), v2.Elem()
+ }
+ if v1.Kind() != reflect.Struct {
+ return false
+ }
+ return equalStruct(v1, v2)
+}
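A short sketch of the semantics documented above, reusing the hand-written Example message from the Marshal sketch (hypothetical): set fields compare by value, and in proto2 a field set to its zero value is still different from an unset field.

func equalSketch() {
	a := &Example{Id: proto.Int64(7)}
	b := &Example{Id: proto.Int64(7)}
	fmt.Println(proto.Equal(a, b)) // true: same type, corresponding fields equal

	b.Name = proto.String("")
	fmt.Println(proto.Equal(a, b)) // false: "" is set, which differs from unset

	var c *Example
	fmt.Println(proto.Equal(c, (*Example)(nil))) // true: both are nil pointers
}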
+
+// v1 and v2 are known to have the same type.
+func equalStruct(v1, v2 reflect.Value) bool {
+ sprop := GetProperties(v1.Type())
+ for i := 0; i < v1.NumField(); i++ {
+ f := v1.Type().Field(i)
+ if strings.HasPrefix(f.Name, "XXX_") {
+ continue
+ }
+ f1, f2 := v1.Field(i), v2.Field(i)
+ if f.Type.Kind() == reflect.Ptr {
+ if n1, n2 := f1.IsNil(), f2.IsNil(); n1 && n2 {
+ // both unset
+ continue
+ } else if n1 != n2 {
+ // set/unset mismatch
+ return false
+ }
+ b1, ok := f1.Interface().(raw)
+ if ok {
+ b2 := f2.Interface().(raw)
+ // RawMessage
+ if !bytes.Equal(b1.Bytes(), b2.Bytes()) {
+ return false
+ }
+ continue
+ }
+ f1, f2 = f1.Elem(), f2.Elem()
+ }
+ if !equalAny(f1, f2, sprop.Prop[i]) {
+ return false
+ }
+ }
+
+ if em1 := v1.FieldByName("XXX_extensions"); em1.IsValid() {
+ em2 := v2.FieldByName("XXX_extensions")
+ if !equalExtensions(v1.Type(), em1.Interface().(map[int32]Extension), em2.Interface().(map[int32]Extension)) {
+ return false
+ }
+ }
+
+ uf := v1.FieldByName("XXX_unrecognized")
+ if !uf.IsValid() {
+ return true
+ }
+
+ u1 := uf.Bytes()
+ u2 := v2.FieldByName("XXX_unrecognized").Bytes()
+ if !bytes.Equal(u1, u2) {
+ return false
+ }
+
+ return true
+}
+
+// v1 and v2 are known to have the same type.
+// prop may be nil.
+func equalAny(v1, v2 reflect.Value, prop *Properties) bool {
+ if v1.Type() == protoMessageType {
+ m1, _ := v1.Interface().(Message)
+ m2, _ := v2.Interface().(Message)
+ return Equal(m1, m2)
+ }
+ switch v1.Kind() {
+ case reflect.Bool:
+ return v1.Bool() == v2.Bool()
+ case reflect.Float32, reflect.Float64:
+ return v1.Float() == v2.Float()
+ case reflect.Int32, reflect.Int64:
+ return v1.Int() == v2.Int()
+ case reflect.Interface:
+ // Probably a oneof field; compare the inner values.
+ n1, n2 := v1.IsNil(), v2.IsNil()
+ if n1 || n2 {
+ return n1 == n2
+ }
+ e1, e2 := v1.Elem(), v2.Elem()
+ if e1.Type() != e2.Type() {
+ return false
+ }
+ return equalAny(e1, e2, nil)
+ case reflect.Map:
+ if v1.Len() != v2.Len() {
+ return false
+ }
+ for _, key := range v1.MapKeys() {
+ val2 := v2.MapIndex(key)
+ if !val2.IsValid() {
+ // This key was not found in the second map.
+ return false
+ }
+ if !equalAny(v1.MapIndex(key), val2, nil) {
+ return false
+ }
+ }
+ return true
+ case reflect.Ptr:
+ return equalAny(v1.Elem(), v2.Elem(), prop)
+ case reflect.Slice:
+ if v1.Type().Elem().Kind() == reflect.Uint8 {
+ // short circuit: []byte
+
+ // Edge case: if this is in a proto3 message, a zero length
+ // bytes field is considered the zero value.
+ if prop != nil && prop.proto3 && v1.Len() == 0 && v2.Len() == 0 {
+ return true
+ }
+ if v1.IsNil() != v2.IsNil() {
+ return false
+ }
+ return bytes.Equal(v1.Interface().([]byte), v2.Interface().([]byte))
+ }
+
+ if v1.Len() != v2.Len() {
+ return false
+ }
+ for i := 0; i < v1.Len(); i++ {
+ if !equalAny(v1.Index(i), v2.Index(i), prop) {
+ return false
+ }
+ }
+ return true
+ case reflect.String:
+ return v1.Interface().(string) == v2.Interface().(string)
+ case reflect.Struct:
+ return equalStruct(v1, v2)
+ case reflect.Uint32, reflect.Uint64:
+ return v1.Uint() == v2.Uint()
+ }
+
+ // unknown type, so not a protocol buffer
+ log.Printf("proto: don't know how to compare %v", v1)
+ return false
+}
+
+// base is the struct type that the extensions are based on.
+// em1 and em2 are extension maps.
+func equalExtensions(base reflect.Type, em1, em2 map[int32]Extension) bool {
+ if len(em1) != len(em2) {
+ return false
+ }
+
+ for extNum, e1 := range em1 {
+ e2, ok := em2[extNum]
+ if !ok {
+ return false
+ }
+
+ m1, m2 := e1.value, e2.value
+
+ if m1 != nil && m2 != nil {
+ // Both are unencoded.
+ if !equalAny(reflect.ValueOf(m1), reflect.ValueOf(m2), nil) {
+ return false
+ }
+ continue
+ }
+
+ // At least one is encoded. To do a semantically correct comparison
+ // we need to unmarshal them first.
+ var desc *ExtensionDesc
+ if m := extensionMaps[base]; m != nil {
+ desc = m[extNum]
+ }
+ if desc == nil {
+ log.Printf("proto: don't know how to compare extension %d of %v", extNum, base)
+ continue
+ }
+ var err error
+ if m1 == nil {
+ m1, err = decodeExtension(e1.enc, desc)
+ }
+ if m2 == nil && err == nil {
+ m2, err = decodeExtension(e2.enc, desc)
+ }
+ if err != nil {
+ // The encoded form is invalid.
+ log.Printf("proto: badly encoded extension %d of %v: %v", extNum, base, err)
+ return false
+ }
+ if !equalAny(reflect.ValueOf(m1), reflect.ValueOf(m2), nil) {
+ return false
+ }
+ }
+
+ return true
+}
diff --git a/vendor/github.com/golang/protobuf/proto/extensions.go b/vendor/github.com/golang/protobuf/proto/extensions.go
new file mode 100644
index 00000000..054f4f1d
--- /dev/null
+++ b/vendor/github.com/golang/protobuf/proto/extensions.go
@@ -0,0 +1,399 @@
+// Go support for Protocol Buffers - Google's data interchange format
+//
+// Copyright 2010 The Go Authors. All rights reserved.
+// https://github.com/golang/protobuf
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following disclaimer
+// in the documentation and/or other materials provided with the
+// distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+package proto
+
+/*
+ * Types and routines for supporting protocol buffer extensions.
+ */
+
+import (
+ "errors"
+ "fmt"
+ "reflect"
+ "strconv"
+ "sync"
+)
+
+// ErrMissingExtension is the error returned by GetExtension if the named extension is not in the message.
+var ErrMissingExtension = errors.New("proto: missing extension")
+
+// ExtensionRange represents a range of message extensions for a protocol buffer.
+// Used in code generated by the protocol compiler.
+type ExtensionRange struct {
+ Start, End int32 // both inclusive
+}
+
+// extendableProto is an interface implemented by any protocol buffer that may be extended.
+type extendableProto interface {
+ Message
+ ExtensionRangeArray() []ExtensionRange
+ ExtensionMap() map[int32]Extension
+}
+
+var extendableProtoType = reflect.TypeOf((*extendableProto)(nil)).Elem()
+
+// ExtensionDesc represents an extension specification.
+// Used in generated code from the protocol compiler.
+type ExtensionDesc struct {
+ ExtendedType Message // nil pointer to the type that is being extended
+ ExtensionType interface{} // nil pointer to the extension type
+ Field int32 // field number
+ Name string // fully-qualified name of extension, for text formatting
+ Tag string // protobuf tag style
+}
+
+func (ed *ExtensionDesc) repeated() bool {
+ t := reflect.TypeOf(ed.ExtensionType)
+ return t.Kind() == reflect.Slice && t.Elem().Kind() != reflect.Uint8
+}
+
+// Extension represents an extension in a message.
+type Extension struct {
+ // When an extension is stored in a message using SetExtension
+ // only desc and value are set. When the message is marshaled
+ // enc will be set to the encoded form of the message.
+ //
+ // When a message is unmarshaled and contains extensions, each
+ // extension will have only enc set. When such an extension is
+ // accessed using GetExtension (or GetExtensions) desc and value
+ // will be set.
+ desc *ExtensionDesc
+ value interface{}
+ enc []byte
+}
+
+// SetRawExtension is for testing only.
+func SetRawExtension(base extendableProto, id int32, b []byte) {
+ base.ExtensionMap()[id] = Extension{enc: b}
+}
+
+// isExtensionField returns true iff the given field number is in an extension range.
+func isExtensionField(pb extendableProto, field int32) bool {
+ for _, er := range pb.ExtensionRangeArray() {
+ if er.Start <= field && field <= er.End {
+ return true
+ }
+ }
+ return false
+}
+
+// checkExtensionTypes checks that the given extension is valid for pb.
+func checkExtensionTypes(pb extendableProto, extension *ExtensionDesc) error {
+ // Check the extended type.
+ if a, b := reflect.TypeOf(pb), reflect.TypeOf(extension.ExtendedType); a != b {
+ return errors.New("proto: bad extended type; " + b.String() + " does not extend " + a.String())
+ }
+ // Check the range.
+ if !isExtensionField(pb, extension.Field) {
+ return errors.New("proto: bad extension number; not in declared ranges")
+ }
+ return nil
+}
+
+// extPropKey is sufficient to uniquely identify an extension.
+type extPropKey struct {
+ base reflect.Type
+ field int32
+}
+
+var extProp = struct {
+ sync.RWMutex
+ m map[extPropKey]*Properties
+}{
+ m: make(map[extPropKey]*Properties),
+}
+
+func extensionProperties(ed *ExtensionDesc) *Properties {
+ key := extPropKey{base: reflect.TypeOf(ed.ExtendedType), field: ed.Field}
+
+ extProp.RLock()
+ if prop, ok := extProp.m[key]; ok {
+ extProp.RUnlock()
+ return prop
+ }
+ extProp.RUnlock()
+
+ extProp.Lock()
+ defer extProp.Unlock()
+ // Check again.
+ if prop, ok := extProp.m[key]; ok {
+ return prop
+ }
+
+ prop := new(Properties)
+ prop.Init(reflect.TypeOf(ed.ExtensionType), "unknown_name", ed.Tag, nil)
+ extProp.m[key] = prop
+ return prop
+}
+
+// encodeExtensionMap encodes any unmarshaled (unencoded) extensions in m.
+func encodeExtensionMap(m map[int32]Extension) error {
+ for k, e := range m {
+ if e.value == nil || e.desc == nil {
+ // Extension is only in its encoded form.
+ continue
+ }
+
+ // We don't skip extensions that have an encoded form set,
+ // because the extension value may have been mutated after
+ // the last time this function was called.
+
+ et := reflect.TypeOf(e.desc.ExtensionType)
+ props := extensionProperties(e.desc)
+
+ p := NewBuffer(nil)
+ // If e.value has type T, the encoder expects a *struct{ X T }.
+ // Pass a *T with a zero field and hope it all works out.
+ x := reflect.New(et)
+ x.Elem().Set(reflect.ValueOf(e.value))
+ if err := props.enc(p, props, toStructPointer(x)); err != nil {
+ return err
+ }
+ e.enc = p.buf
+ m[k] = e
+ }
+ return nil
+}
+
+func sizeExtensionMap(m map[int32]Extension) (n int) {
+ for _, e := range m {
+ if e.value == nil || e.desc == nil {
+ // Extension is only in its encoded form.
+ n += len(e.enc)
+ continue
+ }
+
+ // We don't skip extensions that have an encoded form set,
+ // because the extension value may have been mutated after
+ // the last time this function was called.
+
+ et := reflect.TypeOf(e.desc.ExtensionType)
+ props := extensionProperties(e.desc)
+
+ // If e.value has type T, the encoder expects a *struct{ X T }.
+ // Pass a *T with a zero field and hope it all works out.
+ x := reflect.New(et)
+ x.Elem().Set(reflect.ValueOf(e.value))
+ n += props.size(props, toStructPointer(x))
+ }
+ return
+}
+
+// HasExtension returns whether the given extension is present in pb.
+func HasExtension(pb extendableProto, extension *ExtensionDesc) bool {
+ // TODO: Check types, field numbers, etc.?
+ _, ok := pb.ExtensionMap()[extension.Field]
+ return ok
+}
+
+// ClearExtension removes the given extension from pb.
+func ClearExtension(pb extendableProto, extension *ExtensionDesc) {
+ // TODO: Check types, field numbers, etc.?
+ delete(pb.ExtensionMap(), extension.Field)
+}
+
+// GetExtension parses and returns the given extension of pb.
+// If the extension is not present and has no default value it returns ErrMissingExtension.
+func GetExtension(pb extendableProto, extension *ExtensionDesc) (interface{}, error) {
+ if err := checkExtensionTypes(pb, extension); err != nil {
+ return nil, err
+ }
+
+ emap := pb.ExtensionMap()
+ e, ok := emap[extension.Field]
+ if !ok {
+ // defaultExtensionValue returns the default value or
+ // ErrMissingExtension if there is no default.
+ return defaultExtensionValue(extension)
+ }
+
+ if e.value != nil {
+ // Already decoded. Check the descriptor, though.
+ if e.desc != extension {
+ // This shouldn't happen. If it does, it means that
+ // GetExtension was called twice with two different
+ // descriptors with the same field number.
+ return nil, errors.New("proto: descriptor conflict")
+ }
+ return e.value, nil
+ }
+
+ v, err := decodeExtension(e.enc, extension)
+ if err != nil {
+ return nil, err
+ }
+
+ // Remember the decoded version and drop the encoded version.
+ // That way it is safe to mutate what we return.
+ e.value = v
+ e.desc = extension
+ e.enc = nil
+ emap[extension.Field] = e
+ return e.value, nil
+}
+
+// defaultExtensionValue returns the default value for extension.
+// If no default for an extension is defined ErrMissingExtension is returned.
+func defaultExtensionValue(extension *ExtensionDesc) (interface{}, error) {
+ t := reflect.TypeOf(extension.ExtensionType)
+ props := extensionProperties(extension)
+
+ sf, _, err := fieldDefault(t, props)
+ if err != nil {
+ return nil, err
+ }
+
+ if sf == nil || sf.value == nil {
+ // There is no default value.
+ return nil, ErrMissingExtension
+ }
+
+ if t.Kind() != reflect.Ptr {
+	// We do not need to return a Ptr; we can return sf.value directly.
+ return sf.value, nil
+ }
+
+ // We need to return an interface{} that is a pointer to sf.value.
+ value := reflect.New(t).Elem()
+ value.Set(reflect.New(value.Type().Elem()))
+ if sf.kind == reflect.Int32 {
+ // We may have an int32 or an enum, but the underlying data is int32.
+		// Since we can't set an int32 into a non-int32 reflect.Value directly,
+		// set it through SetInt, which takes an int64.
+ value.Elem().SetInt(int64(sf.value.(int32)))
+ } else {
+ value.Elem().Set(reflect.ValueOf(sf.value))
+ }
+ return value.Interface(), nil
+}
+
+// decodeExtension decodes an extension encoded in b.
+func decodeExtension(b []byte, extension *ExtensionDesc) (interface{}, error) {
+ o := NewBuffer(b)
+
+ t := reflect.TypeOf(extension.ExtensionType)
+
+ props := extensionProperties(extension)
+
+ // t is a pointer to a struct, pointer to basic type or a slice.
+ // Allocate a "field" to store the pointer/slice itself; the
+ // pointer/slice will be stored here. We pass
+ // the address of this field to props.dec.
+ // This passes a zero field and a *t and lets props.dec
+ // interpret it as a *struct{ x t }.
+ value := reflect.New(t).Elem()
+
+ for {
+ // Discard wire type and field number varint. It isn't needed.
+ if _, err := o.DecodeVarint(); err != nil {
+ return nil, err
+ }
+
+ if err := props.dec(o, props, toStructPointer(value.Addr())); err != nil {
+ return nil, err
+ }
+
+ if o.index >= len(o.buf) {
+ break
+ }
+ }
+ return value.Interface(), nil
+}
+
+// GetExtensions returns a slice of the extensions present in pb that are also listed in es.
+// The returned slice has the same length as es; missing extensions will appear as nil elements.
+func GetExtensions(pb Message, es []*ExtensionDesc) (extensions []interface{}, err error) {
+ epb, ok := pb.(extendableProto)
+ if !ok {
+ err = errors.New("proto: not an extendable proto")
+ return
+ }
+ extensions = make([]interface{}, len(es))
+ for i, e := range es {
+ extensions[i], err = GetExtension(epb, e)
+ if err == ErrMissingExtension {
+ err = nil
+ }
+ if err != nil {
+ return
+ }
+ }
+ return
+}
+
+// SetExtension sets the specified extension of pb to the specified value.
+func SetExtension(pb extendableProto, extension *ExtensionDesc, value interface{}) error {
+ if err := checkExtensionTypes(pb, extension); err != nil {
+ return err
+ }
+ typ := reflect.TypeOf(extension.ExtensionType)
+ if typ != reflect.TypeOf(value) {
+ return errors.New("proto: bad extension value type")
+ }
+ // nil extension values need to be caught early, because the
+ // encoder can't distinguish an ErrNil due to a nil extension
+ // from an ErrNil due to a missing field. Extensions are
+ // always optional, so the encoder would just swallow the error
+ // and drop all the extensions from the encoded message.
+ if reflect.ValueOf(value).IsNil() {
+ return fmt.Errorf("proto: SetExtension called with nil value of type %T", value)
+ }
+
+ pb.ExtensionMap()[extension.Field] = Extension{desc: extension, value: value}
+ return nil
+}
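+
+// Example (editorial sketch, not part of the upstream file): the accessors
+// above are typically used together. The extension descriptor pb.E_Note and
+// the message type *pb.MyMessage (assumed to declare an extension range) are
+// hypothetical; proto.String is the helper defined in lib.go.
+//
+//	msg := &pb.MyMessage{}
+//	if err := proto.SetExtension(msg, pb.E_Note, proto.String("hello")); err != nil {
+//		log.Fatal(err)
+//	}
+//	if proto.HasExtension(msg, pb.E_Note) {
+//		v, err := proto.GetExtension(msg, pb.E_Note)
+//		if err != nil {
+//			log.Fatal(err)
+//		}
+//		fmt.Println(*v.(*string)) // prints "hello"
+//	}
+//	proto.ClearExtension(msg, pb.E_Note)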
+
+// A global registry of extensions.
+// The generated code will register the generated descriptors by calling RegisterExtension.
+
+var extensionMaps = make(map[reflect.Type]map[int32]*ExtensionDesc)
+
+// RegisterExtension is called from the generated code.
+func RegisterExtension(desc *ExtensionDesc) {
+ st := reflect.TypeOf(desc.ExtendedType).Elem()
+ m := extensionMaps[st]
+ if m == nil {
+ m = make(map[int32]*ExtensionDesc)
+ extensionMaps[st] = m
+ }
+ if _, ok := m[desc.Field]; ok {
+ panic("proto: duplicate extension registered: " + st.String() + " " + strconv.Itoa(int(desc.Field)))
+ }
+ m[desc.Field] = desc
+}
+
+// RegisteredExtensions returns a map of the registered extensions of a
+// protocol buffer struct, indexed by the extension number.
+// The argument pb should be a nil pointer to the struct type.
+func RegisteredExtensions(pb Message) map[int32]*ExtensionDesc {
+ return extensionMaps[reflect.TypeOf(pb).Elem()]
+}
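+
+// Example (editorial sketch, not part of the upstream file): generated code
+// registers each descriptor from an init function via RegisterExtension; the
+// registry can then be inspected at run time. *pb.MyMessage is hypothetical.
+//
+//	for num := range proto.RegisteredExtensions((*pb.MyMessage)(nil)) {
+//		fmt.Printf("registered extension field %d\n", num)
+//	}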
diff --git a/vendor/github.com/golang/protobuf/proto/lib.go b/vendor/github.com/golang/protobuf/proto/lib.go
new file mode 100644
index 00000000..42a58c6f
--- /dev/null
+++ b/vendor/github.com/golang/protobuf/proto/lib.go
@@ -0,0 +1,893 @@
+// Go support for Protocol Buffers - Google's data interchange format
+//
+// Copyright 2010 The Go Authors. All rights reserved.
+// https://github.com/golang/protobuf
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following disclaimer
+// in the documentation and/or other materials provided with the
+// distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+/*
+Package proto converts data structures to and from the wire format of
+protocol buffers. It works in concert with the Go source code generated
+for .proto files by the protocol compiler.
+
+A summary of the properties of the protocol buffer interface
+for a protocol buffer variable v:
+
+  - Names are turned from snake_case to CamelCase for export.
+ - There are no methods on v to set fields; just treat
+ them as structure fields.
+ - There are getters that return a field's value if set,
+ and return the field's default value if unset.
+ The getters work even if the receiver is a nil message.
+ - The zero value for a struct is its correct initialization state.
+ All desired fields must be set before marshaling.
+ - A Reset() method will restore a protobuf struct to its zero state.
+ - Non-repeated fields are pointers to the values; nil means unset.
+ That is, optional or required field int32 f becomes F *int32.
+ - Repeated fields are slices.
+ - Helper functions are available to aid the setting of fields.
+ msg.Foo = proto.String("hello") // set field
+ - Constants are defined to hold the default values of all fields that
+ have them. They have the form Default_StructName_FieldName.
+ Because the getter methods handle defaulted values,
+ direct use of these constants should be rare.
+ - Enums are given type names and maps from names to values.
+ Enum values are prefixed by the enclosing message's name, or by the
+ enum's type name if it is a top-level enum. Enum types have a String
+    method, and an Enum method to assist in message construction.
+ - Nested messages, groups and enums have type names prefixed with the name of
+ the surrounding message type.
+ - Extensions are given descriptor names that start with E_,
+ followed by an underscore-delimited list of the nested messages
+ that contain it (if any) followed by the CamelCased name of the
+ extension field itself. HasExtension, ClearExtension, GetExtension
+ and SetExtension are functions for manipulating extensions.
+ - Oneof field sets are given a single field in their message,
+ with distinguished wrapper types for each possible field value.
+ - Marshal and Unmarshal are functions to encode and decode the wire format.
+
+When the .proto file specifies `syntax="proto3"`, there are some differences:
+
+ - Non-repeated fields of non-message type are values instead of pointers.
+ - Getters are only generated for message and oneof fields.
+ - Enum types do not get an Enum method.
+
+The simplest way to describe this is to see an example.
+Given file test.proto, containing
+
+ package example;
+
+ enum FOO { X = 17; }
+
+ message Test {
+ required string label = 1;
+ optional int32 type = 2 [default=77];
+ repeated int64 reps = 3;
+ optional group OptionalGroup = 4 {
+ required string RequiredField = 5;
+ }
+ oneof union {
+ int32 number = 6;
+ string name = 7;
+ }
+ }
+
+The resulting file, test.pb.go, is:
+
+ package example
+
+ import proto "github.com/golang/protobuf/proto"
+ import math "math"
+
+ type FOO int32
+ const (
+ FOO_X FOO = 17
+ )
+ var FOO_name = map[int32]string{
+ 17: "X",
+ }
+ var FOO_value = map[string]int32{
+ "X": 17,
+ }
+
+ func (x FOO) Enum() *FOO {
+ p := new(FOO)
+ *p = x
+ return p
+ }
+ func (x FOO) String() string {
+ return proto.EnumName(FOO_name, int32(x))
+ }
+ func (x *FOO) UnmarshalJSON(data []byte) error {
+		value, err := proto.UnmarshalJSONEnum(FOO_value, data, "FOO")
+ if err != nil {
+ return err
+ }
+ *x = FOO(value)
+ return nil
+ }
+
+ type Test struct {
+ Label *string `protobuf:"bytes,1,req,name=label" json:"label,omitempty"`
+ Type *int32 `protobuf:"varint,2,opt,name=type,def=77" json:"type,omitempty"`
+ Reps []int64 `protobuf:"varint,3,rep,name=reps" json:"reps,omitempty"`
+ Optionalgroup *Test_OptionalGroup `protobuf:"group,4,opt,name=OptionalGroup" json:"optionalgroup,omitempty"`
+ // Types that are valid to be assigned to Union:
+ // *Test_Number
+ // *Test_Name
+ Union isTest_Union `protobuf_oneof:"union"`
+ XXX_unrecognized []byte `json:"-"`
+ }
+ func (m *Test) Reset() { *m = Test{} }
+ func (m *Test) String() string { return proto.CompactTextString(m) }
+ func (*Test) ProtoMessage() {}
+
+ type isTest_Union interface {
+ isTest_Union()
+ }
+
+ type Test_Number struct {
+ Number int32 `protobuf:"varint,6,opt,name=number"`
+ }
+ type Test_Name struct {
+ Name string `protobuf:"bytes,7,opt,name=name"`
+ }
+
+ func (*Test_Number) isTest_Union() {}
+ func (*Test_Name) isTest_Union() {}
+
+ func (m *Test) GetUnion() isTest_Union {
+ if m != nil {
+ return m.Union
+ }
+ return nil
+ }
+ const Default_Test_Type int32 = 77
+
+ func (m *Test) GetLabel() string {
+ if m != nil && m.Label != nil {
+ return *m.Label
+ }
+ return ""
+ }
+
+ func (m *Test) GetType() int32 {
+ if m != nil && m.Type != nil {
+ return *m.Type
+ }
+ return Default_Test_Type
+ }
+
+ func (m *Test) GetOptionalgroup() *Test_OptionalGroup {
+ if m != nil {
+ return m.Optionalgroup
+ }
+ return nil
+ }
+
+ type Test_OptionalGroup struct {
+ RequiredField *string `protobuf:"bytes,5,req" json:"RequiredField,omitempty"`
+ }
+ func (m *Test_OptionalGroup) Reset() { *m = Test_OptionalGroup{} }
+ func (m *Test_OptionalGroup) String() string { return proto.CompactTextString(m) }
+
+ func (m *Test_OptionalGroup) GetRequiredField() string {
+ if m != nil && m.RequiredField != nil {
+ return *m.RequiredField
+ }
+ return ""
+ }
+
+ func (m *Test) GetNumber() int32 {
+ if x, ok := m.GetUnion().(*Test_Number); ok {
+ return x.Number
+ }
+ return 0
+ }
+
+ func (m *Test) GetName() string {
+ if x, ok := m.GetUnion().(*Test_Name); ok {
+ return x.Name
+ }
+ return ""
+ }
+
+ func init() {
+ proto.RegisterEnum("example.FOO", FOO_name, FOO_value)
+ }
+
+To create and play with a Test object:
+
+ package main
+
+ import (
+ "log"
+
+ "github.com/golang/protobuf/proto"
+ pb "./example.pb"
+ )
+
+ func main() {
+ test := &pb.Test{
+ Label: proto.String("hello"),
+ Type: proto.Int32(17),
+ Optionalgroup: &pb.Test_OptionalGroup{
+ RequiredField: proto.String("good bye"),
+ },
+ Union: &pb.Test_Name{"fred"},
+ }
+ data, err := proto.Marshal(test)
+ if err != nil {
+ log.Fatal("marshaling error: ", err)
+ }
+ newTest := &pb.Test{}
+ err = proto.Unmarshal(data, newTest)
+ if err != nil {
+ log.Fatal("unmarshaling error: ", err)
+ }
+ // Now test and newTest contain the same data.
+ if test.GetLabel() != newTest.GetLabel() {
+ log.Fatalf("data mismatch %q != %q", test.GetLabel(), newTest.GetLabel())
+ }
+ // Use a type switch to determine which oneof was set.
+ switch u := test.Union.(type) {
+ case *pb.Test_Number: // u.Number contains the number.
+ case *pb.Test_Name: // u.Name contains the string.
+ }
+ // etc.
+ }
+*/
+package proto
+
+import (
+ "encoding/json"
+ "fmt"
+ "log"
+ "reflect"
+ "sort"
+ "strconv"
+ "sync"
+)
+
+// Message is implemented by generated protocol buffer messages.
+type Message interface {
+ Reset()
+ String() string
+ ProtoMessage()
+}
+
+// Stats records allocation details about the protocol buffer encoders
+// and decoders. Useful for tuning the library itself.
+type Stats struct {
+ Emalloc uint64 // mallocs in encode
+ Dmalloc uint64 // mallocs in decode
+ Encode uint64 // number of encodes
+ Decode uint64 // number of decodes
+ Chit uint64 // number of cache hits
+ Cmiss uint64 // number of cache misses
+ Size uint64 // number of sizes
+}
+
+// Set to true to enable stats collection.
+const collectStats = false
+
+var stats Stats
+
+// GetStats returns a copy of the global Stats structure.
+func GetStats() Stats { return stats }
+
+// A Buffer is a buffer manager for marshaling and unmarshaling
+// protocol buffers. It may be reused between invocations to
+// reduce memory usage. It is not necessary to use a Buffer;
+// the global functions Marshal and Unmarshal create a
+// temporary Buffer and are fine for most applications.
+type Buffer struct {
+ buf []byte // encode/decode byte stream
+ index int // write point
+
+ // pools of basic types to amortize allocation.
+ bools []bool
+ uint32s []uint32
+ uint64s []uint64
+
+ // extra pools, only used with pointer_reflect.go
+ int32s []int32
+ int64s []int64
+ float32s []float32
+ float64s []float64
+}
+
+// NewBuffer allocates a new Buffer and initializes its internal data to
+// the contents of the argument slice.
+func NewBuffer(e []byte) *Buffer {
+ return &Buffer{buf: e}
+}
+
+// Reset resets the Buffer, ready for marshaling a new protocol buffer.
+func (p *Buffer) Reset() {
+ p.buf = p.buf[0:0] // for reading/writing
+ p.index = 0 // for reading
+}
+
+// SetBuf replaces the internal buffer with the slice,
+// ready for unmarshaling the contents of the slice.
+func (p *Buffer) SetBuf(s []byte) {
+ p.buf = s
+ p.index = 0
+}
+
+// Bytes returns the contents of the Buffer.
+func (p *Buffer) Bytes() []byte { return p.buf }
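+
+// Example (editorial sketch, not part of the upstream file): reusing a single
+// Buffer across many messages to reduce allocations. Buffer.Marshal is defined
+// elsewhere in this package (encode.go); msgs and send are hypothetical.
+//
+//	buf := proto.NewBuffer(nil)
+//	for _, m := range msgs {
+//		buf.Reset()
+//		if err := buf.Marshal(m); err != nil {
+//			log.Fatal(err)
+//		}
+//		send(buf.Bytes())
+//	}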
+
+/*
+ * Helper routines for simplifying the creation of optional fields of basic type.
+ */
+
+// Bool is a helper routine that allocates a new bool value
+// to store v and returns a pointer to it.
+func Bool(v bool) *bool {
+ return &v
+}
+
+// Int32 is a helper routine that allocates a new int32 value
+// to store v and returns a pointer to it.
+func Int32(v int32) *int32 {
+ return &v
+}
+
+// Int is a helper routine that allocates a new int32 value
+// to store v and returns a pointer to it, but unlike Int32
+// its argument value is an int.
+func Int(v int) *int32 {
+ p := new(int32)
+ *p = int32(v)
+ return p
+}
+
+// Int64 is a helper routine that allocates a new int64 value
+// to store v and returns a pointer to it.
+func Int64(v int64) *int64 {
+ return &v
+}
+
+// Float32 is a helper routine that allocates a new float32 value
+// to store v and returns a pointer to it.
+func Float32(v float32) *float32 {
+ return &v
+}
+
+// Float64 is a helper routine that allocates a new float64 value
+// to store v and returns a pointer to it.
+func Float64(v float64) *float64 {
+ return &v
+}
+
+// Uint32 is a helper routine that allocates a new uint32 value
+// to store v and returns a pointer to it.
+func Uint32(v uint32) *uint32 {
+ return &v
+}
+
+// Uint64 is a helper routine that allocates a new uint64 value
+// to store v and returns a pointer to it.
+func Uint64(v uint64) *uint64 {
+ return &v
+}
+
+// String is a helper routine that allocates a new string value
+// to store v and returns a pointer to it.
+func String(v string) *string {
+ return &v
+}
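+
+// For example (editorial note), the optional fields of the Test message from
+// the package documentation above are populated with these helpers rather than
+// with literals, while repeated fields remain plain slices:
+//
+//	t := &pb.Test{
+//		Label: proto.String("hello"),
+//		Type:  proto.Int32(17),
+//		Reps:  []int64{1, 2, 3},
+//	}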
+
+// EnumName is a helper function to simplify printing protocol buffer enums
+// by name. Given an enum map and a value, it returns a useful string.
+func EnumName(m map[int32]string, v int32) string {
+ s, ok := m[v]
+ if ok {
+ return s
+ }
+ return strconv.Itoa(int(v))
+}
+
+// UnmarshalJSONEnum is a helper function to simplify recovering enum int values
+// from their JSON-encoded representation. Given a map from the enum's symbolic
+// names to its int values, and a byte buffer containing the JSON-encoded
+// value, it returns an int32 that can be cast to the enum type by the caller.
+//
+// The function can deal with both JSON representations, numeric and symbolic.
+func UnmarshalJSONEnum(m map[string]int32, data []byte, enumName string) (int32, error) {
+ if data[0] == '"' {
+ // New style: enums are strings.
+ var repr string
+ if err := json.Unmarshal(data, &repr); err != nil {
+ return -1, err
+ }
+ val, ok := m[repr]
+ if !ok {
+ return 0, fmt.Errorf("unrecognized enum %s value %q", enumName, repr)
+ }
+ return val, nil
+ }
+ // Old style: enums are ints.
+ var val int32
+ if err := json.Unmarshal(data, &val); err != nil {
+ return 0, fmt.Errorf("cannot unmarshal %#q into enum %s", data, enumName)
+ }
+ return val, nil
+}
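+
+// For example (editorial sketch), using the FOO_value map from the package
+// documentation above, both representations decode to the same value:
+//
+//	v1, _ := proto.UnmarshalJSONEnum(FOO_value, []byte(`"X"`), "FOO") // symbolic form
+//	v2, _ := proto.UnmarshalJSONEnum(FOO_value, []byte(`17`), "FOO")  // numeric form
+//	// v1 == v2 == 17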
+
+// DebugPrint dumps the encoded data in b in a debugging format with a header
+// including the string s. Used in testing but made available for general debugging.
+func (p *Buffer) DebugPrint(s string, b []byte) {
+ var u uint64
+
+ obuf := p.buf
+ index := p.index
+ p.buf = b
+ p.index = 0
+ depth := 0
+
+ fmt.Printf("\n--- %s ---\n", s)
+
+out:
+ for {
+ for i := 0; i < depth; i++ {
+ fmt.Print(" ")
+ }
+
+ index := p.index
+ if index == len(p.buf) {
+ break
+ }
+
+ op, err := p.DecodeVarint()
+ if err != nil {
+ fmt.Printf("%3d: fetching op err %v\n", index, err)
+ break out
+ }
+ tag := op >> 3
+ wire := op & 7
+
+ switch wire {
+ default:
+ fmt.Printf("%3d: t=%3d unknown wire=%d\n",
+ index, tag, wire)
+ break out
+
+ case WireBytes:
+ var r []byte
+
+ r, err = p.DecodeRawBytes(false)
+ if err != nil {
+ break out
+ }
+ fmt.Printf("%3d: t=%3d bytes [%d]", index, tag, len(r))
+ if len(r) <= 6 {
+ for i := 0; i < len(r); i++ {
+ fmt.Printf(" %.2x", r[i])
+ }
+ } else {
+ for i := 0; i < 3; i++ {
+ fmt.Printf(" %.2x", r[i])
+ }
+ fmt.Printf(" ..")
+ for i := len(r) - 3; i < len(r); i++ {
+ fmt.Printf(" %.2x", r[i])
+ }
+ }
+ fmt.Printf("\n")
+
+ case WireFixed32:
+ u, err = p.DecodeFixed32()
+ if err != nil {
+ fmt.Printf("%3d: t=%3d fix32 err %v\n", index, tag, err)
+ break out
+ }
+ fmt.Printf("%3d: t=%3d fix32 %d\n", index, tag, u)
+
+ case WireFixed64:
+ u, err = p.DecodeFixed64()
+ if err != nil {
+ fmt.Printf("%3d: t=%3d fix64 err %v\n", index, tag, err)
+ break out
+ }
+ fmt.Printf("%3d: t=%3d fix64 %d\n", index, tag, u)
+
+ case WireVarint:
+ u, err = p.DecodeVarint()
+ if err != nil {
+ fmt.Printf("%3d: t=%3d varint err %v\n", index, tag, err)
+ break out
+ }
+ fmt.Printf("%3d: t=%3d varint %d\n", index, tag, u)
+
+ case WireStartGroup:
+ fmt.Printf("%3d: t=%3d start\n", index, tag)
+ depth++
+
+ case WireEndGroup:
+ depth--
+ fmt.Printf("%3d: t=%3d end\n", index, tag)
+ }
+ }
+
+ if depth != 0 {
+ fmt.Printf("%3d: start-end not balanced %d\n", p.index, depth)
+ }
+ fmt.Printf("\n")
+
+ p.buf = obuf
+ p.index = index
+}
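+
+// Example (editorial sketch): dumping a wire-format message while debugging.
+// data is a hypothetical []byte, e.g. the output of Marshal. Each printed line
+// shows the byte offset, the field tag and the decoded wire value.
+//
+//	proto.NewBuffer(nil).DebugPrint("my message", data)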
+
+// SetDefaults sets unset protocol buffer fields to their default values.
+// It only modifies fields that are both unset and have defined defaults.
+// It recursively sets default values in any non-nil sub-messages.
+func SetDefaults(pb Message) {
+ setDefaults(reflect.ValueOf(pb), true, false)
+}
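+
+// Example (editorial sketch), using the Test message from the package
+// documentation above, whose type field declares [default=77]:
+//
+//	t := &pb.Test{Label: proto.String("x")}
+//	proto.SetDefaults(t)
+//	// t.Type now points to 77; fields that were already set are untouched.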
+
+// v is a pointer to a struct.
+func setDefaults(v reflect.Value, recur, zeros bool) {
+ v = v.Elem()
+
+ defaultMu.RLock()
+ dm, ok := defaults[v.Type()]
+ defaultMu.RUnlock()
+ if !ok {
+ dm = buildDefaultMessage(v.Type())
+ defaultMu.Lock()
+ defaults[v.Type()] = dm
+ defaultMu.Unlock()
+ }
+
+ for _, sf := range dm.scalars {
+ f := v.Field(sf.index)
+ if !f.IsNil() {
+ // field already set
+ continue
+ }
+ dv := sf.value
+ if dv == nil && !zeros {
+ // no explicit default, and don't want to set zeros
+ continue
+ }
+ fptr := f.Addr().Interface() // **T
+ // TODO: Consider batching the allocations we do here.
+ switch sf.kind {
+ case reflect.Bool:
+ b := new(bool)
+ if dv != nil {
+ *b = dv.(bool)
+ }
+ *(fptr.(**bool)) = b
+ case reflect.Float32:
+ f := new(float32)
+ if dv != nil {
+ *f = dv.(float32)
+ }
+ *(fptr.(**float32)) = f
+ case reflect.Float64:
+ f := new(float64)
+ if dv != nil {
+ *f = dv.(float64)
+ }
+ *(fptr.(**float64)) = f
+ case reflect.Int32:
+ // might be an enum
+ if ft := f.Type(); ft != int32PtrType {
+ // enum
+ f.Set(reflect.New(ft.Elem()))
+ if dv != nil {
+ f.Elem().SetInt(int64(dv.(int32)))
+ }
+ } else {
+ // int32 field
+ i := new(int32)
+ if dv != nil {
+ *i = dv.(int32)
+ }
+ *(fptr.(**int32)) = i
+ }
+ case reflect.Int64:
+ i := new(int64)
+ if dv != nil {
+ *i = dv.(int64)
+ }
+ *(fptr.(**int64)) = i
+ case reflect.String:
+ s := new(string)
+ if dv != nil {
+ *s = dv.(string)
+ }
+ *(fptr.(**string)) = s
+ case reflect.Uint8:
+ // exceptional case: []byte
+ var b []byte
+ if dv != nil {
+ db := dv.([]byte)
+ b = make([]byte, len(db))
+ copy(b, db)
+ } else {
+ b = []byte{}
+ }
+ *(fptr.(*[]byte)) = b
+ case reflect.Uint32:
+ u := new(uint32)
+ if dv != nil {
+ *u = dv.(uint32)
+ }
+ *(fptr.(**uint32)) = u
+ case reflect.Uint64:
+ u := new(uint64)
+ if dv != nil {
+ *u = dv.(uint64)
+ }
+ *(fptr.(**uint64)) = u
+ default:
+ log.Printf("proto: can't set default for field %v (sf.kind=%v)", f, sf.kind)
+ }
+ }
+
+ for _, ni := range dm.nested {
+ f := v.Field(ni)
+ // f is *T or []*T or map[T]*T
+ switch f.Kind() {
+ case reflect.Ptr:
+ if f.IsNil() {
+ continue
+ }
+ setDefaults(f, recur, zeros)
+
+ case reflect.Slice:
+ for i := 0; i < f.Len(); i++ {
+ e := f.Index(i)
+ if e.IsNil() {
+ continue
+ }
+ setDefaults(e, recur, zeros)
+ }
+
+ case reflect.Map:
+ for _, k := range f.MapKeys() {
+ e := f.MapIndex(k)
+ if e.IsNil() {
+ continue
+ }
+ setDefaults(e, recur, zeros)
+ }
+ }
+ }
+}
+
+var (
+	// defaults maps a protocol buffer struct type to its defaultMessage, which
+	// records the scalar fields with proto-declared defaults and the indices of
+	// nested message fields.
+ defaultMu sync.RWMutex
+ defaults = make(map[reflect.Type]defaultMessage)
+
+ int32PtrType = reflect.TypeOf((*int32)(nil))
+)
+
+// defaultMessage represents information about the default values of a message.
+type defaultMessage struct {
+ scalars []scalarField
+ nested []int // struct field index of nested messages
+}
+
+type scalarField struct {
+ index int // struct field index
+ kind reflect.Kind // element type (the T in *T or []T)
+ value interface{} // the proto-declared default value, or nil
+}
+
+// t is a struct type.
+func buildDefaultMessage(t reflect.Type) (dm defaultMessage) {
+ sprop := GetProperties(t)
+ for _, prop := range sprop.Prop {
+ fi, ok := sprop.decoderTags.get(prop.Tag)
+ if !ok {
+ // XXX_unrecognized
+ continue
+ }
+ ft := t.Field(fi).Type
+
+ sf, nested, err := fieldDefault(ft, prop)
+ switch {
+ case err != nil:
+ log.Print(err)
+ case nested:
+ dm.nested = append(dm.nested, fi)
+ case sf != nil:
+ sf.index = fi
+ dm.scalars = append(dm.scalars, *sf)
+ }
+ }
+
+ return dm
+}
+
+// fieldDefault returns the scalarField for field type ft.
+// sf will be nil if the field cannot have a default.
+// nestedMessage will be true if this is a nested message.
+// Note that sf.index is not set on return.
+func fieldDefault(ft reflect.Type, prop *Properties) (sf *scalarField, nestedMessage bool, err error) {
+ var canHaveDefault bool
+ switch ft.Kind() {
+ case reflect.Ptr:
+ if ft.Elem().Kind() == reflect.Struct {
+ nestedMessage = true
+ } else {
+ canHaveDefault = true // proto2 scalar field
+ }
+
+ case reflect.Slice:
+ switch ft.Elem().Kind() {
+ case reflect.Ptr:
+ nestedMessage = true // repeated message
+ case reflect.Uint8:
+ canHaveDefault = true // bytes field
+ }
+
+ case reflect.Map:
+ if ft.Elem().Kind() == reflect.Ptr {
+ nestedMessage = true // map with message values
+ }
+ }
+
+ if !canHaveDefault {
+ if nestedMessage {
+ return nil, true, nil
+ }
+ return nil, false, nil
+ }
+
+ // We now know that ft is a pointer or slice.
+ sf = &scalarField{kind: ft.Elem().Kind()}
+
+ // scalar fields without defaults
+ if !prop.HasDefault {
+ return sf, false, nil
+ }
+
+ // a scalar field: either *T or []byte
+ switch ft.Elem().Kind() {
+ case reflect.Bool:
+ x, err := strconv.ParseBool(prop.Default)
+ if err != nil {
+ return nil, false, fmt.Errorf("proto: bad default bool %q: %v", prop.Default, err)
+ }
+ sf.value = x
+ case reflect.Float32:
+ x, err := strconv.ParseFloat(prop.Default, 32)
+ if err != nil {
+ return nil, false, fmt.Errorf("proto: bad default float32 %q: %v", prop.Default, err)
+ }
+ sf.value = float32(x)
+ case reflect.Float64:
+ x, err := strconv.ParseFloat(prop.Default, 64)
+ if err != nil {
+ return nil, false, fmt.Errorf("proto: bad default float64 %q: %v", prop.Default, err)
+ }
+ sf.value = x
+ case reflect.Int32:
+ x, err := strconv.ParseInt(prop.Default, 10, 32)
+ if err != nil {
+ return nil, false, fmt.Errorf("proto: bad default int32 %q: %v", prop.Default, err)
+ }
+ sf.value = int32(x)
+ case reflect.Int64:
+ x, err := strconv.ParseInt(prop.Default, 10, 64)
+ if err != nil {
+ return nil, false, fmt.Errorf("proto: bad default int64 %q: %v", prop.Default, err)
+ }
+ sf.value = x
+ case reflect.String:
+ sf.value = prop.Default
+ case reflect.Uint8:
+ // []byte (not *uint8)
+ sf.value = []byte(prop.Default)
+ case reflect.Uint32:
+ x, err := strconv.ParseUint(prop.Default, 10, 32)
+ if err != nil {
+ return nil, false, fmt.Errorf("proto: bad default uint32 %q: %v", prop.Default, err)
+ }
+ sf.value = uint32(x)
+ case reflect.Uint64:
+ x, err := strconv.ParseUint(prop.Default, 10, 64)
+ if err != nil {
+ return nil, false, fmt.Errorf("proto: bad default uint64 %q: %v", prop.Default, err)
+ }
+ sf.value = x
+ default:
+ return nil, false, fmt.Errorf("proto: unhandled def kind %v", ft.Elem().Kind())
+ }
+
+ return sf, false, nil
+}
+
+// Map fields may have key types of non-float scalars, strings and enums.
+// The easiest way to sort them in some deterministic order is to use fmt.
+// If this turns out to be inefficient we can always consider other options,
+// such as doing a Schwartzian transform.
+
+func mapKeys(vs []reflect.Value) sort.Interface {
+ s := mapKeySorter{
+ vs: vs,
+ // default Less function: textual comparison
+ less: func(a, b reflect.Value) bool {
+ return fmt.Sprint(a.Interface()) < fmt.Sprint(b.Interface())
+ },
+ }
+
+ // Type specialization per https://developers.google.com/protocol-buffers/docs/proto#maps;
+ // numeric keys are sorted numerically.
+ if len(vs) == 0 {
+ return s
+ }
+ switch vs[0].Kind() {
+ case reflect.Int32, reflect.Int64:
+ s.less = func(a, b reflect.Value) bool { return a.Int() < b.Int() }
+ case reflect.Uint32, reflect.Uint64:
+ s.less = func(a, b reflect.Value) bool { return a.Uint() < b.Uint() }
+ }
+
+ return s
+}
+
+type mapKeySorter struct {
+ vs []reflect.Value
+ less func(a, b reflect.Value) bool
+}
+
+func (s mapKeySorter) Len() int { return len(s.vs) }
+func (s mapKeySorter) Swap(i, j int) { s.vs[i], s.vs[j] = s.vs[j], s.vs[i] }
+func (s mapKeySorter) Less(i, j int) bool {
+ return s.less(s.vs[i], s.vs[j])
+}
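+
+// For example (editorial sketch), the encoders in this package obtain a
+// deterministic key order for a map field roughly like this, where mv is a
+// hypothetical reflect.Value of kind Map with int32 keys:
+//
+//	keys := mv.MapKeys()
+//	sort.Sort(mapKeys(keys)) // numeric order for int32 keys
+//	for _, k := range keys {
+//		encodeEntry(k, mv.MapIndex(k)) // encodeEntry is hypothetical
+//	}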
+
+// isProto3Zero reports whether v is a zero proto3 value.
+func isProto3Zero(v reflect.Value) bool {
+ switch v.Kind() {
+ case reflect.Bool:
+ return !v.Bool()
+ case reflect.Int32, reflect.Int64:
+ return v.Int() == 0
+ case reflect.Uint32, reflect.Uint64:
+ return v.Uint() == 0
+ case reflect.Float32, reflect.Float64:
+ return v.Float() == 0
+ case reflect.String:
+ return v.String() == ""
+ }
+ return false
+}
+
+// ProtoPackageIsVersion1 is referenced from generated protocol buffer files
+// to assert that that code is compatible with this version of the proto package.
+const ProtoPackageIsVersion1 = true
diff --git a/vendor/github.com/golang/protobuf/proto/message_set.go b/vendor/github.com/golang/protobuf/proto/message_set.go
new file mode 100644
index 00000000..e25e01e6
--- /dev/null
+++ b/vendor/github.com/golang/protobuf/proto/message_set.go
@@ -0,0 +1,280 @@
+// Go support for Protocol Buffers - Google's data interchange format
+//
+// Copyright 2010 The Go Authors. All rights reserved.
+// https://github.com/golang/protobuf
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following disclaimer
+// in the documentation and/or other materials provided with the
+// distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+package proto
+
+/*
+ * Support for message sets.
+ */
+
+import (
+ "bytes"
+ "encoding/json"
+ "errors"
+ "fmt"
+ "reflect"
+ "sort"
+)
+
+// errNoMessageTypeID occurs when a protocol buffer does not have a message type ID.
+// A message type ID is required for storing a protocol buffer in a message set.
+var errNoMessageTypeID = errors.New("proto does not have a message type ID")
+
+// The first two types (_MessageSet_Item and messageSet)
+// model what the protocol compiler produces for the following protocol message:
+// message MessageSet {
+// repeated group Item = 1 {
+// required int32 type_id = 2;
+// required string message = 3;
+// };
+// }
+// That is the MessageSet wire format. We can't use a proto to generate these
+// because that would introduce a circular dependency between it and this package.
+
+type _MessageSet_Item struct {
+ TypeId *int32 `protobuf:"varint,2,req,name=type_id"`
+ Message []byte `protobuf:"bytes,3,req,name=message"`
+}
+
+type messageSet struct {
+ Item []*_MessageSet_Item `protobuf:"group,1,rep"`
+ XXX_unrecognized []byte
+ // TODO: caching?
+}
+
+// Make sure messageSet is a Message.
+var _ Message = (*messageSet)(nil)
+
+// messageTypeIder is an interface satisfied by a protocol buffer type
+// that may be stored in a MessageSet.
+type messageTypeIder interface {
+ MessageTypeId() int32
+}
+
+func (ms *messageSet) find(pb Message) *_MessageSet_Item {
+ mti, ok := pb.(messageTypeIder)
+ if !ok {
+ return nil
+ }
+ id := mti.MessageTypeId()
+ for _, item := range ms.Item {
+ if *item.TypeId == id {
+ return item
+ }
+ }
+ return nil
+}
+
+func (ms *messageSet) Has(pb Message) bool {
+ if ms.find(pb) != nil {
+ return true
+ }
+ return false
+}
+
+func (ms *messageSet) Unmarshal(pb Message) error {
+ if item := ms.find(pb); item != nil {
+ return Unmarshal(item.Message, pb)
+ }
+ if _, ok := pb.(messageTypeIder); !ok {
+ return errNoMessageTypeID
+ }
+ return nil // TODO: return error instead?
+}
+
+func (ms *messageSet) Marshal(pb Message) error {
+ msg, err := Marshal(pb)
+ if err != nil {
+ return err
+ }
+ if item := ms.find(pb); item != nil {
+ // reuse existing item
+ item.Message = msg
+ return nil
+ }
+
+ mti, ok := pb.(messageTypeIder)
+ if !ok {
+ return errNoMessageTypeID
+ }
+
+ mtid := mti.MessageTypeId()
+ ms.Item = append(ms.Item, &_MessageSet_Item{
+ TypeId: &mtid,
+ Message: msg,
+ })
+ return nil
+}
+
+func (ms *messageSet) Reset() { *ms = messageSet{} }
+func (ms *messageSet) String() string { return CompactTextString(ms) }
+func (*messageSet) ProtoMessage() {}
+
+// Support for the message_set_wire_format message option.
+
+func skipVarint(buf []byte) []byte {
+ i := 0
+ for ; buf[i]&0x80 != 0; i++ {
+ }
+ return buf[i+1:]
+}
+
+// MarshalMessageSet encodes the extension map represented by m in the message set wire format.
+// It is called by generated Marshal methods on protocol buffer messages with the message_set_wire_format option.
+func MarshalMessageSet(m map[int32]Extension) ([]byte, error) {
+ if err := encodeExtensionMap(m); err != nil {
+ return nil, err
+ }
+
+ // Sort extension IDs to provide a deterministic encoding.
+ // See also enc_map in encode.go.
+ ids := make([]int, 0, len(m))
+ for id := range m {
+ ids = append(ids, int(id))
+ }
+ sort.Ints(ids)
+
+ ms := &messageSet{Item: make([]*_MessageSet_Item, 0, len(m))}
+ for _, id := range ids {
+ e := m[int32(id)]
+ // Remove the wire type and field number varint, as well as the length varint.
+ msg := skipVarint(skipVarint(e.enc))
+
+ ms.Item = append(ms.Item, &_MessageSet_Item{
+ TypeId: Int32(int32(id)),
+ Message: msg,
+ })
+ }
+ return Marshal(ms)
+}
+
+// UnmarshalMessageSet decodes the extension map encoded in buf in the message set wire format.
+// It is called by generated Unmarshal methods on protocol buffer messages with the message_set_wire_format option.
+func UnmarshalMessageSet(buf []byte, m map[int32]Extension) error {
+ ms := new(messageSet)
+ if err := Unmarshal(buf, ms); err != nil {
+ return err
+ }
+ for _, item := range ms.Item {
+ id := *item.TypeId
+ msg := item.Message
+
+ // Restore wire type and field number varint, plus length varint.
+ // Be careful to preserve duplicate items.
+ b := EncodeVarint(uint64(id)<<3 | WireBytes)
+ if ext, ok := m[id]; ok {
+ // Existing data; rip off the tag and length varint
+ // so we join the new data correctly.
+ // We can assume that ext.enc is set because we are unmarshaling.
+ o := ext.enc[len(b):] // skip wire type and field number
+ _, n := DecodeVarint(o) // calculate length of length varint
+ o = o[n:] // skip length varint
+ msg = append(o, msg...) // join old data and new data
+ }
+ b = append(b, EncodeVarint(uint64(len(msg)))...)
+ b = append(b, msg...)
+
+ m[id] = Extension{enc: b}
+ }
+ return nil
+}
+
+// MarshalMessageSetJSON encodes the extension map represented by m in JSON format.
+// It is called by generated MarshalJSON methods on protocol buffer messages with the message_set_wire_format option.
+func MarshalMessageSetJSON(m map[int32]Extension) ([]byte, error) {
+ var b bytes.Buffer
+ b.WriteByte('{')
+
+ // Process the map in key order for deterministic output.
+ ids := make([]int32, 0, len(m))
+ for id := range m {
+ ids = append(ids, id)
+ }
+ sort.Sort(int32Slice(ids)) // int32Slice defined in text.go
+
+ for i, id := range ids {
+ ext := m[id]
+ if i > 0 {
+ b.WriteByte(',')
+ }
+
+ msd, ok := messageSetMap[id]
+ if !ok {
+ // Unknown type; we can't render it, so skip it.
+ continue
+ }
+ fmt.Fprintf(&b, `"[%s]":`, msd.name)
+
+ x := ext.value
+ if x == nil {
+ x = reflect.New(msd.t.Elem()).Interface()
+ if err := Unmarshal(ext.enc, x.(Message)); err != nil {
+ return nil, err
+ }
+ }
+ d, err := json.Marshal(x)
+ if err != nil {
+ return nil, err
+ }
+ b.Write(d)
+ }
+ b.WriteByte('}')
+ return b.Bytes(), nil
+}
+
+// UnmarshalMessageSetJSON decodes the extension map encoded in buf in JSON format.
+// It is called by generated UnmarshalJSON methods on protocol buffer messages with the message_set_wire_format option.
+func UnmarshalMessageSetJSON(buf []byte, m map[int32]Extension) error {
+ // Common-case fast path.
+ if len(buf) == 0 || bytes.Equal(buf, []byte("{}")) {
+ return nil
+ }
+
+ // This is fairly tricky, and it's not clear that it is needed.
+ return errors.New("TODO: UnmarshalMessageSetJSON not yet implemented")
+}
+
+// A global registry of types that can be used in a MessageSet.
+
+var messageSetMap = make(map[int32]messageSetDesc)
+
+type messageSetDesc struct {
+ t reflect.Type // pointer to struct
+ name string
+}
+
+// RegisterMessageSetType is called from the generated code.
+func RegisterMessageSetType(m Message, fieldNum int32, name string) {
+ messageSetMap[fieldNum] = messageSetDesc{
+ t: reflect.TypeOf(m),
+ name: name,
+ }
+}
diff --git a/vendor/github.com/golang/protobuf/proto/pointer_reflect.go b/vendor/github.com/golang/protobuf/proto/pointer_reflect.go
new file mode 100644
index 00000000..749919d2
--- /dev/null
+++ b/vendor/github.com/golang/protobuf/proto/pointer_reflect.go
@@ -0,0 +1,479 @@
+// Go support for Protocol Buffers - Google's data interchange format
+//
+// Copyright 2012 The Go Authors. All rights reserved.
+// https://github.com/golang/protobuf
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following disclaimer
+// in the documentation and/or other materials provided with the
+// distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+// +build appengine
+
+// This file contains an implementation of proto field accesses using package reflect.
+// It is slower than the code in pointer_unsafe.go but it avoids package unsafe and can
+// be used on App Engine.
+
+package proto
+
+import (
+ "math"
+ "reflect"
+)
+
+// A structPointer is a pointer to a struct.
+type structPointer struct {
+ v reflect.Value
+}
+
+// toStructPointer returns a structPointer equivalent to the given reflect value.
+// The reflect value must itself be a pointer to a struct.
+func toStructPointer(v reflect.Value) structPointer {
+ return structPointer{v}
+}
+
+// IsNil reports whether p is nil.
+func structPointer_IsNil(p structPointer) bool {
+ return p.v.IsNil()
+}
+
+// Interface returns the struct pointer as an interface value.
+func structPointer_Interface(p structPointer, _ reflect.Type) interface{} {
+ return p.v.Interface()
+}
+
+// A field identifies a field in a struct, accessible from a structPointer.
+// In this implementation, a field is identified by the sequence of field indices
+// passed to reflect's FieldByIndex.
+type field []int
+
+// toField returns a field equivalent to the given reflect field.
+func toField(f *reflect.StructField) field {
+ return f.Index
+}
+
+// invalidField is an invalid field identifier.
+var invalidField = field(nil)
+
+// IsValid reports whether the field identifier is valid.
+func (f field) IsValid() bool { return f != nil }
+
+// field returns the given field in the struct as a reflect value.
+func structPointer_field(p structPointer, f field) reflect.Value {
+ // Special case: an extension map entry with a value of type T
+ // passes a *T to the struct-handling code with a zero field,
+ // expecting that it will be treated as equivalent to *struct{ X T },
+ // which has the same memory layout. We have to handle that case
+ // specially, because reflect will panic if we call FieldByIndex on a
+ // non-struct.
+ if f == nil {
+ return p.v.Elem()
+ }
+
+ return p.v.Elem().FieldByIndex(f)
+}
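+
+// For illustration (editorial sketch): the special case above is what
+// encodeExtensionMap in extensions.go relies on. For an extension whose
+// ExtensionType is *int32 it builds
+//
+//	x := reflect.New(reflect.TypeOf((*int32)(nil))) // a **int32
+//	p := toStructPointer(x)                         // treated as *struct{ X *int32 }
+//
+// and the encoder then addresses the value with a nil (zero) field, so
+// structPointer_field(p, nil) returns x.Elem(), the *int32 itself.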
+
+// ifield returns the given field in the struct as an interface value.
+func structPointer_ifield(p structPointer, f field) interface{} {
+ return structPointer_field(p, f).Addr().Interface()
+}
+
+// Bytes returns the address of a []byte field in the struct.
+func structPointer_Bytes(p structPointer, f field) *[]byte {
+ return structPointer_ifield(p, f).(*[]byte)
+}
+
+// BytesSlice returns the address of a [][]byte field in the struct.
+func structPointer_BytesSlice(p structPointer, f field) *[][]byte {
+ return structPointer_ifield(p, f).(*[][]byte)
+}
+
+// Bool returns the address of a *bool field in the struct.
+func structPointer_Bool(p structPointer, f field) **bool {
+ return structPointer_ifield(p, f).(**bool)
+}
+
+// BoolVal returns the address of a bool field in the struct.
+func structPointer_BoolVal(p structPointer, f field) *bool {
+ return structPointer_ifield(p, f).(*bool)
+}
+
+// BoolSlice returns the address of a []bool field in the struct.
+func structPointer_BoolSlice(p structPointer, f field) *[]bool {
+ return structPointer_ifield(p, f).(*[]bool)
+}
+
+// String returns the address of a *string field in the struct.
+func structPointer_String(p structPointer, f field) **string {
+ return structPointer_ifield(p, f).(**string)
+}
+
+// StringVal returns the address of a string field in the struct.
+func structPointer_StringVal(p structPointer, f field) *string {
+ return structPointer_ifield(p, f).(*string)
+}
+
+// StringSlice returns the address of a []string field in the struct.
+func structPointer_StringSlice(p structPointer, f field) *[]string {
+ return structPointer_ifield(p, f).(*[]string)
+}
+
+// ExtMap returns the address of an extension map field in the struct.
+func structPointer_ExtMap(p structPointer, f field) *map[int32]Extension {
+ return structPointer_ifield(p, f).(*map[int32]Extension)
+}
+
+// NewAt returns the reflect.Value for a pointer to a field in the struct.
+func structPointer_NewAt(p structPointer, f field, typ reflect.Type) reflect.Value {
+ return structPointer_field(p, f).Addr()
+}
+
+// SetStructPointer writes a *struct field in the struct.
+func structPointer_SetStructPointer(p structPointer, f field, q structPointer) {
+ structPointer_field(p, f).Set(q.v)
+}
+
+// GetStructPointer reads a *struct field in the struct.
+func structPointer_GetStructPointer(p structPointer, f field) structPointer {
+ return structPointer{structPointer_field(p, f)}
+}
+
+// StructPointerSlice returns the address of a []*struct field in the struct.
+func structPointer_StructPointerSlice(p structPointer, f field) structPointerSlice {
+ return structPointerSlice{structPointer_field(p, f)}
+}
+
+// A structPointerSlice represents the address of a slice of pointers to structs
+// (themselves messages or groups). That is, v.Type() is *[]*struct{...}.
+type structPointerSlice struct {
+ v reflect.Value
+}
+
+func (p structPointerSlice) Len() int { return p.v.Len() }
+func (p structPointerSlice) Index(i int) structPointer { return structPointer{p.v.Index(i)} }
+func (p structPointerSlice) Append(q structPointer) {
+ p.v.Set(reflect.Append(p.v, q.v))
+}
+
+var (
+ int32Type = reflect.TypeOf(int32(0))
+ uint32Type = reflect.TypeOf(uint32(0))
+ float32Type = reflect.TypeOf(float32(0))
+ int64Type = reflect.TypeOf(int64(0))
+ uint64Type = reflect.TypeOf(uint64(0))
+ float64Type = reflect.TypeOf(float64(0))
+)
+
+// A word32 represents a field of type *int32, *uint32, *float32, or *enum.
+// That is, v.Type() is *int32, *uint32, *float32, or *enum and v is assignable.
+type word32 struct {
+ v reflect.Value
+}
+
+// IsNil reports whether p is nil.
+func word32_IsNil(p word32) bool {
+ return p.v.IsNil()
+}
+
+// Set sets p to point at a newly allocated word with bits set to x.
+func word32_Set(p word32, o *Buffer, x uint32) {
+ t := p.v.Type().Elem()
+ switch t {
+ case int32Type:
+ if len(o.int32s) == 0 {
+ o.int32s = make([]int32, uint32PoolSize)
+ }
+ o.int32s[0] = int32(x)
+ p.v.Set(reflect.ValueOf(&o.int32s[0]))
+ o.int32s = o.int32s[1:]
+ return
+ case uint32Type:
+ if len(o.uint32s) == 0 {
+ o.uint32s = make([]uint32, uint32PoolSize)
+ }
+ o.uint32s[0] = x
+ p.v.Set(reflect.ValueOf(&o.uint32s[0]))
+ o.uint32s = o.uint32s[1:]
+ return
+ case float32Type:
+ if len(o.float32s) == 0 {
+ o.float32s = make([]float32, uint32PoolSize)
+ }
+ o.float32s[0] = math.Float32frombits(x)
+ p.v.Set(reflect.ValueOf(&o.float32s[0]))
+ o.float32s = o.float32s[1:]
+ return
+ }
+
+ // must be enum
+ p.v.Set(reflect.New(t))
+ p.v.Elem().SetInt(int64(int32(x)))
+}
+
+// Get gets the bits pointed at by p, as a uint32.
+func word32_Get(p word32) uint32 {
+ elem := p.v.Elem()
+ switch elem.Kind() {
+ case reflect.Int32:
+ return uint32(elem.Int())
+ case reflect.Uint32:
+ return uint32(elem.Uint())
+ case reflect.Float32:
+ return math.Float32bits(float32(elem.Float()))
+ }
+ panic("unreachable")
+}
+
+// Word32 returns a reference to a *int32, *uint32, *float32, or *enum field in the struct.
+func structPointer_Word32(p structPointer, f field) word32 {
+ return word32{structPointer_field(p, f)}
+}
+
+// A word32Val represents a field of type int32, uint32, float32, or enum.
+// That is, v.Type() is int32, uint32, float32, or enum and v is assignable.
+type word32Val struct {
+ v reflect.Value
+}
+
+// Set sets *p to x.
+func word32Val_Set(p word32Val, x uint32) {
+ switch p.v.Type() {
+ case int32Type:
+ p.v.SetInt(int64(x))
+ return
+ case uint32Type:
+ p.v.SetUint(uint64(x))
+ return
+ case float32Type:
+ p.v.SetFloat(float64(math.Float32frombits(x)))
+ return
+ }
+
+ // must be enum
+ p.v.SetInt(int64(int32(x)))
+}
+
+// Get gets the bits pointed at by p, as a uint32.
+func word32Val_Get(p word32Val) uint32 {
+ elem := p.v
+ switch elem.Kind() {
+ case reflect.Int32:
+ return uint32(elem.Int())
+ case reflect.Uint32:
+ return uint32(elem.Uint())
+ case reflect.Float32:
+ return math.Float32bits(float32(elem.Float()))
+ }
+ panic("unreachable")
+}
+
+// Word32Val returns a reference to an int32, uint32, float32, or enum field in the struct.
+func structPointer_Word32Val(p structPointer, f field) word32Val {
+ return word32Val{structPointer_field(p, f)}
+}
+
+// A word32Slice is a slice of 32-bit values.
+// That is, v.Type() is []int32, []uint32, []float32, or []enum.
+type word32Slice struct {
+ v reflect.Value
+}
+
+func (p word32Slice) Append(x uint32) {
+ n, m := p.v.Len(), p.v.Cap()
+ if n < m {
+ p.v.SetLen(n + 1)
+ } else {
+ t := p.v.Type().Elem()
+ p.v.Set(reflect.Append(p.v, reflect.Zero(t)))
+ }
+ elem := p.v.Index(n)
+ switch elem.Kind() {
+ case reflect.Int32:
+ elem.SetInt(int64(int32(x)))
+ case reflect.Uint32:
+ elem.SetUint(uint64(x))
+ case reflect.Float32:
+ elem.SetFloat(float64(math.Float32frombits(x)))
+ }
+}
+
+func (p word32Slice) Len() int {
+ return p.v.Len()
+}
+
+func (p word32Slice) Index(i int) uint32 {
+ elem := p.v.Index(i)
+ switch elem.Kind() {
+ case reflect.Int32:
+ return uint32(elem.Int())
+ case reflect.Uint32:
+ return uint32(elem.Uint())
+ case reflect.Float32:
+ return math.Float32bits(float32(elem.Float()))
+ }
+ panic("unreachable")
+}
+
+// Word32Slice returns a reference to a []int32, []uint32, []float32, or []enum field in the struct.
+func structPointer_Word32Slice(p structPointer, f field) word32Slice {
+ return word32Slice{structPointer_field(p, f)}
+}
+
+// word64 is like word32 but for 64-bit values.
+type word64 struct {
+ v reflect.Value
+}
+
+func word64_Set(p word64, o *Buffer, x uint64) {
+ t := p.v.Type().Elem()
+ switch t {
+ case int64Type:
+ if len(o.int64s) == 0 {
+ o.int64s = make([]int64, uint64PoolSize)
+ }
+ o.int64s[0] = int64(x)
+ p.v.Set(reflect.ValueOf(&o.int64s[0]))
+ o.int64s = o.int64s[1:]
+ return
+ case uint64Type:
+ if len(o.uint64s) == 0 {
+ o.uint64s = make([]uint64, uint64PoolSize)
+ }
+ o.uint64s[0] = x
+ p.v.Set(reflect.ValueOf(&o.uint64s[0]))
+ o.uint64s = o.uint64s[1:]
+ return
+ case float64Type:
+ if len(o.float64s) == 0 {
+ o.float64s = make([]float64, uint64PoolSize)
+ }
+ o.float64s[0] = math.Float64frombits(x)
+ p.v.Set(reflect.ValueOf(&o.float64s[0]))
+ o.float64s = o.float64s[1:]
+ return
+ }
+ panic("unreachable")
+}
+
+func word64_IsNil(p word64) bool {
+ return p.v.IsNil()
+}
+
+func word64_Get(p word64) uint64 {
+ elem := p.v.Elem()
+ switch elem.Kind() {
+ case reflect.Int64:
+ return uint64(elem.Int())
+ case reflect.Uint64:
+ return elem.Uint()
+ case reflect.Float64:
+ return math.Float64bits(elem.Float())
+ }
+ panic("unreachable")
+}
+
+func structPointer_Word64(p structPointer, f field) word64 {
+ return word64{structPointer_field(p, f)}
+}
+
+// word64Val is like word32Val but for 64-bit values.
+type word64Val struct {
+ v reflect.Value
+}
+
+func word64Val_Set(p word64Val, o *Buffer, x uint64) {
+ switch p.v.Type() {
+ case int64Type:
+ p.v.SetInt(int64(x))
+ return
+ case uint64Type:
+ p.v.SetUint(x)
+ return
+ case float64Type:
+ p.v.SetFloat(math.Float64frombits(x))
+ return
+ }
+ panic("unreachable")
+}
+
+func word64Val_Get(p word64Val) uint64 {
+ elem := p.v
+ switch elem.Kind() {
+ case reflect.Int64:
+ return uint64(elem.Int())
+ case reflect.Uint64:
+ return elem.Uint()
+ case reflect.Float64:
+ return math.Float64bits(elem.Float())
+ }
+ panic("unreachable")
+}
+
+func structPointer_Word64Val(p structPointer, f field) word64Val {
+ return word64Val{structPointer_field(p, f)}
+}
+
+type word64Slice struct {
+ v reflect.Value
+}
+
+func (p word64Slice) Append(x uint64) {
+ n, m := p.v.Len(), p.v.Cap()
+ if n < m {
+ p.v.SetLen(n + 1)
+ } else {
+ t := p.v.Type().Elem()
+ p.v.Set(reflect.Append(p.v, reflect.Zero(t)))
+ }
+ elem := p.v.Index(n)
+ switch elem.Kind() {
+ case reflect.Int64:
+ elem.SetInt(int64(int64(x)))
+ case reflect.Uint64:
+ elem.SetUint(uint64(x))
+ case reflect.Float64:
+ elem.SetFloat(float64(math.Float64frombits(x)))
+ }
+}
+
+func (p word64Slice) Len() int {
+ return p.v.Len()
+}
+
+func (p word64Slice) Index(i int) uint64 {
+ elem := p.v.Index(i)
+ switch elem.Kind() {
+ case reflect.Int64:
+ return uint64(elem.Int())
+ case reflect.Uint64:
+ return uint64(elem.Uint())
+ case reflect.Float64:
+ return math.Float64bits(float64(elem.Float()))
+ }
+ panic("unreachable")
+}
+
+func structPointer_Word64Slice(p structPointer, f field) word64Slice {
+ return word64Slice{structPointer_field(p, f)}
+}
diff --git a/vendor/github.com/golang/protobuf/proto/pointer_unsafe.go b/vendor/github.com/golang/protobuf/proto/pointer_unsafe.go
new file mode 100644
index 00000000..e9be0fe9
--- /dev/null
+++ b/vendor/github.com/golang/protobuf/proto/pointer_unsafe.go
@@ -0,0 +1,266 @@
+// Go support for Protocol Buffers - Google's data interchange format
+//
+// Copyright 2012 The Go Authors. All rights reserved.
+// https://github.com/golang/protobuf
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following disclaimer
+// in the documentation and/or other materials provided with the
+// distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+// +build !appengine
+
+// This file contains the implementation of the proto field accesses using package unsafe.
+
+package proto
+
+import (
+ "reflect"
+ "unsafe"
+)
+
+// NOTE: These type_Foo functions would more idiomatically be methods,
+// but Go does not allow methods on pointer types, and we must preserve
+// some pointer type for the garbage collector. We use these
+// funcs with clunky names as our poor approximation to methods.
+//
+// An alternative would be
+// type structPointer struct { p unsafe.Pointer }
+// but that does not registerize as well.
+
+// A structPointer is a pointer to a struct.
+type structPointer unsafe.Pointer
+
+// toStructPointer returns a structPointer equivalent to the given reflect value.
+func toStructPointer(v reflect.Value) structPointer {
+ return structPointer(unsafe.Pointer(v.Pointer()))
+}
+
+// IsNil reports whether p is nil.
+func structPointer_IsNil(p structPointer) bool {
+ return p == nil
+}
+
+// Interface returns the struct pointer, assumed to have element type t,
+// as an interface value.
+func structPointer_Interface(p structPointer, t reflect.Type) interface{} {
+ return reflect.NewAt(t, unsafe.Pointer(p)).Interface()
+}
+
+// A field identifies a field in a struct, accessible from a structPointer.
+// In this implementation, a field is identified by its byte offset from the start of the struct.
+type field uintptr
+
+// toField returns a field equivalent to the given reflect field.
+func toField(f *reflect.StructField) field {
+ return field(f.Offset)
+}
+
+// invalidField is an invalid field identifier.
+const invalidField = ^field(0)
+
+// IsValid reports whether the field identifier is valid.
+func (f field) IsValid() bool {
+ return f != ^field(0)
+}
+
+// Bytes returns the address of a []byte field in the struct.
+func structPointer_Bytes(p structPointer, f field) *[]byte {
+ return (*[]byte)(unsafe.Pointer(uintptr(p) + uintptr(f)))
+}
+
+// BytesSlice returns the address of a [][]byte field in the struct.
+func structPointer_BytesSlice(p structPointer, f field) *[][]byte {
+ return (*[][]byte)(unsafe.Pointer(uintptr(p) + uintptr(f)))
+}
+
+// Bool returns the address of a *bool field in the struct.
+func structPointer_Bool(p structPointer, f field) **bool {
+ return (**bool)(unsafe.Pointer(uintptr(p) + uintptr(f)))
+}
+
+// BoolVal returns the address of a bool field in the struct.
+func structPointer_BoolVal(p structPointer, f field) *bool {
+ return (*bool)(unsafe.Pointer(uintptr(p) + uintptr(f)))
+}
+
+// BoolSlice returns the address of a []bool field in the struct.
+func structPointer_BoolSlice(p structPointer, f field) *[]bool {
+ return (*[]bool)(unsafe.Pointer(uintptr(p) + uintptr(f)))
+}
+
+// String returns the address of a *string field in the struct.
+func structPointer_String(p structPointer, f field) **string {
+ return (**string)(unsafe.Pointer(uintptr(p) + uintptr(f)))
+}
+
+// StringVal returns the address of a string field in the struct.
+func structPointer_StringVal(p structPointer, f field) *string {
+ return (*string)(unsafe.Pointer(uintptr(p) + uintptr(f)))
+}
+
+// StringSlice returns the address of a []string field in the struct.
+func structPointer_StringSlice(p structPointer, f field) *[]string {
+ return (*[]string)(unsafe.Pointer(uintptr(p) + uintptr(f)))
+}
+
+// ExtMap returns the address of an extension map field in the struct.
+func structPointer_ExtMap(p structPointer, f field) *map[int32]Extension {
+ return (*map[int32]Extension)(unsafe.Pointer(uintptr(p) + uintptr(f)))
+}
+
+// NewAt returns the reflect.Value for a pointer to a field in the struct.
+func structPointer_NewAt(p structPointer, f field, typ reflect.Type) reflect.Value {
+ return reflect.NewAt(typ, unsafe.Pointer(uintptr(p)+uintptr(f)))
+}
+
+// SetStructPointer writes a *struct field in the struct.
+func structPointer_SetStructPointer(p structPointer, f field, q structPointer) {
+ *(*structPointer)(unsafe.Pointer(uintptr(p) + uintptr(f))) = q
+}
+
+// GetStructPointer reads a *struct field in the struct.
+func structPointer_GetStructPointer(p structPointer, f field) structPointer {
+ return *(*structPointer)(unsafe.Pointer(uintptr(p) + uintptr(f)))
+}
+
+// StructPointerSlice returns the address of a []*struct field in the struct.
+func structPointer_StructPointerSlice(p structPointer, f field) *structPointerSlice {
+ return (*structPointerSlice)(unsafe.Pointer(uintptr(p) + uintptr(f)))
+}
+
+// A structPointerSlice represents a slice of pointers to structs (themselves submessages or groups).
+type structPointerSlice []structPointer
+
+func (v *structPointerSlice) Len() int { return len(*v) }
+func (v *structPointerSlice) Index(i int) structPointer { return (*v)[i] }
+func (v *structPointerSlice) Append(p structPointer) { *v = append(*v, p) }
+
+// A word32 is the address of a "pointer to 32-bit value" field.
+type word32 **uint32
+
+// IsNil reports whether *p is nil.
+func word32_IsNil(p word32) bool {
+ return *p == nil
+}
+
+// Set sets *p to point at a newly allocated word set to x.
+func word32_Set(p word32, o *Buffer, x uint32) {
+ if len(o.uint32s) == 0 {
+ o.uint32s = make([]uint32, uint32PoolSize)
+ }
+ o.uint32s[0] = x
+ *p = &o.uint32s[0]
+ o.uint32s = o.uint32s[1:]
+}
+
+// Get gets the value pointed at by *p.
+func word32_Get(p word32) uint32 {
+ return **p
+}
+
+// Word32 returns the address of a *int32, *uint32, *float32, or *enum field in the struct.
+func structPointer_Word32(p structPointer, f field) word32 {
+ return word32((**uint32)(unsafe.Pointer(uintptr(p) + uintptr(f))))
+}
+
+// A word32Val is the address of a 32-bit value field.
+type word32Val *uint32
+
+// Set sets *p to x.
+func word32Val_Set(p word32Val, x uint32) {
+ *p = x
+}
+
+// Get gets the value pointed at by p.
+func word32Val_Get(p word32Val) uint32 {
+ return *p
+}
+
+// Word32Val returns the address of a *int32, *uint32, *float32, or *enum field in the struct.
+func structPointer_Word32Val(p structPointer, f field) word32Val {
+ return word32Val((*uint32)(unsafe.Pointer(uintptr(p) + uintptr(f))))
+}
+
+// A word32Slice is a slice of 32-bit values.
+type word32Slice []uint32
+
+func (v *word32Slice) Append(x uint32) { *v = append(*v, x) }
+func (v *word32Slice) Len() int { return len(*v) }
+func (v *word32Slice) Index(i int) uint32 { return (*v)[i] }
+
+// Word32Slice returns the address of a []int32, []uint32, []float32, or []enum field in the struct.
+func structPointer_Word32Slice(p structPointer, f field) *word32Slice {
+ return (*word32Slice)(unsafe.Pointer(uintptr(p) + uintptr(f)))
+}
+
+// word64 is like word32 but for 64-bit values.
+type word64 **uint64
+
+func word64_Set(p word64, o *Buffer, x uint64) {
+ if len(o.uint64s) == 0 {
+ o.uint64s = make([]uint64, uint64PoolSize)
+ }
+ o.uint64s[0] = x
+ *p = &o.uint64s[0]
+ o.uint64s = o.uint64s[1:]
+}
+
+func word64_IsNil(p word64) bool {
+ return *p == nil
+}
+
+func word64_Get(p word64) uint64 {
+ return **p
+}
+
+func structPointer_Word64(p structPointer, f field) word64 {
+ return word64((**uint64)(unsafe.Pointer(uintptr(p) + uintptr(f))))
+}
+
+// word64Val is like word32Val but for 64-bit values.
+type word64Val *uint64
+
+func word64Val_Set(p word64Val, o *Buffer, x uint64) {
+ *p = x
+}
+
+func word64Val_Get(p word64Val) uint64 {
+ return *p
+}
+
+func structPointer_Word64Val(p structPointer, f field) word64Val {
+ return word64Val((*uint64)(unsafe.Pointer(uintptr(p) + uintptr(f))))
+}
+
+// word64Slice is like word32Slice but for 64-bit values.
+type word64Slice []uint64
+
+func (v *word64Slice) Append(x uint64) { *v = append(*v, x) }
+func (v *word64Slice) Len() int { return len(*v) }
+func (v *word64Slice) Index(i int) uint64 { return (*v)[i] }
+
+func structPointer_Word64Slice(p structPointer, f field) *word64Slice {
+ return (*word64Slice)(unsafe.Pointer(uintptr(p) + uintptr(f)))
+}
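The accessors in pointer_unsafe.go above all follow the same pattern: take the struct's base address, add the field's byte offset, and reinterpret the result as a typed pointer. A minimal standalone sketch of that pattern (not part of the vendored file; the example type and its field are hypothetical):

package main

import (
	"fmt"
	"reflect"
	"unsafe"
)

type example struct {
	A uint32
}

func main() {
	v := example{A: 7}
	base := unsafe.Pointer(&v)

	// Obtain the field's byte offset via reflection, as toField does above.
	f, _ := reflect.TypeOf(v).FieldByName("A")

	// base + offset, reinterpreted as *uint32; the same arithmetic that
	// structPointer_Word32Val performs for a 32-bit value field.
	ap := (*uint32)(unsafe.Pointer(uintptr(base) + f.Offset))
	*ap = 42
	fmt.Println(v.A) // prints 42
}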
diff --git a/vendor/github.com/golang/protobuf/proto/properties.go b/vendor/github.com/golang/protobuf/proto/properties.go
new file mode 100644
index 00000000..d4531c05
--- /dev/null
+++ b/vendor/github.com/golang/protobuf/proto/properties.go
@@ -0,0 +1,842 @@
+// Go support for Protocol Buffers - Google's data interchange format
+//
+// Copyright 2010 The Go Authors. All rights reserved.
+// https://github.com/golang/protobuf
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following disclaimer
+// in the documentation and/or other materials provided with the
+// distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+package proto
+
+/*
+ * Routines for encoding data into the wire format for protocol buffers.
+ */
+
+import (
+ "fmt"
+ "log"
+ "os"
+ "reflect"
+ "sort"
+ "strconv"
+ "strings"
+ "sync"
+)
+
+const debug bool = false
+
+// Constants that identify the encoding of a value on the wire.
+const (
+ WireVarint = 0
+ WireFixed64 = 1
+ WireBytes = 2
+ WireStartGroup = 3
+ WireEndGroup = 4
+ WireFixed32 = 5
+)
+
+const startSize = 10 // initial slice/string sizes
+
+// Encoders are defined in encode.go
+// An encoder outputs the full representation of a field, including its
+// tag and encoder type.
+type encoder func(p *Buffer, prop *Properties, base structPointer) error
+
+// A valueEncoder encodes a single integer in a particular encoding.
+type valueEncoder func(o *Buffer, x uint64) error
+
+// Sizers are defined in encode.go
+// A sizer returns the encoded size of a field, including its tag and encoder
+// type.
+type sizer func(prop *Properties, base structPointer) int
+
+// A valueSizer returns the encoded size of a single integer in a particular
+// encoding.
+type valueSizer func(x uint64) int
+
+// Decoders are defined in decode.go
+// A decoder creates a value from its wire representation.
+// Unrecognized subelements are saved in unrec.
+type decoder func(p *Buffer, prop *Properties, base structPointer) error
+
+// A valueDecoder decodes a single integer in a particular encoding.
+type valueDecoder func(o *Buffer) (x uint64, err error)
+
+// A oneofMarshaler does the marshaling for all oneof fields in a message.
+type oneofMarshaler func(Message, *Buffer) error
+
+// A oneofUnmarshaler does the unmarshaling for a oneof field in a message.
+type oneofUnmarshaler func(Message, int, int, *Buffer) (bool, error)
+
+// A oneofSizer does the sizing for all oneof fields in a message.
+type oneofSizer func(Message) int
+
+// tagMap is an optimization over map[int]int for typical protocol buffer
+// use-cases. Encoded protocol buffers are often in tag order with small tag
+// numbers.
+type tagMap struct {
+ fastTags []int
+ slowTags map[int]int
+}
+
+// tagMapFastLimit is the upper bound on the tag number that will be stored in
+// the tagMap slice rather than its map.
+const tagMapFastLimit = 1024
+
+func (p *tagMap) get(t int) (int, bool) {
+ if t > 0 && t < tagMapFastLimit {
+ if t >= len(p.fastTags) {
+ return 0, false
+ }
+ fi := p.fastTags[t]
+ return fi, fi >= 0
+ }
+ fi, ok := p.slowTags[t]
+ return fi, ok
+}
+
+func (p *tagMap) put(t int, fi int) {
+ if t > 0 && t < tagMapFastLimit {
+ for len(p.fastTags) < t+1 {
+ p.fastTags = append(p.fastTags, -1)
+ }
+ p.fastTags[t] = fi
+ return
+ }
+ if p.slowTags == nil {
+ p.slowTags = make(map[int]int)
+ }
+ p.slowTags[t] = fi
+}
+
+// StructProperties represents properties for all the fields of a struct.
+// decoderTags and decoderOrigNames should only be used by the decoder.
+type StructProperties struct {
+ Prop []*Properties // properties for each field
+ reqCount int // required count
+ decoderTags tagMap // map from proto tag to struct field number
+ decoderOrigNames map[string]int // map from original name to struct field number
+ order []int // list of struct field numbers in tag order
+ unrecField field // field id of the XXX_unrecognized []byte field
+ extendable bool // is this an extendable proto
+
+ oneofMarshaler oneofMarshaler
+ oneofUnmarshaler oneofUnmarshaler
+ oneofSizer oneofSizer
+ stype reflect.Type
+
+ // OneofTypes contains information about the oneof fields in this message.
+ // It is keyed by the original name of a field.
+ OneofTypes map[string]*OneofProperties
+}
+
+// OneofProperties represents information about a specific field in a oneof.
+type OneofProperties struct {
+ Type reflect.Type // pointer to generated struct type for this oneof field
+ Field int // struct field number of the containing oneof in the message
+ Prop *Properties
+}
+
+// Implement the sorting interface so we can sort the fields in tag order, as recommended by the spec.
+// See encode.go, (*Buffer).enc_struct.
+
+func (sp *StructProperties) Len() int { return len(sp.order) }
+func (sp *StructProperties) Less(i, j int) bool {
+ return sp.Prop[sp.order[i]].Tag < sp.Prop[sp.order[j]].Tag
+}
+func (sp *StructProperties) Swap(i, j int) { sp.order[i], sp.order[j] = sp.order[j], sp.order[i] }
+
+// Properties represents the protocol-specific behavior of a single struct field.
+type Properties struct {
+ Name string // name of the field, for error messages
+ OrigName string // original name before protocol compiler (always set)
+ Wire string
+ WireType int
+ Tag int
+ Required bool
+ Optional bool
+ Repeated bool
+ Packed bool // relevant for repeated primitives only
+ Enum string // set for enum types only
+ proto3 bool // whether this is known to be a proto3 field; set for []byte only
+ oneof bool // whether this is a oneof field
+
+ Default string // default value
+ HasDefault bool // whether an explicit default was provided
+ def_uint64 uint64
+
+ enc encoder
+ valEnc valueEncoder // set for bool and numeric types only
+ field field
+ tagcode []byte // encoding of EncodeVarint((Tag<<3)|WireType)
+ tagbuf [8]byte
+ stype reflect.Type // set for struct types only
+ sprop *StructProperties // set for struct types only
+ isMarshaler bool
+ isUnmarshaler bool
+
+ mtype reflect.Type // set for map types only
+ mkeyprop *Properties // set for map types only
+ mvalprop *Properties // set for map types only
+
+ size sizer
+ valSize valueSizer // set for bool and numeric types only
+
+ dec decoder
+ valDec valueDecoder // set for bool and numeric types only
+
+ // If this is a packable field, this will be the decoder for the packed version of the field.
+ packedDec decoder
+}
+
+// String formats the properties in the protobuf struct field tag style.
+func (p *Properties) String() string {
+ s := p.Wire
+ s = ","
+ s += strconv.Itoa(p.Tag)
+ if p.Required {
+ s += ",req"
+ }
+ if p.Optional {
+ s += ",opt"
+ }
+ if p.Repeated {
+ s += ",rep"
+ }
+ if p.Packed {
+ s += ",packed"
+ }
+ if p.OrigName != p.Name {
+ s += ",name=" + p.OrigName
+ }
+ if p.proto3 {
+ s += ",proto3"
+ }
+ if p.oneof {
+ s += ",oneof"
+ }
+ if len(p.Enum) > 0 {
+ s += ",enum=" + p.Enum
+ }
+ if p.HasDefault {
+ s += ",def=" + p.Default
+ }
+ return s
+}
+
+// Parse populates p by parsing a string in the protobuf struct field tag style.
+func (p *Properties) Parse(s string) {
+ // "bytes,49,opt,name=foo,def=hello!"
+ fields := strings.Split(s, ",") // breaks def=, but handled below.
+ if len(fields) < 2 {
+ fmt.Fprintf(os.Stderr, "proto: tag has too few fields: %q\n", s)
+ return
+ }
+
+ p.Wire = fields[0]
+ switch p.Wire {
+ case "varint":
+ p.WireType = WireVarint
+ p.valEnc = (*Buffer).EncodeVarint
+ p.valDec = (*Buffer).DecodeVarint
+ p.valSize = sizeVarint
+ case "fixed32":
+ p.WireType = WireFixed32
+ p.valEnc = (*Buffer).EncodeFixed32
+ p.valDec = (*Buffer).DecodeFixed32
+ p.valSize = sizeFixed32
+ case "fixed64":
+ p.WireType = WireFixed64
+ p.valEnc = (*Buffer).EncodeFixed64
+ p.valDec = (*Buffer).DecodeFixed64
+ p.valSize = sizeFixed64
+ case "zigzag32":
+ p.WireType = WireVarint
+ p.valEnc = (*Buffer).EncodeZigzag32
+ p.valDec = (*Buffer).DecodeZigzag32
+ p.valSize = sizeZigzag32
+ case "zigzag64":
+ p.WireType = WireVarint
+ p.valEnc = (*Buffer).EncodeZigzag64
+ p.valDec = (*Buffer).DecodeZigzag64
+ p.valSize = sizeZigzag64
+ case "bytes", "group":
+ p.WireType = WireBytes
+ // no numeric converter for non-numeric types
+ default:
+ fmt.Fprintf(os.Stderr, "proto: tag has unknown wire type: %q\n", s)
+ return
+ }
+
+ var err error
+ p.Tag, err = strconv.Atoi(fields[1])
+ if err != nil {
+ return
+ }
+
+ for i := 2; i < len(fields); i++ {
+ f := fields[i]
+ switch {
+ case f == "req":
+ p.Required = true
+ case f == "opt":
+ p.Optional = true
+ case f == "rep":
+ p.Repeated = true
+ case f == "packed":
+ p.Packed = true
+ case strings.HasPrefix(f, "name="):
+ p.OrigName = f[5:]
+ case strings.HasPrefix(f, "enum="):
+ p.Enum = f[5:]
+ case f == "proto3":
+ p.proto3 = true
+ case f == "oneof":
+ p.oneof = true
+ case strings.HasPrefix(f, "def="):
+ p.HasDefault = true
+ p.Default = f[4:] // rest of string
+ if i+1 < len(fields) {
+ // Commas aren't escaped, and def is always last.
+ p.Default += "," + strings.Join(fields[i+1:], ",")
+ break
+ }
+ }
+ }
+}
+
+func logNoSliceEnc(t1, t2 reflect.Type) {
+ fmt.Fprintf(os.Stderr, "proto: no slice oenc for %T = []%T\n", t1, t2)
+}
+
+var protoMessageType = reflect.TypeOf((*Message)(nil)).Elem()
+
+// Initialize the fields for encoding and decoding.
+func (p *Properties) setEncAndDec(typ reflect.Type, f *reflect.StructField, lockGetProp bool) {
+ p.enc = nil
+ p.dec = nil
+ p.size = nil
+
+ switch t1 := typ; t1.Kind() {
+ default:
+ fmt.Fprintf(os.Stderr, "proto: no coders for %v\n", t1)
+
+ // proto3 scalar types
+
+ case reflect.Bool:
+ p.enc = (*Buffer).enc_proto3_bool
+ p.dec = (*Buffer).dec_proto3_bool
+ p.size = size_proto3_bool
+ case reflect.Int32:
+ p.enc = (*Buffer).enc_proto3_int32
+ p.dec = (*Buffer).dec_proto3_int32
+ p.size = size_proto3_int32
+ case reflect.Uint32:
+ p.enc = (*Buffer).enc_proto3_uint32
+ p.dec = (*Buffer).dec_proto3_int32 // can reuse
+ p.size = size_proto3_uint32
+ case reflect.Int64, reflect.Uint64:
+ p.enc = (*Buffer).enc_proto3_int64
+ p.dec = (*Buffer).dec_proto3_int64
+ p.size = size_proto3_int64
+ case reflect.Float32:
+ p.enc = (*Buffer).enc_proto3_uint32 // can just treat them as bits
+ p.dec = (*Buffer).dec_proto3_int32
+ p.size = size_proto3_uint32
+ case reflect.Float64:
+ p.enc = (*Buffer).enc_proto3_int64 // can just treat them as bits
+ p.dec = (*Buffer).dec_proto3_int64
+ p.size = size_proto3_int64
+ case reflect.String:
+ p.enc = (*Buffer).enc_proto3_string
+ p.dec = (*Buffer).dec_proto3_string
+ p.size = size_proto3_string
+
+ case reflect.Ptr:
+ switch t2 := t1.Elem(); t2.Kind() {
+ default:
+ fmt.Fprintf(os.Stderr, "proto: no encoder function for %v -> %v\n", t1, t2)
+ break
+ case reflect.Bool:
+ p.enc = (*Buffer).enc_bool
+ p.dec = (*Buffer).dec_bool
+ p.size = size_bool
+ case reflect.Int32:
+ p.enc = (*Buffer).enc_int32
+ p.dec = (*Buffer).dec_int32
+ p.size = size_int32
+ case reflect.Uint32:
+ p.enc = (*Buffer).enc_uint32
+ p.dec = (*Buffer).dec_int32 // can reuse
+ p.size = size_uint32
+ case reflect.Int64, reflect.Uint64:
+ p.enc = (*Buffer).enc_int64
+ p.dec = (*Buffer).dec_int64
+ p.size = size_int64
+ case reflect.Float32:
+ p.enc = (*Buffer).enc_uint32 // can just treat them as bits
+ p.dec = (*Buffer).dec_int32
+ p.size = size_uint32
+ case reflect.Float64:
+ p.enc = (*Buffer).enc_int64 // can just treat them as bits
+ p.dec = (*Buffer).dec_int64
+ p.size = size_int64
+ case reflect.String:
+ p.enc = (*Buffer).enc_string
+ p.dec = (*Buffer).dec_string
+ p.size = size_string
+ case reflect.Struct:
+ p.stype = t1.Elem()
+ p.isMarshaler = isMarshaler(t1)
+ p.isUnmarshaler = isUnmarshaler(t1)
+ if p.Wire == "bytes" {
+ p.enc = (*Buffer).enc_struct_message
+ p.dec = (*Buffer).dec_struct_message
+ p.size = size_struct_message
+ } else {
+ p.enc = (*Buffer).enc_struct_group
+ p.dec = (*Buffer).dec_struct_group
+ p.size = size_struct_group
+ }
+ }
+
+ case reflect.Slice:
+ switch t2 := t1.Elem(); t2.Kind() {
+ default:
+ logNoSliceEnc(t1, t2)
+ break
+ case reflect.Bool:
+ if p.Packed {
+ p.enc = (*Buffer).enc_slice_packed_bool
+ p.size = size_slice_packed_bool
+ } else {
+ p.enc = (*Buffer).enc_slice_bool
+ p.size = size_slice_bool
+ }
+ p.dec = (*Buffer).dec_slice_bool
+ p.packedDec = (*Buffer).dec_slice_packed_bool
+ case reflect.Int32:
+ if p.Packed {
+ p.enc = (*Buffer).enc_slice_packed_int32
+ p.size = size_slice_packed_int32
+ } else {
+ p.enc = (*Buffer).enc_slice_int32
+ p.size = size_slice_int32
+ }
+ p.dec = (*Buffer).dec_slice_int32
+ p.packedDec = (*Buffer).dec_slice_packed_int32
+ case reflect.Uint32:
+ if p.Packed {
+ p.enc = (*Buffer).enc_slice_packed_uint32
+ p.size = size_slice_packed_uint32
+ } else {
+ p.enc = (*Buffer).enc_slice_uint32
+ p.size = size_slice_uint32
+ }
+ p.dec = (*Buffer).dec_slice_int32
+ p.packedDec = (*Buffer).dec_slice_packed_int32
+ case reflect.Int64, reflect.Uint64:
+ if p.Packed {
+ p.enc = (*Buffer).enc_slice_packed_int64
+ p.size = size_slice_packed_int64
+ } else {
+ p.enc = (*Buffer).enc_slice_int64
+ p.size = size_slice_int64
+ }
+ p.dec = (*Buffer).dec_slice_int64
+ p.packedDec = (*Buffer).dec_slice_packed_int64
+ case reflect.Uint8:
+ p.enc = (*Buffer).enc_slice_byte
+ p.dec = (*Buffer).dec_slice_byte
+ p.size = size_slice_byte
+ // This is a []byte, which is either a bytes field,
+ // or the value of a map field. In the latter case,
+ // we always encode an empty []byte, so we should not
+ // use the proto3 enc/size funcs.
+ // f == nil iff this is the key/value of a map field.
+ if p.proto3 && f != nil {
+ p.enc = (*Buffer).enc_proto3_slice_byte
+ p.size = size_proto3_slice_byte
+ }
+ case reflect.Float32, reflect.Float64:
+ switch t2.Bits() {
+ case 32:
+ // can just treat them as bits
+ if p.Packed {
+ p.enc = (*Buffer).enc_slice_packed_uint32
+ p.size = size_slice_packed_uint32
+ } else {
+ p.enc = (*Buffer).enc_slice_uint32
+ p.size = size_slice_uint32
+ }
+ p.dec = (*Buffer).dec_slice_int32
+ p.packedDec = (*Buffer).dec_slice_packed_int32
+ case 64:
+ // can just treat them as bits
+ if p.Packed {
+ p.enc = (*Buffer).enc_slice_packed_int64
+ p.size = size_slice_packed_int64
+ } else {
+ p.enc = (*Buffer).enc_slice_int64
+ p.size = size_slice_int64
+ }
+ p.dec = (*Buffer).dec_slice_int64
+ p.packedDec = (*Buffer).dec_slice_packed_int64
+ default:
+ logNoSliceEnc(t1, t2)
+ break
+ }
+ case reflect.String:
+ p.enc = (*Buffer).enc_slice_string
+ p.dec = (*Buffer).dec_slice_string
+ p.size = size_slice_string
+ case reflect.Ptr:
+ switch t3 := t2.Elem(); t3.Kind() {
+ default:
+ fmt.Fprintf(os.Stderr, "proto: no ptr oenc for %T -> %T -> %T\n", t1, t2, t3)
+ break
+ case reflect.Struct:
+ p.stype = t2.Elem()
+ p.isMarshaler = isMarshaler(t2)
+ p.isUnmarshaler = isUnmarshaler(t2)
+ if p.Wire == "bytes" {
+ p.enc = (*Buffer).enc_slice_struct_message
+ p.dec = (*Buffer).dec_slice_struct_message
+ p.size = size_slice_struct_message
+ } else {
+ p.enc = (*Buffer).enc_slice_struct_group
+ p.dec = (*Buffer).dec_slice_struct_group
+ p.size = size_slice_struct_group
+ }
+ }
+ case reflect.Slice:
+ switch t2.Elem().Kind() {
+ default:
+ fmt.Fprintf(os.Stderr, "proto: no slice elem oenc for %T -> %T -> %T\n", t1, t2, t2.Elem())
+ break
+ case reflect.Uint8:
+ p.enc = (*Buffer).enc_slice_slice_byte
+ p.dec = (*Buffer).dec_slice_slice_byte
+ p.size = size_slice_slice_byte
+ }
+ }
+
+ case reflect.Map:
+ p.enc = (*Buffer).enc_new_map
+ p.dec = (*Buffer).dec_new_map
+ p.size = size_new_map
+
+ p.mtype = t1
+ p.mkeyprop = &Properties{}
+ p.mkeyprop.init(reflect.PtrTo(p.mtype.Key()), "Key", f.Tag.Get("protobuf_key"), nil, lockGetProp)
+ p.mvalprop = &Properties{}
+ vtype := p.mtype.Elem()
+ if vtype.Kind() != reflect.Ptr && vtype.Kind() != reflect.Slice {
+ // The value type is not a message (*T) or bytes ([]byte),
+ // so we need encoders for the pointer to this type.
+ vtype = reflect.PtrTo(vtype)
+ }
+ p.mvalprop.init(vtype, "Value", f.Tag.Get("protobuf_val"), nil, lockGetProp)
+ }
+
+ // precalculate tag code
+ wire := p.WireType
+ if p.Packed {
+ wire = WireBytes
+ }
+ x := uint32(p.Tag)<<3 | uint32(wire)
+ i := 0
+ for i = 0; x > 127; i++ {
+ p.tagbuf[i] = 0x80 | uint8(x&0x7F)
+ x >>= 7
+ }
+ p.tagbuf[i] = uint8(x)
+ p.tagcode = p.tagbuf[0 : i+1]
+
+ if p.stype != nil {
+ if lockGetProp {
+ p.sprop = GetProperties(p.stype)
+ } else {
+ p.sprop = getPropertiesLocked(p.stype)
+ }
+ }
+}
+
+var (
+ marshalerType = reflect.TypeOf((*Marshaler)(nil)).Elem()
+ unmarshalerType = reflect.TypeOf((*Unmarshaler)(nil)).Elem()
+)
+
+// isMarshaler reports whether type t implements Marshaler.
+func isMarshaler(t reflect.Type) bool {
+ // We're checking for (likely) pointer-receiver methods
+ // so if t is not a pointer, something is very wrong.
+ // The calls above only invoke isMarshaler on pointer types.
+ if t.Kind() != reflect.Ptr {
+ panic("proto: misuse of isMarshaler")
+ }
+ return t.Implements(marshalerType)
+}
+
+// isUnmarshaler reports whether type t implements Unmarshaler.
+func isUnmarshaler(t reflect.Type) bool {
+ // We're checking for (likely) pointer-receiver methods
+ // so if t is not a pointer, something is very wrong.
+ // The calls above only invoke isUnmarshaler on pointer types.
+ if t.Kind() != reflect.Ptr {
+ panic("proto: misuse of isUnmarshaler")
+ }
+ return t.Implements(unmarshalerType)
+}
+
+// Init populates the properties from a protocol buffer struct tag.
+func (p *Properties) Init(typ reflect.Type, name, tag string, f *reflect.StructField) {
+ p.init(typ, name, tag, f, true)
+}
+
+func (p *Properties) init(typ reflect.Type, name, tag string, f *reflect.StructField, lockGetProp bool) {
+ // "bytes,49,opt,def=hello!"
+ p.Name = name
+ p.OrigName = name
+ if f != nil {
+ p.field = toField(f)
+ }
+ if tag == "" {
+ return
+ }
+ p.Parse(tag)
+ p.setEncAndDec(typ, f, lockGetProp)
+}
+
+var (
+ propertiesMu sync.RWMutex
+ propertiesMap = make(map[reflect.Type]*StructProperties)
+)
+
+// GetProperties returns the list of properties for the type represented by t.
+// t must represent a generated struct type of a protocol message.
+func GetProperties(t reflect.Type) *StructProperties {
+ if t.Kind() != reflect.Struct {
+ panic("proto: type must have kind struct")
+ }
+
+ // Most calls to GetProperties in a long-running program will be
+ // retrieving details for types we have seen before.
+ propertiesMu.RLock()
+ sprop, ok := propertiesMap[t]
+ propertiesMu.RUnlock()
+ if ok {
+ if collectStats {
+ stats.Chit++
+ }
+ return sprop
+ }
+
+ propertiesMu.Lock()
+ sprop = getPropertiesLocked(t)
+ propertiesMu.Unlock()
+ return sprop
+}
+
+// getPropertiesLocked requires that propertiesMu is held.
+func getPropertiesLocked(t reflect.Type) *StructProperties {
+ if prop, ok := propertiesMap[t]; ok {
+ if collectStats {
+ stats.Chit++
+ }
+ return prop
+ }
+ if collectStats {
+ stats.Cmiss++
+ }
+
+ prop := new(StructProperties)
+ // in case of recursive protos, fill this in now.
+ propertiesMap[t] = prop
+
+ // build properties
+ prop.extendable = reflect.PtrTo(t).Implements(extendableProtoType)
+ prop.unrecField = invalidField
+ prop.Prop = make([]*Properties, t.NumField())
+ prop.order = make([]int, t.NumField())
+
+ for i := 0; i < t.NumField(); i++ {
+ f := t.Field(i)
+ p := new(Properties)
+ name := f.Name
+ p.init(f.Type, name, f.Tag.Get("protobuf"), &f, false)
+
+ if f.Name == "XXX_extensions" { // special case
+ p.enc = (*Buffer).enc_map
+ p.dec = nil // not needed
+ p.size = size_map
+ }
+ if f.Name == "XXX_unrecognized" { // special case
+ prop.unrecField = toField(&f)
+ }
+ oneof := f.Tag.Get("protobuf_oneof") != "" // special case
+ prop.Prop[i] = p
+ prop.order[i] = i
+ if debug {
+ print(i, " ", f.Name, " ", t.String(), " ")
+ if p.Tag > 0 {
+ print(p.String())
+ }
+ print("\n")
+ }
+ if p.enc == nil && !strings.HasPrefix(f.Name, "XXX_") && !oneof {
+ fmt.Fprintln(os.Stderr, "proto: no encoder for", f.Name, f.Type.String(), "[GetProperties]")
+ }
+ }
+
+ // Re-order prop.order.
+ sort.Sort(prop)
+
+ type oneofMessage interface {
+ XXX_OneofFuncs() (func(Message, *Buffer) error, func(Message, int, int, *Buffer) (bool, error), func(Message) int, []interface{})
+ }
+ if om, ok := reflect.Zero(reflect.PtrTo(t)).Interface().(oneofMessage); ok {
+ var oots []interface{}
+ prop.oneofMarshaler, prop.oneofUnmarshaler, prop.oneofSizer, oots = om.XXX_OneofFuncs()
+ prop.stype = t
+
+ // Interpret oneof metadata.
+ prop.OneofTypes = make(map[string]*OneofProperties)
+ for _, oot := range oots {
+ oop := &OneofProperties{
+ Type: reflect.ValueOf(oot).Type(), // *T
+ Prop: new(Properties),
+ }
+ sft := oop.Type.Elem().Field(0)
+ oop.Prop.Name = sft.Name
+ oop.Prop.Parse(sft.Tag.Get("protobuf"))
+ // There will be exactly one interface field that
+ // this new value is assignable to.
+ for i := 0; i < t.NumField(); i++ {
+ f := t.Field(i)
+ if f.Type.Kind() != reflect.Interface {
+ continue
+ }
+ if !oop.Type.AssignableTo(f.Type) {
+ continue
+ }
+ oop.Field = i
+ break
+ }
+ prop.OneofTypes[oop.Prop.OrigName] = oop
+ }
+ }
+
+ // build required counts
+ // build tags
+ reqCount := 0
+ prop.decoderOrigNames = make(map[string]int)
+ for i, p := range prop.Prop {
+ if strings.HasPrefix(p.Name, "XXX_") {
+ // Internal fields should not appear in tags/origNames maps.
+ // They are handled specially when encoding and decoding.
+ continue
+ }
+ if p.Required {
+ reqCount++
+ }
+ prop.decoderTags.put(p.Tag, i)
+ prop.decoderOrigNames[p.OrigName] = i
+ }
+ prop.reqCount = reqCount
+
+ return prop
+}
+
+// Return the Properties object for the x[0]'th field of the structure.
+func propByIndex(t reflect.Type, x []int) *Properties {
+ if len(x) != 1 {
+ fmt.Fprintf(os.Stderr, "proto: field index dimension %d (not 1) for type %s\n", len(x), t)
+ return nil
+ }
+ prop := GetProperties(t)
+ return prop.Prop[x[0]]
+}
+
+// Get the address and type of a pointer to a struct from an interface.
+func getbase(pb Message) (t reflect.Type, b structPointer, err error) {
+ if pb == nil {
+ err = ErrNil
+ return
+ }
+ // get the reflect type of the pointer to the struct.
+ t = reflect.TypeOf(pb)
+ // get the address of the struct.
+ value := reflect.ValueOf(pb)
+ b = toStructPointer(value)
+ return
+}
+
+// A global registry of enum types.
+// The generated code will register the generated maps by calling RegisterEnum.
+
+var enumValueMaps = make(map[string]map[string]int32)
+
+// RegisterEnum is called from the generated code to install the enum descriptor
+// maps into the global table to aid parsing text format protocol buffers.
+func RegisterEnum(typeName string, unusedNameMap map[int32]string, valueMap map[string]int32) {
+ if _, ok := enumValueMaps[typeName]; ok {
+ panic("proto: duplicate enum registered: " + typeName)
+ }
+ enumValueMaps[typeName] = valueMap
+}
+
+// EnumValueMap returns the mapping from names to integers of the
+// enum type enumType, or nil if not found.
+func EnumValueMap(enumType string) map[string]int32 {
+ return enumValueMaps[enumType]
+}
+
+// A registry of all linked message types.
+// The string is a fully-qualified proto name ("pkg.Message").
+var (
+ protoTypes = make(map[string]reflect.Type)
+ revProtoTypes = make(map[reflect.Type]string)
+)
+
+// RegisterType is called from generated code and maps from the fully qualified
+// proto name to the type (pointer to struct) of the protocol buffer.
+func RegisterType(x Message, name string) {
+ if _, ok := protoTypes[name]; ok {
+ // TODO: Some day, make this a panic.
+ log.Printf("proto: duplicate proto type registered: %s", name)
+ return
+ }
+ t := reflect.TypeOf(x)
+ protoTypes[name] = t
+ revProtoTypes[t] = name
+}
+
+// MessageName returns the fully-qualified proto name for the given message type.
+func MessageName(x Message) string { return revProtoTypes[reflect.TypeOf(x)] }
+
+// MessageType returns the message type (pointer to struct) for a named message.
+func MessageType(name string) reflect.Type { return protoTypes[name] }
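Properties.Parse above is driven entirely by the protobuf struct tags emitted by the code generator. A short sketch of how such a tag string decomposes into Properties fields when used against this vendored package (the tag value itself is hypothetical, but it follows the format Parse handles):

package main

import (
	"fmt"

	"github.com/golang/protobuf/proto"
)

func main() {
	// A hypothetical generated tag: wire type, field number, cardinality,
	// original name, and an explicit default, in the order Parse expects.
	p := new(proto.Properties)
	p.Parse("varint,1,opt,name=count,def=0")

	fmt.Println(p.Wire, p.Tag, p.Optional) // varint 1 true
	fmt.Println(p.OrigName)                // count
	fmt.Println(p.HasDefault, p.Default)   // true 0
}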
diff --git a/vendor/github.com/golang/protobuf/proto/text.go b/vendor/github.com/golang/protobuf/proto/text.go
new file mode 100644
index 00000000..2336b144
--- /dev/null
+++ b/vendor/github.com/golang/protobuf/proto/text.go
@@ -0,0 +1,751 @@
+// Go support for Protocol Buffers - Google's data interchange format
+//
+// Copyright 2010 The Go Authors. All rights reserved.
+// https://github.com/golang/protobuf
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following disclaimer
+// in the documentation and/or other materials provided with the
+// distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+package proto
+
+// Functions for writing the text protocol buffer format.
+
+import (
+ "bufio"
+ "bytes"
+ "encoding"
+ "errors"
+ "fmt"
+ "io"
+ "log"
+ "math"
+ "reflect"
+ "sort"
+ "strings"
+)
+
+var (
+ newline = []byte("\n")
+ spaces = []byte(" ")
+ gtNewline = []byte(">\n")
+ endBraceNewline = []byte("}\n")
+ backslashN = []byte{'\\', 'n'}
+ backslashR = []byte{'\\', 'r'}
+ backslashT = []byte{'\\', 't'}
+ backslashDQ = []byte{'\\', '"'}
+ backslashBS = []byte{'\\', '\\'}
+ posInf = []byte("inf")
+ negInf = []byte("-inf")
+ nan = []byte("nan")
+)
+
+type writer interface {
+ io.Writer
+ WriteByte(byte) error
+}
+
+// textWriter is an io.Writer that tracks its indentation level.
+type textWriter struct {
+ ind int
+ complete bool // if the current position is a complete line
+ compact bool // whether to write out as a one-liner
+ w writer
+}
+
+func (w *textWriter) WriteString(s string) (n int, err error) {
+ if !strings.Contains(s, "\n") {
+ if !w.compact && w.complete {
+ w.writeIndent()
+ }
+ w.complete = false
+ return io.WriteString(w.w, s)
+ }
+ // WriteString is typically called without newlines, so this
+ // codepath and its copy are rare. We copy to avoid
+ // duplicating all of Write's logic here.
+ return w.Write([]byte(s))
+}
+
+func (w *textWriter) Write(p []byte) (n int, err error) {
+ newlines := bytes.Count(p, newline)
+ if newlines == 0 {
+ if !w.compact && w.complete {
+ w.writeIndent()
+ }
+ n, err = w.w.Write(p)
+ w.complete = false
+ return n, err
+ }
+
+ frags := bytes.SplitN(p, newline, newlines+1)
+ if w.compact {
+ for i, frag := range frags {
+ if i > 0 {
+ if err := w.w.WriteByte(' '); err != nil {
+ return n, err
+ }
+ n++
+ }
+ nn, err := w.w.Write(frag)
+ n += nn
+ if err != nil {
+ return n, err
+ }
+ }
+ return n, nil
+ }
+
+ for i, frag := range frags {
+ if w.complete {
+ w.writeIndent()
+ }
+ nn, err := w.w.Write(frag)
+ n += nn
+ if err != nil {
+ return n, err
+ }
+ if i+1 < len(frags) {
+ if err := w.w.WriteByte('\n'); err != nil {
+ return n, err
+ }
+ n++
+ }
+ }
+ w.complete = len(frags[len(frags)-1]) == 0
+ return n, nil
+}
+
+func (w *textWriter) WriteByte(c byte) error {
+ if w.compact && c == '\n' {
+ c = ' '
+ }
+ if !w.compact && w.complete {
+ w.writeIndent()
+ }
+ err := w.w.WriteByte(c)
+ w.complete = c == '\n'
+ return err
+}
+
+func (w *textWriter) indent() { w.ind++ }
+
+func (w *textWriter) unindent() {
+ if w.ind == 0 {
+ log.Printf("proto: textWriter unindented too far")
+ return
+ }
+ w.ind--
+}
+
+func writeName(w *textWriter, props *Properties) error {
+ if _, err := w.WriteString(props.OrigName); err != nil {
+ return err
+ }
+ if props.Wire != "group" {
+ return w.WriteByte(':')
+ }
+ return nil
+}
+
+// raw is the interface satisfied by RawMessage.
+type raw interface {
+ Bytes() []byte
+}
+
+func writeStruct(w *textWriter, sv reflect.Value) error {
+ st := sv.Type()
+ sprops := GetProperties(st)
+ for i := 0; i < sv.NumField(); i++ {
+ fv := sv.Field(i)
+ props := sprops.Prop[i]
+ name := st.Field(i).Name
+
+ if strings.HasPrefix(name, "XXX_") {
+ // There are two XXX_ fields:
+ // XXX_unrecognized []byte
+ // XXX_extensions map[int32]proto.Extension
+ // The first is handled here;
+ // the second is handled at the bottom of this function.
+ if name == "XXX_unrecognized" && !fv.IsNil() {
+ if err := writeUnknownStruct(w, fv.Interface().([]byte)); err != nil {
+ return err
+ }
+ }
+ continue
+ }
+ if fv.Kind() == reflect.Ptr && fv.IsNil() {
+ // Field not filled in. This could be an optional field or
+ // a required field that wasn't filled in. Either way, there
+ // isn't anything we can show for it.
+ continue
+ }
+ if fv.Kind() == reflect.Slice && fv.IsNil() {
+ // Repeated field that is empty, or a bytes field that is unused.
+ continue
+ }
+
+ if props.Repeated && fv.Kind() == reflect.Slice {
+ // Repeated field.
+ for j := 0; j < fv.Len(); j++ {
+ if err := writeName(w, props); err != nil {
+ return err
+ }
+ if !w.compact {
+ if err := w.WriteByte(' '); err != nil {
+ return err
+ }
+ }
+ v := fv.Index(j)
+ if v.Kind() == reflect.Ptr && v.IsNil() {
+ // A nil message in a repeated field is not valid,
+ // but we can handle that more gracefully than panicking.
+ if _, err := w.Write([]byte("\n")); err != nil {
+ return err
+ }
+ continue
+ }
+ if err := writeAny(w, v, props); err != nil {
+ return err
+ }
+ if err := w.WriteByte('\n'); err != nil {
+ return err
+ }
+ }
+ continue
+ }
+ if fv.Kind() == reflect.Map {
+ // Map fields are rendered as a repeated struct with key/value fields.
+ keys := fv.MapKeys()
+ sort.Sort(mapKeys(keys))
+ for _, key := range keys {
+ val := fv.MapIndex(key)
+ if err := writeName(w, props); err != nil {
+ return err
+ }
+ if !w.compact {
+ if err := w.WriteByte(' '); err != nil {
+ return err
+ }
+ }
+ // open struct
+ if err := w.WriteByte('<'); err != nil {
+ return err
+ }
+ if !w.compact {
+ if err := w.WriteByte('\n'); err != nil {
+ return err
+ }
+ }
+ w.indent()
+ // key
+ if _, err := w.WriteString("key:"); err != nil {
+ return err
+ }
+ if !w.compact {
+ if err := w.WriteByte(' '); err != nil {
+ return err
+ }
+ }
+ if err := writeAny(w, key, props.mkeyprop); err != nil {
+ return err
+ }
+ if err := w.WriteByte('\n'); err != nil {
+ return err
+ }
+ // nil values aren't legal, but we can avoid panicking because of them.
+ if val.Kind() != reflect.Ptr || !val.IsNil() {
+ // value
+ if _, err := w.WriteString("value:"); err != nil {
+ return err
+ }
+ if !w.compact {
+ if err := w.WriteByte(' '); err != nil {
+ return err
+ }
+ }
+ if err := writeAny(w, val, props.mvalprop); err != nil {
+ return err
+ }
+ if err := w.WriteByte('\n'); err != nil {
+ return err
+ }
+ }
+ // close struct
+ w.unindent()
+ if err := w.WriteByte('>'); err != nil {
+ return err
+ }
+ if err := w.WriteByte('\n'); err != nil {
+ return err
+ }
+ }
+ continue
+ }
+ if props.proto3 && fv.Kind() == reflect.Slice && fv.Len() == 0 {
+ // empty bytes field
+ continue
+ }
+ if fv.Kind() != reflect.Ptr && fv.Kind() != reflect.Slice {
+ // proto3 non-repeated scalar field; skip if zero value
+ if isProto3Zero(fv) {
+ continue
+ }
+ }
+
+ if fv.Kind() == reflect.Interface {
+ // Check if it is a oneof.
+ if st.Field(i).Tag.Get("protobuf_oneof") != "" {
+ // fv is nil, or holds a pointer to generated struct.
+ // That generated struct has exactly one field,
+ // which has a protobuf struct tag.
+ if fv.IsNil() {
+ continue
+ }
+ inner := fv.Elem().Elem() // interface -> *T -> T
+ tag := inner.Type().Field(0).Tag.Get("protobuf")
+ props = new(Properties) // Overwrite the outer props var, but not its pointee.
+ props.Parse(tag)
+ // Write the value in the oneof, not the oneof itself.
+ fv = inner.Field(0)
+
+ // Special case to cope with malformed messages gracefully:
+ // If the value in the oneof is a nil pointer, don't panic
+ // in writeAny.
+ if fv.Kind() == reflect.Ptr && fv.IsNil() {
+ // Use errors.New so writeAny won't render quotes.
+ msg := errors.New("/* nil */")
+ fv = reflect.ValueOf(&msg).Elem()
+ }
+ }
+ }
+
+ if err := writeName(w, props); err != nil {
+ return err
+ }
+ if !w.compact {
+ if err := w.WriteByte(' '); err != nil {
+ return err
+ }
+ }
+ if b, ok := fv.Interface().(raw); ok {
+ if err := writeRaw(w, b.Bytes()); err != nil {
+ return err
+ }
+ continue
+ }
+
+ // Enums have a String method, so writeAny will work fine.
+ if err := writeAny(w, fv, props); err != nil {
+ return err
+ }
+
+ if err := w.WriteByte('\n'); err != nil {
+ return err
+ }
+ }
+
+ // Extensions (the XXX_extensions field).
+ pv := sv.Addr()
+ if pv.Type().Implements(extendableProtoType) {
+ if err := writeExtensions(w, pv); err != nil {
+ return err
+ }
+ }
+
+ return nil
+}
+
+// writeRaw writes an uninterpreted raw message.
+func writeRaw(w *textWriter, b []byte) error {
+ if err := w.WriteByte('<'); err != nil {
+ return err
+ }
+ if !w.compact {
+ if err := w.WriteByte('\n'); err != nil {
+ return err
+ }
+ }
+ w.indent()
+ if err := writeUnknownStruct(w, b); err != nil {
+ return err
+ }
+ w.unindent()
+ if err := w.WriteByte('>'); err != nil {
+ return err
+ }
+ return nil
+}
+
+// writeAny writes an arbitrary field.
+func writeAny(w *textWriter, v reflect.Value, props *Properties) error {
+ v = reflect.Indirect(v)
+
+ // Floats have special cases.
+ if v.Kind() == reflect.Float32 || v.Kind() == reflect.Float64 {
+ x := v.Float()
+ var b []byte
+ switch {
+ case math.IsInf(x, 1):
+ b = posInf
+ case math.IsInf(x, -1):
+ b = negInf
+ case math.IsNaN(x):
+ b = nan
+ }
+ if b != nil {
+ _, err := w.Write(b)
+ return err
+ }
+ // Other values are handled below.
+ }
+
+ // We don't attempt to serialise every possible value type; only those
+ // that can occur in protocol buffers.
+ switch v.Kind() {
+ case reflect.Slice:
+ // Should only be a []byte; repeated fields are handled in writeStruct.
+ if err := writeString(w, string(v.Interface().([]byte))); err != nil {
+ return err
+ }
+ case reflect.String:
+ if err := writeString(w, v.String()); err != nil {
+ return err
+ }
+ case reflect.Struct:
+ // Required/optional group/message.
+ var bra, ket byte = '<', '>'
+ if props != nil && props.Wire == "group" {
+ bra, ket = '{', '}'
+ }
+ if err := w.WriteByte(bra); err != nil {
+ return err
+ }
+ if !w.compact {
+ if err := w.WriteByte('\n'); err != nil {
+ return err
+ }
+ }
+ w.indent()
+ if tm, ok := v.Interface().(encoding.TextMarshaler); ok {
+ text, err := tm.MarshalText()
+ if err != nil {
+ return err
+ }
+ if _, err = w.Write(text); err != nil {
+ return err
+ }
+ } else if err := writeStruct(w, v); err != nil {
+ return err
+ }
+ w.unindent()
+ if err := w.WriteByte(ket); err != nil {
+ return err
+ }
+ default:
+ _, err := fmt.Fprint(w, v.Interface())
+ return err
+ }
+ return nil
+}
+
+// equivalent to C's isprint.
+func isprint(c byte) bool {
+ return c >= 0x20 && c < 0x7f
+}
+
+// writeString writes a string in the protocol buffer text format.
+// It is similar to strconv.Quote except we don't use Go escape sequences,
+// we treat the string as a byte sequence, and we use octal escapes.
+// These differences are to maintain interoperability with the other
+// languages' implementations of the text format.
+func writeString(w *textWriter, s string) error {
+ // use WriteByte here to get any needed indent
+ if err := w.WriteByte('"'); err != nil {
+ return err
+ }
+ // Loop over the bytes, not the runes.
+ for i := 0; i < len(s); i++ {
+ var err error
+ // Divergence from C++: we don't escape apostrophes.
+ // There's no need to escape them, and the C++ parser
+ // copes with a naked apostrophe.
+ switch c := s[i]; c {
+ case '\n':
+ _, err = w.w.Write(backslashN)
+ case '\r':
+ _, err = w.w.Write(backslashR)
+ case '\t':
+ _, err = w.w.Write(backslashT)
+ case '"':
+ _, err = w.w.Write(backslashDQ)
+ case '\\':
+ _, err = w.w.Write(backslashBS)
+ default:
+ if isprint(c) {
+ err = w.w.WriteByte(c)
+ } else {
+ _, err = fmt.Fprintf(w.w, "\\%03o", c)
+ }
+ }
+ if err != nil {
+ return err
+ }
+ }
+ return w.WriteByte('"')
+}
+
+func writeUnknownStruct(w *textWriter, data []byte) (err error) {
+ if !w.compact {
+ if _, err := fmt.Fprintf(w, "/* %d unknown bytes */\n", len(data)); err != nil {
+ return err
+ }
+ }
+ b := NewBuffer(data)
+ for b.index < len(b.buf) {
+ x, err := b.DecodeVarint()
+ if err != nil {
+ _, err := fmt.Fprintf(w, "/* %v */\n", err)
+ return err
+ }
+ wire, tag := x&7, x>>3
+ if wire == WireEndGroup {
+ w.unindent()
+ if _, err := w.Write(endBraceNewline); err != nil {
+ return err
+ }
+ continue
+ }
+ if _, err := fmt.Fprint(w, tag); err != nil {
+ return err
+ }
+ if wire != WireStartGroup {
+ if err := w.WriteByte(':'); err != nil {
+ return err
+ }
+ }
+ if !w.compact || wire == WireStartGroup {
+ if err := w.WriteByte(' '); err != nil {
+ return err
+ }
+ }
+ switch wire {
+ case WireBytes:
+ buf, e := b.DecodeRawBytes(false)
+ if e == nil {
+ _, err = fmt.Fprintf(w, "%q", buf)
+ } else {
+ _, err = fmt.Fprintf(w, "/* %v */", e)
+ }
+ case WireFixed32:
+ x, err = b.DecodeFixed32()
+ err = writeUnknownInt(w, x, err)
+ case WireFixed64:
+ x, err = b.DecodeFixed64()
+ err = writeUnknownInt(w, x, err)
+ case WireStartGroup:
+ err = w.WriteByte('{')
+ w.indent()
+ case WireVarint:
+ x, err = b.DecodeVarint()
+ err = writeUnknownInt(w, x, err)
+ default:
+ _, err = fmt.Fprintf(w, "/* unknown wire type %d */", wire)
+ }
+ if err != nil {
+ return err
+ }
+ if err = w.WriteByte('\n'); err != nil {
+ return err
+ }
+ }
+ return nil
+}
+
+func writeUnknownInt(w *textWriter, x uint64, err error) error {
+ if err == nil {
+ _, err = fmt.Fprint(w, x)
+ } else {
+ _, err = fmt.Fprintf(w, "/* %v */", err)
+ }
+ return err
+}
+
+type int32Slice []int32
+
+func (s int32Slice) Len() int { return len(s) }
+func (s int32Slice) Less(i, j int) bool { return s[i] < s[j] }
+func (s int32Slice) Swap(i, j int) { s[i], s[j] = s[j], s[i] }
+
+// writeExtensions writes all the extensions in pv.
+// pv is assumed to be a pointer to a protocol message struct that is extendable.
+func writeExtensions(w *textWriter, pv reflect.Value) error {
+ emap := extensionMaps[pv.Type().Elem()]
+ ep := pv.Interface().(extendableProto)
+
+ // Order the extensions by ID.
+ // This isn't strictly necessary, but it will give us
+ // canonical output, which will also make testing easier.
+ m := ep.ExtensionMap()
+ ids := make([]int32, 0, len(m))
+ for id := range m {
+ ids = append(ids, id)
+ }
+ sort.Sort(int32Slice(ids))
+
+ for _, extNum := range ids {
+ ext := m[extNum]
+ var desc *ExtensionDesc
+ if emap != nil {
+ desc = emap[extNum]
+ }
+ if desc == nil {
+ // Unknown extension.
+ if err := writeUnknownStruct(w, ext.enc); err != nil {
+ return err
+ }
+ continue
+ }
+
+ pb, err := GetExtension(ep, desc)
+ if err != nil {
+ return fmt.Errorf("failed getting extension: %v", err)
+ }
+
+ // Repeated extensions will appear as a slice.
+ if !desc.repeated() {
+ if err := writeExtension(w, desc.Name, pb); err != nil {
+ return err
+ }
+ } else {
+ v := reflect.ValueOf(pb)
+ for i := 0; i < v.Len(); i++ {
+ if err := writeExtension(w, desc.Name, v.Index(i).Interface()); err != nil {
+ return err
+ }
+ }
+ }
+ }
+ return nil
+}
+
+func writeExtension(w *textWriter, name string, pb interface{}) error {
+ if _, err := fmt.Fprintf(w, "[%s]:", name); err != nil {
+ return err
+ }
+ if !w.compact {
+ if err := w.WriteByte(' '); err != nil {
+ return err
+ }
+ }
+ if err := writeAny(w, reflect.ValueOf(pb), nil); err != nil {
+ return err
+ }
+ if err := w.WriteByte('\n'); err != nil {
+ return err
+ }
+ return nil
+}
+
+func (w *textWriter) writeIndent() {
+ if !w.complete {
+ return
+ }
+ remain := w.ind * 2
+ for remain > 0 {
+ n := remain
+ if n > len(spaces) {
+ n = len(spaces)
+ }
+ w.w.Write(spaces[:n])
+ remain -= n
+ }
+ w.complete = false
+}
+
+func marshalText(w io.Writer, pb Message, compact bool) error {
+ val := reflect.ValueOf(pb)
+ if pb == nil || val.IsNil() {
+ w.Write([]byte(""))
+ return nil
+ }
+ var bw *bufio.Writer
+ ww, ok := w.(writer)
+ if !ok {
+ bw = bufio.NewWriter(w)
+ ww = bw
+ }
+ aw := &textWriter{
+ w: ww,
+ complete: true,
+ compact: compact,
+ }
+
+ if tm, ok := pb.(encoding.TextMarshaler); ok {
+ text, err := tm.MarshalText()
+ if err != nil {
+ return err
+ }
+ if _, err = aw.Write(text); err != nil {
+ return err
+ }
+ if bw != nil {
+ return bw.Flush()
+ }
+ return nil
+ }
+ // Dereference the received pointer so we don't have outer < and >.
+ v := reflect.Indirect(val)
+ if err := writeStruct(aw, v); err != nil {
+ return err
+ }
+ if bw != nil {
+ return bw.Flush()
+ }
+ return nil
+}
+
+// MarshalText writes a given protocol buffer in text format.
+// The only errors returned are from w.
+func MarshalText(w io.Writer, pb Message) error {
+ return marshalText(w, pb, false)
+}
+
+// MarshalTextString is the same as MarshalText, but returns the string directly.
+func MarshalTextString(pb Message) string {
+ var buf bytes.Buffer
+ marshalText(&buf, pb, false)
+ return buf.String()
+}
+
+// CompactText writes a given protocol buffer in compact text format (one line).
+func CompactText(w io.Writer, pb Message) error { return marshalText(w, pb, true) }
+
+// CompactTextString is the same as CompactText, but returns the string directly.
+func CompactTextString(pb Message) string {
+ var buf bytes.Buffer
+ marshalText(&buf, pb, true)
+ return buf.String()
+}
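The exported entry points above (MarshalText, MarshalTextString, CompactText, CompactTextString) operate on any proto.Message. A sketch of their use with a hand-written stand-in for a generated message; the Example type and its tags are hypothetical, and real callers would use protoc-gen-go output:

package main

import (
	"fmt"

	"github.com/golang/protobuf/proto"
)

// Example stands in for a generated message type.
type Example struct {
	Name  string `protobuf:"bytes,1,opt,name=name"`
	Count int32  `protobuf:"varint,2,opt,name=count"`
}

func (m *Example) Reset()         { *m = Example{} }
func (m *Example) String() string { return proto.CompactTextString(m) }
func (*Example) ProtoMessage()    {}

func main() {
	m := &Example{Name: "demo", Count: 3}

	// Multi-line text format, one "name: value" pair per line.
	fmt.Print(proto.MarshalTextString(m))
	// name: "demo"
	// count: 3

	// Compact form renders the same pairs on one line, separated
	// (and followed) by single spaces.
	fmt.Println(proto.CompactTextString(m))
	// name: "demo" count: 3
}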
diff --git a/vendor/github.com/golang/protobuf/proto/text_parser.go b/vendor/github.com/golang/protobuf/proto/text_parser.go
new file mode 100644
index 00000000..6d0cf258
--- /dev/null
+++ b/vendor/github.com/golang/protobuf/proto/text_parser.go
@@ -0,0 +1,798 @@
+// Go support for Protocol Buffers - Google's data interchange format
+//
+// Copyright 2010 The Go Authors. All rights reserved.
+// https://github.com/golang/protobuf
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following disclaimer
+// in the documentation and/or other materials provided with the
+// distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+package proto
+
+// Functions for parsing the Text protocol buffer format.
+// TODO: message sets.
+
+import (
+ "encoding"
+ "errors"
+ "fmt"
+ "reflect"
+ "strconv"
+ "strings"
+ "unicode/utf8"
+)
+
+type ParseError struct {
+ Message string
+ Line int // 1-based line number
+ Offset int // 0-based byte offset from start of input
+}
+
+func (p *ParseError) Error() string {
+ if p.Line == 1 {
+ // show offset only for first line
+ return fmt.Sprintf("line 1.%d: %v", p.Offset, p.Message)
+ }
+ return fmt.Sprintf("line %d: %v", p.Line, p.Message)
+}
+
+type token struct {
+ value string
+ err *ParseError
+ line int // line number
+ offset int // byte number from start of input, not start of line
+ unquoted string // the unquoted version of value, if it was a quoted string
+}
+
+func (t *token) String() string {
+ if t.err == nil {
+ return fmt.Sprintf("%q (line=%d, offset=%d)", t.value, t.line, t.offset)
+ }
+ return fmt.Sprintf("parse error: %v", t.err)
+}
+
+type textParser struct {
+ s string // remaining input
+ done bool // whether the parsing is finished (success or error)
+ backed bool // whether back() was called
+ offset, line int
+ cur token
+}
+
+func newTextParser(s string) *textParser {
+ p := new(textParser)
+ p.s = s
+ p.line = 1
+ p.cur.line = 1
+ return p
+}
+
+func (p *textParser) errorf(format string, a ...interface{}) *ParseError {
+ pe := &ParseError{fmt.Sprintf(format, a...), p.cur.line, p.cur.offset}
+ p.cur.err = pe
+ p.done = true
+ return pe
+}
+
+// Numbers and identifiers are matched by [-+._A-Za-z0-9]
+func isIdentOrNumberChar(c byte) bool {
+ switch {
+ case 'A' <= c && c <= 'Z', 'a' <= c && c <= 'z':
+ return true
+ case '0' <= c && c <= '9':
+ return true
+ }
+ switch c {
+ case '-', '+', '.', '_':
+ return true
+ }
+ return false
+}
+
+func isWhitespace(c byte) bool {
+ switch c {
+ case ' ', '\t', '\n', '\r':
+ return true
+ }
+ return false
+}
+
+func (p *textParser) skipWhitespace() {
+ i := 0
+ for i < len(p.s) && (isWhitespace(p.s[i]) || p.s[i] == '#') {
+ if p.s[i] == '#' {
+ // comment; skip to end of line or input
+ for i < len(p.s) && p.s[i] != '\n' {
+ i++
+ }
+ if i == len(p.s) {
+ break
+ }
+ }
+ if p.s[i] == '\n' {
+ p.line++
+ }
+ i++
+ }
+ p.offset += i
+ p.s = p.s[i:len(p.s)]
+ if len(p.s) == 0 {
+ p.done = true
+ }
+}
+
+func (p *textParser) advance() {
+ // Skip whitespace
+ p.skipWhitespace()
+ if p.done {
+ return
+ }
+
+ // Start of non-whitespace
+ p.cur.err = nil
+ p.cur.offset, p.cur.line = p.offset, p.line
+ p.cur.unquoted = ""
+ switch p.s[0] {
+ case '<', '>', '{', '}', ':', '[', ']', ';', ',':
+ // Single symbol
+ p.cur.value, p.s = p.s[0:1], p.s[1:len(p.s)]
+ case '"', '\'':
+ // Quoted string
+ i := 1
+ for i < len(p.s) && p.s[i] != p.s[0] && p.s[i] != '\n' {
+ if p.s[i] == '\\' && i+1 < len(p.s) {
+ // skip escaped char
+ i++
+ }
+ i++
+ }
+ if i >= len(p.s) || p.s[i] != p.s[0] {
+ p.errorf("unmatched quote")
+ return
+ }
+ unq, err := unquoteC(p.s[1:i], rune(p.s[0]))
+ if err != nil {
+ p.errorf("invalid quoted string %s: %v", p.s[0:i+1], err)
+ return
+ }
+ p.cur.value, p.s = p.s[0:i+1], p.s[i+1:len(p.s)]
+ p.cur.unquoted = unq
+ default:
+ i := 0
+ for i < len(p.s) && isIdentOrNumberChar(p.s[i]) {
+ i++
+ }
+ if i == 0 {
+ p.errorf("unexpected byte %#x", p.s[0])
+ return
+ }
+ p.cur.value, p.s = p.s[0:i], p.s[i:len(p.s)]
+ }
+ p.offset += len(p.cur.value)
+}
+
+var (
+ errBadUTF8 = errors.New("proto: bad UTF-8")
+ errBadHex = errors.New("proto: bad hexadecimal")
+)
+
+func unquoteC(s string, quote rune) (string, error) {
+ // This is based on C++'s tokenizer.cc.
+ // Despite its name, this is *not* parsing C syntax.
+ // For instance, "\0" is an invalid quoted string.
+
+ // Avoid allocation in trivial cases.
+ simple := true
+ for _, r := range s {
+ if r == '\\' || r == quote {
+ simple = false
+ break
+ }
+ }
+ if simple {
+ return s, nil
+ }
+
+ buf := make([]byte, 0, 3*len(s)/2)
+ for len(s) > 0 {
+ r, n := utf8.DecodeRuneInString(s)
+ if r == utf8.RuneError && n == 1 {
+ return "", errBadUTF8
+ }
+ s = s[n:]
+ if r != '\\' {
+ if r < utf8.RuneSelf {
+ buf = append(buf, byte(r))
+ } else {
+ buf = append(buf, string(r)...)
+ }
+ continue
+ }
+
+ ch, tail, err := unescape(s)
+ if err != nil {
+ return "", err
+ }
+ buf = append(buf, ch...)
+ s = tail
+ }
+ return string(buf), nil
+}
+
+func unescape(s string) (ch string, tail string, err error) {
+ r, n := utf8.DecodeRuneInString(s)
+ if r == utf8.RuneError && n == 1 {
+ return "", "", errBadUTF8
+ }
+ s = s[n:]
+ switch r {
+ case 'a':
+ return "\a", s, nil
+ case 'b':
+ return "\b", s, nil
+ case 'f':
+ return "\f", s, nil
+ case 'n':
+ return "\n", s, nil
+ case 'r':
+ return "\r", s, nil
+ case 't':
+ return "\t", s, nil
+ case 'v':
+ return "\v", s, nil
+ case '?':
+ return "?", s, nil // trigraph workaround
+ case '\'', '"', '\\':
+ return string(r), s, nil
+ case '0', '1', '2', '3', '4', '5', '6', '7', 'x', 'X':
+ if len(s) < 2 {
+ return "", "", fmt.Errorf(`\%c requires 2 following digits`, r)
+ }
+ base := 8
+ ss := s[:2]
+ s = s[2:]
+ if r == 'x' || r == 'X' {
+ base = 16
+ } else {
+ ss = string(r) + ss
+ }
+ i, err := strconv.ParseUint(ss, base, 8)
+ if err != nil {
+ return "", "", err
+ }
+ return string([]byte{byte(i)}), s, nil
+ case 'u', 'U':
+ n := 4
+ if r == 'U' {
+ n = 8
+ }
+ if len(s) < n {
+ return "", "", fmt.Errorf(`\%c requires %d digits`, r, n)
+ }
+
+ bs := make([]byte, n/2)
+ for i := 0; i < n; i += 2 {
+ a, ok1 := unhex(s[i])
+ b, ok2 := unhex(s[i+1])
+ if !ok1 || !ok2 {
+ return "", "", errBadHex
+ }
+ bs[i/2] = a<<4 | b
+ }
+ s = s[n:]
+ return string(bs), s, nil
+ }
+ return "", "", fmt.Errorf(`unknown escape \%c`, r)
+}
+
+// Adapted from src/pkg/strconv/quote.go.
+func unhex(b byte) (v byte, ok bool) {
+ switch {
+ case '0' <= b && b <= '9':
+ return b - '0', true
+ case 'a' <= b && b <= 'f':
+ return b - 'a' + 10, true
+ case 'A' <= b && b <= 'F':
+ return b - 'A' + 10, true
+ }
+ return 0, false
+}
+
+// Back off the parser by one token. Can only be done between calls to next().
+// It makes the next advance() a no-op.
+func (p *textParser) back() { p.backed = true }
+
+// Advances the parser and returns the new current token.
+func (p *textParser) next() *token {
+ if p.backed || p.done {
+ p.backed = false
+ return &p.cur
+ }
+ p.advance()
+ if p.done {
+ p.cur.value = ""
+ } else if len(p.cur.value) > 0 && p.cur.value[0] == '"' {
+ // Look for multiple quoted strings separated by whitespace,
+ // and concatenate them.
+ cat := p.cur
+ for {
+ p.skipWhitespace()
+ if p.done || p.s[0] != '"' {
+ break
+ }
+ p.advance()
+ if p.cur.err != nil {
+ return &p.cur
+ }
+ cat.value += " " + p.cur.value
+ cat.unquoted += p.cur.unquoted
+ }
+ p.done = false // parser may have seen EOF, but we want to return cat
+ p.cur = cat
+ }
+ return &p.cur
+}
+
+func (p *textParser) consumeToken(s string) error {
+ tok := p.next()
+ if tok.err != nil {
+ return tok.err
+ }
+ if tok.value != s {
+ p.back()
+ return p.errorf("expected %q, found %q", s, tok.value)
+ }
+ return nil
+}
+
+// Return a RequiredNotSetError indicating which required field was not set.
+func (p *textParser) missingRequiredFieldError(sv reflect.Value) *RequiredNotSetError {
+ st := sv.Type()
+ sprops := GetProperties(st)
+ for i := 0; i < st.NumField(); i++ {
+ if !isNil(sv.Field(i)) {
+ continue
+ }
+
+ props := sprops.Prop[i]
+ if props.Required {
+ return &RequiredNotSetError{fmt.Sprintf("%v.%v", st, props.OrigName)}
+ }
+ }
+ return &RequiredNotSetError{fmt.Sprintf("%v.", st)} // should not happen
+}
+
+// Returns the index in the struct for the named field, as well as the parsed tag properties.
+func structFieldByName(sprops *StructProperties, name string) (int, *Properties, bool) {
+ i, ok := sprops.decoderOrigNames[name]
+ if ok {
+ return i, sprops.Prop[i], true
+ }
+ return -1, nil, false
+}
+
+// Consume a ':' from the input stream (if the next token is a colon),
+// returning an error if a colon is needed but not present.
+func (p *textParser) checkForColon(props *Properties, typ reflect.Type) *ParseError {
+ tok := p.next()
+ if tok.err != nil {
+ return tok.err
+ }
+ if tok.value != ":" {
+ // Colon is optional when the field is a group or message.
+ needColon := true
+ switch props.Wire {
+ case "group":
+ needColon = false
+ case "bytes":
+ // A "bytes" field is either a message, a string, or a repeated field;
+ // those three become *T, *string and []T respectively, so we can check for
+ // this field being a pointer to a non-string.
+ if typ.Kind() == reflect.Ptr {
+ // *T or *string
+ if typ.Elem().Kind() == reflect.String {
+ break
+ }
+ } else if typ.Kind() == reflect.Slice {
+ // []T or []*T
+ if typ.Elem().Kind() != reflect.Ptr {
+ break
+ }
+ } else if typ.Kind() == reflect.String {
+ // The proto3 exception is for a string field,
+ // which requires a colon.
+ break
+ }
+ needColon = false
+ }
+ if needColon {
+ return p.errorf("expected ':', found %q", tok.value)
+ }
+ p.back()
+ }
+ return nil
+}
+
+func (p *textParser) readStruct(sv reflect.Value, terminator string) error {
+ st := sv.Type()
+ sprops := GetProperties(st)
+ reqCount := sprops.reqCount
+ var reqFieldErr error
+ fieldSet := make(map[string]bool)
+ // A struct is a sequence of "name: value", terminated by one of
+ // '>' or '}', or the end of the input. A name may also be
+ // "[extension]".
+ for {
+ tok := p.next()
+ if tok.err != nil {
+ return tok.err
+ }
+ if tok.value == terminator {
+ break
+ }
+ if tok.value == "[" {
+ // Looks like an extension.
+ //
+ // TODO: Check whether we need to handle
+ // namespace rooted names (e.g. ".something.Foo").
+ tok = p.next()
+ if tok.err != nil {
+ return tok.err
+ }
+ var desc *ExtensionDesc
+ // This could be faster, but it's functional.
+ // TODO: Do something smarter than a linear scan.
+ for _, d := range RegisteredExtensions(reflect.New(st).Interface().(Message)) {
+ if d.Name == tok.value {
+ desc = d
+ break
+ }
+ }
+ if desc == nil {
+ return p.errorf("unrecognized extension %q", tok.value)
+ }
+ // Check the extension terminator.
+ tok = p.next()
+ if tok.err != nil {
+ return tok.err
+ }
+ if tok.value != "]" {
+ return p.errorf("unrecognized extension terminator %q", tok.value)
+ }
+
+ props := &Properties{}
+ props.Parse(desc.Tag)
+
+ typ := reflect.TypeOf(desc.ExtensionType)
+ if err := p.checkForColon(props, typ); err != nil {
+ return err
+ }
+
+ rep := desc.repeated()
+
+ // Read the extension structure, and set it in
+ // the value we're constructing.
+ var ext reflect.Value
+ if !rep {
+ ext = reflect.New(typ).Elem()
+ } else {
+ ext = reflect.New(typ.Elem()).Elem()
+ }
+ if err := p.readAny(ext, props); err != nil {
+ if _, ok := err.(*RequiredNotSetError); !ok {
+ return err
+ }
+ reqFieldErr = err
+ }
+ ep := sv.Addr().Interface().(extendableProto)
+ if !rep {
+ SetExtension(ep, desc, ext.Interface())
+ } else {
+ old, err := GetExtension(ep, desc)
+ var sl reflect.Value
+ if err == nil {
+ sl = reflect.ValueOf(old) // existing slice
+ } else {
+ sl = reflect.MakeSlice(typ, 0, 1)
+ }
+ sl = reflect.Append(sl, ext)
+ SetExtension(ep, desc, sl.Interface())
+ }
+ if err := p.consumeOptionalSeparator(); err != nil {
+ return err
+ }
+ continue
+ }
+
+ // This is a normal, non-extension field.
+ name := tok.value
+ var dst reflect.Value
+ fi, props, ok := structFieldByName(sprops, name)
+ if ok {
+ dst = sv.Field(fi)
+ } else if oop, ok := sprops.OneofTypes[name]; ok {
+ // It is a oneof.
+ props = oop.Prop
+ nv := reflect.New(oop.Type.Elem())
+ dst = nv.Elem().Field(0)
+ sv.Field(oop.Field).Set(nv)
+ }
+ if !dst.IsValid() {
+ return p.errorf("unknown field name %q in %v", name, st)
+ }
+
+ if dst.Kind() == reflect.Map {
+ // Consume any colon.
+ if err := p.checkForColon(props, dst.Type()); err != nil {
+ return err
+ }
+
+ // Construct the map if it doesn't already exist.
+ if dst.IsNil() {
+ dst.Set(reflect.MakeMap(dst.Type()))
+ }
+ key := reflect.New(dst.Type().Key()).Elem()
+ val := reflect.New(dst.Type().Elem()).Elem()
+
+ // The map entry should be this sequence of tokens:
+ // < key : KEY value : VALUE >
+ // Technically the "key" and "value" could come in any order,
+ // but in practice they won't.
+
+ tok := p.next()
+ var terminator string
+ switch tok.value {
+ case "<":
+ terminator = ">"
+ case "{":
+ terminator = "}"
+ default:
+ return p.errorf("expected '{' or '<', found %q", tok.value)
+ }
+ if err := p.consumeToken("key"); err != nil {
+ return err
+ }
+ if err := p.consumeToken(":"); err != nil {
+ return err
+ }
+ if err := p.readAny(key, props.mkeyprop); err != nil {
+ return err
+ }
+ if err := p.consumeOptionalSeparator(); err != nil {
+ return err
+ }
+ if err := p.consumeToken("value"); err != nil {
+ return err
+ }
+ if err := p.checkForColon(props.mvalprop, dst.Type().Elem()); err != nil {
+ return err
+ }
+ if err := p.readAny(val, props.mvalprop); err != nil {
+ return err
+ }
+ if err := p.consumeOptionalSeparator(); err != nil {
+ return err
+ }
+ if err := p.consumeToken(terminator); err != nil {
+ return err
+ }
+
+ dst.SetMapIndex(key, val)
+ continue
+ }
+
+ // Check that it's not already set if it's not a repeated field.
+ if !props.Repeated && fieldSet[name] {
+ return p.errorf("non-repeated field %q was repeated", name)
+ }
+
+ if err := p.checkForColon(props, dst.Type()); err != nil {
+ return err
+ }
+
+ // Parse into the field.
+ fieldSet[name] = true
+ if err := p.readAny(dst, props); err != nil {
+ if _, ok := err.(*RequiredNotSetError); !ok {
+ return err
+ }
+ reqFieldErr = err
+ } else if props.Required {
+ reqCount--
+ }
+
+ if err := p.consumeOptionalSeparator(); err != nil {
+ return err
+ }
+
+ }
+
+ if reqCount > 0 {
+ return p.missingRequiredFieldError(sv)
+ }
+ return reqFieldErr
+}
+
+// consumeOptionalSeparator consumes an optional semicolon or comma.
+// It is used in readStruct to provide backward compatibility.
+func (p *textParser) consumeOptionalSeparator() error {
+ tok := p.next()
+ if tok.err != nil {
+ return tok.err
+ }
+ if tok.value != ";" && tok.value != "," {
+ p.back()
+ }
+ return nil
+}
+
+func (p *textParser) readAny(v reflect.Value, props *Properties) error {
+ tok := p.next()
+ if tok.err != nil {
+ return tok.err
+ }
+ if tok.value == "" {
+ return p.errorf("unexpected EOF")
+ }
+
+ switch fv := v; fv.Kind() {
+ case reflect.Slice:
+ at := v.Type()
+ if at.Elem().Kind() == reflect.Uint8 {
+ // Special case for []byte
+ if tok.value[0] != '"' && tok.value[0] != '\'' {
+ // Deliberately written out here, as the error after
+ // this switch statement would write "invalid []byte: ...",
+ // which is not as user-friendly.
+ return p.errorf("invalid string: %v", tok.value)
+ }
+ bytes := []byte(tok.unquoted)
+ fv.Set(reflect.ValueOf(bytes))
+ return nil
+ }
+ // Repeated field.
+ if tok.value == "[" {
+ // Repeated field with list notation, like [1,2,3].
+ for {
+ fv.Set(reflect.Append(fv, reflect.New(at.Elem()).Elem()))
+ err := p.readAny(fv.Index(fv.Len()-1), props)
+ if err != nil {
+ return err
+ }
+ tok := p.next()
+ if tok.err != nil {
+ return tok.err
+ }
+ if tok.value == "]" {
+ break
+ }
+ if tok.value != "," {
+					return p.errorf("expected ']' or ',', found %q", tok.value)
+ }
+ }
+ return nil
+ }
+ // One value of the repeated field.
+ p.back()
+ fv.Set(reflect.Append(fv, reflect.New(at.Elem()).Elem()))
+ return p.readAny(fv.Index(fv.Len()-1), props)
+ case reflect.Bool:
+ // Either "true", "false", 1 or 0.
+ switch tok.value {
+ case "true", "1":
+ fv.SetBool(true)
+ return nil
+ case "false", "0":
+ fv.SetBool(false)
+ return nil
+ }
+ case reflect.Float32, reflect.Float64:
+ v := tok.value
+ // Ignore 'f' for compatibility with output generated by C++, but don't
+ // remove 'f' when the value is "-inf" or "inf".
+ if strings.HasSuffix(v, "f") && tok.value != "-inf" && tok.value != "inf" {
+ v = v[:len(v)-1]
+ }
+ if f, err := strconv.ParseFloat(v, fv.Type().Bits()); err == nil {
+ fv.SetFloat(f)
+ return nil
+ }
+ case reflect.Int32:
+ if x, err := strconv.ParseInt(tok.value, 0, 32); err == nil {
+ fv.SetInt(x)
+ return nil
+ }
+
+ if len(props.Enum) == 0 {
+ break
+ }
+ m, ok := enumValueMaps[props.Enum]
+ if !ok {
+ break
+ }
+ x, ok := m[tok.value]
+ if !ok {
+ break
+ }
+ fv.SetInt(int64(x))
+ return nil
+ case reflect.Int64:
+ if x, err := strconv.ParseInt(tok.value, 0, 64); err == nil {
+ fv.SetInt(x)
+ return nil
+ }
+
+ case reflect.Ptr:
+ // A basic field (indirected through pointer), or a repeated message/group
+ p.back()
+ fv.Set(reflect.New(fv.Type().Elem()))
+ return p.readAny(fv.Elem(), props)
+ case reflect.String:
+ if tok.value[0] == '"' || tok.value[0] == '\'' {
+ fv.SetString(tok.unquoted)
+ return nil
+ }
+ case reflect.Struct:
+ var terminator string
+ switch tok.value {
+ case "{":
+ terminator = "}"
+ case "<":
+ terminator = ">"
+ default:
+ return p.errorf("expected '{' or '<', found %q", tok.value)
+ }
+ // TODO: Handle nested messages which implement encoding.TextUnmarshaler.
+ return p.readStruct(fv, terminator)
+ case reflect.Uint32:
+ if x, err := strconv.ParseUint(tok.value, 0, 32); err == nil {
+ fv.SetUint(uint64(x))
+ return nil
+ }
+ case reflect.Uint64:
+ if x, err := strconv.ParseUint(tok.value, 0, 64); err == nil {
+ fv.SetUint(x)
+ return nil
+ }
+ }
+ return p.errorf("invalid %v: %v", v.Type(), tok.value)
+}
+
+// UnmarshalText reads a protocol buffer in Text format. UnmarshalText resets pb
+// before starting to unmarshal, so any existing data in pb is always removed.
+// If a required field is not set and no other error occurs,
+// UnmarshalText returns *RequiredNotSetError.
+func UnmarshalText(s string, pb Message) error {
+ if um, ok := pb.(encoding.TextUnmarshaler); ok {
+ err := um.UnmarshalText([]byte(s))
+ return err
+ }
+ pb.Reset()
+ v := reflect.ValueOf(pb)
+ if pe := newTextParser(s).readStruct(v.Elem(), ""); pe != nil {
+ return pe
+ }
+ return nil
+}
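+
+// Usage sketch: parsing a text-format message with UnmarshalText, assuming a
+// hypothetical generated message type pb.Person with a string field "name".
+//
+//	var p pb.Person
+//	if err := proto.UnmarshalText(`name: "Alice"`, &p); err != nil {
+//		log.Fatalf("parse failed: %v", err)
+//	}
+//	// p.Name now points to "Alice".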
diff --git a/vendor/github.com/kolo/xmlrpc/LICENSE b/vendor/github.com/kolo/xmlrpc/LICENSE
new file mode 100644
index 00000000..8103dd13
--- /dev/null
+++ b/vendor/github.com/kolo/xmlrpc/LICENSE
@@ -0,0 +1,19 @@
+Copyright (C) 2012 Dmitry Maksimov
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+THE SOFTWARE.
diff --git a/vendor/github.com/kolo/xmlrpc/README.md b/vendor/github.com/kolo/xmlrpc/README.md
new file mode 100644
index 00000000..12b7692e
--- /dev/null
+++ b/vendor/github.com/kolo/xmlrpc/README.md
@@ -0,0 +1,79 @@
+## Overview
+
+xmlrpc is a Go implementation of the client side of the XML-RPC protocol.
+
+## Installation
+
+To install the xmlrpc package, run `go get github.com/kolo/xmlrpc`. To use
+it in an application, add `"github.com/kolo/xmlrpc"` to the `import`
+statement.
+
+## Usage
+
+ client, _ := xmlrpc.NewClient("https://bugzilla.mozilla.org/xmlrpc.cgi", nil)
+ result := struct{
+ Version string `xmlrpc:"version"`
+ }{}
+ client.Call("Bugzilla.version", nil, &result)
+ fmt.Printf("Version: %s\n", result.Version) // Version: 4.2.7+
+
+The second argument of the NewClient function is an object that implements the
+[http.RoundTripper](http://golang.org/pkg/net/http/#RoundTripper)
+interface; it can be used to gain more control over connection options.
+By default it is initialized with the http.DefaultTransport object.
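+
+A minimal sketch of passing a custom transport (the timeout value is an
+arbitrary choice for illustration):
+
+    transport := &http.Transport{
+        ResponseHeaderTimeout: 10 * time.Second,
+    }
+    client, err := xmlrpc.NewClient("https://bugzilla.mozilla.org/xmlrpc.cgi", transport)
+    if err != nil {
+        log.Fatal(err)
+    }
+    defer client.Close()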
+
+### Arguments encoding
+
+The xmlrpc package supports encoding native Go data types as method
+arguments.
+
+Data type encoding rules:
+* int, int8, int16, int32, int64 are encoded to int;
+* float32, float64 are encoded to double;
+* bool is encoded to boolean;
+* string is encoded to string;
+* time.Time is encoded to datetime.iso8601;
+* xmlrpc.Base64 is encoded to base64;
+* a slice is encoded to array;
+
+Structs are encoded to struct following these rules:
+* all public fields become struct members;
+* the field name becomes the member name;
+* if the field has an xmlrpc tag, its value becomes the member name.
+
+A server method can accept several arguments. To handle this case, a slice of
+empty interfaces (`[]interface{}`) is treated specially: each value of such a
+slice is encoded as a separate argument.
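+
+For example (a sketch; the method name and values are made up):
+
+    var sum int64
+    args := []interface{}{int64(2), int64(3)}
+    if err := client.Call("service.sum", args, &sum); err != nil {
+        log.Fatal(err)
+    }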
+
+### Result decoding
+
+The result of a remote function is decoded to a native Go data type (see the
+sketch after the rules below).
+
+Data type decoding rules:
+* int, i4 are decoded to int, int8, int16, int32, int64;
+* double is decoded to float32, float64;
+* boolean is decoded to bool;
+* string is decoded to string;
+* array is decoded to a slice;
+* structs are decoded following the rules described in the previous section;
+* datetime.iso8601 is decoded to time.Time;
+* base64 is decoded to string.
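+
+A minimal sketch (the method name is made up): an array result can be decoded
+straight into a slice, and a struct result into a map[string]interface{} when
+its layout is not known in advance:
+
+    var names []string
+    if err := client.Call("service.listNames", nil, &names); err != nil {
+        log.Fatal(err)
+    }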
+
+## Implementation details
+
+xmlrpc package contains clientCodec type, that implements [rpc.ClientCodec](http://golang.org/pkg/net/rpc/#ClientCodec)
+interface of [net/rpc](http://golang.org/pkg/net/rpc) package.
+
+The xmlrpc package works over HTTP, but some internal functions and data types
+were made public to make it easier to build an xmlrpc implementation that works
+over another protocol. The EncodeMethodCall function encodes a request body,
+and the Response data type decodes a server response.
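+
+A rough sketch of using these lower-level pieces directly (transport handling
+is omitted; the method name, argument and respData variable are made up):
+
+    body, err := xmlrpc.EncodeMethodCall("service.upcase", "hello")
+    if err != nil {
+        log.Fatal(err)
+    }
+    // send body over your own transport and read the raw XML reply into respData
+
+    resp := xmlrpc.NewResponse(respData)
+    if resp.Failed() {
+        log.Fatal(resp.Err())
+    }
+    var result string
+    if err := resp.Unmarshal(&result); err != nil {
+        log.Fatal(err)
+    }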
+
+## Contribution
+
+Feel free to fork the project, submit pull requests, and ask questions.
+
+## Authors
+
+Dmitry Maksimov (dmtmax@gmail.com)
diff --git a/vendor/github.com/kolo/xmlrpc/client.go b/vendor/github.com/kolo/xmlrpc/client.go
new file mode 100644
index 00000000..fb66b65f
--- /dev/null
+++ b/vendor/github.com/kolo/xmlrpc/client.go
@@ -0,0 +1,144 @@
+package xmlrpc
+
+import (
+ "fmt"
+ "io/ioutil"
+ "net/http"
+ "net/http/cookiejar"
+ "net/rpc"
+ "net/url"
+)
+
+type Client struct {
+ *rpc.Client
+}
+
+// clientCodec is an implementation of the rpc.ClientCodec interface.
+type clientCodec struct {
+	// url is the URL of the xmlrpc service.
+	url *url.URL
+
+	// httpClient performs the HTTP requests.
+	httpClient *http.Client
+
+	// cookies stores cookies received on the last request.
+	cookies http.CookieJar
+
+	// responses maps sequence numbers of active requests to their HTTP
+	// responses, so that rpc.Client can mark them as done.
+	responses map[uint64]*http.Response
+
+	response *Response
+
+	// ready is the channel used to link a request to its response.
+	ready chan uint64
+}
+
+func (codec *clientCodec) WriteRequest(request *rpc.Request, args interface{}) (err error) {
+ httpRequest, err := NewRequest(codec.url.String(), request.ServiceMethod, args)
+
+ if codec.cookies != nil {
+ for _, cookie := range codec.cookies.Cookies(codec.url) {
+ httpRequest.AddCookie(cookie)
+ }
+ }
+
+ if err != nil {
+ return err
+ }
+
+ var httpResponse *http.Response
+ httpResponse, err = codec.httpClient.Do(httpRequest)
+
+ if err != nil {
+ return err
+ }
+
+ if codec.cookies != nil {
+ codec.cookies.SetCookies(codec.url, httpResponse.Cookies())
+ }
+
+ codec.responses[request.Seq] = httpResponse
+ codec.ready <- request.Seq
+
+ return nil
+}
+
+func (codec *clientCodec) ReadResponseHeader(response *rpc.Response) (err error) {
+ seq := <-codec.ready
+ httpResponse := codec.responses[seq]
+
+ if httpResponse.StatusCode < 200 || httpResponse.StatusCode >= 300 {
+ return fmt.Errorf("request error: bad status code - %d", httpResponse.StatusCode)
+ }
+
+ respData, err := ioutil.ReadAll(httpResponse.Body)
+
+ if err != nil {
+ return err
+ }
+
+ httpResponse.Body.Close()
+
+ resp := NewResponse(respData)
+
+ if resp.Failed() {
+ response.Error = fmt.Sprintf("%v", resp.Err())
+ }
+
+ codec.response = resp
+
+ response.Seq = seq
+ delete(codec.responses, seq)
+
+ return nil
+}
+
+func (codec *clientCodec) ReadResponseBody(v interface{}) (err error) {
+ if v == nil {
+ return nil
+ }
+
+ if err = codec.response.Unmarshal(v); err != nil {
+ return err
+ }
+
+ return nil
+}
+
+func (codec *clientCodec) Close() error {
+ transport := codec.httpClient.Transport.(*http.Transport)
+ transport.CloseIdleConnections()
+ return nil
+}
+
+// NewClient returns an instance of rpc.Client that is used to send requests to an xmlrpc service.
+func NewClient(requrl string, transport http.RoundTripper) (*Client, error) {
+ if transport == nil {
+ transport = http.DefaultTransport
+ }
+
+ httpClient := &http.Client{Transport: transport}
+
+ jar, err := cookiejar.New(nil)
+
+ if err != nil {
+ return nil, err
+ }
+
+ u, err := url.Parse(requrl)
+
+ if err != nil {
+ return nil, err
+ }
+
+ codec := clientCodec{
+ url: u,
+ httpClient: httpClient,
+ ready: make(chan uint64),
+ responses: make(map[uint64]*http.Response),
+ cookies: jar,
+ }
+
+ return &Client{rpc.NewClientWithCodec(&codec)}, nil
+}
diff --git a/vendor/github.com/kolo/xmlrpc/decoder.go b/vendor/github.com/kolo/xmlrpc/decoder.go
new file mode 100644
index 00000000..b7395597
--- /dev/null
+++ b/vendor/github.com/kolo/xmlrpc/decoder.go
@@ -0,0 +1,449 @@
+package xmlrpc
+
+import (
+ "bytes"
+ "encoding/xml"
+ "errors"
+ "fmt"
+ "io"
+ "reflect"
+ "strconv"
+ "strings"
+ "time"
+)
+
+const iso8601 = "20060102T15:04:05"
+
+var (
+	// CharsetReader is a function to generate a reader that converts a non-UTF-8
+	// charset into UTF-8.
+ CharsetReader func(string, io.Reader) (io.Reader, error)
+
+ invalidXmlError = errors.New("invalid xml")
+)
+
+type TypeMismatchError string
+
+func (e TypeMismatchError) Error() string { return string(e) }
+
+type decoder struct {
+ *xml.Decoder
+}
+
+func unmarshal(data []byte, v interface{}) (err error) {
+ dec := &decoder{xml.NewDecoder(bytes.NewBuffer(data))}
+
+ if CharsetReader != nil {
+ dec.CharsetReader = CharsetReader
+ }
+
+ var tok xml.Token
+ for {
+ if tok, err = dec.Token(); err != nil {
+ return err
+ }
+
+ if t, ok := tok.(xml.StartElement); ok {
+ if t.Name.Local == "value" {
+ val := reflect.ValueOf(v)
+ if val.Kind() != reflect.Ptr {
+ return errors.New("non-pointer value passed to unmarshal")
+ }
+ if err = dec.decodeValue(val.Elem()); err != nil {
+ return err
+ }
+
+ break
+ }
+ }
+ }
+
+ // read until end of document
+ err = dec.Skip()
+ if err != nil && err != io.EOF {
+ return err
+ }
+
+ return nil
+}
+
+func (dec *decoder) decodeValue(val reflect.Value) error {
+ var tok xml.Token
+ var err error
+
+ if val.Kind() == reflect.Ptr {
+ if val.IsNil() {
+ val.Set(reflect.New(val.Type().Elem()))
+ }
+ val = val.Elem()
+ }
+
+ var typeName string
+ for {
+ if tok, err = dec.Token(); err != nil {
+ return err
+ }
+
+ if t, ok := tok.(xml.EndElement); ok {
+ if t.Name.Local == "value" {
+ return nil
+ } else {
+ return invalidXmlError
+ }
+ }
+
+ if t, ok := tok.(xml.StartElement); ok {
+ typeName = t.Name.Local
+ break
+ }
+
+ // Treat value data without type identifier as string
+ if t, ok := tok.(xml.CharData); ok {
+ if value := strings.TrimSpace(string(t)); value != "" {
+ if err = checkType(val, reflect.String); err != nil {
+ return err
+ }
+
+ val.SetString(value)
+ return nil
+ }
+ }
+ }
+
+ switch typeName {
+ case "struct":
+ ismap := false
+ pmap := val
+ valType := val.Type()
+
+ if err = checkType(val, reflect.Struct); err != nil {
+ if checkType(val, reflect.Map) == nil {
+ if valType.Key().Kind() != reflect.String {
+ return fmt.Errorf("only maps with string key type can be unmarshalled")
+ }
+ ismap = true
+ } else if checkType(val, reflect.Interface) == nil && val.IsNil() {
+ var dummy map[string]interface{}
+ pmap = reflect.New(reflect.TypeOf(dummy)).Elem()
+ valType = pmap.Type()
+ ismap = true
+ } else {
+ return err
+ }
+ }
+
+ var fields map[string]reflect.Value
+
+ if !ismap {
+ fields = make(map[string]reflect.Value)
+
+ for i := 0; i < valType.NumField(); i++ {
+ field := valType.Field(i)
+ fieldVal := val.FieldByName(field.Name)
+
+ if fieldVal.CanSet() {
+ if fn := field.Tag.Get("xmlrpc"); fn != "" {
+ fields[fn] = fieldVal
+ } else {
+ fields[field.Name] = fieldVal
+ }
+ }
+ }
+ } else {
+ // Create initial empty map
+ pmap.Set(reflect.MakeMap(valType))
+ }
+
+ // Process struct members.
+ StructLoop:
+ for {
+ if tok, err = dec.Token(); err != nil {
+ return err
+ }
+ switch t := tok.(type) {
+ case xml.StartElement:
+ if t.Name.Local != "member" {
+ return invalidXmlError
+ }
+
+ tagName, fieldName, err := dec.readTag()
+ if err != nil {
+ return err
+ }
+ if tagName != "name" {
+ return invalidXmlError
+ }
+
+ var fv reflect.Value
+ ok := true
+
+ if !ismap {
+ fv, ok = fields[string(fieldName)]
+ } else {
+ fv = reflect.New(valType.Elem())
+ }
+
+ if ok {
+ for {
+ if tok, err = dec.Token(); err != nil {
+ return err
+ }
+ if t, ok := tok.(xml.StartElement); ok && t.Name.Local == "value" {
+ if err = dec.decodeValue(fv); err != nil {
+ return err
+ }
+
+							// Skip the closing </value> tag.
+ if err = dec.Skip(); err != nil {
+ return err
+ }
+
+ break
+ }
+ }
+ }
+
+				// Skip the closing </member> tag.
+ if err = dec.Skip(); err != nil {
+ return err
+ }
+
+ if ismap {
+ pmap.SetMapIndex(reflect.ValueOf(string(fieldName)), reflect.Indirect(fv))
+ val.Set(pmap)
+ }
+ case xml.EndElement:
+ break StructLoop
+ }
+ }
+ case "array":
+ pslice := val
+ if checkType(val, reflect.Interface) == nil && val.IsNil() {
+ var dummy []interface{}
+ pslice = reflect.New(reflect.TypeOf(dummy)).Elem()
+ } else if err = checkType(val, reflect.Slice); err != nil {
+ return err
+ }
+
+ ArrayLoop:
+ for {
+ if tok, err = dec.Token(); err != nil {
+ return err
+ }
+
+ switch t := tok.(type) {
+ case xml.StartElement:
+ if t.Name.Local != "data" {
+ return invalidXmlError
+ }
+
+ slice := reflect.MakeSlice(pslice.Type(), 0, 0)
+
+ DataLoop:
+ for {
+ if tok, err = dec.Token(); err != nil {
+ return err
+ }
+
+ switch tt := tok.(type) {
+ case xml.StartElement:
+ if tt.Name.Local != "value" {
+ return invalidXmlError
+ }
+
+ v := reflect.New(pslice.Type().Elem())
+ if err = dec.decodeValue(v); err != nil {
+ return err
+ }
+
+ slice = reflect.Append(slice, v.Elem())
+
+						// Skip the closing </value> tag.
+ if err = dec.Skip(); err != nil {
+ return err
+ }
+ case xml.EndElement:
+ pslice.Set(slice)
+ val.Set(pslice)
+ break DataLoop
+ }
+ }
+ case xml.EndElement:
+ break ArrayLoop
+ }
+ }
+ default:
+ if tok, err = dec.Token(); err != nil {
+ return err
+ }
+
+ var data []byte
+
+ switch t := tok.(type) {
+ case xml.EndElement:
+ return nil
+ case xml.CharData:
+ data = []byte(t.Copy())
+ default:
+ return invalidXmlError
+ }
+
+ switch typeName {
+ case "int", "i4", "i8":
+ if checkType(val, reflect.Interface) == nil && val.IsNil() {
+ i, err := strconv.ParseInt(string(data), 10, 64)
+ if err != nil {
+ return err
+ }
+
+ pi := reflect.New(reflect.TypeOf(i)).Elem()
+ pi.SetInt(i)
+ val.Set(pi)
+ } else if err = checkType(val, reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64); err != nil {
+ return err
+ } else {
+ i, err := strconv.ParseInt(string(data), 10, val.Type().Bits())
+ if err != nil {
+ return err
+ }
+
+ val.SetInt(i)
+ }
+ case "string", "base64":
+ str := string(data)
+ if checkType(val, reflect.Interface) == nil && val.IsNil() {
+ pstr := reflect.New(reflect.TypeOf(str)).Elem()
+ pstr.SetString(str)
+ val.Set(pstr)
+ } else if err = checkType(val, reflect.String); err != nil {
+ return err
+ } else {
+ val.SetString(str)
+ }
+ case "dateTime.iso8601":
+ t, err := time.Parse(iso8601, string(data))
+ if err != nil {
+ return err
+ }
+
+ if checkType(val, reflect.Interface) == nil && val.IsNil() {
+ ptime := reflect.New(reflect.TypeOf(t)).Elem()
+ ptime.Set(reflect.ValueOf(t))
+ val.Set(ptime)
+ } else if _, ok := val.Interface().(time.Time); !ok {
+ return TypeMismatchError(fmt.Sprintf("error: type mismatch error - can't decode %v to time", val.Kind()))
+ } else {
+ val.Set(reflect.ValueOf(t))
+ }
+ case "boolean":
+ v, err := strconv.ParseBool(string(data))
+ if err != nil {
+ return err
+ }
+
+ if checkType(val, reflect.Interface) == nil && val.IsNil() {
+ pv := reflect.New(reflect.TypeOf(v)).Elem()
+ pv.SetBool(v)
+ val.Set(pv)
+ } else if err = checkType(val, reflect.Bool); err != nil {
+ return err
+ } else {
+ val.SetBool(v)
+ }
+ case "double":
+ if checkType(val, reflect.Interface) == nil && val.IsNil() {
+ i, err := strconv.ParseFloat(string(data), 64)
+ if err != nil {
+ return err
+ }
+
+ pdouble := reflect.New(reflect.TypeOf(i)).Elem()
+ pdouble.SetFloat(i)
+ val.Set(pdouble)
+ } else if err = checkType(val, reflect.Float32, reflect.Float64); err != nil {
+ return err
+ } else {
+ i, err := strconv.ParseFloat(string(data), val.Type().Bits())
+ if err != nil {
+ return err
+ }
+
+ val.SetFloat(i)
+ }
+ default:
+ return errors.New("unsupported type")
+ }
+
+		// Skip the closing tag of the scalar type element.
+ if err = dec.Skip(); err != nil {
+ return err
+ }
+ }
+
+ return nil
+}
+
+func (dec *decoder) readTag() (string, []byte, error) {
+ var tok xml.Token
+ var err error
+
+ var name string
+ for {
+ if tok, err = dec.Token(); err != nil {
+ return "", nil, err
+ }
+
+ if t, ok := tok.(xml.StartElement); ok {
+ name = t.Name.Local
+ break
+ }
+ }
+
+ value, err := dec.readCharData()
+ if err != nil {
+ return "", nil, err
+ }
+
+ return name, value, dec.Skip()
+}
+
+func (dec *decoder) readCharData() ([]byte, error) {
+ var tok xml.Token
+ var err error
+
+ if tok, err = dec.Token(); err != nil {
+ return nil, err
+ }
+
+ if t, ok := tok.(xml.CharData); ok {
+ return []byte(t.Copy()), nil
+ } else {
+ return nil, invalidXmlError
+ }
+}
+
+func checkType(val reflect.Value, kinds ...reflect.Kind) error {
+ if len(kinds) == 0 {
+ return nil
+ }
+
+ if val.Kind() == reflect.Ptr {
+ val = val.Elem()
+ }
+
+ match := false
+
+ for _, kind := range kinds {
+ if val.Kind() == kind {
+ match = true
+ break
+ }
+ }
+
+ if !match {
+ return TypeMismatchError(fmt.Sprintf("error: type mismatch - can't unmarshal %v to %v",
+ val.Kind(), kinds[0]))
+ }
+
+ return nil
+}
diff --git a/vendor/github.com/kolo/xmlrpc/encoder.go b/vendor/github.com/kolo/xmlrpc/encoder.go
new file mode 100644
index 00000000..bb1285ff
--- /dev/null
+++ b/vendor/github.com/kolo/xmlrpc/encoder.go
@@ -0,0 +1,164 @@
+package xmlrpc
+
+import (
+ "bytes"
+ "encoding/xml"
+ "fmt"
+ "reflect"
+ "strconv"
+ "time"
+)
+
+type encodeFunc func(reflect.Value) ([]byte, error)
+
+func marshal(v interface{}) ([]byte, error) {
+ if v == nil {
+ return []byte{}, nil
+ }
+
+ val := reflect.ValueOf(v)
+ return encodeValue(val)
+}
+
+func encodeValue(val reflect.Value) ([]byte, error) {
+	var b []byte
+	var err error
+
+	if val.Kind() == reflect.Ptr || val.Kind() == reflect.Interface {
+		if val.IsNil() {
+			return []byte("<nil/>"), nil
+		}
+
+		val = val.Elem()
+	}
+
+	switch val.Kind() {
+	case reflect.Struct:
+		switch val.Interface().(type) {
+		case time.Time:
+			t := val.Interface().(time.Time)
+			b = []byte(fmt.Sprintf("<dateTime.iso8601>%s</dateTime.iso8601>", t.Format(iso8601)))
+		default:
+			b, err = encodeStruct(val)
+		}
+	case reflect.Map:
+		b, err = encodeMap(val)
+	case reflect.Slice:
+		b, err = encodeSlice(val)
+	case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
+		b = []byte(fmt.Sprintf("<int>%s</int>", strconv.FormatInt(val.Int(), 10)))
+	case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64:
+		b = []byte(fmt.Sprintf("<i4>%s</i4>", strconv.FormatUint(val.Uint(), 10)))
+	case reflect.Float32, reflect.Float64:
+		b = []byte(fmt.Sprintf("<double>%s</double>",
+			strconv.FormatFloat(val.Float(), 'g', -1, val.Type().Bits())))
+	case reflect.Bool:
+		if val.Bool() {
+			b = []byte("<boolean>1</boolean>")
+		} else {
+			b = []byte("<boolean>0</boolean>")
+		}
+	case reflect.String:
+		var buf bytes.Buffer
+
+		xml.Escape(&buf, []byte(val.String()))
+
+		if _, ok := val.Interface().(Base64); ok {
+			b = []byte(fmt.Sprintf("<base64>%s</base64>", buf.String()))
+		} else {
+			b = []byte(fmt.Sprintf("<string>%s</string>", buf.String()))
+		}
+	default:
+		return nil, fmt.Errorf("xmlrpc encode error: unsupported type")
+	}
+
+	if err != nil {
+		return nil, err
+	}
+
+	return []byte(fmt.Sprintf("<value>%s</value>", string(b))), nil
+}
+
+func encodeStruct(val reflect.Value) ([]byte, error) {
+	var b bytes.Buffer
+
+	b.WriteString("<struct>")
+
+	t := val.Type()
+	for i := 0; i < t.NumField(); i++ {
+		b.WriteString("<member>")
+		f := t.Field(i)
+
+		name := f.Tag.Get("xmlrpc")
+		if name == "" {
+			name = f.Name
+		}
+		b.WriteString(fmt.Sprintf("<name>%s</name>", name))
+
+		p, err := encodeValue(val.FieldByName(f.Name))
+		if err != nil {
+			return nil, err
+		}
+		b.Write(p)
+
+		b.WriteString("</member>")
+	}
+
+	b.WriteString("</struct>")
+
+	return b.Bytes(), nil
+}
+
+func encodeMap(val reflect.Value) ([]byte, error) {
+	var t = val.Type()
+
+	if t.Key().Kind() != reflect.String {
+		return nil, fmt.Errorf("xmlrpc encode error: only maps with string keys are supported")
+	}
+
+	var b bytes.Buffer
+
+	b.WriteString("<struct>")
+
+	keys := val.MapKeys()
+
+	for i := 0; i < val.Len(); i++ {
+		key := keys[i]
+		kval := val.MapIndex(key)
+
+		b.WriteString("<member>")
+		b.WriteString(fmt.Sprintf("<name>%s</name>", key.String()))
+
+		p, err := encodeValue(kval)
+
+		if err != nil {
+			return nil, err
+		}
+
+		b.Write(p)
+		b.WriteString("</member>")
+	}
+
+	b.WriteString("</struct>")
+
+	return b.Bytes(), nil
+}
+
+func encodeSlice(val reflect.Value) ([]byte, error) {
+	var b bytes.Buffer
+
+	b.WriteString("<array><data>")
+
+	for i := 0; i < val.Len(); i++ {
+		p, err := encodeValue(val.Index(i))
+		if err != nil {
+			return nil, err
+		}
+
+		b.Write(p)
+	}
+
+	b.WriteString("</data></array>")
+
+	return b.Bytes(), nil
+}
diff --git a/vendor/github.com/kolo/xmlrpc/request.go b/vendor/github.com/kolo/xmlrpc/request.go
new file mode 100644
index 00000000..acb8251b
--- /dev/null
+++ b/vendor/github.com/kolo/xmlrpc/request.go
@@ -0,0 +1,57 @@
+package xmlrpc
+
+import (
+ "bytes"
+ "fmt"
+ "net/http"
+)
+
+func NewRequest(url string, method string, args interface{}) (*http.Request, error) {
+ var t []interface{}
+ var ok bool
+ if t, ok = args.([]interface{}); !ok {
+ if args != nil {
+ t = []interface{}{args}
+ }
+ }
+
+ body, err := EncodeMethodCall(method, t...)
+ if err != nil {
+ return nil, err
+ }
+
+ request, err := http.NewRequest("POST", url, bytes.NewReader(body))
+ if err != nil {
+ return nil, err
+ }
+
+ request.Header.Set("Content-Type", "text/xml")
+ request.Header.Set("Content-Length", fmt.Sprintf("%d", len(body)))
+
+ return request, nil
+}
+
+func EncodeMethodCall(method string, args ...interface{}) ([]byte, error) {
+	var b bytes.Buffer
+	b.WriteString(`<?xml version="1.0" encoding="UTF-8"?>`)
+	b.WriteString(fmt.Sprintf("<methodCall><methodName>%s</methodName>", method))
+
+	if args != nil {
+		b.WriteString("<params>")
+
+		for _, arg := range args {
+			p, err := marshal(arg)
+			if err != nil {
+				return nil, err
+			}
+
+			b.WriteString(fmt.Sprintf("<param>%s</param>", string(p)))
+		}
+
+		b.WriteString("</params>")
+	}
+
+	b.WriteString("</methodCall>")
+
+	return b.Bytes(), nil
+}
diff --git a/vendor/github.com/kolo/xmlrpc/response.go b/vendor/github.com/kolo/xmlrpc/response.go
new file mode 100644
index 00000000..6742a1c7
--- /dev/null
+++ b/vendor/github.com/kolo/xmlrpc/response.go
@@ -0,0 +1,52 @@
+package xmlrpc
+
+import (
+ "regexp"
+)
+
+var (
+	faultRx = regexp.MustCompile(`<fault>(\s|\S)+</fault>`)
+)
+
+type failedResponse struct {
+ Code int `xmlrpc:"faultCode"`
+ Error string `xmlrpc:"faultString"`
+}
+
+func (r *failedResponse) err() error {
+ return &xmlrpcError{
+ code: r.Code,
+ err: r.Error,
+ }
+}
+
+type Response struct {
+ data []byte
+}
+
+func NewResponse(data []byte) *Response {
+ return &Response{
+ data: data,
+ }
+}
+
+func (r *Response) Failed() bool {
+ return faultRx.Match(r.data)
+}
+
+func (r *Response) Err() error {
+ failedResp := new(failedResponse)
+ if err := unmarshal(r.data, failedResp); err != nil {
+ return err
+ }
+
+ return failedResp.err()
+}
+
+func (r *Response) Unmarshal(v interface{}) error {
+ if err := unmarshal(r.data, v); err != nil {
+ return err
+ }
+
+ return nil
+}
diff --git a/vendor/github.com/kolo/xmlrpc/test_server.rb b/vendor/github.com/kolo/xmlrpc/test_server.rb
new file mode 100644
index 00000000..1b1ff876
--- /dev/null
+++ b/vendor/github.com/kolo/xmlrpc/test_server.rb
@@ -0,0 +1,25 @@
+# encoding: utf-8
+
+require "xmlrpc/server"
+
+class Service
+ def time
+ Time.now
+ end
+
+ def upcase(s)
+ s.upcase
+ end
+
+ def sum(x, y)
+ x + y
+ end
+
+ def error
+ raise XMLRPC::FaultException.new(500, "Server error")
+ end
+end
+
+server = XMLRPC::Server.new 5001, 'localhost'
+server.add_handler "service", Service.new
+server.serve
diff --git a/vendor/github.com/kolo/xmlrpc/xmlrpc.go b/vendor/github.com/kolo/xmlrpc/xmlrpc.go
new file mode 100644
index 00000000..8766403a
--- /dev/null
+++ b/vendor/github.com/kolo/xmlrpc/xmlrpc.go
@@ -0,0 +1,19 @@
+package xmlrpc
+
+import (
+ "fmt"
+)
+
+// xmlrpcError represents errors returned on xmlrpc request.
+type xmlrpcError struct {
+ code int
+ err string
+}
+
+// Error implements the error interface.
+func (e *xmlrpcError) Error() string {
+ return fmt.Sprintf("error: \"%s\" code: %d", e.err, e.code)
+}
+
+// Base64 represents a value in base64 encoding.
+type Base64 string
diff --git a/vendor/github.com/matttproud/golang_protobuf_extensions/LICENSE b/vendor/github.com/matttproud/golang_protobuf_extensions/LICENSE
new file mode 100644
index 00000000..8dada3ed
--- /dev/null
+++ b/vendor/github.com/matttproud/golang_protobuf_extensions/LICENSE
@@ -0,0 +1,201 @@
+ Apache License
+ Version 2.0, January 2004
+ http://www.apache.org/licenses/
+
+ TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+
+ 1. Definitions.
+
+ "License" shall mean the terms and conditions for use, reproduction,
+ and distribution as defined by Sections 1 through 9 of this document.
+
+ "Licensor" shall mean the copyright owner or entity authorized by
+ the copyright owner that is granting the License.
+
+ "Legal Entity" shall mean the union of the acting entity and all
+ other entities that control, are controlled by, or are under common
+ control with that entity. For the purposes of this definition,
+ "control" means (i) the power, direct or indirect, to cause the
+ direction or management of such entity, whether by contract or
+ otherwise, or (ii) ownership of fifty percent (50%) or more of the
+ outstanding shares, or (iii) beneficial ownership of such entity.
+
+ "You" (or "Your") shall mean an individual or Legal Entity
+ exercising permissions granted by this License.
+
+ "Source" form shall mean the preferred form for making modifications,
+ including but not limited to software source code, documentation
+ source, and configuration files.
+
+ "Object" form shall mean any form resulting from mechanical
+ transformation or translation of a Source form, including but
+ not limited to compiled object code, generated documentation,
+ and conversions to other media types.
+
+ "Work" shall mean the work of authorship, whether in Source or
+ Object form, made available under the License, as indicated by a
+ copyright notice that is included in or attached to the work
+ (an example is provided in the Appendix below).
+
+ "Derivative Works" shall mean any work, whether in Source or Object
+ form, that is based on (or derived from) the Work and for which the
+ editorial revisions, annotations, elaborations, or other modifications
+ represent, as a whole, an original work of authorship. For the purposes
+ of this License, Derivative Works shall not include works that remain
+ separable from, or merely link (or bind by name) to the interfaces of,
+ the Work and Derivative Works thereof.
+
+ "Contribution" shall mean any work of authorship, including
+ the original version of the Work and any modifications or additions
+ to that Work or Derivative Works thereof, that is intentionally
+ submitted to Licensor for inclusion in the Work by the copyright owner
+ or by an individual or Legal Entity authorized to submit on behalf of
+ the copyright owner. For the purposes of this definition, "submitted"
+ means any form of electronic, verbal, or written communication sent
+ to the Licensor or its representatives, including but not limited to
+ communication on electronic mailing lists, source code control systems,
+ and issue tracking systems that are managed by, or on behalf of, the
+ Licensor for the purpose of discussing and improving the Work, but
+ excluding communication that is conspicuously marked or otherwise
+ designated in writing by the copyright owner as "Not a Contribution."
+
+ "Contributor" shall mean Licensor and any individual or Legal Entity
+ on behalf of whom a Contribution has been received by Licensor and
+ subsequently incorporated within the Work.
+
+ 2. Grant of Copyright License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ copyright license to reproduce, prepare Derivative Works of,
+ publicly display, publicly perform, sublicense, and distribute the
+ Work and such Derivative Works in Source or Object form.
+
+ 3. Grant of Patent License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ (except as stated in this section) patent license to make, have made,
+ use, offer to sell, sell, import, and otherwise transfer the Work,
+ where such license applies only to those patent claims licensable
+ by such Contributor that are necessarily infringed by their
+ Contribution(s) alone or by combination of their Contribution(s)
+ with the Work to which such Contribution(s) was submitted. If You
+ institute patent litigation against any entity (including a
+ cross-claim or counterclaim in a lawsuit) alleging that the Work
+ or a Contribution incorporated within the Work constitutes direct
+ or contributory patent infringement, then any patent licenses
+ granted to You under this License for that Work shall terminate
+ as of the date such litigation is filed.
+
+ 4. Redistribution. You may reproduce and distribute copies of the
+ Work or Derivative Works thereof in any medium, with or without
+ modifications, and in Source or Object form, provided that You
+ meet the following conditions:
+
+ (a) You must give any other recipients of the Work or
+ Derivative Works a copy of this License; and
+
+ (b) You must cause any modified files to carry prominent notices
+ stating that You changed the files; and
+
+ (c) You must retain, in the Source form of any Derivative Works
+ that You distribute, all copyright, patent, trademark, and
+ attribution notices from the Source form of the Work,
+ excluding those notices that do not pertain to any part of
+ the Derivative Works; and
+
+ (d) If the Work includes a "NOTICE" text file as part of its
+ distribution, then any Derivative Works that You distribute must
+ include a readable copy of the attribution notices contained
+ within such NOTICE file, excluding those notices that do not
+ pertain to any part of the Derivative Works, in at least one
+ of the following places: within a NOTICE text file distributed
+ as part of the Derivative Works; within the Source form or
+ documentation, if provided along with the Derivative Works; or,
+ within a display generated by the Derivative Works, if and
+ wherever such third-party notices normally appear. The contents
+ of the NOTICE file are for informational purposes only and
+ do not modify the License. You may add Your own attribution
+ notices within Derivative Works that You distribute, alongside
+ or as an addendum to the NOTICE text from the Work, provided
+ that such additional attribution notices cannot be construed
+ as modifying the License.
+
+ You may add Your own copyright statement to Your modifications and
+ may provide additional or different license terms and conditions
+ for use, reproduction, or distribution of Your modifications, or
+ for any such Derivative Works as a whole, provided Your use,
+ reproduction, and distribution of the Work otherwise complies with
+ the conditions stated in this License.
+
+ 5. Submission of Contributions. Unless You explicitly state otherwise,
+ any Contribution intentionally submitted for inclusion in the Work
+ by You to the Licensor shall be under the terms and conditions of
+ this License, without any additional terms or conditions.
+ Notwithstanding the above, nothing herein shall supersede or modify
+ the terms of any separate license agreement you may have executed
+ with Licensor regarding such Contributions.
+
+ 6. Trademarks. This License does not grant permission to use the trade
+ names, trademarks, service marks, or product names of the Licensor,
+ except as required for reasonable and customary use in describing the
+ origin of the Work and reproducing the content of the NOTICE file.
+
+ 7. Disclaimer of Warranty. Unless required by applicable law or
+ agreed to in writing, Licensor provides the Work (and each
+ Contributor provides its Contributions) on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+ implied, including, without limitation, any warranties or conditions
+ of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
+ PARTICULAR PURPOSE. You are solely responsible for determining the
+ appropriateness of using or redistributing the Work and assume any
+ risks associated with Your exercise of permissions under this License.
+
+ 8. Limitation of Liability. In no event and under no legal theory,
+ whether in tort (including negligence), contract, or otherwise,
+ unless required by applicable law (such as deliberate and grossly
+ negligent acts) or agreed to in writing, shall any Contributor be
+ liable to You for damages, including any direct, indirect, special,
+ incidental, or consequential damages of any character arising as a
+ result of this License or out of the use or inability to use the
+ Work (including but not limited to damages for loss of goodwill,
+ work stoppage, computer failure or malfunction, or any and all
+ other commercial damages or losses), even if such Contributor
+ has been advised of the possibility of such damages.
+
+ 9. Accepting Warranty or Additional Liability. While redistributing
+ the Work or Derivative Works thereof, You may choose to offer,
+ and charge a fee for, acceptance of support, warranty, indemnity,
+ or other liability obligations and/or rights consistent with this
+ License. However, in accepting such obligations, You may act only
+ on Your own behalf and on Your sole responsibility, not on behalf
+ of any other Contributor, and only if You agree to indemnify,
+ defend, and hold each Contributor harmless for any liability
+ incurred by, or claims asserted against, such Contributor by reason
+ of your accepting any such warranty or additional liability.
+
+ END OF TERMS AND CONDITIONS
+
+ APPENDIX: How to apply the Apache License to your work.
+
+ To apply the Apache License to your work, attach the following
+ boilerplate notice, with the fields enclosed by brackets "{}"
+ replaced with your own identifying information. (Don't include
+ the brackets!) The text should be enclosed in the appropriate
+ comment syntax for the file format. We also recommend that a
+ file or class name and description of purpose be included on the
+ same "printed page" as the copyright notice for easier
+ identification within third-party archives.
+
+ Copyright {yyyy} {name of copyright owner}
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
diff --git a/vendor/github.com/matttproud/golang_protobuf_extensions/NOTICE b/vendor/github.com/matttproud/golang_protobuf_extensions/NOTICE
new file mode 100644
index 00000000..5d8cb5b7
--- /dev/null
+++ b/vendor/github.com/matttproud/golang_protobuf_extensions/NOTICE
@@ -0,0 +1 @@
+Copyright 2012 Matt T. Proud (matt.proud@gmail.com)
diff --git a/vendor/github.com/matttproud/golang_protobuf_extensions/pbutil/decode.go b/vendor/github.com/matttproud/golang_protobuf_extensions/pbutil/decode.go
new file mode 100644
index 00000000..66d9b545
--- /dev/null
+++ b/vendor/github.com/matttproud/golang_protobuf_extensions/pbutil/decode.go
@@ -0,0 +1,75 @@
+// Copyright 2013 Matt T. Proud
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package pbutil
+
+import (
+ "encoding/binary"
+ "errors"
+ "io"
+
+ "github.com/golang/protobuf/proto"
+)
+
+var errInvalidVarint = errors.New("invalid varint32 encountered")
+
+// ReadDelimited decodes a message from the provided length-delimited stream,
+// where the length is encoded as 32-bit varint prefix to the message body.
+// It returns the total number of bytes read and any applicable error. This is
+// roughly equivalent to the companion Java API's
+// MessageLite#parseDelimitedFrom. As per the reader contract, this function
+// calls r.Read repeatedly as required until exactly one message including its
+// prefix is read and decoded (or an error has occurred). The function never
+// reads more bytes from the stream than required. The function never returns
+// an error if a message has been read and decoded correctly, even if the end
+// of the stream has been reached in doing so. In that case, any subsequent
+// calls return (0, io.EOF).
+func ReadDelimited(r io.Reader, m proto.Message) (n int, err error) {
+ // Per AbstractParser#parsePartialDelimitedFrom with
+ // CodedInputStream#readRawVarint32.
+ headerBuf := make([]byte, binary.MaxVarintLen32)
+ var bytesRead, varIntBytes int
+ var messageLength uint64
+ for varIntBytes == 0 { // i.e. no varint has been decoded yet.
+ if bytesRead >= len(headerBuf) {
+ return bytesRead, errInvalidVarint
+ }
+ // We have to read byte by byte here to avoid reading more bytes
+ // than required. Each read byte is appended to what we have
+ // read before.
+ newBytesRead, err := r.Read(headerBuf[bytesRead : bytesRead+1])
+ if newBytesRead == 0 {
+ if err != nil {
+ return bytesRead, err
+ }
+ // A Reader should not return (0, nil), but if it does,
+ // it should be treated as no-op (according to the
+ // Reader contract). So let's go on...
+ continue
+ }
+ bytesRead += newBytesRead
+ // Now present everything read so far to the varint decoder and
+ // see if a varint can be decoded already.
+ messageLength, varIntBytes = proto.DecodeVarint(headerBuf[:bytesRead])
+ }
+
+ messageBuf := make([]byte, messageLength)
+ newBytesRead, err := io.ReadFull(r, messageBuf)
+ bytesRead += newBytesRead
+ if err != nil {
+ return bytesRead, err
+ }
+
+ return bytesRead, proto.Unmarshal(messageBuf, m)
+}
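+
+// Usage sketch: reading every length-delimited message from a stream until EOF.
+// The concrete message type dto.MetricFamily (from
+// github.com/prometheus/client_model/go) is only an assumed example of a
+// generated proto.Message.
+//
+//	for {
+//		mf := &dto.MetricFamily{}
+//		if _, err := pbutil.ReadDelimited(r, mf); err != nil {
+//			if err == io.EOF {
+//				break
+//			}
+//			return err
+//		}
+//		// process mf ...
+//	}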
diff --git a/vendor/github.com/matttproud/golang_protobuf_extensions/pbutil/doc.go b/vendor/github.com/matttproud/golang_protobuf_extensions/pbutil/doc.go
new file mode 100644
index 00000000..c318385c
--- /dev/null
+++ b/vendor/github.com/matttproud/golang_protobuf_extensions/pbutil/doc.go
@@ -0,0 +1,16 @@
+// Copyright 2013 Matt T. Proud
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// Package pbutil provides record length-delimited Protocol Buffer streaming.
+package pbutil
diff --git a/vendor/github.com/matttproud/golang_protobuf_extensions/pbutil/encode.go b/vendor/github.com/matttproud/golang_protobuf_extensions/pbutil/encode.go
new file mode 100644
index 00000000..4b76ea9a
--- /dev/null
+++ b/vendor/github.com/matttproud/golang_protobuf_extensions/pbutil/encode.go
@@ -0,0 +1,46 @@
+// Copyright 2013 Matt T. Proud
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package pbutil
+
+import (
+ "encoding/binary"
+ "io"
+
+ "github.com/golang/protobuf/proto"
+)
+
+// WriteDelimited encodes and dumps a message to the provided writer prefixed
+// with a 32-bit varint indicating the length of the encoded message, producing
+// a length-delimited record stream, which can be used to chain together
+// encoded messages of the same type in a file. It returns the total
+// number of bytes written and any applicable error. This is roughly
+// equivalent to the companion Java API's MessageLite#writeDelimitedTo.
+func WriteDelimited(w io.Writer, m proto.Message) (n int, err error) {
+ buffer, err := proto.Marshal(m)
+ if err != nil {
+ return 0, err
+ }
+
+ buf := make([]byte, binary.MaxVarintLen32)
+ encodedLength := binary.PutUvarint(buf, uint64(len(buffer)))
+
+ sync, err := w.Write(buf[:encodedLength])
+ if err != nil {
+ return sync, err
+ }
+
+ n, err = w.Write(buffer)
+ return n + sync, err
+}
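+
+// Usage sketch: writing a stream of length-delimited messages that the
+// ReadDelimited loop sketched in decode.go can consume; families is an assumed
+// []*dto.MetricFamily.
+//
+//	for _, mf := range families {
+//		if _, err := pbutil.WriteDelimited(w, mf); err != nil {
+//			return err
+//		}
+//	}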
diff --git a/vendor/github.com/prometheus/client_golang/LICENSE b/vendor/github.com/prometheus/client_golang/LICENSE
new file mode 100644
index 00000000..261eeb9e
--- /dev/null
+++ b/vendor/github.com/prometheus/client_golang/LICENSE
@@ -0,0 +1,201 @@
+ Apache License
+ Version 2.0, January 2004
+ http://www.apache.org/licenses/
+
+ TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+
+ 1. Definitions.
+
+ "License" shall mean the terms and conditions for use, reproduction,
+ and distribution as defined by Sections 1 through 9 of this document.
+
+ "Licensor" shall mean the copyright owner or entity authorized by
+ the copyright owner that is granting the License.
+
+ "Legal Entity" shall mean the union of the acting entity and all
+ other entities that control, are controlled by, or are under common
+ control with that entity. For the purposes of this definition,
+ "control" means (i) the power, direct or indirect, to cause the
+ direction or management of such entity, whether by contract or
+ otherwise, or (ii) ownership of fifty percent (50%) or more of the
+ outstanding shares, or (iii) beneficial ownership of such entity.
+
+ "You" (or "Your") shall mean an individual or Legal Entity
+ exercising permissions granted by this License.
+
+ "Source" form shall mean the preferred form for making modifications,
+ including but not limited to software source code, documentation
+ source, and configuration files.
+
+ "Object" form shall mean any form resulting from mechanical
+ transformation or translation of a Source form, including but
+ not limited to compiled object code, generated documentation,
+ and conversions to other media types.
+
+ "Work" shall mean the work of authorship, whether in Source or
+ Object form, made available under the License, as indicated by a
+ copyright notice that is included in or attached to the work
+ (an example is provided in the Appendix below).
+
+ "Derivative Works" shall mean any work, whether in Source or Object
+ form, that is based on (or derived from) the Work and for which the
+ editorial revisions, annotations, elaborations, or other modifications
+ represent, as a whole, an original work of authorship. For the purposes
+ of this License, Derivative Works shall not include works that remain
+ separable from, or merely link (or bind by name) to the interfaces of,
+ the Work and Derivative Works thereof.
+
+ "Contribution" shall mean any work of authorship, including
+ the original version of the Work and any modifications or additions
+ to that Work or Derivative Works thereof, that is intentionally
+ submitted to Licensor for inclusion in the Work by the copyright owner
+ or by an individual or Legal Entity authorized to submit on behalf of
+ the copyright owner. For the purposes of this definition, "submitted"
+ means any form of electronic, verbal, or written communication sent
+ to the Licensor or its representatives, including but not limited to
+ communication on electronic mailing lists, source code control systems,
+ and issue tracking systems that are managed by, or on behalf of, the
+ Licensor for the purpose of discussing and improving the Work, but
+ excluding communication that is conspicuously marked or otherwise
+ designated in writing by the copyright owner as "Not a Contribution."
+
+ "Contributor" shall mean Licensor and any individual or Legal Entity
+ on behalf of whom a Contribution has been received by Licensor and
+ subsequently incorporated within the Work.
+
+ 2. Grant of Copyright License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ copyright license to reproduce, prepare Derivative Works of,
+ publicly display, publicly perform, sublicense, and distribute the
+ Work and such Derivative Works in Source or Object form.
+
+ 3. Grant of Patent License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ (except as stated in this section) patent license to make, have made,
+ use, offer to sell, sell, import, and otherwise transfer the Work,
+ where such license applies only to those patent claims licensable
+ by such Contributor that are necessarily infringed by their
+ Contribution(s) alone or by combination of their Contribution(s)
+ with the Work to which such Contribution(s) was submitted. If You
+ institute patent litigation against any entity (including a
+ cross-claim or counterclaim in a lawsuit) alleging that the Work
+ or a Contribution incorporated within the Work constitutes direct
+ or contributory patent infringement, then any patent licenses
+ granted to You under this License for that Work shall terminate
+ as of the date such litigation is filed.
+
+ 4. Redistribution. You may reproduce and distribute copies of the
+ Work or Derivative Works thereof in any medium, with or without
+ modifications, and in Source or Object form, provided that You
+ meet the following conditions:
+
+ (a) You must give any other recipients of the Work or
+ Derivative Works a copy of this License; and
+
+ (b) You must cause any modified files to carry prominent notices
+ stating that You changed the files; and
+
+ (c) You must retain, in the Source form of any Derivative Works
+ that You distribute, all copyright, patent, trademark, and
+ attribution notices from the Source form of the Work,
+ excluding those notices that do not pertain to any part of
+ the Derivative Works; and
+
+ (d) If the Work includes a "NOTICE" text file as part of its
+ distribution, then any Derivative Works that You distribute must
+ include a readable copy of the attribution notices contained
+ within such NOTICE file, excluding those notices that do not
+ pertain to any part of the Derivative Works, in at least one
+ of the following places: within a NOTICE text file distributed
+ as part of the Derivative Works; within the Source form or
+ documentation, if provided along with the Derivative Works; or,
+ within a display generated by the Derivative Works, if and
+ wherever such third-party notices normally appear. The contents
+ of the NOTICE file are for informational purposes only and
+ do not modify the License. You may add Your own attribution
+ notices within Derivative Works that You distribute, alongside
+ or as an addendum to the NOTICE text from the Work, provided
+ that such additional attribution notices cannot be construed
+ as modifying the License.
+
+ You may add Your own copyright statement to Your modifications and
+ may provide additional or different license terms and conditions
+ for use, reproduction, or distribution of Your modifications, or
+ for any such Derivative Works as a whole, provided Your use,
+ reproduction, and distribution of the Work otherwise complies with
+ the conditions stated in this License.
+
+ 5. Submission of Contributions. Unless You explicitly state otherwise,
+ any Contribution intentionally submitted for inclusion in the Work
+ by You to the Licensor shall be under the terms and conditions of
+ this License, without any additional terms or conditions.
+ Notwithstanding the above, nothing herein shall supersede or modify
+ the terms of any separate license agreement you may have executed
+ with Licensor regarding such Contributions.
+
+ 6. Trademarks. This License does not grant permission to use the trade
+ names, trademarks, service marks, or product names of the Licensor,
+ except as required for reasonable and customary use in describing the
+ origin of the Work and reproducing the content of the NOTICE file.
+
+ 7. Disclaimer of Warranty. Unless required by applicable law or
+ agreed to in writing, Licensor provides the Work (and each
+ Contributor provides its Contributions) on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+ implied, including, without limitation, any warranties or conditions
+ of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
+ PARTICULAR PURPOSE. You are solely responsible for determining the
+ appropriateness of using or redistributing the Work and assume any
+ risks associated with Your exercise of permissions under this License.
+
+ 8. Limitation of Liability. In no event and under no legal theory,
+ whether in tort (including negligence), contract, or otherwise,
+ unless required by applicable law (such as deliberate and grossly
+ negligent acts) or agreed to in writing, shall any Contributor be
+ liable to You for damages, including any direct, indirect, special,
+ incidental, or consequential damages of any character arising as a
+ result of this License or out of the use or inability to use the
+ Work (including but not limited to damages for loss of goodwill,
+ work stoppage, computer failure or malfunction, or any and all
+ other commercial damages or losses), even if such Contributor
+ has been advised of the possibility of such damages.
+
+ 9. Accepting Warranty or Additional Liability. While redistributing
+ the Work or Derivative Works thereof, You may choose to offer,
+ and charge a fee for, acceptance of support, warranty, indemnity,
+ or other liability obligations and/or rights consistent with this
+ License. However, in accepting such obligations, You may act only
+ on Your own behalf and on Your sole responsibility, not on behalf
+ of any other Contributor, and only if You agree to indemnify,
+ defend, and hold each Contributor harmless for any liability
+ incurred by, or claims asserted against, such Contributor by reason
+ of your accepting any such warranty or additional liability.
+
+ END OF TERMS AND CONDITIONS
+
+ APPENDIX: How to apply the Apache License to your work.
+
+ To apply the Apache License to your work, attach the following
+ boilerplate notice, with the fields enclosed by brackets "[]"
+ replaced with your own identifying information. (Don't include
+ the brackets!) The text should be enclosed in the appropriate
+ comment syntax for the file format. We also recommend that a
+ file or class name and description of purpose be included on the
+ same "printed page" as the copyright notice for easier
+ identification within third-party archives.
+
+ Copyright [yyyy] [name of copyright owner]
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
diff --git a/vendor/github.com/prometheus/client_golang/NOTICE b/vendor/github.com/prometheus/client_golang/NOTICE
new file mode 100644
index 00000000..37e4a7d4
--- /dev/null
+++ b/vendor/github.com/prometheus/client_golang/NOTICE
@@ -0,0 +1,28 @@
+Prometheus instrumentation library for Go applications
+Copyright 2012-2015 The Prometheus Authors
+
+This product includes software developed at
+SoundCloud Ltd. (http://soundcloud.com/).
+
+
+The following components are included in this product:
+
+goautoneg
+http://bitbucket.org/ww/goautoneg
+Copyright 2011, Open Knowledge Foundation Ltd.
+See README.txt for license details.
+
+perks - a fork of https://github.com/bmizerany/perks
+https://github.com/beorn7/perks
+Copyright 2013-2015 Blake Mizerany, Björn Rabenstein
+See https://github.com/beorn7/perks/blob/master/README.md for license details.
+
+Go support for Protocol Buffers - Google's data interchange format
+http://github.com/golang/protobuf/
+Copyright 2010 The Go Authors
+See source code for license details.
+
+Support for streaming Protocol Buffer messages for the Go language (golang).
+https://github.com/matttproud/golang_protobuf_extensions
+Copyright 2013 Matt T. Proud
+Licensed under the Apache License, Version 2.0
diff --git a/vendor/github.com/prometheus/client_golang/prometheus/README.md b/vendor/github.com/prometheus/client_golang/prometheus/README.md
new file mode 100644
index 00000000..81032bed
--- /dev/null
+++ b/vendor/github.com/prometheus/client_golang/prometheus/README.md
@@ -0,0 +1,53 @@
+# Overview
+This is the [Go](http://golang.org) client library for
+[Prometheus](http://www.prometheus.io) telemetric instrumentation. It
+enables authors to define process-space metrics for their servers and
+expose them through a web service interface for extraction,
+aggregation, and other post-processing.
+
+# Installing
+ $ go get github.com/prometheus/client_golang/prometheus
+
+# Example
+```go
+package main
+
+import (
+ "net/http"
+
+ "github.com/prometheus/client_golang/prometheus"
+)
+
+var (
+ indexed = prometheus.NewCounter(prometheus.CounterOpts{
+ Namespace: "my_company",
+ Subsystem: "indexer",
+ Name: "documents_indexed",
+ Help: "The number of documents indexed.",
+ })
+ size = prometheus.NewGauge(prometheus.GaugeOpts{
+ Namespace: "my_company",
+ Subsystem: "storage",
+ Name: "documents_total_size_bytes",
+ Help: "The total size of all documents in the storage.",
+ })
+)
+
+func main() {
+ http.Handle("/metrics", prometheus.Handler())
+
+ indexed.Inc()
+ size.Set(5)
+
+ http.ListenAndServe(":8080", nil)
+}
+
+func init() {
+ prometheus.MustRegister(indexed)
+ prometheus.MustRegister(size)
+}
+```
+
+# Documentation
+
+[GoDoc](https://godoc.org/github.com/prometheus/client_golang)
diff --git a/vendor/github.com/prometheus/client_golang/prometheus/collector.go b/vendor/github.com/prometheus/client_golang/prometheus/collector.go
new file mode 100644
index 00000000..c0468800
--- /dev/null
+++ b/vendor/github.com/prometheus/client_golang/prometheus/collector.go
@@ -0,0 +1,75 @@
+// Copyright 2014 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package prometheus
+
+// Collector is the interface implemented by anything that can be used by
+// Prometheus to collect metrics. A Collector has to be registered for
+// collection. See Register, MustRegister, RegisterOrGet, and MustRegisterOrGet.
+//
+// The stock metrics provided by this package (like Gauge, Counter, Summary) are
+// also Collectors (which only ever collect one metric, namely itself). An
+// implementer of Collector may, however, collect multiple metrics in a
+// coordinated fashion and/or create metrics on the fly. Examples for collectors
+// already implemented in this library are the metric vectors (i.e. collection
+// of multiple instances of the same Metric but with different label values)
+// like GaugeVec or SummaryVec, and the ExpvarCollector.
+type Collector interface {
+ // Describe sends the super-set of all possible descriptors of metrics
+ // collected by this Collector to the provided channel and returns once
+ // the last descriptor has been sent. The sent descriptors fulfill the
+ // consistency and uniqueness requirements described in the Desc
+ // documentation. (It is valid if one and the same Collector sends
+ // duplicate descriptors. Those duplicates are simply ignored. However,
+ // two different Collectors must not send duplicate descriptors.) This
+ // method idempotently sends the same descriptors throughout the
+ // lifetime of the Collector. If a Collector encounters an error while
+ // executing this method, it must send an invalid descriptor (created
+ // with NewInvalidDesc) to signal the error to the registry.
+ Describe(chan<- *Desc)
+ // Collect is called by Prometheus when collecting metrics. The
+ // implementation sends each collected metric via the provided channel
+ // and returns once the last metric has been sent. The descriptor of
+ // each sent metric is one of those returned by Describe. Returned
+ // metrics that share the same descriptor must differ in their variable
+ // label values. This method may be called concurrently and must
+ // therefore be implemented in a concurrency safe way. Blocking occurs
+ // at the expense of total performance of rendering all registered
+ // metrics. Ideally, Collector implementations support concurrent
+ // readers.
+ Collect(chan<- Metric)
+}
+
+// SelfCollector implements Collector for a single Metric so that the
+// Metric collects itself. Add it as an anonymous field to a struct that
+// implements Metric, and call Init with the Metric itself as an argument.
+type SelfCollector struct {
+ self Metric
+}
+
+// Init provides the SelfCollector with a reference to the metric it is supposed
+// to collect. It is usually called within the factory function to create a
+// metric. See example.
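+//
+// An illustrative sketch (the callbackMetric type is hypothetical and must
+// also implement the rest of the Metric interface, i.e. Desc and Write):
+//
+//    type callbackMetric struct {
+//        SelfCollector
+//        desc *Desc
+//    }
+//
+//    func newCallbackMetric(desc *Desc) *callbackMetric {
+//        m := &callbackMetric{desc: desc}
+//        m.Init(m) // Let the metric collect itself.
+//        return m
+//    }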
+func (c *SelfCollector) Init(self Metric) {
+ c.self = self
+}
+
+// Describe implements Collector.
+func (c *SelfCollector) Describe(ch chan<- *Desc) {
+ ch <- c.self.Desc()
+}
+
+// Collect implements Collector.
+func (c *SelfCollector) Collect(ch chan<- Metric) {
+ ch <- c.self
+}
diff --git a/vendor/github.com/prometheus/client_golang/prometheus/counter.go b/vendor/github.com/prometheus/client_golang/prometheus/counter.go
new file mode 100644
index 00000000..d2a564b5
--- /dev/null
+++ b/vendor/github.com/prometheus/client_golang/prometheus/counter.go
@@ -0,0 +1,173 @@
+// Copyright 2014 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package prometheus
+
+import (
+ "errors"
+)
+
+// Counter is a Metric that represents a single numerical value that only ever
+// goes up. That implies that it cannot be used to count items whose number can
+// also go down, e.g. the number of currently running goroutines. Those
+// "counters" are represented by Gauges.
+//
+// A Counter is typically used to count requests served, tasks completed, errors
+// occurred, etc.
+//
+// To create Counter instances, use NewCounter.
+type Counter interface {
+ Metric
+ Collector
+
+ // Set is used to set the Counter to an arbitrary value. It is only used
+ // if you have to transfer a value from an external counter into this
+ // Prometheus metric. Do not use it for regular handling of a
+ // Prometheus counter (as it can be used to break the contract of
+ // monotonically increasing values).
+ Set(float64)
+ // Inc increments the counter by 1.
+ Inc()
+ // Add adds the given value to the counter. It panics if the value is <
+ // 0.
+ Add(float64)
+}
+
+// CounterOpts is an alias for Opts. See there for doc comments.
+type CounterOpts Opts
+
+// NewCounter creates a new Counter based on the provided CounterOpts.
+func NewCounter(opts CounterOpts) Counter {
+ desc := NewDesc(
+ BuildFQName(opts.Namespace, opts.Subsystem, opts.Name),
+ opts.Help,
+ nil,
+ opts.ConstLabels,
+ )
+ result := &counter{value: value{desc: desc, valType: CounterValue, labelPairs: desc.constLabelPairs}}
+ result.Init(result) // Init self-collection.
+ return result
+}
+
+type counter struct {
+ value
+}
+
+func (c *counter) Add(v float64) {
+ if v < 0 {
+ panic(errors.New("counter cannot decrease in value"))
+ }
+ c.value.Add(v)
+}
+
+// CounterVec is a Collector that bundles a set of Counters that all share the
+// same Desc, but have different values for their variable labels. This is used
+// if you want to count the same thing partitioned by various dimensions
+// (e.g. number of HTTP requests, partitioned by response code and
+// method). Create instances with NewCounterVec.
+//
+// CounterVec embeds MetricVec. See there for a full list of methods with
+// detailed documentation.
+type CounterVec struct {
+ MetricVec
+}
+
+// NewCounterVec creates a new CounterVec based on the provided CounterOpts and
+// partitioned by the given label names. At least one label name must be
+// provided.
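+//
+// A minimal usage sketch (the metric name and labels are illustrative):
+//
+//    httpReqs := NewCounterVec(
+//        CounterOpts{
+//            Name: "http_requests_total",
+//            Help: "Total number of HTTP requests, partitioned by code and method.",
+//        },
+//        []string{"code", "method"},
+//    )
+//    MustRegister(httpReqs)
+//    httpReqs.WithLabelValues("200", "GET").Inc()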
+func NewCounterVec(opts CounterOpts, labelNames []string) *CounterVec {
+ desc := NewDesc(
+ BuildFQName(opts.Namespace, opts.Subsystem, opts.Name),
+ opts.Help,
+ labelNames,
+ opts.ConstLabels,
+ )
+ return &CounterVec{
+ MetricVec: MetricVec{
+ children: map[uint64]Metric{},
+ desc: desc,
+ newMetric: func(lvs ...string) Metric {
+ result := &counter{value: value{
+ desc: desc,
+ valType: CounterValue,
+ labelPairs: makeLabelPairs(desc, lvs),
+ }}
+ result.Init(result) // Init self-collection.
+ return result
+ },
+ },
+ }
+}
+
+// GetMetricWithLabelValues replaces the method of the same name in
+// MetricVec. The difference is that this method returns a Counter and not a
+// Metric so that no type conversion is required.
+func (m *CounterVec) GetMetricWithLabelValues(lvs ...string) (Counter, error) {
+ metric, err := m.MetricVec.GetMetricWithLabelValues(lvs...)
+ if metric != nil {
+ return metric.(Counter), err
+ }
+ return nil, err
+}
+
+// GetMetricWith replaces the method of the same name in MetricVec. The
+// difference is that this method returns a Counter and not a Metric so that no
+// type conversion is required.
+func (m *CounterVec) GetMetricWith(labels Labels) (Counter, error) {
+ metric, err := m.MetricVec.GetMetricWith(labels)
+ if metric != nil {
+ return metric.(Counter), err
+ }
+ return nil, err
+}
+
+// WithLabelValues works as GetMetricWithLabelValues, but panics where
+// GetMetricWithLabelValues would have returned an error. By not returning an
+// error, WithLabelValues allows shortcuts like
+// myVec.WithLabelValues("404", "GET").Add(42)
+func (m *CounterVec) WithLabelValues(lvs ...string) Counter {
+ return m.MetricVec.WithLabelValues(lvs...).(Counter)
+}
+
+// With works as GetMetricWith, but panics where GetMetricWith would have
+// returned an error. By not returning an error, With allows shortcuts like
+// myVec.With(Labels{"code": "404", "method": "GET"}).Add(42)
+func (m *CounterVec) With(labels Labels) Counter {
+ return m.MetricVec.With(labels).(Counter)
+}
+
+// CounterFunc is a Counter whose value is determined at collect time by calling a
+// provided function.
+//
+// To create CounterFunc instances, use NewCounterFunc.
+type CounterFunc interface {
+ Metric
+ Collector
+}
+
+// NewCounterFunc creates a new CounterFunc based on the provided
+// CounterOpts. The value reported is determined by calling the given function
+// from within the Write method. Take into account that metric collection may
+// happen concurrently. If that results in concurrent calls to Write, like in
+// the case where a CounterFunc is directly registered with Prometheus, the
+// provided function must be concurrency-safe. The function should also honor
+// the contract for a Counter (values only go up, not down), but compliance will
+// not be checked.
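+//
+// A sketch of wrapping an external, monotonically increasing counter
+// (externalSource and its EventCount method are assumed, not part of this
+// package):
+//
+//    events := NewCounterFunc(
+//        CounterOpts{
+//            Name: "external_events_total",
+//            Help: "Total events reported by the external source.",
+//        },
+//        func() float64 { return float64(externalSource.EventCount()) },
+//    )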
+func NewCounterFunc(opts CounterOpts, function func() float64) CounterFunc {
+ return newValueFunc(NewDesc(
+ BuildFQName(opts.Namespace, opts.Subsystem, opts.Name),
+ opts.Help,
+ nil,
+ opts.ConstLabels,
+ ), CounterValue, function)
+}
diff --git a/vendor/github.com/prometheus/client_golang/prometheus/desc.go b/vendor/github.com/prometheus/client_golang/prometheus/desc.go
new file mode 100644
index 00000000..ee02d9b8
--- /dev/null
+++ b/vendor/github.com/prometheus/client_golang/prometheus/desc.go
@@ -0,0 +1,192 @@
+package prometheus
+
+import (
+ "errors"
+ "fmt"
+ "regexp"
+ "sort"
+ "strings"
+
+ "github.com/golang/protobuf/proto"
+
+ dto "github.com/prometheus/client_model/go"
+)
+
+var (
+ metricNameRE = regexp.MustCompile(`^[a-zA-Z_][a-zA-Z0-9_:]*$`)
+ labelNameRE = regexp.MustCompile("^[a-zA-Z_][a-zA-Z0-9_]*$")
+)
+
+// reservedLabelPrefix is a prefix which is not legal in user-supplied
+// label names.
+const reservedLabelPrefix = "__"
+
+// Labels represents a collection of label name -> value mappings. This type is
+// commonly used with the With(Labels) and GetMetricWith(Labels) methods of
+// metric vector Collectors, e.g.:
+// myVec.With(Labels{"code": "404", "method": "GET"}).Add(42)
+//
+// The other use-case is the specification of constant label pairs in Opts or to
+// create a Desc.
+type Labels map[string]string
+
+// Desc is the descriptor used by every Prometheus Metric. It is essentially
+// the immutable meta-data of a Metric. The normal Metric implementations
+// included in this package manage their Desc under the hood. Users only have to
+// deal with Desc if they use advanced features like the ExpvarCollector or
+// custom Collectors and Metrics.
+//
+// Descriptors registered with the same registry have to fulfill certain
+// consistency and uniqueness criteria if they share the same fully-qualified
+// name: They must have the same help string and the same label names (aka label
+// dimensions) in each of constLabels and variableLabels, but they must differ in
+// the values of the constLabels.
+//
+// Descriptors that share the same fully-qualified names and the same label
+// values of their constLabels are considered equal.
+//
+// Use NewDesc to create new Desc instances.
+type Desc struct {
+ // fqName has been built from Namespace, Subsystem, and Name.
+ fqName string
+ // help provides some helpful information about this metric.
+ help string
+ // constLabelPairs contains precalculated DTO label pairs based on
+ // the constant labels.
+ constLabelPairs []*dto.LabelPair
+ // VariableLabels contains names of labels for which the metric
+ // maintains variable values.
+ variableLabels []string
+ // id is a hash of the values of the ConstLabels and fqName. This
+ // must be unique among all registered descriptors and can therefore be
+ // used as an identifier of the descriptor.
+ id uint64
+ // dimHash is a hash of the label names (preset and variable) and the
+ // Help string. Each Desc with the same fqName must have the same
+ // dimHash.
+ dimHash uint64
+ // err is an error that occurred during construction. It is reported at
+ // registration time.
+ err error
+}
+
+// NewDesc allocates and initializes a new Desc. Errors are recorded in the Desc
+// and will be reported at registration time. variableLabels and constLabels can
+// be nil if no such labels should be set. fqName and help must not be empty.
+//
+// variableLabels only contain the label names. Their label values are variable
+// and therefore not part of the Desc. (They are managed within the Metric.)
+//
+// For constLabels, the label values are constant. Therefore, they are fully
+// specified in the Desc. See the Opts documentation for the implications of
+// constant labels.
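+//
+// An illustrative sketch (the metric name and labels are made up):
+//
+//    desc := NewDesc(
+//        "worker_queue_length",
+//        "Current number of items in the worker queue.",
+//        []string{"queue"},    // variable labels
+//        Labels{"shard": "a"}, // constant labels
+//    )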
+func NewDesc(fqName, help string, variableLabels []string, constLabels Labels) *Desc {
+ d := &Desc{
+ fqName: fqName,
+ help: help,
+ variableLabels: variableLabels,
+ }
+ if help == "" {
+ d.err = errors.New("empty help string")
+ return d
+ }
+ if !metricNameRE.MatchString(fqName) {
+ d.err = fmt.Errorf("%q is not a valid metric name", fqName)
+ return d
+ }
+ // labelValues contains the label values of const labels (in order of
+ // their sorted label names) plus the fqName (at position 0).
+ labelValues := make([]string, 1, len(constLabels)+1)
+ labelValues[0] = fqName
+ labelNames := make([]string, 0, len(constLabels)+len(variableLabels))
+ labelNameSet := map[string]struct{}{}
+ // First add only the const label names and sort them...
+ for labelName := range constLabels {
+ if !checkLabelName(labelName) {
+ d.err = fmt.Errorf("%q is not a valid label name", labelName)
+ return d
+ }
+ labelNames = append(labelNames, labelName)
+ labelNameSet[labelName] = struct{}{}
+ }
+ sort.Strings(labelNames)
+ // ... so that we can now add const label values in the order of their names.
+ for _, labelName := range labelNames {
+ labelValues = append(labelValues, constLabels[labelName])
+ }
+ // Now add the variable label names, but prefix them with something that
+ // cannot be in a regular label name. That prevents matching the label
+ // dimension with a different mix between preset and variable labels.
+ for _, labelName := range variableLabels {
+ if !checkLabelName(labelName) {
+ d.err = fmt.Errorf("%q is not a valid label name", labelName)
+ return d
+ }
+ labelNames = append(labelNames, "$"+labelName)
+ labelNameSet[labelName] = struct{}{}
+ }
+ if len(labelNames) != len(labelNameSet) {
+ d.err = errors.New("duplicate label names")
+ return d
+ }
+ vh := hashNew()
+ for _, val := range labelValues {
+ vh = hashAdd(vh, val)
+ vh = hashAddByte(vh, separatorByte)
+ }
+ d.id = vh
+ // Sort labelNames so that order doesn't matter for the hash.
+ sort.Strings(labelNames)
+ // Now hash together (in this order) the help string and the sorted
+ // label names.
+ lh := hashNew()
+ lh = hashAdd(lh, help)
+ lh = hashAddByte(lh, separatorByte)
+ for _, labelName := range labelNames {
+ lh = hashAdd(lh, labelName)
+ lh = hashAddByte(lh, separatorByte)
+ }
+ d.dimHash = lh
+
+ d.constLabelPairs = make([]*dto.LabelPair, 0, len(constLabels))
+ for n, v := range constLabels {
+ d.constLabelPairs = append(d.constLabelPairs, &dto.LabelPair{
+ Name: proto.String(n),
+ Value: proto.String(v),
+ })
+ }
+ sort.Sort(LabelPairSorter(d.constLabelPairs))
+ return d
+}
+
+// NewInvalidDesc returns an invalid descriptor, i.e. a descriptor with the
+// provided error set. If a collector returning such a descriptor is registered,
+// registration will fail with the provided error. NewInvalidDesc can be used by
+// a Collector to signal inability to describe itself.
+func NewInvalidDesc(err error) *Desc {
+ return &Desc{
+ err: err,
+ }
+}
+
+func (d *Desc) String() string {
+ lpStrings := make([]string, 0, len(d.constLabelPairs))
+ for _, lp := range d.constLabelPairs {
+ lpStrings = append(
+ lpStrings,
+ fmt.Sprintf("%s=%q", lp.GetName(), lp.GetValue()),
+ )
+ }
+ return fmt.Sprintf(
+ "Desc{fqName: %q, help: %q, constLabels: {%s}, variableLabels: %v}",
+ d.fqName,
+ d.help,
+ strings.Join(lpStrings, ","),
+ d.variableLabels,
+ )
+}
+
+func checkLabelName(l string) bool {
+ return labelNameRE.MatchString(l) &&
+ !strings.HasPrefix(l, reservedLabelPrefix)
+}
diff --git a/vendor/github.com/prometheus/client_golang/prometheus/doc.go b/vendor/github.com/prometheus/client_golang/prometheus/doc.go
new file mode 100644
index 00000000..425fe879
--- /dev/null
+++ b/vendor/github.com/prometheus/client_golang/prometheus/doc.go
@@ -0,0 +1,109 @@
+// Copyright 2014 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// Package prometheus provides embeddable metric primitives for servers and
+// standardized exposition of telemetry through a web services interface.
+//
+// All exported functions and methods are safe to be used concurrently unless
+// specified otherwise.
+//
+// To expose metrics registered with the Prometheus registry, an HTTP server
+// needs to know about the Prometheus handler. The usual endpoint is "/metrics".
+//
+// http.Handle("/metrics", prometheus.Handler())
+//
+// As a starting point a very basic usage example:
+//
+// package main
+//
+// import (
+// "net/http"
+//
+// "github.com/prometheus/client_golang/prometheus"
+// )
+//
+// var (
+// cpuTemp = prometheus.NewGauge(prometheus.GaugeOpts{
+// Name: "cpu_temperature_celsius",
+// Help: "Current temperature of the CPU.",
+// })
+// hdFailures = prometheus.NewCounter(prometheus.CounterOpts{
+// Name: "hd_errors_total",
+// Help: "Number of hard-disk errors.",
+// })
+// )
+//
+// func init() {
+// prometheus.MustRegister(cpuTemp)
+// prometheus.MustRegister(hdFailures)
+// }
+//
+// func main() {
+// cpuTemp.Set(65.3)
+// hdFailures.Inc()
+//
+// http.Handle("/metrics", prometheus.Handler())
+// http.ListenAndServe(":8080", nil)
+// }
+//
+//
+// This is a complete program that exports two metrics, a Gauge and a Counter.
+// It also exports some stats about the HTTP usage of the /metrics
+// endpoint. (See the Handler function for more detail.)
+//
+// Two more advanced metric types are the Summary and Histogram.
+//
+// In addition to the fundamental metric types Gauge, Counter, Summary, and
+// Histogram, a very important part of the Prometheus data model is the
+// partitioning of samples along dimensions called labels, which results in
+// metric vectors. The fundamental types are GaugeVec, CounterVec, SummaryVec,
+// and HistogramVec.
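+//
+// As a hedged sketch (names are illustrative), a GaugeVec partitioned by a
+// "queue" label could be used like this:
+//
+//    queueLength := prometheus.NewGaugeVec(
+//        prometheus.GaugeOpts{
+//            Name: "worker_queue_length",
+//            Help: "Current number of items per worker queue.",
+//        },
+//        []string{"queue"},
+//    )
+//    prometheus.MustRegister(queueLength)
+//    queueLength.WithLabelValues("high_prio").Set(12)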
+//
+// Those are all the parts needed for basic usage. Detailed documentation and
+// examples are provided below.
+//
+// Everything else this package offers is essentially for "power users" only. A
+// few pointers to "power user features":
+//
+// All the various ...Opts structs have a ConstLabels field for labels that
+// never change their value (which is only useful under special circumstances,
+// see documentation of the Opts type).
+//
+// The Untyped metric behaves like a Gauge, but signals the Prometheus server
+// not to assume anything about its type.
+//
+// Functions to fine-tune how the metric registry works: EnableCollectChecks,
+// PanicOnCollectError, Register, Unregister, SetMetricFamilyInjectionHook.
+//
+// For custom metric collection, there are two entry points: Custom Metric
+// implementations and custom Collector implementations. A Metric is the
+// fundamental unit in the Prometheus data model: a sample at a point in time
+// together with its meta-data (like its fully-qualified name and any number of
+// pairs of label name and label value) that knows how to marshal itself into a
+// data transfer object (aka DTO, implemented as a protocol buffer). A Collector
+// gets registered with the Prometheus registry and manages the collection of
+// one or more Metrics. Many parts of this package are building blocks for
+// Metrics and Collectors. Desc is the metric descriptor, actually used by all
+// metrics under the hood, and by Collectors to describe the Metrics to be
+// collected, but only to be dealt with by users if they implement their own
+// Metrics or Collectors. To create a Desc, the BuildFQName function will come
+// in handy. Other useful components for Metric and Collector implementation
+// include: LabelPairSorter to sort the DTO version of label pairs,
+// NewConstMetric and MustNewConstMetric to create "throw away" Metrics at
+// collection time, MetricVec to bundle custom Metrics into a metric vector
+// Collector, SelfCollector to make a custom Metric collect itself.
+//
+// A good example of a custom Collector is the ExpvarCollector included in this
+// package, which exports variables exported via the "expvar" package as
+// Prometheus metrics.
+package prometheus
diff --git a/vendor/github.com/prometheus/client_golang/prometheus/expvar.go b/vendor/github.com/prometheus/client_golang/prometheus/expvar.go
new file mode 100644
index 00000000..0f7630d5
--- /dev/null
+++ b/vendor/github.com/prometheus/client_golang/prometheus/expvar.go
@@ -0,0 +1,119 @@
+// Copyright 2014 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package prometheus
+
+import (
+ "encoding/json"
+ "expvar"
+)
+
+// ExpvarCollector collects metrics from the expvar interface. It provides a
+// quick way to expose numeric values that are already exported via expvar as
+// Prometheus metrics. Note that the data models of expvar and Prometheus are
+// fundamentally different, and that the ExpvarCollector is inherently
+// slow. Thus, the ExpvarCollector is probably great for experiments and
+// prototying, but you should seriously consider a more direct implementation of
+// Prometheus metrics for monitoring production systems.
+//
+// Use NewExpvarCollector to create new instances.
+type ExpvarCollector struct {
+ exports map[string]*Desc
+}
+
+// NewExpvarCollector returns a newly allocated ExpvarCollector that still has
+// to be registered with the Prometheus registry.
+//
+// The exports map has the following meaning:
+//
+// The keys in the map correspond to expvar keys, i.e. for every expvar key you
+// want to export as Prometheus metric, you need an entry in the exports
+// map. The descriptor mapped to each key describes how to export the expvar
+// value. It defines the name and the help string of the Prometheus metric
+// proxying the expvar value. The type will always be Untyped.
+//
+// For descriptors without variable labels, the expvar value must be a number or
+// a bool. The number is then directly exported as the Prometheus sample
+// value. (For a bool, 'false' translates to 0 and 'true' to 1). Expvar values
+// that are not numbers or bools are silently ignored.
+//
+// If the descriptor has one variable label, the expvar value must be an expvar
+// map. The keys in the expvar map become the various values of the one
+// Prometheus label. The values in the expvar map must be numbers or bools again
+// as above.
+//
+// For descriptors with more than one variable label, the expvar must be a
+// nested expvar map, i.e. where the values of the topmost map are maps again
+// etc. until a depth is reached that corresponds to the number of labels. The
+// leaves of that structure must be numbers or bools as above to serve as the
+// sample values.
+//
+// Anything that does not fit into the scheme above is silently ignored.
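+//
+// A hedged sketch (the expvar key "open_connections" is assumed to be
+// published by the instrumented program):
+//
+//    expvarCollector := NewExpvarCollector(map[string]*Desc{
+//        "open_connections": NewDesc(
+//            "expvar_open_connections",
+//            "Currently open connections, as exported via expvar.",
+//            nil, nil,
+//        ),
+//    })
+//    MustRegister(expvarCollector)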
+func NewExpvarCollector(exports map[string]*Desc) *ExpvarCollector {
+ return &ExpvarCollector{
+ exports: exports,
+ }
+}
+
+// Describe implements Collector.
+func (e *ExpvarCollector) Describe(ch chan<- *Desc) {
+ for _, desc := range e.exports {
+ ch <- desc
+ }
+}
+
+// Collect implements Collector.
+func (e *ExpvarCollector) Collect(ch chan<- Metric) {
+ for name, desc := range e.exports {
+ var m Metric
+ expVar := expvar.Get(name)
+ if expVar == nil {
+ continue
+ }
+ var v interface{}
+ labels := make([]string, len(desc.variableLabels))
+ if err := json.Unmarshal([]byte(expVar.String()), &v); err != nil {
+ ch <- NewInvalidMetric(desc, err)
+ continue
+ }
+ var processValue func(v interface{}, i int)
+ processValue = func(v interface{}, i int) {
+ if i >= len(labels) {
+ copiedLabels := append(make([]string, 0, len(labels)), labels...)
+ switch v := v.(type) {
+ case float64:
+ m = MustNewConstMetric(desc, UntypedValue, v, copiedLabels...)
+ case bool:
+ if v {
+ m = MustNewConstMetric(desc, UntypedValue, 1, copiedLabels...)
+ } else {
+ m = MustNewConstMetric(desc, UntypedValue, 0, copiedLabels...)
+ }
+ default:
+ return
+ }
+ ch <- m
+ return
+ }
+ vm, ok := v.(map[string]interface{})
+ if !ok {
+ return
+ }
+ for lv, val := range vm {
+ labels[i] = lv
+ processValue(val, i+1)
+ }
+ }
+ processValue(v, 0)
+ }
+}
diff --git a/vendor/github.com/prometheus/client_golang/prometheus/fnv.go b/vendor/github.com/prometheus/client_golang/prometheus/fnv.go
new file mode 100644
index 00000000..e3b67df8
--- /dev/null
+++ b/vendor/github.com/prometheus/client_golang/prometheus/fnv.go
@@ -0,0 +1,29 @@
+package prometheus
+
+// Inline and byte-free variant of hash/fnv's fnv64a.
+
+const (
+ offset64 = 14695981039346656037
+ prime64 = 1099511628211
+)
+
+// hashNew initializes a new fnv64a hash value.
+func hashNew() uint64 {
+ return offset64
+}
+
+// hashAdd adds a string to a fnv64a hash value, returning the updated hash.
+func hashAdd(h uint64, s string) uint64 {
+ for i := 0; i < len(s); i++ {
+ h ^= uint64(s[i])
+ h *= prime64
+ }
+ return h
+}
+
+// hashAddByte adds a byte to a fnv64a hash value, returning the updated hash.
+func hashAddByte(h uint64, b byte) uint64 {
+ h ^= uint64(b)
+ h *= prime64
+ return h
+}
diff --git a/vendor/github.com/prometheus/client_golang/prometheus/gauge.go b/vendor/github.com/prometheus/client_golang/prometheus/gauge.go
new file mode 100644
index 00000000..390c0746
--- /dev/null
+++ b/vendor/github.com/prometheus/client_golang/prometheus/gauge.go
@@ -0,0 +1,144 @@
+// Copyright 2014 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package prometheus
+
+// Gauge is a Metric that represents a single numerical value that can
+// arbitrarily go up and down.
+//
+// A Gauge is typically used for measured values like temperatures or current
+// memory usage, but also "counts" that can go up and down, like the number of
+// running goroutines.
+//
+// To create Gauge instances, use NewGauge.
+type Gauge interface {
+ Metric
+ Collector
+
+ // Set sets the Gauge to an arbitrary value.
+ Set(float64)
+ // Inc increments the Gauge by 1.
+ Inc()
+ // Dec decrements the Gauge by 1.
+ Dec()
+ // Add adds the given value to the Gauge. (The value can be
+ // negative, resulting in a decrease of the Gauge.)
+ Add(float64)
+ // Sub subtracts the given value from the Gauge. (The value can be
+ // negative, resulting in an increase of the Gauge.)
+ Sub(float64)
+}
+
+// GaugeOpts is an alias for Opts. See there for doc comments.
+type GaugeOpts Opts
+
+// NewGauge creates a new Gauge based on the provided GaugeOpts.
+func NewGauge(opts GaugeOpts) Gauge {
+ return newValue(NewDesc(
+ BuildFQName(opts.Namespace, opts.Subsystem, opts.Name),
+ opts.Help,
+ nil,
+ opts.ConstLabels,
+ ), GaugeValue, 0)
+}
+
+// GaugeVec is a Collector that bundles a set of Gauges that all share the same
+// Desc, but have different values for their variable labels. This is used if
+// you want to count the same thing partitioned by various dimensions
+// (e.g. number of operations queued, partitioned by user and operation
+// type). Create instances with NewGaugeVec.
+type GaugeVec struct {
+ MetricVec
+}
+
+// NewGaugeVec creates a new GaugeVec based on the provided GaugeOpts and
+// partitioned by the given label names. At least one label name must be
+// provided.
+func NewGaugeVec(opts GaugeOpts, labelNames []string) *GaugeVec {
+ desc := NewDesc(
+ BuildFQName(opts.Namespace, opts.Subsystem, opts.Name),
+ opts.Help,
+ labelNames,
+ opts.ConstLabels,
+ )
+ return &GaugeVec{
+ MetricVec: MetricVec{
+ children: map[uint64]Metric{},
+ desc: desc,
+ newMetric: func(lvs ...string) Metric {
+ return newValue(desc, GaugeValue, 0, lvs...)
+ },
+ },
+ }
+}
+
+// GetMetricWithLabelValues replaces the method of the same name in
+// MetricVec. The difference is that this method returns a Gauge and not a
+// Metric so that no type conversion is required.
+func (m *GaugeVec) GetMetricWithLabelValues(lvs ...string) (Gauge, error) {
+ metric, err := m.MetricVec.GetMetricWithLabelValues(lvs...)
+ if metric != nil {
+ return metric.(Gauge), err
+ }
+ return nil, err
+}
+
+// GetMetricWith replaces the method of the same name in MetricVec. The
+// difference is that this method returns a Gauge and not a Metric so that no
+// type conversion is required.
+func (m *GaugeVec) GetMetricWith(labels Labels) (Gauge, error) {
+ metric, err := m.MetricVec.GetMetricWith(labels)
+ if metric != nil {
+ return metric.(Gauge), err
+ }
+ return nil, err
+}
+
+// WithLabelValues works as GetMetricWithLabelValues, but panics where
+// GetMetricWithLabelValues would have returned an error. By not returning an
+// error, WithLabelValues allows shortcuts like
+// myVec.WithLabelValues("404", "GET").Add(42)
+func (m *GaugeVec) WithLabelValues(lvs ...string) Gauge {
+ return m.MetricVec.WithLabelValues(lvs...).(Gauge)
+}
+
+// With works as GetMetricWith, but panics where GetMetricWith would have
+// returned an error. By not returning an error, With allows shortcuts like
+// myVec.With(Labels{"code": "404", "method": "GET"}).Add(42)
+func (m *GaugeVec) With(labels Labels) Gauge {
+ return m.MetricVec.With(labels).(Gauge)
+}
+
+// GaugeFunc is a Gauge whose value is determined at collect time by calling a
+// provided function.
+//
+// To create GaugeFunc instances, use NewGaugeFunc.
+type GaugeFunc interface {
+ Metric
+ Collector
+}
+
+// NewGaugeFunc creates a new GaugeFunc based on the provided GaugeOpts. The
+// value reported is determined by calling the given function from within the
+// Write method. Take into account that metric collection may happen
+// concurrently. If that results in concurrent calls to Write, like in the case
+// where a GaugeFunc is directly registered with Prometheus, the provided
+// function must be concurrency-safe.
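+//
+// An illustrative sketch (startTime is an assumed package-level time.Time set
+// at program start):
+//
+//    uptime := NewGaugeFunc(
+//        GaugeOpts{
+//            Name: "process_uptime_seconds",
+//            Help: "Seconds elapsed since the process started.",
+//        },
+//        func() float64 { return time.Since(startTime).Seconds() },
+//    )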
+func NewGaugeFunc(opts GaugeOpts, function func() float64) GaugeFunc {
+ return newValueFunc(NewDesc(
+ BuildFQName(opts.Namespace, opts.Subsystem, opts.Name),
+ opts.Help,
+ nil,
+ opts.ConstLabels,
+ ), GaugeValue, function)
+}
diff --git a/vendor/github.com/prometheus/client_golang/prometheus/go_collector.go b/vendor/github.com/prometheus/client_golang/prometheus/go_collector.go
new file mode 100644
index 00000000..8be24769
--- /dev/null
+++ b/vendor/github.com/prometheus/client_golang/prometheus/go_collector.go
@@ -0,0 +1,263 @@
+package prometheus
+
+import (
+ "fmt"
+ "runtime"
+ "runtime/debug"
+ "time"
+)
+
+type goCollector struct {
+ goroutines Gauge
+ gcDesc *Desc
+
+ // metrics to describe and collect
+ metrics memStatsMetrics
+}
+
+// NewGoCollector returns a collector which exports metrics about the current
+// go process.
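+//
+// If it is not already registered by a default registry, it can be registered
+// explicitly (a sketch):
+//
+//    MustRegister(NewGoCollector())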
+func NewGoCollector() *goCollector {
+ return &goCollector{
+ goroutines: NewGauge(GaugeOpts{
+ Namespace: "go",
+ Name: "goroutines",
+ Help: "Number of goroutines that currently exist.",
+ }),
+ gcDesc: NewDesc(
+ "go_gc_duration_seconds",
+ "A summary of the GC invocation durations.",
+ nil, nil),
+ metrics: memStatsMetrics{
+ {
+ desc: NewDesc(
+ memstatNamespace("alloc_bytes"),
+ "Number of bytes allocated and still in use.",
+ nil, nil,
+ ),
+ eval: func(ms *runtime.MemStats) float64 { return float64(ms.Alloc) },
+ valType: GaugeValue,
+ }, {
+ desc: NewDesc(
+ memstatNamespace("alloc_bytes_total"),
+ "Total number of bytes allocated, even if freed.",
+ nil, nil,
+ ),
+ eval: func(ms *runtime.MemStats) float64 { return float64(ms.TotalAlloc) },
+ valType: CounterValue,
+ }, {
+ desc: NewDesc(
+ memstatNamespace("sys_bytes"),
+ "Number of bytes obtained by system. Sum of all system allocations.",
+ nil, nil,
+ ),
+ eval: func(ms *runtime.MemStats) float64 { return float64(ms.Sys) },
+ valType: GaugeValue,
+ }, {
+ desc: NewDesc(
+ memstatNamespace("lookups_total"),
+ "Total number of pointer lookups.",
+ nil, nil,
+ ),
+ eval: func(ms *runtime.MemStats) float64 { return float64(ms.Lookups) },
+ valType: CounterValue,
+ }, {
+ desc: NewDesc(
+ memstatNamespace("mallocs_total"),
+ "Total number of mallocs.",
+ nil, nil,
+ ),
+ eval: func(ms *runtime.MemStats) float64 { return float64(ms.Mallocs) },
+ valType: CounterValue,
+ }, {
+ desc: NewDesc(
+ memstatNamespace("frees_total"),
+ "Total number of frees.",
+ nil, nil,
+ ),
+ eval: func(ms *runtime.MemStats) float64 { return float64(ms.Frees) },
+ valType: CounterValue,
+ }, {
+ desc: NewDesc(
+ memstatNamespace("heap_alloc_bytes"),
+ "Number of heap bytes allocated and still in use.",
+ nil, nil,
+ ),
+ eval: func(ms *runtime.MemStats) float64 { return float64(ms.HeapAlloc) },
+ valType: GaugeValue,
+ }, {
+ desc: NewDesc(
+ memstatNamespace("heap_sys_bytes"),
+ "Number of heap bytes obtained from system.",
+ nil, nil,
+ ),
+ eval: func(ms *runtime.MemStats) float64 { return float64(ms.HeapSys) },
+ valType: GaugeValue,
+ }, {
+ desc: NewDesc(
+ memstatNamespace("heap_idle_bytes"),
+ "Number of heap bytes waiting to be used.",
+ nil, nil,
+ ),
+ eval: func(ms *runtime.MemStats) float64 { return float64(ms.HeapIdle) },
+ valType: GaugeValue,
+ }, {
+ desc: NewDesc(
+ memstatNamespace("heap_inuse_bytes"),
+ "Number of heap bytes that are in use.",
+ nil, nil,
+ ),
+ eval: func(ms *runtime.MemStats) float64 { return float64(ms.HeapInuse) },
+ valType: GaugeValue,
+ }, {
+ desc: NewDesc(
+ memstatNamespace("heap_released_bytes_total"),
+ "Total number of heap bytes released to OS.",
+ nil, nil,
+ ),
+ eval: func(ms *runtime.MemStats) float64 { return float64(ms.HeapReleased) },
+ valType: CounterValue,
+ }, {
+ desc: NewDesc(
+ memstatNamespace("heap_objects"),
+ "Number of allocated objects.",
+ nil, nil,
+ ),
+ eval: func(ms *runtime.MemStats) float64 { return float64(ms.HeapObjects) },
+ valType: GaugeValue,
+ }, {
+ desc: NewDesc(
+ memstatNamespace("stack_inuse_bytes"),
+ "Number of bytes in use by the stack allocator.",
+ nil, nil,
+ ),
+ eval: func(ms *runtime.MemStats) float64 { return float64(ms.StackInuse) },
+ valType: GaugeValue,
+ }, {
+ desc: NewDesc(
+ memstatNamespace("stack_sys_bytes"),
+ "Number of bytes obtained from system for stack allocator.",
+ nil, nil,
+ ),
+ eval: func(ms *runtime.MemStats) float64 { return float64(ms.StackSys) },
+ valType: GaugeValue,
+ }, {
+ desc: NewDesc(
+ memstatNamespace("mspan_inuse_bytes"),
+ "Number of bytes in use by mspan structures.",
+ nil, nil,
+ ),
+ eval: func(ms *runtime.MemStats) float64 { return float64(ms.MSpanInuse) },
+ valType: GaugeValue,
+ }, {
+ desc: NewDesc(
+ memstatNamespace("mspan_sys_bytes"),
+ "Number of bytes used for mspan structures obtained from system.",
+ nil, nil,
+ ),
+ eval: func(ms *runtime.MemStats) float64 { return float64(ms.MSpanSys) },
+ valType: GaugeValue,
+ }, {
+ desc: NewDesc(
+ memstatNamespace("mcache_inuse_bytes"),
+ "Number of bytes in use by mcache structures.",
+ nil, nil,
+ ),
+ eval: func(ms *runtime.MemStats) float64 { return float64(ms.MCacheInuse) },
+ valType: GaugeValue,
+ }, {
+ desc: NewDesc(
+ memstatNamespace("mcache_sys_bytes"),
+ "Number of bytes used for mcache structures obtained from system.",
+ nil, nil,
+ ),
+ eval: func(ms *runtime.MemStats) float64 { return float64(ms.MCacheSys) },
+ valType: GaugeValue,
+ }, {
+ desc: NewDesc(
+ memstatNamespace("buck_hash_sys_bytes"),
+ "Number of bytes used by the profiling bucket hash table.",
+ nil, nil,
+ ),
+ eval: func(ms *runtime.MemStats) float64 { return float64(ms.BuckHashSys) },
+ valType: GaugeValue,
+ }, {
+ desc: NewDesc(
+ memstatNamespace("gc_sys_bytes"),
+ "Number of bytes used for garbage collection system metadata.",
+ nil, nil,
+ ),
+ eval: func(ms *runtime.MemStats) float64 { return float64(ms.GCSys) },
+ valType: GaugeValue,
+ }, {
+ desc: NewDesc(
+ memstatNamespace("other_sys_bytes"),
+ "Number of bytes used for other system allocations.",
+ nil, nil,
+ ),
+ eval: func(ms *runtime.MemStats) float64 { return float64(ms.OtherSys) },
+ valType: GaugeValue,
+ }, {
+ desc: NewDesc(
+ memstatNamespace("next_gc_bytes"),
+ "Number of heap bytes when next garbage collection will take place.",
+ nil, nil,
+ ),
+ eval: func(ms *runtime.MemStats) float64 { return float64(ms.NextGC) },
+ valType: GaugeValue,
+ }, {
+ desc: NewDesc(
+ memstatNamespace("last_gc_time_seconds"),
+ "Number of seconds since 1970 of last garbage collection.",
+ nil, nil,
+ ),
+ eval: func(ms *runtime.MemStats) float64 { return float64(ms.LastGC) / 1e9 },
+ valType: GaugeValue,
+ },
+ },
+ }
+}
+
+func memstatNamespace(s string) string {
+ return fmt.Sprintf("go_memstats_%s", s)
+}
+
+// Describe returns all descriptions of the collector.
+func (c *goCollector) Describe(ch chan<- *Desc) {
+ ch <- c.goroutines.Desc()
+ ch <- c.gcDesc
+
+ for _, i := range c.metrics {
+ ch <- i.desc
+ }
+}
+
+// Collect returns the current state of all metrics of the collector.
+func (c *goCollector) Collect(ch chan<- Metric) {
+ c.goroutines.Set(float64(runtime.NumGoroutine()))
+ ch <- c.goroutines
+
+ var stats debug.GCStats
+ stats.PauseQuantiles = make([]time.Duration, 5)
+ debug.ReadGCStats(&stats)
+
+ quantiles := make(map[float64]float64)
+ for idx, pq := range stats.PauseQuantiles[1:] {
+ quantiles[float64(idx+1)/float64(len(stats.PauseQuantiles)-1)] = pq.Seconds()
+ }
+ quantiles[0.0] = stats.PauseQuantiles[0].Seconds()
+ ch <- MustNewConstSummary(c.gcDesc, uint64(stats.NumGC), float64(stats.PauseTotal.Seconds()), quantiles)
+
+ ms := &runtime.MemStats{}
+ runtime.ReadMemStats(ms)
+ for _, i := range c.metrics {
+ ch <- MustNewConstMetric(i.desc, i.valType, i.eval(ms))
+ }
+}
+
+// memStatsMetrics provides the description, value, and value type for memstat metrics.
+type memStatsMetrics []struct {
+ desc *Desc
+ eval func(*runtime.MemStats) float64
+ valType ValueType
+}
diff --git a/vendor/github.com/prometheus/client_golang/prometheus/histogram.go b/vendor/github.com/prometheus/client_golang/prometheus/histogram.go
new file mode 100644
index 00000000..7a689108
--- /dev/null
+++ b/vendor/github.com/prometheus/client_golang/prometheus/histogram.go
@@ -0,0 +1,448 @@
+// Copyright 2015 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package prometheus
+
+import (
+ "fmt"
+ "math"
+ "sort"
+ "sync/atomic"
+
+ "github.com/golang/protobuf/proto"
+
+ dto "github.com/prometheus/client_model/go"
+)
+
+// A Histogram counts individual observations from an event or sample stream in
+// configurable buckets. Similar to a summary, it also provides a sum of
+// observations and an observation count.
+//
+// On the Prometheus server, quantiles can be calculated from a Histogram using
+// the histogram_quantile function in the query language.
+//
+// Note that Histograms, in contrast to Summaries, can be aggregated with the
+// Prometheus query language (see the documentation for detailed
+// procedures). However, Histograms require the user to pre-define suitable
+// buckets, and they are in general less accurate. The Observe method of a
+// Histogram has a very low performance overhead in comparison with the Observe
+// method of a Summary.
+//
+// To create Histogram instances, use NewHistogram.
+type Histogram interface {
+ Metric
+ Collector
+
+ // Observe adds a single observation to the histogram.
+ Observe(float64)
+}
+
+// bucketLabel is used for the label that defines the upper bound of a
+// bucket of a histogram ("le" -> "less or equal").
+const bucketLabel = "le"
+
+var (
+ // DefBuckets are the default Histogram buckets. The default buckets are
+ // tailored to broadly measure the response time (in seconds) of a
+ // network service. Most likely, however, you will be required to define
+ // buckets customized to your use case.
+ DefBuckets = []float64{.005, .01, .025, .05, .1, .25, .5, 1, 2.5, 5, 10}
+
+ errBucketLabelNotAllowed = fmt.Errorf(
+ "%q is not allowed as label name in histograms", bucketLabel,
+ )
+)
+
+// LinearBuckets creates 'count' buckets, each 'width' wide, where the lowest
+// bucket has an upper bound of 'start'. The final +Inf bucket is not counted
+// and not included in the returned slice. The returned slice is meant to be
+// used for the Buckets field of HistogramOpts.
+//
+// The function panics if 'count' is zero or negative.
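+//
+// For example:
+//
+//    LinearBuckets(20, 5, 3) // => []float64{20, 25, 30}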
+func LinearBuckets(start, width float64, count int) []float64 {
+ if count < 1 {
+ panic("LinearBuckets needs a positive count")
+ }
+ buckets := make([]float64, count)
+ for i := range buckets {
+ buckets[i] = start
+ start += width
+ }
+ return buckets
+}
+
+// ExponentialBuckets creates 'count' buckets, where the lowest bucket has an
+// upper bound of 'start' and each following bucket's upper bound is 'factor'
+// times the previous bucket's upper bound. The final +Inf bucket is not counted
+// and not included in the returned slice. The returned slice is meant to be
+// used for the Buckets field of HistogramOpts.
+//
+// The function panics if 'count' is 0 or negative, if 'start' is 0 or negative,
+// or if 'factor' is less than or equal to 1.
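+//
+// For example:
+//
+//    ExponentialBuckets(1, 2, 4) // => []float64{1, 2, 4, 8}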
+func ExponentialBuckets(start, factor float64, count int) []float64 {
+ if count < 1 {
+ panic("ExponentialBuckets needs a positive count")
+ }
+ if start <= 0 {
+ panic("ExponentialBuckets needs a positive start value")
+ }
+ if factor <= 1 {
+ panic("ExponentialBuckets needs a factor greater than 1")
+ }
+ buckets := make([]float64, count)
+ for i := range buckets {
+ buckets[i] = start
+ start *= factor
+ }
+ return buckets
+}
+
+// HistogramOpts bundles the options for creating a Histogram metric. It is
+// mandatory to set Name and Help to a non-empty string. All other fields are
+// optional and can safely be left at their zero value.
+type HistogramOpts struct {
+ // Namespace, Subsystem, and Name are components of the fully-qualified
+ // name of the Histogram (created by joining these components with
+ // "_"). Only Name is mandatory, the others merely help structuring the
+ // name. Note that the fully-qualified name of the Histogram must be a
+ // valid Prometheus metric name.
+ Namespace string
+ Subsystem string
+ Name string
+
+ // Help provides information about this Histogram. Mandatory!
+ //
+ // Metrics with the same fully-qualified name must have the same Help
+ // string.
+ Help string
+
+ // ConstLabels are used to attach fixed labels to this
+ // Histogram. Histograms with the same fully-qualified name must have the
+ // same label names in their ConstLabels.
+ //
+ // Note that in most cases, labels have a value that varies during the
+ // lifetime of a process. Those labels are usually managed with a
+ // HistogramVec. ConstLabels serve only special purposes. One is for the
+ // special case where the value of a label does not change during the
+ // lifetime of a process, e.g. if the revision of the running binary is
+ // put into a label. Another, more advanced purpose is if more than one
+ // Collector needs to collect Histograms with the same fully-qualified
+ // name. In that case, those Histograms must differ in the values of
+ // their ConstLabels. See the Collector examples.
+ //
+ // If the value of a label never changes (not even between binaries),
+ // that label most likely should not be a label at all (but part of the
+ // metric name).
+ ConstLabels Labels
+
+ // Buckets defines the buckets into which observations are counted. Each
+ // element in the slice is the upper inclusive bound of a bucket. The
+ // values must be sorted in strictly increasing order. There is no need
+ // to add a highest bucket with +Inf bound, it will be added
+ // implicitly. The default value is DefBuckets.
+ Buckets []float64
+}
+
+// NewHistogram creates a new Histogram based on the provided HistogramOpts. It
+// panics if the buckets in HistogramOpts are not in strictly increasing order.
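+//
+// A minimal usage sketch (the metric name and buckets are illustrative):
+//
+//    reqDuration := NewHistogram(HistogramOpts{
+//        Name:    "http_request_duration_seconds",
+//        Help:    "HTTP request latency in seconds.",
+//        Buckets: LinearBuckets(0.05, 0.05, 10),
+//    })
+//    MustRegister(reqDuration)
+//    reqDuration.Observe(0.42)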
+func NewHistogram(opts HistogramOpts) Histogram {
+ return newHistogram(
+ NewDesc(
+ BuildFQName(opts.Namespace, opts.Subsystem, opts.Name),
+ opts.Help,
+ nil,
+ opts.ConstLabels,
+ ),
+ opts,
+ )
+}
+
+func newHistogram(desc *Desc, opts HistogramOpts, labelValues ...string) Histogram {
+ if len(desc.variableLabels) != len(labelValues) {
+ panic(errInconsistentCardinality)
+ }
+
+ for _, n := range desc.variableLabels {
+ if n == bucketLabel {
+ panic(errBucketLabelNotAllowed)
+ }
+ }
+ for _, lp := range desc.constLabelPairs {
+ if lp.GetName() == bucketLabel {
+ panic(errBucketLabelNotAllowed)
+ }
+ }
+
+ if len(opts.Buckets) == 0 {
+ opts.Buckets = DefBuckets
+ }
+
+ h := &histogram{
+ desc: desc,
+ upperBounds: opts.Buckets,
+ labelPairs: makeLabelPairs(desc, labelValues),
+ }
+ for i, upperBound := range h.upperBounds {
+ if i < len(h.upperBounds)-1 {
+ if upperBound >= h.upperBounds[i+1] {
+ panic(fmt.Errorf(
+ "histogram buckets must be in increasing order: %f >= %f",
+ upperBound, h.upperBounds[i+1],
+ ))
+ }
+ } else {
+ if math.IsInf(upperBound, +1) {
+ // The +Inf bucket is implicit. Remove it here.
+ h.upperBounds = h.upperBounds[:i]
+ }
+ }
+ }
+ // Finally we know the final length of h.upperBounds and can make counts.
+ h.counts = make([]uint64, len(h.upperBounds))
+
+ h.Init(h) // Init self-collection.
+ return h
+}
+
+type histogram struct {
+ // sumBits contains the bits of the float64 representing the sum of all
+ // observations. sumBits and count have to go first in the struct to
+ // guarantee alignment for atomic operations.
+ // http://golang.org/pkg/sync/atomic/#pkg-note-BUG
+ sumBits uint64
+ count uint64
+
+ SelfCollector
+ // Note that there is no mutex required.
+
+ desc *Desc
+
+ upperBounds []float64
+ counts []uint64
+
+ labelPairs []*dto.LabelPair
+}
+
+func (h *histogram) Desc() *Desc {
+ return h.desc
+}
+
+func (h *histogram) Observe(v float64) {
+ // TODO(beorn7): For small numbers of buckets (<30), a linear search is
+ // slightly faster than the binary search. If we really care, we could
+ // switch from one search strategy to the other depending on the number
+ // of buckets.
+ //
+ // Microbenchmarks (BenchmarkHistogramNoLabels):
+ // 11 buckets: 38.3 ns/op linear - binary 48.7 ns/op
+ // 100 buckets: 78.1 ns/op linear - binary 54.9 ns/op
+ // 300 buckets: 154 ns/op linear - binary 61.6 ns/op
+ i := sort.SearchFloat64s(h.upperBounds, v)
+ if i < len(h.counts) {
+ atomic.AddUint64(&h.counts[i], 1)
+ }
+ atomic.AddUint64(&h.count, 1)
+ for {
+ oldBits := atomic.LoadUint64(&h.sumBits)
+ newBits := math.Float64bits(math.Float64frombits(oldBits) + v)
+ if atomic.CompareAndSwapUint64(&h.sumBits, oldBits, newBits) {
+ break
+ }
+ }
+}
+
+func (h *histogram) Write(out *dto.Metric) error {
+ his := &dto.Histogram{}
+ buckets := make([]*dto.Bucket, len(h.upperBounds))
+
+ his.SampleSum = proto.Float64(math.Float64frombits(atomic.LoadUint64(&h.sumBits)))
+ his.SampleCount = proto.Uint64(atomic.LoadUint64(&h.count))
+ var count uint64
+ for i, upperBound := range h.upperBounds {
+ count += atomic.LoadUint64(&h.counts[i])
+ buckets[i] = &dto.Bucket{
+ CumulativeCount: proto.Uint64(count),
+ UpperBound: proto.Float64(upperBound),
+ }
+ }
+ his.Bucket = buckets
+ out.Histogram = his
+ out.Label = h.labelPairs
+ return nil
+}
+
+// HistogramVec is a Collector that bundles a set of Histograms that all share the
+// same Desc, but have different values for their variable labels. This is used
+// if you want to count the same thing partitioned by various dimensions
+// (e.g. HTTP request latencies, partitioned by status code and method). Create
+// instances with NewHistogramVec.
+type HistogramVec struct {
+ MetricVec
+}
+
+// NewHistogramVec creates a new HistogramVec based on the provided HistogramOpts and
+// partitioned by the given label names. At least one label name must be
+// provided.
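+//
+// A minimal usage sketch (the metric name and label names are illustrative):
+//
+//	latencies := prometheus.NewHistogramVec(
+//		prometheus.HistogramOpts{
+//			Name: "example_http_request_duration_seconds",
+//			Help: "Example request latencies, partitioned by code and method.",
+//		},
+//		[]string{"code", "method"},
+//	)
+//	prometheus.MustRegister(latencies)
+//	latencies.WithLabelValues("200", "get").Observe(0.017)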
+func NewHistogramVec(opts HistogramOpts, labelNames []string) *HistogramVec {
+ desc := NewDesc(
+ BuildFQName(opts.Namespace, opts.Subsystem, opts.Name),
+ opts.Help,
+ labelNames,
+ opts.ConstLabels,
+ )
+ return &HistogramVec{
+ MetricVec: MetricVec{
+ children: map[uint64]Metric{},
+ desc: desc,
+ newMetric: func(lvs ...string) Metric {
+ return newHistogram(desc, opts, lvs...)
+ },
+ },
+ }
+}
+
+// GetMetricWithLabelValues replaces the method of the same name in
+// MetricVec. The difference is that this method returns a Histogram and not a
+// Metric so that no type conversion is required.
+func (m *HistogramVec) GetMetricWithLabelValues(lvs ...string) (Histogram, error) {
+ metric, err := m.MetricVec.GetMetricWithLabelValues(lvs...)
+ if metric != nil {
+ return metric.(Histogram), err
+ }
+ return nil, err
+}
+
+// GetMetricWith replaces the method of the same name in MetricVec. The
+// difference is that this method returns a Histogram and not a Metric so that no
+// type conversion is required.
+func (m *HistogramVec) GetMetricWith(labels Labels) (Histogram, error) {
+ metric, err := m.MetricVec.GetMetricWith(labels)
+ if metric != nil {
+ return metric.(Histogram), err
+ }
+ return nil, err
+}
+
+// WithLabelValues works as GetMetricWithLabelValues, but panics where
+// GetMetricWithLabelValues would have returned an error. By not returning an
+// error, WithLabelValues allows shortcuts like
+// myVec.WithLabelValues("404", "GET").Observe(42.21)
+func (m *HistogramVec) WithLabelValues(lvs ...string) Histogram {
+ return m.MetricVec.WithLabelValues(lvs...).(Histogram)
+}
+
+// With works as GetMetricWith, but panics where GetMetricWith would have
+// returned an error. By not returning an error, With allows shortcuts like
+// myVec.With(Labels{"code": "404", "method": "GET"}).Observe(42.21)
+func (m *HistogramVec) With(labels Labels) Histogram {
+ return m.MetricVec.With(labels).(Histogram)
+}
+
+type constHistogram struct {
+ desc *Desc
+ count uint64
+ sum float64
+ buckets map[float64]uint64
+ labelPairs []*dto.LabelPair
+}
+
+func (h *constHistogram) Desc() *Desc {
+ return h.desc
+}
+
+func (h *constHistogram) Write(out *dto.Metric) error {
+ his := &dto.Histogram{}
+ buckets := make([]*dto.Bucket, 0, len(h.buckets))
+
+ his.SampleCount = proto.Uint64(h.count)
+ his.SampleSum = proto.Float64(h.sum)
+
+ for upperBound, count := range h.buckets {
+ buckets = append(buckets, &dto.Bucket{
+ CumulativeCount: proto.Uint64(count),
+ UpperBound: proto.Float64(upperBound),
+ })
+ }
+
+ if len(buckets) > 0 {
+ sort.Sort(buckSort(buckets))
+ }
+ his.Bucket = buckets
+
+ out.Histogram = his
+ out.Label = h.labelPairs
+
+ return nil
+}
+
+// NewConstHistogram returns a metric representing a Prometheus histogram with
+// fixed values for the count, sum, and bucket counts. As those parameters
+// cannot be changed, the returned value does not implement the Histogram
+// interface (but only the Metric interface). Users of this package will not
+// have much use for it in regular operations. However, when implementing custom
+// Collectors, it is useful as a throw-away metric that is generated on the fly
+// and sent to Prometheus in the Collect method.
+//
+// buckets is a map of upper bounds to cumulative counts, excluding the +Inf
+// bucket.
+//
+// NewConstHistogram returns an error if the length of labelValues is not
+// consistent with the variable labels in Desc.
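+//
+// A minimal sketch of its use inside a custom Collector's Collect method
+// (the descriptor, counts, sum, and bucket bounds are invented for
+// illustration; ch is the chan<- Metric passed to Collect):
+//
+//	desc := prometheus.NewDesc("example_duration_seconds", "An example histogram.", nil, nil)
+//	m, err := prometheus.NewConstHistogram(
+//		desc,
+//		42,   // count of all observations
+//		11.5, // sum of all observations
+//		map[float64]uint64{0.5: 30, 1: 40}, // cumulative counts per upper bound
+//	)
+//	if err == nil {
+//		ch <- m
+//	}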
+func NewConstHistogram(
+ desc *Desc,
+ count uint64,
+ sum float64,
+ buckets map[float64]uint64,
+ labelValues ...string,
+) (Metric, error) {
+ if len(desc.variableLabels) != len(labelValues) {
+ return nil, errInconsistentCardinality
+ }
+ return &constHistogram{
+ desc: desc,
+ count: count,
+ sum: sum,
+ buckets: buckets,
+ labelPairs: makeLabelPairs(desc, labelValues),
+ }, nil
+}
+
+// MustNewConstHistogram is a version of NewConstHistogram that panics where
+// NewConstHistogram would have returned an error.
+func MustNewConstHistogram(
+ desc *Desc,
+ count uint64,
+ sum float64,
+ buckets map[float64]uint64,
+ labelValues ...string,
+) Metric {
+ m, err := NewConstHistogram(desc, count, sum, buckets, labelValues...)
+ if err != nil {
+ panic(err)
+ }
+ return m
+}
+
+type buckSort []*dto.Bucket
+
+func (s buckSort) Len() int {
+ return len(s)
+}
+
+func (s buckSort) Swap(i, j int) {
+ s[i], s[j] = s[j], s[i]
+}
+
+func (s buckSort) Less(i, j int) bool {
+ return s[i].GetUpperBound() < s[j].GetUpperBound()
+}
diff --git a/vendor/github.com/prometheus/client_golang/prometheus/http.go b/vendor/github.com/prometheus/client_golang/prometheus/http.go
new file mode 100644
index 00000000..eabe6024
--- /dev/null
+++ b/vendor/github.com/prometheus/client_golang/prometheus/http.go
@@ -0,0 +1,361 @@
+// Copyright 2014 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package prometheus
+
+import (
+ "bufio"
+ "io"
+ "net"
+ "net/http"
+ "strconv"
+ "strings"
+ "time"
+)
+
+var instLabels = []string{"method", "code"}
+
+type nower interface {
+ Now() time.Time
+}
+
+type nowFunc func() time.Time
+
+func (n nowFunc) Now() time.Time {
+ return n()
+}
+
+var now nower = nowFunc(func() time.Time {
+ return time.Now()
+})
+
+func nowSeries(t ...time.Time) nower {
+ return nowFunc(func() time.Time {
+ defer func() {
+ t = t[1:]
+ }()
+
+ return t[0]
+ })
+}
+
+// InstrumentHandler wraps the given HTTP handler for instrumentation. It
+// registers four metric collectors (if not already done) and reports HTTP
+// metrics to the (newly or already) registered collectors: http_requests_total
+// (CounterVec), http_request_duration_microseconds (Summary),
+// http_request_size_bytes (Summary), http_response_size_bytes (Summary). Each
+// has a constant label named "handler" with the provided handlerName as
+// value. http_requests_total is a metric vector partitioned by HTTP method
+// (label name "method") and HTTP status code (label name "code").
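+//
+// A minimal usage sketch (the handler name and mux pattern are arbitrary,
+// and apiHandler stands for any http.Handler):
+//
+//	http.Handle("/api", prometheus.InstrumentHandler("api", apiHandler))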
+func InstrumentHandler(handlerName string, handler http.Handler) http.HandlerFunc {
+ return InstrumentHandlerFunc(handlerName, handler.ServeHTTP)
+}
+
+// InstrumentHandlerFunc wraps the given function for instrumentation. It
+// otherwise works in the same way as InstrumentHandler.
+func InstrumentHandlerFunc(handlerName string, handlerFunc func(http.ResponseWriter, *http.Request)) http.HandlerFunc {
+ return InstrumentHandlerFuncWithOpts(
+ SummaryOpts{
+ Subsystem: "http",
+ ConstLabels: Labels{"handler": handlerName},
+ },
+ handlerFunc,
+ )
+}
+
+// InstrumentHandlerWithOpts works like InstrumentHandler but provides more
+// flexibility (at the cost of a more complex call syntax). Like
+// InstrumentHandler, this function registers four metric collectors, but it
+// uses the provided SummaryOpts to create them. However, the fields "Name" and
+// "Help" in the SummaryOpts are ignored. "Name" is replaced by
+// "requests_total", "request_duration_microseconds", "request_size_bytes", and
+// "response_size_bytes", respectively. "Help" is replaced by an appropriate
+// help string. The names of the variable labels of the http_requests_total
+// CounterVec are "method" (get, post, etc.), and "code" (HTTP status code).
+//
+// If InstrumentHandlerWithOpts is called as follows, it mimics exactly the
+// behavior of InstrumentHandler:
+//
+// prometheus.InstrumentHandlerWithOpts(
+// prometheus.SummaryOpts{
+// Subsystem: "http",
+// ConstLabels: prometheus.Labels{"handler": handlerName},
+// },
+// handler,
+// )
+//
+// Technical detail: "requests_total" is a CounterVec, not a SummaryVec, so it
+// cannot use SummaryOpts. Instead, a CounterOpts struct is created internally,
+// and all its fields are set to the equally named fields in the provided
+// SummaryOpts.
+func InstrumentHandlerWithOpts(opts SummaryOpts, handler http.Handler) http.HandlerFunc {
+ return InstrumentHandlerFuncWithOpts(opts, handler.ServeHTTP)
+}
+
+// InstrumentHandlerFuncWithOpts works like InstrumentHandlerFunc but provides
+// more flexibility (at the cost of a more complex call syntax). See
+// InstrumentHandlerWithOpts for details on how the provided SummaryOpts are used.
+func InstrumentHandlerFuncWithOpts(opts SummaryOpts, handlerFunc func(http.ResponseWriter, *http.Request)) http.HandlerFunc {
+ reqCnt := NewCounterVec(
+ CounterOpts{
+ Namespace: opts.Namespace,
+ Subsystem: opts.Subsystem,
+ Name: "requests_total",
+ Help: "Total number of HTTP requests made.",
+ ConstLabels: opts.ConstLabels,
+ },
+ instLabels,
+ )
+
+ opts.Name = "request_duration_microseconds"
+ opts.Help = "The HTTP request latencies in microseconds."
+ reqDur := NewSummary(opts)
+
+ opts.Name = "request_size_bytes"
+ opts.Help = "The HTTP request sizes in bytes."
+ reqSz := NewSummary(opts)
+
+ opts.Name = "response_size_bytes"
+ opts.Help = "The HTTP response sizes in bytes."
+ resSz := NewSummary(opts)
+
+ regReqCnt := MustRegisterOrGet(reqCnt).(*CounterVec)
+ regReqDur := MustRegisterOrGet(reqDur).(Summary)
+ regReqSz := MustRegisterOrGet(reqSz).(Summary)
+ regResSz := MustRegisterOrGet(resSz).(Summary)
+
+ return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
+ now := time.Now()
+
+ delegate := &responseWriterDelegator{ResponseWriter: w}
+ out := make(chan int)
+ urlLen := 0
+ if r.URL != nil {
+ urlLen = len(r.URL.String())
+ }
+ go computeApproximateRequestSize(r, out, urlLen)
+
+ _, cn := w.(http.CloseNotifier)
+ _, fl := w.(http.Flusher)
+ _, hj := w.(http.Hijacker)
+ _, rf := w.(io.ReaderFrom)
+ var rw http.ResponseWriter
+ if cn && fl && hj && rf {
+ rw = &fancyResponseWriterDelegator{delegate}
+ } else {
+ rw = delegate
+ }
+ handlerFunc(rw, r)
+
+ elapsed := float64(time.Since(now)) / float64(time.Microsecond)
+
+ method := sanitizeMethod(r.Method)
+ code := sanitizeCode(delegate.status)
+ regReqCnt.WithLabelValues(method, code).Inc()
+ regReqDur.Observe(elapsed)
+ regResSz.Observe(float64(delegate.written))
+ regReqSz.Observe(float64(<-out))
+ })
+}
+
+func computeApproximateRequestSize(r *http.Request, out chan int, s int) {
+ s += len(r.Method)
+ s += len(r.Proto)
+ for name, values := range r.Header {
+ s += len(name)
+ for _, value := range values {
+ s += len(value)
+ }
+ }
+ s += len(r.Host)
+
+ // N.B. r.Form and r.MultipartForm are assumed to be included in r.URL.
+
+ if r.ContentLength != -1 {
+ s += int(r.ContentLength)
+ }
+ out <- s
+}
+
+type responseWriterDelegator struct {
+ http.ResponseWriter
+
+ handler, method string
+ status int
+ written int64
+ wroteHeader bool
+}
+
+func (r *responseWriterDelegator) WriteHeader(code int) {
+ r.status = code
+ r.wroteHeader = true
+ r.ResponseWriter.WriteHeader(code)
+}
+
+func (r *responseWriterDelegator) Write(b []byte) (int, error) {
+ if !r.wroteHeader {
+ r.WriteHeader(http.StatusOK)
+ }
+ n, err := r.ResponseWriter.Write(b)
+ r.written += int64(n)
+ return n, err
+}
+
+type fancyResponseWriterDelegator struct {
+ *responseWriterDelegator
+}
+
+func (f *fancyResponseWriterDelegator) CloseNotify() <-chan bool {
+ return f.ResponseWriter.(http.CloseNotifier).CloseNotify()
+}
+
+func (f *fancyResponseWriterDelegator) Flush() {
+ f.ResponseWriter.(http.Flusher).Flush()
+}
+
+func (f *fancyResponseWriterDelegator) Hijack() (net.Conn, *bufio.ReadWriter, error) {
+ return f.ResponseWriter.(http.Hijacker).Hijack()
+}
+
+func (f *fancyResponseWriterDelegator) ReadFrom(r io.Reader) (int64, error) {
+ if !f.wroteHeader {
+ f.WriteHeader(http.StatusOK)
+ }
+ n, err := f.ResponseWriter.(io.ReaderFrom).ReadFrom(r)
+ f.written += n
+ return n, err
+}
+
+func sanitizeMethod(m string) string {
+ switch m {
+ case "GET", "get":
+ return "get"
+ case "PUT", "put":
+ return "put"
+ case "HEAD", "head":
+ return "head"
+ case "POST", "post":
+ return "post"
+ case "DELETE", "delete":
+ return "delete"
+ case "CONNECT", "connect":
+ return "connect"
+ case "OPTIONS", "options":
+ return "options"
+ case "NOTIFY", "notify":
+ return "notify"
+ default:
+ return strings.ToLower(m)
+ }
+}
+
+func sanitizeCode(s int) string {
+ switch s {
+ case 100:
+ return "100"
+ case 101:
+ return "101"
+
+ case 200:
+ return "200"
+ case 201:
+ return "201"
+ case 202:
+ return "202"
+ case 203:
+ return "203"
+ case 204:
+ return "204"
+ case 205:
+ return "205"
+ case 206:
+ return "206"
+
+ case 300:
+ return "300"
+ case 301:
+ return "301"
+ case 302:
+ return "302"
+ case 304:
+ return "304"
+ case 305:
+ return "305"
+ case 307:
+ return "307"
+
+ case 400:
+ return "400"
+ case 401:
+ return "401"
+ case 402:
+ return "402"
+ case 403:
+ return "403"
+ case 404:
+ return "404"
+ case 405:
+ return "405"
+ case 406:
+ return "406"
+ case 407:
+ return "407"
+ case 408:
+ return "408"
+ case 409:
+ return "409"
+ case 410:
+ return "410"
+ case 411:
+ return "411"
+ case 412:
+ return "412"
+ case 413:
+ return "413"
+ case 414:
+ return "414"
+ case 415:
+ return "415"
+ case 416:
+ return "416"
+ case 417:
+ return "417"
+ case 418:
+ return "418"
+
+ case 500:
+ return "500"
+ case 501:
+ return "501"
+ case 502:
+ return "502"
+ case 503:
+ return "503"
+ case 504:
+ return "504"
+ case 505:
+ return "505"
+
+ case 428:
+ return "428"
+ case 429:
+ return "429"
+ case 431:
+ return "431"
+ case 511:
+ return "511"
+
+ default:
+ return strconv.Itoa(s)
+ }
+}
diff --git a/vendor/github.com/prometheus/client_golang/prometheus/metric.go b/vendor/github.com/prometheus/client_golang/prometheus/metric.go
new file mode 100644
index 00000000..86fd81c1
--- /dev/null
+++ b/vendor/github.com/prometheus/client_golang/prometheus/metric.go
@@ -0,0 +1,166 @@
+// Copyright 2014 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package prometheus
+
+import (
+ "strings"
+
+ dto "github.com/prometheus/client_model/go"
+)
+
+const separatorByte byte = 255
+
+// A Metric models a single sample value with its metadata being exported to
+// Prometheus. Implementers of Metric in this package include Gauge, Counter,
+// Histogram, Summary, and Untyped. Users can implement their own Metric types,
+// but that should rarely be needed. See the example for SelfCollector, which is
+// also an example for a user-implemented Metric.
+type Metric interface {
+ // Desc returns the descriptor for the Metric. This method idempotently
+ // returns the same descriptor throughout the lifetime of the
+ // Metric. The returned descriptor is immutable by contract. A Metric
+ // unable to describe itself must return an invalid descriptor (created
+ // with NewInvalidDesc).
+ Desc() *Desc
+ // Write encodes the Metric into a "Metric" Protocol Buffer data
+ // transmission object.
+ //
+ // Implementers of custom Metric types must observe concurrency safety
+ // as reads of this metric may occur at any time, and any blocking
+ // occurs at the expense of total performance of rendering all
+ // registered metrics. Ideally Metric implementations should support
+ // concurrent readers.
+ //
+ // The Prometheus client library attempts to minimize memory allocations
+ // and will provide a pre-existing reset dto.Metric pointer. Prometheus
+ // may recycle the dto.Metric proto message, so Metric implementations
+ // should just populate the provided dto.Metric and then should not keep
+ // any reference to it.
+ //
+ // While populating dto.Metric, labels must be sorted lexicographically.
+ // (Implementers may find LabelPairSorter useful for that.)
+ Write(*dto.Metric) error
+}
+
+// Opts bundles the options for creating most Metric types. Each metric
+// implementation XXX has its own XXXOpts type, but in most cases, it is just
+// an alias of this type (which might change when the requirement arises).
+//
+// It is mandatory to set Name and Help to a non-empty string. All other fields
+// are optional and can safely be left at their zero value.
+type Opts struct {
+ // Namespace, Subsystem, and Name are components of the fully-qualified
+ // name of the Metric (created by joining these components with
+ // "_"). Only Name is mandatory, the others merely help structuring the
+ // name. Note that the fully-qualified name of the metric must be a
+ // valid Prometheus metric name.
+ Namespace string
+ Subsystem string
+ Name string
+
+ // Help provides information about this metric. Mandatory!
+ //
+ // Metrics with the same fully-qualified name must have the same Help
+ // string.
+ Help string
+
+ // ConstLabels are used to attach fixed labels to this metric. Metrics
+ // with the same fully-qualified name must have the same label names in
+ // their ConstLabels.
+ //
+ // Note that in most cases, labels have a value that varies during the
+ // lifetime of a process. Those labels are usually managed with a metric
+ // vector collector (like CounterVec, GaugeVec, UntypedVec). ConstLabels
+ // serve only special purposes. One is for the special case where the
+ // value of a label does not change during the lifetime of a process,
+ // e.g. if the revision of the running binary is put into a
+ // label. Another, more advanced purpose is if more than one Collector
+ // needs to collect Metrics with the same fully-qualified name. In that
+ // case, those Metrics must differ in the values of their
+ // ConstLabels. See the Collector examples.
+ //
+ // If the value of a label never changes (not even between binaries),
+ // that label most likely should not be a label at all (but part of the
+ // metric name).
+ ConstLabels Labels
+}
+
+// BuildFQName joins the given three name components by "_". Empty name
+// components are ignored. If the name parameter itself is empty, an empty
+// string is returned, no matter what. Metric implementations included in this
+// library use this function internally to generate the fully-qualified metric
+// name from the name component in their Opts. Users of the library will only
+// need this function if they implement their own Metric or instantiate a Desc
+// (with NewDesc) directly.
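+//
+// For example:
+//
+//	BuildFQName("http", "server", "requests_total") // "http_server_requests_total"
+//	BuildFQName("", "server", "requests_total")     // "server_requests_total"
+//	BuildFQName("", "", "up")                       // "up"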
+func BuildFQName(namespace, subsystem, name string) string {
+ if name == "" {
+ return ""
+ }
+ switch {
+ case namespace != "" && subsystem != "":
+ return strings.Join([]string{namespace, subsystem, name}, "_")
+ case namespace != "":
+ return strings.Join([]string{namespace, name}, "_")
+ case subsystem != "":
+ return strings.Join([]string{subsystem, name}, "_")
+ }
+ return name
+}
+
+// LabelPairSorter implements sort.Interface. It is used to sort a slice of
+// dto.LabelPair pointers. This is useful for implementing the Write method of
+// custom metrics.
+type LabelPairSorter []*dto.LabelPair
+
+func (s LabelPairSorter) Len() int {
+ return len(s)
+}
+
+func (s LabelPairSorter) Swap(i, j int) {
+ s[i], s[j] = s[j], s[i]
+}
+
+func (s LabelPairSorter) Less(i, j int) bool {
+ return s[i].GetName() < s[j].GetName()
+}
+
+type hashSorter []uint64
+
+func (s hashSorter) Len() int {
+ return len(s)
+}
+
+func (s hashSorter) Swap(i, j int) {
+ s[i], s[j] = s[j], s[i]
+}
+
+func (s hashSorter) Less(i, j int) bool {
+ return s[i] < s[j]
+}
+
+type invalidMetric struct {
+ desc *Desc
+ err error
+}
+
+// NewInvalidMetric returns a metric whose Write method always returns the
+// provided error. It is useful if a Collector finds itself unable to collect
+// a metric and wishes to report an error to the registry.
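+//
+// A minimal usage sketch inside a Collector's Collect method (desc, err, and
+// ch are assumed to come from the surrounding code):
+//
+//	if err != nil {
+//		ch <- prometheus.NewInvalidMetric(desc, err)
+//		return
+//	}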
+func NewInvalidMetric(desc *Desc, err error) Metric {
+ return &invalidMetric{desc, err}
+}
+
+func (m *invalidMetric) Desc() *Desc { return m.desc }
+
+func (m *invalidMetric) Write(*dto.Metric) error { return m.err }
diff --git a/vendor/github.com/prometheus/client_golang/prometheus/process_collector.go b/vendor/github.com/prometheus/client_golang/prometheus/process_collector.go
new file mode 100644
index 00000000..d8cf0eda
--- /dev/null
+++ b/vendor/github.com/prometheus/client_golang/prometheus/process_collector.go
@@ -0,0 +1,142 @@
+// Copyright 2015 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package prometheus
+
+import "github.com/prometheus/procfs"
+
+type processCollector struct {
+ pid int
+ collectFn func(chan<- Metric)
+ pidFn func() (int, error)
+ cpuTotal Counter
+ openFDs, maxFDs Gauge
+ vsize, rss Gauge
+ startTime Gauge
+}
+
+// NewProcessCollector returns a collector which exports the current state of
+// process metrics, including CPU, memory, and file descriptor usage, as well
+// as the process start time, for the given process ID under the given
+// namespace.
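+//
+// A minimal usage sketch for the current process (an empty namespace is a
+// common choice):
+//
+//	prometheus.MustRegister(prometheus.NewProcessCollector(os.Getpid(), ""))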
+func NewProcessCollector(pid int, namespace string) *processCollector {
+ return NewProcessCollectorPIDFn(
+ func() (int, error) { return pid, nil },
+ namespace,
+ )
+}
+
+// NewProcessCollectorPIDFn returns a collector which exports the current state
+// of process metrics including cpu, memory and file descriptor usage as well
+// as the process start time under the given namespace. The given pidFn is
+// called on each collect and is used to determine the process to export
+// metrics for.
+func NewProcessCollectorPIDFn(
+ pidFn func() (int, error),
+ namespace string,
+) *processCollector {
+ c := processCollector{
+ pidFn: pidFn,
+ collectFn: func(chan<- Metric) {},
+
+ cpuTotal: NewCounter(CounterOpts{
+ Namespace: namespace,
+ Name: "process_cpu_seconds_total",
+ Help: "Total user and system CPU time spent in seconds.",
+ }),
+ openFDs: NewGauge(GaugeOpts{
+ Namespace: namespace,
+ Name: "process_open_fds",
+ Help: "Number of open file descriptors.",
+ }),
+ maxFDs: NewGauge(GaugeOpts{
+ Namespace: namespace,
+ Name: "process_max_fds",
+ Help: "Maximum number of open file descriptors.",
+ }),
+ vsize: NewGauge(GaugeOpts{
+ Namespace: namespace,
+ Name: "process_virtual_memory_bytes",
+ Help: "Virtual memory size in bytes.",
+ }),
+ rss: NewGauge(GaugeOpts{
+ Namespace: namespace,
+ Name: "process_resident_memory_bytes",
+ Help: "Resident memory size in bytes.",
+ }),
+ startTime: NewGauge(GaugeOpts{
+ Namespace: namespace,
+ Name: "process_start_time_seconds",
+ Help: "Start time of the process since unix epoch in seconds.",
+ }),
+ }
+
+ // Set up process metric collection if supported by the runtime.
+ if _, err := procfs.NewStat(); err == nil {
+ c.collectFn = c.processCollect
+ }
+
+ return &c
+}
+
+// Describe returns all descriptions of the collector.
+func (c *processCollector) Describe(ch chan<- *Desc) {
+ ch <- c.cpuTotal.Desc()
+ ch <- c.openFDs.Desc()
+ ch <- c.maxFDs.Desc()
+ ch <- c.vsize.Desc()
+ ch <- c.rss.Desc()
+ ch <- c.startTime.Desc()
+}
+
+// Collect returns the current state of all metrics of the collector.
+func (c *processCollector) Collect(ch chan<- Metric) {
+ c.collectFn(ch)
+}
+
+// TODO(ts): Bring back error reporting by reverting 7faf9e7 as soon as the
+// client allows users to configure the error behavior.
+func (c *processCollector) processCollect(ch chan<- Metric) {
+ pid, err := c.pidFn()
+ if err != nil {
+ return
+ }
+
+ p, err := procfs.NewProc(pid)
+ if err != nil {
+ return
+ }
+
+ if stat, err := p.NewStat(); err == nil {
+ c.cpuTotal.Set(stat.CPUTime())
+ ch <- c.cpuTotal
+ c.vsize.Set(float64(stat.VirtualMemory()))
+ ch <- c.vsize
+ c.rss.Set(float64(stat.ResidentMemory()))
+ ch <- c.rss
+
+ if startTime, err := stat.StartTime(); err == nil {
+ c.startTime.Set(startTime)
+ ch <- c.startTime
+ }
+ }
+
+ if fds, err := p.FileDescriptorsLen(); err == nil {
+ c.openFDs.Set(float64(fds))
+ ch <- c.openFDs
+ }
+
+ if limits, err := p.NewLimits(); err == nil {
+ c.maxFDs.Set(float64(limits.OpenFiles))
+ ch <- c.maxFDs
+ }
+}
diff --git a/vendor/github.com/prometheus/client_golang/prometheus/push.go b/vendor/github.com/prometheus/client_golang/prometheus/push.go
new file mode 100644
index 00000000..5ec0a3ab
--- /dev/null
+++ b/vendor/github.com/prometheus/client_golang/prometheus/push.go
@@ -0,0 +1,65 @@
+// Copyright 2015 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// Copyright (c) 2013, The Prometheus Authors
+// All rights reserved.
+//
+// Use of this source code is governed by a BSD-style license that can be found
+// in the LICENSE file.
+
+package prometheus
+
+// Push triggers a metric collection by the default registry and pushes all
+// collected metrics to the Pushgateway specified by url. See the Pushgateway
+// documentation for detailed implications of the job and instance
+// parameters. instance can be left empty. You can use just host:port or ip:port
+// as url, in which case 'http://' is added automatically. You can also include
+// the scheme in the URL. However, do not include the '/metrics/jobs/...' part.
+//
+// Note that all previously pushed metrics with the same job and instance will
+// be replaced with the metrics pushed by this call. (It uses HTTP method 'PUT'
+// to push to the Pushgateway.)
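+//
+// A minimal usage sketch (job name, instance, and Pushgateway address are
+// placeholders):
+//
+//	if err := prometheus.Push("some_batch_job", "instance0", "pushgateway:9091"); err != nil {
+//		log.Println("could not push to Pushgateway:", err)
+//	}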
+func Push(job, instance, url string) error {
+ return defRegistry.Push(job, instance, url, "PUT")
+}
+
+// PushAdd works like Push, but only previously pushed metrics with the same
+// name (and the same job and instance) will be replaced. (It uses HTTP method
+// 'POST' to push to the Pushgateway.)
+func PushAdd(job, instance, url string) error {
+ return defRegistry.Push(job, instance, url, "POST")
+}
+
+// PushCollectors works like Push, but it does not collect from the default
+// registry. Instead, it collects from the provided collectors. It is a
+// convenient way to push only a few metrics.
+func PushCollectors(job, instance, url string, collectors ...Collector) error {
+ return pushCollectors(job, instance, url, "PUT", collectors...)
+}
+
+// PushAddCollectors works like PushAdd, but it does not collect from the
+// default registry. Instead, it collects from the provided collectors. It is a
+// convenient way to push only a few metrics.
+func PushAddCollectors(job, instance, url string, collectors ...Collector) error {
+ return pushCollectors(job, instance, url, "POST", collectors...)
+}
+
+func pushCollectors(job, instance, url, method string, collectors ...Collector) error {
+ r := newRegistry()
+ for _, collector := range collectors {
+ if _, err := r.Register(collector); err != nil {
+ return err
+ }
+ }
+ return r.Push(job, instance, url, method)
+}
diff --git a/vendor/github.com/prometheus/client_golang/prometheus/registry.go b/vendor/github.com/prometheus/client_golang/prometheus/registry.go
new file mode 100644
index 00000000..1dc25363
--- /dev/null
+++ b/vendor/github.com/prometheus/client_golang/prometheus/registry.go
@@ -0,0 +1,736 @@
+// Copyright 2014 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// Copyright (c) 2013, The Prometheus Authors
+// All rights reserved.
+//
+// Use of this source code is governed by a BSD-style license that can be found
+// in the LICENSE file.
+
+package prometheus
+
+import (
+ "bytes"
+ "compress/gzip"
+ "errors"
+ "fmt"
+ "io"
+ "net/http"
+ "net/url"
+ "os"
+ "sort"
+ "strings"
+ "sync"
+
+ "github.com/golang/protobuf/proto"
+ "github.com/prometheus/common/expfmt"
+
+ dto "github.com/prometheus/client_model/go"
+)
+
+var (
+ defRegistry = newDefaultRegistry()
+ errAlreadyReg = errors.New("duplicate metrics collector registration attempted")
+)
+
+// Constants relevant to the HTTP interface.
+const (
+ // APIVersion is the version of the format of the exported data. This
+ // will match this library's version, which subscribes to the Semantic
+ // Versioning scheme.
+ APIVersion = "0.0.4"
+
+ // DelimitedTelemetryContentType is the content type set on telemetry
+ // data responses in delimited protobuf format.
+ DelimitedTelemetryContentType = `application/vnd.google.protobuf; proto=io.prometheus.client.MetricFamily; encoding=delimited`
+ // TextTelemetryContentType is the content type set on telemetry data
+ // responses in text format.
+ TextTelemetryContentType = `text/plain; version=` + APIVersion
+ // ProtoTextTelemetryContentType is the content type set on telemetry
+ // data responses in protobuf text format. (Only used for debugging.)
+ ProtoTextTelemetryContentType = `application/vnd.google.protobuf; proto=io.prometheus.client.MetricFamily; encoding=text`
+ // ProtoCompactTextTelemetryContentType is the content type set on
+ // telemetry data responses in protobuf compact text format. (Only used
+ // for debugging.)
+ ProtoCompactTextTelemetryContentType = `application/vnd.google.protobuf; proto=io.prometheus.client.MetricFamily; encoding=compact-text`
+
+ // Constants for object pools.
+ numBufs = 4
+ numMetricFamilies = 1000
+ numMetrics = 10000
+
+ // Capacity for the channel to collect metrics and descriptors.
+ capMetricChan = 1000
+ capDescChan = 10
+
+ contentTypeHeader = "Content-Type"
+ contentLengthHeader = "Content-Length"
+ contentEncodingHeader = "Content-Encoding"
+
+ acceptEncodingHeader = "Accept-Encoding"
+ acceptHeader = "Accept"
+)
+
+// Handler returns the HTTP handler for the global Prometheus registry. It is
+// already instrumented with InstrumentHandler (using "prometheus" as handler
+// name). Usually the handler is used to handle the "/metrics" endpoint.
+func Handler() http.Handler {
+ return InstrumentHandler("prometheus", defRegistry)
+}
+
+// UninstrumentedHandler works in the same way as Handler, but the returned HTTP
+// handler is not instrumented. This is useful if no instrumentation is desired
+// (for whatever reason) or if the instrumentation has to happen with a
+// different handler name (or with a different instrumentation approach
+// altogether). See the InstrumentHandler example.
+func UninstrumentedHandler() http.Handler {
+ return defRegistry
+}
+
+// Register registers a new Collector to be included in metrics collection. It
+// returns an error if the descriptors provided by the Collector are invalid or
+// if they - in combination with descriptors of already registered Collectors -
+// do not fulfill the consistency and uniqueness criteria described in the Desc
+// documentation.
+//
+// Do not register the same Collector multiple times concurrently. (Registering
+// the same Collector twice would result in an error anyway, but on top of that,
+// it is not safe to do so concurrently.)
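+//
+// A minimal usage sketch (the counter and its options are illustrative):
+//
+//	requests := prometheus.NewCounter(prometheus.CounterOpts{
+//		Name: "example_requests_total",
+//		Help: "An example counter.",
+//	})
+//	if err := prometheus.Register(requests); err != nil {
+//		// Handle duplicate registration, invalid descriptors, etc.
+//	}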
+func Register(m Collector) error {
+ _, err := defRegistry.Register(m)
+ return err
+}
+
+// MustRegister works like Register but panics where Register would have
+// returned an error.
+func MustRegister(m Collector) {
+ err := Register(m)
+ if err != nil {
+ panic(err)
+ }
+}
+
+// RegisterOrGet works like Register but does not return an error if a Collector
+// is registered that equals a previously registered Collector. (Two Collectors
+// are considered equal if their Describe method yields the same set of
+// descriptors.) Instead, the previously registered Collector is returned (which
+// is helpful if the new and previously registered Collectors are equal but not
+// identical, i.e. not pointers to the same object).
+//
+// As for Register, it is still not safe to call RegisterOrGet with the same
+// Collector multiple times concurrently.
+func RegisterOrGet(m Collector) (Collector, error) {
+ return defRegistry.RegisterOrGet(m)
+}
+
+// MustRegisterOrGet works like RegisterOrGet but panics where RegisterOrGet would
+// have returned an error.
+func MustRegisterOrGet(m Collector) Collector {
+ existing, err := RegisterOrGet(m)
+ if err != nil {
+ panic(err)
+ }
+ return existing
+}
+
+// Unregister unregisters the Collector that equals the Collector passed in as
+// an argument. (Two Collectors are considered equal if their Describe method
+// yields the same set of descriptors.) The function returns whether a Collector
+// was unregistered.
+func Unregister(c Collector) bool {
+ return defRegistry.Unregister(c)
+}
+
+// SetMetricFamilyInjectionHook sets a function that is called whenever metrics
+// are collected. The hook function must be set before metrics collection begins
+// (i.e. call SetMetricFamilyInjectionHook before setting the HTTP handler.) The
+// MetricFamily protobufs returned by the hook function are merged with the
+// metrics collected in the usual way.
+//
+// This is a way to directly inject MetricFamily protobufs managed and owned by
+// the caller. The caller has full responsibility. As no registration of the
+// injected metrics has happened, there is no descriptor to check against, and
+// there are no registration-time checks. If collect-time checks are disabled
+// (see function EnableCollectChecks), no sanity checks are performed on the
+// returned protobufs at all. If collect-checks are enabled, type and uniqueness
+// checks are performed, but no further consistency checks (which would require
+// knowledge of a metric descriptor).
+//
+// Sorting concerns: The caller is responsible for sorting the label pairs in
+// each metric. However, the metrics themselves will be sorted by the registry,
+// as sorting is required anyway after merging with the metric families
+// collected conventionally.
+//
+// The function must be callable at any time and concurrently.
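+//
+// A minimal sketch of a hook injecting one caller-managed gauge (all names
+// and values are illustrative; dto and proto refer to the packages imported
+// by this file):
+//
+//	prometheus.SetMetricFamilyInjectionHook(func() []*dto.MetricFamily {
+//		return []*dto.MetricFamily{{
+//			Name: proto.String("example_injected_metric"),
+//			Help: proto.String("An externally managed metric."),
+//			Type: dto.MetricType_GAUGE.Enum(),
+//			Metric: []*dto.Metric{{
+//				Gauge: &dto.Gauge{Value: proto.Float64(1)},
+//			}},
+//		}}
+//	})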
+func SetMetricFamilyInjectionHook(hook func() []*dto.MetricFamily) {
+ defRegistry.metricFamilyInjectionHook = hook
+}
+
+// PanicOnCollectError sets the behavior whether a panic is caused upon an error
+// while metrics are collected and served to the HTTP endpoint. By default, an
+// internal server error (status code 500) is served with an error message.
+func PanicOnCollectError(b bool) {
+ defRegistry.panicOnCollectError = b
+}
+
+// EnableCollectChecks enables (or disables) additional consistency checks
+// during metrics collection. These additional checks are not enabled by default
+// because they inflict a performance penalty and the errors they check for can
+// only happen if the used Metric and Collector types have internal programming
+// errors. It can be helpful to enable these checks while working with custom
+// Collectors or Metrics whose correctness is not well established yet.
+func EnableCollectChecks(b bool) {
+ defRegistry.collectChecksEnabled = b
+}
+
+// encoder is a function that writes a dto.MetricFamily to an io.Writer in a
+// certain encoding. It returns the number of bytes written and any error
+// encountered. Note that pbutil.WriteDelimited and pbutil.MetricFamilyToText
+// are encoders.
+type encoder func(io.Writer, *dto.MetricFamily) (int, error)
+
+type registry struct {
+ mtx sync.RWMutex
+ collectorsByID map[uint64]Collector // ID is a hash of the descIDs.
+ descIDs map[uint64]struct{}
+ dimHashesByName map[string]uint64
+ bufPool chan *bytes.Buffer
+ metricFamilyPool chan *dto.MetricFamily
+ metricPool chan *dto.Metric
+ metricFamilyInjectionHook func() []*dto.MetricFamily
+
+ panicOnCollectError, collectChecksEnabled bool
+}
+
+func (r *registry) Register(c Collector) (Collector, error) {
+ descChan := make(chan *Desc, capDescChan)
+ go func() {
+ c.Describe(descChan)
+ close(descChan)
+ }()
+
+ newDescIDs := map[uint64]struct{}{}
+ newDimHashesByName := map[string]uint64{}
+ var collectorID uint64 // Just a sum of all desc IDs.
+ var duplicateDescErr error
+
+ r.mtx.Lock()
+ defer r.mtx.Unlock()
+ // Conduct various tests...
+ for desc := range descChan {
+
+ // Is the descriptor valid at all?
+ if desc.err != nil {
+ return c, fmt.Errorf("descriptor %s is invalid: %s", desc, desc.err)
+ }
+
+ // Is the descID unique?
+ // (In other words: Is the fqName + constLabel combination unique?)
+ if _, exists := r.descIDs[desc.id]; exists {
+ duplicateDescErr = fmt.Errorf("descriptor %s already exists with the same fully-qualified name and const label values", desc)
+ }
+ // If it is not a duplicate desc in this collector, add it to
+ // the collectorID. (We allow duplicate descs within the same
+ // collector, but their existence must be a no-op.)
+ if _, exists := newDescIDs[desc.id]; !exists {
+ newDescIDs[desc.id] = struct{}{}
+ collectorID += desc.id
+ }
+
+ // Are all the label names and the help string consistent with
+ // previous descriptors of the same name?
+ // First check existing descriptors...
+ if dimHash, exists := r.dimHashesByName[desc.fqName]; exists {
+ if dimHash != desc.dimHash {
+ return nil, fmt.Errorf("a previously registered descriptor with the same fully-qualified name as %s has different label names or a different help string", desc)
+ }
+ } else {
+ // ...then check the new descriptors already seen.
+ if dimHash, exists := newDimHashesByName[desc.fqName]; exists {
+ if dimHash != desc.dimHash {
+ return nil, fmt.Errorf("descriptors reported by collector have inconsistent label names or help strings for the same fully-qualified name, offender is %s", desc)
+ }
+ } else {
+ newDimHashesByName[desc.fqName] = desc.dimHash
+ }
+ }
+ }
+ // Did anything happen at all?
+ if len(newDescIDs) == 0 {
+ return nil, errors.New("collector has no descriptors")
+ }
+ if existing, exists := r.collectorsByID[collectorID]; exists {
+ return existing, errAlreadyReg
+ }
+ // If the collectorID is new, but at least one of the descs existed
+ // before, we are in trouble.
+ if duplicateDescErr != nil {
+ return nil, duplicateDescErr
+ }
+
+ // Only after all tests have passed, actually register.
+ r.collectorsByID[collectorID] = c
+ for hash := range newDescIDs {
+ r.descIDs[hash] = struct{}{}
+ }
+ for name, dimHash := range newDimHashesByName {
+ r.dimHashesByName[name] = dimHash
+ }
+ return c, nil
+}
+
+func (r *registry) RegisterOrGet(m Collector) (Collector, error) {
+ existing, err := r.Register(m)
+ if err != nil && err != errAlreadyReg {
+ return nil, err
+ }
+ return existing, nil
+}
+
+func (r *registry) Unregister(c Collector) bool {
+ descChan := make(chan *Desc, capDescChan)
+ go func() {
+ c.Describe(descChan)
+ close(descChan)
+ }()
+
+ descIDs := map[uint64]struct{}{}
+ var collectorID uint64 // Just a sum of the desc IDs.
+ for desc := range descChan {
+ if _, exists := descIDs[desc.id]; !exists {
+ collectorID += desc.id
+ descIDs[desc.id] = struct{}{}
+ }
+ }
+
+ r.mtx.RLock()
+ if _, exists := r.collectorsByID[collectorID]; !exists {
+ r.mtx.RUnlock()
+ return false
+ }
+ r.mtx.RUnlock()
+
+ r.mtx.Lock()
+ defer r.mtx.Unlock()
+
+ delete(r.collectorsByID, collectorID)
+ for id := range descIDs {
+ delete(r.descIDs, id)
+ }
+ // dimHashesByName is left untouched as those must be consistent
+ // throughout the lifetime of a program.
+ return true
+}
+
+func (r *registry) Push(job, instance, pushURL, method string) error {
+ if !strings.Contains(pushURL, "://") {
+ pushURL = "http://" + pushURL
+ }
+ if strings.HasSuffix(pushURL, "/") {
+ pushURL = pushURL[:len(pushURL)-1]
+ }
+ pushURL = fmt.Sprintf("%s/metrics/jobs/%s", pushURL, url.QueryEscape(job))
+ if instance != "" {
+ pushURL += "/instances/" + url.QueryEscape(instance)
+ }
+ buf := r.getBuf()
+ defer r.giveBuf(buf)
+ if err := r.writePB(expfmt.NewEncoder(buf, expfmt.FmtProtoDelim)); err != nil {
+ if r.panicOnCollectError {
+ panic(err)
+ }
+ return err
+ }
+ req, err := http.NewRequest(method, pushURL, buf)
+ if err != nil {
+ return err
+ }
+ req.Header.Set(contentTypeHeader, DelimitedTelemetryContentType)
+ resp, err := http.DefaultClient.Do(req)
+ if err != nil {
+ return err
+ }
+ defer resp.Body.Close()
+ if resp.StatusCode != 202 {
+ return fmt.Errorf("unexpected status code %d while pushing to %s", resp.StatusCode, pushURL)
+ }
+ return nil
+}
+
+func (r *registry) ServeHTTP(w http.ResponseWriter, req *http.Request) {
+ contentType := expfmt.Negotiate(req.Header)
+ buf := r.getBuf()
+ defer r.giveBuf(buf)
+ writer, encoding := decorateWriter(req, buf)
+ if err := r.writePB(expfmt.NewEncoder(writer, contentType)); err != nil {
+ if r.panicOnCollectError {
+ panic(err)
+ }
+ http.Error(w, "An error has occurred:\n\n"+err.Error(), http.StatusInternalServerError)
+ return
+ }
+ if closer, ok := writer.(io.Closer); ok {
+ closer.Close()
+ }
+ header := w.Header()
+ header.Set(contentTypeHeader, string(contentType))
+ header.Set(contentLengthHeader, fmt.Sprint(buf.Len()))
+ if encoding != "" {
+ header.Set(contentEncodingHeader, encoding)
+ }
+ w.Write(buf.Bytes())
+}
+
+func (r *registry) writePB(encoder expfmt.Encoder) error {
+ var metricHashes map[uint64]struct{}
+ if r.collectChecksEnabled {
+ metricHashes = make(map[uint64]struct{})
+ }
+ metricChan := make(chan Metric, capMetricChan)
+ wg := sync.WaitGroup{}
+
+ r.mtx.RLock()
+ metricFamiliesByName := make(map[string]*dto.MetricFamily, len(r.dimHashesByName))
+
+ // Scatter.
+ // (Collectors could be complex and slow, so we call them all at once.)
+ wg.Add(len(r.collectorsByID))
+ go func() {
+ wg.Wait()
+ close(metricChan)
+ }()
+ for _, collector := range r.collectorsByID {
+ go func(collector Collector) {
+ defer wg.Done()
+ collector.Collect(metricChan)
+ }(collector)
+ }
+ r.mtx.RUnlock()
+
+ // Drain metricChan in case of premature return.
+ defer func() {
+ for _ = range metricChan {
+ }
+ }()
+
+ // Gather.
+ for metric := range metricChan {
+ // This could be done concurrently, too, but it would require locking
+ // of metricFamiliesByName (and of metricHashes if checks are
+ // enabled). Most likely not worth it.
+ desc := metric.Desc()
+ metricFamily, ok := metricFamiliesByName[desc.fqName]
+ if !ok {
+ metricFamily = r.getMetricFamily()
+ defer r.giveMetricFamily(metricFamily)
+ metricFamily.Name = proto.String(desc.fqName)
+ metricFamily.Help = proto.String(desc.help)
+ metricFamiliesByName[desc.fqName] = metricFamily
+ }
+ dtoMetric := r.getMetric()
+ defer r.giveMetric(dtoMetric)
+ if err := metric.Write(dtoMetric); err != nil {
+ // TODO: Consider different means of error reporting so
+ // that a single erroneous metric could be skipped
+ // instead of blowing up the whole collection.
+ return fmt.Errorf("error collecting metric %v: %s", desc, err)
+ }
+ switch {
+ case metricFamily.Type != nil:
+ // Type already set. We are good.
+ case dtoMetric.Gauge != nil:
+ metricFamily.Type = dto.MetricType_GAUGE.Enum()
+ case dtoMetric.Counter != nil:
+ metricFamily.Type = dto.MetricType_COUNTER.Enum()
+ case dtoMetric.Summary != nil:
+ metricFamily.Type = dto.MetricType_SUMMARY.Enum()
+ case dtoMetric.Untyped != nil:
+ metricFamily.Type = dto.MetricType_UNTYPED.Enum()
+ case dtoMetric.Histogram != nil:
+ metricFamily.Type = dto.MetricType_HISTOGRAM.Enum()
+ default:
+ return fmt.Errorf("empty metric collected: %s", dtoMetric)
+ }
+ if r.collectChecksEnabled {
+ if err := r.checkConsistency(metricFamily, dtoMetric, desc, metricHashes); err != nil {
+ return err
+ }
+ }
+ metricFamily.Metric = append(metricFamily.Metric, dtoMetric)
+ }
+
+ if r.metricFamilyInjectionHook != nil {
+ for _, mf := range r.metricFamilyInjectionHook() {
+ existingMF, exists := metricFamiliesByName[mf.GetName()]
+ if !exists {
+ metricFamiliesByName[mf.GetName()] = mf
+ if r.collectChecksEnabled {
+ for _, m := range mf.Metric {
+ if err := r.checkConsistency(mf, m, nil, metricHashes); err != nil {
+ return err
+ }
+ }
+ }
+ continue
+ }
+ for _, m := range mf.Metric {
+ if r.collectChecksEnabled {
+ if err := r.checkConsistency(existingMF, m, nil, metricHashes); err != nil {
+ return err
+ }
+ }
+ existingMF.Metric = append(existingMF.Metric, m)
+ }
+ }
+ }
+
+ // Now that MetricFamilies are all set, sort their Metrics
+ // lexicographically by their label values.
+ for _, mf := range metricFamiliesByName {
+ sort.Sort(metricSorter(mf.Metric))
+ }
+
+ // Write out MetricFamilies sorted by their name.
+ names := make([]string, 0, len(metricFamiliesByName))
+ for name := range metricFamiliesByName {
+ names = append(names, name)
+ }
+ sort.Strings(names)
+
+ for _, name := range names {
+ if err := encoder.Encode(metricFamiliesByName[name]); err != nil {
+ return err
+ }
+ }
+ return nil
+}
+
+func (r *registry) checkConsistency(metricFamily *dto.MetricFamily, dtoMetric *dto.Metric, desc *Desc, metricHashes map[uint64]struct{}) error {
+
+ // Type consistency with metric family.
+ if metricFamily.GetType() == dto.MetricType_GAUGE && dtoMetric.Gauge == nil ||
+ metricFamily.GetType() == dto.MetricType_COUNTER && dtoMetric.Counter == nil ||
+ metricFamily.GetType() == dto.MetricType_SUMMARY && dtoMetric.Summary == nil ||
+ metricFamily.GetType() == dto.MetricType_HISTOGRAM && dtoMetric.Histogram == nil ||
+ metricFamily.GetType() == dto.MetricType_UNTYPED && dtoMetric.Untyped == nil {
+ return fmt.Errorf(
+ "collected metric %s %s is not a %s",
+ metricFamily.GetName(), dtoMetric, metricFamily.GetType(),
+ )
+ }
+
+ // Is the metric unique (i.e. no other metric with the same name and the same label values)?
+ h := hashNew()
+ h = hashAdd(h, metricFamily.GetName())
+ h = hashAddByte(h, separatorByte)
+ // Make sure label pairs are sorted. We depend on it for the consistency
+ // check. Label pairs must be sorted by contract. But the point of this
+ // method is to check for contract violations. So we better do the sort
+ // now.
+ sort.Sort(LabelPairSorter(dtoMetric.Label))
+ for _, lp := range dtoMetric.Label {
+ h = hashAdd(h, lp.GetValue())
+ h = hashAddByte(h, separatorByte)
+ }
+ if _, exists := metricHashes[h]; exists {
+ return fmt.Errorf(
+ "collected metric %s %s was collected before with the same name and label values",
+ metricFamily.GetName(), dtoMetric,
+ )
+ }
+ metricHashes[h] = struct{}{}
+
+ if desc == nil {
+ return nil // Nothing left to check if we have no desc.
+ }
+
+ // Desc consistency with metric family.
+ if metricFamily.GetName() != desc.fqName {
+ return fmt.Errorf(
+ "collected metric %s %s has name %q but should have %q",
+ metricFamily.GetName(), dtoMetric, metricFamily.GetName(), desc.fqName,
+ )
+ }
+ if metricFamily.GetHelp() != desc.help {
+ return fmt.Errorf(
+ "collected metric %s %s has help %q but should have %q",
+ metricFamily.GetName(), dtoMetric, metricFamily.GetHelp(), desc.help,
+ )
+ }
+
+ // Is the desc consistent with the content of the metric?
+ lpsFromDesc := make([]*dto.LabelPair, 0, len(dtoMetric.Label))
+ lpsFromDesc = append(lpsFromDesc, desc.constLabelPairs...)
+ for _, l := range desc.variableLabels {
+ lpsFromDesc = append(lpsFromDesc, &dto.LabelPair{
+ Name: proto.String(l),
+ })
+ }
+ if len(lpsFromDesc) != len(dtoMetric.Label) {
+ return fmt.Errorf(
+ "labels in collected metric %s %s are inconsistent with descriptor %s",
+ metricFamily.GetName(), dtoMetric, desc,
+ )
+ }
+ sort.Sort(LabelPairSorter(lpsFromDesc))
+ for i, lpFromDesc := range lpsFromDesc {
+ lpFromMetric := dtoMetric.Label[i]
+ if lpFromDesc.GetName() != lpFromMetric.GetName() ||
+ lpFromDesc.Value != nil && lpFromDesc.GetValue() != lpFromMetric.GetValue() {
+ return fmt.Errorf(
+ "labels in collected metric %s %s are inconsistent with descriptor %s",
+ metricFamily.GetName(), dtoMetric, desc,
+ )
+ }
+ }
+
+ r.mtx.RLock() // Remaining checks need the read lock.
+ defer r.mtx.RUnlock()
+
+ // Is the desc registered?
+ if _, exist := r.descIDs[desc.id]; !exist {
+ return fmt.Errorf(
+ "collected metric %s %s with unregistered descriptor %s",
+ metricFamily.GetName(), dtoMetric, desc,
+ )
+ }
+
+ return nil
+}
+
+func (r *registry) getBuf() *bytes.Buffer {
+ select {
+ case buf := <-r.bufPool:
+ return buf
+ default:
+ return &bytes.Buffer{}
+ }
+}
+
+func (r *registry) giveBuf(buf *bytes.Buffer) {
+ buf.Reset()
+ select {
+ case r.bufPool <- buf:
+ default:
+ }
+}
+
+func (r *registry) getMetricFamily() *dto.MetricFamily {
+ select {
+ case mf := <-r.metricFamilyPool:
+ return mf
+ default:
+ return &dto.MetricFamily{}
+ }
+}
+
+func (r *registry) giveMetricFamily(mf *dto.MetricFamily) {
+ mf.Reset()
+ select {
+ case r.metricFamilyPool <- mf:
+ default:
+ }
+}
+
+func (r *registry) getMetric() *dto.Metric {
+ select {
+ case m := <-r.metricPool:
+ return m
+ default:
+ return &dto.Metric{}
+ }
+}
+
+func (r *registry) giveMetric(m *dto.Metric) {
+ m.Reset()
+ select {
+ case r.metricPool <- m:
+ default:
+ }
+}
+
+func newRegistry() *registry {
+ return &registry{
+ collectorsByID: map[uint64]Collector{},
+ descIDs: map[uint64]struct{}{},
+ dimHashesByName: map[string]uint64{},
+ bufPool: make(chan *bytes.Buffer, numBufs),
+ metricFamilyPool: make(chan *dto.MetricFamily, numMetricFamilies),
+ metricPool: make(chan *dto.Metric, numMetrics),
+ }
+}
+
+func newDefaultRegistry() *registry {
+ r := newRegistry()
+ r.Register(NewProcessCollector(os.Getpid(), ""))
+ r.Register(NewGoCollector())
+ return r
+}
+
+// decorateWriter wraps a writer to handle gzip compression if requested. It
+// returns the decorated writer and the appropriate "Content-Encoding" header
+// (which is empty if no compression is enabled).
+func decorateWriter(request *http.Request, writer io.Writer) (io.Writer, string) {
+ header := request.Header.Get(acceptEncodingHeader)
+ parts := strings.Split(header, ",")
+ for _, part := range parts {
+ part := strings.TrimSpace(part)
+ if part == "gzip" || strings.HasPrefix(part, "gzip;") {
+ return gzip.NewWriter(writer), "gzip"
+ }
+ }
+ return writer, ""
+}
+
+type metricSorter []*dto.Metric
+
+func (s metricSorter) Len() int {
+ return len(s)
+}
+
+func (s metricSorter) Swap(i, j int) {
+ s[i], s[j] = s[j], s[i]
+}
+
+func (s metricSorter) Less(i, j int) bool {
+ if len(s[i].Label) != len(s[j].Label) {
+ // This should not happen. The metrics are
+ // inconsistent. However, we have to deal with it, as
+ // people might use custom collectors or metric family injection
+ // to create inconsistent metrics. So let's simply compare the
+ // number of labels in this case. That will still yield
+ // reproducible sorting.
+ return len(s[i].Label) < len(s[j].Label)
+ }
+ for n, lp := range s[i].Label {
+ vi := lp.GetValue()
+ vj := s[j].Label[n].GetValue()
+ if vi != vj {
+ return vi < vj
+ }
+ }
+
+ // We should never arrive here. Multiple metrics with the same
+ // label set in the same scrape will lead to undefined ingestion
+ // behavior. However, as above, we have to provide stable sorting
+ // here, even for inconsistent metrics. So sort equal metrics
+ // by their timestamp, with missing timestamps (implying "now")
+ // coming last.
+ if s[i].TimestampMs == nil {
+ return false
+ }
+ if s[j].TimestampMs == nil {
+ return true
+ }
+ return s[i].GetTimestampMs() < s[j].GetTimestampMs()
+}
diff --git a/vendor/github.com/prometheus/client_golang/prometheus/summary.go b/vendor/github.com/prometheus/client_golang/prometheus/summary.go
new file mode 100644
index 00000000..eb849616
--- /dev/null
+++ b/vendor/github.com/prometheus/client_golang/prometheus/summary.go
@@ -0,0 +1,538 @@
+// Copyright 2014 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package prometheus
+
+import (
+ "fmt"
+ "math"
+ "sort"
+ "sync"
+ "time"
+
+ "github.com/beorn7/perks/quantile"
+ "github.com/golang/protobuf/proto"
+
+ dto "github.com/prometheus/client_model/go"
+)
+
+// quantileLabel is used for the label that defines the quantile in a
+// summary.
+const quantileLabel = "quantile"
+
+// A Summary captures individual observations from an event or sample stream and
+// summarizes them in a manner similar to traditional summary statistics: 1. sum
+// of observations, 2. observation count, 3. rank estimations.
+//
+// A typical use-case is the observation of request latencies. By default, a
+// Summary provides the median, the 90th and the 99th percentile of the latency
+// as rank estimations.
+//
+// Note that the rank estimations cannot be aggregated in a meaningful way with
+// the Prometheus query language (i.e. you cannot average or add them). If you
+// need aggregatable quantiles (e.g. you want the 99th percentile latency of all
+// queries served across all instances of a service), consider the Histogram
+// metric type. See the Prometheus documentation for more details.
+//
+// To create Summary instances, use NewSummary.
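+//
+// A minimal usage sketch (the metric name and observed value are
+// illustrative):
+//
+//	latency := prometheus.NewSummary(prometheus.SummaryOpts{
+//		Name: "example_rpc_duration_seconds",
+//		Help: "An example RPC latency distribution.",
+//	})
+//	prometheus.MustRegister(latency)
+//	latency.Observe(0.31)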
+type Summary interface {
+ Metric
+ Collector
+
+ // Observe adds a single observation to the summary.
+ Observe(float64)
+}
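+
+// A minimal usage sketch from a user's perspective (illustrative names only;
+// registration and exposition via an HTTP handler are assumed to be set up
+// elsewhere):
+//
+//    reqDuration := prometheus.NewSummary(prometheus.SummaryOpts{
+//        Name: "request_duration_seconds",
+//        Help: "Request latency in seconds.",
+//    })
+//    prometheus.MustRegister(reqDuration)
+//    reqDuration.Observe(0.42) // one observation, in seconds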
+
+var (
+ // DefObjectives are the default Summary quantile values.
+ DefObjectives = map[float64]float64{0.5: 0.05, 0.9: 0.01, 0.99: 0.001}
+
+ errQuantileLabelNotAllowed = fmt.Errorf(
+ "%q is not allowed as label name in summaries", quantileLabel,
+ )
+)
+
+// Default values for SummaryOpts.
+const (
+ // DefMaxAge is the default duration for which observations stay
+ // relevant.
+ DefMaxAge time.Duration = 10 * time.Minute
+ // DefAgeBuckets is the default number of buckets used to calculate the
+ // age of observations.
+ DefAgeBuckets = 5
+ // DefBufCap is the standard buffer size for collecting Summary observations.
+ DefBufCap = 500
+)
+
+// SummaryOpts bundles the options for creating a Summary metric. It is
+// mandatory to set Name and Help to a non-empty string. All other fields are
+// optional and can safely be left at their zero value.
+type SummaryOpts struct {
+ // Namespace, Subsystem, and Name are components of the fully-qualified
+ // name of the Summary (created by joining these components with
+ // "_"). Only Name is mandatory, the others merely help structuring the
+ // name. Note that the fully-qualified name of the Summary must be a
+ // valid Prometheus metric name.
+ Namespace string
+ Subsystem string
+ Name string
+
+ // Help provides information about this Summary. Mandatory!
+ //
+ // Metrics with the same fully-qualified name must have the same Help
+ // string.
+ Help string
+
+ // ConstLabels are used to attach fixed labels to this
+ // Summary. Summaries with the same fully-qualified name must have the
+ // same label names in their ConstLabels.
+ //
+ // Note that in most cases, labels have a value that varies during the
+ // lifetime of a process. Those labels are usually managed with a
+ // SummaryVec. ConstLabels serve only special purposes. One is for the
+ // special case where the value of a label does not change during the
+ // lifetime of a process, e.g. if the revision of the running binary is
+ // put into a label. Another, more advanced purpose is if more than one
+ // Collector needs to collect Summaries with the same fully-qualified
+ // name. In that case, those Summaries must differ in the values of
+ // their ConstLabels. See the Collector examples.
+ //
+ // If the value of a label never changes (not even between binaries),
+ // that label most likely should not be a label at all (but part of the
+ // metric name).
+ ConstLabels Labels
+
+ // Objectives defines the quantile rank estimates with their respective
+ // absolute error. If Objectives[q] = e, then the value reported
+ // for q will be the φ-quantile value for some φ between q-e and q+e.
+ // The default value is DefObjectives.
+ Objectives map[float64]float64
+
+ // MaxAge defines the duration for which an observation stays relevant
+ // for the summary. Must be positive. The default value is DefMaxAge.
+ MaxAge time.Duration
+
+ // AgeBuckets is the number of buckets used to exclude observations that
+ // are older than MaxAge from the summary. A higher number has a
+ // resource penalty, so only increase it if the higher resolution is
+ // really required. For very high observation rates, you might want to
+ // reduce the number of age buckets. With only one age bucket, you will
+ // effectively see a complete reset of the summary each time MaxAge has
+ // passed. The default value is DefAgeBuckets.
+ AgeBuckets uint32
+
+ // BufCap defines the default sample stream buffer size. The default
+ // value of DefBufCap should suffice for most uses. If there is a need
+ // to increase the value, a multiple of 500 is recommended (because that
+ // is the internal buffer size of the underlying package
+ // "github.com/bmizerany/perks/quantile").
+ BufCap uint32
+}
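+
+// As an illustration of the Objectives semantics above: with
+// Objectives[0.99] = 0.001, the reported 0.99-quantile is in fact the
+// φ-quantile for some φ in [0.989, 0.991]. A hypothetical SummaryOpts value
+// might look like this:
+//
+//    opts := prometheus.SummaryOpts{
+//        Name:       "rpc_duration_seconds",
+//        Help:       "RPC latency distribution in seconds.",
+//        Objectives: map[float64]float64{0.5: 0.05, 0.9: 0.01, 0.99: 0.001},
+//    }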
+
+// TODO: The sliding-window decay algorithm is problematic: the Merge method of
+// perks/quantile is not working as advertised - and it might be unfixable, as
+// the underlying algorithm is apparently not capable of merging summaries in
+// the first place. To avoid using Merge, we are currently adding observations
+// to _each_ age bucket, i.e. the effort to add a sample is essentially
+// multiplied by the number of age buckets. When rotating age buckets, we empty
+// the previous head stream. On scrape time, we simply take the quantiles from
+// the head stream (no merging required). Result: More effort at observation
+// time, less effort at scrape time, which is exactly the opposite of what we
+// try to accomplish, but at least the results are correct.
+//
+// The quite elegant previous contraption to merge the age buckets efficiently
+// on scrape time (see code up to commit 6b9530d72ea715f0ba612c0120e6e09fbf1d49d0)
+// can't be used anymore.
+
+// NewSummary creates a new Summary based on the provided SummaryOpts.
+func NewSummary(opts SummaryOpts) Summary {
+ return newSummary(
+ NewDesc(
+ BuildFQName(opts.Namespace, opts.Subsystem, opts.Name),
+ opts.Help,
+ nil,
+ opts.ConstLabels,
+ ),
+ opts,
+ )
+}
+
+func newSummary(desc *Desc, opts SummaryOpts, labelValues ...string) Summary {
+ if len(desc.variableLabels) != len(labelValues) {
+ panic(errInconsistentCardinality)
+ }
+
+ for _, n := range desc.variableLabels {
+ if n == quantileLabel {
+ panic(errQuantileLabelNotAllowed)
+ }
+ }
+ for _, lp := range desc.constLabelPairs {
+ if lp.GetName() == quantileLabel {
+ panic(errQuantileLabelNotAllowed)
+ }
+ }
+
+ if len(opts.Objectives) == 0 {
+ opts.Objectives = DefObjectives
+ }
+
+ if opts.MaxAge < 0 {
+ panic(fmt.Errorf("illegal max age MaxAge=%v", opts.MaxAge))
+ }
+ if opts.MaxAge == 0 {
+ opts.MaxAge = DefMaxAge
+ }
+
+ if opts.AgeBuckets == 0 {
+ opts.AgeBuckets = DefAgeBuckets
+ }
+
+ if opts.BufCap == 0 {
+ opts.BufCap = DefBufCap
+ }
+
+ s := &summary{
+ desc: desc,
+
+ objectives: opts.Objectives,
+ sortedObjectives: make([]float64, 0, len(opts.Objectives)),
+
+ labelPairs: makeLabelPairs(desc, labelValues),
+
+ hotBuf: make([]float64, 0, opts.BufCap),
+ coldBuf: make([]float64, 0, opts.BufCap),
+ streamDuration: opts.MaxAge / time.Duration(opts.AgeBuckets),
+ }
+ s.headStreamExpTime = time.Now().Add(s.streamDuration)
+ s.hotBufExpTime = s.headStreamExpTime
+
+ for i := uint32(0); i < opts.AgeBuckets; i++ {
+ s.streams = append(s.streams, s.newStream())
+ }
+ s.headStream = s.streams[0]
+
+ for qu := range s.objectives {
+ s.sortedObjectives = append(s.sortedObjectives, qu)
+ }
+ sort.Float64s(s.sortedObjectives)
+
+ s.Init(s) // Init self-collection.
+ return s
+}
+
+type summary struct {
+ SelfCollector
+
+ bufMtx sync.Mutex // Protects hotBuf and hotBufExpTime.
+ mtx sync.Mutex // Protects every other moving part.
+ // Lock bufMtx before mtx if both are needed.
+
+ desc *Desc
+
+ objectives map[float64]float64
+ sortedObjectives []float64
+
+ labelPairs []*dto.LabelPair
+
+ sum float64
+ cnt uint64
+
+ hotBuf, coldBuf []float64
+
+ streams []*quantile.Stream
+ streamDuration time.Duration
+ headStream *quantile.Stream
+ headStreamIdx int
+ headStreamExpTime, hotBufExpTime time.Time
+}
+
+func (s *summary) Desc() *Desc {
+ return s.desc
+}
+
+func (s *summary) Observe(v float64) {
+ s.bufMtx.Lock()
+ defer s.bufMtx.Unlock()
+
+ now := time.Now()
+ if now.After(s.hotBufExpTime) {
+ s.asyncFlush(now)
+ }
+ s.hotBuf = append(s.hotBuf, v)
+ if len(s.hotBuf) == cap(s.hotBuf) {
+ s.asyncFlush(now)
+ }
+}
+
+func (s *summary) Write(out *dto.Metric) error {
+ sum := &dto.Summary{}
+ qs := make([]*dto.Quantile, 0, len(s.objectives))
+
+ s.bufMtx.Lock()
+ s.mtx.Lock()
+ // Swap bufs even if hotBuf is empty to set new hotBufExpTime.
+ s.swapBufs(time.Now())
+ s.bufMtx.Unlock()
+
+ s.flushColdBuf()
+ sum.SampleCount = proto.Uint64(s.cnt)
+ sum.SampleSum = proto.Float64(s.sum)
+
+ for _, rank := range s.sortedObjectives {
+ var q float64
+ if s.headStream.Count() == 0 {
+ q = math.NaN()
+ } else {
+ q = s.headStream.Query(rank)
+ }
+ qs = append(qs, &dto.Quantile{
+ Quantile: proto.Float64(rank),
+ Value: proto.Float64(q),
+ })
+ }
+
+ s.mtx.Unlock()
+
+ if len(qs) > 0 {
+ sort.Sort(quantSort(qs))
+ }
+ sum.Quantile = qs
+
+ out.Summary = sum
+ out.Label = s.labelPairs
+ return nil
+}
+
+func (s *summary) newStream() *quantile.Stream {
+ return quantile.NewTargeted(s.objectives)
+}
+
+// asyncFlush needs bufMtx locked.
+func (s *summary) asyncFlush(now time.Time) {
+ s.mtx.Lock()
+ s.swapBufs(now)
+
+ // Unblock the original goroutine that was responsible for the mutation
+ // that triggered the compaction. But hold onto the global non-buffer
+ // state mutex until the operation finishes.
+ go func() {
+ s.flushColdBuf()
+ s.mtx.Unlock()
+ }()
+}
+
+// maybeRotateStreams needs mtx AND bufMtx locked.
+func (s *summary) maybeRotateStreams() {
+ for !s.hotBufExpTime.Equal(s.headStreamExpTime) {
+ s.headStream.Reset()
+ s.headStreamIdx++
+ if s.headStreamIdx >= len(s.streams) {
+ s.headStreamIdx = 0
+ }
+ s.headStream = s.streams[s.headStreamIdx]
+ s.headStreamExpTime = s.headStreamExpTime.Add(s.streamDuration)
+ }
+}
+
+// flushColdBuf needs mtx locked.
+func (s *summary) flushColdBuf() {
+ for _, v := range s.coldBuf {
+ for _, stream := range s.streams {
+ stream.Insert(v)
+ }
+ s.cnt++
+ s.sum += v
+ }
+ s.coldBuf = s.coldBuf[0:0]
+ s.maybeRotateStreams()
+}
+
+// swapBufs needs mtx AND bufMtx locked, coldBuf must be empty.
+func (s *summary) swapBufs(now time.Time) {
+ if len(s.coldBuf) != 0 {
+ panic("coldBuf is not empty")
+ }
+ s.hotBuf, s.coldBuf = s.coldBuf, s.hotBuf
+ // hotBuf is now empty and gets new expiration set.
+ for now.After(s.hotBufExpTime) {
+ s.hotBufExpTime = s.hotBufExpTime.Add(s.streamDuration)
+ }
+}
+
+type quantSort []*dto.Quantile
+
+func (s quantSort) Len() int {
+ return len(s)
+}
+
+func (s quantSort) Swap(i, j int) {
+ s[i], s[j] = s[j], s[i]
+}
+
+func (s quantSort) Less(i, j int) bool {
+ return s[i].GetQuantile() < s[j].GetQuantile()
+}
+
+// SummaryVec is a Collector that bundles a set of Summaries that all share the
+// same Desc, but have different values for their variable labels. This is used
+// if you want to count the same thing partitioned by various dimensions
+// (e.g. HTTP request latencies, partitioned by status code and method). Create
+// instances with NewSummaryVec.
+type SummaryVec struct {
+ MetricVec
+}
+
+// NewSummaryVec creates a new SummaryVec based on the provided SummaryOpts and
+// partitioned by the given label names. At least one label name must be
+// provided.
+func NewSummaryVec(opts SummaryOpts, labelNames []string) *SummaryVec {
+ desc := NewDesc(
+ BuildFQName(opts.Namespace, opts.Subsystem, opts.Name),
+ opts.Help,
+ labelNames,
+ opts.ConstLabels,
+ )
+ return &SummaryVec{
+ MetricVec: MetricVec{
+ children: map[uint64]Metric{},
+ desc: desc,
+ newMetric: func(lvs ...string) Metric {
+ return newSummary(desc, opts, lvs...)
+ },
+ },
+ }
+}
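+
+// A short, illustrative sketch of how a SummaryVec is typically used (metric
+// and label names are made up):
+//
+//    latencies := prometheus.NewSummaryVec(
+//        prometheus.SummaryOpts{
+//            Name: "http_request_duration_seconds",
+//            Help: "HTTP request latencies, partitioned by method and code.",
+//        },
+//        []string{"method", "code"},
+//    )
+//    prometheus.MustRegister(latencies)
+//    latencies.WithLabelValues("GET", "200").Observe(0.021)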
+
+// GetMetricWithLabelValues replaces the method of the same name in
+// MetricVec. The difference is that this method returns a Summary and not a
+// Metric so that no type conversion is required.
+func (m *SummaryVec) GetMetricWithLabelValues(lvs ...string) (Summary, error) {
+ metric, err := m.MetricVec.GetMetricWithLabelValues(lvs...)
+ if metric != nil {
+ return metric.(Summary), err
+ }
+ return nil, err
+}
+
+// GetMetricWith replaces the method of the same name in MetricVec. The
+// difference is that this method returns a Summary and not a Metric so that no
+// type conversion is required.
+func (m *SummaryVec) GetMetricWith(labels Labels) (Summary, error) {
+ metric, err := m.MetricVec.GetMetricWith(labels)
+ if metric != nil {
+ return metric.(Summary), err
+ }
+ return nil, err
+}
+
+// WithLabelValues works as GetMetricWithLabelValues, but panics where
+// GetMetricWithLabelValues would have returned an error. By not returning an
+// error, WithLabelValues allows shortcuts like
+// myVec.WithLabelValues("404", "GET").Observe(42.21)
+func (m *SummaryVec) WithLabelValues(lvs ...string) Summary {
+ return m.MetricVec.WithLabelValues(lvs...).(Summary)
+}
+
+// With works as GetMetricWith, but panics where GetMetricWith would have
+// returned an error. By not returning an error, With allows shortcuts like
+// myVec.With(Labels{"code": "404", "method": "GET"}).Observe(42.21)
+func (m *SummaryVec) With(labels Labels) Summary {
+ return m.MetricVec.With(labels).(Summary)
+}
+
+type constSummary struct {
+ desc *Desc
+ count uint64
+ sum float64
+ quantiles map[float64]float64
+ labelPairs []*dto.LabelPair
+}
+
+func (s *constSummary) Desc() *Desc {
+ return s.desc
+}
+
+func (s *constSummary) Write(out *dto.Metric) error {
+ sum := &dto.Summary{}
+ qs := make([]*dto.Quantile, 0, len(s.quantiles))
+
+ sum.SampleCount = proto.Uint64(s.count)
+ sum.SampleSum = proto.Float64(s.sum)
+
+ for rank, q := range s.quantiles {
+ qs = append(qs, &dto.Quantile{
+ Quantile: proto.Float64(rank),
+ Value: proto.Float64(q),
+ })
+ }
+
+ if len(qs) > 0 {
+ sort.Sort(quantSort(qs))
+ }
+ sum.Quantile = qs
+
+ out.Summary = sum
+ out.Label = s.labelPairs
+
+ return nil
+}
+
+// NewConstSummary returns a metric representing a Prometheus summary with fixed
+// values for the count, sum, and quantiles. As those parameters cannot be
+// changed, the returned value does not implement the Summary interface (but
+// only the Metric interface). Users of this package will not have much use for
+// it in regular operations. However, when implementing custom Collectors, it is
+// useful as a throw-away metric that is generated on the fly to send it to
+// Prometheus in the Collect method.
+//
+// quantiles maps ranks to quantile values. For example, a median latency of
+// 0.23s and a 99th percentile latency of 0.56s would be expressed as:
+// map[float64]float64{0.5: 0.23, 0.99: 0.56}
+//
+// NewConstSummary returns an error if the length of labelValues is not
+// consistent with the variable labels in Desc.
+func NewConstSummary(
+ desc *Desc,
+ count uint64,
+ sum float64,
+ quantiles map[float64]float64,
+ labelValues ...string,
+) (Metric, error) {
+ if len(desc.variableLabels) != len(labelValues) {
+ return nil, errInconsistentCardinality
+ }
+ return &constSummary{
+ desc: desc,
+ count: count,
+ sum: sum,
+ quantiles: quantiles,
+ labelPairs: makeLabelPairs(desc, labelValues),
+ }, nil
+}
+
+// MustNewConstSummary is a version of NewConstSummary that panics where
+// NewConstSummary would have returned an error.
+func MustNewConstSummary(
+ desc *Desc,
+ count uint64,
+ sum float64,
+ quantiles map[float64]float64,
+ labelValues ...string,
+) Metric {
+ m, err := NewConstSummary(desc, count, sum, quantiles, labelValues...)
+ if err != nil {
+ panic(err)
+ }
+ return m
+}
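+
+// Purely illustrative: inside a custom Collector's Collect method, a
+// throw-away summary could be emitted like this (desc and all numbers are
+// hypothetical, with desc assumed to have no variable labels):
+//
+//    ch <- prometheus.MustNewConstSummary(
+//        desc,
+//        4711, 403.42,
+//        map[float64]float64{0.5: 0.23, 0.99: 0.56},
+//    )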
diff --git a/vendor/github.com/prometheus/client_golang/prometheus/untyped.go b/vendor/github.com/prometheus/client_golang/prometheus/untyped.go
new file mode 100644
index 00000000..89b86ea9
--- /dev/null
+++ b/vendor/github.com/prometheus/client_golang/prometheus/untyped.go
@@ -0,0 +1,142 @@
+// Copyright 2014 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package prometheus
+
+// Untyped is a Metric that represents a single numerical value that can
+// arbitrarily go up and down.
+//
+// An Untyped metric works the same as a Gauge. The only difference is that no
+// type information is implied.
+//
+// To create Untyped instances, use NewUntyped.
+type Untyped interface {
+ Metric
+ Collector
+
+ // Set sets the Untyped metric to an arbitrary value.
+ Set(float64)
+ // Inc increments the Untyped metric by 1.
+ Inc()
+ // Dec decrements the Untyped metric by 1.
+ Dec()
+ // Add adds the given value to the Untyped metric. (The value can be
+ // negative, resulting in a decrease.)
+ Add(float64)
+ // Sub subtracts the given value from the Untyped metric. (The value can
+ // be negative, resulting in an increase.)
+ Sub(float64)
+}
+
+// UntypedOpts is an alias for Opts. See there for doc comments.
+type UntypedOpts Opts
+
+// NewUntyped creates a new Untyped metric from the provided UntypedOpts.
+func NewUntyped(opts UntypedOpts) Untyped {
+ return newValue(NewDesc(
+ BuildFQName(opts.Namespace, opts.Subsystem, opts.Name),
+ opts.Help,
+ nil,
+ opts.ConstLabels,
+ ), UntypedValue, 0)
+}
+
+// UntypedVec is a Collector that bundles a set of Untyped metrics that all
+// share the same Desc, but have different values for their variable
+// labels. This is used if you want to count the same thing partitioned by
+// various dimensions. Create instances with NewUntypedVec.
+type UntypedVec struct {
+ MetricVec
+}
+
+// NewUntypedVec creates a new UntypedVec based on the provided UntypedOpts and
+// partitioned by the given label names. At least one label name must be
+// provided.
+func NewUntypedVec(opts UntypedOpts, labelNames []string) *UntypedVec {
+ desc := NewDesc(
+ BuildFQName(opts.Namespace, opts.Subsystem, opts.Name),
+ opts.Help,
+ labelNames,
+ opts.ConstLabels,
+ )
+ return &UntypedVec{
+ MetricVec: MetricVec{
+ children: map[uint64]Metric{},
+ desc: desc,
+ newMetric: func(lvs ...string) Metric {
+ return newValue(desc, UntypedValue, 0, lvs...)
+ },
+ },
+ }
+}
+
+// GetMetricWithLabelValues replaces the method of the same name in
+// MetricVec. The difference is that this method returns an Untyped and not a
+// Metric so that no type conversion is required.
+func (m *UntypedVec) GetMetricWithLabelValues(lvs ...string) (Untyped, error) {
+ metric, err := m.MetricVec.GetMetricWithLabelValues(lvs...)
+ if metric != nil {
+ return metric.(Untyped), err
+ }
+ return nil, err
+}
+
+// GetMetricWith replaces the method of the same name in MetricVec. The
+// difference is that this method returns an Untyped and not a Metric so that no
+// type conversion is required.
+func (m *UntypedVec) GetMetricWith(labels Labels) (Untyped, error) {
+ metric, err := m.MetricVec.GetMetricWith(labels)
+ if metric != nil {
+ return metric.(Untyped), err
+ }
+ return nil, err
+}
+
+// WithLabelValues works as GetMetricWithLabelValues, but panics where
+// GetMetricWithLabelValues would have returned an error. By not returning an
+// error, WithLabelValues allows shortcuts like
+// myVec.WithLabelValues("404", "GET").Add(42)
+func (m *UntypedVec) WithLabelValues(lvs ...string) Untyped {
+ return m.MetricVec.WithLabelValues(lvs...).(Untyped)
+}
+
+// With works as GetMetricWith, but panics where GetMetricWith would have
+// returned an error. By not returning an error, With allows shortcuts like
+// myVec.With(Labels{"code": "404", "method": "GET"}).Add(42)
+func (m *UntypedVec) With(labels Labels) Untyped {
+ return m.MetricVec.With(labels).(Untyped)
+}
+
+// UntypedFunc is an Untyped whose value is determined at collect time by
+// calling a provided function.
+//
+// To create UntypedFunc instances, use NewUntypedFunc.
+type UntypedFunc interface {
+ Metric
+ Collector
+}
+
+// NewUntypedFunc creates a new UntypedFunc based on the provided
+// UntypedOpts. The value reported is determined by calling the given function
+// from within the Write method. Take into account that metric collection may
+// happen concurrently. If that results in concurrent calls to Write, like in
+// the case where an UntypedFunc is directly registered with Prometheus, the
+// provided function must be concurrency-safe.
+func NewUntypedFunc(opts UntypedOpts, function func() float64) UntypedFunc {
+ return newValueFunc(NewDesc(
+ BuildFQName(opts.Namespace, opts.Subsystem, opts.Name),
+ opts.Help,
+ nil,
+ opts.ConstLabels,
+ ), UntypedValue, function)
+}
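+
+// As a sketch only (names are made up): an UntypedFunc can expose a value that
+// is read at collect time, e.g. the length of some queue:
+//
+//    queueLength := prometheus.NewUntypedFunc(prometheus.UntypedOpts{
+//        Name: "worker_queue_length",
+//        Help: "Current length of the worker queue.",
+//    }, func() float64 {
+//        return float64(queue.Len()) // queue is assumed to be concurrency-safe
+//    })
+//    prometheus.MustRegister(queueLength)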
diff --git a/vendor/github.com/prometheus/client_golang/prometheus/value.go b/vendor/github.com/prometheus/client_golang/prometheus/value.go
new file mode 100644
index 00000000..b54ac11e
--- /dev/null
+++ b/vendor/github.com/prometheus/client_golang/prometheus/value.go
@@ -0,0 +1,234 @@
+// Copyright 2014 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package prometheus
+
+import (
+ "errors"
+ "fmt"
+ "math"
+ "sort"
+ "sync/atomic"
+
+ dto "github.com/prometheus/client_model/go"
+
+ "github.com/golang/protobuf/proto"
+)
+
+// ValueType is an enumeration of metric types that represent a simple value.
+type ValueType int
+
+// Possible values for the ValueType enum.
+const (
+ _ ValueType = iota
+ CounterValue
+ GaugeValue
+ UntypedValue
+)
+
+var errInconsistentCardinality = errors.New("inconsistent label cardinality")
+
+// value is a generic metric for simple values. It implements Metric, Collector,
+// Counter, Gauge, and Untyped. Its effective type is determined by
+// ValueType. This is a low-level building block used by the library to back the
+// implementations of Counter, Gauge, and Untyped.
+type value struct {
+	// valBits contains the bits of the represented float64 value. It has
+ // to go first in the struct to guarantee alignment for atomic
+ // operations. http://golang.org/pkg/sync/atomic/#pkg-note-BUG
+ valBits uint64
+
+ SelfCollector
+
+ desc *Desc
+ valType ValueType
+ labelPairs []*dto.LabelPair
+}
+
+// newValue returns a newly allocated value with the given Desc, ValueType,
+// sample value and label values. It panics if the number of label
+// values is different from the number of variable labels in Desc.
+func newValue(desc *Desc, valueType ValueType, val float64, labelValues ...string) *value {
+ if len(labelValues) != len(desc.variableLabels) {
+ panic(errInconsistentCardinality)
+ }
+ result := &value{
+ desc: desc,
+ valType: valueType,
+ valBits: math.Float64bits(val),
+ labelPairs: makeLabelPairs(desc, labelValues),
+ }
+ result.Init(result)
+ return result
+}
+
+func (v *value) Desc() *Desc {
+ return v.desc
+}
+
+func (v *value) Set(val float64) {
+ atomic.StoreUint64(&v.valBits, math.Float64bits(val))
+}
+
+func (v *value) Inc() {
+ v.Add(1)
+}
+
+func (v *value) Dec() {
+ v.Add(-1)
+}
+
+func (v *value) Add(val float64) {
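+	// Atomic float64 addition: retry a compare-and-swap on the raw bit
+	// pattern until no concurrent writer interfered between load and swap.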
+ for {
+ oldBits := atomic.LoadUint64(&v.valBits)
+ newBits := math.Float64bits(math.Float64frombits(oldBits) + val)
+ if atomic.CompareAndSwapUint64(&v.valBits, oldBits, newBits) {
+ return
+ }
+ }
+}
+
+func (v *value) Sub(val float64) {
+ v.Add(val * -1)
+}
+
+func (v *value) Write(out *dto.Metric) error {
+ val := math.Float64frombits(atomic.LoadUint64(&v.valBits))
+ return populateMetric(v.valType, val, v.labelPairs, out)
+}
+
+// valueFunc is a generic metric for simple values retrieved on collect time
+// from a function. It implements Metric and Collector. Its effective type is
+// determined by ValueType. This is a low-level building block used by the
+// library to back the implementations of CounterFunc, GaugeFunc, and
+// UntypedFunc.
+type valueFunc struct {
+ SelfCollector
+
+ desc *Desc
+ valType ValueType
+ function func() float64
+ labelPairs []*dto.LabelPair
+}
+
+// newValueFunc returns a newly allocated valueFunc with the given Desc and
+// ValueType. The value reported is determined by calling the given function
+// from within the Write method. Take into account that metric collection may
+// happen concurrently. If that results in concurrent calls to Write, like in
+// the case where a valueFunc is directly registered with Prometheus, the
+// provided function must be concurrency-safe.
+func newValueFunc(desc *Desc, valueType ValueType, function func() float64) *valueFunc {
+ result := &valueFunc{
+ desc: desc,
+ valType: valueType,
+ function: function,
+ labelPairs: makeLabelPairs(desc, nil),
+ }
+ result.Init(result)
+ return result
+}
+
+func (v *valueFunc) Desc() *Desc {
+ return v.desc
+}
+
+func (v *valueFunc) Write(out *dto.Metric) error {
+ return populateMetric(v.valType, v.function(), v.labelPairs, out)
+}
+
+// NewConstMetric returns a metric with one fixed value that cannot be
+// changed. Users of this package will not have much use for it in regular
+// operations. However, when implementing custom Collectors, it is useful as a
+// throw-away metric that is generated on the fly and sent to Prometheus in
+// the Collect method. NewConstMetric returns an error if the length of
+// labelValues is not consistent with the variable labels in Desc.
+func NewConstMetric(desc *Desc, valueType ValueType, value float64, labelValues ...string) (Metric, error) {
+ if len(desc.variableLabels) != len(labelValues) {
+ return nil, errInconsistentCardinality
+ }
+ return &constMetric{
+ desc: desc,
+ valType: valueType,
+ val: value,
+ labelPairs: makeLabelPairs(desc, labelValues),
+ }, nil
+}
+
+// MustNewConstMetric is a version of NewConstMetric that panics where
+// NewConstMetric would have returned an error.
+func MustNewConstMetric(desc *Desc, valueType ValueType, value float64, labelValues ...string) Metric {
+ m, err := NewConstMetric(desc, valueType, value, labelValues...)
+ if err != nil {
+ panic(err)
+ }
+ return m
+}
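+
+// Illustrative only: a custom Collector's Collect method could emit a fixed
+// gauge value like this (desc is assumed to have exactly one variable label):
+//
+//    ch <- prometheus.MustNewConstMetric(
+//        desc, prometheus.GaugeValue, 42, "some-label-value",
+//    )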
+
+type constMetric struct {
+ desc *Desc
+ valType ValueType
+ val float64
+ labelPairs []*dto.LabelPair
+}
+
+func (m *constMetric) Desc() *Desc {
+ return m.desc
+}
+
+func (m *constMetric) Write(out *dto.Metric) error {
+ return populateMetric(m.valType, m.val, m.labelPairs, out)
+}
+
+func populateMetric(
+ t ValueType,
+ v float64,
+ labelPairs []*dto.LabelPair,
+ m *dto.Metric,
+) error {
+ m.Label = labelPairs
+ switch t {
+ case CounterValue:
+ m.Counter = &dto.Counter{Value: proto.Float64(v)}
+ case GaugeValue:
+ m.Gauge = &dto.Gauge{Value: proto.Float64(v)}
+ case UntypedValue:
+ m.Untyped = &dto.Untyped{Value: proto.Float64(v)}
+ default:
+ return fmt.Errorf("encountered unknown type %v", t)
+ }
+ return nil
+}
+
+func makeLabelPairs(desc *Desc, labelValues []string) []*dto.LabelPair {
+ totalLen := len(desc.variableLabels) + len(desc.constLabelPairs)
+ if totalLen == 0 {
+ // Super fast path.
+ return nil
+ }
+ if len(desc.variableLabels) == 0 {
+ // Moderately fast path.
+ return desc.constLabelPairs
+ }
+ labelPairs := make([]*dto.LabelPair, 0, totalLen)
+ for i, n := range desc.variableLabels {
+ labelPairs = append(labelPairs, &dto.LabelPair{
+ Name: proto.String(n),
+ Value: proto.String(labelValues[i]),
+ })
+ }
+ for _, lp := range desc.constLabelPairs {
+ labelPairs = append(labelPairs, lp)
+ }
+ sort.Sort(LabelPairSorter(labelPairs))
+ return labelPairs
+}
diff --git a/vendor/github.com/prometheus/client_golang/prometheus/vec.go b/vendor/github.com/prometheus/client_golang/prometheus/vec.go
new file mode 100644
index 00000000..68f94612
--- /dev/null
+++ b/vendor/github.com/prometheus/client_golang/prometheus/vec.go
@@ -0,0 +1,249 @@
+// Copyright 2014 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package prometheus
+
+import (
+ "fmt"
+ "sync"
+)
+
+// MetricVec is a Collector to bundle metrics of the same name that
+// differ in their label values. MetricVec is usually not used directly but as a
+// building block for implementations of vectors of a given metric
+// type. GaugeVec, CounterVec, SummaryVec, and UntypedVec are examples already
+// provided in this package.
+type MetricVec struct {
+ mtx sync.RWMutex // Protects the children.
+ children map[uint64]Metric
+ desc *Desc
+
+ newMetric func(labelValues ...string) Metric
+}
+
+// Describe implements Collector. It always sends exactly one Desc
+// to the provided channel.
+func (m *MetricVec) Describe(ch chan<- *Desc) {
+ ch <- m.desc
+}
+
+// Collect implements Collector.
+func (m *MetricVec) Collect(ch chan<- Metric) {
+ m.mtx.RLock()
+ defer m.mtx.RUnlock()
+
+ for _, metric := range m.children {
+ ch <- metric
+ }
+}
+
+// GetMetricWithLabelValues returns the Metric for the given slice of label
+// values (same order as the VariableLabels in Desc). If that combination of
+// label values is accessed for the first time, a new Metric is created.
+//
+// It is possible to call this method without using the returned Metric, merely
+// to create the new Metric and leave it at its start value (e.g. a Summary or
+// Histogram without any observations). See also the SummaryVec example.
+//
+// Keeping the Metric for later use is possible (and should be considered if
+// performance is critical), but keep in mind that Reset, DeleteLabelValues and
+// Delete can be used to delete the Metric from the MetricVec. In that case, the
+// Metric will still exist, but it will not be exported anymore, even if a
+// Metric with the same label values is created later. See also the CounterVec
+// example.
+//
+// An error is returned if the number of label values is not the same as the
+// number of VariableLabels in Desc.
+//
+// Note that for more than one label value, this method is prone to mistakes
+// caused by an incorrect order of arguments. Consider GetMetricWith(Labels) as
+// an alternative to avoid that type of mistake. For higher label numbers, the
+// latter has a much more readable (albeit more verbose) syntax, but it comes
+// with a performance overhead (for creating and processing the Labels map).
+// See also the GaugeVec example.
+func (m *MetricVec) GetMetricWithLabelValues(lvs ...string) (Metric, error) {
+ h, err := m.hashLabelValues(lvs)
+ if err != nil {
+ return nil, err
+ }
+
+ m.mtx.RLock()
+ metric, ok := m.children[h]
+ m.mtx.RUnlock()
+ if ok {
+ return metric, nil
+ }
+
+ m.mtx.Lock()
+ defer m.mtx.Unlock()
+ return m.getOrCreateMetric(h, lvs...), nil
+}
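+
+// To illustrate the two access styles discussed above (vector, label names and
+// values are made up):
+//
+//    m, err := vec.GetMetricWithLabelValues("GET", "200")
+//    if err != nil {
+//        // e.g. wrong number of label values
+//    }
+//    m, err = vec.GetMetricWith(Labels{"method": "GET", "code": "200"})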
+
+// GetMetricWith returns the Metric for the given Labels map (the label names
+// must match those of the VariableLabels in Desc). If that label map is
+// accessed for the first time, a new Metric is created. Implications of
+// creating a Metric without using it and keeping the Metric for later use are
+// the same as for GetMetricWithLabelValues.
+//
+// An error is returned if the number and names of the Labels are inconsistent
+// with those of the VariableLabels in Desc.
+//
+// This method is used for the same purpose as
+// GetMetricWithLabelValues(...string). See there for pros and cons of the two
+// methods.
+func (m *MetricVec) GetMetricWith(labels Labels) (Metric, error) {
+ h, err := m.hashLabels(labels)
+ if err != nil {
+ return nil, err
+ }
+
+ m.mtx.RLock()
+ metric, ok := m.children[h]
+ m.mtx.RUnlock()
+ if ok {
+ return metric, nil
+ }
+
+ lvs := make([]string, len(labels))
+ for i, label := range m.desc.variableLabels {
+ lvs[i] = labels[label]
+ }
+ m.mtx.Lock()
+ defer m.mtx.Unlock()
+ return m.getOrCreateMetric(h, lvs...), nil
+}
+
+// WithLabelValues works as GetMetricWithLabelValues, but panics if an error
+// occurs. The method allows neat syntax like:
+// httpReqs.WithLabelValues("404", "POST").Inc()
+func (m *MetricVec) WithLabelValues(lvs ...string) Metric {
+ metric, err := m.GetMetricWithLabelValues(lvs...)
+ if err != nil {
+ panic(err)
+ }
+ return metric
+}
+
+// With works as GetMetricWith, but panics if an error occurs. The method allows
+// neat syntax like:
+// httpReqs.With(Labels{"status":"404", "method":"POST"}).Inc()
+func (m *MetricVec) With(labels Labels) Metric {
+ metric, err := m.GetMetricWith(labels)
+ if err != nil {
+ panic(err)
+ }
+ return metric
+}
+
+// DeleteLabelValues removes the metric where the variable labels are the same
+// as those passed in as labels (same order as the VariableLabels in Desc). It
+// returns true if a metric was deleted.
+//
+// It is not an error if the number of label values is not the same as the
+// number of VariableLabels in Desc. However, such an inconsistent label count can
+// never match an actual Metric, so the method will always return false in that
+// case.
+//
+// Note that for more than one label value, this method is prone to mistakes
+// caused by an incorrect order of arguments. Consider Delete(Labels) as an
+// alternative to avoid that type of mistake. For higher label numbers, the
+// latter has a much more readable (albeit more verbose) syntax, but it comes
+// with a performance overhead (for creating and processing the Labels map).
+// See also the CounterVec example.
+func (m *MetricVec) DeleteLabelValues(lvs ...string) bool {
+ m.mtx.Lock()
+ defer m.mtx.Unlock()
+
+ h, err := m.hashLabelValues(lvs)
+ if err != nil {
+ return false
+ }
+ if _, ok := m.children[h]; !ok {
+ return false
+ }
+ delete(m.children, h)
+ return true
+}
+
+// Delete deletes the metric where the variable labels are the same as those
+// passed in as labels. It returns true if a metric was deleted.
+//
+// It is not an error if the number and names of the Labels are inconsistent
+// with those of the VariableLabels in the Desc of the MetricVec. However, such
+// inconsistent Labels can never match an actual Metric, so the method will
+// always return false in that case.
+//
+// This method is used for the same purpose as DeleteLabelValues(...string). See
+// there for pros and cons of the two methods.
+func (m *MetricVec) Delete(labels Labels) bool {
+ m.mtx.Lock()
+ defer m.mtx.Unlock()
+
+ h, err := m.hashLabels(labels)
+ if err != nil {
+ return false
+ }
+ if _, ok := m.children[h]; !ok {
+ return false
+ }
+ delete(m.children, h)
+ return true
+}
+
+// Reset deletes all metrics in this vector.
+func (m *MetricVec) Reset() {
+ m.mtx.Lock()
+ defer m.mtx.Unlock()
+
+ for h := range m.children {
+ delete(m.children, h)
+ }
+}
+
+func (m *MetricVec) hashLabelValues(vals []string) (uint64, error) {
+ if len(vals) != len(m.desc.variableLabels) {
+ return 0, errInconsistentCardinality
+ }
+ h := hashNew()
+ for _, val := range vals {
+ h = hashAdd(h, val)
+ }
+ return h, nil
+}
+
+func (m *MetricVec) hashLabels(labels Labels) (uint64, error) {
+ if len(labels) != len(m.desc.variableLabels) {
+ return 0, errInconsistentCardinality
+ }
+ h := hashNew()
+ for _, label := range m.desc.variableLabels {
+ val, ok := labels[label]
+ if !ok {
+ return 0, fmt.Errorf("label name %q missing in label map", label)
+ }
+ h = hashAdd(h, val)
+ }
+ return h, nil
+}
+
+func (m *MetricVec) getOrCreateMetric(hash uint64, labelValues ...string) Metric {
+ metric, ok := m.children[hash]
+ if !ok {
+		// Copy labelValues here so that the copy is only allocated when a new
+		// metric actually has to be created.
+ copiedLabelValues := append(make([]string, 0, len(labelValues)), labelValues...)
+ metric = m.newMetric(copiedLabelValues...)
+ m.children[hash] = metric
+ }
+ return metric
+}
diff --git a/vendor/github.com/prometheus/client_model/LICENSE b/vendor/github.com/prometheus/client_model/LICENSE
new file mode 100644
index 00000000..261eeb9e
--- /dev/null
+++ b/vendor/github.com/prometheus/client_model/LICENSE
@@ -0,0 +1,201 @@
+ Apache License
+ Version 2.0, January 2004
+ http://www.apache.org/licenses/
+
+ TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+
+ 1. Definitions.
+
+ "License" shall mean the terms and conditions for use, reproduction,
+ and distribution as defined by Sections 1 through 9 of this document.
+
+ "Licensor" shall mean the copyright owner or entity authorized by
+ the copyright owner that is granting the License.
+
+ "Legal Entity" shall mean the union of the acting entity and all
+ other entities that control, are controlled by, or are under common
+ control with that entity. For the purposes of this definition,
+ "control" means (i) the power, direct or indirect, to cause the
+ direction or management of such entity, whether by contract or
+ otherwise, or (ii) ownership of fifty percent (50%) or more of the
+ outstanding shares, or (iii) beneficial ownership of such entity.
+
+ "You" (or "Your") shall mean an individual or Legal Entity
+ exercising permissions granted by this License.
+
+ "Source" form shall mean the preferred form for making modifications,
+ including but not limited to software source code, documentation
+ source, and configuration files.
+
+ "Object" form shall mean any form resulting from mechanical
+ transformation or translation of a Source form, including but
+ not limited to compiled object code, generated documentation,
+ and conversions to other media types.
+
+ "Work" shall mean the work of authorship, whether in Source or
+ Object form, made available under the License, as indicated by a
+ copyright notice that is included in or attached to the work
+ (an example is provided in the Appendix below).
+
+ "Derivative Works" shall mean any work, whether in Source or Object
+ form, that is based on (or derived from) the Work and for which the
+ editorial revisions, annotations, elaborations, or other modifications
+ represent, as a whole, an original work of authorship. For the purposes
+ of this License, Derivative Works shall not include works that remain
+ separable from, or merely link (or bind by name) to the interfaces of,
+ the Work and Derivative Works thereof.
+
+ "Contribution" shall mean any work of authorship, including
+ the original version of the Work and any modifications or additions
+ to that Work or Derivative Works thereof, that is intentionally
+ submitted to Licensor for inclusion in the Work by the copyright owner
+ or by an individual or Legal Entity authorized to submit on behalf of
+ the copyright owner. For the purposes of this definition, "submitted"
+ means any form of electronic, verbal, or written communication sent
+ to the Licensor or its representatives, including but not limited to
+ communication on electronic mailing lists, source code control systems,
+ and issue tracking systems that are managed by, or on behalf of, the
+ Licensor for the purpose of discussing and improving the Work, but
+ excluding communication that is conspicuously marked or otherwise
+ designated in writing by the copyright owner as "Not a Contribution."
+
+ "Contributor" shall mean Licensor and any individual or Legal Entity
+ on behalf of whom a Contribution has been received by Licensor and
+ subsequently incorporated within the Work.
+
+ 2. Grant of Copyright License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ copyright license to reproduce, prepare Derivative Works of,
+ publicly display, publicly perform, sublicense, and distribute the
+ Work and such Derivative Works in Source or Object form.
+
+ 3. Grant of Patent License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ (except as stated in this section) patent license to make, have made,
+ use, offer to sell, sell, import, and otherwise transfer the Work,
+ where such license applies only to those patent claims licensable
+ by such Contributor that are necessarily infringed by their
+ Contribution(s) alone or by combination of their Contribution(s)
+ with the Work to which such Contribution(s) was submitted. If You
+ institute patent litigation against any entity (including a
+ cross-claim or counterclaim in a lawsuit) alleging that the Work
+ or a Contribution incorporated within the Work constitutes direct
+ or contributory patent infringement, then any patent licenses
+ granted to You under this License for that Work shall terminate
+ as of the date such litigation is filed.
+
+ 4. Redistribution. You may reproduce and distribute copies of the
+ Work or Derivative Works thereof in any medium, with or without
+ modifications, and in Source or Object form, provided that You
+ meet the following conditions:
+
+ (a) You must give any other recipients of the Work or
+ Derivative Works a copy of this License; and
+
+ (b) You must cause any modified files to carry prominent notices
+ stating that You changed the files; and
+
+ (c) You must retain, in the Source form of any Derivative Works
+ that You distribute, all copyright, patent, trademark, and
+ attribution notices from the Source form of the Work,
+ excluding those notices that do not pertain to any part of
+ the Derivative Works; and
+
+ (d) If the Work includes a "NOTICE" text file as part of its
+ distribution, then any Derivative Works that You distribute must
+ include a readable copy of the attribution notices contained
+ within such NOTICE file, excluding those notices that do not
+ pertain to any part of the Derivative Works, in at least one
+ of the following places: within a NOTICE text file distributed
+ as part of the Derivative Works; within the Source form or
+ documentation, if provided along with the Derivative Works; or,
+ within a display generated by the Derivative Works, if and
+ wherever such third-party notices normally appear. The contents
+ of the NOTICE file are for informational purposes only and
+ do not modify the License. You may add Your own attribution
+ notices within Derivative Works that You distribute, alongside
+ or as an addendum to the NOTICE text from the Work, provided
+ that such additional attribution notices cannot be construed
+ as modifying the License.
+
+ You may add Your own copyright statement to Your modifications and
+ may provide additional or different license terms and conditions
+ for use, reproduction, or distribution of Your modifications, or
+ for any such Derivative Works as a whole, provided Your use,
+ reproduction, and distribution of the Work otherwise complies with
+ the conditions stated in this License.
+
+ 5. Submission of Contributions. Unless You explicitly state otherwise,
+ any Contribution intentionally submitted for inclusion in the Work
+ by You to the Licensor shall be under the terms and conditions of
+ this License, without any additional terms or conditions.
+ Notwithstanding the above, nothing herein shall supersede or modify
+ the terms of any separate license agreement you may have executed
+ with Licensor regarding such Contributions.
+
+ 6. Trademarks. This License does not grant permission to use the trade
+ names, trademarks, service marks, or product names of the Licensor,
+ except as required for reasonable and customary use in describing the
+ origin of the Work and reproducing the content of the NOTICE file.
+
+ 7. Disclaimer of Warranty. Unless required by applicable law or
+ agreed to in writing, Licensor provides the Work (and each
+ Contributor provides its Contributions) on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+ implied, including, without limitation, any warranties or conditions
+ of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
+ PARTICULAR PURPOSE. You are solely responsible for determining the
+ appropriateness of using or redistributing the Work and assume any
+ risks associated with Your exercise of permissions under this License.
+
+ 8. Limitation of Liability. In no event and under no legal theory,
+ whether in tort (including negligence), contract, or otherwise,
+ unless required by applicable law (such as deliberate and grossly
+ negligent acts) or agreed to in writing, shall any Contributor be
+ liable to You for damages, including any direct, indirect, special,
+ incidental, or consequential damages of any character arising as a
+ result of this License or out of the use or inability to use the
+ Work (including but not limited to damages for loss of goodwill,
+ work stoppage, computer failure or malfunction, or any and all
+ other commercial damages or losses), even if such Contributor
+ has been advised of the possibility of such damages.
+
+ 9. Accepting Warranty or Additional Liability. While redistributing
+ the Work or Derivative Works thereof, You may choose to offer,
+ and charge a fee for, acceptance of support, warranty, indemnity,
+ or other liability obligations and/or rights consistent with this
+ License. However, in accepting such obligations, You may act only
+ on Your own behalf and on Your sole responsibility, not on behalf
+ of any other Contributor, and only if You agree to indemnify,
+ defend, and hold each Contributor harmless for any liability
+ incurred by, or claims asserted against, such Contributor by reason
+ of your accepting any such warranty or additional liability.
+
+ END OF TERMS AND CONDITIONS
+
+ APPENDIX: How to apply the Apache License to your work.
+
+ To apply the Apache License to your work, attach the following
+ boilerplate notice, with the fields enclosed by brackets "[]"
+ replaced with your own identifying information. (Don't include
+ the brackets!) The text should be enclosed in the appropriate
+ comment syntax for the file format. We also recommend that a
+ file or class name and description of purpose be included on the
+ same "printed page" as the copyright notice for easier
+ identification within third-party archives.
+
+ Copyright [yyyy] [name of copyright owner]
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
diff --git a/vendor/github.com/prometheus/client_model/NOTICE b/vendor/github.com/prometheus/client_model/NOTICE
new file mode 100644
index 00000000..20110e41
--- /dev/null
+++ b/vendor/github.com/prometheus/client_model/NOTICE
@@ -0,0 +1,5 @@
+Data model artifacts for Prometheus.
+Copyright 2012-2015 The Prometheus Authors
+
+This product includes software developed at
+SoundCloud Ltd. (http://soundcloud.com/).
diff --git a/vendor/github.com/prometheus/client_model/go/metrics.pb.go b/vendor/github.com/prometheus/client_model/go/metrics.pb.go
new file mode 100644
index 00000000..b065f868
--- /dev/null
+++ b/vendor/github.com/prometheus/client_model/go/metrics.pb.go
@@ -0,0 +1,364 @@
+// Code generated by protoc-gen-go.
+// source: metrics.proto
+// DO NOT EDIT!
+
+/*
+Package io_prometheus_client is a generated protocol buffer package.
+
+It is generated from these files:
+ metrics.proto
+
+It has these top-level messages:
+ LabelPair
+ Gauge
+ Counter
+ Quantile
+ Summary
+ Untyped
+ Histogram
+ Bucket
+ Metric
+ MetricFamily
+*/
+package io_prometheus_client
+
+import proto "github.com/golang/protobuf/proto"
+import math "math"
+
+// Reference imports to suppress errors if they are not otherwise used.
+var _ = proto.Marshal
+var _ = math.Inf
+
+type MetricType int32
+
+const (
+ MetricType_COUNTER MetricType = 0
+ MetricType_GAUGE MetricType = 1
+ MetricType_SUMMARY MetricType = 2
+ MetricType_UNTYPED MetricType = 3
+ MetricType_HISTOGRAM MetricType = 4
+)
+
+var MetricType_name = map[int32]string{
+ 0: "COUNTER",
+ 1: "GAUGE",
+ 2: "SUMMARY",
+ 3: "UNTYPED",
+ 4: "HISTOGRAM",
+}
+var MetricType_value = map[string]int32{
+ "COUNTER": 0,
+ "GAUGE": 1,
+ "SUMMARY": 2,
+ "UNTYPED": 3,
+ "HISTOGRAM": 4,
+}
+
+func (x MetricType) Enum() *MetricType {
+ p := new(MetricType)
+ *p = x
+ return p
+}
+func (x MetricType) String() string {
+ return proto.EnumName(MetricType_name, int32(x))
+}
+func (x *MetricType) UnmarshalJSON(data []byte) error {
+ value, err := proto.UnmarshalJSONEnum(MetricType_value, data, "MetricType")
+ if err != nil {
+ return err
+ }
+ *x = MetricType(value)
+ return nil
+}
+
+type LabelPair struct {
+ Name *string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"`
+ Value *string `protobuf:"bytes,2,opt,name=value" json:"value,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *LabelPair) Reset() { *m = LabelPair{} }
+func (m *LabelPair) String() string { return proto.CompactTextString(m) }
+func (*LabelPair) ProtoMessage() {}
+
+func (m *LabelPair) GetName() string {
+ if m != nil && m.Name != nil {
+ return *m.Name
+ }
+ return ""
+}
+
+func (m *LabelPair) GetValue() string {
+ if m != nil && m.Value != nil {
+ return *m.Value
+ }
+ return ""
+}
+
+type Gauge struct {
+ Value *float64 `protobuf:"fixed64,1,opt,name=value" json:"value,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *Gauge) Reset() { *m = Gauge{} }
+func (m *Gauge) String() string { return proto.CompactTextString(m) }
+func (*Gauge) ProtoMessage() {}
+
+func (m *Gauge) GetValue() float64 {
+ if m != nil && m.Value != nil {
+ return *m.Value
+ }
+ return 0
+}
+
+type Counter struct {
+ Value *float64 `protobuf:"fixed64,1,opt,name=value" json:"value,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *Counter) Reset() { *m = Counter{} }
+func (m *Counter) String() string { return proto.CompactTextString(m) }
+func (*Counter) ProtoMessage() {}
+
+func (m *Counter) GetValue() float64 {
+ if m != nil && m.Value != nil {
+ return *m.Value
+ }
+ return 0
+}
+
+type Quantile struct {
+ Quantile *float64 `protobuf:"fixed64,1,opt,name=quantile" json:"quantile,omitempty"`
+ Value *float64 `protobuf:"fixed64,2,opt,name=value" json:"value,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *Quantile) Reset() { *m = Quantile{} }
+func (m *Quantile) String() string { return proto.CompactTextString(m) }
+func (*Quantile) ProtoMessage() {}
+
+func (m *Quantile) GetQuantile() float64 {
+ if m != nil && m.Quantile != nil {
+ return *m.Quantile
+ }
+ return 0
+}
+
+func (m *Quantile) GetValue() float64 {
+ if m != nil && m.Value != nil {
+ return *m.Value
+ }
+ return 0
+}
+
+type Summary struct {
+ SampleCount *uint64 `protobuf:"varint,1,opt,name=sample_count" json:"sample_count,omitempty"`
+ SampleSum *float64 `protobuf:"fixed64,2,opt,name=sample_sum" json:"sample_sum,omitempty"`
+ Quantile []*Quantile `protobuf:"bytes,3,rep,name=quantile" json:"quantile,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *Summary) Reset() { *m = Summary{} }
+func (m *Summary) String() string { return proto.CompactTextString(m) }
+func (*Summary) ProtoMessage() {}
+
+func (m *Summary) GetSampleCount() uint64 {
+ if m != nil && m.SampleCount != nil {
+ return *m.SampleCount
+ }
+ return 0
+}
+
+func (m *Summary) GetSampleSum() float64 {
+ if m != nil && m.SampleSum != nil {
+ return *m.SampleSum
+ }
+ return 0
+}
+
+func (m *Summary) GetQuantile() []*Quantile {
+ if m != nil {
+ return m.Quantile
+ }
+ return nil
+}
+
+type Untyped struct {
+ Value *float64 `protobuf:"fixed64,1,opt,name=value" json:"value,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *Untyped) Reset() { *m = Untyped{} }
+func (m *Untyped) String() string { return proto.CompactTextString(m) }
+func (*Untyped) ProtoMessage() {}
+
+func (m *Untyped) GetValue() float64 {
+ if m != nil && m.Value != nil {
+ return *m.Value
+ }
+ return 0
+}
+
+type Histogram struct {
+ SampleCount *uint64 `protobuf:"varint,1,opt,name=sample_count" json:"sample_count,omitempty"`
+ SampleSum *float64 `protobuf:"fixed64,2,opt,name=sample_sum" json:"sample_sum,omitempty"`
+ Bucket []*Bucket `protobuf:"bytes,3,rep,name=bucket" json:"bucket,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *Histogram) Reset() { *m = Histogram{} }
+func (m *Histogram) String() string { return proto.CompactTextString(m) }
+func (*Histogram) ProtoMessage() {}
+
+func (m *Histogram) GetSampleCount() uint64 {
+ if m != nil && m.SampleCount != nil {
+ return *m.SampleCount
+ }
+ return 0
+}
+
+func (m *Histogram) GetSampleSum() float64 {
+ if m != nil && m.SampleSum != nil {
+ return *m.SampleSum
+ }
+ return 0
+}
+
+func (m *Histogram) GetBucket() []*Bucket {
+ if m != nil {
+ return m.Bucket
+ }
+ return nil
+}
+
+type Bucket struct {
+ CumulativeCount *uint64 `protobuf:"varint,1,opt,name=cumulative_count" json:"cumulative_count,omitempty"`
+ UpperBound *float64 `protobuf:"fixed64,2,opt,name=upper_bound" json:"upper_bound,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *Bucket) Reset() { *m = Bucket{} }
+func (m *Bucket) String() string { return proto.CompactTextString(m) }
+func (*Bucket) ProtoMessage() {}
+
+func (m *Bucket) GetCumulativeCount() uint64 {
+ if m != nil && m.CumulativeCount != nil {
+ return *m.CumulativeCount
+ }
+ return 0
+}
+
+func (m *Bucket) GetUpperBound() float64 {
+ if m != nil && m.UpperBound != nil {
+ return *m.UpperBound
+ }
+ return 0
+}
+
+type Metric struct {
+ Label []*LabelPair `protobuf:"bytes,1,rep,name=label" json:"label,omitempty"`
+ Gauge *Gauge `protobuf:"bytes,2,opt,name=gauge" json:"gauge,omitempty"`
+ Counter *Counter `protobuf:"bytes,3,opt,name=counter" json:"counter,omitempty"`
+ Summary *Summary `protobuf:"bytes,4,opt,name=summary" json:"summary,omitempty"`
+ Untyped *Untyped `protobuf:"bytes,5,opt,name=untyped" json:"untyped,omitempty"`
+ Histogram *Histogram `protobuf:"bytes,7,opt,name=histogram" json:"histogram,omitempty"`
+ TimestampMs *int64 `protobuf:"varint,6,opt,name=timestamp_ms" json:"timestamp_ms,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *Metric) Reset() { *m = Metric{} }
+func (m *Metric) String() string { return proto.CompactTextString(m) }
+func (*Metric) ProtoMessage() {}
+
+func (m *Metric) GetLabel() []*LabelPair {
+ if m != nil {
+ return m.Label
+ }
+ return nil
+}
+
+func (m *Metric) GetGauge() *Gauge {
+ if m != nil {
+ return m.Gauge
+ }
+ return nil
+}
+
+func (m *Metric) GetCounter() *Counter {
+ if m != nil {
+ return m.Counter
+ }
+ return nil
+}
+
+func (m *Metric) GetSummary() *Summary {
+ if m != nil {
+ return m.Summary
+ }
+ return nil
+}
+
+func (m *Metric) GetUntyped() *Untyped {
+ if m != nil {
+ return m.Untyped
+ }
+ return nil
+}
+
+func (m *Metric) GetHistogram() *Histogram {
+ if m != nil {
+ return m.Histogram
+ }
+ return nil
+}
+
+func (m *Metric) GetTimestampMs() int64 {
+ if m != nil && m.TimestampMs != nil {
+ return *m.TimestampMs
+ }
+ return 0
+}
+
+type MetricFamily struct {
+ Name *string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"`
+ Help *string `protobuf:"bytes,2,opt,name=help" json:"help,omitempty"`
+ Type *MetricType `protobuf:"varint,3,opt,name=type,enum=io.prometheus.client.MetricType" json:"type,omitempty"`
+ Metric []*Metric `protobuf:"bytes,4,rep,name=metric" json:"metric,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *MetricFamily) Reset() { *m = MetricFamily{} }
+func (m *MetricFamily) String() string { return proto.CompactTextString(m) }
+func (*MetricFamily) ProtoMessage() {}
+
+func (m *MetricFamily) GetName() string {
+ if m != nil && m.Name != nil {
+ return *m.Name
+ }
+ return ""
+}
+
+func (m *MetricFamily) GetHelp() string {
+ if m != nil && m.Help != nil {
+ return *m.Help
+ }
+ return ""
+}
+
+func (m *MetricFamily) GetType() MetricType {
+ if m != nil && m.Type != nil {
+ return *m.Type
+ }
+ return MetricType_COUNTER
+}
+
+func (m *MetricFamily) GetMetric() []*Metric {
+ if m != nil {
+ return m.Metric
+ }
+ return nil
+}
+
+func init() {
+ proto.RegisterEnum("io.prometheus.client.MetricType", MetricType_name, MetricType_value)
+}
diff --git a/vendor/github.com/prometheus/common/LICENSE b/vendor/github.com/prometheus/common/LICENSE
new file mode 100644
index 00000000..261eeb9e
--- /dev/null
+++ b/vendor/github.com/prometheus/common/LICENSE
@@ -0,0 +1,201 @@
+ Apache License
+ Version 2.0, January 2004
+ http://www.apache.org/licenses/
+
+ TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+
+ 1. Definitions.
+
+ "License" shall mean the terms and conditions for use, reproduction,
+ and distribution as defined by Sections 1 through 9 of this document.
+
+ "Licensor" shall mean the copyright owner or entity authorized by
+ the copyright owner that is granting the License.
+
+ "Legal Entity" shall mean the union of the acting entity and all
+ other entities that control, are controlled by, or are under common
+ control with that entity. For the purposes of this definition,
+ "control" means (i) the power, direct or indirect, to cause the
+ direction or management of such entity, whether by contract or
+ otherwise, or (ii) ownership of fifty percent (50%) or more of the
+ outstanding shares, or (iii) beneficial ownership of such entity.
+
+ "You" (or "Your") shall mean an individual or Legal Entity
+ exercising permissions granted by this License.
+
+ "Source" form shall mean the preferred form for making modifications,
+ including but not limited to software source code, documentation
+ source, and configuration files.
+
+ "Object" form shall mean any form resulting from mechanical
+ transformation or translation of a Source form, including but
+ not limited to compiled object code, generated documentation,
+ and conversions to other media types.
+
+ "Work" shall mean the work of authorship, whether in Source or
+ Object form, made available under the License, as indicated by a
+ copyright notice that is included in or attached to the work
+ (an example is provided in the Appendix below).
+
+ "Derivative Works" shall mean any work, whether in Source or Object
+ form, that is based on (or derived from) the Work and for which the
+ editorial revisions, annotations, elaborations, or other modifications
+ represent, as a whole, an original work of authorship. For the purposes
+ of this License, Derivative Works shall not include works that remain
+ separable from, or merely link (or bind by name) to the interfaces of,
+ the Work and Derivative Works thereof.
+
+ "Contribution" shall mean any work of authorship, including
+ the original version of the Work and any modifications or additions
+ to that Work or Derivative Works thereof, that is intentionally
+ submitted to Licensor for inclusion in the Work by the copyright owner
+ or by an individual or Legal Entity authorized to submit on behalf of
+ the copyright owner. For the purposes of this definition, "submitted"
+ means any form of electronic, verbal, or written communication sent
+ to the Licensor or its representatives, including but not limited to
+ communication on electronic mailing lists, source code control systems,
+ and issue tracking systems that are managed by, or on behalf of, the
+ Licensor for the purpose of discussing and improving the Work, but
+ excluding communication that is conspicuously marked or otherwise
+ designated in writing by the copyright owner as "Not a Contribution."
+
+ "Contributor" shall mean Licensor and any individual or Legal Entity
+ on behalf of whom a Contribution has been received by Licensor and
+ subsequently incorporated within the Work.
+
+ 2. Grant of Copyright License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ copyright license to reproduce, prepare Derivative Works of,
+ publicly display, publicly perform, sublicense, and distribute the
+ Work and such Derivative Works in Source or Object form.
+
+ 3. Grant of Patent License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ (except as stated in this section) patent license to make, have made,
+ use, offer to sell, sell, import, and otherwise transfer the Work,
+ where such license applies only to those patent claims licensable
+ by such Contributor that are necessarily infringed by their
+ Contribution(s) alone or by combination of their Contribution(s)
+ with the Work to which such Contribution(s) was submitted. If You
+ institute patent litigation against any entity (including a
+ cross-claim or counterclaim in a lawsuit) alleging that the Work
+ or a Contribution incorporated within the Work constitutes direct
+ or contributory patent infringement, then any patent licenses
+ granted to You under this License for that Work shall terminate
+ as of the date such litigation is filed.
+
+ 4. Redistribution. You may reproduce and distribute copies of the
+ Work or Derivative Works thereof in any medium, with or without
+ modifications, and in Source or Object form, provided that You
+ meet the following conditions:
+
+ (a) You must give any other recipients of the Work or
+ Derivative Works a copy of this License; and
+
+ (b) You must cause any modified files to carry prominent notices
+ stating that You changed the files; and
+
+ (c) You must retain, in the Source form of any Derivative Works
+ that You distribute, all copyright, patent, trademark, and
+ attribution notices from the Source form of the Work,
+ excluding those notices that do not pertain to any part of
+ the Derivative Works; and
+
+ (d) If the Work includes a "NOTICE" text file as part of its
+ distribution, then any Derivative Works that You distribute must
+ include a readable copy of the attribution notices contained
+ within such NOTICE file, excluding those notices that do not
+ pertain to any part of the Derivative Works, in at least one
+ of the following places: within a NOTICE text file distributed
+ as part of the Derivative Works; within the Source form or
+ documentation, if provided along with the Derivative Works; or,
+ within a display generated by the Derivative Works, if and
+ wherever such third-party notices normally appear. The contents
+ of the NOTICE file are for informational purposes only and
+ do not modify the License. You may add Your own attribution
+ notices within Derivative Works that You distribute, alongside
+ or as an addendum to the NOTICE text from the Work, provided
+ that such additional attribution notices cannot be construed
+ as modifying the License.
+
+ You may add Your own copyright statement to Your modifications and
+ may provide additional or different license terms and conditions
+ for use, reproduction, or distribution of Your modifications, or
+ for any such Derivative Works as a whole, provided Your use,
+ reproduction, and distribution of the Work otherwise complies with
+ the conditions stated in this License.
+
+ 5. Submission of Contributions. Unless You explicitly state otherwise,
+ any Contribution intentionally submitted for inclusion in the Work
+ by You to the Licensor shall be under the terms and conditions of
+ this License, without any additional terms or conditions.
+ Notwithstanding the above, nothing herein shall supersede or modify
+ the terms of any separate license agreement you may have executed
+ with Licensor regarding such Contributions.
+
+ 6. Trademarks. This License does not grant permission to use the trade
+ names, trademarks, service marks, or product names of the Licensor,
+ except as required for reasonable and customary use in describing the
+ origin of the Work and reproducing the content of the NOTICE file.
+
+ 7. Disclaimer of Warranty. Unless required by applicable law or
+ agreed to in writing, Licensor provides the Work (and each
+ Contributor provides its Contributions) on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+ implied, including, without limitation, any warranties or conditions
+ of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
+ PARTICULAR PURPOSE. You are solely responsible for determining the
+ appropriateness of using or redistributing the Work and assume any
+ risks associated with Your exercise of permissions under this License.
+
+ 8. Limitation of Liability. In no event and under no legal theory,
+ whether in tort (including negligence), contract, or otherwise,
+ unless required by applicable law (such as deliberate and grossly
+ negligent acts) or agreed to in writing, shall any Contributor be
+ liable to You for damages, including any direct, indirect, special,
+ incidental, or consequential damages of any character arising as a
+ result of this License or out of the use or inability to use the
+ Work (including but not limited to damages for loss of goodwill,
+ work stoppage, computer failure or malfunction, or any and all
+ other commercial damages or losses), even if such Contributor
+ has been advised of the possibility of such damages.
+
+ 9. Accepting Warranty or Additional Liability. While redistributing
+ the Work or Derivative Works thereof, You may choose to offer,
+ and charge a fee for, acceptance of support, warranty, indemnity,
+ or other liability obligations and/or rights consistent with this
+ License. However, in accepting such obligations, You may act only
+ on Your own behalf and on Your sole responsibility, not on behalf
+ of any other Contributor, and only if You agree to indemnify,
+ defend, and hold each Contributor harmless for any liability
+ incurred by, or claims asserted against, such Contributor by reason
+ of your accepting any such warranty or additional liability.
+
+ END OF TERMS AND CONDITIONS
+
+ APPENDIX: How to apply the Apache License to your work.
+
+ To apply the Apache License to your work, attach the following
+ boilerplate notice, with the fields enclosed by brackets "[]"
+ replaced with your own identifying information. (Don't include
+ the brackets!) The text should be enclosed in the appropriate
+ comment syntax for the file format. We also recommend that a
+ file or class name and description of purpose be included on the
+ same "printed page" as the copyright notice for easier
+ identification within third-party archives.
+
+ Copyright [yyyy] [name of copyright owner]
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
diff --git a/vendor/github.com/prometheus/common/NOTICE b/vendor/github.com/prometheus/common/NOTICE
new file mode 100644
index 00000000..636a2c1a
--- /dev/null
+++ b/vendor/github.com/prometheus/common/NOTICE
@@ -0,0 +1,5 @@
+Common libraries shared by Prometheus Go components.
+Copyright 2015 The Prometheus Authors
+
+This product includes software developed at
+SoundCloud Ltd. (http://soundcloud.com/).
diff --git a/vendor/github.com/prometheus/common/expfmt/decode.go b/vendor/github.com/prometheus/common/expfmt/decode.go
new file mode 100644
index 00000000..a98696df
--- /dev/null
+++ b/vendor/github.com/prometheus/common/expfmt/decode.go
@@ -0,0 +1,433 @@
+// Copyright 2015 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package expfmt
+
+import (
+ "fmt"
+ "io"
+ "math"
+ "mime"
+ "net/http"
+
+ dto "github.com/prometheus/client_model/go"
+
+ "github.com/matttproud/golang_protobuf_extensions/pbutil"
+ "github.com/prometheus/common/model"
+)
+
+// Decoder types decode an input stream into metric families.
+type Decoder interface {
+ Decode(*dto.MetricFamily) error
+}
+
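+// DecodeOptions contains options used by the Decoder and in sample extraction.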
+type DecodeOptions struct {
+ // Timestamp is added to each value from the stream that has no explicit timestamp set.
+ Timestamp model.Time
+}
+
+// ResponseFormat extracts the correct format from an HTTP response header.
+// If no matching format can be found, FmtUnknown is returned.
+func ResponseFormat(h http.Header) Format {
+ ct := h.Get(hdrContentType)
+
+ mediatype, params, err := mime.ParseMediaType(ct)
+ if err != nil {
+ return FmtUnknown
+ }
+
+ const (
+ textType = "text/plain"
+ jsonType = "application/json"
+ )
+
+ switch mediatype {
+ case ProtoType:
+ if p, ok := params["proto"]; ok && p != ProtoProtocol {
+ return FmtUnknown
+ }
+ if e, ok := params["encoding"]; ok && e != "delimited" {
+ return FmtUnknown
+ }
+ return FmtProtoDelim
+
+ case textType:
+ if v, ok := params["version"]; ok && v != TextVersion {
+ return FmtUnknown
+ }
+ return FmtText
+
+ case jsonType:
+ var prometheusAPIVersion string
+
+ if params["schema"] == "prometheus/telemetry" && params["version"] != "" {
+ prometheusAPIVersion = params["version"]
+ } else {
+ prometheusAPIVersion = h.Get("X-Prometheus-API-Version")
+ }
+
+ switch prometheusAPIVersion {
+ case "0.0.2", "":
+ return fmtJSON2
+ default:
+ return FmtUnknown
+ }
+ }
+
+ return FmtUnknown
+}
+
+// NewDecoder returns a new decoder based on the given input format.
+// If the input format does not imply otherwise, a text format decoder is returned.
+func NewDecoder(r io.Reader, format Format) Decoder {
+ switch format {
+ case FmtProtoDelim:
+ return &protoDecoder{r: r}
+ case fmtJSON2:
+ return newJSON2Decoder(r)
+ }
+ return &textDecoder{r: r}
+}
+
+// protoDecoder implements the Decoder interface for protocol buffers.
+type protoDecoder struct {
+ r io.Reader
+}
+
+// Decode implements the Decoder interface.
+func (d *protoDecoder) Decode(v *dto.MetricFamily) error {
+ _, err := pbutil.ReadDelimited(d.r, v)
+ if err != nil {
+ return err
+ }
+ if !model.IsValidMetricName(model.LabelValue(v.GetName())) {
+ return fmt.Errorf("invalid metric name %q", v.GetName())
+ }
+ for _, m := range v.GetMetric() {
+ if m == nil {
+ continue
+ }
+ for _, l := range m.GetLabel() {
+ if l == nil {
+ continue
+ }
+ if !model.LabelValue(l.GetValue()).IsValid() {
+ return fmt.Errorf("invalid label value %q", l.GetValue())
+ }
+ if !model.LabelName(l.GetName()).IsValid() {
+ return fmt.Errorf("invalid label name %q", l.GetName())
+ }
+ }
+ }
+ return nil
+}
+
+// textDecoder implements the Decoder interface for the text protocol.
+type textDecoder struct {
+ r io.Reader
+ p TextParser
+ fams []*dto.MetricFamily
+}
+
+// Decode implements the Decoder interface.
+func (d *textDecoder) Decode(v *dto.MetricFamily) error {
+ // TODO(fabxc): Wrap this as a line reader to make streaming safer.
+ if len(d.fams) == 0 {
+ // No cached metric families, read everything and parse metrics.
+ fams, err := d.p.TextToMetricFamilies(d.r)
+ if err != nil {
+ return err
+ }
+ if len(fams) == 0 {
+ return io.EOF
+ }
+ d.fams = make([]*dto.MetricFamily, 0, len(fams))
+ for _, f := range fams {
+ d.fams = append(d.fams, f)
+ }
+ }
+
+ *v = *d.fams[0]
+ d.fams = d.fams[1:]
+
+ return nil
+}
+
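+// SampleDecoder wraps a Decoder to extract samples from the metric families
+// decoded by the wrapped Decoder.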
+type SampleDecoder struct {
+ Dec Decoder
+ Opts *DecodeOptions
+
+ f dto.MetricFamily
+}
+
+func (sd *SampleDecoder) Decode(s *model.Vector) error {
+ if err := sd.Dec.Decode(&sd.f); err != nil {
+ return err
+ }
+ *s = extractSamples(&sd.f, sd.Opts)
+ return nil
+}
+
+// ExtractSamples builds a slice of samples from the provided metric families.
+func ExtractSamples(o *DecodeOptions, fams ...*dto.MetricFamily) model.Vector {
+ var all model.Vector
+ for _, f := range fams {
+ all = append(all, extractSamples(f, o)...)
+ }
+ return all
+}
+
+func extractSamples(f *dto.MetricFamily, o *DecodeOptions) model.Vector {
+ switch f.GetType() {
+ case dto.MetricType_COUNTER:
+ return extractCounter(o, f)
+ case dto.MetricType_GAUGE:
+ return extractGauge(o, f)
+ case dto.MetricType_SUMMARY:
+ return extractSummary(o, f)
+ case dto.MetricType_UNTYPED:
+ return extractUntyped(o, f)
+ case dto.MetricType_HISTOGRAM:
+ return extractHistogram(o, f)
+ }
+ panic("expfmt.extractSamples: unknown metric family type")
+}
+
+func extractCounter(o *DecodeOptions, f *dto.MetricFamily) model.Vector {
+ samples := make(model.Vector, 0, len(f.Metric))
+
+ for _, m := range f.Metric {
+ if m.Counter == nil {
+ continue
+ }
+
+ lset := make(model.LabelSet, len(m.Label)+1)
+ for _, p := range m.Label {
+ lset[model.LabelName(p.GetName())] = model.LabelValue(p.GetValue())
+ }
+ lset[model.MetricNameLabel] = model.LabelValue(f.GetName())
+
+ smpl := &model.Sample{
+ Metric: model.Metric(lset),
+ Value: model.SampleValue(m.Counter.GetValue()),
+ }
+
+ if m.TimestampMs != nil {
+ smpl.Timestamp = model.TimeFromUnixNano(*m.TimestampMs * 1000000)
+ } else {
+ smpl.Timestamp = o.Timestamp
+ }
+
+ samples = append(samples, smpl)
+ }
+
+ return samples
+}
+
+func extractGauge(o *DecodeOptions, f *dto.MetricFamily) model.Vector {
+ samples := make(model.Vector, 0, len(f.Metric))
+
+ for _, m := range f.Metric {
+ if m.Gauge == nil {
+ continue
+ }
+
+ lset := make(model.LabelSet, len(m.Label)+1)
+ for _, p := range m.Label {
+ lset[model.LabelName(p.GetName())] = model.LabelValue(p.GetValue())
+ }
+ lset[model.MetricNameLabel] = model.LabelValue(f.GetName())
+
+ smpl := &model.Sample{
+ Metric: model.Metric(lset),
+ Value: model.SampleValue(m.Gauge.GetValue()),
+ }
+
+ if m.TimestampMs != nil {
+ smpl.Timestamp = model.TimeFromUnixNano(*m.TimestampMs * 1000000)
+ } else {
+ smpl.Timestamp = o.Timestamp
+ }
+
+ samples = append(samples, smpl)
+ }
+
+ return samples
+}
+
+func extractUntyped(o *DecodeOptions, f *dto.MetricFamily) model.Vector {
+ samples := make(model.Vector, 0, len(f.Metric))
+
+ for _, m := range f.Metric {
+ if m.Untyped == nil {
+ continue
+ }
+
+ lset := make(model.LabelSet, len(m.Label)+1)
+ for _, p := range m.Label {
+ lset[model.LabelName(p.GetName())] = model.LabelValue(p.GetValue())
+ }
+ lset[model.MetricNameLabel] = model.LabelValue(f.GetName())
+
+ smpl := &model.Sample{
+ Metric: model.Metric(lset),
+ Value: model.SampleValue(m.Untyped.GetValue()),
+ }
+
+ if m.TimestampMs != nil {
+ smpl.Timestamp = model.TimeFromUnixNano(*m.TimestampMs * 1000000)
+ } else {
+ smpl.Timestamp = o.Timestamp
+ }
+
+ samples = append(samples, smpl)
+ }
+
+ return samples
+}
+
+func extractSummary(o *DecodeOptions, f *dto.MetricFamily) model.Vector {
+ samples := make(model.Vector, 0, len(f.Metric))
+
+ for _, m := range f.Metric {
+ if m.Summary == nil {
+ continue
+ }
+
+ timestamp := o.Timestamp
+ if m.TimestampMs != nil {
+ timestamp = model.TimeFromUnixNano(*m.TimestampMs * 1000000)
+ }
+
+ for _, q := range m.Summary.Quantile {
+ lset := make(model.LabelSet, len(m.Label)+2)
+ for _, p := range m.Label {
+ lset[model.LabelName(p.GetName())] = model.LabelValue(p.GetValue())
+ }
+ // BUG(matt): Update other names to "quantile".
+ lset[model.LabelName(model.QuantileLabel)] = model.LabelValue(fmt.Sprint(q.GetQuantile()))
+ lset[model.MetricNameLabel] = model.LabelValue(f.GetName())
+
+ samples = append(samples, &model.Sample{
+ Metric: model.Metric(lset),
+ Value: model.SampleValue(q.GetValue()),
+ Timestamp: timestamp,
+ })
+ }
+
+ lset := make(model.LabelSet, len(m.Label)+1)
+ for _, p := range m.Label {
+ lset[model.LabelName(p.GetName())] = model.LabelValue(p.GetValue())
+ }
+ lset[model.MetricNameLabel] = model.LabelValue(f.GetName() + "_sum")
+
+ samples = append(samples, &model.Sample{
+ Metric: model.Metric(lset),
+ Value: model.SampleValue(m.Summary.GetSampleSum()),
+ Timestamp: timestamp,
+ })
+
+ lset = make(model.LabelSet, len(m.Label)+1)
+ for _, p := range m.Label {
+ lset[model.LabelName(p.GetName())] = model.LabelValue(p.GetValue())
+ }
+ lset[model.MetricNameLabel] = model.LabelValue(f.GetName() + "_count")
+
+ samples = append(samples, &model.Sample{
+ Metric: model.Metric(lset),
+ Value: model.SampleValue(m.Summary.GetSampleCount()),
+ Timestamp: timestamp,
+ })
+ }
+
+ return samples
+}
+
+func extractHistogram(o *DecodeOptions, f *dto.MetricFamily) model.Vector {
+ samples := make(model.Vector, 0, len(f.Metric))
+
+ for _, m := range f.Metric {
+ if m.Histogram == nil {
+ continue
+ }
+
+ timestamp := o.Timestamp
+ if m.TimestampMs != nil {
+ timestamp = model.TimeFromUnixNano(*m.TimestampMs * 1000000)
+ }
+
+ infSeen := false
+
+ for _, q := range m.Histogram.Bucket {
+ lset := make(model.LabelSet, len(m.Label)+2)
+ for _, p := range m.Label {
+ lset[model.LabelName(p.GetName())] = model.LabelValue(p.GetValue())
+ }
+ lset[model.LabelName(model.BucketLabel)] = model.LabelValue(fmt.Sprint(q.GetUpperBound()))
+ lset[model.MetricNameLabel] = model.LabelValue(f.GetName() + "_bucket")
+
+ if math.IsInf(q.GetUpperBound(), +1) {
+ infSeen = true
+ }
+
+ samples = append(samples, &model.Sample{
+ Metric: model.Metric(lset),
+ Value: model.SampleValue(q.GetCumulativeCount()),
+ Timestamp: timestamp,
+ })
+ }
+
+ lset := make(model.LabelSet, len(m.Label)+1)
+ for _, p := range m.Label {
+ lset[model.LabelName(p.GetName())] = model.LabelValue(p.GetValue())
+ }
+ lset[model.MetricNameLabel] = model.LabelValue(f.GetName() + "_sum")
+
+ samples = append(samples, &model.Sample{
+ Metric: model.Metric(lset),
+ Value: model.SampleValue(m.Histogram.GetSampleSum()),
+ Timestamp: timestamp,
+ })
+
+ lset = make(model.LabelSet, len(m.Label)+1)
+ for _, p := range m.Label {
+ lset[model.LabelName(p.GetName())] = model.LabelValue(p.GetValue())
+ }
+ lset[model.MetricNameLabel] = model.LabelValue(f.GetName() + "_count")
+
+ count := &model.Sample{
+ Metric: model.Metric(lset),
+ Value: model.SampleValue(m.Histogram.GetSampleCount()),
+ Timestamp: timestamp,
+ }
+ samples = append(samples, count)
+
+ if !infSeen {
+ // Append an infinity bucket sample.
+ lset := make(model.LabelSet, len(m.Label)+2)
+ for _, p := range m.Label {
+ lset[model.LabelName(p.GetName())] = model.LabelValue(p.GetValue())
+ }
+ lset[model.LabelName(model.BucketLabel)] = model.LabelValue("+Inf")
+ lset[model.MetricNameLabel] = model.LabelValue(f.GetName() + "_bucket")
+
+ samples = append(samples, &model.Sample{
+ Metric: model.Metric(lset),
+ Value: count.Value,
+ Timestamp: timestamp,
+ })
+ }
+ }
+
+ return samples
+}
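
Editor's note: a minimal decoding sketch, not part of the vendored file; the input string is made up for illustration. It feeds text-format input through NewDecoder wrapped in a SampleDecoder, stamping samples that carry no explicit timestamp with the current time.

package main

import (
	"fmt"
	"strings"

	"github.com/prometheus/common/expfmt"
	"github.com/prometheus/common/model"
)

func main() {
	in := strings.NewReader("up 1\n")

	dec := expfmt.SampleDecoder{
		Dec:  expfmt.NewDecoder(in, expfmt.FmtText),
		Opts: &expfmt.DecodeOptions{Timestamp: model.Now()},
	}

	var v model.Vector
	for {
		if err := dec.Decode(&v); err != nil {
			break // io.EOF once the input is exhausted.
		}
		for _, s := range v {
			fmt.Println(s) // up => 1 @[<now>]
		}
	}
}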
diff --git a/vendor/github.com/prometheus/common/expfmt/encode.go b/vendor/github.com/prometheus/common/expfmt/encode.go
new file mode 100644
index 00000000..11839ed6
--- /dev/null
+++ b/vendor/github.com/prometheus/common/expfmt/encode.go
@@ -0,0 +1,88 @@
+// Copyright 2015 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package expfmt
+
+import (
+ "fmt"
+ "io"
+ "net/http"
+
+ "github.com/golang/protobuf/proto"
+ "github.com/matttproud/golang_protobuf_extensions/pbutil"
+ "github.com/prometheus/common/internal/bitbucket.org/ww/goautoneg"
+
+ dto "github.com/prometheus/client_model/go"
+)
+
+// Encoder types encode metric families into an underlying wire protocol.
+type Encoder interface {
+ Encode(*dto.MetricFamily) error
+}
+
+type encoder func(*dto.MetricFamily) error
+
+func (e encoder) Encode(v *dto.MetricFamily) error {
+ return e(v)
+}
+
+// Negotiate returns the Content-Type based on the given Accept header.
+// If no appropriate accepted type is found, FmtText is returned.
+func Negotiate(h http.Header) Format {
+ for _, ac := range goautoneg.ParseAccept(h.Get(hdrAccept)) {
+ // Check for protocol buffer
+ if ac.Type+"/"+ac.SubType == ProtoType && ac.Params["proto"] == ProtoProtocol {
+ switch ac.Params["encoding"] {
+ case "delimited":
+ return FmtProtoDelim
+ case "text":
+ return FmtProtoText
+ case "compact-text":
+ return FmtProtoCompact
+ }
+ }
+ // Check for text format.
+ ver := ac.Params["version"]
+ if ac.Type == "text" && ac.SubType == "plain" && (ver == TextVersion || ver == "") {
+ return FmtText
+ }
+ }
+ return FmtText
+}
+
+// NewEncoder returns a new encoder based on content type negotiation.
+func NewEncoder(w io.Writer, format Format) Encoder {
+ switch format {
+ case FmtProtoDelim:
+ return encoder(func(v *dto.MetricFamily) error {
+ _, err := pbutil.WriteDelimited(w, v)
+ return err
+ })
+ case FmtProtoCompact:
+ return encoder(func(v *dto.MetricFamily) error {
+ _, err := fmt.Fprintln(w, v.String())
+ return err
+ })
+ case FmtProtoText:
+ return encoder(func(v *dto.MetricFamily) error {
+ _, err := fmt.Fprintln(w, proto.MarshalTextString(v))
+ return err
+ })
+ case FmtText:
+ return encoder(func(v *dto.MetricFamily) error {
+ _, err := MetricFamilyToText(w, v)
+ return err
+ })
+ }
+ panic("expfmt.NewEncoder: unknown format")
+}
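
Editor's note: a minimal serving sketch, not part of the vendored file; the handler and the hard-coded "up" gauge are hypothetical. It negotiates the wire format from the scraper's Accept header and encodes one MetricFamily accordingly.

package main

import (
	"net/http"

	"github.com/golang/protobuf/proto"
	dto "github.com/prometheus/client_model/go"
	"github.com/prometheus/common/expfmt"
)

func metricsHandler(w http.ResponseWriter, r *http.Request) {
	mf := &dto.MetricFamily{
		Name: proto.String("up"),
		Type: dto.MetricType_GAUGE.Enum(),
		Metric: []*dto.Metric{{
			Gauge: &dto.Gauge{Value: proto.Float64(1)},
		}},
	}

	format := expfmt.Negotiate(r.Header) // e.g. FmtText or FmtProtoDelim.
	w.Header().Set("Content-Type", string(format))

	enc := expfmt.NewEncoder(w, format)
	if err := enc.Encode(mf); err != nil {
		http.Error(w, err.Error(), http.StatusInternalServerError)
	}
}

func main() {
	http.HandleFunc("/metrics", metricsHandler)
	http.ListenAndServe(":8080", nil)
}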
diff --git a/vendor/github.com/prometheus/common/expfmt/expfmt.go b/vendor/github.com/prometheus/common/expfmt/expfmt.go
new file mode 100644
index 00000000..366fbde9
--- /dev/null
+++ b/vendor/github.com/prometheus/common/expfmt/expfmt.go
@@ -0,0 +1,40 @@
+// Copyright 2015 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// Package expfmt contains tools for reading and writing Prometheus metrics.
+package expfmt
+
+type Format string
+
+const (
+ TextVersion = "0.0.4"
+
+ ProtoType = `application/vnd.google.protobuf`
+ ProtoProtocol = `io.prometheus.client.MetricFamily`
+ ProtoFmt = ProtoType + "; proto=" + ProtoProtocol + ";"
+
+ // The Content-Type values for the different wire protocols.
+ FmtUnknown Format = ``
+ FmtText Format = `text/plain; version=` + TextVersion
+ FmtProtoDelim Format = ProtoFmt + ` encoding=delimited`
+ FmtProtoText Format = ProtoFmt + ` encoding=text`
+ FmtProtoCompact Format = ProtoFmt + ` encoding=compact-text`
+
+ // fmtJSON2 is hidden as it is deprecated.
+ fmtJSON2 Format = `application/json; version=0.0.2`
+)
+
+const (
+ hdrContentType = "Content-Type"
+ hdrAccept = "Accept"
+)
diff --git a/vendor/github.com/prometheus/common/expfmt/fuzz.go b/vendor/github.com/prometheus/common/expfmt/fuzz.go
new file mode 100644
index 00000000..14f92014
--- /dev/null
+++ b/vendor/github.com/prometheus/common/expfmt/fuzz.go
@@ -0,0 +1,36 @@
+// Copyright 2014 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// Build only when actually fuzzing
+// +build gofuzz
+
+package expfmt
+
+import "bytes"
+
+// Fuzz text metric parser with github.com/dvyukov/go-fuzz:
+//
+// go-fuzz-build github.com/prometheus/client_golang/text
+// go-fuzz -bin text-fuzz.zip -workdir fuzz
+//
+// Further input samples should go in the folder fuzz/corpus.
+func Fuzz(in []byte) int {
+ parser := TextParser{}
+ _, err := parser.TextToMetricFamilies(bytes.NewReader(in))
+
+ if err != nil {
+ return 0
+ }
+
+ return 1
+}
diff --git a/vendor/github.com/prometheus/common/expfmt/json_decode.go b/vendor/github.com/prometheus/common/expfmt/json_decode.go
new file mode 100644
index 00000000..cf865451
--- /dev/null
+++ b/vendor/github.com/prometheus/common/expfmt/json_decode.go
@@ -0,0 +1,174 @@
+// Copyright 2015 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package expfmt
+
+import (
+ "encoding/json"
+ "fmt"
+ "io"
+ "sort"
+
+ "github.com/golang/protobuf/proto"
+ dto "github.com/prometheus/client_model/go"
+
+ "github.com/prometheus/common/model"
+)
+
+type json2Decoder struct {
+ dec *json.Decoder
+ fams []*dto.MetricFamily
+}
+
+func newJSON2Decoder(r io.Reader) Decoder {
+ return &json2Decoder{
+ dec: json.NewDecoder(r),
+ }
+}
+
+type histogram002 struct {
+ Labels model.LabelSet `json:"labels"`
+ Values map[string]float64 `json:"value"`
+}
+
+type counter002 struct {
+ Labels model.LabelSet `json:"labels"`
+ Value float64 `json:"value"`
+}
+
+func protoLabelSet(base, ext model.LabelSet) ([]*dto.LabelPair, error) {
+ labels := base.Clone().Merge(ext)
+ delete(labels, model.MetricNameLabel)
+
+ names := make([]string, 0, len(labels))
+ for ln := range labels {
+ names = append(names, string(ln))
+ }
+ sort.Strings(names)
+
+ pairs := make([]*dto.LabelPair, 0, len(labels))
+
+ for _, ln := range names {
+ if !model.LabelNameRE.MatchString(ln) {
+ return nil, fmt.Errorf("invalid label name %q", ln)
+ }
+ lv := labels[model.LabelName(ln)]
+
+ pairs = append(pairs, &dto.LabelPair{
+ Name: proto.String(ln),
+ Value: proto.String(string(lv)),
+ })
+ }
+
+ return pairs, nil
+}
+
+func (d *json2Decoder) more() error {
+ var entities []struct {
+ BaseLabels model.LabelSet `json:"baseLabels"`
+ Docstring string `json:"docstring"`
+ Metric struct {
+ Type string `json:"type"`
+ Values json.RawMessage `json:"value"`
+ } `json:"metric"`
+ }
+
+ if err := d.dec.Decode(&entities); err != nil {
+ return err
+ }
+ for _, e := range entities {
+ f := &dto.MetricFamily{
+ Name: proto.String(string(e.BaseLabels[model.MetricNameLabel])),
+ Help: proto.String(e.Docstring),
+ Type: dto.MetricType_UNTYPED.Enum(),
+ Metric: []*dto.Metric{},
+ }
+
+ d.fams = append(d.fams, f)
+
+ switch e.Metric.Type {
+ case "counter", "gauge":
+ var values []counter002
+
+ if err := json.Unmarshal(e.Metric.Values, &values); err != nil {
+ return fmt.Errorf("could not extract %s value: %s", e.Metric.Type, err)
+ }
+
+ for _, ctr := range values {
+ labels, err := protoLabelSet(e.BaseLabels, ctr.Labels)
+ if err != nil {
+ return err
+ }
+ f.Metric = append(f.Metric, &dto.Metric{
+ Label: labels,
+ Untyped: &dto.Untyped{
+ Value: proto.Float64(ctr.Value),
+ },
+ })
+ }
+
+ case "histogram":
+ var values []histogram002
+
+ if err := json.Unmarshal(e.Metric.Values, &values); err != nil {
+ return fmt.Errorf("could not extract %s value: %s", e.Metric.Type, err)
+ }
+
+ for _, hist := range values {
+ quants := make([]string, 0, len(values))
+ for q := range hist.Values {
+ quants = append(quants, q)
+ }
+
+ sort.Strings(quants)
+
+ for _, q := range quants {
+ value := hist.Values[q]
+ // The correct label is "quantile" but to not break old expressions
+ // this remains "percentile"
+ hist.Labels["percentile"] = model.LabelValue(q)
+
+ labels, err := protoLabelSet(e.BaseLabels, hist.Labels)
+ if err != nil {
+ return err
+ }
+
+ f.Metric = append(f.Metric, &dto.Metric{
+ Label: labels,
+ Untyped: &dto.Untyped{
+ Value: proto.Float64(value),
+ },
+ })
+ }
+ }
+
+ default:
+ return fmt.Errorf("unknown metric type %q", e.Metric.Type)
+ }
+ }
+ return nil
+}
+
+// Decode implements the Decoder interface.
+func (d *json2Decoder) Decode(v *dto.MetricFamily) error {
+ if len(d.fams) == 0 {
+ if err := d.more(); err != nil {
+ return err
+ }
+ }
+
+ *v = *d.fams[0]
+ d.fams = d.fams[1:]
+
+ return nil
+}
diff --git a/vendor/github.com/prometheus/common/expfmt/text_create.go b/vendor/github.com/prometheus/common/expfmt/text_create.go
new file mode 100644
index 00000000..0bb9c14c
--- /dev/null
+++ b/vendor/github.com/prometheus/common/expfmt/text_create.go
@@ -0,0 +1,305 @@
+// Copyright 2014 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package expfmt
+
+import (
+ "bytes"
+ "fmt"
+ "io"
+ "math"
+ "strings"
+
+ dto "github.com/prometheus/client_model/go"
+ "github.com/prometheus/common/model"
+)
+
+// MetricFamilyToText converts a MetricFamily proto message into text format and
+// writes the resulting lines to 'out'. It returns the number of bytes written
+// and any error encountered. This function does not perform checks on the
+// content of the metric and label names, i.e. invalid metric or label names
+// will result in invalid text format output.
+// This method fulfills the type 'prometheus.encoder'.
+func MetricFamilyToText(out io.Writer, in *dto.MetricFamily) (int, error) {
+ var written int
+
+ // Fail-fast checks.
+ if len(in.Metric) == 0 {
+ return written, fmt.Errorf("MetricFamily has no metrics: %s", in)
+ }
+ name := in.GetName()
+ if name == "" {
+ return written, fmt.Errorf("MetricFamily has no name: %s", in)
+ }
+
+ // Comments, first HELP, then TYPE.
+ if in.Help != nil {
+ n, err := fmt.Fprintf(
+ out, "# HELP %s %s\n",
+ name, escapeString(*in.Help, false),
+ )
+ written += n
+ if err != nil {
+ return written, err
+ }
+ }
+ metricType := in.GetType()
+ n, err := fmt.Fprintf(
+ out, "# TYPE %s %s\n",
+ name, strings.ToLower(metricType.String()),
+ )
+ written += n
+ if err != nil {
+ return written, err
+ }
+
+ // Finally the samples, one line for each.
+ for _, metric := range in.Metric {
+ switch metricType {
+ case dto.MetricType_COUNTER:
+ if metric.Counter == nil {
+ return written, fmt.Errorf(
+ "expected counter in metric %s %s", name, metric,
+ )
+ }
+ n, err = writeSample(
+ name, metric, "", "",
+ metric.Counter.GetValue(),
+ out,
+ )
+ case dto.MetricType_GAUGE:
+ if metric.Gauge == nil {
+ return written, fmt.Errorf(
+ "expected gauge in metric %s %s", name, metric,
+ )
+ }
+ n, err = writeSample(
+ name, metric, "", "",
+ metric.Gauge.GetValue(),
+ out,
+ )
+ case dto.MetricType_UNTYPED:
+ if metric.Untyped == nil {
+ return written, fmt.Errorf(
+ "expected untyped in metric %s %s", name, metric,
+ )
+ }
+ n, err = writeSample(
+ name, metric, "", "",
+ metric.Untyped.GetValue(),
+ out,
+ )
+ case dto.MetricType_SUMMARY:
+ if metric.Summary == nil {
+ return written, fmt.Errorf(
+ "expected summary in metric %s %s", name, metric,
+ )
+ }
+ for _, q := range metric.Summary.Quantile {
+ n, err = writeSample(
+ name, metric,
+ model.QuantileLabel, fmt.Sprint(q.GetQuantile()),
+ q.GetValue(),
+ out,
+ )
+ written += n
+ if err != nil {
+ return written, err
+ }
+ }
+ n, err = writeSample(
+ name+"_sum", metric, "", "",
+ metric.Summary.GetSampleSum(),
+ out,
+ )
+ if err != nil {
+ return written, err
+ }
+ written += n
+ n, err = writeSample(
+ name+"_count", metric, "", "",
+ float64(metric.Summary.GetSampleCount()),
+ out,
+ )
+ case dto.MetricType_HISTOGRAM:
+ if metric.Histogram == nil {
+ return written, fmt.Errorf(
+ "expected histogram in metric %s %s", name, metric,
+ )
+ }
+ infSeen := false
+ for _, q := range metric.Histogram.Bucket {
+ n, err = writeSample(
+ name+"_bucket", metric,
+ model.BucketLabel, fmt.Sprint(q.GetUpperBound()),
+ float64(q.GetCumulativeCount()),
+ out,
+ )
+ written += n
+ if err != nil {
+ return written, err
+ }
+ if math.IsInf(q.GetUpperBound(), +1) {
+ infSeen = true
+ }
+ }
+ if !infSeen {
+ n, err = writeSample(
+ name+"_bucket", metric,
+ model.BucketLabel, "+Inf",
+ float64(metric.Histogram.GetSampleCount()),
+ out,
+ )
+ if err != nil {
+ return written, err
+ }
+ written += n
+ }
+ n, err = writeSample(
+ name+"_sum", metric, "", "",
+ metric.Histogram.GetSampleSum(),
+ out,
+ )
+ if err != nil {
+ return written, err
+ }
+ written += n
+ n, err = writeSample(
+ name+"_count", metric, "", "",
+ float64(metric.Histogram.GetSampleCount()),
+ out,
+ )
+ default:
+ return written, fmt.Errorf(
+ "unexpected type in metric %s %s", name, metric,
+ )
+ }
+ written += n
+ if err != nil {
+ return written, err
+ }
+ }
+ return written, nil
+}
+
+// writeSample writes a single sample in text format to out, given the metric
+// name, the metric proto message itself, optionally an additional label name
+// and value (use empty strings if not required), and the value. The function
+// returns the number of bytes written and any error encountered.
+func writeSample(
+ name string,
+ metric *dto.Metric,
+ additionalLabelName, additionalLabelValue string,
+ value float64,
+ out io.Writer,
+) (int, error) {
+ var written int
+ n, err := fmt.Fprint(out, name)
+ written += n
+ if err != nil {
+ return written, err
+ }
+ n, err = labelPairsToText(
+ metric.Label,
+ additionalLabelName, additionalLabelValue,
+ out,
+ )
+ written += n
+ if err != nil {
+ return written, err
+ }
+ n, err = fmt.Fprintf(out, " %v", value)
+ written += n
+ if err != nil {
+ return written, err
+ }
+ if metric.TimestampMs != nil {
+ n, err = fmt.Fprintf(out, " %v", *metric.TimestampMs)
+ written += n
+ if err != nil {
+ return written, err
+ }
+ }
+ n, err = out.Write([]byte{'\n'})
+ written += n
+ if err != nil {
+ return written, err
+ }
+ return written, nil
+}
+
+// labelPairsToText converts a slice of LabelPair proto messages plus the
+// explicitly given additional label pair into text formatted as required by the
+// text format and writes it to 'out'. An empty slice in combination with an
+// empty string 'additionalLabelName' results in nothing being
+// written. Otherwise, the label pairs are written, escaped as required by the
+// text format, and enclosed in '{...}'. The function returns the number of
+// bytes written and any error encountered.
+func labelPairsToText(
+ in []*dto.LabelPair,
+ additionalLabelName, additionalLabelValue string,
+ out io.Writer,
+) (int, error) {
+ if len(in) == 0 && additionalLabelName == "" {
+ return 0, nil
+ }
+ var written int
+ separator := '{'
+ for _, lp := range in {
+ n, err := fmt.Fprintf(
+ out, `%c%s="%s"`,
+ separator, lp.GetName(), escapeString(lp.GetValue(), true),
+ )
+ written += n
+ if err != nil {
+ return written, err
+ }
+ separator = ','
+ }
+ if additionalLabelName != "" {
+ n, err := fmt.Fprintf(
+ out, `%c%s="%s"`,
+ separator, additionalLabelName,
+ escapeString(additionalLabelValue, true),
+ )
+ written += n
+ if err != nil {
+ return written, err
+ }
+ }
+ n, err := out.Write([]byte{'}'})
+ written += n
+ if err != nil {
+ return written, err
+ }
+ return written, nil
+}
+
+// escapeString replaces '\' by '\\', new line character by '\n', and - if
+// includeDoubleQuote is true - '"' by '\"'.
+func escapeString(v string, includeDoubleQuote bool) string {
+ result := bytes.NewBuffer(make([]byte, 0, len(v)))
+ for _, c := range v {
+ switch {
+ case c == '\\':
+ result.WriteString(`\\`)
+ case includeDoubleQuote && c == '"':
+ result.WriteString(`\"`)
+ case c == '\n':
+ result.WriteString(`\n`)
+ default:
+ result.WriteRune(c)
+ }
+ }
+ return result.String()
+}
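
Editor's note: a minimal round-trip sketch, not part of the vendored file; the summary data is made up. It renders a MetricFamily with MetricFamilyToText and parses the text back with the TextParser defined in text_parse.go below.

package main

import (
	"bytes"
	"fmt"

	"github.com/golang/protobuf/proto"
	dto "github.com/prometheus/client_model/go"
	"github.com/prometheus/common/expfmt"
)

func main() {
	mf := &dto.MetricFamily{
		Name: proto.String("rpc_duration_seconds"),
		Help: proto.String("RPC latency distribution."),
		Type: dto.MetricType_SUMMARY.Enum(),
		Metric: []*dto.Metric{{
			Summary: &dto.Summary{
				SampleCount: proto.Uint64(3),
				SampleSum:   proto.Float64(0.6),
				Quantile: []*dto.Quantile{
					{Quantile: proto.Float64(0.5), Value: proto.Float64(0.2)},
				},
			},
		}},
	}

	var buf bytes.Buffer
	if _, err := expfmt.MetricFamilyToText(&buf, mf); err != nil {
		panic(err)
	}
	fmt.Print(buf.String())
	// # HELP rpc_duration_seconds RPC latency distribution.
	// # TYPE rpc_duration_seconds summary
	// rpc_duration_seconds{quantile="0.5"} 0.2
	// rpc_duration_seconds_sum 0.6
	// rpc_duration_seconds_count 3

	var p expfmt.TextParser
	fams, err := p.TextToMetricFamilies(&buf)
	if err != nil {
		panic(err)
	}
	fmt.Println(len(fams)) // 1: the parser folds _sum and _count back into the summary family.
}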
diff --git a/vendor/github.com/prometheus/common/expfmt/text_parse.go b/vendor/github.com/prometheus/common/expfmt/text_parse.go
new file mode 100644
index 00000000..84433bc4
--- /dev/null
+++ b/vendor/github.com/prometheus/common/expfmt/text_parse.go
@@ -0,0 +1,746 @@
+// Copyright 2014 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package expfmt
+
+import (
+ "bufio"
+ "bytes"
+ "fmt"
+ "io"
+ "math"
+ "strconv"
+ "strings"
+
+ dto "github.com/prometheus/client_model/go"
+
+ "github.com/golang/protobuf/proto"
+ "github.com/prometheus/common/model"
+)
+
+// A stateFn is a function that represents a state in a state machine. By
+// executing it, the state is progressed to the next state. The stateFn returns
+// another stateFn, which represents the new state. The end state is represented
+// by nil.
+type stateFn func() stateFn
+
+// ParseError signals errors while parsing the simple and flat text-based
+// exchange format.
+type ParseError struct {
+ Line int
+ Msg string
+}
+
+// Error implements the error interface.
+func (e ParseError) Error() string {
+ return fmt.Sprintf("text format parsing error in line %d: %s", e.Line, e.Msg)
+}
+
+// TextParser is used to parse the simple and flat text-based exchange format. Its
+// zero value is ready to use.
+type TextParser struct {
+ metricFamiliesByName map[string]*dto.MetricFamily
+ buf *bufio.Reader // Where the parsed input is read through.
+ err error // Most recent error.
+ lineCount int // Tracks the line count for error messages.
+ currentByte byte // The most recent byte read.
+ currentToken bytes.Buffer // Re-used each time a token has to be gathered from multiple bytes.
+ currentMF *dto.MetricFamily
+ currentMetric *dto.Metric
+ currentLabelPair *dto.LabelPair
+
+ // The remaining member variables are only used for summaries/histograms.
+ currentLabels map[string]string // All labels including '__name__' but excluding 'quantile'/'le'
+ // Summary specific.
+ summaries map[uint64]*dto.Metric // Key is created with LabelsToSignature.
+ currentQuantile float64
+ // Histogram specific.
+ histograms map[uint64]*dto.Metric // Key is created with LabelsToSignature.
+ currentBucket float64
+	// These tell us whether the currently processed line ends in '_count' or
+	// '_sum' and belongs to a summary/histogram, representing the sample count
+	// or sum of that summary/histogram.
+ currentIsSummaryCount, currentIsSummarySum bool
+ currentIsHistogramCount, currentIsHistogramSum bool
+}
+
+// TextToMetricFamilies reads 'in' as the simple and flat text-based exchange
+// format and creates MetricFamily proto messages. It returns the MetricFamily
+// proto messages in a map where the metric names are the keys, along with any
+// error encountered.
+//
+// If the input contains duplicate metrics (i.e. lines with the same metric name
+// and exactly the same label set), the resulting MetricFamily will contain
+// duplicate Metric proto messages. Similar is true for duplicate label
+// names. Checks for duplicates have to be performed separately, if required.
+// Also note that neither the metrics within each MetricFamily are sorted nor
+// the label pairs within each Metric. Sorting is not required for the most
+// frequent use of this method, which is sample ingestion in the Prometheus
+// server. However, for presentation purposes, you might want to sort the
+// metrics, and in some cases, you must sort the labels, e.g. for consumption by
+// the metric family injection hook of the Prometheus registry.
+//
+// Summaries and histograms are rather special beasts. You would probably not
+// use them in the simple text format anyway. This method can deal with
+// summaries and histograms if they are presented in exactly the way the
+// text.Create function creates them.
+//
+// This method must not be called concurrently. If you want to parse different
+// input concurrently, instantiate a separate TextParser for each goroutine.
+func (p *TextParser) TextToMetricFamilies(in io.Reader) (map[string]*dto.MetricFamily, error) {
+ p.reset(in)
+ for nextState := p.startOfLine; nextState != nil; nextState = nextState() {
+ // Magic happens here...
+ }
+ // Get rid of empty metric families.
+ for k, mf := range p.metricFamiliesByName {
+ if len(mf.GetMetric()) == 0 {
+ delete(p.metricFamiliesByName, k)
+ }
+ }
+ return p.metricFamiliesByName, p.err
+}
+
+func (p *TextParser) reset(in io.Reader) {
+ p.metricFamiliesByName = map[string]*dto.MetricFamily{}
+ if p.buf == nil {
+ p.buf = bufio.NewReader(in)
+ } else {
+ p.buf.Reset(in)
+ }
+ p.err = nil
+ p.lineCount = 0
+ if p.summaries == nil || len(p.summaries) > 0 {
+ p.summaries = map[uint64]*dto.Metric{}
+ }
+ if p.histograms == nil || len(p.histograms) > 0 {
+ p.histograms = map[uint64]*dto.Metric{}
+ }
+ p.currentQuantile = math.NaN()
+ p.currentBucket = math.NaN()
+}
+
+// startOfLine represents the state where the next byte read from p.buf is the
+// start of a line (or whitespace leading up to it).
+func (p *TextParser) startOfLine() stateFn {
+ p.lineCount++
+ if p.skipBlankTab(); p.err != nil {
+ // End of input reached. This is the only case where
+ // that is not an error but a signal that we are done.
+ p.err = nil
+ return nil
+ }
+ switch p.currentByte {
+ case '#':
+ return p.startComment
+ case '\n':
+ return p.startOfLine // Empty line, start the next one.
+ }
+ return p.readingMetricName
+}
+
+// startComment represents the state where the next byte read from p.buf is the
+// start of a comment (or whitespace leading up to it).
+func (p *TextParser) startComment() stateFn {
+ if p.skipBlankTab(); p.err != nil {
+ return nil // Unexpected end of input.
+ }
+ if p.currentByte == '\n' {
+ return p.startOfLine
+ }
+ if p.readTokenUntilWhitespace(); p.err != nil {
+ return nil // Unexpected end of input.
+ }
+ // If we have hit the end of line already, there is nothing left
+ // to do. This is not considered a syntax error.
+ if p.currentByte == '\n' {
+ return p.startOfLine
+ }
+ keyword := p.currentToken.String()
+ if keyword != "HELP" && keyword != "TYPE" {
+ // Generic comment, ignore by fast forwarding to end of line.
+ for p.currentByte != '\n' {
+ if p.currentByte, p.err = p.buf.ReadByte(); p.err != nil {
+ return nil // Unexpected end of input.
+ }
+ }
+ return p.startOfLine
+ }
+ // There is something. Next has to be a metric name.
+ if p.skipBlankTab(); p.err != nil {
+ return nil // Unexpected end of input.
+ }
+ if p.readTokenAsMetricName(); p.err != nil {
+ return nil // Unexpected end of input.
+ }
+ if p.currentByte == '\n' {
+ // At the end of the line already.
+ // Again, this is not considered a syntax error.
+ return p.startOfLine
+ }
+ if !isBlankOrTab(p.currentByte) {
+ p.parseError("invalid metric name in comment")
+ return nil
+ }
+ p.setOrCreateCurrentMF()
+ if p.skipBlankTab(); p.err != nil {
+ return nil // Unexpected end of input.
+ }
+ if p.currentByte == '\n' {
+ // At the end of the line already.
+ // Again, this is not considered a syntax error.
+ return p.startOfLine
+ }
+ switch keyword {
+ case "HELP":
+ return p.readingHelp
+ case "TYPE":
+ return p.readingType
+ }
+ panic(fmt.Sprintf("code error: unexpected keyword %q", keyword))
+}
+
+// readingMetricName represents the state where the last byte read (now in
+// p.currentByte) is the first byte of a metric name.
+func (p *TextParser) readingMetricName() stateFn {
+ if p.readTokenAsMetricName(); p.err != nil {
+ return nil
+ }
+ if p.currentToken.Len() == 0 {
+ p.parseError("invalid metric name")
+ return nil
+ }
+ p.setOrCreateCurrentMF()
+ // Now is the time to fix the type if it hasn't happened yet.
+ if p.currentMF.Type == nil {
+ p.currentMF.Type = dto.MetricType_UNTYPED.Enum()
+ }
+ p.currentMetric = &dto.Metric{}
+	// Do not append the newly created currentMetric to currentMF.Metric right
+	// now. First check whether this is a summary or histogram whose metric
+	// already exists, which we can only know after having read all the labels.
+ if p.skipBlankTabIfCurrentBlankTab(); p.err != nil {
+ return nil // Unexpected end of input.
+ }
+ return p.readingLabels
+}
+
+// readingLabels represents the state where the last byte read (now in
+// p.currentByte) is either the first byte of the label set (i.e. a '{'), or the
+// first byte of the value (otherwise).
+func (p *TextParser) readingLabels() stateFn {
+ // Summaries/histograms are special. We have to reset the
+ // currentLabels map, currentQuantile and currentBucket before starting to
+ // read labels.
+ if p.currentMF.GetType() == dto.MetricType_SUMMARY || p.currentMF.GetType() == dto.MetricType_HISTOGRAM {
+ p.currentLabels = map[string]string{}
+ p.currentLabels[string(model.MetricNameLabel)] = p.currentMF.GetName()
+ p.currentQuantile = math.NaN()
+ p.currentBucket = math.NaN()
+ }
+ if p.currentByte != '{' {
+ return p.readingValue
+ }
+ return p.startLabelName
+}
+
+// startLabelName represents the state where the next byte read from p.buf is
+// the start of a label name (or whitespace leading up to it).
+func (p *TextParser) startLabelName() stateFn {
+ if p.skipBlankTab(); p.err != nil {
+ return nil // Unexpected end of input.
+ }
+ if p.currentByte == '}' {
+ if p.skipBlankTab(); p.err != nil {
+ return nil // Unexpected end of input.
+ }
+ return p.readingValue
+ }
+ if p.readTokenAsLabelName(); p.err != nil {
+ return nil // Unexpected end of input.
+ }
+ if p.currentToken.Len() == 0 {
+ p.parseError(fmt.Sprintf("invalid label name for metric %q", p.currentMF.GetName()))
+ return nil
+ }
+ p.currentLabelPair = &dto.LabelPair{Name: proto.String(p.currentToken.String())}
+ if p.currentLabelPair.GetName() == string(model.MetricNameLabel) {
+ p.parseError(fmt.Sprintf("label name %q is reserved", model.MetricNameLabel))
+ return nil
+ }
+ // Special summary/histogram treatment. Don't add 'quantile' and 'le'
+ // labels to 'real' labels.
+ if !(p.currentMF.GetType() == dto.MetricType_SUMMARY && p.currentLabelPair.GetName() == model.QuantileLabel) &&
+ !(p.currentMF.GetType() == dto.MetricType_HISTOGRAM && p.currentLabelPair.GetName() == model.BucketLabel) {
+ p.currentMetric.Label = append(p.currentMetric.Label, p.currentLabelPair)
+ }
+ if p.skipBlankTabIfCurrentBlankTab(); p.err != nil {
+ return nil // Unexpected end of input.
+ }
+ if p.currentByte != '=' {
+ p.parseError(fmt.Sprintf("expected '=' after label name, found %q", p.currentByte))
+ return nil
+ }
+ return p.startLabelValue
+}
+
+// startLabelValue represents the state where the next byte read from p.buf is
+// the start of a (quoted) label value (or whitespace leading up to it).
+func (p *TextParser) startLabelValue() stateFn {
+ if p.skipBlankTab(); p.err != nil {
+ return nil // Unexpected end of input.
+ }
+ if p.currentByte != '"' {
+ p.parseError(fmt.Sprintf("expected '\"' at start of label value, found %q", p.currentByte))
+ return nil
+ }
+ if p.readTokenAsLabelValue(); p.err != nil {
+ return nil
+ }
+ p.currentLabelPair.Value = proto.String(p.currentToken.String())
+ // Special treatment of summaries:
+ // - Quantile labels are special, will result in dto.Quantile later.
+ // - Other labels have to be added to currentLabels for signature calculation.
+ if p.currentMF.GetType() == dto.MetricType_SUMMARY {
+ if p.currentLabelPair.GetName() == model.QuantileLabel {
+ if p.currentQuantile, p.err = strconv.ParseFloat(p.currentLabelPair.GetValue(), 64); p.err != nil {
+ // Create a more helpful error message.
+ p.parseError(fmt.Sprintf("expected float as value for 'quantile' label, got %q", p.currentLabelPair.GetValue()))
+ return nil
+ }
+ } else {
+ p.currentLabels[p.currentLabelPair.GetName()] = p.currentLabelPair.GetValue()
+ }
+ }
+ // Similar special treatment of histograms.
+ if p.currentMF.GetType() == dto.MetricType_HISTOGRAM {
+ if p.currentLabelPair.GetName() == model.BucketLabel {
+ if p.currentBucket, p.err = strconv.ParseFloat(p.currentLabelPair.GetValue(), 64); p.err != nil {
+ // Create a more helpful error message.
+ p.parseError(fmt.Sprintf("expected float as value for 'le' label, got %q", p.currentLabelPair.GetValue()))
+ return nil
+ }
+ } else {
+ p.currentLabels[p.currentLabelPair.GetName()] = p.currentLabelPair.GetValue()
+ }
+ }
+ if p.skipBlankTab(); p.err != nil {
+ return nil // Unexpected end of input.
+ }
+ switch p.currentByte {
+ case ',':
+ return p.startLabelName
+
+ case '}':
+ if p.skipBlankTab(); p.err != nil {
+ return nil // Unexpected end of input.
+ }
+ return p.readingValue
+ default:
+ p.parseError(fmt.Sprintf("unexpected end of label value %q", p.currentLabelPair.Value))
+ return nil
+ }
+}
+
+// readingValue represents the state where the last byte read (now in
+// p.currentByte) is the first byte of the sample value (i.e. a float).
+func (p *TextParser) readingValue() stateFn {
+ // When we are here, we have read all the labels, so for the
+ // special case of a summary/histogram, we can finally find out
+ // if the metric already exists.
+ if p.currentMF.GetType() == dto.MetricType_SUMMARY {
+ signature := model.LabelsToSignature(p.currentLabels)
+ if summary := p.summaries[signature]; summary != nil {
+ p.currentMetric = summary
+ } else {
+ p.summaries[signature] = p.currentMetric
+ p.currentMF.Metric = append(p.currentMF.Metric, p.currentMetric)
+ }
+ } else if p.currentMF.GetType() == dto.MetricType_HISTOGRAM {
+ signature := model.LabelsToSignature(p.currentLabels)
+ if histogram := p.histograms[signature]; histogram != nil {
+ p.currentMetric = histogram
+ } else {
+ p.histograms[signature] = p.currentMetric
+ p.currentMF.Metric = append(p.currentMF.Metric, p.currentMetric)
+ }
+ } else {
+ p.currentMF.Metric = append(p.currentMF.Metric, p.currentMetric)
+ }
+ if p.readTokenUntilWhitespace(); p.err != nil {
+ return nil // Unexpected end of input.
+ }
+ value, err := strconv.ParseFloat(p.currentToken.String(), 64)
+ if err != nil {
+ // Create a more helpful error message.
+ p.parseError(fmt.Sprintf("expected float as value, got %q", p.currentToken.String()))
+ return nil
+ }
+ switch p.currentMF.GetType() {
+ case dto.MetricType_COUNTER:
+ p.currentMetric.Counter = &dto.Counter{Value: proto.Float64(value)}
+ case dto.MetricType_GAUGE:
+ p.currentMetric.Gauge = &dto.Gauge{Value: proto.Float64(value)}
+ case dto.MetricType_UNTYPED:
+ p.currentMetric.Untyped = &dto.Untyped{Value: proto.Float64(value)}
+ case dto.MetricType_SUMMARY:
+ // *sigh*
+ if p.currentMetric.Summary == nil {
+ p.currentMetric.Summary = &dto.Summary{}
+ }
+ switch {
+ case p.currentIsSummaryCount:
+ p.currentMetric.Summary.SampleCount = proto.Uint64(uint64(value))
+ case p.currentIsSummarySum:
+ p.currentMetric.Summary.SampleSum = proto.Float64(value)
+ case !math.IsNaN(p.currentQuantile):
+ p.currentMetric.Summary.Quantile = append(
+ p.currentMetric.Summary.Quantile,
+ &dto.Quantile{
+ Quantile: proto.Float64(p.currentQuantile),
+ Value: proto.Float64(value),
+ },
+ )
+ }
+ case dto.MetricType_HISTOGRAM:
+ // *sigh*
+ if p.currentMetric.Histogram == nil {
+ p.currentMetric.Histogram = &dto.Histogram{}
+ }
+ switch {
+ case p.currentIsHistogramCount:
+ p.currentMetric.Histogram.SampleCount = proto.Uint64(uint64(value))
+ case p.currentIsHistogramSum:
+ p.currentMetric.Histogram.SampleSum = proto.Float64(value)
+ case !math.IsNaN(p.currentBucket):
+ p.currentMetric.Histogram.Bucket = append(
+ p.currentMetric.Histogram.Bucket,
+ &dto.Bucket{
+ UpperBound: proto.Float64(p.currentBucket),
+ CumulativeCount: proto.Uint64(uint64(value)),
+ },
+ )
+ }
+ default:
+ p.err = fmt.Errorf("unexpected type for metric name %q", p.currentMF.GetName())
+ }
+ if p.currentByte == '\n' {
+ return p.startOfLine
+ }
+ return p.startTimestamp
+}
+
+// startTimestamp represents the state where the next byte read from p.buf is
+// the start of the timestamp (or whitespace leading up to it).
+func (p *TextParser) startTimestamp() stateFn {
+ if p.skipBlankTab(); p.err != nil {
+ return nil // Unexpected end of input.
+ }
+ if p.readTokenUntilWhitespace(); p.err != nil {
+ return nil // Unexpected end of input.
+ }
+ timestamp, err := strconv.ParseInt(p.currentToken.String(), 10, 64)
+ if err != nil {
+ // Create a more helpful error message.
+ p.parseError(fmt.Sprintf("expected integer as timestamp, got %q", p.currentToken.String()))
+ return nil
+ }
+ p.currentMetric.TimestampMs = proto.Int64(timestamp)
+ if p.readTokenUntilNewline(false); p.err != nil {
+ return nil // Unexpected end of input.
+ }
+ if p.currentToken.Len() > 0 {
+ p.parseError(fmt.Sprintf("spurious string after timestamp: %q", p.currentToken.String()))
+ return nil
+ }
+ return p.startOfLine
+}
+
+// readingHelp represents the state where the last byte read (now in
+// p.currentByte) is the first byte of the docstring after 'HELP'.
+func (p *TextParser) readingHelp() stateFn {
+ if p.currentMF.Help != nil {
+ p.parseError(fmt.Sprintf("second HELP line for metric name %q", p.currentMF.GetName()))
+ return nil
+ }
+ // Rest of line is the docstring.
+ if p.readTokenUntilNewline(true); p.err != nil {
+ return nil // Unexpected end of input.
+ }
+ p.currentMF.Help = proto.String(p.currentToken.String())
+ return p.startOfLine
+}
+
+// readingType represents the state where the last byte read (now in
+// p.currentByte) is the first byte of the type hint after 'TYPE'.
+func (p *TextParser) readingType() stateFn {
+ if p.currentMF.Type != nil {
+ p.parseError(fmt.Sprintf("second TYPE line for metric name %q, or TYPE reported after samples", p.currentMF.GetName()))
+ return nil
+ }
+ // Rest of line is the type.
+ if p.readTokenUntilNewline(false); p.err != nil {
+ return nil // Unexpected end of input.
+ }
+ metricType, ok := dto.MetricType_value[strings.ToUpper(p.currentToken.String())]
+ if !ok {
+ p.parseError(fmt.Sprintf("unknown metric type %q", p.currentToken.String()))
+ return nil
+ }
+ p.currentMF.Type = dto.MetricType(metricType).Enum()
+ return p.startOfLine
+}
+
+// parseError sets p.err to a ParseError at the current line with the given
+// message.
+func (p *TextParser) parseError(msg string) {
+ p.err = ParseError{
+ Line: p.lineCount,
+ Msg: msg,
+ }
+}
+
+// skipBlankTab reads (and discards) bytes from p.buf until it encounters a byte
+// that is neither ' ' nor '\t'. That byte is left in p.currentByte.
+func (p *TextParser) skipBlankTab() {
+ for {
+ if p.currentByte, p.err = p.buf.ReadByte(); p.err != nil || !isBlankOrTab(p.currentByte) {
+ return
+ }
+ }
+}
+
+// skipBlankTabIfCurrentBlankTab works exactly as skipBlankTab but doesn't do
+// anything if p.currentByte is neither ' ' nor '\t'.
+func (p *TextParser) skipBlankTabIfCurrentBlankTab() {
+ if isBlankOrTab(p.currentByte) {
+ p.skipBlankTab()
+ }
+}
+
+// readTokenUntilWhitespace copies bytes from p.buf into p.currentToken. The
+// first byte considered is the byte already read (now in p.currentByte). The
+// first whitespace byte encountered is still copied into p.currentByte, but not
+// into p.currentToken.
+func (p *TextParser) readTokenUntilWhitespace() {
+ p.currentToken.Reset()
+ for p.err == nil && !isBlankOrTab(p.currentByte) && p.currentByte != '\n' {
+ p.currentToken.WriteByte(p.currentByte)
+ p.currentByte, p.err = p.buf.ReadByte()
+ }
+}
+
+// readTokenUntilNewline copies bytes from p.buf into p.currentToken. The first
+// byte considered is the byte already read (now in p.currentByte). The first
+// newline byte encountered is still copied into p.currentByte, but not into
+// p.currentToken. If recognizeEscapeSequence is true, two escape sequences are
+// recognized: '\\' translates into '\', and '\n' into a line-feed character. All
+// other escape sequences are invalid and cause an error.
+func (p *TextParser) readTokenUntilNewline(recognizeEscapeSequence bool) {
+ p.currentToken.Reset()
+ escaped := false
+ for p.err == nil {
+ if recognizeEscapeSequence && escaped {
+ switch p.currentByte {
+ case '\\':
+ p.currentToken.WriteByte(p.currentByte)
+ case 'n':
+ p.currentToken.WriteByte('\n')
+ default:
+ p.parseError(fmt.Sprintf("invalid escape sequence '\\%c'", p.currentByte))
+ return
+ }
+ escaped = false
+ } else {
+ switch p.currentByte {
+ case '\n':
+ return
+ case '\\':
+ escaped = true
+ default:
+ p.currentToken.WriteByte(p.currentByte)
+ }
+ }
+ p.currentByte, p.err = p.buf.ReadByte()
+ }
+}
+
+// readTokenAsMetricName copies a metric name from p.buf into p.currentToken.
+// The first byte considered is the byte already read (now in p.currentByte).
+// The first byte not part of a metric name is still copied into p.currentByte,
+// but not into p.currentToken.
+func (p *TextParser) readTokenAsMetricName() {
+ p.currentToken.Reset()
+ if !isValidMetricNameStart(p.currentByte) {
+ return
+ }
+ for {
+ p.currentToken.WriteByte(p.currentByte)
+ p.currentByte, p.err = p.buf.ReadByte()
+ if p.err != nil || !isValidMetricNameContinuation(p.currentByte) {
+ return
+ }
+ }
+}
+
+// readTokenAsLabelName copies a label name from p.buf into p.currentToken.
+// The first byte considered is the byte already read (now in p.currentByte).
+// The first byte not part of a label name is still copied into p.currentByte,
+// but not into p.currentToken.
+func (p *TextParser) readTokenAsLabelName() {
+ p.currentToken.Reset()
+ if !isValidLabelNameStart(p.currentByte) {
+ return
+ }
+ for {
+ p.currentToken.WriteByte(p.currentByte)
+ p.currentByte, p.err = p.buf.ReadByte()
+ if p.err != nil || !isValidLabelNameContinuation(p.currentByte) {
+ return
+ }
+ }
+}
+
+// readTokenAsLabelValue copies a label value from p.buf into p.currentToken.
+// In contrast to the other 'readTokenAs...' functions, which start with the
+// last read byte in p.currentByte, this method ignores p.currentByte and starts
+// with reading a new byte from p.buf. The first byte not part of a label value
+// is still copied into p.currentByte, but not into p.currentToken.
+func (p *TextParser) readTokenAsLabelValue() {
+ p.currentToken.Reset()
+ escaped := false
+ for {
+ if p.currentByte, p.err = p.buf.ReadByte(); p.err != nil {
+ return
+ }
+ if escaped {
+ switch p.currentByte {
+ case '"', '\\':
+ p.currentToken.WriteByte(p.currentByte)
+ case 'n':
+ p.currentToken.WriteByte('\n')
+ default:
+ p.parseError(fmt.Sprintf("invalid escape sequence '\\%c'", p.currentByte))
+ return
+ }
+ escaped = false
+ continue
+ }
+ switch p.currentByte {
+ case '"':
+ return
+ case '\n':
+ p.parseError(fmt.Sprintf("label value %q contains unescaped new-line", p.currentToken.String()))
+ return
+ case '\\':
+ escaped = true
+ default:
+ p.currentToken.WriteByte(p.currentByte)
+ }
+ }
+}
+
+func (p *TextParser) setOrCreateCurrentMF() {
+ p.currentIsSummaryCount = false
+ p.currentIsSummarySum = false
+ p.currentIsHistogramCount = false
+ p.currentIsHistogramSum = false
+ name := p.currentToken.String()
+ if p.currentMF = p.metricFamiliesByName[name]; p.currentMF != nil {
+ return
+ }
+	// Check whether this is the _sum/_count of a summary or the _sum/_count/_bucket of a histogram.
+ summaryName := summaryMetricName(name)
+ if p.currentMF = p.metricFamiliesByName[summaryName]; p.currentMF != nil {
+ if p.currentMF.GetType() == dto.MetricType_SUMMARY {
+ if isCount(name) {
+ p.currentIsSummaryCount = true
+ }
+ if isSum(name) {
+ p.currentIsSummarySum = true
+ }
+ return
+ }
+ }
+ histogramName := histogramMetricName(name)
+ if p.currentMF = p.metricFamiliesByName[histogramName]; p.currentMF != nil {
+ if p.currentMF.GetType() == dto.MetricType_HISTOGRAM {
+ if isCount(name) {
+ p.currentIsHistogramCount = true
+ }
+ if isSum(name) {
+ p.currentIsHistogramSum = true
+ }
+ return
+ }
+ }
+ p.currentMF = &dto.MetricFamily{Name: proto.String(name)}
+ p.metricFamiliesByName[name] = p.currentMF
+}
+
+func isValidLabelNameStart(b byte) bool {
+ return (b >= 'a' && b <= 'z') || (b >= 'A' && b <= 'Z') || b == '_'
+}
+
+func isValidLabelNameContinuation(b byte) bool {
+ return isValidLabelNameStart(b) || (b >= '0' && b <= '9')
+}
+
+func isValidMetricNameStart(b byte) bool {
+ return isValidLabelNameStart(b) || b == ':'
+}
+
+func isValidMetricNameContinuation(b byte) bool {
+ return isValidLabelNameContinuation(b) || b == ':'
+}
+
+func isBlankOrTab(b byte) bool {
+ return b == ' ' || b == '\t'
+}
+
+func isCount(name string) bool {
+ return len(name) > 6 && name[len(name)-6:] == "_count"
+}
+
+func isSum(name string) bool {
+ return len(name) > 4 && name[len(name)-4:] == "_sum"
+}
+
+func isBucket(name string) bool {
+ return len(name) > 7 && name[len(name)-7:] == "_bucket"
+}
+
+func summaryMetricName(name string) string {
+ switch {
+ case isCount(name):
+ return name[:len(name)-6]
+ case isSum(name):
+ return name[:len(name)-4]
+ default:
+ return name
+ }
+}
+
+func histogramMetricName(name string) string {
+ switch {
+ case isCount(name):
+ return name[:len(name)-6]
+ case isSum(name):
+ return name[:len(name)-4]
+ case isBucket(name):
+ return name[:len(name)-7]
+ default:
+ return name
+ }
+}
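
A note on the parser added above: the state-machine methods and helpers complete the plain-text exposition-format parser in this file. Below is a minimal usage sketch; it assumes the vendored package is importable as github.com/prometheus/common/expfmt and that the TextToMetricFamilies entry point defined earlier in this file is the public API (both are assumptions from upstream, not shown in this hunk).

```go
package main

import (
	"fmt"
	"strings"

	"github.com/prometheus/common/expfmt"
)

func main() {
	input := `# HELP http_requests_total Total number of HTTP requests.
# TYPE http_requests_total counter
http_requests_total{method="get",code="200"} 1027 1395066363000
`
	// TextToMetricFamilies drives the state machine above, returning one
	// MetricFamily per metric name.
	var parser expfmt.TextParser
	families, err := parser.TextToMetricFamilies(strings.NewReader(input))
	if err != nil {
		fmt.Println("parse error:", err)
		return
	}
	for name, mf := range families {
		fmt.Printf("%s: type=%s, %d sample(s)\n", name, mf.GetType(), len(mf.Metric))
	}
}
```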
diff --git a/vendor/github.com/prometheus/common/internal/bitbucket.org/ww/goautoneg/README.txt b/vendor/github.com/prometheus/common/internal/bitbucket.org/ww/goautoneg/README.txt
new file mode 100644
index 00000000..7723656d
--- /dev/null
+++ b/vendor/github.com/prometheus/common/internal/bitbucket.org/ww/goautoneg/README.txt
@@ -0,0 +1,67 @@
+PACKAGE
+
+package goautoneg
+import "bitbucket.org/ww/goautoneg"
+
+HTTP Content-Type Autonegotiation.
+
+The functions in this package implement the behaviour specified in
+http://www.w3.org/Protocols/rfc2616/rfc2616-sec14.html
+
+Copyright (c) 2011, Open Knowledge Foundation Ltd.
+All rights reserved.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are
+met:
+
+ Redistributions of source code must retain the above copyright
+ notice, this list of conditions and the following disclaimer.
+
+ Redistributions in binary form must reproduce the above copyright
+ notice, this list of conditions and the following disclaimer in
+ the documentation and/or other materials provided with the
+ distribution.
+
+ Neither the name of the Open Knowledge Foundation Ltd. nor the
+ names of its contributors may be used to endorse or promote
+ products derived from this software without specific prior written
+ permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+
+FUNCTIONS
+
+func Negotiate(header string, alternatives []string) (content_type string)
+Negotiate the most appropriate content_type given the accept header
+and a list of alternatives.
+
+func ParseAccept(header string) (accept []Accept)
+Parse an Accept Header string returning a sorted list
+of clauses
+
+
+TYPES
+
+type Accept struct {
+ Type, SubType string
+ Q float32
+ Params map[string]string
+}
+Structure to represent a clause in an HTTP Accept Header
+
+
+SUBDIRECTORIES
+
+ .hg
diff --git a/vendor/github.com/prometheus/common/internal/bitbucket.org/ww/goautoneg/autoneg.go b/vendor/github.com/prometheus/common/internal/bitbucket.org/ww/goautoneg/autoneg.go
new file mode 100644
index 00000000..648b38cb
--- /dev/null
+++ b/vendor/github.com/prometheus/common/internal/bitbucket.org/ww/goautoneg/autoneg.go
@@ -0,0 +1,162 @@
+/*
+HTTP Content-Type Autonegotiation.
+
+The functions in this package implement the behaviour specified in
+http://www.w3.org/Protocols/rfc2616/rfc2616-sec14.html
+
+Copyright (c) 2011, Open Knowledge Foundation Ltd.
+All rights reserved.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are
+met:
+
+ Redistributions of source code must retain the above copyright
+ notice, this list of conditions and the following disclaimer.
+
+ Redistributions in binary form must reproduce the above copyright
+ notice, this list of conditions and the following disclaimer in
+ the documentation and/or other materials provided with the
+ distribution.
+
+ Neither the name of the Open Knowledge Foundation Ltd. nor the
+ names of its contributors may be used to endorse or promote
+ products derived from this software without specific prior written
+ permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+
+*/
+package goautoneg
+
+import (
+ "sort"
+ "strconv"
+ "strings"
+)
+
+// Structure to represent a clause in an HTTP Accept Header
+type Accept struct {
+ Type, SubType string
+ Q float64
+ Params map[string]string
+}
+
+// For internal use, so that we can use the sort interface
+type accept_slice []Accept
+
+func (accept accept_slice) Len() int {
+ slice := []Accept(accept)
+ return len(slice)
+}
+
+func (accept accept_slice) Less(i, j int) bool {
+ slice := []Accept(accept)
+ ai, aj := slice[i], slice[j]
+ if ai.Q > aj.Q {
+ return true
+ }
+ if ai.Type != "*" && aj.Type == "*" {
+ return true
+ }
+ if ai.SubType != "*" && aj.SubType == "*" {
+ return true
+ }
+ return false
+}
+
+func (accept accept_slice) Swap(i, j int) {
+ slice := []Accept(accept)
+ slice[i], slice[j] = slice[j], slice[i]
+}
+
+// Parse an Accept Header string returning a sorted list
+// of clauses
+func ParseAccept(header string) (accept []Accept) {
+ parts := strings.Split(header, ",")
+ accept = make([]Accept, 0, len(parts))
+ for _, part := range parts {
+ part := strings.Trim(part, " ")
+
+ a := Accept{}
+ a.Params = make(map[string]string)
+ a.Q = 1.0
+
+ mrp := strings.Split(part, ";")
+
+ media_range := mrp[0]
+ sp := strings.Split(media_range, "/")
+ a.Type = strings.Trim(sp[0], " ")
+
+ switch {
+ case len(sp) == 1 && a.Type == "*":
+ a.SubType = "*"
+ case len(sp) == 2:
+ a.SubType = strings.Trim(sp[1], " ")
+ default:
+ continue
+ }
+
+ if len(mrp) == 1 {
+ accept = append(accept, a)
+ continue
+ }
+
+ for _, param := range mrp[1:] {
+ sp := strings.SplitN(param, "=", 2)
+ if len(sp) != 2 {
+ continue
+ }
+ token := strings.Trim(sp[0], " ")
+ if token == "q" {
+ a.Q, _ = strconv.ParseFloat(sp[1], 32)
+ } else {
+ a.Params[token] = strings.Trim(sp[1], " ")
+ }
+ }
+
+ accept = append(accept, a)
+ }
+
+ slice := accept_slice(accept)
+ sort.Sort(slice)
+
+ return
+}
+
+// Negotiate the most appropriate content_type given the accept header
+// and a list of alternatives.
+func Negotiate(header string, alternatives []string) (content_type string) {
+ asp := make([][]string, 0, len(alternatives))
+ for _, ctype := range alternatives {
+ asp = append(asp, strings.SplitN(ctype, "/", 2))
+ }
+ for _, clause := range ParseAccept(header) {
+ for i, ctsp := range asp {
+ if clause.Type == ctsp[0] && clause.SubType == ctsp[1] {
+ content_type = alternatives[i]
+ return
+ }
+ if clause.Type == ctsp[0] && clause.SubType == "*" {
+ content_type = alternatives[i]
+ return
+ }
+ if clause.Type == "*" && clause.SubType == "*" {
+ content_type = alternatives[i]
+ return
+ }
+ }
+ }
+ return
+}
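
Since this copy of goautoneg is vendored under internal/, only prometheus/common itself can import it; the sketch below uses the upstream import path purely to illustrate the Negotiate and ParseAccept behaviour documented in the README above.

```go
package main

import (
	"fmt"

	// Illustrative only: the vendored copy lives under internal/ here.
	"bitbucket.org/ww/goautoneg"
)

func main() {
	header := "text/html;q=0.9, application/json, */*;q=0.1"
	alternatives := []string{"application/json", "text/plain"}

	// Picks application/json: it carries the highest quality factor (q=1.0).
	fmt.Println(goautoneg.Negotiate(header, alternatives))

	// ParseAccept exposes the clauses, sorted by quality and specificity.
	for _, clause := range goautoneg.ParseAccept(header) {
		fmt.Printf("%s/%s q=%.1f\n", clause.Type, clause.SubType, clause.Q)
	}
}
```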
diff --git a/vendor/github.com/prometheus/common/log/log.go b/vendor/github.com/prometheus/common/log/log.go
new file mode 100644
index 00000000..07fbfef6
--- /dev/null
+++ b/vendor/github.com/prometheus/common/log/log.go
@@ -0,0 +1,304 @@
+// Copyright 2015 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package log
+
+import (
+ "flag"
+ "fmt"
+ "net/url"
+ "os"
+ "runtime"
+ "strings"
+
+ "github.com/Sirupsen/logrus"
+)
+
+type levelFlag struct{}
+
+// String implements flag.Value.
+func (f levelFlag) String() string {
+ return origLogger.Level.String()
+}
+
+// Set implements flag.Value.
+func (f levelFlag) Set(level string) error {
+ l, err := logrus.ParseLevel(level)
+ if err != nil {
+ return err
+ }
+ origLogger.Level = l
+ return nil
+}
+
+// setSyslogFormatter is nil if the target architecture does not support syslog.
+var setSyslogFormatter func(string, string) error
+
+func setJSONFormatter() {
+ origLogger.Formatter = &logrus.JSONFormatter{}
+}
+
+type logFormatFlag struct{ uri string }
+
+// String implements flag.Value.
+func (f logFormatFlag) String() string {
+ return f.uri
+}
+
+// Set implements flag.Value.
+func (f logFormatFlag) Set(format string) error {
+ f.uri = format
+ u, err := url.Parse(format)
+ if err != nil {
+ return err
+ }
+ if u.Scheme != "logger" {
+ return fmt.Errorf("invalid scheme %s", u.Scheme)
+ }
+ jsonq := u.Query().Get("json")
+ if jsonq == "true" {
+ setJSONFormatter()
+ }
+
+ switch u.Opaque {
+ case "syslog":
+ if setSyslogFormatter == nil {
+ return fmt.Errorf("system does not support syslog")
+ }
+ appname := u.Query().Get("appname")
+ facility := u.Query().Get("local")
+ return setSyslogFormatter(appname, facility)
+ case "stdout":
+ origLogger.Out = os.Stdout
+ case "stderr":
+ origLogger.Out = os.Stderr
+
+ default:
+ return fmt.Errorf("unsupported logger %s", u.Opaque)
+ }
+ return nil
+}
+
+func init() {
+ // In order for these flags to take effect, the user of the package must call
+ // flag.Parse() before logging anything.
+ flag.Var(levelFlag{}, "log.level", "Only log messages with the given severity or above. Valid levels: [debug, info, warn, error, fatal].")
+ flag.Var(logFormatFlag{}, "log.format", "If set use a syslog logger or JSON logging. Example: logger:syslog?appname=bob&local=7 or logger:stdout?json=true. Defaults to stderr.")
+}
+
+type Logger interface {
+ Debug(...interface{})
+ Debugln(...interface{})
+ Debugf(string, ...interface{})
+
+ Info(...interface{})
+ Infoln(...interface{})
+ Infof(string, ...interface{})
+
+ Warn(...interface{})
+ Warnln(...interface{})
+ Warnf(string, ...interface{})
+
+ Error(...interface{})
+ Errorln(...interface{})
+ Errorf(string, ...interface{})
+
+ Fatal(...interface{})
+ Fatalln(...interface{})
+ Fatalf(string, ...interface{})
+
+ With(key string, value interface{}) Logger
+}
+
+type logger struct {
+ entry *logrus.Entry
+}
+
+func (l logger) With(key string, value interface{}) Logger {
+ return logger{l.entry.WithField(key, value)}
+}
+
+// Debug logs a message at level Debug on the standard logger.
+func (l logger) Debug(args ...interface{}) {
+ l.sourced().Debug(args...)
+}
+
+// Debug logs a message at level Debug on the standard logger.
+func (l logger) Debugln(args ...interface{}) {
+ l.sourced().Debugln(args...)
+}
+
+// Debugf logs a message at level Debug on the standard logger.
+func (l logger) Debugf(format string, args ...interface{}) {
+ l.sourced().Debugf(format, args...)
+}
+
+// Info logs a message at level Info on the standard logger.
+func (l logger) Info(args ...interface{}) {
+ l.sourced().Info(args...)
+}
+
+// Info logs a message at level Info on the standard logger.
+func (l logger) Infoln(args ...interface{}) {
+ l.sourced().Infoln(args...)
+}
+
+// Infof logs a message at level Info on the standard logger.
+func (l logger) Infof(format string, args ...interface{}) {
+ l.sourced().Infof(format, args...)
+}
+
+// Warn logs a message at level Warn on the standard logger.
+func (l logger) Warn(args ...interface{}) {
+ l.sourced().Warn(args...)
+}
+
+// Warn logs a message at level Warn on the standard logger.
+func (l logger) Warnln(args ...interface{}) {
+ l.sourced().Warnln(args...)
+}
+
+// Warnf logs a message at level Warn on the standard logger.
+func (l logger) Warnf(format string, args ...interface{}) {
+ l.sourced().Warnf(format, args...)
+}
+
+// Error logs a message at level Error on the standard logger.
+func (l logger) Error(args ...interface{}) {
+ l.sourced().Error(args...)
+}
+
+// Error logs a message at level Error on the standard logger.
+func (l logger) Errorln(args ...interface{}) {
+ l.sourced().Errorln(args...)
+}
+
+// Errorf logs a message at level Error on the standard logger.
+func (l logger) Errorf(format string, args ...interface{}) {
+ l.sourced().Errorf(format, args...)
+}
+
+// Fatal logs a message at level Fatal on the standard logger.
+func (l logger) Fatal(args ...interface{}) {
+ l.sourced().Fatal(args...)
+}
+
+// Fatal logs a message at level Fatal on the standard logger.
+func (l logger) Fatalln(args ...interface{}) {
+ l.sourced().Fatalln(args...)
+}
+
+// Fatalf logs a message at level Fatal on the standard logger.
+func (l logger) Fatalf(format string, args ...interface{}) {
+ l.sourced().Fatalf(format, args...)
+}
+
+// sourced adds a source field to the logger that contains
+// the file name and line where the logging happened.
+func (l logger) sourced() *logrus.Entry {
+ _, file, line, ok := runtime.Caller(2)
+ if !ok {
+ file = "??>"
+ line = 1
+ } else {
+ slash := strings.LastIndex(file, "/")
+ file = file[slash+1:]
+ }
+ return l.entry.WithField("source", fmt.Sprintf("%s:%d", file, line))
+}
+
+var origLogger = logrus.New()
+var baseLogger = logger{entry: logrus.NewEntry(origLogger)}
+
+func Base() Logger {
+ return baseLogger
+}
+
+func With(key string, value interface{}) Logger {
+ return baseLogger.With(key, value)
+}
+
+// Debug logs a message at level Debug on the standard logger.
+func Debug(args ...interface{}) {
+ baseLogger.sourced().Debug(args...)
+}
+
+// Debug logs a message at level Debug on the standard logger.
+func Debugln(args ...interface{}) {
+ baseLogger.sourced().Debugln(args...)
+}
+
+// Debugf logs a message at level Debug on the standard logger.
+func Debugf(format string, args ...interface{}) {
+ baseLogger.sourced().Debugf(format, args...)
+}
+
+// Info logs a message at level Info on the standard logger.
+func Info(args ...interface{}) {
+ baseLogger.sourced().Info(args...)
+}
+
+// Info logs a message at level Info on the standard logger.
+func Infoln(args ...interface{}) {
+ baseLogger.sourced().Infoln(args...)
+}
+
+// Infof logs a message at level Info on the standard logger.
+func Infof(format string, args ...interface{}) {
+ baseLogger.sourced().Infof(format, args...)
+}
+
+// Warn logs a message at level Warn on the standard logger.
+func Warn(args ...interface{}) {
+ baseLogger.sourced().Warn(args...)
+}
+
+// Warn logs a message at level Warn on the standard logger.
+func Warnln(args ...interface{}) {
+ baseLogger.sourced().Warnln(args...)
+}
+
+// Warnf logs a message at level Warn on the standard logger.
+func Warnf(format string, args ...interface{}) {
+ baseLogger.sourced().Warnf(format, args...)
+}
+
+// Error logs a message at level Error on the standard logger.
+func Error(args ...interface{}) {
+ baseLogger.sourced().Error(args...)
+}
+
+// Error logs a message at level Error on the standard logger.
+func Errorln(args ...interface{}) {
+ baseLogger.sourced().Errorln(args...)
+}
+
+// Errorf logs a message at level Error on the standard logger.
+func Errorf(format string, args ...interface{}) {
+ baseLogger.sourced().Errorf(format, args...)
+}
+
+// Fatal logs a message at level Fatal on the standard logger.
+func Fatal(args ...interface{}) {
+ baseLogger.sourced().Fatal(args...)
+}
+
+// Fatal logs a message at level Fatal on the standard logger.
+func Fatalln(args ...interface{}) {
+ baseLogger.sourced().Fatalln(args...)
+}
+
+// Fatalf logs a message at level Fatal on the standard logger.
+func Fatalf(format string, args ...interface{}) {
+ baseLogger.sourced().Fatalf(format, args...)
+}
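
A minimal usage sketch for the new log package: the -log.level and -log.format flags registered in init() only take effect once the program has called flag.Parse().

```go
package main

import (
	"flag"

	"github.com/prometheus/common/log"
)

func main() {
	// Required so that -log.level and -log.format are honored.
	flag.Parse()

	log.Infof("starting %s", "example")
	log.With("component", "scraper").
		With("target", "localhost:9090").
		Warn("scrape took longer than expected")
	// Dropped unless -log.level=debug is set (default level is info).
	log.Debugln("verbose details")
}
```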
diff --git a/vendor/github.com/prometheus/common/log/syslog_formatter.go b/vendor/github.com/prometheus/common/log/syslog_formatter.go
new file mode 100644
index 00000000..fd8c6fbe
--- /dev/null
+++ b/vendor/github.com/prometheus/common/log/syslog_formatter.go
@@ -0,0 +1,119 @@
+// Copyright 2015 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// +build !windows,!nacl,!plan9
+
+package log
+
+import (
+ "fmt"
+ "log/syslog"
+ "os"
+
+ "github.com/Sirupsen/logrus"
+)
+
+func init() {
+ setSyslogFormatter = func(appname, local string) error {
+ if appname == "" {
+ return fmt.Errorf("missing appname parameter")
+ }
+ if local == "" {
+ return fmt.Errorf("missing local parameter")
+ }
+
+ fmter, err := newSyslogger(appname, local, origLogger.Formatter)
+ if err != nil {
+ fmt.Fprintf(os.Stderr, "error creating syslog formatter: %v\n", err)
+ origLogger.Errorf("can't connect logger to syslog: %v", err)
+ return err
+ }
+ origLogger.Formatter = fmter
+ return nil
+ }
+}
+
+var ceeTag = []byte("@cee:")
+
+type syslogger struct {
+ wrap logrus.Formatter
+ out *syslog.Writer
+}
+
+func newSyslogger(appname string, facility string, fmter logrus.Formatter) (*syslogger, error) {
+ priority, err := getFacility(facility)
+ if err != nil {
+ return nil, err
+ }
+ out, err := syslog.New(priority, appname)
+ return &syslogger{
+ out: out,
+ wrap: fmter,
+ }, err
+}
+
+func getFacility(facility string) (syslog.Priority, error) {
+ switch facility {
+ case "0":
+ return syslog.LOG_LOCAL0, nil
+ case "1":
+ return syslog.LOG_LOCAL1, nil
+ case "2":
+ return syslog.LOG_LOCAL2, nil
+ case "3":
+ return syslog.LOG_LOCAL3, nil
+ case "4":
+ return syslog.LOG_LOCAL4, nil
+ case "5":
+ return syslog.LOG_LOCAL5, nil
+ case "6":
+ return syslog.LOG_LOCAL6, nil
+ case "7":
+ return syslog.LOG_LOCAL7, nil
+ }
+ return syslog.LOG_LOCAL0, fmt.Errorf("invalid local(%s) for syslog", facility)
+}
+
+func (s *syslogger) Format(e *logrus.Entry) ([]byte, error) {
+ data, err := s.wrap.Format(e)
+ if err != nil {
+ fmt.Fprintf(os.Stderr, "syslogger: can't format entry: %v\n", err)
+ return data, err
+ }
+ // only append tag to data sent to syslog (line), not to what
+ // is returned
+ line := string(append(ceeTag, data...))
+
+ switch e.Level {
+ case logrus.PanicLevel:
+ err = s.out.Crit(line)
+ case logrus.FatalLevel:
+ err = s.out.Crit(line)
+ case logrus.ErrorLevel:
+ err = s.out.Err(line)
+ case logrus.WarnLevel:
+ err = s.out.Warning(line)
+ case logrus.InfoLevel:
+ err = s.out.Info(line)
+ case logrus.DebugLevel:
+ err = s.out.Debug(line)
+ default:
+ err = s.out.Notice(line)
+ }
+
+ if err != nil {
+ fmt.Fprintf(os.Stderr, "syslogger: can't send log to syslog: %v\n", err)
+ }
+
+ return data, err
+}
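
The syslog formatter is wired up through the same -log.format flag: the "local" query parameter selects the facility via getFacility (e.g. local=5 maps to syslog.LOG_LOCAL5), and every formatted entry is prefixed with the @cee: tag before being written to syslog. A hedged sketch of enabling it programmatically ("myapp" is an invented appname; this will fail on systems without a reachable syslog daemon):

```go
package main

import (
	"flag"

	"github.com/prometheus/common/log"
)

func main() {
	flag.Parse()

	// Equivalent to passing -log.format=logger:syslog?appname=myapp&local=5
	// on the command line.
	if err := flag.Set("log.format", "logger:syslog?appname=myapp&local=5"); err != nil {
		log.Fatalf("cannot switch to syslog output: %v", err)
	}
	log.Info("delivered to syslog with the @cee: prefix")
}
```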
diff --git a/vendor/github.com/prometheus/common/model/alert.go b/vendor/github.com/prometheus/common/model/alert.go
new file mode 100644
index 00000000..35e739c7
--- /dev/null
+++ b/vendor/github.com/prometheus/common/model/alert.go
@@ -0,0 +1,136 @@
+// Copyright 2013 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package model
+
+import (
+ "fmt"
+ "time"
+)
+
+type AlertStatus string
+
+const (
+ AlertFiring AlertStatus = "firing"
+ AlertResolved AlertStatus = "resolved"
+)
+
+// Alert is a generic representation of an alert in the Prometheus eco-system.
+type Alert struct {
+	// Label value pairs for the purposes of aggregation, matching, and
+	// dispatching. This must minimally include an "alertname" label.
+ Labels LabelSet `json:"labels"`
+
+ // Extra key/value information which does not define alert identity.
+ Annotations LabelSet `json:"annotations"`
+
+ // The known time range for this alert. Both ends are optional.
+ StartsAt time.Time `json:"startsAt,omitempty"`
+ EndsAt time.Time `json:"endsAt,omitempty"`
+ GeneratorURL string `json:"generatorURL"`
+}
+
+// Name returns the name of the alert. It is equivalent to the "alertname" label.
+func (a *Alert) Name() string {
+ return string(a.Labels[AlertNameLabel])
+}
+
+// Fingerprint returns a unique hash for the alert. It is equivalent to
+// the fingerprint of the alert's label set.
+func (a *Alert) Fingerprint() Fingerprint {
+ return a.Labels.Fingerprint()
+}
+
+func (a *Alert) String() string {
+ s := fmt.Sprintf("%s[%s]", a.Name(), a.Fingerprint().String()[:7])
+ if a.Resolved() {
+ return s + "[resolved]"
+ }
+ return s + "[active]"
+}
+
+// Resolved returns true iff the activity interval ended in the past.
+func (a *Alert) Resolved() bool {
+ return a.ResolvedAt(time.Now())
+}
+
+// ResolvedAt returns true iff the activity interval ended before
+// the given timestamp.
+func (a *Alert) ResolvedAt(ts time.Time) bool {
+ if a.EndsAt.IsZero() {
+ return false
+ }
+ return !a.EndsAt.After(ts)
+}
+
+// Status returns the status of the alert.
+func (a *Alert) Status() AlertStatus {
+ if a.Resolved() {
+ return AlertResolved
+ }
+ return AlertFiring
+}
+
+// Validate checks whether the alert data is consistent, returning an error if it is not.
+func (a *Alert) Validate() error {
+ if a.StartsAt.IsZero() {
+ return fmt.Errorf("start time missing")
+ }
+ if !a.EndsAt.IsZero() && a.EndsAt.Before(a.StartsAt) {
+ return fmt.Errorf("start time must be before end time")
+ }
+ if err := a.Labels.Validate(); err != nil {
+ return fmt.Errorf("invalid label set: %s", err)
+ }
+ if len(a.Labels) == 0 {
+ return fmt.Errorf("at least one label pair required")
+ }
+ if err := a.Annotations.Validate(); err != nil {
+ return fmt.Errorf("invalid annotations: %s", err)
+ }
+ return nil
+}
+
+// Alerts is a list of alerts that can be sorted in chronological order.
+type Alerts []*Alert
+
+func (as Alerts) Len() int { return len(as) }
+func (as Alerts) Swap(i, j int) { as[i], as[j] = as[j], as[i] }
+
+func (as Alerts) Less(i, j int) bool {
+ if as[i].StartsAt.Before(as[j].StartsAt) {
+ return true
+ }
+ if as[i].EndsAt.Before(as[j].EndsAt) {
+ return true
+ }
+ return as[i].Fingerprint() < as[j].Fingerprint()
+}
+
+// HasFiring returns true iff one of the alerts is not resolved.
+func (as Alerts) HasFiring() bool {
+ for _, a := range as {
+ if !a.Resolved() {
+ return true
+ }
+ }
+ return false
+}
+
+// Status returns StatusFiring iff at least one of the alerts is firing.
+func (as Alerts) Status() AlertStatus {
+ if as.HasFiring() {
+ return AlertFiring
+ }
+ return AlertResolved
+}
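
A small sketch of the Alert type in use (the label and annotation values are made up for illustration):

```go
package main

import (
	"fmt"
	"time"

	"github.com/prometheus/common/model"
)

func main() {
	alert := &model.Alert{
		Labels: model.LabelSet{
			model.AlertNameLabel: "HighErrorRate",
			"severity":           "page",
		},
		Annotations: model.LabelSet{"summary": "error rate above 5%"},
		StartsAt:    time.Now().Add(-10 * time.Minute),
	}
	if err := alert.Validate(); err != nil {
		fmt.Println("invalid alert:", err)
		return
	}
	// EndsAt is zero, so the alert is still firing,
	// e.g. HighErrorRate[abc1234][active] firing
	fmt.Println(alert, alert.Status())
}
```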
diff --git a/vendor/github.com/prometheus/common/model/fingerprinting.go b/vendor/github.com/prometheus/common/model/fingerprinting.go
new file mode 100644
index 00000000..fc4de410
--- /dev/null
+++ b/vendor/github.com/prometheus/common/model/fingerprinting.go
@@ -0,0 +1,105 @@
+// Copyright 2013 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package model
+
+import (
+ "fmt"
+ "strconv"
+)
+
+// Fingerprint provides a hash-capable representation of a Metric.
+// For our purposes, FNV-1A 64-bit is used.
+type Fingerprint uint64
+
+// FingerprintFromString transforms a string representation into a Fingerprint.
+func FingerprintFromString(s string) (Fingerprint, error) {
+ num, err := strconv.ParseUint(s, 16, 64)
+ return Fingerprint(num), err
+}
+
+// ParseFingerprint parses the input string into a fingerprint.
+func ParseFingerprint(s string) (Fingerprint, error) {
+ num, err := strconv.ParseUint(s, 16, 64)
+ if err != nil {
+ return 0, err
+ }
+ return Fingerprint(num), nil
+}
+
+func (f Fingerprint) String() string {
+ return fmt.Sprintf("%016x", uint64(f))
+}
+
+// Fingerprints represents a collection of Fingerprint subject to a given
+// natural sorting scheme. It implements sort.Interface.
+type Fingerprints []Fingerprint
+
+// Len implements sort.Interface.
+func (f Fingerprints) Len() int {
+ return len(f)
+}
+
+// Less implements sort.Interface.
+func (f Fingerprints) Less(i, j int) bool {
+ return f[i] < f[j]
+}
+
+// Swap implements sort.Interface.
+func (f Fingerprints) Swap(i, j int) {
+ f[i], f[j] = f[j], f[i]
+}
+
+// FingerprintSet is a set of Fingerprints.
+type FingerprintSet map[Fingerprint]struct{}
+
+// Equal returns true if both sets contain the same elements (and not more).
+func (s FingerprintSet) Equal(o FingerprintSet) bool {
+ if len(s) != len(o) {
+ return false
+ }
+
+ for k := range s {
+ if _, ok := o[k]; !ok {
+ return false
+ }
+ }
+
+ return true
+}
+
+// Intersection returns the elements contained in both sets.
+func (s FingerprintSet) Intersection(o FingerprintSet) FingerprintSet {
+ myLength, otherLength := len(s), len(o)
+ if myLength == 0 || otherLength == 0 {
+ return FingerprintSet{}
+ }
+
+ subSet := s
+ superSet := o
+
+ if otherLength < myLength {
+ subSet = o
+ superSet = s
+ }
+
+ out := FingerprintSet{}
+
+ for k := range subSet {
+ if _, ok := superSet[k]; ok {
+ out[k] = struct{}{}
+ }
+ }
+
+ return out
+}
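
A short sketch of the fingerprint helpers above; the hex strings are arbitrary example values:

```go
package main

import (
	"fmt"

	"github.com/prometheus/common/model"
)

func main() {
	a, _ := model.FingerprintFromString("000000000000dead")
	b, _ := model.ParseFingerprint("000000000000beef")

	seen := model.FingerprintSet{a: {}, b: {}}
	wanted := model.FingerprintSet{b: {}}

	// Intersection keeps only fingerprints present in both sets.
	for fp := range seen.Intersection(wanted) {
		fmt.Println(fp) // 000000000000beef
	}
}
```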
diff --git a/vendor/github.com/prometheus/common/model/fnv.go b/vendor/github.com/prometheus/common/model/fnv.go
new file mode 100644
index 00000000..038fc1c9
--- /dev/null
+++ b/vendor/github.com/prometheus/common/model/fnv.go
@@ -0,0 +1,42 @@
+// Copyright 2015 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package model
+
+// Inline and byte-free variant of hash/fnv's fnv64a.
+
+const (
+ offset64 = 14695981039346656037
+ prime64 = 1099511628211
+)
+
+// hashNew initializes a new fnv64a hash value.
+func hashNew() uint64 {
+ return offset64
+}
+
+// hashAdd adds a string to a fnv64a hash value, returning the updated hash.
+func hashAdd(h uint64, s string) uint64 {
+ for i := 0; i < len(s); i++ {
+ h ^= uint64(s[i])
+ h *= prime64
+ }
+ return h
+}
+
+// hashAddByte adds a byte to a fnv64a hash value, returning the updated hash.
+func hashAddByte(h uint64, b byte) uint64 {
+ h ^= uint64(b)
+ h *= prime64
+ return h
+}
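
The helpers above are an inline, allocation-free FNV-1a. The sketch below re-derives the same value with the standard library's hash/fnv to show the algorithm they implement; the fnv64a helper here is illustrative and not part of the package.

```go
package main

import (
	"fmt"
	"hash/fnv"
)

// fnv64a mirrors the unexported helpers above: start from the FNV-1a offset
// basis and fold each byte in with XOR followed by a multiply by the prime.
func fnv64a(parts ...string) uint64 {
	const (
		offset64 = 14695981039346656037
		prime64  = 1099511628211
	)
	h := uint64(offset64)
	for _, s := range parts {
		for i := 0; i < len(s); i++ {
			h ^= uint64(s[i])
			h *= prime64
		}
	}
	return h
}

func main() {
	// Sanity check against the standard library's FNV-1a implementation.
	std := fnv.New64a()
	std.Write([]byte("jobapi"))
	fmt.Println(fnv64a("job", "api") == std.Sum64()) // true
}
```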
diff --git a/vendor/github.com/prometheus/common/model/labels.go b/vendor/github.com/prometheus/common/model/labels.go
new file mode 100644
index 00000000..3b72e7ff
--- /dev/null
+++ b/vendor/github.com/prometheus/common/model/labels.go
@@ -0,0 +1,206 @@
+// Copyright 2013 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package model
+
+import (
+ "encoding/json"
+ "fmt"
+ "regexp"
+ "strings"
+ "unicode/utf8"
+)
+
+const (
+	// AlertNameLabel is the name of the label containing an alert's name.
+ AlertNameLabel = "alertname"
+
+ // ExportedLabelPrefix is the prefix to prepend to the label names present in
+ // exported metrics if a label of the same name is added by the server.
+ ExportedLabelPrefix = "exported_"
+
+ // MetricNameLabel is the label name indicating the metric name of a
+ // timeseries.
+ MetricNameLabel = "__name__"
+
+ // SchemeLabel is the name of the label that holds the scheme on which to
+ // scrape a target.
+ SchemeLabel = "__scheme__"
+
+ // AddressLabel is the name of the label that holds the address of
+ // a scrape target.
+ AddressLabel = "__address__"
+
+ // MetricsPathLabel is the name of the label that holds the path on which to
+ // scrape a target.
+ MetricsPathLabel = "__metrics_path__"
+
+ // ReservedLabelPrefix is a prefix which is not legal in user-supplied
+ // label names.
+ ReservedLabelPrefix = "__"
+
+ // MetaLabelPrefix is a prefix for labels that provide meta information.
+ // Labels with this prefix are used for intermediate label processing and
+ // will not be attached to time series.
+ MetaLabelPrefix = "__meta_"
+
+ // TmpLabelPrefix is a prefix for temporary labels as part of relabelling.
+ // Labels with this prefix are used for intermediate label processing and
+ // will not be attached to time series. This is reserved for use in
+ // Prometheus configuration files by users.
+ TmpLabelPrefix = "__tmp_"
+
+ // ParamLabelPrefix is a prefix for labels that provide URL parameters
+ // used to scrape a target.
+ ParamLabelPrefix = "__param_"
+
+ // JobLabel is the label name indicating the job from which a timeseries
+ // was scraped.
+ JobLabel = "job"
+
+ // InstanceLabel is the label name used for the instance label.
+ InstanceLabel = "instance"
+
+ // BucketLabel is used for the label that defines the upper bound of a
+ // bucket of a histogram ("le" -> "less or equal").
+ BucketLabel = "le"
+
+ // QuantileLabel is used for the label that defines the quantile in a
+ // summary.
+ QuantileLabel = "quantile"
+)
+
+// LabelNameRE is a regular expression matching valid label names.
+var LabelNameRE = regexp.MustCompile("^[a-zA-Z_][a-zA-Z0-9_]*$")
+
+// A LabelName is a key for a LabelSet or Metric. It has a value associated
+// therewith.
+type LabelName string
+
+// IsValid is true iff the label name matches the pattern of LabelNameRE.
+func (ln LabelName) IsValid() bool {
+ if len(ln) == 0 {
+ return false
+ }
+ for i, b := range ln {
+ if !((b >= 'a' && b <= 'z') || (b >= 'A' && b <= 'Z') || b == '_' || (b >= '0' && b <= '9' && i > 0)) {
+ return false
+ }
+ }
+ return true
+}
+
+// UnmarshalYAML implements the yaml.Unmarshaler interface.
+func (ln *LabelName) UnmarshalYAML(unmarshal func(interface{}) error) error {
+ var s string
+ if err := unmarshal(&s); err != nil {
+ return err
+ }
+ if !LabelNameRE.MatchString(s) {
+ return fmt.Errorf("%q is not a valid label name", s)
+ }
+ *ln = LabelName(s)
+ return nil
+}
+
+// UnmarshalJSON implements the json.Unmarshaler interface.
+func (ln *LabelName) UnmarshalJSON(b []byte) error {
+ var s string
+ if err := json.Unmarshal(b, &s); err != nil {
+ return err
+ }
+ if !LabelNameRE.MatchString(s) {
+ return fmt.Errorf("%q is not a valid label name", s)
+ }
+ *ln = LabelName(s)
+ return nil
+}
+
+// LabelNames is a sortable LabelName slice. It implements sort.Interface.
+type LabelNames []LabelName
+
+func (l LabelNames) Len() int {
+ return len(l)
+}
+
+func (l LabelNames) Less(i, j int) bool {
+ return l[i] < l[j]
+}
+
+func (l LabelNames) Swap(i, j int) {
+ l[i], l[j] = l[j], l[i]
+}
+
+func (l LabelNames) String() string {
+ labelStrings := make([]string, 0, len(l))
+ for _, label := range l {
+ labelStrings = append(labelStrings, string(label))
+ }
+ return strings.Join(labelStrings, ", ")
+}
+
+// A LabelValue is an associated value for a LabelName.
+type LabelValue string
+
+// IsValid returns true iff the string is valid UTF-8.
+func (lv LabelValue) IsValid() bool {
+ return utf8.ValidString(string(lv))
+}
+
+// LabelValues is a sortable LabelValue slice. It implements sort.Interface.
+type LabelValues []LabelValue
+
+func (l LabelValues) Len() int {
+ return len(l)
+}
+
+func (l LabelValues) Less(i, j int) bool {
+ return string(l[i]) < string(l[j])
+}
+
+func (l LabelValues) Swap(i, j int) {
+ l[i], l[j] = l[j], l[i]
+}
+
+// LabelPair pairs a name with a value.
+type LabelPair struct {
+ Name LabelName
+ Value LabelValue
+}
+
+// LabelPairs is a sortable slice of LabelPair pointers. It implements
+// sort.Interface.
+type LabelPairs []*LabelPair
+
+func (l LabelPairs) Len() int {
+ return len(l)
+}
+
+func (l LabelPairs) Less(i, j int) bool {
+ switch {
+ case l[i].Name > l[j].Name:
+ return false
+ case l[i].Name < l[j].Name:
+ return true
+ case l[i].Value > l[j].Value:
+ return false
+ case l[i].Value < l[j].Value:
+ return true
+ default:
+ return false
+ }
+}
+
+func (l LabelPairs) Swap(i, j int) {
+ l[i], l[j] = l[j], l[i]
+}
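
A brief sketch exercising the label-name validation and the LabelPairs sort order defined above:

```go
package main

import (
	"fmt"
	"sort"

	"github.com/prometheus/common/model"
)

func main() {
	fmt.Println(model.LabelName("http_status").IsValid()) // true
	fmt.Println(model.LabelName("0badstart").IsValid())   // false: must not begin with a digit

	pairs := model.LabelPairs{
		{Name: "method", Value: "post"},
		{Name: "code", Value: "200"},
		{Name: "method", Value: "get"},
	}
	// LabelPairs sorts by name first, then by value.
	sort.Sort(pairs)
	for _, p := range pairs {
		fmt.Printf("%s=%s\n", p.Name, p.Value)
	}
}
```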
diff --git a/vendor/github.com/prometheus/common/model/labelset.go b/vendor/github.com/prometheus/common/model/labelset.go
new file mode 100644
index 00000000..5f931cdb
--- /dev/null
+++ b/vendor/github.com/prometheus/common/model/labelset.go
@@ -0,0 +1,169 @@
+// Copyright 2013 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package model
+
+import (
+ "encoding/json"
+ "fmt"
+ "sort"
+ "strings"
+)
+
+// A LabelSet is a collection of LabelName and LabelValue pairs. The LabelSet
+// may be fully-qualified down to the point where it may resolve to a single
+// Metric in the data store or not. All operations that occur within the realm
+// of a LabelSet can emit a vector of Metric entities to which the LabelSet may
+// match.
+type LabelSet map[LabelName]LabelValue
+
+// Validate checks whether all names and values in the label set
+// are valid.
+func (ls LabelSet) Validate() error {
+ for ln, lv := range ls {
+ if !ln.IsValid() {
+ return fmt.Errorf("invalid name %q", ln)
+ }
+ if !lv.IsValid() {
+ return fmt.Errorf("invalid value %q", lv)
+ }
+ }
+ return nil
+}
+
+// Equal returns true iff both label sets have exactly the same key/value pairs.
+func (ls LabelSet) Equal(o LabelSet) bool {
+ if len(ls) != len(o) {
+ return false
+ }
+ for ln, lv := range ls {
+ olv, ok := o[ln]
+ if !ok {
+ return false
+ }
+ if olv != lv {
+ return false
+ }
+ }
+ return true
+}
+
+// Before compares the label sets, using the following criteria:
+//
+// If ls has fewer labels than o, it is before o. If it has more, it is not.
+//
+// If the number of labels is the same, the superset of all label names is
+// sorted alphanumerically. The first differing label pair found in that order
+// determines the outcome: If the label does not exist at all in ls, then ls is
+// before o, and vice versa. Otherwise the label value is compared
+// alphanumerically.
+//
+// If ls and o are equal, the method returns false.
+func (ls LabelSet) Before(o LabelSet) bool {
+ if len(ls) < len(o) {
+ return true
+ }
+ if len(ls) > len(o) {
+ return false
+ }
+
+ lns := make(LabelNames, 0, len(ls)+len(o))
+ for ln := range ls {
+ lns = append(lns, ln)
+ }
+ for ln := range o {
+ lns = append(lns, ln)
+ }
+ // It's probably not worth it to de-dup lns.
+ sort.Sort(lns)
+ for _, ln := range lns {
+ mlv, ok := ls[ln]
+ if !ok {
+ return true
+ }
+ olv, ok := o[ln]
+ if !ok {
+ return false
+ }
+ if mlv < olv {
+ return true
+ }
+ if mlv > olv {
+ return false
+ }
+ }
+ return false
+}
+
+// Clone returns a copy of the label set.
+func (ls LabelSet) Clone() LabelSet {
+ lsn := make(LabelSet, len(ls))
+ for ln, lv := range ls {
+ lsn[ln] = lv
+ }
+ return lsn
+}
+
+// Merge is a helper function to non-destructively merge two label sets.
+func (l LabelSet) Merge(other LabelSet) LabelSet {
+ result := make(LabelSet, len(l))
+
+ for k, v := range l {
+ result[k] = v
+ }
+
+ for k, v := range other {
+ result[k] = v
+ }
+
+ return result
+}
+
+func (l LabelSet) String() string {
+ lstrs := make([]string, 0, len(l))
+ for l, v := range l {
+ lstrs = append(lstrs, fmt.Sprintf("%s=%q", l, v))
+ }
+
+ sort.Strings(lstrs)
+ return fmt.Sprintf("{%s}", strings.Join(lstrs, ", "))
+}
+
+// Fingerprint returns the LabelSet's fingerprint.
+func (ls LabelSet) Fingerprint() Fingerprint {
+ return labelSetToFingerprint(ls)
+}
+
+// FastFingerprint returns the LabelSet's Fingerprint calculated by a faster hashing
+// algorithm, which is, however, more susceptible to hash collisions.
+func (ls LabelSet) FastFingerprint() Fingerprint {
+ return labelSetToFastFingerprint(ls)
+}
+
+// UnmarshalJSON implements the json.Unmarshaler interface.
+func (l *LabelSet) UnmarshalJSON(b []byte) error {
+ var m map[LabelName]LabelValue
+ if err := json.Unmarshal(b, &m); err != nil {
+ return err
+ }
+ // encoding/json only unmarshals maps of the form map[string]T. It treats
+ // LabelName as a string and does not call its UnmarshalJSON method.
+ // Thus, we have to replicate the behavior here.
+ for ln := range m {
+ if !LabelNameRE.MatchString(string(ln)) {
+ return fmt.Errorf("%q is not a valid label name", ln)
+ }
+ }
+ *l = LabelSet(m)
+ return nil
+}
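
A small usage sketch for LabelSet, showing that Merge is non-destructive and that Before orders by label count first (label values are invented):

```go
package main

import (
	"fmt"

	"github.com/prometheus/common/model"
)

func main() {
	base := model.LabelSet{"job": "api", "instance": "localhost:9090"}
	extra := model.LabelSet{"instance": "localhost:9100", "env": "prod"}

	// Merge leaves both inputs untouched; values from the argument win on conflicts.
	merged := base.Merge(extra)
	fmt.Println(merged) // {env="prod", instance="localhost:9100", job="api"}

	// Before gives a total order usable for sorting collections of label sets.
	fmt.Println(base.Before(merged)) // true: base has fewer labels
	fmt.Println(merged.Fingerprint())
}
```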
diff --git a/vendor/github.com/prometheus/common/model/metric.go b/vendor/github.com/prometheus/common/model/metric.go
new file mode 100644
index 00000000..a5da59a5
--- /dev/null
+++ b/vendor/github.com/prometheus/common/model/metric.go
@@ -0,0 +1,98 @@
+// Copyright 2013 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package model
+
+import (
+ "fmt"
+ "regexp"
+ "sort"
+ "strings"
+)
+
+var (
+ separator = []byte{0}
+ MetricNameRE = regexp.MustCompile(`^[a-zA-Z_][a-zA-Z0-9_:]*$`)
+)
+
+// A Metric is similar to a LabelSet, but the key difference is that a Metric is
+// a singleton and refers to one and only one stream of samples.
+type Metric LabelSet
+
+// Equal compares the metrics.
+func (m Metric) Equal(o Metric) bool {
+ return LabelSet(m).Equal(LabelSet(o))
+}
+
+// Before compares the metrics' underlying label sets.
+func (m Metric) Before(o Metric) bool {
+ return LabelSet(m).Before(LabelSet(o))
+}
+
+// Clone returns a copy of the Metric.
+func (m Metric) Clone() Metric {
+ clone := Metric{}
+ for k, v := range m {
+ clone[k] = v
+ }
+ return clone
+}
+
+func (m Metric) String() string {
+ metricName, hasName := m[MetricNameLabel]
+ numLabels := len(m) - 1
+ if !hasName {
+ numLabels = len(m)
+ }
+ labelStrings := make([]string, 0, numLabels)
+ for label, value := range m {
+ if label != MetricNameLabel {
+ labelStrings = append(labelStrings, fmt.Sprintf("%s=%q", label, value))
+ }
+ }
+
+ switch numLabels {
+ case 0:
+ if hasName {
+ return string(metricName)
+ }
+ return "{}"
+ default:
+ sort.Strings(labelStrings)
+ return fmt.Sprintf("%s{%s}", metricName, strings.Join(labelStrings, ", "))
+ }
+}
+
+// Fingerprint returns a Metric's Fingerprint.
+func (m Metric) Fingerprint() Fingerprint {
+ return LabelSet(m).Fingerprint()
+}
+
+// FastFingerprint returns a Metric's Fingerprint calculated by a faster hashing
+// algorithm, which is, however, more susceptible to hash collisions.
+func (m Metric) FastFingerprint() Fingerprint {
+ return LabelSet(m).FastFingerprint()
+}
+
+// IsValidMetricName returns true iff name matches the pattern of MetricNameRE.
+func IsValidMetricName(n LabelValue) bool {
+ if len(n) == 0 {
+ return false
+ }
+ for i, b := range n {
+ if !((b >= 'a' && b <= 'z') || (b >= 'A' && b <= 'Z') || b == '_' || b == ':' || (b >= '0' && b <= '9' && i > 0)) {
+ return false
+ }
+ }
+ return true
+}
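
A sketch of the Metric conveniences above; the exact fingerprint value depends on the hash, so only equality is shown:

```go
package main

import (
	"fmt"

	"github.com/prometheus/common/model"
)

func main() {
	m := model.Metric{
		model.MetricNameLabel: "http_requests_total",
		"method":              "get",
		"code":                "200",
	}
	fmt.Println(m) // http_requests_total{code="200", method="get"}

	// Clones carry the same labels and therefore hash identically.
	fmt.Println(m.Fingerprint() == m.Clone().Fingerprint()) // true

	fmt.Println(model.IsValidMetricName("2xx_total")) // false: must not start with a digit
}
```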
diff --git a/vendor/github.com/prometheus/common/model/model.go b/vendor/github.com/prometheus/common/model/model.go
new file mode 100644
index 00000000..88f013a4
--- /dev/null
+++ b/vendor/github.com/prometheus/common/model/model.go
@@ -0,0 +1,16 @@
+// Copyright 2013 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// Package model contains common data structures that are shared across
+// Prometheus components and libraries.
+package model
diff --git a/vendor/github.com/prometheus/common/model/signature.go b/vendor/github.com/prometheus/common/model/signature.go
new file mode 100644
index 00000000..cf14e82c
--- /dev/null
+++ b/vendor/github.com/prometheus/common/model/signature.go
@@ -0,0 +1,144 @@
+// Copyright 2014 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package model
+
+import (
+ "sort"
+)
+
+// SeparatorByte is a byte that cannot occur in valid UTF-8 sequences and is
+// used to separate label names, label values, and other strings from each other
+// when calculating their combined hash value (aka signature aka fingerprint).
+const SeparatorByte byte = 255
+
+var (
+ // cache the signature of an empty label set.
+ emptyLabelSignature = hashNew()
+)
+
+// LabelsToSignature returns a quasi-unique signature (i.e., fingerprint) for a
+// given label set. (Collisions are possible but unlikely if the number of label
+// sets the function is applied to is small.)
+func LabelsToSignature(labels map[string]string) uint64 {
+ if len(labels) == 0 {
+ return emptyLabelSignature
+ }
+
+ labelNames := make([]string, 0, len(labels))
+ for labelName := range labels {
+ labelNames = append(labelNames, labelName)
+ }
+ sort.Strings(labelNames)
+
+ sum := hashNew()
+ for _, labelName := range labelNames {
+ sum = hashAdd(sum, labelName)
+ sum = hashAddByte(sum, SeparatorByte)
+ sum = hashAdd(sum, labels[labelName])
+ sum = hashAddByte(sum, SeparatorByte)
+ }
+ return sum
+}
+
+// labelSetToFingerprint works exactly as LabelsToSignature but takes a LabelSet as
+// parameter (rather than a label map) and returns a Fingerprint.
+func labelSetToFingerprint(ls LabelSet) Fingerprint {
+ if len(ls) == 0 {
+ return Fingerprint(emptyLabelSignature)
+ }
+
+ labelNames := make(LabelNames, 0, len(ls))
+ for labelName := range ls {
+ labelNames = append(labelNames, labelName)
+ }
+ sort.Sort(labelNames)
+
+ sum := hashNew()
+ for _, labelName := range labelNames {
+ sum = hashAdd(sum, string(labelName))
+ sum = hashAddByte(sum, SeparatorByte)
+ sum = hashAdd(sum, string(ls[labelName]))
+ sum = hashAddByte(sum, SeparatorByte)
+ }
+ return Fingerprint(sum)
+}
+
+// labelSetToFastFingerprint works similar to labelSetToFingerprint but uses a
+// faster and less allocation-heavy hash function, which is more susceptible to
+// create hash collisions. Therefore, collision detection should be applied.
+func labelSetToFastFingerprint(ls LabelSet) Fingerprint {
+ if len(ls) == 0 {
+ return Fingerprint(emptyLabelSignature)
+ }
+
+ var result uint64
+ for labelName, labelValue := range ls {
+ sum := hashNew()
+ sum = hashAdd(sum, string(labelName))
+ sum = hashAddByte(sum, SeparatorByte)
+ sum = hashAdd(sum, string(labelValue))
+ result ^= sum
+ }
+ return Fingerprint(result)
+}
+
+// SignatureForLabels works like LabelsToSignature but takes a Metric as
+// parameter (rather than a label map) and only includes the labels with the
+// specified LabelNames into the signature calculation. The labels passed in
+// will be sorted by this function.
+func SignatureForLabels(m Metric, labels ...LabelName) uint64 {
+ if len(m) == 0 || len(labels) == 0 {
+ return emptyLabelSignature
+ }
+
+ sort.Sort(LabelNames(labels))
+
+ sum := hashNew()
+ for _, label := range labels {
+ sum = hashAdd(sum, string(label))
+ sum = hashAddByte(sum, SeparatorByte)
+ sum = hashAdd(sum, string(m[label]))
+ sum = hashAddByte(sum, SeparatorByte)
+ }
+ return sum
+}
+
+// SignatureWithoutLabels works like LabelsToSignature but takes a Metric as
+// parameter (rather than a label map) and excludes the labels with any of the
+// specified LabelNames from the signature calculation.
+func SignatureWithoutLabels(m Metric, labels map[LabelName]struct{}) uint64 {
+ if len(m) == 0 {
+ return emptyLabelSignature
+ }
+
+ labelNames := make(LabelNames, 0, len(m))
+ for labelName := range m {
+ if _, exclude := labels[labelName]; !exclude {
+ labelNames = append(labelNames, labelName)
+ }
+ }
+ if len(labelNames) == 0 {
+ return emptyLabelSignature
+ }
+ sort.Sort(labelNames)
+
+ sum := hashNew()
+ for _, labelName := range labelNames {
+ sum = hashAdd(sum, string(labelName))
+ sum = hashAddByte(sum, SeparatorByte)
+ sum = hashAdd(sum, string(m[labelName]))
+ sum = hashAddByte(sum, SeparatorByte)
+ }
+ return sum
+}
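
The signature helpers differ only in which labels they feed into the hash; a quick sketch with made-up labels:

```go
package main

import (
	"fmt"

	"github.com/prometheus/common/model"
)

func main() {
	labels := map[string]string{"method": "get", "code": "200"}
	// Order-independent: label names are sorted before hashing.
	fmt.Println(model.LabelsToSignature(labels))

	m := model.Metric{"method": "get", "code": "200", "handler": "/api"}
	// Only the listed labels contribute to the signature.
	fmt.Println(model.SignatureForLabels(m, "method", "code"))
	// Everything except the excluded labels contributes.
	fmt.Println(model.SignatureWithoutLabels(m, map[model.LabelName]struct{}{"handler": {}}))
}
```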
diff --git a/vendor/github.com/prometheus/common/model/silence.go b/vendor/github.com/prometheus/common/model/silence.go
new file mode 100644
index 00000000..7538e299
--- /dev/null
+++ b/vendor/github.com/prometheus/common/model/silence.go
@@ -0,0 +1,106 @@
+// Copyright 2015 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package model
+
+import (
+ "encoding/json"
+ "fmt"
+ "regexp"
+ "time"
+)
+
+// Matcher describes a match condition for the value of a given label.
+type Matcher struct {
+ Name LabelName `json:"name"`
+ Value string `json:"value"`
+ IsRegex bool `json:"isRegex"`
+}
+
+func (m *Matcher) UnmarshalJSON(b []byte) error {
+ type plain Matcher
+ if err := json.Unmarshal(b, (*plain)(m)); err != nil {
+ return err
+ }
+
+ if len(m.Name) == 0 {
+ return fmt.Errorf("label name in matcher must not be empty")
+ }
+ if m.IsRegex {
+ if _, err := regexp.Compile(m.Value); err != nil {
+ return err
+ }
+ }
+ return nil
+}
+
+// Validate returns nil iff all fields of the matcher have valid values.
+func (m *Matcher) Validate() error {
+ if !m.Name.IsValid() {
+ return fmt.Errorf("invalid name %q", m.Name)
+ }
+ if m.IsRegex {
+ if _, err := regexp.Compile(m.Value); err != nil {
+ return fmt.Errorf("invalid regular expression %q", m.Value)
+ }
+ } else if !LabelValue(m.Value).IsValid() || len(m.Value) == 0 {
+ return fmt.Errorf("invalid value %q", m.Value)
+ }
+ return nil
+}
+
+// Silence defines the representation of a silence definition
+// in the Prometheus ecosystem.
+type Silence struct {
+ ID uint64 `json:"id,omitempty"`
+
+ Matchers []*Matcher `json:"matchers"`
+
+ StartsAt time.Time `json:"startsAt"`
+ EndsAt time.Time `json:"endsAt"`
+
+ CreatedAt time.Time `json:"createdAt,omitempty"`
+ CreatedBy string `json:"createdBy"`
+ Comment string `json:"comment,omitempty"`
+}
+
+// Validate returns nil iff all fields of the silence have valid values.
+func (s *Silence) Validate() error {
+ if len(s.Matchers) == 0 {
+ return fmt.Errorf("at least one matcher required")
+ }
+ for _, m := range s.Matchers {
+ if err := m.Validate(); err != nil {
+ return fmt.Errorf("invalid matcher: %s", err)
+ }
+ }
+ if s.StartsAt.IsZero() {
+ return fmt.Errorf("start time missing")
+ }
+ if s.EndsAt.IsZero() {
+ return fmt.Errorf("end time missing")
+ }
+ if s.EndsAt.Before(s.StartsAt) {
+ return fmt.Errorf("start time must be before end time")
+ }
+ if s.CreatedBy == "" {
+ return fmt.Errorf("creator information missing")
+ }
+ if s.Comment == "" {
+ return fmt.Errorf("comment missing")
+ }
+ if s.CreatedAt.IsZero() {
+ return fmt.Errorf("creation timestamp missing")
+ }
+ return nil
+}
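A rough sketch of constructing and validating a silence with the types above; all field values are illustrative:

```go
package main

import (
	"log"
	"time"

	"github.com/prometheus/common/model"
)

func main() {
	now := time.Now()
	s := model.Silence{
		Matchers:  []*model.Matcher{{Name: "job", Value: "api", IsRegex: false}},
		StartsAt:  now,
		EndsAt:    now.Add(2 * time.Hour),
		CreatedAt: now,
		CreatedBy: "ops@example.com",
		Comment:   "planned maintenance",
	}
	if err := s.Validate(); err != nil {
		log.Fatalf("invalid silence: %s", err)
	}
}
```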
diff --git a/vendor/github.com/prometheus/common/model/time.go b/vendor/github.com/prometheus/common/model/time.go
new file mode 100644
index 00000000..ebc8bf6c
--- /dev/null
+++ b/vendor/github.com/prometheus/common/model/time.go
@@ -0,0 +1,230 @@
+// Copyright 2013 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package model
+
+import (
+ "fmt"
+ "math"
+ "regexp"
+ "strconv"
+ "strings"
+ "time"
+)
+
+const (
+	// MinimumTick is the minimum supported time resolution. This has to be
+	// at most time.Second in order for the code below to work.
+ minimumTick = time.Millisecond
+ // second is the Time duration equivalent to one second.
+ second = int64(time.Second / minimumTick)
+ // The number of nanoseconds per minimum tick.
+ nanosPerTick = int64(minimumTick / time.Nanosecond)
+
+ // Earliest is the earliest Time representable. Handy for
+ // initializing a high watermark.
+ Earliest = Time(math.MinInt64)
+ // Latest is the latest Time representable. Handy for initializing
+ // a low watermark.
+ Latest = Time(math.MaxInt64)
+)
+
+// Time is the number of milliseconds since the epoch
+// (1970-01-01 00:00 UTC) excluding leap seconds.
+type Time int64
+
+// Interval describes an interval between two timestamps.
+type Interval struct {
+ Start, End Time
+}
+
+// Now returns the current time as a Time.
+func Now() Time {
+ return TimeFromUnixNano(time.Now().UnixNano())
+}
+
+// TimeFromUnix returns the Time equivalent to the Unix Time t
+// provided in seconds.
+func TimeFromUnix(t int64) Time {
+ return Time(t * second)
+}
+
+// TimeFromUnixNano returns the Time equivalent to the Unix Time
+// t provided in nanoseconds.
+func TimeFromUnixNano(t int64) Time {
+ return Time(t / nanosPerTick)
+}
+
+// Equal reports whether two Times represent the same instant.
+func (t Time) Equal(o Time) bool {
+ return t == o
+}
+
+// Before reports whether the Time t is before o.
+func (t Time) Before(o Time) bool {
+ return t < o
+}
+
+// After reports whether the Time t is after o.
+func (t Time) After(o Time) bool {
+ return t > o
+}
+
+// Add returns the Time t + d.
+func (t Time) Add(d time.Duration) Time {
+ return t + Time(d/minimumTick)
+}
+
+// Sub returns the Duration t - o.
+func (t Time) Sub(o Time) time.Duration {
+ return time.Duration(t-o) * minimumTick
+}
+
+// Time returns the time.Time representation of t.
+func (t Time) Time() time.Time {
+ return time.Unix(int64(t)/second, (int64(t)%second)*nanosPerTick)
+}
+
+// Unix returns t as a Unix time, the number of seconds elapsed
+// since January 1, 1970 UTC.
+func (t Time) Unix() int64 {
+ return int64(t) / second
+}
+
+// UnixNano returns t as a Unix time, the number of nanoseconds elapsed
+// since January 1, 1970 UTC.
+func (t Time) UnixNano() int64 {
+ return int64(t) * nanosPerTick
+}
+
+// The number of digits after the dot.
+var dotPrecision = int(math.Log10(float64(second)))
+
+// String returns a string representation of the Time.
+func (t Time) String() string {
+ return strconv.FormatFloat(float64(t)/float64(second), 'f', -1, 64)
+}
+
+// MarshalJSON implements the json.Marshaler interface.
+func (t Time) MarshalJSON() ([]byte, error) {
+ return []byte(t.String()), nil
+}
+
+// UnmarshalJSON implements the json.Unmarshaler interface.
+func (t *Time) UnmarshalJSON(b []byte) error {
+ p := strings.Split(string(b), ".")
+ switch len(p) {
+ case 1:
+ v, err := strconv.ParseInt(string(p[0]), 10, 64)
+ if err != nil {
+ return err
+ }
+ *t = Time(v * second)
+
+ case 2:
+ v, err := strconv.ParseInt(string(p[0]), 10, 64)
+ if err != nil {
+ return err
+ }
+ v *= second
+
+ prec := dotPrecision - len(p[1])
+ if prec < 0 {
+ p[1] = p[1][:dotPrecision]
+ } else if prec > 0 {
+ p[1] = p[1] + strings.Repeat("0", prec)
+ }
+
+ va, err := strconv.ParseInt(p[1], 10, 32)
+ if err != nil {
+ return err
+ }
+
+ *t = Time(v + va)
+
+ default:
+ return fmt.Errorf("invalid time %q", string(b))
+ }
+ return nil
+}
+
+// Duration wraps time.Duration. It is used to parse the custom duration format
+// from YAML.
+// This type should not propagate beyond the scope of input/output processing.
+type Duration time.Duration
+
+// ParseDuration parses a string into a Duration, assuming that a day
+// always has 24h.
+func ParseDuration(durationStr string) (Duration, error) {
+ matches := durationRE.FindStringSubmatch(durationStr)
+ if len(matches) != 3 {
+ return 0, fmt.Errorf("not a valid duration string: %q", durationStr)
+ }
+ durSeconds, _ := strconv.Atoi(matches[1])
+ dur := time.Duration(durSeconds) * time.Second
+ unit := matches[2]
+ switch unit {
+ case "d":
+ dur *= 60 * 60 * 24
+ case "h":
+ dur *= 60 * 60
+ case "m":
+ dur *= 60
+ case "s":
+ dur *= 1
+ default:
+ return 0, fmt.Errorf("invalid time unit in duration string: %q", unit)
+ }
+ return Duration(dur), nil
+}
+
+var durationRE = regexp.MustCompile("^([0-9]+)([ywdhms]+)$")
+
+func (d Duration) String() string {
+ seconds := int64(time.Duration(d) / time.Second)
+ factors := map[string]int64{
+ "d": 60 * 60 * 24,
+ "h": 60 * 60,
+ "m": 60,
+ "s": 1,
+ }
+ unit := "s"
+ switch int64(0) {
+ case seconds % factors["d"]:
+ unit = "d"
+ case seconds % factors["h"]:
+ unit = "h"
+ case seconds % factors["m"]:
+ unit = "m"
+ }
+ return fmt.Sprintf("%v%v", seconds/factors[unit], unit)
+}
+
+// MarshalYAML implements the yaml.Marshaler interface.
+func (d Duration) MarshalYAML() (interface{}, error) {
+ return d.String(), nil
+}
+
+// UnmarshalYAML implements the yaml.Unmarshaler interface.
+func (d *Duration) UnmarshalYAML(unmarshal func(interface{}) error) error {
+ var s string
+ if err := unmarshal(&s); err != nil {
+ return err
+ }
+ dur, err := ParseDuration(s)
+ if err != nil {
+ return err
+ }
+ *d = dur
+ return nil
+}
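A small sketch exercising the Time and Duration helpers defined above:

```go
package main

import (
	"fmt"
	"log"
	"time"

	"github.com/prometheus/common/model"
)

func main() {
	t := model.Now()
	later := t.Add(5 * time.Minute)
	fmt.Println(later.Sub(t)) // 5m0s

	d, err := model.ParseDuration("90m")
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(d) // 90m
}
```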
diff --git a/vendor/github.com/prometheus/common/model/value.go b/vendor/github.com/prometheus/common/model/value.go
new file mode 100644
index 00000000..10ffb0bd
--- /dev/null
+++ b/vendor/github.com/prometheus/common/model/value.go
@@ -0,0 +1,395 @@
+// Copyright 2013 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package model
+
+import (
+ "encoding/json"
+ "fmt"
+ "sort"
+ "strconv"
+ "strings"
+)
+
+// A SampleValue is a representation of a value for a given sample at a given
+// time.
+type SampleValue float64
+
+// MarshalJSON implements json.Marshaler.
+func (v SampleValue) MarshalJSON() ([]byte, error) {
+ return json.Marshal(v.String())
+}
+
+// UnmarshalJSON implements json.Unmarshaler.
+func (v *SampleValue) UnmarshalJSON(b []byte) error {
+ if len(b) < 2 || b[0] != '"' || b[len(b)-1] != '"' {
+ return fmt.Errorf("sample value must be a quoted string")
+ }
+ f, err := strconv.ParseFloat(string(b[1:len(b)-1]), 64)
+ if err != nil {
+ return err
+ }
+ *v = SampleValue(f)
+ return nil
+}
+
+func (v SampleValue) Equal(o SampleValue) bool {
+ return v == o
+}
+
+func (v SampleValue) String() string {
+ return strconv.FormatFloat(float64(v), 'f', -1, 64)
+}
+
+// SamplePair pairs a SampleValue with a Timestamp.
+type SamplePair struct {
+ Timestamp Time
+ Value SampleValue
+}
+
+// MarshalJSON implements json.Marshaler.
+func (s SamplePair) MarshalJSON() ([]byte, error) {
+ t, err := json.Marshal(s.Timestamp)
+ if err != nil {
+ return nil, err
+ }
+ v, err := json.Marshal(s.Value)
+ if err != nil {
+ return nil, err
+ }
+ return []byte(fmt.Sprintf("[%s,%s]", t, v)), nil
+}
+
+// UnmarshalJSON implements json.Unmarshaler.
+func (s *SamplePair) UnmarshalJSON(b []byte) error {
+ v := [...]json.Unmarshaler{&s.Timestamp, &s.Value}
+ return json.Unmarshal(b, &v)
+}
+
+// Equal returns true if this SamplePair and o have equal Values and equal
+// Timestamps.
+func (s *SamplePair) Equal(o *SamplePair) bool {
+ return s == o || (s.Value == o.Value && s.Timestamp.Equal(o.Timestamp))
+}
+
+func (s SamplePair) String() string {
+ return fmt.Sprintf("%s @[%s]", s.Value, s.Timestamp)
+}
+
+// Sample is a sample pair associated with a metric.
+type Sample struct {
+ Metric Metric `json:"metric"`
+ Value SampleValue `json:"value"`
+ Timestamp Time `json:"timestamp"`
+}
+
+// Equal compares first the metrics, then the timestamp, then the value.
+func (s *Sample) Equal(o *Sample) bool {
+ if s == o {
+ return true
+ }
+
+ if !s.Metric.Equal(o.Metric) {
+ return false
+ }
+ if !s.Timestamp.Equal(o.Timestamp) {
+ return false
+ }
+ if s.Value != o.Value {
+ return false
+ }
+
+ return true
+}
+
+func (s Sample) String() string {
+ return fmt.Sprintf("%s => %s", s.Metric, SamplePair{
+ Timestamp: s.Timestamp,
+ Value: s.Value,
+ })
+}
+
+// MarshalJSON implements json.Marshaler.
+func (s Sample) MarshalJSON() ([]byte, error) {
+ v := struct {
+ Metric Metric `json:"metric"`
+ Value SamplePair `json:"value"`
+ }{
+ Metric: s.Metric,
+ Value: SamplePair{
+ Timestamp: s.Timestamp,
+ Value: s.Value,
+ },
+ }
+
+ return json.Marshal(&v)
+}
+
+// UnmarshalJSON implements json.Unmarshaler.
+func (s *Sample) UnmarshalJSON(b []byte) error {
+ v := struct {
+ Metric Metric `json:"metric"`
+ Value SamplePair `json:"value"`
+ }{
+ Metric: s.Metric,
+ Value: SamplePair{
+ Timestamp: s.Timestamp,
+ Value: s.Value,
+ },
+ }
+
+ if err := json.Unmarshal(b, &v); err != nil {
+ return err
+ }
+
+ s.Metric = v.Metric
+ s.Timestamp = v.Value.Timestamp
+ s.Value = v.Value.Value
+
+ return nil
+}
+
+// Samples is a sortable Sample slice. It implements sort.Interface.
+type Samples []*Sample
+
+func (s Samples) Len() int {
+ return len(s)
+}
+
+// Less compares first the metrics, then the timestamp.
+func (s Samples) Less(i, j int) bool {
+ switch {
+ case s[i].Metric.Before(s[j].Metric):
+ return true
+ case s[j].Metric.Before(s[i].Metric):
+ return false
+ case s[i].Timestamp.Before(s[j].Timestamp):
+ return true
+ default:
+ return false
+ }
+}
+
+func (s Samples) Swap(i, j int) {
+ s[i], s[j] = s[j], s[i]
+}
+
+// Equal compares two sets of samples and returns true if they are equal.
+func (s Samples) Equal(o Samples) bool {
+ if len(s) != len(o) {
+ return false
+ }
+
+ for i, sample := range s {
+ if !sample.Equal(o[i]) {
+ return false
+ }
+ }
+ return true
+}
+
+// SampleStream is a stream of Values belonging to an attached COWMetric.
+type SampleStream struct {
+ Metric Metric `json:"metric"`
+ Values []SamplePair `json:"values"`
+}
+
+func (ss SampleStream) String() string {
+ vals := make([]string, len(ss.Values))
+ for i, v := range ss.Values {
+ vals[i] = v.String()
+ }
+ return fmt.Sprintf("%s =>\n%s", ss.Metric, strings.Join(vals, "\n"))
+}
+
+// Value is a generic interface for values resulting from a query evaluation.
+type Value interface {
+ Type() ValueType
+ String() string
+}
+
+func (Matrix) Type() ValueType { return ValMatrix }
+func (Vector) Type() ValueType { return ValVector }
+func (*Scalar) Type() ValueType { return ValScalar }
+func (*String) Type() ValueType { return ValString }
+
+type ValueType int
+
+const (
+ ValNone ValueType = iota
+ ValScalar
+ ValVector
+ ValMatrix
+ ValString
+)
+
+// MarshalJSON implements json.Marshaler.
+func (et ValueType) MarshalJSON() ([]byte, error) {
+ return json.Marshal(et.String())
+}
+
+func (et *ValueType) UnmarshalJSON(b []byte) error {
+ var s string
+ if err := json.Unmarshal(b, &s); err != nil {
+ return err
+ }
+ switch s {
+ case "":
+ *et = ValNone
+ case "scalar":
+ *et = ValScalar
+ case "vector":
+ *et = ValVector
+ case "matrix":
+ *et = ValMatrix
+ case "string":
+ *et = ValString
+ default:
+ return fmt.Errorf("unknown value type %q", s)
+ }
+ return nil
+}
+
+func (e ValueType) String() string {
+ switch e {
+ case ValNone:
+ return ""
+ case ValScalar:
+ return "scalar"
+ case ValVector:
+ return "vector"
+ case ValMatrix:
+ return "matrix"
+ case ValString:
+ return "string"
+ }
+ panic("ValueType.String: unhandled value type")
+}
+
+// Scalar is a scalar value evaluated at the set timestamp.
+type Scalar struct {
+ Value SampleValue `json:"value"`
+ Timestamp Time `json:"timestamp"`
+}
+
+func (s Scalar) String() string {
+ return fmt.Sprintf("scalar: %v @[%v]", s.Value, s.Timestamp)
+}
+
+// MarshalJSON implements json.Marshaler.
+func (s Scalar) MarshalJSON() ([]byte, error) {
+ v := strconv.FormatFloat(float64(s.Value), 'f', -1, 64)
+ return json.Marshal([...]interface{}{s.Timestamp, string(v)})
+}
+
+// UnmarshalJSON implements json.Unmarshaler.
+func (s *Scalar) UnmarshalJSON(b []byte) error {
+ var f string
+ v := [...]interface{}{&s.Timestamp, &f}
+
+ if err := json.Unmarshal(b, &v); err != nil {
+ return err
+ }
+
+ value, err := strconv.ParseFloat(f, 64)
+ if err != nil {
+ return fmt.Errorf("error parsing sample value: %s", err)
+ }
+ s.Value = SampleValue(value)
+ return nil
+}
+
+// String is a string value evaluated at the set timestamp.
+type String struct {
+ Value string `json:"value"`
+ Timestamp Time `json:"timestamp"`
+}
+
+func (s *String) String() string {
+ return s.Value
+}
+
+// MarshalJSON implements json.Marshaler.
+func (s String) MarshalJSON() ([]byte, error) {
+ return json.Marshal([]interface{}{s.Timestamp, s.Value})
+}
+
+// UnmarshalJSON implements json.Unmarshaler.
+func (s *String) UnmarshalJSON(b []byte) error {
+ v := [...]interface{}{&s.Timestamp, &s.Value}
+ return json.Unmarshal(b, &v)
+}
+
+// Vector is basically only an alias for Samples, but the
+// contract is that in a Vector, all Samples have the same timestamp.
+type Vector []*Sample
+
+func (vec Vector) String() string {
+ entries := make([]string, len(vec))
+ for i, s := range vec {
+ entries[i] = s.String()
+ }
+ return strings.Join(entries, "\n")
+}
+
+func (vec Vector) Len() int { return len(vec) }
+func (vec Vector) Swap(i, j int) { vec[i], vec[j] = vec[j], vec[i] }
+
+// Less compares first the metrics, then the timestamp.
+func (vec Vector) Less(i, j int) bool {
+ switch {
+ case vec[i].Metric.Before(vec[j].Metric):
+ return true
+ case vec[j].Metric.Before(vec[i].Metric):
+ return false
+ case vec[i].Timestamp.Before(vec[j].Timestamp):
+ return true
+ default:
+ return false
+ }
+}
+
+// Equal compares two sets of samples and returns true if they are equal.
+func (vec Vector) Equal(o Vector) bool {
+ if len(vec) != len(o) {
+ return false
+ }
+
+ for i, sample := range vec {
+ if !sample.Equal(o[i]) {
+ return false
+ }
+ }
+ return true
+}
+
+// Matrix is a list of time series.
+type Matrix []*SampleStream
+
+func (m Matrix) Len() int { return len(m) }
+func (m Matrix) Less(i, j int) bool { return m[i].Metric.Before(m[j].Metric) }
+func (m Matrix) Swap(i, j int) { m[i], m[j] = m[j], m[i] }
+
+func (mat Matrix) String() string {
+ matCp := make(Matrix, len(mat))
+ copy(matCp, mat)
+ sort.Sort(matCp)
+
+ strs := make([]string, len(matCp))
+
+ for i, ss := range matCp {
+ strs[i] = ss.String()
+ }
+
+ return strings.Join(strs, "\n")
+}
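A sketch of decoding a single sample in the wire format implied by the (un)marshalers above; the payload shown is an assumed example:

```go
package main

import (
	"encoding/json"
	"fmt"
	"log"

	"github.com/prometheus/common/model"
)

func main() {
	payload := []byte(`{"metric":{"__name__":"up","job":"api"},"value":[1435781451.781,"1"]}`)

	var s model.Sample
	if err := json.Unmarshal(payload, &s); err != nil {
		log.Fatal(err)
	}
	fmt.Println(s.Metric, s.Value, s.Timestamp)
}
```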
diff --git a/vendor/github.com/prometheus/procfs/AUTHORS.md b/vendor/github.com/prometheus/procfs/AUTHORS.md
new file mode 100644
index 00000000..0c802dd8
--- /dev/null
+++ b/vendor/github.com/prometheus/procfs/AUTHORS.md
@@ -0,0 +1,20 @@
+The Prometheus project was started by Matt T. Proud (emeritus) and
+Julius Volz in 2012.
+
+Maintainers of this repository:
+
+* Tobias Schmidt
+
+The following individuals have contributed code to this repository
+(listed in alphabetical order):
+
+* Armen Baghumian
+* Bjoern Rabenstein
+* David Cournapeau
+* Ji-Hoon, Seol
+* Jonas Große Sundrup
+* Julius Volz
+* Matthias Rampke
+* Nicky Gerritsen
+* Rémi Audebert
+* Tobias Schmidt
diff --git a/vendor/github.com/prometheus/procfs/CONTRIBUTING.md b/vendor/github.com/prometheus/procfs/CONTRIBUTING.md
new file mode 100644
index 00000000..5705f0fb
--- /dev/null
+++ b/vendor/github.com/prometheus/procfs/CONTRIBUTING.md
@@ -0,0 +1,18 @@
+# Contributing
+
+Prometheus uses GitHub to manage reviews of pull requests.
+
+* If you have a trivial fix or improvement, go ahead and create a pull
+ request, addressing (with `@...`) one or more of the maintainers
+ (see [AUTHORS.md](AUTHORS.md)) in the description of the pull request.
+
+* If you plan to do something more involved, first discuss your ideas
+ on our [mailing list](https://groups.google.com/forum/?fromgroups#!forum/prometheus-developers).
+ This will avoid unnecessary work and surely give you and us a good deal
+ of inspiration.
+
+* Relevant coding style guidelines are the [Go Code Review
+ Comments](https://code.google.com/p/go-wiki/wiki/CodeReviewComments)
+ and the _Formatting and style_ section of Peter Bourgon's [Go: Best
+ Practices for Production
+ Environments](http://peter.bourgon.org/go-in-production/#formatting-and-style).
diff --git a/vendor/github.com/prometheus/procfs/LICENSE b/vendor/github.com/prometheus/procfs/LICENSE
new file mode 100644
index 00000000..261eeb9e
--- /dev/null
+++ b/vendor/github.com/prometheus/procfs/LICENSE
@@ -0,0 +1,201 @@
+ Apache License
+ Version 2.0, January 2004
+ http://www.apache.org/licenses/
+
+ TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+
+ 1. Definitions.
+
+ "License" shall mean the terms and conditions for use, reproduction,
+ and distribution as defined by Sections 1 through 9 of this document.
+
+ "Licensor" shall mean the copyright owner or entity authorized by
+ the copyright owner that is granting the License.
+
+ "Legal Entity" shall mean the union of the acting entity and all
+ other entities that control, are controlled by, or are under common
+ control with that entity. For the purposes of this definition,
+ "control" means (i) the power, direct or indirect, to cause the
+ direction or management of such entity, whether by contract or
+ otherwise, or (ii) ownership of fifty percent (50%) or more of the
+ outstanding shares, or (iii) beneficial ownership of such entity.
+
+ "You" (or "Your") shall mean an individual or Legal Entity
+ exercising permissions granted by this License.
+
+ "Source" form shall mean the preferred form for making modifications,
+ including but not limited to software source code, documentation
+ source, and configuration files.
+
+ "Object" form shall mean any form resulting from mechanical
+ transformation or translation of a Source form, including but
+ not limited to compiled object code, generated documentation,
+ and conversions to other media types.
+
+ "Work" shall mean the work of authorship, whether in Source or
+ Object form, made available under the License, as indicated by a
+ copyright notice that is included in or attached to the work
+ (an example is provided in the Appendix below).
+
+ "Derivative Works" shall mean any work, whether in Source or Object
+ form, that is based on (or derived from) the Work and for which the
+ editorial revisions, annotations, elaborations, or other modifications
+ represent, as a whole, an original work of authorship. For the purposes
+ of this License, Derivative Works shall not include works that remain
+ separable from, or merely link (or bind by name) to the interfaces of,
+ the Work and Derivative Works thereof.
+
+ "Contribution" shall mean any work of authorship, including
+ the original version of the Work and any modifications or additions
+ to that Work or Derivative Works thereof, that is intentionally
+ submitted to Licensor for inclusion in the Work by the copyright owner
+ or by an individual or Legal Entity authorized to submit on behalf of
+ the copyright owner. For the purposes of this definition, "submitted"
+ means any form of electronic, verbal, or written communication sent
+ to the Licensor or its representatives, including but not limited to
+ communication on electronic mailing lists, source code control systems,
+ and issue tracking systems that are managed by, or on behalf of, the
+ Licensor for the purpose of discussing and improving the Work, but
+ excluding communication that is conspicuously marked or otherwise
+ designated in writing by the copyright owner as "Not a Contribution."
+
+ "Contributor" shall mean Licensor and any individual or Legal Entity
+ on behalf of whom a Contribution has been received by Licensor and
+ subsequently incorporated within the Work.
+
+ 2. Grant of Copyright License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ copyright license to reproduce, prepare Derivative Works of,
+ publicly display, publicly perform, sublicense, and distribute the
+ Work and such Derivative Works in Source or Object form.
+
+ 3. Grant of Patent License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ (except as stated in this section) patent license to make, have made,
+ use, offer to sell, sell, import, and otherwise transfer the Work,
+ where such license applies only to those patent claims licensable
+ by such Contributor that are necessarily infringed by their
+ Contribution(s) alone or by combination of their Contribution(s)
+ with the Work to which such Contribution(s) was submitted. If You
+ institute patent litigation against any entity (including a
+ cross-claim or counterclaim in a lawsuit) alleging that the Work
+ or a Contribution incorporated within the Work constitutes direct
+ or contributory patent infringement, then any patent licenses
+ granted to You under this License for that Work shall terminate
+ as of the date such litigation is filed.
+
+ 4. Redistribution. You may reproduce and distribute copies of the
+ Work or Derivative Works thereof in any medium, with or without
+ modifications, and in Source or Object form, provided that You
+ meet the following conditions:
+
+ (a) You must give any other recipients of the Work or
+ Derivative Works a copy of this License; and
+
+ (b) You must cause any modified files to carry prominent notices
+ stating that You changed the files; and
+
+ (c) You must retain, in the Source form of any Derivative Works
+ that You distribute, all copyright, patent, trademark, and
+ attribution notices from the Source form of the Work,
+ excluding those notices that do not pertain to any part of
+ the Derivative Works; and
+
+ (d) If the Work includes a "NOTICE" text file as part of its
+ distribution, then any Derivative Works that You distribute must
+ include a readable copy of the attribution notices contained
+ within such NOTICE file, excluding those notices that do not
+ pertain to any part of the Derivative Works, in at least one
+ of the following places: within a NOTICE text file distributed
+ as part of the Derivative Works; within the Source form or
+ documentation, if provided along with the Derivative Works; or,
+ within a display generated by the Derivative Works, if and
+ wherever such third-party notices normally appear. The contents
+ of the NOTICE file are for informational purposes only and
+ do not modify the License. You may add Your own attribution
+ notices within Derivative Works that You distribute, alongside
+ or as an addendum to the NOTICE text from the Work, provided
+ that such additional attribution notices cannot be construed
+ as modifying the License.
+
+ You may add Your own copyright statement to Your modifications and
+ may provide additional or different license terms and conditions
+ for use, reproduction, or distribution of Your modifications, or
+ for any such Derivative Works as a whole, provided Your use,
+ reproduction, and distribution of the Work otherwise complies with
+ the conditions stated in this License.
+
+ 5. Submission of Contributions. Unless You explicitly state otherwise,
+ any Contribution intentionally submitted for inclusion in the Work
+ by You to the Licensor shall be under the terms and conditions of
+ this License, without any additional terms or conditions.
+ Notwithstanding the above, nothing herein shall supersede or modify
+ the terms of any separate license agreement you may have executed
+ with Licensor regarding such Contributions.
+
+ 6. Trademarks. This License does not grant permission to use the trade
+ names, trademarks, service marks, or product names of the Licensor,
+ except as required for reasonable and customary use in describing the
+ origin of the Work and reproducing the content of the NOTICE file.
+
+ 7. Disclaimer of Warranty. Unless required by applicable law or
+ agreed to in writing, Licensor provides the Work (and each
+ Contributor provides its Contributions) on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+ implied, including, without limitation, any warranties or conditions
+ of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
+ PARTICULAR PURPOSE. You are solely responsible for determining the
+ appropriateness of using or redistributing the Work and assume any
+ risks associated with Your exercise of permissions under this License.
+
+ 8. Limitation of Liability. In no event and under no legal theory,
+ whether in tort (including negligence), contract, or otherwise,
+ unless required by applicable law (such as deliberate and grossly
+ negligent acts) or agreed to in writing, shall any Contributor be
+ liable to You for damages, including any direct, indirect, special,
+ incidental, or consequential damages of any character arising as a
+ result of this License or out of the use or inability to use the
+ Work (including but not limited to damages for loss of goodwill,
+ work stoppage, computer failure or malfunction, or any and all
+ other commercial damages or losses), even if such Contributor
+ has been advised of the possibility of such damages.
+
+ 9. Accepting Warranty or Additional Liability. While redistributing
+ the Work or Derivative Works thereof, You may choose to offer,
+ and charge a fee for, acceptance of support, warranty, indemnity,
+ or other liability obligations and/or rights consistent with this
+ License. However, in accepting such obligations, You may act only
+ on Your own behalf and on Your sole responsibility, not on behalf
+ of any other Contributor, and only if You agree to indemnify,
+ defend, and hold each Contributor harmless for any liability
+ incurred by, or claims asserted against, such Contributor by reason
+ of your accepting any such warranty or additional liability.
+
+ END OF TERMS AND CONDITIONS
+
+ APPENDIX: How to apply the Apache License to your work.
+
+ To apply the Apache License to your work, attach the following
+ boilerplate notice, with the fields enclosed by brackets "[]"
+ replaced with your own identifying information. (Don't include
+ the brackets!) The text should be enclosed in the appropriate
+ comment syntax for the file format. We also recommend that a
+ file or class name and description of purpose be included on the
+ same "printed page" as the copyright notice for easier
+ identification within third-party archives.
+
+ Copyright [yyyy] [name of copyright owner]
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
diff --git a/vendor/github.com/prometheus/procfs/Makefile b/vendor/github.com/prometheus/procfs/Makefile
new file mode 100644
index 00000000..c264a49d
--- /dev/null
+++ b/vendor/github.com/prometheus/procfs/Makefile
@@ -0,0 +1,6 @@
+ci:
+ ! gofmt -l *.go | read nothing
+ go vet
+ go test -v ./...
+ go get github.com/golang/lint/golint
+ golint *.go
diff --git a/vendor/github.com/prometheus/procfs/NOTICE b/vendor/github.com/prometheus/procfs/NOTICE
new file mode 100644
index 00000000..53c5e9aa
--- /dev/null
+++ b/vendor/github.com/prometheus/procfs/NOTICE
@@ -0,0 +1,7 @@
+procfs provides functions to retrieve system, kernel and process
+metrics from the pseudo-filesystem proc.
+
+Copyright 2014-2015 The Prometheus Authors
+
+This product includes software developed at
+SoundCloud Ltd. (http://soundcloud.com/).
diff --git a/vendor/github.com/prometheus/procfs/README.md b/vendor/github.com/prometheus/procfs/README.md
new file mode 100644
index 00000000..6e7ee6b8
--- /dev/null
+++ b/vendor/github.com/prometheus/procfs/README.md
@@ -0,0 +1,10 @@
+# procfs
+
+This procfs package provides functions to retrieve system, kernel and process
+metrics from the pseudo-filesystem proc.
+
+*WARNING*: This package is a work in progress. Its API may still break in
+backwards-incompatible ways without warnings. Use it at your own risk.
+
+[GoDoc](https://godoc.org/github.com/prometheus/procfs)
+[Build Status](https://travis-ci.org/prometheus/procfs)
diff --git a/vendor/github.com/prometheus/procfs/doc.go b/vendor/github.com/prometheus/procfs/doc.go
new file mode 100644
index 00000000..e2acd6d4
--- /dev/null
+++ b/vendor/github.com/prometheus/procfs/doc.go
@@ -0,0 +1,45 @@
+// Copyright 2014 Prometheus Team
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// Package procfs provides functions to retrieve system, kernel and process
+// metrics from the pseudo-filesystem proc.
+//
+// Example:
+//
+// package main
+//
+// import (
+// "fmt"
+// "log"
+//
+// "github.com/prometheus/procfs"
+// )
+//
+// func main() {
+// p, err := procfs.Self()
+// if err != nil {
+// log.Fatalf("could not get process: %s", err)
+// }
+//
+// stat, err := p.NewStat()
+// if err != nil {
+// log.Fatalf("could not get process stat: %s", err)
+// }
+//
+// fmt.Printf("command: %s\n", stat.Comm)
+// fmt.Printf("cpu time: %fs\n", stat.CPUTime())
+// fmt.Printf("vsize: %dB\n", stat.VirtualMemory())
+// fmt.Printf("rss: %dB\n", stat.ResidentMemory())
+// }
+//
+package procfs
diff --git a/vendor/github.com/prometheus/procfs/fs.go b/vendor/github.com/prometheus/procfs/fs.go
new file mode 100644
index 00000000..49aaab05
--- /dev/null
+++ b/vendor/github.com/prometheus/procfs/fs.go
@@ -0,0 +1,33 @@
+package procfs
+
+import (
+ "fmt"
+ "os"
+ "path"
+)
+
+// FS represents the pseudo-filesystem proc, which provides an interface to
+// kernel data structures.
+type FS string
+
+// DefaultMountPoint is the common mount point of the proc filesystem.
+const DefaultMountPoint = "/proc"
+
+// NewFS returns a new FS mounted under the given mountPoint. It will error
+// if the mount point can't be read.
+func NewFS(mountPoint string) (FS, error) {
+ info, err := os.Stat(mountPoint)
+ if err != nil {
+ return "", fmt.Errorf("could not read %s: %s", mountPoint, err)
+ }
+ if !info.IsDir() {
+ return "", fmt.Errorf("mount point %s is not a directory", mountPoint)
+ }
+
+ return FS(mountPoint), nil
+}
+
+// Path returns the path of the given subsystem relative to the procfs root.
+func (fs FS) Path(p ...string) string {
+ return path.Join(append([]string{string(fs)}, p...)...)
+}
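A minimal sketch of the FS type above, assuming procfs is mounted at the default location:

```go
package main

import (
	"fmt"
	"log"

	"github.com/prometheus/procfs"
)

func main() {
	fs, err := procfs.NewFS(procfs.DefaultMountPoint)
	if err != nil {
		log.Fatal(err)
	}
	// Builds "/proc/net/ip_vs_stats" without touching the file.
	fmt.Println(fs.Path("net", "ip_vs_stats"))
}
```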
diff --git a/vendor/github.com/prometheus/procfs/ipvs.go b/vendor/github.com/prometheus/procfs/ipvs.go
new file mode 100644
index 00000000..e7012f73
--- /dev/null
+++ b/vendor/github.com/prometheus/procfs/ipvs.go
@@ -0,0 +1,224 @@
+package procfs
+
+import (
+ "bufio"
+ "encoding/hex"
+ "errors"
+ "fmt"
+ "io"
+ "io/ioutil"
+ "net"
+ "os"
+ "strconv"
+ "strings"
+)
+
+// IPVSStats holds IPVS statistics, as exposed by the kernel in `/proc/net/ip_vs_stats`.
+type IPVSStats struct {
+ // Total count of connections.
+ Connections uint64
+	// Total incoming packets processed.
+ IncomingPackets uint64
+	// Total outgoing packets processed.
+ OutgoingPackets uint64
+ // Total incoming traffic.
+ IncomingBytes uint64
+ // Total outgoing traffic.
+ OutgoingBytes uint64
+}
+
+// IPVSBackendStatus holds current metrics of one virtual / real address pair.
+type IPVSBackendStatus struct {
+ // The local (virtual) IP address.
+ LocalAddress net.IP
+ // The local (virtual) port.
+ LocalPort uint16
+ // The transport protocol (TCP, UDP).
+ Proto string
+ // The remote (real) IP address.
+ RemoteAddress net.IP
+ // The remote (real) port.
+ RemotePort uint16
+ // The current number of active connections for this virtual/real address pair.
+ ActiveConn uint64
+ // The current number of inactive connections for this virtual/real address pair.
+ InactConn uint64
+ // The current weight of this virtual/real address pair.
+ Weight uint64
+}
+
+// NewIPVSStats reads the IPVS statistics.
+func NewIPVSStats() (IPVSStats, error) {
+ fs, err := NewFS(DefaultMountPoint)
+ if err != nil {
+ return IPVSStats{}, err
+ }
+
+ return fs.NewIPVSStats()
+}
+
+// NewIPVSStats reads the IPVS statistics from the specified `proc` filesystem.
+func (fs FS) NewIPVSStats() (IPVSStats, error) {
+ file, err := os.Open(fs.Path("net/ip_vs_stats"))
+ if err != nil {
+ return IPVSStats{}, err
+ }
+ defer file.Close()
+
+ return parseIPVSStats(file)
+}
+
+// parseIPVSStats performs the actual parsing of `ip_vs_stats`.
+func parseIPVSStats(file io.Reader) (IPVSStats, error) {
+ var (
+ statContent []byte
+ statLines []string
+ statFields []string
+ stats IPVSStats
+ )
+
+ statContent, err := ioutil.ReadAll(file)
+ if err != nil {
+ return IPVSStats{}, err
+ }
+
+ statLines = strings.SplitN(string(statContent), "\n", 4)
+ if len(statLines) != 4 {
+ return IPVSStats{}, errors.New("ip_vs_stats corrupt: too short")
+ }
+
+ statFields = strings.Fields(statLines[2])
+ if len(statFields) != 5 {
+ return IPVSStats{}, errors.New("ip_vs_stats corrupt: unexpected number of fields")
+ }
+
+ stats.Connections, err = strconv.ParseUint(statFields[0], 16, 64)
+ if err != nil {
+ return IPVSStats{}, err
+ }
+ stats.IncomingPackets, err = strconv.ParseUint(statFields[1], 16, 64)
+ if err != nil {
+ return IPVSStats{}, err
+ }
+ stats.OutgoingPackets, err = strconv.ParseUint(statFields[2], 16, 64)
+ if err != nil {
+ return IPVSStats{}, err
+ }
+ stats.IncomingBytes, err = strconv.ParseUint(statFields[3], 16, 64)
+ if err != nil {
+ return IPVSStats{}, err
+ }
+ stats.OutgoingBytes, err = strconv.ParseUint(statFields[4], 16, 64)
+ if err != nil {
+ return IPVSStats{}, err
+ }
+
+ return stats, nil
+}
+
+// NewIPVSBackendStatus reads and returns the status of all (virtual,real) server pairs.
+func NewIPVSBackendStatus() ([]IPVSBackendStatus, error) {
+ fs, err := NewFS(DefaultMountPoint)
+ if err != nil {
+ return []IPVSBackendStatus{}, err
+ }
+
+ return fs.NewIPVSBackendStatus()
+}
+
+// NewIPVSBackendStatus reads and returns the status of all (virtual,real) server pairs from the specified `proc` filesystem.
+func (fs FS) NewIPVSBackendStatus() ([]IPVSBackendStatus, error) {
+ file, err := os.Open(fs.Path("net/ip_vs"))
+ if err != nil {
+ return nil, err
+ }
+ defer file.Close()
+
+ return parseIPVSBackendStatus(file)
+}
+
+func parseIPVSBackendStatus(file io.Reader) ([]IPVSBackendStatus, error) {
+ var (
+ status []IPVSBackendStatus
+ scanner = bufio.NewScanner(file)
+ proto string
+ localAddress net.IP
+ localPort uint16
+ err error
+ )
+
+ for scanner.Scan() {
+ fields := strings.Fields(string(scanner.Text()))
+ if len(fields) == 0 {
+ continue
+ }
+ switch {
+ case fields[0] == "IP" || fields[0] == "Prot" || fields[1] == "RemoteAddress:Port":
+ continue
+ case fields[0] == "TCP" || fields[0] == "UDP":
+ if len(fields) < 2 {
+ continue
+ }
+ proto = fields[0]
+ localAddress, localPort, err = parseIPPort(fields[1])
+ if err != nil {
+ return nil, err
+ }
+ case fields[0] == "->":
+ if len(fields) < 6 {
+ continue
+ }
+ remoteAddress, remotePort, err := parseIPPort(fields[1])
+ if err != nil {
+ return nil, err
+ }
+ weight, err := strconv.ParseUint(fields[3], 10, 64)
+ if err != nil {
+ return nil, err
+ }
+ activeConn, err := strconv.ParseUint(fields[4], 10, 64)
+ if err != nil {
+ return nil, err
+ }
+ inactConn, err := strconv.ParseUint(fields[5], 10, 64)
+ if err != nil {
+ return nil, err
+ }
+ status = append(status, IPVSBackendStatus{
+ LocalAddress: localAddress,
+ LocalPort: localPort,
+ RemoteAddress: remoteAddress,
+ RemotePort: remotePort,
+ Proto: proto,
+ Weight: weight,
+ ActiveConn: activeConn,
+ InactConn: inactConn,
+ })
+ }
+ }
+ return status, nil
+}
+
+func parseIPPort(s string) (net.IP, uint16, error) {
+ tmp := strings.SplitN(s, ":", 2)
+
+ if len(tmp) != 2 {
+ return nil, 0, fmt.Errorf("invalid IP:Port: %s", s)
+ }
+
+ if len(tmp[0]) != 8 && len(tmp[0]) != 32 {
+ return nil, 0, fmt.Errorf("invalid IP: %s", tmp[0])
+ }
+
+ ip, err := hex.DecodeString(tmp[0])
+ if err != nil {
+ return nil, 0, err
+ }
+
+ port, err := strconv.ParseUint(tmp[1], 16, 16)
+ if err != nil {
+ return nil, 0, err
+ }
+
+ return ip, uint16(port), nil
+}
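A sketch of reading the IPVS data exposed above; on hosts without IPVS the source files are absent and both calls simply return an error:

```go
package main

import (
	"fmt"
	"log"

	"github.com/prometheus/procfs"
)

func main() {
	stats, err := procfs.NewIPVSStats()
	if err != nil {
		log.Fatalf("reading ip_vs_stats: %s", err)
	}
	fmt.Printf("connections=%d in=%dB out=%dB\n",
		stats.Connections, stats.IncomingBytes, stats.OutgoingBytes)

	backends, err := procfs.NewIPVSBackendStatus()
	if err != nil {
		log.Fatalf("reading ip_vs: %s", err)
	}
	for _, b := range backends {
		fmt.Printf("%s %s:%d -> %s:%d active=%d\n",
			b.Proto, b.LocalAddress, b.LocalPort, b.RemoteAddress, b.RemotePort, b.ActiveConn)
	}
}
```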
diff --git a/vendor/github.com/prometheus/procfs/mdstat.go b/vendor/github.com/prometheus/procfs/mdstat.go
new file mode 100644
index 00000000..d7a248c0
--- /dev/null
+++ b/vendor/github.com/prometheus/procfs/mdstat.go
@@ -0,0 +1,138 @@
+package procfs
+
+import (
+ "fmt"
+ "io/ioutil"
+ "regexp"
+ "strconv"
+ "strings"
+)
+
+var (
+ statuslineRE = regexp.MustCompile(`(\d+) blocks .*\[(\d+)/(\d+)\] \[[U_]+\]`)
+ buildlineRE = regexp.MustCompile(`\((\d+)/\d+\)`)
+)
+
+// MDStat holds info parsed from /proc/mdstat.
+type MDStat struct {
+ // Name of the device.
+ Name string
+	// Activity state of the device.
+ ActivityState string
+ // Number of active disks.
+ DisksActive int64
+ // Total number of disks the device consists of.
+ DisksTotal int64
+ // Number of blocks the device holds.
+ BlocksTotal int64
+ // Number of blocks on the device that are in sync.
+ BlocksSynced int64
+}
+
+// ParseMDStat parses the mdstat file and returns a slice of MDStat structs with the relevant info.
+func (fs FS) ParseMDStat() (mdstates []MDStat, err error) {
+ mdStatusFilePath := fs.Path("mdstat")
+ content, err := ioutil.ReadFile(mdStatusFilePath)
+ if err != nil {
+ return []MDStat{}, fmt.Errorf("error parsing %s: %s", mdStatusFilePath, err)
+ }
+
+ mdStates := []MDStat{}
+ lines := strings.Split(string(content), "\n")
+ for i, l := range lines {
+ if l == "" {
+ continue
+ }
+ if l[0] == ' ' {
+ continue
+ }
+ if strings.HasPrefix(l, "Personalities") || strings.HasPrefix(l, "unused") {
+ continue
+ }
+
+ mainLine := strings.Split(l, " ")
+ if len(mainLine) < 3 {
+ return mdStates, fmt.Errorf("error parsing mdline: %s", l)
+ }
+ mdName := mainLine[0]
+ activityState := mainLine[2]
+
+ if len(lines) <= i+3 {
+ return mdStates, fmt.Errorf(
+ "error parsing %s: too few lines for md device %s",
+ mdStatusFilePath,
+ mdName,
+ )
+ }
+
+ active, total, size, err := evalStatusline(lines[i+1])
+ if err != nil {
+ return mdStates, fmt.Errorf("error parsing %s: %s", mdStatusFilePath, err)
+ }
+
+ // j is the line number of the syncing-line.
+ j := i + 2
+ if strings.Contains(lines[i+2], "bitmap") { // skip bitmap line
+ j = i + 3
+ }
+
+		// If the device is syncing at the moment, get the number of currently
+		// synced blocks; otherwise that number equals the size of the device.
+ syncedBlocks := size
+ if strings.Contains(lines[j], "recovery") || strings.Contains(lines[j], "resync") {
+ syncedBlocks, err = evalBuildline(lines[j])
+ if err != nil {
+ return mdStates, fmt.Errorf("error parsing %s: %s", mdStatusFilePath, err)
+ }
+ }
+
+ mdStates = append(mdStates, MDStat{
+ Name: mdName,
+ ActivityState: activityState,
+ DisksActive: active,
+ DisksTotal: total,
+ BlocksTotal: size,
+ BlocksSynced: syncedBlocks,
+ })
+ }
+
+ return mdStates, nil
+}
+
+func evalStatusline(statusline string) (active, total, size int64, err error) {
+ matches := statuslineRE.FindStringSubmatch(statusline)
+ if len(matches) != 4 {
+ return 0, 0, 0, fmt.Errorf("unexpected statusline: %s", statusline)
+ }
+
+ size, err = strconv.ParseInt(matches[1], 10, 64)
+ if err != nil {
+ return 0, 0, 0, fmt.Errorf("unexpected statusline %s: %s", statusline, err)
+ }
+
+ total, err = strconv.ParseInt(matches[2], 10, 64)
+ if err != nil {
+ return 0, 0, 0, fmt.Errorf("unexpected statusline %s: %s", statusline, err)
+ }
+
+ active, err = strconv.ParseInt(matches[3], 10, 64)
+ if err != nil {
+ return 0, 0, 0, fmt.Errorf("unexpected statusline %s: %s", statusline, err)
+ }
+
+ return active, total, size, nil
+}
+
+func evalBuildline(buildline string) (syncedBlocks int64, err error) {
+ matches := buildlineRE.FindStringSubmatch(buildline)
+ if len(matches) != 2 {
+ return 0, fmt.Errorf("unexpected buildline: %s", buildline)
+ }
+
+ syncedBlocks, err = strconv.ParseInt(matches[1], 10, 64)
+ if err != nil {
+ return 0, fmt.Errorf("%s in buildline: %s", err, buildline)
+ }
+
+ return syncedBlocks, nil
+}
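A sketch of listing software RAID devices via ParseMDStat above:

```go
package main

import (
	"fmt"
	"log"

	"github.com/prometheus/procfs"
)

func main() {
	fs, err := procfs.NewFS(procfs.DefaultMountPoint)
	if err != nil {
		log.Fatal(err)
	}
	mds, err := fs.ParseMDStat()
	if err != nil {
		log.Fatal(err)
	}
	for _, md := range mds {
		fmt.Printf("%s: %s, %d/%d disks, %d/%d blocks synced\n",
			md.Name, md.ActivityState, md.DisksActive, md.DisksTotal, md.BlocksSynced, md.BlocksTotal)
	}
}
```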
diff --git a/vendor/github.com/prometheus/procfs/proc.go b/vendor/github.com/prometheus/procfs/proc.go
new file mode 100644
index 00000000..deeab173
--- /dev/null
+++ b/vendor/github.com/prometheus/procfs/proc.go
@@ -0,0 +1,212 @@
+package procfs
+
+import (
+ "fmt"
+ "io/ioutil"
+ "os"
+ "strconv"
+ "strings"
+)
+
+// Proc provides information about a running process.
+type Proc struct {
+ // The process ID.
+ PID int
+
+ fs FS
+}
+
+// Procs represents a list of Proc structs.
+type Procs []Proc
+
+func (p Procs) Len() int { return len(p) }
+func (p Procs) Swap(i, j int) { p[i], p[j] = p[j], p[i] }
+func (p Procs) Less(i, j int) bool { return p[i].PID < p[j].PID }
+
+// Self returns a process for the current process read via /proc/self.
+func Self() (Proc, error) {
+ fs, err := NewFS(DefaultMountPoint)
+ if err != nil {
+ return Proc{}, err
+ }
+ return fs.Self()
+}
+
+// NewProc returns a process for the given pid under /proc.
+func NewProc(pid int) (Proc, error) {
+ fs, err := NewFS(DefaultMountPoint)
+ if err != nil {
+ return Proc{}, err
+ }
+ return fs.NewProc(pid)
+}
+
+// AllProcs returns a list of all currently available processes under /proc.
+func AllProcs() (Procs, error) {
+ fs, err := NewFS(DefaultMountPoint)
+ if err != nil {
+ return Procs{}, err
+ }
+ return fs.AllProcs()
+}
+
+// Self returns a process for the current process.
+func (fs FS) Self() (Proc, error) {
+ p, err := os.Readlink(fs.Path("self"))
+ if err != nil {
+ return Proc{}, err
+ }
+ pid, err := strconv.Atoi(strings.Replace(p, string(fs), "", -1))
+ if err != nil {
+ return Proc{}, err
+ }
+ return fs.NewProc(pid)
+}
+
+// NewProc returns a process for the given pid.
+func (fs FS) NewProc(pid int) (Proc, error) {
+ if _, err := os.Stat(fs.Path(strconv.Itoa(pid))); err != nil {
+ return Proc{}, err
+ }
+ return Proc{PID: pid, fs: fs}, nil
+}
+
+// AllProcs returns a list of all currently available processes.
+func (fs FS) AllProcs() (Procs, error) {
+ d, err := os.Open(fs.Path())
+ if err != nil {
+ return Procs{}, err
+ }
+ defer d.Close()
+
+ names, err := d.Readdirnames(-1)
+ if err != nil {
+ return Procs{}, fmt.Errorf("could not read %s: %s", d.Name(), err)
+ }
+
+ p := Procs{}
+ for _, n := range names {
+ pid, err := strconv.ParseInt(n, 10, 64)
+ if err != nil {
+ continue
+ }
+ p = append(p, Proc{PID: int(pid), fs: fs})
+ }
+
+ return p, nil
+}
+
+// CmdLine returns the command line of a process.
+func (p Proc) CmdLine() ([]string, error) {
+ f, err := os.Open(p.path("cmdline"))
+ if err != nil {
+ return nil, err
+ }
+ defer f.Close()
+
+ data, err := ioutil.ReadAll(f)
+ if err != nil {
+ return nil, err
+ }
+
+ if len(data) < 1 {
+ return []string{}, nil
+ }
+
+ return strings.Split(string(data[:len(data)-1]), string(byte(0))), nil
+}
+
+// Comm returns the command name of a process.
+func (p Proc) Comm() (string, error) {
+ f, err := os.Open(p.path("comm"))
+ if err != nil {
+ return "", err
+ }
+ defer f.Close()
+
+ data, err := ioutil.ReadAll(f)
+ if err != nil {
+ return "", err
+ }
+
+ return strings.TrimSpace(string(data)), nil
+}
+
+// Executable returns the absolute path of the executable command of a process.
+func (p Proc) Executable() (string, error) {
+ exe, err := os.Readlink(p.path("exe"))
+ if os.IsNotExist(err) {
+ return "", nil
+ }
+
+ return exe, err
+}
+
+// FileDescriptors returns the currently open file descriptors of a process.
+func (p Proc) FileDescriptors() ([]uintptr, error) {
+ names, err := p.fileDescriptors()
+ if err != nil {
+ return nil, err
+ }
+
+ fds := make([]uintptr, len(names))
+ for i, n := range names {
+ fd, err := strconv.ParseInt(n, 10, 32)
+ if err != nil {
+ return nil, fmt.Errorf("could not parse fd %s: %s", n, err)
+ }
+ fds[i] = uintptr(fd)
+ }
+
+ return fds, nil
+}
+
+// FileDescriptorTargets returns the targets of all file descriptors of a process.
+// If a file descriptor is not a symlink to a file (like a socket), that value will be the empty string.
+func (p Proc) FileDescriptorTargets() ([]string, error) {
+ names, err := p.fileDescriptors()
+ if err != nil {
+ return nil, err
+ }
+
+ targets := make([]string, len(names))
+
+ for i, name := range names {
+ target, err := os.Readlink(p.path("fd", name))
+ if err == nil {
+ targets[i] = target
+ }
+ }
+
+ return targets, nil
+}
+
+// FileDescriptorsLen returns the number of currently open file descriptors of
+// a process.
+func (p Proc) FileDescriptorsLen() (int, error) {
+ fds, err := p.fileDescriptors()
+ if err != nil {
+ return 0, err
+ }
+
+ return len(fds), nil
+}
+
+func (p Proc) fileDescriptors() ([]string, error) {
+ d, err := os.Open(p.path("fd"))
+ if err != nil {
+ return nil, err
+ }
+ defer d.Close()
+
+ names, err := d.Readdirnames(-1)
+ if err != nil {
+ return nil, fmt.Errorf("could not read %s: %s", d.Name(), err)
+ }
+
+ return names, nil
+}
+
+func (p Proc) path(pa ...string) string {
+ return p.fs.Path(append([]string{strconv.Itoa(p.PID)}, pa...)...)
+}
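A sketch combining the process helpers above for the current process:

```go
package main

import (
	"fmt"
	"log"

	"github.com/prometheus/procfs"
)

func main() {
	p, err := procfs.Self()
	if err != nil {
		log.Fatal(err)
	}
	comm, err := p.Comm()
	if err != nil {
		log.Fatal(err)
	}
	nfds, err := p.FileDescriptorsLen()
	if err != nil {
		log.Fatal(err)
	}
	fmt.Printf("pid=%d comm=%s open fds=%d\n", p.PID, comm, nfds)
}
```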
diff --git a/vendor/github.com/prometheus/procfs/proc_io.go b/vendor/github.com/prometheus/procfs/proc_io.go
new file mode 100644
index 00000000..b4e31d7b
--- /dev/null
+++ b/vendor/github.com/prometheus/procfs/proc_io.go
@@ -0,0 +1,55 @@
+package procfs
+
+import (
+ "fmt"
+ "io/ioutil"
+ "os"
+)
+
+// ProcIO models the content of /proc/<pid>/io.
+type ProcIO struct {
+ // Chars read.
+ RChar uint64
+ // Chars written.
+ WChar uint64
+ // Read syscalls.
+ SyscR uint64
+ // Write syscalls.
+ SyscW uint64
+ // Bytes read.
+ ReadBytes uint64
+ // Bytes written.
+ WriteBytes uint64
+ // Bytes written, but taking into account truncation. See
+ // Documentation/filesystems/proc.txt in the kernel sources for
+ // detailed explanation.
+ CancelledWriteBytes int64
+}
+
+// NewIO creates a new ProcIO instance from a given Proc instance.
+func (p Proc) NewIO() (ProcIO, error) {
+ pio := ProcIO{}
+
+ f, err := os.Open(p.path("io"))
+ if err != nil {
+ return pio, err
+ }
+ defer f.Close()
+
+ data, err := ioutil.ReadAll(f)
+ if err != nil {
+ return pio, err
+ }
+
+ ioFormat := "rchar: %d\nwchar: %d\nsyscr: %d\nsyscw: %d\n" +
+ "read_bytes: %d\nwrite_bytes: %d\n" +
+ "cancelled_write_bytes: %d\n"
+
+ _, err = fmt.Sscanf(string(data), ioFormat, &pio.RChar, &pio.WChar, &pio.SyscR,
+ &pio.SyscW, &pio.ReadBytes, &pio.WriteBytes, &pio.CancelledWriteBytes)
+ if err != nil {
+ return pio, err
+ }
+
+ return pio, nil
+}
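A sketch of reading per-process I/O counters via NewIO above (reading /proc/<pid>/io may require elevated permissions):

```go
package main

import (
	"fmt"
	"log"

	"github.com/prometheus/procfs"
)

func main() {
	p, err := procfs.Self()
	if err != nil {
		log.Fatal(err)
	}
	io, err := p.NewIO()
	if err != nil {
		log.Fatal(err)
	}
	fmt.Printf("read=%dB written=%dB syscalls r/w=%d/%d\n",
		io.ReadBytes, io.WriteBytes, io.SyscR, io.SyscW)
}
```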
diff --git a/vendor/github.com/prometheus/procfs/proc_limits.go b/vendor/github.com/prometheus/procfs/proc_limits.go
new file mode 100644
index 00000000..acac02bb
--- /dev/null
+++ b/vendor/github.com/prometheus/procfs/proc_limits.go
@@ -0,0 +1,137 @@
+package procfs
+
+import (
+ "bufio"
+ "fmt"
+ "os"
+ "regexp"
+ "strconv"
+)
+
+// ProcLimits represents the soft limits for each of the process's resource
+// limits. For more information see getrlimit(2):
+// http://man7.org/linux/man-pages/man2/getrlimit.2.html.
+type ProcLimits struct {
+ // CPU time limit in seconds.
+ CPUTime int
+ // Maximum size of files that the process may create.
+ FileSize int
+ // Maximum size of the process's data segment (initialized data,
+ // uninitialized data, and heap).
+ DataSize int
+ // Maximum size of the process stack in bytes.
+ StackSize int
+ // Maximum size of a core file.
+ CoreFileSize int
+ // Limit of the process's resident set in pages.
+ ResidentSet int
+ // Maximum number of processes that can be created for the real user ID of
+ // the calling process.
+ Processes int
+ // Value one greater than the maximum file descriptor number that can be
+ // opened by this process.
+ OpenFiles int
+ // Maximum number of bytes of memory that may be locked into RAM.
+ LockedMemory int
+ // Maximum size of the process's virtual memory address space in bytes.
+ AddressSpace int
+ // Limit on the combined number of flock(2) locks and fcntl(2) leases that
+ // this process may establish.
+ FileLocks int
+ // Limit of signals that may be queued for the real user ID of the calling
+ // process.
+ PendingSignals int
+ // Limit on the number of bytes that can be allocated for POSIX message
+ // queues for the real user ID of the calling process.
+ MsqqueueSize int
+ // Limit of the nice priority set using setpriority(2) or nice(2).
+ NicePriority int
+ // Limit of the real-time priority set using sched_setscheduler(2) or
+ // sched_setparam(2).
+ RealtimePriority int
+ // Limit (in microseconds) on the amount of CPU time that a process
+ // scheduled under a real-time scheduling policy may consume without making
+ // a blocking system call.
+ RealtimeTimeout int
+}
+
+const (
+ limitsFields = 3
+ limitsUnlimited = "unlimited"
+)
+
+var (
+ limitsDelimiter = regexp.MustCompile(" +")
+)
+
+// NewLimits returns the current soft limits of the process.
+func (p Proc) NewLimits() (ProcLimits, error) {
+ f, err := os.Open(p.path("limits"))
+ if err != nil {
+ return ProcLimits{}, err
+ }
+ defer f.Close()
+
+ var (
+ l = ProcLimits{}
+ s = bufio.NewScanner(f)
+ )
+ for s.Scan() {
+ fields := limitsDelimiter.Split(s.Text(), limitsFields)
+ if len(fields) != limitsFields {
+ return ProcLimits{}, fmt.Errorf(
+ "couldn't parse %s line %s", f.Name(), s.Text())
+ }
+
+ switch fields[0] {
+ case "Max cpu time":
+ l.CPUTime, err = parseInt(fields[1])
+		case "Max file size":
+			l.FileSize, err = parseInt(fields[1])
+ case "Max data size":
+ l.DataSize, err = parseInt(fields[1])
+ case "Max stack size":
+ l.StackSize, err = parseInt(fields[1])
+ case "Max core file size":
+ l.CoreFileSize, err = parseInt(fields[1])
+ case "Max resident set":
+ l.ResidentSet, err = parseInt(fields[1])
+ case "Max processes":
+ l.Processes, err = parseInt(fields[1])
+ case "Max open files":
+ l.OpenFiles, err = parseInt(fields[1])
+ case "Max locked memory":
+ l.LockedMemory, err = parseInt(fields[1])
+ case "Max address space":
+ l.AddressSpace, err = parseInt(fields[1])
+ case "Max file locks":
+ l.FileLocks, err = parseInt(fields[1])
+ case "Max pending signals":
+ l.PendingSignals, err = parseInt(fields[1])
+ case "Max msgqueue size":
+ l.MsqqueueSize, err = parseInt(fields[1])
+ case "Max nice priority":
+ l.NicePriority, err = parseInt(fields[1])
+ case "Max realtime priority":
+ l.RealtimePriority, err = parseInt(fields[1])
+ case "Max realtime timeout":
+ l.RealtimeTimeout, err = parseInt(fields[1])
+ }
+ if err != nil {
+ return ProcLimits{}, err
+ }
+ }
+
+ return l, s.Err()
+}
+
+func parseInt(s string) (int, error) {
+ if s == limitsUnlimited {
+ return -1, nil
+ }
+ i, err := strconv.ParseInt(s, 10, 32)
+ if err != nil {
+ return 0, fmt.Errorf("couldn't parse value %s: %s", s, err)
+ }
+ return int(i), nil
+}
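A sketch of inspecting the soft limits parsed above; note that parseInt maps the literal "unlimited" to -1, so callers should treat negative values accordingly:

```go
package main

import (
	"fmt"
	"log"

	"github.com/prometheus/procfs"
)

func main() {
	p, err := procfs.Self()
	if err != nil {
		log.Fatal(err)
	}
	l, err := p.NewLimits()
	if err != nil {
		log.Fatal(err)
	}
	fmt.Printf("max open files: %d (-1 means unlimited)\n", l.OpenFiles)
}
```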
diff --git a/vendor/github.com/prometheus/procfs/proc_stat.go b/vendor/github.com/prometheus/procfs/proc_stat.go
new file mode 100644
index 00000000..724e271b
--- /dev/null
+++ b/vendor/github.com/prometheus/procfs/proc_stat.go
@@ -0,0 +1,175 @@
+package procfs
+
+import (
+ "bytes"
+ "fmt"
+ "io/ioutil"
+ "os"
+)
+
+// Originally, this USER_HZ value was dynamically retrieved via a sysconf call
+// which required cgo. However, that caused a lot of problems regarding
+// cross-compilation. Alternatives such as running a binary to determine the
+// value, or trying to derive it in some other way were all problematic. After
+// much research it was determined that USER_HZ is actually hardcoded to 100 on
+// all Go-supported platforms as of the time of this writing. This is why we
+// decided to hardcode it here as well. It is not impossible that there could
+// be systems with exceptions, but they should be very exotic edge cases, and
+// in that case, the worst outcome will be two misreported metrics.
+//
+// See also the following discussions:
+//
+// - https://github.com/prometheus/node_exporter/issues/52
+// - https://github.com/prometheus/procfs/pull/2
+// - http://stackoverflow.com/questions/17410841/how-does-user-hz-solve-the-jiffy-scaling-issue
+const userHZ = 100
+
+// ProcStat provides status information about the process,
+// read from /proc/[pid]/stat.
+type ProcStat struct {
+ // The process ID.
+ PID int
+ // The filename of the executable.
+ Comm string
+ // The process state.
+ State string
+ // The PID of the parent of this process.
+ PPID int
+ // The process group ID of the process.
+ PGRP int
+ // The session ID of the process.
+ Session int
+ // The controlling terminal of the process.
+ TTY int
+ // The ID of the foreground process group of the controlling terminal of
+ // the process.
+ TPGID int
+ // The kernel flags word of the process.
+ Flags uint
+ // The number of minor faults the process has made which have not required
+ // loading a memory page from disk.
+ MinFlt uint
+ // The number of minor faults that the process's waited-for children have
+ // made.
+ CMinFlt uint
+ // The number of major faults the process has made which have required
+ // loading a memory page from disk.
+ MajFlt uint
+ // The number of major faults that the process's waited-for children have
+ // made.
+ CMajFlt uint
+ // Amount of time that this process has been scheduled in user mode,
+ // measured in clock ticks.
+ UTime uint
+ // Amount of time that this process has been scheduled in kernel mode,
+ // measured in clock ticks.
+ STime uint
+ // Amount of time that this process's waited-for children have been
+ // scheduled in user mode, measured in clock ticks.
+ CUTime uint
+ // Amount of time that this process's waited-for children have been
+ // scheduled in kernel mode, measured in clock ticks.
+ CSTime uint
+ // For processes running a real-time scheduling policy, this is the negated
+ // scheduling priority, minus one.
+ Priority int
+ // The nice value, a value in the range 19 (low priority) to -20 (high
+ // priority).
+ Nice int
+ // Number of threads in this process.
+ NumThreads int
+ // The time the process started after system boot, the value is expressed
+ // in clock ticks.
+ Starttime uint64
+ // Virtual memory size in bytes.
+ VSize int
+ // Resident set size in pages.
+ RSS int
+
+ fs FS
+}
+
+// NewStat returns the current status information of the process.
+func (p Proc) NewStat() (ProcStat, error) {
+ f, err := os.Open(p.path("stat"))
+ if err != nil {
+ return ProcStat{}, err
+ }
+ defer f.Close()
+
+ data, err := ioutil.ReadAll(f)
+ if err != nil {
+ return ProcStat{}, err
+ }
+
+ var (
+ ignore int
+
+ s = ProcStat{PID: p.PID, fs: p.fs}
+ l = bytes.Index(data, []byte("("))
+ r = bytes.LastIndex(data, []byte(")"))
+ )
+
+ if l < 0 || r < 0 {
+ return ProcStat{}, fmt.Errorf(
+ "unexpected format, couldn't extract comm: %s",
+ data,
+ )
+ }
+
+ s.Comm = string(data[l+1 : r])
+ _, err = fmt.Fscan(
+ bytes.NewBuffer(data[r+2:]),
+ &s.State,
+ &s.PPID,
+ &s.PGRP,
+ &s.Session,
+ &s.TTY,
+ &s.TPGID,
+ &s.Flags,
+ &s.MinFlt,
+ &s.CMinFlt,
+ &s.MajFlt,
+ &s.CMajFlt,
+ &s.UTime,
+ &s.STime,
+ &s.CUTime,
+ &s.CSTime,
+ &s.Priority,
+ &s.Nice,
+ &s.NumThreads,
+ &ignore,
+ &s.Starttime,
+ &s.VSize,
+ &s.RSS,
+ )
+ if err != nil {
+ return ProcStat{}, err
+ }
+
+ return s, nil
+}
+
+// VirtualMemory returns the virtual memory size in bytes.
+func (s ProcStat) VirtualMemory() int {
+ return s.VSize
+}
+
+// ResidentMemory returns the resident memory size in bytes.
+func (s ProcStat) ResidentMemory() int {
+ return s.RSS * os.Getpagesize()
+}
+
+// StartTime returns the time the process started as a unix timestamp in seconds.
+func (s ProcStat) StartTime() (float64, error) {
+ stat, err := s.fs.NewStat()
+ if err != nil {
+ return 0, err
+ }
+ return float64(stat.BootTime) + (float64(s.Starttime) / userHZ), nil
+}
+
+// CPUTime returns the total CPU user and system time in seconds.
+func (s ProcStat) CPUTime() float64 {
+ return float64(s.UTime+s.STime) / userHZ
+}
diff --git a/vendor/github.com/prometheus/procfs/stat.go b/vendor/github.com/prometheus/procfs/stat.go
new file mode 100644
index 00000000..1ca217e8
--- /dev/null
+++ b/vendor/github.com/prometheus/procfs/stat.go
@@ -0,0 +1,56 @@
+package procfs
+
+import (
+ "bufio"
+ "fmt"
+ "os"
+ "strconv"
+ "strings"
+)
+
+// Stat represents kernel/system statistics.
+type Stat struct {
+ // Boot time in seconds since the Epoch.
+ BootTime int64
+}
+
+// NewStat returns kernel/system statistics read from /proc/stat.
+func NewStat() (Stat, error) {
+ fs, err := NewFS(DefaultMountPoint)
+ if err != nil {
+ return Stat{}, err
+ }
+
+ return fs.NewStat()
+}
+
+// NewStat returns information about current kernel/system statistics.
+func (fs FS) NewStat() (Stat, error) {
+ f, err := os.Open(fs.Path("stat"))
+ if err != nil {
+ return Stat{}, err
+ }
+ defer f.Close()
+
+ s := bufio.NewScanner(f)
+ for s.Scan() {
+ line := s.Text()
+ if !strings.HasPrefix(line, "btime") {
+ continue
+ }
+ fields := strings.Fields(line)
+ if len(fields) != 2 {
+ return Stat{}, fmt.Errorf("couldn't parse %s line %s", f.Name(), line)
+ }
+ i, err := strconv.ParseInt(fields[1], 10, 32)
+ if err != nil {
+ return Stat{}, fmt.Errorf("couldn't parse %s: %s", fields[1], err)
+ }
+ return Stat{BootTime: i}, nil
+ }
+ if err := s.Err(); err != nil {
+ return Stat{}, fmt.Errorf("couldn't parse %s: %s", f.Name(), err)
+ }
+
+ return Stat{}, fmt.Errorf("couldn't parse %s, missing btime", f.Name())
+}
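
The two procfs files above expose a small API. A rough usage sketch follows (illustrative only, not part of the vendored diff); it assumes procfs.Self(), the Proc constructor for the calling process, which is defined elsewhere in the package, while everything else comes from the hunks above.

package main

import (
	"fmt"
	"log"

	"github.com/prometheus/procfs"
)

func main() {
	// Boot time, parsed from the btime line of /proc/stat (stat.go above).
	stat, err := procfs.NewStat()
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println("boot time (unix seconds):", stat.BootTime)

	// procfs.Self() is assumed here; it is defined outside these hunks.
	p, err := procfs.Self()
	if err != nil {
		log.Fatal(err)
	}
	ps, err := p.NewStat() // parses /proc/self/stat (proc_stat.go above)
	if err != nil {
		log.Fatal(err)
	}
	start, _ := ps.StartTime() // BootTime + Starttime/userHZ
	fmt.Printf("started at %.0f, cpu %.2fs, rss %d bytes\n",
		start, ps.CPUTime(), ps.ResidentMemory())
}

Note how StartTime ties the two files together: it converts the Starttime tick count from /proc/[pid]/stat into wall-clock time using the BootTime read from /proc/stat.
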
diff --git a/vendor/github.com/soundcloud/go-runit/LICENSE b/vendor/github.com/soundcloud/go-runit/LICENSE
new file mode 100644
index 00000000..67e61e7a
--- /dev/null
+++ b/vendor/github.com/soundcloud/go-runit/LICENSE
@@ -0,0 +1,21 @@
+The MIT License (MIT)
+
+Copyright (c) 2015 SoundCloud Ltd.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in all
+copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+SOFTWARE.
diff --git a/vendor/github.com/soundcloud/go-runit/runit/runit.go b/vendor/github.com/soundcloud/go-runit/runit/runit.go
new file mode 100644
index 00000000..4bb85d7c
--- /dev/null
+++ b/vendor/github.com/soundcloud/go-runit/runit/runit.go
@@ -0,0 +1,151 @@
+package runit
+
+import (
+ "errors"
+ "fmt"
+ "io/ioutil"
+ "os"
+ "syscall"
+ "time"
+)
+
+const (
+ defaultServiceDir = "/etc/service"
+
+ taiOffset = 4611686018427387914
+ statusLen = 20
+
+ posTimeStart = 0
+ posTimeEnd = 7
+ posPidStart = 12
+ posPidEnd = 15
+
+ posWant = 17
+ posState = 19
+
+ StateDown = 0
+ StateUp = 1
+ StateFinish = 2
+)
+
+var (
+ ENoRunsv = errors.New("runsv not running")
+ StateToString = map[int]string{
+ StateDown: "down",
+ StateUp: "up",
+ StateFinish: "finish",
+ }
+)
+
+type SvStatus struct {
+ Pid int
+ Duration int
+ Timestamp time.Time
+ State int
+ NormallyUp bool
+ Want int
+}
+
+type service struct {
+ Name string
+ ServiceDir string
+}
+
+func GetServices(dir string) ([]*service, error) {
+ if dir == "" {
+ dir = defaultServiceDir
+ }
+ files, err := ioutil.ReadDir(dir)
+ if err != nil {
+ return nil, err
+ }
+ services := []*service{}
+ for _, file := range files {
+ if file.Mode()&os.ModeSymlink == os.ModeSymlink || file.IsDir() {
+ services = append(services, GetService(file.Name(), dir))
+ }
+ }
+ return services, nil
+}
+
+func GetService(name string, dir string) *service {
+ if dir == "" {
+ dir = defaultServiceDir
+ }
+ r := service{Name: name, ServiceDir: dir}
+ return &r
+}
+
+func (s *service) file(file string) string {
+ return fmt.Sprintf("%s/%s/supervise/%s", s.ServiceDir, s.Name, file)
+}
+
+func (s *service) runsvRunning() (bool, error) {
+ file, err := os.OpenFile(s.file("ok"), os.O_WRONLY, 0)
+ if err != nil {
+ if err == syscall.ENXIO {
+ return false, nil
+ }
+ return false, err
+ }
+ file.Close()
+ return true, nil
+}
+
+func (s *service) status() ([]byte, error) {
+ file, err := os.Open(s.file("status"))
+ if err != nil {
+ return nil, err
+ }
+ defer file.Close()
+ status := make([]byte, statusLen)
+ _, err = file.Read(status)
+ return status, err
+}
+
+func (s *service) NormallyUp() bool {
+ _, err := os.Stat(s.file("down"))
+ return err != nil
+}
+
+func (s *service) Status() (*SvStatus, error) {
+ status, err := s.status()
+ if err != nil {
+ return nil, err
+ }
+
+ var pid int
+ pid = int(status[posPidEnd])
+ for i := posPidEnd - 1; i >= posPidStart; i-- {
+ pid <<= 8
+ pid += int(status[i])
+ }
+
+ tai := int64(status[posTimeStart])
+ for i := posTimeStart + 1; i <= posTimeEnd; i++ {
+ tai <<= 8
+ tai += int64(status[i])
+ }
+ state := status[posState] // 0: down, 1: run, 2: finish
+
+ tv := &syscall.Timeval{}
+ if err := syscall.Gettimeofday(tv); err != nil {
+ return nil, err
+ }
+ sS := SvStatus{
+ Pid: pid,
+ Timestamp: time.Unix(tai-taiOffset, 0), // FIXME: do we just select the wrong slice?
+ Duration: int(int64(tv.Sec) - (tai - taiOffset)),
+ State: int(state),
+ NormallyUp: s.NormallyUp(),
+ }
+
+ switch status[posWant] {
+ case 'u':
+ sS.Want = StateUp
+ case 'd':
+ sS.Want = StateDown
+ }
+
+ return &sS, nil
+}
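
go-runit's exported surface is similarly small. A minimal sketch of how a collector might enumerate runit services, using only the names defined in this file:

package main

import (
	"fmt"
	"log"

	"github.com/soundcloud/go-runit/runit"
)

func main() {
	// An empty string falls back to the default /etc/service directory.
	services, err := runit.GetServices("")
	if err != nil {
		log.Fatal(err)
	}
	for _, svc := range services {
		status, err := svc.Status()
		if err != nil {
			// e.g. the supervise/status file is missing or unreadable.
			log.Printf("%s: %v", svc.Name, err)
			continue
		}
		fmt.Printf("%s: %s (pid %d, up %ds, normally up: %v)\n",
			svc.Name, runit.StateToString[status.State],
			status.Pid, status.Duration, status.NormallyUp)
	}
}
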
diff --git a/vendor/golang.org/x/sys/LICENSE b/vendor/golang.org/x/sys/LICENSE
new file mode 100644
index 00000000..6a66aea5
--- /dev/null
+++ b/vendor/golang.org/x/sys/LICENSE
@@ -0,0 +1,27 @@
+Copyright (c) 2009 The Go Authors. All rights reserved.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are
+met:
+
+ * Redistributions of source code must retain the above copyright
+notice, this list of conditions and the following disclaimer.
+ * Redistributions in binary form must reproduce the above
+copyright notice, this list of conditions and the following disclaimer
+in the documentation and/or other materials provided with the
+distribution.
+ * Neither the name of Google Inc. nor the names of its
+contributors may be used to endorse or promote products derived from
+this software without specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
diff --git a/vendor/golang.org/x/sys/unix/asm.s b/vendor/golang.org/x/sys/unix/asm.s
new file mode 100644
index 00000000..8ed2fdb9
--- /dev/null
+++ b/vendor/golang.org/x/sys/unix/asm.s
@@ -0,0 +1,10 @@
+// Copyright 2014 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// +build !gccgo
+
+#include "textflag.h"
+
+TEXT ·use(SB),NOSPLIT,$0
+ RET
diff --git a/vendor/golang.org/x/sys/unix/asm_darwin_386.s b/vendor/golang.org/x/sys/unix/asm_darwin_386.s
new file mode 100644
index 00000000..8a727831
--- /dev/null
+++ b/vendor/golang.org/x/sys/unix/asm_darwin_386.s
@@ -0,0 +1,29 @@
+// Copyright 2009 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// +build !gccgo
+
+#include "textflag.h"
+
+//
+// System call support for 386, Darwin
+//
+
+// Just jump to package syscall's implementation for all these functions.
+// The runtime may know about them.
+
+TEXT ·Syscall(SB),NOSPLIT,$0-28
+ JMP syscall·Syscall(SB)
+
+TEXT ·Syscall6(SB),NOSPLIT,$0-40
+ JMP syscall·Syscall6(SB)
+
+TEXT ·Syscall9(SB),NOSPLIT,$0-52
+ JMP syscall·Syscall9(SB)
+
+TEXT ·RawSyscall(SB),NOSPLIT,$0-28
+ JMP syscall·RawSyscall(SB)
+
+TEXT ·RawSyscall6(SB),NOSPLIT,$0-40
+ JMP syscall·RawSyscall6(SB)
diff --git a/vendor/golang.org/x/sys/unix/asm_darwin_amd64.s b/vendor/golang.org/x/sys/unix/asm_darwin_amd64.s
new file mode 100644
index 00000000..6321421f
--- /dev/null
+++ b/vendor/golang.org/x/sys/unix/asm_darwin_amd64.s
@@ -0,0 +1,29 @@
+// Copyright 2009 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// +build !gccgo
+
+#include "textflag.h"
+
+//
+// System call support for AMD64, Darwin
+//
+
+// Just jump to package syscall's implementation for all these functions.
+// The runtime may know about them.
+
+TEXT ·Syscall(SB),NOSPLIT,$0-56
+ JMP syscall·Syscall(SB)
+
+TEXT ·Syscall6(SB),NOSPLIT,$0-80
+ JMP syscall·Syscall6(SB)
+
+TEXT ·Syscall9(SB),NOSPLIT,$0-104
+ JMP syscall·Syscall9(SB)
+
+TEXT ·RawSyscall(SB),NOSPLIT,$0-56
+ JMP syscall·RawSyscall(SB)
+
+TEXT ·RawSyscall6(SB),NOSPLIT,$0-80
+ JMP syscall·RawSyscall6(SB)
diff --git a/vendor/golang.org/x/sys/unix/asm_darwin_arm.s b/vendor/golang.org/x/sys/unix/asm_darwin_arm.s
new file mode 100644
index 00000000..333242d5
--- /dev/null
+++ b/vendor/golang.org/x/sys/unix/asm_darwin_arm.s
@@ -0,0 +1,30 @@
+// Copyright 2015 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// +build !gccgo
+// +build arm,darwin
+
+#include "textflag.h"
+
+//
+// System call support for ARM, Darwin
+//
+
+// Just jump to package syscall's implementation for all these functions.
+// The runtime may know about them.
+
+TEXT ·Syscall(SB),NOSPLIT,$0-28
+ B syscall·Syscall(SB)
+
+TEXT ·Syscall6(SB),NOSPLIT,$0-40
+ B syscall·Syscall6(SB)
+
+TEXT ·Syscall9(SB),NOSPLIT,$0-52
+ B syscall·Syscall9(SB)
+
+TEXT ·RawSyscall(SB),NOSPLIT,$0-28
+ B syscall·RawSyscall(SB)
+
+TEXT ·RawSyscall6(SB),NOSPLIT,$0-40
+ B syscall·RawSyscall6(SB)
diff --git a/vendor/golang.org/x/sys/unix/asm_darwin_arm64.s b/vendor/golang.org/x/sys/unix/asm_darwin_arm64.s
new file mode 100644
index 00000000..97e01743
--- /dev/null
+++ b/vendor/golang.org/x/sys/unix/asm_darwin_arm64.s
@@ -0,0 +1,30 @@
+// Copyright 2015 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// +build !gccgo
+// +build arm64,darwin
+
+#include "textflag.h"
+
+//
+// System call support for ARM64, Darwin
+//
+
+// Just jump to package syscall's implementation for all these functions.
+// The runtime may know about them.
+
+TEXT ·Syscall(SB),NOSPLIT,$0-56
+ B syscall·Syscall(SB)
+
+TEXT ·Syscall6(SB),NOSPLIT,$0-80
+ B syscall·Syscall6(SB)
+
+TEXT ·Syscall9(SB),NOSPLIT,$0-104
+ B syscall·Syscall9(SB)
+
+TEXT ·RawSyscall(SB),NOSPLIT,$0-56
+ B syscall·RawSyscall(SB)
+
+TEXT ·RawSyscall6(SB),NOSPLIT,$0-80
+ B syscall·RawSyscall6(SB)
diff --git a/vendor/golang.org/x/sys/unix/asm_dragonfly_386.s b/vendor/golang.org/x/sys/unix/asm_dragonfly_386.s
new file mode 100644
index 00000000..7e55e0d3
--- /dev/null
+++ b/vendor/golang.org/x/sys/unix/asm_dragonfly_386.s
@@ -0,0 +1,29 @@
+// Copyright 2009 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// +build !gccgo
+
+#include "textflag.h"
+
+//
+// System call support for 386, DragonFly
+//
+
+// Just jump to package syscall's implementation for all these functions.
+// The runtime may know about them.
+
+TEXT ·Syscall(SB),NOSPLIT,$0-32
+ JMP syscall·Syscall(SB)
+
+TEXT ·Syscall6(SB),NOSPLIT,$0-44
+ JMP syscall·Syscall6(SB)
+
+TEXT ·Syscall9(SB),NOSPLIT,$0-56
+ JMP syscall·Syscall9(SB)
+
+TEXT ·RawSyscall(SB),NOSPLIT,$0-32
+ JMP syscall·RawSyscall(SB)
+
+TEXT ·RawSyscall6(SB),NOSPLIT,$0-44
+ JMP syscall·RawSyscall6(SB)
diff --git a/vendor/golang.org/x/sys/unix/asm_dragonfly_amd64.s b/vendor/golang.org/x/sys/unix/asm_dragonfly_amd64.s
new file mode 100644
index 00000000..d5ed6726
--- /dev/null
+++ b/vendor/golang.org/x/sys/unix/asm_dragonfly_amd64.s
@@ -0,0 +1,29 @@
+// Copyright 2009 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// +build !gccgo
+
+#include "textflag.h"
+
+//
+// System call support for AMD64, DragonFly
+//
+
+// Just jump to package syscall's implementation for all these functions.
+// The runtime may know about them.
+
+TEXT ·Syscall(SB),NOSPLIT,$0-64
+ JMP syscall·Syscall(SB)
+
+TEXT ·Syscall6(SB),NOSPLIT,$0-88
+ JMP syscall·Syscall6(SB)
+
+TEXT ·Syscall9(SB),NOSPLIT,$0-112
+ JMP syscall·Syscall9(SB)
+
+TEXT ·RawSyscall(SB),NOSPLIT,$0-64
+ JMP syscall·RawSyscall(SB)
+
+TEXT ·RawSyscall6(SB),NOSPLIT,$0-88
+ JMP syscall·RawSyscall6(SB)
diff --git a/vendor/golang.org/x/sys/unix/asm_freebsd_386.s b/vendor/golang.org/x/sys/unix/asm_freebsd_386.s
new file mode 100644
index 00000000..c9a0a260
--- /dev/null
+++ b/vendor/golang.org/x/sys/unix/asm_freebsd_386.s
@@ -0,0 +1,29 @@
+// Copyright 2009 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// +build !gccgo
+
+#include "textflag.h"
+
+//
+// System call support for 386, FreeBSD
+//
+
+// Just jump to package syscall's implementation for all these functions.
+// The runtime may know about them.
+
+TEXT ·Syscall(SB),NOSPLIT,$0-28
+ JMP syscall·Syscall(SB)
+
+TEXT ·Syscall6(SB),NOSPLIT,$0-40
+ JMP syscall·Syscall6(SB)
+
+TEXT ·Syscall9(SB),NOSPLIT,$0-52
+ JMP syscall·Syscall9(SB)
+
+TEXT ·RawSyscall(SB),NOSPLIT,$0-28
+ JMP syscall·RawSyscall(SB)
+
+TEXT ·RawSyscall6(SB),NOSPLIT,$0-40
+ JMP syscall·RawSyscall6(SB)
diff --git a/vendor/golang.org/x/sys/unix/asm_freebsd_amd64.s b/vendor/golang.org/x/sys/unix/asm_freebsd_amd64.s
new file mode 100644
index 00000000..35172477
--- /dev/null
+++ b/vendor/golang.org/x/sys/unix/asm_freebsd_amd64.s
@@ -0,0 +1,29 @@
+// Copyright 2009 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// +build !gccgo
+
+#include "textflag.h"
+
+//
+// System call support for AMD64, FreeBSD
+//
+
+// Just jump to package syscall's implementation for all these functions.
+// The runtime may know about them.
+
+TEXT ·Syscall(SB),NOSPLIT,$0-56
+ JMP syscall·Syscall(SB)
+
+TEXT ·Syscall6(SB),NOSPLIT,$0-80
+ JMP syscall·Syscall6(SB)
+
+TEXT ·Syscall9(SB),NOSPLIT,$0-104
+ JMP syscall·Syscall9(SB)
+
+TEXT ·RawSyscall(SB),NOSPLIT,$0-56
+ JMP syscall·RawSyscall(SB)
+
+TEXT ·RawSyscall6(SB),NOSPLIT,$0-80
+ JMP syscall·RawSyscall6(SB)
diff --git a/vendor/golang.org/x/sys/unix/asm_freebsd_arm.s b/vendor/golang.org/x/sys/unix/asm_freebsd_arm.s
new file mode 100644
index 00000000..9227c875
--- /dev/null
+++ b/vendor/golang.org/x/sys/unix/asm_freebsd_arm.s
@@ -0,0 +1,29 @@
+// Copyright 2012 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// +build !gccgo
+
+#include "textflag.h"
+
+//
+// System call support for ARM, FreeBSD
+//
+
+// Just jump to package syscall's implementation for all these functions.
+// The runtime may know about them.
+
+TEXT ·Syscall(SB),NOSPLIT,$0-28
+ B syscall·Syscall(SB)
+
+TEXT ·Syscall6(SB),NOSPLIT,$0-40
+ B syscall·Syscall6(SB)
+
+TEXT ·Syscall9(SB),NOSPLIT,$0-52
+ B syscall·Syscall9(SB)
+
+TEXT ·RawSyscall(SB),NOSPLIT,$0-28
+ B syscall·RawSyscall(SB)
+
+TEXT ·RawSyscall6(SB),NOSPLIT,$0-40
+ B syscall·RawSyscall6(SB)
diff --git a/vendor/golang.org/x/sys/unix/asm_linux_386.s b/vendor/golang.org/x/sys/unix/asm_linux_386.s
new file mode 100644
index 00000000..4db29093
--- /dev/null
+++ b/vendor/golang.org/x/sys/unix/asm_linux_386.s
@@ -0,0 +1,35 @@
+// Copyright 2009 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// +build !gccgo
+
+#include "textflag.h"
+
+//
+// System calls for 386, Linux
+//
+
+// Just jump to package syscall's implementation for all these functions.
+// The runtime may know about them.
+
+TEXT ·Syscall(SB),NOSPLIT,$0-28
+ JMP syscall·Syscall(SB)
+
+TEXT ·Syscall6(SB),NOSPLIT,$0-40
+ JMP syscall·Syscall6(SB)
+
+TEXT ·RawSyscall(SB),NOSPLIT,$0-28
+ JMP syscall·RawSyscall(SB)
+
+TEXT ·RawSyscall6(SB),NOSPLIT,$0-40
+ JMP syscall·RawSyscall6(SB)
+
+TEXT ·socketcall(SB),NOSPLIT,$0-36
+ JMP syscall·socketcall(SB)
+
+TEXT ·rawsocketcall(SB),NOSPLIT,$0-36
+ JMP syscall·rawsocketcall(SB)
+
+TEXT ·seek(SB),NOSPLIT,$0-28
+ JMP syscall·seek(SB)
diff --git a/vendor/golang.org/x/sys/unix/asm_linux_amd64.s b/vendor/golang.org/x/sys/unix/asm_linux_amd64.s
new file mode 100644
index 00000000..44e25c62
--- /dev/null
+++ b/vendor/golang.org/x/sys/unix/asm_linux_amd64.s
@@ -0,0 +1,29 @@
+// Copyright 2009 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// +build !gccgo
+
+#include "textflag.h"
+
+//
+// System calls for AMD64, Linux
+//
+
+// Just jump to package syscall's implementation for all these functions.
+// The runtime may know about them.
+
+TEXT ·Syscall(SB),NOSPLIT,$0-56
+ JMP syscall·Syscall(SB)
+
+TEXT ·Syscall6(SB),NOSPLIT,$0-80
+ JMP syscall·Syscall6(SB)
+
+TEXT ·RawSyscall(SB),NOSPLIT,$0-56
+ JMP syscall·RawSyscall(SB)
+
+TEXT ·RawSyscall6(SB),NOSPLIT,$0-80
+ JMP syscall·RawSyscall6(SB)
+
+TEXT ·gettimeofday(SB),NOSPLIT,$0-16
+ JMP syscall·gettimeofday(SB)
diff --git a/vendor/golang.org/x/sys/unix/asm_linux_arm.s b/vendor/golang.org/x/sys/unix/asm_linux_arm.s
new file mode 100644
index 00000000..cf0b5746
--- /dev/null
+++ b/vendor/golang.org/x/sys/unix/asm_linux_arm.s
@@ -0,0 +1,29 @@
+// Copyright 2009 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// +build !gccgo
+
+#include "textflag.h"
+
+//
+// System calls for arm, Linux
+//
+
+// Just jump to package syscall's implementation for all these functions.
+// The runtime may know about them.
+
+TEXT ·Syscall(SB),NOSPLIT,$0-28
+ B syscall·Syscall(SB)
+
+TEXT ·Syscall6(SB),NOSPLIT,$0-40
+ B syscall·Syscall6(SB)
+
+TEXT ·RawSyscall(SB),NOSPLIT,$0-28
+ B syscall·RawSyscall(SB)
+
+TEXT ·RawSyscall6(SB),NOSPLIT,$0-40
+ B syscall·RawSyscall6(SB)
+
+TEXT ·seek(SB),NOSPLIT,$0-32
+ B syscall·seek(SB)
diff --git a/vendor/golang.org/x/sys/unix/asm_linux_arm64.s b/vendor/golang.org/x/sys/unix/asm_linux_arm64.s
new file mode 100644
index 00000000..4be9bfed
--- /dev/null
+++ b/vendor/golang.org/x/sys/unix/asm_linux_arm64.s
@@ -0,0 +1,24 @@
+// Copyright 2015 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// +build linux
+// +build arm64
+// +build !gccgo
+
+#include "textflag.h"
+
+// Just jump to package syscall's implementation for all these functions.
+// The runtime may know about them.
+
+TEXT ·Syscall(SB),NOSPLIT,$0-56
+ B syscall·Syscall(SB)
+
+TEXT ·Syscall6(SB),NOSPLIT,$0-80
+ B syscall·Syscall6(SB)
+
+TEXT ·RawSyscall(SB),NOSPLIT,$0-56
+ B syscall·RawSyscall(SB)
+
+TEXT ·RawSyscall6(SB),NOSPLIT,$0-80
+ B syscall·RawSyscall6(SB)
diff --git a/vendor/golang.org/x/sys/unix/asm_linux_ppc64x.s b/vendor/golang.org/x/sys/unix/asm_linux_ppc64x.s
new file mode 100644
index 00000000..8d231feb
--- /dev/null
+++ b/vendor/golang.org/x/sys/unix/asm_linux_ppc64x.s
@@ -0,0 +1,28 @@
+// Copyright 2014 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// +build linux
+// +build ppc64 ppc64le
+// +build !gccgo
+
+#include "textflag.h"
+
+//
+// System calls for ppc64, Linux
+//
+
+// Just jump to package syscall's implementation for all these functions.
+// The runtime may know about them.
+
+TEXT ·Syscall(SB),NOSPLIT,$0-56
+ BR syscall·Syscall(SB)
+
+TEXT ·Syscall6(SB),NOSPLIT,$0-80
+ BR syscall·Syscall6(SB)
+
+TEXT ·RawSyscall(SB),NOSPLIT,$0-56
+ BR syscall·RawSyscall(SB)
+
+TEXT ·RawSyscall6(SB),NOSPLIT,$0-80
+ BR syscall·RawSyscall6(SB)
diff --git a/vendor/golang.org/x/sys/unix/asm_netbsd_386.s b/vendor/golang.org/x/sys/unix/asm_netbsd_386.s
new file mode 100644
index 00000000..48bdcd76
--- /dev/null
+++ b/vendor/golang.org/x/sys/unix/asm_netbsd_386.s
@@ -0,0 +1,29 @@
+// Copyright 2009 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// +build !gccgo
+
+#include "textflag.h"
+
+//
+// System call support for 386, NetBSD
+//
+
+// Just jump to package syscall's implementation for all these functions.
+// The runtime may know about them.
+
+TEXT ·Syscall(SB),NOSPLIT,$0-28
+ JMP syscall·Syscall(SB)
+
+TEXT ·Syscall6(SB),NOSPLIT,$0-40
+ JMP syscall·Syscall6(SB)
+
+TEXT ·Syscall9(SB),NOSPLIT,$0-52
+ JMP syscall·Syscall9(SB)
+
+TEXT ·RawSyscall(SB),NOSPLIT,$0-28
+ JMP syscall·RawSyscall(SB)
+
+TEXT ·RawSyscall6(SB),NOSPLIT,$0-40
+ JMP syscall·RawSyscall6(SB)
diff --git a/vendor/golang.org/x/sys/unix/asm_netbsd_amd64.s b/vendor/golang.org/x/sys/unix/asm_netbsd_amd64.s
new file mode 100644
index 00000000..2ede05c7
--- /dev/null
+++ b/vendor/golang.org/x/sys/unix/asm_netbsd_amd64.s
@@ -0,0 +1,29 @@
+// Copyright 2009 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// +build !gccgo
+
+#include "textflag.h"
+
+//
+// System call support for AMD64, NetBSD
+//
+
+// Just jump to package syscall's implementation for all these functions.
+// The runtime may know about them.
+
+TEXT ·Syscall(SB),NOSPLIT,$0-56
+ JMP syscall·Syscall(SB)
+
+TEXT ·Syscall6(SB),NOSPLIT,$0-80
+ JMP syscall·Syscall6(SB)
+
+TEXT ·Syscall9(SB),NOSPLIT,$0-104
+ JMP syscall·Syscall9(SB)
+
+TEXT ·RawSyscall(SB),NOSPLIT,$0-56
+ JMP syscall·RawSyscall(SB)
+
+TEXT ·RawSyscall6(SB),NOSPLIT,$0-80
+ JMP syscall·RawSyscall6(SB)
diff --git a/vendor/golang.org/x/sys/unix/asm_netbsd_arm.s b/vendor/golang.org/x/sys/unix/asm_netbsd_arm.s
new file mode 100644
index 00000000..e8928571
--- /dev/null
+++ b/vendor/golang.org/x/sys/unix/asm_netbsd_arm.s
@@ -0,0 +1,29 @@
+// Copyright 2013 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// +build !gccgo
+
+#include "textflag.h"
+
+//
+// System call support for ARM, NetBSD
+//
+
+// Just jump to package syscall's implementation for all these functions.
+// The runtime may know about them.
+
+TEXT ·Syscall(SB),NOSPLIT,$0-28
+ B syscall·Syscall(SB)
+
+TEXT ·Syscall6(SB),NOSPLIT,$0-40
+ B syscall·Syscall6(SB)
+
+TEXT ·Syscall9(SB),NOSPLIT,$0-52
+ B syscall·Syscall9(SB)
+
+TEXT ·RawSyscall(SB),NOSPLIT,$0-28
+ B syscall·RawSyscall(SB)
+
+TEXT ·RawSyscall6(SB),NOSPLIT,$0-40
+ B syscall·RawSyscall6(SB)
diff --git a/vendor/golang.org/x/sys/unix/asm_openbsd_386.s b/vendor/golang.org/x/sys/unix/asm_openbsd_386.s
new file mode 100644
index 00000000..00576f3c
--- /dev/null
+++ b/vendor/golang.org/x/sys/unix/asm_openbsd_386.s
@@ -0,0 +1,29 @@
+// Copyright 2009 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// +build !gccgo
+
+#include "textflag.h"
+
+//
+// System call support for 386, OpenBSD
+//
+
+// Just jump to package syscall's implementation for all these functions.
+// The runtime may know about them.
+
+TEXT ·Syscall(SB),NOSPLIT,$0-28
+ JMP syscall·Syscall(SB)
+
+TEXT ·Syscall6(SB),NOSPLIT,$0-40
+ JMP syscall·Syscall6(SB)
+
+TEXT ·Syscall9(SB),NOSPLIT,$0-52
+ JMP syscall·Syscall9(SB)
+
+TEXT ·RawSyscall(SB),NOSPLIT,$0-28
+ JMP syscall·RawSyscall(SB)
+
+TEXT ·RawSyscall6(SB),NOSPLIT,$0-40
+ JMP syscall·RawSyscall6(SB)
diff --git a/vendor/golang.org/x/sys/unix/asm_openbsd_amd64.s b/vendor/golang.org/x/sys/unix/asm_openbsd_amd64.s
new file mode 100644
index 00000000..790ef77f
--- /dev/null
+++ b/vendor/golang.org/x/sys/unix/asm_openbsd_amd64.s
@@ -0,0 +1,29 @@
+// Copyright 2009 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// +build !gccgo
+
+#include "textflag.h"
+
+//
+// System call support for AMD64, OpenBSD
+//
+
+// Just jump to package syscall's implementation for all these functions.
+// The runtime may know about them.
+
+TEXT ·Syscall(SB),NOSPLIT,$0-56
+ JMP syscall·Syscall(SB)
+
+TEXT ·Syscall6(SB),NOSPLIT,$0-80
+ JMP syscall·Syscall6(SB)
+
+TEXT ·Syscall9(SB),NOSPLIT,$0-104
+ JMP syscall·Syscall9(SB)
+
+TEXT ·RawSyscall(SB),NOSPLIT,$0-56
+ JMP syscall·RawSyscall(SB)
+
+TEXT ·RawSyscall6(SB),NOSPLIT,$0-80
+ JMP syscall·RawSyscall6(SB)
diff --git a/vendor/golang.org/x/sys/unix/asm_solaris_amd64.s b/vendor/golang.org/x/sys/unix/asm_solaris_amd64.s
new file mode 100644
index 00000000..43ed17a0
--- /dev/null
+++ b/vendor/golang.org/x/sys/unix/asm_solaris_amd64.s
@@ -0,0 +1,17 @@
+// Copyright 2014 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// +build !gccgo
+
+#include "textflag.h"
+
+//
+// System calls for amd64, Solaris are implemented in runtime/syscall_solaris.go
+//
+
+TEXT ·sysvicall6(SB),NOSPLIT,$0-64
+ JMP syscall·sysvicall6(SB)
+
+TEXT ·rawSysvicall6(SB),NOSPLIT,$0-64
+ JMP syscall·rawSysvicall6(SB)
diff --git a/vendor/golang.org/x/sys/unix/constants.go b/vendor/golang.org/x/sys/unix/constants.go
new file mode 100644
index 00000000..a96f0ebc
--- /dev/null
+++ b/vendor/golang.org/x/sys/unix/constants.go
@@ -0,0 +1,13 @@
+// Copyright 2015 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// +build darwin dragonfly freebsd linux netbsd openbsd solaris
+
+package unix
+
+const (
+ R_OK = 0x4
+ W_OK = 0x2
+ X_OK = 0x1
+)
diff --git a/vendor/golang.org/x/sys/unix/env_unix.go b/vendor/golang.org/x/sys/unix/env_unix.go
new file mode 100644
index 00000000..45e281a0
--- /dev/null
+++ b/vendor/golang.org/x/sys/unix/env_unix.go
@@ -0,0 +1,27 @@
+// Copyright 2010 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// +build darwin dragonfly freebsd linux netbsd openbsd solaris
+
+// Unix environment variables.
+
+package unix
+
+import "syscall"
+
+func Getenv(key string) (value string, found bool) {
+ return syscall.Getenv(key)
+}
+
+func Setenv(key, value string) error {
+ return syscall.Setenv(key, value)
+}
+
+func Clearenv() {
+ syscall.Clearenv()
+}
+
+func Environ() []string {
+ return syscall.Environ()
+}
diff --git a/vendor/golang.org/x/sys/unix/env_unset.go b/vendor/golang.org/x/sys/unix/env_unset.go
new file mode 100644
index 00000000..92222625
--- /dev/null
+++ b/vendor/golang.org/x/sys/unix/env_unset.go
@@ -0,0 +1,14 @@
+// Copyright 2014 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// +build go1.4
+
+package unix
+
+import "syscall"
+
+func Unsetenv(key string) error {
+ // This was added in Go 1.4.
+ return syscall.Unsetenv(key)
+}
diff --git a/vendor/golang.org/x/sys/unix/flock.go b/vendor/golang.org/x/sys/unix/flock.go
new file mode 100644
index 00000000..ce67a595
--- /dev/null
+++ b/vendor/golang.org/x/sys/unix/flock.go
@@ -0,0 +1,24 @@
+// +build linux darwin freebsd openbsd netbsd dragonfly
+
+// Copyright 2014 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// +build darwin dragonfly freebsd linux netbsd openbsd
+
+package unix
+
+import "unsafe"
+
+// fcntl64Syscall is usually SYS_FCNTL, but is overridden on 32-bit Linux
+// systems by flock_linux_32bit.go to be SYS_FCNTL64.
+var fcntl64Syscall uintptr = SYS_FCNTL
+
+// FcntlFlock performs a fcntl syscall for the F_GETLK, F_SETLK or F_SETLKW command.
+func FcntlFlock(fd uintptr, cmd int, lk *Flock_t) error {
+ _, _, errno := Syscall(fcntl64Syscall, fd, uintptr(cmd), uintptr(unsafe.Pointer(lk)))
+ if errno == 0 {
+ return nil
+ }
+ return errno
+}
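
FcntlFlock above is the only hand-written wrapper in this file; the sketch below shows one way it might be used to take an advisory write lock on Linux. It is a hedged example, not part of the diff: the F_SETLK/F_SETLKW/F_WRLCK/F_UNLCK constants and the Flock_t field layout are assumed to come from the generated zerrors_/ztypes_ files for the platform.

package main

import (
	"log"
	"os"

	"golang.org/x/sys/unix"
)

func main() {
	f, err := os.OpenFile("/tmp/example.lock", os.O_CREATE|os.O_RDWR, 0644)
	if err != nil {
		log.Fatal(err)
	}
	defer f.Close()

	// Exclusive (write) lock over the whole file; Len == 0 means "to EOF".
	lk := unix.Flock_t{Type: unix.F_WRLCK, Whence: 0, Start: 0, Len: 0}

	// F_SETLKW blocks until the lock is granted; F_SETLK would fail instead.
	if err := unix.FcntlFlock(f.Fd(), unix.F_SETLKW, &lk); err != nil {
		log.Fatal(err)
	}

	// ... do work while holding the lock ...

	lk.Type = unix.F_UNLCK
	if err := unix.FcntlFlock(f.Fd(), unix.F_SETLK, &lk); err != nil {
		log.Fatal(err)
	}
}
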
diff --git a/vendor/golang.org/x/sys/unix/flock_linux_32bit.go b/vendor/golang.org/x/sys/unix/flock_linux_32bit.go
new file mode 100644
index 00000000..362831c3
--- /dev/null
+++ b/vendor/golang.org/x/sys/unix/flock_linux_32bit.go
@@ -0,0 +1,13 @@
+// +build linux,386 linux,arm
+
+// Copyright 2014 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package unix
+
+func init() {
+ // On 32-bit Linux systems, the fcntl syscall that matches Go's
+ // Flock_t type is SYS_FCNTL64, not SYS_FCNTL.
+ fcntl64Syscall = SYS_FCNTL64
+}
diff --git a/vendor/golang.org/x/sys/unix/gccgo.go b/vendor/golang.org/x/sys/unix/gccgo.go
new file mode 100644
index 00000000..94c82321
--- /dev/null
+++ b/vendor/golang.org/x/sys/unix/gccgo.go
@@ -0,0 +1,46 @@
+// Copyright 2015 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// +build gccgo
+
+package unix
+
+import "syscall"
+
+// We can't use the gc-syntax .s files for gccgo. On the plus side
+// much of the functionality can be written directly in Go.
+
+//extern gccgoRealSyscall
+func realSyscall(trap, a1, a2, a3, a4, a5, a6, a7, a8, a9 uintptr) (r, errno uintptr)
+
+func Syscall(trap, a1, a2, a3 uintptr) (r1, r2 uintptr, err syscall.Errno) {
+ syscall.Entersyscall()
+ r, errno := realSyscall(trap, a1, a2, a3, 0, 0, 0, 0, 0, 0)
+ syscall.Exitsyscall()
+ return r, 0, syscall.Errno(errno)
+}
+
+func Syscall6(trap, a1, a2, a3, a4, a5, a6 uintptr) (r1, r2 uintptr, err syscall.Errno) {
+ syscall.Entersyscall()
+ r, errno := realSyscall(trap, a1, a2, a3, a4, a5, a6, 0, 0, 0)
+ syscall.Exitsyscall()
+ return r, 0, syscall.Errno(errno)
+}
+
+func Syscall9(trap, a1, a2, a3, a4, a5, a6, a7, a8, a9 uintptr) (r1, r2 uintptr, err syscall.Errno) {
+ syscall.Entersyscall()
+ r, errno := realSyscall(trap, a1, a2, a3, a4, a5, a6, a7, a8, a9)
+ syscall.Exitsyscall()
+ return r, 0, syscall.Errno(errno)
+}
+
+func RawSyscall(trap, a1, a2, a3 uintptr) (r1, r2 uintptr, err syscall.Errno) {
+ r, errno := realSyscall(trap, a1, a2, a3, 0, 0, 0, 0, 0, 0)
+ return r, 0, syscall.Errno(errno)
+}
+
+func RawSyscall6(trap, a1, a2, a3, a4, a5, a6 uintptr) (r1, r2 uintptr, err syscall.Errno) {
+ r, errno := realSyscall(trap, a1, a2, a3, a4, a5, a6, 0, 0, 0)
+ return r, 0, syscall.Errno(errno)
+}
diff --git a/vendor/golang.org/x/sys/unix/gccgo_c.c b/vendor/golang.org/x/sys/unix/gccgo_c.c
new file mode 100644
index 00000000..07f6be03
--- /dev/null
+++ b/vendor/golang.org/x/sys/unix/gccgo_c.c
@@ -0,0 +1,41 @@
+// Copyright 2015 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// +build gccgo
+
+#include <errno.h>
+#include <stdint.h>
+#include <unistd.h>
+
+#define _STRINGIFY2_(x) #x
+#define _STRINGIFY_(x) _STRINGIFY2_(x)
+#define GOSYM_PREFIX _STRINGIFY_(__USER_LABEL_PREFIX__)
+
+// Call syscall from C code because the gccgo support for calling from
+// Go to C does not support varargs functions.
+
+struct ret {
+ uintptr_t r;
+ uintptr_t err;
+};
+
+struct ret
+gccgoRealSyscall(uintptr_t trap, uintptr_t a1, uintptr_t a2, uintptr_t a3, uintptr_t a4, uintptr_t a5, uintptr_t a6, uintptr_t a7, uintptr_t a8, uintptr_t a9)
+{
+ struct ret r;
+
+ errno = 0;
+ r.r = syscall(trap, a1, a2, a3, a4, a5, a6, a7, a8, a9);
+ r.err = errno;
+ return r;
+}
+
+// Define the use function in C so that it is not inlined.
+
+extern void use(void *) __asm__ (GOSYM_PREFIX GOPKGPATH ".use") __attribute__((noinline));
+
+void
+use(void *p __attribute__ ((unused)))
+{
+}
diff --git a/vendor/golang.org/x/sys/unix/gccgo_linux_amd64.go b/vendor/golang.org/x/sys/unix/gccgo_linux_amd64.go
new file mode 100644
index 00000000..bffe1a77
--- /dev/null
+++ b/vendor/golang.org/x/sys/unix/gccgo_linux_amd64.go
@@ -0,0 +1,20 @@
+// Copyright 2015 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// +build gccgo,linux,amd64
+
+package unix
+
+import "syscall"
+
+//extern gettimeofday
+func realGettimeofday(*Timeval, *byte) int32
+
+func gettimeofday(tv *Timeval) (err syscall.Errno) {
+ r := realGettimeofday(tv, nil)
+ if r < 0 {
+ return syscall.GetErrno()
+ }
+ return 0
+}
diff --git a/vendor/golang.org/x/sys/unix/mkall.sh b/vendor/golang.org/x/sys/unix/mkall.sh
new file mode 100755
index 00000000..de95a4bb
--- /dev/null
+++ b/vendor/golang.org/x/sys/unix/mkall.sh
@@ -0,0 +1,274 @@
+#!/usr/bin/env bash
+# Copyright 2009 The Go Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style
+# license that can be found in the LICENSE file.
+
+# The unix package provides access to the raw system call
+# interface of the underlying operating system. Porting Go to
+# a new architecture/operating system combination requires
+# some manual effort, though there are tools that automate
+# much of the process. The auto-generated files have names
+# beginning with z.
+#
+# This script runs or (given -n) prints suggested commands to generate z files
+# for the current system. Running those commands is not automatic.
+# This script is documentation more than anything else.
+#
+# * asm_${GOOS}_${GOARCH}.s
+#
+# This hand-written assembly file implements system call dispatch.
+# There are three entry points:
+#
+# func Syscall(trap, a1, a2, a3 uintptr) (r1, r2, err uintptr);
+# func Syscall6(trap, a1, a2, a3, a4, a5, a6 uintptr) (r1, r2, err uintptr);
+# func RawSyscall(trap, a1, a2, a3 uintptr) (r1, r2, err uintptr);
+#
+# The first and second are the standard ones; they differ only in
+# how many arguments can be passed to the kernel.
+# The third is for low-level use by the ForkExec wrapper;
+# unlike the first two, it does not call into the scheduler to
+# let it know that a system call is running.
+#
+# * syscall_${GOOS}.go
+#
+# This hand-written Go file implements system calls that need
+# special handling and lists "//sys" comments giving prototypes
+# for ones that can be auto-generated. Mksyscall reads those
+# comments to generate the stubs.
+#
+# * syscall_${GOOS}_${GOARCH}.go
+#
+# Same as syscall_${GOOS}.go except that it contains code specific
+# to ${GOOS} on one particular architecture.
+#
+# * types_${GOOS}.c
+#
+# This hand-written C file includes standard C headers and then
+# creates typedef or enum names beginning with a dollar sign
+# (use of $ in variable names is a gcc extension). The hardest
+# part about preparing this file is figuring out which headers to
+# include and which symbols need to be #defined to get the
+# actual data structures that pass through to the kernel system calls.
+# Some C libraries present alternate versions for binary compatibility
+# and translate them on the way in and out of system calls, but
+# there is almost always a #define that can get the real ones.
+# See types_darwin.c and types_linux.c for examples.
+#
+# * zerror_${GOOS}_${GOARCH}.go
+#
+# This machine-generated file defines the system's error numbers,
+# error strings, and signal numbers. The generator is "mkerrors.sh".
+# Usually no arguments are needed, but mkerrors.sh will pass its
+# arguments on to godefs.
+#
+# * zsyscall_${GOOS}_${GOARCH}.go
+#
+# Generated by mksyscall.pl; see syscall_${GOOS}.go above.
+#
+# * zsysnum_${GOOS}_${GOARCH}.go
+#
+# Generated by mksysnum_${GOOS}.
+#
+# * ztypes_${GOOS}_${GOARCH}.go
+#
+# Generated by godefs; see types_${GOOS}.c above.
+
+GOOSARCH="${GOOS}_${GOARCH}"
+
+# defaults
+mksyscall="./mksyscall.pl"
+mkerrors="./mkerrors.sh"
+zerrors="zerrors_$GOOSARCH.go"
+mksysctl=""
+zsysctl="zsysctl_$GOOSARCH.go"
+mksysnum=
+mktypes=
+run="sh"
+
+case "$1" in
+-syscalls)
+ for i in zsyscall*go
+ do
+ sed 1q $i | sed 's;^// ;;' | sh > _$i && gofmt < _$i > $i
+ rm _$i
+ done
+ exit 0
+ ;;
+-n)
+ run="cat"
+ shift
+esac
+
+case "$#" in
+0)
+ ;;
+*)
+ echo 'usage: mkall.sh [-n]' 1>&2
+ exit 2
+esac
+
+GOOSARCH_in=syscall_$GOOSARCH.go
+case "$GOOSARCH" in
+_* | *_ | _)
+ echo 'undefined $GOOS_$GOARCH:' "$GOOSARCH" 1>&2
+ exit 1
+ ;;
+darwin_386)
+ mkerrors="$mkerrors -m32"
+ mksyscall="./mksyscall.pl -l32"
+ mksysnum="./mksysnum_darwin.pl $(xcrun --show-sdk-path --sdk macosx)/usr/include/sys/syscall.h"
+ mktypes="GOARCH=$GOARCH go tool cgo -godefs"
+ ;;
+darwin_amd64)
+ mkerrors="$mkerrors -m64"
+ mksysnum="./mksysnum_darwin.pl $(xcrun --show-sdk-path --sdk macosx)/usr/include/sys/syscall.h"
+ mktypes="GOARCH=$GOARCH go tool cgo -godefs"
+ ;;
+darwin_arm)
+ mkerrors="$mkerrors"
+ mksysnum="./mksysnum_darwin.pl /usr/include/sys/syscall.h"
+ mktypes="GOARCH=$GOARCH go tool cgo -godefs"
+ ;;
+darwin_arm64)
+ mkerrors="$mkerrors -m64"
+ mksysnum="./mksysnum_darwin.pl $(xcrun --show-sdk-path --sdk iphoneos)/usr/include/sys/syscall.h"
+ mktypes="GOARCH=$GOARCH go tool cgo -godefs"
+ ;;
+dragonfly_386)
+ mkerrors="$mkerrors -m32"
+ mksyscall="./mksyscall.pl -l32 -dragonfly"
+ mksysnum="curl -s 'http://gitweb.dragonflybsd.org/dragonfly.git/blob_plain/HEAD:/sys/kern/syscalls.master' | ./mksysnum_dragonfly.pl"
+ mktypes="GOARCH=$GOARCH go tool cgo -godefs"
+ ;;
+dragonfly_amd64)
+ mkerrors="$mkerrors -m64"
+ mksyscall="./mksyscall.pl -dragonfly"
+ mksysnum="curl -s 'http://gitweb.dragonflybsd.org/dragonfly.git/blob_plain/HEAD:/sys/kern/syscalls.master' | ./mksysnum_dragonfly.pl"
+ mktypes="GOARCH=$GOARCH go tool cgo -godefs"
+ ;;
+freebsd_386)
+ mkerrors="$mkerrors -m32"
+ mksyscall="./mksyscall.pl -l32"
+ mksysnum="curl -s 'http://svn.freebsd.org/base/stable/10/sys/kern/syscalls.master' | ./mksysnum_freebsd.pl"
+ mktypes="GOARCH=$GOARCH go tool cgo -godefs"
+ ;;
+freebsd_amd64)
+ mkerrors="$mkerrors -m64"
+ mksysnum="curl -s 'http://svn.freebsd.org/base/stable/10/sys/kern/syscalls.master' | ./mksysnum_freebsd.pl"
+ mktypes="GOARCH=$GOARCH go tool cgo -godefs"
+ ;;
+freebsd_arm)
+ mkerrors="$mkerrors"
+ mksyscall="./mksyscall.pl -l32 -arm"
+ mksysnum="curl -s 'http://svn.freebsd.org/base/stable/10/sys/kern/syscalls.master' | ./mksysnum_freebsd.pl"
+	# Let the type of C char be signed to keep the bare syscall
+	# API consistent across platforms.
+ mktypes="GOARCH=$GOARCH go tool cgo -godefs -- -fsigned-char"
+ ;;
+linux_386)
+ mkerrors="$mkerrors -m32"
+ mksyscall="./mksyscall.pl -l32"
+ mksysnum="./mksysnum_linux.pl /usr/include/asm/unistd_32.h"
+ mktypes="GOARCH=$GOARCH go tool cgo -godefs"
+ ;;
+linux_amd64)
+ unistd_h=$(ls -1 /usr/include/asm/unistd_64.h /usr/include/x86_64-linux-gnu/asm/unistd_64.h 2>/dev/null | head -1)
+ if [ "$unistd_h" = "" ]; then
+ echo >&2 cannot find unistd_64.h
+ exit 1
+ fi
+ mkerrors="$mkerrors -m64"
+ mksysnum="./mksysnum_linux.pl $unistd_h"
+ mktypes="GOARCH=$GOARCH go tool cgo -godefs"
+ ;;
+linux_arm)
+ mkerrors="$mkerrors"
+ mksyscall="./mksyscall.pl -l32 -arm"
+ mksysnum="curl -s 'http://git.kernel.org/cgit/linux/kernel/git/torvalds/linux.git/plain/arch/arm/include/uapi/asm/unistd.h' | ./mksysnum_linux.pl -"
+ mktypes="GOARCH=$GOARCH go tool cgo -godefs"
+ ;;
+linux_arm64)
+ unistd_h=$(ls -1 /usr/include/asm/unistd.h /usr/include/asm-generic/unistd.h 2>/dev/null | head -1)
+ if [ "$unistd_h" = "" ]; then
+		echo >&2 cannot find unistd.h
+ exit 1
+ fi
+ mksysnum="./mksysnum_linux.pl $unistd_h"
+	# Let the type of C char be signed to keep the bare syscall
+	# API consistent across platforms.
+ mktypes="GOARCH=$GOARCH go tool cgo -godefs -- -fsigned-char"
+ ;;
+linux_ppc64)
+ GOOSARCH_in=syscall_linux_ppc64x.go
+ unistd_h=/usr/include/asm/unistd.h
+ mkerrors="$mkerrors -m64"
+ mksysnum="./mksysnum_linux.pl $unistd_h"
+ mktypes="GOARCH=$GOARCH go tool cgo -godefs"
+ ;;
+linux_ppc64le)
+ GOOSARCH_in=syscall_linux_ppc64x.go
+ unistd_h=/usr/include/powerpc64le-linux-gnu/asm/unistd.h
+ mkerrors="$mkerrors -m64"
+ mksysnum="./mksysnum_linux.pl $unistd_h"
+ mktypes="GOARCH=$GOARCH go tool cgo -godefs"
+ ;;
+netbsd_386)
+ mkerrors="$mkerrors -m32"
+ mksyscall="./mksyscall.pl -l32 -netbsd"
+ mksysnum="curl -s 'http://cvsweb.netbsd.org/bsdweb.cgi/~checkout~/src/sys/kern/syscalls.master' | ./mksysnum_netbsd.pl"
+ mktypes="GOARCH=$GOARCH go tool cgo -godefs"
+ ;;
+netbsd_amd64)
+ mkerrors="$mkerrors -m64"
+ mksyscall="./mksyscall.pl -netbsd"
+ mksysnum="curl -s 'http://cvsweb.netbsd.org/bsdweb.cgi/~checkout~/src/sys/kern/syscalls.master' | ./mksysnum_netbsd.pl"
+ mktypes="GOARCH=$GOARCH go tool cgo -godefs"
+ ;;
+openbsd_386)
+ mkerrors="$mkerrors -m32"
+ mksyscall="./mksyscall.pl -l32 -openbsd"
+ mksysctl="./mksysctl_openbsd.pl"
+ zsysctl="zsysctl_openbsd.go"
+ mksysnum="curl -s 'http://cvsweb.openbsd.org/cgi-bin/cvsweb/~checkout~/src/sys/kern/syscalls.master' | ./mksysnum_openbsd.pl"
+ mktypes="GOARCH=$GOARCH go tool cgo -godefs"
+ ;;
+openbsd_amd64)
+ mkerrors="$mkerrors -m64"
+ mksyscall="./mksyscall.pl -openbsd"
+ mksysctl="./mksysctl_openbsd.pl"
+ zsysctl="zsysctl_openbsd.go"
+ mksysnum="curl -s 'http://cvsweb.openbsd.org/cgi-bin/cvsweb/~checkout~/src/sys/kern/syscalls.master' | ./mksysnum_openbsd.pl"
+ mktypes="GOARCH=$GOARCH go tool cgo -godefs"
+ ;;
+solaris_amd64)
+ mksyscall="./mksyscall_solaris.pl"
+ mkerrors="$mkerrors -m64"
+ mksysnum=
+ mktypes="GOARCH=$GOARCH go tool cgo -godefs"
+ ;;
+*)
+ echo 'unrecognized $GOOS_$GOARCH: ' "$GOOSARCH" 1>&2
+ exit 1
+ ;;
+esac
+
+(
+ if [ -n "$mkerrors" ]; then echo "$mkerrors |gofmt >$zerrors"; fi
+ case "$GOOS" in
+ *)
+ syscall_goos="syscall_$GOOS.go"
+ case "$GOOS" in
+ darwin | dragonfly | freebsd | netbsd | openbsd)
+ syscall_goos="syscall_bsd.go $syscall_goos"
+ ;;
+ esac
+ if [ -n "$mksyscall" ]; then echo "$mksyscall $syscall_goos $GOOSARCH_in |gofmt >zsyscall_$GOOSARCH.go"; fi
+ ;;
+ esac
+ if [ -n "$mksysctl" ]; then echo "$mksysctl |gofmt >$zsysctl"; fi
+ if [ -n "$mksysnum" ]; then echo "$mksysnum |gofmt >zsysnum_$GOOSARCH.go"; fi
+ if [ -n "$mktypes" ]; then
+ echo "echo // +build $GOARCH,$GOOS > ztypes_$GOOSARCH.go";
+ echo "$mktypes types_$GOOS.go | gofmt >>ztypes_$GOOSARCH.go";
+ fi
+) | $run
diff --git a/vendor/golang.org/x/sys/unix/mkerrors.sh b/vendor/golang.org/x/sys/unix/mkerrors.sh
new file mode 100755
index 00000000..c40d788c
--- /dev/null
+++ b/vendor/golang.org/x/sys/unix/mkerrors.sh
@@ -0,0 +1,476 @@
+#!/usr/bin/env bash
+# Copyright 2009 The Go Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style
+# license that can be found in the LICENSE file.
+
+# Generate Go code listing errors and other #defined constant
+# values (ENAMETOOLONG etc.), by asking the preprocessor
+# about the definitions.
+
+unset LANG
+export LC_ALL=C
+export LC_CTYPE=C
+
+if test -z "$GOARCH" -o -z "$GOOS"; then
+ echo 1>&2 "GOARCH or GOOS not defined in environment"
+ exit 1
+fi
+
+CC=${CC:-cc}
+
+if [[ "$GOOS" = "solaris" ]]; then
+ # Assumes GNU versions of utilities in PATH.
+ export PATH=/usr/gnu/bin:$PATH
+fi
+
+uname=$(uname)
+
+includes_Darwin='
+#define _DARWIN_C_SOURCE
+#define KERNEL
+#define _DARWIN_USE_64_BIT_INODE
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+'
+
+includes_DragonFly='
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+'
+
+includes_FreeBSD='
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+
+#if __FreeBSD__ >= 10
+#define IFT_CARP 0xf8 // IFT_CARP is deprecated in FreeBSD 10
+#undef SIOCAIFADDR
+#define SIOCAIFADDR _IOW(105, 26, struct oifaliasreq) // ifaliasreq contains if_data
+#undef SIOCSIFPHYADDR
+#define SIOCSIFPHYADDR _IOW(105, 70, struct oifaliasreq) // ifaliasreq contains if_data
+#endif
+'
+
+includes_Linux='
+#define _LARGEFILE_SOURCE
+#define _LARGEFILE64_SOURCE
+#ifndef __LP64__
+#define _FILE_OFFSET_BITS 64
+#endif
+#define _GNU_SOURCE
+
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+
+#ifndef MSG_FASTOPEN
+#define MSG_FASTOPEN 0x20000000
+#endif
+
+#ifndef PTRACE_GETREGS
+#define PTRACE_GETREGS 0xc
+#endif
+
+#ifndef PTRACE_SETREGS
+#define PTRACE_SETREGS 0xd
+#endif
+'
+
+includes_NetBSD='
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+
+// Needed since <sys/param.h> refers to it...
+#define schedppq 1
+'
+
+includes_OpenBSD='
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+
+// We keep some constants not supported in OpenBSD 5.5 and beyond for
+// the promise of compatibility.
+#define EMUL_ENABLED 0x1
+#define EMUL_NATIVE 0x2
+#define IPV6_FAITH 0x1d
+#define IPV6_OPTIONS 0x1
+#define IPV6_RTHDR_STRICT 0x1
+#define IPV6_SOCKOPT_RESERVED1 0x3
+#define SIOCGIFGENERIC 0xc020693a
+#define SIOCSIFGENERIC 0x80206939
+#define WALTSIG 0x4
+'
+
+includes_SunOS='
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+'
+
+
+includes='
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+'
+ccflags="$@"
+
+# Write go tool cgo -godefs input.
+(
+ echo package unix
+ echo
+ echo '/*'
+ indirect="includes_$(uname)"
+ echo "${!indirect} $includes"
+ echo '*/'
+ echo 'import "C"'
+ echo 'import "syscall"'
+ echo
+ echo 'const ('
+
+ # The gcc command line prints all the #defines
+ # it encounters while processing the input
+ echo "${!indirect} $includes" | $CC -x c - -E -dM $ccflags |
+ awk '
+ $1 != "#define" || $2 ~ /\(/ || $3 == "" {next}
+
+ $2 ~ /^E([ABCD]X|[BIS]P|[SD]I|S|FL)$/ {next} # 386 registers
+ $2 ~ /^(SIGEV_|SIGSTKSZ|SIGRT(MIN|MAX))/ {next}
+ $2 ~ /^(SCM_SRCRT)$/ {next}
+ $2 ~ /^(MAP_FAILED)$/ {next}
+ $2 ~ /^ELF_.*$/ {next}# contains ELF_ARCH, etc.
+
+ $2 ~ /^EXTATTR_NAMESPACE_NAMES/ ||
+ $2 ~ /^EXTATTR_NAMESPACE_[A-Z]+_STRING/ {next}
+
+ $2 !~ /^ETH_/ &&
+ $2 !~ /^EPROC_/ &&
+ $2 !~ /^EQUIV_/ &&
+ $2 !~ /^EXPR_/ &&
+ $2 ~ /^E[A-Z0-9_]+$/ ||
+ $2 ~ /^B[0-9_]+$/ ||
+ $2 == "BOTHER" ||
+ $2 ~ /^CI?BAUD(EX)?$/ ||
+ $2 == "IBSHIFT" ||
+ $2 ~ /^V[A-Z0-9]+$/ ||
+ $2 ~ /^CS[A-Z0-9]/ ||
+ $2 ~ /^I(SIG|CANON|CRNL|UCLC|EXTEN|MAXBEL|STRIP|UTF8)$/ ||
+ $2 ~ /^IGN/ ||
+ $2 ~ /^IX(ON|ANY|OFF)$/ ||
+ $2 ~ /^IN(LCR|PCK)$/ ||
+ $2 ~ /(^FLU?SH)|(FLU?SH$)/ ||
+ $2 ~ /^C(LOCAL|READ|MSPAR|RTSCTS)$/ ||
+ $2 == "BRKINT" ||
+ $2 == "HUPCL" ||
+ $2 == "PENDIN" ||
+ $2 == "TOSTOP" ||
+ $2 == "XCASE" ||
+ $2 == "ALTWERASE" ||
+ $2 == "NOKERNINFO" ||
+ $2 ~ /^PAR/ ||
+ $2 ~ /^SIG[^_]/ ||
+ $2 ~ /^O[CNPFPL][A-Z]+[^_][A-Z]+$/ ||
+ $2 ~ /^(NL|CR|TAB|BS|VT|FF)DLY$/ ||
+ $2 ~ /^(NL|CR|TAB|BS|VT|FF)[0-9]$/ ||
+ $2 ~ /^O?XTABS$/ ||
+ $2 ~ /^TC[IO](ON|OFF)$/ ||
+ $2 ~ /^IN_/ ||
+ $2 ~ /^LOCK_(SH|EX|NB|UN)$/ ||
+ $2 ~ /^(AF|SOCK|SO|SOL|IPPROTO|IP|IPV6|ICMP6|TCP|EVFILT|NOTE|EV|SHUT|PROT|MAP|PACKET|MSG|SCM|MCL|DT|MADV|PR)_/ ||
+ $2 == "ICMPV6_FILTER" ||
+ $2 == "SOMAXCONN" ||
+ $2 == "NAME_MAX" ||
+ $2 == "IFNAMSIZ" ||
+ $2 ~ /^CTL_(MAXNAME|NET|QUERY)$/ ||
+ $2 ~ /^SYSCTL_VERS/ ||
+ $2 ~ /^(MS|MNT)_/ ||
+ $2 ~ /^TUN(SET|GET|ATTACH|DETACH)/ ||
+ $2 ~ /^(O|F|FD|NAME|S|PTRACE|PT)_/ ||
+ $2 ~ /^LINUX_REBOOT_CMD_/ ||
+ $2 ~ /^LINUX_REBOOT_MAGIC[12]$/ ||
+ $2 !~ "NLA_TYPE_MASK" &&
+ $2 ~ /^(NETLINK|NLM|NLMSG|NLA|IFA|IFAN|RT|RTCF|RTN|RTPROT|RTNH|ARPHRD|ETH_P)_/ ||
+ $2 ~ /^SIOC/ ||
+ $2 ~ /^TIOC/ ||
+ $2 ~ /^TCGET/ ||
+ $2 ~ /^TCSET/ ||
+ $2 ~ /^TC(FLSH|SBRKP?|XONC)$/ ||
+ $2 !~ "RTF_BITS" &&
+ $2 ~ /^(IFF|IFT|NET_RT|RTM|RTF|RTV|RTA|RTAX)_/ ||
+ $2 ~ /^BIOC/ ||
+ $2 ~ /^RUSAGE_(SELF|CHILDREN|THREAD)/ ||
+ $2 ~ /^RLIMIT_(AS|CORE|CPU|DATA|FSIZE|NOFILE|STACK)|RLIM_INFINITY/ ||
+ $2 ~ /^PRIO_(PROCESS|PGRP|USER)/ ||
+ $2 ~ /^CLONE_[A-Z_]+/ ||
+ $2 !~ /^(BPF_TIMEVAL)$/ &&
+ $2 ~ /^(BPF|DLT)_/ ||
+ $2 ~ /^CLOCK_/ ||
+ $2 !~ "WMESGLEN" &&
+ $2 ~ /^W[A-Z0-9]+$/ {printf("\t%s = C.%s\n", $2, $2)}
+ $2 ~ /^__WCOREFLAG$/ {next}
+ $2 ~ /^__W[A-Z0-9]+$/ {printf("\t%s = C.%s\n", substr($2,3), $2)}
+
+ {next}
+ ' | sort
+
+ echo ')'
+) >_const.go
+
+# Pull out the error names for later.
+errors=$(
+	echo '#include <errno.h>' | $CC -x c - -E -dM $ccflags |
+ awk '$1=="#define" && $2 ~ /^E[A-Z0-9_]+$/ { print $2 }' |
+ sort
+)
+
+# Pull out the signal names for later.
+signals=$(
+	echo '#include <signal.h>' | $CC -x c - -E -dM $ccflags |
+ awk '$1=="#define" && $2 ~ /^SIG[A-Z0-9]+$/ { print $2 }' |
+ egrep -v '(SIGSTKSIZE|SIGSTKSZ|SIGRT)' |
+ sort
+)
+
+# Again, writing regexps to a file.
+echo '#include <errno.h>' | $CC -x c - -E -dM $ccflags |
+ awk '$1=="#define" && $2 ~ /^E[A-Z0-9_]+$/ { print "^\t" $2 "[ \t]*=" }' |
+ sort >_error.grep
+echo '#include <signal.h>' | $CC -x c - -E -dM $ccflags |
+ awk '$1=="#define" && $2 ~ /^SIG[A-Z0-9]+$/ { print "^\t" $2 "[ \t]*=" }' |
+ egrep -v '(SIGSTKSIZE|SIGSTKSZ|SIGRT)' |
+ sort >_signal.grep
+
+echo '// mkerrors.sh' "$@"
+echo '// MACHINE GENERATED BY THE COMMAND ABOVE; DO NOT EDIT'
+echo
+echo "// +build ${GOARCH},${GOOS}"
+echo
+go tool cgo -godefs -- "$@" _const.go >_error.out
+cat _error.out | grep -vf _error.grep | grep -vf _signal.grep
+echo
+echo '// Errors'
+echo 'const ('
+cat _error.out | grep -f _error.grep | sed 's/=\(.*\)/= syscall.Errno(\1)/'
+echo ')'
+
+echo
+echo '// Signals'
+echo 'const ('
+cat _error.out | grep -f _signal.grep | sed 's/=\(.*\)/= syscall.Signal(\1)/'
+echo ')'
+
+# Run C program to print error and syscall strings.
+(
+ echo -E "
+#include <stdio.h>
+#include <stdlib.h>
+#include <errno.h>
+#include <ctype.h>
+#include <string.h>
+#include <signal.h>
+
+#define nelem(x) (sizeof(x)/sizeof((x)[0]))
+
+enum { A = 'A', Z = 'Z', a = 'a', z = 'z' }; // avoid need for single quotes below
+
+int errors[] = {
+"
+ for i in $errors
+ do
+ echo -E ' '$i,
+ done
+
+ echo -E "
+};
+
+int signals[] = {
+"
+ for i in $signals
+ do
+ echo -E ' '$i,
+ done
+
+ # Use -E because on some systems bash builtin interprets \n itself.
+ echo -E '
+};
+
+static int
+intcmp(const void *a, const void *b)
+{
+ return *(int*)a - *(int*)b;
+}
+
+int
+main(void)
+{
+ int i, j, e;
+ char buf[1024], *p;
+
+ printf("\n\n// Error table\n");
+ printf("var errors = [...]string {\n");
+ qsort(errors, nelem(errors), sizeof errors[0], intcmp);
+	for(i=0; i<nelem(errors); i++) {
+		e = errors[i];
+		if(i > 0 && errors[i-1] == e)
+ continue;
+ strcpy(buf, strerror(e));
+ // lowercase first letter: Bad -> bad, but STREAM -> STREAM.
+ if(A <= buf[0] && buf[0] <= Z && a <= buf[1] && buf[1] <= z)
+ buf[0] += a - A;
+ printf("\t%d: \"%s\",\n", e, buf);
+ }
+ printf("}\n\n");
+
+ printf("\n\n// Signal table\n");
+ printf("var signals = [...]string {\n");
+ qsort(signals, nelem(signals), sizeof signals[0], intcmp);
+	for(i=0; i<nelem(signals); i++) {
+		e = signals[i];
+		if(i > 0 && signals[i-1] == e)
+ continue;
+ strcpy(buf, strsignal(e));
+ // lowercase first letter: Bad -> bad, but STREAM -> STREAM.
+ if(A <= buf[0] && buf[0] <= Z && a <= buf[1] && buf[1] <= z)
+ buf[0] += a - A;
+ // cut trailing : number.
+ p = strrchr(buf, ":"[0]);
+ if(p)
+ *p = '\0';
+ printf("\t%d: \"%s\",\n", e, buf);
+ }
+ printf("}\n\n");
+
+ return 0;
+}
+
+'
+) >_errors.c
+
+$CC $ccflags -o _errors _errors.c && $GORUN ./_errors && rm -f _errors.c _errors _const.go _error.grep _signal.grep _error.out
diff --git a/vendor/golang.org/x/sys/unix/mksyscall.pl b/vendor/golang.org/x/sys/unix/mksyscall.pl
new file mode 100755
index 00000000..b1e7766d
--- /dev/null
+++ b/vendor/golang.org/x/sys/unix/mksyscall.pl
@@ -0,0 +1,323 @@
+#!/usr/bin/env perl
+# Copyright 2009 The Go Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style
+# license that can be found in the LICENSE file.
+
+# This program reads a file containing function prototypes
+# (like syscall_darwin.go) and generates system call bodies.
+# The prototypes are marked by lines beginning with "//sys"
+# and read like func declarations if //sys is replaced by func, but:
+# * The parameter lists must give a name for each argument.
+# This includes return parameters.
+# * The parameter lists must give a type for each argument:
+# the (x, y, z int) shorthand is not allowed.
+# * If the return parameter is an error number, it must be named errno.
+
+# A line beginning with //sysnb is like //sys, except that the
+# goroutine will not be suspended during the execution of the system
+# call. This must only be used for system calls which can never
+# block, as otherwise the system call could cause all goroutines to
+# hang.
+
+use strict;
+
+my $cmdline = "mksyscall.pl " . join(' ', @ARGV);
+my $errors = 0;
+my $_32bit = "";
+my $plan9 = 0;
+my $openbsd = 0;
+my $netbsd = 0;
+my $dragonfly = 0;
+my $arm = 0; # 64-bit value should use (even, odd)-pair
+
+if($ARGV[0] eq "-b32") {
+ $_32bit = "big-endian";
+ shift;
+} elsif($ARGV[0] eq "-l32") {
+ $_32bit = "little-endian";
+ shift;
+}
+if($ARGV[0] eq "-plan9") {
+ $plan9 = 1;
+ shift;
+}
+if($ARGV[0] eq "-openbsd") {
+ $openbsd = 1;
+ shift;
+}
+if($ARGV[0] eq "-netbsd") {
+ $netbsd = 1;
+ shift;
+}
+if($ARGV[0] eq "-dragonfly") {
+ $dragonfly = 1;
+ shift;
+}
+if($ARGV[0] eq "-arm") {
+ $arm = 1;
+ shift;
+}
+
+if($ARGV[0] =~ /^-/) {
+ print STDERR "usage: mksyscall.pl [-b32 | -l32] [file ...]\n";
+ exit 1;
+}
+
+if($ENV{'GOARCH'} eq "" || $ENV{'GOOS'} eq "") {
+ print STDERR "GOARCH or GOOS not defined in environment\n";
+ exit 1;
+}
+
+sub parseparamlist($) {
+ my ($list) = @_;
+ $list =~ s/^\s*//;
+ $list =~ s/\s*$//;
+ if($list eq "") {
+ return ();
+ }
+ return split(/\s*,\s*/, $list);
+}
+
+sub parseparam($) {
+ my ($p) = @_;
+ if($p !~ /^(\S*) (\S*)$/) {
+ print STDERR "$ARGV:$.: malformed parameter: $p\n";
+ $errors = 1;
+ return ("xx", "int");
+ }
+ return ($1, $2);
+}
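+
+# For illustration: parseparamlist("fd int, p []byte") returns
+# ("fd int", "p []byte"), and parseparam("fd int") returns ("fd", "int").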
+
+my $text = "";
+while(<>) {
+ chomp;
+ s/\s+/ /g;
+ s/^\s+//;
+ s/\s+$//;
+ my $nonblock = /^\/\/sysnb /;
+ next if !/^\/\/sys / && !$nonblock;
+
+ # Line must be of the form
+ # func Open(path string, mode int, perm int) (fd int, errno error)
+ # Split into name, in params, out params.
+ if(!/^\/\/sys(nb)? (\w+)\(([^()]*)\)\s*(?:\(([^()]+)\))?\s*(?:=\s*((?i)SYS_[A-Z0-9_]+))?$/) {
+ print STDERR "$ARGV:$.: malformed //sys declaration\n";
+ $errors = 1;
+ next;
+ }
+ my ($func, $in, $out, $sysname) = ($2, $3, $4, $5);
+
+ # Split argument lists on comma.
+ my @in = parseparamlist($in);
+ my @out = parseparamlist($out);
+
+ # Try in vain to keep people from editing this file.
+ # The theory is that they jump into the middle of the file
+ # without reading the header.
+ $text .= "// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT\n\n";
+
+ # Go function header.
+ my $out_decl = @out ? sprintf(" (%s)", join(', ', @out)) : "";
+ $text .= sprintf "func %s(%s)%s {\n", $func, join(', ', @in), $out_decl;
+
+ # Check if err return available
+ my $errvar = "";
+ foreach my $p (@out) {
+ my ($name, $type) = parseparam($p);
+ if($type eq "error") {
+ $errvar = $name;
+ last;
+ }
+ }
+
+ # Prepare arguments to Syscall.
+ my @args = ();
+ my @uses = ();
+ my $n = 0;
+ foreach my $p (@in) {
+ my ($name, $type) = parseparam($p);
+ if($type =~ /^\*/) {
+ push @args, "uintptr(unsafe.Pointer($name))";
+ } elsif($type eq "string" && $errvar ne "") {
+ $text .= "\tvar _p$n *byte\n";
+ $text .= "\t_p$n, $errvar = BytePtrFromString($name)\n";
+ $text .= "\tif $errvar != nil {\n\t\treturn\n\t}\n";
+ push @args, "uintptr(unsafe.Pointer(_p$n))";
+ push @uses, "use(unsafe.Pointer(_p$n))";
+ $n++;
+ } elsif($type eq "string") {
+ print STDERR "$ARGV:$.: $func uses string arguments, but has no error return\n";
+ $text .= "\tvar _p$n *byte\n";
+ $text .= "\t_p$n, _ = BytePtrFromString($name)\n";
+ push @args, "uintptr(unsafe.Pointer(_p$n))";
+ push @uses, "use(unsafe.Pointer(_p$n))";
+ $n++;
+ } elsif($type =~ /^\[\](.*)/) {
+ # Convert slice into pointer, length.
+ # Have to be careful not to take address of &a[0] if len == 0:
+ # pass dummy pointer in that case.
+ # Used to pass nil, but some OSes or simulators reject write(fd, nil, 0).
+ $text .= "\tvar _p$n unsafe.Pointer\n";
+ $text .= "\tif len($name) > 0 {\n\t\t_p$n = unsafe.Pointer(\&${name}[0])\n\t}";
+ $text .= " else {\n\t\t_p$n = unsafe.Pointer(&_zero)\n\t}";
+ $text .= "\n";
+ push @args, "uintptr(_p$n)", "uintptr(len($name))";
+ $n++;
+ } elsif($type eq "int64" && ($openbsd || $netbsd)) {
+ push @args, "0";
+ if($_32bit eq "big-endian") {
+ push @args, "uintptr($name>>32)", "uintptr($name)";
+ } elsif($_32bit eq "little-endian") {
+ push @args, "uintptr($name)", "uintptr($name>>32)";
+ } else {
+ push @args, "uintptr($name)";
+ }
+ } elsif($type eq "int64" && $dragonfly) {
+ if ($func !~ /^extp(read|write)/i) {
+ push @args, "0";
+ }
+ if($_32bit eq "big-endian") {
+ push @args, "uintptr($name>>32)", "uintptr($name)";
+ } elsif($_32bit eq "little-endian") {
+ push @args, "uintptr($name)", "uintptr($name>>32)";
+ } else {
+ push @args, "uintptr($name)";
+ }
+ } elsif($type eq "int64" && $_32bit ne "") {
+ if(@args % 2 && $arm) {
+ # arm abi specifies 64-bit argument uses
+ # (even, odd) pair
+ push @args, "0"
+ }
+ if($_32bit eq "big-endian") {
+ push @args, "uintptr($name>>32)", "uintptr($name)";
+ } else {
+ push @args, "uintptr($name)", "uintptr($name>>32)";
+ }
+ } else {
+ push @args, "uintptr($name)";
+ }
+ }
+
+ # Determine which form to use; pad args with zeros.
+ my $asm = "Syscall";
+ if ($nonblock) {
+ $asm = "RawSyscall";
+ }
+ if(@args <= 3) {
+ while(@args < 3) {
+ push @args, "0";
+ }
+ } elsif(@args <= 6) {
+ $asm .= "6";
+ while(@args < 6) {
+ push @args, "0";
+ }
+ } elsif(@args <= 9) {
+ $asm .= "9";
+ while(@args < 9) {
+ push @args, "0";
+ }
+ } else {
+ print STDERR "$ARGV:$.: too many arguments to system call\n";
+ }
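+	# e.g. a call with four syscall arguments is padded to six and emitted as
+	# Syscall6(...); for //sysnb prototypes the raw form (RawSyscall*) is used.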
+
+ # System call number.
+ if($sysname eq "") {
+ $sysname = "SYS_$func";
+ $sysname =~ s/([a-z])([A-Z])/${1}_$2/g; # turn FooBar into Foo_Bar
+ $sysname =~ y/a-z/A-Z/;
+ }
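+	# e.g. a prototype named Setpgid derives SYS_SETPGID, while an explicit
+	# suffix such as "= SYS_LSEEK" on the //sys line overrides the derived name.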
+
+ # Actual call.
+ my $args = join(', ', @args);
+ my $call = "$asm($sysname, $args)";
+
+ # Assign return values.
+ my $body = "";
+ my @ret = ("_", "_", "_");
+ my $do_errno = 0;
+ for(my $i=0; $i<@out; $i++) {
+ my $p = $out[$i];
+ my ($name, $type) = parseparam($p);
+ my $reg = "";
+ if($name eq "err" && !$plan9) {
+ $reg = "e1";
+ $ret[2] = $reg;
+ $do_errno = 1;
+ } elsif($name eq "err" && $plan9) {
+ $ret[0] = "r0";
+ $ret[2] = "e1";
+ next;
+ } else {
+ $reg = sprintf("r%d", $i);
+ $ret[$i] = $reg;
+ }
+ if($type eq "bool") {
+ $reg = "$reg != 0";
+ }
+ if($type eq "int64" && $_32bit ne "") {
+ # 64-bit number in r1:r0 or r0:r1.
+ if($i+2 > @out) {
+ print STDERR "$ARGV:$.: not enough registers for int64 return\n";
+ }
+ if($_32bit eq "big-endian") {
+ $reg = sprintf("int64(r%d)<<32 | int64(r%d)", $i, $i+1);
+ } else {
+ $reg = sprintf("int64(r%d)<<32 | int64(r%d)", $i+1, $i);
+ }
+ $ret[$i] = sprintf("r%d", $i);
+ $ret[$i+1] = sprintf("r%d", $i+1);
+ }
+ if($reg ne "e1" || $plan9) {
+ $body .= "\t$name = $type($reg)\n";
+ }
+ }
+ if ($ret[0] eq "_" && $ret[1] eq "_" && $ret[2] eq "_") {
+ $text .= "\t$call\n";
+ } else {
+ $text .= "\t$ret[0], $ret[1], $ret[2] := $call\n";
+ }
+ foreach my $use (@uses) {
+ $text .= "\t$use\n";
+ }
+ $text .= $body;
+
+ if ($plan9 && $ret[2] eq "e1") {
+ $text .= "\tif int32(r0) == -1 {\n";
+ $text .= "\t\terr = e1\n";
+ $text .= "\t}\n";
+ } elsif ($do_errno) {
+ $text .= "\tif e1 != 0 {\n";
+ $text .= "\t\terr = errnoErr(e1)\n";
+ $text .= "\t}\n";
+ }
+ $text .= "\treturn\n";
+ $text .= "}\n\n";
+}
+
+chomp $text;
+chomp $text;
+
+if($errors) {
+ exit 1;
+}
+
+print <) {
+ chomp;
+ s/\s+/ /g;
+ s/^\s+//;
+ s/\s+$//;
+ $package = $1 if !$package && /^package (\S+)$/;
+ my $nonblock = /^\/\/sysnb /;
+ next if !/^\/\/sys / && !$nonblock;
+
+ # Line must be of the form
+ # func Open(path string, mode int, perm int) (fd int, err error)
+ # Split into name, in params, out params.
+ if(!/^\/\/sys(nb)? (\w+)\(([^()]*)\)\s*(?:\(([^()]+)\))?\s*(?:=\s*(?:(\w*)\.)?(\w*))?$/) {
+ print STDERR "$ARGV:$.: malformed //sys declaration\n";
+ $errors = 1;
+ next;
+ }
+ my ($nb, $func, $in, $out, $modname, $sysname) = ($1, $2, $3, $4, $5, $6);
+
+ # Split argument lists on comma.
+ my @in = parseparamlist($in);
+ my @out = parseparamlist($out);
+
+ # So file name.
+ if($modname eq "") {
+ $modname = "libc";
+ }
+
+ # System call name.
+ if($sysname eq "") {
+ $sysname = "$func";
+ }
+
+ # System call pointer variable name.
+ my $sysvarname = "proc$sysname";
+
+ my $strconvfunc = "BytePtrFromString";
+ my $strconvtype = "*byte";
+
+ $sysname =~ y/A-Z/a-z/; # All libc functions are lowercase.
+
+ # Runtime import of function to allow cross-platform builds.
+ $dynimports .= "//go:cgo_import_dynamic libc_${sysname} ${sysname} \"$modname.so\"\n";
+ # Link symbol to proc address variable.
+ $linknames .= "//go:linkname ${sysvarname} libc_${sysname}\n";
+ # Library proc address variable.
+ push @vars, $sysvarname;
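+	# Illustration: for a //sys line naming Getcwd this emits, roughly,
+	#	//go:cgo_import_dynamic libc_getcwd getcwd "libc.so"
+	#	//go:linkname procGetcwd libc_getcwd
+	# so the stub can call the libc symbol through its proc address variable.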
+
+ # Go function header.
+ $out = join(', ', @out);
+ if($out ne "") {
+ $out = " ($out)";
+ }
+ if($text ne "") {
+ $text .= "\n"
+ }
+ $text .= sprintf "func %s(%s)%s {\n", $func, join(', ', @in), $out;
+
+ # Check if err return available
+ my $errvar = "";
+ foreach my $p (@out) {
+ my ($name, $type) = parseparam($p);
+ if($type eq "error") {
+ $errvar = $name;
+ last;
+ }
+ }
+
+ # Prepare arguments to Syscall.
+ my @args = ();
+ my @uses = ();
+ my $n = 0;
+ foreach my $p (@in) {
+ my ($name, $type) = parseparam($p);
+ if($type =~ /^\*/) {
+ push @args, "uintptr(unsafe.Pointer($name))";
+ } elsif($type eq "string" && $errvar ne "") {
+ $text .= "\tvar _p$n $strconvtype\n";
+ $text .= "\t_p$n, $errvar = $strconvfunc($name)\n";
+ $text .= "\tif $errvar != nil {\n\t\treturn\n\t}\n";
+ push @args, "uintptr(unsafe.Pointer(_p$n))";
+ push @uses, "use(unsafe.Pointer(_p$n))";
+ $n++;
+ } elsif($type eq "string") {
+ print STDERR "$ARGV:$.: $func uses string arguments, but has no error return\n";
+ $text .= "\tvar _p$n $strconvtype\n";
+ $text .= "\t_p$n, _ = $strconvfunc($name)\n";
+ push @args, "uintptr(unsafe.Pointer(_p$n))";
+ push @uses, "use(unsafe.Pointer(_p$n))";
+ $n++;
+ } elsif($type =~ /^\[\](.*)/) {
+ # Convert slice into pointer, length.
+ # Have to be careful not to take address of &a[0] if len == 0:
+ # pass nil in that case.
+ $text .= "\tvar _p$n *$1\n";
+ $text .= "\tif len($name) > 0 {\n\t\t_p$n = \&$name\[0]\n\t}\n";
+ push @args, "uintptr(unsafe.Pointer(_p$n))", "uintptr(len($name))";
+ $n++;
+ } elsif($type eq "int64" && $_32bit ne "") {
+ if($_32bit eq "big-endian") {
+ push @args, "uintptr($name >> 32)", "uintptr($name)";
+ } else {
+ push @args, "uintptr($name)", "uintptr($name >> 32)";
+ }
+ } elsif($type eq "bool") {
+ $text .= "\tvar _p$n uint32\n";
+ $text .= "\tif $name {\n\t\t_p$n = 1\n\t} else {\n\t\t_p$n = 0\n\t}\n";
+ push @args, "uintptr(_p$n)";
+ $n++;
+ } else {
+ push @args, "uintptr($name)";
+ }
+ }
+ my $nargs = @args;
+
+ # Determine which form to use; pad args with zeros.
+ my $asm = "sysvicall6";
+ if ($nonblock) {
+ $asm = "rawSysvicall6";
+ }
+ if(@args <= 6) {
+ while(@args < 6) {
+ push @args, "0";
+ }
+ } else {
+ print STDERR "$ARGV:$.: too many arguments to system call\n";
+ }
+
+ # Actual call.
+ my $args = join(', ', @args);
+ my $call = "$asm(uintptr(unsafe.Pointer(&$sysvarname)), $nargs, $args)";
+
+ # Assign return values.
+ my $body = "";
+ my $failexpr = "";
+ my @ret = ("_", "_", "_");
+ my @pout= ();
+ my $do_errno = 0;
+ for(my $i=0; $i<@out; $i++) {
+ my $p = $out[$i];
+ my ($name, $type) = parseparam($p);
+ my $reg = "";
+ if($name eq "err") {
+ $reg = "e1";
+ $ret[2] = $reg;
+ $do_errno = 1;
+ } else {
+ $reg = sprintf("r%d", $i);
+ $ret[$i] = $reg;
+ }
+ if($type eq "bool") {
+ $reg = "$reg != 0";
+ }
+ if($type eq "int64" && $_32bit ne "") {
+ # 64-bit number in r1:r0 or r0:r1.
+ if($i+2 > @out) {
+ print STDERR "$ARGV:$.: not enough registers for int64 return\n";
+ }
+ if($_32bit eq "big-endian") {
+ $reg = sprintf("int64(r%d)<<32 | int64(r%d)", $i, $i+1);
+ } else {
+ $reg = sprintf("int64(r%d)<<32 | int64(r%d)", $i+1, $i);
+ }
+ $ret[$i] = sprintf("r%d", $i);
+ $ret[$i+1] = sprintf("r%d", $i+1);
+ }
+ if($reg ne "e1") {
+ $body .= "\t$name = $type($reg)\n";
+ }
+ }
+ if ($ret[0] eq "_" && $ret[1] eq "_" && $ret[2] eq "_") {
+ $text .= "\t$call\n";
+ } else {
+ $text .= "\t$ret[0], $ret[1], $ret[2] := $call\n";
+ }
+ foreach my $use (@uses) {
+ $text .= "\t$use\n";
+ }
+ $text .= $body;
+
+ if ($do_errno) {
+ $text .= "\tif e1 != 0 {\n";
+ $text .= "\t\terr = e1\n";
+ $text .= "\t}\n";
+ }
+ $text .= "\treturn\n";
+ $text .= "}\n";
+}
+
+if($errors) {
+ exit 1;
+}
+
+print < "net.inet",
+ "net.inet.ipproto" => "net.inet",
+ "net.inet6.ipv6proto" => "net.inet6",
+ "net.inet6.ipv6" => "net.inet6.ip6",
+ "net.inet.icmpv6" => "net.inet6.icmp6",
+ "net.inet6.divert6" => "net.inet6.divert",
+ "net.inet6.tcp6" => "net.inet.tcp",
+ "net.inet6.udp6" => "net.inet.udp",
+ "mpls" => "net.mpls",
+ "swpenc" => "vm.swapencrypt"
+);
+
+# Node mappings
+my %node_map = (
+ "net.inet.ip.ifq" => "net.ifq",
+ "net.inet.pfsync" => "net.pfsync",
+ "net.mpls.ifq" => "net.ifq"
+);
+
+my $ctlname;
+my %mib = ();
+my %sysctl = ();
+my $node;
+
+sub debug() {
+ print STDERR "$_[0]\n" if $debug;
+}
+
+# Walk the MIB and build a sysctl name to OID mapping.
+sub build_sysctl() {
+ my ($node, $name, $oid) = @_;
+ my %node = %{$node};
+ my @oid = @{$oid};
+
+ foreach my $key (sort keys %node) {
+ my @node = @{$node{$key}};
+ my $nodename = $name.($name ne '' ? '.' : '').$key;
+ my @nodeoid = (@oid, $node[0]);
+ if ($node[1] eq 'CTLTYPE_NODE') {
+ if (exists $node_map{$nodename}) {
+ $node = \%mib;
+ $ctlname = $node_map{$nodename};
+ foreach my $part (split /\./, $ctlname) {
+ $node = \%{@{$$node{$part}}[2]};
+ }
+ } else {
+ $node = $node[2];
+ }
+ &build_sysctl($node, $nodename, \@nodeoid);
+ } elsif ($node[1] ne '') {
+ $sysctl{$nodename} = \@nodeoid;
+ }
+ }
+}
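+
+# For instance (illustrative), the walk maps "kern.hostname" to the OID
+# []_C_int{1, 10}, i.e. CTL_KERN followed by KERN_HOSTNAME, in the generated table.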
+
+foreach my $ctl (@ctls) {
+ $ctls{$ctl} = $ctl;
+}
+
+# Build MIB
+foreach my $header (@headers) {
+ &debug("Processing $header...");
+ open HEADER, "/usr/include/$header" ||
+ print STDERR "Failed to open $header\n";
+	while (<HEADER>) {
+ if ($_ =~ /^#define\s+(CTL_NAMES)\s+{/ ||
+ $_ =~ /^#define\s+(CTL_(.*)_NAMES)\s+{/ ||
+ $_ =~ /^#define\s+((.*)CTL_NAMES)\s+{/) {
+ if ($1 eq 'CTL_NAMES') {
+ # Top level.
+ $node = \%mib;
+ } else {
+ # Node.
+ my $nodename = lc($2);
+ if ($header =~ /^netinet\//) {
+ $ctlname = "net.inet.$nodename";
+ } elsif ($header =~ /^netinet6\//) {
+ $ctlname = "net.inet6.$nodename";
+ } elsif ($header =~ /^net\//) {
+ $ctlname = "net.$nodename";
+ } else {
+ $ctlname = "$nodename";
+ $ctlname =~ s/^(fs|net|kern)_/$1\./;
+ }
+ if (exists $ctl_map{$ctlname}) {
+ $ctlname = $ctl_map{$ctlname};
+ }
+ if (not exists $ctls{$ctlname}) {
+ &debug("Ignoring $ctlname...");
+ next;
+ }
+
+ # Walk down from the top of the MIB.
+ $node = \%mib;
+ foreach my $part (split /\./, $ctlname) {
+ if (not exists $$node{$part}) {
+ &debug("Missing node $part");
+ $$node{$part} = [ 0, '', {} ];
+ }
+ $node = \%{@{$$node{$part}}[2]};
+ }
+ }
+
+ # Populate current node with entries.
+ my $i = -1;
+ while (defined($_) && $_ !~ /^}/) {
+				$_ = <HEADER>;
+ $i++ if $_ =~ /{.*}/;
+ next if $_ !~ /{\s+"(\w+)",\s+(CTLTYPE_[A-Z]+)\s+}/;
+ $$node{$1} = [ $i, $2, {} ];
+ }
+ }
+ }
+ close HEADER;
+}
+
+&build_sysctl(\%mib, "", []);
+
+print <){
+ if(/^#define\s+SYS_(\w+)\s+([0-9]+)/){
+ my $name = $1;
+ my $num = $2;
+ $name =~ y/a-z/A-Z/;
+ print " SYS_$name = $num;"
+ }
+}
+
+print <){
+ if(/^([0-9]+)\s+STD\s+({ \S+\s+(\w+).*)$/){
+ my $num = $1;
+ my $proto = $2;
+ my $name = "SYS_$3";
+ $name =~ y/a-z/A-Z/;
+
+ # There are multiple entries for enosys and nosys, so comment them out.
+ if($name =~ /^SYS_E?NOSYS$/){
+ $name = "// $name";
+ }
+ if($name eq 'SYS_SYS_EXIT'){
+ $name = 'SYS_EXIT';
+ }
+
+ print " $name = $num; // $proto\n";
+ }
+}
+
+print <){
+ if(/^([0-9]+)\s+\S+\s+STD\s+({ \S+\s+(\w+).*)$/){
+ my $num = $1;
+ my $proto = $2;
+ my $name = "SYS_$3";
+ $name =~ y/a-z/A-Z/;
+
+ # There are multiple entries for enosys and nosys, so comment them out.
+ if($name =~ /^SYS_E?NOSYS$/){
+ $name = "// $name";
+ }
+ if($name eq 'SYS_SYS_EXIT'){
+ $name = 'SYS_EXIT';
+ }
+ if($name =~ /^SYS_CAP_+/ || $name =~ /^SYS___CAP_+/){
+ next
+ }
+
+ print " $name = $num; // $proto\n";
+
+ # We keep Capsicum syscall numbers for FreeBSD
+ # 9-STABLE here because we are not sure whether they
+ # are mature and stable.
+ if($num == 513){
+ print " SYS_CAP_NEW = 514 // { int cap_new(int fd, uint64_t rights); }\n";
+ print " SYS_CAP_GETRIGHTS = 515 // { int cap_getrights(int fd, \\\n";
+ print " SYS_CAP_ENTER = 516 // { int cap_enter(void); }\n";
+ print " SYS_CAP_GETMODE = 517 // { int cap_getmode(u_int *modep); }\n";
+ }
+ }
+}
+
+print < 999){
+ # ignore deprecated syscalls that are no longer implemented
+ # https://git.kernel.org/cgit/linux/kernel/git/torvalds/linux.git/tree/include/uapi/asm-generic/unistd.h?id=refs/heads/master#n716
+ return;
+ }
+ $name =~ y/a-z/A-Z/;
+ print " SYS_$name = $num;\n";
+}
+
+my $prev;
+open(GCC, "gcc -E -dD $ARGV[0] |") || die "can't run gcc";
+while(<GCC>){
+ if(/^#define __NR_syscalls\s+/) {
+ # ignore redefinitions of __NR_syscalls
+ }
+ elsif(/^#define __NR_(\w+)\s+([0-9]+)/){
+ $prev = $2;
+ fmt($1, $2);
+ }
+ elsif(/^#define __NR3264_(\w+)\s+([0-9]+)/){
+ $prev = $2;
+ fmt($1, $2);
+ }
+ elsif(/^#define __NR_(\w+)\s+\(\w+\+\s*([0-9]+)\)/){
+ fmt($1, $prev+$2)
+ }
+}
+
+print <){
+ if($line =~ /^(.*)\\$/) {
+ # Handle continuation
+ $line = $1;
+ $_ =~ s/^\s+//;
+ $line .= $_;
+ } else {
+ # New line
+ $line = $_;
+ }
+ next if $line =~ /\\$/;
+ if($line =~ /^([0-9]+)\s+((STD)|(NOERR))\s+(RUMP\s+)?({\s+\S+\s*\*?\s*\|(\S+)\|(\S*)\|(\w+).*\s+})(\s+(\S+))?$/) {
+ my $num = $1;
+ my $proto = $6;
+ my $compat = $8;
+ my $name = "$7_$9";
+
+ $name = "$7_$11" if $11 ne '';
+ $name =~ y/a-z/A-Z/;
+
+ if($compat eq '' || $compat eq '30' || $compat eq '50') {
+ print " $name = $num; // $proto\n";
+ }
+ }
+}
+
+print <){
+ if(/^([0-9]+)\s+STD\s+(NOLOCK\s+)?({ \S+\s+\*?(\w+).*)$/){
+ my $num = $1;
+ my $proto = $3;
+ my $name = $4;
+ $name =~ y/a-z/A-Z/;
+
+ # There are multiple entries for enosys and nosys, so comment them out.
+ if($name =~ /^SYS_E?NOSYS$/){
+ $name = "// $name";
+ }
+ if($name eq 'SYS_SYS_EXIT'){
+ $name = 'SYS_EXIT';
+ }
+
+ print " $name = $num; // $proto\n";
+ }
+}
+
+print < len(b) {
+ return nil, nil, EINVAL
+ }
+ return h, b[cmsgAlignOf(SizeofCmsghdr):h.Len], nil
+}
+
+// UnixRights encodes a set of open file descriptors into a socket
+// control message for sending to another process.
+func UnixRights(fds ...int) []byte {
+ datalen := len(fds) * 4
+ b := make([]byte, CmsgSpace(datalen))
+ h := (*Cmsghdr)(unsafe.Pointer(&b[0]))
+ h.Level = SOL_SOCKET
+ h.Type = SCM_RIGHTS
+ h.SetLen(CmsgLen(datalen))
+ data := cmsgData(h)
+ for _, fd := range fds {
+ *(*int32)(data) = int32(fd)
+ data = unsafe.Pointer(uintptr(data) + 4)
+ }
+ return b
+}
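+
+// Illustrative use (connFd is assumed to be a connected AF_UNIX socket and
+// fd an open file descriptor):
+//	oob := UnixRights(fd)
+//	err := Sendmsg(connFd, nil, oob, nil, 0)
+// The receiving process recovers the descriptor with
+// ParseSocketControlMessage followed by ParseUnixRights.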
+
+// ParseUnixRights decodes a socket control message that contains an
+// integer array of open file descriptors from another process.
+func ParseUnixRights(m *SocketControlMessage) ([]int, error) {
+ if m.Header.Level != SOL_SOCKET {
+ return nil, EINVAL
+ }
+ if m.Header.Type != SCM_RIGHTS {
+ return nil, EINVAL
+ }
+ fds := make([]int, len(m.Data)>>2)
+ for i, j := 0, 0; i < len(m.Data); i += 4 {
+ fds[j] = int(*(*int32)(unsafe.Pointer(&m.Data[i])))
+ j++
+ }
+ return fds, nil
+}
diff --git a/vendor/golang.org/x/sys/unix/str.go b/vendor/golang.org/x/sys/unix/str.go
new file mode 100644
index 00000000..35ed6643
--- /dev/null
+++ b/vendor/golang.org/x/sys/unix/str.go
@@ -0,0 +1,26 @@
+// Copyright 2009 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// +build darwin dragonfly freebsd linux netbsd openbsd solaris
+
+package unix
+
+func itoa(val int) string { // do it here rather than with fmt to avoid dependency
+ if val < 0 {
+ return "-" + uitoa(uint(-val))
+ }
+ return uitoa(uint(val))
+}
+
+func uitoa(val uint) string {
+ var buf [32]byte // big enough for int64
+ i := len(buf) - 1
+ for val >= 10 {
+ buf[i] = byte(val%10 + '0')
+ i--
+ val /= 10
+ }
+ buf[i] = byte(val + '0')
+ return string(buf[i:])
+}
diff --git a/vendor/golang.org/x/sys/unix/syscall.go b/vendor/golang.org/x/sys/unix/syscall.go
new file mode 100644
index 00000000..6442a993
--- /dev/null
+++ b/vendor/golang.org/x/sys/unix/syscall.go
@@ -0,0 +1,74 @@
+// Copyright 2009 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// +build darwin dragonfly freebsd linux netbsd openbsd solaris
+
+// Package unix contains an interface to the low-level operating system
+// primitives. OS details vary depending on the underlying system, and
+// by default, godoc will display OS-specific documentation for the current
+// system. If you want godoc to display OS documentation for another
+// system, set $GOOS and $GOARCH to the desired system. For example, if
+// you want to view documentation for freebsd/arm on linux/amd64, set $GOOS
+// to freebsd and $GOARCH to arm.
+// The primary use of this package is inside other packages that provide a more
+// portable interface to the system, such as "os", "time" and "net". Use
+// those packages rather than this one if you can.
+// For details of the functions and data types in this package consult
+// the manuals for the appropriate operating system.
+// These calls return err == nil to indicate success; otherwise
+// err represents an operating system error describing the failure and
+// holds a value of type syscall.Errno.
+package unix // import "golang.org/x/sys/unix"
+
+import "unsafe"
+
+// ByteSliceFromString returns a NUL-terminated slice of bytes
+// containing the text of s. If s contains a NUL byte at any
+// location, it returns (nil, EINVAL).
+func ByteSliceFromString(s string) ([]byte, error) {
+ for i := 0; i < len(s); i++ {
+ if s[i] == 0 {
+ return nil, EINVAL
+ }
+ }
+ a := make([]byte, len(s)+1)
+ copy(a, s)
+ return a, nil
+}
+
+// BytePtrFromString returns a pointer to a NUL-terminated array of
+// bytes containing the text of s. If s contains a NUL byte at any
+// location, it returns (nil, EINVAL).
+func BytePtrFromString(s string) (*byte, error) {
+ a, err := ByteSliceFromString(s)
+ if err != nil {
+ return nil, err
+ }
+ return &a[0], nil
+}
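+
+// For example, BytePtrFromString("/tmp") points at the bytes '/', 't', 'm',
+// 'p', 0, whereas BytePtrFromString("a\x00b") fails with EINVAL because an
+// embedded NUL cannot be represented in a C string.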
+
+// Single-word zero for use when we need a valid pointer to 0 bytes.
+// See mkunix.pl.
+var _zero uintptr
+
+func (ts *Timespec) Unix() (sec int64, nsec int64) {
+ return int64(ts.Sec), int64(ts.Nsec)
+}
+
+func (tv *Timeval) Unix() (sec int64, nsec int64) {
+ return int64(tv.Sec), int64(tv.Usec) * 1000
+}
+
+func (ts *Timespec) Nano() int64 {
+ return int64(ts.Sec)*1e9 + int64(ts.Nsec)
+}
+
+func (tv *Timeval) Nano() int64 {
+ return int64(tv.Sec)*1e9 + int64(tv.Usec)*1000
+}
+
+// use is a no-op, but the compiler cannot see that it is.
+// Calling use(p) ensures that p is kept live until that point.
+//go:noescape
+func use(p unsafe.Pointer)
diff --git a/vendor/golang.org/x/sys/unix/syscall_bsd.go b/vendor/golang.org/x/sys/unix/syscall_bsd.go
new file mode 100644
index 00000000..e9671764
--- /dev/null
+++ b/vendor/golang.org/x/sys/unix/syscall_bsd.go
@@ -0,0 +1,628 @@
+// Copyright 2009 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// +build darwin dragonfly freebsd netbsd openbsd
+
+// BSD system call wrappers shared by *BSD based systems
+// including OS X (Darwin) and FreeBSD. Like the other
+// syscall_*.go files it is compiled as Go code but also
+// used as input to mksyscall which parses the //sys
+// lines and generates system call stubs.
+
+package unix
+
+import (
+ "runtime"
+ "syscall"
+ "unsafe"
+)
+
+/*
+ * Wrapped
+ */
+
+//sysnb getgroups(ngid int, gid *_Gid_t) (n int, err error)
+//sysnb setgroups(ngid int, gid *_Gid_t) (err error)
+
+func Getgroups() (gids []int, err error) {
+ n, err := getgroups(0, nil)
+ if err != nil {
+ return nil, err
+ }
+ if n == 0 {
+ return nil, nil
+ }
+
+ // Sanity check group count. Max is 16 on BSD.
+ if n < 0 || n > 1000 {
+ return nil, EINVAL
+ }
+
+ a := make([]_Gid_t, n)
+ n, err = getgroups(n, &a[0])
+ if err != nil {
+ return nil, err
+ }
+ gids = make([]int, n)
+ for i, v := range a[0:n] {
+ gids[i] = int(v)
+ }
+ return
+}
+
+func Setgroups(gids []int) (err error) {
+ if len(gids) == 0 {
+ return setgroups(0, nil)
+ }
+
+ a := make([]_Gid_t, len(gids))
+ for i, v := range gids {
+ a[i] = _Gid_t(v)
+ }
+ return setgroups(len(a), &a[0])
+}
+
+func ReadDirent(fd int, buf []byte) (n int, err error) {
+ // Final argument is (basep *uintptr) and the syscall doesn't take nil.
+ // 64 bits should be enough. (32 bits isn't even on 386). Since the
+ // actual system call is getdirentries64, 64 is a good guess.
+ // TODO(rsc): Can we use a single global basep for all calls?
+ var base = (*uintptr)(unsafe.Pointer(new(uint64)))
+ return Getdirentries(fd, buf, base)
+}
+
+// Wait status is 7 bits at bottom, either 0 (exited),
+// 0x7F (stopped), or a signal number that caused an exit.
+// The 0x80 bit is whether there was a core dump.
+// An extra number (exit code, signal causing a stop)
+// is in the high bits.
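+//
+// For example (illustrative): a status of 0x0009 reports termination by
+// signal 9 (SIGKILL) with no core dump, while 0x127F reports a process
+// stopped by signal 18 (SIGTSTP on these BSDs).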
+
+type WaitStatus uint32
+
+const (
+ mask = 0x7F
+ core = 0x80
+ shift = 8
+
+ exited = 0
+ stopped = 0x7F
+)
+
+func (w WaitStatus) Exited() bool { return w&mask == exited }
+
+func (w WaitStatus) ExitStatus() int {
+ if w&mask != exited {
+ return -1
+ }
+ return int(w >> shift)
+}
+
+func (w WaitStatus) Signaled() bool { return w&mask != stopped && w&mask != 0 }
+
+func (w WaitStatus) Signal() syscall.Signal {
+ sig := syscall.Signal(w & mask)
+ if sig == stopped || sig == 0 {
+ return -1
+ }
+ return sig
+}
+
+func (w WaitStatus) CoreDump() bool { return w.Signaled() && w&core != 0 }
+
+func (w WaitStatus) Stopped() bool { return w&mask == stopped && syscall.Signal(w>>shift) != SIGSTOP }
+
+func (w WaitStatus) Continued() bool { return w&mask == stopped && syscall.Signal(w>>shift) == SIGSTOP }
+
+func (w WaitStatus) StopSignal() syscall.Signal {
+ if !w.Stopped() {
+ return -1
+ }
+ return syscall.Signal(w>>shift) & 0xFF
+}
+
+func (w WaitStatus) TrapCause() int { return -1 }
+
+//sys wait4(pid int, wstatus *_C_int, options int, rusage *Rusage) (wpid int, err error)
+
+func Wait4(pid int, wstatus *WaitStatus, options int, rusage *Rusage) (wpid int, err error) {
+ var status _C_int
+ wpid, err = wait4(pid, &status, options, rusage)
+ if wstatus != nil {
+ *wstatus = WaitStatus(status)
+ }
+ return
+}
+
+//sys accept(s int, rsa *RawSockaddrAny, addrlen *_Socklen) (fd int, err error)
+//sys bind(s int, addr unsafe.Pointer, addrlen _Socklen) (err error)
+//sys connect(s int, addr unsafe.Pointer, addrlen _Socklen) (err error)
+//sysnb socket(domain int, typ int, proto int) (fd int, err error)
+//sys getsockopt(s int, level int, name int, val unsafe.Pointer, vallen *_Socklen) (err error)
+//sys setsockopt(s int, level int, name int, val unsafe.Pointer, vallen uintptr) (err error)
+//sysnb getpeername(fd int, rsa *RawSockaddrAny, addrlen *_Socklen) (err error)
+//sysnb getsockname(fd int, rsa *RawSockaddrAny, addrlen *_Socklen) (err error)
+//sys Shutdown(s int, how int) (err error)
+
+func (sa *SockaddrInet4) sockaddr() (unsafe.Pointer, _Socklen, error) {
+ if sa.Port < 0 || sa.Port > 0xFFFF {
+ return nil, 0, EINVAL
+ }
+ sa.raw.Len = SizeofSockaddrInet4
+ sa.raw.Family = AF_INET
+ p := (*[2]byte)(unsafe.Pointer(&sa.raw.Port))
+ p[0] = byte(sa.Port >> 8)
+ p[1] = byte(sa.Port)
+ for i := 0; i < len(sa.Addr); i++ {
+ sa.raw.Addr[i] = sa.Addr[i]
+ }
+ return unsafe.Pointer(&sa.raw), _Socklen(sa.raw.Len), nil
+}
+
+func (sa *SockaddrInet6) sockaddr() (unsafe.Pointer, _Socklen, error) {
+ if sa.Port < 0 || sa.Port > 0xFFFF {
+ return nil, 0, EINVAL
+ }
+ sa.raw.Len = SizeofSockaddrInet6
+ sa.raw.Family = AF_INET6
+ p := (*[2]byte)(unsafe.Pointer(&sa.raw.Port))
+ p[0] = byte(sa.Port >> 8)
+ p[1] = byte(sa.Port)
+ sa.raw.Scope_id = sa.ZoneId
+ for i := 0; i < len(sa.Addr); i++ {
+ sa.raw.Addr[i] = sa.Addr[i]
+ }
+ return unsafe.Pointer(&sa.raw), _Socklen(sa.raw.Len), nil
+}
+
+func (sa *SockaddrUnix) sockaddr() (unsafe.Pointer, _Socklen, error) {
+ name := sa.Name
+ n := len(name)
+ if n >= len(sa.raw.Path) || n == 0 {
+ return nil, 0, EINVAL
+ }
+ sa.raw.Len = byte(3 + n) // 2 for Family, Len; 1 for NUL
+ sa.raw.Family = AF_UNIX
+ for i := 0; i < n; i++ {
+ sa.raw.Path[i] = int8(name[i])
+ }
+ return unsafe.Pointer(&sa.raw), _Socklen(sa.raw.Len), nil
+}
+
+func (sa *SockaddrDatalink) sockaddr() (unsafe.Pointer, _Socklen, error) {
+ if sa.Index == 0 {
+ return nil, 0, EINVAL
+ }
+ sa.raw.Len = sa.Len
+ sa.raw.Family = AF_LINK
+ sa.raw.Index = sa.Index
+ sa.raw.Type = sa.Type
+ sa.raw.Nlen = sa.Nlen
+ sa.raw.Alen = sa.Alen
+ sa.raw.Slen = sa.Slen
+ for i := 0; i < len(sa.raw.Data); i++ {
+ sa.raw.Data[i] = sa.Data[i]
+ }
+ return unsafe.Pointer(&sa.raw), SizeofSockaddrDatalink, nil
+}
+
+func anyToSockaddr(rsa *RawSockaddrAny) (Sockaddr, error) {
+ switch rsa.Addr.Family {
+ case AF_LINK:
+ pp := (*RawSockaddrDatalink)(unsafe.Pointer(rsa))
+ sa := new(SockaddrDatalink)
+ sa.Len = pp.Len
+ sa.Family = pp.Family
+ sa.Index = pp.Index
+ sa.Type = pp.Type
+ sa.Nlen = pp.Nlen
+ sa.Alen = pp.Alen
+ sa.Slen = pp.Slen
+ for i := 0; i < len(sa.Data); i++ {
+ sa.Data[i] = pp.Data[i]
+ }
+ return sa, nil
+
+ case AF_UNIX:
+ pp := (*RawSockaddrUnix)(unsafe.Pointer(rsa))
+ if pp.Len < 2 || pp.Len > SizeofSockaddrUnix {
+ return nil, EINVAL
+ }
+ sa := new(SockaddrUnix)
+
+ // Some BSDs include the trailing NUL in the length, whereas
+ // others do not. Work around this by subtracting the leading
+ // family and len. The path is then scanned to see if a NUL
+ // terminator still exists within the length.
+ n := int(pp.Len) - 2 // subtract leading Family, Len
+ for i := 0; i < n; i++ {
+ if pp.Path[i] == 0 {
+ // found early NUL; assume Len included the NUL
+ // or was overestimating.
+ n = i
+ break
+ }
+ }
+ bytes := (*[10000]byte)(unsafe.Pointer(&pp.Path[0]))[0:n]
+ sa.Name = string(bytes)
+ return sa, nil
+
+ case AF_INET:
+ pp := (*RawSockaddrInet4)(unsafe.Pointer(rsa))
+ sa := new(SockaddrInet4)
+ p := (*[2]byte)(unsafe.Pointer(&pp.Port))
+ sa.Port = int(p[0])<<8 + int(p[1])
+ for i := 0; i < len(sa.Addr); i++ {
+ sa.Addr[i] = pp.Addr[i]
+ }
+ return sa, nil
+
+ case AF_INET6:
+ pp := (*RawSockaddrInet6)(unsafe.Pointer(rsa))
+ sa := new(SockaddrInet6)
+ p := (*[2]byte)(unsafe.Pointer(&pp.Port))
+ sa.Port = int(p[0])<<8 + int(p[1])
+ sa.ZoneId = pp.Scope_id
+ for i := 0; i < len(sa.Addr); i++ {
+ sa.Addr[i] = pp.Addr[i]
+ }
+ return sa, nil
+ }
+ return nil, EAFNOSUPPORT
+}
+
+func Accept(fd int) (nfd int, sa Sockaddr, err error) {
+ var rsa RawSockaddrAny
+ var len _Socklen = SizeofSockaddrAny
+ nfd, err = accept(fd, &rsa, &len)
+ if err != nil {
+ return
+ }
+ if runtime.GOOS == "darwin" && len == 0 {
+ // Accepted socket has no address.
+ // This is likely due to a bug in xnu kernels,
+ // where instead of ECONNABORTED error socket
+ // is accepted, but has no address.
+ Close(nfd)
+ return 0, nil, ECONNABORTED
+ }
+ sa, err = anyToSockaddr(&rsa)
+ if err != nil {
+ Close(nfd)
+ nfd = 0
+ }
+ return
+}
+
+func Getsockname(fd int) (sa Sockaddr, err error) {
+ var rsa RawSockaddrAny
+ var len _Socklen = SizeofSockaddrAny
+ if err = getsockname(fd, &rsa, &len); err != nil {
+ return
+ }
+ // TODO(jsing): DragonFly has a "bug" (see issue 3349), which should be
+ // reported upstream.
+ if runtime.GOOS == "dragonfly" && rsa.Addr.Family == AF_UNSPEC && rsa.Addr.Len == 0 {
+ rsa.Addr.Family = AF_UNIX
+ rsa.Addr.Len = SizeofSockaddrUnix
+ }
+ return anyToSockaddr(&rsa)
+}
+
+//sysnb socketpair(domain int, typ int, proto int, fd *[2]int32) (err error)
+
+func GetsockoptByte(fd, level, opt int) (value byte, err error) {
+ var n byte
+ vallen := _Socklen(1)
+ err = getsockopt(fd, level, opt, unsafe.Pointer(&n), &vallen)
+ return n, err
+}
+
+func GetsockoptInet4Addr(fd, level, opt int) (value [4]byte, err error) {
+ vallen := _Socklen(4)
+ err = getsockopt(fd, level, opt, unsafe.Pointer(&value[0]), &vallen)
+ return value, err
+}
+
+func GetsockoptIPMreq(fd, level, opt int) (*IPMreq, error) {
+ var value IPMreq
+ vallen := _Socklen(SizeofIPMreq)
+ err := getsockopt(fd, level, opt, unsafe.Pointer(&value), &vallen)
+ return &value, err
+}
+
+func GetsockoptIPv6Mreq(fd, level, opt int) (*IPv6Mreq, error) {
+ var value IPv6Mreq
+ vallen := _Socklen(SizeofIPv6Mreq)
+ err := getsockopt(fd, level, opt, unsafe.Pointer(&value), &vallen)
+ return &value, err
+}
+
+func GetsockoptIPv6MTUInfo(fd, level, opt int) (*IPv6MTUInfo, error) {
+ var value IPv6MTUInfo
+ vallen := _Socklen(SizeofIPv6MTUInfo)
+ err := getsockopt(fd, level, opt, unsafe.Pointer(&value), &vallen)
+ return &value, err
+}
+
+func GetsockoptICMPv6Filter(fd, level, opt int) (*ICMPv6Filter, error) {
+ var value ICMPv6Filter
+ vallen := _Socklen(SizeofICMPv6Filter)
+ err := getsockopt(fd, level, opt, unsafe.Pointer(&value), &vallen)
+ return &value, err
+}
+
+//sys recvfrom(fd int, p []byte, flags int, from *RawSockaddrAny, fromlen *_Socklen) (n int, err error)
+//sys sendto(s int, buf []byte, flags int, to unsafe.Pointer, addrlen _Socklen) (err error)
+//sys recvmsg(s int, msg *Msghdr, flags int) (n int, err error)
+
+func Recvmsg(fd int, p, oob []byte, flags int) (n, oobn int, recvflags int, from Sockaddr, err error) {
+ var msg Msghdr
+ var rsa RawSockaddrAny
+ msg.Name = (*byte)(unsafe.Pointer(&rsa))
+ msg.Namelen = uint32(SizeofSockaddrAny)
+ var iov Iovec
+ if len(p) > 0 {
+ iov.Base = (*byte)(unsafe.Pointer(&p[0]))
+ iov.SetLen(len(p))
+ }
+ var dummy byte
+ if len(oob) > 0 {
+ // receive at least one normal byte
+ if len(p) == 0 {
+ iov.Base = &dummy
+ iov.SetLen(1)
+ }
+ msg.Control = (*byte)(unsafe.Pointer(&oob[0]))
+ msg.SetControllen(len(oob))
+ }
+ msg.Iov = &iov
+ msg.Iovlen = 1
+ if n, err = recvmsg(fd, &msg, flags); err != nil {
+ return
+ }
+ oobn = int(msg.Controllen)
+ recvflags = int(msg.Flags)
+ // source address is only specified if the socket is unconnected
+ if rsa.Addr.Family != AF_UNSPEC {
+ from, err = anyToSockaddr(&rsa)
+ }
+ return
+}
+
+//sys sendmsg(s int, msg *Msghdr, flags int) (n int, err error)
+
+func Sendmsg(fd int, p, oob []byte, to Sockaddr, flags int) (err error) {
+ _, err = SendmsgN(fd, p, oob, to, flags)
+ return
+}
+
+func SendmsgN(fd int, p, oob []byte, to Sockaddr, flags int) (n int, err error) {
+ var ptr unsafe.Pointer
+ var salen _Socklen
+ if to != nil {
+ ptr, salen, err = to.sockaddr()
+ if err != nil {
+ return 0, err
+ }
+ }
+ var msg Msghdr
+ msg.Name = (*byte)(unsafe.Pointer(ptr))
+ msg.Namelen = uint32(salen)
+ var iov Iovec
+ if len(p) > 0 {
+ iov.Base = (*byte)(unsafe.Pointer(&p[0]))
+ iov.SetLen(len(p))
+ }
+ var dummy byte
+ if len(oob) > 0 {
+ // send at least one normal byte
+ if len(p) == 0 {
+ iov.Base = &dummy
+ iov.SetLen(1)
+ }
+ msg.Control = (*byte)(unsafe.Pointer(&oob[0]))
+ msg.SetControllen(len(oob))
+ }
+ msg.Iov = &iov
+ msg.Iovlen = 1
+ if n, err = sendmsg(fd, &msg, flags); err != nil {
+ return 0, err
+ }
+ if len(oob) > 0 && len(p) == 0 {
+ n = 0
+ }
+ return n, nil
+}
+
+//sys kevent(kq int, change unsafe.Pointer, nchange int, event unsafe.Pointer, nevent int, timeout *Timespec) (n int, err error)
+
+func Kevent(kq int, changes, events []Kevent_t, timeout *Timespec) (n int, err error) {
+ var change, event unsafe.Pointer
+ if len(changes) > 0 {
+ change = unsafe.Pointer(&changes[0])
+ }
+ if len(events) > 0 {
+ event = unsafe.Pointer(&events[0])
+ }
+ return kevent(kq, change, len(changes), event, len(events), timeout)
+}
+
+//sys sysctl(mib []_C_int, old *byte, oldlen *uintptr, new *byte, newlen uintptr) (err error) = SYS___SYSCTL
+
+// sysctlmib translates name to mib number and appends any additional args.
+func sysctlmib(name string, args ...int) ([]_C_int, error) {
+ // Translate name to mib number.
+ mib, err := nametomib(name)
+ if err != nil {
+ return nil, err
+ }
+
+ for _, a := range args {
+ mib = append(mib, _C_int(a))
+ }
+
+ return mib, nil
+}
+
+func Sysctl(name string) (string, error) {
+ return SysctlArgs(name)
+}
+
+func SysctlArgs(name string, args ...int) (string, error) {
+ mib, err := sysctlmib(name, args...)
+ if err != nil {
+ return "", err
+ }
+
+ // Find size.
+ n := uintptr(0)
+ if err := sysctl(mib, nil, &n, nil, 0); err != nil {
+ return "", err
+ }
+ if n == 0 {
+ return "", nil
+ }
+
+ // Read into buffer of that size.
+ buf := make([]byte, n)
+ if err := sysctl(mib, &buf[0], &n, nil, 0); err != nil {
+ return "", err
+ }
+
+ // Throw away terminating NUL.
+ if n > 0 && buf[n-1] == '\x00' {
+ n--
+ }
+ return string(buf[0:n]), nil
+}
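+
+// For example, Sysctl("kern.ostype") returns the kernel name (e.g. "Darwin"
+// on OS X), and SysctlArgs allows indexed nodes to be addressed by appending
+// additional MIB components.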
+
+func SysctlUint32(name string) (uint32, error) {
+ return SysctlUint32Args(name)
+}
+
+func SysctlUint32Args(name string, args ...int) (uint32, error) {
+ mib, err := sysctlmib(name, args...)
+ if err != nil {
+ return 0, err
+ }
+
+ n := uintptr(4)
+ buf := make([]byte, 4)
+ if err := sysctl(mib, &buf[0], &n, nil, 0); err != nil {
+ return 0, err
+ }
+ if n != 4 {
+ return 0, EIO
+ }
+ return *(*uint32)(unsafe.Pointer(&buf[0])), nil
+}
+
+func SysctlUint64(name string, args ...int) (uint64, error) {
+ mib, err := sysctlmib(name, args...)
+ if err != nil {
+ return 0, err
+ }
+
+ n := uintptr(8)
+ buf := make([]byte, 8)
+ if err := sysctl(mib, &buf[0], &n, nil, 0); err != nil {
+ return 0, err
+ }
+ if n != 8 {
+ return 0, EIO
+ }
+ return *(*uint64)(unsafe.Pointer(&buf[0])), nil
+}
+
+func SysctlRaw(name string, args ...int) ([]byte, error) {
+ mib, err := sysctlmib(name, args...)
+ if err != nil {
+ return nil, err
+ }
+
+ // Find size.
+ n := uintptr(0)
+ if err := sysctl(mib, nil, &n, nil, 0); err != nil {
+ return nil, err
+ }
+ if n == 0 {
+ return nil, nil
+ }
+
+ // Read into buffer of that size.
+ buf := make([]byte, n)
+ if err := sysctl(mib, &buf[0], &n, nil, 0); err != nil {
+ return nil, err
+ }
+
+ // The actual call may return less than the original reported required
+ // size so ensure we deal with that.
+ return buf[:n], nil
+}
+
+//sys utimes(path string, timeval *[2]Timeval) (err error)
+
+func Utimes(path string, tv []Timeval) error {
+ if tv == nil {
+ return utimes(path, nil)
+ }
+ if len(tv) != 2 {
+ return EINVAL
+ }
+ return utimes(path, (*[2]Timeval)(unsafe.Pointer(&tv[0])))
+}
+
+func UtimesNano(path string, ts []Timespec) error {
+ if ts == nil {
+ return utimes(path, nil)
+ }
+ // TODO: The BSDs can do utimensat with SYS_UTIMENSAT but it
+ // isn't supported by darwin so this uses utimes instead
+ if len(ts) != 2 {
+ return EINVAL
+ }
+ // Not as efficient as it could be because Timespec and
+ // Timeval have different types in the different OSes
+ tv := [2]Timeval{
+ NsecToTimeval(TimespecToNsec(ts[0])),
+ NsecToTimeval(TimespecToNsec(ts[1])),
+ }
+ return utimes(path, (*[2]Timeval)(unsafe.Pointer(&tv[0])))
+}
+
+//sys futimes(fd int, timeval *[2]Timeval) (err error)
+
+func Futimes(fd int, tv []Timeval) error {
+ if tv == nil {
+ return futimes(fd, nil)
+ }
+ if len(tv) != 2 {
+ return EINVAL
+ }
+ return futimes(fd, (*[2]Timeval)(unsafe.Pointer(&tv[0])))
+}
+
+//sys fcntl(fd int, cmd int, arg int) (val int, err error)
+
+// TODO: wrap
+// Acct(name nil-string) (err error)
+// Gethostuuid(uuid *byte, timeout *Timespec) (err error)
+// Madvise(addr *byte, len int, behav int) (err error)
+// Mprotect(addr *byte, len int, prot int) (err error)
+// Msync(addr *byte, len int, flags int) (err error)
+// Ptrace(req int, pid int, addr uintptr, data int) (ret uintptr, err error)
+
+var mapper = &mmapper{
+ active: make(map[*byte][]byte),
+ mmap: mmap,
+ munmap: munmap,
+}
+
+func Mmap(fd int, offset int64, length int, prot int, flags int) (data []byte, err error) {
+ return mapper.Mmap(fd, offset, length, prot, flags)
+}
+
+func Munmap(b []byte) (err error) {
+ return mapper.Munmap(b)
+}
diff --git a/vendor/golang.org/x/sys/unix/syscall_darwin.go b/vendor/golang.org/x/sys/unix/syscall_darwin.go
new file mode 100644
index 00000000..0d1771c3
--- /dev/null
+++ b/vendor/golang.org/x/sys/unix/syscall_darwin.go
@@ -0,0 +1,509 @@
+// Copyright 2009,2010 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Darwin system calls.
+// This file is compiled as ordinary Go code,
+// but it is also input to mksyscall,
+// which parses the //sys lines and generates system call stubs.
+// Note that sometimes we use a lowercase //sys name and wrap
+// it in our own nicer implementation, either here or in
+// syscall_bsd.go or syscall_unix.go.
+
+package unix
+
+import (
+ errorspkg "errors"
+ "syscall"
+ "unsafe"
+)
+
+const ImplementsGetwd = true
+
+func Getwd() (string, error) {
+ buf := make([]byte, 2048)
+ attrs, err := getAttrList(".", attrList{CommonAttr: attrCmnFullpath}, buf, 0)
+ if err == nil && len(attrs) == 1 && len(attrs[0]) >= 2 {
+ wd := string(attrs[0])
+ // Sanity check that it's an absolute path and ends
+ // in a null byte, which we then strip.
+ if wd[0] == '/' && wd[len(wd)-1] == 0 {
+ return wd[:len(wd)-1], nil
+ }
+ }
+ // If pkg/os/getwd.go gets ENOTSUP, it will fall back to the
+ // slow algorithm.
+ return "", ENOTSUP
+}
+
+type SockaddrDatalink struct {
+ Len uint8
+ Family uint8
+ Index uint16
+ Type uint8
+ Nlen uint8
+ Alen uint8
+ Slen uint8
+ Data [12]int8
+ raw RawSockaddrDatalink
+}
+
+// Translate "kern.hostname" to []_C_int{0,1,2,3}.
+func nametomib(name string) (mib []_C_int, err error) {
+ const siz = unsafe.Sizeof(mib[0])
+
+ // NOTE(rsc): It seems strange to set the buffer to have
+ // size CTL_MAXNAME+2 but use only CTL_MAXNAME
+ // as the size. I don't know why the +2 is here, but the
+ // kernel uses +2 for its own implementation of this function.
+ // I am scared that if we don't include the +2 here, the kernel
+ // will silently write 2 words farther than we specify
+ // and we'll get memory corruption.
+ var buf [CTL_MAXNAME + 2]_C_int
+ n := uintptr(CTL_MAXNAME) * siz
+
+ p := (*byte)(unsafe.Pointer(&buf[0]))
+ bytes, err := ByteSliceFromString(name)
+ if err != nil {
+ return nil, err
+ }
+
+ // Magic sysctl: "setting" 0.3 to a string name
+ // lets you read back the array of integers form.
+ if err = sysctl([]_C_int{0, 3}, p, &n, &bytes[0], uintptr(len(name))); err != nil {
+ return nil, err
+ }
+ return buf[0 : n/siz], nil
+}
+
+// ParseDirent parses up to max directory entries in buf,
+// appending the names to names. It returns the number
+// bytes consumed from buf, the number of entries added
+// to names, and the new names slice.
+func ParseDirent(buf []byte, max int, names []string) (consumed int, count int, newnames []string) {
+ origlen := len(buf)
+ for max != 0 && len(buf) > 0 {
+ dirent := (*Dirent)(unsafe.Pointer(&buf[0]))
+ if dirent.Reclen == 0 {
+ buf = nil
+ break
+ }
+ buf = buf[dirent.Reclen:]
+ if dirent.Ino == 0 { // File absent in directory.
+ continue
+ }
+ bytes := (*[10000]byte)(unsafe.Pointer(&dirent.Name[0]))
+ var name = string(bytes[0:dirent.Namlen])
+ if name == "." || name == ".." { // Useless names
+ continue
+ }
+ max--
+ count++
+ names = append(names, name)
+ }
+ return origlen - len(buf), count, names
+}
+
+//sys ptrace(request int, pid int, addr uintptr, data uintptr) (err error)
+func PtraceAttach(pid int) (err error) { return ptrace(PT_ATTACH, pid, 0, 0) }
+func PtraceDetach(pid int) (err error) { return ptrace(PT_DETACH, pid, 0, 0) }
+
+const (
+ attrBitMapCount = 5
+ attrCmnFullpath = 0x08000000
+)
+
+type attrList struct {
+ bitmapCount uint16
+ _ uint16
+ CommonAttr uint32
+ VolAttr uint32
+ DirAttr uint32
+ FileAttr uint32
+ Forkattr uint32
+}
+
+func getAttrList(path string, attrList attrList, attrBuf []byte, options uint) (attrs [][]byte, err error) {
+ if len(attrBuf) < 4 {
+ return nil, errorspkg.New("attrBuf too small")
+ }
+ attrList.bitmapCount = attrBitMapCount
+
+ var _p0 *byte
+ _p0, err = BytePtrFromString(path)
+ if err != nil {
+ return nil, err
+ }
+
+ _, _, e1 := Syscall6(
+ SYS_GETATTRLIST,
+ uintptr(unsafe.Pointer(_p0)),
+ uintptr(unsafe.Pointer(&attrList)),
+ uintptr(unsafe.Pointer(&attrBuf[0])),
+ uintptr(len(attrBuf)),
+ uintptr(options),
+ 0,
+ )
+ if e1 != 0 {
+ return nil, e1
+ }
+ size := *(*uint32)(unsafe.Pointer(&attrBuf[0]))
+
+ // dat is the section of attrBuf that contains valid data,
+ // without the 4 byte length header. All attribute offsets
+ // are relative to dat.
+ dat := attrBuf
+ if int(size) < len(attrBuf) {
+ dat = dat[:size]
+ }
+ dat = dat[4:] // remove length prefix
+
+ for i := uint32(0); int(i) < len(dat); {
+ header := dat[i:]
+ if len(header) < 8 {
+ return attrs, errorspkg.New("truncated attribute header")
+ }
+ datOff := *(*int32)(unsafe.Pointer(&header[0]))
+ attrLen := *(*uint32)(unsafe.Pointer(&header[4]))
+ if datOff < 0 || uint32(datOff)+attrLen > uint32(len(dat)) {
+ return attrs, errorspkg.New("truncated results; attrBuf too small")
+ }
+ end := uint32(datOff) + attrLen
+ attrs = append(attrs, dat[datOff:end])
+ i = end
+ if r := i % 4; r != 0 {
+ i += (4 - r)
+ }
+ }
+ return
+}
+
+//sysnb pipe() (r int, w int, err error)
+
+func Pipe(p []int) (err error) {
+ if len(p) != 2 {
+ return EINVAL
+ }
+ p[0], p[1], err = pipe()
+ return
+}
+
+func Getfsstat(buf []Statfs_t, flags int) (n int, err error) {
+ var _p0 unsafe.Pointer
+ var bufsize uintptr
+ if len(buf) > 0 {
+ _p0 = unsafe.Pointer(&buf[0])
+ bufsize = unsafe.Sizeof(Statfs_t{}) * uintptr(len(buf))
+ }
+ r0, _, e1 := Syscall(SYS_GETFSSTAT64, uintptr(_p0), bufsize, uintptr(flags))
+ n = int(r0)
+ if e1 != 0 {
+ err = e1
+ }
+ return
+}
+
+/*
+ * Wrapped
+ */
+
+//sys kill(pid int, signum int, posix int) (err error)
+
+func Kill(pid int, signum syscall.Signal) (err error) { return kill(pid, int(signum), 1) }
+
+/*
+ * Exposed directly
+ */
+//sys Access(path string, mode uint32) (err error)
+//sys Adjtime(delta *Timeval, olddelta *Timeval) (err error)
+//sys Chdir(path string) (err error)
+//sys Chflags(path string, flags int) (err error)
+//sys Chmod(path string, mode uint32) (err error)
+//sys Chown(path string, uid int, gid int) (err error)
+//sys Chroot(path string) (err error)
+//sys Close(fd int) (err error)
+//sys Dup(fd int) (nfd int, err error)
+//sys Dup2(from int, to int) (err error)
+//sys Exchangedata(path1 string, path2 string, options int) (err error)
+//sys Exit(code int)
+//sys Fchdir(fd int) (err error)
+//sys Fchflags(fd int, flags int) (err error)
+//sys Fchmod(fd int, mode uint32) (err error)
+//sys Fchown(fd int, uid int, gid int) (err error)
+//sys Flock(fd int, how int) (err error)
+//sys Fpathconf(fd int, name int) (val int, err error)
+//sys Fstat(fd int, stat *Stat_t) (err error) = SYS_FSTAT64
+//sys Fstatfs(fd int, stat *Statfs_t) (err error) = SYS_FSTATFS64
+//sys Fsync(fd int) (err error)
+//sys Ftruncate(fd int, length int64) (err error)
+//sys Getdirentries(fd int, buf []byte, basep *uintptr) (n int, err error) = SYS_GETDIRENTRIES64
+//sys Getdtablesize() (size int)
+//sysnb Getegid() (egid int)
+//sysnb Geteuid() (uid int)
+//sysnb Getgid() (gid int)
+//sysnb Getpgid(pid int) (pgid int, err error)
+//sysnb Getpgrp() (pgrp int)
+//sysnb Getpid() (pid int)
+//sysnb Getppid() (ppid int)
+//sys Getpriority(which int, who int) (prio int, err error)
+//sysnb Getrlimit(which int, lim *Rlimit) (err error)
+//sysnb Getrusage(who int, rusage *Rusage) (err error)
+//sysnb Getsid(pid int) (sid int, err error)
+//sysnb Getuid() (uid int)
+//sysnb Issetugid() (tainted bool)
+//sys Kqueue() (fd int, err error)
+//sys Lchown(path string, uid int, gid int) (err error)
+//sys Link(path string, link string) (err error)
+//sys Listen(s int, backlog int) (err error)
+//sys Lstat(path string, stat *Stat_t) (err error) = SYS_LSTAT64
+//sys Mkdir(path string, mode uint32) (err error)
+//sys Mkfifo(path string, mode uint32) (err error)
+//sys Mknod(path string, mode uint32, dev int) (err error)
+//sys Mlock(b []byte) (err error)
+//sys Mlockall(flags int) (err error)
+//sys Mprotect(b []byte, prot int) (err error)
+//sys Munlock(b []byte) (err error)
+//sys Munlockall() (err error)
+//sys Open(path string, mode int, perm uint32) (fd int, err error)
+//sys Pathconf(path string, name int) (val int, err error)
+//sys Pread(fd int, p []byte, offset int64) (n int, err error)
+//sys Pwrite(fd int, p []byte, offset int64) (n int, err error)
+//sys read(fd int, p []byte) (n int, err error)
+//sys Readlink(path string, buf []byte) (n int, err error)
+//sys Rename(from string, to string) (err error)
+//sys Revoke(path string) (err error)
+//sys Rmdir(path string) (err error)
+//sys Seek(fd int, offset int64, whence int) (newoffset int64, err error) = SYS_LSEEK
+//sys Select(n int, r *FdSet, w *FdSet, e *FdSet, timeout *Timeval) (err error)
+//sys Setegid(egid int) (err error)
+//sysnb Seteuid(euid int) (err error)
+//sysnb Setgid(gid int) (err error)
+//sys Setlogin(name string) (err error)
+//sysnb Setpgid(pid int, pgid int) (err error)
+//sys Setpriority(which int, who int, prio int) (err error)
+//sys Setprivexec(flag int) (err error)
+//sysnb Setregid(rgid int, egid int) (err error)
+//sysnb Setreuid(ruid int, euid int) (err error)
+//sysnb Setrlimit(which int, lim *Rlimit) (err error)
+//sysnb Setsid() (pid int, err error)
+//sysnb Settimeofday(tp *Timeval) (err error)
+//sysnb Setuid(uid int) (err error)
+//sys Stat(path string, stat *Stat_t) (err error) = SYS_STAT64
+//sys Statfs(path string, stat *Statfs_t) (err error) = SYS_STATFS64
+//sys Symlink(path string, link string) (err error)
+//sys Sync() (err error)
+//sys Truncate(path string, length int64) (err error)
+//sys Umask(newmask int) (oldmask int)
+//sys Undelete(path string) (err error)
+//sys Unlink(path string) (err error)
+//sys Unmount(path string, flags int) (err error)
+//sys write(fd int, p []byte) (n int, err error)
+//sys mmap(addr uintptr, length uintptr, prot int, flag int, fd int, pos int64) (ret uintptr, err error)
+//sys munmap(addr uintptr, length uintptr) (err error)
+//sys readlen(fd int, buf *byte, nbuf int) (n int, err error) = SYS_READ
+//sys writelen(fd int, buf *byte, nbuf int) (n int, err error) = SYS_WRITE
+
+/*
+ * Unimplemented
+ */
+// Profil
+// Sigaction
+// Sigprocmask
+// Getlogin
+// Sigpending
+// Sigaltstack
+// Ioctl
+// Reboot
+// Execve
+// Vfork
+// Sbrk
+// Sstk
+// Ovadvise
+// Mincore
+// Setitimer
+// Swapon
+// Select
+// Sigsuspend
+// Readv
+// Writev
+// Nfssvc
+// Getfh
+// Quotactl
+// Mount
+// Csops
+// Waitid
+// Add_profil
+// Kdebug_trace
+// Sigreturn
+// Mmap
+// Mlock
+// Munlock
+// Atsocket
+// Kqueue_from_portset_np
+// Kqueue_portset
+// Getattrlist
+// Setattrlist
+// Getdirentriesattr
+// Searchfs
+// Delete
+// Copyfile
+// Poll
+// Watchevent
+// Waitevent
+// Modwatch
+// Getxattr
+// Fgetxattr
+// Setxattr
+// Fsetxattr
+// Removexattr
+// Fremovexattr
+// Listxattr
+// Flistxattr
+// Fsctl
+// Initgroups
+// Posix_spawn
+// Nfsclnt
+// Fhopen
+// Minherit
+// Semsys
+// Msgsys
+// Shmsys
+// Semctl
+// Semget
+// Semop
+// Msgctl
+// Msgget
+// Msgsnd
+// Msgrcv
+// Shmat
+// Shmctl
+// Shmdt
+// Shmget
+// Shm_open
+// Shm_unlink
+// Sem_open
+// Sem_close
+// Sem_unlink
+// Sem_wait
+// Sem_trywait
+// Sem_post
+// Sem_getvalue
+// Sem_init
+// Sem_destroy
+// Open_extended
+// Umask_extended
+// Stat_extended
+// Lstat_extended
+// Fstat_extended
+// Chmod_extended
+// Fchmod_extended
+// Access_extended
+// Settid
+// Gettid
+// Setsgroups
+// Getsgroups
+// Setwgroups
+// Getwgroups
+// Mkfifo_extended
+// Mkdir_extended
+// Identitysvc
+// Shared_region_check_np
+// Shared_region_map_np
+// __pthread_mutex_destroy
+// __pthread_mutex_init
+// __pthread_mutex_lock
+// __pthread_mutex_trylock
+// __pthread_mutex_unlock
+// __pthread_cond_init
+// __pthread_cond_destroy
+// __pthread_cond_broadcast
+// __pthread_cond_signal
+// Setsid_with_pid
+// __pthread_cond_timedwait
+// Aio_fsync
+// Aio_return
+// Aio_suspend
+// Aio_cancel
+// Aio_error
+// Aio_read
+// Aio_write
+// Lio_listio
+// __pthread_cond_wait
+// Iopolicysys
+// Mlockall
+// Munlockall
+// __pthread_kill
+// __pthread_sigmask
+// __sigwait
+// __disable_threadsignal
+// __pthread_markcancel
+// __pthread_canceled
+// __semwait_signal
+// Proc_info
+// sendfile
+// Stat64_extended
+// Lstat64_extended
+// Fstat64_extended
+// __pthread_chdir
+// __pthread_fchdir
+// Audit
+// Auditon
+// Getauid
+// Setauid
+// Getaudit
+// Setaudit
+// Getaudit_addr
+// Setaudit_addr
+// Auditctl
+// Bsdthread_create
+// Bsdthread_terminate
+// Stack_snapshot
+// Bsdthread_register
+// Workq_open
+// Workq_ops
+// __mac_execve
+// __mac_syscall
+// __mac_get_file
+// __mac_set_file
+// __mac_get_link
+// __mac_set_link
+// __mac_get_proc
+// __mac_set_proc
+// __mac_get_fd
+// __mac_set_fd
+// __mac_get_pid
+// __mac_get_lcid
+// __mac_get_lctx
+// __mac_set_lctx
+// Setlcid
+// Read_nocancel
+// Write_nocancel
+// Open_nocancel
+// Close_nocancel
+// Wait4_nocancel
+// Recvmsg_nocancel
+// Sendmsg_nocancel
+// Recvfrom_nocancel
+// Accept_nocancel
+// Msync_nocancel
+// Fcntl_nocancel
+// Select_nocancel
+// Fsync_nocancel
+// Connect_nocancel
+// Sigsuspend_nocancel
+// Readv_nocancel
+// Writev_nocancel
+// Sendto_nocancel
+// Pread_nocancel
+// Pwrite_nocancel
+// Waitid_nocancel
+// Poll_nocancel
+// Msgsnd_nocancel
+// Msgrcv_nocancel
+// Sem_wait_nocancel
+// Aio_suspend_nocancel
+// __sigwait_nocancel
+// __semwait_signal_nocancel
+// __mac_mount
+// __mac_get_mount
+// __mac_getfsstat
diff --git a/vendor/golang.org/x/sys/unix/syscall_darwin_386.go b/vendor/golang.org/x/sys/unix/syscall_darwin_386.go
new file mode 100644
index 00000000..3195c8bf
--- /dev/null
+++ b/vendor/golang.org/x/sys/unix/syscall_darwin_386.go
@@ -0,0 +1,79 @@
+// Copyright 2009 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// +build 386,darwin
+
+package unix
+
+import (
+ "syscall"
+ "unsafe"
+)
+
+func Getpagesize() int { return 4096 }
+
+func TimespecToNsec(ts Timespec) int64 { return int64(ts.Sec)*1e9 + int64(ts.Nsec) }
+
+func NsecToTimespec(nsec int64) (ts Timespec) {
+ ts.Sec = int32(nsec / 1e9)
+ ts.Nsec = int32(nsec % 1e9)
+ return
+}
+
+func TimevalToNsec(tv Timeval) int64 { return int64(tv.Sec)*1e9 + int64(tv.Usec)*1e3 }
+
+func NsecToTimeval(nsec int64) (tv Timeval) {
+ nsec += 999 // round up to microsecond
+ tv.Usec = int32(nsec % 1e9 / 1e3)
+ tv.Sec = int32(nsec / 1e9)
+ return
+}
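+
+// For example, NsecToTimeval(1500) yields Sec=0, Usec=2: 1.5µs is rounded up
+// to the next whole microsecond.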
+
+//sysnb gettimeofday(tp *Timeval) (sec int32, usec int32, err error)
+func Gettimeofday(tv *Timeval) (err error) {
+ // The tv passed to gettimeofday must be non-nil
+ // but is otherwise unused. The answers come back
+ // in the two registers.
+ sec, usec, err := gettimeofday(tv)
+ tv.Sec = int32(sec)
+ tv.Usec = int32(usec)
+ return err
+}
+
+func SetKevent(k *Kevent_t, fd, mode, flags int) {
+ k.Ident = uint32(fd)
+ k.Filter = int16(mode)
+ k.Flags = uint16(flags)
+}
+
+func (iov *Iovec) SetLen(length int) {
+ iov.Len = uint32(length)
+}
+
+func (msghdr *Msghdr) SetControllen(length int) {
+ msghdr.Controllen = uint32(length)
+}
+
+func (cmsg *Cmsghdr) SetLen(length int) {
+ cmsg.Len = uint32(length)
+}
+
+func sendfile(outfd int, infd int, offset *int64, count int) (written int, err error) {
+ var length = uint64(count)
+
+ _, _, e1 := Syscall9(SYS_SENDFILE, uintptr(infd), uintptr(outfd), uintptr(*offset), uintptr(*offset>>32), uintptr(unsafe.Pointer(&length)), 0, 0, 0, 0)
+
+ written = int(length)
+
+ if e1 != 0 {
+ err = e1
+ }
+ return
+}
+
+func Syscall9(num, a1, a2, a3, a4, a5, a6, a7, a8, a9 uintptr) (r1, r2 uintptr, err syscall.Errno)
+
+// SYS___SYSCTL is used by syscall_bsd.go for all BSDs, but in modern versions
+// of darwin/386 the syscall is called sysctl instead of __sysctl.
+const SYS___SYSCTL = SYS_SYSCTL
diff --git a/vendor/golang.org/x/sys/unix/syscall_darwin_amd64.go b/vendor/golang.org/x/sys/unix/syscall_darwin_amd64.go
new file mode 100644
index 00000000..7adb98de
--- /dev/null
+++ b/vendor/golang.org/x/sys/unix/syscall_darwin_amd64.go
@@ -0,0 +1,81 @@
+// Copyright 2009 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// +build amd64,darwin
+
+package unix
+
+import (
+ "syscall"
+ "unsafe"
+)
+
+//sys Fchmodat(dirfd int, path string, mode uint32, flags int) (err error)
+
+func Getpagesize() int { return 4096 }
+
+func TimespecToNsec(ts Timespec) int64 { return int64(ts.Sec)*1e9 + int64(ts.Nsec) }
+
+func NsecToTimespec(nsec int64) (ts Timespec) {
+ ts.Sec = nsec / 1e9
+ ts.Nsec = nsec % 1e9
+ return
+}
+
+func TimevalToNsec(tv Timeval) int64 { return int64(tv.Sec)*1e9 + int64(tv.Usec)*1e3 }
+
+func NsecToTimeval(nsec int64) (tv Timeval) {
+ nsec += 999 // round up to microsecond
+ tv.Usec = int32(nsec % 1e9 / 1e3)
+ tv.Sec = int64(nsec / 1e9)
+ return
+}
+
+//sysnb gettimeofday(tp *Timeval) (sec int64, usec int32, err error)
+func Gettimeofday(tv *Timeval) (err error) {
+ // The tv passed to gettimeofday must be non-nil
+ // but is otherwise unused. The answers come back
+ // in the two registers.
+ sec, usec, err := gettimeofday(tv)
+ tv.Sec = sec
+ tv.Usec = usec
+ return err
+}
+
+func SetKevent(k *Kevent_t, fd, mode, flags int) {
+ k.Ident = uint64(fd)
+ k.Filter = int16(mode)
+ k.Flags = uint16(flags)
+}
+
+func (iov *Iovec) SetLen(length int) {
+ iov.Len = uint64(length)
+}
+
+func (msghdr *Msghdr) SetControllen(length int) {
+ msghdr.Controllen = uint32(length)
+}
+
+func (cmsg *Cmsghdr) SetLen(length int) {
+ cmsg.Len = uint32(length)
+}
+
+func sendfile(outfd int, infd int, offset *int64, count int) (written int, err error) {
+ var length = uint64(count)
+
+ _, _, e1 := Syscall6(SYS_SENDFILE, uintptr(infd), uintptr(outfd), uintptr(*offset), uintptr(unsafe.Pointer(&length)), 0, 0)
+
+ written = int(length)
+
+ if e1 != 0 {
+ err = e1
+ }
+ return
+}
+
+func Syscall9(num, a1, a2, a3, a4, a5, a6, a7, a8, a9 uintptr) (r1, r2 uintptr, err syscall.Errno)
+
+// SYS___SYSCTL is used by syscall_bsd.go for all BSDs, but in modern versions
+// of darwin/amd64 the syscall is called sysctl instead of __sysctl.
+const SYS___SYSCTL = SYS_SYSCTL
diff --git a/vendor/golang.org/x/sys/unix/syscall_darwin_arm.go b/vendor/golang.org/x/sys/unix/syscall_darwin_arm.go
new file mode 100644
index 00000000..e47ffd73
--- /dev/null
+++ b/vendor/golang.org/x/sys/unix/syscall_darwin_arm.go
@@ -0,0 +1,73 @@
+// Copyright 2015 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package unix
+
+import (
+ "syscall"
+ "unsafe"
+)
+
+func Getpagesize() int { return 4096 }
+
+func TimespecToNsec(ts Timespec) int64 { return int64(ts.Sec)*1e9 + int64(ts.Nsec) }
+
+func NsecToTimespec(nsec int64) (ts Timespec) {
+ ts.Sec = int32(nsec / 1e9)
+ ts.Nsec = int32(nsec % 1e9)
+ return
+}
+
+func TimevalToNsec(tv Timeval) int64 { return int64(tv.Sec)*1e9 + int64(tv.Usec)*1e3 }
+
+func NsecToTimeval(nsec int64) (tv Timeval) {
+ nsec += 999 // round up to microsecond
+ tv.Usec = int32(nsec % 1e9 / 1e3)
+ tv.Sec = int32(nsec / 1e9)
+ return
+}
+
+//sysnb gettimeofday(tp *Timeval) (sec int32, usec int32, err error)
+func Gettimeofday(tv *Timeval) (err error) {
+ // The tv passed to gettimeofday must be non-nil
+ // but is otherwise unused. The answers come back
+ // in the two registers.
+ sec, usec, err := gettimeofday(tv)
+ tv.Sec = int32(sec)
+ tv.Usec = int32(usec)
+ return err
+}
+
+func SetKevent(k *Kevent_t, fd, mode, flags int) {
+ k.Ident = uint32(fd)
+ k.Filter = int16(mode)
+ k.Flags = uint16(flags)
+}
+
+func (iov *Iovec) SetLen(length int) {
+ iov.Len = uint32(length)
+}
+
+func (msghdr *Msghdr) SetControllen(length int) {
+ msghdr.Controllen = uint32(length)
+}
+
+func (cmsg *Cmsghdr) SetLen(length int) {
+ cmsg.Len = uint32(length)
+}
+
+func sendfile(outfd int, infd int, offset *int64, count int) (written int, err error) {
+ var length = uint64(count)
+
+ _, _, e1 := Syscall9(SYS_SENDFILE, uintptr(infd), uintptr(outfd), uintptr(*offset), uintptr(*offset>>32), uintptr(unsafe.Pointer(&length)), 0, 0, 0, 0)
+
+ written = int(length)
+
+ if e1 != 0 {
+ err = e1
+ }
+ return
+}
+
+func Syscall9(num, a1, a2, a3, a4, a5, a6, a7, a8, a9 uintptr) (r1, r2 uintptr, err syscall.Errno) // sic
diff --git a/vendor/golang.org/x/sys/unix/syscall_darwin_arm64.go b/vendor/golang.org/x/sys/unix/syscall_darwin_arm64.go
new file mode 100644
index 00000000..2560a959
--- /dev/null
+++ b/vendor/golang.org/x/sys/unix/syscall_darwin_arm64.go
@@ -0,0 +1,79 @@
+// Copyright 2015 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// +build arm64,darwin
+
+package unix
+
+import (
+ "syscall"
+ "unsafe"
+)
+
+func Getpagesize() int { return 16384 }
+
+func TimespecToNsec(ts Timespec) int64 { return int64(ts.Sec)*1e9 + int64(ts.Nsec) }
+
+func NsecToTimespec(nsec int64) (ts Timespec) {
+ ts.Sec = nsec / 1e9
+ ts.Nsec = nsec % 1e9
+ return
+}
+
+func TimevalToNsec(tv Timeval) int64 { return int64(tv.Sec)*1e9 + int64(tv.Usec)*1e3 }
+
+func NsecToTimeval(nsec int64) (tv Timeval) {
+ nsec += 999 // round up to microsecond
+ tv.Usec = int32(nsec % 1e9 / 1e3)
+ tv.Sec = int64(nsec / 1e9)
+ return
+}
+
+//sysnb gettimeofday(tp *Timeval) (sec int64, usec int32, err error)
+func Gettimeofday(tv *Timeval) (err error) {
+ // The tv passed to gettimeofday must be non-nil
+ // but is otherwise unused. The answers come back
+ // in the two registers.
+ sec, usec, err := gettimeofday(tv)
+ tv.Sec = sec
+ tv.Usec = usec
+ return err
+}
+
+func SetKevent(k *Kevent_t, fd, mode, flags int) {
+ k.Ident = uint64(fd)
+ k.Filter = int16(mode)
+ k.Flags = uint16(flags)
+}
+
+func (iov *Iovec) SetLen(length int) {
+ iov.Len = uint64(length)
+}
+
+func (msghdr *Msghdr) SetControllen(length int) {
+ msghdr.Controllen = uint32(length)
+}
+
+func (cmsg *Cmsghdr) SetLen(length int) {
+ cmsg.Len = uint32(length)
+}
+
+func sendfile(outfd int, infd int, offset *int64, count int) (written int, err error) {
+ var length = uint64(count)
+
+ _, _, e1 := Syscall6(SYS_SENDFILE, uintptr(infd), uintptr(outfd), uintptr(*offset), uintptr(unsafe.Pointer(&length)), 0, 0)
+
+ written = int(length)
+
+ if e1 != 0 {
+ err = e1
+ }
+ return
+}
+
+func Syscall9(num, a1, a2, a3, a4, a5, a6, a7, a8, a9 uintptr) (r1, r2 uintptr, err syscall.Errno) // sic
+
+// SYS___SYSCTL is used by syscall_bsd.go for all BSDs, but in modern versions
+// of darwin/arm64 the syscall is called sysctl instead of __sysctl.
+const SYS___SYSCTL = SYS_SYSCTL
diff --git a/vendor/golang.org/x/sys/unix/syscall_dragonfly.go b/vendor/golang.org/x/sys/unix/syscall_dragonfly.go
new file mode 100644
index 00000000..fbbe0dce
--- /dev/null
+++ b/vendor/golang.org/x/sys/unix/syscall_dragonfly.go
@@ -0,0 +1,411 @@
+// Copyright 2009,2010 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// DragonFly BSD system calls.
+// This file is compiled as ordinary Go code,
+// but it is also input to mksyscall,
+// which parses the //sys lines and generates system call stubs.
+// Note that sometimes we use a lowercase //sys name and wrap
+// it in our own nicer implementation, either here or in
+// syscall_bsd.go or syscall_unix.go.
+
+package unix
+
+import "unsafe"
+
+type SockaddrDatalink struct {
+ Len uint8
+ Family uint8
+ Index uint16
+ Type uint8
+ Nlen uint8
+ Alen uint8
+ Slen uint8
+ Data [12]int8
+ Rcf uint16
+ Route [16]uint16
+ raw RawSockaddrDatalink
+}
+
+// Translate "kern.hostname" to []_C_int{0,1,2,3}.
+func nametomib(name string) (mib []_C_int, err error) {
+ const siz = unsafe.Sizeof(mib[0])
+
+ // NOTE(rsc): It seems strange to set the buffer to have
+ // size CTL_MAXNAME+2 but use only CTL_MAXNAME
+ // as the size. I don't know why the +2 is here, but the
+ // kernel uses +2 for its own implementation of this function.
+ // I am scared that if we don't include the +2 here, the kernel
+ // will silently write 2 words farther than we specify
+ // and we'll get memory corruption.
+ var buf [CTL_MAXNAME + 2]_C_int
+ n := uintptr(CTL_MAXNAME) * siz
+
+ p := (*byte)(unsafe.Pointer(&buf[0]))
+ bytes, err := ByteSliceFromString(name)
+ if err != nil {
+ return nil, err
+ }
+
+	// Magic sysctl: "setting" MIB 0.3 to a string name
+	// lets you read back that name's MIB as an array of integers.
+ if err = sysctl([]_C_int{0, 3}, p, &n, &bytes[0], uintptr(len(name))); err != nil {
+ return nil, err
+ }
+ return buf[0 : n/siz], nil
+}
+
+// ParseDirent parses up to max directory entries in buf,
+// appending the names to names. It returns the number of
+// bytes consumed from buf, the number of entries added
+// to names, and the new names slice.
+func ParseDirent(buf []byte, max int, names []string) (consumed int, count int, newnames []string) {
+ origlen := len(buf)
+ for max != 0 && len(buf) > 0 {
+ dirent := (*Dirent)(unsafe.Pointer(&buf[0]))
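+		// The record is a 16-byte fixed dirent header followed by the name
+		// and its terminating NUL, rounded up to a multiple of 8 bytes.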
+ reclen := int(16+dirent.Namlen+1+7) & ^7
+ buf = buf[reclen:]
+ if dirent.Fileno == 0 { // File absent in directory.
+ continue
+ }
+ bytes := (*[10000]byte)(unsafe.Pointer(&dirent.Name[0]))
+ var name = string(bytes[0:dirent.Namlen])
+ if name == "." || name == ".." { // Useless names
+ continue
+ }
+ max--
+ count++
+ names = append(names, name)
+ }
+ return origlen - len(buf), count, names
+}
+
+//sysnb pipe() (r int, w int, err error)
+
+func Pipe(p []int) (err error) {
+ if len(p) != 2 {
+ return EINVAL
+ }
+ p[0], p[1], err = pipe()
+ return
+}
+
+//sys extpread(fd int, p []byte, flags int, offset int64) (n int, err error)
+func Pread(fd int, p []byte, offset int64) (n int, err error) {
+ return extpread(fd, p, 0, offset)
+}
+
+//sys extpwrite(fd int, p []byte, flags int, offset int64) (n int, err error)
+func Pwrite(fd int, p []byte, offset int64) (n int, err error) {
+ return extpwrite(fd, p, 0, offset)
+}
+
+func Getfsstat(buf []Statfs_t, flags int) (n int, err error) {
+ var _p0 unsafe.Pointer
+ var bufsize uintptr
+ if len(buf) > 0 {
+ _p0 = unsafe.Pointer(&buf[0])
+ bufsize = unsafe.Sizeof(Statfs_t{}) * uintptr(len(buf))
+ }
+ r0, _, e1 := Syscall(SYS_GETFSSTAT, uintptr(_p0), bufsize, uintptr(flags))
+ n = int(r0)
+ if e1 != 0 {
+ err = e1
+ }
+ return
+}
+
+/*
+ * Exposed directly
+ */
+//sys Access(path string, mode uint32) (err error)
+//sys Adjtime(delta *Timeval, olddelta *Timeval) (err error)
+//sys Chdir(path string) (err error)
+//sys Chflags(path string, flags int) (err error)
+//sys Chmod(path string, mode uint32) (err error)
+//sys Chown(path string, uid int, gid int) (err error)
+//sys Chroot(path string) (err error)
+//sys Close(fd int) (err error)
+//sys Dup(fd int) (nfd int, err error)
+//sys Dup2(from int, to int) (err error)
+//sys Exit(code int)
+//sys Fchdir(fd int) (err error)
+//sys Fchflags(fd int, flags int) (err error)
+//sys Fchmod(fd int, mode uint32) (err error)
+//sys Fchown(fd int, uid int, gid int) (err error)
+//sys Flock(fd int, how int) (err error)
+//sys Fpathconf(fd int, name int) (val int, err error)
+//sys Fstat(fd int, stat *Stat_t) (err error)
+//sys Fstatfs(fd int, stat *Statfs_t) (err error)
+//sys Fsync(fd int) (err error)
+//sys Ftruncate(fd int, length int64) (err error)
+//sys Getdirentries(fd int, buf []byte, basep *uintptr) (n int, err error)
+//sys Getdtablesize() (size int)
+//sysnb Getegid() (egid int)
+//sysnb Geteuid() (uid int)
+//sysnb Getgid() (gid int)
+//sysnb Getpgid(pid int) (pgid int, err error)
+//sysnb Getpgrp() (pgrp int)
+//sysnb Getpid() (pid int)
+//sysnb Getppid() (ppid int)
+//sys Getpriority(which int, who int) (prio int, err error)
+//sysnb Getrlimit(which int, lim *Rlimit) (err error)
+//sysnb Getrusage(who int, rusage *Rusage) (err error)
+//sysnb Getsid(pid int) (sid int, err error)
+//sysnb Gettimeofday(tv *Timeval) (err error)
+//sysnb Getuid() (uid int)
+//sys Issetugid() (tainted bool)
+//sys Kill(pid int, signum syscall.Signal) (err error)
+//sys Kqueue() (fd int, err error)
+//sys Lchown(path string, uid int, gid int) (err error)
+//sys Link(path string, link string) (err error)
+//sys Listen(s int, backlog int) (err error)
+//sys Lstat(path string, stat *Stat_t) (err error)
+//sys Mkdir(path string, mode uint32) (err error)
+//sys Mkfifo(path string, mode uint32) (err error)
+//sys Mknod(path string, mode uint32, dev int) (err error)
+//sys Mlock(b []byte) (err error)
+//sys Mlockall(flags int) (err error)
+//sys Mprotect(b []byte, prot int) (err error)
+//sys Munlock(b []byte) (err error)
+//sys Munlockall() (err error)
+//sys Nanosleep(time *Timespec, leftover *Timespec) (err error)
+//sys Open(path string, mode int, perm uint32) (fd int, err error)
+//sys Pathconf(path string, name int) (val int, err error)
+//sys read(fd int, p []byte) (n int, err error)
+//sys Readlink(path string, buf []byte) (n int, err error)
+//sys Rename(from string, to string) (err error)
+//sys Revoke(path string) (err error)
+//sys Rmdir(path string) (err error)
+//sys Seek(fd int, offset int64, whence int) (newoffset int64, err error) = SYS_LSEEK
+//sys Select(n int, r *FdSet, w *FdSet, e *FdSet, timeout *Timeval) (err error)
+//sysnb Setegid(egid int) (err error)
+//sysnb Seteuid(euid int) (err error)
+//sysnb Setgid(gid int) (err error)
+//sys Setlogin(name string) (err error)
+//sysnb Setpgid(pid int, pgid int) (err error)
+//sys Setpriority(which int, who int, prio int) (err error)
+//sysnb Setregid(rgid int, egid int) (err error)
+//sysnb Setreuid(ruid int, euid int) (err error)
+//sysnb Setresgid(rgid int, egid int, sgid int) (err error)
+//sysnb Setresuid(ruid int, euid int, suid int) (err error)
+//sysnb Setrlimit(which int, lim *Rlimit) (err error)
+//sysnb Setsid() (pid int, err error)
+//sysnb Settimeofday(tp *Timeval) (err error)
+//sysnb Setuid(uid int) (err error)
+//sys Stat(path string, stat *Stat_t) (err error)
+//sys Statfs(path string, stat *Statfs_t) (err error)
+//sys Symlink(path string, link string) (err error)
+//sys Sync() (err error)
+//sys Truncate(path string, length int64) (err error)
+//sys Umask(newmask int) (oldmask int)
+//sys Undelete(path string) (err error)
+//sys Unlink(path string) (err error)
+//sys Unmount(path string, flags int) (err error)
+//sys write(fd int, p []byte) (n int, err error)
+//sys mmap(addr uintptr, length uintptr, prot int, flag int, fd int, pos int64) (ret uintptr, err error)
+//sys munmap(addr uintptr, length uintptr) (err error)
+//sys readlen(fd int, buf *byte, nbuf int) (n int, err error) = SYS_READ
+//sys writelen(fd int, buf *byte, nbuf int) (n int, err error) = SYS_WRITE
+
+/*
+ * Unimplemented
+ * TODO(jsing): Update this list for DragonFly.
+ */
+// Profil
+// Sigaction
+// Sigprocmask
+// Getlogin
+// Sigpending
+// Sigaltstack
+// Ioctl
+// Reboot
+// Execve
+// Vfork
+// Sbrk
+// Sstk
+// Ovadvise
+// Mincore
+// Setitimer
+// Swapon
+// Select
+// Sigsuspend
+// Readv
+// Writev
+// Nfssvc
+// Getfh
+// Quotactl
+// Mount
+// Csops
+// Waitid
+// Add_profil
+// Kdebug_trace
+// Sigreturn
+// Mmap
+// Atsocket
+// Kqueue_from_portset_np
+// Kqueue_portset
+// Getattrlist
+// Setattrlist
+// Getdirentriesattr
+// Searchfs
+// Delete
+// Copyfile
+// Poll
+// Watchevent
+// Waitevent
+// Modwatch
+// Getxattr
+// Fgetxattr
+// Setxattr
+// Fsetxattr
+// Removexattr
+// Fremovexattr
+// Listxattr
+// Flistxattr
+// Fsctl
+// Initgroups
+// Posix_spawn
+// Nfsclnt
+// Fhopen
+// Minherit
+// Semsys
+// Msgsys
+// Shmsys
+// Semctl
+// Semget
+// Semop
+// Msgctl
+// Msgget
+// Msgsnd
+// Msgrcv
+// Shmat
+// Shmctl
+// Shmdt
+// Shmget
+// Shm_open
+// Shm_unlink
+// Sem_open
+// Sem_close
+// Sem_unlink
+// Sem_wait
+// Sem_trywait
+// Sem_post
+// Sem_getvalue
+// Sem_init
+// Sem_destroy
+// Open_extended
+// Umask_extended
+// Stat_extended
+// Lstat_extended
+// Fstat_extended
+// Chmod_extended
+// Fchmod_extended
+// Access_extended
+// Settid
+// Gettid
+// Setsgroups
+// Getsgroups
+// Setwgroups
+// Getwgroups
+// Mkfifo_extended
+// Mkdir_extended
+// Identitysvc
+// Shared_region_check_np
+// Shared_region_map_np
+// __pthread_mutex_destroy
+// __pthread_mutex_init
+// __pthread_mutex_lock
+// __pthread_mutex_trylock
+// __pthread_mutex_unlock
+// __pthread_cond_init
+// __pthread_cond_destroy
+// __pthread_cond_broadcast
+// __pthread_cond_signal
+// Setsid_with_pid
+// __pthread_cond_timedwait
+// Aio_fsync
+// Aio_return
+// Aio_suspend
+// Aio_cancel
+// Aio_error
+// Aio_read
+// Aio_write
+// Lio_listio
+// __pthread_cond_wait
+// Iopolicysys
+// __pthread_kill
+// __pthread_sigmask
+// __sigwait
+// __disable_threadsignal
+// __pthread_markcancel
+// __pthread_canceled
+// __semwait_signal
+// Proc_info
+// Stat64_extended
+// Lstat64_extended
+// Fstat64_extended
+// __pthread_chdir
+// __pthread_fchdir
+// Audit
+// Auditon
+// Getauid
+// Setauid
+// Getaudit
+// Setaudit
+// Getaudit_addr
+// Setaudit_addr
+// Auditctl
+// Bsdthread_create
+// Bsdthread_terminate
+// Stack_snapshot
+// Bsdthread_register
+// Workq_open
+// Workq_ops
+// __mac_execve
+// __mac_syscall
+// __mac_get_file
+// __mac_set_file
+// __mac_get_link
+// __mac_set_link
+// __mac_get_proc
+// __mac_set_proc
+// __mac_get_fd
+// __mac_set_fd
+// __mac_get_pid
+// __mac_get_lcid
+// __mac_get_lctx
+// __mac_set_lctx
+// Setlcid
+// Read_nocancel
+// Write_nocancel
+// Open_nocancel
+// Close_nocancel
+// Wait4_nocancel
+// Recvmsg_nocancel
+// Sendmsg_nocancel
+// Recvfrom_nocancel
+// Accept_nocancel
+// Msync_nocancel
+// Fcntl_nocancel
+// Select_nocancel
+// Fsync_nocancel
+// Connect_nocancel
+// Sigsuspend_nocancel
+// Readv_nocancel
+// Writev_nocancel
+// Sendto_nocancel
+// Pread_nocancel
+// Pwrite_nocancel
+// Waitid_nocancel
+// Poll_nocancel
+// Msgsnd_nocancel
+// Msgrcv_nocancel
+// Sem_wait_nocancel
+// Aio_suspend_nocancel
+// __sigwait_nocancel
+// __semwait_signal_nocancel
+// __mac_mount
+// __mac_get_mount
+// __mac_getfsstat
diff --git a/vendor/golang.org/x/sys/unix/syscall_dragonfly_386.go b/vendor/golang.org/x/sys/unix/syscall_dragonfly_386.go
new file mode 100644
index 00000000..41c2e697
--- /dev/null
+++ b/vendor/golang.org/x/sys/unix/syscall_dragonfly_386.go
@@ -0,0 +1,63 @@
+// Copyright 2009 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// +build 386,dragonfly
+
+package unix
+
+import (
+ "syscall"
+ "unsafe"
+)
+
+func Getpagesize() int { return 4096 }
+
+func TimespecToNsec(ts Timespec) int64 { return int64(ts.Sec)*1e9 + int64(ts.Nsec) }
+
+func NsecToTimespec(nsec int64) (ts Timespec) {
+ ts.Sec = int32(nsec / 1e9)
+ ts.Nsec = int32(nsec % 1e9)
+ return
+}
+
+func TimevalToNsec(tv Timeval) int64 { return int64(tv.Sec)*1e9 + int64(tv.Usec)*1e3 }
+
+func NsecToTimeval(nsec int64) (tv Timeval) {
+ nsec += 999 // round up to microsecond
+ tv.Usec = int32(nsec % 1e9 / 1e3)
+ tv.Sec = int32(nsec / 1e9)
+ return
+}
+
+func SetKevent(k *Kevent_t, fd, mode, flags int) {
+ k.Ident = uint32(fd)
+ k.Filter = int16(mode)
+ k.Flags = uint16(flags)
+}
+
+func (iov *Iovec) SetLen(length int) {
+ iov.Len = uint32(length)
+}
+
+func (msghdr *Msghdr) SetControllen(length int) {
+ msghdr.Controllen = uint32(length)
+}
+
+func (cmsg *Cmsghdr) SetLen(length int) {
+ cmsg.Len = uint32(length)
+}
+
+func sendfile(outfd int, infd int, offset *int64, count int) (written int, err error) {
+ var writtenOut uint64 = 0
+ _, _, e1 := Syscall9(SYS_SENDFILE, uintptr(infd), uintptr(outfd), uintptr(*offset), uintptr((*offset)>>32), uintptr(count), 0, uintptr(unsafe.Pointer(&writtenOut)), 0, 0)
+
+ written = int(writtenOut)
+
+ if e1 != 0 {
+ err = e1
+ }
+ return
+}
+
+func Syscall9(num, a1, a2, a3, a4, a5, a6, a7, a8, a9 uintptr) (r1, r2 uintptr, err syscall.Errno)
diff --git a/vendor/golang.org/x/sys/unix/syscall_dragonfly_amd64.go b/vendor/golang.org/x/sys/unix/syscall_dragonfly_amd64.go
new file mode 100644
index 00000000..2ed92590
--- /dev/null
+++ b/vendor/golang.org/x/sys/unix/syscall_dragonfly_amd64.go
@@ -0,0 +1,63 @@
+// Copyright 2009 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// +build amd64,dragonfly
+
+package unix
+
+import (
+ "syscall"
+ "unsafe"
+)
+
+func Getpagesize() int { return 4096 }
+
+func TimespecToNsec(ts Timespec) int64 { return int64(ts.Sec)*1e9 + int64(ts.Nsec) }
+
+func NsecToTimespec(nsec int64) (ts Timespec) {
+ ts.Sec = nsec / 1e9
+ ts.Nsec = nsec % 1e9
+ return
+}
+
+func TimevalToNsec(tv Timeval) int64 { return int64(tv.Sec)*1e9 + int64(tv.Usec)*1e3 }
+
+func NsecToTimeval(nsec int64) (tv Timeval) {
+ nsec += 999 // round up to microsecond
+ tv.Usec = nsec % 1e9 / 1e3
+ tv.Sec = int64(nsec / 1e9)
+ return
+}
+
+func SetKevent(k *Kevent_t, fd, mode, flags int) {
+ k.Ident = uint64(fd)
+ k.Filter = int16(mode)
+ k.Flags = uint16(flags)
+}
+
+func (iov *Iovec) SetLen(length int) {
+ iov.Len = uint64(length)
+}
+
+func (msghdr *Msghdr) SetControllen(length int) {
+ msghdr.Controllen = uint32(length)
+}
+
+func (cmsg *Cmsghdr) SetLen(length int) {
+ cmsg.Len = uint32(length)
+}
+
+func sendfile(outfd int, infd int, offset *int64, count int) (written int, err error) {
+ var writtenOut uint64 = 0
+ _, _, e1 := Syscall9(SYS_SENDFILE, uintptr(infd), uintptr(outfd), uintptr(*offset), uintptr(count), 0, uintptr(unsafe.Pointer(&writtenOut)), 0, 0, 0)
+
+ written = int(writtenOut)
+
+ if e1 != 0 {
+ err = e1
+ }
+ return
+}
+
+func Syscall9(num, a1, a2, a3, a4, a5, a6, a7, a8, a9 uintptr) (r1, r2 uintptr, err syscall.Errno)
diff --git a/vendor/golang.org/x/sys/unix/syscall_freebsd.go b/vendor/golang.org/x/sys/unix/syscall_freebsd.go
new file mode 100644
index 00000000..ec56ed60
--- /dev/null
+++ b/vendor/golang.org/x/sys/unix/syscall_freebsd.go
@@ -0,0 +1,682 @@
+// Copyright 2009,2010 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// FreeBSD system calls.
+// This file is compiled as ordinary Go code,
+// but it is also input to mksyscall,
+// which parses the //sys lines and generates system call stubs.
+// Note that sometimes we use a lowercase //sys name and wrap
+// it in our own nicer implementation, either here or in
+// syscall_bsd.go or syscall_unix.go.
+
+package unix
+
+import "unsafe"
+
+type SockaddrDatalink struct {
+ Len uint8
+ Family uint8
+ Index uint16
+ Type uint8
+ Nlen uint8
+ Alen uint8
+ Slen uint8
+ Data [46]int8
+ raw RawSockaddrDatalink
+}
+
+// Translate "kern.hostname" to []_C_int{0,1,2,3}.
+func nametomib(name string) (mib []_C_int, err error) {
+ const siz = unsafe.Sizeof(mib[0])
+
+ // NOTE(rsc): It seems strange to set the buffer to have
+ // size CTL_MAXNAME+2 but use only CTL_MAXNAME
+ // as the size. I don't know why the +2 is here, but the
+ // kernel uses +2 for its own implementation of this function.
+ // I am scared that if we don't include the +2 here, the kernel
+ // will silently write 2 words farther than we specify
+ // and we'll get memory corruption.
+ var buf [CTL_MAXNAME + 2]_C_int
+ n := uintptr(CTL_MAXNAME) * siz
+
+ p := (*byte)(unsafe.Pointer(&buf[0]))
+ bytes, err := ByteSliceFromString(name)
+ if err != nil {
+ return nil, err
+ }
+
+	// Magic sysctl: "setting" MIB 0.3 to a string name
+	// lets you read back that name's MIB as an array of integers.
+ if err = sysctl([]_C_int{0, 3}, p, &n, &bytes[0], uintptr(len(name))); err != nil {
+ return nil, err
+ }
+ return buf[0 : n/siz], nil
+}
+
+// ParseDirent parses up to max directory entries in buf,
+// appending the names to names. It returns the number of
+// bytes consumed from buf, the number of entries added
+// to names, and the new names slice.
+func ParseDirent(buf []byte, max int, names []string) (consumed int, count int, newnames []string) {
+ origlen := len(buf)
+ for max != 0 && len(buf) > 0 {
+ dirent := (*Dirent)(unsafe.Pointer(&buf[0]))
+ if dirent.Reclen == 0 {
+ buf = nil
+ break
+ }
+ buf = buf[dirent.Reclen:]
+ if dirent.Fileno == 0 { // File absent in directory.
+ continue
+ }
+ bytes := (*[10000]byte)(unsafe.Pointer(&dirent.Name[0]))
+ var name = string(bytes[0:dirent.Namlen])
+ if name == "." || name == ".." { // Useless names
+ continue
+ }
+ max--
+ count++
+ names = append(names, name)
+ }
+ return origlen - len(buf), count, names
+}
+
+//sysnb pipe() (r int, w int, err error)
+
+func Pipe(p []int) (err error) {
+ if len(p) != 2 {
+ return EINVAL
+ }
+ p[0], p[1], err = pipe()
+ return
+}
+
+func GetsockoptIPMreqn(fd, level, opt int) (*IPMreqn, error) {
+ var value IPMreqn
+ vallen := _Socklen(SizeofIPMreqn)
+ errno := getsockopt(fd, level, opt, unsafe.Pointer(&value), &vallen)
+ return &value, errno
+}
+
+func SetsockoptIPMreqn(fd, level, opt int, mreq *IPMreqn) (err error) {
+ return setsockopt(fd, level, opt, unsafe.Pointer(mreq), unsafe.Sizeof(*mreq))
+}
+
+func Accept4(fd, flags int) (nfd int, sa Sockaddr, err error) {
+ var rsa RawSockaddrAny
+ var len _Socklen = SizeofSockaddrAny
+ nfd, err = accept4(fd, &rsa, &len, flags)
+ if err != nil {
+ return
+ }
+ if len > SizeofSockaddrAny {
+ panic("RawSockaddrAny too small")
+ }
+ sa, err = anyToSockaddr(&rsa)
+ if err != nil {
+ Close(nfd)
+ nfd = 0
+ }
+ return
+}
+
+func Getfsstat(buf []Statfs_t, flags int) (n int, err error) {
+ var _p0 unsafe.Pointer
+ var bufsize uintptr
+ if len(buf) > 0 {
+ _p0 = unsafe.Pointer(&buf[0])
+ bufsize = unsafe.Sizeof(Statfs_t{}) * uintptr(len(buf))
+ }
+ r0, _, e1 := Syscall(SYS_GETFSSTAT, uintptr(_p0), bufsize, uintptr(flags))
+ n = int(r0)
+ if e1 != 0 {
+ err = e1
+ }
+ return
+}
+
+// Derive extattr namespace and attribute name
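+// e.g. "user.mime_type" yields (EXTATTR_NAMESPACE_USER, "mime_type").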
+
+func xattrnamespace(fullattr string) (ns int, attr string, err error) {
+ s := -1
+ for idx, val := range fullattr {
+ if val == '.' {
+ s = idx
+ break
+ }
+ }
+
+ if s == -1 {
+ return -1, "", ENOATTR
+ }
+
+ namespace := fullattr[0:s]
+ attr = fullattr[s+1:]
+
+ switch namespace {
+ case "user":
+ return EXTATTR_NAMESPACE_USER, attr, nil
+ case "system":
+ return EXTATTR_NAMESPACE_SYSTEM, attr, nil
+ default:
+ return -1, "", ENOATTR
+ }
+}
+
+func initxattrdest(dest []byte, idx int) (d unsafe.Pointer) {
+ if len(dest) > idx {
+ return unsafe.Pointer(&dest[idx])
+ } else {
+ return unsafe.Pointer(_zero)
+ }
+}
+
+// FreeBSD implements its own syscalls to handle extended attributes
+
+func Getxattr(file string, attr string, dest []byte) (sz int, err error) {
+ d := initxattrdest(dest, 0)
+ destsize := len(dest)
+
+ nsid, a, err := xattrnamespace(attr)
+ if err != nil {
+ return -1, err
+ }
+
+ return ExtattrGetFile(file, nsid, a, uintptr(d), destsize)
+}
+
+func Fgetxattr(fd int, attr string, dest []byte) (sz int, err error) {
+ d := initxattrdest(dest, 0)
+ destsize := len(dest)
+
+ nsid, a, err := xattrnamespace(attr)
+ if err != nil {
+ return -1, err
+ }
+
+ return ExtattrGetFd(fd, nsid, a, uintptr(d), destsize)
+}
+
+func Lgetxattr(link string, attr string, dest []byte) (sz int, err error) {
+ d := initxattrdest(dest, 0)
+ destsize := len(dest)
+
+ nsid, a, err := xattrnamespace(attr)
+ if err != nil {
+ return -1, err
+ }
+
+ return ExtattrGetLink(link, nsid, a, uintptr(d), destsize)
+}
+
+// flags are unused on FreeBSD
+
+func Fsetxattr(fd int, attr string, data []byte, flags int) (err error) {
+ d := unsafe.Pointer(&data[0])
+ datasiz := len(data)
+
+ nsid, a, err := xattrnamespace(attr)
+ if err != nil {
+ return
+ }
+
+ _, err = ExtattrSetFd(fd, nsid, a, uintptr(d), datasiz)
+ return
+}
+
+func Setxattr(file string, attr string, data []byte, flags int) (err error) {
+ d := unsafe.Pointer(&data[0])
+ datasiz := len(data)
+
+ nsid, a, err := xattrnamespace(attr)
+ if err != nil {
+ return
+ }
+
+ _, err = ExtattrSetFile(file, nsid, a, uintptr(d), datasiz)
+ return
+}
+
+func Lsetxattr(link string, attr string, data []byte, flags int) (err error) {
+ d := unsafe.Pointer(&data[0])
+ datasiz := len(data)
+
+ nsid, a, err := xattrnamespace(attr)
+ if err != nil {
+ return
+ }
+
+ _, err = ExtattrSetLink(link, nsid, a, uintptr(d), datasiz)
+ return
+}
+
+func Removexattr(file string, attr string) (err error) {
+ nsid, a, err := xattrnamespace(attr)
+ if err != nil {
+ return
+ }
+
+ err = ExtattrDeleteFile(file, nsid, a)
+ return
+}
+
+func Fremovexattr(fd int, attr string) (err error) {
+ nsid, a, err := xattrnamespace(attr)
+ if err != nil {
+ return
+ }
+
+ err = ExtattrDeleteFd(fd, nsid, a)
+ return
+}
+
+func Lremovexattr(link string, attr string) (err error) {
+ nsid, a, err := xattrnamespace(attr)
+ if err != nil {
+ return
+ }
+
+ err = ExtattrDeleteLink(link, nsid, a)
+ return
+}
+
+func Listxattr(file string, dest []byte) (sz int, err error) {
+ d := initxattrdest(dest, 0)
+ destsiz := len(dest)
+
+ // FreeBSD won't allow you to list xattrs from multiple namespaces
+ s := 0
+ var e error
+ for _, nsid := range [...]int{EXTATTR_NAMESPACE_USER, EXTATTR_NAMESPACE_SYSTEM} {
+ stmp, e := ExtattrListFile(file, nsid, uintptr(d), destsiz)
+
+		/* Errors accessing system attributes are ignored so that
+		 * we can match the Linux behavior of silently omitting attributes
+		 * we don't have read permission for.
+		 *
+		 * Linux will still error if we ask for user attributes on a file
+		 * we don't have read permission on, so don't ignore those errors.
+		 */
+ if e != nil && e == EPERM && nsid != EXTATTR_NAMESPACE_USER {
+ e = nil
+ continue
+ } else if e != nil {
+ return s, e
+ }
+
+ s += stmp
+ destsiz -= s
+ if destsiz < 0 {
+ destsiz = 0
+ }
+ d = initxattrdest(dest, s)
+ }
+
+ return s, e
+}
+
+func Flistxattr(fd int, dest []byte) (sz int, err error) {
+ d := initxattrdest(dest, 0)
+ destsiz := len(dest)
+
+ s := 0
+ var e error
+ for _, nsid := range [...]int{EXTATTR_NAMESPACE_USER, EXTATTR_NAMESPACE_SYSTEM} {
+ stmp, e := ExtattrListFd(fd, nsid, uintptr(d), destsiz)
+ if e != nil && e == EPERM && nsid != EXTATTR_NAMESPACE_USER {
+ e = nil
+ continue
+ } else if e != nil {
+ return s, e
+ }
+
+ s += stmp
+ destsiz -= s
+ if destsiz < 0 {
+ destsiz = 0
+ }
+ d = initxattrdest(dest, s)
+ }
+
+ return s, e
+}
+
+func Llistxattr(link string, dest []byte) (sz int, err error) {
+ d := initxattrdest(dest, 0)
+ destsiz := len(dest)
+
+ s := 0
+ var e error
+ for _, nsid := range [...]int{EXTATTR_NAMESPACE_USER, EXTATTR_NAMESPACE_SYSTEM} {
+ stmp, e := ExtattrListLink(link, nsid, uintptr(d), destsiz)
+ if e != nil && e == EPERM && nsid != EXTATTR_NAMESPACE_USER {
+ e = nil
+ continue
+ } else if e != nil {
+ return s, e
+ }
+
+ s += stmp
+ destsiz -= s
+ if destsiz < 0 {
+ destsiz = 0
+ }
+ d = initxattrdest(dest, s)
+ }
+
+ return s, e
+}
+
+/*
+ * Exposed directly
+ */
+//sys Access(path string, mode uint32) (err error)
+//sys Adjtime(delta *Timeval, olddelta *Timeval) (err error)
+//sys Chdir(path string) (err error)
+//sys Chflags(path string, flags int) (err error)
+//sys Chmod(path string, mode uint32) (err error)
+//sys Chown(path string, uid int, gid int) (err error)
+//sys Chroot(path string) (err error)
+//sys Close(fd int) (err error)
+//sys Dup(fd int) (nfd int, err error)
+//sys Dup2(from int, to int) (err error)
+//sys Exit(code int)
+//sys ExtattrGetFd(fd int, attrnamespace int, attrname string, data uintptr, nbytes int) (ret int, err error)
+//sys ExtattrSetFd(fd int, attrnamespace int, attrname string, data uintptr, nbytes int) (ret int, err error)
+//sys ExtattrDeleteFd(fd int, attrnamespace int, attrname string) (err error)
+//sys ExtattrListFd(fd int, attrnamespace int, data uintptr, nbytes int) (ret int, err error)
+//sys ExtattrGetFile(file string, attrnamespace int, attrname string, data uintptr, nbytes int) (ret int, err error)
+//sys ExtattrSetFile(file string, attrnamespace int, attrname string, data uintptr, nbytes int) (ret int, err error)
+//sys ExtattrDeleteFile(file string, attrnamespace int, attrname string) (err error)
+//sys ExtattrListFile(file string, attrnamespace int, data uintptr, nbytes int) (ret int, err error)
+//sys ExtattrGetLink(link string, attrnamespace int, attrname string, data uintptr, nbytes int) (ret int, err error)
+//sys ExtattrSetLink(link string, attrnamespace int, attrname string, data uintptr, nbytes int) (ret int, err error)
+//sys ExtattrDeleteLink(link string, attrnamespace int, attrname string) (err error)
+//sys ExtattrListLink(link string, attrnamespace int, data uintptr, nbytes int) (ret int, err error)
+//sys Fadvise(fd int, offset int64, length int64, advice int) (err error) = SYS_POSIX_FADVISE
+//sys Fchdir(fd int) (err error)
+//sys Fchflags(fd int, flags int) (err error)
+//sys Fchmod(fd int, mode uint32) (err error)
+//sys Fchown(fd int, uid int, gid int) (err error)
+//sys Flock(fd int, how int) (err error)
+//sys Fpathconf(fd int, name int) (val int, err error)
+//sys Fstat(fd int, stat *Stat_t) (err error)
+//sys Fstatfs(fd int, stat *Statfs_t) (err error)
+//sys Fsync(fd int) (err error)
+//sys Ftruncate(fd int, length int64) (err error)
+//sys Getdirentries(fd int, buf []byte, basep *uintptr) (n int, err error)
+//sys Getdtablesize() (size int)
+//sysnb Getegid() (egid int)
+//sysnb Geteuid() (uid int)
+//sysnb Getgid() (gid int)
+//sysnb Getpgid(pid int) (pgid int, err error)
+//sysnb Getpgrp() (pgrp int)
+//sysnb Getpid() (pid int)
+//sysnb Getppid() (ppid int)
+//sys Getpriority(which int, who int) (prio int, err error)
+//sysnb Getrlimit(which int, lim *Rlimit) (err error)
+//sysnb Getrusage(who int, rusage *Rusage) (err error)
+//sysnb Getsid(pid int) (sid int, err error)
+//sysnb Gettimeofday(tv *Timeval) (err error)
+//sysnb Getuid() (uid int)
+//sys Issetugid() (tainted bool)
+//sys Kill(pid int, signum syscall.Signal) (err error)
+//sys Kqueue() (fd int, err error)
+//sys Lchown(path string, uid int, gid int) (err error)
+//sys Link(path string, link string) (err error)
+//sys Listen(s int, backlog int) (err error)
+//sys Lstat(path string, stat *Stat_t) (err error)
+//sys Mkdir(path string, mode uint32) (err error)
+//sys Mkfifo(path string, mode uint32) (err error)
+//sys Mknod(path string, mode uint32, dev int) (err error)
+//sys Mlock(b []byte) (err error)
+//sys Mlockall(flags int) (err error)
+//sys Mprotect(b []byte, prot int) (err error)
+//sys Munlock(b []byte) (err error)
+//sys Munlockall() (err error)
+//sys Nanosleep(time *Timespec, leftover *Timespec) (err error)
+//sys Open(path string, mode int, perm uint32) (fd int, err error)
+//sys Pathconf(path string, name int) (val int, err error)
+//sys Pread(fd int, p []byte, offset int64) (n int, err error)
+//sys Pwrite(fd int, p []byte, offset int64) (n int, err error)
+//sys read(fd int, p []byte) (n int, err error)
+//sys Readlink(path string, buf []byte) (n int, err error)
+//sys Rename(from string, to string) (err error)
+//sys Revoke(path string) (err error)
+//sys Rmdir(path string) (err error)
+//sys Seek(fd int, offset int64, whence int) (newoffset int64, err error) = SYS_LSEEK
+//sys Select(n int, r *FdSet, w *FdSet, e *FdSet, timeout *Timeval) (err error)
+//sysnb Setegid(egid int) (err error)
+//sysnb Seteuid(euid int) (err error)
+//sysnb Setgid(gid int) (err error)
+//sys Setlogin(name string) (err error)
+//sysnb Setpgid(pid int, pgid int) (err error)
+//sys Setpriority(which int, who int, prio int) (err error)
+//sysnb Setregid(rgid int, egid int) (err error)
+//sysnb Setreuid(ruid int, euid int) (err error)
+//sysnb Setresgid(rgid int, egid int, sgid int) (err error)
+//sysnb Setresuid(ruid int, euid int, suid int) (err error)
+//sysnb Setrlimit(which int, lim *Rlimit) (err error)
+//sysnb Setsid() (pid int, err error)
+//sysnb Settimeofday(tp *Timeval) (err error)
+//sysnb Setuid(uid int) (err error)
+//sys Stat(path string, stat *Stat_t) (err error)
+//sys Statfs(path string, stat *Statfs_t) (err error)
+//sys Symlink(path string, link string) (err error)
+//sys Sync() (err error)
+//sys Truncate(path string, length int64) (err error)
+//sys Umask(newmask int) (oldmask int)
+//sys Undelete(path string) (err error)
+//sys Unlink(path string) (err error)
+//sys Unmount(path string, flags int) (err error)
+//sys write(fd int, p []byte) (n int, err error)
+//sys mmap(addr uintptr, length uintptr, prot int, flag int, fd int, pos int64) (ret uintptr, err error)
+//sys munmap(addr uintptr, length uintptr) (err error)
+//sys readlen(fd int, buf *byte, nbuf int) (n int, err error) = SYS_READ
+//sys writelen(fd int, buf *byte, nbuf int) (n int, err error) = SYS_WRITE
+//sys accept4(fd int, rsa *RawSockaddrAny, addrlen *_Socklen, flags int) (nfd int, err error)
+
+/*
+ * Unimplemented
+ */
+// Profil
+// Sigaction
+// Sigprocmask
+// Getlogin
+// Sigpending
+// Sigaltstack
+// Ioctl
+// Reboot
+// Execve
+// Vfork
+// Sbrk
+// Sstk
+// Ovadvise
+// Mincore
+// Setitimer
+// Swapon
+// Select
+// Sigsuspend
+// Readv
+// Writev
+// Nfssvc
+// Getfh
+// Quotactl
+// Mount
+// Csops
+// Waitid
+// Add_profil
+// Kdebug_trace
+// Sigreturn
+// Mmap
+// Mlock
+// Munlock
+// Atsocket
+// Kqueue_from_portset_np
+// Kqueue_portset
+// Getattrlist
+// Setattrlist
+// Getdirentriesattr
+// Searchfs
+// Delete
+// Copyfile
+// Poll
+// Watchevent
+// Waitevent
+// Modwatch
+// Getxattr
+// Fgetxattr
+// Setxattr
+// Fsetxattr
+// Removexattr
+// Fremovexattr
+// Listxattr
+// Flistxattr
+// Fsctl
+// Initgroups
+// Posix_spawn
+// Nfsclnt
+// Fhopen
+// Minherit
+// Semsys
+// Msgsys
+// Shmsys
+// Semctl
+// Semget
+// Semop
+// Msgctl
+// Msgget
+// Msgsnd
+// Msgrcv
+// Shmat
+// Shmctl
+// Shmdt
+// Shmget
+// Shm_open
+// Shm_unlink
+// Sem_open
+// Sem_close
+// Sem_unlink
+// Sem_wait
+// Sem_trywait
+// Sem_post
+// Sem_getvalue
+// Sem_init
+// Sem_destroy
+// Open_extended
+// Umask_extended
+// Stat_extended
+// Lstat_extended
+// Fstat_extended
+// Chmod_extended
+// Fchmod_extended
+// Access_extended
+// Settid
+// Gettid
+// Setsgroups
+// Getsgroups
+// Setwgroups
+// Getwgroups
+// Mkfifo_extended
+// Mkdir_extended
+// Identitysvc
+// Shared_region_check_np
+// Shared_region_map_np
+// __pthread_mutex_destroy
+// __pthread_mutex_init
+// __pthread_mutex_lock
+// __pthread_mutex_trylock
+// __pthread_mutex_unlock
+// __pthread_cond_init
+// __pthread_cond_destroy
+// __pthread_cond_broadcast
+// __pthread_cond_signal
+// Setsid_with_pid
+// __pthread_cond_timedwait
+// Aio_fsync
+// Aio_return
+// Aio_suspend
+// Aio_cancel
+// Aio_error
+// Aio_read
+// Aio_write
+// Lio_listio
+// __pthread_cond_wait
+// Iopolicysys
+// Mlockall
+// Munlockall
+// __pthread_kill
+// __pthread_sigmask
+// __sigwait
+// __disable_threadsignal
+// __pthread_markcancel
+// __pthread_canceled
+// __semwait_signal
+// Proc_info
+// Stat64_extended
+// Lstat64_extended
+// Fstat64_extended
+// __pthread_chdir
+// __pthread_fchdir
+// Audit
+// Auditon
+// Getauid
+// Setauid
+// Getaudit
+// Setaudit
+// Getaudit_addr
+// Setaudit_addr
+// Auditctl
+// Bsdthread_create
+// Bsdthread_terminate
+// Stack_snapshot
+// Bsdthread_register
+// Workq_open
+// Workq_ops
+// __mac_execve
+// __mac_syscall
+// __mac_get_file
+// __mac_set_file
+// __mac_get_link
+// __mac_set_link
+// __mac_get_proc
+// __mac_set_proc
+// __mac_get_fd
+// __mac_set_fd
+// __mac_get_pid
+// __mac_get_lcid
+// __mac_get_lctx
+// __mac_set_lctx
+// Setlcid
+// Read_nocancel
+// Write_nocancel
+// Open_nocancel
+// Close_nocancel
+// Wait4_nocancel
+// Recvmsg_nocancel
+// Sendmsg_nocancel
+// Recvfrom_nocancel
+// Accept_nocancel
+// Msync_nocancel
+// Fcntl_nocancel
+// Select_nocancel
+// Fsync_nocancel
+// Connect_nocancel
+// Sigsuspend_nocancel
+// Readv_nocancel
+// Writev_nocancel
+// Sendto_nocancel
+// Pread_nocancel
+// Pwrite_nocancel
+// Waitid_nocancel
+// Poll_nocancel
+// Msgsnd_nocancel
+// Msgrcv_nocancel
+// Sem_wait_nocancel
+// Aio_suspend_nocancel
+// __sigwait_nocancel
+// __semwait_signal_nocancel
+// __mac_mount
+// __mac_get_mount
+// __mac_getfsstat
diff --git a/vendor/golang.org/x/sys/unix/syscall_freebsd_386.go b/vendor/golang.org/x/sys/unix/syscall_freebsd_386.go
new file mode 100644
index 00000000..6255d40f
--- /dev/null
+++ b/vendor/golang.org/x/sys/unix/syscall_freebsd_386.go
@@ -0,0 +1,63 @@
+// Copyright 2009 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// +build 386,freebsd
+
+package unix
+
+import (
+ "syscall"
+ "unsafe"
+)
+
+func Getpagesize() int { return 4096 }
+
+func TimespecToNsec(ts Timespec) int64 { return int64(ts.Sec)*1e9 + int64(ts.Nsec) }
+
+func NsecToTimespec(nsec int64) (ts Timespec) {
+ ts.Sec = int32(nsec / 1e9)
+ ts.Nsec = int32(nsec % 1e9)
+ return
+}
+
+func TimevalToNsec(tv Timeval) int64 { return int64(tv.Sec)*1e9 + int64(tv.Usec)*1e3 }
+
+func NsecToTimeval(nsec int64) (tv Timeval) {
+ nsec += 999 // round up to microsecond
+ tv.Usec = int32(nsec % 1e9 / 1e3)
+ tv.Sec = int32(nsec / 1e9)
+ return
+}
+
+func SetKevent(k *Kevent_t, fd, mode, flags int) {
+ k.Ident = uint32(fd)
+ k.Filter = int16(mode)
+ k.Flags = uint16(flags)
+}
+
+func (iov *Iovec) SetLen(length int) {
+ iov.Len = uint32(length)
+}
+
+func (msghdr *Msghdr) SetControllen(length int) {
+ msghdr.Controllen = uint32(length)
+}
+
+func (cmsg *Cmsghdr) SetLen(length int) {
+ cmsg.Len = uint32(length)
+}
+
+func sendfile(outfd int, infd int, offset *int64, count int) (written int, err error) {
+ var writtenOut uint64 = 0
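+	// On 386 the 64-bit offset is split across two 32-bit arguments; the
+	// kernel reports the number of bytes sent through writtenOut.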
+ _, _, e1 := Syscall9(SYS_SENDFILE, uintptr(infd), uintptr(outfd), uintptr(*offset), uintptr((*offset)>>32), uintptr(count), 0, uintptr(unsafe.Pointer(&writtenOut)), 0, 0)
+
+ written = int(writtenOut)
+
+ if e1 != 0 {
+ err = e1
+ }
+ return
+}
+
+func Syscall9(num, a1, a2, a3, a4, a5, a6, a7, a8, a9 uintptr) (r1, r2 uintptr, err syscall.Errno)
diff --git a/vendor/golang.org/x/sys/unix/syscall_freebsd_amd64.go b/vendor/golang.org/x/sys/unix/syscall_freebsd_amd64.go
new file mode 100644
index 00000000..8b395d59
--- /dev/null
+++ b/vendor/golang.org/x/sys/unix/syscall_freebsd_amd64.go
@@ -0,0 +1,63 @@
+// Copyright 2009 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// +build amd64,freebsd
+
+package unix
+
+import (
+ "syscall"
+ "unsafe"
+)
+
+func Getpagesize() int { return 4096 }
+
+func TimespecToNsec(ts Timespec) int64 { return int64(ts.Sec)*1e9 + int64(ts.Nsec) }
+
+func NsecToTimespec(nsec int64) (ts Timespec) {
+ ts.Sec = nsec / 1e9
+ ts.Nsec = nsec % 1e9
+ return
+}
+
+func TimevalToNsec(tv Timeval) int64 { return int64(tv.Sec)*1e9 + int64(tv.Usec)*1e3 }
+
+func NsecToTimeval(nsec int64) (tv Timeval) {
+ nsec += 999 // round up to microsecond
+ tv.Usec = nsec % 1e9 / 1e3
+ tv.Sec = int64(nsec / 1e9)
+ return
+}
+
+func SetKevent(k *Kevent_t, fd, mode, flags int) {
+ k.Ident = uint64(fd)
+ k.Filter = int16(mode)
+ k.Flags = uint16(flags)
+}
+
+func (iov *Iovec) SetLen(length int) {
+ iov.Len = uint64(length)
+}
+
+func (msghdr *Msghdr) SetControllen(length int) {
+ msghdr.Controllen = uint32(length)
+}
+
+func (cmsg *Cmsghdr) SetLen(length int) {
+ cmsg.Len = uint32(length)
+}
+
+func sendfile(outfd int, infd int, offset *int64, count int) (written int, err error) {
+ var writtenOut uint64 = 0
+ _, _, e1 := Syscall9(SYS_SENDFILE, uintptr(infd), uintptr(outfd), uintptr(*offset), uintptr(count), 0, uintptr(unsafe.Pointer(&writtenOut)), 0, 0, 0)
+
+ written = int(writtenOut)
+
+ if e1 != 0 {
+ err = e1
+ }
+ return
+}
+
+func Syscall9(num, a1, a2, a3, a4, a5, a6, a7, a8, a9 uintptr) (r1, r2 uintptr, err syscall.Errno)
diff --git a/vendor/golang.org/x/sys/unix/syscall_freebsd_arm.go b/vendor/golang.org/x/sys/unix/syscall_freebsd_arm.go
new file mode 100644
index 00000000..4e72d46a
--- /dev/null
+++ b/vendor/golang.org/x/sys/unix/syscall_freebsd_arm.go
@@ -0,0 +1,63 @@
+// Copyright 2012 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// +build arm,freebsd
+
+package unix
+
+import (
+ "syscall"
+ "unsafe"
+)
+
+func Getpagesize() int { return 4096 }
+
+func TimespecToNsec(ts Timespec) int64 { return ts.Sec*1e9 + int64(ts.Nsec) }
+
+func NsecToTimespec(nsec int64) (ts Timespec) {
+ ts.Sec = nsec / 1e9
+ ts.Nsec = int32(nsec % 1e9)
+ return
+}
+
+func TimevalToNsec(tv Timeval) int64 { return tv.Sec*1e9 + int64(tv.Usec)*1e3 }
+
+func NsecToTimeval(nsec int64) (tv Timeval) {
+ nsec += 999 // round up to microsecond
+ tv.Usec = int32(nsec % 1e9 / 1e3)
+ tv.Sec = nsec / 1e9
+ return
+}
+
+func SetKevent(k *Kevent_t, fd, mode, flags int) {
+ k.Ident = uint32(fd)
+ k.Filter = int16(mode)
+ k.Flags = uint16(flags)
+}
+
+func (iov *Iovec) SetLen(length int) {
+ iov.Len = uint32(length)
+}
+
+func (msghdr *Msghdr) SetControllen(length int) {
+ msghdr.Controllen = uint32(length)
+}
+
+func (cmsg *Cmsghdr) SetLen(length int) {
+ cmsg.Len = uint32(length)
+}
+
+func sendfile(outfd int, infd int, offset *int64, count int) (written int, err error) {
+ var writtenOut uint64 = 0
+ _, _, e1 := Syscall9(SYS_SENDFILE, uintptr(infd), uintptr(outfd), uintptr(*offset), uintptr((*offset)>>32), uintptr(count), 0, uintptr(unsafe.Pointer(&writtenOut)), 0, 0)
+
+ written = int(writtenOut)
+
+ if e1 != 0 {
+ err = e1
+ }
+ return
+}
+
+func Syscall9(num, a1, a2, a3, a4, a5, a6, a7, a8, a9 uintptr) (r1, r2 uintptr, err syscall.Errno)
diff --git a/vendor/golang.org/x/sys/unix/syscall_linux.go b/vendor/golang.org/x/sys/unix/syscall_linux.go
new file mode 100644
index 00000000..d3ee5d2c
--- /dev/null
+++ b/vendor/golang.org/x/sys/unix/syscall_linux.go
@@ -0,0 +1,1086 @@
+// Copyright 2009 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Linux system calls.
+// This file is compiled as ordinary Go code,
+// but it is also input to mksyscall,
+// which parses the //sys lines and generates system call stubs.
+// Note that sometimes we use a lowercase //sys name and
+// wrap it in our own nicer implementation.
+
+package unix
+
+import (
+ "syscall"
+ "unsafe"
+)
+
+/*
+ * Wrapped
+ */
+
+func Access(path string, mode uint32) (err error) {
+ return Faccessat(AT_FDCWD, path, mode, 0)
+}
+
+func Chmod(path string, mode uint32) (err error) {
+ return Fchmodat(AT_FDCWD, path, mode, 0)
+}
+
+func Chown(path string, uid int, gid int) (err error) {
+ return Fchownat(AT_FDCWD, path, uid, gid, 0)
+}
+
+func Creat(path string, mode uint32) (fd int, err error) {
+ return Open(path, O_CREAT|O_WRONLY|O_TRUNC, mode)
+}
+
+//sys linkat(olddirfd int, oldpath string, newdirfd int, newpath string, flags int) (err error)
+
+func Link(oldpath string, newpath string) (err error) {
+ return linkat(AT_FDCWD, oldpath, AT_FDCWD, newpath, 0)
+}
+
+func Mkdir(path string, mode uint32) (err error) {
+ return Mkdirat(AT_FDCWD, path, mode)
+}
+
+func Mknod(path string, mode uint32, dev int) (err error) {
+ return Mknodat(AT_FDCWD, path, mode, dev)
+}
+
+func Open(path string, mode int, perm uint32) (fd int, err error) {
+ return openat(AT_FDCWD, path, mode|O_LARGEFILE, perm)
+}
+
+//sys openat(dirfd int, path string, flags int, mode uint32) (fd int, err error)
+
+func Openat(dirfd int, path string, flags int, mode uint32) (fd int, err error) {
+ return openat(dirfd, path, flags|O_LARGEFILE, mode)
+}
+
+//sys readlinkat(dirfd int, path string, buf []byte) (n int, err error)
+
+func Readlink(path string, buf []byte) (n int, err error) {
+ return readlinkat(AT_FDCWD, path, buf)
+}
+
+func Rename(oldpath string, newpath string) (err error) {
+ return Renameat(AT_FDCWD, oldpath, AT_FDCWD, newpath)
+}
+
+func Rmdir(path string) error {
+ return unlinkat(AT_FDCWD, path, AT_REMOVEDIR)
+}
+
+//sys symlinkat(oldpath string, newdirfd int, newpath string) (err error)
+
+func Symlink(oldpath string, newpath string) (err error) {
+ return symlinkat(oldpath, AT_FDCWD, newpath)
+}
+
+func Unlink(path string) error {
+ return unlinkat(AT_FDCWD, path, 0)
+}
+
+//sys unlinkat(dirfd int, path string, flags int) (err error)
+
+func Unlinkat(dirfd int, path string) error {
+ return unlinkat(dirfd, path, 0)
+}
+
+//sys utimes(path string, times *[2]Timeval) (err error)
+
+func Utimes(path string, tv []Timeval) (err error) {
+ if tv == nil {
+ return utimes(path, nil)
+ }
+ if len(tv) != 2 {
+ return EINVAL
+ }
+ return utimes(path, (*[2]Timeval)(unsafe.Pointer(&tv[0])))
+}
+
+//sys utimensat(dirfd int, path string, times *[2]Timespec, flags int) (err error)
+
+func UtimesNano(path string, ts []Timespec) error {
+ if ts == nil {
+ err := utimensat(AT_FDCWD, path, nil, 0)
+ if err != ENOSYS {
+ return err
+ }
+ return utimes(path, nil)
+ }
+ if len(ts) != 2 {
+ return EINVAL
+ }
+ err := utimensat(AT_FDCWD, path, (*[2]Timespec)(unsafe.Pointer(&ts[0])), 0)
+ if err != ENOSYS {
+ return err
+ }
+	// If the utimensat syscall isn't available (it was added in Linux 2.6.22,
+	// released 8 July 2007), fall back to utimes.
+ var tv [2]Timeval
+ for i := 0; i < 2; i++ {
+ tv[i].Sec = ts[i].Sec
+ tv[i].Usec = ts[i].Nsec / 1000
+ }
+ return utimes(path, (*[2]Timeval)(unsafe.Pointer(&tv[0])))
+}
+
+func UtimesNanoAt(dirfd int, path string, ts []Timespec, flags int) error {
+ if ts == nil {
+ return utimensat(dirfd, path, nil, flags)
+ }
+ if len(ts) != 2 {
+ return EINVAL
+ }
+ return utimensat(dirfd, path, (*[2]Timespec)(unsafe.Pointer(&ts[0])), flags)
+}
+
+//sys futimesat(dirfd int, path *byte, times *[2]Timeval) (err error)
+
+func Futimesat(dirfd int, path string, tv []Timeval) error {
+ pathp, err := BytePtrFromString(path)
+ if err != nil {
+ return err
+ }
+ if tv == nil {
+ return futimesat(dirfd, pathp, nil)
+ }
+ if len(tv) != 2 {
+ return EINVAL
+ }
+ return futimesat(dirfd, pathp, (*[2]Timeval)(unsafe.Pointer(&tv[0])))
+}
+
+func Futimes(fd int, tv []Timeval) (err error) {
+ // Believe it or not, this is the best we can do on Linux
+ // (and is what glibc does).
+ return Utimes("/proc/self/fd/"+itoa(fd), tv)
+}
+
+const ImplementsGetwd = true
+
+//sys Getcwd(buf []byte) (n int, err error)
+
+func Getwd() (wd string, err error) {
+ var buf [PathMax]byte
+ n, err := Getcwd(buf[0:])
+ if err != nil {
+ return "", err
+ }
+ // Getcwd returns the number of bytes written to buf, including the NUL.
+ if n < 1 || n > len(buf) || buf[n-1] != 0 {
+ return "", EINVAL
+ }
+ return string(buf[0 : n-1]), nil
+}
+
+func Getgroups() (gids []int, err error) {
+ n, err := getgroups(0, nil)
+ if err != nil {
+ return nil, err
+ }
+ if n == 0 {
+ return nil, nil
+ }
+
+ // Sanity check group count. Max is 1<<16 on Linux.
+ if n < 0 || n > 1<<20 {
+ return nil, EINVAL
+ }
+
+ a := make([]_Gid_t, n)
+ n, err = getgroups(n, &a[0])
+ if err != nil {
+ return nil, err
+ }
+ gids = make([]int, n)
+ for i, v := range a[0:n] {
+ gids[i] = int(v)
+ }
+ return
+}
+
+func Setgroups(gids []int) (err error) {
+ if len(gids) == 0 {
+ return setgroups(0, nil)
+ }
+
+ a := make([]_Gid_t, len(gids))
+ for i, v := range gids {
+ a[i] = _Gid_t(v)
+ }
+ return setgroups(len(a), &a[0])
+}
+
+type WaitStatus uint32
+
+// Wait status is 7 bits at bottom, either 0 (exited),
+// 0x7F (stopped), or a signal number that caused an exit.
+// The 0x80 bit is whether there was a core dump.
+// An extra number (exit code, signal causing a stop)
+// is in the high bits. At least that's the idea.
+// There are various irregularities. For example, the
+// "continued" status is 0xFFFF, distinguishing itself
+// from stopped via the core dump bit.
+
+const (
+ mask = 0x7F
+ core = 0x80
+ exited = 0x00
+ stopped = 0x7F
+ shift = 8
+)
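+// For example, a status of 0x0300 means the child exited with code 3,
+// and 0x137F means it was stopped by signal 0x13 (SIGSTOP on x86).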
+
+func (w WaitStatus) Exited() bool { return w&mask == exited }
+
+func (w WaitStatus) Signaled() bool { return w&mask != stopped && w&mask != exited }
+
+func (w WaitStatus) Stopped() bool { return w&0xFF == stopped }
+
+func (w WaitStatus) Continued() bool { return w == 0xFFFF }
+
+func (w WaitStatus) CoreDump() bool { return w.Signaled() && w&core != 0 }
+
+func (w WaitStatus) ExitStatus() int {
+ if !w.Exited() {
+ return -1
+ }
+ return int(w>>shift) & 0xFF
+}
+
+func (w WaitStatus) Signal() syscall.Signal {
+ if !w.Signaled() {
+ return -1
+ }
+ return syscall.Signal(w & mask)
+}
+
+func (w WaitStatus) StopSignal() syscall.Signal {
+ if !w.Stopped() {
+ return -1
+ }
+ return syscall.Signal(w>>shift) & 0xFF
+}
+
+func (w WaitStatus) TrapCause() int {
+ if w.StopSignal() != SIGTRAP {
+ return -1
+ }
+ return int(w>>shift) >> 8
+}
+
+//sys wait4(pid int, wstatus *_C_int, options int, rusage *Rusage) (wpid int, err error)
+
+func Wait4(pid int, wstatus *WaitStatus, options int, rusage *Rusage) (wpid int, err error) {
+ var status _C_int
+ wpid, err = wait4(pid, &status, options, rusage)
+ if wstatus != nil {
+ *wstatus = WaitStatus(status)
+ }
+ return
+}
+
+func Mkfifo(path string, mode uint32) (err error) {
+ return Mknod(path, mode|S_IFIFO, 0)
+}
+
+func (sa *SockaddrInet4) sockaddr() (unsafe.Pointer, _Socklen, error) {
+ if sa.Port < 0 || sa.Port > 0xFFFF {
+ return nil, 0, EINVAL
+ }
+ sa.raw.Family = AF_INET
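+	// The port is stored in the raw sockaddr in network byte order (big endian).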
+ p := (*[2]byte)(unsafe.Pointer(&sa.raw.Port))
+ p[0] = byte(sa.Port >> 8)
+ p[1] = byte(sa.Port)
+ for i := 0; i < len(sa.Addr); i++ {
+ sa.raw.Addr[i] = sa.Addr[i]
+ }
+ return unsafe.Pointer(&sa.raw), SizeofSockaddrInet4, nil
+}
+
+func (sa *SockaddrInet6) sockaddr() (unsafe.Pointer, _Socklen, error) {
+ if sa.Port < 0 || sa.Port > 0xFFFF {
+ return nil, 0, EINVAL
+ }
+ sa.raw.Family = AF_INET6
+ p := (*[2]byte)(unsafe.Pointer(&sa.raw.Port))
+ p[0] = byte(sa.Port >> 8)
+ p[1] = byte(sa.Port)
+ sa.raw.Scope_id = sa.ZoneId
+ for i := 0; i < len(sa.Addr); i++ {
+ sa.raw.Addr[i] = sa.Addr[i]
+ }
+ return unsafe.Pointer(&sa.raw), SizeofSockaddrInet6, nil
+}
+
+func (sa *SockaddrUnix) sockaddr() (unsafe.Pointer, _Socklen, error) {
+ name := sa.Name
+ n := len(name)
+ if n >= len(sa.raw.Path) {
+ return nil, 0, EINVAL
+ }
+ sa.raw.Family = AF_UNIX
+ for i := 0; i < n; i++ {
+ sa.raw.Path[i] = int8(name[i])
+ }
+ // length is family (uint16), name, NUL.
+ sl := _Socklen(2)
+ if n > 0 {
+ sl += _Socklen(n) + 1
+ }
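+	// A leading '@' denotes a Linux abstract socket address; the kernel
+	// expects a NUL byte there instead.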
+ if sa.raw.Path[0] == '@' {
+ sa.raw.Path[0] = 0
+ // Don't count trailing NUL for abstract address.
+ sl--
+ }
+
+ return unsafe.Pointer(&sa.raw), sl, nil
+}
+
+type SockaddrLinklayer struct {
+ Protocol uint16
+ Ifindex int
+ Hatype uint16
+ Pkttype uint8
+ Halen uint8
+ Addr [8]byte
+ raw RawSockaddrLinklayer
+}
+
+func (sa *SockaddrLinklayer) sockaddr() (unsafe.Pointer, _Socklen, error) {
+ if sa.Ifindex < 0 || sa.Ifindex > 0x7fffffff {
+ return nil, 0, EINVAL
+ }
+ sa.raw.Family = AF_PACKET
+ sa.raw.Protocol = sa.Protocol
+ sa.raw.Ifindex = int32(sa.Ifindex)
+ sa.raw.Hatype = sa.Hatype
+ sa.raw.Pkttype = sa.Pkttype
+ sa.raw.Halen = sa.Halen
+ for i := 0; i < len(sa.Addr); i++ {
+ sa.raw.Addr[i] = sa.Addr[i]
+ }
+ return unsafe.Pointer(&sa.raw), SizeofSockaddrLinklayer, nil
+}
+
+type SockaddrNetlink struct {
+ Family uint16
+ Pad uint16
+ Pid uint32
+ Groups uint32
+ raw RawSockaddrNetlink
+}
+
+func (sa *SockaddrNetlink) sockaddr() (unsafe.Pointer, _Socklen, error) {
+ sa.raw.Family = AF_NETLINK
+ sa.raw.Pad = sa.Pad
+ sa.raw.Pid = sa.Pid
+ sa.raw.Groups = sa.Groups
+ return unsafe.Pointer(&sa.raw), SizeofSockaddrNetlink, nil
+}
+
+func anyToSockaddr(rsa *RawSockaddrAny) (Sockaddr, error) {
+ switch rsa.Addr.Family {
+ case AF_NETLINK:
+ pp := (*RawSockaddrNetlink)(unsafe.Pointer(rsa))
+ sa := new(SockaddrNetlink)
+ sa.Family = pp.Family
+ sa.Pad = pp.Pad
+ sa.Pid = pp.Pid
+ sa.Groups = pp.Groups
+ return sa, nil
+
+ case AF_PACKET:
+ pp := (*RawSockaddrLinklayer)(unsafe.Pointer(rsa))
+ sa := new(SockaddrLinklayer)
+ sa.Protocol = pp.Protocol
+ sa.Ifindex = int(pp.Ifindex)
+ sa.Hatype = pp.Hatype
+ sa.Pkttype = pp.Pkttype
+ sa.Halen = pp.Halen
+ for i := 0; i < len(sa.Addr); i++ {
+ sa.Addr[i] = pp.Addr[i]
+ }
+ return sa, nil
+
+ case AF_UNIX:
+ pp := (*RawSockaddrUnix)(unsafe.Pointer(rsa))
+ sa := new(SockaddrUnix)
+ if pp.Path[0] == 0 {
+ // "Abstract" Unix domain socket.
+ // Rewrite leading NUL as @ for textual display.
+ // (This is the standard convention.)
+ // Not friendly to overwrite in place,
+ // but the callers below don't care.
+ pp.Path[0] = '@'
+ }
+
+ // Assume path ends at NUL.
+ // This is not technically the Linux semantics for
+ // abstract Unix domain sockets--they are supposed
+ // to be uninterpreted fixed-size binary blobs--but
+ // everyone uses this convention.
+ n := 0
+ for n < len(pp.Path) && pp.Path[n] != 0 {
+ n++
+ }
+ bytes := (*[10000]byte)(unsafe.Pointer(&pp.Path[0]))[0:n]
+ sa.Name = string(bytes)
+ return sa, nil
+
+ case AF_INET:
+ pp := (*RawSockaddrInet4)(unsafe.Pointer(rsa))
+ sa := new(SockaddrInet4)
+ p := (*[2]byte)(unsafe.Pointer(&pp.Port))
+ sa.Port = int(p[0])<<8 + int(p[1])
+ for i := 0; i < len(sa.Addr); i++ {
+ sa.Addr[i] = pp.Addr[i]
+ }
+ return sa, nil
+
+ case AF_INET6:
+ pp := (*RawSockaddrInet6)(unsafe.Pointer(rsa))
+ sa := new(SockaddrInet6)
+ p := (*[2]byte)(unsafe.Pointer(&pp.Port))
+ sa.Port = int(p[0])<<8 + int(p[1])
+ sa.ZoneId = pp.Scope_id
+ for i := 0; i < len(sa.Addr); i++ {
+ sa.Addr[i] = pp.Addr[i]
+ }
+ return sa, nil
+ }
+ return nil, EAFNOSUPPORT
+}
+
+func Accept(fd int) (nfd int, sa Sockaddr, err error) {
+ var rsa RawSockaddrAny
+ var len _Socklen = SizeofSockaddrAny
+ nfd, err = accept(fd, &rsa, &len)
+ if err != nil {
+ return
+ }
+ sa, err = anyToSockaddr(&rsa)
+ if err != nil {
+ Close(nfd)
+ nfd = 0
+ }
+ return
+}
+
+func Accept4(fd int, flags int) (nfd int, sa Sockaddr, err error) {
+ var rsa RawSockaddrAny
+ var len _Socklen = SizeofSockaddrAny
+ nfd, err = accept4(fd, &rsa, &len, flags)
+ if err != nil {
+ return
+ }
+ if len > SizeofSockaddrAny {
+ panic("RawSockaddrAny too small")
+ }
+ sa, err = anyToSockaddr(&rsa)
+ if err != nil {
+ Close(nfd)
+ nfd = 0
+ }
+ return
+}
+
+func Getsockname(fd int) (sa Sockaddr, err error) {
+ var rsa RawSockaddrAny
+ var len _Socklen = SizeofSockaddrAny
+ if err = getsockname(fd, &rsa, &len); err != nil {
+ return
+ }
+ return anyToSockaddr(&rsa)
+}
+
+func GetsockoptInet4Addr(fd, level, opt int) (value [4]byte, err error) {
+ vallen := _Socklen(4)
+ err = getsockopt(fd, level, opt, unsafe.Pointer(&value[0]), &vallen)
+ return value, err
+}
+
+func GetsockoptIPMreq(fd, level, opt int) (*IPMreq, error) {
+ var value IPMreq
+ vallen := _Socklen(SizeofIPMreq)
+ err := getsockopt(fd, level, opt, unsafe.Pointer(&value), &vallen)
+ return &value, err
+}
+
+func GetsockoptIPMreqn(fd, level, opt int) (*IPMreqn, error) {
+ var value IPMreqn
+ vallen := _Socklen(SizeofIPMreqn)
+ err := getsockopt(fd, level, opt, unsafe.Pointer(&value), &vallen)
+ return &value, err
+}
+
+func GetsockoptIPv6Mreq(fd, level, opt int) (*IPv6Mreq, error) {
+ var value IPv6Mreq
+ vallen := _Socklen(SizeofIPv6Mreq)
+ err := getsockopt(fd, level, opt, unsafe.Pointer(&value), &vallen)
+ return &value, err
+}
+
+func GetsockoptIPv6MTUInfo(fd, level, opt int) (*IPv6MTUInfo, error) {
+ var value IPv6MTUInfo
+ vallen := _Socklen(SizeofIPv6MTUInfo)
+ err := getsockopt(fd, level, opt, unsafe.Pointer(&value), &vallen)
+ return &value, err
+}
+
+func GetsockoptICMPv6Filter(fd, level, opt int) (*ICMPv6Filter, error) {
+ var value ICMPv6Filter
+ vallen := _Socklen(SizeofICMPv6Filter)
+ err := getsockopt(fd, level, opt, unsafe.Pointer(&value), &vallen)
+ return &value, err
+}
+
+func GetsockoptUcred(fd, level, opt int) (*Ucred, error) {
+ var value Ucred
+ vallen := _Socklen(SizeofUcred)
+ err := getsockopt(fd, level, opt, unsafe.Pointer(&value), &vallen)
+ return &value, err
+}
+
+func SetsockoptIPMreqn(fd, level, opt int, mreq *IPMreqn) (err error) {
+ return setsockopt(fd, level, opt, unsafe.Pointer(mreq), unsafe.Sizeof(*mreq))
+}
+
+func Recvmsg(fd int, p, oob []byte, flags int) (n, oobn int, recvflags int, from Sockaddr, err error) {
+ var msg Msghdr
+ var rsa RawSockaddrAny
+ msg.Name = (*byte)(unsafe.Pointer(&rsa))
+ msg.Namelen = uint32(SizeofSockaddrAny)
+ var iov Iovec
+ if len(p) > 0 {
+ iov.Base = (*byte)(unsafe.Pointer(&p[0]))
+ iov.SetLen(len(p))
+ }
+ var dummy byte
+ if len(oob) > 0 {
+ // receive at least one normal byte
+ if len(p) == 0 {
+ iov.Base = &dummy
+ iov.SetLen(1)
+ }
+ msg.Control = (*byte)(unsafe.Pointer(&oob[0]))
+ msg.SetControllen(len(oob))
+ }
+ msg.Iov = &iov
+ msg.Iovlen = 1
+ if n, err = recvmsg(fd, &msg, flags); err != nil {
+ return
+ }
+ oobn = int(msg.Controllen)
+ recvflags = int(msg.Flags)
+ // source address is only specified if the socket is unconnected
+ if rsa.Addr.Family != AF_UNSPEC {
+ from, err = anyToSockaddr(&rsa)
+ }
+ return
+}
+
+func Sendmsg(fd int, p, oob []byte, to Sockaddr, flags int) (err error) {
+ _, err = SendmsgN(fd, p, oob, to, flags)
+ return
+}
+
+func SendmsgN(fd int, p, oob []byte, to Sockaddr, flags int) (n int, err error) {
+ var ptr unsafe.Pointer
+ var salen _Socklen
+ if to != nil {
+ var err error
+ ptr, salen, err = to.sockaddr()
+ if err != nil {
+ return 0, err
+ }
+ }
+ var msg Msghdr
+ msg.Name = (*byte)(unsafe.Pointer(ptr))
+ msg.Namelen = uint32(salen)
+ var iov Iovec
+ if len(p) > 0 {
+ iov.Base = (*byte)(unsafe.Pointer(&p[0]))
+ iov.SetLen(len(p))
+ }
+ var dummy byte
+ if len(oob) > 0 {
+ // send at least one normal byte
+ if len(p) == 0 {
+ iov.Base = &dummy
+ iov.SetLen(1)
+ }
+ msg.Control = (*byte)(unsafe.Pointer(&oob[0]))
+ msg.SetControllen(len(oob))
+ }
+ msg.Iov = &iov
+ msg.Iovlen = 1
+ if n, err = sendmsg(fd, &msg, flags); err != nil {
+ return 0, err
+ }
+ if len(oob) > 0 && len(p) == 0 {
+ n = 0
+ }
+ return n, nil
+}
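+
+// Illustrative sketch (not upstream x/sys/unix code): passing an open file
+// descriptor over a connected Unix socket. The single data byte is the
+// "at least one normal byte" required alongside the control message, as
+// handled above; sendFD is a hypothetical helper name and assumes the
+// package's UnixRights helper for building the SCM_RIGHTS payload.
+func sendFD(sock int, fd int) error {
+ oob := UnixRights(fd)
+ _, err := SendmsgN(sock, []byte{0}, oob, nil, 0)
+ return err
+}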
+
+// BindToDevice binds the socket associated with fd to device.
+func BindToDevice(fd int, device string) (err error) {
+ return SetsockoptString(fd, SOL_SOCKET, SO_BINDTODEVICE, device)
+}
+
+//sys ptrace(request int, pid int, addr uintptr, data uintptr) (err error)
+
+func ptracePeek(req int, pid int, addr uintptr, out []byte) (count int, err error) {
+ // The peek requests are machine-word oriented, so we wrap them
+ // to retrieve arbitrary-length data.
+
+ // The ptrace syscall differs from glibc's ptrace:
+ // a peek returns the word in *data, not as the return value.
+
+ var buf [sizeofPtr]byte
+
+ // Leading edge. PEEKTEXT/PEEKDATA don't require aligned
+ // access (PEEKUSER warns that it might), but if we don't
+ // align our reads, we might straddle an unmapped page
+ // boundary and not get the bytes leading up to the page
+ // boundary.
+ n := 0
+ if addr%sizeofPtr != 0 {
+ err = ptrace(req, pid, addr-addr%sizeofPtr, uintptr(unsafe.Pointer(&buf[0])))
+ if err != nil {
+ return 0, err
+ }
+ n += copy(out, buf[addr%sizeofPtr:])
+ out = out[n:]
+ }
+
+ // Remainder.
+ for len(out) > 0 {
+ // We use an internal buffer to guarantee alignment.
+ // It's not documented if this is necessary, but we're paranoid.
+ err = ptrace(req, pid, addr+uintptr(n), uintptr(unsafe.Pointer(&buf[0])))
+ if err != nil {
+ return n, err
+ }
+ copied := copy(out, buf[0:])
+ n += copied
+ out = out[copied:]
+ }
+
+ return n, nil
+}
+
+func PtracePeekText(pid int, addr uintptr, out []byte) (count int, err error) {
+ return ptracePeek(PTRACE_PEEKTEXT, pid, addr, out)
+}
+
+func PtracePeekData(pid int, addr uintptr, out []byte) (count int, err error) {
+ return ptracePeek(PTRACE_PEEKDATA, pid, addr, out)
+}
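+
+// Illustrative sketch (not upstream x/sys/unix code): reading an arbitrary,
+// possibly unaligned byte range from a traced process. The word alignment
+// described above is handled inside ptracePeek, so callers can pass any
+// address and length.
+func peekBytes(pid int, addr uintptr, n int) ([]byte, error) {
+ out := make([]byte, n)
+ if _, err := PtracePeekData(pid, addr, out); err != nil {
+ return nil, err
+ }
+ return out, nil
+}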
+
+func ptracePoke(pokeReq int, peekReq int, pid int, addr uintptr, data []byte) (count int, err error) {
+ // As for ptracePeek, we need to align our accesses to deal
+ // with the possibility of straddling an invalid page.
+
+ // Leading edge.
+ n := 0
+ if addr%sizeofPtr != 0 {
+ var buf [sizeofPtr]byte
+ err = ptrace(peekReq, pid, addr-addr%sizeofPtr, uintptr(unsafe.Pointer(&buf[0])))
+ if err != nil {
+ return 0, err
+ }
+ n += copy(buf[addr%sizeofPtr:], data)
+ word := *((*uintptr)(unsafe.Pointer(&buf[0])))
+ err = ptrace(pokeReq, pid, addr-addr%sizeofPtr, word)
+ if err != nil {
+ return 0, err
+ }
+ data = data[n:]
+ }
+
+ // Interior.
+ for len(data) > sizeofPtr {
+ word := *((*uintptr)(unsafe.Pointer(&data[0])))
+ err = ptrace(pokeReq, pid, addr+uintptr(n), word)
+ if err != nil {
+ return n, err
+ }
+ n += sizeofPtr
+ data = data[sizeofPtr:]
+ }
+
+ // Trailing edge.
+ if len(data) > 0 {
+ var buf [sizeofPtr]byte
+ err = ptrace(peekReq, pid, addr+uintptr(n), uintptr(unsafe.Pointer(&buf[0])))
+ if err != nil {
+ return n, err
+ }
+ copy(buf[0:], data)
+ word := *((*uintptr)(unsafe.Pointer(&buf[0])))
+ err = ptrace(pokeReq, pid, addr+uintptr(n), word)
+ if err != nil {
+ return n, err
+ }
+ n += len(data)
+ }
+
+ return n, nil
+}
+
+func PtracePokeText(pid int, addr uintptr, data []byte) (count int, err error) {
+ return ptracePoke(PTRACE_POKETEXT, PTRACE_PEEKTEXT, pid, addr, data)
+}
+
+func PtracePokeData(pid int, addr uintptr, data []byte) (count int, err error) {
+ return ptracePoke(PTRACE_POKEDATA, PTRACE_PEEKDATA, pid, addr, data)
+}
+
+func PtraceGetRegs(pid int, regsout *PtraceRegs) (err error) {
+ return ptrace(PTRACE_GETREGS, pid, 0, uintptr(unsafe.Pointer(regsout)))
+}
+
+func PtraceSetRegs(pid int, regs *PtraceRegs) (err error) {
+ return ptrace(PTRACE_SETREGS, pid, 0, uintptr(unsafe.Pointer(regs)))
+}
+
+func PtraceSetOptions(pid int, options int) (err error) {
+ return ptrace(PTRACE_SETOPTIONS, pid, 0, uintptr(options))
+}
+
+func PtraceGetEventMsg(pid int) (msg uint, err error) {
+ var data _C_long
+ err = ptrace(PTRACE_GETEVENTMSG, pid, 0, uintptr(unsafe.Pointer(&data)))
+ msg = uint(data)
+ return
+}
+
+func PtraceCont(pid int, signal int) (err error) {
+ return ptrace(PTRACE_CONT, pid, 0, uintptr(signal))
+}
+
+func PtraceSyscall(pid int, signal int) (err error) {
+ return ptrace(PTRACE_SYSCALL, pid, 0, uintptr(signal))
+}
+
+func PtraceSingleStep(pid int) (err error) { return ptrace(PTRACE_SINGLESTEP, pid, 0, 0) }
+
+func PtraceAttach(pid int) (err error) { return ptrace(PTRACE_ATTACH, pid, 0, 0) }
+
+func PtraceDetach(pid int) (err error) { return ptrace(PTRACE_DETACH, pid, 0, 0) }
+
+//sys reboot(magic1 uint, magic2 uint, cmd int, arg string) (err error)
+
+func Reboot(cmd int) (err error) {
+ return reboot(LINUX_REBOOT_MAGIC1, LINUX_REBOOT_MAGIC2, cmd, "")
+}
+
+func clen(n []byte) int {
+ for i := 0; i < len(n); i++ {
+ if n[i] == 0 {
+ return i
+ }
+ }
+ return len(n)
+}
+
+func ReadDirent(fd int, buf []byte) (n int, err error) {
+ return Getdents(fd, buf)
+}
+
+func ParseDirent(buf []byte, max int, names []string) (consumed int, count int, newnames []string) {
+ origlen := len(buf)
+ count = 0
+ for max != 0 && len(buf) > 0 {
+ dirent := (*Dirent)(unsafe.Pointer(&buf[0]))
+ buf = buf[dirent.Reclen:]
+ if dirent.Ino == 0 { // File absent in directory.
+ continue
+ }
+ bytes := (*[10000]byte)(unsafe.Pointer(&dirent.Name[0]))
+ var name = string(bytes[0:clen(bytes[:])])
+ if name == "." || name == ".." { // Useless names
+ continue
+ }
+ max--
+ count++
+ names = append(names, name)
+ }
+ return origlen - len(buf), count, names
+}
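+
+// Illustrative sketch (not upstream x/sys/unix code): draining a directory
+// with ReadDirent and ParseDirent. The buffer size is arbitrary; max=-1 asks
+// ParseDirent to consume every entry in the buffer.
+func readDirNames(fd int) ([]string, error) {
+ var names []string
+ buf := make([]byte, 4096)
+ for {
+ n, err := ReadDirent(fd, buf)
+ if err != nil {
+ return nil, err
+ }
+ if n == 0 {
+ return names, nil
+ }
+ _, _, names = ParseDirent(buf[:n], -1, names)
+ }
+}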
+
+//sys mount(source string, target string, fstype string, flags uintptr, data *byte) (err error)
+
+func Mount(source string, target string, fstype string, flags uintptr, data string) (err error) {
+ // Certain file systems get rather angry and return EINVAL if you give
+ // them an empty string of data rather than NULL.
+ if data == "" {
+ return mount(source, target, fstype, flags, nil)
+ }
+ datap, err := BytePtrFromString(data)
+ if err != nil {
+ return err
+ }
+ return mount(source, target, fstype, flags, datap)
+}
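+
+// Illustrative sketch (not upstream x/sys/unix code): a bind mount carries no
+// filesystem-specific options, so the empty-data case above applies and the
+// kernel receives a NULL data pointer. The paths are hypothetical.
+func bindMountSketch() error {
+ return Mount("/srv/data", "/mnt/data", "", MS_BIND, "")
+}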
+
+// Sendto
+// Recvfrom
+// Socketpair
+
+/*
+ * Direct access
+ */
+//sys Acct(path string) (err error)
+//sys Adjtimex(buf *Timex) (state int, err error)
+//sys Chdir(path string) (err error)
+//sys Chroot(path string) (err error)
+//sys ClockGettime(clockid int32, time *Timespec) (err error)
+//sys Close(fd int) (err error)
+//sys Dup(oldfd int) (fd int, err error)
+//sys Dup3(oldfd int, newfd int, flags int) (err error)
+//sysnb EpollCreate(size int) (fd int, err error)
+//sysnb EpollCreate1(flag int) (fd int, err error)
+//sysnb EpollCtl(epfd int, op int, fd int, event *EpollEvent) (err error)
+//sys EpollWait(epfd int, events []EpollEvent, msec int) (n int, err error)
+//sys Exit(code int) = SYS_EXIT_GROUP
+//sys Faccessat(dirfd int, path string, mode uint32, flags int) (err error)
+//sys Fallocate(fd int, mode uint32, off int64, len int64) (err error)
+//sys Fchdir(fd int) (err error)
+//sys Fchmod(fd int, mode uint32) (err error)
+//sys Fchmodat(dirfd int, path string, mode uint32, flags int) (err error)
+//sys Fchownat(dirfd int, path string, uid int, gid int, flags int) (err error)
+//sys fcntl(fd int, cmd int, arg int) (val int, err error)
+//sys Fdatasync(fd int) (err error)
+//sys Flock(fd int, how int) (err error)
+//sys Fsync(fd int) (err error)
+//sys Getdents(fd int, buf []byte) (n int, err error) = SYS_GETDENTS64
+//sysnb Getpgid(pid int) (pgid int, err error)
+
+func Getpgrp() (pid int) {
+ pid, _ = Getpgid(0)
+ return
+}
+
+//sysnb Getpid() (pid int)
+//sysnb Getppid() (ppid int)
+//sys Getpriority(which int, who int) (prio int, err error)
+//sysnb Getrusage(who int, rusage *Rusage) (err error)
+//sysnb Gettid() (tid int)
+//sys Getxattr(path string, attr string, dest []byte) (sz int, err error)
+//sys InotifyAddWatch(fd int, pathname string, mask uint32) (watchdesc int, err error)
+//sysnb InotifyInit1(flags int) (fd int, err error)
+//sysnb InotifyRmWatch(fd int, watchdesc uint32) (success int, err error)
+//sysnb Kill(pid int, sig syscall.Signal) (err error)
+//sys Klogctl(typ int, buf []byte) (n int, err error) = SYS_SYSLOG
+//sys Listxattr(path string, dest []byte) (sz int, err error)
+//sys Mkdirat(dirfd int, path string, mode uint32) (err error)
+//sys Mknodat(dirfd int, path string, mode uint32, dev int) (err error)
+//sys Nanosleep(time *Timespec, leftover *Timespec) (err error)
+//sys Pause() (err error)
+//sys PivotRoot(newroot string, putold string) (err error) = SYS_PIVOT_ROOT
+//sysnb prlimit(pid int, resource int, old *Rlimit, newlimit *Rlimit) (err error) = SYS_PRLIMIT64
+//sys Prctl(option int, arg2 uintptr, arg3 uintptr, arg4 uintptr, arg5 uintptr) (err error)
+//sys read(fd int, p []byte) (n int, err error)
+//sys Removexattr(path string, attr string) (err error)
+//sys Renameat(olddirfd int, oldpath string, newdirfd int, newpath string) (err error)
+//sys Setdomainname(p []byte) (err error)
+//sys Sethostname(p []byte) (err error)
+//sysnb Setpgid(pid int, pgid int) (err error)
+//sysnb Setsid() (pid int, err error)
+//sysnb Settimeofday(tv *Timeval) (err error)
+
+// issue 1435.
+// On Linux, Setuid and Setgid only affect the current thread, not the process.
+// This does not match what most callers expect, so we must return an error
+// here rather than letting the caller think that the call succeeded.
+
+func Setuid(uid int) (err error) {
+ return EOPNOTSUPP
+}
+
+func Setgid(gid int) (err error) {
+ return EOPNOTSUPP
+}
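+
+// Illustrative note (not upstream text): because of the per-thread behaviour
+// described above, both wrappers always return EOPNOTSUPP. Callers should not
+// expect these calls to drop privileges; they must do so by other means, for
+// example by starting the process as the target user.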
+
+//sys Setpriority(which int, who int, prio int) (err error)
+//sys Setxattr(path string, attr string, data []byte, flags int) (err error)
+//sys Sync()
+//sysnb Sysinfo(info *Sysinfo_t) (err error)
+//sys Tee(rfd int, wfd int, len int, flags int) (n int64, err error)
+//sysnb Tgkill(tgid int, tid int, sig syscall.Signal) (err error)
+//sysnb Times(tms *Tms) (ticks uintptr, err error)
+//sysnb Umask(mask int) (oldmask int)
+//sysnb Uname(buf *Utsname) (err error)
+//sys Unmount(target string, flags int) (err error) = SYS_UMOUNT2
+//sys Unshare(flags int) (err error)
+//sys Ustat(dev int, ubuf *Ustat_t) (err error)
+//sys Utime(path string, buf *Utimbuf) (err error)
+//sys write(fd int, p []byte) (n int, err error)
+//sys exitThread(code int) (err error) = SYS_EXIT
+//sys readlen(fd int, p *byte, np int) (n int, err error) = SYS_READ
+//sys writelen(fd int, p *byte, np int) (n int, err error) = SYS_WRITE
+
+// mmap varies by architecture; see syscall_linux_*.go.
+//sys munmap(addr uintptr, length uintptr) (err error)
+
+var mapper = &mmapper{
+ active: make(map[*byte][]byte),
+ mmap: mmap,
+ munmap: munmap,
+}
+
+func Mmap(fd int, offset int64, length int, prot int, flags int) (data []byte, err error) {
+ return mapper.Mmap(fd, offset, length, prot, flags)
+}
+
+func Munmap(b []byte) (err error) {
+ return mapper.Munmap(b)
+}
+
+//sys Madvise(b []byte, advice int) (err error)
+//sys Mprotect(b []byte, prot int) (err error)
+//sys Mlock(b []byte) (err error)
+//sys Munlock(b []byte) (err error)
+//sys Mlockall(flags int) (err error)
+//sys Munlockall() (err error)
+
+/*
+ * Unimplemented
+ */
+// AddKey
+// AfsSyscall
+// Alarm
+// ArchPrctl
+// Brk
+// Capget
+// Capset
+// ClockGetres
+// ClockNanosleep
+// ClockSettime
+// Clone
+// CreateModule
+// DeleteModule
+// EpollCtlOld
+// EpollPwait
+// EpollWaitOld
+// Eventfd
+// Execve
+// Fgetxattr
+// Flistxattr
+// Fork
+// Fremovexattr
+// Fsetxattr
+// Futex
+// GetKernelSyms
+// GetMempolicy
+// GetRobustList
+// GetThreadArea
+// Getitimer
+// Getpmsg
+// IoCancel
+// IoDestroy
+// IoGetevents
+// IoSetup
+// IoSubmit
+// Ioctl
+// IoprioGet
+// IoprioSet
+// KexecLoad
+// Keyctl
+// Lgetxattr
+// Llistxattr
+// LookupDcookie
+// Lremovexattr
+// Lsetxattr
+// Mbind
+// MigratePages
+// Mincore
+// ModifyLdt
+// Mount
+// MovePages
+// Mprotect
+// MqGetsetattr
+// MqNotify
+// MqOpen
+// MqTimedreceive
+// MqTimedsend
+// MqUnlink
+// Mremap
+// Msgctl
+// Msgget
+// Msgrcv
+// Msgsnd
+// Msync
+// Newfstatat
+// Nfsservctl
+// Personality
+// Poll
+// Ppoll
+// Pselect6
+// Ptrace
+// Putpmsg
+// QueryModule
+// Quotactl
+// Readahead
+// Readv
+// RemapFilePages
+// RequestKey
+// RestartSyscall
+// RtSigaction
+// RtSigpending
+// RtSigprocmask
+// RtSigqueueinfo
+// RtSigreturn
+// RtSigsuspend
+// RtSigtimedwait
+// SchedGetPriorityMax
+// SchedGetPriorityMin
+// SchedGetaffinity
+// SchedGetparam
+// SchedGetscheduler
+// SchedRrGetInterval
+// SchedSetaffinity
+// SchedSetparam
+// SchedYield
+// Security
+// Semctl
+// Semget
+// Semop
+// Semtimedop
+// SetMempolicy
+// SetRobustList
+// SetThreadArea
+// SetTidAddress
+// Shmat
+// Shmctl
+// Shmdt
+// Shmget
+// Sigaltstack
+// Signalfd
+// Swapoff
+// Swapon
+// Sysfs
+// TimerCreate
+// TimerDelete
+// TimerGetoverrun
+// TimerGettime
+// TimerSettime
+// Timerfd
+// Tkill (obsolete)
+// Tuxcall
+// Umount2
+// Uselib
+// Utimensat
+// Vfork
+// Vhangup
+// Vmsplice
+// Vserver
+// Waitid
+// _Sysctl
diff --git a/vendor/golang.org/x/sys/unix/syscall_linux_386.go b/vendor/golang.org/x/sys/unix/syscall_linux_386.go
new file mode 100644
index 00000000..7171219a
--- /dev/null
+++ b/vendor/golang.org/x/sys/unix/syscall_linux_386.go
@@ -0,0 +1,388 @@
+// Copyright 2009 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// TODO(rsc): Rewrite all nn(SP) references into name+(nn-8)(FP)
+// so that go vet can check that they are correct.
+
+// +build 386,linux
+
+package unix
+
+import (
+ "syscall"
+ "unsafe"
+)
+
+func Getpagesize() int { return 4096 }
+
+func TimespecToNsec(ts Timespec) int64 { return int64(ts.Sec)*1e9 + int64(ts.Nsec) }
+
+func NsecToTimespec(nsec int64) (ts Timespec) {
+ ts.Sec = int32(nsec / 1e9)
+ ts.Nsec = int32(nsec % 1e9)
+ return
+}
+
+func TimevalToNsec(tv Timeval) int64 { return int64(tv.Sec)*1e9 + int64(tv.Usec)*1e3 }
+
+func NsecToTimeval(nsec int64) (tv Timeval) {
+ nsec += 999 // round up to microsecond
+ tv.Sec = int32(nsec / 1e9)
+ tv.Usec = int32(nsec % 1e9 / 1e3)
+ return
+}
+
+//sysnb pipe(p *[2]_C_int) (err error)
+
+func Pipe(p []int) (err error) {
+ if len(p) != 2 {
+ return EINVAL
+ }
+ var pp [2]_C_int
+ err = pipe(&pp)
+ p[0] = int(pp[0])
+ p[1] = int(pp[1])
+ return
+}
+
+//sysnb pipe2(p *[2]_C_int, flags int) (err error)
+
+func Pipe2(p []int, flags int) (err error) {
+ if len(p) != 2 {
+ return EINVAL
+ }
+ var pp [2]_C_int
+ err = pipe2(&pp, flags)
+ p[0] = int(pp[0])
+ p[1] = int(pp[1])
+ return
+}
+
+// 64-bit file system and 32-bit uid calls
+// (386 default is 32-bit file system and 16-bit uid).
+//sys Dup2(oldfd int, newfd int) (err error)
+//sys Fadvise(fd int, offset int64, length int64, advice int) (err error) = SYS_FADVISE64_64
+//sys Fchown(fd int, uid int, gid int) (err error) = SYS_FCHOWN32
+//sys Fstat(fd int, stat *Stat_t) (err error) = SYS_FSTAT64
+//sys Ftruncate(fd int, length int64) (err error) = SYS_FTRUNCATE64
+//sysnb Getegid() (egid int) = SYS_GETEGID32
+//sysnb Geteuid() (euid int) = SYS_GETEUID32
+//sysnb Getgid() (gid int) = SYS_GETGID32
+//sysnb Getuid() (uid int) = SYS_GETUID32
+//sysnb InotifyInit() (fd int, err error)
+//sys Ioperm(from int, num int, on int) (err error)
+//sys Iopl(level int) (err error)
+//sys Lchown(path string, uid int, gid int) (err error) = SYS_LCHOWN32
+//sys Lstat(path string, stat *Stat_t) (err error) = SYS_LSTAT64
+//sys Pread(fd int, p []byte, offset int64) (n int, err error) = SYS_PREAD64
+//sys Pwrite(fd int, p []byte, offset int64) (n int, err error) = SYS_PWRITE64
+//sys sendfile(outfd int, infd int, offset *int64, count int) (written int, err error) = SYS_SENDFILE64
+//sys Setfsgid(gid int) (err error) = SYS_SETFSGID32
+//sys Setfsuid(uid int) (err error) = SYS_SETFSUID32
+//sysnb Setregid(rgid int, egid int) (err error) = SYS_SETREGID32
+//sysnb Setresgid(rgid int, egid int, sgid int) (err error) = SYS_SETRESGID32
+//sysnb Setresuid(ruid int, euid int, suid int) (err error) = SYS_SETRESUID32
+//sysnb Setreuid(ruid int, euid int) (err error) = SYS_SETREUID32
+//sys Splice(rfd int, roff *int64, wfd int, woff *int64, len int, flags int) (n int, err error)
+//sys Stat(path string, stat *Stat_t) (err error) = SYS_STAT64
+//sys SyncFileRange(fd int, off int64, n int64, flags int) (err error)
+//sys Truncate(path string, length int64) (err error) = SYS_TRUNCATE64
+//sysnb getgroups(n int, list *_Gid_t) (nn int, err error) = SYS_GETGROUPS32
+//sysnb setgroups(n int, list *_Gid_t) (err error) = SYS_SETGROUPS32
+//sys Select(nfd int, r *FdSet, w *FdSet, e *FdSet, timeout *Timeval) (n int, err error) = SYS__NEWSELECT
+
+//sys mmap2(addr uintptr, length uintptr, prot int, flags int, fd int, pageOffset uintptr) (xaddr uintptr, err error)
+
+func mmap(addr uintptr, length uintptr, prot int, flags int, fd int, offset int64) (xaddr uintptr, err error) {
+ page := uintptr(offset / 4096)
+ if offset != int64(page)*4096 {
+ return 0, EINVAL
+ }
+ return mmap2(addr, length, prot, flags, fd, page)
+}
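+
+// Illustrative note (not upstream text): mmap2 takes the offset in 4096-byte
+// pages, so for example offset 8192 becomes page 2, while a non-page-aligned
+// offset such as 4100 is rejected with EINVAL by the check above.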
+
+type rlimit32 struct {
+ Cur uint32
+ Max uint32
+}
+
+//sysnb getrlimit(resource int, rlim *rlimit32) (err error) = SYS_GETRLIMIT
+
+const rlimInf32 = ^uint32(0)
+const rlimInf64 = ^uint64(0)
+
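+// Descriptive note (not upstream text): Getrlimit and Setrlimit below try the
+// 64-bit prlimit64 syscall first and fall back to the legacy 32-bit
+// getrlimit/setrlimit only when the kernel reports ENOSYS, translating the
+// 32-bit infinity value (^uint32(0)) to and from the 64-bit one.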
+func Getrlimit(resource int, rlim *Rlimit) (err error) {
+ err = prlimit(0, resource, nil, rlim)
+ if err != ENOSYS {
+ return err
+ }
+
+ rl := rlimit32{}
+ err = getrlimit(resource, &rl)
+ if err != nil {
+ return
+ }
+
+ if rl.Cur == rlimInf32 {
+ rlim.Cur = rlimInf64
+ } else {
+ rlim.Cur = uint64(rl.Cur)
+ }
+
+ if rl.Max == rlimInf32 {
+ rlim.Max = rlimInf64
+ } else {
+ rlim.Max = uint64(rl.Max)
+ }
+ return
+}
+
+//sysnb setrlimit(resource int, rlim *rlimit32) (err error) = SYS_SETRLIMIT
+
+func Setrlimit(resource int, rlim *Rlimit) (err error) {
+ err = prlimit(0, resource, rlim, nil)
+ if err != ENOSYS {
+ return err
+ }
+
+ rl := rlimit32{}
+ if rlim.Cur == rlimInf64 {
+ rl.Cur = rlimInf32
+ } else if rlim.Cur < uint64(rlimInf32) {
+ rl.Cur = uint32(rlim.Cur)
+ } else {
+ return EINVAL
+ }
+ if rlim.Max == rlimInf64 {
+ rl.Max = rlimInf32
+ } else if rlim.Max < uint64(rlimInf32) {
+ rl.Max = uint32(rlim.Max)
+ } else {
+ return EINVAL
+ }
+
+ return setrlimit(resource, &rl)
+}
+
+// Underlying system call writes to newoffset via pointer.
+// Implemented in assembly to avoid allocation.
+func seek(fd int, offset int64, whence int) (newoffset int64, err syscall.Errno)
+
+func Seek(fd int, offset int64, whence int) (newoffset int64, err error) {
+ newoffset, errno := seek(fd, offset, whence)
+ if errno != 0 {
+ return 0, errno
+ }
+ return newoffset, nil
+}
+
+// Time functions.
+//sysnb Gettimeofday(tv *Timeval) (err error)
+//sysnb Time(t *Time_t) (tt Time_t, err error)
+
+// On x86 Linux, all the socket calls go through an extra indirection,
+// I think because the 5-register system call interface can't handle
+// the 6-argument calls like sendto and recvfrom. Instead the
+// arguments to the underlying system call are the number below
+// and a pointer to an array of uintptr. We hide the pointer in the
+// socketcall assembly to avoid allocation on every system call.
+
+const (
+ // see linux/net.h
+ _SOCKET = 1
+ _BIND = 2
+ _CONNECT = 3
+ _LISTEN = 4
+ _ACCEPT = 5
+ _GETSOCKNAME = 6
+ _GETPEERNAME = 7
+ _SOCKETPAIR = 8
+ _SEND = 9
+ _RECV = 10
+ _SENDTO = 11
+ _RECVFROM = 12
+ _SHUTDOWN = 13
+ _SETSOCKOPT = 14
+ _GETSOCKOPT = 15
+ _SENDMSG = 16
+ _RECVMSG = 17
+ _ACCEPT4 = 18
+ _RECVMMSG = 19
+ _SENDMMSG = 20
+)
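+
+// Illustrative note (not upstream text): with this scheme a socket(2) call
+// goes through rawsocketcall(_SOCKET, domain, typ, proto, 0, 0, 0) rather
+// than a dedicated per-call wrapper; the functions below spell this out for
+// each socket operation.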
+
+func socketcall(call int, a0, a1, a2, a3, a4, a5 uintptr) (n int, err syscall.Errno)
+func rawsocketcall(call int, a0, a1, a2, a3, a4, a5 uintptr) (n int, err syscall.Errno)
+
+func accept(s int, rsa *RawSockaddrAny, addrlen *_Socklen) (fd int, err error) {
+ fd, e := socketcall(_ACCEPT, uintptr(s), uintptr(unsafe.Pointer(rsa)), uintptr(unsafe.Pointer(addrlen)), 0, 0, 0)
+ if e != 0 {
+ err = e
+ }
+ return
+}
+
+func accept4(s int, rsa *RawSockaddrAny, addrlen *_Socklen, flags int) (fd int, err error) {
+ fd, e := socketcall(_ACCEPT4, uintptr(s), uintptr(unsafe.Pointer(rsa)), uintptr(unsafe.Pointer(addrlen)), uintptr(flags), 0, 0)
+ if e != 0 {
+ err = e
+ }
+ return
+}
+
+func getsockname(s int, rsa *RawSockaddrAny, addrlen *_Socklen) (err error) {
+ _, e := rawsocketcall(_GETSOCKNAME, uintptr(s), uintptr(unsafe.Pointer(rsa)), uintptr(unsafe.Pointer(addrlen)), 0, 0, 0)
+ if e != 0 {
+ err = e
+ }
+ return
+}
+
+func getpeername(s int, rsa *RawSockaddrAny, addrlen *_Socklen) (err error) {
+ _, e := rawsocketcall(_GETPEERNAME, uintptr(s), uintptr(unsafe.Pointer(rsa)), uintptr(unsafe.Pointer(addrlen)), 0, 0, 0)
+ if e != 0 {
+ err = e
+ }
+ return
+}
+
+func socketpair(domain int, typ int, flags int, fd *[2]int32) (err error) {
+ _, e := rawsocketcall(_SOCKETPAIR, uintptr(domain), uintptr(typ), uintptr(flags), uintptr(unsafe.Pointer(fd)), 0, 0)
+ if e != 0 {
+ err = e
+ }
+ return
+}
+
+func bind(s int, addr unsafe.Pointer, addrlen _Socklen) (err error) {
+ _, e := socketcall(_BIND, uintptr(s), uintptr(addr), uintptr(addrlen), 0, 0, 0)
+ if e != 0 {
+ err = e
+ }
+ return
+}
+
+func connect(s int, addr unsafe.Pointer, addrlen _Socklen) (err error) {
+ _, e := socketcall(_CONNECT, uintptr(s), uintptr(addr), uintptr(addrlen), 0, 0, 0)
+ if e != 0 {
+ err = e
+ }
+ return
+}
+
+func socket(domain int, typ int, proto int) (fd int, err error) {
+ fd, e := rawsocketcall(_SOCKET, uintptr(domain), uintptr(typ), uintptr(proto), 0, 0, 0)
+ if e != 0 {
+ err = e
+ }
+ return
+}
+
+func getsockopt(s int, level int, name int, val unsafe.Pointer, vallen *_Socklen) (err error) {
+ _, e := socketcall(_GETSOCKOPT, uintptr(s), uintptr(level), uintptr(name), uintptr(val), uintptr(unsafe.Pointer(vallen)), 0)
+ if e != 0 {
+ err = e
+ }
+ return
+}
+
+func setsockopt(s int, level int, name int, val unsafe.Pointer, vallen uintptr) (err error) {
+ _, e := socketcall(_SETSOCKOPT, uintptr(s), uintptr(level), uintptr(name), uintptr(val), vallen, 0)
+ if e != 0 {
+ err = e
+ }
+ return
+}
+
+func recvfrom(s int, p []byte, flags int, from *RawSockaddrAny, fromlen *_Socklen) (n int, err error) {
+ var base uintptr
+ if len(p) > 0 {
+ base = uintptr(unsafe.Pointer(&p[0]))
+ }
+ n, e := socketcall(_RECVFROM, uintptr(s), base, uintptr(len(p)), uintptr(flags), uintptr(unsafe.Pointer(from)), uintptr(unsafe.Pointer(fromlen)))
+ if e != 0 {
+ err = e
+ }
+ return
+}
+
+func sendto(s int, p []byte, flags int, to unsafe.Pointer, addrlen _Socklen) (err error) {
+ var base uintptr
+ if len(p) > 0 {
+ base = uintptr(unsafe.Pointer(&p[0]))
+ }
+ _, e := socketcall(_SENDTO, uintptr(s), base, uintptr(len(p)), uintptr(flags), uintptr(to), uintptr(addrlen))
+ if e != 0 {
+ err = e
+ }
+ return
+}
+
+func recvmsg(s int, msg *Msghdr, flags int) (n int, err error) {
+ n, e := socketcall(_RECVMSG, uintptr(s), uintptr(unsafe.Pointer(msg)), uintptr(flags), 0, 0, 0)
+ if e != 0 {
+ err = e
+ }
+ return
+}
+
+func sendmsg(s int, msg *Msghdr, flags int) (n int, err error) {
+ n, e := socketcall(_SENDMSG, uintptr(s), uintptr(unsafe.Pointer(msg)), uintptr(flags), 0, 0, 0)
+ if e != 0 {
+ err = e
+ }
+ return
+}
+
+func Listen(s int, n int) (err error) {
+ _, e := socketcall(_LISTEN, uintptr(s), uintptr(n), 0, 0, 0, 0)
+ if e != 0 {
+ err = e
+ }
+ return
+}
+
+func Shutdown(s, how int) (err error) {
+ _, e := socketcall(_SHUTDOWN, uintptr(s), uintptr(how), 0, 0, 0, 0)
+ if e != 0 {
+ err = e
+ }
+ return
+}
+
+func Fstatfs(fd int, buf *Statfs_t) (err error) {
+ _, _, e := Syscall(SYS_FSTATFS64, uintptr(fd), unsafe.Sizeof(*buf), uintptr(unsafe.Pointer(buf)))
+ if e != 0 {
+ err = e
+ }
+ return
+}
+
+func Statfs(path string, buf *Statfs_t) (err error) {
+ pathp, err := BytePtrFromString(path)
+ if err != nil {
+ return err
+ }
+ _, _, e := Syscall(SYS_STATFS64, uintptr(unsafe.Pointer(pathp)), unsafe.Sizeof(*buf), uintptr(unsafe.Pointer(buf)))
+ if e != 0 {
+ err = e
+ }
+ return
+}
+
+func (r *PtraceRegs) PC() uint64 { return uint64(uint32(r.Eip)) }
+
+func (r *PtraceRegs) SetPC(pc uint64) { r.Eip = int32(pc) }
+
+func (iov *Iovec) SetLen(length int) {
+ iov.Len = uint32(length)
+}
+
+func (msghdr *Msghdr) SetControllen(length int) {
+ msghdr.Controllen = uint32(length)
+}
+
+func (cmsg *Cmsghdr) SetLen(length int) {
+ cmsg.Len = uint32(length)
+}
diff --git a/vendor/golang.org/x/sys/unix/syscall_linux_amd64.go b/vendor/golang.org/x/sys/unix/syscall_linux_amd64.go
new file mode 100644
index 00000000..ae70c2af
--- /dev/null
+++ b/vendor/golang.org/x/sys/unix/syscall_linux_amd64.go
@@ -0,0 +1,146 @@
+// Copyright 2009 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// +build amd64,linux
+
+package unix
+
+import "syscall"
+
+//sys Dup2(oldfd int, newfd int) (err error)
+//sys Fadvise(fd int, offset int64, length int64, advice int) (err error) = SYS_FADVISE64
+//sys Fchown(fd int, uid int, gid int) (err error)
+//sys Fstat(fd int, stat *Stat_t) (err error)
+//sys Fstatfs(fd int, buf *Statfs_t) (err error)
+//sys Ftruncate(fd int, length int64) (err error)
+//sysnb Getegid() (egid int)
+//sysnb Geteuid() (euid int)
+//sysnb Getgid() (gid int)
+//sysnb Getrlimit(resource int, rlim *Rlimit) (err error)
+//sysnb Getuid() (uid int)
+//sysnb InotifyInit() (fd int, err error)
+//sys Ioperm(from int, num int, on int) (err error)
+//sys Iopl(level int) (err error)
+//sys Lchown(path string, uid int, gid int) (err error)
+//sys Listen(s int, n int) (err error)
+//sys Lstat(path string, stat *Stat_t) (err error)
+//sys Pread(fd int, p []byte, offset int64) (n int, err error) = SYS_PREAD64
+//sys Pwrite(fd int, p []byte, offset int64) (n int, err error) = SYS_PWRITE64
+//sys Seek(fd int, offset int64, whence int) (off int64, err error) = SYS_LSEEK
+//sys Select(nfd int, r *FdSet, w *FdSet, e *FdSet, timeout *Timeval) (n int, err error)
+//sys sendfile(outfd int, infd int, offset *int64, count int) (written int, err error)
+//sys Setfsgid(gid int) (err error)
+//sys Setfsuid(uid int) (err error)
+//sysnb Setregid(rgid int, egid int) (err error)
+//sysnb Setresgid(rgid int, egid int, sgid int) (err error)
+//sysnb Setresuid(ruid int, euid int, suid int) (err error)
+//sysnb Setrlimit(resource int, rlim *Rlimit) (err error)
+//sysnb Setreuid(ruid int, euid int) (err error)
+//sys Shutdown(fd int, how int) (err error)
+//sys Splice(rfd int, roff *int64, wfd int, woff *int64, len int, flags int) (n int64, err error)
+//sys Stat(path string, stat *Stat_t) (err error)
+//sys Statfs(path string, buf *Statfs_t) (err error)
+//sys SyncFileRange(fd int, off int64, n int64, flags int) (err error)
+//sys Truncate(path string, length int64) (err error)
+//sys accept(s int, rsa *RawSockaddrAny, addrlen *_Socklen) (fd int, err error)
+//sys accept4(s int, rsa *RawSockaddrAny, addrlen *_Socklen, flags int) (fd int, err error)
+//sys bind(s int, addr unsafe.Pointer, addrlen _Socklen) (err error)
+//sys connect(s int, addr unsafe.Pointer, addrlen _Socklen) (err error)
+//sysnb getgroups(n int, list *_Gid_t) (nn int, err error)
+//sysnb setgroups(n int, list *_Gid_t) (err error)
+//sys getsockopt(s int, level int, name int, val unsafe.Pointer, vallen *_Socklen) (err error)
+//sys setsockopt(s int, level int, name int, val unsafe.Pointer, vallen uintptr) (err error)
+//sysnb socket(domain int, typ int, proto int) (fd int, err error)
+//sysnb socketpair(domain int, typ int, proto int, fd *[2]int32) (err error)
+//sysnb getpeername(fd int, rsa *RawSockaddrAny, addrlen *_Socklen) (err error)
+//sysnb getsockname(fd int, rsa *RawSockaddrAny, addrlen *_Socklen) (err error)
+//sys recvfrom(fd int, p []byte, flags int, from *RawSockaddrAny, fromlen *_Socklen) (n int, err error)
+//sys sendto(s int, buf []byte, flags int, to unsafe.Pointer, addrlen _Socklen) (err error)
+//sys recvmsg(s int, msg *Msghdr, flags int) (n int, err error)
+//sys sendmsg(s int, msg *Msghdr, flags int) (n int, err error)
+//sys mmap(addr uintptr, length uintptr, prot int, flags int, fd int, offset int64) (xaddr uintptr, err error)
+
+//go:noescape
+func gettimeofday(tv *Timeval) (err syscall.Errno)
+
+func Gettimeofday(tv *Timeval) (err error) {
+ errno := gettimeofday(tv)
+ if errno != 0 {
+ return errno
+ }
+ return nil
+}
+
+func Getpagesize() int { return 4096 }
+
+func Time(t *Time_t) (tt Time_t, err error) {
+ var tv Timeval
+ errno := gettimeofday(&tv)
+ if errno != 0 {
+ return 0, errno
+ }
+ if t != nil {
+ *t = Time_t(tv.Sec)
+ }
+ return Time_t(tv.Sec), nil
+}
+
+func TimespecToNsec(ts Timespec) int64 { return int64(ts.Sec)*1e9 + int64(ts.Nsec) }
+
+func NsecToTimespec(nsec int64) (ts Timespec) {
+ ts.Sec = nsec / 1e9
+ ts.Nsec = nsec % 1e9
+ return
+}
+
+func TimevalToNsec(tv Timeval) int64 { return int64(tv.Sec)*1e9 + int64(tv.Usec)*1e3 }
+
+func NsecToTimeval(nsec int64) (tv Timeval) {
+ nsec += 999 // round up to microsecond
+ tv.Sec = nsec / 1e9
+ tv.Usec = nsec % 1e9 / 1e3
+ return
+}
+
+//sysnb pipe(p *[2]_C_int) (err error)
+
+func Pipe(p []int) (err error) {
+ if len(p) != 2 {
+ return EINVAL
+ }
+ var pp [2]_C_int
+ err = pipe(&pp)
+ p[0] = int(pp[0])
+ p[1] = int(pp[1])
+ return
+}
+
+//sysnb pipe2(p *[2]_C_int, flags int) (err error)
+
+func Pipe2(p []int, flags int) (err error) {
+ if len(p) != 2 {
+ return EINVAL
+ }
+ var pp [2]_C_int
+ err = pipe2(&pp, flags)
+ p[0] = int(pp[0])
+ p[1] = int(pp[1])
+ return
+}
+
+func (r *PtraceRegs) PC() uint64 { return r.Rip }
+
+func (r *PtraceRegs) SetPC(pc uint64) { r.Rip = pc }
+
+func (iov *Iovec) SetLen(length int) {
+ iov.Len = uint64(length)
+}
+
+func (msghdr *Msghdr) SetControllen(length int) {
+ msghdr.Controllen = uint64(length)
+}
+
+func (cmsg *Cmsghdr) SetLen(length int) {
+ cmsg.Len = uint64(length)
+}
diff --git a/vendor/golang.org/x/sys/unix/syscall_linux_arm.go b/vendor/golang.org/x/sys/unix/syscall_linux_arm.go
new file mode 100644
index 00000000..abc41c3e
--- /dev/null
+++ b/vendor/golang.org/x/sys/unix/syscall_linux_arm.go
@@ -0,0 +1,233 @@
+// Copyright 2009 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// +build arm,linux
+
+package unix
+
+import (
+ "syscall"
+ "unsafe"
+)
+
+func Getpagesize() int { return 4096 }
+
+func TimespecToNsec(ts Timespec) int64 { return int64(ts.Sec)*1e9 + int64(ts.Nsec) }
+
+func NsecToTimespec(nsec int64) (ts Timespec) {
+ ts.Sec = int32(nsec / 1e9)
+ ts.Nsec = int32(nsec % 1e9)
+ return
+}
+
+func NsecToTimeval(nsec int64) (tv Timeval) {
+ nsec += 999 // round up to microsecond
+ tv.Sec = int32(nsec / 1e9)
+ tv.Usec = int32(nsec % 1e9 / 1e3)
+ return
+}
+
+func Pipe(p []int) (err error) {
+ if len(p) != 2 {
+ return EINVAL
+ }
+ var pp [2]_C_int
+ err = pipe2(&pp, 0)
+ p[0] = int(pp[0])
+ p[1] = int(pp[1])
+ return
+}
+
+//sysnb pipe2(p *[2]_C_int, flags int) (err error)
+
+func Pipe2(p []int, flags int) (err error) {
+ if len(p) != 2 {
+ return EINVAL
+ }
+ var pp [2]_C_int
+ err = pipe2(&pp, flags)
+ p[0] = int(pp[0])
+ p[1] = int(pp[1])
+ return
+}
+
+// Underlying system call writes to newoffset via pointer.
+// Implemented in assembly to avoid allocation.
+func seek(fd int, offset int64, whence int) (newoffset int64, err syscall.Errno)
+
+func Seek(fd int, offset int64, whence int) (newoffset int64, err error) {
+ newoffset, errno := seek(fd, offset, whence)
+ if errno != 0 {
+ return 0, errno
+ }
+ return newoffset, nil
+}
+
+//sys accept(s int, rsa *RawSockaddrAny, addrlen *_Socklen) (fd int, err error)
+//sys accept4(s int, rsa *RawSockaddrAny, addrlen *_Socklen, flags int) (fd int, err error)
+//sys bind(s int, addr unsafe.Pointer, addrlen _Socklen) (err error)
+//sys connect(s int, addr unsafe.Pointer, addrlen _Socklen) (err error)
+//sysnb getgroups(n int, list *_Gid_t) (nn int, err error) = SYS_GETGROUPS32
+//sysnb setgroups(n int, list *_Gid_t) (err error) = SYS_SETGROUPS32
+//sys getsockopt(s int, level int, name int, val unsafe.Pointer, vallen *_Socklen) (err error)
+//sys setsockopt(s int, level int, name int, val unsafe.Pointer, vallen uintptr) (err error)
+//sysnb socket(domain int, typ int, proto int) (fd int, err error)
+//sysnb getpeername(fd int, rsa *RawSockaddrAny, addrlen *_Socklen) (err error)
+//sysnb getsockname(fd int, rsa *RawSockaddrAny, addrlen *_Socklen) (err error)
+//sys recvfrom(fd int, p []byte, flags int, from *RawSockaddrAny, fromlen *_Socklen) (n int, err error)
+//sys sendto(s int, buf []byte, flags int, to unsafe.Pointer, addrlen _Socklen) (err error)
+//sysnb socketpair(domain int, typ int, flags int, fd *[2]int32) (err error)
+//sys recvmsg(s int, msg *Msghdr, flags int) (n int, err error)
+//sys sendmsg(s int, msg *Msghdr, flags int) (n int, err error)
+
+// 64-bit file system and 32-bit uid calls
+// (16-bit uid calls are not always supported in newer kernels)
+//sys Dup2(oldfd int, newfd int) (err error)
+//sys Fchown(fd int, uid int, gid int) (err error) = SYS_FCHOWN32
+//sys Fstat(fd int, stat *Stat_t) (err error) = SYS_FSTAT64
+//sysnb Getegid() (egid int) = SYS_GETEGID32
+//sysnb Geteuid() (euid int) = SYS_GETEUID32
+//sysnb Getgid() (gid int) = SYS_GETGID32
+//sysnb Getuid() (uid int) = SYS_GETUID32
+//sysnb InotifyInit() (fd int, err error)
+//sys Lchown(path string, uid int, gid int) (err error) = SYS_LCHOWN32
+//sys Listen(s int, n int) (err error)
+//sys Lstat(path string, stat *Stat_t) (err error) = SYS_LSTAT64
+//sys sendfile(outfd int, infd int, offset *int64, count int) (written int, err error) = SYS_SENDFILE64
+//sys Select(nfd int, r *FdSet, w *FdSet, e *FdSet, timeout *Timeval) (n int, err error) = SYS__NEWSELECT
+//sys Setfsgid(gid int) (err error) = SYS_SETFSGID32
+//sys Setfsuid(uid int) (err error) = SYS_SETFSUID32
+//sysnb Setregid(rgid int, egid int) (err error) = SYS_SETREGID32
+//sysnb Setresgid(rgid int, egid int, sgid int) (err error) = SYS_SETRESGID32
+//sysnb Setresuid(ruid int, euid int, suid int) (err error) = SYS_SETRESUID32
+//sysnb Setreuid(ruid int, euid int) (err error) = SYS_SETREUID32
+//sys Shutdown(fd int, how int) (err error)
+//sys Splice(rfd int, roff *int64, wfd int, woff *int64, len int, flags int) (n int, err error)
+//sys Stat(path string, stat *Stat_t) (err error) = SYS_STAT64
+
+// Time functions.
+//sysnb Gettimeofday(tv *Timeval) (err error)
+//sysnb Time(t *Time_t) (tt Time_t, err error)
+
+//sys Pread(fd int, p []byte, offset int64) (n int, err error) = SYS_PREAD64
+//sys Pwrite(fd int, p []byte, offset int64) (n int, err error) = SYS_PWRITE64
+//sys Truncate(path string, length int64) (err error) = SYS_TRUNCATE64
+//sys Ftruncate(fd int, length int64) (err error) = SYS_FTRUNCATE64
+
+func Fadvise(fd int, offset int64, length int64, advice int) (err error) {
+ _, _, e1 := Syscall6(SYS_ARM_FADVISE64_64, uintptr(fd), uintptr(advice), uintptr(offset), uintptr(offset>>32), uintptr(length), uintptr(length>>32))
+ if e1 != 0 {
+ err = errnoErr(e1)
+ }
+ return
+}
+
+//sys mmap2(addr uintptr, length uintptr, prot int, flags int, fd int, pageOffset uintptr) (xaddr uintptr, err error)
+
+func Fstatfs(fd int, buf *Statfs_t) (err error) {
+ _, _, e := Syscall(SYS_FSTATFS64, uintptr(fd), unsafe.Sizeof(*buf), uintptr(unsafe.Pointer(buf)))
+ if e != 0 {
+ err = e
+ }
+ return
+}
+
+func Statfs(path string, buf *Statfs_t) (err error) {
+ pathp, err := BytePtrFromString(path)
+ if err != nil {
+ return err
+ }
+ _, _, e := Syscall(SYS_STATFS64, uintptr(unsafe.Pointer(pathp)), unsafe.Sizeof(*buf), uintptr(unsafe.Pointer(buf)))
+ if e != 0 {
+ err = e
+ }
+ return
+}
+
+func mmap(addr uintptr, length uintptr, prot int, flags int, fd int, offset int64) (xaddr uintptr, err error) {
+ page := uintptr(offset / 4096)
+ if offset != int64(page)*4096 {
+ return 0, EINVAL
+ }
+ return mmap2(addr, length, prot, flags, fd, page)
+}
+
+type rlimit32 struct {
+ Cur uint32
+ Max uint32
+}
+
+//sysnb getrlimit(resource int, rlim *rlimit32) (err error) = SYS_GETRLIMIT
+
+const rlimInf32 = ^uint32(0)
+const rlimInf64 = ^uint64(0)
+
+func Getrlimit(resource int, rlim *Rlimit) (err error) {
+ err = prlimit(0, resource, nil, rlim)
+ if err != ENOSYS {
+ return err
+ }
+
+ rl := rlimit32{}
+ err = getrlimit(resource, &rl)
+ if err != nil {
+ return
+ }
+
+ if rl.Cur == rlimInf32 {
+ rlim.Cur = rlimInf64
+ } else {
+ rlim.Cur = uint64(rl.Cur)
+ }
+
+ if rl.Max == rlimInf32 {
+ rlim.Max = rlimInf64
+ } else {
+ rlim.Max = uint64(rl.Max)
+ }
+ return
+}
+
+//sysnb setrlimit(resource int, rlim *rlimit32) (err error) = SYS_SETRLIMIT
+
+func Setrlimit(resource int, rlim *Rlimit) (err error) {
+ err = prlimit(0, resource, rlim, nil)
+ if err != ENOSYS {
+ return err
+ }
+
+ rl := rlimit32{}
+ if rlim.Cur == rlimInf64 {
+ rl.Cur = rlimInf32
+ } else if rlim.Cur < uint64(rlimInf32) {
+ rl.Cur = uint32(rlim.Cur)
+ } else {
+ return EINVAL
+ }
+ if rlim.Max == rlimInf64 {
+ rl.Max = rlimInf32
+ } else if rlim.Max < uint64(rlimInf32) {
+ rl.Max = uint32(rlim.Max)
+ } else {
+ return EINVAL
+ }
+
+ return setrlimit(resource, &rl)
+}
+
+func (r *PtraceRegs) PC() uint64 { return uint64(r.Uregs[15]) }
+
+func (r *PtraceRegs) SetPC(pc uint64) { r.Uregs[15] = uint32(pc) }
+
+func (iov *Iovec) SetLen(length int) {
+ iov.Len = uint32(length)
+}
+
+func (msghdr *Msghdr) SetControllen(length int) {
+ msghdr.Controllen = uint32(length)
+}
+
+func (cmsg *Cmsghdr) SetLen(length int) {
+ cmsg.Len = uint32(length)
+}
diff --git a/vendor/golang.org/x/sys/unix/syscall_linux_arm64.go b/vendor/golang.org/x/sys/unix/syscall_linux_arm64.go
new file mode 100644
index 00000000..f3d72dfd
--- /dev/null
+++ b/vendor/golang.org/x/sys/unix/syscall_linux_arm64.go
@@ -0,0 +1,150 @@
+// Copyright 2015 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// +build arm64,linux
+
+package unix
+
+const _SYS_dup = SYS_DUP3
+
+//sys Fchown(fd int, uid int, gid int) (err error)
+//sys Fstat(fd int, stat *Stat_t) (err error)
+//sys Fstatat(fd int, path string, stat *Stat_t, flags int) (err error)
+//sys Fstatfs(fd int, buf *Statfs_t) (err error)
+//sys Ftruncate(fd int, length int64) (err error)
+//sysnb Getegid() (egid int)
+//sysnb Geteuid() (euid int)
+//sysnb Getgid() (gid int)
+//sysnb Getrlimit(resource int, rlim *Rlimit) (err error)
+//sysnb Getuid() (uid int)
+//sys Listen(s int, n int) (err error)
+//sys Pread(fd int, p []byte, offset int64) (n int, err error) = SYS_PREAD64
+//sys Pwrite(fd int, p []byte, offset int64) (n int, err error) = SYS_PWRITE64
+//sys Seek(fd int, offset int64, whence int) (off int64, err error) = SYS_LSEEK
+//sys Select(nfd int, r *FdSet, w *FdSet, e *FdSet, timeout *Timeval) (n int, err error) = SYS_PSELECT6
+//sys sendfile(outfd int, infd int, offset *int64, count int) (written int, err error)
+//sys Setfsgid(gid int) (err error)
+//sys Setfsuid(uid int) (err error)
+//sysnb Setregid(rgid int, egid int) (err error)
+//sysnb Setresgid(rgid int, egid int, sgid int) (err error)
+//sysnb Setresuid(ruid int, euid int, suid int) (err error)
+//sysnb Setrlimit(resource int, rlim *Rlimit) (err error)
+//sysnb Setreuid(ruid int, euid int) (err error)
+//sys Shutdown(fd int, how int) (err error)
+//sys Splice(rfd int, roff *int64, wfd int, woff *int64, len int, flags int) (n int64, err error)
+
+func Stat(path string, stat *Stat_t) (err error) {
+ return Fstatat(AT_FDCWD, path, stat, 0)
+}
+
+func Lchown(path string, uid int, gid int) (err error) {
+ return Fchownat(AT_FDCWD, path, uid, gid, AT_SYMLINK_NOFOLLOW)
+}
+
+func Lstat(path string, stat *Stat_t) (err error) {
+ return Fstatat(AT_FDCWD, path, stat, AT_SYMLINK_NOFOLLOW)
+}
+
+//sys Statfs(path string, buf *Statfs_t) (err error)
+//sys SyncFileRange(fd int, off int64, n int64, flags int) (err error)
+//sys Truncate(path string, length int64) (err error)
+//sys accept(s int, rsa *RawSockaddrAny, addrlen *_Socklen) (fd int, err error)
+//sys accept4(s int, rsa *RawSockaddrAny, addrlen *_Socklen, flags int) (fd int, err error)
+//sys bind(s int, addr unsafe.Pointer, addrlen _Socklen) (err error)
+//sys connect(s int, addr unsafe.Pointer, addrlen _Socklen) (err error)
+//sysnb getgroups(n int, list *_Gid_t) (nn int, err error)
+//sysnb setgroups(n int, list *_Gid_t) (err error)
+//sys getsockopt(s int, level int, name int, val unsafe.Pointer, vallen *_Socklen) (err error)
+//sys setsockopt(s int, level int, name int, val unsafe.Pointer, vallen uintptr) (err error)
+//sysnb socket(domain int, typ int, proto int) (fd int, err error)
+//sysnb socketpair(domain int, typ int, proto int, fd *[2]int32) (err error)
+//sysnb getpeername(fd int, rsa *RawSockaddrAny, addrlen *_Socklen) (err error)
+//sysnb getsockname(fd int, rsa *RawSockaddrAny, addrlen *_Socklen) (err error)
+//sys recvfrom(fd int, p []byte, flags int, from *RawSockaddrAny, fromlen *_Socklen) (n int, err error)
+//sys sendto(s int, buf []byte, flags int, to unsafe.Pointer, addrlen _Socklen) (err error)
+//sys recvmsg(s int, msg *Msghdr, flags int) (n int, err error)
+//sys sendmsg(s int, msg *Msghdr, flags int) (n int, err error)
+//sys mmap(addr uintptr, length uintptr, prot int, flags int, fd int, offset int64) (xaddr uintptr, err error)
+
+func Getpagesize() int { return 65536 }
+
+//sysnb Gettimeofday(tv *Timeval) (err error)
+//sysnb Time(t *Time_t) (tt Time_t, err error)
+
+func TimespecToNsec(ts Timespec) int64 { return int64(ts.Sec)*1e9 + int64(ts.Nsec) }
+
+func NsecToTimespec(nsec int64) (ts Timespec) {
+ ts.Sec = nsec / 1e9
+ ts.Nsec = nsec % 1e9
+ return
+}
+
+func TimevalToNsec(tv Timeval) int64 { return int64(tv.Sec)*1e9 + int64(tv.Usec)*1e3 }
+
+func NsecToTimeval(nsec int64) (tv Timeval) {
+ nsec += 999 // round up to microsecond
+ tv.Sec = nsec / 1e9
+ tv.Usec = nsec % 1e9 / 1e3
+ return
+}
+
+func Pipe(p []int) (err error) {
+ if len(p) != 2 {
+ return EINVAL
+ }
+ var pp [2]_C_int
+ err = pipe2(&pp, 0)
+ p[0] = int(pp[0])
+ p[1] = int(pp[1])
+ return
+}
+
+//sysnb pipe2(p *[2]_C_int, flags int) (err error)
+
+func Pipe2(p []int, flags int) (err error) {
+ if len(p) != 2 {
+ return EINVAL
+ }
+ var pp [2]_C_int
+ err = pipe2(&pp, flags)
+ p[0] = int(pp[0])
+ p[1] = int(pp[1])
+ return
+}
+
+func (r *PtraceRegs) PC() uint64 { return r.Pc }
+
+func (r *PtraceRegs) SetPC(pc uint64) { r.Pc = pc }
+
+func (iov *Iovec) SetLen(length int) {
+ iov.Len = uint64(length)
+}
+
+func (msghdr *Msghdr) SetControllen(length int) {
+ msghdr.Controllen = uint64(length)
+}
+
+func (cmsg *Cmsghdr) SetLen(length int) {
+ cmsg.Len = uint64(length)
+}
+
+func InotifyInit() (fd int, err error) {
+ return InotifyInit1(0)
+}
+
+// TODO(dfc): constants that should be in zsysnum_linux_arm64.go, remove
+// these when the deprecated syscalls that the syscall package relies on
+// are removed.
+const (
+ SYS_GETPGRP = 1060
+ SYS_UTIMES = 1037
+ SYS_FUTIMESAT = 1066
+ SYS_PAUSE = 1061
+ SYS_USTAT = 1070
+ SYS_UTIME = 1063
+ SYS_LCHOWN = 1032
+ SYS_TIME = 1062
+ SYS_EPOLL_CREATE = 1042
+ SYS_EPOLL_WAIT = 1069
+)
diff --git a/vendor/golang.org/x/sys/unix/syscall_linux_ppc64x.go b/vendor/golang.org/x/sys/unix/syscall_linux_ppc64x.go
new file mode 100644
index 00000000..67eed633
--- /dev/null
+++ b/vendor/golang.org/x/sys/unix/syscall_linux_ppc64x.go
@@ -0,0 +1,96 @@
+// Copyright 2009 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// +build linux
+// +build ppc64 ppc64le
+
+package unix
+
+//sys Fchown(fd int, uid int, gid int) (err error)
+//sys Fstat(fd int, stat *Stat_t) (err error)
+//sys Fstatfs(fd int, buf *Statfs_t) (err error)
+//sys Ftruncate(fd int, length int64) (err error)
+//sysnb Getegid() (egid int)
+//sysnb Geteuid() (euid int)
+//sysnb Getgid() (gid int)
+//sysnb Getrlimit(resource int, rlim *Rlimit) (err error) = SYS_UGETRLIMIT
+//sysnb Getuid() (uid int)
+//sys Ioperm(from int, num int, on int) (err error)
+//sys Iopl(level int) (err error)
+//sys Lchown(path string, uid int, gid int) (err error)
+//sys Listen(s int, n int) (err error)
+//sys Lstat(path string, stat *Stat_t) (err error)
+//sys Pread(fd int, p []byte, offset int64) (n int, err error) = SYS_PREAD64
+//sys Pwrite(fd int, p []byte, offset int64) (n int, err error) = SYS_PWRITE64
+//sys Seek(fd int, offset int64, whence int) (off int64, err error) = SYS_LSEEK
+//sys Select(nfd int, r *FdSet, w *FdSet, e *FdSet, timeout *Timeval) (n int, err error)
+//sys sendfile(outfd int, infd int, offset *int64, count int) (written int, err error)
+//sys Setfsgid(gid int) (err error)
+//sys Setfsuid(uid int) (err error)
+//sysnb Setregid(rgid int, egid int) (err error)
+//sysnb Setresgid(rgid int, egid int, sgid int) (err error)
+//sysnb Setresuid(ruid int, euid int, suid int) (err error)
+//sysnb Setrlimit(resource int, rlim *Rlimit) (err error)
+//sysnb Setreuid(ruid int, euid int) (err error)
+//sys Shutdown(fd int, how int) (err error)
+//sys Splice(rfd int, roff *int64, wfd int, woff *int64, len int, flags int) (n int64, err error)
+//sys Stat(path string, stat *Stat_t) (err error)
+//sys Statfs(path string, buf *Statfs_t) (err error)
+//sys SyncFileRange(fd int, off int64, n int64, flags int) (err error) = SYS_SYNC_FILE_RANGE2
+//sys Truncate(path string, length int64) (err error)
+//sys accept(s int, rsa *RawSockaddrAny, addrlen *_Socklen) (fd int, err error)
+//sys accept4(s int, rsa *RawSockaddrAny, addrlen *_Socklen, flags int) (fd int, err error)
+//sys bind(s int, addr unsafe.Pointer, addrlen _Socklen) (err error)
+//sys connect(s int, addr unsafe.Pointer, addrlen _Socklen) (err error)
+//sysnb getgroups(n int, list *_Gid_t) (nn int, err error)
+//sysnb setgroups(n int, list *_Gid_t) (err error)
+//sys getsockopt(s int, level int, name int, val unsafe.Pointer, vallen *_Socklen) (err error)
+//sys setsockopt(s int, level int, name int, val unsafe.Pointer, vallen uintptr) (err error)
+//sysnb socket(domain int, typ int, proto int) (fd int, err error)
+//sysnb socketpair(domain int, typ int, proto int, fd *[2]int32) (err error)
+//sysnb getpeername(fd int, rsa *RawSockaddrAny, addrlen *_Socklen) (err error)
+//sysnb getsockname(fd int, rsa *RawSockaddrAny, addrlen *_Socklen) (err error)
+//sys recvfrom(fd int, p []byte, flags int, from *RawSockaddrAny, fromlen *_Socklen) (n int, err error)
+//sys sendto(s int, buf []byte, flags int, to unsafe.Pointer, addrlen _Socklen) (err error)
+//sys recvmsg(s int, msg *Msghdr, flags int) (n int, err error)
+//sys sendmsg(s int, msg *Msghdr, flags int) (n int, err error)
+//sys mmap(addr uintptr, length uintptr, prot int, flags int, fd int, offset int64) (xaddr uintptr, err error)
+
+func Getpagesize() int { return 65536 }
+
+//sysnb Gettimeofday(tv *Timeval) (err error)
+//sysnb Time(t *Time_t) (tt Time_t, err error)
+
+func TimespecToNsec(ts Timespec) int64 { return int64(ts.Sec)*1e9 + int64(ts.Nsec) }
+
+func NsecToTimespec(nsec int64) (ts Timespec) {
+ ts.Sec = nsec / 1e9
+ ts.Nsec = nsec % 1e9
+ return
+}
+
+func TimevalToNsec(tv Timeval) int64 { return int64(tv.Sec)*1e9 + int64(tv.Usec)*1e3 }
+
+func NsecToTimeval(nsec int64) (tv Timeval) {
+ nsec += 999 // round up to microsecond
+ tv.Sec = nsec / 1e9
+ tv.Usec = nsec % 1e9 / 1e3
+ return
+}
+
+func (r *PtraceRegs) PC() uint64 { return r.Nip }
+
+func (r *PtraceRegs) SetPC(pc uint64) { r.Nip = pc }
+
+func (iov *Iovec) SetLen(length int) {
+ iov.Len = uint64(length)
+}
+
+func (msghdr *Msghdr) SetControllen(length int) {
+ msghdr.Controllen = uint64(length)
+}
+
+func (cmsg *Cmsghdr) SetLen(length int) {
+ cmsg.Len = uint64(length)
+}
diff --git a/vendor/golang.org/x/sys/unix/syscall_netbsd.go b/vendor/golang.org/x/sys/unix/syscall_netbsd.go
new file mode 100644
index 00000000..c4e945cd
--- /dev/null
+++ b/vendor/golang.org/x/sys/unix/syscall_netbsd.go
@@ -0,0 +1,492 @@
+// Copyright 2009,2010 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// NetBSD system calls.
+// This file is compiled as ordinary Go code,
+// but it is also input to mksyscall,
+// which parses the //sys lines and generates system call stubs.
+// Note that sometimes we use a lowercase //sys name and wrap
+// it in our own nicer implementation, either here or in
+// syscall_bsd.go or syscall_unix.go.
+
+package unix
+
+import (
+ "syscall"
+ "unsafe"
+)
+
+type SockaddrDatalink struct {
+ Len uint8
+ Family uint8
+ Index uint16
+ Type uint8
+ Nlen uint8
+ Alen uint8
+ Slen uint8
+ Data [12]int8
+ raw RawSockaddrDatalink
+}
+
+func Syscall9(trap, a1, a2, a3, a4, a5, a6, a7, a8, a9 uintptr) (r1, r2 uintptr, err syscall.Errno)
+
+func sysctlNodes(mib []_C_int) (nodes []Sysctlnode, err error) {
+ var olen uintptr
+
+ // Get a list of all sysctl nodes below the given MIB by performing
+ // a sysctl for the given MIB with CTL_QUERY appended.
+ mib = append(mib, CTL_QUERY)
+ qnode := Sysctlnode{Flags: SYSCTL_VERS_1}
+ qp := (*byte)(unsafe.Pointer(&qnode))
+ sz := unsafe.Sizeof(qnode)
+ if err = sysctl(mib, nil, &olen, qp, sz); err != nil {
+ return nil, err
+ }
+
+ // Now that we know the size, get the actual nodes.
+ nodes = make([]Sysctlnode, olen/sz)
+ np := (*byte)(unsafe.Pointer(&nodes[0]))
+ if err = sysctl(mib, np, &olen, qp, sz); err != nil {
+ return nil, err
+ }
+
+ return nodes, nil
+}
+
+func nametomib(name string) (mib []_C_int, err error) {
+
+ // Split name into components.
+ var parts []string
+ last := 0
+ for i := 0; i < len(name); i++ {
+ if name[i] == '.' {
+ parts = append(parts, name[last:i])
+ last = i + 1
+ }
+ }
+ parts = append(parts, name[last:])
+
+ // Discover the nodes and construct the MIB OID.
+ for partno, part := range parts {
+ nodes, err := sysctlNodes(mib)
+ if err != nil {
+ return nil, err
+ }
+ for _, node := range nodes {
+ n := make([]byte, 0)
+ for i := range node.Name {
+ if node.Name[i] != 0 {
+ n = append(n, byte(node.Name[i]))
+ }
+ }
+ if string(n) == part {
+ mib = append(mib, _C_int(node.Num))
+ break
+ }
+ }
+ if len(mib) != partno+1 {
+ return nil, EINVAL
+ }
+ }
+
+ return mib, nil
+}
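+
+// Illustrative sketch (not upstream x/sys/unix code): resolving a dotted
+// sysctl name to its numeric MIB using the node walk above. "kern.ostype" is
+// just an example of a standard NetBSD sysctl name.
+func mibForOstype() ([]_C_int, error) {
+ return nametomib("kern.ostype")
+}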
+
+// ParseDirent parses up to max directory entries in buf,
+// appending the names to names. It returns the number of
+// bytes consumed from buf, the number of entries added
+// to names, and the new names slice.
+func ParseDirent(buf []byte, max int, names []string) (consumed int, count int, newnames []string) {
+ origlen := len(buf)
+ for max != 0 && len(buf) > 0 {
+ dirent := (*Dirent)(unsafe.Pointer(&buf[0]))
+ if dirent.Reclen == 0 {
+ buf = nil
+ break
+ }
+ buf = buf[dirent.Reclen:]
+ if dirent.Fileno == 0 { // File absent in directory.
+ continue
+ }
+ bytes := (*[10000]byte)(unsafe.Pointer(&dirent.Name[0]))
+ var name = string(bytes[0:dirent.Namlen])
+ if name == "." || name == ".." { // Useless names
+ continue
+ }
+ max--
+ count++
+ names = append(names, name)
+ }
+ return origlen - len(buf), count, names
+}
+
+//sysnb pipe() (fd1 int, fd2 int, err error)
+func Pipe(p []int) (err error) {
+ if len(p) != 2 {
+ return EINVAL
+ }
+ p[0], p[1], err = pipe()
+ return
+}
+
+//sys getdents(fd int, buf []byte) (n int, err error)
+func Getdirentries(fd int, buf []byte, basep *uintptr) (n int, err error) {
+ return getdents(fd, buf)
+}
+
+// TODO
+func sendfile(outfd int, infd int, offset *int64, count int) (written int, err error) {
+ return -1, ENOSYS
+}
+
+/*
+ * Exposed directly
+ */
+//sys Access(path string, mode uint32) (err error)
+//sys Adjtime(delta *Timeval, olddelta *Timeval) (err error)
+//sys Chdir(path string) (err error)
+//sys Chflags(path string, flags int) (err error)
+//sys Chmod(path string, mode uint32) (err error)
+//sys Chown(path string, uid int, gid int) (err error)
+//sys Chroot(path string) (err error)
+//sys Close(fd int) (err error)
+//sys Dup(fd int) (nfd int, err error)
+//sys Dup2(from int, to int) (err error)
+//sys Exit(code int)
+//sys Fchdir(fd int) (err error)
+//sys Fchflags(fd int, flags int) (err error)
+//sys Fchmod(fd int, mode uint32) (err error)
+//sys Fchown(fd int, uid int, gid int) (err error)
+//sys Flock(fd int, how int) (err error)
+//sys Fpathconf(fd int, name int) (val int, err error)
+//sys Fstat(fd int, stat *Stat_t) (err error)
+//sys Fsync(fd int) (err error)
+//sys Ftruncate(fd int, length int64) (err error)
+//sysnb Getegid() (egid int)
+//sysnb Geteuid() (uid int)
+//sysnb Getgid() (gid int)
+//sysnb Getpgid(pid int) (pgid int, err error)
+//sysnb Getpgrp() (pgrp int)
+//sysnb Getpid() (pid int)
+//sysnb Getppid() (ppid int)
+//sys Getpriority(which int, who int) (prio int, err error)
+//sysnb Getrlimit(which int, lim *Rlimit) (err error)
+//sysnb Getrusage(who int, rusage *Rusage) (err error)
+//sysnb Getsid(pid int) (sid int, err error)
+//sysnb Gettimeofday(tv *Timeval) (err error)
+//sysnb Getuid() (uid int)
+//sys Issetugid() (tainted bool)
+//sys Kill(pid int, signum syscall.Signal) (err error)
+//sys Kqueue() (fd int, err error)
+//sys Lchown(path string, uid int, gid int) (err error)
+//sys Link(path string, link string) (err error)
+//sys Listen(s int, backlog int) (err error)
+//sys Lstat(path string, stat *Stat_t) (err error)
+//sys Mkdir(path string, mode uint32) (err error)
+//sys Mkfifo(path string, mode uint32) (err error)
+//sys Mknod(path string, mode uint32, dev int) (err error)
+//sys Mlock(b []byte) (err error)
+//sys Mlockall(flags int) (err error)
+//sys Mprotect(b []byte, prot int) (err error)
+//sys Munlock(b []byte) (err error)
+//sys Munlockall() (err error)
+//sys Nanosleep(time *Timespec, leftover *Timespec) (err error)
+//sys Open(path string, mode int, perm uint32) (fd int, err error)
+//sys Pathconf(path string, name int) (val int, err error)
+//sys Pread(fd int, p []byte, offset int64) (n int, err error)
+//sys Pwrite(fd int, p []byte, offset int64) (n int, err error)
+//sys read(fd int, p []byte) (n int, err error)
+//sys Readlink(path string, buf []byte) (n int, err error)
+//sys Rename(from string, to string) (err error)
+//sys Revoke(path string) (err error)
+//sys Rmdir(path string) (err error)
+//sys Seek(fd int, offset int64, whence int) (newoffset int64, err error) = SYS_LSEEK
+//sys Select(n int, r *FdSet, w *FdSet, e *FdSet, timeout *Timeval) (err error)
+//sysnb Setegid(egid int) (err error)
+//sysnb Seteuid(euid int) (err error)
+//sysnb Setgid(gid int) (err error)
+//sysnb Setpgid(pid int, pgid int) (err error)
+//sys Setpriority(which int, who int, prio int) (err error)
+//sysnb Setregid(rgid int, egid int) (err error)
+//sysnb Setreuid(ruid int, euid int) (err error)
+//sysnb Setrlimit(which int, lim *Rlimit) (err error)
+//sysnb Setsid() (pid int, err error)
+//sysnb Settimeofday(tp *Timeval) (err error)
+//sysnb Setuid(uid int) (err error)
+//sys Stat(path string, stat *Stat_t) (err error)
+//sys Symlink(path string, link string) (err error)
+//sys Sync() (err error)
+//sys Truncate(path string, length int64) (err error)
+//sys Umask(newmask int) (oldmask int)
+//sys Unlink(path string) (err error)
+//sys Unmount(path string, flags int) (err error)
+//sys write(fd int, p []byte) (n int, err error)
+//sys mmap(addr uintptr, length uintptr, prot int, flag int, fd int, pos int64) (ret uintptr, err error)
+//sys munmap(addr uintptr, length uintptr) (err error)
+//sys readlen(fd int, buf *byte, nbuf int) (n int, err error) = SYS_READ
+//sys writelen(fd int, buf *byte, nbuf int) (n int, err error) = SYS_WRITE
+
+/*
+ * Unimplemented
+ */
+// ____semctl13
+// __clone
+// __fhopen40
+// __fhstat40
+// __fhstatvfs140
+// __fstat30
+// __getcwd
+// __getfh30
+// __getlogin
+// __lstat30
+// __mount50
+// __msgctl13
+// __msync13
+// __ntp_gettime30
+// __posix_chown
+// __posix_fadvise50
+// __posix_fchown
+// __posix_lchown
+// __posix_rename
+// __setlogin
+// __shmctl13
+// __sigaction_sigtramp
+// __sigaltstack14
+// __sigpending14
+// __sigprocmask14
+// __sigsuspend14
+// __sigtimedwait
+// __stat30
+// __syscall
+// __vfork14
+// _ksem_close
+// _ksem_destroy
+// _ksem_getvalue
+// _ksem_init
+// _ksem_open
+// _ksem_post
+// _ksem_trywait
+// _ksem_unlink
+// _ksem_wait
+// _lwp_continue
+// _lwp_create
+// _lwp_ctl
+// _lwp_detach
+// _lwp_exit
+// _lwp_getname
+// _lwp_getprivate
+// _lwp_kill
+// _lwp_park
+// _lwp_self
+// _lwp_setname
+// _lwp_setprivate
+// _lwp_suspend
+// _lwp_unpark
+// _lwp_unpark_all
+// _lwp_wait
+// _lwp_wakeup
+// _pset_bind
+// _sched_getaffinity
+// _sched_getparam
+// _sched_setaffinity
+// _sched_setparam
+// acct
+// aio_cancel
+// aio_error
+// aio_fsync
+// aio_read
+// aio_return
+// aio_suspend
+// aio_write
+// break
+// clock_getres
+// clock_gettime
+// clock_settime
+// compat_09_ogetdomainname
+// compat_09_osetdomainname
+// compat_09_ouname
+// compat_10_omsgsys
+// compat_10_osemsys
+// compat_10_oshmsys
+// compat_12_fstat12
+// compat_12_getdirentries
+// compat_12_lstat12
+// compat_12_msync
+// compat_12_oreboot
+// compat_12_oswapon
+// compat_12_stat12
+// compat_13_sigaction13
+// compat_13_sigaltstack13
+// compat_13_sigpending13
+// compat_13_sigprocmask13
+// compat_13_sigreturn13
+// compat_13_sigsuspend13
+// compat_14___semctl
+// compat_14_msgctl
+// compat_14_shmctl
+// compat_16___sigaction14
+// compat_16___sigreturn14
+// compat_20_fhstatfs
+// compat_20_fstatfs
+// compat_20_getfsstat
+// compat_20_statfs
+// compat_30___fhstat30
+// compat_30___fstat13
+// compat_30___lstat13
+// compat_30___stat13
+// compat_30_fhopen
+// compat_30_fhstat
+// compat_30_fhstatvfs1
+// compat_30_getdents
+// compat_30_getfh
+// compat_30_ntp_gettime
+// compat_30_socket
+// compat_40_mount
+// compat_43_fstat43
+// compat_43_lstat43
+// compat_43_oaccept
+// compat_43_ocreat
+// compat_43_oftruncate
+// compat_43_ogetdirentries
+// compat_43_ogetdtablesize
+// compat_43_ogethostid
+// compat_43_ogethostname
+// compat_43_ogetkerninfo
+// compat_43_ogetpagesize
+// compat_43_ogetpeername
+// compat_43_ogetrlimit
+// compat_43_ogetsockname
+// compat_43_okillpg
+// compat_43_olseek
+// compat_43_ommap
+// compat_43_oquota
+// compat_43_orecv
+// compat_43_orecvfrom
+// compat_43_orecvmsg
+// compat_43_osend
+// compat_43_osendmsg
+// compat_43_osethostid
+// compat_43_osethostname
+// compat_43_osetrlimit
+// compat_43_osigblock
+// compat_43_osigsetmask
+// compat_43_osigstack
+// compat_43_osigvec
+// compat_43_otruncate
+// compat_43_owait
+// compat_43_stat43
+// execve
+// extattr_delete_fd
+// extattr_delete_file
+// extattr_delete_link
+// extattr_get_fd
+// extattr_get_file
+// extattr_get_link
+// extattr_list_fd
+// extattr_list_file
+// extattr_list_link
+// extattr_set_fd
+// extattr_set_file
+// extattr_set_link
+// extattrctl
+// fchroot
+// fdatasync
+// fgetxattr
+// fktrace
+// flistxattr
+// fork
+// fremovexattr
+// fsetxattr
+// fstatvfs1
+// fsync_range
+// getcontext
+// getitimer
+// getvfsstat
+// getxattr
+// ioctl
+// ktrace
+// lchflags
+// lchmod
+// lfs_bmapv
+// lfs_markv
+// lfs_segclean
+// lfs_segwait
+// lgetxattr
+// lio_listio
+// listxattr
+// llistxattr
+// lremovexattr
+// lseek
+// lsetxattr
+// lutimes
+// madvise
+// mincore
+// minherit
+// modctl
+// mq_close
+// mq_getattr
+// mq_notify
+// mq_open
+// mq_receive
+// mq_send
+// mq_setattr
+// mq_timedreceive
+// mq_timedsend
+// mq_unlink
+// mremap
+// msgget
+// msgrcv
+// msgsnd
+// nfssvc
+// ntp_adjtime
+// pmc_control
+// pmc_get_info
+// poll
+// pollts
+// preadv
+// profil
+// pselect
+// pset_assign
+// pset_create
+// pset_destroy
+// ptrace
+// pwritev
+// quotactl
+// rasctl
+// readv
+// reboot
+// removexattr
+// sa_enable
+// sa_preempt
+// sa_register
+// sa_setconcurrency
+// sa_stacks
+// sa_yield
+// sbrk
+// sched_yield
+// semconfig
+// semget
+// semop
+// setcontext
+// setitimer
+// setxattr
+// shmat
+// shmdt
+// shmget
+// sstk
+// statvfs1
+// swapctl
+// sysarch
+// syscall
+// timer_create
+// timer_delete
+// timer_getoverrun
+// timer_gettime
+// timer_settime
+// undelete
+// utrace
+// uuidgen
+// vadvise
+// vfork
+// writev
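
The Getdirentries and ParseDirent pair above is the NetBSD building block for directory listing: Getdirentries fills a caller-supplied buffer with raw Dirent records, and ParseDirent walks those records, skipping deleted entries and the "." / ".." names. A minimal caller-side sketch, assuming a platform covered by this file; the /tmp path and buffer size are arbitrary choices for illustration:

package main

import (
	"fmt"

	"golang.org/x/sys/unix"
)

func main() {
	fd, err := unix.Open("/tmp", unix.O_RDONLY, 0)
	if err != nil {
		panic(err)
	}
	defer unix.Close(fd)

	buf := make([]byte, 8192)
	var basep uintptr
	var names []string
	for {
		n, err := unix.Getdirentries(fd, buf, &basep)
		if err != nil || n == 0 {
			break
		}
		// ParseDirent consumes the raw records and appends the entry names.
		_, _, names = unix.ParseDirent(buf[:n], -1, names)
	}
	fmt.Println(names)
}
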
diff --git a/vendor/golang.org/x/sys/unix/syscall_netbsd_386.go b/vendor/golang.org/x/sys/unix/syscall_netbsd_386.go
new file mode 100644
index 00000000..1b0e1af1
--- /dev/null
+++ b/vendor/golang.org/x/sys/unix/syscall_netbsd_386.go
@@ -0,0 +1,44 @@
+// Copyright 2009 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// +build 386,netbsd
+
+package unix
+
+func Getpagesize() int { return 4096 }
+
+func TimespecToNsec(ts Timespec) int64 { return int64(ts.Sec)*1e9 + int64(ts.Nsec) }
+
+func NsecToTimespec(nsec int64) (ts Timespec) {
+ ts.Sec = int64(nsec / 1e9)
+ ts.Nsec = int32(nsec % 1e9)
+ return
+}
+
+func TimevalToNsec(tv Timeval) int64 { return int64(tv.Sec)*1e9 + int64(tv.Usec)*1e3 }
+
+func NsecToTimeval(nsec int64) (tv Timeval) {
+ nsec += 999 // round up to microsecond
+ tv.Usec = int32(nsec % 1e9 / 1e3)
+ tv.Sec = int64(nsec / 1e9)
+ return
+}
+
+func SetKevent(k *Kevent_t, fd, mode, flags int) {
+ k.Ident = uint32(fd)
+ k.Filter = uint32(mode)
+ k.Flags = uint32(flags)
+}
+
+func (iov *Iovec) SetLen(length int) {
+ iov.Len = uint32(length)
+}
+
+func (msghdr *Msghdr) SetControllen(length int) {
+ msghdr.Controllen = uint32(length)
+}
+
+func (cmsg *Cmsghdr) SetLen(length int) {
+ cmsg.Len = uint32(length)
+}
diff --git a/vendor/golang.org/x/sys/unix/syscall_netbsd_amd64.go b/vendor/golang.org/x/sys/unix/syscall_netbsd_amd64.go
new file mode 100644
index 00000000..1b6dcbe3
--- /dev/null
+++ b/vendor/golang.org/x/sys/unix/syscall_netbsd_amd64.go
@@ -0,0 +1,44 @@
+// Copyright 2009 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// +build amd64,netbsd
+
+package unix
+
+func Getpagesize() int { return 4096 }
+
+func TimespecToNsec(ts Timespec) int64 { return int64(ts.Sec)*1e9 + int64(ts.Nsec) }
+
+func NsecToTimespec(nsec int64) (ts Timespec) {
+ ts.Sec = int64(nsec / 1e9)
+ ts.Nsec = int64(nsec % 1e9)
+ return
+}
+
+func TimevalToNsec(tv Timeval) int64 { return int64(tv.Sec)*1e9 + int64(tv.Usec)*1e3 }
+
+func NsecToTimeval(nsec int64) (tv Timeval) {
+ nsec += 999 // round up to microsecond
+ tv.Usec = int32(nsec % 1e9 / 1e3)
+ tv.Sec = int64(nsec / 1e9)
+ return
+}
+
+func SetKevent(k *Kevent_t, fd, mode, flags int) {
+ k.Ident = uint64(fd)
+ k.Filter = uint32(mode)
+ k.Flags = uint32(flags)
+}
+
+func (iov *Iovec) SetLen(length int) {
+ iov.Len = uint64(length)
+}
+
+func (msghdr *Msghdr) SetControllen(length int) {
+ msghdr.Controllen = uint32(length)
+}
+
+func (cmsg *Cmsghdr) SetLen(length int) {
+ cmsg.Len = uint32(length)
+}
diff --git a/vendor/golang.org/x/sys/unix/syscall_netbsd_arm.go b/vendor/golang.org/x/sys/unix/syscall_netbsd_arm.go
new file mode 100644
index 00000000..87d1d6fe
--- /dev/null
+++ b/vendor/golang.org/x/sys/unix/syscall_netbsd_arm.go
@@ -0,0 +1,44 @@
+// Copyright 2013 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// +build arm,netbsd
+
+package unix
+
+func Getpagesize() int { return 4096 }
+
+func TimespecToNsec(ts Timespec) int64 { return int64(ts.Sec)*1e9 + int64(ts.Nsec) }
+
+func NsecToTimespec(nsec int64) (ts Timespec) {
+ ts.Sec = int64(nsec / 1e9)
+ ts.Nsec = int32(nsec % 1e9)
+ return
+}
+
+func TimevalToNsec(tv Timeval) int64 { return int64(tv.Sec)*1e9 + int64(tv.Usec)*1e3 }
+
+func NsecToTimeval(nsec int64) (tv Timeval) {
+ nsec += 999 // round up to microsecond
+ tv.Usec = int32(nsec % 1e9 / 1e3)
+ tv.Sec = int64(nsec / 1e9)
+ return
+}
+
+func SetKevent(k *Kevent_t, fd, mode, flags int) {
+ k.Ident = uint32(fd)
+ k.Filter = uint32(mode)
+ k.Flags = uint32(flags)
+}
+
+func (iov *Iovec) SetLen(length int) {
+ iov.Len = uint32(length)
+}
+
+func (msghdr *Msghdr) SetControllen(length int) {
+ msghdr.Controllen = uint32(length)
+}
+
+func (cmsg *Cmsghdr) SetLen(length int) {
+ cmsg.Len = uint32(length)
+}
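
The per-architecture SetKevent helpers above exist because Kevent_t field widths differ between 386, amd64 and arm, so the casts from plain ints have to live in GOARCH-specific files. A short sketch of how they are used together with the Kqueue and Kevent wrappers that x/sys/unix provides for the BSDs (illustrative only; it watches stdin for readability):

package main

import (
	"fmt"

	"golang.org/x/sys/unix"
)

func main() {
	kq, err := unix.Kqueue()
	if err != nil {
		panic(err)
	}
	defer unix.Close(kq)

	// Register interest in fd 0 (stdin) becoming readable.
	var change unix.Kevent_t
	unix.SetKevent(&change, 0, unix.EVFILT_READ, unix.EV_ADD|unix.EV_ENABLE)

	events := make([]unix.Kevent_t, 1)
	// Blocks until at least one registered event fires (nil timeout).
	n, err := unix.Kevent(kq, []unix.Kevent_t{change}, events, nil)
	if err != nil {
		panic(err)
	}
	fmt.Println("events ready:", n)
}
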
diff --git a/vendor/golang.org/x/sys/unix/syscall_no_getwd.go b/vendor/golang.org/x/sys/unix/syscall_no_getwd.go
new file mode 100644
index 00000000..530792ea
--- /dev/null
+++ b/vendor/golang.org/x/sys/unix/syscall_no_getwd.go
@@ -0,0 +1,11 @@
+// Copyright 2013 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// +build dragonfly freebsd netbsd openbsd
+
+package unix
+
+const ImplementsGetwd = false
+
+func Getwd() (string, error) { return "", ENOTSUP }
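
ImplementsGetwd tells callers whether the package offers a native Getwd on this platform; on these BSDs it does not, so callers are expected to supply their own fallback (historically the standard library consulted the PWD environment variable before walking the tree). A rough caller-side sketch of that pattern; the fallback shown here is illustrative, not the os package's actual implementation:

package main

import (
	"fmt"
	"os"

	"golang.org/x/sys/unix"
)

func getwd() (string, error) {
	if unix.ImplementsGetwd {
		return unix.Getwd()
	}
	// Fallback: trust $PWD when the kernel offers no getcwd-style call.
	if pwd := os.Getenv("PWD"); pwd != "" {
		return pwd, nil
	}
	return "", fmt.Errorf("working directory unavailable")
}

func main() {
	wd, err := getwd()
	fmt.Println(wd, err)
}
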
diff --git a/vendor/golang.org/x/sys/unix/syscall_openbsd.go b/vendor/golang.org/x/sys/unix/syscall_openbsd.go
new file mode 100644
index 00000000..246131d2
--- /dev/null
+++ b/vendor/golang.org/x/sys/unix/syscall_openbsd.go
@@ -0,0 +1,303 @@
+// Copyright 2009,2010 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// OpenBSD system calls.
+// This file is compiled as ordinary Go code,
+// but it is also input to mksyscall,
+// which parses the //sys lines and generates system call stubs.
+// Note that sometimes we use a lowercase //sys name and wrap
+// it in our own nicer implementation, either here or in
+// syscall_bsd.go or syscall_unix.go.
+
+package unix
+
+import (
+ "syscall"
+ "unsafe"
+)
+
+type SockaddrDatalink struct {
+ Len uint8
+ Family uint8
+ Index uint16
+ Type uint8
+ Nlen uint8
+ Alen uint8
+ Slen uint8
+ Data [24]int8
+ raw RawSockaddrDatalink
+}
+
+func Syscall9(trap, a1, a2, a3, a4, a5, a6, a7, a8, a9 uintptr) (r1, r2 uintptr, err syscall.Errno)
+
+func nametomib(name string) (mib []_C_int, err error) {
+
+ // Perform lookup via a binary search
+ left := 0
+ right := len(sysctlMib) - 1
+ for {
+ idx := left + (right-left)/2
+ switch {
+ case name == sysctlMib[idx].ctlname:
+ return sysctlMib[idx].ctloid, nil
+ case name > sysctlMib[idx].ctlname:
+ left = idx + 1
+ default:
+ right = idx - 1
+ }
+ if left > right {
+ break
+ }
+ }
+ return nil, EINVAL
+}
+
+// ParseDirent parses up to max directory entries in buf,
+// appending the names to names. It returns the number of
+// bytes consumed from buf, the number of entries added
+// to names, and the new names slice.
+func ParseDirent(buf []byte, max int, names []string) (consumed int, count int, newnames []string) {
+ origlen := len(buf)
+ for max != 0 && len(buf) > 0 {
+ dirent := (*Dirent)(unsafe.Pointer(&buf[0]))
+ if dirent.Reclen == 0 {
+ buf = nil
+ break
+ }
+ buf = buf[dirent.Reclen:]
+ if dirent.Fileno == 0 { // File absent in directory.
+ continue
+ }
+ bytes := (*[10000]byte)(unsafe.Pointer(&dirent.Name[0]))
+ var name = string(bytes[0:dirent.Namlen])
+ if name == "." || name == ".." { // Useless names
+ continue
+ }
+ max--
+ count++
+ names = append(names, name)
+ }
+ return origlen - len(buf), count, names
+}
+
+//sysnb pipe(p *[2]_C_int) (err error)
+func Pipe(p []int) (err error) {
+ if len(p) != 2 {
+ return EINVAL
+ }
+ var pp [2]_C_int
+ err = pipe(&pp)
+ p[0] = int(pp[0])
+ p[1] = int(pp[1])
+ return
+}
+
+//sys getdents(fd int, buf []byte) (n int, err error)
+func Getdirentries(fd int, buf []byte, basep *uintptr) (n int, err error) {
+ return getdents(fd, buf)
+}
+
+// TODO
+func sendfile(outfd int, infd int, offset *int64, count int) (written int, err error) {
+ return -1, ENOSYS
+}
+
+func Getfsstat(buf []Statfs_t, flags int) (n int, err error) {
+ var _p0 unsafe.Pointer
+ var bufsize uintptr
+ if len(buf) > 0 {
+ _p0 = unsafe.Pointer(&buf[0])
+ bufsize = unsafe.Sizeof(Statfs_t{}) * uintptr(len(buf))
+ }
+ r0, _, e1 := Syscall(SYS_GETFSSTAT, uintptr(_p0), bufsize, uintptr(flags))
+ n = int(r0)
+ if e1 != 0 {
+ err = e1
+ }
+ return
+}
+
+/*
+ * Exposed directly
+ */
+//sys Access(path string, mode uint32) (err error)
+//sys Adjtime(delta *Timeval, olddelta *Timeval) (err error)
+//sys Chdir(path string) (err error)
+//sys Chflags(path string, flags int) (err error)
+//sys Chmod(path string, mode uint32) (err error)
+//sys Chown(path string, uid int, gid int) (err error)
+//sys Chroot(path string) (err error)
+//sys Close(fd int) (err error)
+//sys Dup(fd int) (nfd int, err error)
+//sys Dup2(from int, to int) (err error)
+//sys Exit(code int)
+//sys Fchdir(fd int) (err error)
+//sys Fchflags(fd int, flags int) (err error)
+//sys Fchmod(fd int, mode uint32) (err error)
+//sys Fchown(fd int, uid int, gid int) (err error)
+//sys Flock(fd int, how int) (err error)
+//sys Fpathconf(fd int, name int) (val int, err error)
+//sys Fstat(fd int, stat *Stat_t) (err error)
+//sys Fstatfs(fd int, stat *Statfs_t) (err error)
+//sys Fsync(fd int) (err error)
+//sys Ftruncate(fd int, length int64) (err error)
+//sysnb Getegid() (egid int)
+//sysnb Geteuid() (uid int)
+//sysnb Getgid() (gid int)
+//sysnb Getpgid(pid int) (pgid int, err error)
+//sysnb Getpgrp() (pgrp int)
+//sysnb Getpid() (pid int)
+//sysnb Getppid() (ppid int)
+//sys Getpriority(which int, who int) (prio int, err error)
+//sysnb Getrlimit(which int, lim *Rlimit) (err error)
+//sysnb Getrusage(who int, rusage *Rusage) (err error)
+//sysnb Getsid(pid int) (sid int, err error)
+//sysnb Gettimeofday(tv *Timeval) (err error)
+//sysnb Getuid() (uid int)
+//sys Issetugid() (tainted bool)
+//sys Kill(pid int, signum syscall.Signal) (err error)
+//sys Kqueue() (fd int, err error)
+//sys Lchown(path string, uid int, gid int) (err error)
+//sys Link(path string, link string) (err error)
+//sys Listen(s int, backlog int) (err error)
+//sys Lstat(path string, stat *Stat_t) (err error)
+//sys Mkdir(path string, mode uint32) (err error)
+//sys Mkfifo(path string, mode uint32) (err error)
+//sys Mknod(path string, mode uint32, dev int) (err error)
+//sys Mlock(b []byte) (err error)
+//sys Mlockall(flags int) (err error)
+//sys Mprotect(b []byte, prot int) (err error)
+//sys Munlock(b []byte) (err error)
+//sys Munlockall() (err error)
+//sys Nanosleep(time *Timespec, leftover *Timespec) (err error)
+//sys Open(path string, mode int, perm uint32) (fd int, err error)
+//sys Pathconf(path string, name int) (val int, err error)
+//sys Pread(fd int, p []byte, offset int64) (n int, err error)
+//sys Pwrite(fd int, p []byte, offset int64) (n int, err error)
+//sys read(fd int, p []byte) (n int, err error)
+//sys Readlink(path string, buf []byte) (n int, err error)
+//sys Rename(from string, to string) (err error)
+//sys Revoke(path string) (err error)
+//sys Rmdir(path string) (err error)
+//sys Seek(fd int, offset int64, whence int) (newoffset int64, err error) = SYS_LSEEK
+//sys Select(n int, r *FdSet, w *FdSet, e *FdSet, timeout *Timeval) (err error)
+//sysnb Setegid(egid int) (err error)
+//sysnb Seteuid(euid int) (err error)
+//sysnb Setgid(gid int) (err error)
+//sys Setlogin(name string) (err error)
+//sysnb Setpgid(pid int, pgid int) (err error)
+//sys Setpriority(which int, who int, prio int) (err error)
+//sysnb Setregid(rgid int, egid int) (err error)
+//sysnb Setreuid(ruid int, euid int) (err error)
+//sysnb Setresgid(rgid int, egid int, sgid int) (err error)
+//sysnb Setresuid(ruid int, euid int, suid int) (err error)
+//sysnb Setrlimit(which int, lim *Rlimit) (err error)
+//sysnb Setsid() (pid int, err error)
+//sysnb Settimeofday(tp *Timeval) (err error)
+//sysnb Setuid(uid int) (err error)
+//sys Stat(path string, stat *Stat_t) (err error)
+//sys Statfs(path string, stat *Statfs_t) (err error)
+//sys Symlink(path string, link string) (err error)
+//sys Sync() (err error)
+//sys Truncate(path string, length int64) (err error)
+//sys Umask(newmask int) (oldmask int)
+//sys Unlink(path string) (err error)
+//sys Unmount(path string, flags int) (err error)
+//sys write(fd int, p []byte) (n int, err error)
+//sys mmap(addr uintptr, length uintptr, prot int, flag int, fd int, pos int64) (ret uintptr, err error)
+//sys munmap(addr uintptr, length uintptr) (err error)
+//sys readlen(fd int, buf *byte, nbuf int) (n int, err error) = SYS_READ
+//sys writelen(fd int, buf *byte, nbuf int) (n int, err error) = SYS_WRITE
+
+/*
+ * Unimplemented
+ */
+// __getcwd
+// __semctl
+// __syscall
+// __sysctl
+// adjfreq
+// break
+// clock_getres
+// clock_gettime
+// clock_settime
+// closefrom
+// execve
+// faccessat
+// fchmodat
+// fchownat
+// fcntl
+// fhopen
+// fhstat
+// fhstatfs
+// fork
+// fstatat
+// futimens
+// getfh
+// getgid
+// getitimer
+// getlogin
+// getresgid
+// getresuid
+// getrtable
+// getthrid
+// ioctl
+// ktrace
+// lfs_bmapv
+// lfs_markv
+// lfs_segclean
+// lfs_segwait
+// linkat
+// mincore
+// minherit
+// mkdirat
+// mkfifoat
+// mknodat
+// mount
+// mquery
+// msgctl
+// msgget
+// msgrcv
+// msgsnd
+// nfssvc
+// nnpfspioctl
+// openat
+// poll
+// preadv
+// profil
+// pwritev
+// quotactl
+// readlinkat
+// readv
+// reboot
+// renameat
+// rfork
+// sched_yield
+// semget
+// semop
+// setgroups
+// setitimer
+// setrtable
+// setsockopt
+// shmat
+// shmctl
+// shmdt
+// shmget
+// sigaction
+// sigaltstack
+// sigpending
+// sigprocmask
+// sigreturn
+// sigsuspend
+// symlinkat
+// sysarch
+// syscall
+// threxit
+// thrsigdivert
+// thrsleep
+// thrwakeup
+// unlinkat
+// utimensat
+// vfork
+// writev
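
Note that the OpenBSD Pipe wrapper above hands the kernel a [2]_C_int and copies the descriptors out, while the NetBSD variant earlier in this diff receives both descriptors as return values; the exported Pipe([]int) API is the same either way. A small usage sketch with minimal error handling:

package main

import (
	"fmt"

	"golang.org/x/sys/unix"
)

func main() {
	p := make([]int, 2) // p[0] is the read end, p[1] the write end
	if err := unix.Pipe(p); err != nil {
		panic(err)
	}
	defer unix.Close(p[0])
	defer unix.Close(p[1])

	if _, err := unix.Write(p[1], []byte("hello")); err != nil {
		panic(err)
	}
	buf := make([]byte, 5)
	n, _ := unix.Read(p[0], buf)
	fmt.Println(string(buf[:n]))
}
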
diff --git a/vendor/golang.org/x/sys/unix/syscall_openbsd_386.go b/vendor/golang.org/x/sys/unix/syscall_openbsd_386.go
new file mode 100644
index 00000000..9529b20e
--- /dev/null
+++ b/vendor/golang.org/x/sys/unix/syscall_openbsd_386.go
@@ -0,0 +1,44 @@
+// Copyright 2009 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// +build 386,openbsd
+
+package unix
+
+func Getpagesize() int { return 4096 }
+
+func TimespecToNsec(ts Timespec) int64 { return int64(ts.Sec)*1e9 + int64(ts.Nsec) }
+
+func NsecToTimespec(nsec int64) (ts Timespec) {
+ ts.Sec = int64(nsec / 1e9)
+ ts.Nsec = int32(nsec % 1e9)
+ return
+}
+
+func TimevalToNsec(tv Timeval) int64 { return int64(tv.Sec)*1e9 + int64(tv.Usec)*1e3 }
+
+func NsecToTimeval(nsec int64) (tv Timeval) {
+ nsec += 999 // round up to microsecond
+ tv.Usec = int32(nsec % 1e9 / 1e3)
+ tv.Sec = int64(nsec / 1e9)
+ return
+}
+
+func SetKevent(k *Kevent_t, fd, mode, flags int) {
+ k.Ident = uint32(fd)
+ k.Filter = int16(mode)
+ k.Flags = uint16(flags)
+}
+
+func (iov *Iovec) SetLen(length int) {
+ iov.Len = uint32(length)
+}
+
+func (msghdr *Msghdr) SetControllen(length int) {
+ msghdr.Controllen = uint32(length)
+}
+
+func (cmsg *Cmsghdr) SetLen(length int) {
+ cmsg.Len = uint32(length)
+}
diff --git a/vendor/golang.org/x/sys/unix/syscall_openbsd_amd64.go b/vendor/golang.org/x/sys/unix/syscall_openbsd_amd64.go
new file mode 100644
index 00000000..fc640294
--- /dev/null
+++ b/vendor/golang.org/x/sys/unix/syscall_openbsd_amd64.go
@@ -0,0 +1,44 @@
+// Copyright 2009 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// +build amd64,openbsd
+
+package unix
+
+func Getpagesize() int { return 4096 }
+
+func TimespecToNsec(ts Timespec) int64 { return int64(ts.Sec)*1e9 + int64(ts.Nsec) }
+
+func NsecToTimespec(nsec int64) (ts Timespec) {
+ ts.Sec = nsec / 1e9
+ ts.Nsec = nsec % 1e9
+ return
+}
+
+func TimevalToNsec(tv Timeval) int64 { return int64(tv.Sec)*1e9 + int64(tv.Usec)*1e3 }
+
+func NsecToTimeval(nsec int64) (tv Timeval) {
+ nsec += 999 // round up to microsecond
+ tv.Usec = nsec % 1e9 / 1e3
+ tv.Sec = nsec / 1e9
+ return
+}
+
+func SetKevent(k *Kevent_t, fd, mode, flags int) {
+ k.Ident = uint64(fd)
+ k.Filter = int16(mode)
+ k.Flags = uint16(flags)
+}
+
+func (iov *Iovec) SetLen(length int) {
+ iov.Len = uint64(length)
+}
+
+func (msghdr *Msghdr) SetControllen(length int) {
+ msghdr.Controllen = uint32(length)
+}
+
+func (cmsg *Cmsghdr) SetLen(length int) {
+ cmsg.Len = uint32(length)
+}
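
The NsecToTimeval helpers in these GOARCH files all share one piece of arithmetic: add 999 so the value rounds up to a whole microsecond, then split it into seconds and microseconds (only the destination field types differ per platform). A standalone sketch of that arithmetic in plain Go:

package main

import "fmt"

// nsecToSecUsec mirrors the arithmetic used by NsecToTimeval above:
// round up to a whole microsecond, then split into seconds and microseconds.
func nsecToSecUsec(nsec int64) (sec, usec int64) {
	nsec += 999 // round up to microsecond
	return nsec / 1e9, nsec % 1e9 / 1e3
}

func main() {
	// 1.5 seconds plus one extra nanosecond becomes 1s 500001us.
	fmt.Println(nsecToSecUsec(1500000001)) // prints: 1 500001
}
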
diff --git a/vendor/golang.org/x/sys/unix/syscall_solaris.go b/vendor/golang.org/x/sys/unix/syscall_solaris.go
new file mode 100644
index 00000000..00837326
--- /dev/null
+++ b/vendor/golang.org/x/sys/unix/syscall_solaris.go
@@ -0,0 +1,713 @@
+// Copyright 2009 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Solaris system calls.
+// This file is compiled as ordinary Go code,
+// but it is also input to mksyscall,
+// which parses the //sys lines and generates system call stubs.
+// Note that sometimes we use a lowercase //sys name and wrap
+// it in our own nicer implementation, either here or in
+// syscall_solaris.go or syscall_unix.go.
+
+package unix
+
+import (
+ "sync/atomic"
+ "syscall"
+ "unsafe"
+)
+
+// Implemented in runtime/syscall_solaris.go.
+type syscallFunc uintptr
+
+func rawSysvicall6(trap, nargs, a1, a2, a3, a4, a5, a6 uintptr) (r1, r2 uintptr, err syscall.Errno)
+func sysvicall6(trap, nargs, a1, a2, a3, a4, a5, a6 uintptr) (r1, r2 uintptr, err syscall.Errno)
+
+type SockaddrDatalink struct {
+ Family uint16
+ Index uint16
+ Type uint8
+ Nlen uint8
+ Alen uint8
+ Slen uint8
+ Data [244]int8
+ raw RawSockaddrDatalink
+}
+
+func clen(n []byte) int {
+ for i := 0; i < len(n); i++ {
+ if n[i] == 0 {
+ return i
+ }
+ }
+ return len(n)
+}
+
+// ParseDirent parses up to max directory entries in buf,
+// appending the names to names. It returns the number of
+// bytes consumed from buf, the number of entries added
+// to names, and the new names slice.
+func ParseDirent(buf []byte, max int, names []string) (consumed int, count int, newnames []string) {
+ origlen := len(buf)
+ for max != 0 && len(buf) > 0 {
+ dirent := (*Dirent)(unsafe.Pointer(&buf[0]))
+ if dirent.Reclen == 0 {
+ buf = nil
+ break
+ }
+ buf = buf[dirent.Reclen:]
+ if dirent.Ino == 0 { // File absent in directory.
+ continue
+ }
+ bytes := (*[10000]byte)(unsafe.Pointer(&dirent.Name[0]))
+ var name = string(bytes[0:clen(bytes[:])])
+ if name == "." || name == ".." { // Useless names
+ continue
+ }
+ max--
+ count++
+ names = append(names, name)
+ }
+ return origlen - len(buf), count, names
+}
+
+func pipe() (r uintptr, w uintptr, err uintptr)
+
+func Pipe(p []int) (err error) {
+ if len(p) != 2 {
+ return EINVAL
+ }
+ r0, w0, e1 := pipe()
+ if e1 != 0 {
+ err = syscall.Errno(e1)
+ }
+ p[0], p[1] = int(r0), int(w0)
+ return
+}
+
+func (sa *SockaddrInet4) sockaddr() (unsafe.Pointer, _Socklen, error) {
+ if sa.Port < 0 || sa.Port > 0xFFFF {
+ return nil, 0, EINVAL
+ }
+ sa.raw.Family = AF_INET
+ p := (*[2]byte)(unsafe.Pointer(&sa.raw.Port))
+ p[0] = byte(sa.Port >> 8)
+ p[1] = byte(sa.Port)
+ for i := 0; i < len(sa.Addr); i++ {
+ sa.raw.Addr[i] = sa.Addr[i]
+ }
+ return unsafe.Pointer(&sa.raw), SizeofSockaddrInet4, nil
+}
+
+func (sa *SockaddrInet6) sockaddr() (unsafe.Pointer, _Socklen, error) {
+ if sa.Port < 0 || sa.Port > 0xFFFF {
+ return nil, 0, EINVAL
+ }
+ sa.raw.Family = AF_INET6
+ p := (*[2]byte)(unsafe.Pointer(&sa.raw.Port))
+ p[0] = byte(sa.Port >> 8)
+ p[1] = byte(sa.Port)
+ sa.raw.Scope_id = sa.ZoneId
+ for i := 0; i < len(sa.Addr); i++ {
+ sa.raw.Addr[i] = sa.Addr[i]
+ }
+ return unsafe.Pointer(&sa.raw), SizeofSockaddrInet6, nil
+}
+
+func (sa *SockaddrUnix) sockaddr() (unsafe.Pointer, _Socklen, error) {
+ name := sa.Name
+ n := len(name)
+ if n >= len(sa.raw.Path) {
+ return nil, 0, EINVAL
+ }
+ sa.raw.Family = AF_UNIX
+ for i := 0; i < n; i++ {
+ sa.raw.Path[i] = int8(name[i])
+ }
+ // length is family (uint16), name, NUL.
+ sl := _Socklen(2)
+ if n > 0 {
+ sl += _Socklen(n) + 1
+ }
+ if sa.raw.Path[0] == '@' {
+ sa.raw.Path[0] = 0
+ // Don't count trailing NUL for abstract address.
+ sl--
+ }
+
+ return unsafe.Pointer(&sa.raw), sl, nil
+}
+
+//sys getsockname(fd int, rsa *RawSockaddrAny, addrlen *_Socklen) (err error) = libsocket.getsockname
+
+func Getsockname(fd int) (sa Sockaddr, err error) {
+ var rsa RawSockaddrAny
+ var len _Socklen = SizeofSockaddrAny
+ if err = getsockname(fd, &rsa, &len); err != nil {
+ return
+ }
+ return anyToSockaddr(&rsa)
+}
+
+const ImplementsGetwd = true
+
+//sys Getcwd(buf []byte) (n int, err error)
+
+func Getwd() (wd string, err error) {
+ var buf [PathMax]byte
+ // Getcwd will return an error if it failed for any reason.
+ _, err = Getcwd(buf[0:])
+ if err != nil {
+ return "", err
+ }
+ n := clen(buf[:])
+ if n < 1 {
+ return "", EINVAL
+ }
+ return string(buf[:n]), nil
+}
+
+/*
+ * Wrapped
+ */
+
+//sysnb getgroups(ngid int, gid *_Gid_t) (n int, err error)
+//sysnb setgroups(ngid int, gid *_Gid_t) (err error)
+
+func Getgroups() (gids []int, err error) {
+ n, err := getgroups(0, nil)
+ // Check for error and sanity check group count. Newer versions of
+ // Solaris allow up to 1024 (NGROUPS_MAX).
+ if n < 0 || n > 1024 {
+ if err != nil {
+ return nil, err
+ }
+ return nil, EINVAL
+ } else if n == 0 {
+ return nil, nil
+ }
+
+ a := make([]_Gid_t, n)
+ n, err = getgroups(n, &a[0])
+ if n == -1 {
+ return nil, err
+ }
+ gids = make([]int, n)
+ for i, v := range a[0:n] {
+ gids[i] = int(v)
+ }
+ return
+}
+
+func Setgroups(gids []int) (err error) {
+ if len(gids) == 0 {
+ return setgroups(0, nil)
+ }
+
+ a := make([]_Gid_t, len(gids))
+ for i, v := range gids {
+ a[i] = _Gid_t(v)
+ }
+ return setgroups(len(a), &a[0])
+}
+
+func ReadDirent(fd int, buf []byte) (n int, err error) {
+ // Final argument is (basep *uintptr) and the syscall doesn't take nil.
+ // TODO(rsc): Can we use a single global basep for all calls?
+ return Getdents(fd, buf, new(uintptr))
+}
+
+// Wait status is 7 bits at bottom, either 0 (exited),
+// 0x7F (stopped), or a signal number that caused an exit.
+// The 0x80 bit is whether there was a core dump.
+// An extra number (exit code, signal causing a stop)
+// is in the high bits.
+
+type WaitStatus uint32
+
+const (
+ mask = 0x7F
+ core = 0x80
+ shift = 8
+
+ exited = 0
+ stopped = 0x7F
+)
+
+func (w WaitStatus) Exited() bool { return w&mask == exited }
+
+func (w WaitStatus) ExitStatus() int {
+ if w&mask != exited {
+ return -1
+ }
+ return int(w >> shift)
+}
+
+func (w WaitStatus) Signaled() bool { return w&mask != stopped && w&mask != 0 }
+
+func (w WaitStatus) Signal() syscall.Signal {
+ sig := syscall.Signal(w & mask)
+ if sig == stopped || sig == 0 {
+ return -1
+ }
+ return sig
+}
+
+func (w WaitStatus) CoreDump() bool { return w.Signaled() && w&core != 0 }
+
+func (w WaitStatus) Stopped() bool { return w&mask == stopped && syscall.Signal(w>>shift) != SIGSTOP }
+
+func (w WaitStatus) Continued() bool { return w&mask == stopped && syscall.Signal(w>>shift) == SIGSTOP }
+
+func (w WaitStatus) StopSignal() syscall.Signal {
+ if !w.Stopped() {
+ return -1
+ }
+ return syscall.Signal(w>>shift) & 0xFF
+}
+
+func (w WaitStatus) TrapCause() int { return -1 }
+
+func wait4(pid uintptr, wstatus *WaitStatus, options uintptr, rusage *Rusage) (wpid uintptr, err uintptr)
+
+func Wait4(pid int, wstatus *WaitStatus, options int, rusage *Rusage) (wpid int, err error) {
+ r0, e1 := wait4(uintptr(pid), wstatus, uintptr(options), rusage)
+ if e1 != 0 {
+ err = syscall.Errno(e1)
+ }
+ return int(r0), err
+}
+
+func gethostname() (name string, err uintptr)
+
+func Gethostname() (name string, err error) {
+ name, e1 := gethostname()
+ if e1 != 0 {
+ err = syscall.Errno(e1)
+ }
+ return name, err
+}
+
+//sys utimes(path string, times *[2]Timeval) (err error)
+
+func Utimes(path string, tv []Timeval) (err error) {
+ if tv == nil {
+ return utimes(path, nil)
+ }
+ if len(tv) != 2 {
+ return EINVAL
+ }
+ return utimes(path, (*[2]Timeval)(unsafe.Pointer(&tv[0])))
+}
+
+//sys utimensat(fd int, path string, times *[2]Timespec, flag int) (err error)
+
+func UtimesNano(path string, ts []Timespec) error {
+ if ts == nil {
+ return utimensat(AT_FDCWD, path, nil, 0)
+ }
+ if len(ts) != 2 {
+ return EINVAL
+ }
+ return utimensat(AT_FDCWD, path, (*[2]Timespec)(unsafe.Pointer(&ts[0])), 0)
+}
+
+func UtimesNanoAt(dirfd int, path string, ts []Timespec, flags int) error {
+ if ts == nil {
+ return utimensat(dirfd, path, nil, flags)
+ }
+ if len(ts) != 2 {
+ return EINVAL
+ }
+ return utimensat(dirfd, path, (*[2]Timespec)(unsafe.Pointer(&ts[0])), flags)
+}
+
+//sys fcntl(fd int, cmd int, arg int) (val int, err error)
+
+// FcntlFlock performs a fcntl syscall for the F_GETLK, F_SETLK or F_SETLKW command.
+func FcntlFlock(fd uintptr, cmd int, lk *Flock_t) error {
+ _, _, e1 := sysvicall6(uintptr(unsafe.Pointer(&procfcntl)), 3, uintptr(fd), uintptr(cmd), uintptr(unsafe.Pointer(lk)), 0, 0, 0)
+ if e1 != 0 {
+ return e1
+ }
+ return nil
+}
+
+//sys futimesat(fildes int, path *byte, times *[2]Timeval) (err error)
+
+func Futimesat(dirfd int, path string, tv []Timeval) error {
+ pathp, err := BytePtrFromString(path)
+ if err != nil {
+ return err
+ }
+ if tv == nil {
+ return futimesat(dirfd, pathp, nil)
+ }
+ if len(tv) != 2 {
+ return EINVAL
+ }
+ return futimesat(dirfd, pathp, (*[2]Timeval)(unsafe.Pointer(&tv[0])))
+}
+
+// Solaris doesn't have a futimes function because it allows NULL to be
+// specified as the path for futimesat. However, Go doesn't like
+// NULL-style string interfaces, so this simple wrapper is provided.
+func Futimes(fd int, tv []Timeval) error {
+ if tv == nil {
+ return futimesat(fd, nil, nil)
+ }
+ if len(tv) != 2 {
+ return EINVAL
+ }
+ return futimesat(fd, nil, (*[2]Timeval)(unsafe.Pointer(&tv[0])))
+}
+
+func anyToSockaddr(rsa *RawSockaddrAny) (Sockaddr, error) {
+ switch rsa.Addr.Family {
+ case AF_UNIX:
+ pp := (*RawSockaddrUnix)(unsafe.Pointer(rsa))
+ sa := new(SockaddrUnix)
+ // Assume path ends at NUL.
+ // This is not technically the Solaris semantics for
+ // abstract Unix domain sockets -- they are supposed
+ // to be uninterpreted fixed-size binary blobs -- but
+ // everyone uses this convention.
+ n := 0
+ for n < len(pp.Path) && pp.Path[n] != 0 {
+ n++
+ }
+ bytes := (*[10000]byte)(unsafe.Pointer(&pp.Path[0]))[0:n]
+ sa.Name = string(bytes)
+ return sa, nil
+
+ case AF_INET:
+ pp := (*RawSockaddrInet4)(unsafe.Pointer(rsa))
+ sa := new(SockaddrInet4)
+ p := (*[2]byte)(unsafe.Pointer(&pp.Port))
+ sa.Port = int(p[0])<<8 + int(p[1])
+ for i := 0; i < len(sa.Addr); i++ {
+ sa.Addr[i] = pp.Addr[i]
+ }
+ return sa, nil
+
+ case AF_INET6:
+ pp := (*RawSockaddrInet6)(unsafe.Pointer(rsa))
+ sa := new(SockaddrInet6)
+ p := (*[2]byte)(unsafe.Pointer(&pp.Port))
+ sa.Port = int(p[0])<<8 + int(p[1])
+ sa.ZoneId = pp.Scope_id
+ for i := 0; i < len(sa.Addr); i++ {
+ sa.Addr[i] = pp.Addr[i]
+ }
+ return sa, nil
+ }
+ return nil, EAFNOSUPPORT
+}
+
+//sys accept(s int, rsa *RawSockaddrAny, addrlen *_Socklen) (fd int, err error) = libsocket.accept
+
+func Accept(fd int) (nfd int, sa Sockaddr, err error) {
+ var rsa RawSockaddrAny
+ var len _Socklen = SizeofSockaddrAny
+ nfd, err = accept(fd, &rsa, &len)
+ if nfd == -1 {
+ return
+ }
+ sa, err = anyToSockaddr(&rsa)
+ if err != nil {
+ Close(nfd)
+ nfd = 0
+ }
+ return
+}
+
+//sys recvmsg(s int, msg *Msghdr, flags int) (n int, err error) = libsocket.recvmsg
+
+func Recvmsg(fd int, p, oob []byte, flags int) (n, oobn int, recvflags int, from Sockaddr, err error) {
+ var msg Msghdr
+ var rsa RawSockaddrAny
+ msg.Name = (*byte)(unsafe.Pointer(&rsa))
+ msg.Namelen = uint32(SizeofSockaddrAny)
+ var iov Iovec
+ if len(p) > 0 {
+ iov.Base = (*int8)(unsafe.Pointer(&p[0]))
+ iov.SetLen(len(p))
+ }
+ var dummy int8
+ if len(oob) > 0 {
+ // receive at least one normal byte
+ if len(p) == 0 {
+ iov.Base = &dummy
+ iov.SetLen(1)
+ }
+ msg.Accrights = (*int8)(unsafe.Pointer(&oob[0]))
+ }
+ msg.Iov = &iov
+ msg.Iovlen = 1
+ if n, err = recvmsg(fd, &msg, flags); n == -1 {
+ return
+ }
+ oobn = int(msg.Accrightslen)
+ // source address is only specified if the socket is unconnected
+ if rsa.Addr.Family != AF_UNSPEC {
+ from, err = anyToSockaddr(&rsa)
+ }
+ return
+}
+
+func Sendmsg(fd int, p, oob []byte, to Sockaddr, flags int) (err error) {
+ _, err = SendmsgN(fd, p, oob, to, flags)
+ return
+}
+
+//sys sendmsg(s int, msg *Msghdr, flags int) (n int, err error) = libsocket.sendmsg
+
+func SendmsgN(fd int, p, oob []byte, to Sockaddr, flags int) (n int, err error) {
+ var ptr unsafe.Pointer
+ var salen _Socklen
+ if to != nil {
+ ptr, salen, err = to.sockaddr()
+ if err != nil {
+ return 0, err
+ }
+ }
+ var msg Msghdr
+ msg.Name = (*byte)(unsafe.Pointer(ptr))
+ msg.Namelen = uint32(salen)
+ var iov Iovec
+ if len(p) > 0 {
+ iov.Base = (*int8)(unsafe.Pointer(&p[0]))
+ iov.SetLen(len(p))
+ }
+ var dummy int8
+ if len(oob) > 0 {
+ // send at least one normal byte
+ if len(p) == 0 {
+ iov.Base = &dummy
+ iov.SetLen(1)
+ }
+ msg.Accrights = (*int8)(unsafe.Pointer(&oob[0]))
+ }
+ msg.Iov = &iov
+ msg.Iovlen = 1
+ if n, err = sendmsg(fd, &msg, flags); err != nil {
+ return 0, err
+ }
+ if len(oob) > 0 && len(p) == 0 {
+ n = 0
+ }
+ return n, nil
+}
+
+//sys acct(path *byte) (err error)
+
+func Acct(path string) (err error) {
+ if len(path) == 0 {
+ // Assume caller wants to disable accounting.
+ return acct(nil)
+ }
+
+ pathp, err := BytePtrFromString(path)
+ if err != nil {
+ return err
+ }
+ return acct(pathp)
+}
+
+/*
+ * Expose the ioctl function
+ */
+
+//sys ioctl(fd int, req int, arg uintptr) (err error)
+
+func IoctlSetInt(fd int, req int, value int) (err error) {
+ return ioctl(fd, req, uintptr(value))
+}
+
+func IoctlSetWinsize(fd int, req int, value *Winsize) (err error) {
+ return ioctl(fd, req, uintptr(unsafe.Pointer(value)))
+}
+
+func IoctlSetTermios(fd int, req int, value *Termios) (err error) {
+ return ioctl(fd, req, uintptr(unsafe.Pointer(value)))
+}
+
+func IoctlSetTermio(fd int, req int, value *Termio) (err error) {
+ return ioctl(fd, req, uintptr(unsafe.Pointer(value)))
+}
+
+func IoctlGetInt(fd int, req int) (int, error) {
+ var value int
+ err := ioctl(fd, req, uintptr(unsafe.Pointer(&value)))
+ return value, err
+}
+
+func IoctlGetWinsize(fd int, req int) (*Winsize, error) {
+ var value Winsize
+ err := ioctl(fd, req, uintptr(unsafe.Pointer(&value)))
+ return &value, err
+}
+
+func IoctlGetTermios(fd int, req int) (*Termios, error) {
+ var value Termios
+ err := ioctl(fd, req, uintptr(unsafe.Pointer(&value)))
+ return &value, err
+}
+
+func IoctlGetTermio(fd int, req int) (*Termio, error) {
+ var value Termio
+ err := ioctl(fd, req, uintptr(unsafe.Pointer(&value)))
+ return &value, err
+}
+
+/*
+ * Exposed directly
+ */
+//sys Access(path string, mode uint32) (err error)
+//sys Adjtime(delta *Timeval, olddelta *Timeval) (err error)
+//sys Chdir(path string) (err error)
+//sys Chmod(path string, mode uint32) (err error)
+//sys Chown(path string, uid int, gid int) (err error)
+//sys Chroot(path string) (err error)
+//sys Close(fd int) (err error)
+//sys Creat(path string, mode uint32) (fd int, err error)
+//sys Dup(fd int) (nfd int, err error)
+//sys Dup2(oldfd int, newfd int) (err error)
+//sys Exit(code int)
+//sys Fchdir(fd int) (err error)
+//sys Fchmod(fd int, mode uint32) (err error)
+//sys Fchmodat(dirfd int, path string, mode uint32, flags int) (err error)
+//sys Fchown(fd int, uid int, gid int) (err error)
+//sys Fchownat(dirfd int, path string, uid int, gid int, flags int) (err error)
+//sys Fdatasync(fd int) (err error)
+//sys Fpathconf(fd int, name int) (val int, err error)
+//sys Fstat(fd int, stat *Stat_t) (err error)
+//sys Getdents(fd int, buf []byte, basep *uintptr) (n int, err error)
+//sysnb Getgid() (gid int)
+//sysnb Getpid() (pid int)
+//sysnb Getpgid(pid int) (pgid int, err error)
+//sysnb Getpgrp() (pgid int, err error)
+//sys Geteuid() (euid int)
+//sys Getegid() (egid int)
+//sys Getppid() (ppid int)
+//sys Getpriority(which int, who int) (n int, err error)
+//sysnb Getrlimit(which int, lim *Rlimit) (err error)
+//sysnb Getrusage(who int, rusage *Rusage) (err error)
+//sysnb Gettimeofday(tv *Timeval) (err error)
+//sysnb Getuid() (uid int)
+//sys Kill(pid int, signum syscall.Signal) (err error)
+//sys Lchown(path string, uid int, gid int) (err error)
+//sys Link(path string, link string) (err error)
+//sys Listen(s int, backlog int) (err error) = libsocket.listen
+//sys Lstat(path string, stat *Stat_t) (err error)
+//sys Madvise(b []byte, advice int) (err error)
+//sys Mkdir(path string, mode uint32) (err error)
+//sys Mkdirat(dirfd int, path string, mode uint32) (err error)
+//sys Mkfifo(path string, mode uint32) (err error)
+//sys Mkfifoat(dirfd int, path string, mode uint32) (err error)
+//sys Mknod(path string, mode uint32, dev int) (err error)
+//sys Mknodat(dirfd int, path string, mode uint32, dev int) (err error)
+//sys Mlock(b []byte) (err error)
+//sys Mlockall(flags int) (err error)
+//sys Mprotect(b []byte, prot int) (err error)
+//sys Munlock(b []byte) (err error)
+//sys Munlockall() (err error)
+//sys Nanosleep(time *Timespec, leftover *Timespec) (err error)
+//sys Open(path string, mode int, perm uint32) (fd int, err error)
+//sys Openat(dirfd int, path string, flags int, mode uint32) (fd int, err error)
+//sys Pathconf(path string, name int) (val int, err error)
+//sys Pause() (err error)
+//sys Pread(fd int, p []byte, offset int64) (n int, err error)
+//sys Pwrite(fd int, p []byte, offset int64) (n int, err error)
+//sys read(fd int, p []byte) (n int, err error)
+//sys Readlink(path string, buf []byte) (n int, err error)
+//sys Rename(from string, to string) (err error)
+//sys Renameat(olddirfd int, oldpath string, newdirfd int, newpath string) (err error)
+//sys Rmdir(path string) (err error)
+//sys Seek(fd int, offset int64, whence int) (newoffset int64, err error) = lseek
+//sysnb Setegid(egid int) (err error)
+//sysnb Seteuid(euid int) (err error)
+//sysnb Setgid(gid int) (err error)
+//sys Sethostname(p []byte) (err error)
+//sysnb Setpgid(pid int, pgid int) (err error)
+//sys Setpriority(which int, who int, prio int) (err error)
+//sysnb Setregid(rgid int, egid int) (err error)
+//sysnb Setreuid(ruid int, euid int) (err error)
+//sysnb Setrlimit(which int, lim *Rlimit) (err error)
+//sysnb Setsid() (pid int, err error)
+//sysnb Setuid(uid int) (err error)
+//sys Shutdown(s int, how int) (err error) = libsocket.shutdown
+//sys Stat(path string, stat *Stat_t) (err error)
+//sys Symlink(path string, link string) (err error)
+//sys Sync() (err error)
+//sysnb Times(tms *Tms) (ticks uintptr, err error)
+//sys Truncate(path string, length int64) (err error)
+//sys Fsync(fd int) (err error)
+//sys Ftruncate(fd int, length int64) (err error)
+//sys Umask(mask int) (oldmask int)
+//sysnb Uname(buf *Utsname) (err error)
+//sys Unmount(target string, flags int) (err error) = libc.umount
+//sys Unlink(path string) (err error)
+//sys Unlinkat(dirfd int, path string) (err error)
+//sys Ustat(dev int, ubuf *Ustat_t) (err error)
+//sys Utime(path string, buf *Utimbuf) (err error)
+//sys bind(s int, addr unsafe.Pointer, addrlen _Socklen) (err error) = libsocket.bind
+//sys connect(s int, addr unsafe.Pointer, addrlen _Socklen) (err error) = libsocket.connect
+//sys mmap(addr uintptr, length uintptr, prot int, flag int, fd int, pos int64) (ret uintptr, err error)
+//sys munmap(addr uintptr, length uintptr) (err error)
+//sys sendto(s int, buf []byte, flags int, to unsafe.Pointer, addrlen _Socklen) (err error) = libsocket.sendto
+//sys socket(domain int, typ int, proto int) (fd int, err error) = libsocket.socket
+//sysnb socketpair(domain int, typ int, proto int, fd *[2]int32) (err error) = libsocket.socketpair
+//sys write(fd int, p []byte) (n int, err error)
+//sys getsockopt(s int, level int, name int, val unsafe.Pointer, vallen *_Socklen) (err error) = libsocket.getsockopt
+//sysnb getpeername(fd int, rsa *RawSockaddrAny, addrlen *_Socklen) (err error) = libsocket.getpeername
+//sys setsockopt(s int, level int, name int, val unsafe.Pointer, vallen uintptr) (err error) = libsocket.setsockopt
+//sys recvfrom(fd int, p []byte, flags int, from *RawSockaddrAny, fromlen *_Socklen) (n int, err error) = libsocket.recvfrom
+
+func readlen(fd int, buf *byte, nbuf int) (n int, err error) {
+ r0, _, e1 := sysvicall6(uintptr(unsafe.Pointer(&procread)), 3, uintptr(fd), uintptr(unsafe.Pointer(buf)), uintptr(nbuf), 0, 0, 0)
+ n = int(r0)
+ if e1 != 0 {
+ err = e1
+ }
+ return
+}
+
+func writelen(fd int, buf *byte, nbuf int) (n int, err error) {
+ r0, _, e1 := sysvicall6(uintptr(unsafe.Pointer(&procwrite)), 3, uintptr(fd), uintptr(unsafe.Pointer(buf)), uintptr(nbuf), 0, 0, 0)
+ n = int(r0)
+ if e1 != 0 {
+ err = e1
+ }
+ return
+}
+
+var mapper = &mmapper{
+ active: make(map[*byte][]byte),
+ mmap: mmap,
+ munmap: munmap,
+}
+
+func Mmap(fd int, offset int64, length int, prot int, flags int) (data []byte, err error) {
+ return mapper.Mmap(fd, offset, length, prot, flags)
+}
+
+func Munmap(b []byte) (err error) {
+ return mapper.Munmap(b)
+}
+
+//sys sysconf(name int) (n int64, err error)
+
+// pageSize caches the value of Getpagesize, since it can't change
+// once the system is booted.
+var pageSize int64 // accessed atomically
+
+func Getpagesize() int {
+ n := atomic.LoadInt64(&pageSize)
+ if n == 0 {
+ n, _ = sysconf(_SC_PAGESIZE)
+ atomic.StoreInt64(&pageSize, n)
+ }
+ return int(n)
+}
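
The WaitStatus helpers in syscall_solaris.go above decode a packed 32-bit status word: the low 7 bits hold either 0 (normal exit), 0x7F (stopped) or the terminating signal number, bit 0x80 flags a core dump, and the exit code or stop signal sits in the next byte. A standalone sketch of the same decoding, using made-up status values for illustration:

package main

import "fmt"

// Constants mirror the mask/core/shift values used by WaitStatus above.
const (
	mask  = 0x7F
	core  = 0x80
	shift = 8
)

func describe(w uint32) string {
	switch {
	case w&mask == 0:
		return fmt.Sprintf("exited with status %d", w>>shift)
	case w&mask == 0x7F:
		return fmt.Sprintf("stopped by signal %d", (w>>shift)&0xFF)
	default:
		return fmt.Sprintf("killed by signal %d (core dump: %v)", w&mask, w&core != 0)
	}
}

func main() {
	fmt.Println(describe(3 << shift))   // exited with status 3
	fmt.Println(describe(0x80 | 11))    // killed by signal 11 (core dump: true)
	fmt.Println(describe(0x7F | 17<<8)) // stopped by signal 17
}
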
diff --git a/vendor/golang.org/x/sys/unix/syscall_solaris_amd64.go b/vendor/golang.org/x/sys/unix/syscall_solaris_amd64.go
new file mode 100644
index 00000000..2e44630c
--- /dev/null
+++ b/vendor/golang.org/x/sys/unix/syscall_solaris_amd64.go
@@ -0,0 +1,37 @@
+// Copyright 2009 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// +build amd64,solaris
+
+package unix
+
+func TimespecToNsec(ts Timespec) int64 { return int64(ts.Sec)*1e9 + int64(ts.Nsec) }
+
+func NsecToTimespec(nsec int64) (ts Timespec) {
+ ts.Sec = nsec / 1e9
+ ts.Nsec = nsec % 1e9
+ return
+}
+
+func TimevalToNsec(tv Timeval) int64 { return int64(tv.Sec)*1e9 + int64(tv.Usec)*1e3 }
+
+func NsecToTimeval(nsec int64) (tv Timeval) {
+ nsec += 999 // round up to microsecond
+ tv.Usec = nsec % 1e9 / 1e3
+ tv.Sec = int64(nsec / 1e9)
+ return
+}
+
+func (iov *Iovec) SetLen(length int) {
+ iov.Len = uint64(length)
+}
+
+func (cmsg *Cmsghdr) SetLen(length int) {
+ cmsg.Len = uint32(length)
+}
+
+func sendfile(outfd int, infd int, offset *int64, count int) (written int, err error) {
+ // TODO(aram): implement this, see issue 5847.
+ panic("unimplemented")
+}
diff --git a/vendor/golang.org/x/sys/unix/syscall_unix.go b/vendor/golang.org/x/sys/unix/syscall_unix.go
new file mode 100644
index 00000000..b46b2502
--- /dev/null
+++ b/vendor/golang.org/x/sys/unix/syscall_unix.go
@@ -0,0 +1,297 @@
+// Copyright 2009 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// +build darwin dragonfly freebsd linux netbsd openbsd solaris
+
+package unix
+
+import (
+ "runtime"
+ "sync"
+ "syscall"
+ "unsafe"
+)
+
+var (
+ Stdin = 0
+ Stdout = 1
+ Stderr = 2
+)
+
+const (
+ darwin64Bit = runtime.GOOS == "darwin" && sizeofPtr == 8
+ dragonfly64Bit = runtime.GOOS == "dragonfly" && sizeofPtr == 8
+ netbsd32Bit = runtime.GOOS == "netbsd" && sizeofPtr == 4
+)
+
+// Do the interface allocations only once for common
+// Errno values.
+var (
+ errEAGAIN error = syscall.EAGAIN
+ errEINVAL error = syscall.EINVAL
+ errENOENT error = syscall.ENOENT
+)
+
+// errnoErr returns common boxed Errno values, to prevent
+// allocations at runtime.
+func errnoErr(e syscall.Errno) error {
+ switch e {
+ case 0:
+ return nil
+ case EAGAIN:
+ return errEAGAIN
+ case EINVAL:
+ return errEINVAL
+ case ENOENT:
+ return errENOENT
+ }
+ return e
+}
+
+func Syscall(trap, a1, a2, a3 uintptr) (r1, r2 uintptr, err syscall.Errno)
+func Syscall6(trap, a1, a2, a3, a4, a5, a6 uintptr) (r1, r2 uintptr, err syscall.Errno)
+func RawSyscall(trap, a1, a2, a3 uintptr) (r1, r2 uintptr, err syscall.Errno)
+func RawSyscall6(trap, a1, a2, a3, a4, a5, a6 uintptr) (r1, r2 uintptr, err syscall.Errno)
+
+// Mmap manager, for use by operating system-specific implementations.
+
+type mmapper struct {
+ sync.Mutex
+ active map[*byte][]byte // active mappings; key is last byte in mapping
+ mmap func(addr, length uintptr, prot, flags, fd int, offset int64) (uintptr, error)
+ munmap func(addr uintptr, length uintptr) error
+}
+
+func (m *mmapper) Mmap(fd int, offset int64, length int, prot int, flags int) (data []byte, err error) {
+ if length <= 0 {
+ return nil, EINVAL
+ }
+
+ // Map the requested memory.
+ addr, errno := m.mmap(0, uintptr(length), prot, flags, fd, offset)
+ if errno != nil {
+ return nil, errno
+ }
+
+ // Slice memory layout
+ var sl = struct {
+ addr uintptr
+ len int
+ cap int
+ }{addr, length, length}
+
+ // Use unsafe to turn sl into a []byte.
+ b := *(*[]byte)(unsafe.Pointer(&sl))
+
+ // Register mapping in m and return it.
+ p := &b[cap(b)-1]
+ m.Lock()
+ defer m.Unlock()
+ m.active[p] = b
+ return b, nil
+}
+
+func (m *mmapper) Munmap(data []byte) (err error) {
+ if len(data) == 0 || len(data) != cap(data) {
+ return EINVAL
+ }
+
+ // Find the base of the mapping.
+ p := &data[cap(data)-1]
+ m.Lock()
+ defer m.Unlock()
+ b := m.active[p]
+ if b == nil || &b[0] != &data[0] {
+ return EINVAL
+ }
+
+ // Unmap the memory and update m.
+ if errno := m.munmap(uintptr(unsafe.Pointer(&b[0])), uintptr(len(b))); errno != nil {
+ return errno
+ }
+ delete(m.active, p)
+ return nil
+}
+
+func Read(fd int, p []byte) (n int, err error) {
+ n, err = read(fd, p)
+ if raceenabled {
+ if n > 0 {
+ raceWriteRange(unsafe.Pointer(&p[0]), n)
+ }
+ if err == nil {
+ raceAcquire(unsafe.Pointer(&ioSync))
+ }
+ }
+ return
+}
+
+func Write(fd int, p []byte) (n int, err error) {
+ if raceenabled {
+ raceReleaseMerge(unsafe.Pointer(&ioSync))
+ }
+ n, err = write(fd, p)
+ if raceenabled && n > 0 {
+ raceReadRange(unsafe.Pointer(&p[0]), n)
+ }
+ return
+}
+
+// For testing: clients can set this flag to force
+// creation of IPv6 sockets to return EAFNOSUPPORT.
+var SocketDisableIPv6 bool
+
+type Sockaddr interface {
+ sockaddr() (ptr unsafe.Pointer, len _Socklen, err error) // lowercase; only we can define Sockaddrs
+}
+
+type SockaddrInet4 struct {
+ Port int
+ Addr [4]byte
+ raw RawSockaddrInet4
+}
+
+type SockaddrInet6 struct {
+ Port int
+ ZoneId uint32
+ Addr [16]byte
+ raw RawSockaddrInet6
+}
+
+type SockaddrUnix struct {
+ Name string
+ raw RawSockaddrUnix
+}
+
+func Bind(fd int, sa Sockaddr) (err error) {
+ ptr, n, err := sa.sockaddr()
+ if err != nil {
+ return err
+ }
+ return bind(fd, ptr, n)
+}
+
+func Connect(fd int, sa Sockaddr) (err error) {
+ ptr, n, err := sa.sockaddr()
+ if err != nil {
+ return err
+ }
+ return connect(fd, ptr, n)
+}
+
+func Getpeername(fd int) (sa Sockaddr, err error) {
+ var rsa RawSockaddrAny
+ var len _Socklen = SizeofSockaddrAny
+ if err = getpeername(fd, &rsa, &len); err != nil {
+ return
+ }
+ return anyToSockaddr(&rsa)
+}
+
+func GetsockoptInt(fd, level, opt int) (value int, err error) {
+ var n int32
+ vallen := _Socklen(4)
+ err = getsockopt(fd, level, opt, unsafe.Pointer(&n), &vallen)
+ return int(n), err
+}
+
+func Recvfrom(fd int, p []byte, flags int) (n int, from Sockaddr, err error) {
+ var rsa RawSockaddrAny
+ var len _Socklen = SizeofSockaddrAny
+ if n, err = recvfrom(fd, p, flags, &rsa, &len); err != nil {
+ return
+ }
+ if rsa.Addr.Family != AF_UNSPEC {
+ from, err = anyToSockaddr(&rsa)
+ }
+ return
+}
+
+func Sendto(fd int, p []byte, flags int, to Sockaddr) (err error) {
+ ptr, n, err := to.sockaddr()
+ if err != nil {
+ return err
+ }
+ return sendto(fd, p, flags, ptr, n)
+}
+
+func SetsockoptByte(fd, level, opt int, value byte) (err error) {
+ return setsockopt(fd, level, opt, unsafe.Pointer(&value), 1)
+}
+
+func SetsockoptInt(fd, level, opt int, value int) (err error) {
+ var n = int32(value)
+ return setsockopt(fd, level, opt, unsafe.Pointer(&n), 4)
+}
+
+func SetsockoptInet4Addr(fd, level, opt int, value [4]byte) (err error) {
+ return setsockopt(fd, level, opt, unsafe.Pointer(&value[0]), 4)
+}
+
+func SetsockoptIPMreq(fd, level, opt int, mreq *IPMreq) (err error) {
+ return setsockopt(fd, level, opt, unsafe.Pointer(mreq), SizeofIPMreq)
+}
+
+func SetsockoptIPv6Mreq(fd, level, opt int, mreq *IPv6Mreq) (err error) {
+ return setsockopt(fd, level, opt, unsafe.Pointer(mreq), SizeofIPv6Mreq)
+}
+
+func SetsockoptICMPv6Filter(fd, level, opt int, filter *ICMPv6Filter) error {
+ return setsockopt(fd, level, opt, unsafe.Pointer(filter), SizeofICMPv6Filter)
+}
+
+func SetsockoptLinger(fd, level, opt int, l *Linger) (err error) {
+ return setsockopt(fd, level, opt, unsafe.Pointer(l), SizeofLinger)
+}
+
+func SetsockoptString(fd, level, opt int, s string) (err error) {
+ return setsockopt(fd, level, opt, unsafe.Pointer(&[]byte(s)[0]), uintptr(len(s)))
+}
+
+func SetsockoptTimeval(fd, level, opt int, tv *Timeval) (err error) {
+ return setsockopt(fd, level, opt, unsafe.Pointer(tv), unsafe.Sizeof(*tv))
+}
+
+func Socket(domain, typ, proto int) (fd int, err error) {
+ if domain == AF_INET6 && SocketDisableIPv6 {
+ return -1, EAFNOSUPPORT
+ }
+ fd, err = socket(domain, typ, proto)
+ return
+}
+
+func Socketpair(domain, typ, proto int) (fd [2]int, err error) {
+ var fdx [2]int32
+ err = socketpair(domain, typ, proto, &fdx)
+ if err == nil {
+ fd[0] = int(fdx[0])
+ fd[1] = int(fdx[1])
+ }
+ return
+}
+
+func Sendfile(outfd int, infd int, offset *int64, count int) (written int, err error) {
+ if raceenabled {
+ raceReleaseMerge(unsafe.Pointer(&ioSync))
+ }
+ return sendfile(outfd, infd, offset, count)
+}
+
+var ioSync int64
+
+func CloseOnExec(fd int) { fcntl(fd, F_SETFD, FD_CLOEXEC) }
+
+func SetNonblock(fd int, nonblocking bool) (err error) {
+ flag, err := fcntl(fd, F_GETFL, 0)
+ if err != nil {
+ return err
+ }
+ if nonblocking {
+ flag |= O_NONBLOCK
+ } else {
+ flag &= ^O_NONBLOCK
+ }
+ _, err = fcntl(fd, F_SETFL, flag)
+ return err
+}
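
The mmapper above turns the raw address returned by mmap into a []byte through a hand-built slice header, and it keys the table of active mappings by the address of the mapping's last byte, which is how Munmap later recovers the original mapping from whatever slice it is handed. A minimal sketch of the exported Mmap/Munmap wrappers in use; /etc/hosts is just a stand-in for any readable, non-empty file:

package main

import (
	"fmt"

	"golang.org/x/sys/unix"
)

func main() {
	fd, err := unix.Open("/etc/hosts", unix.O_RDONLY, 0)
	if err != nil {
		panic(err)
	}
	defer unix.Close(fd)

	var st unix.Stat_t
	if err := unix.Fstat(fd, &st); err != nil {
		panic(err)
	}

	// Map the whole file read-only; the returned slice is backed by the mapping.
	data, err := unix.Mmap(fd, 0, int(st.Size), unix.PROT_READ, unix.MAP_SHARED)
	if err != nil {
		panic(err)
	}
	defer unix.Munmap(data)

	fmt.Printf("mapped %d bytes\n", len(data))
}
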
diff --git a/vendor/golang.org/x/sys/unix/types_darwin.go b/vendor/golang.org/x/sys/unix/types_darwin.go
new file mode 100644
index 00000000..11532618
--- /dev/null
+++ b/vendor/golang.org/x/sys/unix/types_darwin.go
@@ -0,0 +1,250 @@
+// Copyright 2009 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// +build ignore
+
+/*
+Input to cgo -godefs. See also mkerrors.sh and mkall.sh
+*/
+
+// +godefs map struct_in_addr [4]byte /* in_addr */
+// +godefs map struct_in6_addr [16]byte /* in6_addr */
+
+package unix
+
+/*
+#define __DARWIN_UNIX03 0
+#define KERNEL
+#define _DARWIN_USE_64_BIT_INODE
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+
+enum {
+ sizeofPtr = sizeof(void*),
+};
+
+union sockaddr_all {
+ struct sockaddr s1; // this one gets used for fields
+ struct sockaddr_in s2; // these pad it out
+ struct sockaddr_in6 s3;
+ struct sockaddr_un s4;
+ struct sockaddr_dl s5;
+};
+
+struct sockaddr_any {
+ struct sockaddr addr;
+ char pad[sizeof(union sockaddr_all) - sizeof(struct sockaddr)];
+};
+
+*/
+import "C"
+
+// Machine characteristics; for internal use.
+
+const (
+ sizeofPtr = C.sizeofPtr
+ sizeofShort = C.sizeof_short
+ sizeofInt = C.sizeof_int
+ sizeofLong = C.sizeof_long
+ sizeofLongLong = C.sizeof_longlong
+)
+
+// Basic types
+
+type (
+ _C_short C.short
+ _C_int C.int
+ _C_long C.long
+ _C_long_long C.longlong
+)
+
+// Time
+
+type Timespec C.struct_timespec
+
+type Timeval C.struct_timeval
+
+type Timeval32 C.struct_timeval32
+
+// Processes
+
+type Rusage C.struct_rusage
+
+type Rlimit C.struct_rlimit
+
+type _Gid_t C.gid_t
+
+// Files
+
+type Stat_t C.struct_stat64
+
+type Statfs_t C.struct_statfs64
+
+type Flock_t C.struct_flock
+
+type Fstore_t C.struct_fstore
+
+type Radvisory_t C.struct_radvisory
+
+type Fbootstraptransfer_t C.struct_fbootstraptransfer
+
+type Log2phys_t C.struct_log2phys
+
+type Fsid C.struct_fsid
+
+type Dirent C.struct_dirent
+
+// Sockets
+
+type RawSockaddrInet4 C.struct_sockaddr_in
+
+type RawSockaddrInet6 C.struct_sockaddr_in6
+
+type RawSockaddrUnix C.struct_sockaddr_un
+
+type RawSockaddrDatalink C.struct_sockaddr_dl
+
+type RawSockaddr C.struct_sockaddr
+
+type RawSockaddrAny C.struct_sockaddr_any
+
+type _Socklen C.socklen_t
+
+type Linger C.struct_linger
+
+type Iovec C.struct_iovec
+
+type IPMreq C.struct_ip_mreq
+
+type IPv6Mreq C.struct_ipv6_mreq
+
+type Msghdr C.struct_msghdr
+
+type Cmsghdr C.struct_cmsghdr
+
+type Inet4Pktinfo C.struct_in_pktinfo
+
+type Inet6Pktinfo C.struct_in6_pktinfo
+
+type IPv6MTUInfo C.struct_ip6_mtuinfo
+
+type ICMPv6Filter C.struct_icmp6_filter
+
+const (
+ SizeofSockaddrInet4 = C.sizeof_struct_sockaddr_in
+ SizeofSockaddrInet6 = C.sizeof_struct_sockaddr_in6
+ SizeofSockaddrAny = C.sizeof_struct_sockaddr_any
+ SizeofSockaddrUnix = C.sizeof_struct_sockaddr_un
+ SizeofSockaddrDatalink = C.sizeof_struct_sockaddr_dl
+ SizeofLinger = C.sizeof_struct_linger
+ SizeofIPMreq = C.sizeof_struct_ip_mreq
+ SizeofIPv6Mreq = C.sizeof_struct_ipv6_mreq
+ SizeofMsghdr = C.sizeof_struct_msghdr
+ SizeofCmsghdr = C.sizeof_struct_cmsghdr
+ SizeofInet4Pktinfo = C.sizeof_struct_in_pktinfo
+ SizeofInet6Pktinfo = C.sizeof_struct_in6_pktinfo
+ SizeofIPv6MTUInfo = C.sizeof_struct_ip6_mtuinfo
+ SizeofICMPv6Filter = C.sizeof_struct_icmp6_filter
+)
+
+// Ptrace requests
+
+const (
+ PTRACE_TRACEME = C.PT_TRACE_ME
+ PTRACE_CONT = C.PT_CONTINUE
+ PTRACE_KILL = C.PT_KILL
+)
+
+// Events (kqueue, kevent)
+
+type Kevent_t C.struct_kevent
+
+// Select
+
+type FdSet C.fd_set
+
+// Routing and interface messages
+
+const (
+ SizeofIfMsghdr = C.sizeof_struct_if_msghdr
+ SizeofIfData = C.sizeof_struct_if_data
+ SizeofIfaMsghdr = C.sizeof_struct_ifa_msghdr
+ SizeofIfmaMsghdr = C.sizeof_struct_ifma_msghdr
+ SizeofIfmaMsghdr2 = C.sizeof_struct_ifma_msghdr2
+ SizeofRtMsghdr = C.sizeof_struct_rt_msghdr
+ SizeofRtMetrics = C.sizeof_struct_rt_metrics
+)
+
+type IfMsghdr C.struct_if_msghdr
+
+type IfData C.struct_if_data
+
+type IfaMsghdr C.struct_ifa_msghdr
+
+type IfmaMsghdr C.struct_ifma_msghdr
+
+type IfmaMsghdr2 C.struct_ifma_msghdr2
+
+type RtMsghdr C.struct_rt_msghdr
+
+type RtMetrics C.struct_rt_metrics
+
+// Berkeley packet filter
+
+const (
+ SizeofBpfVersion = C.sizeof_struct_bpf_version
+ SizeofBpfStat = C.sizeof_struct_bpf_stat
+ SizeofBpfProgram = C.sizeof_struct_bpf_program
+ SizeofBpfInsn = C.sizeof_struct_bpf_insn
+ SizeofBpfHdr = C.sizeof_struct_bpf_hdr
+)
+
+type BpfVersion C.struct_bpf_version
+
+type BpfStat C.struct_bpf_stat
+
+type BpfProgram C.struct_bpf_program
+
+type BpfInsn C.struct_bpf_insn
+
+type BpfHdr C.struct_bpf_hdr
+
+// Terminal handling
+
+type Termios C.struct_termios
+
+// fchmodat-like syscalls.
+
+const (
+ AT_FDCWD = C.AT_FDCWD
+ AT_SYMLINK_NOFOLLOW = C.AT_SYMLINK_NOFOLLOW
+)
diff --git a/vendor/golang.org/x/sys/unix/types_dragonfly.go b/vendor/golang.org/x/sys/unix/types_dragonfly.go
new file mode 100644
index 00000000..f3c971df
--- /dev/null
+++ b/vendor/golang.org/x/sys/unix/types_dragonfly.go
@@ -0,0 +1,242 @@
+// Copyright 2009 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// +build ignore
+
+/*
+Input to cgo -godefs. See also mkerrors.sh and mkall.sh
+*/
+
+// +godefs map struct_in_addr [4]byte /* in_addr */
+// +godefs map struct_in6_addr [16]byte /* in6_addr */
+
+package unix
+
+/*
+#define KERNEL
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include