Vendor all dependencies

Use godep to vendor all dependencies. godep itself is not required during the
build; Go's new vendor/ directory support (GO15VENDOREXPERIMENT=1) is used instead.
pull/198/head
Tobias Schmidt 2016-01-20 22:19:15 -05:00
parent 041de0e30c
commit 33f99c4fc1
325 changed files with 127219 additions and 42 deletions


@ -2,16 +2,18 @@ sudo: false
language: go
go:
- 1.5.2
- tip
- 1.5.3
- tip
env:
- GO15VENDOREXPERIMENT=1
install:
- go get -d
- go get golang.org/x/tools/cmd/vet
- go get golang.org/x/tools/cmd/vet
script:
- '! gofmt -l **/*.go | read nothing'
- go vet
- go test -v ./...
- go build
- ./end-to-end-test.sh
- "! gofmt -l $(find . -path ./vendor -prune -o -name '*.go' -print) | read nothing"
- go vet
- go test -v $(go list ./... | grep -v /vendor/)
- go build
- ./end-to-end-test.sh


@ -45,21 +45,32 @@ GOOS ?= $(shell uname | tr A-Z a-z)
GOARCH ?= $(subst x86_64,amd64,$(patsubst i%86,386,$(shell uname -m)))
GO_VERSION ?= 1.5.3
GOURL ?= https://golang.org/dl
GOPKG ?= go$(GO_VERSION).$(GOOS)-$(GOARCH).tar.gz
GOPATH := $(CURDIR)/.build/gopath
# Check for the correct version of go in the path. If we find it, use it.
# Otherwise, prepare to build go locally.
ifeq ($(shell command -v "go" >/dev/null && go version | sed -e 's/^[^0-9.]*\([0-9.]*\).*/\1/'), $(GO_VERSION))
GOCC ?= $(shell command -v "go")
GOFMT ?= $(shell command -v "gofmt")
GO ?= GOPATH=$(GOPATH) $(GOCC)
GO ?= $(GOCC)
else
GOURL ?= https://golang.org/dl
GOPKG ?= go$(GO_VERSION).$(GOOS)-$(GOARCH).tar.gz
GOROOT ?= $(CURDIR)/.build/go$(GO_VERSION)
GOCC ?= $(GOROOT)/bin/go
GOFMT ?= $(GOROOT)/bin/gofmt
GO ?= GOROOT=$(GOROOT) GOPATH=$(GOPATH) $(GOCC)
GO ?= GOROOT=$(GOROOT) $(GOCC)
endif
# Use vendored dependencies if available. Otherwise try to download them.
ifneq (,$(wildcard vendor))
DEPENDENCIES := $(shell find vendor/ -type f -iname '*.go')
GO := GO15VENDOREXPERIMENT=1 $(GO)
else
GOPATH := $(CURDIR)/.build/gopath
ROOTPKG ?= github.com/prometheus/$(TARGET)
SELFLINK ?= $(GOPATH)/src/$(ROOTPKG)
DEPENDENCIES := dependencies-stamp
GO := GOPATH=$(GOPATH) $(GO)
endif
# Never honor GOBIN, should it be set at all.
@ -68,11 +79,37 @@ unexport GOBIN
SUFFIX ?= $(GOOS)-$(GOARCH)
BINARY ?= $(TARGET)
ARCHIVE ?= $(TARGET)-$(VERSION).$(SUFFIX).tar.gz
ROOTPKG ?= github.com/prometheus/$(TARGET)
SELFLINK ?= $(GOPATH)/src/$(ROOTPKG)
default: $(BINARY)
$(BINARY): $(GOCC) $(SRC) $(DEPENDENCIES) Makefile Makefile.COMMON
$(GO) build $(GOFLAGS) -o $@
.PHONY: archive
archive: $(ARCHIVE)
$(ARCHIVE): $(BINARY)
tar -czf $@ $<
.PHONY: tag
tag:
git tag $(VERSION)
git push --tags
.PHONY: test
test: $(GOCC) $(DEPENDENCIES)
$(GO) test $$($(GO) list ./... | grep -v /vendor/)
.PHONY: format
format: $(GOCC)
find . -iname '*.go' | egrep -v "^\./\.build|./generated|\./vendor|\.(l|y)\.go" | xargs -n1 $(GOFMT) -w -s=true
.PHONY: clean
clean:
rm -rf $(BINARY) $(ARCHIVE) .build *-stamp
$(GOCC):
@echo Go version $(GO_VERSION) required but not found in PATH.
@echo About to download and install go$(GO_VERSION) to $(GOROOT)
@ -89,32 +126,7 @@ $(SELFLINK):
mkdir -p $(dir $@)
ln -s $(CURDIR) $@
# Download dependencies if project doesn't vendor them.
dependencies-stamp: $(GOCC) $(SRC) | $(SELFLINK)
$(GO) get -d
touch $@
$(BINARY): $(GOCC) $(SRC) dependencies-stamp Makefile Makefile.COMMON
$(GO) build $(GOFLAGS) -o $@
.PHONY: archive
archive: $(ARCHIVE)
$(ARCHIVE): $(BINARY)
tar -czf $@ $<
.PHONY: tag
tag:
git tag $(VERSION)
git push --tags
.PHONY: test
test: $(GOCC) dependencies-stamp
$(GO) test ./...
.PHONY: format
format: $(GOCC)
find . -iname '*.go' | egrep -v "^\./\.build|./generated|\./Godeps|\.(l|y)\.go" | xargs -n1 $(GOFMT) -w -s=true
.PHONY: clean
clean:
rm -rf $(BINARY) $(ARCHIVE) .build *-stamp

55
vendor/github.com/Sirupsen/logrus/CHANGELOG.md generated vendored Normal file

@ -0,0 +1,55 @@
# 0.9.0 (Unreleased)
* logrus/text_formatter: don't emit empty msg
* logrus/hooks/airbrake: move out of main repository
* logrus/hooks/sentry: move out of main repository
* logrus/hooks/papertrail: move out of main repository
* logrus/hooks/bugsnag: move out of main repository
# 0.8.7
* logrus/core: fix possible race (#216)
* logrus/doc: small typo fixes and doc improvements
# 0.8.6
* hooks/raven: allow passing an initialized client
# 0.8.5
* logrus/core: revert #208
# 0.8.4
* formatter/text: fix data race (#218)
# 0.8.3
* logrus/core: fix entry log level (#208)
* logrus/core: improve performance of text formatter by 40%
* logrus/core: expose `LevelHooks` type
* logrus/core: add support for DragonflyBSD and NetBSD
* formatter/text: print structs more verbosely
# 0.8.2
* logrus: fix more Fatal family functions
# 0.8.1
* logrus: fix not exiting on `Fatalf` and `Fatalln`
# 0.8.0
* logrus: defaults to stderr instead of stdout
* hooks/sentry: add special field for `*http.Request`
* formatter/text: ignore Windows for colors
# 0.7.3
* formatter/\*: allow configuration of timestamp layout
# 0.7.2
* formatter/text: Add configuration option for time format (#158)

21
vendor/github.com/Sirupsen/logrus/LICENSE generated vendored Normal file

@ -0,0 +1,21 @@
The MIT License (MIT)
Copyright (c) 2014 Simon Eskildsen
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.

365
vendor/github.com/Sirupsen/logrus/README.md generated vendored Normal file

@ -0,0 +1,365 @@
# Logrus <img src="http://i.imgur.com/hTeVwmJ.png" width="40" height="40" alt=":walrus:" class="emoji" title=":walrus:"/>&nbsp;[![Build Status](https://travis-ci.org/Sirupsen/logrus.svg?branch=master)](https://travis-ci.org/Sirupsen/logrus)&nbsp;[![godoc reference](https://godoc.org/github.com/Sirupsen/logrus?status.png)][godoc]
Logrus is a structured logger for Go (golang), completely API compatible with
the standard library logger. [Godoc][godoc]. **Please note the Logrus API is not
yet stable (pre 1.0). Logrus itself is completely stable and has been used in
many large deployments. The core API is unlikely to change much but please
version control your Logrus to make sure you aren't fetching latest `master` on
every build.**
Nicely color-coded in development (when a TTY is attached, otherwise just
plain text):
![Colored](http://i.imgur.com/PY7qMwd.png)
With `log.Formatter = new(logrus.JSONFormatter)`, for easy parsing by logstash
or Splunk:
```json
{"animal":"walrus","level":"info","msg":"A group of walrus emerges from the
ocean","size":10,"time":"2014-03-10 19:57:38.562264131 -0400 EDT"}
{"level":"warning","msg":"The group's number increased tremendously!",
"number":122,"omg":true,"time":"2014-03-10 19:57:38.562471297 -0400 EDT"}
{"animal":"walrus","level":"info","msg":"A giant walrus appears!",
"size":10,"time":"2014-03-10 19:57:38.562500591 -0400 EDT"}
{"animal":"walrus","level":"info","msg":"Tremendously sized cow enters the ocean.",
"size":9,"time":"2014-03-10 19:57:38.562527896 -0400 EDT"}
{"level":"fatal","msg":"The ice breaks!","number":100,"omg":true,
"time":"2014-03-10 19:57:38.562543128 -0400 EDT"}
```
With the default `log.Formatter = &log.TextFormatter{}` when a TTY is not
attached, the output is compatible with the
[logfmt](http://godoc.org/github.com/kr/logfmt) format:
```text
time="2015-03-26T01:27:38-04:00" level=debug msg="Started observing beach" animal=walrus number=8
time="2015-03-26T01:27:38-04:00" level=info msg="A group of walrus emerges from the ocean" animal=walrus size=10
time="2015-03-26T01:27:38-04:00" level=warning msg="The group's number increased tremendously!" number=122 omg=true
time="2015-03-26T01:27:38-04:00" level=debug msg="Temperature changes" temperature=-4
time="2015-03-26T01:27:38-04:00" level=panic msg="It's over 9000!" animal=orca size=9009
time="2015-03-26T01:27:38-04:00" level=fatal msg="The ice breaks!" err=&{0x2082280c0 map[animal:orca size:9009] 2015-03-26 01:27:38.441574009 -0400 EDT panic It's over 9000!} number=100 omg=true
exit status 1
```
#### Example
The simplest way to use Logrus is simply the package-level exported logger:
```go
package main
import (
log "github.com/Sirupsen/logrus"
)
func main() {
log.WithFields(log.Fields{
"animal": "walrus",
}).Info("A walrus appears")
}
```
Note that it's completely API-compatible with the stdlib logger, so you can
replace your `log` imports everywhere with `log "github.com/Sirupsen/logrus"`
and you'll now have the flexibility of Logrus. You can customize it all you
want:
```go
package main
import (
"os"
log "github.com/Sirupsen/logrus"
)
func init() {
// Log as JSON instead of the default ASCII formatter.
log.SetFormatter(&log.JSONFormatter{})
// Output to stderr instead of stdout, could also be a file.
log.SetOutput(os.Stderr)
// Only log the warning severity or above.
log.SetLevel(log.WarnLevel)
}
func main() {
log.WithFields(log.Fields{
"animal": "walrus",
"size": 10,
}).Info("A group of walrus emerges from the ocean")
log.WithFields(log.Fields{
"omg": true,
"number": 122,
}).Warn("The group's number increased tremendously!")
log.WithFields(log.Fields{
"omg": true,
"number": 100,
}).Fatal("The ice breaks!")
// A common pattern is to re-use fields between logging statements by re-using
// the logrus.Entry returned from WithFields()
contextLogger := log.WithFields(log.Fields{
"common": "this is a common field",
"other": "I also should be logged always",
})
contextLogger.Info("I'll be logged with common and other field")
contextLogger.Info("Me too")
}
```
For more advanced usage such as logging to multiple locations from the same
application, you can also create an instance of the `logrus` Logger:
```go
package main
import (
"github.com/Sirupsen/logrus"
)
// Create a new instance of the logger. You can have any number of instances.
var log = logrus.New()
func main() {
// The API for setting attributes is a little different than the package level
// exported logger. See Godoc.
log.Out = os.Stderr
log.WithFields(logrus.Fields{
"animal": "walrus",
"size": 10,
}).Info("A group of walrus emerges from the ocean")
}
```
#### Fields
Logrus encourages careful, structured logging through logging fields instead of
long, unparseable error messages. For example, instead of: `log.Fatalf("Failed
to send event %s to topic %s with key %d")`, you should log the much more
discoverable:
```go
log.WithFields(log.Fields{
"event": event,
"topic": topic,
"key": key,
}).Fatal("Failed to send event")
```
We've found this API forces you to think about logging in a way that produces
much more useful logging messages. We've been in countless situations where just
a single added field to a log statement that was already there would've saved us
hours. The `WithFields` call is optional.
In general, with Logrus using any of the `printf`-family functions should be
seen as a hint you should add a field, however, you can still use the
`printf`-family functions with Logrus.
#### Hooks
You can add hooks for logging levels. For example to send errors to an exception
tracking service on `Error`, `Fatal` and `Panic`, info to StatsD or log to
multiple places simultaneously, e.g. syslog.
Logrus comes with [built-in hooks](hooks/). Add those, or your custom hook, in
`init`:
```go
import (
log "github.com/Sirupsen/logrus"
"gopkg.in/gemnasium/logrus-airbrake-hook.v2" // the package is named "aibrake"
logrus_syslog "github.com/Sirupsen/logrus/hooks/syslog"
"log/syslog"
)
func init() {
// Use the Airbrake hook to report errors that have Error severity or above to
// an exception tracker. You can create custom hooks, see the Hooks section.
log.AddHook(airbrake.NewHook(123, "xyz", "production"))
hook, err := logrus_syslog.NewSyslogHook("udp", "localhost:514", syslog.LOG_INFO, "")
if err != nil {
log.Error("Unable to connect to local syslog daemon")
} else {
log.AddHook(hook)
}
}
```
Note: the syslog hook also supports connecting to a local syslog socket (e.g. "/dev/log", "/var/run/syslog" or "/var/run/log"). For details, please check the [syslog hook README](hooks/syslog/README.md).
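For example, a minimal sketch of a local syslog hook: the same `NewSyslogHook` call as above, but with empty network and address arguments, which makes the standard library `log/syslog` dial the local syslog socket rather than a remote server.
```go
package main

import (
	"log/syslog"

	log "github.com/Sirupsen/logrus"
	logrus_syslog "github.com/Sirupsen/logrus/hooks/syslog"
)

func main() {
	// Empty network and address: log/syslog connects to the local
	// syslog daemon (e.g. /dev/log) instead of a remote server.
	hook, err := logrus_syslog.NewSyslogHook("", "", syslog.LOG_INFO, "")
	if err != nil {
		log.Error("Unable to connect to local syslog daemon")
	} else {
		log.AddHook(hook)
	}

	log.Info("This entry also goes to the local syslog")
}
```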
| Hook | Description |
| ----- | ----------- |
| [Airbrake](https://github.com/gemnasium/logrus-airbrake-hook) | Send errors to the Airbrake API V3. Uses the official [`gobrake`](https://github.com/airbrake/gobrake) behind the scenes. |
| [Airbrake "legacy"](https://github.com/gemnasium/logrus-airbrake-legacy-hook) | Send errors to an exception tracking service compatible with the Airbrake API V2. Uses [`airbrake-go`](https://github.com/tobi/airbrake-go) behind the scenes. |
| [Papertrail](https://github.com/polds/logrus-papertrail-hook) | Send errors to the [Papertrail](https://papertrailapp.com) hosted logging service via UDP. |
| [Syslog](https://github.com/Sirupsen/logrus/blob/master/hooks/syslog/syslog.go) | Send errors to remote syslog server. Uses standard library `log/syslog` behind the scenes. |
| [Bugsnag](https://github.com/Shopify/logrus-bugsnag/blob/master/bugsnag.go) | Send errors to the Bugsnag exception tracking service. |
| [Sentry](https://github.com/evalphobia/logrus_sentry) | Send errors to the Sentry error logging and aggregation service. |
| [Hiprus](https://github.com/nubo/hiprus) | Send errors to a channel in hipchat. |
| [Logrusly](https://github.com/sebest/logrusly) | Send logs to [Loggly](https://www.loggly.com/) |
| [Slackrus](https://github.com/johntdyer/slackrus) | Hook for Slack chat. |
| [Journalhook](https://github.com/wercker/journalhook) | Hook for logging to `systemd-journald` |
| [Graylog](https://github.com/gemnasium/logrus-graylog-hook) | Hook for logging to [Graylog](http://graylog2.org/) |
| [Raygun](https://github.com/squirkle/logrus-raygun-hook) | Hook for logging to [Raygun.io](http://raygun.io/) |
| [LFShook](https://github.com/rifflock/lfshook) | Hook for logging to the local filesystem |
| [Honeybadger](https://github.com/agonzalezro/logrus_honeybadger) | Hook for sending exceptions to Honeybadger |
| [Mail](https://github.com/zbindenren/logrus_mail) | Hook for sending exceptions via mail |
| [Rollrus](https://github.com/heroku/rollrus) | Hook for sending errors to rollbar |
| [Fluentd](https://github.com/evalphobia/logrus_fluent) | Hook for logging to fluentd |
| [Mongodb](https://github.com/weekface/mgorus) | Hook for logging to mongodb |
| [InfluxDB](https://github.com/Abramovic/logrus_influxdb) | Hook for logging to influxdb |
| [Octokit](https://github.com/dorajistyle/logrus-octokit-hook) | Hook for logging to github via octokit |
| [DeferPanic](https://github.com/deferpanic/dp-logrus) | Hook for logging to DeferPanic |
#### Level logging
Logrus has six logging levels: Debug, Info, Warning, Error, Fatal and Panic.
```go
log.Debug("Useful debugging information.")
log.Info("Something noteworthy happened!")
log.Warn("You should probably take a look at this.")
log.Error("Something failed but I'm not quitting.")
// Calls os.Exit(1) after logging
log.Fatal("Bye.")
// Calls panic() after logging
log.Panic("I'm bailing.")
```
You can set the logging level on a `Logger`, then it will only log entries with
that severity or anything above it:
```go
// Will log anything that is info or above (warn, error, fatal, panic). Default.
log.SetLevel(log.InfoLevel)
```
It may be useful to set `log.Level = logrus.DebugLevel` in a debug or verbose
environment if your application has that.
#### Entries
Besides the fields added with `WithField` or `WithFields` some fields are
automatically added to all logging events:
1. `time`. The timestamp when the entry was created.
2. `msg`. The logging message passed to `{Info,Warn,Error,Fatal,Panic}` after
the `AddFields` call. E.g. `Failed to send event.`
3. `level`. The logging level. E.g. `info`.
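As a minimal sketch, these automatic fields appear alongside the user-supplied ones (the timestamp in the sample output is illustrative):
```go
package main

import (
	log "github.com/Sirupsen/logrus"
)

func main() {
	// "time", "level" and "msg" are added automatically;
	// "event" comes from WithField.
	log.WithField("event", "send").Info("Failed to send event.")

	// Example output (logfmt, no TTY attached; timestamp illustrative):
	// time="2015-09-07T08:48:33Z" level=info msg="Failed to send event." event=send
}
```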
#### Environments
Logrus has no notion of environment.
If you wish for hooks and formatters to only be used in specific environments,
you should handle that yourself. For example, if your application has a global
variable `Environment`, which is a string representation of the environment you
could do:
```go
import (
log "github.com/Sirupsen/logrus"
)
init() {
// do something here to set environment depending on an environment variable
// or command-line flag
if Environment == "production" {
log.SetFormatter(&log.JSONFormatter{})
} else {
// The TextFormatter is default, you don't actually have to do this.
log.SetFormatter(&log.TextFormatter{})
}
}
```
This configuration is how `logrus` was intended to be used, but JSON in
production is mostly only useful if you do log aggregation with tools like
Splunk or Logstash.
#### Formatters
The built-in logging formatters are:
* `logrus.TextFormatter`. Logs the event in colors if stdout is a tty, otherwise
without colors.
* *Note:* to force colored output when there is no TTY, set the `ForceColors`
field to `true`. To force no colored output even if there is a TTY set the
`DisableColors` field to `true`
* `logrus.JSONFormatter`. Logs fields as JSON.
* `logrus/formatters/logstash.LogstashFormatter`. Logs fields as [Logstash](http://logstash.net) Events.
```go
logrus.SetFormatter(&logstash.LogstashFormatter{Type: "application_name"})
```
Third party logging formatters:
* [`prefixed`](https://github.com/x-cray/logrus-prefixed-formatter). Displays log entry source along with alternative layout.
* [`zalgo`](https://github.com/aybabtme/logzalgo). Invoking the P͉̫o̳̼̊w̖͈̰͎e̬͔̭͂r͚̼̹̲ ̫͓͉̳͈ō̠͕͖̚f̝͍̠ ͕̲̞͖͑Z̖̫̤̫ͪa͉̬͈̗l͖͎g̳̥o̰̥̅!̣͔̲̻͊̄ ̙̘̦̹̦.
You can define your formatter by implementing the `Formatter` interface,
requiring a `Format` method. `Format` takes an `*Entry`. `entry.Data` is a
`Fields` type (`map[string]interface{}`) with all your fields as well as the
default ones (see Entries section above):
```go
type MyJSONFormatter struct {
}
log.SetFormatter(new(MyJSONFormatter))
func (f *MyJSONFormatter) Format(entry *Entry) ([]byte, error) {
// Note this doesn't include Time, Level and Message which are available on
// the Entry. Consult `godoc` on information about those fields or read the
// source of the official loggers.
serialized, err := json.Marshal(entry.Data)
if err != nil {
return nil, fmt.Errorf("Failed to marshal fields to JSON, %v", err)
}
return append(serialized, '\n'), nil
}
```
#### Logger as an `io.Writer`
Logrus can be transformed into an `io.Writer`. That writer is the end of an `io.Pipe` and it is your responsibility to close it.
```go
w := logger.Writer()
defer w.Close()
srv := http.Server{
// create a stdlib log.Logger that writes to
// logrus.Logger.
ErrorLog: log.New(w, "", 0),
}
```
Each line written to that writer will be printed the usual way, using formatters
and hooks. The level for those entries is `info`.
#### Rotation
Log rotation is not provided with Logrus. Log rotation should be done by an
external program (like `logrotate(8)`) that can compress and delete old log
entries. It should not be a feature of the application-level logger.
#### Tools
| Tool | Description |
| ---- | ----------- |
|[Logrus Mate](https://github.com/gogap/logrus_mate)|Logrus Mate is a tool for managing Logrus loggers: it lets you initialize a logger's level, hooks and formatter from a config file, so loggers can be generated with different configurations per environment.|
[godoc]: https://godoc.org/github.com/Sirupsen/logrus

26
vendor/github.com/Sirupsen/logrus/doc.go generated vendored Normal file

@ -0,0 +1,26 @@
/*
Package logrus is a structured logger for Go, completely API compatible with the standard library logger.
The simplest way to use Logrus is simply the package-level exported logger:
package main
import (
log "github.com/Sirupsen/logrus"
)
func main() {
log.WithFields(log.Fields{
"animal": "walrus",
"number": 1,
"size": 10,
}).Info("A walrus appears")
}
Output:
time="2015-09-07T08:48:33Z" level=info msg="A walrus appears" animal=walrus number=1 size=10
For a full guide visit https://github.com/Sirupsen/logrus
*/
package logrus

264
vendor/github.com/Sirupsen/logrus/entry.go generated vendored Normal file

@ -0,0 +1,264 @@
package logrus
import (
"bytes"
"fmt"
"io"
"os"
"time"
)
// Defines the key when adding errors using WithError.
var ErrorKey = "error"
// An entry is the final or intermediate Logrus logging entry. It contains all
// the fields passed with WithField{,s}. It's finally logged when Debug, Info,
// Warn, Error, Fatal or Panic is called on it. These objects can be reused and
// passed around as much as you wish to avoid field duplication.
type Entry struct {
Logger *Logger
// Contains all the fields set by the user.
Data Fields
// Time at which the log entry was created
Time time.Time
// Level the log entry was logged at: Debug, Info, Warn, Error, Fatal or Panic
Level Level
// Message passed to Debug, Info, Warn, Error, Fatal or Panic
Message string
}
func NewEntry(logger *Logger) *Entry {
return &Entry{
Logger: logger,
// Default is three fields, give a little extra room
Data: make(Fields, 5),
}
}
// Returns a reader for the entry, which is a proxy to the formatter.
func (entry *Entry) Reader() (*bytes.Buffer, error) {
serialized, err := entry.Logger.Formatter.Format(entry)
return bytes.NewBuffer(serialized), err
}
// Returns the string representation from the reader and ultimately the
// formatter.
func (entry *Entry) String() (string, error) {
reader, err := entry.Reader()
if err != nil {
return "", err
}
return reader.String(), err
}
// Add an error as single field (using the key defined in ErrorKey) to the Entry.
func (entry *Entry) WithError(err error) *Entry {
return entry.WithField(ErrorKey, err)
}
// Add a single field to the Entry.
func (entry *Entry) WithField(key string, value interface{}) *Entry {
return entry.WithFields(Fields{key: value})
}
// Add a map of fields to the Entry.
func (entry *Entry) WithFields(fields Fields) *Entry {
data := Fields{}
for k, v := range entry.Data {
data[k] = v
}
for k, v := range fields {
data[k] = v
}
return &Entry{Logger: entry.Logger, Data: data}
}
// This function is not declared with a pointer value because otherwise
// race conditions will occur when using multiple goroutines
func (entry Entry) log(level Level, msg string) {
entry.Time = time.Now()
entry.Level = level
entry.Message = msg
if err := entry.Logger.Hooks.Fire(level, &entry); err != nil {
entry.Logger.mu.Lock()
fmt.Fprintf(os.Stderr, "Failed to fire hook: %v\n", err)
entry.Logger.mu.Unlock()
}
reader, err := entry.Reader()
if err != nil {
entry.Logger.mu.Lock()
fmt.Fprintf(os.Stderr, "Failed to obtain reader, %v\n", err)
entry.Logger.mu.Unlock()
}
entry.Logger.mu.Lock()
defer entry.Logger.mu.Unlock()
_, err = io.Copy(entry.Logger.Out, reader)
if err != nil {
fmt.Fprintf(os.Stderr, "Failed to write to log, %v\n", err)
}
// To avoid Entry#log() returning a value that only would make sense for
// panic() to use in Entry#Panic(), we avoid the allocation by checking
// directly here.
if level <= PanicLevel {
panic(&entry)
}
}
func (entry *Entry) Debug(args ...interface{}) {
if entry.Logger.Level >= DebugLevel {
entry.log(DebugLevel, fmt.Sprint(args...))
}
}
func (entry *Entry) Print(args ...interface{}) {
entry.Info(args...)
}
func (entry *Entry) Info(args ...interface{}) {
if entry.Logger.Level >= InfoLevel {
entry.log(InfoLevel, fmt.Sprint(args...))
}
}
func (entry *Entry) Warn(args ...interface{}) {
if entry.Logger.Level >= WarnLevel {
entry.log(WarnLevel, fmt.Sprint(args...))
}
}
func (entry *Entry) Warning(args ...interface{}) {
entry.Warn(args...)
}
func (entry *Entry) Error(args ...interface{}) {
if entry.Logger.Level >= ErrorLevel {
entry.log(ErrorLevel, fmt.Sprint(args...))
}
}
func (entry *Entry) Fatal(args ...interface{}) {
if entry.Logger.Level >= FatalLevel {
entry.log(FatalLevel, fmt.Sprint(args...))
}
os.Exit(1)
}
func (entry *Entry) Panic(args ...interface{}) {
if entry.Logger.Level >= PanicLevel {
entry.log(PanicLevel, fmt.Sprint(args...))
}
panic(fmt.Sprint(args...))
}
// Entry Printf family functions
func (entry *Entry) Debugf(format string, args ...interface{}) {
if entry.Logger.Level >= DebugLevel {
entry.Debug(fmt.Sprintf(format, args...))
}
}
func (entry *Entry) Infof(format string, args ...interface{}) {
if entry.Logger.Level >= InfoLevel {
entry.Info(fmt.Sprintf(format, args...))
}
}
func (entry *Entry) Printf(format string, args ...interface{}) {
entry.Infof(format, args...)
}
func (entry *Entry) Warnf(format string, args ...interface{}) {
if entry.Logger.Level >= WarnLevel {
entry.Warn(fmt.Sprintf(format, args...))
}
}
func (entry *Entry) Warningf(format string, args ...interface{}) {
entry.Warnf(format, args...)
}
func (entry *Entry) Errorf(format string, args ...interface{}) {
if entry.Logger.Level >= ErrorLevel {
entry.Error(fmt.Sprintf(format, args...))
}
}
func (entry *Entry) Fatalf(format string, args ...interface{}) {
if entry.Logger.Level >= FatalLevel {
entry.Fatal(fmt.Sprintf(format, args...))
}
os.Exit(1)
}
func (entry *Entry) Panicf(format string, args ...interface{}) {
if entry.Logger.Level >= PanicLevel {
entry.Panic(fmt.Sprintf(format, args...))
}
}
// Entry Println family functions
func (entry *Entry) Debugln(args ...interface{}) {
if entry.Logger.Level >= DebugLevel {
entry.Debug(entry.sprintlnn(args...))
}
}
func (entry *Entry) Infoln(args ...interface{}) {
if entry.Logger.Level >= InfoLevel {
entry.Info(entry.sprintlnn(args...))
}
}
func (entry *Entry) Println(args ...interface{}) {
entry.Infoln(args...)
}
func (entry *Entry) Warnln(args ...interface{}) {
if entry.Logger.Level >= WarnLevel {
entry.Warn(entry.sprintlnn(args...))
}
}
func (entry *Entry) Warningln(args ...interface{}) {
entry.Warnln(args...)
}
func (entry *Entry) Errorln(args ...interface{}) {
if entry.Logger.Level >= ErrorLevel {
entry.Error(entry.sprintlnn(args...))
}
}
func (entry *Entry) Fatalln(args ...interface{}) {
if entry.Logger.Level >= FatalLevel {
entry.Fatal(entry.sprintlnn(args...))
}
os.Exit(1)
}
func (entry *Entry) Panicln(args ...interface{}) {
if entry.Logger.Level >= PanicLevel {
entry.Panic(entry.sprintlnn(args...))
}
}
// Sprintlnn => Sprint no newline. This is to get the behavior of how
// fmt.Sprintln where spaces are always added between operands, regardless of
// their type. Instead of vendoring the Sprintln implementation to spare a
// string allocation, we do the simplest thing.
func (entry *Entry) sprintlnn(args ...interface{}) string {
msg := fmt.Sprintln(args...)
return msg[:len(msg)-1]
}

193
vendor/github.com/Sirupsen/logrus/exported.go generated vendored Normal file

@ -0,0 +1,193 @@
package logrus
import (
"io"
)
var (
// std is the name of the standard logger in stdlib `log`
std = New()
)
func StandardLogger() *Logger {
return std
}
// SetOutput sets the standard logger output.
func SetOutput(out io.Writer) {
std.mu.Lock()
defer std.mu.Unlock()
std.Out = out
}
// SetFormatter sets the standard logger formatter.
func SetFormatter(formatter Formatter) {
std.mu.Lock()
defer std.mu.Unlock()
std.Formatter = formatter
}
// SetLevel sets the standard logger level.
func SetLevel(level Level) {
std.mu.Lock()
defer std.mu.Unlock()
std.Level = level
}
// GetLevel returns the standard logger level.
func GetLevel() Level {
std.mu.Lock()
defer std.mu.Unlock()
return std.Level
}
// AddHook adds a hook to the standard logger hooks.
func AddHook(hook Hook) {
std.mu.Lock()
defer std.mu.Unlock()
std.Hooks.Add(hook)
}
// WithError creates an entry from the standard logger and adds an error to it, using the value defined in ErrorKey as key.
func WithError(err error) *Entry {
return std.WithField(ErrorKey, err)
}
// WithField creates an entry from the standard logger and adds a field to
// it. If you want multiple fields, use `WithFields`.
//
// Note that it doesn't log until you call Debug, Print, Info, Warn, Fatal
// or Panic on the Entry it returns.
func WithField(key string, value interface{}) *Entry {
return std.WithField(key, value)
}
// WithFields creates an entry from the standard logger and adds multiple
// fields to it. This is simply a helper for `WithField`, invoking it
// once for each field.
//
// Note that it doesn't log until you call Debug, Print, Info, Warn, Fatal
// or Panic on the Entry it returns.
func WithFields(fields Fields) *Entry {
return std.WithFields(fields)
}
// Debug logs a message at level Debug on the standard logger.
func Debug(args ...interface{}) {
std.Debug(args...)
}
// Print logs a message at level Info on the standard logger.
func Print(args ...interface{}) {
std.Print(args...)
}
// Info logs a message at level Info on the standard logger.
func Info(args ...interface{}) {
std.Info(args...)
}
// Warn logs a message at level Warn on the standard logger.
func Warn(args ...interface{}) {
std.Warn(args...)
}
// Warning logs a message at level Warn on the standard logger.
func Warning(args ...interface{}) {
std.Warning(args...)
}
// Error logs a message at level Error on the standard logger.
func Error(args ...interface{}) {
std.Error(args...)
}
// Panic logs a message at level Panic on the standard logger.
func Panic(args ...interface{}) {
std.Panic(args...)
}
// Fatal logs a message at level Fatal on the standard logger.
func Fatal(args ...interface{}) {
std.Fatal(args...)
}
// Debugf logs a message at level Debug on the standard logger.
func Debugf(format string, args ...interface{}) {
std.Debugf(format, args...)
}
// Printf logs a message at level Info on the standard logger.
func Printf(format string, args ...interface{}) {
std.Printf(format, args...)
}
// Infof logs a message at level Info on the standard logger.
func Infof(format string, args ...interface{}) {
std.Infof(format, args...)
}
// Warnf logs a message at level Warn on the standard logger.
func Warnf(format string, args ...interface{}) {
std.Warnf(format, args...)
}
// Warningf logs a message at level Warn on the standard logger.
func Warningf(format string, args ...interface{}) {
std.Warningf(format, args...)
}
// Errorf logs a message at level Error on the standard logger.
func Errorf(format string, args ...interface{}) {
std.Errorf(format, args...)
}
// Panicf logs a message at level Panic on the standard logger.
func Panicf(format string, args ...interface{}) {
std.Panicf(format, args...)
}
// Fatalf logs a message at level Fatal on the standard logger.
func Fatalf(format string, args ...interface{}) {
std.Fatalf(format, args...)
}
// Debugln logs a message at level Debug on the standard logger.
func Debugln(args ...interface{}) {
std.Debugln(args...)
}
// Println logs a message at level Info on the standard logger.
func Println(args ...interface{}) {
std.Println(args...)
}
// Infoln logs a message at level Info on the standard logger.
func Infoln(args ...interface{}) {
std.Infoln(args...)
}
// Warnln logs a message at level Warn on the standard logger.
func Warnln(args ...interface{}) {
std.Warnln(args...)
}
// Warningln logs a message at level Warn on the standard logger.
func Warningln(args ...interface{}) {
std.Warningln(args...)
}
// Errorln logs a message at level Error on the standard logger.
func Errorln(args ...interface{}) {
std.Errorln(args...)
}
// Panicln logs a message at level Panic on the standard logger.
func Panicln(args ...interface{}) {
std.Panicln(args...)
}
// Fatalln logs a message at level Fatal on the standard logger.
func Fatalln(args ...interface{}) {
std.Fatalln(args...)
}

48
vendor/github.com/Sirupsen/logrus/formatter.go generated vendored Normal file

@ -0,0 +1,48 @@
package logrus
import "time"
const DefaultTimestampFormat = time.RFC3339
// The Formatter interface is used to implement a custom Formatter. It takes an
// `Entry`. It exposes all the fields, including the default ones:
//
// * `entry.Data["msg"]`. The message passed from Info, Warn, Error ..
// * `entry.Data["time"]`. The timestamp.
// * `entry.Data["level"]. The level the entry was logged at.
//
// Any additional fields added with `WithField` or `WithFields` are also in
// `entry.Data`. Format is expected to return an array of bytes which are then
// logged to `logger.Out`.
type Formatter interface {
Format(*Entry) ([]byte, error)
}
// This is to not silently overwrite `time`, `msg` and `level` fields when
// dumping it. If this code wasn't there doing:
//
// logrus.WithField("level", 1).Info("hello")
//
// Would just silently drop the user provided level. Instead with this code
// it'll logged as:
//
// {"level": "info", "fields.level": 1, "msg": "hello", "time": "..."}
//
// It's not exported because it's still using Data in an opinionated way. It's to
// avoid code duplication between the two default formatters.
func prefixFieldClashes(data Fields) {
_, ok := data["time"]
if ok {
data["fields.time"] = data["time"]
}
_, ok = data["msg"]
if ok {
data["fields.msg"] = data["msg"]
}
_, ok = data["level"]
if ok {
data["fields.level"] = data["level"]
}
}

34
vendor/github.com/Sirupsen/logrus/hooks.go generated vendored Normal file

@ -0,0 +1,34 @@
package logrus
// A hook to be fired when logging on the logging levels returned from
// `Levels()` on your implementation of the interface. Note that this is not
// fired in a goroutine or a channel with workers, you should handle such
// functionality yourself if your call is non-blocking and you don't wish for
// the logging calls for levels returned from `Levels()` to block.
type Hook interface {
Levels() []Level
Fire(*Entry) error
}
// Internal type for storing the hooks on a logger instance.
type LevelHooks map[Level][]Hook
// Add a hook to an instance of logger. This is called with
// `log.Hooks.Add(new(MyHook))` where `MyHook` implements the `Hook` interface.
func (hooks LevelHooks) Add(hook Hook) {
for _, level := range hook.Levels() {
hooks[level] = append(hooks[level], hook)
}
}
// Fire all the hooks for the passed level. Used by `entry.log` to fire
// appropriate hooks for a log entry.
func (hooks LevelHooks) Fire(level Level, entry *Entry) error {
for _, hook := range hooks[level] {
if err := hook.Fire(entry); err != nil {
return err
}
}
return nil
}

41
vendor/github.com/Sirupsen/logrus/json_formatter.go generated vendored Normal file

@ -0,0 +1,41 @@
package logrus
import (
"encoding/json"
"fmt"
)
type JSONFormatter struct {
// TimestampFormat sets the format used for marshaling timestamps.
TimestampFormat string
}
func (f *JSONFormatter) Format(entry *Entry) ([]byte, error) {
data := make(Fields, len(entry.Data)+3)
for k, v := range entry.Data {
switch v := v.(type) {
case error:
// Otherwise errors are ignored by `encoding/json`
// https://github.com/Sirupsen/logrus/issues/137
data[k] = v.Error()
default:
data[k] = v
}
}
prefixFieldClashes(data)
timestampFormat := f.TimestampFormat
if timestampFormat == "" {
timestampFormat = DefaultTimestampFormat
}
data["time"] = entry.Time.Format(timestampFormat)
data["msg"] = entry.Message
data["level"] = entry.Level.String()
serialized, err := json.Marshal(data)
if err != nil {
return nil, fmt.Errorf("Failed to marshal fields to JSON, %v", err)
}
return append(serialized, '\n'), nil
}

212
vendor/github.com/Sirupsen/logrus/logger.go generated vendored Normal file

@ -0,0 +1,212 @@
package logrus
import (
"io"
"os"
"sync"
)
type Logger struct {
// The logs are `io.Copy`'d to this in a mutex. It's common to set this to a
// file, or leave it default which is `os.Stderr`. You can also set this to
// something more adventorous, such as logging to Kafka.
Out io.Writer
// Hooks for the logger instance. These allow firing events based on logging
// levels and log entries. For example, to send errors to an error tracking
// service, log to StatsD or dump the core on fatal errors.
Hooks LevelHooks
// All log entries pass through the formatter before logged to Out. The
// included formatters are `TextFormatter` and `JSONFormatter` for which
// TextFormatter is the default. In development (when a TTY is attached) it
// logs with colors, but to a file it wouldn't. You can easily implement your
// own that implements the `Formatter` interface, see the `README` or included
// formatters for examples.
Formatter Formatter
// The logging level the logger should log at. This is typically (and defaults
// to) `logrus.Info`, which allows Info(), Warn(), Error() and Fatal() to be
// logged. `logrus.Debug` is useful in
Level Level
// Used to sync writing to the log.
mu sync.Mutex
}
// Creates a new logger. Configuration should be set by changing `Formatter`,
// `Out` and `Hooks` directly on the default logger instance. You can also just
// instantiate your own:
//
// var log = &Logger{
// Out: os.Stderr,
// Formatter: new(JSONFormatter),
// Hooks: make(LevelHooks),
// Level: logrus.DebugLevel,
// }
//
// It's recommended to make this a global instance called `log`.
func New() *Logger {
return &Logger{
Out: os.Stderr,
Formatter: new(TextFormatter),
Hooks: make(LevelHooks),
Level: InfoLevel,
}
}
// Adds a field to the log entry, note that you it doesn't log until you call
// Debug, Print, Info, Warn, Fatal or Panic. It only creates a log entry.
// If you want multiple fields, use `WithFields`.
func (logger *Logger) WithField(key string, value interface{}) *Entry {
return NewEntry(logger).WithField(key, value)
}
// Adds a struct of fields to the log entry. All it does is call `WithField` for
// each `Field`.
func (logger *Logger) WithFields(fields Fields) *Entry {
return NewEntry(logger).WithFields(fields)
}
// Add an error as single field to the log entry. All it does is call
// `WithError` for the given `error`.
func (logger *Logger) WithError(err error) *Entry {
return NewEntry(logger).WithError(err)
}
func (logger *Logger) Debugf(format string, args ...interface{}) {
if logger.Level >= DebugLevel {
NewEntry(logger).Debugf(format, args...)
}
}
func (logger *Logger) Infof(format string, args ...interface{}) {
if logger.Level >= InfoLevel {
NewEntry(logger).Infof(format, args...)
}
}
func (logger *Logger) Printf(format string, args ...interface{}) {
NewEntry(logger).Printf(format, args...)
}
func (logger *Logger) Warnf(format string, args ...interface{}) {
if logger.Level >= WarnLevel {
NewEntry(logger).Warnf(format, args...)
}
}
func (logger *Logger) Warningf(format string, args ...interface{}) {
if logger.Level >= WarnLevel {
NewEntry(logger).Warnf(format, args...)
}
}
func (logger *Logger) Errorf(format string, args ...interface{}) {
if logger.Level >= ErrorLevel {
NewEntry(logger).Errorf(format, args...)
}
}
func (logger *Logger) Fatalf(format string, args ...interface{}) {
if logger.Level >= FatalLevel {
NewEntry(logger).Fatalf(format, args...)
}
os.Exit(1)
}
func (logger *Logger) Panicf(format string, args ...interface{}) {
if logger.Level >= PanicLevel {
NewEntry(logger).Panicf(format, args...)
}
}
func (logger *Logger) Debug(args ...interface{}) {
if logger.Level >= DebugLevel {
NewEntry(logger).Debug(args...)
}
}
func (logger *Logger) Info(args ...interface{}) {
if logger.Level >= InfoLevel {
NewEntry(logger).Info(args...)
}
}
func (logger *Logger) Print(args ...interface{}) {
NewEntry(logger).Info(args...)
}
func (logger *Logger) Warn(args ...interface{}) {
if logger.Level >= WarnLevel {
NewEntry(logger).Warn(args...)
}
}
func (logger *Logger) Warning(args ...interface{}) {
if logger.Level >= WarnLevel {
NewEntry(logger).Warn(args...)
}
}
func (logger *Logger) Error(args ...interface{}) {
if logger.Level >= ErrorLevel {
NewEntry(logger).Error(args...)
}
}
func (logger *Logger) Fatal(args ...interface{}) {
if logger.Level >= FatalLevel {
NewEntry(logger).Fatal(args...)
}
os.Exit(1)
}
func (logger *Logger) Panic(args ...interface{}) {
if logger.Level >= PanicLevel {
NewEntry(logger).Panic(args...)
}
}
func (logger *Logger) Debugln(args ...interface{}) {
if logger.Level >= DebugLevel {
NewEntry(logger).Debugln(args...)
}
}
func (logger *Logger) Infoln(args ...interface{}) {
if logger.Level >= InfoLevel {
NewEntry(logger).Infoln(args...)
}
}
func (logger *Logger) Println(args ...interface{}) {
NewEntry(logger).Println(args...)
}
func (logger *Logger) Warnln(args ...interface{}) {
if logger.Level >= WarnLevel {
NewEntry(logger).Warnln(args...)
}
}
func (logger *Logger) Warningln(args ...interface{}) {
if logger.Level >= WarnLevel {
NewEntry(logger).Warnln(args...)
}
}
func (logger *Logger) Errorln(args ...interface{}) {
if logger.Level >= ErrorLevel {
NewEntry(logger).Errorln(args...)
}
}
func (logger *Logger) Fatalln(args ...interface{}) {
if logger.Level >= FatalLevel {
NewEntry(logger).Fatalln(args...)
}
os.Exit(1)
}
func (logger *Logger) Panicln(args ...interface{}) {
if logger.Level >= PanicLevel {
NewEntry(logger).Panicln(args...)
}
}

98
vendor/github.com/Sirupsen/logrus/logrus.go generated vendored Normal file

@ -0,0 +1,98 @@
package logrus
import (
"fmt"
"log"
)
// Fields type, used to pass to `WithFields`.
type Fields map[string]interface{}
// Level type
type Level uint8
// Convert the Level to a string. E.g. PanicLevel becomes "panic".
func (level Level) String() string {
switch level {
case DebugLevel:
return "debug"
case InfoLevel:
return "info"
case WarnLevel:
return "warning"
case ErrorLevel:
return "error"
case FatalLevel:
return "fatal"
case PanicLevel:
return "panic"
}
return "unknown"
}
// ParseLevel takes a string level and returns the Logrus log level constant.
func ParseLevel(lvl string) (Level, error) {
switch lvl {
case "panic":
return PanicLevel, nil
case "fatal":
return FatalLevel, nil
case "error":
return ErrorLevel, nil
case "warn", "warning":
return WarnLevel, nil
case "info":
return InfoLevel, nil
case "debug":
return DebugLevel, nil
}
var l Level
return l, fmt.Errorf("not a valid logrus Level: %q", lvl)
}
// These are the different logging levels. You can set the logging level to log
// on your instance of logger, obtained with `logrus.New()`.
const (
// PanicLevel level, highest level of severity. Logs and then calls panic with the
// message passed to Debug, Info, ...
PanicLevel Level = iota
// FatalLevel level. Logs and then calls `os.Exit(1)`. It will exit even if the
// logging level is set to Panic.
FatalLevel
// ErrorLevel level. Logs. Used for errors that should definitely be noted.
// Commonly used for hooks to send errors to an error tracking service.
ErrorLevel
// WarnLevel level. Non-critical entries that deserve eyes.
WarnLevel
// InfoLevel level. General operational entries about what's going on inside the
// application.
InfoLevel
// DebugLevel level. Usually only enabled when debugging. Very verbose logging.
DebugLevel
)
// Won't compile if StdLogger can't be realized by a log.Logger
var (
_ StdLogger = &log.Logger{}
_ StdLogger = &Entry{}
_ StdLogger = &Logger{}
)
// StdLogger is what your logrus-enabled library should take, that way
// it'll accept a stdlib logger and a logrus logger. There's no standard
// interface, this is the closest we get, unfortunately.
type StdLogger interface {
Print(...interface{})
Printf(string, ...interface{})
Println(...interface{})
Fatal(...interface{})
Fatalf(string, ...interface{})
Fatalln(...interface{})
Panic(...interface{})
Panicf(string, ...interface{})
Panicln(...interface{})
}

9
vendor/github.com/Sirupsen/logrus/terminal_bsd.go generated vendored Normal file

@ -0,0 +1,9 @@
// +build darwin freebsd openbsd netbsd dragonfly
package logrus
import "syscall"
const ioctlReadTermios = syscall.TIOCGETA
type Termios syscall.Termios

12
vendor/github.com/Sirupsen/logrus/terminal_linux.go generated vendored Normal file

@ -0,0 +1,12 @@
// Based on ssh/terminal:
// Copyright 2013 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package logrus
import "syscall"
const ioctlReadTermios = syscall.TCGETS
type Termios syscall.Termios


@ -0,0 +1,21 @@
// Based on ssh/terminal:
// Copyright 2011 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// +build linux darwin freebsd openbsd netbsd dragonfly
package logrus
import (
"syscall"
"unsafe"
)
// IsTerminal returns true if stderr's file descriptor is a terminal.
func IsTerminal() bool {
fd := syscall.Stderr
var termios Termios
_, _, err := syscall.Syscall6(syscall.SYS_IOCTL, uintptr(fd), ioctlReadTermios, uintptr(unsafe.Pointer(&termios)), 0, 0, 0)
return err == 0
}

15
vendor/github.com/Sirupsen/logrus/terminal_solaris.go generated vendored Normal file

@ -0,0 +1,15 @@
// +build solaris
package logrus
import (
"os"
"golang.org/x/sys/unix"
)
// IsTerminal returns true if the given file descriptor is a terminal.
func IsTerminal() bool {
_, err := unix.IoctlGetTermios(int(os.Stdout.Fd()), unix.TCGETA)
return err == nil
}

27
vendor/github.com/Sirupsen/logrus/terminal_windows.go generated vendored Normal file

@ -0,0 +1,27 @@
// Based on ssh/terminal:
// Copyright 2011 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// +build windows
package logrus
import (
"syscall"
"unsafe"
)
var kernel32 = syscall.NewLazyDLL("kernel32.dll")
var (
procGetConsoleMode = kernel32.NewProc("GetConsoleMode")
)
// IsTerminal returns true if stderr's file descriptor is a terminal.
func IsTerminal() bool {
fd := syscall.Stderr
var st uint32
r, _, e := syscall.Syscall(procGetConsoleMode.Addr(), 2, uintptr(fd), uintptr(unsafe.Pointer(&st)), 0)
return r != 0 && e == 0
}

161
vendor/github.com/Sirupsen/logrus/text_formatter.go generated vendored Normal file

@ -0,0 +1,161 @@
package logrus
import (
"bytes"
"fmt"
"runtime"
"sort"
"strings"
"time"
)
const (
nocolor = 0
red = 31
green = 32
yellow = 33
blue = 34
gray = 37
)
var (
baseTimestamp time.Time
isTerminal bool
)
func init() {
baseTimestamp = time.Now()
isTerminal = IsTerminal()
}
func miniTS() int {
return int(time.Since(baseTimestamp) / time.Second)
}
type TextFormatter struct {
// Set to true to bypass checking for a TTY before outputting colors.
ForceColors bool
// Force disabling colors.
DisableColors bool
// Disable timestamp logging. useful when output is redirected to logging
// system that already adds timestamps.
DisableTimestamp bool
// Enable logging the full timestamp when a TTY is attached instead of just
// the time passed since beginning of execution.
FullTimestamp bool
// TimestampFormat to use for display when a full timestamp is printed
TimestampFormat string
// The fields are sorted by default for a consistent output. For applications
// that log extremely frequently and don't use the JSON formatter this may not
// be desired.
DisableSorting bool
}
func (f *TextFormatter) Format(entry *Entry) ([]byte, error) {
var keys []string = make([]string, 0, len(entry.Data))
for k := range entry.Data {
keys = append(keys, k)
}
if !f.DisableSorting {
sort.Strings(keys)
}
b := &bytes.Buffer{}
prefixFieldClashes(entry.Data)
isColorTerminal := isTerminal && (runtime.GOOS != "windows")
isColored := (f.ForceColors || isColorTerminal) && !f.DisableColors
timestampFormat := f.TimestampFormat
if timestampFormat == "" {
timestampFormat = DefaultTimestampFormat
}
if isColored {
f.printColored(b, entry, keys, timestampFormat)
} else {
if !f.DisableTimestamp {
f.appendKeyValue(b, "time", entry.Time.Format(timestampFormat))
}
f.appendKeyValue(b, "level", entry.Level.String())
if entry.Message != "" {
f.appendKeyValue(b, "msg", entry.Message)
}
for _, key := range keys {
f.appendKeyValue(b, key, entry.Data[key])
}
}
b.WriteByte('\n')
return b.Bytes(), nil
}
func (f *TextFormatter) printColored(b *bytes.Buffer, entry *Entry, keys []string, timestampFormat string) {
var levelColor int
switch entry.Level {
case DebugLevel:
levelColor = gray
case WarnLevel:
levelColor = yellow
case ErrorLevel, FatalLevel, PanicLevel:
levelColor = red
default:
levelColor = blue
}
levelText := strings.ToUpper(entry.Level.String())[0:4]
if !f.FullTimestamp {
fmt.Fprintf(b, "\x1b[%dm%s\x1b[0m[%04d] %-44s ", levelColor, levelText, miniTS(), entry.Message)
} else {
fmt.Fprintf(b, "\x1b[%dm%s\x1b[0m[%s] %-44s ", levelColor, levelText, entry.Time.Format(timestampFormat), entry.Message)
}
for _, k := range keys {
v := entry.Data[k]
fmt.Fprintf(b, " \x1b[%dm%s\x1b[0m=%+v", levelColor, k, v)
}
}
func needsQuoting(text string) bool {
for _, ch := range text {
if !((ch >= 'a' && ch <= 'z') ||
(ch >= 'A' && ch <= 'Z') ||
(ch >= '0' && ch <= '9') ||
ch == '-' || ch == '.') {
return false
}
}
return true
}
func (f *TextFormatter) appendKeyValue(b *bytes.Buffer, key string, value interface{}) {
b.WriteString(key)
b.WriteByte('=')
switch value := value.(type) {
case string:
if needsQuoting(value) {
b.WriteString(value)
} else {
fmt.Fprintf(b, "%q", value)
}
case error:
errmsg := value.Error()
if needsQuoting(errmsg) {
b.WriteString(errmsg)
} else {
fmt.Fprintf(b, "%q", value)
}
default:
fmt.Fprint(b, value)
}
b.WriteByte(' ')
}

31
vendor/github.com/Sirupsen/logrus/writer.go generated vendored Normal file

@ -0,0 +1,31 @@
package logrus
import (
"bufio"
"io"
"runtime"
)
func (logger *Logger) Writer() *io.PipeWriter {
reader, writer := io.Pipe()
go logger.writerScanner(reader)
runtime.SetFinalizer(writer, writerFinalizer)
return writer
}
func (logger *Logger) writerScanner(reader *io.PipeReader) {
scanner := bufio.NewScanner(reader)
for scanner.Scan() {
logger.Print(scanner.Text())
}
if err := scanner.Err(); err != nil {
logger.Errorf("Error while reading from Writer: %s", err)
}
reader.Close()
}
func writerFinalizer(writer *io.PipeWriter) {
writer.Close()
}

24
vendor/github.com/beevik/ntp/LICENSE generated vendored Normal file

@ -0,0 +1,24 @@
Copyright 2015 Brett Vickers. All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions
are met:
1. Redistributions of source code must retain the above copyright
notice, this list of conditions and the following disclaimer.
2. Redistributions in binary form must reproduce the above copyright
notice, this list of conditions and the following disclaimer in the
documentation and/or other materials provided with the distribution.
THIS SOFTWARE IS PROVIDED BY COPYRIGHT HOLDER ``AS IS'' AND ANY
EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL COPYRIGHT HOLDER OR
CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

12
vendor/github.com/beevik/ntp/README.md generated vendored Normal file

@ -0,0 +1,12 @@
ntp
===
The ntp package is a small implementation of a limited NTP client. It
requests the current time from a remote NTP server according to the
selected version of the NTP protocol. The client uses version 4 of the
NTP protocol (RFC 5905) by default.
The approach was inspired by a post to the go-nuts mailing list by
Michael Hofmann:
https://groups.google.com/forum/?fromgroups#!topic/golang-nuts/FlcdMU5fkLQ
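A minimal usage sketch based on the exported `Time` and `TimeV` functions defined in `ntp.go` below (the pool.ntp.org host is only an example):
```go
package main

import (
	"fmt"

	"github.com/beevik/ntp"
)

func main() {
	// Query the receive time using NTP protocol version 4 (the default).
	t, err := ntp.Time("0.pool.ntp.org")
	if err != nil {
		fmt.Println("ntp query failed:", err)
		return
	}
	fmt.Println("current time:", t)

	// TimeV selects the protocol version explicitly (2, 3 or 4).
	t3, err := ntp.TimeV("0.pool.ntp.org", 3)
	if err == nil {
		fmt.Println("current time (NTPv3):", t3)
	}
}
```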

113
vendor/github.com/beevik/ntp/ntp.go generated vendored Normal file

@ -0,0 +1,113 @@
// Copyright 2015 Brett Vickers.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// Package ntp provides a simple mechanism for querying the current time
// from a remote NTP server. This package only supports NTP client mode
// behavior and version 4 of the NTP protocol. See RFC 5905.
// Approach inspired by go-nuts post by Michael Hofmann:
// https://groups.google.com/forum/?fromgroups#!topic/golang-nuts/FlcdMU5fkLQ
package ntp
import (
"encoding/binary"
"net"
"time"
)
type mode byte
const (
reserved mode = 0 + iota
symmetricActive
symmetricPassive
client
server
broadcast
controlMessage
reservedPrivate
)
type ntpTime struct {
Seconds uint32
Fraction uint32
}
func (t ntpTime) UTC() time.Time {
nsec := uint64(t.Seconds)*1e9 + (uint64(t.Fraction) * 1e9 >> 32)
return time.Date(1900, 1, 1, 0, 0, 0, 0, time.UTC).Add(time.Duration(nsec))
}
type msg struct {
LiVnMode byte // Leap Indicator (2) + Version (3) + Mode (3)
Stratum byte
Poll byte
Precision byte
RootDelay uint32
RootDispersion uint32
ReferenceId uint32
ReferenceTime ntpTime
OriginTime ntpTime
ReceiveTime ntpTime
TransmitTime ntpTime
}
// SetVersion sets the NTP protocol version on the message.
func (m *msg) SetVersion(v byte) {
m.LiVnMode = (m.LiVnMode & 0xc7) | v<<3
}
// SetMode sets the NTP protocol mode on the message.
func (m *msg) SetMode(md mode) {
m.LiVnMode = (m.LiVnMode & 0xf8) | byte(md)
}
// Time returns the "receive time" from the remote NTP server
// specifed as host. NTP client mode is used.
func getTime(host string, version byte) (time.Time, error) {
if version < 2 || version > 4 {
panic("ntp: invalid version number")
}
raddr, err := net.ResolveUDPAddr("udp", host+":123")
if err != nil {
return time.Now(), err
}
con, err := net.DialUDP("udp", nil, raddr)
if err != nil {
return time.Now(), err
}
defer con.Close()
con.SetDeadline(time.Now().Add(5 * time.Second))
m := new(msg)
m.SetMode(client)
m.SetVersion(version)
err = binary.Write(con, binary.BigEndian, m)
if err != nil {
return time.Now(), err
}
err = binary.Read(con, binary.BigEndian, m)
if err != nil {
return time.Now(), err
}
t := m.ReceiveTime.UTC().Local()
return t, nil
}
// TimeV returns the "receive time" from the remote NTP server
// specifed as host. Use the NTP client mode with the requested
// version number (2, 3, or 4).
func TimeV(host string, version byte) (time.Time, error) {
return getTime(host, version)
}
// Time returns the "receive time" from the remote NTP server
// specifed as host. NTP client mode version 4 is used.
func Time(host string) (time.Time, error) {
return getTime(host, 4)
}

2388
vendor/github.com/beorn7/perks/quantile/exampledata.txt generated vendored Normal file

File diff suppressed because it is too large

292
vendor/github.com/beorn7/perks/quantile/stream.go generated vendored Normal file

@ -0,0 +1,292 @@
// Package quantile computes approximate quantiles over an unbounded data
// stream within low memory and CPU bounds.
//
// A small amount of accuracy is traded to achieve the above properties.
//
// Multiple streams can be merged before calling Query to generate a single set
// of results. This is meaningful when the streams represent the same type of
// data. See Merge and Samples.
//
// For more detailed information about the algorithm used, see:
//
// Effective Computation of Biased Quantiles over Data Streams
//
// http://www.cs.rutgers.edu/~muthu/bquant.pdf
package quantile
import (
"math"
"sort"
)
// Sample holds an observed value and meta information for compression. JSON
// tags have been added for convenience.
type Sample struct {
Value float64 `json:",string"`
Width float64 `json:",string"`
Delta float64 `json:",string"`
}
// Samples represents a slice of samples. It implements sort.Interface.
type Samples []Sample
func (a Samples) Len() int { return len(a) }
func (a Samples) Less(i, j int) bool { return a[i].Value < a[j].Value }
func (a Samples) Swap(i, j int) { a[i], a[j] = a[j], a[i] }
type invariant func(s *stream, r float64) float64
// NewLowBiased returns an initialized Stream for low-biased quantiles
// (e.g. 0.01, 0.1, 0.5) where the needed quantiles are not known a priori, but
// error guarantees can still be given even for the lower ranks of the data
// distribution.
//
// The provided epsilon is a relative error, i.e. the true quantile of a value
// returned by a query is guaranteed to be within (1±Epsilon)*Quantile.
//
// See http://www.cs.rutgers.edu/~muthu/bquant.pdf for time, space, and error
// properties.
func NewLowBiased(epsilon float64) *Stream {
ƒ := func(s *stream, r float64) float64 {
return 2 * epsilon * r
}
return newStream(ƒ)
}
// NewHighBiased returns an initialized Stream for high-biased quantiles
// (e.g. 0.01, 0.1, 0.5) where the needed quantiles are not known a priori, but
// error guarantees can still be given even for the higher ranks of the data
// distribution.
//
// The provided epsilon is a relative error, i.e. the true quantile of a value
// returned by a query is guaranteed to be within 1-(1±Epsilon)*(1-Quantile).
//
// See http://www.cs.rutgers.edu/~muthu/bquant.pdf for time, space, and error
// properties.
func NewHighBiased(epsilon float64) *Stream {
ƒ := func(s *stream, r float64) float64 {
return 2 * epsilon * (s.n - r)
}
return newStream(ƒ)
}
// NewTargeted returns an initialized Stream concerned with a particular set of
// quantile values that are supplied a priori. Knowing these a priori reduces
// space and computation time. The targets map maps the desired quantiles to
// their absolute errors, i.e. the true quantile of a value returned by a query
// is guaranteed to be within (Quantile±Epsilon).
//
// See http://www.cs.rutgers.edu/~muthu/bquant.pdf for time, space, and error properties.
func NewTargeted(targets map[float64]float64) *Stream {
ƒ := func(s *stream, r float64) float64 {
var m = math.MaxFloat64
var f float64
for quantile, epsilon := range targets {
if quantile*s.n <= r {
f = (2 * epsilon * r) / quantile
} else {
f = (2 * epsilon * (s.n - r)) / (1 - quantile)
}
if f < m {
m = f
}
}
return m
}
return newStream(ƒ)
}
// Stream computes quantiles for a stream of float64s. It is not thread-safe by
// design. Take care when using across multiple goroutines.
type Stream struct {
*stream
b Samples
sorted bool
}
func newStream(ƒ invariant) *Stream {
x := &stream{ƒ: ƒ}
return &Stream{x, make(Samples, 0, 500), true}
}
// Insert inserts v into the stream.
func (s *Stream) Insert(v float64) {
s.insert(Sample{Value: v, Width: 1})
}
func (s *Stream) insert(sample Sample) {
s.b = append(s.b, sample)
s.sorted = false
if len(s.b) == cap(s.b) {
s.flush()
}
}
// Query returns the computed qth percentile value. If s was created with
// NewTargeted, and q is not in the set of quantiles provided a priori, Query
// will return an unspecified result.
func (s *Stream) Query(q float64) float64 {
if !s.flushed() {
// Fast path when there hasn't been enough data for a flush;
// this also yields better accuracy for small sets of data.
l := len(s.b)
if l == 0 {
return 0
}
i := int(float64(l) * q)
if i > 0 {
i -= 1
}
s.maybeSort()
return s.b[i].Value
}
s.flush()
return s.stream.query(q)
}
// Merge merges samples into the underlying stream's samples. This is handy when
// merging multiple streams from separate threads, database shards, etc.
//
// ATTENTION: This method is broken and does not yield correct results. The
// underlying algorithm is not capable of merging streams correctly.
func (s *Stream) Merge(samples Samples) {
sort.Sort(samples)
s.stream.merge(samples)
}
// Reset reinitializes and clears the list reusing the samples buffer memory.
func (s *Stream) Reset() {
s.stream.reset()
s.b = s.b[:0]
}
// Samples returns stream samples held by s.
func (s *Stream) Samples() Samples {
if !s.flushed() {
return s.b
}
s.flush()
return s.stream.samples()
}
// Count returns the total number of samples observed in the stream
// since initialization.
func (s *Stream) Count() int {
return len(s.b) + s.stream.count()
}
func (s *Stream) flush() {
s.maybeSort()
s.stream.merge(s.b)
s.b = s.b[:0]
}
func (s *Stream) maybeSort() {
if !s.sorted {
s.sorted = true
sort.Sort(s.b)
}
}
func (s *Stream) flushed() bool {
return len(s.stream.l) > 0
}
type stream struct {
n float64
l []Sample
ƒ invariant
}
func (s *stream) reset() {
s.l = s.l[:0]
s.n = 0
}
func (s *stream) insert(v float64) {
s.merge(Samples{{v, 1, 0}})
}
func (s *stream) merge(samples Samples) {
// TODO(beorn7): This tries to merge not only individual samples, but
// whole summaries. The paper doesn't mention merging summaries at
// all. Unittests show that the merging is inaccurate. Find out how to
// do merges properly.
var r float64
i := 0
for _, sample := range samples {
for ; i < len(s.l); i++ {
c := s.l[i]
if c.Value > sample.Value {
// Insert at position i.
s.l = append(s.l, Sample{})
copy(s.l[i+1:], s.l[i:])
s.l[i] = Sample{
sample.Value,
sample.Width,
math.Max(sample.Delta, math.Floor(s.ƒ(s, r))-1),
// TODO(beorn7): How to calculate delta correctly?
}
i++
goto inserted
}
r += c.Width
}
s.l = append(s.l, Sample{sample.Value, sample.Width, 0})
i++
inserted:
s.n += sample.Width
r += sample.Width
}
s.compress()
}
func (s *stream) count() int {
return int(s.n)
}
func (s *stream) query(q float64) float64 {
t := math.Ceil(q * s.n)
t += math.Ceil(s.ƒ(s, t) / 2)
p := s.l[0]
var r float64
for _, c := range s.l[1:] {
r += p.Width
if r+c.Width+c.Delta > t {
return p.Value
}
p = c
}
return p.Value
}
func (s *stream) compress() {
if len(s.l) < 2 {
return
}
x := s.l[len(s.l)-1]
xi := len(s.l) - 1
r := s.n - 1 - x.Width
for i := len(s.l) - 2; i >= 0; i-- {
c := s.l[i]
if c.Width+x.Width+x.Delta <= s.ƒ(s, r) {
x.Width += c.Width
s.l[xi] = x
// Remove element at i.
copy(s.l[i:], s.l[i+1:])
s.l = s.l[:len(s.l)-1]
xi -= 1
} else {
x = c
xi = i
}
r -= c.Width
}
}
func (s *stream) samples() Samples {
samples := make(Samples, len(s.l))
copy(samples, s.l)
return samples
}
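A short sketch of the Stream API defined above; the target quantiles and their absolute errors are illustrative values:

```go
package main

import (
	"fmt"

	"github.com/beorn7/perks/quantile"
)

func main() {
	// Track the median and the 99th percentile with given absolute errors.
	q := quantile.NewTargeted(map[float64]float64{0.5: 0.05, 0.99: 0.001})
	for i := 1; i <= 1000; i++ {
		q.Insert(float64(i))
	}
	fmt.Println("p50:", q.Query(0.5), "p99:", q.Query(0.99), "count:", q.Count())
}
```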

vendor/github.com/coreos/go-systemd/LICENSE generated vendored Normal file

@ -0,0 +1,191 @@
Apache License
Version 2.0, January 2004
http://www.apache.org/licenses/
TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
1. Definitions.
"License" shall mean the terms and conditions for use, reproduction, and
distribution as defined by Sections 1 through 9 of this document.
"Licensor" shall mean the copyright owner or entity authorized by the copyright
owner that is granting the License.
"Legal Entity" shall mean the union of the acting entity and all other entities
that control, are controlled by, or are under common control with that entity.
For the purposes of this definition, "control" means (i) the power, direct or
indirect, to cause the direction or management of such entity, whether by
contract or otherwise, or (ii) ownership of fifty percent (50%) or more of the
outstanding shares, or (iii) beneficial ownership of such entity.
"You" (or "Your") shall mean an individual or Legal Entity exercising
permissions granted by this License.
"Source" form shall mean the preferred form for making modifications, including
but not limited to software source code, documentation source, and configuration
files.
"Object" form shall mean any form resulting from mechanical transformation or
translation of a Source form, including but not limited to compiled object code,
generated documentation, and conversions to other media types.
"Work" shall mean the work of authorship, whether in Source or Object form, made
available under the License, as indicated by a copyright notice that is included
in or attached to the work (an example is provided in the Appendix below).
"Derivative Works" shall mean any work, whether in Source or Object form, that
is based on (or derived from) the Work and for which the editorial revisions,
annotations, elaborations, or other modifications represent, as a whole, an
original work of authorship. For the purposes of this License, Derivative Works
shall not include works that remain separable from, or merely link (or bind by
name) to the interfaces of, the Work and Derivative Works thereof.
"Contribution" shall mean any work of authorship, including the original version
of the Work and any modifications or additions to that Work or Derivative Works
thereof, that is intentionally submitted to Licensor for inclusion in the Work
by the copyright owner or by an individual or Legal Entity authorized to submit
on behalf of the copyright owner. For the purposes of this definition,
"submitted" means any form of electronic, verbal, or written communication sent
to the Licensor or its representatives, including but not limited to
communication on electronic mailing lists, source code control systems, and
issue tracking systems that are managed by, or on behalf of, the Licensor for
the purpose of discussing and improving the Work, but excluding communication
that is conspicuously marked or otherwise designated in writing by the copyright
owner as "Not a Contribution."
"Contributor" shall mean Licensor and any individual or Legal Entity on behalf
of whom a Contribution has been received by Licensor and subsequently
incorporated within the Work.
2. Grant of Copyright License.
Subject to the terms and conditions of this License, each Contributor hereby
grants to You a perpetual, worldwide, non-exclusive, no-charge, royalty-free,
irrevocable copyright license to reproduce, prepare Derivative Works of,
publicly display, publicly perform, sublicense, and distribute the Work and such
Derivative Works in Source or Object form.
3. Grant of Patent License.
Subject to the terms and conditions of this License, each Contributor hereby
grants to You a perpetual, worldwide, non-exclusive, no-charge, royalty-free,
irrevocable (except as stated in this section) patent license to make, have
made, use, offer to sell, sell, import, and otherwise transfer the Work, where
such license applies only to those patent claims licensable by such Contributor
that are necessarily infringed by their Contribution(s) alone or by combination
of their Contribution(s) with the Work to which such Contribution(s) was
submitted. If You institute patent litigation against any entity (including a
cross-claim or counterclaim in a lawsuit) alleging that the Work or a
Contribution incorporated within the Work constitutes direct or contributory
patent infringement, then any patent licenses granted to You under this License
for that Work shall terminate as of the date such litigation is filed.
4. Redistribution.
You may reproduce and distribute copies of the Work or Derivative Works thereof
in any medium, with or without modifications, and in Source or Object form,
provided that You meet the following conditions:
You must give any other recipients of the Work or Derivative Works a copy of
this License; and
You must cause any modified files to carry prominent notices stating that You
changed the files; and
You must retain, in the Source form of any Derivative Works that You distribute,
all copyright, patent, trademark, and attribution notices from the Source form
of the Work, excluding those notices that do not pertain to any part of the
Derivative Works; and
If the Work includes a "NOTICE" text file as part of its distribution, then any
Derivative Works that You distribute must include a readable copy of the
attribution notices contained within such NOTICE file, excluding those notices
that do not pertain to any part of the Derivative Works, in at least one of the
following places: within a NOTICE text file distributed as part of the
Derivative Works; within the Source form or documentation, if provided along
with the Derivative Works; or, within a display generated by the Derivative
Works, if and wherever such third-party notices normally appear. The contents of
the NOTICE file are for informational purposes only and do not modify the
License. You may add Your own attribution notices within Derivative Works that
You distribute, alongside or as an addendum to the NOTICE text from the Work,
provided that such additional attribution notices cannot be construed as
modifying the License.
You may add Your own copyright statement to Your modifications and may provide
additional or different license terms and conditions for use, reproduction, or
distribution of Your modifications, or for any such Derivative Works as a whole,
provided Your use, reproduction, and distribution of the Work otherwise complies
with the conditions stated in this License.
5. Submission of Contributions.
Unless You explicitly state otherwise, any Contribution intentionally submitted
for inclusion in the Work by You to the Licensor shall be under the terms and
conditions of this License, without any additional terms or conditions.
Notwithstanding the above, nothing herein shall supersede or modify the terms of
any separate license agreement you may have executed with Licensor regarding
such Contributions.
6. Trademarks.
This License does not grant permission to use the trade names, trademarks,
service marks, or product names of the Licensor, except as required for
reasonable and customary use in describing the origin of the Work and
reproducing the content of the NOTICE file.
7. Disclaimer of Warranty.
Unless required by applicable law or agreed to in writing, Licensor provides the
Work (and each Contributor provides its Contributions) on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied,
including, without limitation, any warranties or conditions of TITLE,
NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A PARTICULAR PURPOSE. You are
solely responsible for determining the appropriateness of using or
redistributing the Work and assume any risks associated with Your exercise of
permissions under this License.
8. Limitation of Liability.
In no event and under no legal theory, whether in tort (including negligence),
contract, or otherwise, unless required by applicable law (such as deliberate
and grossly negligent acts) or agreed to in writing, shall any Contributor be
liable to You for damages, including any direct, indirect, special, incidental,
or consequential damages of any character arising as a result of this License or
out of the use or inability to use the Work (including but not limited to
damages for loss of goodwill, work stoppage, computer failure or malfunction, or
any and all other commercial damages or losses), even if such Contributor has
been advised of the possibility of such damages.
9. Accepting Warranty or Additional Liability.
While redistributing the Work or Derivative Works thereof, You may choose to
offer, and charge a fee for, acceptance of support, warranty, indemnity, or
other liability obligations and/or rights consistent with this License. However,
in accepting such obligations, You may act only on Your own behalf and on Your
sole responsibility, not on behalf of any other Contributor, and only if You
agree to indemnify, defend, and hold each Contributor harmless for any liability
incurred by, or claims asserted against, such Contributor by reason of your
accepting any such warranty or additional liability.
END OF TERMS AND CONDITIONS
APPENDIX: How to apply the Apache License to your work
To apply the Apache License to your work, attach the following boilerplate
notice, with the fields enclosed by brackets "[]" replaced with your own
identifying information. (Don't include the brackets!) The text should be
enclosed in the appropriate comment syntax for the file format. We also
recommend that a file or class name and description of purpose be included on
the same "printed page" as the copyright notice for easier identification within
third-party archives.
Copyright [yyyy] [name of copyright owner]
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.

vendor/github.com/coreos/go-systemd/dbus/dbus.go generated vendored Normal file

@ -0,0 +1,198 @@
// Copyright 2015 CoreOS, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
// Integration with the systemd D-Bus API. See http://www.freedesktop.org/wiki/Software/systemd/dbus/
package dbus
import (
"fmt"
"os"
"strconv"
"strings"
"sync"
"github.com/godbus/dbus"
)
const (
alpha = `abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ`
num = `0123456789`
alphanum = alpha + num
signalBuffer = 100
)
// needsEscape checks whether a byte in a potential dbus ObjectPath needs to be escaped
func needsEscape(i int, b byte) bool {
// Escape everything that is not a-z-A-Z-0-9
// Also escape 0-9 if it's the first character
return strings.IndexByte(alphanum, b) == -1 ||
(i == 0 && strings.IndexByte(num, b) != -1)
}
// PathBusEscape sanitizes a constituent string of a dbus ObjectPath using the
// rules that systemd uses for serializing special characters.
func PathBusEscape(path string) string {
// Special case the empty string
if len(path) == 0 {
return "_"
}
n := []byte{}
for i := 0; i < len(path); i++ {
c := path[i]
if needsEscape(i, c) {
e := fmt.Sprintf("_%x", c)
n = append(n, []byte(e)...)
} else {
n = append(n, c)
}
}
return string(n)
}
// Conn is a connection to systemd's dbus endpoint.
type Conn struct {
// sysconn/sysobj are only used to call dbus methods
sysconn *dbus.Conn
sysobj dbus.BusObject
// sigconn/sigobj are only used to receive dbus signals
sigconn *dbus.Conn
sigobj dbus.BusObject
jobListener struct {
jobs map[dbus.ObjectPath]chan<- string
sync.Mutex
}
subscriber struct {
updateCh chan<- *SubStateUpdate
errCh chan<- error
sync.Mutex
ignore map[dbus.ObjectPath]int64
cleanIgnore int64
}
}
// New establishes a connection to the system bus and authenticates.
// Callers should call Close() when done with the connection.
func New() (*Conn, error) {
return newConnection(func() (*dbus.Conn, error) {
return dbusAuthHelloConnection(dbus.SystemBusPrivate)
})
}
// NewUserConnection establishes a connection to the session bus and
// authenticates. This can be used to connect to systemd user instances.
// Callers should call Close() when done with the connection.
func NewUserConnection() (*Conn, error) {
return newConnection(func() (*dbus.Conn, error) {
return dbusAuthHelloConnection(dbus.SessionBusPrivate)
})
}
// NewSystemdConnection establishes a private, direct connection to systemd.
// This can be used for communicating with systemd without a dbus daemon.
// Callers should call Close() when done with the connection.
func NewSystemdConnection() (*Conn, error) {
return newConnection(func() (*dbus.Conn, error) {
// We skip Hello when talking directly to systemd.
return dbusAuthConnection(func() (*dbus.Conn, error) {
return dbus.Dial("unix:path=/run/systemd/private")
})
})
}
// Close closes an established connection
func (c *Conn) Close() {
c.sysconn.Close()
c.sigconn.Close()
}
func newConnection(createBus func() (*dbus.Conn, error)) (*Conn, error) {
sysconn, err := createBus()
if err != nil {
return nil, err
}
sigconn, err := createBus()
if err != nil {
sysconn.Close()
return nil, err
}
c := &Conn{
sysconn: sysconn,
sysobj: systemdObject(sysconn),
sigconn: sigconn,
sigobj: systemdObject(sigconn),
}
c.subscriber.ignore = make(map[dbus.ObjectPath]int64)
c.jobListener.jobs = make(map[dbus.ObjectPath]chan<- string)
// Setup the listeners on jobs so that we can get completions
c.sigconn.BusObject().Call("org.freedesktop.DBus.AddMatch", 0,
"type='signal', interface='org.freedesktop.systemd1.Manager', member='JobRemoved'")
c.dispatch()
return c, nil
}
// GetManagerProperty returns the value of a property on the org.freedesktop.systemd1.Manager
// interface. The value is returned in its string representation, as defined at
// https://developer.gnome.org/glib/unstable/gvariant-text.html
func (c *Conn) GetManagerProperty(prop string) (string, error) {
variant, err := c.sysobj.GetProperty("org.freedesktop.systemd1.Manager." + prop)
if err != nil {
return "", err
}
return variant.String(), nil
}
func dbusAuthConnection(createBus func() (*dbus.Conn, error)) (*dbus.Conn, error) {
conn, err := createBus()
if err != nil {
return nil, err
}
// Only use EXTERNAL method, and hardcode the uid (not username)
// to avoid a username lookup (which requires a dynamically linked
// libc)
methods := []dbus.Auth{dbus.AuthExternal(strconv.Itoa(os.Getuid()))}
err = conn.Auth(methods)
if err != nil {
conn.Close()
return nil, err
}
return conn, nil
}
func dbusAuthHelloConnection(createBus func() (*dbus.Conn, error)) (*dbus.Conn, error) {
conn, err := dbusAuthConnection(createBus)
if err != nil {
return nil, err
}
if err = conn.Hello(); err != nil {
conn.Close()
return nil, err
}
return conn, nil
}
func systemdObject(conn *dbus.Conn) dbus.BusObject {
return conn.Object("org.freedesktop.systemd1", dbus.ObjectPath("/org/freedesktop/systemd1"))
}
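A hedged sketch of connecting to systemd and using two of the helpers above. It requires a running systemd system bus, and the unit name passed to PathBusEscape is made up:

```go
package main

import (
	"fmt"
	"log"

	sd "github.com/coreos/go-systemd/dbus"
)

func main() {
	conn, err := sd.New() // connect to the system bus and authenticate
	if err != nil {
		log.Fatal(err)
	}
	defer conn.Close()

	version, err := conn.GetManagerProperty("Version")
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println("systemd version:", version)

	// '-' (0x2d) and '.' (0x2e) are not alphanumeric, so they get hex-escaped.
	fmt.Println(sd.PathBusEscape("foo-bar.service")) // foo_2dbar_2eservice
}
```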

vendor/github.com/coreos/go-systemd/dbus/methods.go generated vendored Normal file

@ -0,0 +1,442 @@
// Copyright 2015 CoreOS, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package dbus
import (
"errors"
"path"
"strconv"
"github.com/godbus/dbus"
)
func (c *Conn) jobComplete(signal *dbus.Signal) {
var id uint32
var job dbus.ObjectPath
var unit string
var result string
dbus.Store(signal.Body, &id, &job, &unit, &result)
c.jobListener.Lock()
out, ok := c.jobListener.jobs[job]
if ok {
out <- result
delete(c.jobListener.jobs, job)
}
c.jobListener.Unlock()
}
func (c *Conn) startJob(ch chan<- string, job string, args ...interface{}) (int, error) {
if ch != nil {
c.jobListener.Lock()
defer c.jobListener.Unlock()
}
var p dbus.ObjectPath
err := c.sysobj.Call(job, 0, args...).Store(&p)
if err != nil {
return 0, err
}
if ch != nil {
c.jobListener.jobs[p] = ch
}
// ignore error since 0 is fine if conversion fails
jobID, _ := strconv.Atoi(path.Base(string(p)))
return jobID, nil
}
// StartUnit enqueues a start job and depending jobs, if any (unless otherwise
// specified by the mode string).
//
// Takes the unit to activate, plus a mode string. The mode needs to be one of
// replace, fail, isolate, ignore-dependencies, ignore-requirements. If
// "replace" the call will start the unit and its dependencies, possibly
// replacing already queued jobs that conflict with this. If "fail" the call
// will start the unit and its dependencies, but will fail if this would change
// an already queued job. If "isolate" the call will start the unit in question
// and terminate all units that aren't dependencies of it. If
// "ignore-dependencies" it will start a unit but ignore all its dependencies.
// If "ignore-requirements" it will start a unit but only ignore the
// requirement dependencies. It is not recommended to make use of the latter
// two options.
//
// If the provided channel is non-nil, a result string will be sent to it upon
// job completion: one of done, canceled, timeout, failed, dependency, skipped.
// done indicates successful execution of a job. canceled indicates that a job
// has been canceled before it finished execution. timeout indicates that the
// job timeout was reached. failed indicates that the job failed. dependency
// indicates that a job this job has been depending on failed and the job hence
// has been removed too. skipped indicates that a job was skipped because it
// didn't apply to the unit's current state.
//
// If no error occurs, the ID of the underlying systemd job will be returned. There
// does exist the possibility for no error to be returned, but for the returned job
// ID to be 0. In this case, the actual underlying ID is not 0 and this datapoint
// should not be considered authoritative.
//
// If an error does occur, it will be returned to the user alongside a job ID of 0.
func (c *Conn) StartUnit(name string, mode string, ch chan<- string) (int, error) {
return c.startJob(ch, "org.freedesktop.systemd1.Manager.StartUnit", name, mode)
}
// StopUnit is similar to StartUnit but stops the specified unit rather
// than starting it.
func (c *Conn) StopUnit(name string, mode string, ch chan<- string) (int, error) {
return c.startJob(ch, "org.freedesktop.systemd1.Manager.StopUnit", name, mode)
}
// ReloadUnit reloads a unit. Reloading is done only if the unit is already running and fails otherwise.
func (c *Conn) ReloadUnit(name string, mode string, ch chan<- string) (int, error) {
return c.startJob(ch, "org.freedesktop.systemd1.Manager.ReloadUnit", name, mode)
}
// RestartUnit restarts a service. If a service is restarted that isn't
// running it will be started.
func (c *Conn) RestartUnit(name string, mode string, ch chan<- string) (int, error) {
return c.startJob(ch, "org.freedesktop.systemd1.Manager.RestartUnit", name, mode)
}
// TryRestartUnit is like RestartUnit, except that a service that isn't running
// is not affected by the restart.
func (c *Conn) TryRestartUnit(name string, mode string, ch chan<- string) (int, error) {
return c.startJob(ch, "org.freedesktop.systemd1.Manager.TryRestartUnit", name, mode)
}
// ReloadOrRestartUnit attempts a reload if the unit supports it and uses a
// restart otherwise.
func (c *Conn) ReloadOrRestartUnit(name string, mode string, ch chan<- string) (int, error) {
return c.startJob(ch, "org.freedesktop.systemd1.Manager.ReloadOrRestartUnit", name, mode)
}
// ReloadOrTryRestartUnit attempts a reload if the unit supports it and uses a
// "Try"-flavored restart otherwise.
func (c *Conn) ReloadOrTryRestartUnit(name string, mode string, ch chan<- string) (int, error) {
return c.startJob(ch, "org.freedesktop.systemd1.Manager.ReloadOrTryRestartUnit", name, mode)
}
// StartTransientUnit() may be used to create and start a transient unit, which
// will be released as soon as it is not running or referenced anymore or the
// system is rebooted. name is the unit name including suffix, and must be
// unique. mode is the same as in StartUnit(), properties contains properties
// of the unit.
func (c *Conn) StartTransientUnit(name string, mode string, properties []Property, ch chan<- string) (int, error) {
return c.startJob(ch, "org.freedesktop.systemd1.Manager.StartTransientUnit", name, mode, properties, make([]PropertyCollection, 0))
}
// KillUnit takes the unit name and a UNIX signal number to send. All of the unit's
// processes are killed.
func (c *Conn) KillUnit(name string, signal int32) {
c.sysobj.Call("org.freedesktop.systemd1.Manager.KillUnit", 0, name, "all", signal).Store()
}
// ResetFailedUnit resets the "failed" state of a specific unit.
func (c *Conn) ResetFailedUnit(name string) error {
return c.sysobj.Call("org.freedesktop.systemd1.Manager.ResetFailedUnit", 0, name).Store()
}
// getProperties takes the unit name and returns all of its dbus object properties, for the given dbus interface
func (c *Conn) getProperties(unit string, dbusInterface string) (map[string]interface{}, error) {
var err error
var props map[string]dbus.Variant
path := unitPath(unit)
if !path.IsValid() {
return nil, errors.New("invalid unit name: " + unit)
}
obj := c.sysconn.Object("org.freedesktop.systemd1", path)
err = obj.Call("org.freedesktop.DBus.Properties.GetAll", 0, dbusInterface).Store(&props)
if err != nil {
return nil, err
}
out := make(map[string]interface{}, len(props))
for k, v := range props {
out[k] = v.Value()
}
return out, nil
}
// GetUnitProperties takes the unit name and returns all of its dbus object properties.
func (c *Conn) GetUnitProperties(unit string) (map[string]interface{}, error) {
return c.getProperties(unit, "org.freedesktop.systemd1.Unit")
}
func (c *Conn) getProperty(unit string, dbusInterface string, propertyName string) (*Property, error) {
var err error
var prop dbus.Variant
path := unitPath(unit)
if !path.IsValid() {
return nil, errors.New("invalid unit name: " + unit)
}
obj := c.sysconn.Object("org.freedesktop.systemd1", path)
err = obj.Call("org.freedesktop.DBus.Properties.Get", 0, dbusInterface, propertyName).Store(&prop)
if err != nil {
return nil, err
}
return &Property{Name: propertyName, Value: prop}, nil
}
func (c *Conn) GetUnitProperty(unit string, propertyName string) (*Property, error) {
return c.getProperty(unit, "org.freedesktop.systemd1.Unit", propertyName)
}
// GetUnitTypeProperties returns the extra properties for a unit, specific to the unit type.
// Valid values for unitType: Service, Socket, Target, Device, Mount, Automount, Snapshot, Timer, Swap, Path, Slice, Scope
// return "dbus.Error: Unknown interface" if the unitType is not the correct type of the unit
func (c *Conn) GetUnitTypeProperties(unit string, unitType string) (map[string]interface{}, error) {
return c.getProperties(unit, "org.freedesktop.systemd1."+unitType)
}
// SetUnitProperties() may be used to modify certain unit properties at runtime.
// Not all properties may be changed at runtime, but many resource management
// settings (primarily those in systemd.cgroup(5)) may. The changes are applied
// instantly, and stored on disk for future boots, unless runtime is true, in which
// case the settings only apply until the next reboot. name is the name of the unit
// to modify. properties are the settings to set, encoded as an array of property
// name and value pairs.
func (c *Conn) SetUnitProperties(name string, runtime bool, properties ...Property) error {
return c.sysobj.Call("org.freedesktop.systemd1.Manager.SetUnitProperties", 0, name, runtime, properties).Store()
}
func (c *Conn) GetUnitTypeProperty(unit string, unitType string, propertyName string) (*Property, error) {
return c.getProperty(unit, "org.freedesktop.systemd1."+unitType, propertyName)
}
type UnitStatus struct {
Name string // The primary unit name as string
Description string // The human readable description string
LoadState string // The load state (i.e. whether the unit file has been loaded successfully)
ActiveState string // The active state (i.e. whether the unit is currently started or not)
SubState string // The sub state (a more fine-grained version of the active state that is specific to the unit type, which the active state is not)
Followed string // A unit that is being followed in its state by this unit, if there is any, otherwise the empty string.
Path dbus.ObjectPath // The unit object path
JobId uint32 // If there is a job queued for the job unit the numeric job id, 0 otherwise
JobType string // The job type as string
JobPath dbus.ObjectPath // The job object path
}
// ListUnits returns an array with all currently loaded units. Note that
// units may be known by multiple names at the same time, and hence there might
// be more unit names loaded than actual units behind them.
func (c *Conn) ListUnits() ([]UnitStatus, error) {
result := make([][]interface{}, 0)
err := c.sysobj.Call("org.freedesktop.systemd1.Manager.ListUnits", 0).Store(&result)
if err != nil {
return nil, err
}
resultInterface := make([]interface{}, len(result))
for i := range result {
resultInterface[i] = result[i]
}
status := make([]UnitStatus, len(result))
statusInterface := make([]interface{}, len(status))
for i := range status {
statusInterface[i] = &status[i]
}
err = dbus.Store(resultInterface, statusInterface...)
if err != nil {
return nil, err
}
return status, nil
}
type UnitFile struct {
Path string
Type string
}
// ListUnitFiles returns an array of all available units on disk.
func (c *Conn) ListUnitFiles() ([]UnitFile, error) {
result := make([][]interface{}, 0)
err := c.sysobj.Call("org.freedesktop.systemd1.Manager.ListUnitFiles", 0).Store(&result)
if err != nil {
return nil, err
}
resultInterface := make([]interface{}, len(result))
for i := range result {
resultInterface[i] = result[i]
}
files := make([]UnitFile, len(result))
fileInterface := make([]interface{}, len(files))
for i := range files {
fileInterface[i] = &files[i]
}
err = dbus.Store(resultInterface, fileInterface...)
if err != nil {
return nil, err
}
return files, nil
}
type LinkUnitFileChange EnableUnitFileChange
// LinkUnitFiles() links unit files (that are located outside of the
// usual unit search paths) into the unit search path.
//
// It takes a list of absolute paths to unit files to link and two
// booleans. The first boolean controls whether the unit shall be
// enabled for runtime only (true, /run), or persistently (false,
// /etc).
// The second controls whether symlinks pointing to other units shall
// be replaced if necessary.
//
// This call returns a list of the changes made. The list consists of
// structures with three strings: the type of the change (one of symlink
// or unlink), the file name of the symlink and the destination of the
// symlink.
func (c *Conn) LinkUnitFiles(files []string, runtime bool, force bool) ([]LinkUnitFileChange, error) {
result := make([][]interface{}, 0)
err := c.sysobj.Call("org.freedesktop.systemd1.Manager.LinkUnitFiles", 0, files, runtime, force).Store(&result)
if err != nil {
return nil, err
}
resultInterface := make([]interface{}, len(result))
for i := range result {
resultInterface[i] = result[i]
}
changes := make([]LinkUnitFileChange, len(result))
changesInterface := make([]interface{}, len(changes))
for i := range changes {
changesInterface[i] = &changes[i]
}
err = dbus.Store(resultInterface, changesInterface...)
if err != nil {
return nil, err
}
return changes, nil
}
// EnableUnitFiles() may be used to enable one or more units in the system (by
// creating symlinks to them in /etc or /run).
//
// It takes a list of unit files to enable (either just file names or full
// absolute paths if the unit files are residing outside the usual unit
// search paths), and two booleans: the first controls whether the unit shall
// be enabled for runtime only (true, /run), or persistently (false, /etc).
// The second one controls whether symlinks pointing to other units shall
// be replaced if necessary.
//
// This call returns one boolean and an array with the changes made. The
// boolean signals whether the unit files contained any enablement
// information (i.e., an [Install] section). The changes list consists of
// structures with three strings: the type of the change (one of symlink
// or unlink), the file name of the symlink and the destination of the
// symlink.
func (c *Conn) EnableUnitFiles(files []string, runtime bool, force bool) (bool, []EnableUnitFileChange, error) {
var carries_install_info bool
result := make([][]interface{}, 0)
err := c.sysobj.Call("org.freedesktop.systemd1.Manager.EnableUnitFiles", 0, files, runtime, force).Store(&carries_install_info, &result)
if err != nil {
return false, nil, err
}
resultInterface := make([]interface{}, len(result))
for i := range result {
resultInterface[i] = result[i]
}
changes := make([]EnableUnitFileChange, len(result))
changesInterface := make([]interface{}, len(changes))
for i := range changes {
changesInterface[i] = &changes[i]
}
err = dbus.Store(resultInterface, changesInterface...)
if err != nil {
return false, nil, err
}
return carries_install_info, changes, nil
}
type EnableUnitFileChange struct {
Type string // Type of the change (one of symlink or unlink)
Filename string // File name of the symlink
Destination string // Destination of the symlink
}
// DisableUnitFiles() may be used to disable one or more units in the system (by
// removing symlinks to them from /etc or /run).
//
// It takes a list of unit files to disable (either just file names or full
// absolute paths if the unit files are residing outside the usual unit
// search paths), and one boolean: whether the unit was enabled for runtime
// only (true, /run), or persistently (false, /etc).
//
// This call returns an array with the changes made. The changes list
// consists of structures with three strings: the type of the change (one of
// symlink or unlink), the file name of the symlink and the destination of the
// symlink.
func (c *Conn) DisableUnitFiles(files []string, runtime bool) ([]DisableUnitFileChange, error) {
result := make([][]interface{}, 0)
err := c.sysobj.Call("org.freedesktop.systemd1.Manager.DisableUnitFiles", 0, files, runtime).Store(&result)
if err != nil {
return nil, err
}
resultInterface := make([]interface{}, len(result))
for i := range result {
resultInterface[i] = result[i]
}
changes := make([]DisableUnitFileChange, len(result))
changesInterface := make([]interface{}, len(changes))
for i := range changes {
changesInterface[i] = &changes[i]
}
err = dbus.Store(resultInterface, changesInterface...)
if err != nil {
return nil, err
}
return changes, nil
}
type DisableUnitFileChange struct {
Type string // Type of the change (one of symlink or unlink)
Filename string // File name of the symlink
Destination string // Destination of the symlink
}
// Reload instructs systemd to scan for and reload unit files. This is
// equivalent to a 'systemctl daemon-reload'.
func (c *Conn) Reload() error {
return c.sysobj.Call("org.freedesktop.systemd1.Manager.Reload", 0).Store()
}
func unitPath(name string) dbus.ObjectPath {
return dbus.ObjectPath("/org/freedesktop/systemd1/unit/" + PathBusEscape(name))
}
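A sketch of enqueueing a start job and waiting for its result, using the methods above; the unit name is illustrative and error handling is minimal:

```go
package main

import (
	"fmt"
	"log"

	sd "github.com/coreos/go-systemd/dbus"
)

func main() {
	conn, err := sd.New()
	if err != nil {
		log.Fatal(err)
	}
	defer conn.Close()

	// The channel receives one of: done, canceled, timeout, failed, dependency, skipped.
	done := make(chan string, 1)
	jobID, err := conn.StartUnit("nginx.service", "replace", done)
	if err != nil {
		log.Fatal(err)
	}
	fmt.Printf("job %d finished with result %q\n", jobID, <-done)
}
```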

vendor/github.com/coreos/go-systemd/dbus/properties.go generated vendored Normal file

@ -0,0 +1,218 @@
// Copyright 2015 CoreOS, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package dbus
import (
"github.com/godbus/dbus"
)
// From the systemd docs:
//
// The properties array of StartTransientUnit() may take many of the settings
// that may also be configured in unit files. Not all parameters are currently
// accepted though, but we plan to cover more properties with future release.
// Currently you may set the Description, Slice and all dependency types of
// units, as well as RemainAfterExit, ExecStart for service units,
// TimeoutStopUSec and PIDs for scope units, and CPUAccounting, CPUShares,
// BlockIOAccounting, BlockIOWeight, BlockIOReadBandwidth,
// BlockIOWriteBandwidth, BlockIODeviceWeight, MemoryAccounting, MemoryLimit,
// DevicePolicy, DeviceAllow for services/scopes/slices. These fields map
// directly to their counterparts in unit files and as normal D-Bus object
// properties. The exception here is the PIDs field of scope units which is
// used for construction of the scope only and specifies the initial PIDs to
// add to the scope object.
type Property struct {
Name string
Value dbus.Variant
}
type PropertyCollection struct {
Name string
Properties []Property
}
type execStart struct {
Path string // the binary path to execute
Args []string // an array with all arguments to pass to the executed command, starting with argument 0
UncleanIsFailure bool // a boolean whether it should be considered a failure if the process exits uncleanly
}
// PropExecStart sets the ExecStart service property. The first argument is a
// slice with the binary path to execute followed by the arguments to pass to
// the executed command. See
// http://www.freedesktop.org/software/systemd/man/systemd.service.html#ExecStart=
func PropExecStart(command []string, uncleanIsFailure bool) Property {
execStarts := []execStart{
execStart{
Path: command[0],
Args: command,
UncleanIsFailure: uncleanIsFailure,
},
}
return Property{
Name: "ExecStart",
Value: dbus.MakeVariant(execStarts),
}
}
// PropRemainAfterExit sets the RemainAfterExit service property. See
// http://www.freedesktop.org/software/systemd/man/systemd.service.html#RemainAfterExit=
func PropRemainAfterExit(b bool) Property {
return Property{
Name: "RemainAfterExit",
Value: dbus.MakeVariant(b),
}
}
// PropDescription sets the Description unit property. See
// http://www.freedesktop.org/software/systemd/man/systemd.unit#Description=
func PropDescription(desc string) Property {
return Property{
Name: "Description",
Value: dbus.MakeVariant(desc),
}
}
func propDependency(name string, units []string) Property {
return Property{
Name: name,
Value: dbus.MakeVariant(units),
}
}
// PropRequires sets the Requires unit property. See
// http://www.freedesktop.org/software/systemd/man/systemd.unit.html#Requires=
func PropRequires(units ...string) Property {
return propDependency("Requires", units)
}
// PropRequiresOverridable sets the RequiresOverridable unit property. See
// http://www.freedesktop.org/software/systemd/man/systemd.unit.html#RequiresOverridable=
func PropRequiresOverridable(units ...string) Property {
return propDependency("RequiresOverridable", units)
}
// PropRequisite sets the Requisite unit property. See
// http://www.freedesktop.org/software/systemd/man/systemd.unit.html#Requisite=
func PropRequisite(units ...string) Property {
return propDependency("Requisite", units)
}
// PropRequisiteOverridable sets the RequisiteOverridable unit property. See
// http://www.freedesktop.org/software/systemd/man/systemd.unit.html#RequisiteOverridable=
func PropRequisiteOverridable(units ...string) Property {
return propDependency("RequisiteOverridable", units)
}
// PropWants sets the Wants unit property. See
// http://www.freedesktop.org/software/systemd/man/systemd.unit.html#Wants=
func PropWants(units ...string) Property {
return propDependency("Wants", units)
}
// PropBindsTo sets the BindsTo unit property. See
// http://www.freedesktop.org/software/systemd/man/systemd.unit.html#BindsTo=
func PropBindsTo(units ...string) Property {
return propDependency("BindsTo", units)
}
// PropRequiredBy sets the RequiredBy unit property. See
// http://www.freedesktop.org/software/systemd/man/systemd.unit.html#RequiredBy=
func PropRequiredBy(units ...string) Property {
return propDependency("RequiredBy", units)
}
// PropRequiredByOverridable sets the RequiredByOverridable unit property. See
// http://www.freedesktop.org/software/systemd/man/systemd.unit.html#RequiredByOverridable=
func PropRequiredByOverridable(units ...string) Property {
return propDependency("RequiredByOverridable", units)
}
// PropWantedBy sets the WantedBy unit property. See
// http://www.freedesktop.org/software/systemd/man/systemd.unit.html#WantedBy=
func PropWantedBy(units ...string) Property {
return propDependency("WantedBy", units)
}
// PropBoundBy sets the BoundBy unit property. See
// http://www.freedesktop.org/software/systemd/man/systemd.unit.html#BoundBy=
func PropBoundBy(units ...string) Property {
return propDependency("BoundBy", units)
}
// PropConflicts sets the Conflicts unit property. See
// http://www.freedesktop.org/software/systemd/man/systemd.unit.html#Conflicts=
func PropConflicts(units ...string) Property {
return propDependency("Conflicts", units)
}
// PropConflictedBy sets the ConflictedBy unit property. See
// http://www.freedesktop.org/software/systemd/man/systemd.unit.html#ConflictedBy=
func PropConflictedBy(units ...string) Property {
return propDependency("ConflictedBy", units)
}
// PropBefore sets the Before unit property. See
// http://www.freedesktop.org/software/systemd/man/systemd.unit.html#Before=
func PropBefore(units ...string) Property {
return propDependency("Before", units)
}
// PropAfter sets the After unit property. See
// http://www.freedesktop.org/software/systemd/man/systemd.unit.html#After=
func PropAfter(units ...string) Property {
return propDependency("After", units)
}
// PropOnFailure sets the OnFailure unit property. See
// http://www.freedesktop.org/software/systemd/man/systemd.unit.html#OnFailure=
func PropOnFailure(units ...string) Property {
return propDependency("OnFailure", units)
}
// PropTriggers sets the Triggers unit property. See
// http://www.freedesktop.org/software/systemd/man/systemd.unit.html#Triggers=
func PropTriggers(units ...string) Property {
return propDependency("Triggers", units)
}
// PropTriggeredBy sets the TriggeredBy unit property. See
// http://www.freedesktop.org/software/systemd/man/systemd.unit.html#TriggeredBy=
func PropTriggeredBy(units ...string) Property {
return propDependency("TriggeredBy", units)
}
// PropPropagatesReloadTo sets the PropagatesReloadTo unit property. See
// http://www.freedesktop.org/software/systemd/man/systemd.unit.html#PropagatesReloadTo=
func PropPropagatesReloadTo(units ...string) Property {
return propDependency("PropagatesReloadTo", units)
}
// PropRequiresMountsFor sets the RequiresMountsFor unit property. See
// http://www.freedesktop.org/software/systemd/man/systemd.unit.html#RequiresMountsFor=
func PropRequiresMountsFor(units ...string) Property {
return propDependency("RequiresMountsFor", units)
}
// PropSlice sets the Slice unit property. See
// http://www.freedesktop.org/software/systemd/man/systemd.resource-control.html#Slice=
func PropSlice(slice string) Property {
return Property{
Name: "Slice",
Value: dbus.MakeVariant(slice),
}
}
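A sketch combining the property helpers above with StartTransientUnit; the unit name and command are made up, and the nil channel means the job result is not waited for:

```go
package main

import (
	"log"

	sd "github.com/coreos/go-systemd/dbus"
)

func main() {
	conn, err := sd.New()
	if err != nil {
		log.Fatal(err)
	}
	defer conn.Close()

	props := []sd.Property{
		sd.PropDescription("ad-hoc sleep"),                   // Description=
		sd.PropExecStart([]string{"/bin/sleep", "30"}, false), // ExecStart=; unclean exit is not a failure
	}
	if _, err := conn.StartTransientUnit("adhoc-sleep.service", "replace", props, nil); err != nil {
		log.Fatal(err)
	}
}
```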

vendor/github.com/coreos/go-systemd/dbus/set.go generated vendored Normal file

@ -0,0 +1,47 @@
// Copyright 2015 CoreOS, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package dbus
type set struct {
data map[string]bool
}
func (s *set) Add(value string) {
s.data[value] = true
}
func (s *set) Remove(value string) {
delete(s.data, value)
}
func (s *set) Contains(value string) (exists bool) {
_, exists = s.data[value]
return
}
func (s *set) Length() int {
return len(s.data)
}
func (s *set) Values() (values []string) {
for val, _ := range s.data {
values = append(values, val)
}
return
}
func newSet() *set {
return &set{make(map[string]bool)}
}

vendor/github.com/coreos/go-systemd/dbus/subscription.go generated vendored Normal file

@ -0,0 +1,250 @@
// Copyright 2015 CoreOS, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package dbus
import (
"errors"
"time"
"github.com/godbus/dbus"
)
const (
cleanIgnoreInterval = int64(10 * time.Second)
ignoreInterval = int64(30 * time.Millisecond)
)
// Subscribe sets up this connection to subscribe to all systemd dbus events.
// This is required before calling SubscribeUnits. When the connection closes
// systemd will automatically stop sending signals so there is no need to
// explicitly call Unsubscribe().
func (c *Conn) Subscribe() error {
c.sigconn.BusObject().Call("org.freedesktop.DBus.AddMatch", 0,
"type='signal',interface='org.freedesktop.systemd1.Manager',member='UnitNew'")
c.sigconn.BusObject().Call("org.freedesktop.DBus.AddMatch", 0,
"type='signal',interface='org.freedesktop.DBus.Properties',member='PropertiesChanged'")
err := c.sigobj.Call("org.freedesktop.systemd1.Manager.Subscribe", 0).Store()
if err != nil {
return err
}
return nil
}
// Unsubscribe this connection from systemd dbus events.
func (c *Conn) Unsubscribe() error {
err := c.sigobj.Call("org.freedesktop.systemd1.Manager.Unsubscribe", 0).Store()
if err != nil {
return err
}
return nil
}
func (c *Conn) dispatch() {
ch := make(chan *dbus.Signal, signalBuffer)
c.sigconn.Signal(ch)
go func() {
for {
signal, ok := <-ch
if !ok {
return
}
if signal.Name == "org.freedesktop.systemd1.Manager.JobRemoved" {
c.jobComplete(signal)
}
if c.subscriber.updateCh == nil {
continue
}
var unitPath dbus.ObjectPath
switch signal.Name {
case "org.freedesktop.systemd1.Manager.JobRemoved":
unitName := signal.Body[2].(string)
c.sysobj.Call("org.freedesktop.systemd1.Manager.GetUnit", 0, unitName).Store(&unitPath)
case "org.freedesktop.systemd1.Manager.UnitNew":
unitPath = signal.Body[1].(dbus.ObjectPath)
case "org.freedesktop.DBus.Properties.PropertiesChanged":
if signal.Body[0].(string) == "org.freedesktop.systemd1.Unit" {
unitPath = signal.Path
}
}
if unitPath == dbus.ObjectPath("") {
continue
}
c.sendSubStateUpdate(unitPath)
}
}()
}
// SubscribeUnits returns two unbuffered channels which will receive all changed units every
// interval. Deleted units are sent as nil.
func (c *Conn) SubscribeUnits(interval time.Duration) (<-chan map[string]*UnitStatus, <-chan error) {
return c.SubscribeUnitsCustom(interval, 0, func(u1, u2 *UnitStatus) bool { return *u1 != *u2 }, nil)
}
// SubscribeUnitsCustom is like SubscribeUnits but lets you specify the buffer
// size of the channels, the comparison function for detecting changes and a filter
// function for cutting down on the noise that your channel receives.
func (c *Conn) SubscribeUnitsCustom(interval time.Duration, buffer int, isChanged func(*UnitStatus, *UnitStatus) bool, filterUnit func(string) bool) (<-chan map[string]*UnitStatus, <-chan error) {
old := make(map[string]*UnitStatus)
statusChan := make(chan map[string]*UnitStatus, buffer)
errChan := make(chan error, buffer)
go func() {
for {
timerChan := time.After(interval)
units, err := c.ListUnits()
if err == nil {
cur := make(map[string]*UnitStatus)
for i := range units {
if filterUnit != nil && filterUnit(units[i].Name) {
continue
}
cur[units[i].Name] = &units[i]
}
// add all new or changed units
changed := make(map[string]*UnitStatus)
for n, u := range cur {
if oldU, ok := old[n]; !ok || isChanged(oldU, u) {
changed[n] = u
}
delete(old, n)
}
// add all deleted units
for oldN := range old {
changed[oldN] = nil
}
old = cur
if len(changed) != 0 {
statusChan <- changed
}
} else {
errChan <- err
}
<-timerChan
}
}()
return statusChan, errChan
}
type SubStateUpdate struct {
UnitName string
SubState string
}
// SetSubStateSubscriber writes to updateCh when any unit's substate changes.
// Although this writes to updateCh on every state change, the reported state
// may be more recent than the change that generated it (due to an unavoidable
// race in the systemd dbus interface). That is, this method provides a good
// way to keep a current view of all units' states, but is not guaranteed to
// show every state transition they go through. Furthermore, state changes
// will only be written to the channel with non-blocking writes. If updateCh
// is full, it attempts to write an error to errCh; if errCh is full, the error
// passes silently.
func (c *Conn) SetSubStateSubscriber(updateCh chan<- *SubStateUpdate, errCh chan<- error) {
c.subscriber.Lock()
defer c.subscriber.Unlock()
c.subscriber.updateCh = updateCh
c.subscriber.errCh = errCh
}
func (c *Conn) sendSubStateUpdate(path dbus.ObjectPath) {
c.subscriber.Lock()
defer c.subscriber.Unlock()
if c.shouldIgnore(path) {
return
}
info, err := c.GetUnitProperties(string(path))
if err != nil {
select {
case c.subscriber.errCh <- err:
default:
}
}
name := info["Id"].(string)
substate := info["SubState"].(string)
update := &SubStateUpdate{name, substate}
select {
case c.subscriber.updateCh <- update:
default:
select {
case c.subscriber.errCh <- errors.New("update channel full!"):
default:
}
}
c.updateIgnore(path, info)
}
// The ignore functions work around a wart in the systemd dbus interface.
// Requesting the properties of an unloaded unit will cause systemd to send a
// pair of UnitNew/UnitRemoved signals. Because we need to get a unit's
// properties on UnitNew (as that's the only indication of a new unit coming up
// for the first time), we would enter an infinite loop if we did not attempt
// to detect and ignore these spurious signals. The signals themselves are
// indistinguishable from relevant ones, so we (somewhat hackishly) ignore an
// unloaded unit's signals for a short time after requesting its properties.
// This means that we will miss e.g. a transient unit being restarted
// *immediately* upon failure and also a transient unit being started
// immediately after requesting its status (with systemctl status, for example,
// because this causes a UnitNew signal to be sent which then causes us to fetch
// the properties).
func (c *Conn) shouldIgnore(path dbus.ObjectPath) bool {
t, ok := c.subscriber.ignore[path]
return ok && t >= time.Now().UnixNano()
}
func (c *Conn) updateIgnore(path dbus.ObjectPath, info map[string]interface{}) {
c.cleanIgnore()
// unit is unloaded - it will trigger bad systemd dbus behavior
if info["LoadState"].(string) == "not-found" {
c.subscriber.ignore[path] = time.Now().UnixNano() + ignoreInterval
}
}
// without this, ignore would grow unboundedly over time
func (c *Conn) cleanIgnore() {
now := time.Now().UnixNano()
if c.subscriber.cleanIgnore < now {
c.subscriber.cleanIgnore = now + cleanIgnoreInterval
for p, t := range c.subscriber.ignore {
if t < now {
delete(c.subscriber.ignore, p)
}
}
}
}
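A sketch of polling for unit state changes with SubscribeUnits; the five-second interval and the printing are illustrative:

```go
package main

import (
	"fmt"
	"log"
	"time"

	sd "github.com/coreos/go-systemd/dbus"
)

func main() {
	conn, err := sd.New()
	if err != nil {
		log.Fatal(err)
	}
	defer conn.Close()

	if err := conn.Subscribe(); err != nil {
		log.Fatal(err)
	}
	statusCh, errCh := conn.SubscribeUnits(5 * time.Second)
	for {
		select {
		case changed := <-statusCh:
			for name, status := range changed {
				if status == nil {
					fmt.Println(name, "was removed")
				} else {
					fmt.Println(name, status.ActiveState, status.SubState)
				}
			}
		case err := <-errCh:
			log.Println(err)
		}
	}
}
```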

vendor/github.com/coreos/go-systemd/dbus/subscription_set.go generated vendored Normal file

@ -0,0 +1,57 @@
// Copyright 2015 CoreOS, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package dbus
import (
"time"
)
// SubscriptionSet is a subscription set which is like conn.Subscribe but
// can filter to only return events for a set of units.
type SubscriptionSet struct {
*set
conn *Conn
}
func (s *SubscriptionSet) filter(unit string) bool {
return !s.Contains(unit)
}
// Subscribe starts listening for dbus events for all of the units in the set.
// Returns channels identical to conn.SubscribeUnits.
func (s *SubscriptionSet) Subscribe() (<-chan map[string]*UnitStatus, <-chan error) {
// TODO: Make fully evented by using systemd 209 with properties changed values
return s.conn.SubscribeUnitsCustom(time.Second, 0,
mismatchUnitStatus,
func(unit string) bool { return s.filter(unit) },
)
}
// NewSubscriptionSet returns a new subscription set.
func (conn *Conn) NewSubscriptionSet() *SubscriptionSet {
return &SubscriptionSet{newSet(), conn}
}
// mismatchUnitStatus returns true if the provided UnitStatus objects
// are not equivalent. false is returned if the objects are equivalent.
// Only the Name, Description and state-related fields are used in
// the comparison.
func mismatchUnitStatus(u1, u2 *UnitStatus) bool {
return u1.Name != u2.Name ||
u1.Description != u2.Description ||
u1.LoadState != u2.LoadState ||
u1.ActiveState != u2.ActiveState ||
u1.SubState != u2.SubState
}
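A sketch restricting the subscription to a fixed set of units. It assumes the same imports and setup as the previous sketch (a *Conn from New() on which Subscribe() has been called); the unit names are made up:

```go
// watchSelected watches only the listed units via a SubscriptionSet.
func watchSelected(conn *sd.Conn) {
	set := conn.NewSubscriptionSet()
	set.Add("sshd.service") // illustrative unit names
	set.Add("cron.service")

	statusCh, errCh := set.Subscribe()
	for {
		select {
		case changed := <-statusCh:
			for name, status := range changed {
				fmt.Println(name, "changed:", status)
			}
		case err := <-errCh:
			log.Println(err)
		}
	}
}
```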

vendor/github.com/godbus/dbus/CONTRIBUTING.md generated vendored Normal file

@ -0,0 +1,50 @@
# How to Contribute
## Getting Started
- Fork the repository on GitHub
- Read the [README](README.markdown) for build and test instructions
- Play with the project, submit bugs, submit patches!
## Contribution Flow
This is a rough outline of what a contributor's workflow looks like:
- Create a topic branch from where you want to base your work (usually master).
- Make commits of logical units.
- Make sure your commit messages are in the proper format (see below).
- Push your changes to a topic branch in your fork of the repository.
- Make sure the tests pass, and add any new tests as appropriate.
- Submit a pull request to the original repository.
Thanks for your contributions!
### Format of the Commit Message
We follow a rough convention for commit messages that is designed to answer two
questions: what changed and why. The subject line should feature the what and
the body of the commit should describe the why.
```
scripts: add the test-cluster command
this uses tmux to setup a test cluster that you can easily kill and
start for debugging.
Fixes #38
```
The format can be described more formally as follows:
```
<subsystem>: <what changed>
<BLANK LINE>
<why this change was made>
<BLANK LINE>
<footer>
```
The first line is the subject and should be no longer than 70 characters, the
second line is always blank, and other lines should be wrapped at 80 characters.
This allows the message to be easier to read on GitHub as well as in various
git tools.

25
vendor/github.com/godbus/dbus/LICENSE generated vendored Normal file

@ -0,0 +1,25 @@
Copyright (c) 2013, Georg Reinke (<guelfey at gmail dot com>), Google
All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions
are met:
1. Redistributions of source code must retain the above copyright notice,
this list of conditions and the following disclaimer.
2. Redistributions in binary form must reproduce the above copyright
notice, this list of conditions and the following disclaimer in the
documentation and/or other materials provided with the distribution.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED
TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

2
vendor/github.com/godbus/dbus/MAINTAINERS generated vendored Normal file

@ -0,0 +1,2 @@
Brandon Philips <brandon@ifup.org> (@philips)
Brian Waldon <brian@waldon.cc> (@bcwaldon)

41
vendor/github.com/godbus/dbus/README.markdown generated vendored Normal file

@ -0,0 +1,41 @@
dbus
----
dbus is a simple library that implements native Go client bindings for the
D-Bus message bus system.
### Features
* Complete native implementation of the D-Bus message protocol
* Go-like API (channels for signals / asynchronous method calls, Goroutine-safe connections)
* Subpackages that help with the introspection / property interfaces
### Installation
This package requires Go 1.1. If you have installed it and set up your GOPATH, just run:
```
go get github.com/godbus/dbus
```
If you want to use the subpackages, you can install them the same way.
### Usage
The complete package documentation and some simple examples are available at
[godoc.org](http://godoc.org/github.com/godbus/dbus). Also, the
[_examples](https://github.com/godbus/dbus/tree/master/_examples) directory
gives a short overview of the basic usage.
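As a minimal, illustrative sketch (not part of the upstream README), connecting to the session bus and listing the names currently owned on it looks roughly like this:
```
package main

import (
	"fmt"

	"github.com/godbus/dbus"
)

func main() {
	conn, err := dbus.SessionBus()
	if err != nil {
		panic(err)
	}
	var names []string
	// ListNames is a standard method on the bus daemon itself.
	err = conn.BusObject().Call("org.freedesktop.DBus.ListNames", 0).Store(&names)
	if err != nil {
		panic(err)
	}
	for _, name := range names {
		fmt.Println(name)
	}
}
```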
#### Projects using godbus
- [notify](https://github.com/esiqveland/notify) provides desktop notifications over dbus as a library.
Please note that the API is considered unstable for now and may change without
further notice.
### License
go.dbus is available under the Simplified BSD License; see LICENSE for the full
text.
Nearly all of the credit for this library goes to github.com/guelfey/go.dbus.

253
vendor/github.com/godbus/dbus/auth.go generated vendored Normal file

@ -0,0 +1,253 @@
package dbus
import (
"bufio"
"bytes"
"errors"
"io"
"os"
"strconv"
)
// AuthStatus represents the Status of an authentication mechanism.
type AuthStatus byte
const (
// AuthOk signals that authentication is finished; the next command
// from the server should be an OK.
AuthOk AuthStatus = iota
// AuthContinue signals that additional data is needed; the next command
// from the server should be a DATA.
AuthContinue
// AuthError signals an error; the server sent invalid data or some
// other unexpected thing happened and the current authentication
// process should be aborted.
AuthError
)
type authState byte
const (
waitingForData authState = iota
waitingForOk
waitingForReject
)
// Auth defines the behaviour of an authentication mechanism.
type Auth interface {
// Return the name of the mechanism, the argument to the first AUTH command
// and the next status.
FirstData() (name, resp []byte, status AuthStatus)
// Process the given DATA command, and return the argument to the DATA
// command and the next status. If len(resp) == 0, no DATA command is sent.
HandleData(data []byte) (resp []byte, status AuthStatus)
}
// Auth authenticates the connection, trying the given list of authentication
// mechanisms (in that order). If nil is passed, the EXTERNAL and
// DBUS_COOKIE_SHA1 mechanisms are tried for the current user. For private
// connections, this method must be called before sending any messages to the
// bus. Auth must not be called on shared connections.
func (conn *Conn) Auth(methods []Auth) error {
if methods == nil {
uid := strconv.Itoa(os.Getuid())
methods = []Auth{AuthExternal(uid), AuthCookieSha1(uid, getHomeDir())}
}
in := bufio.NewReader(conn.transport)
err := conn.transport.SendNullByte()
if err != nil {
return err
}
err = authWriteLine(conn.transport, []byte("AUTH"))
if err != nil {
return err
}
s, err := authReadLine(in)
if err != nil {
return err
}
if len(s) < 2 || !bytes.Equal(s[0], []byte("REJECTED")) {
return errors.New("dbus: authentication protocol error")
}
s = s[1:]
for _, v := range s {
for _, m := range methods {
if name, data, status := m.FirstData(); bytes.Equal(v, name) {
var ok bool
err = authWriteLine(conn.transport, []byte("AUTH"), []byte(v), data)
if err != nil {
return err
}
switch status {
case AuthOk:
err, ok = conn.tryAuth(m, waitingForOk, in)
case AuthContinue:
err, ok = conn.tryAuth(m, waitingForData, in)
default:
panic("dbus: invalid authentication status")
}
if err != nil {
return err
}
if ok {
if conn.transport.SupportsUnixFDs() {
err = authWriteLine(conn, []byte("NEGOTIATE_UNIX_FD"))
if err != nil {
return err
}
line, err := authReadLine(in)
if err != nil {
return err
}
switch {
case bytes.Equal(line[0], []byte("AGREE_UNIX_FD")):
conn.EnableUnixFDs()
conn.unixFD = true
case bytes.Equal(line[0], []byte("ERROR")):
default:
return errors.New("dbus: authentication protocol error")
}
}
err = authWriteLine(conn.transport, []byte("BEGIN"))
if err != nil {
return err
}
go conn.inWorker()
go conn.outWorker()
return nil
}
}
}
}
return errors.New("dbus: authentication failed")
}
// tryAuth tries to authenticate with m as the mechanism, using state as the
// initial authState and in for reading input. It returns (nil, true) on
// success, (nil, false) on a REJECTED and (someErr, false) if some other
// error occurred.
func (conn *Conn) tryAuth(m Auth, state authState, in *bufio.Reader) (error, bool) {
for {
s, err := authReadLine(in)
if err != nil {
return err, false
}
switch {
case state == waitingForData && string(s[0]) == "DATA":
if len(s) != 2 {
err = authWriteLine(conn.transport, []byte("ERROR"))
if err != nil {
return err, false
}
continue
}
data, status := m.HandleData(s[1])
switch status {
case AuthOk, AuthContinue:
if len(data) != 0 {
err = authWriteLine(conn.transport, []byte("DATA"), data)
if err != nil {
return err, false
}
}
if status == AuthOk {
state = waitingForOk
}
case AuthError:
err = authWriteLine(conn.transport, []byte("ERROR"))
if err != nil {
return err, false
}
}
case state == waitingForData && string(s[0]) == "REJECTED":
return nil, false
case state == waitingForData && string(s[0]) == "ERROR":
err = authWriteLine(conn.transport, []byte("CANCEL"))
if err != nil {
return err, false
}
state = waitingForReject
case state == waitingForData && string(s[0]) == "OK":
if len(s) != 2 {
err = authWriteLine(conn.transport, []byte("CANCEL"))
if err != nil {
return err, false
}
state = waitingForReject
continue // wait for the REJECTED reply to the CANCEL instead of reading s[1]
}
conn.uuid = string(s[1])
return nil, true
case state == waitingForData:
err = authWriteLine(conn.transport, []byte("ERROR"))
if err != nil {
return err, false
}
case state == waitingForOk && string(s[0]) == "OK":
if len(s) != 2 {
err = authWriteLine(conn.transport, []byte("CANCEL"))
if err != nil {
return err, false
}
state = waitingForReject
continue // wait for the REJECTED reply to the CANCEL instead of reading s[1]
}
conn.uuid = string(s[1])
return nil, true
case state == waitingForOk && string(s[0]) == "REJECTED":
return nil, false
case state == waitingForOk && (string(s[0]) == "DATA" ||
string(s[0]) == "ERROR"):
err = authWriteLine(conn.transport, []byte("CANCEL"))
if err != nil {
return err, false
}
state = waitingForReject
case state == waitingForOk:
err = authWriteLine(conn.transport, []byte("ERROR"))
if err != nil {
return err, false
}
case state == waitingForReject && string(s[0]) == "REJECTED":
return nil, false
case state == waitingForReject:
return errors.New("dbus: authentication protocol error"), false
default:
panic("dbus: invalid auth state")
}
}
}
// authReadLine reads a line and separates it into its fields.
func authReadLine(in *bufio.Reader) ([][]byte, error) {
data, err := in.ReadBytes('\n')
if err != nil {
return nil, err
}
data = bytes.TrimSuffix(data, []byte("\r\n"))
return bytes.Split(data, []byte{' '}), nil
}
// authWriteLine writes the given line in the authentication protocol format
// (elements of data separated by a " " and terminated by "\r\n").
func authWriteLine(out io.Writer, data ...[]byte) error {
buf := make([]byte, 0)
for i, v := range data {
buf = append(buf, v...)
if i != len(data)-1 {
buf = append(buf, ' ')
}
}
buf = append(buf, '\r')
buf = append(buf, '\n')
n, err := out.Write(buf)
if err != nil {
return err
}
if n != len(buf) {
return io.ErrUnexpectedEOF
}
return nil
}
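The call order described in Auth's documentation (authenticate first, then Hello, then normal traffic) can be sketched as follows for a private connection. This is illustrative only and not part of the vendored source.
```
package main

import "github.com/godbus/dbus"

func main() {
	conn, err := dbus.SessionBusPrivate()
	if err != nil {
		panic(err)
	}
	// Passing nil tries EXTERNAL and DBUS_COOKIE_SHA1 for the current user.
	if err = conn.Auth(nil); err != nil {
		conn.Close()
		panic(err)
	}
	// Hello must be sent before any other message on a bus connection.
	if err = conn.Hello(); err != nil {
		conn.Close()
		panic(err)
	}
	defer conn.Close()
	// The connection is now ready for Call / Emit / Signal.
}
```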

26
vendor/github.com/godbus/dbus/auth_external.go generated vendored Normal file

@ -0,0 +1,26 @@
package dbus
import (
"encoding/hex"
)
// AuthExternal returns an Auth that authenticates as the given user with the
// EXTERNAL mechanism.
func AuthExternal(user string) Auth {
return authExternal{user}
}
// AuthExternal implements the EXTERNAL authentication mechanism.
type authExternal struct {
user string
}
func (a authExternal) FirstData() ([]byte, []byte, AuthStatus) {
b := make([]byte, 2*len(a.user))
hex.Encode(b, []byte(a.user))
return []byte("EXTERNAL"), b, AuthOk
}
func (a authExternal) HandleData(b []byte) ([]byte, AuthStatus) {
return nil, AuthError
}

102
vendor/github.com/godbus/dbus/auth_sha1.go generated vendored Normal file

@ -0,0 +1,102 @@
package dbus
import (
"bufio"
"bytes"
"crypto/rand"
"crypto/sha1"
"encoding/hex"
"os"
)
// AuthCookieSha1 returns an Auth that authenticates as the given user with the
// DBUS_COOKIE_SHA1 mechanism. The home parameter should specify the home
// directory of the user.
func AuthCookieSha1(user, home string) Auth {
return authCookieSha1{user, home}
}
type authCookieSha1 struct {
user, home string
}
func (a authCookieSha1) FirstData() ([]byte, []byte, AuthStatus) {
b := make([]byte, 2*len(a.user))
hex.Encode(b, []byte(a.user))
return []byte("DBUS_COOKIE_SHA1"), b, AuthContinue
}
func (a authCookieSha1) HandleData(data []byte) ([]byte, AuthStatus) {
challenge := make([]byte, len(data)/2)
_, err := hex.Decode(challenge, data)
if err != nil {
return nil, AuthError
}
b := bytes.Split(challenge, []byte{' '})
if len(b) != 3 {
return nil, AuthError
}
context := b[0]
id := b[1]
svchallenge := b[2]
cookie := a.getCookie(context, id)
if cookie == nil {
return nil, AuthError
}
clchallenge := a.generateChallenge()
if clchallenge == nil {
return nil, AuthError
}
hash := sha1.New()
hash.Write(bytes.Join([][]byte{svchallenge, clchallenge, cookie}, []byte{':'}))
hexhash := make([]byte, 2*hash.Size())
hex.Encode(hexhash, hash.Sum(nil))
data = append(clchallenge, ' ')
data = append(data, hexhash...)
resp := make([]byte, 2*len(data))
hex.Encode(resp, data)
return resp, AuthOk
}
// getCookie searches for the cookie identified by id in context and returns
// the cookie content or nil. (Since HandleData can't return a specific error,
// but only whether an error occurred, this function also doesn't bother to
// return an error.)
func (a authCookieSha1) getCookie(context, id []byte) []byte {
file, err := os.Open(a.home + "/.dbus-keyrings/" + string(context))
if err != nil {
return nil
}
defer file.Close()
rd := bufio.NewReader(file)
for {
line, err := rd.ReadBytes('\n')
if err != nil {
return nil
}
line = line[:len(line)-1]
b := bytes.Split(line, []byte{' '})
if len(b) != 3 {
return nil
}
if bytes.Equal(b[0], id) {
return b[2]
}
}
}
// generateChallenge returns a random, hex-encoded challenge, or nil on error
// (see above).
func (a authCookieSha1) generateChallenge() []byte {
b := make([]byte, 16)
n, err := rand.Read(b)
if err != nil {
return nil
}
if n != 16 {
return nil
}
enc := make([]byte, 32)
hex.Encode(enc, b)
return enc
}

36
vendor/github.com/godbus/dbus/call.go generated vendored Normal file

@ -0,0 +1,36 @@
package dbus
import (
"errors"
)
// Call represents a pending or completed method call.
type Call struct {
Destination string
Path ObjectPath
Method string
Args []interface{}
// Strobes when the call is complete.
Done chan *Call
// After completion, the error status. If this is non-nil, it may be an
// error message from the peer (with Error as its type) or some other error.
Err error
// Holds the response once the call is done.
Body []interface{}
}
var errSignature = errors.New("dbus: mismatched signature")
// Store stores the body of the reply into the provided pointers. It returns
// an error if the signatures of the body and retvalues don't match, or if
// the error status is not nil.
func (c *Call) Store(retvalues ...interface{}) error {
if c.Err != nil {
return c.Err
}
return Store(c.Body, retvalues...)
}
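As a small illustration (not part of the vendored source) of how a completed Call is consumed: Store first checks Err and then unpacks Body into the supplied pointers. GetId is a standard bus-daemon method.
```
package main

import (
	"fmt"

	"github.com/godbus/dbus"
)

func main() {
	conn, err := dbus.SessionBus()
	if err != nil {
		panic(err)
	}
	obj := conn.Object("org.freedesktop.DBus", "/org/freedesktop/DBus")
	var id string
	// Store returns the call's Err if one was set, otherwise it copies the
	// reply body into &id.
	if err := obj.Call("org.freedesktop.DBus.GetId", 0).Store(&id); err != nil {
		panic(err)
	}
	fmt.Println("bus id:", id)
}
```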

636
vendor/github.com/godbus/dbus/conn.go generated vendored Normal file

@ -0,0 +1,636 @@
package dbus
import (
"errors"
"io"
"os"
"reflect"
"strings"
"sync"
)
const defaultSystemBusAddress = "unix:path=/var/run/dbus/system_bus_socket"
var (
systemBus *Conn
systemBusLck sync.Mutex
sessionBus *Conn
sessionBusLck sync.Mutex
)
// ErrClosed is the error returned by calls on a closed connection.
var ErrClosed = errors.New("dbus: connection closed by user")
// Conn represents a connection to a message bus (usually, the system or
// session bus).
//
// Connections are either shared or private. Shared connections
// are shared between calls to the functions that return them. As a result,
// the methods Close, Auth and Hello must not be called on them.
//
// Multiple goroutines may invoke methods on a connection simultaneously.
type Conn struct {
transport
busObj BusObject
unixFD bool
uuid string
names []string
namesLck sync.RWMutex
serialLck sync.Mutex
nextSerial uint32
serialUsed map[uint32]bool
calls map[uint32]*Call
callsLck sync.RWMutex
handlers map[ObjectPath]map[string]exportWithMapping
handlersLck sync.RWMutex
out chan *Message
closed bool
outLck sync.RWMutex
signals []chan<- *Signal
signalsLck sync.Mutex
eavesdropped chan<- *Message
eavesdroppedLck sync.Mutex
}
// SessionBus returns a shared connection to the session bus, connecting to it
// if not already done.
func SessionBus() (conn *Conn, err error) {
sessionBusLck.Lock()
defer sessionBusLck.Unlock()
if sessionBus != nil {
return sessionBus, nil
}
defer func() {
if conn != nil {
sessionBus = conn
}
}()
conn, err = SessionBusPrivate()
if err != nil {
return
}
if err = conn.Auth(nil); err != nil {
conn.Close()
conn = nil
return
}
if err = conn.Hello(); err != nil {
conn.Close()
conn = nil
}
return
}
// SessionBusPrivate returns a new private connection to the session bus.
func SessionBusPrivate() (*Conn, error) {
address := os.Getenv("DBUS_SESSION_BUS_ADDRESS")
if address != "" && address != "autolaunch:" {
return Dial(address)
}
return sessionBusPlatform()
}
// SystemBus returns a shared connection to the system bus, connecting to it if
// not already done.
func SystemBus() (conn *Conn, err error) {
systemBusLck.Lock()
defer systemBusLck.Unlock()
if systemBus != nil {
return systemBus, nil
}
defer func() {
if conn != nil {
systemBus = conn
}
}()
conn, err = SystemBusPrivate()
if err != nil {
return
}
if err = conn.Auth(nil); err != nil {
conn.Close()
conn = nil
return
}
if err = conn.Hello(); err != nil {
conn.Close()
conn = nil
}
return
}
// SystemBusPrivate returns a new private connection to the system bus.
func SystemBusPrivate() (*Conn, error) {
address := os.Getenv("DBUS_SYSTEM_BUS_ADDRESS")
if address != "" {
return Dial(address)
}
return Dial(defaultSystemBusAddress)
}
// Dial establishes a new private connection to the message bus specified by address.
func Dial(address string) (*Conn, error) {
tr, err := getTransport(address)
if err != nil {
return nil, err
}
return newConn(tr)
}
// NewConn creates a new private *Conn from an already established connection.
func NewConn(conn io.ReadWriteCloser) (*Conn, error) {
return newConn(genericTransport{conn})
}
// newConn creates a new *Conn from a transport.
func newConn(tr transport) (*Conn, error) {
conn := new(Conn)
conn.transport = tr
conn.calls = make(map[uint32]*Call)
conn.out = make(chan *Message, 10)
conn.handlers = make(map[ObjectPath]map[string]exportWithMapping)
conn.nextSerial = 1
conn.serialUsed = map[uint32]bool{0: true}
conn.busObj = conn.Object("org.freedesktop.DBus", "/org/freedesktop/DBus")
return conn, nil
}
// BusObject returns the object owned by the bus daemon which handles
// administrative requests.
func (conn *Conn) BusObject() BusObject {
return conn.busObj
}
// Close closes the connection. Any blocked operations will return with errors
// and the channels passed to Eavesdrop and Signal are closed. This method must
// not be called on shared connections.
func (conn *Conn) Close() error {
conn.outLck.Lock()
if conn.closed {
// inWorker calls Close on read errors; the read error may
// be caused by another caller calling Close to shut down the
// dbus connection, a double-close scenario we prevent here.
conn.outLck.Unlock()
return nil
}
close(conn.out)
conn.closed = true
conn.outLck.Unlock()
conn.signalsLck.Lock()
for _, ch := range conn.signals {
close(ch)
}
conn.signalsLck.Unlock()
conn.eavesdroppedLck.Lock()
if conn.eavesdropped != nil {
close(conn.eavesdropped)
}
conn.eavesdroppedLck.Unlock()
return conn.transport.Close()
}
// Eavesdrop causes conn to send all incoming messages to the given channel
// without further processing. Method replies, errors and signals will not be
// sent to the appropriate channels and method calls will not be handled. If nil
// is passed, the normal behaviour is restored.
//
// The caller has to make sure that ch is sufficiently buffered;
// if a message arrives when a write to ch is not possible, the message is
// discarded.
func (conn *Conn) Eavesdrop(ch chan<- *Message) {
conn.eavesdroppedLck.Lock()
conn.eavesdropped = ch
conn.eavesdroppedLck.Unlock()
}
// getSerial returns an unused serial.
func (conn *Conn) getSerial() uint32 {
conn.serialLck.Lock()
defer conn.serialLck.Unlock()
n := conn.nextSerial
for conn.serialUsed[n] {
n++
}
conn.serialUsed[n] = true
conn.nextSerial = n + 1
return n
}
// Hello sends the initial org.freedesktop.DBus.Hello call. This method must be
// called after authentication, but before sending any other messages to the
// bus. Hello must not be called for shared connections.
func (conn *Conn) Hello() error {
var s string
err := conn.busObj.Call("org.freedesktop.DBus.Hello", 0).Store(&s)
if err != nil {
return err
}
conn.namesLck.Lock()
conn.names = make([]string, 1)
conn.names[0] = s
conn.namesLck.Unlock()
return nil
}
// inWorker runs in its own goroutine, reading incoming messages from the
// transport and dispatching them appropriately.
func (conn *Conn) inWorker() {
for {
msg, err := conn.ReadMessage()
if err == nil {
conn.eavesdroppedLck.Lock()
if conn.eavesdropped != nil {
select {
case conn.eavesdropped <- msg:
default:
}
conn.eavesdroppedLck.Unlock()
continue
}
conn.eavesdroppedLck.Unlock()
dest, _ := msg.Headers[FieldDestination].value.(string)
found := false
if dest == "" {
found = true
} else {
conn.namesLck.RLock()
if len(conn.names) == 0 {
found = true
}
for _, v := range conn.names {
if dest == v {
found = true
break
}
}
conn.namesLck.RUnlock()
}
if !found {
// Eavesdropped a message, but no channel for it is registered.
// Ignore it.
continue
}
switch msg.Type {
case TypeMethodReply, TypeError:
serial := msg.Headers[FieldReplySerial].value.(uint32)
conn.callsLck.Lock()
if c, ok := conn.calls[serial]; ok {
if msg.Type == TypeError {
name, _ := msg.Headers[FieldErrorName].value.(string)
c.Err = Error{name, msg.Body}
} else {
c.Body = msg.Body
}
c.Done <- c
conn.serialLck.Lock()
delete(conn.serialUsed, serial)
conn.serialLck.Unlock()
delete(conn.calls, serial)
}
conn.callsLck.Unlock()
case TypeSignal:
iface := msg.Headers[FieldInterface].value.(string)
member := msg.Headers[FieldMember].value.(string)
// as per http://dbus.freedesktop.org/doc/dbus-specification.html ,
// sender is optional for signals.
sender, _ := msg.Headers[FieldSender].value.(string)
if iface == "org.freedesktop.DBus" && sender == "org.freedesktop.DBus" {
if member == "NameLost" {
// If we lost the name on the bus, remove it from our
// tracking list.
name, ok := msg.Body[0].(string)
if !ok {
panic("Unable to read the lost name")
}
conn.namesLck.Lock()
for i, v := range conn.names {
if v == name {
conn.names = append(conn.names[:i],
conn.names[i+1:]...)
}
}
conn.namesLck.Unlock()
} else if member == "NameAcquired" {
// If we acquired the name on the bus, add it to our
// tracking list.
name, ok := msg.Body[0].(string)
if !ok {
panic("Unable to read the acquired name")
}
conn.namesLck.Lock()
conn.names = append(conn.names, name)
conn.namesLck.Unlock()
}
}
signal := &Signal{
Sender: sender,
Path: msg.Headers[FieldPath].value.(ObjectPath),
Name: iface + "." + member,
Body: msg.Body,
}
conn.signalsLck.Lock()
for _, ch := range conn.signals {
ch <- signal
}
conn.signalsLck.Unlock()
case TypeMethodCall:
go conn.handleCall(msg)
}
} else if _, ok := err.(InvalidMessageError); !ok {
// Some read error occurred (usually EOF); we can't really do
// anything but shut everything down and return errors to all
// pending replies.
conn.Close()
conn.callsLck.RLock()
for _, v := range conn.calls {
v.Err = err
v.Done <- v
}
conn.callsLck.RUnlock()
return
}
// invalid messages are ignored
}
}
// Names returns the list of all names that are currently owned by this
// connection. The slice is always at least one element long, the first element
// being the unique name of the connection.
func (conn *Conn) Names() []string {
conn.namesLck.RLock()
// copy the slice so it can't be modified
s := make([]string, len(conn.names))
copy(s, conn.names)
conn.namesLck.RUnlock()
return s
}
// Object returns the object identified by the given destination name and path.
func (conn *Conn) Object(dest string, path ObjectPath) BusObject {
return &Object{conn, dest, path}
}
// outWorker runs in its own goroutine, encoding and sending messages that are
// sent to conn.out.
func (conn *Conn) outWorker() {
for msg := range conn.out {
err := conn.SendMessage(msg)
conn.callsLck.RLock()
if err != nil {
if c := conn.calls[msg.serial]; c != nil {
c.Err = err
c.Done <- c
}
conn.serialLck.Lock()
delete(conn.serialUsed, msg.serial)
conn.serialLck.Unlock()
} else if msg.Type != TypeMethodCall {
conn.serialLck.Lock()
delete(conn.serialUsed, msg.serial)
conn.serialLck.Unlock()
}
conn.callsLck.RUnlock()
}
}
// Send sends the given message to the message bus. You usually don't need to
// use this; use the higher-level equivalents (Call / Go, Emit and Export)
// instead. If msg is a method call and NoReplyExpected is not set, a non-nil
// call is returned and the same value is sent to ch (which must be buffered)
// once the call is complete. Otherwise, ch is ignored and a Call structure is
// returned of which only the Err member is valid.
func (conn *Conn) Send(msg *Message, ch chan *Call) *Call {
var call *Call
msg.serial = conn.getSerial()
if msg.Type == TypeMethodCall && msg.Flags&FlagNoReplyExpected == 0 {
if ch == nil {
ch = make(chan *Call, 5)
} else if cap(ch) == 0 {
panic("dbus: unbuffered channel passed to (*Conn).Send")
}
call = new(Call)
call.Destination, _ = msg.Headers[FieldDestination].value.(string)
call.Path, _ = msg.Headers[FieldPath].value.(ObjectPath)
iface, _ := msg.Headers[FieldInterface].value.(string)
member, _ := msg.Headers[FieldMember].value.(string)
call.Method = iface + "." + member
call.Args = msg.Body
call.Done = ch
conn.callsLck.Lock()
conn.calls[msg.serial] = call
conn.callsLck.Unlock()
conn.outLck.RLock()
if conn.closed {
call.Err = ErrClosed
call.Done <- call
} else {
conn.out <- msg
}
conn.outLck.RUnlock()
} else {
conn.outLck.RLock()
if conn.closed {
call = &Call{Err: ErrClosed}
} else {
conn.out <- msg
call = &Call{Err: nil}
}
conn.outLck.RUnlock()
}
return call
}
// sendError creates an error message corresponding to the parameters and sends
// it to conn.out.
func (conn *Conn) sendError(e Error, dest string, serial uint32) {
msg := new(Message)
msg.Type = TypeError
msg.serial = conn.getSerial()
msg.Headers = make(map[HeaderField]Variant)
if dest != "" {
msg.Headers[FieldDestination] = MakeVariant(dest)
}
msg.Headers[FieldErrorName] = MakeVariant(e.Name)
msg.Headers[FieldReplySerial] = MakeVariant(serial)
msg.Body = e.Body
if len(e.Body) > 0 {
msg.Headers[FieldSignature] = MakeVariant(SignatureOf(e.Body...))
}
conn.outLck.RLock()
if !conn.closed {
conn.out <- msg
}
conn.outLck.RUnlock()
}
// sendReply creates a method reply message corresponding to the parameters and
// sends it to conn.out.
func (conn *Conn) sendReply(dest string, serial uint32, values ...interface{}) {
msg := new(Message)
msg.Type = TypeMethodReply
msg.serial = conn.getSerial()
msg.Headers = make(map[HeaderField]Variant)
if dest != "" {
msg.Headers[FieldDestination] = MakeVariant(dest)
}
msg.Headers[FieldReplySerial] = MakeVariant(serial)
msg.Body = values
if len(values) > 0 {
msg.Headers[FieldSignature] = MakeVariant(SignatureOf(values...))
}
conn.outLck.RLock()
if !conn.closed {
conn.out <- msg
}
conn.outLck.RUnlock()
}
// Signal registers the given channel to be passed all received signal messages.
// The caller has to make sure that ch is sufficiently buffered; if a message
// arrives when a write to c is not possible, it is discarded.
//
// Multiple of these channels can be registered at the same time.
//
// These channels are "overwritten" by Eavesdrop; i.e., if there currently is a
// channel for eavesdropped messages, this channel receives all signals, and
// none of the channels passed to Signal will receive any signals.
func (conn *Conn) Signal(ch chan<- *Signal) {
conn.signalsLck.Lock()
conn.signals = append(conn.signals, ch)
conn.signalsLck.Unlock()
}
// RemoveSignal removes the given channel from the list of the registered channels.
func (conn *Conn) RemoveSignal(ch chan<- *Signal) {
conn.signalsLck.Lock()
for i := len(conn.signals) - 1; i >= 0; i-- {
if ch == conn.signals[i] {
copy(conn.signals[i:], conn.signals[i+1:])
conn.signals[len(conn.signals)-1] = nil
conn.signals = conn.signals[:len(conn.signals)-1]
}
}
conn.signalsLck.Unlock()
}
// SupportsUnixFDs returns whether the underlying transport supports passing of
// unix file descriptors. If this is false, method calls containing unix file
// descriptors will return an error and emitted signals containing them will
// not be sent.
func (conn *Conn) SupportsUnixFDs() bool {
return conn.unixFD
}
// Error represents a D-Bus message of type Error.
type Error struct {
Name string
Body []interface{}
}
func NewError(name string, body []interface{}) *Error {
return &Error{name, body}
}
func (e Error) Error() string {
if len(e.Body) >= 1 {
s, ok := e.Body[0].(string)
if ok {
return s
}
}
return e.Name
}
// Signal represents a D-Bus message of type Signal. The name member is given in
// "interface.member" notation, e.g. org.freedesktop.D-Bus.NameLost.
type Signal struct {
Sender string
Path ObjectPath
Name string
Body []interface{}
}
// transport is a D-Bus transport.
type transport interface {
// Read and Write raw data (for example, for the authentication protocol).
io.ReadWriteCloser
// Send the initial null byte used for the EXTERNAL mechanism.
SendNullByte() error
// Returns whether this transport supports passing Unix FDs.
SupportsUnixFDs() bool
// Signal the transport that Unix FD passing is enabled for this connection.
EnableUnixFDs()
// Read / send a message, handling things like Unix FDs.
ReadMessage() (*Message, error)
SendMessage(*Message) error
}
var (
transports = make(map[string]func(string) (transport, error))
)
func getTransport(address string) (transport, error) {
var err error
var t transport
addresses := strings.Split(address, ";")
for _, v := range addresses {
i := strings.IndexRune(v, ':')
if i == -1 {
err = errors.New("dbus: invalid bus address (no transport)")
continue
}
f := transports[v[:i]]
if f == nil {
err = errors.New("dbus: invalid bus address (invalid or unsupported transport)")
continue
}
t, err = f(v[i+1:])
if err == nil {
return t, nil
}
}
return nil, err
}
// dereferenceAll returns a slice that, assuming that vs is a slice of pointers
// of arbitrary types, contains the values that are obtained from dereferencing
// all elements in vs.
func dereferenceAll(vs []interface{}) []interface{} {
for i := range vs {
v := reflect.ValueOf(vs[i])
v = v.Elem()
vs[i] = v.Interface()
}
return vs
}
// getKey gets a key from the list of keys. Returns "" on error / not found.
func getKey(s, key string) string {
i := strings.Index(s, key)
if i == -1 {
return ""
}
if i+len(key)+1 >= len(s) || s[i+len(key)] != '=' {
return ""
}
j := strings.Index(s, ",")
if j == -1 {
j = len(s)
}
return s[i+len(key)+1 : j]
}
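Putting Signal and the bus object together, a rough sketch (not part of the vendored source) of receiving signals looks like this. AddMatch is a standard bus-daemon method; the match rule string is only an example.
```
package main

import (
	"fmt"

	"github.com/godbus/dbus"
)

func main() {
	conn, err := dbus.SessionBus()
	if err != nil {
		panic(err)
	}
	// Ask the bus daemon to route matching signals to this connection.
	call := conn.BusObject().Call("org.freedesktop.DBus.AddMatch", 0,
		"type='signal',interface='org.freedesktop.DBus',member='NameOwnerChanged'")
	if call.Err != nil {
		panic(call.Err)
	}
	// The channel should be buffered, as required by Signal's documentation.
	ch := make(chan *dbus.Signal, 16)
	conn.Signal(ch)
	for sig := range ch {
		fmt.Println(sig.Name, sig.Body)
	}
}
```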

21
vendor/github.com/godbus/dbus/conn_darwin.go generated vendored Normal file

@ -0,0 +1,21 @@
package dbus
import (
"errors"
"os/exec"
)
func sessionBusPlatform() (*Conn, error) {
cmd := exec.Command("launchctl", "getenv", "DBUS_LAUNCHD_SESSION_BUS_SOCKET")
b, err := cmd.CombinedOutput()
if err != nil {
return nil, err
}
if len(b) == 0 {
return nil, errors.New("dbus: couldn't determine address of session bus")
}
return Dial("unix:path=" + string(b[:len(b)-1]))
}

27
vendor/github.com/godbus/dbus/conn_other.go generated vendored Normal file

@ -0,0 +1,27 @@
// +build !darwin
package dbus
import (
"bytes"
"errors"
"os/exec"
)
func sessionBusPlatform() (*Conn, error) {
cmd := exec.Command("dbus-launch")
b, err := cmd.CombinedOutput()
if err != nil {
return nil, err
}
i := bytes.IndexByte(b, '=')
j := bytes.IndexByte(b, '\n')
if i == -1 || j == -1 {
return nil, errors.New("dbus: couldn't determine address of session bus")
}
return Dial(string(b[i+1 : j]))
}

258
vendor/github.com/godbus/dbus/dbus.go generated vendored Normal file

@ -0,0 +1,258 @@
package dbus
import (
"errors"
"reflect"
"strings"
)
var (
byteType = reflect.TypeOf(byte(0))
boolType = reflect.TypeOf(false)
uint8Type = reflect.TypeOf(uint8(0))
int16Type = reflect.TypeOf(int16(0))
uint16Type = reflect.TypeOf(uint16(0))
int32Type = reflect.TypeOf(int32(0))
uint32Type = reflect.TypeOf(uint32(0))
int64Type = reflect.TypeOf(int64(0))
uint64Type = reflect.TypeOf(uint64(0))
float64Type = reflect.TypeOf(float64(0))
stringType = reflect.TypeOf("")
signatureType = reflect.TypeOf(Signature{""})
objectPathType = reflect.TypeOf(ObjectPath(""))
variantType = reflect.TypeOf(Variant{Signature{""}, nil})
interfacesType = reflect.TypeOf([]interface{}{})
unixFDType = reflect.TypeOf(UnixFD(0))
unixFDIndexType = reflect.TypeOf(UnixFDIndex(0))
)
// An InvalidTypeError signals that a value which cannot be represented in the
// D-Bus wire format was passed to a function.
type InvalidTypeError struct {
Type reflect.Type
}
func (e InvalidTypeError) Error() string {
return "dbus: invalid type " + e.Type.String()
}
// Store copies the values contained in src to dest, which must be a slice of
// pointers. It converts slices of interfaces from src to corresponding structs
// in dest. An error is returned if the lengths of src and dest or the types of
// their elements don't match.
func Store(src []interface{}, dest ...interface{}) error {
if len(src) != len(dest) {
return errors.New("dbus.Store: length mismatch")
}
for i := range src {
if err := store(src[i], dest[i]); err != nil {
return err
}
}
return nil
}
func store(src, dest interface{}) error {
if reflect.TypeOf(dest).Elem() == reflect.TypeOf(src) {
reflect.ValueOf(dest).Elem().Set(reflect.ValueOf(src))
return nil
} else if hasStruct(dest) {
rv := reflect.ValueOf(dest).Elem()
switch rv.Kind() {
case reflect.Struct:
vs, ok := src.([]interface{})
if !ok {
return errors.New("dbus.Store: type mismatch")
}
t := rv.Type()
ndest := make([]interface{}, 0, rv.NumField())
for i := 0; i < rv.NumField(); i++ {
field := t.Field(i)
if field.PkgPath == "" && field.Tag.Get("dbus") != "-" {
ndest = append(ndest, rv.Field(i).Addr().Interface())
}
}
if len(vs) != len(ndest) {
return errors.New("dbus.Store: type mismatch")
}
err := Store(vs, ndest...)
if err != nil {
return errors.New("dbus.Store: type mismatch")
}
case reflect.Slice:
sv := reflect.ValueOf(src)
if sv.Kind() != reflect.Slice {
return errors.New("dbus.Store: type mismatch")
}
rv.Set(reflect.MakeSlice(rv.Type(), sv.Len(), sv.Len()))
for i := 0; i < sv.Len(); i++ {
if err := store(sv.Index(i).Interface(), rv.Index(i).Addr().Interface()); err != nil {
return err
}
}
case reflect.Map:
sv := reflect.ValueOf(src)
if sv.Kind() != reflect.Map {
return errors.New("dbus.Store: type mismatch")
}
keys := sv.MapKeys()
rv.Set(reflect.MakeMap(sv.Type()))
for _, key := range keys {
v := reflect.New(sv.Type().Elem())
if err := store(v, sv.MapIndex(key).Interface()); err != nil {
return err
}
rv.SetMapIndex(key, v.Elem())
}
default:
return errors.New("dbus.Store: type mismatch")
}
return nil
} else {
return errors.New("dbus.Store: type mismatch")
}
}
func hasStruct(v interface{}) bool {
t := reflect.TypeOf(v)
for {
switch t.Kind() {
case reflect.Struct:
return true
case reflect.Slice, reflect.Ptr, reflect.Map:
t = t.Elem()
default:
return false
}
}
}
// An ObjectPath is an object path as defined by the D-Bus spec.
type ObjectPath string
// IsValid returns whether the object path is valid.
func (o ObjectPath) IsValid() bool {
s := string(o)
if len(s) == 0 {
return false
}
if s[0] != '/' {
return false
}
if s[len(s)-1] == '/' && len(s) != 1 {
return false
}
// probably not used, but technically possible
if s == "/" {
return true
}
split := strings.Split(s[1:], "/")
for _, v := range split {
if len(v) == 0 {
return false
}
for _, c := range v {
if !isMemberChar(c) {
return false
}
}
}
return true
}
// A UnixFD is a Unix file descriptor sent over the wire. See the package-level
// documentation for more information about Unix file descriptor passing.
type UnixFD int32
// A UnixFDIndex is the representation of a Unix file descriptor in a message.
type UnixFDIndex uint32
// alignment returns the alignment of values of type t.
func alignment(t reflect.Type) int {
switch t {
case variantType:
return 1
case objectPathType:
return 4
case signatureType:
return 1
case interfacesType: // sometimes used for structs
return 8
}
switch t.Kind() {
case reflect.Uint8:
return 1
case reflect.Uint16, reflect.Int16:
return 2
case reflect.Uint32, reflect.Int32, reflect.String, reflect.Array, reflect.Slice, reflect.Map:
return 4
case reflect.Uint64, reflect.Int64, reflect.Float64, reflect.Struct:
return 8
case reflect.Ptr:
return alignment(t.Elem())
}
return 1
}
// isKeyType returns whether t is a valid type for a D-Bus dict.
func isKeyType(t reflect.Type) bool {
switch t.Kind() {
case reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64,
reflect.Int16, reflect.Int32, reflect.Int64, reflect.Float64,
reflect.String:
return true
}
return false
}
// isValidInterface returns whether s is a valid name for an interface.
func isValidInterface(s string) bool {
if len(s) == 0 || len(s) > 255 || s[0] == '.' {
return false
}
elem := strings.Split(s, ".")
if len(elem) < 2 {
return false
}
for _, v := range elem {
if len(v) == 0 {
return false
}
if v[0] >= '0' && v[0] <= '9' {
return false
}
for _, c := range v {
if !isMemberChar(c) {
return false
}
}
}
return true
}
// isValidMember returns whether s is a valid name for a member.
func isValidMember(s string) bool {
if len(s) == 0 || len(s) > 255 {
return false
}
i := strings.Index(s, ".")
if i != -1 {
return false
}
if s[0] >= '0' && s[0] <= '9' {
return false
}
for _, c := range s {
if !isMemberChar(c) {
return false
}
}
return true
}
func isMemberChar(c rune) bool {
return (c >= '0' && c <= '9') || (c >= 'A' && c <= 'Z') ||
(c >= 'a' && c <= 'z') || c == '_'
}
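A few concrete cases of the object path rules enforced by IsValid above, as an illustrative sketch:
```
package main

import (
	"fmt"

	"github.com/godbus/dbus"
)

func main() {
	fmt.Println(dbus.ObjectPath("/org/freedesktop/DBus").IsValid()) // true
	fmt.Println(dbus.ObjectPath("/").IsValid())                     // true: the root path is allowed
	fmt.Println(dbus.ObjectPath("relative/path").IsValid())         // false: must start with '/'
	fmt.Println(dbus.ObjectPath("/trailing/slash/").IsValid())      // false: must not end with '/'
	fmt.Println(dbus.ObjectPath("/with-dash").IsValid())            // false: elements may only contain [A-Za-z0-9_]
}
```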

228
vendor/github.com/godbus/dbus/decoder.go generated vendored Normal file

@ -0,0 +1,228 @@
package dbus
import (
"encoding/binary"
"io"
"reflect"
)
type decoder struct {
in io.Reader
order binary.ByteOrder
pos int
}
// newDecoder returns a new decoder that reads values from in. The input is
// expected to be in the given byte order.
func newDecoder(in io.Reader, order binary.ByteOrder) *decoder {
dec := new(decoder)
dec.in = in
dec.order = order
return dec
}
// align aligns the input to the given boundary and panics on error.
func (dec *decoder) align(n int) {
if dec.pos%n != 0 {
newpos := (dec.pos + n - 1) & ^(n - 1)
empty := make([]byte, newpos-dec.pos)
if _, err := io.ReadFull(dec.in, empty); err != nil {
panic(err)
}
dec.pos = newpos
}
}
// Calls binary.Read(dec.in, dec.order, v) and panics on read errors.
func (dec *decoder) binread(v interface{}) {
if err := binary.Read(dec.in, dec.order, v); err != nil {
panic(err)
}
}
func (dec *decoder) Decode(sig Signature) (vs []interface{}, err error) {
defer func() {
var ok bool
v := recover()
if err, ok = v.(error); ok {
if err == io.EOF || err == io.ErrUnexpectedEOF {
err = FormatError("unexpected EOF")
}
}
}()
vs = make([]interface{}, 0)
s := sig.str
for s != "" {
err, rem := validSingle(s, 0)
if err != nil {
return nil, err
}
v := dec.decode(s[:len(s)-len(rem)], 0)
vs = append(vs, v)
s = rem
}
return vs, nil
}
func (dec *decoder) decode(s string, depth int) interface{} {
dec.align(alignment(typeFor(s)))
switch s[0] {
case 'y':
var b [1]byte
if _, err := dec.in.Read(b[:]); err != nil {
panic(err)
}
dec.pos++
return b[0]
case 'b':
i := dec.decode("u", depth).(uint32)
switch {
case i == 0:
return false
case i == 1:
return true
default:
panic(FormatError("invalid value for boolean"))
}
case 'n':
var i int16
dec.binread(&i)
dec.pos += 2
return i
case 'i':
var i int32
dec.binread(&i)
dec.pos += 4
return i
case 'x':
var i int64
dec.binread(&i)
dec.pos += 8
return i
case 'q':
var i uint16
dec.binread(&i)
dec.pos += 2
return i
case 'u':
var i uint32
dec.binread(&i)
dec.pos += 4
return i
case 't':
var i uint64
dec.binread(&i)
dec.pos += 8
return i
case 'd':
var f float64
dec.binread(&f)
dec.pos += 8
return f
case 's':
length := dec.decode("u", depth).(uint32)
b := make([]byte, int(length)+1)
if _, err := io.ReadFull(dec.in, b); err != nil {
panic(err)
}
dec.pos += int(length) + 1
return string(b[:len(b)-1])
case 'o':
return ObjectPath(dec.decode("s", depth).(string))
case 'g':
length := dec.decode("y", depth).(byte)
b := make([]byte, int(length)+1)
if _, err := io.ReadFull(dec.in, b); err != nil {
panic(err)
}
dec.pos += int(length) + 1
sig, err := ParseSignature(string(b[:len(b)-1]))
if err != nil {
panic(err)
}
return sig
case 'v':
if depth >= 64 {
panic(FormatError("input exceeds container depth limit"))
}
var variant Variant
sig := dec.decode("g", depth).(Signature)
if len(sig.str) == 0 {
panic(FormatError("variant signature is empty"))
}
err, rem := validSingle(sig.str, 0)
if err != nil {
panic(err)
}
if rem != "" {
panic(FormatError("variant signature has multiple types"))
}
variant.sig = sig
variant.value = dec.decode(sig.str, depth+1)
return variant
case 'h':
return UnixFDIndex(dec.decode("u", depth).(uint32))
case 'a':
if len(s) > 1 && s[1] == '{' {
ksig := s[2:3]
vsig := s[3 : len(s)-1]
v := reflect.MakeMap(reflect.MapOf(typeFor(ksig), typeFor(vsig)))
if depth >= 63 {
panic(FormatError("input exceeds container depth limit"))
}
length := dec.decode("u", depth).(uint32)
// Even for empty maps, the correct padding must be included
dec.align(8)
spos := dec.pos
for dec.pos < spos+int(length) {
dec.align(8)
if !isKeyType(v.Type().Key()) {
panic(InvalidTypeError{v.Type()})
}
kv := dec.decode(ksig, depth+2)
vv := dec.decode(vsig, depth+2)
v.SetMapIndex(reflect.ValueOf(kv), reflect.ValueOf(vv))
}
return v.Interface()
}
if depth >= 64 {
panic(FormatError("input exceeds container depth limit"))
}
length := dec.decode("u", depth).(uint32)
v := reflect.MakeSlice(reflect.SliceOf(typeFor(s[1:])), 0, int(length))
// Even for empty arrays, the correct padding must be included
dec.align(alignment(typeFor(s[1:])))
spos := dec.pos
for dec.pos < spos+int(length) {
ev := dec.decode(s[1:], depth+1)
v = reflect.Append(v, reflect.ValueOf(ev))
}
return v.Interface()
case '(':
if depth >= 64 {
panic(FormatError("input exceeds container depth limit"))
}
dec.align(8)
v := make([]interface{}, 0)
s = s[1 : len(s)-1]
for s != "" {
err, rem := validSingle(s, 0)
if err != nil {
panic(err)
}
ev := dec.decode(s[:len(s)-len(rem)], depth+1)
v = append(v, ev)
s = rem
}
return v
default:
panic(SignatureError{Sig: s})
}
}
// A FormatError is an error in the wire format.
type FormatError string
func (e FormatError) Error() string {
return "dbus: wire format error: " + string(e)
}

63
vendor/github.com/godbus/dbus/doc.go generated vendored Normal file

@ -0,0 +1,63 @@
/*
Package dbus implements bindings to the D-Bus message bus system.
To use the message bus API, you first need to connect to a bus (usually the
session or system bus). The acquired connection then can be used to call methods
on remote objects and emit or receive signals. Using the Export method, you can
arrange D-Bus methods calls to be directly translated to method calls on a Go
value.
Conversion Rules
For outgoing messages, Go types are automatically converted to the
corresponding D-Bus types. The following types are directly encoded as their
respective D-Bus equivalents:
Go type | D-Bus type
------------+-----------
byte | BYTE
bool | BOOLEAN
int16 | INT16
uint16 | UINT16
int32 | INT32
uint32 | UINT32
int64 | INT64
uint64 | UINT64
float64 | DOUBLE
string | STRING
ObjectPath | OBJECT_PATH
Signature | SIGNATURE
Variant | VARIANT
UnixFDIndex | UNIX_FD
Slices and arrays encode as ARRAYs of their element type.
Maps encode as DICTs, provided that their key type can be used as a key for
a DICT.
Structs other than Variant and Signature encode as a STRUCT containing their
exported fields. Fields whose tags contain `dbus:"-"` and unexported fields will
be skipped.
Pointers encode as the value they point to.
Trying to encode any other type or a slice, map or struct containing an
unsupported type will result in an InvalidTypeError.
For incoming messages, the inverse of these rules is used, with the exception
of STRUCTs. Incoming STRUCTs are represented as a slice of empty interfaces
containing the struct fields in the correct order. The Store function can be
used to convert such values to Go structs.
Unix FD passing
Handling Unix file descriptors deserves special mention. To use them, you should
first check that they are supported on a connection by calling SupportsUnixFDs.
If it returns true, all methods of Conn will translate messages containing
UnixFDs to messages that are accompanied by the given file descriptors with the
UnixFD values being substituted by the correct indices. Similarly, the indices
of incoming messages are automatically resolved. It shouldn't be necessary to use
UnixFDIndex.
*/
package dbus
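As an illustration of the STRUCT rule above (not part of the vendored source), an incoming STRUCT arrives as a []interface{} and Store converts it into a Go struct:
```
package main

import (
	"fmt"

	"github.com/godbus/dbus"
)

// version mirrors a D-Bus STRUCT of two UINT32 fields.
type version struct {
	Major uint32
	Minor uint32
}

func main() {
	// An incoming STRUCT is represented as a slice of empty interfaces.
	body := []interface{}{
		[]interface{}{uint32(1), uint32(2)},
	}
	var v version
	if err := dbus.Store(body, &v); err != nil {
		panic(err)
	}
	fmt.Println(v.Major, v.Minor) // 1 2
}
```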

208
vendor/github.com/godbus/dbus/encoder.go generated vendored Normal file

@ -0,0 +1,208 @@
package dbus
import (
"bytes"
"encoding/binary"
"io"
"reflect"
)
// An encoder encodes values to the D-Bus wire format.
type encoder struct {
out io.Writer
order binary.ByteOrder
pos int
}
// NewEncoder returns a new encoder that writes to out in the given byte order.
func newEncoder(out io.Writer, order binary.ByteOrder) *encoder {
return newEncoderAtOffset(out, 0, order)
}
// newEncoderAtOffset returns a new encoder that writes to out in the given
// byte order. Specify the offset to initialize pos for proper alignment
// computation.
func newEncoderAtOffset(out io.Writer, offset int, order binary.ByteOrder) *encoder {
enc := new(encoder)
enc.out = out
enc.order = order
enc.pos = offset
return enc
}
// Aligns the next output to be on a multiple of n. Panics on write errors.
func (enc *encoder) align(n int) {
pad := enc.padding(0, n)
if pad > 0 {
empty := make([]byte, pad)
if _, err := enc.out.Write(empty); err != nil {
panic(err)
}
enc.pos += pad
}
}
// padding returns the number of bytes of padding needed, based on the current
// position, an additional offset, and the alignment.
func (enc *encoder) padding(offset, algn int) int {
abs := enc.pos + offset
if abs%algn != 0 {
newabs := (abs + algn - 1) & ^(algn - 1)
return newabs - abs
}
return 0
}
// Calls binary.Write(enc.out, enc.order, v) and panics on write errors.
func (enc *encoder) binwrite(v interface{}) {
if err := binary.Write(enc.out, enc.order, v); err != nil {
panic(err)
}
}
// Encode encodes the given values to the underlying writer. All written values
// are aligned properly as required by the D-Bus spec.
func (enc *encoder) Encode(vs ...interface{}) (err error) {
defer func() {
err, _ = recover().(error)
}()
for _, v := range vs {
enc.encode(reflect.ValueOf(v), 0)
}
return nil
}
// encode encodes the given value to the writer and panics on error. depth holds
// the depth of the container nesting.
func (enc *encoder) encode(v reflect.Value, depth int) {
enc.align(alignment(v.Type()))
switch v.Kind() {
case reflect.Uint8:
var b [1]byte
b[0] = byte(v.Uint())
if _, err := enc.out.Write(b[:]); err != nil {
panic(err)
}
enc.pos++
case reflect.Bool:
if v.Bool() {
enc.encode(reflect.ValueOf(uint32(1)), depth)
} else {
enc.encode(reflect.ValueOf(uint32(0)), depth)
}
case reflect.Int16:
enc.binwrite(int16(v.Int()))
enc.pos += 2
case reflect.Uint16:
enc.binwrite(uint16(v.Uint()))
enc.pos += 2
case reflect.Int32:
enc.binwrite(int32(v.Int()))
enc.pos += 4
case reflect.Uint32:
enc.binwrite(uint32(v.Uint()))
enc.pos += 4
case reflect.Int64:
enc.binwrite(v.Int())
enc.pos += 8
case reflect.Uint64:
enc.binwrite(v.Uint())
enc.pos += 8
case reflect.Float64:
enc.binwrite(v.Float())
enc.pos += 8
case reflect.String:
enc.encode(reflect.ValueOf(uint32(len(v.String()))), depth)
b := make([]byte, v.Len()+1)
copy(b, v.String())
b[len(b)-1] = 0
n, err := enc.out.Write(b)
if err != nil {
panic(err)
}
enc.pos += n
case reflect.Ptr:
enc.encode(v.Elem(), depth)
case reflect.Slice, reflect.Array:
if depth >= 64 {
panic(FormatError("input exceeds container depth limit"))
}
// Lookahead offset: 4 bytes for uint32 length (with alignment),
// plus alignment for elements.
n := enc.padding(0, 4) + 4
offset := enc.pos + n + enc.padding(n, alignment(v.Type().Elem()))
var buf bytes.Buffer
bufenc := newEncoderAtOffset(&buf, offset, enc.order)
for i := 0; i < v.Len(); i++ {
bufenc.encode(v.Index(i), depth+1)
}
enc.encode(reflect.ValueOf(uint32(buf.Len())), depth)
length := buf.Len()
enc.align(alignment(v.Type().Elem()))
if _, err := buf.WriteTo(enc.out); err != nil {
panic(err)
}
enc.pos += length
case reflect.Struct:
if depth >= 64 && v.Type() != signatureType {
panic(FormatError("input exceeds container depth limit"))
}
switch t := v.Type(); t {
case signatureType:
str := v.Field(0)
enc.encode(reflect.ValueOf(byte(str.Len())), depth+1)
b := make([]byte, str.Len()+1)
copy(b, str.String())
b[len(b)-1] = 0
n, err := enc.out.Write(b)
if err != nil {
panic(err)
}
enc.pos += n
case variantType:
variant := v.Interface().(Variant)
enc.encode(reflect.ValueOf(variant.sig), depth+1)
enc.encode(reflect.ValueOf(variant.value), depth+1)
default:
for i := 0; i < v.Type().NumField(); i++ {
field := t.Field(i)
if field.PkgPath == "" && field.Tag.Get("dbus") != "-" {
enc.encode(v.Field(i), depth+1)
}
}
}
case reflect.Map:
// Maps are arrays of structures, so they actually increase the depth by
// 2.
if depth >= 63 {
panic(FormatError("input exceeds container depth limit"))
}
if !isKeyType(v.Type().Key()) {
panic(InvalidTypeError{v.Type()})
}
keys := v.MapKeys()
// Lookahead offset: 4 bytes for uint32 length (with alignment),
// plus 8-byte alignment
n := enc.padding(0, 4) + 4
offset := enc.pos + n + enc.padding(n, 8)
var buf bytes.Buffer
bufenc := newEncoderAtOffset(&buf, offset, enc.order)
for _, k := range keys {
bufenc.align(8)
bufenc.encode(k, depth+2)
bufenc.encode(v.MapIndex(k), depth+2)
}
enc.encode(reflect.ValueOf(uint32(buf.Len())), depth)
length := buf.Len()
enc.align(8)
if _, err := buf.WriteTo(enc.out); err != nil {
panic(err)
}
enc.pos += length
default:
panic(InvalidTypeError{v.Type()})
}
}
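The expression `(abs + algn - 1) & ^(algn - 1)` used by padding (and by the decoder's align) rounds a position up to the next multiple of the alignment. A small standalone example of the arithmetic:
```
package main

import "fmt"

func main() {
	// Round position 6 up to an 8-byte boundary:
	// (6 + 8 - 1) &^ (8 - 1) = 13 &^ 7 = 8, so 2 padding bytes are needed.
	pos, algn := 6, 8
	next := (pos + algn - 1) &^ (algn - 1)
	fmt.Println(next, next-pos) // 8 2
}
```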

411
vendor/github.com/godbus/dbus/export.go generated vendored Normal file

@ -0,0 +1,411 @@
package dbus
import (
"errors"
"fmt"
"reflect"
"strings"
)
var (
errmsgInvalidArg = Error{
"org.freedesktop.DBus.Error.InvalidArgs",
[]interface{}{"Invalid type / number of args"},
}
errmsgNoObject = Error{
"org.freedesktop.DBus.Error.NoSuchObject",
[]interface{}{"No such object"},
}
errmsgUnknownMethod = Error{
"org.freedesktop.DBus.Error.UnknownMethod",
[]interface{}{"Unknown / invalid method"},
}
)
// exportWithMapping represents an exported struct along with a method name
// mapping to allow for exporting lower-case methods, etc.
type exportWithMapping struct {
export interface{}
// Method name mapping; key -> struct method, value -> dbus method.
mapping map[string]string
// Whether or not this export is for the entire subtree
includeSubtree bool
}
// Sender is a type which can be used in exported methods to receive the message
// sender.
type Sender string
func exportedMethod(export exportWithMapping, name string) reflect.Value {
if export.export == nil {
return reflect.Value{}
}
// If a mapping was included in the export, check the map to see if we
// should be looking for a different method in the export.
if export.mapping != nil {
for key, value := range export.mapping {
if value == name {
name = key
break
}
// Catch the case where a method is aliased but the client is calling
// the original, e.g. the "Foo" method was exported mapped to
// "foo", and the dbus client called the original "Foo".
if key == name {
return reflect.Value{}
}
}
}
value := reflect.ValueOf(export.export)
m := value.MethodByName(name)
// Catch the case of attempting to call an unexported method
method, ok := value.Type().MethodByName(name)
if !m.IsValid() || !ok || method.PkgPath != "" {
return reflect.Value{}
}
t := m.Type()
if t.NumOut() == 0 ||
t.Out(t.NumOut()-1) != reflect.TypeOf(&errmsgInvalidArg) {
return reflect.Value{}
}
return m
}
// searchHandlers will look through all registered handlers looking for one
// to handle the given path. If a verbatim one isn't found, it will check for
// a subtree registration for the path as well.
func (conn *Conn) searchHandlers(path ObjectPath) (map[string]exportWithMapping, bool) {
conn.handlersLck.RLock()
defer conn.handlersLck.RUnlock()
handlers, ok := conn.handlers[path]
if ok {
return handlers, ok
}
// If handlers weren't found for this exact path, look for a matching subtree
// registration
handlers = make(map[string]exportWithMapping)
path = path[:strings.LastIndex(string(path), "/")]
for len(path) > 0 {
var subtreeHandlers map[string]exportWithMapping
subtreeHandlers, ok = conn.handlers[path]
if ok {
for iface, handler := range subtreeHandlers {
// Only include this handler if it registered for the subtree
if handler.includeSubtree {
handlers[iface] = handler
}
}
break
}
path = path[:strings.LastIndex(string(path), "/")]
}
return handlers, ok
}
// handleCall handles the given method call (i.e. checks whether it's one of the
// pre-implemented ones and searches for a corresponding handler if not).
func (conn *Conn) handleCall(msg *Message) {
name := msg.Headers[FieldMember].value.(string)
path := msg.Headers[FieldPath].value.(ObjectPath)
ifaceName, hasIface := msg.Headers[FieldInterface].value.(string)
sender, hasSender := msg.Headers[FieldSender].value.(string)
serial := msg.serial
if ifaceName == "org.freedesktop.DBus.Peer" {
switch name {
case "Ping":
conn.sendReply(sender, serial)
case "GetMachineId":
conn.sendReply(sender, serial, conn.uuid)
default:
conn.sendError(errmsgUnknownMethod, sender, serial)
}
return
}
if len(name) == 0 {
conn.sendError(errmsgUnknownMethod, sender, serial)
}
// Find the exported handler (if any) for this path
handlers, ok := conn.searchHandlers(path)
if !ok {
conn.sendError(errmsgNoObject, sender, serial)
return
}
var m reflect.Value
if hasIface {
iface := handlers[ifaceName]
m = exportedMethod(iface, name)
} else {
for _, v := range handlers {
m = exportedMethod(v, name)
if m.IsValid() {
break
}
}
}
if !m.IsValid() {
conn.sendError(errmsgUnknownMethod, sender, serial)
return
}
t := m.Type()
vs := msg.Body
pointers := make([]interface{}, t.NumIn())
decode := make([]interface{}, 0, len(vs))
for i := 0; i < t.NumIn(); i++ {
tp := t.In(i)
val := reflect.New(tp)
pointers[i] = val.Interface()
if tp == reflect.TypeOf((*Sender)(nil)).Elem() {
val.Elem().SetString(sender)
} else if tp == reflect.TypeOf((*Message)(nil)).Elem() {
val.Elem().Set(reflect.ValueOf(*msg))
} else {
decode = append(decode, pointers[i])
}
}
if len(decode) != len(vs) {
conn.sendError(errmsgInvalidArg, sender, serial)
return
}
if err := Store(vs, decode...); err != nil {
conn.sendError(errmsgInvalidArg, sender, serial)
return
}
// Extract parameters
params := make([]reflect.Value, len(pointers))
for i := 0; i < len(pointers); i++ {
params[i] = reflect.ValueOf(pointers[i]).Elem()
}
// Call method
ret := m.Call(params)
if em := ret[t.NumOut()-1].Interface().(*Error); em != nil {
conn.sendError(*em, sender, serial)
return
}
if msg.Flags&FlagNoReplyExpected == 0 {
reply := new(Message)
reply.Type = TypeMethodReply
reply.serial = conn.getSerial()
reply.Headers = make(map[HeaderField]Variant)
if hasSender {
reply.Headers[FieldDestination] = msg.Headers[FieldSender]
}
reply.Headers[FieldReplySerial] = MakeVariant(msg.serial)
reply.Body = make([]interface{}, len(ret)-1)
for i := 0; i < len(ret)-1; i++ {
reply.Body[i] = ret[i].Interface()
}
if len(ret) != 1 {
reply.Headers[FieldSignature] = MakeVariant(SignatureOf(reply.Body...))
}
conn.outLck.RLock()
if !conn.closed {
conn.out <- reply
}
conn.outLck.RUnlock()
}
}
// Emit emits the given signal on the message bus. The name parameter must be
// formatted as "interface.member", e.g., "org.freedesktop.DBus.NameLost".
func (conn *Conn) Emit(path ObjectPath, name string, values ...interface{}) error {
if !path.IsValid() {
return errors.New("dbus: invalid object path")
}
i := strings.LastIndex(name, ".")
if i == -1 {
return errors.New("dbus: invalid method name")
}
iface := name[:i]
member := name[i+1:]
if !isValidMember(member) {
return errors.New("dbus: invalid method name")
}
if !isValidInterface(iface) {
return errors.New("dbus: invalid interface name")
}
msg := new(Message)
msg.Type = TypeSignal
msg.serial = conn.getSerial()
msg.Headers = make(map[HeaderField]Variant)
msg.Headers[FieldInterface] = MakeVariant(iface)
msg.Headers[FieldMember] = MakeVariant(member)
msg.Headers[FieldPath] = MakeVariant(path)
msg.Body = values
if len(values) > 0 {
msg.Headers[FieldSignature] = MakeVariant(SignatureOf(values...))
}
conn.outLck.RLock()
defer conn.outLck.RUnlock()
if conn.closed {
return ErrClosed
}
conn.out <- msg
return nil
}
// Export registers the given value to be exported as an object on the
// message bus.
//
// If a method call on the given path and interface is received, an exported
// method with the same name is called with v as the receiver if the
// parameters match and the last return value is of type *Error. If this
// *Error is not nil, it is sent back to the caller as an error.
// Otherwise, a method reply is sent with the other return values as its body.
//
// Any parameters with the special type Sender are set to the sender of the
// dbus message when the method is called. Parameters of this type do not
// contribute to the dbus signature of the method (i.e. the method is exposed
// as if the parameters of type Sender were not there).
//
// Similarly, any parameters with the type Message are set to the raw message
// received on the bus. Again, parameters of this type do not contribute to the
// dbus signature of the method.
//
// Every method call is executed in a new goroutine, so the method may be called
// in multiple goroutines at once.
//
// Method calls on the interface org.freedesktop.DBus.Peer will be automatically
// handled for every object.
//
// Passing nil as the first parameter will cause conn to cease handling calls on
// the given combination of path and interface.
//
// Export returns an error if path is not a valid path name.
func (conn *Conn) Export(v interface{}, path ObjectPath, iface string) error {
return conn.ExportWithMap(v, nil, path, iface)
}
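// Minimal usage sketch (the type, path and interface names below are
// placeholders): a struct method whose last return value is *Error becomes a
// D-Bus method once the value is exported.
//
//	type greeter struct{}
//
//	func (g *greeter) Greet(name string) (string, *Error) {
//		return "hello " + name, nil
//	}
//
//	// On an established *Conn:
//	err := conn.Export(&greeter{}, "/com/example/App", "com.example.App")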
// ExportWithMap works exactly like Export but provides the ability to remap
// method names (e.g. export a lower-case method).
//
// The keys in the map are the real method names (exported on the struct), and
// the values are the method names to be exported on DBus.
func (conn *Conn) ExportWithMap(v interface{}, mapping map[string]string, path ObjectPath, iface string) error {
return conn.exportWithMap(v, mapping, path, iface, false)
}
// ExportSubtree works exactly like Export but registers the given value for
// an entire subtree under the provided root path rather than a single object path.
//
// In order to make this useful, one parameter in each of the value's exported
// methods should be a Message, in which case it will contain the raw message
// (allowing one to get access to the path that caused the method to be called).
//
// Note that more specific export paths take precedence over less specific. For
// example, a method call using the ObjectPath /foo/bar/baz will call a method
// exported on /foo/bar before a method exported on /foo.
func (conn *Conn) ExportSubtree(v interface{}, path ObjectPath, iface string) error {
return conn.ExportSubtreeWithMap(v, nil, path, iface)
}
// ExportSubtreeWithMap works exactly like ExportSubtree but provides the
// ability to remap method names (e.g. export a lower-case method).
//
// The keys in the map are the real method names (exported on the struct), and
// the values are the method names to be exported on DBus.
func (conn *Conn) ExportSubtreeWithMap(v interface{}, mapping map[string]string, path ObjectPath, iface string) error {
return conn.exportWithMap(v, mapping, path, iface, true)
}
// exportWithMap is the worker function for all exports/registrations.
func (conn *Conn) exportWithMap(v interface{}, mapping map[string]string, path ObjectPath, iface string, includeSubtree bool) error {
if !path.IsValid() {
return fmt.Errorf(`dbus: Invalid path name: "%s"`, path)
}
conn.handlersLck.Lock()
defer conn.handlersLck.Unlock()
// Remove a previous export if the interface is nil
if v == nil {
if _, ok := conn.handlers[path]; ok {
delete(conn.handlers[path], iface)
if len(conn.handlers[path]) == 0 {
delete(conn.handlers, path)
}
}
return nil
}
// If this is the first handler for this path, make a new map to hold all
// handlers for this path.
if _, ok := conn.handlers[path]; !ok {
conn.handlers[path] = make(map[string]exportWithMapping)
}
// Finally, save this handler
conn.handlers[path][iface] = exportWithMapping{export: v, mapping: mapping, includeSubtree: includeSubtree}
return nil
}
// ReleaseName calls org.freedesktop.DBus.ReleaseName and awaits a response.
func (conn *Conn) ReleaseName(name string) (ReleaseNameReply, error) {
var r uint32
err := conn.busObj.Call("org.freedesktop.DBus.ReleaseName", 0, name).Store(&r)
if err != nil {
return 0, err
}
return ReleaseNameReply(r), nil
}
// RequestName calls org.freedesktop.DBus.RequestName and awaits a response.
func (conn *Conn) RequestName(name string, flags RequestNameFlags) (RequestNameReply, error) {
var r uint32
err := conn.busObj.Call("org.freedesktop.DBus.RequestName", 0, name, flags).Store(&r)
if err != nil {
return 0, err
}
return RequestNameReply(r), nil
}
// ReleaseNameReply is the reply to a ReleaseName call.
type ReleaseNameReply uint32
const (
ReleaseNameReplyReleased ReleaseNameReply = 1 + iota
ReleaseNameReplyNonExistent
ReleaseNameReplyNotOwner
)
// RequestNameFlags represents the possible flags for a RequestName call.
type RequestNameFlags uint32
const (
NameFlagAllowReplacement RequestNameFlags = 1 << iota
NameFlagReplaceExisting
NameFlagDoNotQueue
)
// RequestNameReply is the reply to a RequestName call.
type RequestNameReply uint32
const (
RequestNameReplyPrimaryOwner RequestNameReply = 1 + iota
RequestNameReplyInQueue
RequestNameReplyExists
RequestNameReplyAlreadyOwner
)
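// Sketch of claiming a well-known name and checking the reply; the name
// "com.example.App" is a placeholder:
//
//	reply, err := conn.RequestName("com.example.App", NameFlagDoNotQueue)
//	if err == nil && reply != RequestNameReplyPrimaryOwner {
//		// another connection already owns the name
//	}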

vendor/github.com/godbus/dbus/homedir.go generated vendored Normal file

@ -0,0 +1,28 @@
package dbus
import (
"os"
"sync"
)
var (
homeDir string
homeDirLock sync.Mutex
)
func getHomeDir() string {
homeDirLock.Lock()
defer homeDirLock.Unlock()
if homeDir != "" {
return homeDir
}
homeDir = os.Getenv("HOME")
if homeDir != "" {
return homeDir
}
homeDir = lookupHomeDir()
return homeDir
}

vendor/github.com/godbus/dbus/homedir_dynamic.go generated vendored Normal file

@ -0,0 +1,15 @@
// +build !static_build
package dbus
import (
"os/user"
)
func lookupHomeDir() string {
u, err := user.Current()
if err != nil {
return "/"
}
return u.HomeDir
}

vendor/github.com/godbus/dbus/homedir_static.go generated vendored Normal file

@ -0,0 +1,45 @@
// +build static_build
package dbus
import (
"bufio"
"os"
"strconv"
"strings"
)
func lookupHomeDir() string {
myUid := os.Getuid()
f, err := os.Open("/etc/passwd")
if err != nil {
return "/"
}
defer f.Close()
s := bufio.NewScanner(f)
for s.Scan() {
if err := s.Err(); err != nil {
break
}
line := strings.TrimSpace(s.Text())
if line == "" {
continue
}
parts := strings.Split(line, ":")
if len(parts) >= 6 {
uid, err := strconv.Atoi(parts[2])
if err == nil && uid == myUid {
return parts[5]
}
}
}
// Default to / if we can't get a better value
return "/"
}

vendor/github.com/godbus/dbus/message.go generated vendored Normal file

@ -0,0 +1,346 @@
package dbus
import (
"bytes"
"encoding/binary"
"errors"
"io"
"reflect"
"strconv"
)
const protoVersion byte = 1
// Flags represents the possible flags of a D-Bus message.
type Flags byte
const (
// FlagNoReplyExpected signals that the message is not expected to generate
// a reply. If this flag is set on outgoing messages, any possible reply
// will be discarded.
FlagNoReplyExpected Flags = 1 << iota
// FlagNoAutoStart signals that the message bus should not automatically
// start an application when handling this message.
FlagNoAutoStart
)
// Type represents the possible types of a D-Bus message.
type Type byte
const (
TypeMethodCall Type = 1 + iota
TypeMethodReply
TypeError
TypeSignal
typeMax
)
func (t Type) String() string {
switch t {
case TypeMethodCall:
return "method call"
case TypeMethodReply:
return "reply"
case TypeError:
return "error"
case TypeSignal:
return "signal"
}
return "invalid"
}
// HeaderField represents the possible byte codes for the headers
// of a D-Bus message.
type HeaderField byte
const (
FieldPath HeaderField = 1 + iota
FieldInterface
FieldMember
FieldErrorName
FieldReplySerial
FieldDestination
FieldSender
FieldSignature
FieldUnixFDs
fieldMax
)
// An InvalidMessageError describes the reason why a D-Bus message is regarded as
// invalid.
type InvalidMessageError string
func (e InvalidMessageError) Error() string {
return "dbus: invalid message: " + string(e)
}
// fieldType are the types of the various header fields.
var fieldTypes = [fieldMax]reflect.Type{
FieldPath: objectPathType,
FieldInterface: stringType,
FieldMember: stringType,
FieldErrorName: stringType,
FieldReplySerial: uint32Type,
FieldDestination: stringType,
FieldSender: stringType,
FieldSignature: signatureType,
FieldUnixFDs: uint32Type,
}
// requiredFields lists the header fields that are required by the different
// message types.
var requiredFields = [typeMax][]HeaderField{
TypeMethodCall: {FieldPath, FieldMember},
TypeMethodReply: {FieldReplySerial},
TypeError: {FieldErrorName, FieldReplySerial},
TypeSignal: {FieldPath, FieldInterface, FieldMember},
}
// Message represents a single D-Bus message.
type Message struct {
Type
Flags
Headers map[HeaderField]Variant
Body []interface{}
serial uint32
}
type header struct {
Field byte
Variant
}
// DecodeMessage tries to decode a single message in the D-Bus wire format
// from the given reader. The byte order is figured out from the first byte.
// The possibly returned error can be an error of the underlying reader, an
// InvalidMessageError or a FormatError.
func DecodeMessage(rd io.Reader) (msg *Message, err error) {
var order binary.ByteOrder
var hlength, length uint32
var typ, flags, proto byte
var headers []header
b := make([]byte, 1)
_, err = rd.Read(b)
if err != nil {
return
}
switch b[0] {
case 'l':
order = binary.LittleEndian
case 'B':
order = binary.BigEndian
default:
return nil, InvalidMessageError("invalid byte order")
}
dec := newDecoder(rd, order)
dec.pos = 1
msg = new(Message)
vs, err := dec.Decode(Signature{"yyyuu"})
if err != nil {
return nil, err
}
if err = Store(vs, &typ, &flags, &proto, &length, &msg.serial); err != nil {
return nil, err
}
msg.Type = Type(typ)
msg.Flags = Flags(flags)
// get the header length separately because we need it later
b = make([]byte, 4)
_, err = io.ReadFull(rd, b)
if err != nil {
return nil, err
}
binary.Read(bytes.NewBuffer(b), order, &hlength)
if hlength+length+16 > 1<<27 {
return nil, InvalidMessageError("message is too long")
}
dec = newDecoder(io.MultiReader(bytes.NewBuffer(b), rd), order)
dec.pos = 12
vs, err = dec.Decode(Signature{"a(yv)"})
if err != nil {
return nil, err
}
if err = Store(vs, &headers); err != nil {
return nil, err
}
msg.Headers = make(map[HeaderField]Variant)
for _, v := range headers {
msg.Headers[HeaderField(v.Field)] = v.Variant
}
dec.align(8)
body := make([]byte, int(length))
if length != 0 {
_, err := io.ReadFull(rd, body)
if err != nil {
return nil, err
}
}
if err = msg.IsValid(); err != nil {
return nil, err
}
sig, _ := msg.Headers[FieldSignature].value.(Signature)
if sig.str != "" {
buf := bytes.NewBuffer(body)
dec = newDecoder(buf, order)
vs, err := dec.Decode(sig)
if err != nil {
return nil, err
}
msg.Body = vs
}
return
}
// EncodeTo encodes and sends a message to the given writer. The byte order must
// be either binary.LittleEndian or binary.BigEndian. If the message is not
// valid or an error occurs when writing, an error is returned.
func (msg *Message) EncodeTo(out io.Writer, order binary.ByteOrder) error {
if err := msg.IsValid(); err != nil {
return err
}
var vs [7]interface{}
switch order {
case binary.LittleEndian:
vs[0] = byte('l')
case binary.BigEndian:
vs[0] = byte('B')
default:
return errors.New("dbus: invalid byte order")
}
body := new(bytes.Buffer)
enc := newEncoder(body, order)
if len(msg.Body) != 0 {
enc.Encode(msg.Body...)
}
vs[1] = msg.Type
vs[2] = msg.Flags
vs[3] = protoVersion
vs[4] = uint32(len(body.Bytes()))
vs[5] = msg.serial
headers := make([]header, 0, len(msg.Headers))
for k, v := range msg.Headers {
headers = append(headers, header{byte(k), v})
}
vs[6] = headers
var buf bytes.Buffer
enc = newEncoder(&buf, order)
enc.Encode(vs[:]...)
enc.align(8)
body.WriteTo(&buf)
if buf.Len() > 1<<27 {
return InvalidMessageError("message is too long")
}
if _, err := buf.WriteTo(out); err != nil {
return err
}
return nil
}
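// Round-trip sketch: encode a minimal method call to a buffer and decode it
// again. Only the headers required by IsValid for TypeMethodCall are set; the
// path and member are placeholders.
//
//	msg := new(Message)
//	msg.Type = TypeMethodCall
//	msg.serial = 1
//	msg.Headers = map[HeaderField]Variant{
//		FieldPath:   MakeVariant(ObjectPath("/com/example/App")),
//		FieldMember: MakeVariant("Ping"),
//	}
//	var buf bytes.Buffer
//	if err := msg.EncodeTo(&buf, binary.LittleEndian); err != nil {
//		// handle the encoding error
//	}
//	decoded, err := DecodeMessage(&buf)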
// IsValid checks whether msg is a valid message and returns an
// InvalidMessageError if it is not.
func (msg *Message) IsValid() error {
if msg.Flags & ^(FlagNoAutoStart|FlagNoReplyExpected) != 0 {
return InvalidMessageError("invalid flags")
}
if msg.Type == 0 || msg.Type >= typeMax {
return InvalidMessageError("invalid message type")
}
for k, v := range msg.Headers {
if k == 0 || k >= fieldMax {
return InvalidMessageError("invalid header")
}
if reflect.TypeOf(v.value) != fieldTypes[k] {
return InvalidMessageError("invalid type of header field")
}
}
for _, v := range requiredFields[msg.Type] {
if _, ok := msg.Headers[v]; !ok {
return InvalidMessageError("missing required header")
}
}
if path, ok := msg.Headers[FieldPath]; ok {
if !path.value.(ObjectPath).IsValid() {
return InvalidMessageError("invalid path name")
}
}
if iface, ok := msg.Headers[FieldInterface]; ok {
if !isValidInterface(iface.value.(string)) {
return InvalidMessageError("invalid interface name")
}
}
if member, ok := msg.Headers[FieldMember]; ok {
if !isValidMember(member.value.(string)) {
return InvalidMessageError("invalid member name")
}
}
if errname, ok := msg.Headers[FieldErrorName]; ok {
if !isValidInterface(errname.value.(string)) {
return InvalidMessageError("invalid error name")
}
}
if len(msg.Body) != 0 {
if _, ok := msg.Headers[FieldSignature]; !ok {
return InvalidMessageError("missing signature")
}
}
return nil
}
// Serial returns the message's serial number. The returned value is only valid
// for messages received by eavesdropping.
func (msg *Message) Serial() uint32 {
return msg.serial
}
// String returns a string representation of a message similar to the format of
// dbus-monitor.
func (msg *Message) String() string {
if err := msg.IsValid(); err != nil {
return "<invalid>"
}
s := msg.Type.String()
if v, ok := msg.Headers[FieldSender]; ok {
s += " from " + v.value.(string)
}
if v, ok := msg.Headers[FieldDestination]; ok {
s += " to " + v.value.(string)
}
s += " serial " + strconv.FormatUint(uint64(msg.serial), 10)
if v, ok := msg.Headers[FieldReplySerial]; ok {
s += " reply_serial " + strconv.FormatUint(uint64(v.value.(uint32)), 10)
}
if v, ok := msg.Headers[FieldUnixFDs]; ok {
s += " unixfds " + strconv.FormatUint(uint64(v.value.(uint32)), 10)
}
if v, ok := msg.Headers[FieldPath]; ok {
s += " path " + string(v.value.(ObjectPath))
}
if v, ok := msg.Headers[FieldInterface]; ok {
s += " interface " + v.value.(string)
}
if v, ok := msg.Headers[FieldErrorName]; ok {
s += " error " + v.value.(string)
}
if v, ok := msg.Headers[FieldMember]; ok {
s += " member " + v.value.(string)
}
if len(msg.Body) != 0 {
s += "\n"
}
for i, v := range msg.Body {
s += " " + MakeVariant(v).String()
if i != len(msg.Body)-1 {
s += "\n"
}
}
return s
}

vendor/github.com/godbus/dbus/object.go generated vendored Normal file

@ -0,0 +1,136 @@
package dbus
import (
"errors"
"strings"
)
// BusObject is the interface of a remote object on which methods can be
// invoked.
type BusObject interface {
Call(method string, flags Flags, args ...interface{}) *Call
Go(method string, flags Flags, ch chan *Call, args ...interface{}) *Call
GetProperty(p string) (Variant, error)
Destination() string
Path() ObjectPath
}
// Object represents a remote object on which methods can be invoked.
type Object struct {
conn *Conn
dest string
path ObjectPath
}
// Call calls a method with (*Object).Go and waits for its reply.
func (o *Object) Call(method string, flags Flags, args ...interface{}) *Call {
return <-o.Go(method, flags, make(chan *Call, 1), args...).Done
}
// AddMatchSignal subscribes the connection to signals emitted by the specified
// interface and member.
func (o *Object) AddMatchSignal(iface, member string) *Call {
return o.Call(
"org.freedesktop.DBus.AddMatch",
0,
"type='signal',interface='"+iface+"',member='"+member+"'",
)
}
// Go calls a method with the given arguments asynchronously. It returns a
// Call structure representing this method call. The passed channel will
// receive the same Call value once the call is done. If ch is nil, a new channel
// will be allocated. Otherwise, ch has to be buffered or Go will panic.
//
// If the flags include FlagNoReplyExpected, ch is ignored and a Call structure
// is returned of which only the Err member is valid.
//
// If the method parameter contains a dot ('.'), the part before the last dot
// specifies the interface on which the method is called.
func (o *Object) Go(method string, flags Flags, ch chan *Call, args ...interface{}) *Call {
iface := ""
i := strings.LastIndex(method, ".")
if i != -1 {
iface = method[:i]
}
method = method[i+1:]
msg := new(Message)
msg.Type = TypeMethodCall
msg.serial = o.conn.getSerial()
msg.Flags = flags & (FlagNoAutoStart | FlagNoReplyExpected)
msg.Headers = make(map[HeaderField]Variant)
msg.Headers[FieldPath] = MakeVariant(o.path)
msg.Headers[FieldDestination] = MakeVariant(o.dest)
msg.Headers[FieldMember] = MakeVariant(method)
if iface != "" {
msg.Headers[FieldInterface] = MakeVariant(iface)
}
msg.Body = args
if len(args) > 0 {
msg.Headers[FieldSignature] = MakeVariant(SignatureOf(args...))
}
if msg.Flags&FlagNoReplyExpected == 0 {
if ch == nil {
ch = make(chan *Call, 10)
} else if cap(ch) == 0 {
panic("dbus: unbuffered channel passed to (*Object).Go")
}
call := &Call{
Destination: o.dest,
Path: o.path,
Method: method,
Args: args,
Done: ch,
}
o.conn.callsLck.Lock()
o.conn.calls[msg.serial] = call
o.conn.callsLck.Unlock()
o.conn.outLck.RLock()
if o.conn.closed {
call.Err = ErrClosed
call.Done <- call
} else {
o.conn.out <- msg
}
o.conn.outLck.RUnlock()
return call
}
o.conn.outLck.RLock()
defer o.conn.outLck.RUnlock()
if o.conn.closed {
return &Call{Err: ErrClosed}
}
o.conn.out <- msg
return &Call{Err: nil}
}
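// Asynchronous usage sketch: it calls the standard org.freedesktop.DBus.Peer.Ping
// method; obj is assumed to be an *Object for some destination and path.
//
//	ch := make(chan *Call, 1)
//	obj.Go("org.freedesktop.DBus.Peer.Ping", 0, ch)
//	call := <-ch
//	if call.Err != nil {
//		// handle the call error
//	}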
// GetProperty calls org.freedesktop.DBus.Properties.Get on the given
// object. The property name must be given in interface.member notation.
func (o *Object) GetProperty(p string) (Variant, error) {
idx := strings.LastIndex(p, ".")
if idx == -1 || idx+1 == len(p) {
return Variant{}, errors.New("dbus: invalid property " + p)
}
iface := p[:idx]
prop := p[idx+1:]
result := Variant{}
err := o.Call("org.freedesktop.DBus.Properties.Get", 0, iface, prop).Store(&result)
if err != nil {
return Variant{}, err
}
return result, nil
}
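// Usage sketch, assuming this package's Conn.Object accessor and an open
// connection; the second object and its property are hypothetical:
//
//	bus := conn.Object("org.freedesktop.DBus", "/org/freedesktop/DBus")
//	var names []string
//	err := bus.Call("org.freedesktop.DBus.ListNames", 0).Store(&names)
//
//	app := conn.Object("com.example.App", "/com/example/App")
//	version, err := app.GetProperty("com.example.App.Version")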
// Destination returns the destination that calls on o are sent to.
func (o *Object) Destination() string {
return o.dest
}
// Path returns the path that calls on o are sent to.
func (o *Object) Path() ObjectPath {
return o.path
}

vendor/github.com/godbus/dbus/sig.go generated vendored Normal file

@ -0,0 +1,257 @@
package dbus
import (
"fmt"
"reflect"
"strings"
)
var sigToType = map[byte]reflect.Type{
'y': byteType,
'b': boolType,
'n': int16Type,
'q': uint16Type,
'i': int32Type,
'u': uint32Type,
'x': int64Type,
't': uint64Type,
'd': float64Type,
's': stringType,
'g': signatureType,
'o': objectPathType,
'v': variantType,
'h': unixFDIndexType,
}
// Signature represents a correct type signature as specified by the D-Bus
// specification. The zero value represents the empty signature, "".
type Signature struct {
str string
}
// SignatureOf returns the concatenation of all the signatures of the given
// values. It panics if one of them is not representable in D-Bus.
func SignatureOf(vs ...interface{}) Signature {
var s string
for _, v := range vs {
s += getSignature(reflect.TypeOf(v))
}
return Signature{s}
}
// SignatureOfType returns the signature of the given type. It panics if the
// type is not representable in D-Bus.
func SignatureOfType(t reflect.Type) Signature {
return Signature{getSignature(t)}
}
// getSignature returns the signature of the given type and panics on unknown types.
func getSignature(t reflect.Type) string {
// handle simple types first
switch t.Kind() {
case reflect.Uint8:
return "y"
case reflect.Bool:
return "b"
case reflect.Int16:
return "n"
case reflect.Uint16:
return "q"
case reflect.Int32:
if t == unixFDType {
return "h"
}
return "i"
case reflect.Uint32:
if t == unixFDIndexType {
return "h"
}
return "u"
case reflect.Int64:
return "x"
case reflect.Uint64:
return "t"
case reflect.Float64:
return "d"
case reflect.Ptr:
return getSignature(t.Elem())
case reflect.String:
if t == objectPathType {
return "o"
}
return "s"
case reflect.Struct:
if t == variantType {
return "v"
} else if t == signatureType {
return "g"
}
var s string
for i := 0; i < t.NumField(); i++ {
field := t.Field(i)
if field.PkgPath == "" && field.Tag.Get("dbus") != "-" {
s += getSignature(t.Field(i).Type)
}
}
return "(" + s + ")"
case reflect.Array, reflect.Slice:
return "a" + getSignature(t.Elem())
case reflect.Map:
if !isKeyType(t.Key()) {
panic(InvalidTypeError{t})
}
return "a{" + getSignature(t.Key()) + getSignature(t.Elem()) + "}"
}
panic(InvalidTypeError{t})
}
// ParseSignature returns the signature represented by this string, or a
// SignatureError if the string is not a valid signature.
func ParseSignature(s string) (sig Signature, err error) {
if len(s) == 0 {
return
}
if len(s) > 255 {
return Signature{""}, SignatureError{s, "too long"}
}
sig.str = s
for err == nil && len(s) != 0 {
err, s = validSingle(s, 0)
}
if err != nil {
sig = Signature{""}
}
return
}
// ParseSignatureMust behaves like ParseSignature, except that it panics if s
// is not valid.
func ParseSignatureMust(s string) Signature {
sig, err := ParseSignature(s)
if err != nil {
panic(err)
}
return sig
}
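// Quick sketch of both directions: deriving signatures from Go values and
// types, and validating a signature string.
//
//	sig := SignatureOf("", uint32(7), []int32{})                  // Signature{"suai"}
//	dict := SignatureOfType(reflect.TypeOf(map[string]Variant{})) // Signature{"a{sv}"}
//	parsed, err := ParseSignature("a{sv}")                        // err == nil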
// Empty returns whether the signature is the empty signature.
func (s Signature) Empty() bool {
return s.str == ""
}
// Single returns whether the signature represents a single, complete type.
func (s Signature) Single() bool {
err, r := validSingle(s.str, 0)
return err == nil && r == ""
}
// String returns the signature's string representation.
func (s Signature) String() string {
return s.str
}
// A SignatureError indicates that a signature passed to a function or received
// on a connection is not a valid signature.
type SignatureError struct {
Sig string
Reason string
}
func (e SignatureError) Error() string {
return fmt.Sprintf("dbus: invalid signature: %q (%s)", e.Sig, e.Reason)
}
// Try to read a single type from this string. If it was successful, err is nil
// and rem is the remaining unparsed part. Otherwise, err is a non-nil
// SignatureError and rem is "". depth is the current recursion depth which may
// not be greater than 64 and should be given as 0 on the first call.
func validSingle(s string, depth int) (err error, rem string) {
if s == "" {
return SignatureError{Sig: s, Reason: "empty signature"}, ""
}
if depth > 64 {
return SignatureError{Sig: s, Reason: "container nesting too deep"}, ""
}
switch s[0] {
case 'y', 'b', 'n', 'q', 'i', 'u', 'x', 't', 'd', 's', 'g', 'o', 'v', 'h':
return nil, s[1:]
case 'a':
if len(s) > 1 && s[1] == '{' {
i := findMatching(s[1:], '{', '}')
if i == -1 {
return SignatureError{Sig: s, Reason: "unmatched '{'"}, ""
}
i++
rem = s[i+1:]
s = s[2:i]
if err, _ = validSingle(s[:1], depth+1); err != nil {
return err, ""
}
err, nr := validSingle(s[1:], depth+1)
if err != nil {
return err, ""
}
if nr != "" {
return SignatureError{Sig: s, Reason: "too many types in dict"}, ""
}
return nil, rem
}
return validSingle(s[1:], depth+1)
case '(':
i := findMatching(s, '(', ')')
if i == -1 {
return SignatureError{Sig: s, Reason: "unmatched ')'"}, ""
}
rem = s[i+1:]
s = s[1:i]
for err == nil && s != "" {
err, s = validSingle(s, depth+1)
}
if err != nil {
rem = ""
}
return
}
return SignatureError{Sig: s, Reason: "invalid type character"}, ""
}
func findMatching(s string, left, right rune) int {
n := 0
for i, v := range s {
if v == left {
n++
} else if v == right {
n--
}
if n == 0 {
return i
}
}
return -1
}
// typeFor returns the type of the given signature. It ignores any left over
// characters and panics if s doesn't start with a valid type signature.
func typeFor(s string) (t reflect.Type) {
err, _ := validSingle(s, 0)
if err != nil {
panic(err)
}
if t, ok := sigToType[s[0]]; ok {
return t
}
switch s[0] {
case 'a':
if s[1] == '{' {
i := strings.LastIndex(s, "}")
t = reflect.MapOf(sigToType[s[2]], typeFor(s[3:i]))
} else {
t = reflect.SliceOf(typeFor(s[1:]))
}
case '(':
t = interfacesType
}
return
}

vendor/github.com/godbus/dbus/transport_darwin.go generated vendored Normal file

@ -0,0 +1,6 @@
package dbus
func (t *unixTransport) SendNullByte() error {
_, err := t.Write([]byte{0})
return err
}

vendor/github.com/godbus/dbus/transport_generic.go generated vendored Normal file

@ -0,0 +1,35 @@
package dbus
import (
"encoding/binary"
"errors"
"io"
)
type genericTransport struct {
io.ReadWriteCloser
}
func (t genericTransport) SendNullByte() error {
_, err := t.Write([]byte{0})
return err
}
func (t genericTransport) SupportsUnixFDs() bool {
return false
}
func (t genericTransport) EnableUnixFDs() {}
func (t genericTransport) ReadMessage() (*Message, error) {
return DecodeMessage(t)
}
func (t genericTransport) SendMessage(msg *Message) error {
for _, v := range msg.Body {
if _, ok := v.(UnixFD); ok {
return errors.New("dbus: unix fd passing not enabled")
}
}
return msg.EncodeTo(t, binary.LittleEndian)
}

vendor/github.com/godbus/dbus/transport_unix.go generated vendored Normal file

@ -0,0 +1,196 @@
//+build !windows
package dbus
import (
"bytes"
"encoding/binary"
"errors"
"io"
"net"
"syscall"
)
type oobReader struct {
conn *net.UnixConn
oob []byte
buf [4096]byte
}
func (o *oobReader) Read(b []byte) (n int, err error) {
n, oobn, flags, _, err := o.conn.ReadMsgUnix(b, o.buf[:])
if err != nil {
return n, err
}
if flags&syscall.MSG_CTRUNC != 0 {
return n, errors.New("dbus: control data truncated (too many fds received)")
}
o.oob = append(o.oob, o.buf[:oobn]...)
return n, nil
}
type unixTransport struct {
*net.UnixConn
hasUnixFDs bool
}
func newUnixTransport(keys string) (transport, error) {
var err error
t := new(unixTransport)
abstract := getKey(keys, "abstract")
path := getKey(keys, "path")
switch {
case abstract == "" && path == "":
return nil, errors.New("dbus: invalid address (neither path nor abstract set)")
case abstract != "" && path == "":
t.UnixConn, err = net.DialUnix("unix", nil, &net.UnixAddr{Name: "@" + abstract, Net: "unix"})
if err != nil {
return nil, err
}
return t, nil
case abstract == "" && path != "":
t.UnixConn, err = net.DialUnix("unix", nil, &net.UnixAddr{Name: path, Net: "unix"})
if err != nil {
return nil, err
}
return t, nil
default:
return nil, errors.New("dbus: invalid address (both path and abstract set)")
}
}
func init() {
transports["unix"] = newUnixTransport
}
func (t *unixTransport) EnableUnixFDs() {
t.hasUnixFDs = true
}
func (t *unixTransport) ReadMessage() (*Message, error) {
var (
blen, hlen uint32
csheader [16]byte
headers []header
order binary.ByteOrder
unixfds uint32
)
// To be sure that all bytes of out-of-band data are read, we use a special
// reader that uses ReadMsgUnix on the underlying connection instead of Read
// and gathers the out-of-band data in a buffer.
rd := &oobReader{conn: t.UnixConn}
// read the first 16 bytes (the part of the header that has a constant size),
// from which we can figure out the length of the rest of the message
if _, err := io.ReadFull(rd, csheader[:]); err != nil {
return nil, err
}
switch csheader[0] {
case 'l':
order = binary.LittleEndian
case 'B':
order = binary.BigEndian
default:
return nil, InvalidMessageError("invalid byte order")
}
// csheader[4:8] -> length of message body, csheader[12:16] -> length of
// header fields (without alignment)
binary.Read(bytes.NewBuffer(csheader[4:8]), order, &blen)
binary.Read(bytes.NewBuffer(csheader[12:]), order, &hlen)
if hlen%8 != 0 {
hlen += 8 - (hlen % 8)
}
// decode headers and look for unix fds
headerdata := make([]byte, hlen+4)
copy(headerdata, csheader[12:])
if _, err := io.ReadFull(t, headerdata[4:]); err != nil {
return nil, err
}
dec := newDecoder(bytes.NewBuffer(headerdata), order)
dec.pos = 12
vs, err := dec.Decode(Signature{"a(yv)"})
if err != nil {
return nil, err
}
Store(vs, &headers)
for _, v := range headers {
if v.Field == byte(FieldUnixFDs) {
unixfds, _ = v.Variant.value.(uint32)
}
}
all := make([]byte, 16+hlen+blen)
copy(all, csheader[:])
copy(all[16:], headerdata[4:])
if _, err := io.ReadFull(rd, all[16+hlen:]); err != nil {
return nil, err
}
if unixfds != 0 {
if !t.hasUnixFDs {
return nil, errors.New("dbus: got unix fds on unsupported transport")
}
// read the fds from the OOB data
scms, err := syscall.ParseSocketControlMessage(rd.oob)
if err != nil {
return nil, err
}
if len(scms) != 1 {
return nil, errors.New("dbus: received more than one socket control message")
}
fds, err := syscall.ParseUnixRights(&scms[0])
if err != nil {
return nil, err
}
msg, err := DecodeMessage(bytes.NewBuffer(all))
if err != nil {
return nil, err
}
// substitute the values in the message body (which are indices into the
// array of file descriptors received via OOB data) with the actual values
for i, v := range msg.Body {
if j, ok := v.(UnixFDIndex); ok {
if uint32(j) >= unixfds {
return nil, InvalidMessageError("invalid index for unix fd")
}
msg.Body[i] = UnixFD(fds[j])
}
}
return msg, nil
}
return DecodeMessage(bytes.NewBuffer(all))
}
func (t *unixTransport) SendMessage(msg *Message) error {
fds := make([]int, 0)
for i, v := range msg.Body {
if fd, ok := v.(UnixFD); ok {
msg.Body[i] = UnixFDIndex(len(fds))
fds = append(fds, int(fd))
}
}
if len(fds) != 0 {
if !t.hasUnixFDs {
return errors.New("dbus: unix fd passing not enabled")
}
msg.Headers[FieldUnixFDs] = MakeVariant(uint32(len(fds)))
oob := syscall.UnixRights(fds...)
buf := new(bytes.Buffer)
msg.EncodeTo(buf, binary.LittleEndian)
n, oobn, err := t.UnixConn.WriteMsgUnix(buf.Bytes(), oob, nil)
if err != nil {
return err
}
if n != buf.Len() || oobn != len(oob) {
return io.ErrShortWrite
}
} else {
if err := msg.EncodeTo(t, binary.LittleEndian); err != nil {
return err
}
}
return nil
}
func (t *unixTransport) SupportsUnixFDs() bool {
return true
}

vendor/github.com/godbus/dbus/transport_unixcred_dragonfly.go generated vendored Normal file

@ -0,0 +1,95 @@
// The UnixCredentials system call is currently only implemented on Linux
// http://golang.org/src/pkg/syscall/sockcmsg_linux.go
// https://golang.org/s/go1.4-syscall
// http://code.google.com/p/go/source/browse/unix/sockcmsg_linux.go?repo=sys
// Local implementation of the UnixCredentials system call for DragonFly BSD
package dbus
/*
#include <sys/ucred.h>
*/
import "C"
import (
"io"
"os"
"syscall"
"unsafe"
)
// http://golang.org/src/pkg/syscall/ztypes_linux_amd64.go
// http://golang.org/src/pkg/syscall/ztypes_dragonfly_amd64.go
type Ucred struct {
Pid int32
Uid uint32
Gid uint32
}
// http://golang.org/src/pkg/syscall/types_linux.go
// http://golang.org/src/pkg/syscall/types_dragonfly.go
// https://github.com/DragonFlyBSD/DragonFlyBSD/blob/master/sys/sys/ucred.h
const (
SizeofUcred = C.sizeof_struct_ucred
)
// http://golang.org/src/pkg/syscall/sockcmsg_unix.go
func cmsgAlignOf(salen int) int {
// From http://golang.org/src/pkg/syscall/sockcmsg_unix.go
//salign := sizeofPtr
// NOTE: It seems like 64-bit Darwin and DragonFly BSD kernels
// still require 32-bit aligned access to network subsystem.
//if darwin64Bit || dragonfly64Bit {
// salign = 4
//}
salign := 4
return (salen + salign - 1) & ^(salign - 1)
}
// http://golang.org/src/pkg/syscall/sockcmsg_unix.go
func cmsgData(h *syscall.Cmsghdr) unsafe.Pointer {
return unsafe.Pointer(uintptr(unsafe.Pointer(h)) + uintptr(cmsgAlignOf(syscall.SizeofCmsghdr)))
}
// http://golang.org/src/pkg/syscall/sockcmsg_linux.go
// UnixCredentials encodes credentials into a socket control message
// for sending to another process. This can be used for
// authentication.
func UnixCredentials(ucred *Ucred) []byte {
b := make([]byte, syscall.CmsgSpace(SizeofUcred))
h := (*syscall.Cmsghdr)(unsafe.Pointer(&b[0]))
h.Level = syscall.SOL_SOCKET
h.Type = syscall.SCM_CREDS
h.SetLen(syscall.CmsgLen(SizeofUcred))
*((*Ucred)(cmsgData(h))) = *ucred
return b
}
// http://golang.org/src/pkg/syscall/sockcmsg_linux.go
// ParseUnixCredentials decodes a socket control message that contains
// credentials in a Ucred structure. To receive such a message, the
// SO_PASSCRED option must be enabled on the socket.
func ParseUnixCredentials(m *syscall.SocketControlMessage) (*Ucred, error) {
if m.Header.Level != syscall.SOL_SOCKET {
return nil, syscall.EINVAL
}
if m.Header.Type != syscall.SCM_CREDS {
return nil, syscall.EINVAL
}
ucred := *(*Ucred)(unsafe.Pointer(&m.Data[0]))
return &ucred, nil
}
func (t *unixTransport) SendNullByte() error {
ucred := &Ucred{Pid: int32(os.Getpid()), Uid: uint32(os.Getuid()), Gid: uint32(os.Getgid())}
b := UnixCredentials(ucred)
_, oobn, err := t.UnixConn.WriteMsgUnix([]byte{0}, b, nil)
if err != nil {
return err
}
if oobn != len(b) {
return io.ErrShortWrite
}
return nil
}

vendor/github.com/godbus/dbus/transport_unixcred_linux.go generated vendored Normal file

@ -0,0 +1,25 @@
// The UnixCredentials system call is currently only implemented on Linux
// http://golang.org/src/pkg/syscall/sockcmsg_linux.go
// https://golang.org/s/go1.4-syscall
// http://code.google.com/p/go/source/browse/unix/sockcmsg_linux.go?repo=sys
package dbus
import (
"io"
"os"
"syscall"
)
func (t *unixTransport) SendNullByte() error {
ucred := &syscall.Ucred{Pid: int32(os.Getpid()), Uid: uint32(os.Getuid()), Gid: uint32(os.Getgid())}
b := syscall.UnixCredentials(ucred)
_, oobn, err := t.UnixConn.WriteMsgUnix([]byte{0}, b, nil)
if err != nil {
return err
}
if oobn != len(b) {
return io.ErrShortWrite
}
return nil
}

vendor/github.com/godbus/dbus/variant.go generated vendored Normal file

@ -0,0 +1,139 @@
package dbus
import (
"bytes"
"fmt"
"reflect"
"sort"
"strconv"
)
// Variant represents the D-Bus variant type.
type Variant struct {
sig Signature
value interface{}
}
// MakeVariant converts the given value to a Variant. It panics if v cannot be
// represented as a D-Bus type.
func MakeVariant(v interface{}) Variant {
return Variant{SignatureOf(v), v}
}
// ParseVariant parses the given string as a variant as described at
// https://developer.gnome.org/glib/unstable/gvariant-text.html. If sig is not
// empty, it is taken to be the expected signature for the variant.
func ParseVariant(s string, sig Signature) (Variant, error) {
tokens := varLex(s)
p := &varParser{tokens: tokens}
n, err := varMakeNode(p)
if err != nil {
return Variant{}, err
}
if sig.str == "" {
sig, err = varInfer(n)
if err != nil {
return Variant{}, err
}
}
v, err := n.Value(sig)
if err != nil {
return Variant{}, err
}
return MakeVariant(v), nil
}
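// Sketch of building and parsing variants; the text form follows the GVariant
// text format referenced above.
//
//	v := MakeVariant([]string{"a", "b"})   // signature "as"
//	s := v.String()                        // ["a", "b"]
//	parsed, err := ParseVariant(`["a", "b"]`, Signature{})
//	// on success, parsed.Value() is []string{"a", "b"}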
// format returns a formatted version of v and whether this string can be parsed
// unambiguously.
func (v Variant) format() (string, bool) {
switch v.sig.str[0] {
case 'b', 'i':
return fmt.Sprint(v.value), true
case 'n', 'q', 'u', 'x', 't', 'd', 'h':
return fmt.Sprint(v.value), false
case 's':
return strconv.Quote(v.value.(string)), true
case 'o':
return strconv.Quote(string(v.value.(ObjectPath))), false
case 'g':
return strconv.Quote(v.value.(Signature).str), false
case 'v':
s, unamb := v.value.(Variant).format()
if !unamb {
return "<@" + v.value.(Variant).sig.str + " " + s + ">", true
}
return "<" + s + ">", true
case 'y':
return fmt.Sprintf("%#x", v.value.(byte)), false
}
rv := reflect.ValueOf(v.value)
switch rv.Kind() {
case reflect.Slice:
if rv.Len() == 0 {
return "[]", false
}
unamb := true
buf := bytes.NewBuffer([]byte("["))
for i := 0; i < rv.Len(); i++ {
// TODO: slooow
s, b := MakeVariant(rv.Index(i).Interface()).format()
unamb = unamb && b
buf.WriteString(s)
if i != rv.Len()-1 {
buf.WriteString(", ")
}
}
buf.WriteByte(']')
return buf.String(), unamb
case reflect.Map:
if rv.Len() == 0 {
return "{}", false
}
unamb := true
var buf bytes.Buffer
kvs := make([]string, rv.Len())
for i, k := range rv.MapKeys() {
s, b := MakeVariant(k.Interface()).format()
unamb = unamb && b
buf.Reset()
buf.WriteString(s)
buf.WriteString(": ")
s, b = MakeVariant(rv.MapIndex(k).Interface()).format()
unamb = unamb && b
buf.WriteString(s)
kvs[i] = buf.String()
}
buf.Reset()
buf.WriteByte('{')
sort.Strings(kvs)
for i, kv := range kvs {
if i > 0 {
buf.WriteString(", ")
}
buf.WriteString(kv)
}
buf.WriteByte('}')
return buf.String(), unamb
}
return `"INVALID"`, true
}
// Signature returns the D-Bus signature of the underlying value of v.
func (v Variant) Signature() Signature {
return v.sig
}
// String returns the string representation of the underlying value of v as
// described at https://developer.gnome.org/glib/unstable/gvariant-text.html.
func (v Variant) String() string {
s, unamb := v.format()
if !unamb {
return "@" + v.sig.str + " " + s
}
return s
}
// Value returns the underlying value of v.
func (v Variant) Value() interface{} {
return v.value
}

vendor/github.com/godbus/dbus/variant_lexer.go generated vendored Normal file

@ -0,0 +1,284 @@
package dbus
import (
"fmt"
"strings"
"unicode"
"unicode/utf8"
)
// Heavily inspired by the lexer from text/template.
type varToken struct {
typ varTokenType
val string
}
type varTokenType byte
const (
tokEOF varTokenType = iota
tokError
tokNumber
tokString
tokBool
tokArrayStart
tokArrayEnd
tokDictStart
tokDictEnd
tokVariantStart
tokVariantEnd
tokComma
tokColon
tokType
tokByteString
)
type varLexer struct {
input string
start int
pos int
width int
tokens []varToken
}
type lexState func(*varLexer) lexState
func varLex(s string) []varToken {
l := &varLexer{input: s}
l.run()
return l.tokens
}
func (l *varLexer) accept(valid string) bool {
if strings.IndexRune(valid, l.next()) >= 0 {
return true
}
l.backup()
return false
}
func (l *varLexer) backup() {
l.pos -= l.width
}
func (l *varLexer) emit(t varTokenType) {
l.tokens = append(l.tokens, varToken{t, l.input[l.start:l.pos]})
l.start = l.pos
}
func (l *varLexer) errorf(format string, v ...interface{}) lexState {
l.tokens = append(l.tokens, varToken{
tokError,
fmt.Sprintf(format, v...),
})
return nil
}
func (l *varLexer) ignore() {
l.start = l.pos
}
func (l *varLexer) next() rune {
var r rune
if l.pos >= len(l.input) {
l.width = 0
return -1
}
r, l.width = utf8.DecodeRuneInString(l.input[l.pos:])
l.pos += l.width
return r
}
func (l *varLexer) run() {
for state := varLexNormal; state != nil; {
state = state(l)
}
}
func (l *varLexer) peek() rune {
r := l.next()
l.backup()
return r
}
func varLexNormal(l *varLexer) lexState {
for {
r := l.next()
switch {
case r == -1:
l.emit(tokEOF)
return nil
case r == '[':
l.emit(tokArrayStart)
case r == ']':
l.emit(tokArrayEnd)
case r == '{':
l.emit(tokDictStart)
case r == '}':
l.emit(tokDictEnd)
case r == '<':
l.emit(tokVariantStart)
case r == '>':
l.emit(tokVariantEnd)
case r == ':':
l.emit(tokColon)
case r == ',':
l.emit(tokComma)
case r == '\'' || r == '"':
l.backup()
return varLexString
case r == '@':
l.backup()
return varLexType
case unicode.IsSpace(r):
l.ignore()
case unicode.IsNumber(r) || r == '+' || r == '-':
l.backup()
return varLexNumber
case r == 'b':
pos := l.start
if n := l.peek(); n == '"' || n == '\'' {
return varLexByteString
}
// not a byte string; try to parse it as a type or bool below
l.pos = pos + 1
l.width = 1
fallthrough
default:
// either a bool or a type. Try bools first.
l.backup()
if l.pos+4 <= len(l.input) {
if l.input[l.pos:l.pos+4] == "true" {
l.pos += 4
l.emit(tokBool)
continue
}
}
if l.pos+5 <= len(l.input) {
if l.input[l.pos:l.pos+5] == "false" {
l.pos += 5
l.emit(tokBool)
continue
}
}
// must be a type.
return varLexType
}
}
}
var varTypeMap = map[string]string{
"boolean": "b",
"byte": "y",
"int16": "n",
"uint16": "q",
"int32": "i",
"uint32": "u",
"int64": "x",
"uint64": "t",
"double": "f",
"string": "s",
"objectpath": "o",
"signature": "g",
}
func varLexByteString(l *varLexer) lexState {
q := l.next()
Loop:
for {
switch l.next() {
case '\\':
if r := l.next(); r != -1 {
break
}
fallthrough
case -1:
return l.errorf("unterminated bytestring")
case q:
break Loop
}
}
l.emit(tokByteString)
return varLexNormal
}
func varLexNumber(l *varLexer) lexState {
l.accept("+-")
digits := "0123456789"
if l.accept("0") {
if l.accept("x") {
digits = "0123456789abcdefABCDEF"
} else {
digits = "01234567"
}
}
for strings.IndexRune(digits, l.next()) >= 0 {
}
l.backup()
if l.accept(".") {
for strings.IndexRune(digits, l.next()) >= 0 {
}
l.backup()
}
if l.accept("eE") {
l.accept("+-")
for strings.IndexRune("0123456789", l.next()) >= 0 {
}
l.backup()
}
if r := l.peek(); unicode.IsLetter(r) {
l.next()
return l.errorf("bad number syntax: %q", l.input[l.start:l.pos])
}
l.emit(tokNumber)
return varLexNormal
}
func varLexString(l *varLexer) lexState {
q := l.next()
Loop:
for {
switch l.next() {
case '\\':
if r := l.next(); r != -1 {
break
}
fallthrough
case -1:
return l.errorf("unterminated string")
case q:
break Loop
}
}
l.emit(tokString)
return varLexNormal
}
func varLexType(l *varLexer) lexState {
at := l.accept("@")
for {
r := l.next()
if r == -1 {
break
}
if unicode.IsSpace(r) {
l.backup()
break
}
}
if at {
if _, err := ParseSignature(l.input[l.start+1 : l.pos]); err != nil {
return l.errorf("%s", err)
}
} else {
if _, ok := varTypeMap[l.input[l.start:l.pos]]; ok {
l.emit(tokType)
return varLexNormal
}
return l.errorf("unrecognized type %q", l.input[l.start:l.pos])
}
l.emit(tokType)
return varLexNormal
}

vendor/github.com/godbus/dbus/variant_parser.go generated vendored Normal file

@ -0,0 +1,817 @@
package dbus
import (
"bytes"
"errors"
"fmt"
"io"
"reflect"
"strconv"
"strings"
"unicode/utf8"
)
type varParser struct {
tokens []varToken
i int
}
func (p *varParser) backup() {
p.i--
}
func (p *varParser) next() varToken {
if p.i < len(p.tokens) {
t := p.tokens[p.i]
p.i++
return t
}
return varToken{typ: tokEOF}
}
type varNode interface {
Infer() (Signature, error)
String() string
Sigs() sigSet
Value(Signature) (interface{}, error)
}
func varMakeNode(p *varParser) (varNode, error) {
var sig Signature
for {
t := p.next()
switch t.typ {
case tokEOF:
return nil, io.ErrUnexpectedEOF
case tokError:
return nil, errors.New(t.val)
case tokNumber:
return varMakeNumNode(t, sig)
case tokString:
return varMakeStringNode(t, sig)
case tokBool:
if sig.str != "" && sig.str != "b" {
return nil, varTypeError{t.val, sig}
}
b, err := strconv.ParseBool(t.val)
if err != nil {
return nil, err
}
return boolNode(b), nil
case tokArrayStart:
return varMakeArrayNode(p, sig)
case tokVariantStart:
return varMakeVariantNode(p, sig)
case tokDictStart:
return varMakeDictNode(p, sig)
case tokType:
if sig.str != "" {
return nil, errors.New("unexpected type annotation")
}
if t.val[0] == '@' {
sig.str = t.val[1:]
} else {
sig.str = varTypeMap[t.val]
}
case tokByteString:
if sig.str != "" && sig.str != "ay" {
return nil, varTypeError{t.val, sig}
}
b, err := varParseByteString(t.val)
if err != nil {
return nil, err
}
return byteStringNode(b), nil
default:
return nil, fmt.Errorf("unexpected %q", t.val)
}
}
}
type varTypeError struct {
val string
sig Signature
}
func (e varTypeError) Error() string {
return fmt.Sprintf("dbus: can't parse %q as type %q", e.val, e.sig.str)
}
type sigSet map[Signature]bool
func (s sigSet) Empty() bool {
return len(s) == 0
}
func (s sigSet) Intersect(s2 sigSet) sigSet {
r := make(sigSet)
for k := range s {
if s2[k] {
r[k] = true
}
}
return r
}
func (s sigSet) Single() (Signature, bool) {
if len(s) == 1 {
for k := range s {
return k, true
}
}
return Signature{}, false
}
func (s sigSet) ToArray() sigSet {
r := make(sigSet, len(s))
for k := range s {
r[Signature{"a" + k.str}] = true
}
return r
}
type numNode struct {
sig Signature
str string
val interface{}
}
var numSigSet = sigSet{
Signature{"y"}: true,
Signature{"n"}: true,
Signature{"q"}: true,
Signature{"i"}: true,
Signature{"u"}: true,
Signature{"x"}: true,
Signature{"t"}: true,
Signature{"d"}: true,
}
func (n numNode) Infer() (Signature, error) {
if strings.ContainsAny(n.str, ".e") {
return Signature{"d"}, nil
}
return Signature{"i"}, nil
}
func (n numNode) String() string {
return n.str
}
func (n numNode) Sigs() sigSet {
if n.sig.str != "" {
return sigSet{n.sig: true}
}
if strings.ContainsAny(n.str, ".e") {
return sigSet{Signature{"d"}: true}
}
return numSigSet
}
func (n numNode) Value(sig Signature) (interface{}, error) {
if n.sig.str != "" && n.sig != sig {
return nil, varTypeError{n.str, sig}
}
if n.val != nil {
return n.val, nil
}
return varNumAs(n.str, sig)
}
func varMakeNumNode(tok varToken, sig Signature) (varNode, error) {
if sig.str == "" {
return numNode{str: tok.val}, nil
}
num, err := varNumAs(tok.val, sig)
if err != nil {
return nil, err
}
return numNode{sig: sig, val: num}, nil
}
func varNumAs(s string, sig Signature) (interface{}, error) {
isUnsigned := false
size := 32
switch sig.str {
case "n":
size = 16
case "i":
case "x":
size = 64
case "y":
size = 8
isUnsigned = true
case "q":
size = 16
isUnsigned = true
case "u":
isUnsigned = true
case "t":
size = 64
isUnsigned = true
case "d":
d, err := strconv.ParseFloat(s, 64)
if err != nil {
return nil, err
}
return d, nil
default:
return nil, varTypeError{s, sig}
}
base := 10
if strings.HasPrefix(s, "0x") {
base = 16
s = s[2:]
}
if strings.HasPrefix(s, "0") && len(s) != 1 {
base = 8
s = s[1:]
}
if isUnsigned {
i, err := strconv.ParseUint(s, base, size)
if err != nil {
return nil, err
}
var v interface{} = i
switch sig.str {
case "y":
v = byte(i)
case "q":
v = uint16(i)
case "u":
v = uint32(i)
}
return v, nil
}
i, err := strconv.ParseInt(s, base, size)
if err != nil {
return nil, err
}
var v interface{} = i
switch sig.str {
case "n":
v = int16(i)
case "i":
v = int32(i)
}
return v, nil
}
type stringNode struct {
sig Signature
str string // parsed
val interface{} // has correct type
}
var stringSigSet = sigSet{
Signature{"s"}: true,
Signature{"g"}: true,
Signature{"o"}: true,
}
func (n stringNode) Infer() (Signature, error) {
return Signature{"s"}, nil
}
func (n stringNode) String() string {
return n.str
}
func (n stringNode) Sigs() sigSet {
if n.sig.str != "" {
return sigSet{n.sig: true}
}
return stringSigSet
}
func (n stringNode) Value(sig Signature) (interface{}, error) {
if n.sig.str != "" && n.sig != sig {
return nil, varTypeError{n.str, sig}
}
if n.val != nil {
return n.val, nil
}
switch {
case sig.str == "g":
return Signature{n.str}, nil
case sig.str == "o":
return ObjectPath(n.str), nil
case sig.str == "s":
return n.str, nil
default:
return nil, varTypeError{n.str, sig}
}
}
func varMakeStringNode(tok varToken, sig Signature) (varNode, error) {
if sig.str != "" && sig.str != "s" && sig.str != "g" && sig.str != "o" {
return nil, fmt.Errorf("invalid type %q for string", sig.str)
}
s, err := varParseString(tok.val)
if err != nil {
return nil, err
}
n := stringNode{str: s}
if sig.str == "" {
return stringNode{str: s}, nil
}
n.sig = sig
switch sig.str {
case "o":
n.val = ObjectPath(s)
case "g":
n.val = Signature{s}
case "s":
n.val = s
}
return n, nil
}
func varParseString(s string) (string, error) {
// quotes are guaranteed to be there
s = s[1 : len(s)-1]
buf := new(bytes.Buffer)
for len(s) != 0 {
r, size := utf8.DecodeRuneInString(s)
if r == utf8.RuneError && size == 1 {
return "", errors.New("invalid UTF-8")
}
s = s[size:]
if r != '\\' {
buf.WriteRune(r)
continue
}
r, size = utf8.DecodeRuneInString(s)
if r == utf8.RuneError && size == 1 {
return "", errors.New("invalid UTF-8")
}
s = s[size:]
switch r {
case 'a':
buf.WriteRune(0x7)
case 'b':
buf.WriteRune(0x8)
case 'f':
buf.WriteRune(0xc)
case 'n':
buf.WriteRune('\n')
case 'r':
buf.WriteRune('\r')
case 't':
buf.WriteRune('\t')
case '\n':
case 'u':
if len(s) < 4 {
return "", errors.New("short unicode escape")
}
r, err := strconv.ParseUint(s[:4], 16, 32)
if err != nil {
return "", err
}
buf.WriteRune(rune(r))
s = s[4:]
case 'U':
if len(s) < 8 {
return "", errors.New("short unicode escape")
}
r, err := strconv.ParseUint(s[:8], 16, 32)
if err != nil {
return "", err
}
buf.WriteRune(rune(r))
s = s[8:]
default:
buf.WriteRune(r)
}
}
return buf.String(), nil
}
var boolSigSet = sigSet{Signature{"b"}: true}
type boolNode bool
func (boolNode) Infer() (Signature, error) {
return Signature{"b"}, nil
}
func (b boolNode) String() string {
if b {
return "true"
}
return "false"
}
func (boolNode) Sigs() sigSet {
return boolSigSet
}
func (b boolNode) Value(sig Signature) (interface{}, error) {
if sig.str != "b" {
return nil, varTypeError{b.String(), sig}
}
return bool(b), nil
}
type arrayNode struct {
set sigSet
children []varNode
val interface{}
}
func (n arrayNode) Infer() (Signature, error) {
for _, v := range n.children {
csig, err := varInfer(v)
if err != nil {
continue
}
return Signature{"a" + csig.str}, nil
}
return Signature{}, fmt.Errorf("can't infer type for %q", n.String())
}
func (n arrayNode) String() string {
s := "["
for i, v := range n.children {
s += v.String()
if i != len(n.children)-1 {
s += ", "
}
}
return s + "]"
}
func (n arrayNode) Sigs() sigSet {
return n.set
}
func (n arrayNode) Value(sig Signature) (interface{}, error) {
if n.set.Empty() {
// no type information whatsoever, so this must be an empty slice
return reflect.MakeSlice(typeFor(sig.str), 0, 0).Interface(), nil
}
if !n.set[sig] {
return nil, varTypeError{n.String(), sig}
}
s := reflect.MakeSlice(typeFor(sig.str), len(n.children), len(n.children))
for i, v := range n.children {
rv, err := v.Value(Signature{sig.str[1:]})
if err != nil {
return nil, err
}
s.Index(i).Set(reflect.ValueOf(rv))
}
return s.Interface(), nil
}
func varMakeArrayNode(p *varParser, sig Signature) (varNode, error) {
var n arrayNode
if sig.str != "" {
n.set = sigSet{sig: true}
}
if t := p.next(); t.typ == tokArrayEnd {
return n, nil
} else {
p.backup()
}
Loop:
for {
t := p.next()
switch t.typ {
case tokEOF:
return nil, io.ErrUnexpectedEOF
case tokError:
return nil, errors.New(t.val)
}
p.backup()
cn, err := varMakeNode(p)
if err != nil {
return nil, err
}
if cset := cn.Sigs(); !cset.Empty() {
if n.set.Empty() {
n.set = cset.ToArray()
} else {
nset := cset.ToArray().Intersect(n.set)
if nset.Empty() {
return nil, fmt.Errorf("can't parse %q with given type information", cn.String())
}
n.set = nset
}
}
n.children = append(n.children, cn)
switch t := p.next(); t.typ {
case tokEOF:
return nil, io.ErrUnexpectedEOF
case tokError:
return nil, errors.New(t.val)
case tokArrayEnd:
break Loop
case tokComma:
continue
default:
return nil, fmt.Errorf("unexpected %q", t.val)
}
}
return n, nil
}
type variantNode struct {
n varNode
}
var variantSet = sigSet{
Signature{"v"}: true,
}
func (variantNode) Infer() (Signature, error) {
return Signature{"v"}, nil
}
func (n variantNode) String() string {
return "<" + n.n.String() + ">"
}
func (variantNode) Sigs() sigSet {
return variantSet
}
func (n variantNode) Value(sig Signature) (interface{}, error) {
if sig.str != "v" {
return nil, varTypeError{n.String(), sig}
}
sig, err := varInfer(n.n)
if err != nil {
return nil, err
}
v, err := n.n.Value(sig)
if err != nil {
return nil, err
}
return MakeVariant(v), nil
}
func varMakeVariantNode(p *varParser, sig Signature) (varNode, error) {
n, err := varMakeNode(p)
if err != nil {
return nil, err
}
if t := p.next(); t.typ != tokVariantEnd {
return nil, fmt.Errorf("unexpected %q", t.val)
}
vn := variantNode{n}
if sig.str != "" && sig.str != "v" {
return nil, varTypeError{vn.String(), sig}
}
return variantNode{n}, nil
}
type dictEntry struct {
key, val varNode
}
type dictNode struct {
kset, vset sigSet
children []dictEntry
val interface{}
}
func (n dictNode) Infer() (Signature, error) {
for _, v := range n.children {
ksig, err := varInfer(v.key)
if err != nil {
continue
}
vsig, err := varInfer(v.val)
if err != nil {
continue
}
return Signature{"a{" + ksig.str + vsig.str + "}"}, nil
}
return Signature{}, fmt.Errorf("can't infer type for %q", n.String())
}
func (n dictNode) String() string {
s := "{"
for i, v := range n.children {
s += v.key.String() + ": " + v.val.String()
if i != len(n.children)-1 {
s += ", "
}
}
return s + "}"
}
func (n dictNode) Sigs() sigSet {
r := sigSet{}
for k := range n.kset {
for v := range n.vset {
sig := "a{" + k.str + v.str + "}"
r[Signature{sig}] = true
}
}
return r
}
func (n dictNode) Value(sig Signature) (interface{}, error) {
set := n.Sigs()
if set.Empty() {
// no type information -> empty dict
return reflect.MakeMap(typeFor(sig.str)).Interface(), nil
}
if !set[sig] {
return nil, varTypeError{n.String(), sig}
}
m := reflect.MakeMap(typeFor(sig.str))
ksig := Signature{sig.str[2:3]}
vsig := Signature{sig.str[3 : len(sig.str)-1]}
for _, v := range n.children {
kv, err := v.key.Value(ksig)
if err != nil {
return nil, err
}
vv, err := v.val.Value(vsig)
if err != nil {
return nil, err
}
m.SetMapIndex(reflect.ValueOf(kv), reflect.ValueOf(vv))
}
return m.Interface(), nil
}
func varMakeDictNode(p *varParser, sig Signature) (varNode, error) {
var n dictNode
if sig.str != "" {
if len(sig.str) < 5 {
return nil, fmt.Errorf("invalid signature %q for dict type", sig)
}
ksig := Signature{string(sig.str[2])}
vsig := Signature{sig.str[3 : len(sig.str)-1]}
n.kset = sigSet{ksig: true}
n.vset = sigSet{vsig: true}
}
if t := p.next(); t.typ == tokDictEnd {
return n, nil
} else {
p.backup()
}
Loop:
for {
t := p.next()
switch t.typ {
case tokEOF:
return nil, io.ErrUnexpectedEOF
case tokError:
return nil, errors.New(t.val)
}
p.backup()
kn, err := varMakeNode(p)
if err != nil {
return nil, err
}
if kset := kn.Sigs(); !kset.Empty() {
if n.kset.Empty() {
n.kset = kset
} else {
n.kset = kset.Intersect(n.kset)
if n.kset.Empty() {
return nil, fmt.Errorf("can't parse %q with given type information", kn.String())
}
}
}
t = p.next()
switch t.typ {
case tokEOF:
return nil, io.ErrUnexpectedEOF
case tokError:
return nil, errors.New(t.val)
case tokColon:
default:
return nil, fmt.Errorf("unexpected %q", t.val)
}
t = p.next()
switch t.typ {
case tokEOF:
return nil, io.ErrUnexpectedEOF
case tokError:
return nil, errors.New(t.val)
}
p.backup()
vn, err := varMakeNode(p)
if err != nil {
return nil, err
}
if vset := vn.Sigs(); !vset.Empty() {
if n.vset.Empty() {
n.vset = vset
} else {
n.vset = n.vset.Intersect(vset)
if n.vset.Empty() {
return nil, fmt.Errorf("can't parse %q with given type information", vn.String())
}
}
}
n.children = append(n.children, dictEntry{kn, vn})
t = p.next()
switch t.typ {
case tokEOF:
return nil, io.ErrUnexpectedEOF
case tokError:
return nil, errors.New(t.val)
case tokDictEnd:
break Loop
case tokComma:
continue
default:
return nil, fmt.Errorf("unexpected %q", t.val)
}
}
return n, nil
}
type byteStringNode []byte
var byteStringSet = sigSet{
Signature{"ay"}: true,
}
func (byteStringNode) Infer() (Signature, error) {
return Signature{"ay"}, nil
}
func (b byteStringNode) String() string {
return string(b)
}
func (b byteStringNode) Sigs() sigSet {
return byteStringSet
}
func (b byteStringNode) Value(sig Signature) (interface{}, error) {
if sig.str != "ay" {
return nil, varTypeError{b.String(), sig}
}
return []byte(b), nil
}
func varParseByteString(s string) ([]byte, error) {
// quotes and b at start are guaranteed to be there
b := make([]byte, 0, 1)
s = s[2 : len(s)-1]
for len(s) != 0 {
c := s[0]
s = s[1:]
if c != '\\' {
b = append(b, c)
continue
}
c = s[0]
s = s[1:]
switch c {
case 'a':
b = append(b, 0x7)
case 'b':
b = append(b, 0x8)
case 'f':
b = append(b, 0xc)
case 'n':
b = append(b, '\n')
case 'r':
b = append(b, '\r')
case 't':
b = append(b, '\t')
case 'x':
if len(s) < 2 {
return nil, errors.New("short escape")
}
n, err := strconv.ParseUint(s[:2], 16, 8)
if err != nil {
return nil, err
}
b = append(b, byte(n))
s = s[2:]
case '0':
if len(s) < 3 {
return nil, errors.New("short escape")
}
n, err := strconv.ParseUint(s[:3], 8, 8)
if err != nil {
return nil, err
}
b = append(b, byte(n))
s = s[3:]
default:
b = append(b, c)
}
}
return append(b, 0), nil
}
func varInfer(n varNode) (Signature, error) {
if sig, ok := n.Sigs().Single(); ok {
return sig, nil
}
return n.Infer()
}

vendor/github.com/golang/protobuf/LICENSE generated vendored Normal file

@ -0,0 +1,31 @@
Go support for Protocol Buffers - Google's data interchange format
Copyright 2010 The Go Authors. All rights reserved.
https://github.com/golang/protobuf
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are
met:
* Redistributions of source code must retain the above copyright
notice, this list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above
copyright notice, this list of conditions and the following disclaimer
in the documentation and/or other materials provided with the
distribution.
* Neither the name of Google Inc. nor the names of its
contributors may be used to endorse or promote products derived from
this software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

vendor/github.com/golang/protobuf/proto/Makefile generated vendored Normal file

@ -0,0 +1,43 @@
# Go support for Protocol Buffers - Google's data interchange format
#
# Copyright 2010 The Go Authors. All rights reserved.
# https://github.com/golang/protobuf
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
install:
go install
test: install generate-test-pbs
go test
generate-test-pbs:
make install
make -C testdata
protoc --go_out=Mtestdata/test.proto=github.com/golang/protobuf/proto/testdata:. proto3_proto/proto3.proto
make

vendor/github.com/golang/protobuf/proto/clone.go generated vendored Normal file

@@ -0,0 +1,223 @@
// Go support for Protocol Buffers - Google's data interchange format
//
// Copyright 2011 The Go Authors. All rights reserved.
// https://github.com/golang/protobuf
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above
// copyright notice, this list of conditions and the following disclaimer
// in the documentation and/or other materials provided with the
// distribution.
// * Neither the name of Google Inc. nor the names of its
// contributors may be used to endorse or promote products derived from
// this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
// Protocol buffer deep copy and merge.
// TODO: RawMessage.
package proto
import (
"log"
"reflect"
"strings"
)
// Clone returns a deep copy of a protocol buffer.
func Clone(pb Message) Message {
in := reflect.ValueOf(pb)
if in.IsNil() {
return pb
}
out := reflect.New(in.Type().Elem())
// out is empty so a merge is a deep copy.
mergeStruct(out.Elem(), in.Elem())
return out.Interface().(Message)
}
// Merge merges src into dst.
// Required and optional fields that are set in src will be set to that value in dst.
// Elements of repeated fields will be appended.
// Merge panics if src and dst are not the same type, or if dst is nil.
func Merge(dst, src Message) {
in := reflect.ValueOf(src)
out := reflect.ValueOf(dst)
if out.IsNil() {
panic("proto: nil destination")
}
if in.Type() != out.Type() {
// Explicit test prior to mergeStruct so that mistyped nils will fail
panic("proto: type mismatch")
}
if in.IsNil() {
// Merging nil into non-nil is a quiet no-op
return
}
mergeStruct(out.Elem(), in.Elem())
}
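As a usage sketch of Clone and Merge, the snippet below uses a minimal hand-written message in place of generated code; the Example type and its tags are hypothetical, but the Clone and Merge calls are the package's real API:

package main

import (
	"fmt"

	"github.com/golang/protobuf/proto"
)

// Example is a hand-written stand-in for a generated message (illustration only).
type Example struct {
	Name *string  `protobuf:"bytes,1,opt,name=name"`
	Tags []string `protobuf:"bytes,2,rep,name=tags"`
}

func (m *Example) Reset()         { *m = Example{} }
func (m *Example) String() string { return proto.CompactTextString(m) }
func (*Example) ProtoMessage()    {}

func main() {
	dst := &Example{Name: proto.String("a"), Tags: []string{"x"}}
	src := &Example{Name: proto.String("b"), Tags: []string{"y"}}

	cp := proto.Clone(dst).(*Example) // deep copy; unaffected by the merge below

	proto.Merge(dst, src)                      // set scalar in src wins, repeated fields append
	fmt.Println(*dst.Name, dst.Tags, *cp.Name) // b [x y] a
}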
func mergeStruct(out, in reflect.Value) {
sprop := GetProperties(in.Type())
for i := 0; i < in.NumField(); i++ {
f := in.Type().Field(i)
if strings.HasPrefix(f.Name, "XXX_") {
continue
}
mergeAny(out.Field(i), in.Field(i), false, sprop.Prop[i])
}
if emIn, ok := in.Addr().Interface().(extendableProto); ok {
emOut := out.Addr().Interface().(extendableProto)
mergeExtension(emOut.ExtensionMap(), emIn.ExtensionMap())
}
uf := in.FieldByName("XXX_unrecognized")
if !uf.IsValid() {
return
}
uin := uf.Bytes()
if len(uin) > 0 {
out.FieldByName("XXX_unrecognized").SetBytes(append([]byte(nil), uin...))
}
}
// mergeAny performs a merge between two values of the same type.
// viaPtr indicates whether the values were indirected through a pointer (implying proto2).
// prop is set if this is a struct field (it may be nil).
func mergeAny(out, in reflect.Value, viaPtr bool, prop *Properties) {
if in.Type() == protoMessageType {
if !in.IsNil() {
if out.IsNil() {
out.Set(reflect.ValueOf(Clone(in.Interface().(Message))))
} else {
Merge(out.Interface().(Message), in.Interface().(Message))
}
}
return
}
switch in.Kind() {
case reflect.Bool, reflect.Float32, reflect.Float64, reflect.Int32, reflect.Int64,
reflect.String, reflect.Uint32, reflect.Uint64:
if !viaPtr && isProto3Zero(in) {
return
}
out.Set(in)
case reflect.Interface:
// Probably a oneof field; copy non-nil values.
if in.IsNil() {
return
}
// Allocate destination if it is not set, or set to a different type.
// Otherwise we will merge as normal.
if out.IsNil() || out.Elem().Type() != in.Elem().Type() {
out.Set(reflect.New(in.Elem().Elem().Type())) // interface -> *T -> T -> new(T)
}
mergeAny(out.Elem(), in.Elem(), false, nil)
case reflect.Map:
if in.Len() == 0 {
return
}
if out.IsNil() {
out.Set(reflect.MakeMap(in.Type()))
}
// For maps with value types of *T or []byte we need to deep copy each value.
elemKind := in.Type().Elem().Kind()
for _, key := range in.MapKeys() {
var val reflect.Value
switch elemKind {
case reflect.Ptr:
val = reflect.New(in.Type().Elem().Elem())
mergeAny(val, in.MapIndex(key), false, nil)
case reflect.Slice:
val = in.MapIndex(key)
val = reflect.ValueOf(append([]byte{}, val.Bytes()...))
default:
val = in.MapIndex(key)
}
out.SetMapIndex(key, val)
}
case reflect.Ptr:
if in.IsNil() {
return
}
if out.IsNil() {
out.Set(reflect.New(in.Elem().Type()))
}
mergeAny(out.Elem(), in.Elem(), true, nil)
case reflect.Slice:
if in.IsNil() {
return
}
if in.Type().Elem().Kind() == reflect.Uint8 {
// []byte is a scalar bytes field, not a repeated field.
// Edge case: if this is in a proto3 message, a zero length
// bytes field is considered the zero value, and should not
// be merged.
if prop != nil && prop.proto3 && in.Len() == 0 {
return
}
// Make a deep copy.
// Append to []byte{} instead of []byte(nil) so that we never end up
// with a nil result.
out.SetBytes(append([]byte{}, in.Bytes()...))
return
}
n := in.Len()
if out.IsNil() {
out.Set(reflect.MakeSlice(in.Type(), 0, n))
}
switch in.Type().Elem().Kind() {
case reflect.Bool, reflect.Float32, reflect.Float64, reflect.Int32, reflect.Int64,
reflect.String, reflect.Uint32, reflect.Uint64:
out.Set(reflect.AppendSlice(out, in))
default:
for i := 0; i < n; i++ {
x := reflect.Indirect(reflect.New(in.Type().Elem()))
mergeAny(x, in.Index(i), false, nil)
out.Set(reflect.Append(out, x))
}
}
case reflect.Struct:
mergeStruct(out, in)
default:
// unknown type, so not a protocol buffer
log.Printf("proto: don't know how to copy %v", in)
}
}
func mergeExtension(out, in map[int32]Extension) {
for extNum, eIn := range in {
eOut := Extension{desc: eIn.desc}
if eIn.value != nil {
v := reflect.New(reflect.TypeOf(eIn.value)).Elem()
mergeAny(v, reflect.ValueOf(eIn.value), false, nil)
eOut.value = v.Interface()
}
if eIn.enc != nil {
eOut.enc = make([]byte, len(eIn.enc))
copy(eOut.enc, eIn.enc)
}
out[extNum] = eOut
}
}

vendor/github.com/golang/protobuf/proto/decode.go generated vendored Normal file

@@ -0,0 +1,867 @@
// Go support for Protocol Buffers - Google's data interchange format
//
// Copyright 2010 The Go Authors. All rights reserved.
// https://github.com/golang/protobuf
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above
// copyright notice, this list of conditions and the following disclaimer
// in the documentation and/or other materials provided with the
// distribution.
// * Neither the name of Google Inc. nor the names of its
// contributors may be used to endorse or promote products derived from
// this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
package proto
/*
* Routines for decoding protocol buffer data to construct in-memory representations.
*/
import (
"errors"
"fmt"
"io"
"os"
"reflect"
)
// errOverflow is returned when an integer is too large to be represented.
var errOverflow = errors.New("proto: integer overflow")
// ErrInternalBadWireType is returned by generated code when an incorrect
// wire type is encountered. It does not get returned to user code.
var ErrInternalBadWireType = errors.New("proto: internal error: bad wiretype for oneof")
// The fundamental decoders that interpret bytes on the wire.
// Those that take integer types all return uint64 and are
// therefore of type valueDecoder.
// DecodeVarint reads a varint-encoded integer from the slice.
// It returns the integer and the number of bytes consumed, or
// zero if there is not enough.
// This is the format for the
// int32, int64, uint32, uint64, bool, and enum
// protocol buffer types.
func DecodeVarint(buf []byte) (x uint64, n int) {
// x, n already 0
for shift := uint(0); shift < 64; shift += 7 {
if n >= len(buf) {
return 0, 0
}
b := uint64(buf[n])
n++
x |= (b & 0x7F) << shift
if (b & 0x80) == 0 {
return x, n
}
}
// The number is too large to represent in a 64-bit value.
return 0, 0
}
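To see the wire format the loop above decodes (7 data bits per byte, least significant group first, high bit set on every byte except the last), here is a short round trip through helpers defined in this package:

package main

import (
	"fmt"

	"github.com/golang/protobuf/proto"
)

func main() {
	b := proto.NewBuffer(nil)
	b.EncodeVarint(300)            // 300 = 0b100101100: low 7 bits first, continuation bit set
	fmt.Printf("% x\n", b.Bytes()) // ac 02

	x, n := proto.DecodeVarint(b.Bytes())
	fmt.Println(x, n) // 300 2
}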
// DecodeVarint reads a varint-encoded integer from the Buffer.
// This is the format for the
// int32, int64, uint32, uint64, bool, and enum
// protocol buffer types.
func (p *Buffer) DecodeVarint() (x uint64, err error) {
// x, err already 0
i := p.index
l := len(p.buf)
for shift := uint(0); shift < 64; shift += 7 {
if i >= l {
err = io.ErrUnexpectedEOF
return
}
b := p.buf[i]
i++
x |= (uint64(b) & 0x7F) << shift
if b < 0x80 {
p.index = i
return
}
}
// The number is too large to represent in a 64-bit value.
err = errOverflow
return
}
// DecodeFixed64 reads a 64-bit integer from the Buffer.
// This is the format for the
// fixed64, sfixed64, and double protocol buffer types.
func (p *Buffer) DecodeFixed64() (x uint64, err error) {
// x, err already 0
i := p.index + 8
if i < 0 || i > len(p.buf) {
err = io.ErrUnexpectedEOF
return
}
p.index = i
x = uint64(p.buf[i-8])
x |= uint64(p.buf[i-7]) << 8
x |= uint64(p.buf[i-6]) << 16
x |= uint64(p.buf[i-5]) << 24
x |= uint64(p.buf[i-4]) << 32
x |= uint64(p.buf[i-3]) << 40
x |= uint64(p.buf[i-2]) << 48
x |= uint64(p.buf[i-1]) << 56
return
}
// DecodeFixed32 reads a 32-bit integer from the Buffer.
// This is the format for the
// fixed32, sfixed32, and float protocol buffer types.
func (p *Buffer) DecodeFixed32() (x uint64, err error) {
// x, err already 0
i := p.index + 4
if i < 0 || i > len(p.buf) {
err = io.ErrUnexpectedEOF
return
}
p.index = i
x = uint64(p.buf[i-4])
x |= uint64(p.buf[i-3]) << 8
x |= uint64(p.buf[i-2]) << 16
x |= uint64(p.buf[i-1]) << 24
return
}
// DecodeZigzag64 reads a zigzag-encoded 64-bit integer
// from the Buffer.
// This is the format used for the sint64 protocol buffer type.
func (p *Buffer) DecodeZigzag64() (x uint64, err error) {
x, err = p.DecodeVarint()
if err != nil {
return
}
x = (x >> 1) ^ uint64((int64(x&1)<<63)>>63)
return
}
// DecodeZigzag32 reads a zigzag-encoded 32-bit integer
// from the Buffer.
// This is the format used for the sint32 protocol buffer type.
func (p *Buffer) DecodeZigzag32() (x uint64, err error) {
x, err = p.DecodeVarint()
if err != nil {
return
}
x = uint64((uint32(x) >> 1) ^ uint32((int32(x&1)<<31)>>31))
return
}
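The two decoders above undo the standard zigzag mapping (0, -1, 1, -2, ... become 0, 1, 2, 3, ...), which keeps small negative numbers short on the wire. A standalone sketch of both directions, using the same decode step as DecodeZigzag64:

package main

import "fmt"

// zigzag64 folds the sign into the low bit; unzigzag64 is the inverse step
// used by DecodeZigzag64 above. Standalone illustration, not part of the package.
func zigzag64(v int64) uint64 { return uint64((v << 1) ^ (v >> 63)) }

func unzigzag64(x uint64) int64 { return int64((x >> 1) ^ uint64((int64(x&1)<<63)>>63)) }

func main() {
	for _, v := range []int64{0, -1, 1, -2, 2, -64} {
		e := zigzag64(v)
		fmt.Printf("%d -> %d -> %d\n", v, e, unzigzag64(e))
	}
	// 0/-1/1/-2/2/-64 encode to 0, 1, 2, 3, 4, 127 and decode back unchanged.
}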
// These are not ValueDecoders: they produce an array of bytes or a string.
// bytes, embedded messages
// DecodeRawBytes reads a count-delimited byte buffer from the Buffer.
// This is the format used for the bytes protocol buffer
// type and for embedded messages.
func (p *Buffer) DecodeRawBytes(alloc bool) (buf []byte, err error) {
n, err := p.DecodeVarint()
if err != nil {
return nil, err
}
nb := int(n)
if nb < 0 {
return nil, fmt.Errorf("proto: bad byte length %d", nb)
}
end := p.index + nb
if end < p.index || end > len(p.buf) {
return nil, io.ErrUnexpectedEOF
}
if !alloc {
// todo: check if can get more uses of alloc=false
buf = p.buf[p.index:end]
p.index += nb
return
}
buf = make([]byte, nb)
copy(buf, p.buf[p.index:])
p.index += nb
return
}
// DecodeStringBytes reads an encoded string from the Buffer.
// This is the format used for the proto2 string type.
func (p *Buffer) DecodeStringBytes() (s string, err error) {
buf, err := p.DecodeRawBytes(false)
if err != nil {
return
}
return string(buf), nil
}
// Skip the next item in the buffer. Its wire type is decoded and presented as an argument.
// If the protocol buffer has extensions, and the field matches, add it as an extension.
// Otherwise, if the XXX_unrecognized field exists, append the skipped data there.
func (o *Buffer) skipAndSave(t reflect.Type, tag, wire int, base structPointer, unrecField field) error {
oi := o.index
err := o.skip(t, tag, wire)
if err != nil {
return err
}
if !unrecField.IsValid() {
return nil
}
ptr := structPointer_Bytes(base, unrecField)
// Add the skipped field to struct field
obuf := o.buf
o.buf = *ptr
o.EncodeVarint(uint64(tag<<3 | wire))
*ptr = append(o.buf, obuf[oi:o.index]...)
o.buf = obuf
return nil
}
// Skip the next item in the buffer. Its wire type is decoded and presented as an argument.
func (o *Buffer) skip(t reflect.Type, tag, wire int) error {
var u uint64
var err error
switch wire {
case WireVarint:
_, err = o.DecodeVarint()
case WireFixed64:
_, err = o.DecodeFixed64()
case WireBytes:
_, err = o.DecodeRawBytes(false)
case WireFixed32:
_, err = o.DecodeFixed32()
case WireStartGroup:
for {
u, err = o.DecodeVarint()
if err != nil {
break
}
fwire := int(u & 0x7)
if fwire == WireEndGroup {
break
}
ftag := int(u >> 3)
err = o.skip(t, ftag, fwire)
if err != nil {
break
}
}
default:
err = fmt.Errorf("proto: can't skip unknown wire type %d for %s", wire, t)
}
return err
}
// Unmarshaler is the interface representing objects that can
// unmarshal themselves. The method should reset the receiver before
// decoding starts. The argument points to data that may be
// overwritten, so implementations should not keep references to the
// buffer.
type Unmarshaler interface {
Unmarshal([]byte) error
}
// Unmarshal parses the protocol buffer representation in buf and places the
// decoded result in pb. If the struct underlying pb does not match
// the data in buf, the results can be unpredictable.
//
// Unmarshal resets pb before starting to unmarshal, so any
// existing data in pb is always removed. Use UnmarshalMerge
// to preserve and append to existing data.
func Unmarshal(buf []byte, pb Message) error {
pb.Reset()
return UnmarshalMerge(buf, pb)
}
// UnmarshalMerge parses the protocol buffer representation in buf and
// writes the decoded result to pb. If the struct underlying pb does not match
// the data in buf, the results can be unpredictable.
//
// UnmarshalMerge merges into existing data in pb.
// Most code should use Unmarshal instead.
func UnmarshalMerge(buf []byte, pb Message) error {
// If the object can unmarshal itself, let it.
if u, ok := pb.(Unmarshaler); ok {
return u.Unmarshal(buf)
}
return NewBuffer(buf).Unmarshal(pb)
}
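The difference between the two entry points matters mostly for repeated fields: Unmarshal clears the destination first, while UnmarshalMerge appends to whatever is already there. A small sketch, using a hand-written stand-in message (hypothetical, not generated code):

package main

import (
	"fmt"

	"github.com/golang/protobuf/proto"
)

type List struct {
	Items []string `protobuf:"bytes,1,rep,name=items"`
}

func (m *List) Reset()         { *m = List{} }
func (m *List) String() string { return proto.CompactTextString(m) }
func (*List) ProtoMessage()    {}

func main() {
	enc, _ := proto.Marshal(&List{Items: []string{"b"}})

	dst := &List{Items: []string{"a"}}
	proto.Unmarshal(enc, dst) // resets dst first
	fmt.Println(dst.Items)    // [b]

	dst = &List{Items: []string{"a"}}
	proto.UnmarshalMerge(enc, dst) // keeps existing data
	fmt.Println(dst.Items)         // [a b]
}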
// DecodeMessage reads a count-delimited message from the Buffer.
func (p *Buffer) DecodeMessage(pb Message) error {
enc, err := p.DecodeRawBytes(false)
if err != nil {
return err
}
return NewBuffer(enc).Unmarshal(pb)
}
// DecodeGroup reads a tag-delimited group from the Buffer.
func (p *Buffer) DecodeGroup(pb Message) error {
typ, base, err := getbase(pb)
if err != nil {
return err
}
return p.unmarshalType(typ.Elem(), GetProperties(typ.Elem()), true, base)
}
// Unmarshal parses the protocol buffer representation in the
// Buffer and places the decoded result in pb. If the struct
// underlying pb does not match the data in the buffer, the results can be
// unpredictable.
func (p *Buffer) Unmarshal(pb Message) error {
// If the object can unmarshal itself, let it.
if u, ok := pb.(Unmarshaler); ok {
err := u.Unmarshal(p.buf[p.index:])
p.index = len(p.buf)
return err
}
typ, base, err := getbase(pb)
if err != nil {
return err
}
err = p.unmarshalType(typ.Elem(), GetProperties(typ.Elem()), false, base)
if collectStats {
stats.Decode++
}
return err
}
// unmarshalType does the work of unmarshaling a structure.
func (o *Buffer) unmarshalType(st reflect.Type, prop *StructProperties, is_group bool, base structPointer) error {
var state errorState
required, reqFields := prop.reqCount, uint64(0)
var err error
for err == nil && o.index < len(o.buf) {
oi := o.index
var u uint64
u, err = o.DecodeVarint()
if err != nil {
break
}
wire := int(u & 0x7)
if wire == WireEndGroup {
if is_group {
return nil // input is satisfied
}
return fmt.Errorf("proto: %s: wiretype end group for non-group", st)
}
tag := int(u >> 3)
if tag <= 0 {
return fmt.Errorf("proto: %s: illegal tag %d (wire type %d)", st, tag, wire)
}
fieldnum, ok := prop.decoderTags.get(tag)
if !ok {
// Maybe it's an extension?
if prop.extendable {
if e := structPointer_Interface(base, st).(extendableProto); isExtensionField(e, int32(tag)) {
if err = o.skip(st, tag, wire); err == nil {
ext := e.ExtensionMap()[int32(tag)] // may be missing
ext.enc = append(ext.enc, o.buf[oi:o.index]...)
e.ExtensionMap()[int32(tag)] = ext
}
continue
}
}
// Maybe it's a oneof?
if prop.oneofUnmarshaler != nil {
m := structPointer_Interface(base, st).(Message)
// First return value indicates whether tag is a oneof field.
ok, err = prop.oneofUnmarshaler(m, tag, wire, o)
if err == ErrInternalBadWireType {
// Map the error to something more descriptive.
// Do the formatting here to save generated code space.
err = fmt.Errorf("bad wiretype for oneof field in %T", m)
}
if ok {
continue
}
}
err = o.skipAndSave(st, tag, wire, base, prop.unrecField)
continue
}
p := prop.Prop[fieldnum]
if p.dec == nil {
fmt.Fprintf(os.Stderr, "proto: no protobuf decoder for %s.%s\n", st, st.Field(fieldnum).Name)
continue
}
dec := p.dec
if wire != WireStartGroup && wire != p.WireType {
if wire == WireBytes && p.packedDec != nil {
// a packable field
dec = p.packedDec
} else {
err = fmt.Errorf("proto: bad wiretype for field %s.%s: got wiretype %d, want %d", st, st.Field(fieldnum).Name, wire, p.WireType)
continue
}
}
decErr := dec(o, p, base)
if decErr != nil && !state.shouldContinue(decErr, p) {
err = decErr
}
if err == nil && p.Required {
// Successfully decoded a required field.
if tag <= 64 {
// use bitmap for fields 1-64 to catch field reuse.
var mask uint64 = 1 << uint64(tag-1)
if reqFields&mask == 0 {
// new required field
reqFields |= mask
required--
}
} else {
// This is imprecise. It can be fooled by a required field
// with a tag > 64 that is encoded twice; that's very rare.
// A fully correct implementation would require allocating
// a data structure, which we would like to avoid.
required--
}
}
}
if err == nil {
if is_group {
return io.ErrUnexpectedEOF
}
if state.err != nil {
return state.err
}
if required > 0 {
// Not enough information to determine the exact field. If we use extra
// CPU, we could determine the field only if the missing required field
// has a tag <= 64 and we check reqFields.
return &RequiredNotSetError{"{Unknown}"}
}
}
return err
}
// Individual type decoders
// For each,
// u is the decoded value,
// v is a pointer to the field (pointer) in the struct
// Sizes of the pools to allocate inside the Buffer.
// The goal is modest amortization and allocation
// on at least 16-byte boundaries.
const (
boolPoolSize = 16
uint32PoolSize = 8
uint64PoolSize = 4
)
// Decode a bool.
func (o *Buffer) dec_bool(p *Properties, base structPointer) error {
u, err := p.valDec(o)
if err != nil {
return err
}
if len(o.bools) == 0 {
o.bools = make([]bool, boolPoolSize)
}
o.bools[0] = u != 0
*structPointer_Bool(base, p.field) = &o.bools[0]
o.bools = o.bools[1:]
return nil
}
func (o *Buffer) dec_proto3_bool(p *Properties, base structPointer) error {
u, err := p.valDec(o)
if err != nil {
return err
}
*structPointer_BoolVal(base, p.field) = u != 0
return nil
}
// Decode an int32.
func (o *Buffer) dec_int32(p *Properties, base structPointer) error {
u, err := p.valDec(o)
if err != nil {
return err
}
word32_Set(structPointer_Word32(base, p.field), o, uint32(u))
return nil
}
func (o *Buffer) dec_proto3_int32(p *Properties, base structPointer) error {
u, err := p.valDec(o)
if err != nil {
return err
}
word32Val_Set(structPointer_Word32Val(base, p.field), uint32(u))
return nil
}
// Decode an int64.
func (o *Buffer) dec_int64(p *Properties, base structPointer) error {
u, err := p.valDec(o)
if err != nil {
return err
}
word64_Set(structPointer_Word64(base, p.field), o, u)
return nil
}
func (o *Buffer) dec_proto3_int64(p *Properties, base structPointer) error {
u, err := p.valDec(o)
if err != nil {
return err
}
word64Val_Set(structPointer_Word64Val(base, p.field), o, u)
return nil
}
// Decode a string.
func (o *Buffer) dec_string(p *Properties, base structPointer) error {
s, err := o.DecodeStringBytes()
if err != nil {
return err
}
*structPointer_String(base, p.field) = &s
return nil
}
func (o *Buffer) dec_proto3_string(p *Properties, base structPointer) error {
s, err := o.DecodeStringBytes()
if err != nil {
return err
}
*structPointer_StringVal(base, p.field) = s
return nil
}
// Decode a slice of bytes ([]byte).
func (o *Buffer) dec_slice_byte(p *Properties, base structPointer) error {
b, err := o.DecodeRawBytes(true)
if err != nil {
return err
}
*structPointer_Bytes(base, p.field) = b
return nil
}
// Decode a slice of bools ([]bool).
func (o *Buffer) dec_slice_bool(p *Properties, base structPointer) error {
u, err := p.valDec(o)
if err != nil {
return err
}
v := structPointer_BoolSlice(base, p.field)
*v = append(*v, u != 0)
return nil
}
// Decode a slice of bools ([]bool) in packed format.
func (o *Buffer) dec_slice_packed_bool(p *Properties, base structPointer) error {
v := structPointer_BoolSlice(base, p.field)
nn, err := o.DecodeVarint()
if err != nil {
return err
}
nb := int(nn) // number of bytes of encoded bools
fin := o.index + nb
if fin < o.index {
return errOverflow
}
y := *v
for o.index < fin {
u, err := p.valDec(o)
if err != nil {
return err
}
y = append(y, u != 0)
}
*v = y
return nil
}
// Decode a slice of int32s ([]int32).
func (o *Buffer) dec_slice_int32(p *Properties, base structPointer) error {
u, err := p.valDec(o)
if err != nil {
return err
}
structPointer_Word32Slice(base, p.field).Append(uint32(u))
return nil
}
// Decode a slice of int32s ([]int32) in packed format.
func (o *Buffer) dec_slice_packed_int32(p *Properties, base structPointer) error {
v := structPointer_Word32Slice(base, p.field)
nn, err := o.DecodeVarint()
if err != nil {
return err
}
nb := int(nn) // number of bytes of encoded int32s
fin := o.index + nb
if fin < o.index {
return errOverflow
}
for o.index < fin {
u, err := p.valDec(o)
if err != nil {
return err
}
v.Append(uint32(u))
}
return nil
}
// Decode a slice of int64s ([]int64).
func (o *Buffer) dec_slice_int64(p *Properties, base structPointer) error {
u, err := p.valDec(o)
if err != nil {
return err
}
structPointer_Word64Slice(base, p.field).Append(u)
return nil
}
// Decode a slice of int64s ([]int64) in packed format.
func (o *Buffer) dec_slice_packed_int64(p *Properties, base structPointer) error {
v := structPointer_Word64Slice(base, p.field)
nn, err := o.DecodeVarint()
if err != nil {
return err
}
nb := int(nn) // number of bytes of encoded int64s
fin := o.index + nb
if fin < o.index {
return errOverflow
}
for o.index < fin {
u, err := p.valDec(o)
if err != nil {
return err
}
v.Append(u)
}
return nil
}
// Decode a slice of strings ([]string).
func (o *Buffer) dec_slice_string(p *Properties, base structPointer) error {
s, err := o.DecodeStringBytes()
if err != nil {
return err
}
v := structPointer_StringSlice(base, p.field)
*v = append(*v, s)
return nil
}
// Decode a slice of slice of bytes ([][]byte).
func (o *Buffer) dec_slice_slice_byte(p *Properties, base structPointer) error {
b, err := o.DecodeRawBytes(true)
if err != nil {
return err
}
v := structPointer_BytesSlice(base, p.field)
*v = append(*v, b)
return nil
}
// Decode a map field.
func (o *Buffer) dec_new_map(p *Properties, base structPointer) error {
raw, err := o.DecodeRawBytes(false)
if err != nil {
return err
}
oi := o.index // index at the end of this map entry
o.index -= len(raw) // move buffer back to start of map entry
mptr := structPointer_NewAt(base, p.field, p.mtype) // *map[K]V
if mptr.Elem().IsNil() {
mptr.Elem().Set(reflect.MakeMap(mptr.Type().Elem()))
}
v := mptr.Elem() // map[K]V
// Prepare addressable doubly-indirect placeholders for the key and value types.
// See enc_new_map for why.
keyptr := reflect.New(reflect.PtrTo(p.mtype.Key())).Elem() // addressable *K
keybase := toStructPointer(keyptr.Addr()) // **K
var valbase structPointer
var valptr reflect.Value
switch p.mtype.Elem().Kind() {
case reflect.Slice:
// []byte
var dummy []byte
valptr = reflect.ValueOf(&dummy) // *[]byte
valbase = toStructPointer(valptr) // *[]byte
case reflect.Ptr:
// message; valptr is **Msg; need to allocate the intermediate pointer
valptr = reflect.New(reflect.PtrTo(p.mtype.Elem())).Elem() // addressable *V
valptr.Set(reflect.New(valptr.Type().Elem()))
valbase = toStructPointer(valptr)
default:
// everything else
valptr = reflect.New(reflect.PtrTo(p.mtype.Elem())).Elem() // addressable *V
valbase = toStructPointer(valptr.Addr()) // **V
}
// Decode.
// This parses a restricted wire format, namely the encoding of a message
// with two fields. See enc_new_map for the format.
for o.index < oi {
// tagcode for key and value properties are always a single byte
// because they have tags 1 and 2.
tagcode := o.buf[o.index]
o.index++
switch tagcode {
case p.mkeyprop.tagcode[0]:
if err := p.mkeyprop.dec(o, p.mkeyprop, keybase); err != nil {
return err
}
case p.mvalprop.tagcode[0]:
if err := p.mvalprop.dec(o, p.mvalprop, valbase); err != nil {
return err
}
default:
// TODO: Should we silently skip this instead?
return fmt.Errorf("proto: bad map data tag %d", raw[0])
}
}
keyelem, valelem := keyptr.Elem(), valptr.Elem()
if !keyelem.IsValid() || !valelem.IsValid() {
// We did not decode the key or the value in the map entry.
// Either way, it's an invalid map entry.
return fmt.Errorf("proto: bad map data: missing key/val")
}
v.SetMapIndex(keyelem, valelem)
return nil
}
// Decode a group.
func (o *Buffer) dec_struct_group(p *Properties, base structPointer) error {
bas := structPointer_GetStructPointer(base, p.field)
if structPointer_IsNil(bas) {
// allocate new nested message
bas = toStructPointer(reflect.New(p.stype))
structPointer_SetStructPointer(base, p.field, bas)
}
return o.unmarshalType(p.stype, p.sprop, true, bas)
}
// Decode an embedded message.
func (o *Buffer) dec_struct_message(p *Properties, base structPointer) (err error) {
raw, e := o.DecodeRawBytes(false)
if e != nil {
return e
}
bas := structPointer_GetStructPointer(base, p.field)
if structPointer_IsNil(bas) {
// allocate new nested message
bas = toStructPointer(reflect.New(p.stype))
structPointer_SetStructPointer(base, p.field, bas)
}
// If the object can unmarshal itself, let it.
if p.isUnmarshaler {
iv := structPointer_Interface(bas, p.stype)
return iv.(Unmarshaler).Unmarshal(raw)
}
obuf := o.buf
oi := o.index
o.buf = raw
o.index = 0
err = o.unmarshalType(p.stype, p.sprop, false, bas)
o.buf = obuf
o.index = oi
return err
}
// Decode a slice of embedded messages.
func (o *Buffer) dec_slice_struct_message(p *Properties, base structPointer) error {
return o.dec_slice_struct(p, false, base)
}
// Decode a slice of embedded groups.
func (o *Buffer) dec_slice_struct_group(p *Properties, base structPointer) error {
return o.dec_slice_struct(p, true, base)
}
// Decode a slice of structs ([]*struct).
func (o *Buffer) dec_slice_struct(p *Properties, is_group bool, base structPointer) error {
v := reflect.New(p.stype)
bas := toStructPointer(v)
structPointer_StructPointerSlice(base, p.field).Append(bas)
if is_group {
err := o.unmarshalType(p.stype, p.sprop, is_group, bas)
return err
}
raw, err := o.DecodeRawBytes(false)
if err != nil {
return err
}
// If the object can unmarshal itself, let it.
if p.isUnmarshaler {
iv := v.Interface()
return iv.(Unmarshaler).Unmarshal(raw)
}
obuf := o.buf
oi := o.index
o.buf = raw
o.index = 0
err = o.unmarshalType(p.stype, p.sprop, is_group, bas)
o.buf = obuf
o.index = oi
return err
}

vendor/github.com/golang/protobuf/proto/encode.go generated vendored Normal file

File diff suppressed because it is too large

vendor/github.com/golang/protobuf/proto/equal.go generated vendored Normal file

@@ -0,0 +1,276 @@
// Go support for Protocol Buffers - Google's data interchange format
//
// Copyright 2011 The Go Authors. All rights reserved.
// https://github.com/golang/protobuf
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above
// copyright notice, this list of conditions and the following disclaimer
// in the documentation and/or other materials provided with the
// distribution.
// * Neither the name of Google Inc. nor the names of its
// contributors may be used to endorse or promote products derived from
// this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
// Protocol buffer comparison.
package proto
import (
"bytes"
"log"
"reflect"
"strings"
)
/*
Equal returns true iff protocol buffers a and b are equal.
The arguments must both be pointers to protocol buffer structs.
Equality is defined in this way:
- Two messages are equal iff they are the same type,
corresponding fields are equal, unknown field sets
are equal, and extensions sets are equal.
- Two set scalar fields are equal iff their values are equal.
If the fields are of a floating-point type, remember that
NaN != x for all x, including NaN. If the message is defined
in a proto3 .proto file, fields are not "set"; specifically,
zero length proto3 "bytes" fields are equal (nil == {}).
- Two repeated fields are equal iff their lengths are the same,
and their corresponding elements are equal (a "bytes" field,
although represented by []byte, is not a repeated field)
- Two unset fields are equal.
- Two unknown field sets are equal if their current
encoded state is equal.
- Two extension sets are equal iff they have corresponding
elements that are pairwise equal.
- Every other combination of things is not equal.
The return value is undefined if a and b are not protocol buffers.
*/
func Equal(a, b Message) bool {
if a == nil || b == nil {
return a == b
}
v1, v2 := reflect.ValueOf(a), reflect.ValueOf(b)
if v1.Type() != v2.Type() {
return false
}
if v1.Kind() == reflect.Ptr {
if v1.IsNil() {
return v2.IsNil()
}
if v2.IsNil() {
return false
}
v1, v2 = v1.Elem(), v2.Elem()
}
if v1.Kind() != reflect.Struct {
return false
}
return equalStruct(v1, v2)
}
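A short usage sketch of Equal with a hand-written stand-in message (hypothetical type); note that under the proto2 semantics described above, an unset optional field and a field explicitly set to its zero value are not the same thing:

package main

import (
	"fmt"

	"github.com/golang/protobuf/proto"
)

type Pair struct {
	A *string `protobuf:"bytes,1,opt,name=a"`
	B *int32  `protobuf:"varint,2,opt,name=b"`
}

func (m *Pair) Reset()         { *m = Pair{} }
func (m *Pair) String() string { return proto.CompactTextString(m) }
func (*Pair) ProtoMessage()    {}

func main() {
	x := &Pair{A: proto.String("hi")}
	y := &Pair{A: proto.String("hi")}
	z := &Pair{A: proto.String("hi"), B: proto.Int32(0)}

	fmt.Println(proto.Equal(x, y)) // true: same set fields with equal values
	fmt.Println(proto.Equal(x, z)) // false: b is unset in x but set (to 0) in z
}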
// v1 and v2 are known to have the same type.
func equalStruct(v1, v2 reflect.Value) bool {
sprop := GetProperties(v1.Type())
for i := 0; i < v1.NumField(); i++ {
f := v1.Type().Field(i)
if strings.HasPrefix(f.Name, "XXX_") {
continue
}
f1, f2 := v1.Field(i), v2.Field(i)
if f.Type.Kind() == reflect.Ptr {
if n1, n2 := f1.IsNil(), f2.IsNil(); n1 && n2 {
// both unset
continue
} else if n1 != n2 {
// set/unset mismatch
return false
}
b1, ok := f1.Interface().(raw)
if ok {
b2 := f2.Interface().(raw)
// RawMessage
if !bytes.Equal(b1.Bytes(), b2.Bytes()) {
return false
}
continue
}
f1, f2 = f1.Elem(), f2.Elem()
}
if !equalAny(f1, f2, sprop.Prop[i]) {
return false
}
}
if em1 := v1.FieldByName("XXX_extensions"); em1.IsValid() {
em2 := v2.FieldByName("XXX_extensions")
if !equalExtensions(v1.Type(), em1.Interface().(map[int32]Extension), em2.Interface().(map[int32]Extension)) {
return false
}
}
uf := v1.FieldByName("XXX_unrecognized")
if !uf.IsValid() {
return true
}
u1 := uf.Bytes()
u2 := v2.FieldByName("XXX_unrecognized").Bytes()
if !bytes.Equal(u1, u2) {
return false
}
return true
}
// v1 and v2 are known to have the same type.
// prop may be nil.
func equalAny(v1, v2 reflect.Value, prop *Properties) bool {
if v1.Type() == protoMessageType {
m1, _ := v1.Interface().(Message)
m2, _ := v2.Interface().(Message)
return Equal(m1, m2)
}
switch v1.Kind() {
case reflect.Bool:
return v1.Bool() == v2.Bool()
case reflect.Float32, reflect.Float64:
return v1.Float() == v2.Float()
case reflect.Int32, reflect.Int64:
return v1.Int() == v2.Int()
case reflect.Interface:
// Probably a oneof field; compare the inner values.
n1, n2 := v1.IsNil(), v2.IsNil()
if n1 || n2 {
return n1 == n2
}
e1, e2 := v1.Elem(), v2.Elem()
if e1.Type() != e2.Type() {
return false
}
return equalAny(e1, e2, nil)
case reflect.Map:
if v1.Len() != v2.Len() {
return false
}
for _, key := range v1.MapKeys() {
val2 := v2.MapIndex(key)
if !val2.IsValid() {
// This key was not found in the second map.
return false
}
if !equalAny(v1.MapIndex(key), val2, nil) {
return false
}
}
return true
case reflect.Ptr:
return equalAny(v1.Elem(), v2.Elem(), prop)
case reflect.Slice:
if v1.Type().Elem().Kind() == reflect.Uint8 {
// short circuit: []byte
// Edge case: if this is in a proto3 message, a zero length
// bytes field is considered the zero value.
if prop != nil && prop.proto3 && v1.Len() == 0 && v2.Len() == 0 {
return true
}
if v1.IsNil() != v2.IsNil() {
return false
}
return bytes.Equal(v1.Interface().([]byte), v2.Interface().([]byte))
}
if v1.Len() != v2.Len() {
return false
}
for i := 0; i < v1.Len(); i++ {
if !equalAny(v1.Index(i), v2.Index(i), prop) {
return false
}
}
return true
case reflect.String:
return v1.Interface().(string) == v2.Interface().(string)
case reflect.Struct:
return equalStruct(v1, v2)
case reflect.Uint32, reflect.Uint64:
return v1.Uint() == v2.Uint()
}
// unknown type, so not a protocol buffer
log.Printf("proto: don't know how to compare %v", v1)
return false
}
// base is the struct type that the extensions are based on.
// em1 and em2 are extension maps.
func equalExtensions(base reflect.Type, em1, em2 map[int32]Extension) bool {
if len(em1) != len(em2) {
return false
}
for extNum, e1 := range em1 {
e2, ok := em2[extNum]
if !ok {
return false
}
m1, m2 := e1.value, e2.value
if m1 != nil && m2 != nil {
// Both are unencoded.
if !equalAny(reflect.ValueOf(m1), reflect.ValueOf(m2), nil) {
return false
}
continue
}
// At least one is encoded. To do a semantically correct comparison
// we need to unmarshal them first.
var desc *ExtensionDesc
if m := extensionMaps[base]; m != nil {
desc = m[extNum]
}
if desc == nil {
log.Printf("proto: don't know how to compare extension %d of %v", extNum, base)
continue
}
var err error
if m1 == nil {
m1, err = decodeExtension(e1.enc, desc)
}
if m2 == nil && err == nil {
m2, err = decodeExtension(e2.enc, desc)
}
if err != nil {
// The encoded form is invalid.
log.Printf("proto: badly encoded extension %d of %v: %v", extNum, base, err)
return false
}
if !equalAny(reflect.ValueOf(m1), reflect.ValueOf(m2), nil) {
return false
}
}
return true
}

vendor/github.com/golang/protobuf/proto/extensions.go generated vendored Normal file

@@ -0,0 +1,399 @@
// Go support for Protocol Buffers - Google's data interchange format
//
// Copyright 2010 The Go Authors. All rights reserved.
// https://github.com/golang/protobuf
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above
// copyright notice, this list of conditions and the following disclaimer
// in the documentation and/or other materials provided with the
// distribution.
// * Neither the name of Google Inc. nor the names of its
// contributors may be used to endorse or promote products derived from
// this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
package proto
/*
* Types and routines for supporting protocol buffer extensions.
*/
import (
"errors"
"fmt"
"reflect"
"strconv"
"sync"
)
// ErrMissingExtension is the error returned by GetExtension if the named extension is not in the message.
var ErrMissingExtension = errors.New("proto: missing extension")
// ExtensionRange represents a range of message extensions for a protocol buffer.
// Used in code generated by the protocol compiler.
type ExtensionRange struct {
Start, End int32 // both inclusive
}
// extendableProto is an interface implemented by any protocol buffer that may be extended.
type extendableProto interface {
Message
ExtensionRangeArray() []ExtensionRange
ExtensionMap() map[int32]Extension
}
var extendableProtoType = reflect.TypeOf((*extendableProto)(nil)).Elem()
// ExtensionDesc represents an extension specification.
// Used in generated code from the protocol compiler.
type ExtensionDesc struct {
ExtendedType Message // nil pointer to the type that is being extended
ExtensionType interface{} // nil pointer to the extension type
Field int32 // field number
Name string // fully-qualified name of extension, for text formatting
Tag string // protobuf tag style
}
func (ed *ExtensionDesc) repeated() bool {
t := reflect.TypeOf(ed.ExtensionType)
return t.Kind() == reflect.Slice && t.Elem().Kind() != reflect.Uint8
}
// Extension represents an extension in a message.
type Extension struct {
// When an extension is stored in a message using SetExtension
// only desc and value are set. When the message is marshaled
// enc will be set to the encoded form of the message.
//
// When a message is unmarshaled and contains extensions, each
// extension will have only enc set. When such an extension is
// accessed using GetExtension (or GetExtensions) desc and value
// will be set.
desc *ExtensionDesc
value interface{}
enc []byte
}
// SetRawExtension is for testing only.
func SetRawExtension(base extendableProto, id int32, b []byte) {
base.ExtensionMap()[id] = Extension{enc: b}
}
// isExtensionField returns true iff the given field number is in an extension range.
func isExtensionField(pb extendableProto, field int32) bool {
for _, er := range pb.ExtensionRangeArray() {
if er.Start <= field && field <= er.End {
return true
}
}
return false
}
// checkExtensionTypes checks that the given extension is valid for pb.
func checkExtensionTypes(pb extendableProto, extension *ExtensionDesc) error {
// Check the extended type.
if a, b := reflect.TypeOf(pb), reflect.TypeOf(extension.ExtendedType); a != b {
return errors.New("proto: bad extended type; " + b.String() + " does not extend " + a.String())
}
// Check the range.
if !isExtensionField(pb, extension.Field) {
return errors.New("proto: bad extension number; not in declared ranges")
}
return nil
}
// extPropKey is sufficient to uniquely identify an extension.
type extPropKey struct {
base reflect.Type
field int32
}
var extProp = struct {
sync.RWMutex
m map[extPropKey]*Properties
}{
m: make(map[extPropKey]*Properties),
}
func extensionProperties(ed *ExtensionDesc) *Properties {
key := extPropKey{base: reflect.TypeOf(ed.ExtendedType), field: ed.Field}
extProp.RLock()
if prop, ok := extProp.m[key]; ok {
extProp.RUnlock()
return prop
}
extProp.RUnlock()
extProp.Lock()
defer extProp.Unlock()
// Check again.
if prop, ok := extProp.m[key]; ok {
return prop
}
prop := new(Properties)
prop.Init(reflect.TypeOf(ed.ExtensionType), "unknown_name", ed.Tag, nil)
extProp.m[key] = prop
return prop
}
// encodeExtensionMap encodes any unmarshaled (unencoded) extensions in m.
func encodeExtensionMap(m map[int32]Extension) error {
for k, e := range m {
if e.value == nil || e.desc == nil {
// Extension is only in its encoded form.
continue
}
// We don't skip extensions that have an encoded form set,
// because the extension value may have been mutated after
// the last time this function was called.
et := reflect.TypeOf(e.desc.ExtensionType)
props := extensionProperties(e.desc)
p := NewBuffer(nil)
// If e.value has type T, the encoder expects a *struct{ X T }.
// Pass a *T with a zero field and hope it all works out.
x := reflect.New(et)
x.Elem().Set(reflect.ValueOf(e.value))
if err := props.enc(p, props, toStructPointer(x)); err != nil {
return err
}
e.enc = p.buf
m[k] = e
}
return nil
}
func sizeExtensionMap(m map[int32]Extension) (n int) {
for _, e := range m {
if e.value == nil || e.desc == nil {
// Extension is only in its encoded form.
n += len(e.enc)
continue
}
// We don't skip extensions that have an encoded form set,
// because the extension value may have been mutated after
// the last time this function was called.
et := reflect.TypeOf(e.desc.ExtensionType)
props := extensionProperties(e.desc)
// If e.value has type T, the encoder expects a *struct{ X T }.
// Pass a *T with a zero field and hope it all works out.
x := reflect.New(et)
x.Elem().Set(reflect.ValueOf(e.value))
n += props.size(props, toStructPointer(x))
}
return
}
// HasExtension returns whether the given extension is present in pb.
func HasExtension(pb extendableProto, extension *ExtensionDesc) bool {
// TODO: Check types, field numbers, etc.?
_, ok := pb.ExtensionMap()[extension.Field]
return ok
}
// ClearExtension removes the given extension from pb.
func ClearExtension(pb extendableProto, extension *ExtensionDesc) {
// TODO: Check types, field numbers, etc.?
delete(pb.ExtensionMap(), extension.Field)
}
// GetExtension parses and returns the given extension of pb.
// If the extension is not present and has no default value it returns ErrMissingExtension.
func GetExtension(pb extendableProto, extension *ExtensionDesc) (interface{}, error) {
if err := checkExtensionTypes(pb, extension); err != nil {
return nil, err
}
emap := pb.ExtensionMap()
e, ok := emap[extension.Field]
if !ok {
// defaultExtensionValue returns the default value or
// ErrMissingExtension if there is no default.
return defaultExtensionValue(extension)
}
if e.value != nil {
// Already decoded. Check the descriptor, though.
if e.desc != extension {
// This shouldn't happen. If it does, it means that
// GetExtension was called twice with two different
// descriptors with the same field number.
return nil, errors.New("proto: descriptor conflict")
}
return e.value, nil
}
v, err := decodeExtension(e.enc, extension)
if err != nil {
return nil, err
}
// Remember the decoded version and drop the encoded version.
// That way it is safe to mutate what we return.
e.value = v
e.desc = extension
e.enc = nil
emap[extension.Field] = e
return e.value, nil
}
// defaultExtensionValue returns the default value for extension.
// If no default for an extension is defined ErrMissingExtension is returned.
func defaultExtensionValue(extension *ExtensionDesc) (interface{}, error) {
t := reflect.TypeOf(extension.ExtensionType)
props := extensionProperties(extension)
sf, _, err := fieldDefault(t, props)
if err != nil {
return nil, err
}
if sf == nil || sf.value == nil {
// There is no default value.
return nil, ErrMissingExtension
}
if t.Kind() != reflect.Ptr {
// We do not need to return a Ptr, we can directly return sf.value.
return sf.value, nil
}
// We need to return an interface{} that is a pointer to sf.value.
value := reflect.New(t).Elem()
value.Set(reflect.New(value.Type().Elem()))
if sf.kind == reflect.Int32 {
// We may have an int32 or an enum, but the underlying data is int32.
// Since we can't set an int32 into a non-int32 reflect.Value directly,
// set it as an int32.
value.Elem().SetInt(int64(sf.value.(int32)))
} else {
value.Elem().Set(reflect.ValueOf(sf.value))
}
return value.Interface(), nil
}
// decodeExtension decodes an extension encoded in b.
func decodeExtension(b []byte, extension *ExtensionDesc) (interface{}, error) {
o := NewBuffer(b)
t := reflect.TypeOf(extension.ExtensionType)
props := extensionProperties(extension)
// t is a pointer to a struct, pointer to basic type or a slice.
// Allocate a "field" to store the pointer/slice itself; the
// pointer/slice will be stored here. We pass
// the address of this field to props.dec.
// This passes a zero field and a *t and lets props.dec
// interpret it as a *struct{ x t }.
value := reflect.New(t).Elem()
for {
// Discard wire type and field number varint. It isn't needed.
if _, err := o.DecodeVarint(); err != nil {
return nil, err
}
if err := props.dec(o, props, toStructPointer(value.Addr())); err != nil {
return nil, err
}
if o.index >= len(o.buf) {
break
}
}
return value.Interface(), nil
}
// GetExtensions returns a slice of the extensions present in pb that are also listed in es.
// The returned slice has the same length as es; missing extensions will appear as nil elements.
func GetExtensions(pb Message, es []*ExtensionDesc) (extensions []interface{}, err error) {
epb, ok := pb.(extendableProto)
if !ok {
err = errors.New("proto: not an extendable proto")
return
}
extensions = make([]interface{}, len(es))
for i, e := range es {
extensions[i], err = GetExtension(epb, e)
if err == ErrMissingExtension {
err = nil
}
if err != nil {
return
}
}
return
}
// SetExtension sets the specified extension of pb to the specified value.
func SetExtension(pb extendableProto, extension *ExtensionDesc, value interface{}) error {
if err := checkExtensionTypes(pb, extension); err != nil {
return err
}
typ := reflect.TypeOf(extension.ExtensionType)
if typ != reflect.TypeOf(value) {
return errors.New("proto: bad extension value type")
}
// nil extension values need to be caught early, because the
// encoder can't distinguish an ErrNil due to a nil extension
// from an ErrNil due to a missing field. Extensions are
// always optional, so the encoder would just swallow the error
// and drop all the extensions from the encoded message.
if reflect.ValueOf(value).IsNil() {
return fmt.Errorf("proto: SetExtension called with nil value of type %T", value)
}
pb.ExtensionMap()[extension.Field] = Extension{desc: extension, value: value}
return nil
}
// A global registry of extensions.
// The generated code will register the generated descriptors by calling RegisterExtension.
var extensionMaps = make(map[reflect.Type]map[int32]*ExtensionDesc)
// RegisterExtension is called from the generated code.
func RegisterExtension(desc *ExtensionDesc) {
st := reflect.TypeOf(desc.ExtendedType).Elem()
m := extensionMaps[st]
if m == nil {
m = make(map[int32]*ExtensionDesc)
extensionMaps[st] = m
}
if _, ok := m[desc.Field]; ok {
panic("proto: duplicate extension registered: " + st.String() + " " + strconv.Itoa(int(desc.Field)))
}
m[desc.Field] = desc
}
// RegisteredExtensions returns a map of the registered extensions of a
// protocol buffer struct, indexed by the extension number.
// The argument pb should be a nil pointer to the struct type.
func RegisteredExtensions(pb Message) map[int32]*ExtensionDesc {
return extensionMaps[reflect.TypeOf(pb).Elem()]
}
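The extension API above can be exercised without generated code by hand-writing an extendable message. Everything below (Base, E_Note, the field number 123 and the range 100-200) is hypothetical and only meant to show how SetExtension, GetExtension, and the ExtensionMap fit together:

package main

import (
	"fmt"

	"github.com/golang/protobuf/proto"
)

// Base is a hand-written extendable message: it carries an extension map and
// declares which field numbers may be used by extensions.
type Base struct {
	XXX_extensions map[int32]proto.Extension `json:"-"`
}

func (m *Base) Reset()         { *m = Base{} }
func (m *Base) String() string { return proto.CompactTextString(m) }
func (*Base) ProtoMessage()    {}

func (m *Base) ExtensionRangeArray() []proto.ExtensionRange {
	return []proto.ExtensionRange{{Start: 100, End: 200}}
}

func (m *Base) ExtensionMap() map[int32]proto.Extension {
	if m.XXX_extensions == nil {
		m.XXX_extensions = make(map[int32]proto.Extension)
	}
	return m.XXX_extensions
}

// E_Note plays the role of a generated extension descriptor.
var E_Note = &proto.ExtensionDesc{
	ExtendedType:  (*Base)(nil),
	ExtensionType: (*string)(nil),
	Field:         123,
	Name:          "example.note",
	Tag:           "bytes,123,opt,name=note",
}

func main() {
	m := &Base{}
	if err := proto.SetExtension(m, E_Note, proto.String("hello")); err != nil {
		panic(err)
	}
	v, err := proto.GetExtension(m, E_Note)
	if err != nil {
		panic(err)
	}
	fmt.Println(*v.(*string)) // hello
}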

vendor/github.com/golang/protobuf/proto/lib.go generated vendored Normal file

@@ -0,0 +1,893 @@
// Go support for Protocol Buffers - Google's data interchange format
//
// Copyright 2010 The Go Authors. All rights reserved.
// https://github.com/golang/protobuf
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above
// copyright notice, this list of conditions and the following disclaimer
// in the documentation and/or other materials provided with the
// distribution.
// * Neither the name of Google Inc. nor the names of its
// contributors may be used to endorse or promote products derived from
// this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
/*
Package proto converts data structures to and from the wire format of
protocol buffers. It works in concert with the Go source code generated
for .proto files by the protocol compiler.
A summary of the properties of the protocol buffer interface
for a protocol buffer variable v:
- Names are turned from camel_case to CamelCase for export.
- There are no methods on v to set fields; just treat
them as structure fields.
- There are getters that return a field's value if set,
and return the field's default value if unset.
The getters work even if the receiver is a nil message.
- The zero value for a struct is its correct initialization state.
All desired fields must be set before marshaling.
- A Reset() method will restore a protobuf struct to its zero state.
- Non-repeated fields are pointers to the values; nil means unset.
That is, optional or required field int32 f becomes F *int32.
- Repeated fields are slices.
- Helper functions are available to aid the setting of fields.
msg.Foo = proto.String("hello") // set field
- Constants are defined to hold the default values of all fields that
have them. They have the form Default_StructName_FieldName.
Because the getter methods handle defaulted values,
direct use of these constants should be rare.
- Enums are given type names and maps from names to values.
Enum values are prefixed by the enclosing message's name, or by the
enum's type name if it is a top-level enum. Enum types have a String
method, and an Enum method to assist in message construction.
- Nested messages, groups and enums have type names prefixed with the name of
the surrounding message type.
- Extensions are given descriptor names that start with E_,
followed by an underscore-delimited list of the nested messages
that contain it (if any) followed by the CamelCased name of the
extension field itself. HasExtension, ClearExtension, GetExtension
and SetExtension are functions for manipulating extensions.
- Oneof field sets are given a single field in their message,
with distinguished wrapper types for each possible field value.
- Marshal and Unmarshal are functions to encode and decode the wire format.
When the .proto file specifies `syntax="proto3"`, there are some differences:
- Non-repeated fields of non-message type are values instead of pointers.
- Getters are only generated for message and oneof fields.
- Enum types do not get an Enum method.
The simplest way to describe this is to see an example.
Given file test.proto, containing
package example;
enum FOO { X = 17; }
message Test {
required string label = 1;
optional int32 type = 2 [default=77];
repeated int64 reps = 3;
optional group OptionalGroup = 4 {
required string RequiredField = 5;
}
oneof union {
int32 number = 6;
string name = 7;
}
}
The resulting file, test.pb.go, is:
package example
import proto "github.com/golang/protobuf/proto"
import math "math"
type FOO int32
const (
FOO_X FOO = 17
)
var FOO_name = map[int32]string{
17: "X",
}
var FOO_value = map[string]int32{
"X": 17,
}
func (x FOO) Enum() *FOO {
p := new(FOO)
*p = x
return p
}
func (x FOO) String() string {
return proto.EnumName(FOO_name, int32(x))
}
func (x *FOO) UnmarshalJSON(data []byte) error {
value, err := proto.UnmarshalJSONEnum(FOO_value, data)
if err != nil {
return err
}
*x = FOO(value)
return nil
}
type Test struct {
Label *string `protobuf:"bytes,1,req,name=label" json:"label,omitempty"`
Type *int32 `protobuf:"varint,2,opt,name=type,def=77" json:"type,omitempty"`
Reps []int64 `protobuf:"varint,3,rep,name=reps" json:"reps,omitempty"`
Optionalgroup *Test_OptionalGroup `protobuf:"group,4,opt,name=OptionalGroup" json:"optionalgroup,omitempty"`
// Types that are valid to be assigned to Union:
// *Test_Number
// *Test_Name
Union isTest_Union `protobuf_oneof:"union"`
XXX_unrecognized []byte `json:"-"`
}
func (m *Test) Reset() { *m = Test{} }
func (m *Test) String() string { return proto.CompactTextString(m) }
func (*Test) ProtoMessage() {}
type isTest_Union interface {
isTest_Union()
}
type Test_Number struct {
Number int32 `protobuf:"varint,6,opt,name=number"`
}
type Test_Name struct {
Name string `protobuf:"bytes,7,opt,name=name"`
}
func (*Test_Number) isTest_Union() {}
func (*Test_Name) isTest_Union() {}
func (m *Test) GetUnion() isTest_Union {
if m != nil {
return m.Union
}
return nil
}
const Default_Test_Type int32 = 77
func (m *Test) GetLabel() string {
if m != nil && m.Label != nil {
return *m.Label
}
return ""
}
func (m *Test) GetType() int32 {
if m != nil && m.Type != nil {
return *m.Type
}
return Default_Test_Type
}
func (m *Test) GetOptionalgroup() *Test_OptionalGroup {
if m != nil {
return m.Optionalgroup
}
return nil
}
type Test_OptionalGroup struct {
RequiredField *string `protobuf:"bytes,5,req" json:"RequiredField,omitempty"`
}
func (m *Test_OptionalGroup) Reset() { *m = Test_OptionalGroup{} }
func (m *Test_OptionalGroup) String() string { return proto.CompactTextString(m) }
func (m *Test_OptionalGroup) GetRequiredField() string {
if m != nil && m.RequiredField != nil {
return *m.RequiredField
}
return ""
}
func (m *Test) GetNumber() int32 {
if x, ok := m.GetUnion().(*Test_Number); ok {
return x.Number
}
return 0
}
func (m *Test) GetName() string {
if x, ok := m.GetUnion().(*Test_Name); ok {
return x.Name
}
return ""
}
func init() {
proto.RegisterEnum("example.FOO", FOO_name, FOO_value)
}
To create and play with a Test object:
package main
import (
"log"
"github.com/golang/protobuf/proto"
pb "./example.pb"
)
func main() {
test := &pb.Test{
Label: proto.String("hello"),
Type: proto.Int32(17),
Optionalgroup: &pb.Test_OptionalGroup{
RequiredField: proto.String("good bye"),
},
Union: &pb.Test_Name{"fred"},
}
data, err := proto.Marshal(test)
if err != nil {
log.Fatal("marshaling error: ", err)
}
newTest := &pb.Test{}
err = proto.Unmarshal(data, newTest)
if err != nil {
log.Fatal("unmarshaling error: ", err)
}
// Now test and newTest contain the same data.
if test.GetLabel() != newTest.GetLabel() {
log.Fatalf("data mismatch %q != %q", test.GetLabel(), newTest.GetLabel())
}
// Use a type switch to determine which oneof was set.
switch u := test.Union.(type) {
case *pb.Test_Number: // u.Number contains the number.
case *pb.Test_Name: // u.Name contains the string.
}
// etc.
}
*/
package proto
import (
"encoding/json"
"fmt"
"log"
"reflect"
"sort"
"strconv"
"sync"
)
// Message is implemented by generated protocol buffer messages.
type Message interface {
Reset()
String() string
ProtoMessage()
}
// Stats records allocation details about the protocol buffer encoders
// and decoders. Useful for tuning the library itself.
type Stats struct {
Emalloc uint64 // mallocs in encode
Dmalloc uint64 // mallocs in decode
Encode uint64 // number of encodes
Decode uint64 // number of decodes
Chit uint64 // number of cache hits
Cmiss uint64 // number of cache misses
Size uint64 // number of sizes
}
// Set to true to enable stats collection.
const collectStats = false
var stats Stats
// GetStats returns a copy of the global Stats structure.
func GetStats() Stats { return stats }
// A Buffer is a buffer manager for marshaling and unmarshaling
// protocol buffers. It may be reused between invocations to
// reduce memory usage. It is not necessary to use a Buffer;
// the global functions Marshal and Unmarshal create a
// temporary Buffer and are fine for most applications.
type Buffer struct {
buf []byte // encode/decode byte stream
index int // write point
// pools of basic types to amortize allocation.
bools []bool
uint32s []uint32
uint64s []uint64
// extra pools, only used with pointer_reflect.go
int32s []int32
int64s []int64
float32s []float32
float64s []float64
}
// NewBuffer allocates a new Buffer and initializes its internal data to
// the contents of the argument slice.
func NewBuffer(e []byte) *Buffer {
return &Buffer{buf: e}
}
// Reset resets the Buffer, ready for marshaling a new protocol buffer.
func (p *Buffer) Reset() {
p.buf = p.buf[0:0] // for reading/writing
p.index = 0 // for reading
}
// SetBuf replaces the internal buffer with the slice,
// ready for unmarshaling the contents of the slice.
func (p *Buffer) SetBuf(s []byte) {
p.buf = s
p.index = 0
}
// Bytes returns the contents of the Buffer.
func (p *Buffer) Bytes() []byte { return p.buf }
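// Usage sketch (editor's annotation, not part of the vendored source): a single
// Buffer can be reused across many Marshal calls to amortize allocations. Here
// msgs and send are assumed placeholders for a slice of generated messages and a
// consumer of the encoded bytes.
//
//	var buf proto.Buffer
//	for _, msg := range msgs {
//		buf.Reset() // drop the previous encoding
//		if err := buf.Marshal(msg); err != nil {
//			log.Fatal("marshaling error: ", err)
//		}
//		send(buf.Bytes()) // Bytes returns the accumulated encoding
//	}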
/*
* Helper routines for simplifying the creation of optional fields of basic type.
*/
// Bool is a helper routine that allocates a new bool value
// to store v and returns a pointer to it.
func Bool(v bool) *bool {
return &v
}
// Int32 is a helper routine that allocates a new int32 value
// to store v and returns a pointer to it.
func Int32(v int32) *int32 {
return &v
}
// Int is a helper routine that allocates a new int32 value
// to store v and returns a pointer to it, but unlike Int32
// its argument value is an int.
func Int(v int) *int32 {
p := new(int32)
*p = int32(v)
return p
}
// Int64 is a helper routine that allocates a new int64 value
// to store v and returns a pointer to it.
func Int64(v int64) *int64 {
return &v
}
// Float32 is a helper routine that allocates a new float32 value
// to store v and returns a pointer to it.
func Float32(v float32) *float32 {
return &v
}
// Float64 is a helper routine that allocates a new float64 value
// to store v and returns a pointer to it.
func Float64(v float64) *float64 {
return &v
}
// Uint32 is a helper routine that allocates a new uint32 value
// to store v and returns a pointer to it.
func Uint32(v uint32) *uint32 {
return &v
}
// Uint64 is a helper routine that allocates a new uint64 value
// to store v and returns a pointer to it.
func Uint64(v uint64) *uint64 {
return &v
}
// String is a helper routine that allocates a new string value
// to store v and returns a pointer to it.
func String(v string) *string {
return &v
}
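// Usage sketch (editor's annotation): these helpers exist because proto2 optional
// scalars are represented as pointers and Go does not allow taking the address of a
// literal. Assumes the generated Test message from the package example above.
//
//	msg := &pb.Test{
//		Label: proto.String("hello"), // *string
//		Type:  proto.Int32(17),       // *int32
//	}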
// EnumName is a helper function to simplify printing protocol buffer enums
// by name. Given an enum map and a value, it returns a useful string.
func EnumName(m map[int32]string, v int32) string {
s, ok := m[v]
if ok {
return s
}
return strconv.Itoa(int(v))
}
// UnmarshalJSONEnum is a helper function to simplify recovering enum int values
// from their JSON-encoded representation. Given a map from the enum's symbolic
// names to its int values, and a byte buffer containing the JSON-encoded
// value, it returns an int32 that can be cast to the enum type by the caller.
//
// The function can deal with both JSON representations, numeric and symbolic.
func UnmarshalJSONEnum(m map[string]int32, data []byte, enumName string) (int32, error) {
if data[0] == '"' {
// New style: enums are strings.
var repr string
if err := json.Unmarshal(data, &repr); err != nil {
return -1, err
}
val, ok := m[repr]
if !ok {
return 0, fmt.Errorf("unrecognized enum %s value %q", enumName, repr)
}
return val, nil
}
// Old style: enums are ints.
var val int32
if err := json.Unmarshal(data, &val); err != nil {
return 0, fmt.Errorf("cannot unmarshal %#q into enum %s", data, enumName)
}
return val, nil
}
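// Usage sketch (editor's annotation): the same enum value may arrive as either a
// JSON string or a JSON number; both forms decode to the int32 value. FOO_value is
// the name-to-value map from the package example above.
//
//	v, err := proto.UnmarshalJSONEnum(FOO_value, []byte(`"X"`), "FOO") // symbolic form -> 17
//	v, err = proto.UnmarshalJSONEnum(FOO_value, []byte(`17`), "FOO")   // numeric form  -> 17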
// DebugPrint dumps the encoded data in b in a debugging format with a header
// including the string s. Used in testing but made available for general debugging.
func (p *Buffer) DebugPrint(s string, b []byte) {
var u uint64
obuf := p.buf
index := p.index
p.buf = b
p.index = 0
depth := 0
fmt.Printf("\n--- %s ---\n", s)
out:
for {
for i := 0; i < depth; i++ {
fmt.Print(" ")
}
index := p.index
if index == len(p.buf) {
break
}
op, err := p.DecodeVarint()
if err != nil {
fmt.Printf("%3d: fetching op err %v\n", index, err)
break out
}
tag := op >> 3
wire := op & 7
switch wire {
default:
fmt.Printf("%3d: t=%3d unknown wire=%d\n",
index, tag, wire)
break out
case WireBytes:
var r []byte
r, err = p.DecodeRawBytes(false)
if err != nil {
break out
}
fmt.Printf("%3d: t=%3d bytes [%d]", index, tag, len(r))
if len(r) <= 6 {
for i := 0; i < len(r); i++ {
fmt.Printf(" %.2x", r[i])
}
} else {
for i := 0; i < 3; i++ {
fmt.Printf(" %.2x", r[i])
}
fmt.Printf(" ..")
for i := len(r) - 3; i < len(r); i++ {
fmt.Printf(" %.2x", r[i])
}
}
fmt.Printf("\n")
case WireFixed32:
u, err = p.DecodeFixed32()
if err != nil {
fmt.Printf("%3d: t=%3d fix32 err %v\n", index, tag, err)
break out
}
fmt.Printf("%3d: t=%3d fix32 %d\n", index, tag, u)
case WireFixed64:
u, err = p.DecodeFixed64()
if err != nil {
fmt.Printf("%3d: t=%3d fix64 err %v\n", index, tag, err)
break out
}
fmt.Printf("%3d: t=%3d fix64 %d\n", index, tag, u)
case WireVarint:
u, err = p.DecodeVarint()
if err != nil {
fmt.Printf("%3d: t=%3d varint err %v\n", index, tag, err)
break out
}
fmt.Printf("%3d: t=%3d varint %d\n", index, tag, u)
case WireStartGroup:
fmt.Printf("%3d: t=%3d start\n", index, tag)
depth++
case WireEndGroup:
depth--
fmt.Printf("%3d: t=%3d end\n", index, tag)
}
}
if depth != 0 {
fmt.Printf("%3d: start-end not balanced %d\n", p.index, depth)
}
fmt.Printf("\n")
p.buf = obuf
p.index = index
}
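// Usage sketch (editor's annotation): DebugPrint decodes the wire format tag by tag
// and prints one line per field, which is useful when inspecting an opaque encoding.
// msg stands for any generated message value.
//
//	data, err := proto.Marshal(msg)
//	if err == nil {
//		new(proto.Buffer).DebugPrint("msg wire format", data)
//	}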
// SetDefaults sets unset protocol buffer fields to their default values.
// It only modifies fields that are both unset and have defined defaults.
// It recursively sets default values in any non-nil sub-messages.
func SetDefaults(pb Message) {
setDefaults(reflect.ValueOf(pb), true, false)
}
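// Usage sketch (editor's annotation): with the Test message from the package example,
// SetDefaults fills in the declared default for the unset Type field.
//
//	t := &pb.Test{Label: proto.String("hello")} // Type left unset
//	proto.SetDefaults(t)
//	// *t.Type is now 77, the proto-declared default.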
// v is a pointer to a struct.
func setDefaults(v reflect.Value, recur, zeros bool) {
v = v.Elem()
defaultMu.RLock()
dm, ok := defaults[v.Type()]
defaultMu.RUnlock()
if !ok {
dm = buildDefaultMessage(v.Type())
defaultMu.Lock()
defaults[v.Type()] = dm
defaultMu.Unlock()
}
for _, sf := range dm.scalars {
f := v.Field(sf.index)
if !f.IsNil() {
// field already set
continue
}
dv := sf.value
if dv == nil && !zeros {
// no explicit default, and don't want to set zeros
continue
}
fptr := f.Addr().Interface() // **T
// TODO: Consider batching the allocations we do here.
switch sf.kind {
case reflect.Bool:
b := new(bool)
if dv != nil {
*b = dv.(bool)
}
*(fptr.(**bool)) = b
case reflect.Float32:
f := new(float32)
if dv != nil {
*f = dv.(float32)
}
*(fptr.(**float32)) = f
case reflect.Float64:
f := new(float64)
if dv != nil {
*f = dv.(float64)
}
*(fptr.(**float64)) = f
case reflect.Int32:
// might be an enum
if ft := f.Type(); ft != int32PtrType {
// enum
f.Set(reflect.New(ft.Elem()))
if dv != nil {
f.Elem().SetInt(int64(dv.(int32)))
}
} else {
// int32 field
i := new(int32)
if dv != nil {
*i = dv.(int32)
}
*(fptr.(**int32)) = i
}
case reflect.Int64:
i := new(int64)
if dv != nil {
*i = dv.(int64)
}
*(fptr.(**int64)) = i
case reflect.String:
s := new(string)
if dv != nil {
*s = dv.(string)
}
*(fptr.(**string)) = s
case reflect.Uint8:
// exceptional case: []byte
var b []byte
if dv != nil {
db := dv.([]byte)
b = make([]byte, len(db))
copy(b, db)
} else {
b = []byte{}
}
*(fptr.(*[]byte)) = b
case reflect.Uint32:
u := new(uint32)
if dv != nil {
*u = dv.(uint32)
}
*(fptr.(**uint32)) = u
case reflect.Uint64:
u := new(uint64)
if dv != nil {
*u = dv.(uint64)
}
*(fptr.(**uint64)) = u
default:
log.Printf("proto: can't set default for field %v (sf.kind=%v)", f, sf.kind)
}
}
for _, ni := range dm.nested {
f := v.Field(ni)
// f is *T or []*T or map[T]*T
switch f.Kind() {
case reflect.Ptr:
if f.IsNil() {
continue
}
setDefaults(f, recur, zeros)
case reflect.Slice:
for i := 0; i < f.Len(); i++ {
e := f.Index(i)
if e.IsNil() {
continue
}
setDefaults(e, recur, zeros)
}
case reflect.Map:
for _, k := range f.MapKeys() {
e := f.MapIndex(k)
if e.IsNil() {
continue
}
setDefaults(e, recur, zeros)
}
}
}
}
var (
// defaults maps a protocol buffer struct type to a slice of the fields,
// with its scalar fields set to their proto-declared non-zero default values.
defaultMu sync.RWMutex
defaults = make(map[reflect.Type]defaultMessage)
int32PtrType = reflect.TypeOf((*int32)(nil))
)
// defaultMessage represents information about the default values of a message.
type defaultMessage struct {
scalars []scalarField
nested []int // struct field index of nested messages
}
type scalarField struct {
index int // struct field index
kind reflect.Kind // element type (the T in *T or []T)
value interface{} // the proto-declared default value, or nil
}
// t is a struct type.
func buildDefaultMessage(t reflect.Type) (dm defaultMessage) {
sprop := GetProperties(t)
for _, prop := range sprop.Prop {
fi, ok := sprop.decoderTags.get(prop.Tag)
if !ok {
// XXX_unrecognized
continue
}
ft := t.Field(fi).Type
sf, nested, err := fieldDefault(ft, prop)
switch {
case err != nil:
log.Print(err)
case nested:
dm.nested = append(dm.nested, fi)
case sf != nil:
sf.index = fi
dm.scalars = append(dm.scalars, *sf)
}
}
return dm
}
// fieldDefault returns the scalarField for field type ft.
// sf will be nil if the field can not have a default.
// nestedMessage will be true if this is a nested message.
// Note that sf.index is not set on return.
func fieldDefault(ft reflect.Type, prop *Properties) (sf *scalarField, nestedMessage bool, err error) {
var canHaveDefault bool
switch ft.Kind() {
case reflect.Ptr:
if ft.Elem().Kind() == reflect.Struct {
nestedMessage = true
} else {
canHaveDefault = true // proto2 scalar field
}
case reflect.Slice:
switch ft.Elem().Kind() {
case reflect.Ptr:
nestedMessage = true // repeated message
case reflect.Uint8:
canHaveDefault = true // bytes field
}
case reflect.Map:
if ft.Elem().Kind() == reflect.Ptr {
nestedMessage = true // map with message values
}
}
if !canHaveDefault {
if nestedMessage {
return nil, true, nil
}
return nil, false, nil
}
// We now know that ft is a pointer or slice.
sf = &scalarField{kind: ft.Elem().Kind()}
// scalar fields without defaults
if !prop.HasDefault {
return sf, false, nil
}
// a scalar field: either *T or []byte
switch ft.Elem().Kind() {
case reflect.Bool:
x, err := strconv.ParseBool(prop.Default)
if err != nil {
return nil, false, fmt.Errorf("proto: bad default bool %q: %v", prop.Default, err)
}
sf.value = x
case reflect.Float32:
x, err := strconv.ParseFloat(prop.Default, 32)
if err != nil {
return nil, false, fmt.Errorf("proto: bad default float32 %q: %v", prop.Default, err)
}
sf.value = float32(x)
case reflect.Float64:
x, err := strconv.ParseFloat(prop.Default, 64)
if err != nil {
return nil, false, fmt.Errorf("proto: bad default float64 %q: %v", prop.Default, err)
}
sf.value = x
case reflect.Int32:
x, err := strconv.ParseInt(prop.Default, 10, 32)
if err != nil {
return nil, false, fmt.Errorf("proto: bad default int32 %q: %v", prop.Default, err)
}
sf.value = int32(x)
case reflect.Int64:
x, err := strconv.ParseInt(prop.Default, 10, 64)
if err != nil {
return nil, false, fmt.Errorf("proto: bad default int64 %q: %v", prop.Default, err)
}
sf.value = x
case reflect.String:
sf.value = prop.Default
case reflect.Uint8:
// []byte (not *uint8)
sf.value = []byte(prop.Default)
case reflect.Uint32:
x, err := strconv.ParseUint(prop.Default, 10, 32)
if err != nil {
return nil, false, fmt.Errorf("proto: bad default uint32 %q: %v", prop.Default, err)
}
sf.value = uint32(x)
case reflect.Uint64:
x, err := strconv.ParseUint(prop.Default, 10, 64)
if err != nil {
return nil, false, fmt.Errorf("proto: bad default uint64 %q: %v", prop.Default, err)
}
sf.value = x
default:
return nil, false, fmt.Errorf("proto: unhandled def kind %v", ft.Elem().Kind())
}
return sf, false, nil
}
// Map fields may have key types of non-float scalars, strings and enums.
// The easiest way to sort them in some deterministic order is to use fmt.
// If this turns out to be inefficient we can always consider other options,
// such as doing a Schwartzian transform.
func mapKeys(vs []reflect.Value) sort.Interface {
s := mapKeySorter{
vs: vs,
// default Less function: textual comparison
less: func(a, b reflect.Value) bool {
return fmt.Sprint(a.Interface()) < fmt.Sprint(b.Interface())
},
}
// Type specialization per https://developers.google.com/protocol-buffers/docs/proto#maps;
// numeric keys are sorted numerically.
if len(vs) == 0 {
return s
}
switch vs[0].Kind() {
case reflect.Int32, reflect.Int64:
s.less = func(a, b reflect.Value) bool { return a.Int() < b.Int() }
case reflect.Uint32, reflect.Uint64:
s.less = func(a, b reflect.Value) bool { return a.Uint() < b.Uint() }
}
return s
}
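// Usage sketch (editor's annotation): this is how an encoder can obtain a stable
// iteration order over a map field, since Go randomizes map iteration. mv is assumed
// to be a reflect.Value of kind Map.
//
//	keys := mv.MapKeys()
//	sort.Sort(mapKeys(keys))
//	for _, k := range keys {
//		_ = mv.MapIndex(k) // encode the key, then the value
//	}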
type mapKeySorter struct {
vs []reflect.Value
less func(a, b reflect.Value) bool
}
func (s mapKeySorter) Len() int { return len(s.vs) }
func (s mapKeySorter) Swap(i, j int) { s.vs[i], s.vs[j] = s.vs[j], s.vs[i] }
func (s mapKeySorter) Less(i, j int) bool {
return s.less(s.vs[i], s.vs[j])
}
// isProto3Zero reports whether v is a zero proto3 value.
func isProto3Zero(v reflect.Value) bool {
switch v.Kind() {
case reflect.Bool:
return !v.Bool()
case reflect.Int32, reflect.Int64:
return v.Int() == 0
case reflect.Uint32, reflect.Uint64:
return v.Uint() == 0
case reflect.Float32, reflect.Float64:
return v.Float() == 0
case reflect.String:
return v.String() == ""
}
return false
}
// ProtoPackageIsVersion1 is referenced from generated protocol buffer files
// to assert that that code is compatible with this version of the proto package.
const ProtoPackageIsVersion1 = true

vendor/github.com/golang/protobuf/proto/message_set.go generated vendored Normal file

@ -0,0 +1,280 @@
// Go support for Protocol Buffers - Google's data interchange format
//
// Copyright 2010 The Go Authors. All rights reserved.
// https://github.com/golang/protobuf
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above
// copyright notice, this list of conditions and the following disclaimer
// in the documentation and/or other materials provided with the
// distribution.
// * Neither the name of Google Inc. nor the names of its
// contributors may be used to endorse or promote products derived from
// this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
package proto
/*
* Support for message sets.
*/
import (
"bytes"
"encoding/json"
"errors"
"fmt"
"reflect"
"sort"
)
// errNoMessageTypeID occurs when a protocol buffer does not have a message type ID.
// A message type ID is required for storing a protocol buffer in a message set.
var errNoMessageTypeID = errors.New("proto does not have a message type ID")
// The first two types (_MessageSet_Item and messageSet)
// model what the protocol compiler produces for the following protocol message:
// message MessageSet {
// repeated group Item = 1 {
// required int32 type_id = 2;
// required string message = 3;
// };
// }
// That is the MessageSet wire format. We can't use a proto to generate these
// because that would introduce a circular dependency between it and this package.
type _MessageSet_Item struct {
TypeId *int32 `protobuf:"varint,2,req,name=type_id"`
Message []byte `protobuf:"bytes,3,req,name=message"`
}
type messageSet struct {
Item []*_MessageSet_Item `protobuf:"group,1,rep"`
XXX_unrecognized []byte
// TODO: caching?
}
// Make sure messageSet is a Message.
var _ Message = (*messageSet)(nil)
// messageTypeIder is an interface satisfied by a protocol buffer type
// that may be stored in a MessageSet.
type messageTypeIder interface {
MessageTypeId() int32
}
func (ms *messageSet) find(pb Message) *_MessageSet_Item {
mti, ok := pb.(messageTypeIder)
if !ok {
return nil
}
id := mti.MessageTypeId()
for _, item := range ms.Item {
if *item.TypeId == id {
return item
}
}
return nil
}
func (ms *messageSet) Has(pb Message) bool {
if ms.find(pb) != nil {
return true
}
return false
}
func (ms *messageSet) Unmarshal(pb Message) error {
if item := ms.find(pb); item != nil {
return Unmarshal(item.Message, pb)
}
if _, ok := pb.(messageTypeIder); !ok {
return errNoMessageTypeID
}
return nil // TODO: return error instead?
}
func (ms *messageSet) Marshal(pb Message) error {
msg, err := Marshal(pb)
if err != nil {
return err
}
if item := ms.find(pb); item != nil {
// reuse existing item
item.Message = msg
return nil
}
mti, ok := pb.(messageTypeIder)
if !ok {
return errNoMessageTypeID
}
mtid := mti.MessageTypeId()
ms.Item = append(ms.Item, &_MessageSet_Item{
TypeId: &mtid,
Message: msg,
})
return nil
}
func (ms *messageSet) Reset() { *ms = messageSet{} }
func (ms *messageSet) String() string { return CompactTextString(ms) }
func (*messageSet) ProtoMessage() {}
// Support for the message_set_wire_format message option.
func skipVarint(buf []byte) []byte {
i := 0
for ; buf[i]&0x80 != 0; i++ {
}
return buf[i+1:]
}
// MarshalMessageSet encodes the extension map represented by m in the message set wire format.
// It is called by generated Marshal methods on protocol buffer messages with the message_set_wire_format option.
func MarshalMessageSet(m map[int32]Extension) ([]byte, error) {
if err := encodeExtensionMap(m); err != nil {
return nil, err
}
// Sort extension IDs to provide a deterministic encoding.
// See also enc_map in encode.go.
ids := make([]int, 0, len(m))
for id := range m {
ids = append(ids, int(id))
}
sort.Ints(ids)
ms := &messageSet{Item: make([]*_MessageSet_Item, 0, len(m))}
for _, id := range ids {
e := m[int32(id)]
// Remove the wire type and field number varint, as well as the length varint.
msg := skipVarint(skipVarint(e.enc))
ms.Item = append(ms.Item, &_MessageSet_Item{
TypeId: Int32(int32(id)),
Message: msg,
})
}
return Marshal(ms)
}
// UnmarshalMessageSet decodes the extension map encoded in buf in the message set wire format.
// It is called by generated Unmarshal methods on protocol buffer messages with the message_set_wire_format option.
func UnmarshalMessageSet(buf []byte, m map[int32]Extension) error {
ms := new(messageSet)
if err := Unmarshal(buf, ms); err != nil {
return err
}
for _, item := range ms.Item {
id := *item.TypeId
msg := item.Message
// Restore wire type and field number varint, plus length varint.
// Be careful to preserve duplicate items.
b := EncodeVarint(uint64(id)<<3 | WireBytes)
if ext, ok := m[id]; ok {
// Existing data; rip off the tag and length varint
// so we join the new data correctly.
// We can assume that ext.enc is set because we are unmarshaling.
o := ext.enc[len(b):] // skip wire type and field number
_, n := DecodeVarint(o) // calculate length of length varint
o = o[n:] // skip length varint
msg = append(o, msg...) // join old data and new data
}
b = append(b, EncodeVarint(uint64(len(msg)))...)
b = append(b, msg...)
m[id] = Extension{enc: b}
}
return nil
}
// MarshalMessageSetJSON encodes the extension map represented by m in JSON format.
// It is called by generated MarshalJSON methods on protocol buffer messages with the message_set_wire_format option.
func MarshalMessageSetJSON(m map[int32]Extension) ([]byte, error) {
var b bytes.Buffer
b.WriteByte('{')
// Process the map in key order for deterministic output.
ids := make([]int32, 0, len(m))
for id := range m {
ids = append(ids, id)
}
sort.Sort(int32Slice(ids)) // int32Slice defined in text.go
for i, id := range ids {
ext := m[id]
if i > 0 {
b.WriteByte(',')
}
msd, ok := messageSetMap[id]
if !ok {
// Unknown type; we can't render it, so skip it.
continue
}
fmt.Fprintf(&b, `"[%s]":`, msd.name)
x := ext.value
if x == nil {
x = reflect.New(msd.t.Elem()).Interface()
if err := Unmarshal(ext.enc, x.(Message)); err != nil {
return nil, err
}
}
d, err := json.Marshal(x)
if err != nil {
return nil, err
}
b.Write(d)
}
b.WriteByte('}')
return b.Bytes(), nil
}
// UnmarshalMessageSetJSON decodes the extension map encoded in buf in JSON format.
// It is called by generated UnmarshalJSON methods on protocol buffer messages with the message_set_wire_format option.
func UnmarshalMessageSetJSON(buf []byte, m map[int32]Extension) error {
// Common-case fast path.
if len(buf) == 0 || bytes.Equal(buf, []byte("{}")) {
return nil
}
// This is fairly tricky, and it's not clear that it is needed.
return errors.New("TODO: UnmarshalMessageSetJSON not yet implemented")
}
// A global registry of types that can be used in a MessageSet.
var messageSetMap = make(map[int32]messageSetDesc)
type messageSetDesc struct {
t reflect.Type // pointer to struct
name string
}
// RegisterMessageSetType is called from the generated code.
func RegisterMessageSetType(m Message, fieldNum int32, name string) {
messageSetMap[fieldNum] = messageSetDesc{
t: reflect.TypeOf(m),
name: name,
}
}

vendor/github.com/golang/protobuf/proto/pointer_reflect.go generated vendored Normal file

@ -0,0 +1,479 @@
// Go support for Protocol Buffers - Google's data interchange format
//
// Copyright 2012 The Go Authors. All rights reserved.
// https://github.com/golang/protobuf
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above
// copyright notice, this list of conditions and the following disclaimer
// in the documentation and/or other materials provided with the
// distribution.
// * Neither the name of Google Inc. nor the names of its
// contributors may be used to endorse or promote products derived from
// this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
// +build appengine
// This file contains an implementation of proto field accesses using package reflect.
// It is slower than the code in pointer_unsafe.go but it avoids package unsafe and can
// be used on App Engine.
package proto
import (
"math"
"reflect"
)
// A structPointer is a pointer to a struct.
type structPointer struct {
v reflect.Value
}
// toStructPointer returns a structPointer equivalent to the given reflect value.
// The reflect value must itself be a pointer to a struct.
func toStructPointer(v reflect.Value) structPointer {
return structPointer{v}
}
// IsNil reports whether p is nil.
func structPointer_IsNil(p structPointer) bool {
return p.v.IsNil()
}
// Interface returns the struct pointer as an interface value.
func structPointer_Interface(p structPointer, _ reflect.Type) interface{} {
return p.v.Interface()
}
// A field identifies a field in a struct, accessible from a structPointer.
// In this implementation, a field is identified by the sequence of field indices
// passed to reflect's FieldByIndex.
type field []int
// toField returns a field equivalent to the given reflect field.
func toField(f *reflect.StructField) field {
return f.Index
}
// invalidField is an invalid field identifier.
var invalidField = field(nil)
// IsValid reports whether the field identifier is valid.
func (f field) IsValid() bool { return f != nil }
// field returns the given field in the struct as a reflect value.
func structPointer_field(p structPointer, f field) reflect.Value {
// Special case: an extension map entry with a value of type T
// passes a *T to the struct-handling code with a zero field,
// expecting that it will be treated as equivalent to *struct{ X T },
// which has the same memory layout. We have to handle that case
// specially, because reflect will panic if we call FieldByIndex on a
// non-struct.
if f == nil {
return p.v.Elem()
}
return p.v.Elem().FieldByIndex(f)
}
// ifield returns the given field in the struct as an interface value.
func structPointer_ifield(p structPointer, f field) interface{} {
return structPointer_field(p, f).Addr().Interface()
}
// Bytes returns the address of a []byte field in the struct.
func structPointer_Bytes(p structPointer, f field) *[]byte {
return structPointer_ifield(p, f).(*[]byte)
}
// BytesSlice returns the address of a [][]byte field in the struct.
func structPointer_BytesSlice(p structPointer, f field) *[][]byte {
return structPointer_ifield(p, f).(*[][]byte)
}
// Bool returns the address of a *bool field in the struct.
func structPointer_Bool(p structPointer, f field) **bool {
return structPointer_ifield(p, f).(**bool)
}
// BoolVal returns the address of a bool field in the struct.
func structPointer_BoolVal(p structPointer, f field) *bool {
return structPointer_ifield(p, f).(*bool)
}
// BoolSlice returns the address of a []bool field in the struct.
func structPointer_BoolSlice(p structPointer, f field) *[]bool {
return structPointer_ifield(p, f).(*[]bool)
}
// String returns the address of a *string field in the struct.
func structPointer_String(p structPointer, f field) **string {
return structPointer_ifield(p, f).(**string)
}
// StringVal returns the address of a string field in the struct.
func structPointer_StringVal(p structPointer, f field) *string {
return structPointer_ifield(p, f).(*string)
}
// StringSlice returns the address of a []string field in the struct.
func structPointer_StringSlice(p structPointer, f field) *[]string {
return structPointer_ifield(p, f).(*[]string)
}
// ExtMap returns the address of an extension map field in the struct.
func structPointer_ExtMap(p structPointer, f field) *map[int32]Extension {
return structPointer_ifield(p, f).(*map[int32]Extension)
}
// NewAt returns the reflect.Value for a pointer to a field in the struct.
func structPointer_NewAt(p structPointer, f field, typ reflect.Type) reflect.Value {
return structPointer_field(p, f).Addr()
}
// SetStructPointer writes a *struct field in the struct.
func structPointer_SetStructPointer(p structPointer, f field, q structPointer) {
structPointer_field(p, f).Set(q.v)
}
// GetStructPointer reads a *struct field in the struct.
func structPointer_GetStructPointer(p structPointer, f field) structPointer {
return structPointer{structPointer_field(p, f)}
}
// StructPointerSlice the address of a []*struct field in the struct.
func structPointer_StructPointerSlice(p structPointer, f field) structPointerSlice {
return structPointerSlice{structPointer_field(p, f)}
}
// A structPointerSlice represents the address of a slice of pointers to structs
// (themselves messages or groups). That is, v.Type() is *[]*struct{...}.
type structPointerSlice struct {
v reflect.Value
}
func (p structPointerSlice) Len() int { return p.v.Len() }
func (p structPointerSlice) Index(i int) structPointer { return structPointer{p.v.Index(i)} }
func (p structPointerSlice) Append(q structPointer) {
p.v.Set(reflect.Append(p.v, q.v))
}
var (
int32Type = reflect.TypeOf(int32(0))
uint32Type = reflect.TypeOf(uint32(0))
float32Type = reflect.TypeOf(float32(0))
int64Type = reflect.TypeOf(int64(0))
uint64Type = reflect.TypeOf(uint64(0))
float64Type = reflect.TypeOf(float64(0))
)
// A word32 represents a field of type *int32, *uint32, *float32, or *enum.
// That is, v.Type() is *int32, *uint32, *float32, or *enum and v is assignable.
type word32 struct {
v reflect.Value
}
// IsNil reports whether p is nil.
func word32_IsNil(p word32) bool {
return p.v.IsNil()
}
// Set sets p to point at a newly allocated word with bits set to x.
func word32_Set(p word32, o *Buffer, x uint32) {
t := p.v.Type().Elem()
switch t {
case int32Type:
if len(o.int32s) == 0 {
o.int32s = make([]int32, uint32PoolSize)
}
o.int32s[0] = int32(x)
p.v.Set(reflect.ValueOf(&o.int32s[0]))
o.int32s = o.int32s[1:]
return
case uint32Type:
if len(o.uint32s) == 0 {
o.uint32s = make([]uint32, uint32PoolSize)
}
o.uint32s[0] = x
p.v.Set(reflect.ValueOf(&o.uint32s[0]))
o.uint32s = o.uint32s[1:]
return
case float32Type:
if len(o.float32s) == 0 {
o.float32s = make([]float32, uint32PoolSize)
}
o.float32s[0] = math.Float32frombits(x)
p.v.Set(reflect.ValueOf(&o.float32s[0]))
o.float32s = o.float32s[1:]
return
}
// must be enum
p.v.Set(reflect.New(t))
p.v.Elem().SetInt(int64(int32(x)))
}
// Get gets the bits pointed at by p, as a uint32.
func word32_Get(p word32) uint32 {
elem := p.v.Elem()
switch elem.Kind() {
case reflect.Int32:
return uint32(elem.Int())
case reflect.Uint32:
return uint32(elem.Uint())
case reflect.Float32:
return math.Float32bits(float32(elem.Float()))
}
panic("unreachable")
}
// Word32 returns a reference to a *int32, *uint32, *float32, or *enum field in the struct.
func structPointer_Word32(p structPointer, f field) word32 {
return word32{structPointer_field(p, f)}
}
// A word32Val represents a field of type int32, uint32, float32, or enum.
// That is, v.Type() is int32, uint32, float32, or enum and v is assignable.
type word32Val struct {
v reflect.Value
}
// Set sets *p to x.
func word32Val_Set(p word32Val, x uint32) {
switch p.v.Type() {
case int32Type:
p.v.SetInt(int64(x))
return
case uint32Type:
p.v.SetUint(uint64(x))
return
case float32Type:
p.v.SetFloat(float64(math.Float32frombits(x)))
return
}
// must be enum
p.v.SetInt(int64(int32(x)))
}
// Get gets the bits pointed at by p, as a uint32.
func word32Val_Get(p word32Val) uint32 {
elem := p.v
switch elem.Kind() {
case reflect.Int32:
return uint32(elem.Int())
case reflect.Uint32:
return uint32(elem.Uint())
case reflect.Float32:
return math.Float32bits(float32(elem.Float()))
}
panic("unreachable")
}
// Word32Val returns a reference to a int32, uint32, float32, or enum field in the struct.
func structPointer_Word32Val(p structPointer, f field) word32Val {
return word32Val{structPointer_field(p, f)}
}
// A word32Slice is a slice of 32-bit values.
// That is, v.Type() is []int32, []uint32, []float32, or []enum.
type word32Slice struct {
v reflect.Value
}
func (p word32Slice) Append(x uint32) {
n, m := p.v.Len(), p.v.Cap()
if n < m {
p.v.SetLen(n + 1)
} else {
t := p.v.Type().Elem()
p.v.Set(reflect.Append(p.v, reflect.Zero(t)))
}
elem := p.v.Index(n)
switch elem.Kind() {
case reflect.Int32:
elem.SetInt(int64(int32(x)))
case reflect.Uint32:
elem.SetUint(uint64(x))
case reflect.Float32:
elem.SetFloat(float64(math.Float32frombits(x)))
}
}
func (p word32Slice) Len() int {
return p.v.Len()
}
func (p word32Slice) Index(i int) uint32 {
elem := p.v.Index(i)
switch elem.Kind() {
case reflect.Int32:
return uint32(elem.Int())
case reflect.Uint32:
return uint32(elem.Uint())
case reflect.Float32:
return math.Float32bits(float32(elem.Float()))
}
panic("unreachable")
}
// Word32Slice returns a reference to a []int32, []uint32, []float32, or []enum field in the struct.
func structPointer_Word32Slice(p structPointer, f field) word32Slice {
return word32Slice{structPointer_field(p, f)}
}
// word64 is like word32 but for 64-bit values.
type word64 struct {
v reflect.Value
}
func word64_Set(p word64, o *Buffer, x uint64) {
t := p.v.Type().Elem()
switch t {
case int64Type:
if len(o.int64s) == 0 {
o.int64s = make([]int64, uint64PoolSize)
}
o.int64s[0] = int64(x)
p.v.Set(reflect.ValueOf(&o.int64s[0]))
o.int64s = o.int64s[1:]
return
case uint64Type:
if len(o.uint64s) == 0 {
o.uint64s = make([]uint64, uint64PoolSize)
}
o.uint64s[0] = x
p.v.Set(reflect.ValueOf(&o.uint64s[0]))
o.uint64s = o.uint64s[1:]
return
case float64Type:
if len(o.float64s) == 0 {
o.float64s = make([]float64, uint64PoolSize)
}
o.float64s[0] = math.Float64frombits(x)
p.v.Set(reflect.ValueOf(&o.float64s[0]))
o.float64s = o.float64s[1:]
return
}
panic("unreachable")
}
func word64_IsNil(p word64) bool {
return p.v.IsNil()
}
func word64_Get(p word64) uint64 {
elem := p.v.Elem()
switch elem.Kind() {
case reflect.Int64:
return uint64(elem.Int())
case reflect.Uint64:
return elem.Uint()
case reflect.Float64:
return math.Float64bits(elem.Float())
}
panic("unreachable")
}
func structPointer_Word64(p structPointer, f field) word64 {
return word64{structPointer_field(p, f)}
}
// word64Val is like word32Val but for 64-bit values.
type word64Val struct {
v reflect.Value
}
func word64Val_Set(p word64Val, o *Buffer, x uint64) {
switch p.v.Type() {
case int64Type:
p.v.SetInt(int64(x))
return
case uint64Type:
p.v.SetUint(x)
return
case float64Type:
p.v.SetFloat(math.Float64frombits(x))
return
}
panic("unreachable")
}
func word64Val_Get(p word64Val) uint64 {
elem := p.v
switch elem.Kind() {
case reflect.Int64:
return uint64(elem.Int())
case reflect.Uint64:
return elem.Uint()
case reflect.Float64:
return math.Float64bits(elem.Float())
}
panic("unreachable")
}
func structPointer_Word64Val(p structPointer, f field) word64Val {
return word64Val{structPointer_field(p, f)}
}
type word64Slice struct {
v reflect.Value
}
func (p word64Slice) Append(x uint64) {
n, m := p.v.Len(), p.v.Cap()
if n < m {
p.v.SetLen(n + 1)
} else {
t := p.v.Type().Elem()
p.v.Set(reflect.Append(p.v, reflect.Zero(t)))
}
elem := p.v.Index(n)
switch elem.Kind() {
case reflect.Int64:
elem.SetInt(int64(int64(x)))
case reflect.Uint64:
elem.SetUint(uint64(x))
case reflect.Float64:
elem.SetFloat(float64(math.Float64frombits(x)))
}
}
func (p word64Slice) Len() int {
return p.v.Len()
}
func (p word64Slice) Index(i int) uint64 {
elem := p.v.Index(i)
switch elem.Kind() {
case reflect.Int64:
return uint64(elem.Int())
case reflect.Uint64:
return uint64(elem.Uint())
case reflect.Float64:
return math.Float64bits(float64(elem.Float()))
}
panic("unreachable")
}
func structPointer_Word64Slice(p structPointer, f field) word64Slice {
return word64Slice{structPointer_field(p, f)}
}

vendor/github.com/golang/protobuf/proto/pointer_unsafe.go generated vendored Normal file

@ -0,0 +1,266 @@
// Go support for Protocol Buffers - Google's data interchange format
//
// Copyright 2012 The Go Authors. All rights reserved.
// https://github.com/golang/protobuf
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above
// copyright notice, this list of conditions and the following disclaimer
// in the documentation and/or other materials provided with the
// distribution.
// * Neither the name of Google Inc. nor the names of its
// contributors may be used to endorse or promote products derived from
// this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
// +build !appengine
// This file contains the implementation of the proto field accesses using package unsafe.
package proto
import (
"reflect"
"unsafe"
)
// NOTE: These type_Foo functions would more idiomatically be methods,
// but Go does not allow methods on pointer types, and we must preserve
// some pointer type for the garbage collector. We use these
// funcs with clunky names as our poor approximation to methods.
//
// An alternative would be
// type structPointer struct { p unsafe.Pointer }
// but that does not registerize as well.
// A structPointer is a pointer to a struct.
type structPointer unsafe.Pointer
// toStructPointer returns a structPointer equivalent to the given reflect value.
func toStructPointer(v reflect.Value) structPointer {
return structPointer(unsafe.Pointer(v.Pointer()))
}
// IsNil reports whether p is nil.
func structPointer_IsNil(p structPointer) bool {
return p == nil
}
// Interface returns the struct pointer, assumed to have element type t,
// as an interface value.
func structPointer_Interface(p structPointer, t reflect.Type) interface{} {
return reflect.NewAt(t, unsafe.Pointer(p)).Interface()
}
// A field identifies a field in a struct, accessible from a structPointer.
// In this implementation, a field is identified by its byte offset from the start of the struct.
type field uintptr
// toField returns a field equivalent to the given reflect field.
func toField(f *reflect.StructField) field {
return field(f.Offset)
}
// invalidField is an invalid field identifier.
const invalidField = ^field(0)
// IsValid reports whether the field identifier is valid.
func (f field) IsValid() bool {
return f != ^field(0)
}
// Bytes returns the address of a []byte field in the struct.
func structPointer_Bytes(p structPointer, f field) *[]byte {
return (*[]byte)(unsafe.Pointer(uintptr(p) + uintptr(f)))
}
// BytesSlice returns the address of a [][]byte field in the struct.
func structPointer_BytesSlice(p structPointer, f field) *[][]byte {
return (*[][]byte)(unsafe.Pointer(uintptr(p) + uintptr(f)))
}
// Bool returns the address of a *bool field in the struct.
func structPointer_Bool(p structPointer, f field) **bool {
return (**bool)(unsafe.Pointer(uintptr(p) + uintptr(f)))
}
// BoolVal returns the address of a bool field in the struct.
func structPointer_BoolVal(p structPointer, f field) *bool {
return (*bool)(unsafe.Pointer(uintptr(p) + uintptr(f)))
}
// BoolSlice returns the address of a []bool field in the struct.
func structPointer_BoolSlice(p structPointer, f field) *[]bool {
return (*[]bool)(unsafe.Pointer(uintptr(p) + uintptr(f)))
}
// String returns the address of a *string field in the struct.
func structPointer_String(p structPointer, f field) **string {
return (**string)(unsafe.Pointer(uintptr(p) + uintptr(f)))
}
// StringVal returns the address of a string field in the struct.
func structPointer_StringVal(p structPointer, f field) *string {
return (*string)(unsafe.Pointer(uintptr(p) + uintptr(f)))
}
// StringSlice returns the address of a []string field in the struct.
func structPointer_StringSlice(p structPointer, f field) *[]string {
return (*[]string)(unsafe.Pointer(uintptr(p) + uintptr(f)))
}
// ExtMap returns the address of an extension map field in the struct.
func structPointer_ExtMap(p structPointer, f field) *map[int32]Extension {
return (*map[int32]Extension)(unsafe.Pointer(uintptr(p) + uintptr(f)))
}
// NewAt returns the reflect.Value for a pointer to a field in the struct.
func structPointer_NewAt(p structPointer, f field, typ reflect.Type) reflect.Value {
return reflect.NewAt(typ, unsafe.Pointer(uintptr(p)+uintptr(f)))
}
// SetStructPointer writes a *struct field in the struct.
func structPointer_SetStructPointer(p structPointer, f field, q structPointer) {
*(*structPointer)(unsafe.Pointer(uintptr(p) + uintptr(f))) = q
}
// GetStructPointer reads a *struct field in the struct.
func structPointer_GetStructPointer(p structPointer, f field) structPointer {
return *(*structPointer)(unsafe.Pointer(uintptr(p) + uintptr(f)))
}
// StructPointerSlice the address of a []*struct field in the struct.
func structPointer_StructPointerSlice(p structPointer, f field) *structPointerSlice {
return (*structPointerSlice)(unsafe.Pointer(uintptr(p) + uintptr(f)))
}
// A structPointerSlice represents a slice of pointers to structs (themselves submessages or groups).
type structPointerSlice []structPointer
func (v *structPointerSlice) Len() int { return len(*v) }
func (v *structPointerSlice) Index(i int) structPointer { return (*v)[i] }
func (v *structPointerSlice) Append(p structPointer) { *v = append(*v, p) }
// A word32 is the address of a "pointer to 32-bit value" field.
type word32 **uint32
// IsNil reports whether *v is nil.
func word32_IsNil(p word32) bool {
return *p == nil
}
// Set sets *v to point at a newly allocated word set to x.
func word32_Set(p word32, o *Buffer, x uint32) {
if len(o.uint32s) == 0 {
o.uint32s = make([]uint32, uint32PoolSize)
}
o.uint32s[0] = x
*p = &o.uint32s[0]
o.uint32s = o.uint32s[1:]
}
// Get gets the value pointed at by *v.
func word32_Get(p word32) uint32 {
return **p
}
// Word32 returns the address of a *int32, *uint32, *float32, or *enum field in the struct.
func structPointer_Word32(p structPointer, f field) word32 {
return word32((**uint32)(unsafe.Pointer(uintptr(p) + uintptr(f))))
}
// A word32Val is the address of a 32-bit value field.
type word32Val *uint32
// Set sets *p to x.
func word32Val_Set(p word32Val, x uint32) {
*p = x
}
// Get gets the value pointed at by p.
func word32Val_Get(p word32Val) uint32 {
return *p
}
// Word32Val returns the address of a *int32, *uint32, *float32, or *enum field in the struct.
func structPointer_Word32Val(p structPointer, f field) word32Val {
return word32Val((*uint32)(unsafe.Pointer(uintptr(p) + uintptr(f))))
}
// A word32Slice is a slice of 32-bit values.
type word32Slice []uint32
func (v *word32Slice) Append(x uint32) { *v = append(*v, x) }
func (v *word32Slice) Len() int { return len(*v) }
func (v *word32Slice) Index(i int) uint32 { return (*v)[i] }
// Word32Slice returns the address of a []int32, []uint32, []float32, or []enum field in the struct.
func structPointer_Word32Slice(p structPointer, f field) *word32Slice {
return (*word32Slice)(unsafe.Pointer(uintptr(p) + uintptr(f)))
}
// word64 is like word32 but for 64-bit values.
type word64 **uint64
func word64_Set(p word64, o *Buffer, x uint64) {
if len(o.uint64s) == 0 {
o.uint64s = make([]uint64, uint64PoolSize)
}
o.uint64s[0] = x
*p = &o.uint64s[0]
o.uint64s = o.uint64s[1:]
}
func word64_IsNil(p word64) bool {
return *p == nil
}
func word64_Get(p word64) uint64 {
return **p
}
func structPointer_Word64(p structPointer, f field) word64 {
return word64((**uint64)(unsafe.Pointer(uintptr(p) + uintptr(f))))
}
// word64Val is like word32Val but for 64-bit values.
type word64Val *uint64
func word64Val_Set(p word64Val, o *Buffer, x uint64) {
*p = x
}
func word64Val_Get(p word64Val) uint64 {
return *p
}
func structPointer_Word64Val(p structPointer, f field) word64Val {
return word64Val((*uint64)(unsafe.Pointer(uintptr(p) + uintptr(f))))
}
// word64Slice is like word32Slice but for 64-bit values.
type word64Slice []uint64
func (v *word64Slice) Append(x uint64) { *v = append(*v, x) }
func (v *word64Slice) Len() int { return len(*v) }
func (v *word64Slice) Index(i int) uint64 { return (*v)[i] }
func structPointer_Word64Slice(p structPointer, f field) *word64Slice {
return (*word64Slice)(unsafe.Pointer(uintptr(p) + uintptr(f)))
}

vendor/github.com/golang/protobuf/proto/properties.go generated vendored Normal file

@ -0,0 +1,842 @@
// Go support for Protocol Buffers - Google's data interchange format
//
// Copyright 2010 The Go Authors. All rights reserved.
// https://github.com/golang/protobuf
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above
// copyright notice, this list of conditions and the following disclaimer
// in the documentation and/or other materials provided with the
// distribution.
// * Neither the name of Google Inc. nor the names of its
// contributors may be used to endorse or promote products derived from
// this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
package proto
/*
* Routines for encoding data into the wire format for protocol buffers.
*/
import (
"fmt"
"log"
"os"
"reflect"
"sort"
"strconv"
"strings"
"sync"
)
const debug bool = false
// Constants that identify the encoding of a value on the wire.
const (
WireVarint = 0
WireFixed64 = 1
WireBytes = 2
WireStartGroup = 3
WireEndGroup = 4
WireFixed32 = 5
)
const startSize = 10 // initial slice/string sizes
// Encoders are defined in encode.go
// An encoder outputs the full representation of a field, including its
// tag and encoder type.
type encoder func(p *Buffer, prop *Properties, base structPointer) error
// A valueEncoder encodes a single integer in a particular encoding.
type valueEncoder func(o *Buffer, x uint64) error
// Sizers are defined in encode.go
// A sizer returns the encoded size of a field, including its tag and encoder
// type.
type sizer func(prop *Properties, base structPointer) int
// A valueSizer returns the encoded size of a single integer in a particular
// encoding.
type valueSizer func(x uint64) int
// Decoders are defined in decode.go
// A decoder creates a value from its wire representation.
// Unrecognized subelements are saved in unrec.
type decoder func(p *Buffer, prop *Properties, base structPointer) error
// A valueDecoder decodes a single integer in a particular encoding.
type valueDecoder func(o *Buffer) (x uint64, err error)
// A oneofMarshaler does the marshaling for all oneof fields in a message.
type oneofMarshaler func(Message, *Buffer) error
// A oneofUnmarshaler does the unmarshaling for a oneof field in a message.
type oneofUnmarshaler func(Message, int, int, *Buffer) (bool, error)
// A oneofSizer does the sizing for all oneof fields in a message.
type oneofSizer func(Message) int
// tagMap is an optimization over map[int]int for typical protocol buffer
// use-cases. Encoded protocol buffers are often in tag order with small tag
// numbers.
type tagMap struct {
fastTags []int
slowTags map[int]int
}
// tagMapFastLimit is the upper bound on the tag number that will be stored in
// the tagMap slice rather than its map.
const tagMapFastLimit = 1024
func (p *tagMap) get(t int) (int, bool) {
if t > 0 && t < tagMapFastLimit {
if t >= len(p.fastTags) {
return 0, false
}
fi := p.fastTags[t]
return fi, fi >= 0
}
fi, ok := p.slowTags[t]
return fi, ok
}
func (p *tagMap) put(t int, fi int) {
if t > 0 && t < tagMapFastLimit {
for len(p.fastTags) < t+1 {
p.fastTags = append(p.fastTags, -1)
}
p.fastTags[t] = fi
return
}
if p.slowTags == nil {
p.slowTags = make(map[int]int)
}
p.slowTags[t] = fi
}
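// Usage sketch (editor's annotation): small tags live in the dense fastTags slice,
// while anything at or above tagMapFastLimit falls back to the map; callers only
// go through get and put.
//
//	var tm tagMap
//	tm.put(3, 0)        // field with tag 3 is struct field 0
//	tm.put(5000, 1)     // large tag, stored in slowTags
//	fi, ok := tm.get(3) // fi == 0, ok == true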
// StructProperties represents properties for all the fields of a struct.
// decoderTags and decoderOrigNames should only be used by the decoder.
type StructProperties struct {
Prop []*Properties // properties for each field
reqCount int // required count
decoderTags tagMap // map from proto tag to struct field number
decoderOrigNames map[string]int // map from original name to struct field number
order []int // list of struct field numbers in tag order
unrecField field // field id of the XXX_unrecognized []byte field
extendable bool // is this an extendable proto
oneofMarshaler oneofMarshaler
oneofUnmarshaler oneofUnmarshaler
oneofSizer oneofSizer
stype reflect.Type
// OneofTypes contains information about the oneof fields in this message.
// It is keyed by the original name of a field.
OneofTypes map[string]*OneofProperties
}
// OneofProperties represents information about a specific field in a oneof.
type OneofProperties struct {
Type reflect.Type // pointer to generated struct type for this oneof field
Field int // struct field number of the containing oneof in the message
Prop *Properties
}
// Implement the sorting interface so we can sort the fields in tag order, as recommended by the spec.
// See encode.go, (*Buffer).enc_struct.
func (sp *StructProperties) Len() int { return len(sp.order) }
func (sp *StructProperties) Less(i, j int) bool {
return sp.Prop[sp.order[i]].Tag < sp.Prop[sp.order[j]].Tag
}
func (sp *StructProperties) Swap(i, j int) { sp.order[i], sp.order[j] = sp.order[j], sp.order[i] }
// Properties represents the protocol-specific behavior of a single struct field.
type Properties struct {
Name string // name of the field, for error messages
OrigName string // original name before protocol compiler (always set)
Wire string
WireType int
Tag int
Required bool
Optional bool
Repeated bool
Packed bool // relevant for repeated primitives only
Enum string // set for enum types only
proto3 bool // whether this is known to be a proto3 field; set for []byte only
oneof bool // whether this is a oneof field
Default string // default value
HasDefault bool // whether an explicit default was provided
def_uint64 uint64
enc encoder
valEnc valueEncoder // set for bool and numeric types only
field field
tagcode []byte // encoding of EncodeVarint((Tag<<3)|WireType)
tagbuf [8]byte
stype reflect.Type // set for struct types only
sprop *StructProperties // set for struct types only
isMarshaler bool
isUnmarshaler bool
mtype reflect.Type // set for map types only
mkeyprop *Properties // set for map types only
mvalprop *Properties // set for map types only
size sizer
valSize valueSizer // set for bool and numeric types only
dec decoder
valDec valueDecoder // set for bool and numeric types only
// If this is a packable field, this will be the decoder for the packed version of the field.
packedDec decoder
}
// String formats the properties in the protobuf struct field tag style.
func (p *Properties) String() string {
s := p.Wire
s = ","
s += strconv.Itoa(p.Tag)
if p.Required {
s += ",req"
}
if p.Optional {
s += ",opt"
}
if p.Repeated {
s += ",rep"
}
if p.Packed {
s += ",packed"
}
if p.OrigName != p.Name {
s += ",name=" + p.OrigName
}
if p.proto3 {
s += ",proto3"
}
if p.oneof {
s += ",oneof"
}
if len(p.Enum) > 0 {
s += ",enum=" + p.Enum
}
if p.HasDefault {
s += ",def=" + p.Default
}
return s
}
// Parse populates p by parsing a string in the protobuf struct field tag style.
func (p *Properties) Parse(s string) {
// "bytes,49,opt,name=foo,def=hello!"
fields := strings.Split(s, ",") // breaks def=, but handled below.
if len(fields) < 2 {
fmt.Fprintf(os.Stderr, "proto: tag has too few fields: %q\n", s)
return
}
p.Wire = fields[0]
switch p.Wire {
case "varint":
p.WireType = WireVarint
p.valEnc = (*Buffer).EncodeVarint
p.valDec = (*Buffer).DecodeVarint
p.valSize = sizeVarint
case "fixed32":
p.WireType = WireFixed32
p.valEnc = (*Buffer).EncodeFixed32
p.valDec = (*Buffer).DecodeFixed32
p.valSize = sizeFixed32
case "fixed64":
p.WireType = WireFixed64
p.valEnc = (*Buffer).EncodeFixed64
p.valDec = (*Buffer).DecodeFixed64
p.valSize = sizeFixed64
case "zigzag32":
p.WireType = WireVarint
p.valEnc = (*Buffer).EncodeZigzag32
p.valDec = (*Buffer).DecodeZigzag32
p.valSize = sizeZigzag32
case "zigzag64":
p.WireType = WireVarint
p.valEnc = (*Buffer).EncodeZigzag64
p.valDec = (*Buffer).DecodeZigzag64
p.valSize = sizeZigzag64
case "bytes", "group":
p.WireType = WireBytes
// no numeric converter for non-numeric types
default:
fmt.Fprintf(os.Stderr, "proto: tag has unknown wire type: %q\n", s)
return
}
var err error
p.Tag, err = strconv.Atoi(fields[1])
if err != nil {
return
}
for i := 2; i < len(fields); i++ {
f := fields[i]
switch {
case f == "req":
p.Required = true
case f == "opt":
p.Optional = true
case f == "rep":
p.Repeated = true
case f == "packed":
p.Packed = true
case strings.HasPrefix(f, "name="):
p.OrigName = f[5:]
case strings.HasPrefix(f, "enum="):
p.Enum = f[5:]
case f == "proto3":
p.proto3 = true
case f == "oneof":
p.oneof = true
case strings.HasPrefix(f, "def="):
p.HasDefault = true
p.Default = f[4:] // rest of string
if i+1 < len(fields) {
// Commas aren't escaped, and def is always last.
p.Default += "," + strings.Join(fields[i+1:], ",")
break
}
}
}
}
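// Usage sketch (editor's annotation): Parse reads the tag format written by the
// protocol compiler, and String re-renders it in the same style.
//
//	var p proto.Properties
//	p.Parse("bytes,1,req,name=label")
//	// p.Wire == "bytes", p.Tag == 1, p.Required == true, p.OrigName == "label"
//	_ = p.String()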
func logNoSliceEnc(t1, t2 reflect.Type) {
fmt.Fprintf(os.Stderr, "proto: no slice oenc for %T = []%T\n", t1, t2)
}
var protoMessageType = reflect.TypeOf((*Message)(nil)).Elem()
// Initialize the fields for encoding and decoding.
func (p *Properties) setEncAndDec(typ reflect.Type, f *reflect.StructField, lockGetProp bool) {
p.enc = nil
p.dec = nil
p.size = nil
switch t1 := typ; t1.Kind() {
default:
fmt.Fprintf(os.Stderr, "proto: no coders for %v\n", t1)
// proto3 scalar types
case reflect.Bool:
p.enc = (*Buffer).enc_proto3_bool
p.dec = (*Buffer).dec_proto3_bool
p.size = size_proto3_bool
case reflect.Int32:
p.enc = (*Buffer).enc_proto3_int32
p.dec = (*Buffer).dec_proto3_int32
p.size = size_proto3_int32
case reflect.Uint32:
p.enc = (*Buffer).enc_proto3_uint32
p.dec = (*Buffer).dec_proto3_int32 // can reuse
p.size = size_proto3_uint32
case reflect.Int64, reflect.Uint64:
p.enc = (*Buffer).enc_proto3_int64
p.dec = (*Buffer).dec_proto3_int64
p.size = size_proto3_int64
case reflect.Float32:
p.enc = (*Buffer).enc_proto3_uint32 // can just treat them as bits
p.dec = (*Buffer).dec_proto3_int32
p.size = size_proto3_uint32
case reflect.Float64:
p.enc = (*Buffer).enc_proto3_int64 // can just treat them as bits
p.dec = (*Buffer).dec_proto3_int64
p.size = size_proto3_int64
case reflect.String:
p.enc = (*Buffer).enc_proto3_string
p.dec = (*Buffer).dec_proto3_string
p.size = size_proto3_string
case reflect.Ptr:
switch t2 := t1.Elem(); t2.Kind() {
default:
fmt.Fprintf(os.Stderr, "proto: no encoder function for %v -> %v\n", t1, t2)
break
case reflect.Bool:
p.enc = (*Buffer).enc_bool
p.dec = (*Buffer).dec_bool
p.size = size_bool
case reflect.Int32:
p.enc = (*Buffer).enc_int32
p.dec = (*Buffer).dec_int32
p.size = size_int32
case reflect.Uint32:
p.enc = (*Buffer).enc_uint32
p.dec = (*Buffer).dec_int32 // can reuse
p.size = size_uint32
case reflect.Int64, reflect.Uint64:
p.enc = (*Buffer).enc_int64
p.dec = (*Buffer).dec_int64
p.size = size_int64
case reflect.Float32:
p.enc = (*Buffer).enc_uint32 // can just treat them as bits
p.dec = (*Buffer).dec_int32
p.size = size_uint32
case reflect.Float64:
p.enc = (*Buffer).enc_int64 // can just treat them as bits
p.dec = (*Buffer).dec_int64
p.size = size_int64
case reflect.String:
p.enc = (*Buffer).enc_string
p.dec = (*Buffer).dec_string
p.size = size_string
case reflect.Struct:
p.stype = t1.Elem()
p.isMarshaler = isMarshaler(t1)
p.isUnmarshaler = isUnmarshaler(t1)
if p.Wire == "bytes" {
p.enc = (*Buffer).enc_struct_message
p.dec = (*Buffer).dec_struct_message
p.size = size_struct_message
} else {
p.enc = (*Buffer).enc_struct_group
p.dec = (*Buffer).dec_struct_group
p.size = size_struct_group
}
}
case reflect.Slice:
switch t2 := t1.Elem(); t2.Kind() {
default:
logNoSliceEnc(t1, t2)
break
case reflect.Bool:
if p.Packed {
p.enc = (*Buffer).enc_slice_packed_bool
p.size = size_slice_packed_bool
} else {
p.enc = (*Buffer).enc_slice_bool
p.size = size_slice_bool
}
p.dec = (*Buffer).dec_slice_bool
p.packedDec = (*Buffer).dec_slice_packed_bool
case reflect.Int32:
if p.Packed {
p.enc = (*Buffer).enc_slice_packed_int32
p.size = size_slice_packed_int32
} else {
p.enc = (*Buffer).enc_slice_int32
p.size = size_slice_int32
}
p.dec = (*Buffer).dec_slice_int32
p.packedDec = (*Buffer).dec_slice_packed_int32
case reflect.Uint32:
if p.Packed {
p.enc = (*Buffer).enc_slice_packed_uint32
p.size = size_slice_packed_uint32
} else {
p.enc = (*Buffer).enc_slice_uint32
p.size = size_slice_uint32
}
p.dec = (*Buffer).dec_slice_int32
p.packedDec = (*Buffer).dec_slice_packed_int32
case reflect.Int64, reflect.Uint64:
if p.Packed {
p.enc = (*Buffer).enc_slice_packed_int64
p.size = size_slice_packed_int64
} else {
p.enc = (*Buffer).enc_slice_int64
p.size = size_slice_int64
}
p.dec = (*Buffer).dec_slice_int64
p.packedDec = (*Buffer).dec_slice_packed_int64
case reflect.Uint8:
p.enc = (*Buffer).enc_slice_byte
p.dec = (*Buffer).dec_slice_byte
p.size = size_slice_byte
// This is a []byte, which is either a bytes field,
// or the value of a map field. In the latter case,
// we always encode an empty []byte, so we should not
// use the proto3 enc/size funcs.
// f == nil iff this is the key/value of a map field.
if p.proto3 && f != nil {
p.enc = (*Buffer).enc_proto3_slice_byte
p.size = size_proto3_slice_byte
}
case reflect.Float32, reflect.Float64:
switch t2.Bits() {
case 32:
// can just treat them as bits
if p.Packed {
p.enc = (*Buffer).enc_slice_packed_uint32
p.size = size_slice_packed_uint32
} else {
p.enc = (*Buffer).enc_slice_uint32
p.size = size_slice_uint32
}
p.dec = (*Buffer).dec_slice_int32
p.packedDec = (*Buffer).dec_slice_packed_int32
case 64:
// can just treat them as bits
if p.Packed {
p.enc = (*Buffer).enc_slice_packed_int64
p.size = size_slice_packed_int64
} else {
p.enc = (*Buffer).enc_slice_int64
p.size = size_slice_int64
}
p.dec = (*Buffer).dec_slice_int64
p.packedDec = (*Buffer).dec_slice_packed_int64
default:
logNoSliceEnc(t1, t2)
break
}
case reflect.String:
p.enc = (*Buffer).enc_slice_string
p.dec = (*Buffer).dec_slice_string
p.size = size_slice_string
case reflect.Ptr:
switch t3 := t2.Elem(); t3.Kind() {
default:
fmt.Fprintf(os.Stderr, "proto: no ptr oenc for %T -> %T -> %T\n", t1, t2, t3)
break
case reflect.Struct:
p.stype = t2.Elem()
p.isMarshaler = isMarshaler(t2)
p.isUnmarshaler = isUnmarshaler(t2)
if p.Wire == "bytes" {
p.enc = (*Buffer).enc_slice_struct_message
p.dec = (*Buffer).dec_slice_struct_message
p.size = size_slice_struct_message
} else {
p.enc = (*Buffer).enc_slice_struct_group
p.dec = (*Buffer).dec_slice_struct_group
p.size = size_slice_struct_group
}
}
case reflect.Slice:
switch t2.Elem().Kind() {
default:
fmt.Fprintf(os.Stderr, "proto: no slice elem oenc for %T -> %T -> %T\n", t1, t2, t2.Elem())
break
case reflect.Uint8:
p.enc = (*Buffer).enc_slice_slice_byte
p.dec = (*Buffer).dec_slice_slice_byte
p.size = size_slice_slice_byte
}
}
case reflect.Map:
p.enc = (*Buffer).enc_new_map
p.dec = (*Buffer).dec_new_map
p.size = size_new_map
p.mtype = t1
p.mkeyprop = &Properties{}
p.mkeyprop.init(reflect.PtrTo(p.mtype.Key()), "Key", f.Tag.Get("protobuf_key"), nil, lockGetProp)
p.mvalprop = &Properties{}
vtype := p.mtype.Elem()
if vtype.Kind() != reflect.Ptr && vtype.Kind() != reflect.Slice {
// The value type is not a message (*T) or bytes ([]byte),
// so we need encoders for the pointer to this type.
vtype = reflect.PtrTo(vtype)
}
p.mvalprop.init(vtype, "Value", f.Tag.Get("protobuf_val"), nil, lockGetProp)
}
// precalculate tag code
wire := p.WireType
if p.Packed {
wire = WireBytes
}
x := uint32(p.Tag)<<3 | uint32(wire)
i := 0
for i = 0; x > 127; i++ {
p.tagbuf[i] = 0x80 | uint8(x&0x7F)
x >>= 7
}
p.tagbuf[i] = uint8(x)
p.tagcode = p.tagbuf[0 : i+1]
if p.stype != nil {
if lockGetProp {
p.sprop = GetProperties(p.stype)
} else {
p.sprop = getPropertiesLocked(p.stype)
}
}
}
var (
marshalerType = reflect.TypeOf((*Marshaler)(nil)).Elem()
unmarshalerType = reflect.TypeOf((*Unmarshaler)(nil)).Elem()
)
// isMarshaler reports whether type t implements Marshaler.
func isMarshaler(t reflect.Type) bool {
// We're checking for (likely) pointer-receiver methods
// so if t is not a pointer, something is very wrong.
// The calls above only invoke isMarshaler on pointer types.
if t.Kind() != reflect.Ptr {
panic("proto: misuse of isMarshaler")
}
return t.Implements(marshalerType)
}
// isUnmarshaler reports whether type t implements Unmarshaler.
func isUnmarshaler(t reflect.Type) bool {
// We're checking for (likely) pointer-receiver methods
// so if t is not a pointer, something is very wrong.
// The calls above only invoke isUnmarshaler on pointer types.
if t.Kind() != reflect.Ptr {
panic("proto: misuse of isUnmarshaler")
}
return t.Implements(unmarshalerType)
}
// Init populates the properties from a protocol buffer struct tag.
func (p *Properties) Init(typ reflect.Type, name, tag string, f *reflect.StructField) {
p.init(typ, name, tag, f, true)
}
func (p *Properties) init(typ reflect.Type, name, tag string, f *reflect.StructField, lockGetProp bool) {
// "bytes,49,opt,def=hello!"
p.Name = name
p.OrigName = name
if f != nil {
p.field = toField(f)
}
if tag == "" {
return
}
p.Parse(tag)
p.setEncAndDec(typ, f, lockGetProp)
}
var (
propertiesMu sync.RWMutex
propertiesMap = make(map[reflect.Type]*StructProperties)
)
// GetProperties returns the list of properties for the type represented by t.
// t must represent a generated struct type of a protocol message.
func GetProperties(t reflect.Type) *StructProperties {
if t.Kind() != reflect.Struct {
panic("proto: type must have kind struct")
}
// Most calls to GetProperties in a long-running program will be
// retrieving details for types we have seen before.
propertiesMu.RLock()
sprop, ok := propertiesMap[t]
propertiesMu.RUnlock()
if ok {
if collectStats {
stats.Chit++
}
return sprop
}
propertiesMu.Lock()
sprop = getPropertiesLocked(t)
propertiesMu.Unlock()
return sprop
}
// getPropertiesLocked requires that propertiesMu is held.
func getPropertiesLocked(t reflect.Type) *StructProperties {
if prop, ok := propertiesMap[t]; ok {
if collectStats {
stats.Chit++
}
return prop
}
if collectStats {
stats.Cmiss++
}
prop := new(StructProperties)
// in case of recursive protos, fill this in now.
propertiesMap[t] = prop
// build properties
prop.extendable = reflect.PtrTo(t).Implements(extendableProtoType)
prop.unrecField = invalidField
prop.Prop = make([]*Properties, t.NumField())
prop.order = make([]int, t.NumField())
for i := 0; i < t.NumField(); i++ {
f := t.Field(i)
p := new(Properties)
name := f.Name
p.init(f.Type, name, f.Tag.Get("protobuf"), &f, false)
if f.Name == "XXX_extensions" { // special case
p.enc = (*Buffer).enc_map
p.dec = nil // not needed
p.size = size_map
}
if f.Name == "XXX_unrecognized" { // special case
prop.unrecField = toField(&f)
}
oneof := f.Tag.Get("protobuf_oneof") != "" // special case
prop.Prop[i] = p
prop.order[i] = i
if debug {
print(i, " ", f.Name, " ", t.String(), " ")
if p.Tag > 0 {
print(p.String())
}
print("\n")
}
if p.enc == nil && !strings.HasPrefix(f.Name, "XXX_") && !oneof {
fmt.Fprintln(os.Stderr, "proto: no encoder for", f.Name, f.Type.String(), "[GetProperties]")
}
}
// Re-order prop.order.
sort.Sort(prop)
type oneofMessage interface {
XXX_OneofFuncs() (func(Message, *Buffer) error, func(Message, int, int, *Buffer) (bool, error), func(Message) int, []interface{})
}
if om, ok := reflect.Zero(reflect.PtrTo(t)).Interface().(oneofMessage); ok {
var oots []interface{}
prop.oneofMarshaler, prop.oneofUnmarshaler, prop.oneofSizer, oots = om.XXX_OneofFuncs()
prop.stype = t
// Interpret oneof metadata.
prop.OneofTypes = make(map[string]*OneofProperties)
for _, oot := range oots {
oop := &OneofProperties{
Type: reflect.ValueOf(oot).Type(), // *T
Prop: new(Properties),
}
sft := oop.Type.Elem().Field(0)
oop.Prop.Name = sft.Name
oop.Prop.Parse(sft.Tag.Get("protobuf"))
// There will be exactly one interface field that
// this new value is assignable to.
for i := 0; i < t.NumField(); i++ {
f := t.Field(i)
if f.Type.Kind() != reflect.Interface {
continue
}
if !oop.Type.AssignableTo(f.Type) {
continue
}
oop.Field = i
break
}
prop.OneofTypes[oop.Prop.OrigName] = oop
}
}
// build required counts
// build tags
reqCount := 0
prop.decoderOrigNames = make(map[string]int)
for i, p := range prop.Prop {
if strings.HasPrefix(p.Name, "XXX_") {
// Internal fields should not appear in tags/origNames maps.
// They are handled specially when encoding and decoding.
continue
}
if p.Required {
reqCount++
}
prop.decoderTags.put(p.Tag, i)
prop.decoderOrigNames[p.OrigName] = i
}
prop.reqCount = reqCount
return prop
}
// Return the Properties object for the x[0]'th field of the structure.
func propByIndex(t reflect.Type, x []int) *Properties {
if len(x) != 1 {
fmt.Fprintf(os.Stderr, "proto: field index dimension %d (not 1) for type %s\n", len(x), t)
return nil
}
prop := GetProperties(t)
return prop.Prop[x[0]]
}
// Get the address and type of a pointer to a struct from an interface.
func getbase(pb Message) (t reflect.Type, b structPointer, err error) {
if pb == nil {
err = ErrNil
return
}
// get the reflect type of the pointer to the struct.
t = reflect.TypeOf(pb)
// get the address of the struct.
value := reflect.ValueOf(pb)
b = toStructPointer(value)
return
}
// A global registry of enum types.
// The generated code will register the generated maps by calling RegisterEnum.
var enumValueMaps = make(map[string]map[string]int32)
// RegisterEnum is called from the generated code to install the enum descriptor
// maps into the global table to aid parsing text format protocol buffers.
func RegisterEnum(typeName string, unusedNameMap map[int32]string, valueMap map[string]int32) {
if _, ok := enumValueMaps[typeName]; ok {
panic("proto: duplicate enum registered: " + typeName)
}
enumValueMaps[typeName] = valueMap
}
// EnumValueMap returns the mapping from names to integers of the
// enum type enumType, or nil if not found.
func EnumValueMap(enumType string) map[string]int32 {
return enumValueMaps[enumType]
}
// A registry of all linked message types.
// The string is a fully-qualified proto name ("pkg.Message").
var (
protoTypes = make(map[string]reflect.Type)
revProtoTypes = make(map[reflect.Type]string)
)
// RegisterType is called from generated code and maps from the fully qualified
// proto name to the type (pointer to struct) of the protocol buffer.
func RegisterType(x Message, name string) {
if _, ok := protoTypes[name]; ok {
// TODO: Some day, make this a panic.
log.Printf("proto: duplicate proto type registered: %s", name)
return
}
t := reflect.TypeOf(x)
protoTypes[name] = t
revProtoTypes[t] = name
}
// MessageName returns the fully-qualified proto name for the given message type.
func MessageName(x Message) string { return revProtoTypes[reflect.TypeOf(x)] }
// MessageType returns the message type (pointer to struct) for a named message.
func MessageType(name string) reflect.Type { return protoTypes[name] }

751
vendor/github.com/golang/protobuf/proto/text.go generated vendored Normal file
View File

@@ -0,0 +1,751 @@
// Go support for Protocol Buffers - Google's data interchange format
//
// Copyright 2010 The Go Authors. All rights reserved.
// https://github.com/golang/protobuf
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above
// copyright notice, this list of conditions and the following disclaimer
// in the documentation and/or other materials provided with the
// distribution.
// * Neither the name of Google Inc. nor the names of its
// contributors may be used to endorse or promote products derived from
// this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
package proto
// Functions for writing the text protocol buffer format.
import (
"bufio"
"bytes"
"encoding"
"errors"
"fmt"
"io"
"log"
"math"
"reflect"
"sort"
"strings"
)
var (
newline = []byte("\n")
spaces = []byte(" ")
gtNewline = []byte(">\n")
endBraceNewline = []byte("}\n")
backslashN = []byte{'\\', 'n'}
backslashR = []byte{'\\', 'r'}
backslashT = []byte{'\\', 't'}
backslashDQ = []byte{'\\', '"'}
backslashBS = []byte{'\\', '\\'}
posInf = []byte("inf")
negInf = []byte("-inf")
nan = []byte("nan")
)
type writer interface {
io.Writer
WriteByte(byte) error
}
// textWriter is an io.Writer that tracks its indentation level.
type textWriter struct {
ind int
complete bool // if the current position is a complete line
compact bool // whether to write out as a one-liner
w writer
}
func (w *textWriter) WriteString(s string) (n int, err error) {
if !strings.Contains(s, "\n") {
if !w.compact && w.complete {
w.writeIndent()
}
w.complete = false
return io.WriteString(w.w, s)
}
// WriteString is typically called without newlines, so this
// codepath and its copy are rare. We copy to avoid
// duplicating all of Write's logic here.
return w.Write([]byte(s))
}
func (w *textWriter) Write(p []byte) (n int, err error) {
newlines := bytes.Count(p, newline)
if newlines == 0 {
if !w.compact && w.complete {
w.writeIndent()
}
n, err = w.w.Write(p)
w.complete = false
return n, err
}
frags := bytes.SplitN(p, newline, newlines+1)
if w.compact {
for i, frag := range frags {
if i > 0 {
if err := w.w.WriteByte(' '); err != nil {
return n, err
}
n++
}
nn, err := w.w.Write(frag)
n += nn
if err != nil {
return n, err
}
}
return n, nil
}
for i, frag := range frags {
if w.complete {
w.writeIndent()
}
nn, err := w.w.Write(frag)
n += nn
if err != nil {
return n, err
}
if i+1 < len(frags) {
if err := w.w.WriteByte('\n'); err != nil {
return n, err
}
n++
}
}
w.complete = len(frags[len(frags)-1]) == 0
return n, nil
}
func (w *textWriter) WriteByte(c byte) error {
if w.compact && c == '\n' {
c = ' '
}
if !w.compact && w.complete {
w.writeIndent()
}
err := w.w.WriteByte(c)
w.complete = c == '\n'
return err
}
func (w *textWriter) indent() { w.ind++ }
func (w *textWriter) unindent() {
if w.ind == 0 {
log.Printf("proto: textWriter unindented too far")
return
}
w.ind--
}
func writeName(w *textWriter, props *Properties) error {
if _, err := w.WriteString(props.OrigName); err != nil {
return err
}
if props.Wire != "group" {
return w.WriteByte(':')
}
return nil
}
// raw is the interface satisfied by RawMessage.
type raw interface {
Bytes() []byte
}
func writeStruct(w *textWriter, sv reflect.Value) error {
st := sv.Type()
sprops := GetProperties(st)
for i := 0; i < sv.NumField(); i++ {
fv := sv.Field(i)
props := sprops.Prop[i]
name := st.Field(i).Name
if strings.HasPrefix(name, "XXX_") {
// There are two XXX_ fields:
// XXX_unrecognized []byte
// XXX_extensions map[int32]proto.Extension
// The first is handled here;
// the second is handled at the bottom of this function.
if name == "XXX_unrecognized" && !fv.IsNil() {
if err := writeUnknownStruct(w, fv.Interface().([]byte)); err != nil {
return err
}
}
continue
}
if fv.Kind() == reflect.Ptr && fv.IsNil() {
// Field not filled in. This could be an optional field or
// a required field that wasn't filled in. Either way, there
// isn't anything we can show for it.
continue
}
if fv.Kind() == reflect.Slice && fv.IsNil() {
// Repeated field that is empty, or a bytes field that is unused.
continue
}
if props.Repeated && fv.Kind() == reflect.Slice {
// Repeated field.
for j := 0; j < fv.Len(); j++ {
if err := writeName(w, props); err != nil {
return err
}
if !w.compact {
if err := w.WriteByte(' '); err != nil {
return err
}
}
v := fv.Index(j)
if v.Kind() == reflect.Ptr && v.IsNil() {
// A nil message in a repeated field is not valid,
// but we can handle that more gracefully than panicking.
if _, err := w.Write([]byte("<nil>\n")); err != nil {
return err
}
continue
}
if err := writeAny(w, v, props); err != nil {
return err
}
if err := w.WriteByte('\n'); err != nil {
return err
}
}
continue
}
if fv.Kind() == reflect.Map {
// Map fields are rendered as a repeated struct with key/value fields.
keys := fv.MapKeys()
sort.Sort(mapKeys(keys))
for _, key := range keys {
val := fv.MapIndex(key)
if err := writeName(w, props); err != nil {
return err
}
if !w.compact {
if err := w.WriteByte(' '); err != nil {
return err
}
}
// open struct
if err := w.WriteByte('<'); err != nil {
return err
}
if !w.compact {
if err := w.WriteByte('\n'); err != nil {
return err
}
}
w.indent()
// key
if _, err := w.WriteString("key:"); err != nil {
return err
}
if !w.compact {
if err := w.WriteByte(' '); err != nil {
return err
}
}
if err := writeAny(w, key, props.mkeyprop); err != nil {
return err
}
if err := w.WriteByte('\n'); err != nil {
return err
}
// nil values aren't legal, but we can avoid panicking because of them.
if val.Kind() != reflect.Ptr || !val.IsNil() {
// value
if _, err := w.WriteString("value:"); err != nil {
return err
}
if !w.compact {
if err := w.WriteByte(' '); err != nil {
return err
}
}
if err := writeAny(w, val, props.mvalprop); err != nil {
return err
}
if err := w.WriteByte('\n'); err != nil {
return err
}
}
// close struct
w.unindent()
if err := w.WriteByte('>'); err != nil {
return err
}
if err := w.WriteByte('\n'); err != nil {
return err
}
}
continue
}
if props.proto3 && fv.Kind() == reflect.Slice && fv.Len() == 0 {
// empty bytes field
continue
}
if fv.Kind() != reflect.Ptr && fv.Kind() != reflect.Slice {
// proto3 non-repeated scalar field; skip if zero value
if isProto3Zero(fv) {
continue
}
}
if fv.Kind() == reflect.Interface {
// Check if it is a oneof.
if st.Field(i).Tag.Get("protobuf_oneof") != "" {
// fv is nil, or holds a pointer to generated struct.
// That generated struct has exactly one field,
// which has a protobuf struct tag.
if fv.IsNil() {
continue
}
inner := fv.Elem().Elem() // interface -> *T -> T
tag := inner.Type().Field(0).Tag.Get("protobuf")
props = new(Properties) // Overwrite the outer props var, but not its pointee.
props.Parse(tag)
// Write the value in the oneof, not the oneof itself.
fv = inner.Field(0)
// Special case to cope with malformed messages gracefully:
// If the value in the oneof is a nil pointer, don't panic
// in writeAny.
if fv.Kind() == reflect.Ptr && fv.IsNil() {
// Use errors.New so writeAny won't render quotes.
msg := errors.New("/* nil */")
fv = reflect.ValueOf(&msg).Elem()
}
}
}
if err := writeName(w, props); err != nil {
return err
}
if !w.compact {
if err := w.WriteByte(' '); err != nil {
return err
}
}
if b, ok := fv.Interface().(raw); ok {
if err := writeRaw(w, b.Bytes()); err != nil {
return err
}
continue
}
// Enums have a String method, so writeAny will work fine.
if err := writeAny(w, fv, props); err != nil {
return err
}
if err := w.WriteByte('\n'); err != nil {
return err
}
}
// Extensions (the XXX_extensions field).
pv := sv.Addr()
if pv.Type().Implements(extendableProtoType) {
if err := writeExtensions(w, pv); err != nil {
return err
}
}
return nil
}
// writeRaw writes an uninterpreted raw message.
func writeRaw(w *textWriter, b []byte) error {
if err := w.WriteByte('<'); err != nil {
return err
}
if !w.compact {
if err := w.WriteByte('\n'); err != nil {
return err
}
}
w.indent()
if err := writeUnknownStruct(w, b); err != nil {
return err
}
w.unindent()
if err := w.WriteByte('>'); err != nil {
return err
}
return nil
}
// writeAny writes an arbitrary field.
func writeAny(w *textWriter, v reflect.Value, props *Properties) error {
v = reflect.Indirect(v)
// Floats have special cases.
if v.Kind() == reflect.Float32 || v.Kind() == reflect.Float64 {
x := v.Float()
var b []byte
switch {
case math.IsInf(x, 1):
b = posInf
case math.IsInf(x, -1):
b = negInf
case math.IsNaN(x):
b = nan
}
if b != nil {
_, err := w.Write(b)
return err
}
// Other values are handled below.
}
// We don't attempt to serialise every possible value type; only those
// that can occur in protocol buffers.
switch v.Kind() {
case reflect.Slice:
// Should only be a []byte; repeated fields are handled in writeStruct.
if err := writeString(w, string(v.Interface().([]byte))); err != nil {
return err
}
case reflect.String:
if err := writeString(w, v.String()); err != nil {
return err
}
case reflect.Struct:
// Required/optional group/message.
var bra, ket byte = '<', '>'
if props != nil && props.Wire == "group" {
bra, ket = '{', '}'
}
if err := w.WriteByte(bra); err != nil {
return err
}
if !w.compact {
if err := w.WriteByte('\n'); err != nil {
return err
}
}
w.indent()
if tm, ok := v.Interface().(encoding.TextMarshaler); ok {
text, err := tm.MarshalText()
if err != nil {
return err
}
if _, err = w.Write(text); err != nil {
return err
}
} else if err := writeStruct(w, v); err != nil {
return err
}
w.unindent()
if err := w.WriteByte(ket); err != nil {
return err
}
default:
_, err := fmt.Fprint(w, v.Interface())
return err
}
return nil
}
// equivalent to C's isprint.
func isprint(c byte) bool {
return c >= 0x20 && c < 0x7f
}
// writeString writes a string in the protocol buffer text format.
// It is similar to strconv.Quote except we don't use Go escape sequences,
// we treat the string as a byte sequence, and we use octal escapes.
// These differences are to maintain interoperability with the other
// languages' implementations of the text format.
func writeString(w *textWriter, s string) error {
// use WriteByte here to get any needed indent
if err := w.WriteByte('"'); err != nil {
return err
}
// Loop over the bytes, not the runes.
for i := 0; i < len(s); i++ {
var err error
// Divergence from C++: we don't escape apostrophes.
// There's no need to escape them, and the C++ parser
// copes with a naked apostrophe.
switch c := s[i]; c {
case '\n':
_, err = w.w.Write(backslashN)
case '\r':
_, err = w.w.Write(backslashR)
case '\t':
_, err = w.w.Write(backslashT)
case '"':
_, err = w.w.Write(backslashDQ)
case '\\':
_, err = w.w.Write(backslashBS)
default:
if isprint(c) {
err = w.w.WriteByte(c)
} else {
_, err = fmt.Fprintf(w.w, "\\%03o", c)
}
}
if err != nil {
return err
}
}
return w.WriteByte('"')
}
func writeUnknownStruct(w *textWriter, data []byte) (err error) {
if !w.compact {
if _, err := fmt.Fprintf(w, "/* %d unknown bytes */\n", len(data)); err != nil {
return err
}
}
b := NewBuffer(data)
for b.index < len(b.buf) {
x, err := b.DecodeVarint()
if err != nil {
_, err := fmt.Fprintf(w, "/* %v */\n", err)
return err
}
wire, tag := x&7, x>>3
if wire == WireEndGroup {
w.unindent()
if _, err := w.Write(endBraceNewline); err != nil {
return err
}
continue
}
if _, err := fmt.Fprint(w, tag); err != nil {
return err
}
if wire != WireStartGroup {
if err := w.WriteByte(':'); err != nil {
return err
}
}
if !w.compact || wire == WireStartGroup {
if err := w.WriteByte(' '); err != nil {
return err
}
}
switch wire {
case WireBytes:
buf, e := b.DecodeRawBytes(false)
if e == nil {
_, err = fmt.Fprintf(w, "%q", buf)
} else {
_, err = fmt.Fprintf(w, "/* %v */", e)
}
case WireFixed32:
x, err = b.DecodeFixed32()
err = writeUnknownInt(w, x, err)
case WireFixed64:
x, err = b.DecodeFixed64()
err = writeUnknownInt(w, x, err)
case WireStartGroup:
err = w.WriteByte('{')
w.indent()
case WireVarint:
x, err = b.DecodeVarint()
err = writeUnknownInt(w, x, err)
default:
_, err = fmt.Fprintf(w, "/* unknown wire type %d */", wire)
}
if err != nil {
return err
}
if err = w.WriteByte('\n'); err != nil {
return err
}
}
return nil
}
func writeUnknownInt(w *textWriter, x uint64, err error) error {
if err == nil {
_, err = fmt.Fprint(w, x)
} else {
_, err = fmt.Fprintf(w, "/* %v */", err)
}
return err
}
type int32Slice []int32
func (s int32Slice) Len() int { return len(s) }
func (s int32Slice) Less(i, j int) bool { return s[i] < s[j] }
func (s int32Slice) Swap(i, j int) { s[i], s[j] = s[j], s[i] }
// writeExtensions writes all the extensions in pv.
// pv is assumed to be a pointer to a protocol message struct that is extendable.
func writeExtensions(w *textWriter, pv reflect.Value) error {
emap := extensionMaps[pv.Type().Elem()]
ep := pv.Interface().(extendableProto)
// Order the extensions by ID.
// This isn't strictly necessary, but it will give us
// canonical output, which will also make testing easier.
m := ep.ExtensionMap()
ids := make([]int32, 0, len(m))
for id := range m {
ids = append(ids, id)
}
sort.Sort(int32Slice(ids))
for _, extNum := range ids {
ext := m[extNum]
var desc *ExtensionDesc
if emap != nil {
desc = emap[extNum]
}
if desc == nil {
// Unknown extension.
if err := writeUnknownStruct(w, ext.enc); err != nil {
return err
}
continue
}
pb, err := GetExtension(ep, desc)
if err != nil {
return fmt.Errorf("failed getting extension: %v", err)
}
// Repeated extensions will appear as a slice.
if !desc.repeated() {
if err := writeExtension(w, desc.Name, pb); err != nil {
return err
}
} else {
v := reflect.ValueOf(pb)
for i := 0; i < v.Len(); i++ {
if err := writeExtension(w, desc.Name, v.Index(i).Interface()); err != nil {
return err
}
}
}
}
return nil
}
func writeExtension(w *textWriter, name string, pb interface{}) error {
if _, err := fmt.Fprintf(w, "[%s]:", name); err != nil {
return err
}
if !w.compact {
if err := w.WriteByte(' '); err != nil {
return err
}
}
if err := writeAny(w, reflect.ValueOf(pb), nil); err != nil {
return err
}
if err := w.WriteByte('\n'); err != nil {
return err
}
return nil
}
func (w *textWriter) writeIndent() {
if !w.complete {
return
}
remain := w.ind * 2
for remain > 0 {
n := remain
if n > len(spaces) {
n = len(spaces)
}
w.w.Write(spaces[:n])
remain -= n
}
w.complete = false
}
func marshalText(w io.Writer, pb Message, compact bool) error {
val := reflect.ValueOf(pb)
if pb == nil || val.IsNil() {
w.Write([]byte("<nil>"))
return nil
}
var bw *bufio.Writer
ww, ok := w.(writer)
if !ok {
bw = bufio.NewWriter(w)
ww = bw
}
aw := &textWriter{
w: ww,
complete: true,
compact: compact,
}
if tm, ok := pb.(encoding.TextMarshaler); ok {
text, err := tm.MarshalText()
if err != nil {
return err
}
if _, err = aw.Write(text); err != nil {
return err
}
if bw != nil {
return bw.Flush()
}
return nil
}
// Dereference the received pointer so we don't have outer < and >.
v := reflect.Indirect(val)
if err := writeStruct(aw, v); err != nil {
return err
}
if bw != nil {
return bw.Flush()
}
return nil
}
// MarshalText writes a given protocol buffer in text format.
// The only errors returned are from w.
func MarshalText(w io.Writer, pb Message) error {
return marshalText(w, pb, false)
}
// MarshalTextString is the same as MarshalText, but returns the string directly.
func MarshalTextString(pb Message) string {
var buf bytes.Buffer
marshalText(&buf, pb, false)
return buf.String()
}
// CompactText writes a given protocol buffer in compact text format (one line).
func CompactText(w io.Writer, pb Message) error { return marshalText(w, pb, true) }
// CompactTextString is the same as CompactText, but returns the string directly.
func CompactTextString(pb Message) string {
var buf bytes.Buffer
marshalText(&buf, pb, true)
return buf.String()
}

798
vendor/github.com/golang/protobuf/proto/text_parser.go generated vendored Normal file
View File

@@ -0,0 +1,798 @@
// Go support for Protocol Buffers - Google's data interchange format
//
// Copyright 2010 The Go Authors. All rights reserved.
// https://github.com/golang/protobuf
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above
// copyright notice, this list of conditions and the following disclaimer
// in the documentation and/or other materials provided with the
// distribution.
// * Neither the name of Google Inc. nor the names of its
// contributors may be used to endorse or promote products derived from
// this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
package proto
// Functions for parsing the Text protocol buffer format.
// TODO: message sets.
import (
"encoding"
"errors"
"fmt"
"reflect"
"strconv"
"strings"
"unicode/utf8"
)
type ParseError struct {
Message string
Line int // 1-based line number
Offset int // 0-based byte offset from start of input
}
func (p *ParseError) Error() string {
if p.Line == 1 {
// show offset only for first line
return fmt.Sprintf("line 1.%d: %v", p.Offset, p.Message)
}
return fmt.Sprintf("line %d: %v", p.Line, p.Message)
}
type token struct {
value string
err *ParseError
line int // line number
offset int // byte number from start of input, not start of line
unquoted string // the unquoted version of value, if it was a quoted string
}
func (t *token) String() string {
if t.err == nil {
return fmt.Sprintf("%q (line=%d, offset=%d)", t.value, t.line, t.offset)
}
return fmt.Sprintf("parse error: %v", t.err)
}
type textParser struct {
s string // remaining input
done bool // whether the parsing is finished (success or error)
backed bool // whether back() was called
offset, line int
cur token
}
func newTextParser(s string) *textParser {
p := new(textParser)
p.s = s
p.line = 1
p.cur.line = 1
return p
}
func (p *textParser) errorf(format string, a ...interface{}) *ParseError {
pe := &ParseError{fmt.Sprintf(format, a...), p.cur.line, p.cur.offset}
p.cur.err = pe
p.done = true
return pe
}
// Numbers and identifiers are matched by [-+._A-Za-z0-9]
func isIdentOrNumberChar(c byte) bool {
switch {
case 'A' <= c && c <= 'Z', 'a' <= c && c <= 'z':
return true
case '0' <= c && c <= '9':
return true
}
switch c {
case '-', '+', '.', '_':
return true
}
return false
}
func isWhitespace(c byte) bool {
switch c {
case ' ', '\t', '\n', '\r':
return true
}
return false
}
func (p *textParser) skipWhitespace() {
i := 0
for i < len(p.s) && (isWhitespace(p.s[i]) || p.s[i] == '#') {
if p.s[i] == '#' {
// comment; skip to end of line or input
for i < len(p.s) && p.s[i] != '\n' {
i++
}
if i == len(p.s) {
break
}
}
if p.s[i] == '\n' {
p.line++
}
i++
}
p.offset += i
p.s = p.s[i:len(p.s)]
if len(p.s) == 0 {
p.done = true
}
}
func (p *textParser) advance() {
// Skip whitespace
p.skipWhitespace()
if p.done {
return
}
// Start of non-whitespace
p.cur.err = nil
p.cur.offset, p.cur.line = p.offset, p.line
p.cur.unquoted = ""
switch p.s[0] {
case '<', '>', '{', '}', ':', '[', ']', ';', ',':
// Single symbol
p.cur.value, p.s = p.s[0:1], p.s[1:len(p.s)]
case '"', '\'':
// Quoted string
i := 1
for i < len(p.s) && p.s[i] != p.s[0] && p.s[i] != '\n' {
if p.s[i] == '\\' && i+1 < len(p.s) {
// skip escaped char
i++
}
i++
}
if i >= len(p.s) || p.s[i] != p.s[0] {
p.errorf("unmatched quote")
return
}
unq, err := unquoteC(p.s[1:i], rune(p.s[0]))
if err != nil {
p.errorf("invalid quoted string %s: %v", p.s[0:i+1], err)
return
}
p.cur.value, p.s = p.s[0:i+1], p.s[i+1:len(p.s)]
p.cur.unquoted = unq
default:
i := 0
for i < len(p.s) && isIdentOrNumberChar(p.s[i]) {
i++
}
if i == 0 {
p.errorf("unexpected byte %#x", p.s[0])
return
}
p.cur.value, p.s = p.s[0:i], p.s[i:len(p.s)]
}
p.offset += len(p.cur.value)
}
var (
errBadUTF8 = errors.New("proto: bad UTF-8")
errBadHex = errors.New("proto: bad hexadecimal")
)
func unquoteC(s string, quote rune) (string, error) {
// This is based on C++'s tokenizer.cc.
// Despite its name, this is *not* parsing C syntax.
// For instance, "\0" is an invalid quoted string.
// Avoid allocation in trivial cases.
simple := true
for _, r := range s {
if r == '\\' || r == quote {
simple = false
break
}
}
if simple {
return s, nil
}
buf := make([]byte, 0, 3*len(s)/2)
for len(s) > 0 {
r, n := utf8.DecodeRuneInString(s)
if r == utf8.RuneError && n == 1 {
return "", errBadUTF8
}
s = s[n:]
if r != '\\' {
if r < utf8.RuneSelf {
buf = append(buf, byte(r))
} else {
buf = append(buf, string(r)...)
}
continue
}
ch, tail, err := unescape(s)
if err != nil {
return "", err
}
buf = append(buf, ch...)
s = tail
}
return string(buf), nil
}
func unescape(s string) (ch string, tail string, err error) {
r, n := utf8.DecodeRuneInString(s)
if r == utf8.RuneError && n == 1 {
return "", "", errBadUTF8
}
s = s[n:]
switch r {
case 'a':
return "\a", s, nil
case 'b':
return "\b", s, nil
case 'f':
return "\f", s, nil
case 'n':
return "\n", s, nil
case 'r':
return "\r", s, nil
case 't':
return "\t", s, nil
case 'v':
return "\v", s, nil
case '?':
return "?", s, nil // trigraph workaround
case '\'', '"', '\\':
return string(r), s, nil
case '0', '1', '2', '3', '4', '5', '6', '7', 'x', 'X':
if len(s) < 2 {
return "", "", fmt.Errorf(`\%c requires 2 following digits`, r)
}
base := 8
ss := s[:2]
s = s[2:]
if r == 'x' || r == 'X' {
base = 16
} else {
ss = string(r) + ss
}
i, err := strconv.ParseUint(ss, base, 8)
if err != nil {
return "", "", err
}
return string([]byte{byte(i)}), s, nil
case 'u', 'U':
n := 4
if r == 'U' {
n = 8
}
if len(s) < n {
return "", "", fmt.Errorf(`\%c requires %d digits`, r, n)
}
bs := make([]byte, n/2)
for i := 0; i < n; i += 2 {
a, ok1 := unhex(s[i])
b, ok2 := unhex(s[i+1])
if !ok1 || !ok2 {
return "", "", errBadHex
}
bs[i/2] = a<<4 | b
}
s = s[n:]
return string(bs), s, nil
}
return "", "", fmt.Errorf(`unknown escape \%c`, r)
}
// Adapted from src/pkg/strconv/quote.go.
func unhex(b byte) (v byte, ok bool) {
switch {
case '0' <= b && b <= '9':
return b - '0', true
case 'a' <= b && b <= 'f':
return b - 'a' + 10, true
case 'A' <= b && b <= 'F':
return b - 'A' + 10, true
}
return 0, false
}
// Back off the parser by one token. Can only be done between calls to next().
// It makes the next advance() a no-op.
func (p *textParser) back() { p.backed = true }
// Advances the parser and returns the new current token.
func (p *textParser) next() *token {
if p.backed || p.done {
p.backed = false
return &p.cur
}
p.advance()
if p.done {
p.cur.value = ""
} else if len(p.cur.value) > 0 && p.cur.value[0] == '"' {
// Look for multiple quoted strings separated by whitespace,
// and concatenate them.
cat := p.cur
for {
p.skipWhitespace()
if p.done || p.s[0] != '"' {
break
}
p.advance()
if p.cur.err != nil {
return &p.cur
}
cat.value += " " + p.cur.value
cat.unquoted += p.cur.unquoted
}
p.done = false // parser may have seen EOF, but we want to return cat
p.cur = cat
}
return &p.cur
}
func (p *textParser) consumeToken(s string) error {
tok := p.next()
if tok.err != nil {
return tok.err
}
if tok.value != s {
p.back()
return p.errorf("expected %q, found %q", s, tok.value)
}
return nil
}
// Return a RequiredNotSetError indicating which required field was not set.
func (p *textParser) missingRequiredFieldError(sv reflect.Value) *RequiredNotSetError {
st := sv.Type()
sprops := GetProperties(st)
for i := 0; i < st.NumField(); i++ {
if !isNil(sv.Field(i)) {
continue
}
props := sprops.Prop[i]
if props.Required {
return &RequiredNotSetError{fmt.Sprintf("%v.%v", st, props.OrigName)}
}
}
return &RequiredNotSetError{fmt.Sprintf("%v.<unknown field name>", st)} // should not happen
}
// Returns the index in the struct for the named field, as well as the parsed tag properties.
func structFieldByName(sprops *StructProperties, name string) (int, *Properties, bool) {
i, ok := sprops.decoderOrigNames[name]
if ok {
return i, sprops.Prop[i], true
}
return -1, nil, false
}
// Consume a ':' from the input stream (if the next token is a colon),
// returning an error if a colon is needed but not present.
func (p *textParser) checkForColon(props *Properties, typ reflect.Type) *ParseError {
tok := p.next()
if tok.err != nil {
return tok.err
}
if tok.value != ":" {
// Colon is optional when the field is a group or message.
needColon := true
switch props.Wire {
case "group":
needColon = false
case "bytes":
// A "bytes" field is either a message, a string, or a repeated field;
// those three become *T, *string and []T respectively, so we can check for
// this field being a pointer to a non-string.
if typ.Kind() == reflect.Ptr {
// *T or *string
if typ.Elem().Kind() == reflect.String {
break
}
} else if typ.Kind() == reflect.Slice {
// []T or []*T
if typ.Elem().Kind() != reflect.Ptr {
break
}
} else if typ.Kind() == reflect.String {
// The proto3 exception is for a string field,
// which requires a colon.
break
}
needColon = false
}
if needColon {
return p.errorf("expected ':', found %q", tok.value)
}
p.back()
}
return nil
}
func (p *textParser) readStruct(sv reflect.Value, terminator string) error {
st := sv.Type()
sprops := GetProperties(st)
reqCount := sprops.reqCount
var reqFieldErr error
fieldSet := make(map[string]bool)
// A struct is a sequence of "name: value", terminated by one of
// '>' or '}', or the end of the input. A name may also be
// "[extension]".
for {
tok := p.next()
if tok.err != nil {
return tok.err
}
if tok.value == terminator {
break
}
if tok.value == "[" {
// Looks like an extension.
//
// TODO: Check whether we need to handle
// namespace rooted names (e.g. ".something.Foo").
tok = p.next()
if tok.err != nil {
return tok.err
}
var desc *ExtensionDesc
// This could be faster, but it's functional.
// TODO: Do something smarter than a linear scan.
for _, d := range RegisteredExtensions(reflect.New(st).Interface().(Message)) {
if d.Name == tok.value {
desc = d
break
}
}
if desc == nil {
return p.errorf("unrecognized extension %q", tok.value)
}
// Check the extension terminator.
tok = p.next()
if tok.err != nil {
return tok.err
}
if tok.value != "]" {
return p.errorf("unrecognized extension terminator %q", tok.value)
}
props := &Properties{}
props.Parse(desc.Tag)
typ := reflect.TypeOf(desc.ExtensionType)
if err := p.checkForColon(props, typ); err != nil {
return err
}
rep := desc.repeated()
// Read the extension structure, and set it in
// the value we're constructing.
var ext reflect.Value
if !rep {
ext = reflect.New(typ).Elem()
} else {
ext = reflect.New(typ.Elem()).Elem()
}
if err := p.readAny(ext, props); err != nil {
if _, ok := err.(*RequiredNotSetError); !ok {
return err
}
reqFieldErr = err
}
ep := sv.Addr().Interface().(extendableProto)
if !rep {
SetExtension(ep, desc, ext.Interface())
} else {
old, err := GetExtension(ep, desc)
var sl reflect.Value
if err == nil {
sl = reflect.ValueOf(old) // existing slice
} else {
sl = reflect.MakeSlice(typ, 0, 1)
}
sl = reflect.Append(sl, ext)
SetExtension(ep, desc, sl.Interface())
}
if err := p.consumeOptionalSeparator(); err != nil {
return err
}
continue
}
// This is a normal, non-extension field.
name := tok.value
var dst reflect.Value
fi, props, ok := structFieldByName(sprops, name)
if ok {
dst = sv.Field(fi)
} else if oop, ok := sprops.OneofTypes[name]; ok {
// It is a oneof.
props = oop.Prop
nv := reflect.New(oop.Type.Elem())
dst = nv.Elem().Field(0)
sv.Field(oop.Field).Set(nv)
}
if !dst.IsValid() {
return p.errorf("unknown field name %q in %v", name, st)
}
if dst.Kind() == reflect.Map {
// Consume any colon.
if err := p.checkForColon(props, dst.Type()); err != nil {
return err
}
// Construct the map if it doesn't already exist.
if dst.IsNil() {
dst.Set(reflect.MakeMap(dst.Type()))
}
key := reflect.New(dst.Type().Key()).Elem()
val := reflect.New(dst.Type().Elem()).Elem()
// The map entry should be this sequence of tokens:
// < key : KEY value : VALUE >
// Technically the "key" and "value" could come in any order,
// but in practice they won't.
tok := p.next()
var terminator string
switch tok.value {
case "<":
terminator = ">"
case "{":
terminator = "}"
default:
return p.errorf("expected '{' or '<', found %q", tok.value)
}
if err := p.consumeToken("key"); err != nil {
return err
}
if err := p.consumeToken(":"); err != nil {
return err
}
if err := p.readAny(key, props.mkeyprop); err != nil {
return err
}
if err := p.consumeOptionalSeparator(); err != nil {
return err
}
if err := p.consumeToken("value"); err != nil {
return err
}
if err := p.checkForColon(props.mvalprop, dst.Type().Elem()); err != nil {
return err
}
if err := p.readAny(val, props.mvalprop); err != nil {
return err
}
if err := p.consumeOptionalSeparator(); err != nil {
return err
}
if err := p.consumeToken(terminator); err != nil {
return err
}
dst.SetMapIndex(key, val)
continue
}
// Check that it's not already set if it's not a repeated field.
if !props.Repeated && fieldSet[name] {
return p.errorf("non-repeated field %q was repeated", name)
}
if err := p.checkForColon(props, dst.Type()); err != nil {
return err
}
// Parse into the field.
fieldSet[name] = true
if err := p.readAny(dst, props); err != nil {
if _, ok := err.(*RequiredNotSetError); !ok {
return err
}
reqFieldErr = err
} else if props.Required {
reqCount--
}
if err := p.consumeOptionalSeparator(); err != nil {
return err
}
}
if reqCount > 0 {
return p.missingRequiredFieldError(sv)
}
return reqFieldErr
}
// consumeOptionalSeparator consumes an optional semicolon or comma.
// It is used in readStruct to provide backward compatibility.
func (p *textParser) consumeOptionalSeparator() error {
tok := p.next()
if tok.err != nil {
return tok.err
}
if tok.value != ";" && tok.value != "," {
p.back()
}
return nil
}
func (p *textParser) readAny(v reflect.Value, props *Properties) error {
tok := p.next()
if tok.err != nil {
return tok.err
}
if tok.value == "" {
return p.errorf("unexpected EOF")
}
switch fv := v; fv.Kind() {
case reflect.Slice:
at := v.Type()
if at.Elem().Kind() == reflect.Uint8 {
// Special case for []byte
if tok.value[0] != '"' && tok.value[0] != '\'' {
// Deliberately written out here, as the error after
// this switch statement would write "invalid []byte: ...",
// which is not as user-friendly.
return p.errorf("invalid string: %v", tok.value)
}
bytes := []byte(tok.unquoted)
fv.Set(reflect.ValueOf(bytes))
return nil
}
// Repeated field.
if tok.value == "[" {
// Repeated field with list notation, like [1,2,3].
for {
fv.Set(reflect.Append(fv, reflect.New(at.Elem()).Elem()))
err := p.readAny(fv.Index(fv.Len()-1), props)
if err != nil {
return err
}
tok := p.next()
if tok.err != nil {
return tok.err
}
if tok.value == "]" {
break
}
if tok.value != "," {
return p.errorf("Expected ']' or ',' found %q", tok.value)
}
}
return nil
}
// One value of the repeated field.
p.back()
fv.Set(reflect.Append(fv, reflect.New(at.Elem()).Elem()))
return p.readAny(fv.Index(fv.Len()-1), props)
case reflect.Bool:
// Either "true", "false", 1 or 0.
switch tok.value {
case "true", "1":
fv.SetBool(true)
return nil
case "false", "0":
fv.SetBool(false)
return nil
}
case reflect.Float32, reflect.Float64:
v := tok.value
// Ignore 'f' for compatibility with output generated by C++, but don't
// remove 'f' when the value is "-inf" or "inf".
if strings.HasSuffix(v, "f") && tok.value != "-inf" && tok.value != "inf" {
v = v[:len(v)-1]
}
if f, err := strconv.ParseFloat(v, fv.Type().Bits()); err == nil {
fv.SetFloat(f)
return nil
}
case reflect.Int32:
if x, err := strconv.ParseInt(tok.value, 0, 32); err == nil {
fv.SetInt(x)
return nil
}
if len(props.Enum) == 0 {
break
}
m, ok := enumValueMaps[props.Enum]
if !ok {
break
}
x, ok := m[tok.value]
if !ok {
break
}
fv.SetInt(int64(x))
return nil
case reflect.Int64:
if x, err := strconv.ParseInt(tok.value, 0, 64); err == nil {
fv.SetInt(x)
return nil
}
case reflect.Ptr:
// A basic field (indirected through pointer), or a repeated message/group
p.back()
fv.Set(reflect.New(fv.Type().Elem()))
return p.readAny(fv.Elem(), props)
case reflect.String:
if tok.value[0] == '"' || tok.value[0] == '\'' {
fv.SetString(tok.unquoted)
return nil
}
case reflect.Struct:
var terminator string
switch tok.value {
case "{":
terminator = "}"
case "<":
terminator = ">"
default:
return p.errorf("expected '{' or '<', found %q", tok.value)
}
// TODO: Handle nested messages which implement encoding.TextUnmarshaler.
return p.readStruct(fv, terminator)
case reflect.Uint32:
if x, err := strconv.ParseUint(tok.value, 0, 32); err == nil {
fv.SetUint(uint64(x))
return nil
}
case reflect.Uint64:
if x, err := strconv.ParseUint(tok.value, 0, 64); err == nil {
fv.SetUint(x)
return nil
}
}
return p.errorf("invalid %v: %v", v.Type(), tok.value)
}
// UnmarshalText reads a protocol buffer in Text format. UnmarshalText resets pb
// before starting to unmarshal, so any existing data in pb is always removed.
// If a required field is not set and no other error occurs,
// UnmarshalText returns *RequiredNotSetError.
func UnmarshalText(s string, pb Message) error {
if um, ok := pb.(encoding.TextUnmarshaler); ok {
err := um.UnmarshalText([]byte(s))
return err
}
pb.Reset()
v := reflect.ValueOf(pb)
if pe := newTextParser(s).readStruct(v.Elem(), ""); pe != nil {
return pe
}
return nil
}

19
vendor/github.com/kolo/xmlrpc/LICENSE generated vendored Normal file
View File

@@ -0,0 +1,19 @@
Copyright (C) 2012 Dmitry Maksimov
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.

79
vendor/github.com/kolo/xmlrpc/README.md generated vendored Normal file
View File

@@ -0,0 +1,79 @@
## Overview
xmlrpc is a Go implementation of the client side of the XML-RPC protocol.
## Installation
To install the xmlrpc package, run `go get github.com/kolo/xmlrpc`. To use
it in an application, add `"github.com/kolo/xmlrpc"` to the `import`
statement.
## Usage
client, _ := xmlrpc.NewClient("https://bugzilla.mozilla.org/xmlrpc.cgi", nil)
result := struct{
Version string `xmlrpc:"version"`
}{}
client.Call("Bugzilla.version", nil, &result)
fmt.Printf("Version: %s\n", result.Version) // Version: 4.2.7+
The second argument of the NewClient function is an object that implements the
[http.RoundTripper](http://golang.org/pkg/net/http/#RoundTripper)
interface; it can be used to get more control over connection options.
By default it is initialized with the http.DefaultTransport object, as in the sketch below.
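For example, a custom transport can be supplied as follows (a minimal sketch, not
part of the upstream README; it assumes `net/http`, `log` and this package are
imported, and the tuning value is purely illustrative):

    transport := &http.Transport{
        MaxIdleConnsPerHost: 4, // illustrative value, not a recommendation
    }
    client, err := xmlrpc.NewClient("https://bugzilla.mozilla.org/xmlrpc.cgi", transport)
    if err != nil {
        log.Fatal(err)
    }
    defer client.Close()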
### Arguments encoding
The xmlrpc package supports encoding native Go data types as method
arguments.
Data type encoding rules:
* int, int8, int16, int32, int64 encoded to int;
* float32, float64 encoded to double;
* bool encoded to boolean;
* string encoded to string;
* time.Time encoded to datetime.iso8601;
* xmlrpc.Base64 encoded to base64;
* slice encoded to array;
Structs are encoded to struct according to the following rules:
* every exported field becomes a struct member;
* the field name becomes the member name;
* if a field has an xmlrpc tag, its value becomes the member name.
A server method can accept several arguments; to handle this case a slice of
empty interfaces (`[]interface{}`) is treated specially: each value of such a
slice is encoded as a separate argument, as shown in the sketch below.
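For illustration, a call with several arguments might look like this (a sketch
only, not from the upstream README; the method name `Bug.search` and the struct
fields are hypothetical):

    type searchArgs struct {
        Product string `xmlrpc:"product"` // encoded as member "product"
        Limit   int                       // encoded as member "Limit"
    }
    // Each element of the []interface{} slice becomes a separate call argument.
    args := []interface{}{searchArgs{Product: "Firefox", Limit: 10}, true}
    var out interface{}
    if err := client.Call("Bug.search", args, &out); err != nil {
        log.Fatal(err)
    }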
### Result decoding
The result of a remote function is decoded to a native Go data type; a short
decoding sketch follows the list below.
Data type decoding rules:
* int, i4 decoded to int, int8, int16, int32, int64;
* double decoded to float32, float64;
* boolean decoded to bool;
* string decoded to string;
* array decoded to slice;
* structs are decoded following the rules described in the previous section;
* datetime.iso8601 decoded as time.Time data type;
* base64 decoded to string.
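As an illustration, a response can be decoded straight into a tagged struct
(again a sketch; the method name and member names are hypothetical, and `fmt`
and `time` are assumed to be imported):

    var bug struct {
        ID      int       `xmlrpc:"id"`            // from an int member
        Summary string    `xmlrpc:"summary"`       // from a string member
        Created time.Time `xmlrpc:"creation_time"` // from a dateTime.iso8601 member
    }
    client.Call("Bug.get", nil, &bug)
    fmt.Printf("#%d %s (created %s)\n", bug.ID, bug.Summary, bug.Created)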
## Implementation details
The xmlrpc package contains the clientCodec type, which implements the [rpc.ClientCodec](http://golang.org/pkg/net/rpc/#ClientCodec)
interface of the [net/rpc](http://golang.org/pkg/net/rpc) package.
The xmlrpc package works over the HTTP protocol, but some internal functions
and data types were made public to make it easier to create another
implementation of xmlrpc that works over a different protocol. The
EncodeMethodCall function can be used to encode a request body, and the
Response data type can be used to decode a server response.
## Contribution
Feel free to fork the project, submit pull requests, and ask questions.
## Authors
Dmitry Maksimov (dmtmax@gmail.com)

144
vendor/github.com/kolo/xmlrpc/client.go generated vendored Normal file
View File

@@ -0,0 +1,144 @@
package xmlrpc
import (
"fmt"
"io/ioutil"
"net/http"
"net/http/cookiejar"
"net/rpc"
"net/url"
)
type Client struct {
*rpc.Client
}
// clientCodec is rpc.ClientCodec interface implementation.
type clientCodec struct {
// url represents the URL of the xmlrpc service
url *url.URL
// httpClient works with HTTP protocol
httpClient *http.Client
// cookies stores cookies received on last request
cookies http.CookieJar
// responses represents a map of active requests. It is required to return the request id so that
// rpc.Client can mark them as done.
responses map[uint64]*http.Response
response *Response
// ready represents a channel that is used to link a request and its response.
ready chan uint64
}
func (codec *clientCodec) WriteRequest(request *rpc.Request, args interface{}) (err error) {
httpRequest, err := NewRequest(codec.url.String(), request.ServiceMethod, args)
if codec.cookies != nil {
for _, cookie := range codec.cookies.Cookies(codec.url) {
httpRequest.AddCookie(cookie)
}
}
if err != nil {
return err
}
var httpResponse *http.Response
httpResponse, err = codec.httpClient.Do(httpRequest)
if err != nil {
return err
}
if codec.cookies != nil {
codec.cookies.SetCookies(codec.url, httpResponse.Cookies())
}
codec.responses[request.Seq] = httpResponse
codec.ready <- request.Seq
return nil
}
func (codec *clientCodec) ReadResponseHeader(response *rpc.Response) (err error) {
seq := <-codec.ready
httpResponse := codec.responses[seq]
if httpResponse.StatusCode < 200 || httpResponse.StatusCode >= 300 {
return fmt.Errorf("request error: bad status code - %d", httpResponse.StatusCode)
}
respData, err := ioutil.ReadAll(httpResponse.Body)
if err != nil {
return err
}
httpResponse.Body.Close()
resp := NewResponse(respData)
if resp.Failed() {
response.Error = fmt.Sprintf("%v", resp.Err())
}
codec.response = resp
response.Seq = seq
delete(codec.responses, seq)
return nil
}
func (codec *clientCodec) ReadResponseBody(v interface{}) (err error) {
if v == nil {
return nil
}
if err = codec.response.Unmarshal(v); err != nil {
return err
}
return nil
}
func (codec *clientCodec) Close() error {
transport := codec.httpClient.Transport.(*http.Transport)
transport.CloseIdleConnections()
return nil
}
// NewClient returns instance of rpc.Client object, that is used to send request to xmlrpc service.
func NewClient(requrl string, transport http.RoundTripper) (*Client, error) {
if transport == nil {
transport = http.DefaultTransport
}
httpClient := &http.Client{Transport: transport}
jar, err := cookiejar.New(nil)
if err != nil {
return nil, err
}
u, err := url.Parse(requrl)
if err != nil {
return nil, err
}
codec := clientCodec{
url: u,
httpClient: httpClient,
ready: make(chan uint64),
responses: make(map[uint64]*http.Response),
cookies: jar,
}
return &Client{rpc.NewClientWithCodec(&codec)}, nil
}

449
vendor/github.com/kolo/xmlrpc/decoder.go generated vendored Normal file
View File

@@ -0,0 +1,449 @@
package xmlrpc
import (
"bytes"
"encoding/xml"
"errors"
"fmt"
"io"
"reflect"
"strconv"
"strings"
"time"
)
const iso8601 = "20060102T15:04:05"
var (
// CharsetReader is a function to generate a reader which converts a non-UTF-8
// charset into UTF-8.
CharsetReader func(string, io.Reader) (io.Reader, error)
invalidXmlError = errors.New("invalid xml")
)
type TypeMismatchError string
func (e TypeMismatchError) Error() string { return string(e) }
type decoder struct {
*xml.Decoder
}
func unmarshal(data []byte, v interface{}) (err error) {
dec := &decoder{xml.NewDecoder(bytes.NewBuffer(data))}
if CharsetReader != nil {
dec.CharsetReader = CharsetReader
}
var tok xml.Token
for {
if tok, err = dec.Token(); err != nil {
return err
}
if t, ok := tok.(xml.StartElement); ok {
if t.Name.Local == "value" {
val := reflect.ValueOf(v)
if val.Kind() != reflect.Ptr {
return errors.New("non-pointer value passed to unmarshal")
}
if err = dec.decodeValue(val.Elem()); err != nil {
return err
}
break
}
}
}
// read until end of document
err = dec.Skip()
if err != nil && err != io.EOF {
return err
}
return nil
}
func (dec *decoder) decodeValue(val reflect.Value) error {
var tok xml.Token
var err error
if val.Kind() == reflect.Ptr {
if val.IsNil() {
val.Set(reflect.New(val.Type().Elem()))
}
val = val.Elem()
}
var typeName string
for {
if tok, err = dec.Token(); err != nil {
return err
}
if t, ok := tok.(xml.EndElement); ok {
if t.Name.Local == "value" {
return nil
} else {
return invalidXmlError
}
}
if t, ok := tok.(xml.StartElement); ok {
typeName = t.Name.Local
break
}
// Treat value data without type identifier as string
if t, ok := tok.(xml.CharData); ok {
if value := strings.TrimSpace(string(t)); value != "" {
if err = checkType(val, reflect.String); err != nil {
return err
}
val.SetString(value)
return nil
}
}
}
switch typeName {
case "struct":
ismap := false
pmap := val
valType := val.Type()
if err = checkType(val, reflect.Struct); err != nil {
if checkType(val, reflect.Map) == nil {
if valType.Key().Kind() != reflect.String {
return fmt.Errorf("only maps with string key type can be unmarshalled")
}
ismap = true
} else if checkType(val, reflect.Interface) == nil && val.IsNil() {
var dummy map[string]interface{}
pmap = reflect.New(reflect.TypeOf(dummy)).Elem()
valType = pmap.Type()
ismap = true
} else {
return err
}
}
var fields map[string]reflect.Value
if !ismap {
fields = make(map[string]reflect.Value)
for i := 0; i < valType.NumField(); i++ {
field := valType.Field(i)
fieldVal := val.FieldByName(field.Name)
if fieldVal.CanSet() {
if fn := field.Tag.Get("xmlrpc"); fn != "" {
fields[fn] = fieldVal
} else {
fields[field.Name] = fieldVal
}
}
}
} else {
// Create initial empty map
pmap.Set(reflect.MakeMap(valType))
}
// Process struct members.
StructLoop:
for {
if tok, err = dec.Token(); err != nil {
return err
}
switch t := tok.(type) {
case xml.StartElement:
if t.Name.Local != "member" {
return invalidXmlError
}
tagName, fieldName, err := dec.readTag()
if err != nil {
return err
}
if tagName != "name" {
return invalidXmlError
}
var fv reflect.Value
ok := true
if !ismap {
fv, ok = fields[string(fieldName)]
} else {
fv = reflect.New(valType.Elem())
}
if ok {
for {
if tok, err = dec.Token(); err != nil {
return err
}
if t, ok := tok.(xml.StartElement); ok && t.Name.Local == "value" {
if err = dec.decodeValue(fv); err != nil {
return err
}
// </value>
if err = dec.Skip(); err != nil {
return err
}
break
}
}
}
// </member>
if err = dec.Skip(); err != nil {
return err
}
if ismap {
pmap.SetMapIndex(reflect.ValueOf(string(fieldName)), reflect.Indirect(fv))
val.Set(pmap)
}
case xml.EndElement:
break StructLoop
}
}
case "array":
pslice := val
if checkType(val, reflect.Interface) == nil && val.IsNil() {
var dummy []interface{}
pslice = reflect.New(reflect.TypeOf(dummy)).Elem()
} else if err = checkType(val, reflect.Slice); err != nil {
return err
}
ArrayLoop:
for {
if tok, err = dec.Token(); err != nil {
return err
}
switch t := tok.(type) {
case xml.StartElement:
if t.Name.Local != "data" {
return invalidXmlError
}
slice := reflect.MakeSlice(pslice.Type(), 0, 0)
DataLoop:
for {
if tok, err = dec.Token(); err != nil {
return err
}
switch tt := tok.(type) {
case xml.StartElement:
if tt.Name.Local != "value" {
return invalidXmlError
}
v := reflect.New(pslice.Type().Elem())
if err = dec.decodeValue(v); err != nil {
return err
}
slice = reflect.Append(slice, v.Elem())
// </value>
if err = dec.Skip(); err != nil {
return err
}
case xml.EndElement:
pslice.Set(slice)
val.Set(pslice)
break DataLoop
}
}
case xml.EndElement:
break ArrayLoop
}
}
default:
if tok, err = dec.Token(); err != nil {
return err
}
var data []byte
switch t := tok.(type) {
case xml.EndElement:
return nil
case xml.CharData:
data = []byte(t.Copy())
default:
return invalidXmlError
}
switch typeName {
case "int", "i4", "i8":
if checkType(val, reflect.Interface) == nil && val.IsNil() {
i, err := strconv.ParseInt(string(data), 10, 64)
if err != nil {
return err
}
pi := reflect.New(reflect.TypeOf(i)).Elem()
pi.SetInt(i)
val.Set(pi)
} else if err = checkType(val, reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64); err != nil {
return err
} else {
i, err := strconv.ParseInt(string(data), 10, val.Type().Bits())
if err != nil {
return err
}
val.SetInt(i)
}
case "string", "base64":
str := string(data)
if checkType(val, reflect.Interface) == nil && val.IsNil() {
pstr := reflect.New(reflect.TypeOf(str)).Elem()
pstr.SetString(str)
val.Set(pstr)
} else if err = checkType(val, reflect.String); err != nil {
return err
} else {
val.SetString(str)
}
case "dateTime.iso8601":
t, err := time.Parse(iso8601, string(data))
if err != nil {
return err
}
if checkType(val, reflect.Interface) == nil && val.IsNil() {
ptime := reflect.New(reflect.TypeOf(t)).Elem()
ptime.Set(reflect.ValueOf(t))
val.Set(ptime)
} else if _, ok := val.Interface().(time.Time); !ok {
return TypeMismatchError(fmt.Sprintf("error: type mismatch error - can't decode %v to time", val.Kind()))
} else {
val.Set(reflect.ValueOf(t))
}
case "boolean":
v, err := strconv.ParseBool(string(data))
if err != nil {
return err
}
if checkType(val, reflect.Interface) == nil && val.IsNil() {
pv := reflect.New(reflect.TypeOf(v)).Elem()
pv.SetBool(v)
val.Set(pv)
} else if err = checkType(val, reflect.Bool); err != nil {
return err
} else {
val.SetBool(v)
}
case "double":
if checkType(val, reflect.Interface) == nil && val.IsNil() {
i, err := strconv.ParseFloat(string(data), 64)
if err != nil {
return err
}
pdouble := reflect.New(reflect.TypeOf(i)).Elem()
pdouble.SetFloat(i)
val.Set(pdouble)
} else if err = checkType(val, reflect.Float32, reflect.Float64); err != nil {
return err
} else {
i, err := strconv.ParseFloat(string(data), val.Type().Bits())
if err != nil {
return err
}
val.SetFloat(i)
}
default:
return errors.New("unsupported type")
}
// </type>
if err = dec.Skip(); err != nil {
return err
}
}
return nil
}
func (dec *decoder) readTag() (string, []byte, error) {
var tok xml.Token
var err error
var name string
for {
if tok, err = dec.Token(); err != nil {
return "", nil, err
}
if t, ok := tok.(xml.StartElement); ok {
name = t.Name.Local
break
}
}
value, err := dec.readCharData()
if err != nil {
return "", nil, err
}
return name, value, dec.Skip()
}
func (dec *decoder) readCharData() ([]byte, error) {
var tok xml.Token
var err error
if tok, err = dec.Token(); err != nil {
return nil, err
}
if t, ok := tok.(xml.CharData); ok {
return []byte(t.Copy()), nil
} else {
return nil, invalidXmlError
}
}
func checkType(val reflect.Value, kinds ...reflect.Kind) error {
if len(kinds) == 0 {
return nil
}
if val.Kind() == reflect.Ptr {
val = val.Elem()
}
match := false
for _, kind := range kinds {
if val.Kind() == kind {
match = true
break
}
}
if !match {
return TypeMismatchError(fmt.Sprintf("error: type mismatch - can't unmarshal %v to %v",
val.Kind(), kinds[0]))
}
return nil
}

164
vendor/github.com/kolo/xmlrpc/encoder.go generated vendored Normal file
View File

@ -0,0 +1,164 @@
package xmlrpc
import (
"bytes"
"encoding/xml"
"fmt"
"reflect"
"strconv"
"time"
)
type encodeFunc func(reflect.Value) ([]byte, error)
func marshal(v interface{}) ([]byte, error) {
if v == nil {
return []byte{}, nil
}
val := reflect.ValueOf(v)
return encodeValue(val)
}
func encodeValue(val reflect.Value) ([]byte, error) {
var b []byte
var err error
if val.Kind() == reflect.Ptr || val.Kind() == reflect.Interface {
if val.IsNil() {
return []byte("<value/>"), nil
}
val = val.Elem()
}
switch val.Kind() {
case reflect.Struct:
switch val.Interface().(type) {
case time.Time:
t := val.Interface().(time.Time)
b = []byte(fmt.Sprintf("<dateTime.iso8601>%s</dateTime.iso8601>", t.Format(iso8601)))
default:
b, err = encodeStruct(val)
}
case reflect.Map:
b, err = encodeMap(val)
case reflect.Slice:
b, err = encodeSlice(val)
case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
b = []byte(fmt.Sprintf("<int>%s</int>", strconv.FormatInt(val.Int(), 10)))
case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64:
b = []byte(fmt.Sprintf("<i4>%s</i4>", strconv.FormatUint(val.Uint(), 10)))
case reflect.Float32, reflect.Float64:
b = []byte(fmt.Sprintf("<double>%s</double>",
strconv.FormatFloat(val.Float(), 'g', -1, val.Type().Bits())))
case reflect.Bool:
if val.Bool() {
b = []byte("<boolean>1</boolean>")
} else {
b = []byte("<boolean>0</boolean>")
}
case reflect.String:
var buf bytes.Buffer
xml.Escape(&buf, []byte(val.String()))
if _, ok := val.Interface().(Base64); ok {
b = []byte(fmt.Sprintf("<base64>%s</base64>", buf.String()))
} else {
b = []byte(fmt.Sprintf("<string>%s</string>", buf.String()))
}
default:
return nil, fmt.Errorf("xmlrpc encode error: unsupported type")
}
if err != nil {
return nil, err
}
return []byte(fmt.Sprintf("<value>%s</value>", string(b))), nil
}
func encodeStruct(val reflect.Value) ([]byte, error) {
var b bytes.Buffer
b.WriteString("<struct>")
t := val.Type()
for i := 0; i < t.NumField(); i++ {
b.WriteString("<member>")
f := t.Field(i)
name := f.Tag.Get("xmlrpc")
if name == "" {
name = f.Name
}
b.WriteString(fmt.Sprintf("<name>%s</name>", name))
p, err := encodeValue(val.FieldByName(f.Name))
if err != nil {
return nil, err
}
b.Write(p)
b.WriteString("</member>")
}
b.WriteString("</struct>")
return b.Bytes(), nil
}
func encodeMap(val reflect.Value) ([]byte, error) {
var t = val.Type()
if t.Key().Kind() != reflect.String {
return nil, fmt.Errorf("xmlrpc encode error: only maps with string keys are supported")
}
var b bytes.Buffer
b.WriteString("<struct>")
keys := val.MapKeys()
for i := 0; i < val.Len(); i++ {
key := keys[i]
kval := val.MapIndex(key)
b.WriteString("<member>")
b.WriteString(fmt.Sprintf("<name>%s</name>", key.String()))
p, err := encodeValue(kval)
if err != nil {
return nil, err
}
b.Write(p)
b.WriteString("</member>")
}
b.WriteString("</struct>")
return b.Bytes(), nil
}
func encodeSlice(val reflect.Value) ([]byte, error) {
var b bytes.Buffer
b.WriteString("<array><data>")
for i := 0; i < val.Len(); i++ {
p, err := encodeValue(val.Index(i))
if err != nil {
return nil, err
}
b.Write(p)
}
b.WriteString("</data></array>")
return b.Bytes(), nil
}

57
vendor/github.com/kolo/xmlrpc/request.go generated vendored Normal file
View File

@ -0,0 +1,57 @@
package xmlrpc
import (
"bytes"
"fmt"
"net/http"
)
func NewRequest(url string, method string, args interface{}) (*http.Request, error) {
var t []interface{}
var ok bool
if t, ok = args.([]interface{}); !ok {
if args != nil {
t = []interface{}{args}
}
}
body, err := EncodeMethodCall(method, t...)
if err != nil {
return nil, err
}
request, err := http.NewRequest("POST", url, bytes.NewReader(body))
if err != nil {
return nil, err
}
request.Header.Set("Content-Type", "text/xml")
request.Header.Set("Content-Length", fmt.Sprintf("%d", len(body)))
return request, nil
}
func EncodeMethodCall(method string, args ...interface{}) ([]byte, error) {
var b bytes.Buffer
b.WriteString(`<?xml version="1.0" encoding="UTF-8"?>`)
b.WriteString(fmt.Sprintf("<methodCall><methodName>%s</methodName>", method))
if args != nil {
b.WriteString("<params>")
for _, arg := range args {
p, err := marshal(arg)
if err != nil {
return nil, err
}
b.WriteString(fmt.Sprintf("<param>%s</param>", string(p)))
}
b.WriteString("</params>")
}
b.WriteString("</methodCall>")
return b.Bytes(), nil
}
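
As a quick illustration of the wire format produced by EncodeMethodCall (derived from the encoder above; the method name and argument are arbitrary, and the comment shows the output reformatted with line breaks for readability):

```go
package main

import (
	"fmt"
	"log"

	"github.com/kolo/xmlrpc"
)

func main() {
	body, err := xmlrpc.EncodeMethodCall("service.upcase", "hello")
	if err != nil {
		log.Fatal(err)
	}
	// Roughly:
	// <?xml version="1.0" encoding="UTF-8"?><methodCall>
	//   <methodName>service.upcase</methodName>
	//   <params><param><value><string>hello</string></value></param></params>
	// </methodCall>
	fmt.Println(string(body))
}
```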

52
vendor/github.com/kolo/xmlrpc/response.go generated vendored Normal file
View File

@ -0,0 +1,52 @@
package xmlrpc
import (
"regexp"
)
var (
faultRx = regexp.MustCompile(`<fault>(\s|\S)+</fault>`)
)
type failedResponse struct {
Code int `xmlrpc:"faultCode"`
Error string `xmlrpc:"faultString"`
}
func (r *failedResponse) err() error {
return &xmlrpcError{
code: r.Code,
err: r.Error,
}
}
type Response struct {
data []byte
}
func NewResponse(data []byte) *Response {
return &Response{
data: data,
}
}
func (r *Response) Failed() bool {
return faultRx.Match(r.data)
}
func (r *Response) Err() error {
failedResp := new(failedResponse)
if err := unmarshal(r.data, failedResp); err != nil {
return err
}
return failedResp.err()
}
func (r *Response) Unmarshal(v interface{}) error {
if err := unmarshal(r.data, v); err != nil {
return err
}
return nil
}

25
vendor/github.com/kolo/xmlrpc/test_server.rb generated vendored Normal file
View File

@ -0,0 +1,25 @@
# encoding: utf-8
require "xmlrpc/server"
class Service
def time
Time.now
end
def upcase(s)
s.upcase
end
def sum(x, y)
x + y
end
def error
raise XMLRPC::FaultException.new(500, "Server error")
end
end
server = XMLRPC::Server.new 5001, 'localhost'
server.add_handler "service", Service.new
server.serve

19
vendor/github.com/kolo/xmlrpc/xmlrpc.go generated vendored Normal file
View File

@ -0,0 +1,19 @@
package xmlrpc
import (
"fmt"
)
// xmlrpcError represents an error returned by an xmlrpc request.
type xmlrpcError struct {
code int
err string
}
// Error implements the error interface.
func (e *xmlrpcError) Error() string {
return fmt.Sprintf("error: \"%s\" code: %d", e.err, e.code)
}
// Base64 represents a value in base64 encoding.
type Base64 string

View File

@ -0,0 +1,201 @@
Apache License
Version 2.0, January 2004
http://www.apache.org/licenses/
TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
1. Definitions.
"License" shall mean the terms and conditions for use, reproduction,
and distribution as defined by Sections 1 through 9 of this document.
"Licensor" shall mean the copyright owner or entity authorized by
the copyright owner that is granting the License.
"Legal Entity" shall mean the union of the acting entity and all
other entities that control, are controlled by, or are under common
control with that entity. For the purposes of this definition,
"control" means (i) the power, direct or indirect, to cause the
direction or management of such entity, whether by contract or
otherwise, or (ii) ownership of fifty percent (50%) or more of the
outstanding shares, or (iii) beneficial ownership of such entity.
"You" (or "Your") shall mean an individual or Legal Entity
exercising permissions granted by this License.
"Source" form shall mean the preferred form for making modifications,
including but not limited to software source code, documentation
source, and configuration files.
"Object" form shall mean any form resulting from mechanical
transformation or translation of a Source form, including but
not limited to compiled object code, generated documentation,
and conversions to other media types.
"Work" shall mean the work of authorship, whether in Source or
Object form, made available under the License, as indicated by a
copyright notice that is included in or attached to the work
(an example is provided in the Appendix below).
"Derivative Works" shall mean any work, whether in Source or Object
form, that is based on (or derived from) the Work and for which the
editorial revisions, annotations, elaborations, or other modifications
represent, as a whole, an original work of authorship. For the purposes
of this License, Derivative Works shall not include works that remain
separable from, or merely link (or bind by name) to the interfaces of,
the Work and Derivative Works thereof.
"Contribution" shall mean any work of authorship, including
the original version of the Work and any modifications or additions
to that Work or Derivative Works thereof, that is intentionally
submitted to Licensor for inclusion in the Work by the copyright owner
or by an individual or Legal Entity authorized to submit on behalf of
the copyright owner. For the purposes of this definition, "submitted"
means any form of electronic, verbal, or written communication sent
to the Licensor or its representatives, including but not limited to
communication on electronic mailing lists, source code control systems,
and issue tracking systems that are managed by, or on behalf of, the
Licensor for the purpose of discussing and improving the Work, but
excluding communication that is conspicuously marked or otherwise
designated in writing by the copyright owner as "Not a Contribution."
"Contributor" shall mean Licensor and any individual or Legal Entity
on behalf of whom a Contribution has been received by Licensor and
subsequently incorporated within the Work.
2. Grant of Copyright License. Subject to the terms and conditions of
this License, each Contributor hereby grants to You a perpetual,
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
copyright license to reproduce, prepare Derivative Works of,
publicly display, publicly perform, sublicense, and distribute the
Work and such Derivative Works in Source or Object form.
3. Grant of Patent License. Subject to the terms and conditions of
this License, each Contributor hereby grants to You a perpetual,
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
(except as stated in this section) patent license to make, have made,
use, offer to sell, sell, import, and otherwise transfer the Work,
where such license applies only to those patent claims licensable
by such Contributor that are necessarily infringed by their
Contribution(s) alone or by combination of their Contribution(s)
with the Work to which such Contribution(s) was submitted. If You
institute patent litigation against any entity (including a
cross-claim or counterclaim in a lawsuit) alleging that the Work
or a Contribution incorporated within the Work constitutes direct
or contributory patent infringement, then any patent licenses
granted to You under this License for that Work shall terminate
as of the date such litigation is filed.
4. Redistribution. You may reproduce and distribute copies of the
Work or Derivative Works thereof in any medium, with or without
modifications, and in Source or Object form, provided that You
meet the following conditions:
(a) You must give any other recipients of the Work or
Derivative Works a copy of this License; and
(b) You must cause any modified files to carry prominent notices
stating that You changed the files; and
(c) You must retain, in the Source form of any Derivative Works
that You distribute, all copyright, patent, trademark, and
attribution notices from the Source form of the Work,
excluding those notices that do not pertain to any part of
the Derivative Works; and
(d) If the Work includes a "NOTICE" text file as part of its
distribution, then any Derivative Works that You distribute must
include a readable copy of the attribution notices contained
within such NOTICE file, excluding those notices that do not
pertain to any part of the Derivative Works, in at least one
of the following places: within a NOTICE text file distributed
as part of the Derivative Works; within the Source form or
documentation, if provided along with the Derivative Works; or,
within a display generated by the Derivative Works, if and
wherever such third-party notices normally appear. The contents
of the NOTICE file are for informational purposes only and
do not modify the License. You may add Your own attribution
notices within Derivative Works that You distribute, alongside
or as an addendum to the NOTICE text from the Work, provided
that such additional attribution notices cannot be construed
as modifying the License.
You may add Your own copyright statement to Your modifications and
may provide additional or different license terms and conditions
for use, reproduction, or distribution of Your modifications, or
for any such Derivative Works as a whole, provided Your use,
reproduction, and distribution of the Work otherwise complies with
the conditions stated in this License.
5. Submission of Contributions. Unless You explicitly state otherwise,
any Contribution intentionally submitted for inclusion in the Work
by You to the Licensor shall be under the terms and conditions of
this License, without any additional terms or conditions.
Notwithstanding the above, nothing herein shall supersede or modify
the terms of any separate license agreement you may have executed
with Licensor regarding such Contributions.
6. Trademarks. This License does not grant permission to use the trade
names, trademarks, service marks, or product names of the Licensor,
except as required for reasonable and customary use in describing the
origin of the Work and reproducing the content of the NOTICE file.
7. Disclaimer of Warranty. Unless required by applicable law or
agreed to in writing, Licensor provides the Work (and each
Contributor provides its Contributions) on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
implied, including, without limitation, any warranties or conditions
of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
PARTICULAR PURPOSE. You are solely responsible for determining the
appropriateness of using or redistributing the Work and assume any
risks associated with Your exercise of permissions under this License.
8. Limitation of Liability. In no event and under no legal theory,
whether in tort (including negligence), contract, or otherwise,
unless required by applicable law (such as deliberate and grossly
negligent acts) or agreed to in writing, shall any Contributor be
liable to You for damages, including any direct, indirect, special,
incidental, or consequential damages of any character arising as a
result of this License or out of the use or inability to use the
Work (including but not limited to damages for loss of goodwill,
work stoppage, computer failure or malfunction, or any and all
other commercial damages or losses), even if such Contributor
has been advised of the possibility of such damages.
9. Accepting Warranty or Additional Liability. While redistributing
the Work or Derivative Works thereof, You may choose to offer,
and charge a fee for, acceptance of support, warranty, indemnity,
or other liability obligations and/or rights consistent with this
License. However, in accepting such obligations, You may act only
on Your own behalf and on Your sole responsibility, not on behalf
of any other Contributor, and only if You agree to indemnify,
defend, and hold each Contributor harmless for any liability
incurred by, or claims asserted against, such Contributor by reason
of your accepting any such warranty or additional liability.
END OF TERMS AND CONDITIONS
APPENDIX: How to apply the Apache License to your work.
To apply the Apache License to your work, attach the following
boilerplate notice, with the fields enclosed by brackets "{}"
replaced with your own identifying information. (Don't include
the brackets!) The text should be enclosed in the appropriate
comment syntax for the file format. We also recommend that a
file or class name and description of purpose be included on the
same "printed page" as the copyright notice for easier
identification within third-party archives.
Copyright {yyyy} {name of copyright owner}
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.

View File

@ -0,0 +1 @@
Copyright 2012 Matt T. Proud (matt.proud@gmail.com)

View File

@ -0,0 +1,75 @@
// Copyright 2013 Matt T. Proud
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package pbutil
import (
"encoding/binary"
"errors"
"io"
"github.com/golang/protobuf/proto"
)
var errInvalidVarint = errors.New("invalid varint32 encountered")
// ReadDelimited decodes a message from the provided length-delimited stream,
// where the length is encoded as 32-bit varint prefix to the message body.
// It returns the total number of bytes read and any applicable error. This is
// roughly equivalent to the companion Java API's
// MessageLite#parseDelimitedFrom. As per the reader contract, this function
// calls r.Read repeatedly as required until exactly one message including its
// prefix is read and decoded (or an error has occurred). The function never
// reads more bytes from the stream than required. The function never returns
// an error if a message has been read and decoded correctly, even if the end
// of the stream has been reached in doing so. In that case, any subsequent
// calls return (0, io.EOF).
func ReadDelimited(r io.Reader, m proto.Message) (n int, err error) {
// Per AbstractParser#parsePartialDelimitedFrom with
// CodedInputStream#readRawVarint32.
headerBuf := make([]byte, binary.MaxVarintLen32)
var bytesRead, varIntBytes int
var messageLength uint64
for varIntBytes == 0 { // i.e. no varint has been decoded yet.
if bytesRead >= len(headerBuf) {
return bytesRead, errInvalidVarint
}
// We have to read byte by byte here to avoid reading more bytes
// than required. Each read byte is appended to what we have
// read before.
newBytesRead, err := r.Read(headerBuf[bytesRead : bytesRead+1])
if newBytesRead == 0 {
if err != nil {
return bytesRead, err
}
// A Reader should not return (0, nil), but if it does,
// it should be treated as no-op (according to the
// Reader contract). So let's go on...
continue
}
bytesRead += newBytesRead
// Now present everything read so far to the varint decoder and
// see if a varint can be decoded already.
messageLength, varIntBytes = proto.DecodeVarint(headerBuf[:bytesRead])
}
messageBuf := make([]byte, messageLength)
newBytesRead, err := io.ReadFull(r, messageBuf)
bytesRead += newBytesRead
if err != nil {
return bytesRead, err
}
return bytesRead, proto.Unmarshal(messageBuf, m)
}

View File

@ -0,0 +1,16 @@
// Copyright 2013 Matt T. Proud
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
// Package pbutil provides record length-delimited Protocol Buffer streaming.
package pbutil

View File

@ -0,0 +1,46 @@
// Copyright 2013 Matt T. Proud
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package pbutil
import (
"encoding/binary"
"io"
"github.com/golang/protobuf/proto"
)
// WriteDelimited encodes and dumps a message to the provided writer prefixed
// with a 32-bit varint indicating the length of the encoded message, producing
// a length-delimited record stream, which can be used to chain together
// encoded messages of the same type together in a file. It returns the total
// number of bytes written and any applicable error. This is roughly
// equivalent to the companion Java API's MessageLite#writeDelimitedTo.
func WriteDelimited(w io.Writer, m proto.Message) (n int, err error) {
buffer, err := proto.Marshal(m)
if err != nil {
return 0, err
}
buf := make([]byte, binary.MaxVarintLen32)
encodedLength := binary.PutUvarint(buf, uint64(len(buffer)))
sync, err := w.Write(buf[:encodedLength])
if err != nil {
return sync, err
}
n, err = w.Write(buffer)
return n + sync, err
}
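
A round-trip sketch of the two pbutil helpers, using a MetricFamily from the vendored client_model package as the message type (the buffer and field value are arbitrary):

```go
package main

import (
	"bytes"
	"fmt"
	"log"

	"github.com/golang/protobuf/proto"
	"github.com/matttproud/golang_protobuf_extensions/pbutil"
	dto "github.com/prometheus/client_model/go"
)

func main() {
	var buf bytes.Buffer

	// Write a varint length prefix followed by the marshalled message.
	in := &dto.MetricFamily{Name: proto.String("http_requests_total")}
	if _, err := pbutil.WriteDelimited(&buf, in); err != nil {
		log.Fatal(err)
	}

	// Read exactly one length-delimited message back.
	out := &dto.MetricFamily{}
	if _, err := pbutil.ReadDelimited(&buf, out); err != nil {
		log.Fatal(err)
	}
	fmt.Println(out.GetName()) // http_requests_total
}
```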

201
vendor/github.com/prometheus/client_golang/LICENSE generated vendored Normal file
View File

@ -0,0 +1,201 @@
Apache License
Version 2.0, January 2004
http://www.apache.org/licenses/
TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
1. Definitions.
"License" shall mean the terms and conditions for use, reproduction,
and distribution as defined by Sections 1 through 9 of this document.
"Licensor" shall mean the copyright owner or entity authorized by
the copyright owner that is granting the License.
"Legal Entity" shall mean the union of the acting entity and all
other entities that control, are controlled by, or are under common
control with that entity. For the purposes of this definition,
"control" means (i) the power, direct or indirect, to cause the
direction or management of such entity, whether by contract or
otherwise, or (ii) ownership of fifty percent (50%) or more of the
outstanding shares, or (iii) beneficial ownership of such entity.
"You" (or "Your") shall mean an individual or Legal Entity
exercising permissions granted by this License.
"Source" form shall mean the preferred form for making modifications,
including but not limited to software source code, documentation
source, and configuration files.
"Object" form shall mean any form resulting from mechanical
transformation or translation of a Source form, including but
not limited to compiled object code, generated documentation,
and conversions to other media types.
"Work" shall mean the work of authorship, whether in Source or
Object form, made available under the License, as indicated by a
copyright notice that is included in or attached to the work
(an example is provided in the Appendix below).
"Derivative Works" shall mean any work, whether in Source or Object
form, that is based on (or derived from) the Work and for which the
editorial revisions, annotations, elaborations, or other modifications
represent, as a whole, an original work of authorship. For the purposes
of this License, Derivative Works shall not include works that remain
separable from, or merely link (or bind by name) to the interfaces of,
the Work and Derivative Works thereof.
"Contribution" shall mean any work of authorship, including
the original version of the Work and any modifications or additions
to that Work or Derivative Works thereof, that is intentionally
submitted to Licensor for inclusion in the Work by the copyright owner
or by an individual or Legal Entity authorized to submit on behalf of
the copyright owner. For the purposes of this definition, "submitted"
means any form of electronic, verbal, or written communication sent
to the Licensor or its representatives, including but not limited to
communication on electronic mailing lists, source code control systems,
and issue tracking systems that are managed by, or on behalf of, the
Licensor for the purpose of discussing and improving the Work, but
excluding communication that is conspicuously marked or otherwise
designated in writing by the copyright owner as "Not a Contribution."
"Contributor" shall mean Licensor and any individual or Legal Entity
on behalf of whom a Contribution has been received by Licensor and
subsequently incorporated within the Work.
2. Grant of Copyright License. Subject to the terms and conditions of
this License, each Contributor hereby grants to You a perpetual,
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
copyright license to reproduce, prepare Derivative Works of,
publicly display, publicly perform, sublicense, and distribute the
Work and such Derivative Works in Source or Object form.
3. Grant of Patent License. Subject to the terms and conditions of
this License, each Contributor hereby grants to You a perpetual,
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
(except as stated in this section) patent license to make, have made,
use, offer to sell, sell, import, and otherwise transfer the Work,
where such license applies only to those patent claims licensable
by such Contributor that are necessarily infringed by their
Contribution(s) alone or by combination of their Contribution(s)
with the Work to which such Contribution(s) was submitted. If You
institute patent litigation against any entity (including a
cross-claim or counterclaim in a lawsuit) alleging that the Work
or a Contribution incorporated within the Work constitutes direct
or contributory patent infringement, then any patent licenses
granted to You under this License for that Work shall terminate
as of the date such litigation is filed.
4. Redistribution. You may reproduce and distribute copies of the
Work or Derivative Works thereof in any medium, with or without
modifications, and in Source or Object form, provided that You
meet the following conditions:
(a) You must give any other recipients of the Work or
Derivative Works a copy of this License; and
(b) You must cause any modified files to carry prominent notices
stating that You changed the files; and
(c) You must retain, in the Source form of any Derivative Works
that You distribute, all copyright, patent, trademark, and
attribution notices from the Source form of the Work,
excluding those notices that do not pertain to any part of
the Derivative Works; and
(d) If the Work includes a "NOTICE" text file as part of its
distribution, then any Derivative Works that You distribute must
include a readable copy of the attribution notices contained
within such NOTICE file, excluding those notices that do not
pertain to any part of the Derivative Works, in at least one
of the following places: within a NOTICE text file distributed
as part of the Derivative Works; within the Source form or
documentation, if provided along with the Derivative Works; or,
within a display generated by the Derivative Works, if and
wherever such third-party notices normally appear. The contents
of the NOTICE file are for informational purposes only and
do not modify the License. You may add Your own attribution
notices within Derivative Works that You distribute, alongside
or as an addendum to the NOTICE text from the Work, provided
that such additional attribution notices cannot be construed
as modifying the License.
You may add Your own copyright statement to Your modifications and
may provide additional or different license terms and conditions
for use, reproduction, or distribution of Your modifications, or
for any such Derivative Works as a whole, provided Your use,
reproduction, and distribution of the Work otherwise complies with
the conditions stated in this License.
5. Submission of Contributions. Unless You explicitly state otherwise,
any Contribution intentionally submitted for inclusion in the Work
by You to the Licensor shall be under the terms and conditions of
this License, without any additional terms or conditions.
Notwithstanding the above, nothing herein shall supersede or modify
the terms of any separate license agreement you may have executed
with Licensor regarding such Contributions.
6. Trademarks. This License does not grant permission to use the trade
names, trademarks, service marks, or product names of the Licensor,
except as required for reasonable and customary use in describing the
origin of the Work and reproducing the content of the NOTICE file.
7. Disclaimer of Warranty. Unless required by applicable law or
agreed to in writing, Licensor provides the Work (and each
Contributor provides its Contributions) on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
implied, including, without limitation, any warranties or conditions
of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
PARTICULAR PURPOSE. You are solely responsible for determining the
appropriateness of using or redistributing the Work and assume any
risks associated with Your exercise of permissions under this License.
8. Limitation of Liability. In no event and under no legal theory,
whether in tort (including negligence), contract, or otherwise,
unless required by applicable law (such as deliberate and grossly
negligent acts) or agreed to in writing, shall any Contributor be
liable to You for damages, including any direct, indirect, special,
incidental, or consequential damages of any character arising as a
result of this License or out of the use or inability to use the
Work (including but not limited to damages for loss of goodwill,
work stoppage, computer failure or malfunction, or any and all
other commercial damages or losses), even if such Contributor
has been advised of the possibility of such damages.
9. Accepting Warranty or Additional Liability. While redistributing
the Work or Derivative Works thereof, You may choose to offer,
and charge a fee for, acceptance of support, warranty, indemnity,
or other liability obligations and/or rights consistent with this
License. However, in accepting such obligations, You may act only
on Your own behalf and on Your sole responsibility, not on behalf
of any other Contributor, and only if You agree to indemnify,
defend, and hold each Contributor harmless for any liability
incurred by, or claims asserted against, such Contributor by reason
of your accepting any such warranty or additional liability.
END OF TERMS AND CONDITIONS
APPENDIX: How to apply the Apache License to your work.
To apply the Apache License to your work, attach the following
boilerplate notice, with the fields enclosed by brackets "[]"
replaced with your own identifying information. (Don't include
the brackets!) The text should be enclosed in the appropriate
comment syntax for the file format. We also recommend that a
file or class name and description of purpose be included on the
same "printed page" as the copyright notice for easier
identification within third-party archives.
Copyright [yyyy] [name of copyright owner]
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.

28
vendor/github.com/prometheus/client_golang/NOTICE generated vendored Normal file
View File

@ -0,0 +1,28 @@
Prometheus instrumentation library for Go applications
Copyright 2012-2015 The Prometheus Authors
This product includes software developed at
SoundCloud Ltd. (http://soundcloud.com/).
The following components are included in this product:
goautoneg
http://bitbucket.org/ww/goautoneg
Copyright 2011, Open Knowledge Foundation Ltd.
See README.txt for license details.
perks - a fork of https://github.com/bmizerany/perks
https://github.com/beorn7/perks
Copyright 2013-2015 Blake Mizerany, Björn Rabenstein
See https://github.com/beorn7/perks/blob/master/README.md for license details.
Go support for Protocol Buffers - Google's data interchange format
http://github.com/golang/protobuf/
Copyright 2010 The Go Authors
See source code for license details.
Support for streaming Protocol Buffer messages for the Go language (golang).
https://github.com/matttproud/golang_protobuf_extensions
Copyright 2013 Matt T. Proud
Licensed under the Apache License, Version 2.0

View File

@ -0,0 +1,53 @@
# Overview
This is the [Go](http://golang.org) client library for
[Prometheus](http://www.prometheus.io) telemetric instrumentation. It
enables authors to define process-space metrics for their servers and
expose them through a web service interface for extraction,
aggregation, and a whole slew of other post-processing techniques.
# Installing
$ go get github.com/prometheus/client_golang/prometheus
# Example
```go
package main
import (
"net/http"
"github.com/prometheus/client_golang/prometheus"
)
var (
indexed = prometheus.NewCounter(prometheus.CounterOpts{
Namespace: "my_company",
Subsystem: "indexer",
Name: "documents_indexed",
Help: "The number of documents indexed.",
})
size = prometheus.NewGauge(prometheus.GaugeOpts{
Namespace: "my_company",
Subsystem: "storage",
Name: "documents_total_size_bytes",
Help: "The total size of all documents in the storage.",
})
)
func main() {
http.Handle("/metrics", prometheus.Handler())
indexed.Inc()
size.Set(5)
http.ListenAndServe(":8080", nil)
}
func init() {
prometheus.MustRegister(indexed)
prometheus.MustRegister(size)
}
```
# Documentation
[![GoDoc](https://godoc.org/github.com/prometheus/client_golang?status.png)](https://godoc.org/github.com/prometheus/client_golang)

View File

@ -0,0 +1,75 @@
// Copyright 2014 The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package prometheus
// Collector is the interface implemented by anything that can be used by
// Prometheus to collect metrics. A Collector has to be registered for
// collection. See Register, MustRegister, RegisterOrGet, and MustRegisterOrGet.
//
// The stock metrics provided by this package (like Gauge, Counter, Summary) are
// also Collectors (which only ever collect one metric, namely itself). An
// implementer of Collector may, however, collect multiple metrics in a
// coordinated fashion and/or create metrics on the fly. Examples for collectors
// already implemented in this library are the metric vectors (i.e. collection
// of multiple instances of the same Metric but with different label values)
// like GaugeVec or SummaryVec, and the ExpvarCollector.
type Collector interface {
// Describe sends the super-set of all possible descriptors of metrics
// collected by this Collector to the provided channel and returns once
// the last descriptor has been sent. The sent descriptors fulfill the
// consistency and uniqueness requirements described in the Desc
// documentation. (It is valid if one and the same Collector sends
// duplicate descriptors. Those duplicates are simply ignored. However,
// two different Collectors must not send duplicate descriptors.) This
// method idempotently sends the same descriptors throughout the
// lifetime of the Collector. If a Collector encounters an error while
// executing this method, it must send an invalid descriptor (created
// with NewInvalidDesc) to signal the error to the registry.
Describe(chan<- *Desc)
// Collect is called by Prometheus when collecting metrics. The
// implementation sends each collected metric via the provided channel
// and returns once the last metric has been sent. The descriptor of
// each sent metric is one of those returned by Describe. Returned
// metrics that share the same descriptor must differ in their variable
// label values. This method may be called concurrently and must
// therefore be implemented in a concurrency safe way. Blocking occurs
// at the expense of total performance of rendering all registered
// metrics. Ideally, Collector implementations support concurrent
// readers.
Collect(chan<- Metric)
}
// SelfCollector implements Collector for a single Metric so that the
// Metric collects itself. Add it as an anonymous field to a struct that
// implements Metric, and call Init with the Metric itself as an argument.
type SelfCollector struct {
self Metric
}
// Init provides the SelfCollector with a reference to the metric it is supposed
// to collect. It is usually called within the factory function to create a
// metric. See example.
func (c *SelfCollector) Init(self Metric) {
c.self = self
}
// Describe implements Collector.
func (c *SelfCollector) Describe(ch chan<- *Desc) {
ch <- c.self.Desc()
}
// Collect implements Collector.
func (c *SelfCollector) Collect(ch chan<- Metric) {
ch <- c.self
}
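
To make the Collector contract above concrete, a minimal custom Collector sketch; the metric name and the queueLength helper are hypothetical, and the constant-metric helpers (NewDesc, MustNewConstMetric, GaugeValue) are the ones referenced elsewhere in this package:

```go
package main

import "github.com/prometheus/client_golang/prometheus"

// queueLength is a hypothetical helper standing in for real instrumentation.
func queueLength() float64 { return 42 }

// queueCollector reports a single gauge computed at scrape time.
type queueCollector struct {
	desc *prometheus.Desc
}

// Describe sends the only descriptor this collector ever uses.
func (c *queueCollector) Describe(ch chan<- *prometheus.Desc) {
	ch <- c.desc
}

// Collect builds a throw-away const metric from the current queue length.
func (c *queueCollector) Collect(ch chan<- prometheus.Metric) {
	ch <- prometheus.MustNewConstMetric(c.desc, prometheus.GaugeValue, queueLength())
}

func main() {
	prometheus.MustRegister(&queueCollector{
		desc: prometheus.NewDesc("worker_queue_length", "Number of queued jobs.", nil, nil),
	})
}
```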

View File

@ -0,0 +1,173 @@
// Copyright 2014 The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package prometheus
import (
"errors"
)
// Counter is a Metric that represents a single numerical value that only ever
// goes up. That implies that it cannot be used to count items whose number can
// also go down, e.g. the number of currently running goroutines. Those
// "counters" are represented by Gauges.
//
// A Counter is typically used to count requests served, tasks completed, errors
// occurred, etc.
//
// To create Counter instances, use NewCounter.
type Counter interface {
Metric
Collector
// Set is used to set the Counter to an arbitrary value. It is only used
// if you have to transfer a value from an external counter into this
// Prometheus metric. Do not use it for regular handling of a
// Prometheus counter (as it can be used to break the contract of
// monotonically increasing values).
Set(float64)
// Inc increments the counter by 1.
Inc()
// Add adds the given value to the counter. It panics if the value is <
// 0.
Add(float64)
}
// CounterOpts is an alias for Opts. See there for doc comments.
type CounterOpts Opts
// NewCounter creates a new Counter based on the provided CounterOpts.
func NewCounter(opts CounterOpts) Counter {
desc := NewDesc(
BuildFQName(opts.Namespace, opts.Subsystem, opts.Name),
opts.Help,
nil,
opts.ConstLabels,
)
result := &counter{value: value{desc: desc, valType: CounterValue, labelPairs: desc.constLabelPairs}}
result.Init(result) // Init self-collection.
return result
}
type counter struct {
value
}
func (c *counter) Add(v float64) {
if v < 0 {
panic(errors.New("counter cannot decrease in value"))
}
c.value.Add(v)
}
// CounterVec is a Collector that bundles a set of Counters that all share the
// same Desc, but have different values for their variable labels. This is used
// if you want to count the same thing partitioned by various dimensions
// (e.g. number of HTTP requests, partitioned by response code and
// method). Create instances with NewCounterVec.
//
// CounterVec embeds MetricVec. See there for a full list of methods with
// detailed documentation.
type CounterVec struct {
MetricVec
}
// NewCounterVec creates a new CounterVec based on the provided CounterOpts and
// partitioned by the given label names. At least one label name must be
// provided.
func NewCounterVec(opts CounterOpts, labelNames []string) *CounterVec {
desc := NewDesc(
BuildFQName(opts.Namespace, opts.Subsystem, opts.Name),
opts.Help,
labelNames,
opts.ConstLabels,
)
return &CounterVec{
MetricVec: MetricVec{
children: map[uint64]Metric{},
desc: desc,
newMetric: func(lvs ...string) Metric {
result := &counter{value: value{
desc: desc,
valType: CounterValue,
labelPairs: makeLabelPairs(desc, lvs),
}}
result.Init(result) // Init self-collection.
return result
},
},
}
}
// GetMetricWithLabelValues replaces the method of the same name in
// MetricVec. The difference is that this method returns a Counter and not a
// Metric so that no type conversion is required.
func (m *CounterVec) GetMetricWithLabelValues(lvs ...string) (Counter, error) {
metric, err := m.MetricVec.GetMetricWithLabelValues(lvs...)
if metric != nil {
return metric.(Counter), err
}
return nil, err
}
// GetMetricWith replaces the method of the same name in MetricVec. The
// difference is that this method returns a Counter and not a Metric so that no
// type conversion is required.
func (m *CounterVec) GetMetricWith(labels Labels) (Counter, error) {
metric, err := m.MetricVec.GetMetricWith(labels)
if metric != nil {
return metric.(Counter), err
}
return nil, err
}
// WithLabelValues works as GetMetricWithLabelValues, but panics where
// GetMetricWithLabelValues would have returned an error. By not returning an
// error, WithLabelValues allows shortcuts like
// myVec.WithLabelValues("404", "GET").Add(42)
func (m *CounterVec) WithLabelValues(lvs ...string) Counter {
return m.MetricVec.WithLabelValues(lvs...).(Counter)
}
// With works as GetMetricWith, but panics where GetMetricWith would have
// returned an error. By not returning an error, With allows shortcuts like
// myVec.With(Labels{"code": "404", "method": "GET"}).Add(42)
func (m *CounterVec) With(labels Labels) Counter {
return m.MetricVec.With(labels).(Counter)
}
// CounterFunc is a Counter whose value is determined at collect time by calling a
// provided function.
//
// To create CounterFunc instances, use NewCounterFunc.
type CounterFunc interface {
Metric
Collector
}
// NewCounterFunc creates a new CounterFunc based on the provided
// CounterOpts. The value reported is determined by calling the given function
// from within the Write method. Take into account that metric collection may
// happen concurrently. If that results in concurrent calls to Write, like in
// the case where a CounterFunc is directly registered with Prometheus, the
// provided function must be concurrency-safe. The function should also honor
// the contract for a Counter (values only go up, not down), but compliance will
// not be checked.
func NewCounterFunc(opts CounterOpts, function func() float64) CounterFunc {
return newValueFunc(NewDesc(
BuildFQName(opts.Namespace, opts.Subsystem, opts.Name),
opts.Help,
nil,
opts.ConstLabels,
), CounterValue, function)
}
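
A short sketch of the CounterVec shortcuts described above (metric and label names are illustrative):

```go
package main

import "github.com/prometheus/client_golang/prometheus"

var httpReqs = prometheus.NewCounterVec(
	prometheus.CounterOpts{
		Name: "http_requests_total",
		Help: "Number of HTTP requests, partitioned by status code and method.",
	},
	[]string{"code", "method"},
)

func init() {
	prometheus.MustRegister(httpReqs)
}

func main() {
	// WithLabelValues panics on a label-count mismatch instead of returning an error.
	httpReqs.WithLabelValues("404", "GET").Inc()
	httpReqs.With(prometheus.Labels{"code": "200", "method": "POST"}).Add(42)
}
```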

View File

@ -0,0 +1,192 @@
package prometheus
import (
"errors"
"fmt"
"regexp"
"sort"
"strings"
"github.com/golang/protobuf/proto"
dto "github.com/prometheus/client_model/go"
)
var (
metricNameRE = regexp.MustCompile(`^[a-zA-Z_][a-zA-Z0-9_:]*$`)
labelNameRE = regexp.MustCompile("^[a-zA-Z_][a-zA-Z0-9_]*$")
)
// reservedLabelPrefix is a prefix which is not legal in user-supplied
// label names.
const reservedLabelPrefix = "__"
// Labels represents a collection of label name -> value mappings. This type is
// commonly used with the With(Labels) and GetMetricWith(Labels) methods of
// metric vector Collectors, e.g.:
// myVec.With(Labels{"code": "404", "method": "GET"}).Add(42)
//
// The other use-case is the specification of constant label pairs in Opts or to
// create a Desc.
type Labels map[string]string
// Desc is the descriptor used by every Prometheus Metric. It is essentially
// the immutable meta-data of a Metric. The normal Metric implementations
// included in this package manage their Desc under the hood. Users only have to
// deal with Desc if they use advanced features like the ExpvarCollector or
// custom Collectors and Metrics.
//
// Descriptors registered with the same registry have to fulfill certain
// consistency and uniqueness criteria if they share the same fully-qualified
// name: They must have the same help string and the same label names (aka label
// dimensions) in each, constLabels and variableLabels, but they must differ in
// the values of the constLabels.
//
// Descriptors that share the same fully-qualified names and the same label
// values of their constLabels are considered equal.
//
// Use NewDesc to create new Desc instances.
type Desc struct {
// fqName has been built from Namespace, Subsystem, and Name.
fqName string
// help provides some helpful information about this metric.
help string
// constLabelPairs contains precalculated DTO label pairs based on
// the constant labels.
constLabelPairs []*dto.LabelPair
// VariableLabels contains names of labels for which the metric
// maintains variable values.
variableLabels []string
// id is a hash of the values of the ConstLabels and fqName. This
// must be unique among all registered descriptors and can therefore be
// used as an identifier of the descriptor.
id uint64
// dimHash is a hash of the label names (preset and variable) and the
// Help string. Each Desc with the same fqName must have the same
// dimHash.
dimHash uint64
// err is an error that occurred during construction. It is reported at
// registration time.
err error
}
// NewDesc allocates and initializes a new Desc. Errors are recorded in the Desc
// and will be reported at registration time. variableLabels and constLabels can
// be nil if no such labels should be set. fqName and help must not be empty.
//
// variableLabels only contain the label names. Their label values are variable
// and therefore not part of the Desc. (They are managed within the Metric.)
//
// For constLabels, the label values are constant. Therefore, they are fully
// specified in the Desc. See the Opts documentation for the implications of
// constant labels.
func NewDesc(fqName, help string, variableLabels []string, constLabels Labels) *Desc {
d := &Desc{
fqName: fqName,
help: help,
variableLabels: variableLabels,
}
if help == "" {
d.err = errors.New("empty help string")
return d
}
if !metricNameRE.MatchString(fqName) {
d.err = fmt.Errorf("%q is not a valid metric name", fqName)
return d
}
// labelValues contains the label values of const labels (in order of
// their sorted label names) plus the fqName (at position 0).
labelValues := make([]string, 1, len(constLabels)+1)
labelValues[0] = fqName
labelNames := make([]string, 0, len(constLabels)+len(variableLabels))
labelNameSet := map[string]struct{}{}
// First add only the const label names and sort them...
for labelName := range constLabels {
if !checkLabelName(labelName) {
d.err = fmt.Errorf("%q is not a valid label name", labelName)
return d
}
labelNames = append(labelNames, labelName)
labelNameSet[labelName] = struct{}{}
}
sort.Strings(labelNames)
// ... so that we can now add const label values in the order of their names.
for _, labelName := range labelNames {
labelValues = append(labelValues, constLabels[labelName])
}
// Now add the variable label names, but prefix them with something that
// cannot be in a regular label name. That prevents matching the label
// dimension with a different mix between preset and variable labels.
for _, labelName := range variableLabels {
if !checkLabelName(labelName) {
d.err = fmt.Errorf("%q is not a valid label name", labelName)
return d
}
labelNames = append(labelNames, "$"+labelName)
labelNameSet[labelName] = struct{}{}
}
if len(labelNames) != len(labelNameSet) {
d.err = errors.New("duplicate label names")
return d
}
vh := hashNew()
for _, val := range labelValues {
vh = hashAdd(vh, val)
vh = hashAddByte(vh, separatorByte)
}
d.id = vh
// Sort labelNames so that order doesn't matter for the hash.
sort.Strings(labelNames)
// Now hash together (in this order) the help string and the sorted
// label names.
lh := hashNew()
lh = hashAdd(lh, help)
lh = hashAddByte(lh, separatorByte)
for _, labelName := range labelNames {
lh = hashAdd(lh, labelName)
lh = hashAddByte(lh, separatorByte)
}
d.dimHash = lh
d.constLabelPairs = make([]*dto.LabelPair, 0, len(constLabels))
for n, v := range constLabels {
d.constLabelPairs = append(d.constLabelPairs, &dto.LabelPair{
Name: proto.String(n),
Value: proto.String(v),
})
}
sort.Sort(LabelPairSorter(d.constLabelPairs))
return d
}
// NewInvalidDesc returns an invalid descriptor, i.e. a descriptor with the
// provided error set. If a collector returning such a descriptor is registered,
// registration will fail with the provided error. NewInvalidDesc can be used by
// a Collector to signal inability to describe itself.
func NewInvalidDesc(err error) *Desc {
return &Desc{
err: err,
}
}
func (d *Desc) String() string {
lpStrings := make([]string, 0, len(d.constLabelPairs))
for _, lp := range d.constLabelPairs {
lpStrings = append(
lpStrings,
fmt.Sprintf("%s=%q", lp.GetName(), lp.GetValue()),
)
}
return fmt.Sprintf(
"Desc{fqName: %q, help: %q, constLabels: {%s}, variableLabels: %v}",
d.fqName,
d.help,
strings.Join(lpStrings, ","),
d.variableLabels,
)
}
func checkLabelName(l string) bool {
return labelNameRE.MatchString(l) &&
!strings.HasPrefix(l, reservedLabelPrefix)
}
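
For illustration, a Desc with both constant and variable labels as discussed in the NewDesc documentation above (all names are arbitrary):

```go
package main

import (
	"fmt"

	"github.com/prometheus/client_golang/prometheus"
)

func main() {
	desc := prometheus.NewDesc(
		"worker_jobs_processed_total",           // fqName
		"Jobs processed, partitioned by queue.", // help
		[]string{"queue"},                       // variable label names
		prometheus.Labels{"shard": "a"},         // constant labels
	)
	fmt.Println(desc) // rendered by the String method defined above
}
```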

View File

@ -0,0 +1,109 @@
// Copyright 2014 The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
// Package prometheus provides embeddable metric primitives for servers and
// standardized exposition of telemetry through a web services interface.
//
// All exported functions and methods are safe to be used concurrently unless
// specified otherwise.
//
// To expose metrics registered with the Prometheus registry, an HTTP server
// needs to know about the Prometheus handler. The usual endpoint is "/metrics".
//
// http.Handle("/metrics", prometheus.Handler())
//
// As a starting point a very basic usage example:
//
// package main
//
// import (
// "net/http"
//
// "github.com/prometheus/client_golang/prometheus"
// )
//
// var (
// cpuTemp = prometheus.NewGauge(prometheus.GaugeOpts{
// Name: "cpu_temperature_celsius",
// Help: "Current temperature of the CPU.",
// })
// hdFailures = prometheus.NewCounter(prometheus.CounterOpts{
// Name: "hd_errors_total",
// Help: "Number of hard-disk errors.",
// })
// )
//
// func init() {
// prometheus.MustRegister(cpuTemp)
// prometheus.MustRegister(hdFailures)
// }
//
// func main() {
// cpuTemp.Set(65.3)
// hdFailures.Inc()
//
// http.Handle("/metrics", prometheus.Handler())
// http.ListenAndServe(":8080", nil)
// }
//
//
// This is a complete program that exports two metrics, a Gauge and a Counter.
// It also exports some stats about the HTTP usage of the /metrics
// endpoint. (See the Handler function for more detail.)
//
// Two more advanced metric types are the Summary and Histogram.
//
// In addition to the fundamental metric types Gauge, Counter, Summary, and
// Histogram, a very important part of the Prometheus data model is the
// partitioning of samples along dimensions called labels, which results in
// metric vectors. The fundamental types are GaugeVec, CounterVec, SummaryVec,
// and HistogramVec.
//
// Those are all the parts needed for basic usage. Detailed documentation and
// examples are provided below.
//
// Everything else this package offers is essentially for "power users" only. A
// few pointers to "power user features":
//
// All the various ...Opts structs have a ConstLabels field for labels that
// never change their value (which is only useful under special circumstances,
// see documentation of the Opts type).
//
// The Untyped metric behaves like a Gauge, but signals the Prometheus server
// not to assume anything about its type.
//
// Functions to fine-tune how the metric registry works: EnableCollectChecks,
// PanicOnCollectError, Register, Unregister, SetMetricFamilyInjectionHook.
//
// For custom metric collection, there are two entry points: Custom Metric
// implementations and custom Collector implementations. A Metric is the
// fundamental unit in the Prometheus data model: a sample at a point in time
// together with its meta-data (like its fully-qualified name and any number of
// pairs of label name and label value) that knows how to marshal itself into a
// data transfer object (aka DTO, implemented as a protocol buffer). A Collector
// gets registered with the Prometheus registry and manages the collection of
// one or more Metrics. Many parts of this package are building blocks for
// Metrics and Collectors. Desc is the metric descriptor, actually used by all
// metrics under the hood, and by Collectors to describe the Metrics to be
// collected, but only to be dealt with by users if they implement their own
// Metrics or Collectors. To create a Desc, the BuildFQName function will come
// in handy. Other useful components for Metric and Collector implementation
// include: LabelPairSorter to sort the DTO version of label pairs,
// NewConstMetric and MustNewConstMetric to create "throw away" Metrics at
// collection time, MetricVec to bundle custom Metrics into a metric vector
// Collector, SelfCollector to make a custom Metric collect itself.
//
// A good example for a custom Collector is the ExpvarCollector included in
// this package, which exposes variables published via the "expvar" package as
// Prometheus metrics.
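//
// As a rough sketch (the queueCollector type and its length callback are
// hypothetical, not part of this package), a custom Collector reporting a
// single gauge-like value could look like this:
//
//	type queueCollector struct {
//		desc   *prometheus.Desc
//		length func() int // returns the current queue length
//	}
//
//	func (c *queueCollector) Describe(ch chan<- *prometheus.Desc) {
//		ch <- c.desc
//	}
//
//	func (c *queueCollector) Collect(ch chan<- prometheus.Metric) {
//		ch <- prometheus.MustNewConstMetric(
//			c.desc, prometheus.GaugeValue, float64(c.length()),
//		)
//	}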
package prometheus

View File

@ -0,0 +1,119 @@
// Copyright 2014 The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package prometheus
import (
"encoding/json"
"expvar"
)
// ExpvarCollector collects metrics from the expvar interface. It provides a
// quick way to expose numeric values that are already exported via expvar as
// Prometheus metrics. Note that the data models of expvar and Prometheus are
// fundamentally different, and that the ExpvarCollector is inherently
// slow. Thus, the ExpvarCollector is probably great for experiments and
// prototyping, but you should seriously consider a more direct implementation of
// Prometheus metrics for monitoring production systems.
//
// Use NewExpvarCollector to create new instances.
type ExpvarCollector struct {
exports map[string]*Desc
}
// NewExpvarCollector returns a newly allocated ExpvarCollector that still has
// to be registered with the Prometheus registry.
//
// The exports map has the following meaning:
//
// The keys in the map correspond to expvar keys, i.e. for every expvar key you
// want to export as Prometheus metric, you need an entry in the exports
// map. The descriptor mapped to each key describes how to export the expvar
// value. It defines the name and the help string of the Prometheus metric
// proxying the expvar value. The type will always be Untyped.
//
// For descriptors without variable labels, the expvar value must be a number or
// a bool. The number is then directly exported as the Prometheus sample
// value. (For a bool, 'false' translates to 0 and 'true' to 1). Expvar values
// that are not numbers or bools are silently ignored.
//
// If the descriptor has one variable label, the expvar value must be an expvar
// map. The keys in the expvar map become the various values of the one
// Prometheus label. The values in the expvar map must be numbers or bools again
// as above.
//
// For descriptors with more than one variable label, the expvar value must be a
// nested expvar map, i.e. where the values of the topmost map are maps again
// etc. until a depth is reached that corresponds to the number of labels. The
// leaves of that structure must be numbers or bools as above to serve as the
// sample values.
//
// Anything that does not fit into the scheme above is silently ignored.
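//
// As an illustrative sketch (the expvar key and metric name are made up), an
// expvar map partitioned by a single "pool" label could be exported like this:
//
//	c := NewExpvarCollector(map[string]*Desc{
//		"memstats_by_pool": NewDesc(
//			"expvar_memstats_by_pool_bytes",
//			"Bytes in use per pool, proxied from expvar.",
//			[]string{"pool"}, nil,
//		),
//	})
//	MustRegister(c)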
func NewExpvarCollector(exports map[string]*Desc) *ExpvarCollector {
return &ExpvarCollector{
exports: exports,
}
}
// Describe implements Collector.
func (e *ExpvarCollector) Describe(ch chan<- *Desc) {
for _, desc := range e.exports {
ch <- desc
}
}
// Collect implements Collector.
func (e *ExpvarCollector) Collect(ch chan<- Metric) {
for name, desc := range e.exports {
var m Metric
expVar := expvar.Get(name)
if expVar == nil {
continue
}
var v interface{}
labels := make([]string, len(desc.variableLabels))
if err := json.Unmarshal([]byte(expVar.String()), &v); err != nil {
ch <- NewInvalidMetric(desc, err)
continue
}
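// processValue walks the unmarshaled expvar value recursively, consuming
// one nesting level of the map per variable label. Once all labels are
// filled in, the leaf value must be a float64 or bool and is emitted as an
// Untyped const metric; anything else is silently dropped.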
var processValue func(v interface{}, i int)
processValue = func(v interface{}, i int) {
if i >= len(labels) {
copiedLabels := append(make([]string, 0, len(labels)), labels...)
switch v := v.(type) {
case float64:
m = MustNewConstMetric(desc, UntypedValue, v, copiedLabels...)
case bool:
if v {
m = MustNewConstMetric(desc, UntypedValue, 1, copiedLabels...)
} else {
m = MustNewConstMetric(desc, UntypedValue, 0, copiedLabels...)
}
default:
return
}
ch <- m
return
}
vm, ok := v.(map[string]interface{})
if !ok {
return
}
for lv, val := range vm {
labels[i] = lv
processValue(val, i+1)
}
}
processValue(v, 0)
}
}

View File

@ -0,0 +1,29 @@
package prometheus
// Inline and byte-free variant of hash/fnv's fnv64a.
const (
offset64 = 14695981039346656037
prime64 = 1099511628211
)
// hashNew initializes a new fnv64a hash value.
func hashNew() uint64 {
return offset64
}
// hashAdd adds a string to a fnv64a hash value, returning the updated hash.
func hashAdd(h uint64, s string) uint64 {
for i := 0; i < len(s); i++ {
h ^= uint64(s[i])
h *= prime64
}
return h
}
// hashAddByte adds a byte to a fnv64a hash value, returning the updated hash.
func hashAddByte(h uint64, b byte) uint64 {
h ^= uint64(b)
h *= prime64
return h
}
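// As used elsewhere in this package (e.g. when hashing label values in
// desc.go), the functions are chained, separating values with separatorByte:
//
//	h := hashNew()
//	h = hashAdd(h, "some_label_value")
//	h = hashAddByte(h, separatorByte)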

View File

@ -0,0 +1,144 @@
// Copyright 2014 The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package prometheus
// Gauge is a Metric that represents a single numerical value that can
// arbitrarily go up and down.
//
// A Gauge is typically used for measured values like temperatures or current
// memory usage, but also "counts" that can go up and down, like the number of
// running goroutines.
//
// To create Gauge instances, use NewGauge.
type Gauge interface {
Metric
Collector
// Set sets the Gauge to an arbitrary value.
Set(float64)
// Inc increments the Gauge by 1.
Inc()
// Dec decrements the Gauge by 1.
Dec()
// Add adds the given value to the Gauge. (The value can be
// negative, resulting in a decrease of the Gauge.)
Add(float64)
// Sub subtracts the given value from the Gauge. (The value can be
// negative, resulting in an increase of the Gauge.)
Sub(float64)
}
// GaugeOpts is an alias for Opts. See there for doc comments.
type GaugeOpts Opts
// NewGauge creates a new Gauge based on the provided GaugeOpts.
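//
// For example (names are illustrative only):
//
//	queueDepth := NewGauge(GaugeOpts{
//		Name: "queue_depth",
//		Help: "Current number of items in the work queue.",
//	})
//	queueDepth.Set(17)
//	queueDepth.Inc()
//	queueDepth.Sub(3)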
func NewGauge(opts GaugeOpts) Gauge {
return newValue(NewDesc(
BuildFQName(opts.Namespace, opts.Subsystem, opts.Name),
opts.Help,
nil,
opts.ConstLabels,
), GaugeValue, 0)
}
// GaugeVec is a Collector that bundles a set of Gauges that all share the same
// Desc, but have different values for their variable labels. This is used if
// you want to count the same thing partitioned by various dimensions
// (e.g. number of operations queued, partitioned by user and operation
// type). Create instances with NewGaugeVec.
type GaugeVec struct {
MetricVec
}
// NewGaugeVec creates a new GaugeVec based on the provided GaugeOpts and
// partitioned by the given label names. At least one label name must be
// provided.
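//
// An illustrative sketch (names are made up):
//
//	opsQueued := NewGaugeVec(
//		GaugeOpts{
//			Name: "ops_queued",
//			Help: "Number of queued operations, partitioned by user and type.",
//		},
//		[]string{"user", "type"},
//	)
//	opsQueued.WithLabelValues("alice", "delete").Add(4)
//	opsQueued.With(Labels{"user": "bob", "type": "create"}).Dec()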
func NewGaugeVec(opts GaugeOpts, labelNames []string) *GaugeVec {
desc := NewDesc(
BuildFQName(opts.Namespace, opts.Subsystem, opts.Name),
opts.Help,
labelNames,
opts.ConstLabels,
)
return &GaugeVec{
MetricVec: MetricVec{
children: map[uint64]Metric{},
desc: desc,
newMetric: func(lvs ...string) Metric {
return newValue(desc, GaugeValue, 0, lvs...)
},
},
}
}
// GetMetricWithLabelValues replaces the method of the same name in
// MetricVec. The difference is that this method returns a Gauge and not a
// Metric so that no type conversion is required.
func (m *GaugeVec) GetMetricWithLabelValues(lvs ...string) (Gauge, error) {
metric, err := m.MetricVec.GetMetricWithLabelValues(lvs...)
if metric != nil {
return metric.(Gauge), err
}
return nil, err
}
// GetMetricWith replaces the method of the same name in MetricVec. The
// difference is that this method returns a Gauge and not a Metric so that no
// type conversion is required.
func (m *GaugeVec) GetMetricWith(labels Labels) (Gauge, error) {
metric, err := m.MetricVec.GetMetricWith(labels)
if metric != nil {
return metric.(Gauge), err
}
return nil, err
}
// WithLabelValues works as GetMetricWithLabelValues, but panics where
// GetMetricWithLabelValues would have returned an error. By not returning an
// error, WithLabelValues allows shortcuts like
// myVec.WithLabelValues("404", "GET").Add(42)
func (m *GaugeVec) WithLabelValues(lvs ...string) Gauge {
return m.MetricVec.WithLabelValues(lvs...).(Gauge)
}
// With works as GetMetricWith, but panics where GetMetricWith would have
// returned an error. By not returning an error, With allows shortcuts like
// myVec.With(Labels{"code": "404", "method": "GET"}).Add(42)
func (m *GaugeVec) With(labels Labels) Gauge {
return m.MetricVec.With(labels).(Gauge)
}
// GaugeFunc is a Gauge whose value is determined at collect time by calling a
// provided function.
//
// To create GaugeFunc instances, use NewGaugeFunc.
type GaugeFunc interface {
Metric
Collector
}
// NewGaugeFunc creates a new GaugeFunc based on the provided GaugeOpts. The
// value reported is determined by calling the given function from within the
// Write method. Take into account that metric collection may happen
// concurrently. If that results in concurrent calls to Write, like in the case
// where a GaugeFunc is directly registered with Prometheus, the provided
// function must be concurrency-safe.
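//
// For example (an illustrative sketch; it assumes the standard library
// runtime package is imported):
//
//	NewGaugeFunc(GaugeOpts{
//		Name: "goroutines_count",
//		Help: "Number of goroutines currently running.",
//	}, func() float64 {
//		return float64(runtime.NumGoroutine())
//	})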
func NewGaugeFunc(opts GaugeOpts, function func() float64) GaugeFunc {
return newValueFunc(NewDesc(
BuildFQName(opts.Namespace, opts.Subsystem, opts.Name),
opts.Help,
nil,
opts.ConstLabels,
), GaugeValue, function)
}

Some files were not shown because too many files have changed in this diff.