mirror of https://github.com/k3s-io/k3s
remove 1.4 staging; adding 1.5
parent c06e5f60b6
commit ec4fe281b6
@ -1,10 +0,0 @@
#go test -run=none -file bench_test.go -test.bench . -cpuprofile=bench_test.out

go test -c
./go-restful.test -test.run=none -test.cpuprofile=tmp.prof -test.bench=BenchmarkMany
./go-restful.test -test.run=none -test.cpuprofile=curly.prof -test.bench=BenchmarkManyCurly

#go tool pprof go-restful.test tmp.prof
go tool pprof go-restful.test curly.prof


@ -1,2 +0,0 @@
go test -coverprofile=coverage.out
go tool cover -html=coverage.out
@ -1,10 +0,0 @@
go test -test.v ...restful && \
go test -test.v ...swagger && \
go vet ...restful && \
go fmt ...swagger && \
go install ...swagger && \
go fmt ...restful && \
go install ...restful
cd examples
ls *.go | xargs -I {} go build -o /tmp/ignore {}
cd ..
@ -1,7 +0,0 @@
language: go
go:
- 1.3
- 1.4
script:
- go test
- go build
@ -1,13 +0,0 @@
language: go

go:
- 1.4
- 1.3
- 1.2
- tip

install:
- if ! go get code.google.com/p/go.tools/cmd/cover; then go get golang.org/x/tools/cmd/cover; fi

script:
- go test -cover
@ -1,2 +0,0 @@
language: go
install: go get -t
@ -1,17 +0,0 @@
sudo: false

language: go

go:
- 1.5.4
- 1.6.3
- tip

install:
- go get github.com/golang/lint/golint
- export PATH=$GOPATH/bin:$PATH
- go install ./...

script:
- verify/all.sh -v
- go test ./...
@ -1,199 +0,0 @@
#!/bin/bash

# _needgen is a helper function to tell if we need to generate files for msgp, codecgen.
_needgen() {
local a="$1"
zneedgen=0
if [[ ! -e "$a" ]]
then
zneedgen=1
echo 1
return 0
fi
for i in `ls -1 *.go.tmpl gen.go values_test.go`
do
if [[ "$a" -ot "$i" ]]
then
zneedgen=1
echo 1
return 0
fi
done
echo 0
}

# _build generates fast-path.go and gen-helper.go.
#
# It is needed because there is some dependency between the generated code
# and the other classes. Consequently, we have to totally remove the
# generated files and put stubs in place, before calling "go run" again
# to recreate them.
_build() {
if ! [[ "${zforce}" == "1" ||
"1" == $( _needgen "fast-path.generated.go" ) ||
"1" == $( _needgen "gen-helper.generated.go" ) ||
"1" == $( _needgen "gen.generated.go" ) ||
1 == 0 ]]
then
return 0
fi

# echo "Running prebuild"
if [ "${zbak}" == "1" ]
then
# echo "Backing up old generated files"
_zts=`date '+%m%d%Y_%H%M%S'`
_gg=".generated.go"
[ -e "gen-helper${_gg}" ] && mv gen-helper${_gg} gen-helper${_gg}__${_zts}.bak
[ -e "fast-path${_gg}" ] && mv fast-path${_gg} fast-path${_gg}__${_zts}.bak
# [ -e "safe${_gg}" ] && mv safe${_gg} safe${_gg}__${_zts}.bak
# [ -e "unsafe${_gg}" ] && mv unsafe${_gg} unsafe${_gg}__${_zts}.bak
else
rm -f fast-path.generated.go gen.generated.go gen-helper.generated.go \
*safe.generated.go *_generated_test.go *.generated_ffjson_expose.go
fi

cat > gen.generated.go <<EOF
// Copyright (c) 2012-2015 Ugorji Nwoke. All rights reserved.
// Use of this source code is governed by a MIT license found in the LICENSE file.

package codec

// DO NOT EDIT. THIS FILE IS AUTO-GENERATED FROM gen-dec-(map|array).go.tmpl

const genDecMapTmpl = \`
EOF

cat >> gen.generated.go < gen-dec-map.go.tmpl

cat >> gen.generated.go <<EOF
\`

const genDecListTmpl = \`
EOF

cat >> gen.generated.go < gen-dec-array.go.tmpl

cat >> gen.generated.go <<EOF
\`

EOF

cat > gen-from-tmpl.codec.generated.go <<EOF
package codec
import "io"
func GenInternalGoFile(r io.Reader, w io.Writer, safe bool) error {
return genInternalGoFile(r, w, safe)
}
EOF

cat > gen-from-tmpl.generated.go <<EOF
//+build ignore

package main

//import "flag"
import "ugorji.net/codec"
import "os"

func run(fnameIn, fnameOut string, safe bool) {
fin, err := os.Open(fnameIn)
if err != nil { panic(err) }
defer fin.Close()
fout, err := os.Create(fnameOut)
if err != nil { panic(err) }
defer fout.Close()
err = codec.GenInternalGoFile(fin, fout, safe)
if err != nil { panic(err) }
}

func main() {
// do not make safe/unsafe variants.
// Instead, depend on escape analysis, and place string creation and usage appropriately.
// run("unsafe.go.tmpl", "safe.generated.go", true)
// run("unsafe.go.tmpl", "unsafe.generated.go", false)
run("fast-path.go.tmpl", "fast-path.generated.go", false)
run("gen-helper.go.tmpl", "gen-helper.generated.go", false)
}

EOF
go run -tags=notfastpath gen-from-tmpl.generated.go && \
rm -f gen-from-tmpl.*generated.go
}

_codegenerators() {
if [[ $zforce == "1" ||
"1" == $( _needgen "values_codecgen${zsfx}" ) ||
"1" == $( _needgen "values_msgp${zsfx}" ) ||
"1" == $( _needgen "values_ffjson${zsfx}" ) ||
1 == 0 ]]
then
# codecgen creates some temporary files in the directory (main, pkg).
# Consequently, we should start msgp and ffjson first, and also put a small time latency before
# starting codecgen.
# Without this, ffjson chokes on one of the temporary files from codecgen.
if [[ $zexternal == "1" ]]
then
echo "ffjson ... " && \
ffjson -w values_ffjson${zsfx} $zfin &
zzzIdFF=$!
echo "msgp ... " && \
msgp -tests=false -o=values_msgp${zsfx} -file=$zfin &
zzzIdMsgp=$!

sleep 1 # give ffjson and msgp some buffer time. see note above.
fi

echo "codecgen - !unsafe ... " && \
codecgen -rt codecgen -t 'x,codecgen,!unsafe' -o values_codecgen${zsfx} -d 19780 $zfin &
zzzIdC=$!
echo "codecgen - unsafe ... " && \
codecgen -u -rt codecgen -t 'x,codecgen,unsafe' -o values_codecgen_unsafe${zsfx} -d 19781 $zfin &
zzzIdCU=$!
wait $zzzIdC $zzzIdCU $zzzIdMsgp $zzzIdFF && \
# remove (M|Unm)arshalJSON implementations, so they don't conflict with encoding/json bench \
if [[ $zexternal == "1" ]]
then
sed -i 's+ MarshalJSON(+ _MarshalJSON(+g' values_ffjson${zsfx} && \
sed -i 's+ UnmarshalJSON(+ _UnmarshalJSON(+g' values_ffjson${zsfx}
fi && \
echo "generators done!" && \
true
fi
}

# _init reads the arguments and sets up the flags
_init() {
OPTIND=1
while getopts "fbx" flag
do
case "x$flag" in
'xf') zforce=1;;
'xb') zbak=1;;
'xx') zexternal=1;;
*) echo "prebuild.sh accepts [-fb] only"; return 1;;
esac
done
shift $((OPTIND-1))
OPTIND=1
}

# main script.
# First ensure that this is being run from the basedir (i.e. dirname of script is .)
if [ "." = `dirname $0` ]
then
zmydir=`pwd`
zfin="test_values.generated.go"
zsfx="_generated_test.go"
# rm -f *_generated_test.go
rm -f codecgen-*.go && \
_init "$@" && \
_build && \
cp $zmydir/values_test.go $zmydir/$zfin && \
_codegenerators && \
echo prebuild done successfully
rm -f $zmydir/$zfin
else
echo "Script must be run from the directory it resides in"
fi

@ -1,639 +0,0 @@
[
  { "cbor": "AA==", "hex": "00", "roundtrip": true, "decoded": 0 },
  { "cbor": "AQ==", "hex": "01", "roundtrip": true, "decoded": 1 },
  { "cbor": "Cg==", "hex": "0a", "roundtrip": true, "decoded": 10 },
  { "cbor": "Fw==", "hex": "17", "roundtrip": true, "decoded": 23 },
  { "cbor": "GBg=", "hex": "1818", "roundtrip": true, "decoded": 24 },
  { "cbor": "GBk=", "hex": "1819", "roundtrip": true, "decoded": 25 },
  { "cbor": "GGQ=", "hex": "1864", "roundtrip": true, "decoded": 100 },
  { "cbor": "GQPo", "hex": "1903e8", "roundtrip": true, "decoded": 1000 },
  { "cbor": "GgAPQkA=", "hex": "1a000f4240", "roundtrip": true, "decoded": 1000000 },
  { "cbor": "GwAAAOjUpRAA", "hex": "1b000000e8d4a51000", "roundtrip": true, "decoded": 1000000000000 },
  { "cbor": "G///////////", "hex": "1bffffffffffffffff", "roundtrip": true, "decoded": 18446744073709551615 },
  { "cbor": "wkkBAAAAAAAAAAA=", "hex": "c249010000000000000000", "roundtrip": true, "decoded": 18446744073709551616 },
  { "cbor": "O///////////", "hex": "3bffffffffffffffff", "roundtrip": true, "decoded": -18446744073709551616, "skip": true },
  { "cbor": "w0kBAAAAAAAAAAA=", "hex": "c349010000000000000000", "roundtrip": true, "decoded": -18446744073709551617 },
  { "cbor": "IA==", "hex": "20", "roundtrip": true, "decoded": -1 },
  { "cbor": "KQ==", "hex": "29", "roundtrip": true, "decoded": -10 },
  { "cbor": "OGM=", "hex": "3863", "roundtrip": true, "decoded": -100 },
  { "cbor": "OQPn", "hex": "3903e7", "roundtrip": true, "decoded": -1000 },
  { "cbor": "+QAA", "hex": "f90000", "roundtrip": true, "decoded": 0.0 },
  { "cbor": "+YAA", "hex": "f98000", "roundtrip": true, "decoded": -0.0 },
  { "cbor": "+TwA", "hex": "f93c00", "roundtrip": true, "decoded": 1.0 },
  { "cbor": "+z/xmZmZmZma", "hex": "fb3ff199999999999a", "roundtrip": true, "decoded": 1.1 },
  { "cbor": "+T4A", "hex": "f93e00", "roundtrip": true, "decoded": 1.5 },
  { "cbor": "+Xv/", "hex": "f97bff", "roundtrip": true, "decoded": 65504.0 },
  { "cbor": "+kfDUAA=", "hex": "fa47c35000", "roundtrip": true, "decoded": 100000.0 },
  { "cbor": "+n9///8=", "hex": "fa7f7fffff", "roundtrip": true, "decoded": 3.4028234663852886e+38 },
  { "cbor": "+3435DyIAHWc", "hex": "fb7e37e43c8800759c", "roundtrip": true, "decoded": 1.0e+300 },
  { "cbor": "+QAB", "hex": "f90001", "roundtrip": true, "decoded": 5.960464477539063e-08 },
  { "cbor": "+QQA", "hex": "f90400", "roundtrip": true, "decoded": 6.103515625e-05 },
  { "cbor": "+cQA", "hex": "f9c400", "roundtrip": true, "decoded": -4.0 },
  { "cbor": "+8AQZmZmZmZm", "hex": "fbc010666666666666", "roundtrip": true, "decoded": -4.1 },
  { "cbor": "+XwA", "hex": "f97c00", "roundtrip": true, "diagnostic": "Infinity" },
  { "cbor": "+X4A", "hex": "f97e00", "roundtrip": true, "diagnostic": "NaN" },
  { "cbor": "+fwA", "hex": "f9fc00", "roundtrip": true, "diagnostic": "-Infinity" },
  { "cbor": "+n+AAAA=", "hex": "fa7f800000", "roundtrip": false, "diagnostic": "Infinity" },
  { "cbor": "+n/AAAA=", "hex": "fa7fc00000", "roundtrip": false, "diagnostic": "NaN" },
  { "cbor": "+v+AAAA=", "hex": "faff800000", "roundtrip": false, "diagnostic": "-Infinity" },
  { "cbor": "+3/wAAAAAAAA", "hex": "fb7ff0000000000000", "roundtrip": false, "diagnostic": "Infinity" },
  { "cbor": "+3/4AAAAAAAA", "hex": "fb7ff8000000000000", "roundtrip": false, "diagnostic": "NaN" },
  { "cbor": "+//wAAAAAAAA", "hex": "fbfff0000000000000", "roundtrip": false, "diagnostic": "-Infinity" },
  { "cbor": "9A==", "hex": "f4", "roundtrip": true, "decoded": false },
  { "cbor": "9Q==", "hex": "f5", "roundtrip": true, "decoded": true },
  { "cbor": "9g==", "hex": "f6", "roundtrip": true, "decoded": null },
  { "cbor": "9w==", "hex": "f7", "roundtrip": true, "diagnostic": "undefined" },
  { "cbor": "8A==", "hex": "f0", "roundtrip": true, "diagnostic": "simple(16)" },
  { "cbor": "+Bg=", "hex": "f818", "roundtrip": true, "diagnostic": "simple(24)" },
  { "cbor": "+P8=", "hex": "f8ff", "roundtrip": true, "diagnostic": "simple(255)" },
  { "cbor": "wHQyMDEzLTAzLTIxVDIwOjA0OjAwWg==", "hex": "c074323031332d30332d32315432303a30343a30305a", "roundtrip": true, "diagnostic": "0(\"2013-03-21T20:04:00Z\")" },
  { "cbor": "wRpRS2ew", "hex": "c11a514b67b0", "roundtrip": true, "diagnostic": "1(1363896240)" },
  { "cbor": "wftB1FLZ7CAAAA==", "hex": "c1fb41d452d9ec200000", "roundtrip": true, "diagnostic": "1(1363896240.5)" },
  { "cbor": "10QBAgME", "hex": "d74401020304", "roundtrip": true, "diagnostic": "23(h'01020304')" },
  { "cbor": "2BhFZElFVEY=", "hex": "d818456449455446", "roundtrip": true, "diagnostic": "24(h'6449455446')" },
  { "cbor": "2CB2aHR0cDovL3d3dy5leGFtcGxlLmNvbQ==", "hex": "d82076687474703a2f2f7777772e6578616d706c652e636f6d", "roundtrip": true, "diagnostic": "32(\"http://www.example.com\")" },
  { "cbor": "QA==", "hex": "40", "roundtrip": true, "diagnostic": "h''" },
  { "cbor": "RAECAwQ=", "hex": "4401020304", "roundtrip": true, "diagnostic": "h'01020304'" },
  { "cbor": "YA==", "hex": "60", "roundtrip": true, "decoded": "" },
  { "cbor": "YWE=", "hex": "6161", "roundtrip": true, "decoded": "a" },
  { "cbor": "ZElFVEY=", "hex": "6449455446", "roundtrip": true, "decoded": "IETF" },
  { "cbor": "YiJc", "hex": "62225c", "roundtrip": true, "decoded": "\"\\" },
  { "cbor": "YsO8", "hex": "62c3bc", "roundtrip": true, "decoded": "ü" },
  { "cbor": "Y+awtA==", "hex": "63e6b0b4", "roundtrip": true, "decoded": "水" },
  { "cbor": "ZPCQhZE=", "hex": "64f0908591", "roundtrip": true, "decoded": "𐅑" },
  { "cbor": "gA==", "hex": "80", "roundtrip": true, "decoded": [] },
  { "cbor": "gwECAw==", "hex": "83010203", "roundtrip": true, "decoded": [1, 2, 3] },
  { "cbor": "gwGCAgOCBAU=", "hex": "8301820203820405", "roundtrip": true, "decoded": [1, [2, 3], [4, 5]] },
  { "cbor": "mBkBAgMEBQYHCAkKCwwNDg8QERITFBUWFxgYGBk=", "hex": "98190102030405060708090a0b0c0d0e0f101112131415161718181819", "roundtrip": true, "decoded": [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25] },
  { "cbor": "oA==", "hex": "a0", "roundtrip": true, "decoded": {} },
  { "cbor": "ogECAwQ=", "hex": "a201020304", "roundtrip": true, "skip": true, "diagnostic": "{1: 2, 3: 4}" },
  { "cbor": "omFhAWFiggID", "hex": "a26161016162820203", "roundtrip": true, "decoded": {"a": 1, "b": [2, 3]} },
  { "cbor": "gmFhoWFiYWM=", "hex": "826161a161626163", "roundtrip": true, "decoded": ["a", {"b": "c"}] },
  { "cbor": "pWFhYUFhYmFCYWNhQ2FkYURhZWFF", "hex": "a56161614161626142616361436164614461656145", "roundtrip": true, "decoded": {"a": "A", "b": "B", "c": "C", "d": "D", "e": "E"} },
  { "cbor": "X0IBAkMDBAX/", "hex": "5f42010243030405ff", "roundtrip": false, "skip": true, "diagnostic": "(_ h'0102', h'030405')" },
  { "cbor": "f2VzdHJlYWRtaW5n/w==", "hex": "7f657374726561646d696e67ff", "roundtrip": false, "decoded": "streaming" },
  { "cbor": "n/8=", "hex": "9fff", "roundtrip": false, "decoded": [] },
  { "cbor": "nwGCAgOfBAX//w==", "hex": "9f018202039f0405ffff", "roundtrip": false, "decoded": [1, [2, 3], [4, 5]] },
  { "cbor": "nwGCAgOCBAX/", "hex": "9f01820203820405ff", "roundtrip": false, "decoded": [1, [2, 3], [4, 5]] },
  { "cbor": "gwGCAgOfBAX/", "hex": "83018202039f0405ff", "roundtrip": false, "decoded": [1, [2, 3], [4, 5]] },
  { "cbor": "gwGfAgP/ggQF", "hex": "83019f0203ff820405", "roundtrip": false, "decoded": [1, [2, 3], [4, 5]] },
  { "cbor": "nwECAwQFBgcICQoLDA0ODxAREhMUFRYXGBgYGf8=", "hex": "9f0102030405060708090a0b0c0d0e0f101112131415161718181819ff", "roundtrip": false, "decoded": [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25] },
  { "cbor": "v2FhAWFinwID//8=", "hex": "bf61610161629f0203ffff", "roundtrip": false, "decoded": {"a": 1, "b": [2, 3]} },
  { "cbor": "gmFhv2FiYWP/", "hex": "826161bf61626163ff", "roundtrip": false, "decoded": ["a", {"b": "c"}] },
  { "cbor": "v2NGdW71Y0FtdCH/", "hex": "bf6346756ef563416d7421ff", "roundtrip": false, "decoded": {"Fun": true, "Amt": -2} }
]
@ -1,74 +0,0 @@
#!/bin/bash

# Run all the different permutations of all the tests.
# This helps ensure that nothing gets broken.

_run() {
# 1. VARIATIONS: regular (t), canonical (c), IO R/W (i),
#                binc-nosymbols (n), struct2array (s), intern string (e),
# 2. MODE: reflection (r), external (x), codecgen (g), unsafe (u), notfastpath (f)
# 3. OPTIONS: verbose (v), reset (z), must (m),
#
# Use combinations of mode to get exactly what you want,
# and then pass the variations you need.

ztags=""
zargs=""
local OPTIND
OPTIND=1
while getopts "xurtcinsvgzmef" flag
do
case "x$flag" in
'xr') ;;
'xf') ztags="$ztags notfastpath" ;;
'xg') ztags="$ztags codecgen" ;;
'xx') ztags="$ztags x" ;;
'xu') ztags="$ztags unsafe" ;;
'xv') zargs="$zargs -tv" ;;
'xz') zargs="$zargs -tr" ;;
'xm') zargs="$zargs -tm" ;;
*) ;;
esac
done
# shift $((OPTIND-1))
printf '............. TAGS: %s .............\n' "$ztags"
# echo ">>>>>>> TAGS: $ztags"

OPTIND=1
while getopts "xurtcinsvgzmef" flag
do
case "x$flag" in
'xt') printf ">>>>>>> REGULAR : "; go test "-tags=$ztags" $zargs ; sleep 2 ;;
'xc') printf ">>>>>>> CANONICAL : "; go test "-tags=$ztags" $zargs -tc; sleep 2 ;;
'xi') printf ">>>>>>> I/O : "; go test "-tags=$ztags" $zargs -ti; sleep 2 ;;
'xn') printf ">>>>>>> NO_SYMBOLS : "; go test "-tags=$ztags" $zargs -tn; sleep 2 ;;
'xs') printf ">>>>>>> TO_ARRAY : "; go test "-tags=$ztags" $zargs -ts; sleep 2 ;;
'xe') printf ">>>>>>> INTERN : "; go test "-tags=$ztags" $zargs -te; sleep 2 ;;
*) ;;
esac
done
shift $((OPTIND-1))

OPTIND=1
}

# echo ">>>>>>> RUNNING VARIATIONS OF TESTS"
if [[ "x$@" = "x" ]]; then
# All: r, x, g, gu
_run "-rtcinsm"  # regular
_run "-rtcinsmz" # regular with reset
_run "-rtcinsmf" # regular with no fastpath (notfastpath)
_run "-xtcinsm"  # external
_run "-gxtcinsm" # codecgen: requires external
_run "-gxutcinsm" # codecgen + unsafe
elif [[ "x$@" = "x-Z" ]]; then
# Regular
_run "-rtcinsm"  # regular
_run "-rtcinsmz" # regular with reset
elif [[ "x$@" = "x-F" ]]; then
# regular with notfastpath
_run "-rtcinsmf"  # regular
_run "-rtcinsmzf" # regular with reset
else
_run "$@"
fi
@ -1,20 +0,0 @@
/*
Copyright 2016 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

// This package is generated by client-gen with arguments: --clientset-name=release_1_4 --input=[api/v1,authorization/v1beta1,autoscaling/v1,batch/v1,extensions/v1beta1,policy/v1alpha1]

// Package fake has the automatically generated clients.
package fake
@ -1,109 +0,0 @@
/*
Copyright 2016 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package fake

import (
    api "k8s.io/client-go/1.4/pkg/api"
    unversioned "k8s.io/client-go/1.4/pkg/api/unversioned"
    v1beta1 "k8s.io/client-go/1.4/pkg/apis/extensions/v1beta1"
    labels "k8s.io/client-go/1.4/pkg/labels"
    watch "k8s.io/client-go/1.4/pkg/watch"
    testing "k8s.io/client-go/1.4/testing"
)

// FakeStorageClasses implements StorageClassInterface
type FakeStorageClasses struct {
    Fake *FakeExtensions
}

var storageclassesResource = unversioned.GroupVersionResource{Group: "extensions", Version: "v1beta1", Resource: "storageclasses"}

func (c *FakeStorageClasses) Create(storageClass *v1beta1.StorageClass) (result *v1beta1.StorageClass, err error) {
    obj, err := c.Fake.
        Invokes(testing.NewRootCreateAction(storageclassesResource, storageClass), &v1beta1.StorageClass{})
    if obj == nil {
        return nil, err
    }
    return obj.(*v1beta1.StorageClass), err
}

func (c *FakeStorageClasses) Update(storageClass *v1beta1.StorageClass) (result *v1beta1.StorageClass, err error) {
    obj, err := c.Fake.
        Invokes(testing.NewRootUpdateAction(storageclassesResource, storageClass), &v1beta1.StorageClass{})
    if obj == nil {
        return nil, err
    }
    return obj.(*v1beta1.StorageClass), err
}

func (c *FakeStorageClasses) Delete(name string, options *api.DeleteOptions) error {
    _, err := c.Fake.
        Invokes(testing.NewRootDeleteAction(storageclassesResource, name), &v1beta1.StorageClass{})
    return err
}

func (c *FakeStorageClasses) DeleteCollection(options *api.DeleteOptions, listOptions api.ListOptions) error {
    action := testing.NewRootDeleteCollectionAction(storageclassesResource, listOptions)

    _, err := c.Fake.Invokes(action, &v1beta1.StorageClassList{})
    return err
}

func (c *FakeStorageClasses) Get(name string) (result *v1beta1.StorageClass, err error) {
    obj, err := c.Fake.
        Invokes(testing.NewRootGetAction(storageclassesResource, name), &v1beta1.StorageClass{})
    if obj == nil {
        return nil, err
    }
    return obj.(*v1beta1.StorageClass), err
}

func (c *FakeStorageClasses) List(opts api.ListOptions) (result *v1beta1.StorageClassList, err error) {
    obj, err := c.Fake.
        Invokes(testing.NewRootListAction(storageclassesResource, opts), &v1beta1.StorageClassList{})
    if obj == nil {
        return nil, err
    }

    label := opts.LabelSelector
    if label == nil {
        label = labels.Everything()
    }
    list := &v1beta1.StorageClassList{}
    for _, item := range obj.(*v1beta1.StorageClassList).Items {
        if label.Matches(labels.Set(item.Labels)) {
            list.Items = append(list.Items, item)
        }
    }
    return list, err
}

// Watch returns a watch.Interface that watches the requested storageClasses.
func (c *FakeStorageClasses) Watch(opts api.ListOptions) (watch.Interface, error) {
    return c.Fake.
        InvokesWatch(testing.NewRootWatchAction(storageclassesResource, opts))
}

// Patch applies the patch and returns the patched storageClass.
func (c *FakeStorageClasses) Patch(name string, pt api.PatchType, data []byte, subresources ...string) (result *v1beta1.StorageClass, err error) {
    obj, err := c.Fake.
        Invokes(testing.NewRootPatchSubresourceAction(storageclassesResource, name, data, subresources...), &v1beta1.StorageClass{})
    if obj == nil {
        return nil, err
    }
    return obj.(*v1beta1.StorageClass), err
}
@ -1,141 +0,0 @@
/*
Copyright 2016 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package v1beta1

import (
    api "k8s.io/client-go/1.4/pkg/api"
    v1beta1 "k8s.io/client-go/1.4/pkg/apis/extensions/v1beta1"
    watch "k8s.io/client-go/1.4/pkg/watch"
)

// StorageClassesGetter has a method to return a StorageClassInterface.
// A group's client should implement this interface.
type StorageClassesGetter interface {
    StorageClasses() StorageClassInterface
}

// StorageClassInterface has methods to work with StorageClass resources.
type StorageClassInterface interface {
    Create(*v1beta1.StorageClass) (*v1beta1.StorageClass, error)
    Update(*v1beta1.StorageClass) (*v1beta1.StorageClass, error)
    Delete(name string, options *api.DeleteOptions) error
    DeleteCollection(options *api.DeleteOptions, listOptions api.ListOptions) error
    Get(name string) (*v1beta1.StorageClass, error)
    List(opts api.ListOptions) (*v1beta1.StorageClassList, error)
    Watch(opts api.ListOptions) (watch.Interface, error)
    Patch(name string, pt api.PatchType, data []byte, subresources ...string) (result *v1beta1.StorageClass, err error)
    StorageClassExpansion
}

// storageClasses implements StorageClassInterface
type storageClasses struct {
    client *ExtensionsClient
}

// newStorageClasses returns a StorageClasses
func newStorageClasses(c *ExtensionsClient) *storageClasses {
    return &storageClasses{
        client: c,
    }
}

// Create takes the representation of a storageClass and creates it. Returns the server's representation of the storageClass, and an error, if there is any.
func (c *storageClasses) Create(storageClass *v1beta1.StorageClass) (result *v1beta1.StorageClass, err error) {
    result = &v1beta1.StorageClass{}
    err = c.client.Post().
        Resource("storageclasses").
        Body(storageClass).
        Do().
        Into(result)
    return
}

// Update takes the representation of a storageClass and updates it. Returns the server's representation of the storageClass, and an error, if there is any.
func (c *storageClasses) Update(storageClass *v1beta1.StorageClass) (result *v1beta1.StorageClass, err error) {
    result = &v1beta1.StorageClass{}
    err = c.client.Put().
        Resource("storageclasses").
        Name(storageClass.Name).
        Body(storageClass).
        Do().
        Into(result)
    return
}

// Delete takes name of the storageClass and deletes it. Returns an error if one occurs.
func (c *storageClasses) Delete(name string, options *api.DeleteOptions) error {
    return c.client.Delete().
        Resource("storageclasses").
        Name(name).
        Body(options).
        Do().
        Error()
}

// DeleteCollection deletes a collection of objects.
func (c *storageClasses) DeleteCollection(options *api.DeleteOptions, listOptions api.ListOptions) error {
    return c.client.Delete().
        Resource("storageclasses").
        VersionedParams(&listOptions, api.ParameterCodec).
        Body(options).
        Do().
        Error()
}

// Get takes name of the storageClass, and returns the corresponding storageClass object, and an error if there is any.
func (c *storageClasses) Get(name string) (result *v1beta1.StorageClass, err error) {
    result = &v1beta1.StorageClass{}
    err = c.client.Get().
        Resource("storageclasses").
        Name(name).
        Do().
        Into(result)
    return
}

// List takes label and field selectors, and returns the list of StorageClasses that match those selectors.
func (c *storageClasses) List(opts api.ListOptions) (result *v1beta1.StorageClassList, err error) {
    result = &v1beta1.StorageClassList{}
    err = c.client.Get().
        Resource("storageclasses").
        VersionedParams(&opts, api.ParameterCodec).
        Do().
        Into(result)
    return
}

// Watch returns a watch.Interface that watches the requested storageClasses.
func (c *storageClasses) Watch(opts api.ListOptions) (watch.Interface, error) {
    return c.client.Get().
        Prefix("watch").
        Resource("storageclasses").
        VersionedParams(&opts, api.ParameterCodec).
        Watch()
}

// Patch applies the patch and returns the patched storageClass.
func (c *storageClasses) Patch(name string, pt api.PatchType, data []byte, subresources ...string) (result *v1beta1.StorageClass, err error) {
    result = &v1beta1.StorageClass{}
    err = c.client.Patch(pt).
        Resource("storageclasses").
        SubResource(subresources...).
        Name(name).
        Body(data).
        Do().
        Into(result)
    return
}
@ -1,20 +0,0 @@
/*
Copyright 2016 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

// This package is generated by client-gen with arguments: --clientset-name=release_1_4 --input=[api/v1,authorization/v1beta1,autoscaling/v1,batch/v1,extensions/v1beta1,policy/v1alpha1]

// Package fake has the automatically generated clients.
package fake
@ -1,238 +0,0 @@
/*
Copyright 2015 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package endpoints

import (
    "bytes"
    "crypto/md5"
    "encoding/hex"
    "hash"
    "sort"

    "k8s.io/client-go/1.4/pkg/api"
    "k8s.io/client-go/1.4/pkg/types"
    hashutil "k8s.io/client-go/1.4/pkg/util/hash"
)

const (
    // TODO: to be deleted after v1.3 is released
    // Its value is the json representation of map[string(IP)][HostRecord]
    // example: '{"10.245.1.6":{"HostName":"my-webserver"}}'
    PodHostnamesAnnotation = "endpoints.beta.kubernetes.io/hostnames-map"
)

// TODO: to be deleted after v1.3 is released
type HostRecord struct {
    HostName string
}

// RepackSubsets takes a slice of EndpointSubset objects, expands it to the full
// representation, and then repacks that into the canonical layout. This
// ensures that code which operates on these objects can rely on the common
// form for things like comparison. The result is a newly allocated slice.
func RepackSubsets(subsets []api.EndpointSubset) []api.EndpointSubset {
    // First map each unique port definition to the sets of hosts that
    // offer it.
    allAddrs := map[addressKey]*api.EndpointAddress{}
    portToAddrReadyMap := map[api.EndpointPort]addressSet{}
    for i := range subsets {
        for _, port := range subsets[i].Ports {
            for k := range subsets[i].Addresses {
                mapAddressByPort(&subsets[i].Addresses[k], port, true, allAddrs, portToAddrReadyMap)
            }
            for k := range subsets[i].NotReadyAddresses {
                mapAddressByPort(&subsets[i].NotReadyAddresses[k], port, false, allAddrs, portToAddrReadyMap)
            }
        }
    }

    // Next, map the sets of hosts to the sets of ports they offer.
    // Go does not allow maps or slices as keys to maps, so we have
    // to synthesize an artificial key and do a sort of 2-part
    // associative entity.
    type keyString string
    keyToAddrReadyMap := map[keyString]addressSet{}
    addrReadyMapKeyToPorts := map[keyString][]api.EndpointPort{}
    for port, addrs := range portToAddrReadyMap {
        key := keyString(hashAddresses(addrs))
        keyToAddrReadyMap[key] = addrs
        addrReadyMapKeyToPorts[key] = append(addrReadyMapKeyToPorts[key], port)
    }

    // Next, build the N-to-M association the API wants.
    final := []api.EndpointSubset{}
    for key, ports := range addrReadyMapKeyToPorts {
        var readyAddrs, notReadyAddrs []api.EndpointAddress
        for addr, ready := range keyToAddrReadyMap[key] {
            if ready {
                readyAddrs = append(readyAddrs, *addr)
            } else {
                notReadyAddrs = append(notReadyAddrs, *addr)
            }
        }
        final = append(final, api.EndpointSubset{Addresses: readyAddrs, NotReadyAddresses: notReadyAddrs, Ports: ports})
    }

    // Finally, sort it.
    return SortSubsets(final)
}

// The sets of hosts must be de-duped, using IP+UID as the key.
type addressKey struct {
    ip  string
    uid types.UID
}

// mapAddressByPort adds an address into a map by its ports, registering the address with a unique pointer, and preserving
// any existing ready state.
func mapAddressByPort(addr *api.EndpointAddress, port api.EndpointPort, ready bool, allAddrs map[addressKey]*api.EndpointAddress, portToAddrReadyMap map[api.EndpointPort]addressSet) *api.EndpointAddress {
    // use addressKey to distinguish between two endpoints that are identical addresses
    // but may have come from different hosts, for attribution. For instance, Mesos
    // assigns pods the node IP, but the pods are distinct.
    key := addressKey{ip: addr.IP}
    if addr.TargetRef != nil {
        key.uid = addr.TargetRef.UID
    }

    // Accumulate the address. The full EndpointAddress structure is preserved for use when
    // we rebuild the subsets so that the final TargetRef has all of the necessary data.
    existingAddress := allAddrs[key]
    if existingAddress == nil {
        // Make a copy so we don't write to the
        // input args of this function.
        existingAddress = &api.EndpointAddress{}
        *existingAddress = *addr
        allAddrs[key] = existingAddress
    }

    // Remember that this port maps to this address.
    if _, found := portToAddrReadyMap[port]; !found {
        portToAddrReadyMap[port] = addressSet{}
    }
    // if we have not yet recorded this port for this address, or if the previous
    // state was ready, write the current ready state. not ready always trumps
    // ready.
    if wasReady, found := portToAddrReadyMap[port][existingAddress]; !found || wasReady {
        portToAddrReadyMap[port][existingAddress] = ready
    }
    return existingAddress
}

type addressSet map[*api.EndpointAddress]bool

type addrReady struct {
    addr  *api.EndpointAddress
    ready bool
}

func hashAddresses(addrs addressSet) string {
    // Flatten the list of addresses into a string so it can be used as a
    // map key. Unfortunately, DeepHashObject is implemented in terms of
    // spew, and spew does not handle non-primitive map keys well. So
    // first we collapse it into a slice, sort the slice, then hash that.
    slice := make([]addrReady, 0, len(addrs))
    for k, ready := range addrs {
        slice = append(slice, addrReady{k, ready})
    }
    sort.Sort(addrsReady(slice))
    hasher := md5.New()
    hashutil.DeepHashObject(hasher, slice)
    return hex.EncodeToString(hasher.Sum(nil)[0:])
}

func lessAddrReady(a, b addrReady) bool {
    // ready is not significant to hashing since we can't have duplicate addresses
    return LessEndpointAddress(a.addr, b.addr)
}

type addrsReady []addrReady

func (sl addrsReady) Len() int      { return len(sl) }
func (sl addrsReady) Swap(i, j int) { sl[i], sl[j] = sl[j], sl[i] }
func (sl addrsReady) Less(i, j int) bool {
    return lessAddrReady(sl[i], sl[j])
}

func LessEndpointAddress(a, b *api.EndpointAddress) bool {
    ipComparison := bytes.Compare([]byte(a.IP), []byte(b.IP))
    if ipComparison != 0 {
        return ipComparison < 0
    }
    if b.TargetRef == nil {
        return false
    }
    if a.TargetRef == nil {
        return true
    }
    return a.TargetRef.UID < b.TargetRef.UID
}

type addrPtrsByIpAndUID []*api.EndpointAddress

func (sl addrPtrsByIpAndUID) Len() int      { return len(sl) }
func (sl addrPtrsByIpAndUID) Swap(i, j int) { sl[i], sl[j] = sl[j], sl[i] }
func (sl addrPtrsByIpAndUID) Less(i, j int) bool {
    return LessEndpointAddress(sl[i], sl[j])
}

// SortSubsets sorts an array of EndpointSubset objects in place. For ease of
// use it returns the input slice.
func SortSubsets(subsets []api.EndpointSubset) []api.EndpointSubset {
    for i := range subsets {
        ss := &subsets[i]
        sort.Sort(addrsByIpAndUID(ss.Addresses))
        sort.Sort(addrsByIpAndUID(ss.NotReadyAddresses))
        sort.Sort(portsByHash(ss.Ports))
    }
    sort.Sort(subsetsByHash(subsets))
    return subsets
}

func hashObject(hasher hash.Hash, obj interface{}) []byte {
    hashutil.DeepHashObject(hasher, obj)
    return hasher.Sum(nil)
}

type subsetsByHash []api.EndpointSubset

func (sl subsetsByHash) Len() int      { return len(sl) }
func (sl subsetsByHash) Swap(i, j int) { sl[i], sl[j] = sl[j], sl[i] }
func (sl subsetsByHash) Less(i, j int) bool {
    hasher := md5.New()
    h1 := hashObject(hasher, sl[i])
    h2 := hashObject(hasher, sl[j])
    return bytes.Compare(h1, h2) < 0
}

type addrsByIpAndUID []api.EndpointAddress

func (sl addrsByIpAndUID) Len() int      { return len(sl) }
func (sl addrsByIpAndUID) Swap(i, j int) { sl[i], sl[j] = sl[j], sl[i] }
func (sl addrsByIpAndUID) Less(i, j int) bool {
    return LessEndpointAddress(&sl[i], &sl[j])
}

type portsByHash []api.EndpointPort

func (sl portsByHash) Len() int      { return len(sl) }
func (sl portsByHash) Swap(i, j int) { sl[i], sl[j] = sl[j], sl[i] }
func (sl portsByHash) Less(i, j int) bool {
    hasher := md5.New()
    h1 := hashObject(hasher, sl[i])
    h2 := hashObject(hasher, sl[j])
    return bytes.Compare(h1, h2) < 0
}
@ -1,49 +0,0 @@
{
  "kind": "Node",
  "apiVersion": "v1",
  "metadata": {
    "name": "e2e-test-wojtekt-minion-etd6",
    "selfLink": "/api/v1/nodes/e2e-test-wojtekt-minion-etd6",
    "uid": "a7e89222-e8e5-11e4-8fde-42010af09327",
    "resourceVersion": "379",
    "creationTimestamp": "2015-04-22T11:49:39Z"
  },
  "spec": {
    "externalID": "15488322946290398375"
  },
  "status": {
    "capacity": {
      "cpu": "1",
      "memory": "1745152Ki"
    },
    "conditions": [
      {
        "type": "Ready",
        "status": "True",
        "lastHeartbeatTime": "2015-04-22T11:58:17Z",
        "lastTransitionTime": "2015-04-22T11:49:52Z",
        "reason": "kubelet is posting ready status"
      }
    ],
    "addresses": [
      {
        "type": "ExternalIP",
        "address": "104.197.49.213"
      },
      {
        "type": "LegacyHostIP",
        "address": "104.197.20.11"
      }
    ],
    "nodeInfo": {
      "machineID": "",
      "systemUUID": "D59FA3FA-7B5B-7287-5E1A-1D79F13CB577",
      "bootID": "44a832f3-8cfb-4de5-b7d2-d66030b6cd95",
      "kernelVersion": "3.16.0-0.bpo.4-amd64",
      "osImage": "Debian GNU/Linux 7 (wheezy)",
      "containerRuntimeVersion": "docker://1.5.0",
      "kubeletVersion": "v0.15.0-484-g0c8ee980d705a3-dirty",
      "kubeProxyVersion": "v0.15.0-484-g0c8ee980d705a3-dirty"
    }
  }
}
@ -1,61 +0,0 @@
/*
Copyright 2015 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package pod

import (
    "fmt"

    "k8s.io/client-go/1.4/pkg/api"
    "k8s.io/client-go/1.4/pkg/util/intstr"
)

const (
    // TODO: to be deleted after v1.3 is released. PodSpec has a dedicated Hostname field.
    // The annotation value is a string specifying the hostname to be used for the pod e.g 'my-webserver-1'
    PodHostnameAnnotation = "pod.beta.kubernetes.io/hostname"

    // TODO: to be deleted after v1.3 is released. PodSpec has a dedicated Subdomain field.
    // The annotation value is a string specifying the subdomain e.g. "my-web-service"
    // If specified, on the pod itself, "<hostname>.my-web-service.<namespace>.svc.<cluster domain>" would resolve to
    // the pod's IP.
    // If there is a headless service named "my-web-service" in the same namespace as the pod, then,
    // <hostname>.my-web-service.<namespace>.svc.<cluster domain>" would be resolved by the cluster DNS Server.
    PodSubdomainAnnotation = "pod.beta.kubernetes.io/subdomain"
)

// FindPort locates the container port for the given pod and portName. If the
// targetPort is a number, use that. If the targetPort is a string, look that
// string up in all named ports in all containers in the target pod. If no
// match is found, fail.
func FindPort(pod *api.Pod, svcPort *api.ServicePort) (int, error) {
    portName := svcPort.TargetPort
    switch portName.Type {
    case intstr.String:
        name := portName.StrVal
        for _, container := range pod.Spec.Containers {
            for _, port := range container.Ports {
                if port.Name == name && port.Protocol == svcPort.Protocol {
                    return int(port.ContainerPort), nil
                }
            }
        }
    case intstr.Int:
        return portName.IntValue(), nil
    }

    return 0, fmt.Errorf("no suitable port for manifest: %s", pod.UID)
}
@ -1,83 +0,0 @@
{
  "kind": "ReplicationController",
  "apiVersion": "v1",
  "metadata": {
    "name": "elasticsearch-logging-controller",
    "namespace": "default",
    "selfLink": "/api/v1/namespaces/default/replicationcontrollers/elasticsearch-logging-controller",
    "uid": "aa76f162-e8e5-11e4-8fde-42010af09327",
    "resourceVersion": "98",
    "creationTimestamp": "2015-04-22T11:49:43Z",
    "labels": {
      "kubernetes.io/cluster-service": "true",
      "name": "elasticsearch-logging"
    }
  },
  "spec": {
    "replicas": 1,
    "selector": {
      "name": "elasticsearch-logging"
    },
    "template": {
      "metadata": {
        "creationTimestamp": null,
        "labels": {
          "kubernetes.io/cluster-service": "true",
          "name": "elasticsearch-logging"
        }
      },
      "spec": {
        "volumes": [
          {
            "name": "es-persistent-storage",
            "hostPath": null,
            "emptyDir": {
              "medium": ""
            },
            "gcePersistentDisk": null,
            "awsElasticBlockStore": null,
            "gitRepo": null,
            "secret": null,
            "nfs": null,
            "iscsi": null,
            "glusterfs": null,
            "quobyte": null
          }
        ],
        "containers": [
          {
            "name": "elasticsearch-logging",
            "image": "gcr.io/google_containers/elasticsearch:1.0",
            "ports": [
              {
                "name": "db",
                "containerPort": 9200,
                "protocol": "TCP"
              },
              {
                "name": "transport",
                "containerPort": 9300,
                "protocol": "TCP"
              }
            ],
            "resources": {},
            "volumeMounts": [
              {
                "name": "es-persistent-storage",
                "mountPath": "/data"
              }
            ],
            "terminationMessagePath": "/dev/termination-log",
            "imagePullPolicy": "IfNotPresent",
            "capabilities": {}
          }
        ],
        "restartPolicy": "Always",
        "dnsPolicy": "ClusterFirst"
      }
    }
  },
  "status": {
    "replicas": 1
  }
}
@ -1,89 +0,0 @@
/*
Copyright 2016 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package service

import (
    "strconv"

    "github.com/golang/glog"
    "k8s.io/client-go/1.4/pkg/api"
)

const (
    // AnnotationLoadBalancerSourceRangesKey is the key of the annotation on a service to set allowed ingress ranges on their LoadBalancers
    //
    // It should be a comma-separated list of CIDRs, e.g. `0.0.0.0/0` to
    // allow full access (the default) or `18.0.0.0/8,56.0.0.0/8` to allow
    // access only from the CIDRs currently allocated to MIT & the USPS.
    //
    // Not all cloud providers support this annotation, though AWS & GCE do.
    AnnotationLoadBalancerSourceRangesKey = "service.beta.kubernetes.io/load-balancer-source-ranges"

    // AnnotationExternalTraffic An annotation that denotes if this Service desires to route external traffic to local
    // endpoints only. This preserves Source IP and avoids a second hop.
    AnnotationExternalTraffic = "service.alpha.kubernetes.io/external-traffic"
    // AnnotationValueExternalTrafficLocal Value of annotation to specify local endpoints behaviour
    AnnotationValueExternalTrafficLocal = "OnlyLocal"
    // AnnotationValueExternalTrafficGlobal Value of annotation to specify global (legacy) behaviour
    AnnotationValueExternalTrafficGlobal = "Global"
    // AnnotationHealthCheckNodePort Annotation specifying the healthcheck nodePort for the service
    // If not specified, annotation is created by the service api backend with the allocated nodePort
    // Will use user-specified nodePort value if specified by the client
    AnnotationHealthCheckNodePort = "service.alpha.kubernetes.io/healthcheck-nodeport"
)

// NeedsHealthCheck Check service for health check annotations
func NeedsHealthCheck(service *api.Service) bool {
    if l, ok := service.Annotations[AnnotationExternalTraffic]; ok {
        if l == AnnotationValueExternalTrafficLocal {
            return true
        } else if l == AnnotationValueExternalTrafficGlobal {
            return false
        } else {
            glog.Errorf("Invalid value for annotation %v", AnnotationExternalTraffic)
            return false
        }
    }
    return false
}

// GetServiceHealthCheckNodePort Return health check node port annotation for service, if one exists
func GetServiceHealthCheckNodePort(service *api.Service) int32 {
    if NeedsHealthCheck(service) {
        if l, ok := service.Annotations[AnnotationHealthCheckNodePort]; ok {
            p, err := strconv.Atoi(l)
            if err != nil {
                glog.Errorf("Failed to parse annotation %v: %v", AnnotationHealthCheckNodePort, err)
                return 0
            }
            return int32(p)
        }
    }
    return 0
}

// GetServiceHealthCheckPathPort Return the path and nodePort programmed into the Cloud LB Health Check
func GetServiceHealthCheckPathPort(service *api.Service) (string, int32) {
    if !NeedsHealthCheck(service) {
        return "", 0
    }
    port := GetServiceHealthCheckNodePort(service)
    if port == 0 {
        return "", 0
    }
    return "/healthz", port
}
@ -1,68 +0,0 @@
|
|||
/*
|
||||
Copyright 2016 The Kubernetes Authors.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
package service
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"strings"
|
||||
|
||||
"k8s.io/client-go/1.4/pkg/api"
|
||||
netsets "k8s.io/client-go/1.4/pkg/util/net/sets"
|
||||
)
|
||||
|
||||
const (
|
||||
defaultLoadBalancerSourceRanges = "0.0.0.0/0"
|
||||
)
|
||||
|
||||
// IsAllowAll checks whether the netsets.IPNet allows traffic from 0.0.0.0/0
func IsAllowAll(ipnets netsets.IPNet) bool {
	for _, s := range ipnets.StringSlice() {
		if s == "0.0.0.0/0" {
			return true
		}
	}
	return false
}

// GetLoadBalancerSourceRanges first tries to parse and verify the LoadBalancerSourceRanges field from a service.
// If the field is not specified, it falls back to parsing and verifying the AnnotationLoadBalancerSourceRangesKey annotation,
// extracting the source ranges to allow; if neither is present it returns a default (allow-all) value.
func GetLoadBalancerSourceRanges(service *api.Service) (netsets.IPNet, error) {
	var ipnets netsets.IPNet
	var err error
	// If the SourceRange field is specified, ignore the sourceRange annotation.
	if len(service.Spec.LoadBalancerSourceRanges) > 0 {
		specs := service.Spec.LoadBalancerSourceRanges
		ipnets, err = netsets.ParseIPNets(specs...)

		if err != nil {
			return nil, fmt.Errorf("service.Spec.LoadBalancerSourceRanges: %v is not valid. Expecting a list of IP ranges. For example, 10.0.0.0/24. Error msg: %v", specs, err)
		}
	} else {
		val := service.Annotations[AnnotationLoadBalancerSourceRangesKey]
		val = strings.TrimSpace(val)
		if val == "" {
			val = defaultLoadBalancerSourceRanges
		}
		specs := strings.Split(val, ",")
		ipnets, err = netsets.ParseIPNets(specs...)
		if err != nil {
			return nil, fmt.Errorf("%s: %s is not valid. Expecting a comma-separated list of source IP ranges. For example, 10.0.0.0/24,192.168.2.0/24", AnnotationLoadBalancerSourceRangesKey, val)
		}
	}
	return ipnets, nil
}
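A short sketch of how these two helpers combine; the CIDR and the service literal are made-up examples, not part of this file (assumes the fmt import):

	// Hypothetical service restricted to a single office range via the annotation.
	svc := &api.Service{}
	svc.Annotations = map[string]string{
		AnnotationLoadBalancerSourceRangesKey: "203.0.113.0/24",
	}
	ranges, err := GetLoadBalancerSourceRanges(svc)
	if err != nil {
		// malformed CIDR list; surface the error to the caller
	}
	fmt.Println(IsAllowAll(ranges)) // false: only 203.0.113.0/24 is allowed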
@ -1,74 +0,0 @@
|
|||
/*
|
||||
Copyright 2015 The Kubernetes Authors.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
package validation
|
||||
|
||||
import (
|
||||
"k8s.io/client-go/1.4/pkg/api/unversioned"
|
||||
"k8s.io/client-go/1.4/pkg/util/validation"
|
||||
"k8s.io/client-go/1.4/pkg/util/validation/field"
|
||||
)
|
||||
|
||||
func ValidateLabelSelector(ps *unversioned.LabelSelector, fldPath *field.Path) field.ErrorList {
|
||||
allErrs := field.ErrorList{}
|
||||
if ps == nil {
|
||||
return allErrs
|
||||
}
|
||||
allErrs = append(allErrs, ValidateLabels(ps.MatchLabels, fldPath.Child("matchLabels"))...)
|
||||
for i, expr := range ps.MatchExpressions {
|
||||
allErrs = append(allErrs, ValidateLabelSelectorRequirement(expr, fldPath.Child("matchExpressions").Index(i))...)
|
||||
}
|
||||
return allErrs
|
||||
}
|
||||
|
||||
func ValidateLabelSelectorRequirement(sr unversioned.LabelSelectorRequirement, fldPath *field.Path) field.ErrorList {
|
||||
allErrs := field.ErrorList{}
|
||||
switch sr.Operator {
|
||||
case unversioned.LabelSelectorOpIn, unversioned.LabelSelectorOpNotIn:
|
||||
if len(sr.Values) == 0 {
|
||||
allErrs = append(allErrs, field.Required(fldPath.Child("values"), "must be specified when `operator` is 'In' or 'NotIn'"))
|
||||
}
|
||||
case unversioned.LabelSelectorOpExists, unversioned.LabelSelectorOpDoesNotExist:
|
||||
if len(sr.Values) > 0 {
|
||||
allErrs = append(allErrs, field.Forbidden(fldPath.Child("values"), "may not be specified when `operator` is 'Exists' or 'DoesNotExist'"))
|
||||
}
|
||||
default:
|
||||
allErrs = append(allErrs, field.Invalid(fldPath.Child("operator"), sr.Operator, "not a valid selector operator"))
|
||||
}
|
||||
allErrs = append(allErrs, ValidateLabelName(sr.Key, fldPath.Child("key"))...)
|
||||
return allErrs
|
||||
}
|
||||
|
||||
// ValidateLabelName validates that the label name is correctly defined.
|
||||
func ValidateLabelName(labelName string, fldPath *field.Path) field.ErrorList {
|
||||
allErrs := field.ErrorList{}
|
||||
for _, msg := range validation.IsQualifiedName(labelName) {
|
||||
allErrs = append(allErrs, field.Invalid(fldPath, labelName, msg))
|
||||
}
|
||||
return allErrs
|
||||
}
|
||||
|
||||
// ValidateLabels validates that a set of labels are correctly defined.
|
||||
func ValidateLabels(labels map[string]string, fldPath *field.Path) field.ErrorList {
|
||||
allErrs := field.ErrorList{}
|
||||
for k, v := range labels {
|
||||
allErrs = append(allErrs, ValidateLabelName(k, fldPath)...)
|
||||
for _, msg := range validation.IsValidLabelValue(v) {
|
||||
allErrs = append(allErrs, field.Invalid(fldPath, v, msg))
|
||||
}
|
||||
}
|
||||
return allErrs
|
||||
}
|
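To make the validation behaviour concrete, a small sketch with one deliberately invalid operator; the selector literal is an assumption for illustration (assumes the fmt import):

	sel := &unversioned.LabelSelector{
		MatchLabels: map[string]string{"app": "demo"},
		MatchExpressions: []unversioned.LabelSelectorRequirement{
			{Key: "tier", Operator: "Like", Values: []string{"web"}},
		},
	}
	errs := ValidateLabelSelector(sel, field.NewPath("spec", "selector"))
	fmt.Println(len(errs)) // 1: "Like" is not a valid selector operator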
|
@ -1,48 +0,0 @@
|
|||
/*
|
||||
Copyright 2015 The Kubernetes Authors.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
// TODO: This GetVersion/GetGroup arrangement is temporary and will be replaced
// with a GroupAndVersion type.
package util

import "strings"

func GetVersion(groupVersion string) string {
	s := strings.Split(groupVersion, "/")
	if len(s) != 2 {
		// e.g. return "v1" for groupVersion="v1"
		return s[len(s)-1]
	}
	return s[1]
}

func GetGroup(groupVersion string) string {
	s := strings.Split(groupVersion, "/")
	if len(s) == 1 {
		// e.g. return "" for groupVersion="v1"
		return ""
	}
	return s[0]
}

// GetGroupVersion returns the "group/version". It returns "version" if group
// is empty. It returns "group/" if version is empty.
func GetGroupVersion(group, version string) string {
	if len(group) == 0 {
		return version
	}
	return group + "/" + version
}
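A few illustrative calls showing how the split behaves for grouped and legacy ("core") group/version strings (assumes the fmt import):

	fmt.Println(GetGroup("extensions/v1beta1"))   // "extensions"
	fmt.Println(GetVersion("extensions/v1beta1")) // "v1beta1"
	fmt.Println(GetGroup("v1"))                   // "" (legacy core group)
	fmt.Println(GetVersion("v1"))                 // "v1"
	fmt.Println(GetGroupVersion("", "v1"))        // "v1"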
|
@ -1,19 +0,0 @@
|
|||
/*
|
||||
Copyright 2014 The Kubernetes Authors.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
// Package validation has functions for validating the correctness of api
|
||||
// objects and explaining what is wrong with them when they aren't valid.
|
||||
package validation
|
|
@ -1,80 +0,0 @@
|
|||
/*
|
||||
Copyright 2014 The Kubernetes Authors.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
package validation
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
|
||||
"k8s.io/client-go/1.4/pkg/api"
|
||||
"k8s.io/client-go/1.4/pkg/api/meta"
|
||||
"k8s.io/client-go/1.4/pkg/api/unversioned"
|
||||
apiutil "k8s.io/client-go/1.4/pkg/api/util"
|
||||
"k8s.io/client-go/1.4/pkg/apimachinery/registered"
|
||||
"k8s.io/client-go/1.4/pkg/util/validation"
|
||||
"k8s.io/client-go/1.4/pkg/util/validation/field"
|
||||
)
|
||||
|
||||
// ValidateEvent makes sure that the event makes sense.
|
||||
func ValidateEvent(event *api.Event) field.ErrorList {
|
||||
allErrs := field.ErrorList{}
|
||||
|
||||
// Make sure event.Namespace and the involvedObject.Namespace agree
|
||||
if len(event.InvolvedObject.Namespace) == 0 {
|
||||
// event.Namespace must also be empty (or "default", for compatibility with old clients)
|
||||
if event.Namespace != api.NamespaceNone && event.Namespace != api.NamespaceDefault {
|
||||
allErrs = append(allErrs, field.Invalid(field.NewPath("involvedObject", "namespace"), event.InvolvedObject.Namespace, "does not match event.namespace"))
|
||||
}
|
||||
} else {
|
||||
// event namespace must match
|
||||
if event.Namespace != event.InvolvedObject.Namespace {
|
||||
allErrs = append(allErrs, field.Invalid(field.NewPath("involvedObject", "namespace"), event.InvolvedObject.Namespace, "does not match event.namespace"))
|
||||
}
|
||||
}
|
||||
|
||||
// For kinds we recognize, make sure involvedObject.Namespace is set for namespaced kinds
|
||||
if namespaced, err := isNamespacedKind(event.InvolvedObject.Kind, event.InvolvedObject.APIVersion); err == nil {
|
||||
if namespaced && len(event.InvolvedObject.Namespace) == 0 {
|
||||
allErrs = append(allErrs, field.Required(field.NewPath("involvedObject", "namespace"), fmt.Sprintf("required for kind %s", event.InvolvedObject.Kind)))
|
||||
}
|
||||
if !namespaced && len(event.InvolvedObject.Namespace) > 0 {
|
||||
allErrs = append(allErrs, field.Invalid(field.NewPath("involvedObject", "namespace"), event.InvolvedObject.Namespace, fmt.Sprintf("not allowed for kind %s", event.InvolvedObject.Kind)))
|
||||
}
|
||||
}
|
||||
|
||||
for _, msg := range validation.IsDNS1123Subdomain(event.Namespace) {
|
||||
allErrs = append(allErrs, field.Invalid(field.NewPath("namespace"), event.Namespace, msg))
|
||||
}
|
||||
return allErrs
|
||||
}
|
||||
|
||||
// isNamespacedKind reports whether the kind in groupVersion is namespaced (as opposed to scoped at the root of the api hierarchy)
|
||||
func isNamespacedKind(kind, groupVersion string) (bool, error) {
|
||||
group := apiutil.GetGroup(groupVersion)
|
||||
g, err := registered.Group(group)
|
||||
if err != nil {
|
||||
return false, err
|
||||
}
|
||||
restMapping, err := g.RESTMapper.RESTMapping(unversioned.GroupKind{Group: group, Kind: kind}, apiutil.GetVersion(groupVersion))
|
||||
if err != nil {
|
||||
return false, err
|
||||
}
|
||||
scopeName := restMapping.Scope.Name()
|
||||
if scopeName == meta.RESTScopeNameNamespace {
|
||||
return true, nil
|
||||
}
|
||||
return false, nil
|
||||
}
|
|
@ -1,370 +0,0 @@
|
|||
/*
|
||||
Copyright 2014 The Kubernetes Authors.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
package validation
|
||||
|
||||
import (
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
"reflect"
|
||||
"regexp"
|
||||
"strings"
|
||||
|
||||
"github.com/emicklei/go-restful/swagger"
|
||||
"github.com/golang/glog"
|
||||
apiutil "k8s.io/client-go/1.4/pkg/api/util"
|
||||
"k8s.io/client-go/1.4/pkg/runtime"
|
||||
utilerrors "k8s.io/client-go/1.4/pkg/util/errors"
|
||||
"k8s.io/client-go/1.4/pkg/util/yaml"
|
||||
)
|
||||
|
||||
type InvalidTypeError struct {
|
||||
ExpectedKind reflect.Kind
|
||||
ObservedKind reflect.Kind
|
||||
FieldName string
|
||||
}
|
||||
|
||||
func (i *InvalidTypeError) Error() string {
|
||||
return fmt.Sprintf("expected type %s, for field %s, got %s", i.ExpectedKind.String(), i.FieldName, i.ObservedKind.String())
|
||||
}
|
||||
|
||||
func NewInvalidTypeError(expected reflect.Kind, observed reflect.Kind, fieldName string) error {
|
||||
return &InvalidTypeError{expected, observed, fieldName}
|
||||
}
|
||||
|
||||
// TypeNotFoundError is returned when the specified type
// cannot be found in the schema.
type TypeNotFoundError string
|
||||
|
||||
func (tnfe TypeNotFoundError) Error() string {
|
||||
return fmt.Sprintf("couldn't find type: %s", string(tnfe))
|
||||
}
|
||||
|
||||
// Schema is an interface that knows how to validate an API object serialized to a byte array.
|
||||
type Schema interface {
|
||||
ValidateBytes(data []byte) error
|
||||
}
|
||||
|
||||
type NullSchema struct{}
|
||||
|
||||
func (NullSchema) ValidateBytes(data []byte) error { return nil }
|
||||
|
||||
type SwaggerSchema struct {
|
||||
api swagger.ApiDeclaration
|
||||
delegate Schema // For delegating to other api groups
|
||||
}
|
||||
|
||||
func NewSwaggerSchemaFromBytes(data []byte, factory Schema) (Schema, error) {
|
||||
schema := &SwaggerSchema{}
|
||||
err := json.Unmarshal(data, &schema.api)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
schema.delegate = factory
|
||||
return schema, nil
|
||||
}
|
||||
|
||||
// validateList unpacks a list and validates every item in it.
// It returns nil if every item is OK; otherwise it returns a slice
// containing the errors found for each item.
func (s *SwaggerSchema) validateList(obj map[string]interface{}) []error {
|
||||
items, exists := obj["items"]
|
||||
if !exists {
|
||||
return []error{fmt.Errorf("no items field in %#v", obj)}
|
||||
}
|
||||
return s.validateItems(items)
|
||||
}
|
||||
|
||||
func (s *SwaggerSchema) validateItems(items interface{}) []error {
|
||||
allErrs := []error{}
|
||||
itemList, ok := items.([]interface{})
|
||||
if !ok {
|
||||
return append(allErrs, fmt.Errorf("items isn't a slice"))
|
||||
}
|
||||
for i, item := range itemList {
|
||||
fields, ok := item.(map[string]interface{})
|
||||
if !ok {
|
||||
allErrs = append(allErrs, fmt.Errorf("items[%d] isn't a map[string]interface{}", i))
|
||||
continue
|
||||
}
|
||||
groupVersion := fields["apiVersion"]
|
||||
if groupVersion == nil {
|
||||
allErrs = append(allErrs, fmt.Errorf("items[%d].apiVersion not set", i))
|
||||
continue
|
||||
}
|
||||
itemVersion, ok := groupVersion.(string)
|
||||
if !ok {
|
||||
allErrs = append(allErrs, fmt.Errorf("items[%d].apiVersion isn't string type", i))
|
||||
continue
|
||||
}
|
||||
if len(itemVersion) == 0 {
|
||||
allErrs = append(allErrs, fmt.Errorf("items[%d].apiVersion is empty", i))
|
||||
}
|
||||
kind := fields["kind"]
|
||||
if kind == nil {
|
||||
allErrs = append(allErrs, fmt.Errorf("items[%d].kind not set", i))
|
||||
continue
|
||||
}
|
||||
itemKind, ok := kind.(string)
|
||||
if !ok {
|
||||
allErrs = append(allErrs, fmt.Errorf("items[%d].kind isn't string type", i))
|
||||
continue
|
||||
}
|
||||
if len(itemKind) == 0 {
|
||||
allErrs = append(allErrs, fmt.Errorf("items[%d].kind is empty", i))
|
||||
}
|
||||
version := apiutil.GetVersion(itemVersion)
|
||||
errs := s.ValidateObject(item, "", version+"."+itemKind)
|
||||
if len(errs) >= 1 {
|
||||
allErrs = append(allErrs, errs...)
|
||||
}
|
||||
}
|
||||
|
||||
return allErrs
|
||||
}
|
||||
|
||||
func (s *SwaggerSchema) ValidateBytes(data []byte) error {
|
||||
var obj interface{}
|
||||
out, err := yaml.ToJSON(data)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
data = out
|
||||
if err := json.Unmarshal(data, &obj); err != nil {
|
||||
return err
|
||||
}
|
||||
fields, ok := obj.(map[string]interface{})
|
||||
if !ok {
|
||||
return fmt.Errorf("error in unmarshaling data %s", string(data))
|
||||
}
|
||||
groupVersion := fields["apiVersion"]
|
||||
if groupVersion == nil {
|
||||
return fmt.Errorf("apiVersion not set")
|
||||
}
|
||||
if _, ok := groupVersion.(string); !ok {
|
||||
return fmt.Errorf("apiVersion isn't string type")
|
||||
}
|
||||
kind := fields["kind"]
|
||||
if kind == nil {
|
||||
return fmt.Errorf("kind not set")
|
||||
}
|
||||
if _, ok := kind.(string); !ok {
|
||||
return fmt.Errorf("kind isn't string type")
|
||||
}
|
||||
if strings.HasSuffix(kind.(string), "List") {
|
||||
return utilerrors.NewAggregate(s.validateList(fields))
|
||||
}
|
||||
version := apiutil.GetVersion(groupVersion.(string))
|
||||
allErrs := s.ValidateObject(obj, "", version+"."+kind.(string))
|
||||
if len(allErrs) == 1 {
|
||||
return allErrs[0]
|
||||
}
|
||||
return utilerrors.NewAggregate(allErrs)
|
||||
}
|
||||
|
||||
func (s *SwaggerSchema) ValidateObject(obj interface{}, fieldName, typeName string) []error {
|
||||
allErrs := []error{}
|
||||
models := s.api.Models
|
||||
model, ok := models.At(typeName)
|
||||
|
||||
// Verify the api version matches. This is required for nested types with differing api versions because
|
||||
// s.api only has schema for 1 api version (the parent object type's version).
|
||||
// e.g. an extensions/v1beta1 Template embedding a /v1 Service requires the schema for the extensions/v1beta1
|
||||
// api to delegate to the schema for the /v1 api.
|
||||
// Only do this for !ok objects so that cross ApiVersion vendored types take precedence.
|
||||
if !ok && s.delegate != nil {
|
||||
fields, mapOk := obj.(map[string]interface{})
|
||||
if !mapOk {
|
||||
return append(allErrs, fmt.Errorf("field %s: expected object of type map[string]interface{}, but the actual type is %T", fieldName, obj))
|
||||
}
|
||||
if delegated, err := s.delegateIfDifferentApiVersion(runtime.Unstructured{Object: fields}); delegated {
|
||||
if err != nil {
|
||||
allErrs = append(allErrs, err)
|
||||
}
|
||||
return allErrs
|
||||
}
|
||||
}
|
||||
|
||||
if !ok {
|
||||
return append(allErrs, TypeNotFoundError(typeName))
|
||||
}
|
||||
properties := model.Properties
|
||||
if len(properties.List) == 0 {
|
||||
// The object does not have any sub-fields.
|
||||
return nil
|
||||
}
|
||||
fields, ok := obj.(map[string]interface{})
|
||||
if !ok {
|
||||
return append(allErrs, fmt.Errorf("field %s: expected object of type map[string]interface{}, but the actual type is %T", fieldName, obj))
|
||||
}
|
||||
if len(fieldName) > 0 {
|
||||
fieldName = fieldName + "."
|
||||
}
|
||||
// handle required fields
|
||||
for _, requiredKey := range model.Required {
|
||||
if _, ok := fields[requiredKey]; !ok {
|
||||
allErrs = append(allErrs, fmt.Errorf("field %s: is required", requiredKey))
|
||||
}
|
||||
}
|
||||
for key, value := range fields {
|
||||
details, ok := properties.At(key)
|
||||
|
||||
// Special case for runtime.RawExtension and runtime.Objects because they always fail to validate
|
||||
// This is because the actual values will be of some sub-type (e.g. Deployment) not the expected
|
||||
// super-type (RawExtension)
|
||||
if s.isGenericArray(details) {
|
||||
errs := s.validateItems(value)
|
||||
if len(errs) > 0 {
|
||||
allErrs = append(allErrs, errs...)
|
||||
}
|
||||
continue
|
||||
}
|
||||
if !ok {
|
||||
allErrs = append(allErrs, fmt.Errorf("found invalid field %s for %s", key, typeName))
|
||||
continue
|
||||
}
|
||||
if details.Type == nil && details.Ref == nil {
|
||||
allErrs = append(allErrs, fmt.Errorf("could not find the type of %s from object: %v", key, details))
|
||||
}
|
||||
var fieldType string
|
||||
if details.Type != nil {
|
||||
fieldType = *details.Type
|
||||
} else {
|
||||
fieldType = *details.Ref
|
||||
}
|
||||
if value == nil {
|
||||
glog.V(2).Infof("Skipping nil field: %s", key)
|
||||
continue
|
||||
}
|
||||
errs := s.validateField(value, fieldName+key, fieldType, &details)
|
||||
if len(errs) > 0 {
|
||||
allErrs = append(allErrs, errs...)
|
||||
}
|
||||
}
|
||||
return allErrs
|
||||
}
|
||||
|
||||
// delegateIfDifferentApiVersion delegates the validation of an object if its ApiGroup does not match the
|
||||
// current SwaggerSchema.
|
||||
// First return value is true if the validation was delegated (by a different ApiGroup SwaggerSchema)
|
||||
// Second return value is the result of the delegated validation if performed.
|
||||
func (s *SwaggerSchema) delegateIfDifferentApiVersion(obj runtime.Unstructured) (bool, error) {
|
||||
// Never delegate objects in the same ApiVersion or we will get infinite recursion
|
||||
if !s.isDifferentApiVersion(obj) {
|
||||
return false, nil
|
||||
}
|
||||
|
||||
// Convert the object back into bytes so that we can pass it to the ValidateBytes function
|
||||
m, err := json.Marshal(obj.Object)
|
||||
if err != nil {
|
||||
return true, err
|
||||
}
|
||||
|
||||
// Delegate validation of this object to the correct SwaggerSchema for its ApiGroup
|
||||
return true, s.delegate.ValidateBytes(m)
|
||||
}
|
||||
|
||||
// isDifferentApiVersion Returns true if obj lives in a different ApiVersion than the SwaggerSchema does.
|
||||
// The SwaggerSchema will not be able to process objects in different ApiVersions unless they are vendored.
|
||||
func (s *SwaggerSchema) isDifferentApiVersion(obj runtime.Unstructured) bool {
|
||||
groupVersion := obj.GetAPIVersion()
|
||||
return len(groupVersion) > 0 && s.api.ApiVersion != groupVersion
|
||||
}
|
||||
|
||||
// isGenericArray Returns true if p is an array of generic Objects - either RawExtension or Object.
|
||||
func (s *SwaggerSchema) isGenericArray(p swagger.ModelProperty) bool {
|
||||
return p.DataTypeFields.Type != nil &&
|
||||
*p.DataTypeFields.Type == "array" &&
|
||||
p.Items != nil &&
|
||||
p.Items.Ref != nil &&
|
||||
(*p.Items.Ref == "runtime.RawExtension" || *p.Items.Ref == "runtime.Object")
|
||||
}
|
||||
|
||||
// This matches type name in the swagger spec, such as "v1.Binding".
|
||||
var versionRegexp = regexp.MustCompile(`^(v.+|unversioned)\..*`)
|
||||
|
||||
func (s *SwaggerSchema) validateField(value interface{}, fieldName, fieldType string, fieldDetails *swagger.ModelProperty) []error {
|
||||
allErrs := []error{}
|
||||
if reflect.TypeOf(value) == nil {
|
||||
return append(allErrs, fmt.Errorf("unexpected nil value for field %v", fieldName))
|
||||
}
|
||||
// TODO: caesarxuchao: because we have multiple group/versions and objects
// may reference objects in other groups, the commented-out way of checking
// whether a fieldType is a type defined by us is outdated. We use a hacky way
// for now.
|
||||
// TODO: the type name in the swagger spec is something like "v1.Binding",
|
||||
// and the "v1" is generated from the package name, not the groupVersion of
|
||||
// the type. We need to fix go-restful to embed the group name in the type
|
||||
// name, otherwise we couldn't handle identically named types in different
|
||||
// groups correctly.
|
||||
if versionRegexp.MatchString(fieldType) {
|
||||
// if strings.HasPrefix(fieldType, apiVersion) {
|
||||
return s.ValidateObject(value, fieldName, fieldType)
|
||||
}
|
||||
switch fieldType {
|
||||
case "string":
|
||||
// Be loose about what we accept for 'string' since we use IntOrString in a couple of places
|
||||
_, isString := value.(string)
|
||||
_, isNumber := value.(float64)
|
||||
_, isInteger := value.(int)
|
||||
if !isString && !isNumber && !isInteger {
|
||||
return append(allErrs, NewInvalidTypeError(reflect.String, reflect.TypeOf(value).Kind(), fieldName))
|
||||
}
|
||||
case "array":
|
||||
arr, ok := value.([]interface{})
|
||||
if !ok {
|
||||
return append(allErrs, NewInvalidTypeError(reflect.Array, reflect.TypeOf(value).Kind(), fieldName))
|
||||
}
|
||||
var arrType string
|
||||
if fieldDetails.Items.Ref == nil && fieldDetails.Items.Type == nil {
|
||||
return append(allErrs, NewInvalidTypeError(reflect.Array, reflect.TypeOf(value).Kind(), fieldName))
|
||||
}
|
||||
if fieldDetails.Items.Ref != nil {
|
||||
arrType = *fieldDetails.Items.Ref
|
||||
} else {
|
||||
arrType = *fieldDetails.Items.Type
|
||||
}
|
||||
for ix := range arr {
|
||||
errs := s.validateField(arr[ix], fmt.Sprintf("%s[%d]", fieldName, ix), arrType, nil)
|
||||
if len(errs) > 0 {
|
||||
allErrs = append(allErrs, errs...)
|
||||
}
|
||||
}
|
||||
case "uint64":
|
||||
case "int64":
|
||||
case "integer":
|
||||
_, isNumber := value.(float64)
|
||||
_, isInteger := value.(int)
|
||||
if !isNumber && !isInteger {
|
||||
return append(allErrs, NewInvalidTypeError(reflect.Int, reflect.TypeOf(value).Kind(), fieldName))
|
||||
}
|
||||
case "float64":
|
||||
if _, ok := value.(float64); !ok {
|
||||
return append(allErrs, NewInvalidTypeError(reflect.Float64, reflect.TypeOf(value).Kind(), fieldName))
|
||||
}
|
||||
case "boolean":
|
||||
if _, ok := value.(bool); !ok {
|
||||
return append(allErrs, NewInvalidTypeError(reflect.Bool, reflect.TypeOf(value).Kind(), fieldName))
|
||||
}
|
||||
// API servers before release 1.3 produce swagger spec with `type: "any"` as the fallback type, while newer servers produce spec with `type: "object"`.
|
||||
// We have both here so that kubectl can work with both old and new api servers.
|
||||
case "object":
|
||||
case "any":
|
||||
default:
|
||||
return append(allErrs, fmt.Errorf("unexpected type: %v", fieldType))
|
||||
}
|
||||
return allErrs
|
||||
}
|
File diff suppressed because it is too large
|
@ -1,94 +0,0 @@
|
|||
/*
|
||||
Copyright 2014 The Kubernetes Authors.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
package capabilities
|
||||
|
||||
import (
|
||||
"sync"
|
||||
)
|
||||
|
||||
// Capabilities defines the set of capabilities available within the system.
|
||||
// For now these are global. Eventually they may be per-user
|
||||
type Capabilities struct {
|
||||
AllowPrivileged bool
|
||||
|
||||
// Pod sources from which to allow privileged capabilities like host networking, sharing the host
|
||||
// IPC namespace, and sharing the host PID namespace.
|
||||
PrivilegedSources PrivilegedSources
|
||||
|
||||
// PerConnectionBandwidthLimitBytesPerSec limits the throughput of each connection (currently only used for proxy, exec, attach)
|
||||
PerConnectionBandwidthLimitBytesPerSec int64
|
||||
}
|
||||
|
||||
// PrivilegedSources defines the pod sources allowed to make privileged requests for certain types
|
||||
// of capabilities like host networking, sharing the host IPC namespace, and sharing the host PID namespace.
|
||||
type PrivilegedSources struct {
|
||||
// List of pod sources for which using host network is allowed.
|
||||
HostNetworkSources []string
|
||||
|
||||
// List of pod sources for which using host pid namespace is allowed.
|
||||
HostPIDSources []string
|
||||
|
||||
// List of pod sources for which using host ipc is allowed.
|
||||
HostIPCSources []string
|
||||
}
|
||||
|
||||
// TODO: Clean these up into a singleton
|
||||
var once sync.Once
|
||||
var lock sync.Mutex
|
||||
var capabilities *Capabilities
|
||||
|
||||
// Initialize the capability set. This can only be done once per binary, subsequent calls are ignored.
|
||||
func Initialize(c Capabilities) {
|
||||
// Only do this once
|
||||
once.Do(func() {
|
||||
capabilities = &c
|
||||
})
|
||||
}
|
||||
|
||||
// Setup the capability set. It wraps Initialize for improving usability.
|
||||
func Setup(allowPrivileged bool, privilegedSources PrivilegedSources, perConnectionBytesPerSec int64) {
|
||||
Initialize(Capabilities{
|
||||
AllowPrivileged: allowPrivileged,
|
||||
PrivilegedSources: privilegedSources,
|
||||
PerConnectionBandwidthLimitBytesPerSec: perConnectionBytesPerSec,
|
||||
})
|
||||
}
|
||||
|
||||
// SetForTests sets the capability set; it is a convenience method for testing and should only be called from tests.
|
||||
func SetForTests(c Capabilities) {
|
||||
lock.Lock()
|
||||
defer lock.Unlock()
|
||||
capabilities = &c
|
||||
}
|
||||
|
||||
// Returns a read-only copy of the system capabilities.
|
||||
func Get() Capabilities {
|
||||
lock.Lock()
|
||||
defer lock.Unlock()
|
||||
// This check prevents clobbering of capabilities that might've been set via SetForTests
|
||||
if capabilities == nil {
|
||||
Initialize(Capabilities{
|
||||
AllowPrivileged: false,
|
||||
PrivilegedSources: PrivilegedSources{
|
||||
HostNetworkSources: []string{},
|
||||
HostPIDSources: []string{},
|
||||
HostIPCSources: []string{},
|
||||
},
|
||||
})
|
||||
}
|
||||
return *capabilities
|
||||
}
|
|
@ -1,18 +0,0 @@
|
|||
/*
|
||||
Copyright 2014 The Kubernetes Authors.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
// package capabilities manages system level capabilities
|
||||
package capabilities
|
|
@ -1,204 +0,0 @@
|
|||
/*
|
||||
Copyright 2015 The Kubernetes Authors.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
package runtime
|
||||
|
||||
import (
|
||||
gojson "encoding/json"
|
||||
"errors"
|
||||
"fmt"
|
||||
"io"
|
||||
"strings"
|
||||
|
||||
"k8s.io/client-go/1.4/pkg/api/unversioned"
|
||||
"k8s.io/client-go/1.4/pkg/util/json"
|
||||
)
|
||||
|
||||
// UnstructuredJSONScheme is capable of converting JSON data into the Unstructured
|
||||
// type, which can be used for generic access to objects without a predefined scheme.
|
||||
// TODO: move into serializer/json.
|
||||
var UnstructuredJSONScheme Codec = unstructuredJSONScheme{}
|
||||
|
||||
type unstructuredJSONScheme struct{}
|
||||
|
||||
func (s unstructuredJSONScheme) Decode(data []byte, _ *unversioned.GroupVersionKind, obj Object) (Object, *unversioned.GroupVersionKind, error) {
|
||||
var err error
|
||||
if obj != nil {
|
||||
err = s.decodeInto(data, obj)
|
||||
} else {
|
||||
obj, err = s.decode(data)
|
||||
}
|
||||
|
||||
if err != nil {
|
||||
return nil, nil, err
|
||||
}
|
||||
|
||||
gvk := obj.GetObjectKind().GroupVersionKind()
|
||||
if len(gvk.Kind) == 0 {
|
||||
return nil, &gvk, NewMissingKindErr(string(data))
|
||||
}
|
||||
|
||||
return obj, &gvk, nil
|
||||
}
|
||||
|
||||
func (unstructuredJSONScheme) Encode(obj Object, w io.Writer) error {
|
||||
switch t := obj.(type) {
|
||||
case *Unstructured:
|
||||
return json.NewEncoder(w).Encode(t.Object)
|
||||
case *UnstructuredList:
|
||||
items := make([]map[string]interface{}, 0, len(t.Items))
|
||||
for _, i := range t.Items {
|
||||
items = append(items, i.Object)
|
||||
}
|
||||
t.Object["items"] = items
|
||||
defer func() { delete(t.Object, "items") }()
|
||||
return json.NewEncoder(w).Encode(t.Object)
|
||||
case *Unknown:
|
||||
// TODO: Unstructured needs to deal with ContentType.
|
||||
_, err := w.Write(t.Raw)
|
||||
return err
|
||||
default:
|
||||
return json.NewEncoder(w).Encode(t)
|
||||
}
|
||||
}
|
||||
|
||||
func (s unstructuredJSONScheme) decode(data []byte) (Object, error) {
|
||||
type detector struct {
|
||||
Items gojson.RawMessage
|
||||
}
|
||||
var det detector
|
||||
if err := json.Unmarshal(data, &det); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
if det.Items != nil {
|
||||
list := &UnstructuredList{}
|
||||
err := s.decodeToList(data, list)
|
||||
return list, err
|
||||
}
|
||||
|
||||
// No Items field, so it wasn't a list.
|
||||
unstruct := &Unstructured{}
|
||||
err := s.decodeToUnstructured(data, unstruct)
|
||||
return unstruct, err
|
||||
}
|
||||
|
||||
func (s unstructuredJSONScheme) decodeInto(data []byte, obj Object) error {
|
||||
switch x := obj.(type) {
|
||||
case *Unstructured:
|
||||
return s.decodeToUnstructured(data, x)
|
||||
case *UnstructuredList:
|
||||
return s.decodeToList(data, x)
|
||||
case *VersionedObjects:
|
||||
o, err := s.decode(data)
|
||||
if err == nil {
|
||||
x.Objects = []Object{o}
|
||||
}
|
||||
return err
|
||||
default:
|
||||
return json.Unmarshal(data, x)
|
||||
}
|
||||
}
|
||||
|
||||
func (unstructuredJSONScheme) decodeToUnstructured(data []byte, unstruct *Unstructured) error {
|
||||
m := make(map[string]interface{})
|
||||
if err := json.Unmarshal(data, &m); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
unstruct.Object = m
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func (s unstructuredJSONScheme) decodeToList(data []byte, list *UnstructuredList) error {
|
||||
type decodeList struct {
|
||||
Items []gojson.RawMessage
|
||||
}
|
||||
|
||||
var dList decodeList
|
||||
if err := json.Unmarshal(data, &dList); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
if err := json.Unmarshal(data, &list.Object); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
// For typed lists, e.g., a PodList, API server doesn't set each item's
|
||||
// APIVersion and Kind. We need to set it.
|
||||
listAPIVersion := list.GetAPIVersion()
|
||||
listKind := list.GetKind()
|
||||
itemKind := strings.TrimSuffix(listKind, "List")
|
||||
|
||||
delete(list.Object, "items")
|
||||
list.Items = nil
|
||||
for _, i := range dList.Items {
|
||||
unstruct := &Unstructured{}
|
||||
if err := s.decodeToUnstructured([]byte(i), unstruct); err != nil {
|
||||
return err
|
||||
}
|
||||
// This is hacky. Set the item's Kind and APIVersion to those inferred
|
||||
// from the List.
|
||||
if len(unstruct.GetKind()) == 0 && len(unstruct.GetAPIVersion()) == 0 {
|
||||
unstruct.SetKind(itemKind)
|
||||
unstruct.SetAPIVersion(listAPIVersion)
|
||||
}
|
||||
list.Items = append(list.Items, unstruct)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// UnstructuredObjectConverter is an ObjectConverter for use with
|
||||
// Unstructured objects. Since it has no schema or type information,
|
||||
// it will only succeed for no-op conversions. This is provided as a
|
||||
// sane implementation for APIs that require an object converter.
|
||||
type UnstructuredObjectConverter struct{}
|
||||
|
||||
func (UnstructuredObjectConverter) Convert(in, out, context interface{}) error {
|
||||
unstructIn, ok := in.(*Unstructured)
|
||||
if !ok {
|
||||
return fmt.Errorf("input type %T in not valid for unstructured conversion", in)
|
||||
}
|
||||
|
||||
unstructOut, ok := out.(*Unstructured)
|
||||
if !ok {
|
||||
return fmt.Errorf("output type %T in not valid for unstructured conversion", out)
|
||||
}
|
||||
|
||||
// Maybe deep-copy the map? It is documented in the
// ObjectConverter interface that this function is not
// guaranteed not to mutate the input. Or maybe set the input
// object to nil.
|
||||
unstructOut.Object = unstructIn.Object
|
||||
return nil
|
||||
}
|
||||
|
||||
func (UnstructuredObjectConverter) ConvertToVersion(in Object, target GroupVersioner) (Object, error) {
|
||||
if kind := in.GetObjectKind().GroupVersionKind(); !kind.Empty() {
|
||||
gvk, ok := target.KindForGroupVersionKinds([]unversioned.GroupVersionKind{kind})
|
||||
if !ok {
|
||||
// TODO: should this be a typed error?
|
||||
return nil, fmt.Errorf("%v is unstructured and is not suitable for converting to %q", kind, target)
|
||||
}
|
||||
in.GetObjectKind().SetGroupVersionKind(gvk)
|
||||
}
|
||||
return in, nil
|
||||
}
|
||||
|
||||
func (UnstructuredObjectConverter) ConvertFieldLabel(version, kind, label, value string) (string, string, error) {
|
||||
return "", "", errors.New("unstructured cannot convert field labels")
|
||||
}
|
|
@ -1,62 +0,0 @@
|
|||
/*
|
||||
Copyright 2016 The Kubernetes Authors.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
package apparmor
|
||||
|
||||
import (
|
||||
"strings"
|
||||
|
||||
"k8s.io/client-go/1.4/pkg/api"
|
||||
)
|
||||
|
||||
// TODO: Move these values into the API package.
|
||||
const (
|
||||
// The prefix to an annotation key specifying a container profile.
|
||||
ContainerAnnotationKeyPrefix = "container.apparmor.security.beta.kubernetes.io/"
|
||||
// The annotation key specifying the default AppArmor profile.
|
||||
DefaultProfileAnnotationKey = "apparmor.security.beta.kubernetes.io/defaultProfileName"
|
||||
// The annotation key specifying the allowed AppArmor profiles.
|
||||
AllowedProfilesAnnotationKey = "apparmor.security.beta.kubernetes.io/allowedProfileNames"
|
||||
|
||||
// The profile specifying the runtime default.
|
||||
ProfileRuntimeDefault = "runtime/default"
|
||||
// The prefix for specifying profiles loaded on the node.
|
||||
ProfileNamePrefix = "localhost/"
|
||||
)
|
||||
|
||||
// isRequired checks whether AppArmor is required for the pod to be run.
|
||||
func isRequired(pod *api.Pod) bool {
|
||||
for key := range pod.Annotations {
|
||||
if strings.HasPrefix(key, ContainerAnnotationKeyPrefix) {
|
||||
return true
|
||||
}
|
||||
}
|
||||
return false
|
||||
}
|
||||
|
||||
// Returns the name of the profile to use with the container.
|
||||
func GetProfileName(pod *api.Pod, containerName string) string {
|
||||
return pod.Annotations[ContainerAnnotationKeyPrefix+containerName]
|
||||
}
|
||||
|
||||
// Sets the name of the profile to use with the container.
|
||||
func SetProfileName(pod *api.Pod, containerName, profileName string) error {
|
||||
if pod.Annotations == nil {
|
||||
pod.Annotations = map[string]string{}
|
||||
}
|
||||
pod.Annotations[ContainerAnnotationKeyPrefix+containerName] = profileName
|
||||
return nil
|
||||
}
|
|
@ -1,227 +0,0 @@
|
|||
/*
|
||||
Copyright 2016 The Kubernetes Authors.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
package apparmor
|
||||
|
||||
import (
|
||||
"bufio"
|
||||
"errors"
|
||||
"fmt"
|
||||
"io/ioutil"
|
||||
"os"
|
||||
"path"
|
||||
"strings"
|
||||
|
||||
"k8s.io/client-go/1.4/pkg/api"
|
||||
"k8s.io/client-go/1.4/pkg/util"
|
||||
utilconfig "k8s.io/client-go/1.4/pkg/util/config"
|
||||
)
|
||||
|
||||
// Whether AppArmor should be disabled by default.
|
||||
// Set to true if the wrong build tags are set (see validate_disabled.go).
|
||||
var isDisabledBuild bool
|
||||
|
||||
// Validator is the interface for validating that a pod with an AppArmor profile can be run by a Node.
|
||||
type Validator interface {
|
||||
Validate(pod *api.Pod) error
|
||||
ValidateHost() error
|
||||
}
|
||||
|
||||
func NewValidator(runtime string) Validator {
|
||||
if err := validateHost(runtime); err != nil {
|
||||
return &validator{validateHostErr: err}
|
||||
}
|
||||
appArmorFS, err := getAppArmorFS()
|
||||
if err != nil {
|
||||
return &validator{
|
||||
validateHostErr: fmt.Errorf("error finding AppArmor FS: %v", err),
|
||||
}
|
||||
}
|
||||
return &validator{
|
||||
appArmorFS: appArmorFS,
|
||||
}
|
||||
}
|
||||
|
||||
type validator struct {
|
||||
validateHostErr error
|
||||
appArmorFS string
|
||||
}
|
||||
|
||||
func (v *validator) Validate(pod *api.Pod) error {
|
||||
if !isRequired(pod) {
|
||||
return nil
|
||||
}
|
||||
|
||||
if v.ValidateHost() != nil {
|
||||
return v.validateHostErr
|
||||
}
|
||||
|
||||
loadedProfiles, err := v.getLoadedProfiles()
|
||||
if err != nil {
|
||||
return fmt.Errorf("could not read loaded profiles: %v", err)
|
||||
}
|
||||
|
||||
for _, container := range pod.Spec.InitContainers {
|
||||
if err := validateProfile(GetProfileName(pod, container.Name), loadedProfiles); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
for _, container := range pod.Spec.Containers {
|
||||
if err := validateProfile(GetProfileName(pod, container.Name), loadedProfiles); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func (v *validator) ValidateHost() error {
|
||||
return v.validateHostErr
|
||||
}
|
||||
|
||||
// validateHost verifies that the host and runtime are capable of enforcing AppArmor profiles.
|
||||
func validateHost(runtime string) error {
|
||||
// Check feature-gates
|
||||
if !utilconfig.DefaultFeatureGate.AppArmor() {
|
||||
return errors.New("AppArmor disabled by feature-gate")
|
||||
}
|
||||
|
||||
// Check build support.
|
||||
if isDisabledBuild {
|
||||
return errors.New("Binary not compiled for linux")
|
||||
}
|
||||
|
||||
// Check kernel support.
|
||||
if !IsAppArmorEnabled() {
|
||||
return errors.New("AppArmor is not enabled on the host")
|
||||
}
|
||||
|
||||
// Check runtime support. Currently only Docker is supported.
|
||||
if runtime != "docker" {
|
||||
return fmt.Errorf("AppArmor is only enabled for 'docker' runtime. Found: %q.", runtime)
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// Verify that the profile is valid and loaded.
|
||||
func validateProfile(profile string, loadedProfiles map[string]bool) error {
|
||||
if err := ValidateProfileFormat(profile); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
if strings.HasPrefix(profile, ProfileNamePrefix) {
|
||||
profileName := strings.TrimPrefix(profile, ProfileNamePrefix)
|
||||
if !loadedProfiles[profileName] {
|
||||
return fmt.Errorf("profile %q is not loaded", profileName)
|
||||
}
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func ValidateProfileFormat(profile string) error {
|
||||
if profile == "" || profile == ProfileRuntimeDefault {
|
||||
return nil
|
||||
}
|
||||
if !strings.HasPrefix(profile, ProfileNamePrefix) {
|
||||
return fmt.Errorf("invalid AppArmor profile name: %q", profile)
|
||||
}
|
||||
return nil
|
||||
}
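For clarity, how ValidateProfileFormat treats the accepted shapes; the profile names are invented examples (assumes the fmt import):

	fmt.Println(ValidateProfileFormat(""))                     // <nil>: empty means "no profile requested"
	fmt.Println(ValidateProfileFormat(ProfileRuntimeDefault))  // <nil>: runtime/default is always acceptable
	fmt.Println(ValidateProfileFormat("localhost/my-profile")) // <nil>: node-local profile reference
	fmt.Println(ValidateProfileFormat("my-profile"))           // error: invalid AppArmor profile name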
|
||||
|
||||
func (v *validator) getLoadedProfiles() (map[string]bool, error) {
|
||||
profilesPath := path.Join(v.appArmorFS, "profiles")
|
||||
profilesFile, err := os.Open(profilesPath)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("failed to open %s: %v", profilesPath, err)
|
||||
}
|
||||
defer profilesFile.Close()
|
||||
|
||||
profiles := map[string]bool{}
|
||||
scanner := bufio.NewScanner(profilesFile)
|
||||
for scanner.Scan() {
|
||||
profileName := parseProfileName(scanner.Text())
|
||||
if profileName == "" {
|
||||
// Unknown line format; skip it.
|
||||
continue
|
||||
}
|
||||
profiles[profileName] = true
|
||||
}
|
||||
return profiles, nil
|
||||
}
|
||||
|
||||
// The profiles file is formatted with one profile per line, matching one of the forms:
//   namespace://profile-name (mode)
//   profile-name (mode)
// where mode is one of {enforce, complain, kill}. The "namespace://" prefix is only included for
// namespaced profiles. For the purposes of Kubernetes, we consider the namespace part of the profile name.
func parseProfileName(profileLine string) string {
|
||||
modeIndex := strings.IndexRune(profileLine, '(')
|
||||
if modeIndex < 0 {
|
||||
return ""
|
||||
}
|
||||
return strings.TrimSpace(profileLine[:modeIndex])
|
||||
}
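A quick illustration of the line format described in the comment above; the profile names and modes are made up, not read from a real host (assumes the fmt import):

	fmt.Println(parseProfileName("docker-default (enforce)"))    // "docker-default"
	fmt.Println(parseProfileName("ns1://my-profile (complain)")) // "ns1://my-profile"
	fmt.Println(parseProfileName("unparseable line"))            // "" (callers skip such lines)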
|
||||
|
||||
func getAppArmorFS() (string, error) {
|
||||
mountsFile, err := os.Open("/proc/mounts")
|
||||
if err != nil {
|
||||
return "", fmt.Errorf("could not open /proc/mounts: %v", err)
|
||||
}
|
||||
defer mountsFile.Close()
|
||||
|
||||
scanner := bufio.NewScanner(mountsFile)
|
||||
for scanner.Scan() {
|
||||
fields := strings.Fields(scanner.Text())
|
||||
if len(fields) < 3 {
|
||||
// Unknown line format; skip it.
|
||||
continue
|
||||
}
|
||||
if fields[2] == "securityfs" {
|
||||
appArmorFS := path.Join(fields[1], "apparmor")
|
||||
if ok, err := util.FileExists(appArmorFS); !ok {
|
||||
msg := fmt.Sprintf("path %s does not exist", appArmorFS)
|
||||
if err != nil {
|
||||
return "", fmt.Errorf("%s: %v", msg, err)
|
||||
} else {
|
||||
return "", errors.New(msg)
|
||||
}
|
||||
} else {
|
||||
return appArmorFS, nil
|
||||
}
|
||||
}
|
||||
}
|
||||
if err := scanner.Err(); err != nil {
|
||||
return "", fmt.Errorf("error scanning mounts: %v", err)
|
||||
}
|
||||
|
||||
return "", errors.New("securityfs not found")
|
||||
}
|
||||
|
||||
// IsAppArmorEnabled returns true if apparmor is enabled for the host.
|
||||
// This function is forked from
|
||||
// https://github.com/opencontainers/runc/blob/1a81e9ab1f138c091fe5c86d0883f87716088527/libcontainer/apparmor/apparmor.go
|
||||
// to avoid the libapparmor dependency.
|
||||
func IsAppArmorEnabled() bool {
|
||||
if _, err := os.Stat("/sys/kernel/security/apparmor"); err == nil && os.Getenv("container") == "" {
|
||||
if _, err = os.Stat("/sbin/apparmor_parser"); err == nil {
|
||||
buf, err := ioutil.ReadFile("/sys/module/apparmor/parameters/enabled")
|
||||
return err == nil && len(buf) > 1 && buf[0] == 'Y'
|
||||
}
|
||||
}
|
||||
return false
|
||||
}
|
|
@ -1,37 +0,0 @@
|
|||
/*
|
||||
Copyright 2015 The Kubernetes Authors.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
package hash
|
||||
|
||||
import (
|
||||
"hash"
|
||||
|
||||
"github.com/davecgh/go-spew/spew"
|
||||
)
|
||||
|
||||
// DeepHashObject writes specified object to hash using the spew library
|
||||
// which follows pointers and prints actual values of the nested objects
|
||||
// ensuring the hash does not change when a pointer changes.
|
||||
func DeepHashObject(hasher hash.Hash, objectToWrite interface{}) {
|
||||
hasher.Reset()
|
||||
printer := spew.ConfigState{
|
||||
Indent: " ",
|
||||
SortKeys: true,
|
||||
DisableMethods: true,
|
||||
SpewKeys: true,
|
||||
}
|
||||
printer.Fprintf(hasher, "%#v", objectToWrite)
|
||||
}
|
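A minimal sketch of the usual calling pattern, written as a fragment alongside DeepHashObject; the widget type and fnv hasher are illustrative assumptions (assumes the fmt and hash/fnv imports):

	type widget struct {
		Name     string
		Replicas *int32
	}
	n := int32(3)
	hasher := fnv.New32a()
	DeepHashObject(hasher, widget{Name: "demo", Replicas: &n})
	fmt.Printf("%x\n", hasher.Sum32()) // stable for equal values, even through pointers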
|
@ -1,28 +0,0 @@
|
|||
/*
|
||||
Copyright 2016 The Kubernetes Authors.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
// This package contains hand-coded set implementations that should be similar
|
||||
// to the autogenerated ones in pkg/util/sets.
|
||||
// We can't simply use net.IPNet as a map-key in Go (because it contains a
|
||||
// []byte).
|
||||
// We could use the same workaround we use here (a string representation as the
|
||||
// key) to autogenerate sets. If we do that, or decide on an alternate
|
||||
// approach, we should replace the implementations in this package with the
|
||||
// autogenerated versions.
|
||||
// It is expected that callers will alias this import as "netsets" i.e. import
|
||||
// netsets "k8s.io/client-go/1.4/pkg/util/net/sets"
|
||||
|
||||
package sets
|
|
@ -1,119 +0,0 @@
|
|||
/*
|
||||
Copyright 2016 The Kubernetes Authors.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
package sets
|
||||
|
||||
import (
|
||||
"net"
|
||||
"strings"
|
||||
)
|
||||
|
||||
type IPNet map[string]*net.IPNet
|
||||
|
||||
func ParseIPNets(specs ...string) (IPNet, error) {
|
||||
ipnetset := make(IPNet)
|
||||
for _, spec := range specs {
|
||||
spec = strings.TrimSpace(spec)
|
||||
_, ipnet, err := net.ParseCIDR(spec)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
k := ipnet.String() // In case of normalization
|
||||
ipnetset[k] = ipnet
|
||||
}
|
||||
return ipnetset, nil
|
||||
}
|
||||
|
||||
// Insert adds items to the set.
|
||||
func (s IPNet) Insert(items ...*net.IPNet) {
|
||||
for _, item := range items {
|
||||
s[item.String()] = item
|
||||
}
|
||||
}
|
||||
|
||||
// Delete removes all items from the set.
|
||||
func (s IPNet) Delete(items ...*net.IPNet) {
|
||||
for _, item := range items {
|
||||
delete(s, item.String())
|
||||
}
|
||||
}
|
||||
|
||||
// Has returns true if and only if item is contained in the set.
|
||||
func (s IPNet) Has(item *net.IPNet) bool {
|
||||
_, contained := s[item.String()]
|
||||
return contained
|
||||
}
|
||||
|
||||
// HasAll returns true if and only if all items are contained in the set.
|
||||
func (s IPNet) HasAll(items ...*net.IPNet) bool {
|
||||
for _, item := range items {
|
||||
if !s.Has(item) {
|
||||
return false
|
||||
}
|
||||
}
|
||||
return true
|
||||
}
|
||||
|
||||
// Difference returns a set of objects that are not in s2
|
||||
// For example:
|
||||
// s1 = {a1, a2, a3}
|
||||
// s2 = {a1, a2, a4, a5}
|
||||
// s1.Difference(s2) = {a3}
|
||||
// s2.Difference(s1) = {a4, a5}
|
||||
func (s IPNet) Difference(s2 IPNet) IPNet {
|
||||
result := make(IPNet)
|
||||
for k, i := range s {
|
||||
_, found := s2[k]
|
||||
if found {
|
||||
continue
|
||||
}
|
||||
result[k] = i
|
||||
}
|
||||
return result
|
||||
}
|
||||
|
||||
// StringSlice returns a []string with the String representation of each element in the set.
|
||||
// Order is undefined.
|
||||
func (s IPNet) StringSlice() []string {
|
||||
a := make([]string, 0, len(s))
|
||||
for k := range s {
|
||||
a = append(a, k)
|
||||
}
|
||||
return a
|
||||
}
|
||||
|
||||
// IsSuperset returns true if and only if s1 is a superset of s2.
|
||||
func (s1 IPNet) IsSuperset(s2 IPNet) bool {
|
||||
for k := range s2 {
|
||||
_, found := s1[k]
|
||||
if !found {
|
||||
return false
|
||||
}
|
||||
}
|
||||
return true
|
||||
}
|
||||
|
||||
// Equal returns true if and only if s1 is equal (as a set) to s2.
|
||||
// Two sets are equal if their membership is identical.
|
||||
// (In practice, this means same elements, order doesn't matter)
|
||||
func (s1 IPNet) Equal(s2 IPNet) bool {
|
||||
return len(s1) == len(s2) && s1.IsSuperset(s2)
|
||||
}
|
||||
|
||||
// Len returns the size of the set.
|
||||
func (s IPNet) Len() int {
|
||||
return len(s)
|
||||
}
|
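A small usage sketch for this set type; the CIDRs are arbitrary examples (assumes the fmt import):

	s1, _ := ParseIPNets("10.0.0.0/24", "192.168.2.0/24") // keys are the normalized CIDR strings
	s2, _ := ParseIPNets("10.0.0.0/24")
	fmt.Println(s1.IsSuperset(s2))               // true
	fmt.Println(s1.Difference(s2).StringSlice()) // [192.168.2.0/24]
	fmt.Println(s1.Len(), s2.Len())              // 2 1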
|
@ -1,11 +1,20 @@
|
|||
{
	"ImportPath": "k8s.io/client-go/1.4",
	"GoVersion": "go1.7",
	"ImportPath": "k8s.io/client-go/1.5",
	"GoVersion": "go1.6",
	"GodepVersion": "v74",
	"Packages": [
		"./..."
	],
	"Deps": [
		{
			"ImportPath": "github.com/PuerkitoBio/purell",
			"Comment": "v1.0.0",
			"Rev": "8a290539e2e8629dbc4e6bad948158f790ec31f4"
		},
		{
			"ImportPath": "github.com/PuerkitoBio/urlesc",
			"Rev": "5bd2802263f21d8788851d5305584c82a5c75d7e"
		},
		{
			"ImportPath": "github.com/blang/semver",
			"Comment": "v3.0.1",
@ -44,6 +53,22 @@
|
|||
"ImportPath": "github.com/ghodss/yaml",
|
||||
"Rev": "73d445a93680fa1a78ae23a5839bad48f32ba1ee"
|
||||
},
|
||||
{
|
||||
"ImportPath": "github.com/go-openapi/jsonpointer",
|
||||
"Rev": "46af16f9f7b149af66e5d1bd010e3574dc06de98"
|
||||
},
|
||||
{
|
||||
"ImportPath": "github.com/go-openapi/jsonreference",
|
||||
"Rev": "13c6e3589ad90f49bd3e3bbe2c2cb3d7a4142272"
|
||||
},
|
||||
{
|
||||
"ImportPath": "github.com/go-openapi/spec",
|
||||
"Rev": "6aced65f8501fe1217321abf0749d354824ba2ff"
|
||||
},
|
||||
{
|
||||
"ImportPath": "github.com/go-openapi/swag",
|
||||
"Rev": "1d0bd113de87027671077d3c71eb3ac5d7dbba72"
|
||||
},
|
||||
{
|
||||
"ImportPath": "github.com/gogo/protobuf/proto",
|
||||
"Comment": "v0.2-33-ge18d7aa",
|
||||
|
@ -66,6 +91,10 @@
|
|||
"ImportPath": "github.com/google/gofuzz",
|
||||
"Rev": "bbcb9da2d746f8bdbd6a936686a0a6067ada0ec5"
|
||||
},
|
||||
{
|
||||
"ImportPath": "github.com/howeyc/gopass",
|
||||
"Rev": "3ca23474a7c7203e0a0a070fd33508f6efdb9b3d"
|
||||
},
|
||||
{
|
||||
"ImportPath": "github.com/imdario/mergo",
|
||||
"Comment": "0.1.3-8-g6633656",
|
||||
|
@ -75,6 +104,18 @@
|
|||
"ImportPath": "github.com/juju/ratelimit",
|
||||
"Rev": "77ed1c8a01217656d2080ad51981f6e99adaa177"
|
||||
},
|
||||
{
|
||||
"ImportPath": "github.com/mailru/easyjson/buffer",
|
||||
"Rev": "d5b7844b561a7bc640052f1b935f7b800330d7e0"
|
||||
},
|
||||
{
|
||||
"ImportPath": "github.com/mailru/easyjson/jlexer",
|
||||
"Rev": "d5b7844b561a7bc640052f1b935f7b800330d7e0"
|
||||
},
|
||||
{
|
||||
"ImportPath": "github.com/mailru/easyjson/jwriter",
|
||||
"Rev": "d5b7844b561a7bc640052f1b935f7b800330d7e0"
|
||||
},
|
||||
{
|
||||
"ImportPath": "github.com/pborman/uuid",
|
||||
"Rev": "ca53cad383cad2479bbba7f7a1a05797ec1386e4"
|
||||
|
@ -87,6 +128,10 @@
|
|||
"ImportPath": "github.com/ugorji/go/codec",
|
||||
"Rev": "f1f1a805ed361a0e078bb537e4ea78cd37dcf065"
|
||||
},
|
||||
{
|
||||
"ImportPath": "golang.org/x/crypto/ssh/terminal",
|
||||
"Rev": "1f22c0103821b9390939b6776727195525381532"
|
||||
},
|
||||
{
|
||||
"ImportPath": "golang.org/x/net/context",
|
||||
"Rev": "e90d6d0afc4c315a0d87a568ae68577cc15149a0"
|
||||
|
@ -99,10 +144,58 @@
|
|||
"ImportPath": "golang.org/x/net/http2/hpack",
|
||||
"Rev": "e90d6d0afc4c315a0d87a568ae68577cc15149a0"
|
||||
},
|
||||
{
|
||||
"ImportPath": "golang.org/x/net/idna",
|
||||
"Rev": "e90d6d0afc4c315a0d87a568ae68577cc15149a0"
|
||||
},
|
||||
{
|
||||
"ImportPath": "golang.org/x/net/lex/httplex",
|
||||
"Rev": "e90d6d0afc4c315a0d87a568ae68577cc15149a0"
|
||||
},
|
||||
{
|
||||
"ImportPath": "golang.org/x/sys/unix",
|
||||
"Rev": "9c60d1c508f5134d1ca726b4641db998f2523357"
|
||||
},
|
||||
{
|
||||
"ImportPath": "golang.org/x/text/cases",
|
||||
"Rev": "2910a502d2bf9e43193af9d68ca516529614eed3"
|
||||
},
|
||||
{
|
||||
"ImportPath": "golang.org/x/text/internal/tag",
|
||||
"Rev": "2910a502d2bf9e43193af9d68ca516529614eed3"
|
||||
},
|
||||
{
|
||||
"ImportPath": "golang.org/x/text/language",
|
||||
"Rev": "2910a502d2bf9e43193af9d68ca516529614eed3"
|
||||
},
|
||||
{
|
||||
"ImportPath": "golang.org/x/text/runes",
|
||||
"Rev": "2910a502d2bf9e43193af9d68ca516529614eed3"
|
||||
},
|
||||
{
|
||||
"ImportPath": "golang.org/x/text/secure/bidirule",
|
||||
"Rev": "2910a502d2bf9e43193af9d68ca516529614eed3"
|
||||
},
|
||||
{
|
||||
"ImportPath": "golang.org/x/text/secure/precis",
|
||||
"Rev": "2910a502d2bf9e43193af9d68ca516529614eed3"
|
||||
},
|
||||
{
|
||||
"ImportPath": "golang.org/x/text/transform",
|
||||
"Rev": "2910a502d2bf9e43193af9d68ca516529614eed3"
|
||||
},
|
||||
{
|
||||
"ImportPath": "golang.org/x/text/unicode/bidi",
|
||||
"Rev": "2910a502d2bf9e43193af9d68ca516529614eed3"
|
||||
},
|
||||
{
|
||||
"ImportPath": "golang.org/x/text/unicode/norm",
|
||||
"Rev": "2910a502d2bf9e43193af9d68ca516529614eed3"
|
||||
},
|
||||
{
|
||||
"ImportPath": "golang.org/x/text/width",
|
||||
"Rev": "2910a502d2bf9e43193af9d68ca516529614eed3"
|
||||
},
|
||||
{
|
||||
"ImportPath": "gopkg.in/inf.v0",
|
||||
"Comment": "v0.9.0",
|
5	staging/src/k8s.io/client-go/1.5/_vendor/github.com/PuerkitoBio/purell/.gitignore generated vendored Normal file
@@ -0,0 +1,5 @@
*.sublime-*
.DS_Store
*.swp
*.swo
tags
12	staging/src/k8s.io/client-go/1.5/_vendor/github.com/PuerkitoBio/purell/LICENSE generated Normal file
@@ -0,0 +1,12 @@
Copyright (c) 2012, Martin Angers
All rights reserved.

Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met:

* Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer.

* Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution.

* Neither the name of the author nor the names of its contributors may be used to endorse or promote products derived from this software without specific prior written permission.

THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
375	staging/src/k8s.io/client-go/1.5/_vendor/github.com/PuerkitoBio/purell/purell.go generated Normal file
@@ -0,0 +1,375 @@
/*
Package purell offers URL normalization as described on the wikipedia page:
http://en.wikipedia.org/wiki/URL_normalization
*/
package purell

import (
	"bytes"
	"fmt"
	"net/url"
	"regexp"
	"sort"
	"strconv"
	"strings"

	"github.com/PuerkitoBio/urlesc"
	"golang.org/x/net/idna"
	"golang.org/x/text/secure/precis"
	"golang.org/x/text/unicode/norm"
)

// A set of normalization flags determines how a URL will
// be normalized.
type NormalizationFlags uint

const (
	// Safe normalizations
	FlagLowercaseScheme           NormalizationFlags = 1 << iota // HTTP://host -> http://host, applied by default in Go1.1
	FlagLowercaseHost                                            // http://HOST -> http://host
	FlagUppercaseEscapes                                         // http://host/t%ef -> http://host/t%EF
	FlagDecodeUnnecessaryEscapes                                 // http://host/t%41 -> http://host/tA
	FlagEncodeNecessaryEscapes                                   // http://host/!"#$ -> http://host/%21%22#$
	FlagRemoveDefaultPort                                        // http://host:80 -> http://host
	FlagRemoveEmptyQuerySeparator                                // http://host/path? -> http://host/path

	// Usually safe normalizations
	FlagRemoveTrailingSlash // http://host/path/ -> http://host/path
	FlagAddTrailingSlash    // http://host/path -> http://host/path/ (should choose only one of these add/remove trailing slash flags)
	FlagRemoveDotSegments   // http://host/path/./a/b/../c -> http://host/path/a/c

	// Unsafe normalizations
	FlagRemoveDirectoryIndex   // http://host/path/index.html -> http://host/path/
	FlagRemoveFragment         // http://host/path#fragment -> http://host/path
	FlagForceHTTP              // https://host -> http://host
	FlagRemoveDuplicateSlashes // http://host/path//a///b -> http://host/path/a/b
	FlagRemoveWWW              // http://www.host/ -> http://host/
	FlagAddWWW                 // http://host/ -> http://www.host/ (should choose only one of these add/remove WWW flags)
	FlagSortQuery              // http://host/path?c=3&b=2&a=1&b=1 -> http://host/path?a=1&b=1&b=2&c=3

	// Normalizations not in the wikipedia article, required to cover tests cases
	// submitted by jehiah
	FlagDecodeDWORDHost           // http://1113982867 -> http://66.102.7.147
	FlagDecodeOctalHost           // http://0102.0146.07.0223 -> http://66.102.7.147
	FlagDecodeHexHost             // http://0x42660793 -> http://66.102.7.147
	FlagRemoveUnnecessaryHostDots // http://.host../path -> http://host/path
	FlagRemoveEmptyPortSeparator  // http://host:/path -> http://host/path

	// Convenience set of safe normalizations
	FlagsSafe NormalizationFlags = FlagLowercaseHost | FlagLowercaseScheme | FlagUppercaseEscapes | FlagDecodeUnnecessaryEscapes | FlagEncodeNecessaryEscapes | FlagRemoveDefaultPort | FlagRemoveEmptyQuerySeparator

	// For convenience sets, "greedy" uses the "remove trailing slash" and "remove www. prefix" flags,
	// while "non-greedy" uses the "add (or keep) the trailing slash" and "add www. prefix".

	// Convenience set of usually safe normalizations (includes FlagsSafe)
	FlagsUsuallySafeGreedy    NormalizationFlags = FlagsSafe | FlagRemoveTrailingSlash | FlagRemoveDotSegments
	FlagsUsuallySafeNonGreedy NormalizationFlags = FlagsSafe | FlagAddTrailingSlash | FlagRemoveDotSegments

	// Convenience set of unsafe normalizations (includes FlagsUsuallySafe)
	FlagsUnsafeGreedy    NormalizationFlags = FlagsUsuallySafeGreedy | FlagRemoveDirectoryIndex | FlagRemoveFragment | FlagForceHTTP | FlagRemoveDuplicateSlashes | FlagRemoveWWW | FlagSortQuery
	FlagsUnsafeNonGreedy NormalizationFlags = FlagsUsuallySafeNonGreedy | FlagRemoveDirectoryIndex | FlagRemoveFragment | FlagForceHTTP | FlagRemoveDuplicateSlashes | FlagAddWWW | FlagSortQuery

	// Convenience set of all available flags
	FlagsAllGreedy    = FlagsUnsafeGreedy | FlagDecodeDWORDHost | FlagDecodeOctalHost | FlagDecodeHexHost | FlagRemoveUnnecessaryHostDots | FlagRemoveEmptyPortSeparator
	FlagsAllNonGreedy = FlagsUnsafeNonGreedy | FlagDecodeDWORDHost | FlagDecodeOctalHost | FlagDecodeHexHost | FlagRemoveUnnecessaryHostDots | FlagRemoveEmptyPortSeparator
)

const (
	defaultHttpPort  = ":80"
	defaultHttpsPort = ":443"
)

// Regular expressions used by the normalizations
var rxPort = regexp.MustCompile(`(:\d+)/?$`)
var rxDirIndex = regexp.MustCompile(`(^|/)((?:default|index)\.\w{1,4})$`)
var rxDupSlashes = regexp.MustCompile(`/{2,}`)
var rxDWORDHost = regexp.MustCompile(`^(\d+)((?:\.+)?(?:\:\d*)?)$`)
var rxOctalHost = regexp.MustCompile(`^(0\d*)\.(0\d*)\.(0\d*)\.(0\d*)((?:\.+)?(?:\:\d*)?)$`)
var rxHexHost = regexp.MustCompile(`^0x([0-9A-Fa-f]+)((?:\.+)?(?:\:\d*)?)$`)
var rxHostDots = regexp.MustCompile(`^(.+?)(:\d+)?$`)
var rxEmptyPort = regexp.MustCompile(`:+$`)

// Map of flags to implementation function.
// FlagDecodeUnnecessaryEscapes has no action, since it is done automatically
// by parsing the string as an URL. Same for FlagUppercaseEscapes and FlagRemoveEmptyQuerySeparator.

// Since maps have undefined traversing order, make a slice of ordered keys
var flagsOrder = []NormalizationFlags{
	FlagLowercaseScheme,
	FlagLowercaseHost,
	FlagRemoveDefaultPort,
	FlagRemoveDirectoryIndex,
	FlagRemoveDotSegments,
	FlagRemoveFragment,
	FlagForceHTTP, // Must be after remove default port (because https=443/http=80)
	FlagRemoveDuplicateSlashes,
	FlagRemoveWWW,
	FlagAddWWW,
	FlagSortQuery,
	FlagDecodeDWORDHost,
	FlagDecodeOctalHost,
	FlagDecodeHexHost,
	FlagRemoveUnnecessaryHostDots,
	FlagRemoveEmptyPortSeparator,
	FlagRemoveTrailingSlash, // These two (add/remove trailing slash) must be last
	FlagAddTrailingSlash,
}

// ... and then the map, where order is unimportant
var flags = map[NormalizationFlags]func(*url.URL){
	FlagLowercaseScheme:           lowercaseScheme,
	FlagLowercaseHost:             lowercaseHost,
	FlagRemoveDefaultPort:         removeDefaultPort,
	FlagRemoveDirectoryIndex:      removeDirectoryIndex,
	FlagRemoveDotSegments:         removeDotSegments,
	FlagRemoveFragment:            removeFragment,
	FlagForceHTTP:                 forceHTTP,
	FlagRemoveDuplicateSlashes:    removeDuplicateSlashes,
	FlagRemoveWWW:                 removeWWW,
	FlagAddWWW:                    addWWW,
	FlagSortQuery:                 sortQuery,
	FlagDecodeDWORDHost:           decodeDWORDHost,
	FlagDecodeOctalHost:           decodeOctalHost,
	FlagDecodeHexHost:             decodeHexHost,
	FlagRemoveUnnecessaryHostDots: removeUnncessaryHostDots,
	FlagRemoveEmptyPortSeparator:  removeEmptyPortSeparator,
	FlagRemoveTrailingSlash:       removeTrailingSlash,
	FlagAddTrailingSlash:          addTrailingSlash,
}

// MustNormalizeURLString returns the normalized string, and panics if an error occurs.
// It takes an URL string as input, as well as the normalization flags.
func MustNormalizeURLString(u string, f NormalizationFlags) string {
	result, e := NormalizeURLString(u, f)
	if e != nil {
		panic(e)
	}
	return result
}

// NormalizeURLString returns the normalized string, or an error if it can't be parsed into an URL object.
// It takes an URL string as input, as well as the normalization flags.
func NormalizeURLString(u string, f NormalizationFlags) (string, error) {
	if parsed, e := url.Parse(u); e != nil {
		return "", e
	} else {
		options := make([]precis.Option, 1, 3)
		options[0] = precis.IgnoreCase
		if f&FlagLowercaseHost == FlagLowercaseHost {
			options = append(options, precis.FoldCase())
		}
		options = append(options, precis.Norm(norm.NFC))
		profile := precis.NewFreeform(options...)
		if parsed.Host, e = idna.ToASCII(profile.NewTransformer().String(parsed.Host)); e != nil {
			return "", e
		}
		return NormalizeURL(parsed, f), nil
	}
	panic("Unreachable code.")
}

// NormalizeURL returns the normalized string.
// It takes a parsed URL object as input, as well as the normalization flags.
func NormalizeURL(u *url.URL, f NormalizationFlags) string {
	for _, k := range flagsOrder {
		if f&k == k {
			flags[k](u)
		}
	}
	return urlesc.Escape(u)
}

func lowercaseScheme(u *url.URL) {
	if len(u.Scheme) > 0 {
		u.Scheme = strings.ToLower(u.Scheme)
	}
}

func lowercaseHost(u *url.URL) {
	if len(u.Host) > 0 {
		u.Host = strings.ToLower(u.Host)
	}
}

func removeDefaultPort(u *url.URL) {
	if len(u.Host) > 0 {
		scheme := strings.ToLower(u.Scheme)
		u.Host = rxPort.ReplaceAllStringFunc(u.Host, func(val string) string {
			if (scheme == "http" && val == defaultHttpPort) || (scheme == "https" && val == defaultHttpsPort) {
				return ""
			}
			return val
		})
	}
}

func removeTrailingSlash(u *url.URL) {
	if l := len(u.Path); l > 0 {
		if strings.HasSuffix(u.Path, "/") {
			u.Path = u.Path[:l-1]
		}
	} else if l = len(u.Host); l > 0 {
		if strings.HasSuffix(u.Host, "/") {
			u.Host = u.Host[:l-1]
		}
	}
}

func addTrailingSlash(u *url.URL) {
	if l := len(u.Path); l > 0 {
		if !strings.HasSuffix(u.Path, "/") {
			u.Path += "/"
		}
	} else if l = len(u.Host); l > 0 {
		if !strings.HasSuffix(u.Host, "/") {
			u.Host += "/"
		}
	}
}

func removeDotSegments(u *url.URL) {
	if len(u.Path) > 0 {
		var dotFree []string
		var lastIsDot bool

		sections := strings.Split(u.Path, "/")
		for _, s := range sections {
			if s == ".." {
				if len(dotFree) > 0 {
					dotFree = dotFree[:len(dotFree)-1]
				}
			} else if s != "." {
				dotFree = append(dotFree, s)
			}
			lastIsDot = (s == "." || s == "..")
		}
		// Special case if host does not end with / and new path does not begin with /
		u.Path = strings.Join(dotFree, "/")
		if u.Host != "" && !strings.HasSuffix(u.Host, "/") && !strings.HasPrefix(u.Path, "/") {
			u.Path = "/" + u.Path
		}
		// Special case if the last segment was a dot, make sure the path ends with a slash
		if lastIsDot && !strings.HasSuffix(u.Path, "/") {
			u.Path += "/"
		}
	}
}

func removeDirectoryIndex(u *url.URL) {
	if len(u.Path) > 0 {
		u.Path = rxDirIndex.ReplaceAllString(u.Path, "$1")
	}
}

func removeFragment(u *url.URL) {
	u.Fragment = ""
}

func forceHTTP(u *url.URL) {
	if strings.ToLower(u.Scheme) == "https" {
		u.Scheme = "http"
	}
}

func removeDuplicateSlashes(u *url.URL) {
	if len(u.Path) > 0 {
		u.Path = rxDupSlashes.ReplaceAllString(u.Path, "/")
	}
}

func removeWWW(u *url.URL) {
	if len(u.Host) > 0 && strings.HasPrefix(strings.ToLower(u.Host), "www.") {
		u.Host = u.Host[4:]
	}
}

func addWWW(u *url.URL) {
	if len(u.Host) > 0 && !strings.HasPrefix(strings.ToLower(u.Host), "www.") {
		u.Host = "www." + u.Host
	}
}

func sortQuery(u *url.URL) {
	q := u.Query()

	if len(q) > 0 {
		arKeys := make([]string, len(q))
		i := 0
		for k, _ := range q {
			arKeys[i] = k
			i++
		}
		sort.Strings(arKeys)
		buf := new(bytes.Buffer)
		for _, k := range arKeys {
			sort.Strings(q[k])
			for _, v := range q[k] {
				if buf.Len() > 0 {
					buf.WriteRune('&')
				}
				buf.WriteString(fmt.Sprintf("%s=%s", k, urlesc.QueryEscape(v)))
			}
		}

		// Rebuild the raw query string
		u.RawQuery = buf.String()
	}
}

func decodeDWORDHost(u *url.URL) {
	if len(u.Host) > 0 {
		if matches := rxDWORDHost.FindStringSubmatch(u.Host); len(matches) > 2 {
			var parts [4]int64

			dword, _ := strconv.ParseInt(matches[1], 10, 0)
			for i, shift := range []uint{24, 16, 8, 0} {
				parts[i] = dword >> shift & 0xFF
			}
			u.Host = fmt.Sprintf("%d.%d.%d.%d%s", parts[0], parts[1], parts[2], parts[3], matches[2])
		}
	}
}

func decodeOctalHost(u *url.URL) {
	if len(u.Host) > 0 {
		if matches := rxOctalHost.FindStringSubmatch(u.Host); len(matches) > 5 {
			var parts [4]int64

			for i := 1; i <= 4; i++ {
				parts[i-1], _ = strconv.ParseInt(matches[i], 8, 0)
			}
			u.Host = fmt.Sprintf("%d.%d.%d.%d%s", parts[0], parts[1], parts[2], parts[3], matches[5])
		}
	}
}

func decodeHexHost(u *url.URL) {
	if len(u.Host) > 0 {
		if matches := rxHexHost.FindStringSubmatch(u.Host); len(matches) > 2 {
			// Conversion is safe because of regex validation
			parsed, _ := strconv.ParseInt(matches[1], 16, 0)
			// Set host as DWORD (base 10) encoded host
			u.Host = fmt.Sprintf("%d%s", parsed, matches[2])
			// The rest is the same as decoding a DWORD host
			decodeDWORDHost(u)
		}
	}
}

func removeUnncessaryHostDots(u *url.URL) {
	if len(u.Host) > 0 {
		if matches := rxHostDots.FindStringSubmatch(u.Host); len(matches) > 1 {
			// Trim the leading and trailing dots
			u.Host = strings.Trim(matches[1], ".")
			if len(matches) > 2 {
				u.Host += matches[2]
			}
		}
	}
}

func removeEmptyPortSeparator(u *url.URL) {
	if len(u.Host) > 0 {
		u.Host = rxEmptyPort.ReplaceAllString(u.Host, "")
	}
}
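The vendored purell package above is driven entirely by NormalizationFlags: callers OR individual flags together, or start from a convenience set such as FlagsSafe or FlagsUsuallySafeGreedy, and pass the result to NormalizeURLString or NormalizeURL. A minimal usage sketch follows; it is not part of the vendored file, and the chosen flag combination and sample URL are assumptions for illustration only.

package main

import (
	"fmt"

	"github.com/PuerkitoBio/purell"
)

func main() {
	// Start from the "usually safe, greedy" convenience set and additionally sort query keys.
	flags := purell.FlagsUsuallySafeGreedy | purell.FlagSortQuery

	normalized, err := purell.NormalizeURLString("HTTP://Example.COM:80/a/../b/?z=1&a=2", flags)
	if err != nil {
		// NormalizeURLString only fails if the input cannot be parsed as a URL.
		panic(err)
	}
	// With these flags the scheme/host are lowercased, the default port, dot segments and
	// trailing slash are removed, and the query is sorted; expected along the lines of
	// http://example.com/b?a=2&z=1
	fmt.Println(normalized)
}

MustNormalizeURLString wraps the same call and panics on parse errors, which is convenient for normalizing compile-time constants.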
27	staging/src/k8s.io/client-go/1.5/_vendor/github.com/PuerkitoBio/urlesc/LICENSE generated Normal file
@@ -0,0 +1,27 @@
Copyright (c) 2012 The Go Authors. All rights reserved.

Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are
met:

   * Redistributions of source code must retain the above copyright
notice, this list of conditions and the following disclaimer.
   * Redistributions in binary form must reproduce the above
copyright notice, this list of conditions and the following disclaimer
in the documentation and/or other materials provided with the
distribution.
   * Neither the name of Google Inc. nor the names of its
contributors may be used to endorse or promote products derived from
this software without specific prior written permission.

THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
180	staging/src/k8s.io/client-go/1.5/_vendor/github.com/PuerkitoBio/urlesc/urlesc.go generated Normal file
@@ -0,0 +1,180 @@
// Copyright 2009 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

// Package urlesc implements query escaping as per RFC 3986.
// It contains some parts of the net/url package, modified so as to allow
// some reserved characters incorrectly escaped by net/url.
// See https://github.com/golang/go/issues/5684
package urlesc

import (
	"bytes"
	"net/url"
	"strings"
)

type encoding int

const (
	encodePath encoding = 1 + iota
	encodeUserPassword
	encodeQueryComponent
	encodeFragment
)

// Return true if the specified character should be escaped when
// appearing in a URL string, according to RFC 3986.
func shouldEscape(c byte, mode encoding) bool {
	// §2.3 Unreserved characters (alphanum)
	if 'A' <= c && c <= 'Z' || 'a' <= c && c <= 'z' || '0' <= c && c <= '9' {
		return false
	}

	switch c {
	case '-', '.', '_', '~': // §2.3 Unreserved characters (mark)
		return false

	// §2.2 Reserved characters (reserved)
	case ':', '/', '?', '#', '[', ']', '@', // gen-delims
		'!', '$', '&', '\'', '(', ')', '*', '+', ',', ';', '=': // sub-delims
		// Different sections of the URL allow a few of
		// the reserved characters to appear unescaped.
		switch mode {
		case encodePath: // §3.3
			// The RFC allows sub-delims and : @.
			// '/', '[' and ']' can be used to assign meaning to individual path
			// segments. This package only manipulates the path as a whole,
			// so we allow those as well. That leaves only ? and # to escape.
			return c == '?' || c == '#'

		case encodeUserPassword: // §3.2.1
			// The RFC allows : and sub-delims in
			// userinfo. The parsing of userinfo treats ':' as special so we must escape
			// all the gen-delims.
			return c == ':' || c == '/' || c == '?' || c == '#' || c == '[' || c == ']' || c == '@'

		case encodeQueryComponent: // §3.4
			// The RFC allows / and ?.
			return c != '/' && c != '?'

		case encodeFragment: // §4.1
			// The RFC text is silent but the grammar allows
			// everything, so escape nothing but #
			return c == '#'
		}
	}

	// Everything else must be escaped.
	return true
}

// QueryEscape escapes the string so it can be safely placed
// inside a URL query.
func QueryEscape(s string) string {
	return escape(s, encodeQueryComponent)
}

func escape(s string, mode encoding) string {
	spaceCount, hexCount := 0, 0
	for i := 0; i < len(s); i++ {
		c := s[i]
		if shouldEscape(c, mode) {
			if c == ' ' && mode == encodeQueryComponent {
				spaceCount++
			} else {
				hexCount++
			}
		}
	}

	if spaceCount == 0 && hexCount == 0 {
		return s
	}

	t := make([]byte, len(s)+2*hexCount)
	j := 0
	for i := 0; i < len(s); i++ {
		switch c := s[i]; {
		case c == ' ' && mode == encodeQueryComponent:
			t[j] = '+'
			j++
		case shouldEscape(c, mode):
			t[j] = '%'
			t[j+1] = "0123456789ABCDEF"[c>>4]
			t[j+2] = "0123456789ABCDEF"[c&15]
			j += 3
		default:
			t[j] = s[i]
			j++
		}
	}
	return string(t)
}

var uiReplacer = strings.NewReplacer(
	"%21", "!",
	"%27", "'",
	"%28", "(",
	"%29", ")",
	"%2A", "*",
)

// unescapeUserinfo unescapes some characters that need not to be escaped as per RFC3986.
func unescapeUserinfo(s string) string {
	return uiReplacer.Replace(s)
}

// Escape reassembles the URL into a valid URL string.
// The general form of the result is one of:
//
//	scheme:opaque
//	scheme://userinfo@host/path?query#fragment
//
// If u.Opaque is non-empty, String uses the first form;
// otherwise it uses the second form.
//
// In the second form, the following rules apply:
//	- if u.Scheme is empty, scheme: is omitted.
//	- if u.User is nil, userinfo@ is omitted.
//	- if u.Host is empty, host/ is omitted.
//	- if u.Scheme and u.Host are empty and u.User is nil,
//	  the entire scheme://userinfo@host/ is omitted.
//	- if u.Host is non-empty and u.Path begins with a /,
//	  the form host/path does not add its own /.
//	- if u.RawQuery is empty, ?query is omitted.
//	- if u.Fragment is empty, #fragment is omitted.
func Escape(u *url.URL) string {
	var buf bytes.Buffer
	if u.Scheme != "" {
		buf.WriteString(u.Scheme)
		buf.WriteByte(':')
	}
	if u.Opaque != "" {
		buf.WriteString(u.Opaque)
	} else {
		if u.Scheme != "" || u.Host != "" || u.User != nil {
			buf.WriteString("//")
			if ui := u.User; ui != nil {
				buf.WriteString(unescapeUserinfo(ui.String()))
				buf.WriteByte('@')
			}
			if h := u.Host; h != "" {
				buf.WriteString(h)
			}
		}
		if u.Path != "" && u.Path[0] != '/' && u.Host != "" {
			buf.WriteByte('/')
		}
		buf.WriteString(escape(u.Path, encodePath))
	}
	if u.RawQuery != "" {
		buf.WriteByte('?')
		buf.WriteString(u.RawQuery)
	}
	if u.Fragment != "" {
		buf.WriteByte('#')
		buf.WriteString(escape(u.Fragment, encodeFragment))
	}
	return buf.String()
}
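The vendored urlesc package exposes two entry points that purell relies on: QueryEscape for query components and Escape for re-serializing a parsed *url.URL without over-escaping RFC 3986 reserved characters. A small usage sketch, not part of the vendored file; the sample strings are assumptions for illustration only.

package main

import (
	"fmt"
	"net/url"

	"github.com/PuerkitoBio/urlesc"
)

func main() {
	// QueryEscape escapes a string for use inside a query component,
	// turning spaces into '+' and percent-encoding the rest.
	fmt.Println(urlesc.QueryEscape("a b&c=d")) // a+b%26c%3Dd

	// Escape re-serializes a parsed URL, keeping reserved characters such as
	// '(' and ')' unescaped in the path (the net/url behavior this package works around).
	u, err := url.Parse("http://example.com/foo(bar)/baz?q=1")
	if err != nil {
		panic(err)
	}
	fmt.Println(urlesc.Escape(u)) // http://example.com/foo(bar)/baz?q=1
}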
Some files were not shown because too many files have changed in this diff.