mirror of https://github.com/k3s-io/k3s
parent 5b87513972
commit 63020d5f72

@ -101,8 +101,13 @@
},
{
"ImportPath": "github.com/Microsoft/go-winio",
"Comment": "v0.4.2",
"Rev": "f533f7a102197536779ea3a8cb881d639e21ec5a"
"Comment": "v0.4.5",
"Rev": "78439966b38d69bf38227fbf57ac8a6fee70f69a"
},
{
"ImportPath": "github.com/Microsoft/hcsshim",
"Comment": "V0.6.3",
"Rev": "6ea7fe54f719d95721e7d9b26ac0add224c9b923"
},
{
"ImportPath": "github.com/NYTimes/gziphandler",
@ -68207,6 +68207,34 @@ SOFTWARE.
================================================================================


================================================================================
= vendor/github.com/Microsoft/hcsshim licensed under: =

The MIT License (MIT)

Copyright (c) 2015 Microsoft

Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:

The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.

THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
= vendor/github.com/Microsoft/hcsshim/LICENSE d4c2cbbea5ee1e7c86dff68a7073718e -
================================================================================


================================================================================
= vendor/github.com/miekg/coredns/middleware/etcd/msg licensed under: =

@ -80979,6 +81007,35 @@ THE SOFTWARE.
================================================================================


================================================================================
= vendor/github.com/sirupsen/logrus licensed under: =

The MIT License (MIT)

Copyright (c) 2014 Simon Eskildsen

Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:

The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.

THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.

= vendor/github.com/sirupsen/logrus/LICENSE 8dadfef729c08ec4e631c4f6fc5d43a0 -
================================================================================


================================================================================
= vendor/github.com/spf13/afero licensed under: =

@ -11,8 +11,15 @@ go_library(
|
|||
srcs = [
|
||||
"conntrack.go",
|
||||
"server.go",
|
||||
"server_linux.go",
|
||||
],
|
||||
] + select({
|
||||
"@io_bazel_rules_go//go/platform:linux_amd64": [
|
||||
"server_linux.go",
|
||||
],
|
||||
"@io_bazel_rules_go//go/platform:windows_amd64": [
|
||||
"server_windows.go",
|
||||
],
|
||||
"//conditions:default": [],
|
||||
}),
|
||||
deps = [
|
||||
"//pkg/api:go_default_library",
|
||||
"//pkg/apis/componentconfig:go_default_library",
|
||||
|
@ -30,7 +37,6 @@ go_library(
|
|||
"//pkg/proxy/ipvs:go_default_library",
|
||||
"//pkg/proxy/userspace:go_default_library",
|
||||
"//pkg/util/configz:go_default_library",
|
||||
"//pkg/util/dbus:go_default_library",
|
||||
"//pkg/util/iptables:go_default_library",
|
||||
"//pkg/util/ipvs:go_default_library",
|
||||
"//pkg/util/mount:go_default_library",
|
||||
|
@ -49,8 +55,6 @@ go_library(
|
|||
"//vendor/k8s.io/apimachinery/pkg/runtime:go_default_library",
|
||||
"//vendor/k8s.io/apimachinery/pkg/runtime/serializer:go_default_library",
|
||||
"//vendor/k8s.io/apimachinery/pkg/runtime/serializer/json:go_default_library",
|
||||
"//vendor/k8s.io/apimachinery/pkg/types:go_default_library",
|
||||
"//vendor/k8s.io/apimachinery/pkg/util/net:go_default_library",
|
||||
"//vendor/k8s.io/apimachinery/pkg/util/runtime:go_default_library",
|
||||
"//vendor/k8s.io/apimachinery/pkg/util/wait:go_default_library",
|
||||
"//vendor/k8s.io/apiserver/pkg/server/healthz:go_default_library",
|
||||
|
@ -60,8 +64,23 @@ go_library(
|
|||
"//vendor/k8s.io/client-go/tools/clientcmd:go_default_library",
|
||||
"//vendor/k8s.io/client-go/tools/clientcmd/api:go_default_library",
|
||||
"//vendor/k8s.io/client-go/tools/record:go_default_library",
|
||||
"//vendor/k8s.io/utils/exec:go_default_library",
|
||||
],
|
||||
] + select({
|
||||
"@io_bazel_rules_go//go/platform:linux_amd64": [
|
||||
"//pkg/util/dbus:go_default_library",
|
||||
"//vendor/k8s.io/apimachinery/pkg/types:go_default_library",
|
||||
"//vendor/k8s.io/apimachinery/pkg/util/net:go_default_library",
|
||||
"//vendor/k8s.io/utils/exec:go_default_library",
|
||||
],
|
||||
"@io_bazel_rules_go//go/platform:windows_amd64": [
|
||||
"//pkg/proxy/winkernel:go_default_library",
|
||||
"//pkg/proxy/winuserspace:go_default_library",
|
||||
"//pkg/util/netsh:go_default_library",
|
||||
"//vendor/k8s.io/apimachinery/pkg/types:go_default_library",
|
||||
"//vendor/k8s.io/apimachinery/pkg/util/net:go_default_library",
|
||||
"//vendor/k8s.io/utils/exec:go_default_library",
|
||||
],
|
||||
"//conditions:default": [],
|
||||
}),
|
||||
)
|
||||
|
||||
go_test(
|
||||
|
|
|
@ -1,59 +1,64 @@
|
|||
package(default_visibility = ["//visibility:public"])
|
||||
|
||||
licenses(["notice"])
|
||||
|
||||
load(
|
||||
"@io_bazel_rules_go//go:def.bzl",
|
||||
"go_library",
|
||||
"go_test",
|
||||
)
|
||||
|
||||
go_test(
|
||||
name = "go_default_test",
|
||||
srcs = ["proxier_test.go"],
|
||||
library = ":go_default_library",
|
||||
tags = ["automanaged"],
|
||||
deps = [
|
||||
"//pkg/api:go_default_library",
|
||||
"//pkg/proxy:go_default_library",
|
||||
"//pkg/util/async:go_default_library",
|
||||
"//pkg/util/iptables:go_default_library",
|
||||
"//pkg/util/iptables/testing:go_default_library",
|
||||
"//vendor/github.com/davecgh/go-spew/spew:go_default_library",
|
||||
"//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
|
||||
"//vendor/k8s.io/apimachinery/pkg/types:go_default_library",
|
||||
"//vendor/k8s.io/apimachinery/pkg/util/intstr:go_default_library",
|
||||
"//vendor/k8s.io/apimachinery/pkg/util/sets:go_default_library",
|
||||
"//vendor/k8s.io/utils/exec:go_default_library",
|
||||
"//vendor/k8s.io/utils/exec/testing:go_default_library",
|
||||
],
|
||||
)
|
||||
load("@io_bazel_rules_go//go:def.bzl", "go_library", "go_test")
|
||||
|
||||
go_library(
|
||||
name = "go_default_library",
|
||||
srcs = [
|
||||
"metrics.go",
|
||||
"proxier.go",
|
||||
],
|
||||
tags = ["automanaged"],
|
||||
] + select({
|
||||
"@io_bazel_rules_go//go/platform:windows_amd64": [
|
||||
"proxier.go",
|
||||
],
|
||||
"//conditions:default": [],
|
||||
}),
|
||||
visibility = ["//visibility:public"],
|
||||
deps = [
|
||||
"//pkg/api:go_default_library",
|
||||
"//pkg/api/helper:go_default_library",
|
||||
"//pkg/api/service:go_default_library",
|
||||
"//pkg/features:go_default_library",
|
||||
"//pkg/proxy:go_default_library",
|
||||
"//pkg/proxy/healthcheck:go_default_library",
|
||||
"//pkg/util/async:go_default_library",
|
||||
"//vendor/github.com/Microsoft/hcsshim:go_default_library",
|
||||
"//vendor/github.com/davecgh/go-spew/spew:go_default_library",
|
||||
"//vendor/github.com/golang/glog:go_default_library",
|
||||
"//vendor/github.com/prometheus/client_golang/prometheus:go_default_library",
|
||||
"//vendor/k8s.io/apimachinery/pkg/types:go_default_library",
|
||||
"//vendor/k8s.io/apimachinery/pkg/util/sets:go_default_library",
|
||||
"//vendor/k8s.io/apimachinery/pkg/util/wait:go_default_library",
|
||||
"//vendor/k8s.io/apiserver/pkg/util/feature:go_default_library",
|
||||
"//vendor/k8s.io/client-go/tools/record:go_default_library",
|
||||
],
|
||||
] + select({
|
||||
"@io_bazel_rules_go//go/platform:windows_amd64": [
|
||||
"//pkg/api:go_default_library",
|
||||
"//pkg/api/helper:go_default_library",
|
||||
"//pkg/api/service:go_default_library",
|
||||
"//pkg/features:go_default_library",
|
||||
"//pkg/proxy:go_default_library",
|
||||
"//pkg/proxy/healthcheck:go_default_library",
|
||||
"//pkg/util/async:go_default_library",
|
||||
"//vendor/github.com/Microsoft/hcsshim:go_default_library",
|
||||
"//vendor/github.com/davecgh/go-spew/spew:go_default_library",
|
||||
"//vendor/github.com/golang/glog:go_default_library",
|
||||
"//vendor/k8s.io/apimachinery/pkg/types:go_default_library",
|
||||
"//vendor/k8s.io/apimachinery/pkg/util/sets:go_default_library",
|
||||
"//vendor/k8s.io/apimachinery/pkg/util/wait:go_default_library",
|
||||
"//vendor/k8s.io/apiserver/pkg/util/feature:go_default_library",
|
||||
"//vendor/k8s.io/client-go/tools/record:go_default_library",
|
||||
],
|
||||
"//conditions:default": [],
|
||||
}),
|
||||
)
|
||||
|
||||
go_test(
|
||||
name = "go_default_test",
|
||||
srcs = select({
|
||||
"@io_bazel_rules_go//go/platform:windows_amd64": [
|
||||
"proxier_test.go",
|
||||
],
|
||||
"//conditions:default": [],
|
||||
}),
|
||||
library = ":go_default_library",
|
||||
deps = select({
|
||||
"@io_bazel_rules_go//go/platform:windows_amd64": [
|
||||
"//pkg/api:go_default_library",
|
||||
"//pkg/proxy:go_default_library",
|
||||
"//pkg/util/async:go_default_library",
|
||||
"//vendor/github.com/davecgh/go-spew/spew:go_default_library",
|
||||
"//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
|
||||
"//vendor/k8s.io/apimachinery/pkg/types:go_default_library",
|
||||
"//vendor/k8s.io/apimachinery/pkg/util/intstr:go_default_library",
|
||||
"//vendor/k8s.io/apimachinery/pkg/util/sets:go_default_library",
|
||||
"//vendor/k8s.io/utils/exec:go_default_library",
|
||||
"//vendor/k8s.io/utils/exec/testing:go_default_library",
|
||||
],
|
||||
"//conditions:default": [],
|
||||
}),
|
||||
)
|
||||
|
||||
filegroup(
|
||||
|
@ -67,4 +72,5 @@ filegroup(
|
|||
name = "all-srcs",
|
||||
srcs = [":package-srcs"],
|
||||
tags = ["automanaged"],
|
||||
visibility = ["//visibility:public"],
|
||||
)
|
||||
|
|
|
@ -1,3 +1,5 @@
// +build windows

/*
Copyright 2015 The Kubernetes Authors.

@ -63,7 +65,7 @@ func CanUseWinKernelProxier(kcompat KernelCompatTester) (bool, error) {

type WindowsKernelCompatTester struct{}

// TODO : Fix the below API to query the OS version
// Todo : Fix the below API to query the OS version
func (lkct WindowsKernelCompatTester) IsCompatible() error {
return nil
}

@ -1,3 +1,5 @@
// +build windows

/*
Copyright 2015 The Kubernetes Authors.

@ -17,9 +19,7 @@ limitations under the License.
package winkernel

import (
"bytes"
"reflect"
"strconv"
"testing"
"time"
@ -36,150 +36,14 @@ import (
|
|||
"k8s.io/kubernetes/pkg/api"
|
||||
"k8s.io/kubernetes/pkg/proxy"
|
||||
"k8s.io/kubernetes/pkg/util/async"
|
||||
utiliptables "k8s.io/kubernetes/pkg/util/iptables"
|
||||
iptablestest "k8s.io/kubernetes/pkg/util/iptables/testing"
|
||||
"k8s.io/utils/exec"
|
||||
fakeexec "k8s.io/utils/exec/testing"
|
||||
)
|
||||
|
||||
func checkAllLines(t *testing.T, table utiliptables.Table, save []byte, expectedLines map[utiliptables.Chain]string) {
|
||||
chainLines := utiliptables.GetChainLines(table, save)
|
||||
for chain, line := range chainLines {
|
||||
if expected, exists := expectedLines[chain]; exists {
|
||||
if expected != line {
|
||||
t.Errorf("getChainLines expected chain line not present. For chain: %s Expected: %s Got: %s", chain, expected, line)
|
||||
}
|
||||
} else {
|
||||
t.Errorf("getChainLines expected chain not present: %s", chain)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func TestReadLinesFromByteBuffer(t *testing.T) {
|
||||
testFn := func(byteArray []byte, expected []string) {
|
||||
index := 0
|
||||
readIndex := 0
|
||||
for ; readIndex < len(byteArray); index++ {
|
||||
line, n := utiliptables.ReadLine(readIndex, byteArray)
|
||||
readIndex = n
|
||||
if expected[index] != line {
|
||||
t.Errorf("expected:%q, actual:%q", expected[index], line)
|
||||
}
|
||||
} // for
|
||||
if readIndex < len(byteArray) {
|
||||
t.Errorf("Byte buffer was only partially read. Buffer length is:%d, readIndex is:%d", len(byteArray), readIndex)
|
||||
}
|
||||
if index < len(expected) {
|
||||
t.Errorf("All expected strings were not compared. expected arr length:%d, matched count:%d", len(expected), index-1)
|
||||
}
|
||||
}
|
||||
|
||||
byteArray1 := []byte("\n Line 1 \n\n\n L ine4 \nLine 5 \n \n")
|
||||
expected1 := []string{"", "Line 1", "", "", "L ine4", "Line 5", ""}
|
||||
testFn(byteArray1, expected1)
|
||||
|
||||
byteArray1 = []byte("")
|
||||
expected1 = []string{}
|
||||
testFn(byteArray1, expected1)
|
||||
|
||||
byteArray1 = []byte("\n\n")
|
||||
expected1 = []string{"", ""}
|
||||
testFn(byteArray1, expected1)
|
||||
}
|
||||
|
||||
func TestGetChainLines(t *testing.T) {
|
||||
iptables_save := `# Generated by iptables-save v1.4.7 on Wed Oct 29 14:56:01 2014
|
||||
*nat
|
||||
:PREROUTING ACCEPT [2136997:197881818]
|
||||
:POSTROUTING ACCEPT [4284525:258542680]
|
||||
:OUTPUT ACCEPT [5901660:357267963]
|
||||
-A PREROUTING -m addrtype --dst-type LOCAL -j DOCKER
|
||||
COMMIT
|
||||
# Completed on Wed Oct 29 14:56:01 2014`
|
||||
expected := map[utiliptables.Chain]string{
|
||||
utiliptables.ChainPrerouting: ":PREROUTING ACCEPT [2136997:197881818]",
|
||||
utiliptables.ChainPostrouting: ":POSTROUTING ACCEPT [4284525:258542680]",
|
||||
utiliptables.ChainOutput: ":OUTPUT ACCEPT [5901660:357267963]",
|
||||
}
|
||||
checkAllLines(t, utiliptables.TableNAT, []byte(iptables_save), expected)
|
||||
}
|
||||
|
||||
func TestGetChainLinesMultipleTables(t *testing.T) {
|
||||
iptables_save := `# Generated by iptables-save v1.4.21 on Fri Aug 7 14:47:37 2015
|
||||
*nat
|
||||
:PREROUTING ACCEPT [2:138]
|
||||
:INPUT ACCEPT [0:0]
|
||||
:OUTPUT ACCEPT [0:0]
|
||||
:POSTROUTING ACCEPT [0:0]
|
||||
:DOCKER - [0:0]
|
||||
:KUBE-NODEPORT-CONTAINER - [0:0]
|
||||
:KUBE-NODEPORT-HOST - [0:0]
|
||||
:KUBE-PORTALS-CONTAINER - [0:0]
|
||||
:KUBE-PORTALS-HOST - [0:0]
|
||||
:KUBE-SVC-1111111111111111 - [0:0]
|
||||
:KUBE-SVC-2222222222222222 - [0:0]
|
||||
:KUBE-SVC-3333333333333333 - [0:0]
|
||||
:KUBE-SVC-4444444444444444 - [0:0]
|
||||
:KUBE-SVC-5555555555555555 - [0:0]
|
||||
:KUBE-SVC-6666666666666666 - [0:0]
|
||||
-A PREROUTING -m comment --comment "handle ClusterIPs; NOTE: this must be before the NodePort rules" -j KUBE-PORTALS-CONTAINER
|
||||
-A PREROUTING -m addrtype --dst-type LOCAL -j DOCKER
|
||||
-A PREROUTING -m addrtype --dst-type LOCAL -m comment --comment "handle service NodePorts; NOTE: this must be the last rule in the chain" -j KUBE-NODEPORT-CONTAINER
|
||||
-A OUTPUT -m comment --comment "handle ClusterIPs; NOTE: this must be before the NodePort rules" -j KUBE-PORTALS-HOST
|
||||
-A OUTPUT ! -d 127.0.0.0/8 -m addrtype --dst-type LOCAL -j DOCKER
|
||||
-A OUTPUT -m addrtype --dst-type LOCAL -m comment --comment "handle service NodePorts; NOTE: this must be the last rule in the chain" -j KUBE-NODEPORT-HOST
|
||||
-A POSTROUTING -s 10.246.1.0/24 ! -o cbr0 -j MASQUERADE
|
||||
-A POSTROUTING -s 10.0.2.15/32 -d 10.0.2.15/32 -m comment --comment "handle pod connecting to self" -j MASQUERADE
|
||||
-A KUBE-PORTALS-CONTAINER -d 10.247.0.1/32 -p tcp -m comment --comment "portal for default/kubernetes:" -m state --state NEW -m tcp --dport 443 -j KUBE-SVC-5555555555555555
|
||||
-A KUBE-PORTALS-CONTAINER -d 10.247.0.10/32 -p udp -m comment --comment "portal for kube-system/kube-dns:dns" -m state --state NEW -m udp --dport 53 -j KUBE-SVC-6666666666666666
|
||||
-A KUBE-PORTALS-CONTAINER -d 10.247.0.10/32 -p tcp -m comment --comment "portal for kube-system/kube-dns:dns-tcp" -m state --state NEW -m tcp --dport 53 -j KUBE-SVC-2222222222222222
|
||||
-A KUBE-PORTALS-HOST -d 10.247.0.1/32 -p tcp -m comment --comment "portal for default/kubernetes:" -m state --state NEW -m tcp --dport 443 -j KUBE-SVC-5555555555555555
|
||||
-A KUBE-PORTALS-HOST -d 10.247.0.10/32 -p udp -m comment --comment "portal for kube-system/kube-dns:dns" -m state --state NEW -m udp --dport 53 -j KUBE-SVC-6666666666666666
|
||||
-A KUBE-PORTALS-HOST -d 10.247.0.10/32 -p tcp -m comment --comment "portal for kube-system/kube-dns:dns-tcp" -m state --state NEW -m tcp --dport 53 -j KUBE-SVC-2222222222222222
|
||||
-A KUBE-SVC-1111111111111111 -p udp -m comment --comment "kube-system/kube-dns:dns" -m recent --set --name KUBE-SVC-1111111111111111 --mask 255.255.255.255 --rsource -j DNAT --to-destination 10.246.1.2:53
|
||||
-A KUBE-SVC-2222222222222222 -m comment --comment "kube-system/kube-dns:dns-tcp" -j KUBE-SVC-3333333333333333
|
||||
-A KUBE-SVC-3333333333333333 -p tcp -m comment --comment "kube-system/kube-dns:dns-tcp" -m recent --set --name KUBE-SVC-3333333333333333 --mask 255.255.255.255 --rsource -j DNAT --to-destination 10.246.1.2:53
|
||||
-A KUBE-SVC-4444444444444444 -p tcp -m comment --comment "default/kubernetes:" -m recent --set --name KUBE-SVC-4444444444444444 --mask 255.255.255.255 --rsource -j DNAT --to-destination 10.245.1.2:443
|
||||
-A KUBE-SVC-5555555555555555 -m comment --comment "default/kubernetes:" -j KUBE-SVC-4444444444444444
|
||||
-A KUBE-SVC-6666666666666666 -m comment --comment "kube-system/kube-dns:dns" -j KUBE-SVC-1111111111111111
|
||||
COMMIT
|
||||
# Completed on Fri Aug 7 14:47:37 2015
|
||||
# Generated by iptables-save v1.4.21 on Fri Aug 7 14:47:37 2015
|
||||
*filter
|
||||
:INPUT ACCEPT [17514:83115836]
|
||||
:FORWARD ACCEPT [0:0]
|
||||
:OUTPUT ACCEPT [8909:688225]
|
||||
:DOCKER - [0:0]
|
||||
-A FORWARD -o cbr0 -j DOCKER
|
||||
-A FORWARD -o cbr0 -m conntrack --ctstate RELATED,ESTABLISHED -j ACCEPT
|
||||
-A FORWARD -i cbr0 ! -o cbr0 -j ACCEPT
|
||||
-A FORWARD -i cbr0 -o cbr0 -j ACCEPT
|
||||
COMMIT
|
||||
`
|
||||
expected := map[utiliptables.Chain]string{
|
||||
utiliptables.ChainPrerouting: ":PREROUTING ACCEPT [2:138]",
|
||||
utiliptables.Chain("INPUT"): ":INPUT ACCEPT [0:0]",
|
||||
utiliptables.Chain("OUTPUT"): ":OUTPUT ACCEPT [0:0]",
|
||||
utiliptables.ChainPostrouting: ":POSTROUTING ACCEPT [0:0]",
|
||||
utiliptables.Chain("DOCKER"): ":DOCKER - [0:0]",
|
||||
utiliptables.Chain("KUBE-NODEPORT-CONTAINER"): ":KUBE-NODEPORT-CONTAINER - [0:0]",
|
||||
utiliptables.Chain("KUBE-NODEPORT-HOST"): ":KUBE-NODEPORT-HOST - [0:0]",
|
||||
utiliptables.Chain("KUBE-PORTALS-CONTAINER"): ":KUBE-PORTALS-CONTAINER - [0:0]",
|
||||
utiliptables.Chain("KUBE-PORTALS-HOST"): ":KUBE-PORTALS-HOST - [0:0]",
|
||||
utiliptables.Chain("KUBE-SVC-1111111111111111"): ":KUBE-SVC-1111111111111111 - [0:0]",
|
||||
utiliptables.Chain("KUBE-SVC-2222222222222222"): ":KUBE-SVC-2222222222222222 - [0:0]",
|
||||
utiliptables.Chain("KUBE-SVC-3333333333333333"): ":KUBE-SVC-3333333333333333 - [0:0]",
|
||||
utiliptables.Chain("KUBE-SVC-4444444444444444"): ":KUBE-SVC-4444444444444444 - [0:0]",
|
||||
utiliptables.Chain("KUBE-SVC-5555555555555555"): ":KUBE-SVC-5555555555555555 - [0:0]",
|
||||
utiliptables.Chain("KUBE-SVC-6666666666666666"): ":KUBE-SVC-6666666666666666 - [0:0]",
|
||||
}
|
||||
checkAllLines(t, utiliptables.TableNAT, []byte(iptables_save), expected)
|
||||
}
|
||||
|
||||
func newFakeServiceInfo(service proxy.ServicePortName, ip net.IP, port int, protocol api.Protocol, onlyNodeLocalEndpoints bool) *serviceInfo {
|
||||
return &serviceInfo{
|
||||
sessionAffinityType: api.ServiceAffinityNone, // default
|
||||
stickyMaxAgeMinutes: 180, // TODO: paramaterize this in the API.
|
||||
stickyMaxAgeMinutes: 180,
|
||||
clusterIP: ip,
|
||||
port: port,
|
||||
protocol: protocol,
|
||||
|
@ -381,187 +245,34 @@ func (fake *fakeHealthChecker) SyncEndpoints(newEndpoints map[types.NamespacedNa
|
|||
return nil
|
||||
}
|
||||
|
||||
func getFakeHnsNetwork() *hnsNetworkInfo {
|
||||
return &hnsNetworkInfo{
|
||||
id: "00000000-0000-0000-0000-000000000001",
|
||||
name: "fakeNetwork",
|
||||
}, nil
|
||||
}
|
||||
|
||||
const testHostname = "test-hostname"
|
||||
|
||||
func NewFakeProxier(ipt utiliptables.Interface) *Proxier {
|
||||
func NewFakeProxier() *Proxier {
|
||||
fakeHnsNetwork := getFakeHnsNetwork()
|
||||
// TODO: Call NewProxier after refactoring out the goroutine
|
||||
// invocation into a Run() method.
|
||||
p := &Proxier{
|
||||
exec: &fakeexec.FakeExec{},
|
||||
serviceMap: make(proxyServiceMap),
|
||||
serviceChanges: newServiceChangeMap(),
|
||||
endpointsMap: make(proxyEndpointsMap),
|
||||
endpointsChanges: newEndpointsChangeMap(testHostname),
|
||||
iptables: ipt,
|
||||
clusterCIDR: "10.0.0.0/24",
|
||||
hostname: testHostname,
|
||||
portsMap: make(map[localPort]closeable),
|
||||
portMapper: &fakePortOpener{[]*localPort{}},
|
||||
healthChecker: newFakeHealthChecker(),
|
||||
precomputedProbabilities: make([]string, 0, 1001),
|
||||
iptablesData: bytes.NewBuffer(nil),
|
||||
filterChains: bytes.NewBuffer(nil),
|
||||
filterRules: bytes.NewBuffer(nil),
|
||||
natChains: bytes.NewBuffer(nil),
|
||||
natRules: bytes.NewBuffer(nil),
|
||||
serviceMap: make(proxyServiceMap),
|
||||
serviceChanges: newServiceChangeMap(),
|
||||
endpointsMap: make(proxyEndpointsMap),
|
||||
endpointsChanges: newEndpointsChangeMap(testHostname),
|
||||
clusterCIDR: "10.0.0.0/24",
|
||||
hostname: testHostname,
|
||||
portsMap: make(map[localPort]closeable),
|
||||
healthChecker: newFakeHealthChecker(),
|
||||
network: fakeHnsNetwork,
|
||||
}
|
||||
p.syncRunner = async.NewBoundedFrequencyRunner("test-sync-runner", p.syncProxyRules, 0, time.Minute, 1)
|
||||
return p
|
||||
}
|
||||
|
||||
func hasJump(rules []iptablestest.Rule, destChain, destIP string, destPort int) bool {
|
||||
destPortStr := strconv.Itoa(destPort)
|
||||
match := false
|
||||
for _, r := range rules {
|
||||
if r[iptablestest.Jump] == destChain {
|
||||
match = true
|
||||
if destIP != "" {
|
||||
if strings.Contains(r[iptablestest.Destination], destIP) && (strings.Contains(r[iptablestest.DPort], destPortStr) || r[iptablestest.DPort] == "") {
|
||||
return true
|
||||
}
|
||||
match = false
|
||||
}
|
||||
if destPort != 0 {
|
||||
if strings.Contains(r[iptablestest.DPort], destPortStr) && (strings.Contains(r[iptablestest.Destination], destIP) || r[iptablestest.Destination] == "") {
|
||||
return true
|
||||
}
|
||||
match = false
|
||||
}
|
||||
}
|
||||
}
|
||||
return match
|
||||
}
|
||||
|
||||
func TestHasJump(t *testing.T) {
|
||||
testCases := map[string]struct {
|
||||
rules []iptablestest.Rule
|
||||
destChain string
|
||||
destIP string
|
||||
destPort int
|
||||
expected bool
|
||||
}{
|
||||
"case 1": {
|
||||
// Match the 1st rule(both dest IP and dest Port)
|
||||
rules: []iptablestest.Rule{
|
||||
{"-d ": "10.20.30.41/32", "--dport ": "80", "-p ": "tcp", "-j ": "REJECT"},
|
||||
{"--dport ": "3001", "-p ": "tcp", "-j ": "KUBE-MARK-MASQ"},
|
||||
},
|
||||
destChain: "REJECT",
|
||||
destIP: "10.20.30.41",
|
||||
destPort: 80,
|
||||
expected: true,
|
||||
},
|
||||
"case 2": {
|
||||
// Match the 2nd rule(dest Port)
|
||||
rules: []iptablestest.Rule{
|
||||
{"-d ": "10.20.30.41/32", "-p ": "tcp", "-j ": "REJECT"},
|
||||
{"--dport ": "3001", "-p ": "tcp", "-j ": "REJECT"},
|
||||
},
|
||||
destChain: "REJECT",
|
||||
destIP: "",
|
||||
destPort: 3001,
|
||||
expected: true,
|
||||
},
|
||||
"case 3": {
|
||||
// Match both dest IP and dest Port
|
||||
rules: []iptablestest.Rule{
|
||||
{"-d ": "1.2.3.4/32", "--dport ": "80", "-p ": "tcp", "-j ": "KUBE-XLB-GF53O3C2HZEXL2XN"},
|
||||
},
|
||||
destChain: "KUBE-XLB-GF53O3C2HZEXL2XN",
|
||||
destIP: "1.2.3.4",
|
||||
destPort: 80,
|
||||
expected: true,
|
||||
},
|
||||
"case 4": {
|
||||
// Match dest IP but doesn't match dest Port
|
||||
rules: []iptablestest.Rule{
|
||||
{"-d ": "1.2.3.4/32", "--dport ": "80", "-p ": "tcp", "-j ": "KUBE-XLB-GF53O3C2HZEXL2XN"},
|
||||
},
|
||||
destChain: "KUBE-XLB-GF53O3C2HZEXL2XN",
|
||||
destIP: "1.2.3.4",
|
||||
destPort: 8080,
|
||||
expected: false,
|
||||
},
|
||||
"case 5": {
|
||||
// Match dest Port but doesn't match dest IP
|
||||
rules: []iptablestest.Rule{
|
||||
{"-d ": "1.2.3.4/32", "--dport ": "80", "-p ": "tcp", "-j ": "KUBE-XLB-GF53O3C2HZEXL2XN"},
|
||||
},
|
||||
destChain: "KUBE-XLB-GF53O3C2HZEXL2XN",
|
||||
destIP: "10.20.30.40",
|
||||
destPort: 80,
|
||||
expected: false,
|
||||
},
|
||||
"case 6": {
|
||||
// Match the 2nd rule(dest IP)
|
||||
rules: []iptablestest.Rule{
|
||||
{"-d ": "10.20.30.41/32", "-p ": "tcp", "-j ": "REJECT"},
|
||||
{"-d ": "1.2.3.4/32", "-p ": "tcp", "-j ": "REJECT"},
|
||||
{"--dport ": "3001", "-p ": "tcp", "-j ": "REJECT"},
|
||||
},
|
||||
destChain: "REJECT",
|
||||
destIP: "1.2.3.4",
|
||||
destPort: 8080,
|
||||
expected: true,
|
||||
},
|
||||
"case 7": {
|
||||
// Match the 2nd rule(dest Port)
|
||||
rules: []iptablestest.Rule{
|
||||
{"-d ": "10.20.30.41/32", "-p ": "tcp", "-j ": "REJECT"},
|
||||
{"--dport ": "3001", "-p ": "tcp", "-j ": "REJECT"},
|
||||
},
|
||||
destChain: "REJECT",
|
||||
destIP: "1.2.3.4",
|
||||
destPort: 3001,
|
||||
expected: true,
|
||||
},
|
||||
"case 8": {
|
||||
// Match the 1st rule(dest IP)
|
||||
rules: []iptablestest.Rule{
|
||||
{"-d ": "10.20.30.41/32", "-p ": "tcp", "-j ": "REJECT"},
|
||||
{"--dport ": "3001", "-p ": "tcp", "-j ": "REJECT"},
|
||||
},
|
||||
destChain: "REJECT",
|
||||
destIP: "10.20.30.41",
|
||||
destPort: 8080,
|
||||
expected: true,
|
||||
},
|
||||
"case 9": {
|
||||
rules: []iptablestest.Rule{
|
||||
{"-j ": "KUBE-SEP-LWSOSDSHMKPJHHJV"},
|
||||
},
|
||||
destChain: "KUBE-SEP-LWSOSDSHMKPJHHJV",
|
||||
destIP: "",
|
||||
destPort: 0,
|
||||
expected: true,
|
||||
},
|
||||
"case 10": {
|
||||
rules: []iptablestest.Rule{
|
||||
{"-j ": "KUBE-SEP-FOO"},
|
||||
},
|
||||
destChain: "KUBE-SEP-BAR",
|
||||
destIP: "",
|
||||
destPort: 0,
|
||||
expected: false,
|
||||
},
|
||||
}
|
||||
|
||||
for k, tc := range testCases {
|
||||
if got := hasJump(tc.rules, tc.destChain, tc.destIP, tc.destPort); got != tc.expected {
|
||||
t.Errorf("%v: expected %v, got %v", k, tc.expected, got)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func hasDNAT(rules []iptablestest.Rule, endpoint string) bool {
|
||||
for _, r := range rules {
|
||||
if r[iptablestest.ToDest] == endpoint {
|
||||
return true
|
||||
}
|
||||
}
|
||||
return false
|
||||
}
|
||||
|
||||
func errorf(msg string, rules []iptablestest.Rule, t *testing.T) {
|
||||
for _, r := range rules {
|
||||
t.Logf("%q", r)
|
||||
|
@ -569,100 +280,8 @@ func errorf(msg string, rules []iptablestest.Rule, t *testing.T) {
|
|||
t.Errorf("%v", msg)
|
||||
}
|
||||
|
||||
func TestClusterIPReject(t *testing.T) {
|
||||
ipt := iptablestest.NewFake()
|
||||
fp := NewFakeProxier(ipt)
|
||||
svcIP := "10.20.30.41"
|
||||
svcPort := 80
|
||||
svcPortName := proxy.ServicePortName{
|
||||
NamespacedName: makeNSN("ns1", "svc1"),
|
||||
Port: "p80",
|
||||
}
|
||||
|
||||
makeServiceMap(fp,
|
||||
makeTestService(svcPortName.Namespace, svcPortName.Namespace, func(svc *api.Service) {
|
||||
svc.Spec.ClusterIP = svcIP
|
||||
svc.Spec.Ports = []api.ServicePort{{
|
||||
Name: svcPortName.Port,
|
||||
Port: int32(svcPort),
|
||||
Protocol: api.ProtocolTCP,
|
||||
}}
|
||||
}),
|
||||
)
|
||||
makeEndpointsMap(fp)
|
||||
fp.syncProxyRules()
|
||||
|
||||
svcChain := string(servicePortChainName(svcPortName.String(), strings.ToLower(string(api.ProtocolTCP))))
|
||||
svcRules := ipt.GetRules(svcChain)
|
||||
if len(svcRules) != 0 {
|
||||
errorf(fmt.Sprintf("Unexpected rule for chain %v service %v without endpoints", svcChain, svcPortName), svcRules, t)
|
||||
}
|
||||
kubeSvcRules := ipt.GetRules(string(kubeServicesChain))
|
||||
if !hasJump(kubeSvcRules, iptablestest.Reject, svcIP, svcPort) {
|
||||
errorf(fmt.Sprintf("Failed to find a %v rule for service %v with no endpoints", iptablestest.Reject, svcPortName), kubeSvcRules, t)
|
||||
}
|
||||
}
|
||||
|
||||
func TestClusterIPEndpointsJump(t *testing.T) {
|
||||
ipt := iptablestest.NewFake()
|
||||
fp := NewFakeProxier(ipt)
|
||||
svcIP := "10.20.30.41"
|
||||
svcPort := 80
|
||||
svcPortName := proxy.ServicePortName{
|
||||
NamespacedName: makeNSN("ns1", "svc1"),
|
||||
Port: "p80",
|
||||
}
|
||||
|
||||
makeServiceMap(fp,
|
||||
makeTestService(svcPortName.Namespace, svcPortName.Name, func(svc *api.Service) {
|
||||
svc.Spec.ClusterIP = svcIP
|
||||
svc.Spec.Ports = []api.ServicePort{{
|
||||
Name: svcPortName.Port,
|
||||
Port: int32(svcPort),
|
||||
Protocol: api.ProtocolTCP,
|
||||
}}
|
||||
}),
|
||||
)
|
||||
|
||||
epIP := "10.180.0.1"
|
||||
makeEndpointsMap(fp,
|
||||
makeTestEndpoints(svcPortName.Namespace, svcPortName.Name, func(ept *api.Endpoints) {
|
||||
ept.Subsets = []api.EndpointSubset{{
|
||||
Addresses: []api.EndpointAddress{{
|
||||
IP: epIP,
|
||||
}},
|
||||
Ports: []api.EndpointPort{{
|
||||
Name: svcPortName.Port,
|
||||
Port: int32(svcPort),
|
||||
}},
|
||||
}}
|
||||
}),
|
||||
)
|
||||
|
||||
fp.syncProxyRules()
|
||||
|
||||
epStr := fmt.Sprintf("%s:%d", epIP, svcPort)
|
||||
svcChain := string(servicePortChainName(svcPortName.String(), strings.ToLower(string(api.ProtocolTCP))))
|
||||
epChain := string(servicePortEndpointChainName(svcPortName.String(), strings.ToLower(string(api.ProtocolTCP)), epStr))
|
||||
|
||||
kubeSvcRules := ipt.GetRules(string(kubeServicesChain))
|
||||
if !hasJump(kubeSvcRules, svcChain, svcIP, svcPort) {
|
||||
errorf(fmt.Sprintf("Failed to find jump from KUBE-SERVICES to %v chain", svcChain), kubeSvcRules, t)
|
||||
}
|
||||
|
||||
svcRules := ipt.GetRules(svcChain)
|
||||
if !hasJump(svcRules, epChain, "", 0) {
|
||||
errorf(fmt.Sprintf("Failed to jump to ep chain %v", epChain), svcRules, t)
|
||||
}
|
||||
epRules := ipt.GetRules(epChain)
|
||||
if !hasDNAT(epRules, epStr) {
|
||||
errorf(fmt.Sprintf("Endpoint chain %v lacks DNAT to %v", epChain, epStr), epRules, t)
|
||||
}
|
||||
}
|
||||
|
||||
func TestLoadBalancer(t *testing.T) {
|
||||
ipt := iptablestest.NewFake()
|
||||
fp := NewFakeProxier(ipt)
|
||||
fp := NewFakeProxier()
|
||||
svcIP := "10.20.30.41"
|
||||
svcPort := 80
|
||||
svcNodePort := 3001
|
||||
|
@ -710,20 +329,13 @@ func TestLoadBalancer(t *testing.T) {
|
|||
svcChain := string(servicePortChainName(svcPortName.String(), proto))
|
||||
//lbChain := string(serviceLBChainName(svcPortName.String(), proto))
|
||||
|
||||
kubeSvcRules := ipt.GetRules(string(kubeServicesChain))
|
||||
if !hasJump(kubeSvcRules, fwChain, svcLBIP, svcPort) {
|
||||
errorf(fmt.Sprintf("Failed to find jump to firewall chain %v", fwChain), kubeSvcRules, t)
|
||||
}
|
||||
// TODO
|
||||
|
||||
fwRules := ipt.GetRules(fwChain)
|
||||
if !hasJump(fwRules, svcChain, "", 0) || !hasJump(fwRules, string(KubeMarkMasqChain), "", 0) {
|
||||
errorf(fmt.Sprintf("Failed to find jump from firewall chain %v to svc chain %v", fwChain, svcChain), fwRules, t)
|
||||
}
|
||||
}
|
||||
|
||||
func TestNodePort(t *testing.T) {
|
||||
ipt := iptablestest.NewFake()
|
||||
fp := NewFakeProxier(ipt)
|
||||
|
||||
fp := NewFakeProxier()
|
||||
svcIP := "10.20.30.41"
|
||||
svcPort := 80
|
||||
svcNodePort := 3001
|
||||
|
@ -765,15 +377,12 @@ func TestNodePort(t *testing.T) {
|
|||
proto := strings.ToLower(string(api.ProtocolTCP))
|
||||
svcChain := string(servicePortChainName(svcPortName.String(), proto))
|
||||
|
||||
kubeNodePortRules := ipt.GetRules(string(kubeNodePortsChain))
|
||||
if !hasJump(kubeNodePortRules, svcChain, "", svcNodePort) {
|
||||
errorf(fmt.Sprintf("Failed to find jump to svc chain %v", svcChain), kubeNodePortRules, t)
|
||||
}
|
||||
// TODO
|
||||
}
|
||||
|
||||
func TestExternalIPsReject(t *testing.T) {
|
||||
ipt := iptablestest.NewFake()
|
||||
fp := NewFakeProxier(ipt)
|
||||
|
||||
fp := NewFakeProxier()
|
||||
svcIP := "10.20.30.41"
|
||||
svcPort := 80
|
||||
svcExternalIPs := "50.60.70.81"
|
||||
|
@ -799,15 +408,11 @@ func TestExternalIPsReject(t *testing.T) {
|
|||
|
||||
fp.syncProxyRules()
|
||||
|
||||
kubeSvcRules := ipt.GetRules(string(kubeServicesChain))
|
||||
if !hasJump(kubeSvcRules, iptablestest.Reject, svcExternalIPs, svcPort) {
|
||||
errorf(fmt.Sprintf("Failed to a %v rule for externalIP %v with no endpoints", iptablestest.Reject, svcPortName), kubeSvcRules, t)
|
||||
}
|
||||
}
|
||||
|
||||
func TestNodePortReject(t *testing.T) {
|
||||
ipt := iptablestest.NewFake()
|
||||
fp := NewFakeProxier(ipt)
|
||||
|
||||
fp := NewFakeProxier()
|
||||
svcIP := "10.20.30.41"
|
||||
svcPort := 80
|
||||
svcNodePort := 3001
|
||||
|
@ -832,10 +437,7 @@ func TestNodePortReject(t *testing.T) {
|
|||
|
||||
fp.syncProxyRules()
|
||||
|
||||
kubeSvcRules := ipt.GetRules(string(kubeServicesChain))
|
||||
if !hasJump(kubeSvcRules, iptablestest.Reject, svcIP, svcNodePort) {
|
||||
errorf(fmt.Sprintf("Failed to find a %v rule for service %v with no endpoints", iptablestest.Reject, svcPortName), kubeSvcRules, t)
|
||||
}
|
||||
// TODO
|
||||
}
|
||||
|
||||
func strPtr(s string) *string {
|
||||
|
@ -843,8 +445,8 @@ func strPtr(s string) *string {
|
|||
}
|
||||
|
||||
func TestOnlyLocalLoadBalancing(t *testing.T) {
|
||||
ipt := iptablestest.NewFake()
|
||||
fp := NewFakeProxier(ipt)
|
||||
|
||||
fp := NewFakeProxier()
|
||||
svcIP := "10.20.30.41"
|
||||
svcPort := 80
|
||||
svcNodePort := 3001
|
||||
|
@ -902,43 +504,24 @@ func TestOnlyLocalLoadBalancing(t *testing.T) {
|
|||
nonLocalEpChain := string(servicePortEndpointChainName(svcPortName.String(), strings.ToLower(string(api.ProtocolTCP)), epStrLocal))
|
||||
localEpChain := string(servicePortEndpointChainName(svcPortName.String(), strings.ToLower(string(api.ProtocolTCP)), epStrNonLocal))
|
||||
|
||||
kubeSvcRules := ipt.GetRules(string(kubeServicesChain))
|
||||
if !hasJump(kubeSvcRules, fwChain, svcLBIP, svcPort) {
|
||||
errorf(fmt.Sprintf("Failed to find jump to firewall chain %v", fwChain), kubeSvcRules, t)
|
||||
}
|
||||
|
||||
fwRules := ipt.GetRules(fwChain)
|
||||
if !hasJump(fwRules, lbChain, "", 0) {
|
||||
errorf(fmt.Sprintf("Failed to find jump from firewall chain %v to svc chain %v", fwChain, lbChain), fwRules, t)
|
||||
}
|
||||
if hasJump(fwRules, string(KubeMarkMasqChain), "", 0) {
|
||||
errorf(fmt.Sprintf("Found jump from fw chain %v to MASQUERADE", fwChain), fwRules, t)
|
||||
}
|
||||
|
||||
lbRules := ipt.GetRules(lbChain)
|
||||
if hasJump(lbRules, nonLocalEpChain, "", 0) {
|
||||
errorf(fmt.Sprintf("Found jump from lb chain %v to non-local ep %v", lbChain, epStrLocal), lbRules, t)
|
||||
}
|
||||
if !hasJump(lbRules, localEpChain, "", 0) {
|
||||
errorf(fmt.Sprintf("Didn't find jump from lb chain %v to local ep %v", lbChain, epStrNonLocal), lbRules, t)
|
||||
}
|
||||
// TODO
|
||||
}
|
||||
|
||||
func TestOnlyLocalNodePortsNoClusterCIDR(t *testing.T) {
|
||||
ipt := iptablestest.NewFake()
|
||||
fp := NewFakeProxier(ipt)
|
||||
|
||||
fp := NewFakeProxier()
|
||||
// set cluster CIDR to empty before test
|
||||
fp.clusterCIDR = ""
|
||||
onlyLocalNodePorts(t, fp, ipt)
|
||||
onlyLocalNodePorts(t, fp)
|
||||
}
|
||||
|
||||
func TestOnlyLocalNodePorts(t *testing.T) {
|
||||
ipt := iptablestest.NewFake()
|
||||
fp := NewFakeProxier(ipt)
|
||||
onlyLocalNodePorts(t, fp, ipt)
|
||||
|
||||
fp := NewFakeProxier()
|
||||
onlyLocalNodePorts(t, fp)
|
||||
}
|
||||
|
||||
func onlyLocalNodePorts(t *testing.T, fp *Proxier, ipt *iptablestest.FakeIPTables) {
|
||||
func onlyLocalNodePorts(t *testing.T, fp *Proxier) {
|
||||
shouldLBTOSVCRuleExist := len(fp.clusterCIDR) > 0
|
||||
svcIP := "10.20.30.41"
|
||||
svcPort := 80
|
||||
|
@ -986,32 +569,7 @@ func onlyLocalNodePorts(t *testing.T, fp *Proxier, ipt *iptablestest.FakeIPTable
|
|||
|
||||
fp.syncProxyRules()
|
||||
|
||||
proto := strings.ToLower(string(api.ProtocolTCP))
|
||||
lbChain := string(serviceLBChainName(svcPortName.String(), proto))
|
||||
|
||||
nonLocalEpChain := string(servicePortEndpointChainName(svcPortName.String(), proto, epStrLocal))
|
||||
localEpChain := string(servicePortEndpointChainName(svcPortName.String(), proto, epStrNonLocal))
|
||||
|
||||
kubeNodePortRules := ipt.GetRules(string(kubeNodePortsChain))
|
||||
if !hasJump(kubeNodePortRules, lbChain, "", svcNodePort) {
|
||||
errorf(fmt.Sprintf("Failed to find jump to lb chain %v", lbChain), kubeNodePortRules, t)
|
||||
}
|
||||
|
||||
svcChain := string(servicePortChainName(svcPortName.String(), proto))
|
||||
lbRules := ipt.GetRules(lbChain)
|
||||
if hasJump(lbRules, nonLocalEpChain, "", 0) {
|
||||
errorf(fmt.Sprintf("Found jump from lb chain %v to non-local ep %v", lbChain, epStrLocal), lbRules, t)
|
||||
}
|
||||
if hasJump(lbRules, svcChain, "", 0) != shouldLBTOSVCRuleExist {
|
||||
prefix := "Did not find "
|
||||
if !shouldLBTOSVCRuleExist {
|
||||
prefix = "Found "
|
||||
}
|
||||
errorf(fmt.Sprintf("%s jump from lb chain %v to svc %v", prefix, lbChain, svcChain), lbRules, t)
|
||||
}
|
||||
if !hasJump(lbRules, localEpChain, "", 0) {
|
||||
errorf(fmt.Sprintf("Didn't find jump from lb chain %v to local ep %v", lbChain, epStrLocal), lbRules, t)
|
||||
}
|
||||
// TODO
|
||||
}
|
||||
|
||||
func makeTestService(namespace, name string, svcFunc func(*api.Service)) *api.Service {
|
||||
|
@ -1040,8 +598,8 @@ func addTestPort(array []api.ServicePort, name string, protocol api.Protocol, po
|
|||
}
|
||||
|
||||
func TestBuildServiceMapAddRemove(t *testing.T) {
|
||||
ipt := iptablestest.NewFake()
|
||||
fp := NewFakeProxier(ipt)
|
||||
|
||||
fp := NewFakeProxier()
|
||||
|
||||
services := []*api.Service{
|
||||
makeTestService("somewhere-else", "cluster-ip", func(svc *api.Service) {
|
||||
|
@ -1146,8 +704,8 @@ func TestBuildServiceMapAddRemove(t *testing.T) {
|
|||
}
|
||||
|
||||
func TestBuildServiceMapServiceHeadless(t *testing.T) {
|
||||
ipt := iptablestest.NewFake()
|
||||
fp := NewFakeProxier(ipt)
|
||||
|
||||
fp := NewFakeProxier()
|
||||
|
||||
makeServiceMap(fp,
|
||||
makeTestService("somewhere-else", "headless", func(svc *api.Service) {
|
||||
|
@ -1178,8 +736,8 @@ func TestBuildServiceMapServiceHeadless(t *testing.T) {
|
|||
}
|
||||
|
||||
func TestBuildServiceMapServiceTypeExternalName(t *testing.T) {
|
||||
ipt := iptablestest.NewFake()
|
||||
fp := NewFakeProxier(ipt)
|
||||
|
||||
fp := NewFakeProxier()
|
||||
|
||||
makeServiceMap(fp,
|
||||
makeTestService("somewhere-else", "external-name", func(svc *api.Service) {
|
||||
|
@ -1204,8 +762,7 @@ func TestBuildServiceMapServiceTypeExternalName(t *testing.T) {
|
|||
}
|
||||
|
||||
func TestBuildServiceMapServiceUpdate(t *testing.T) {
|
||||
ipt := iptablestest.NewFake()
|
||||
fp := NewFakeProxier(ipt)
|
||||
fp := NewFakeProxier()
|
||||
|
||||
servicev1 := makeTestService("somewhere", "some-service", func(svc *api.Service) {
|
||||
svc.Spec.Type = api.ServiceTypeClusterIP
|
||||
|
@ -2415,8 +1972,8 @@ func Test_updateEndpointsMap(t *testing.T) {
|
|||
}
|
||||
|
||||
for tci, tc := range testCases {
|
||||
ipt := iptablestest.NewFake()
|
||||
fp := NewFakeProxier(ipt)
|
||||
|
||||
fp := NewFakeProxier()
|
||||
fp.hostname = nodeName
|
||||
|
||||
// First check that after adding all previous versions of endpoints,
|
||||
|
|
|
@ -3,6 +3,7 @@ load("@io_bazel_rules_go//go:def.bzl", "go_library")
go_library(
name = "go_default_library",
srcs = [
"ea.go",
"reparse.go",
"syscall.go",
] + select({
@ -68,10 +68,20 @@ func NewBackupStreamReader(r io.Reader) *BackupStreamReader {
return &BackupStreamReader{r, 0}
}

// Next returns the next backup stream and prepares for calls to Write(). It skips the remainder of the current stream if
// Next returns the next backup stream and prepares for calls to Read(). It skips the remainder of the current stream if
// it was not completely read.
func (r *BackupStreamReader) Next() (*BackupHeader, error) {
if r.bytesLeft > 0 {
if s, ok := r.r.(io.Seeker); ok {
// Make sure Seek on io.SeekCurrent sometimes succeeds
// before trying the actual seek.
if _, err := s.Seek(0, io.SeekCurrent); err == nil {
if _, err = s.Seek(r.bytesLeft, io.SeekCurrent); err != nil {
return nil, err
}
r.bytesLeft = 0
}
}
if _, err := io.Copy(ioutil.Discard, r); err != nil {
return nil, err
}

@ -220,7 +230,7 @@ type BackupFileWriter struct {
ctx uintptr
}

// NewBackupFileWrtier returns a new BackupFileWriter from a file handle. If includeSecurity is true,
// NewBackupFileWriter returns a new BackupFileWriter from a file handle. If includeSecurity is true,
// Write() will attempt to restore the security descriptor from the stream.
func NewBackupFileWriter(f *os.File, includeSecurity bool) *BackupFileWriter {
w := &BackupFileWriter{f, includeSecurity, 0}
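The hunk above reworks BackupStreamReader.Next so that, when the underlying reader supports seeking, it jumps past the unread remainder of the current stream and only falls back to draining it through io.Copy otherwise. A minimal consumer of this API might look like the sketch below; it is not part of the commit, and the dumpStreams helper name is invented here, but NewBackupFileReader, NewBackupStreamReader, Next and the BackupHeader fields are the go-winio functions vendored by this change.

```go
// +build windows

package main

import (
	"fmt"
	"io"
	"io/ioutil"
	"log"
	"os"

	winio "github.com/Microsoft/go-winio"
)

// dumpStreams walks every backup stream of a file: BackupFileReader produces
// the raw BackupRead byte stream, and BackupStreamReader.Next advances to the
// next stream header, skipping whatever of the previous stream was not read.
func dumpStreams(path string) error {
	f, err := os.Open(path)
	if err != nil {
		return err
	}
	defer f.Close()

	bfr := winio.NewBackupFileReader(f, false) // false: skip security descriptors
	defer bfr.Close()

	br := winio.NewBackupStreamReader(bfr)
	for {
		hdr, err := br.Next()
		if err == io.EOF {
			return nil // no more streams
		}
		if err != nil {
			return err
		}
		fmt.Printf("stream id=%d name=%q size=%d\n", hdr.Id, hdr.Name, hdr.Size)
		// Reading the body is optional; Next() skips any unread remainder.
		if _, err := io.Copy(ioutil.Discard, br); err != nil {
			return err
		}
	}
}

func main() {
	if err := dumpStreams(os.Args[1]); err != nil {
		log.Fatal(err)
	}
}
```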
@ -0,0 +1,137 @@
package winio

import (
"bytes"
"encoding/binary"
"errors"
)

type fileFullEaInformation struct {
NextEntryOffset uint32
Flags uint8
NameLength uint8
ValueLength uint16
}

var (
fileFullEaInformationSize = binary.Size(&fileFullEaInformation{})

errInvalidEaBuffer = errors.New("invalid extended attribute buffer")
errEaNameTooLarge = errors.New("extended attribute name too large")
errEaValueTooLarge = errors.New("extended attribute value too large")
)

// ExtendedAttribute represents a single Windows EA.
type ExtendedAttribute struct {
Name string
Value []byte
Flags uint8
}

func parseEa(b []byte) (ea ExtendedAttribute, nb []byte, err error) {
var info fileFullEaInformation
err = binary.Read(bytes.NewReader(b), binary.LittleEndian, &info)
if err != nil {
err = errInvalidEaBuffer
return
}

nameOffset := fileFullEaInformationSize
nameLen := int(info.NameLength)
valueOffset := nameOffset + int(info.NameLength) + 1
valueLen := int(info.ValueLength)
nextOffset := int(info.NextEntryOffset)
if valueLen+valueOffset > len(b) || nextOffset < 0 || nextOffset > len(b) {
err = errInvalidEaBuffer
return
}

ea.Name = string(b[nameOffset : nameOffset+nameLen])
ea.Value = b[valueOffset : valueOffset+valueLen]
ea.Flags = info.Flags
if info.NextEntryOffset != 0 {
nb = b[info.NextEntryOffset:]
}
return
}

// DecodeExtendedAttributes decodes a list of EAs from a FILE_FULL_EA_INFORMATION
// buffer retrieved from BackupRead, ZwQueryEaFile, etc.
func DecodeExtendedAttributes(b []byte) (eas []ExtendedAttribute, err error) {
for len(b) != 0 {
ea, nb, err := parseEa(b)
if err != nil {
return nil, err
}

eas = append(eas, ea)
b = nb
}
return
}

func writeEa(buf *bytes.Buffer, ea *ExtendedAttribute, last bool) error {
if int(uint8(len(ea.Name))) != len(ea.Name) {
return errEaNameTooLarge
}
if int(uint16(len(ea.Value))) != len(ea.Value) {
return errEaValueTooLarge
}
entrySize := uint32(fileFullEaInformationSize + len(ea.Name) + 1 + len(ea.Value))
withPadding := (entrySize + 3) &^ 3
nextOffset := uint32(0)
if !last {
nextOffset = withPadding
}
info := fileFullEaInformation{
NextEntryOffset: nextOffset,
Flags: ea.Flags,
NameLength: uint8(len(ea.Name)),
ValueLength: uint16(len(ea.Value)),
}

err := binary.Write(buf, binary.LittleEndian, &info)
if err != nil {
return err
}

_, err = buf.Write([]byte(ea.Name))
if err != nil {
return err
}

err = buf.WriteByte(0)
if err != nil {
return err
}

_, err = buf.Write(ea.Value)
if err != nil {
return err
}

_, err = buf.Write([]byte{0, 0, 0}[0 : withPadding-entrySize])
if err != nil {
return err
}

return nil
}

// EncodeExtendedAttributes encodes a list of EAs into a FILE_FULL_EA_INFORMATION
// buffer for use with BackupWrite, ZwSetEaFile, etc.
func EncodeExtendedAttributes(eas []ExtendedAttribute) ([]byte, error) {
var buf bytes.Buffer
for i := range eas {
last := false
if i == len(eas)-1 {
last = true
}

err := writeEa(&buf, &eas[i], last)
if err != nil {
return nil, err
}
}
return buf.Bytes(), nil
}
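The new ea.go gives go-winio an encoder and decoder for the FILE_FULL_EA_INFORMATION layout: each entry is padded to a 4-byte boundary and chained through NextEntryOffset, which is what writeEa computes with (entrySize + 3) &^ 3. A small round-trip sketch using only the exported functions added here (the EA names and values are made up for illustration):

```go
// +build windows

package main

import (
	"fmt"
	"log"

	winio "github.com/Microsoft/go-winio"
)

func main() {
	// Two EAs; EncodeExtendedAttributes lays each one out as a padded
	// FILE_FULL_EA_INFORMATION entry linked via NextEntryOffset.
	eas := []winio.ExtendedAttribute{
		{Name: "user.comment", Value: []byte("hello")},
		{Name: "user.flag", Value: []byte{1}},
	}

	buf, err := winio.EncodeExtendedAttributes(eas)
	if err != nil {
		log.Fatal(err)
	}

	// DecodeExtendedAttributes reverses the encoding, walking the chain
	// until NextEntryOffset is zero.
	decoded, err := winio.DecodeExtendedAttributes(buf)
	if err != nil {
		log.Fatal(err)
	}
	for _, ea := range decoded {
		fmt.Printf("%s = %q (flags=%d)\n", ea.Name, ea.Value, ea.Flags)
	}
}
```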
@ -23,6 +23,13 @@ type atomicBool int32
func (b *atomicBool) isSet() bool { return atomic.LoadInt32((*int32)(b)) != 0 }
func (b *atomicBool) setFalse() { atomic.StoreInt32((*int32)(b), 0) }
func (b *atomicBool) setTrue() { atomic.StoreInt32((*int32)(b), 1) }
func (b *atomicBool) swap(new bool) bool {
var newInt int32
if new {
newInt = 1
}
return atomic.SwapInt32((*int32)(b), newInt) == 1
}

const (
cFILE_SKIP_COMPLETION_PORT_ON_SUCCESS = 1

@ -71,7 +78,8 @@ func initIo() {
type win32File struct {
handle syscall.Handle
wg sync.WaitGroup
closing bool
wgLock sync.RWMutex
closing atomicBool
readDeadline deadlineHandler
writeDeadline deadlineHandler
}

@ -107,14 +115,18 @@ func MakeOpenFile(h syscall.Handle) (io.ReadWriteCloser, error) {

// closeHandle closes the resources associated with a Win32 handle
func (f *win32File) closeHandle() {
if !f.closing {
f.wgLock.Lock()
// Atomically set that we are closing, releasing the resources only once.
if !f.closing.swap(true) {
f.wgLock.Unlock()
// cancel all IO and wait for it to complete
f.closing = true
cancelIoEx(f.handle, nil)
f.wg.Wait()
// at this point, no new IO can start
syscall.Close(f.handle)
f.handle = 0
} else {
f.wgLock.Unlock()
}
}

@ -127,10 +139,13 @@ func (f *win32File) Close() error {
// prepareIo prepares for a new IO operation.
// The caller must call f.wg.Done() when the IO is finished, prior to Close() returning.
func (f *win32File) prepareIo() (*ioOperation, error) {
f.wg.Add(1)
if f.closing {
f.wgLock.RLock()
if f.closing.isSet() {
f.wgLock.RUnlock()
return nil, ErrFileClosed
}
f.wg.Add(1)
f.wgLock.RUnlock()
c := &ioOperation{}
c.ch = make(chan ioResult)
return c, nil

@ -159,7 +174,7 @@ func (f *win32File) asyncIo(c *ioOperation, d *deadlineHandler, bytes uint32, er
return int(bytes), err
}

if f.closing {
if f.closing.isSet() {
cancelIoEx(f.handle, &c.o)
}

@ -175,7 +190,7 @@ func (f *win32File) asyncIo(c *ioOperation, d *deadlineHandler, bytes uint32, er
case r = <-c.ch:
err = r.err
if err == syscall.ERROR_OPERATION_ABORTED {
if f.closing {
if f.closing.isSet() {
err = ErrFileClosed
}
}
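The win32File changes above replace the plain closing bool with an atomicBool plus a wgLock, so that swap(true) lets exactly one caller of closeHandle cancel outstanding IO, drain the WaitGroup and close the handle, while prepareIo refuses new IO once closing is set. The standalone sketch below is my own illustration of the same close-once idiom, not the vendored code:

```go
package main

import (
	"fmt"
	"sync"
	"sync/atomic"
)

// atomicBool mirrors the helper added above: an int32 manipulated with
// sync/atomic, where swap returns the previous value.
type atomicBool int32

func (b *atomicBool) isSet() bool { return atomic.LoadInt32((*int32)(b)) != 0 }
func (b *atomicBool) swap(new bool) bool {
	var n int32
	if new {
		n = 1
	}
	return atomic.SwapInt32((*int32)(b), n) == 1
}

// resource stands in for win32File: Close must release its handle exactly
// once even when called concurrently, after in-flight work has drained.
type resource struct {
	wg      sync.WaitGroup
	closing atomicBool
}

func (r *resource) Close() {
	// Only the caller that flips closing from false to true proceeds.
	if !r.closing.swap(true) {
		r.wg.Wait() // wait for outstanding operations, then release
		fmt.Println("released once")
	}
}

func main() {
	r := &resource{}
	var callers sync.WaitGroup
	for i := 0; i < 4; i++ {
		callers.Add(1)
		go func() { defer callers.Done(); r.Close() }()
	}
	callers.Wait()
}
```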
@ -265,9 +265,9 @@ func (l *win32PipeListener) listenerRoutine() {
if err == nil {
// Wait for the client to connect.
ch := make(chan error)
go func() {
go func(p *win32File) {
ch <- connectPipe(p)
}()
}(p)
select {
case err = <-ch:
if err != nil {
@ -0,0 +1,63 @@
|
|||
load("@io_bazel_rules_go//go:def.bzl", "go_library")
|
||||
|
||||
go_library(
|
||||
name = "go_default_library",
|
||||
srcs = [
|
||||
"activatelayer.go",
|
||||
"baselayer.go",
|
||||
"callback.go",
|
||||
"cgo.go",
|
||||
"container.go",
|
||||
"createlayer.go",
|
||||
"createsandboxlayer.go",
|
||||
"deactivatelayer.go",
|
||||
"destroylayer.go",
|
||||
"errors.go",
|
||||
"expandsandboxsize.go",
|
||||
"exportlayer.go",
|
||||
"getlayermountpath.go",
|
||||
"getsharedbaseimages.go",
|
||||
"guid.go",
|
||||
"hcsshim.go",
|
||||
"hnsendpoint.go",
|
||||
"hnsfuncs.go",
|
||||
"hnsnetwork.go",
|
||||
"hnspolicy.go",
|
||||
"hnspolicylist.go",
|
||||
"importlayer.go",
|
||||
"interface.go",
|
||||
"layerexists.go",
|
||||
"layerutils.go",
|
||||
"legacy.go",
|
||||
"nametoguid.go",
|
||||
"preparelayer.go",
|
||||
"process.go",
|
||||
"processimage.go",
|
||||
"unpreparelayer.go",
|
||||
"utils.go",
|
||||
"version.go",
|
||||
"waithelper.go",
|
||||
"zhcsshim.go",
|
||||
],
|
||||
cgo = True,
|
||||
visibility = ["//visibility:public"],
|
||||
deps = [
|
||||
"//vendor/github.com/Microsoft/go-winio:go_default_library",
|
||||
"//vendor/github.com/sirupsen/logrus:go_default_library",
|
||||
"//vendor/golang.org/x/sys/windows:go_default_library",
|
||||
],
|
||||
)
|
||||
|
||||
filegroup(
|
||||
name = "package-srcs",
|
||||
srcs = glob(["**"]),
|
||||
tags = ["automanaged"],
|
||||
visibility = ["//visibility:private"],
|
||||
)
|
||||
|
||||
filegroup(
|
||||
name = "all-srcs",
|
||||
srcs = [":package-srcs"],
|
||||
tags = ["automanaged"],
|
||||
visibility = ["//visibility:public"],
|
||||
)
|
|
@ -0,0 +1,21 @@
The MIT License (MIT)

Copyright (c) 2015 Microsoft

Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:

The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.

THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
@ -0,0 +1,12 @@
# hcsshim

This package supports launching Windows Server containers from Go. It is
primarily used in the [Docker Engine](https://github.com/docker/docker) project,
but it can be freely used by other projects as well.

This project has adopted the [Microsoft Open Source Code of
Conduct](https://opensource.microsoft.com/codeofconduct/). For more information
see the [Code of Conduct
FAQ](https://opensource.microsoft.com/codeofconduct/faq/) or contact
[opencode@microsoft.com](mailto:opencode@microsoft.com) with any additional
questions or comments.
@ -0,0 +1,28 @@
package hcsshim

import "github.com/sirupsen/logrus"

// ActivateLayer will find the layer with the given id and mount it's filesystem.
// For a read/write layer, the mounted filesystem will appear as a volume on the
// host, while a read-only layer is generally expected to be a no-op.
// An activated layer must later be deactivated via DeactivateLayer.
func ActivateLayer(info DriverInfo, id string) error {
title := "hcsshim::ActivateLayer "
logrus.Debugf(title+"Flavour %d ID %s", info.Flavour, id)

infop, err := convertDriverInfo(info)
if err != nil {
logrus.Error(err)
return err
}

err = activateLayer(&infop, id)
if err != nil {
err = makeErrorf(err, title, "id=%s flavour=%d", id, info.Flavour)
logrus.Error(err)
return err
}

logrus.Debugf(title+" - succeeded id=%s flavour=%d", id, info.Flavour)
return nil
}
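The new activatelayer.go is a thin wrapper over the HCS activateLayer call; its doc comment requires every activation to be balanced by DeactivateLayer. A rough usage sketch follows. It is not taken from the commit: the layer ID and HomeDir are placeholders, and while ActivateLayer, DeactivateLayer and GetLayerMountPath are all vendored by this change (they appear in the hcsshim BUILD srcs list above), the exact DriverInfo values a caller needs depend on the layer store in use.

```go
// +build windows

package main

import (
	"fmt"
	"log"

	"github.com/Microsoft/hcsshim"
)

func main() {
	// DriverInfo identifies the layer store; both fields here are
	// placeholder values for this sketch.
	info := hcsshim.DriverInfo{
		HomeDir: `C:\ProgramData\docker\windowsfilter`,
		Flavour: 1, // storage driver flavour; value assumed for illustration
	}
	layerID := "example-layer-id" // hypothetical layer ID

	// ActivateLayer mounts the layer's filesystem; per the doc comment it
	// must be paired with DeactivateLayer once the layer is no longer used.
	if err := hcsshim.ActivateLayer(info, layerID); err != nil {
		log.Fatal(err)
	}
	defer func() {
		if err := hcsshim.DeactivateLayer(info, layerID); err != nil {
			log.Printf("deactivate: %v", err)
		}
	}()

	// GetLayerMountPath reports where the activated layer is mounted.
	path, err := hcsshim.GetLayerMountPath(info, layerID)
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println("layer mounted at", path)
}
```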
@ -0,0 +1,183 @@
|
|||
package hcsshim
|
||||
|
||||
import (
|
||||
"errors"
|
||||
"os"
|
||||
"path/filepath"
|
||||
"syscall"
|
||||
|
||||
"github.com/Microsoft/go-winio"
|
||||
)
|
||||
|
||||
type baseLayerWriter struct {
|
||||
root string
|
||||
f *os.File
|
||||
bw *winio.BackupFileWriter
|
||||
err error
|
||||
hasUtilityVM bool
|
||||
dirInfo []dirInfo
|
||||
}
|
||||
|
||||
type dirInfo struct {
|
||||
path string
|
||||
fileInfo winio.FileBasicInfo
|
||||
}
|
||||
|
||||
// reapplyDirectoryTimes reapplies directory modification, creation, etc. times
|
||||
// after processing of the directory tree has completed. The times are expected
|
||||
// to be ordered such that parent directories come before child directories.
|
||||
func reapplyDirectoryTimes(dis []dirInfo) error {
|
||||
for i := range dis {
|
||||
di := &dis[len(dis)-i-1] // reverse order: process child directories first
|
||||
f, err := winio.OpenForBackup(di.path, syscall.GENERIC_READ|syscall.GENERIC_WRITE, syscall.FILE_SHARE_READ, syscall.OPEN_EXISTING)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
err = winio.SetFileBasicInfo(f, &di.fileInfo)
|
||||
f.Close()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (w *baseLayerWriter) closeCurrentFile() error {
|
||||
if w.f != nil {
|
||||
err := w.bw.Close()
|
||||
err2 := w.f.Close()
|
||||
w.f = nil
|
||||
w.bw = nil
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if err2 != nil {
|
||||
return err2
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (w *baseLayerWriter) Add(name string, fileInfo *winio.FileBasicInfo) (err error) {
|
||||
defer func() {
|
||||
if err != nil {
|
||||
w.err = err
|
||||
}
|
||||
}()
|
||||
|
||||
err = w.closeCurrentFile()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
if filepath.ToSlash(name) == `UtilityVM/Files` {
|
||||
w.hasUtilityVM = true
|
||||
}
|
||||
|
||||
path := filepath.Join(w.root, name)
|
||||
path, err = makeLongAbsPath(path)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
var f *os.File
|
||||
defer func() {
|
||||
if f != nil {
|
||||
f.Close()
|
||||
}
|
||||
}()
|
||||
|
||||
createmode := uint32(syscall.CREATE_NEW)
|
||||
if fileInfo.FileAttributes&syscall.FILE_ATTRIBUTE_DIRECTORY != 0 {
|
||||
err := os.Mkdir(path, 0)
|
||||
if err != nil && !os.IsExist(err) {
|
||||
return err
|
||||
}
|
||||
createmode = syscall.OPEN_EXISTING
|
||||
if fileInfo.FileAttributes&syscall.FILE_ATTRIBUTE_REPARSE_POINT == 0 {
|
||||
w.dirInfo = append(w.dirInfo, dirInfo{path, *fileInfo})
|
||||
}
|
||||
}
|
||||
|
||||
mode := uint32(syscall.GENERIC_READ | syscall.GENERIC_WRITE | winio.WRITE_DAC | winio.WRITE_OWNER | winio.ACCESS_SYSTEM_SECURITY)
|
||||
f, err = winio.OpenForBackup(path, mode, syscall.FILE_SHARE_READ, createmode)
|
||||
if err != nil {
|
||||
return makeError(err, "Failed to OpenForBackup", path)
|
||||
}
|
||||
|
||||
err = winio.SetFileBasicInfo(f, fileInfo)
|
||||
if err != nil {
|
||||
return makeError(err, "Failed to SetFileBasicInfo", path)
|
||||
}
|
||||
|
||||
w.f = f
|
||||
w.bw = winio.NewBackupFileWriter(f, true)
|
||||
f = nil
|
||||
return nil
|
||||
}
|
||||
|
||||
func (w *baseLayerWriter) AddLink(name string, target string) (err error) {
|
||||
defer func() {
|
||||
if err != nil {
|
||||
w.err = err
|
||||
}
|
||||
}()
|
||||
|
||||
err = w.closeCurrentFile()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
linkpath, err := makeLongAbsPath(filepath.Join(w.root, name))
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
linktarget, err := makeLongAbsPath(filepath.Join(w.root, target))
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
return os.Link(linktarget, linkpath)
|
||||
}
|
||||
|
||||
func (w *baseLayerWriter) Remove(name string) error {
|
||||
return errors.New("base layer cannot have tombstones")
|
||||
}
|
||||
|
||||
func (w *baseLayerWriter) Write(b []byte) (int, error) {
|
||||
n, err := w.bw.Write(b)
|
||||
if err != nil {
|
||||
w.err = err
|
||||
}
|
||||
return n, err
|
||||
}
|
||||
|
||||
func (w *baseLayerWriter) Close() error {
|
||||
err := w.closeCurrentFile()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if w.err == nil {
|
||||
// Restore the file times of all the directories, since they may have
|
||||
// been modified by creating child directories.
|
||||
err = reapplyDirectoryTimes(w.dirInfo)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
err = ProcessBaseLayer(w.root)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
if w.hasUtilityVM {
|
||||
err = ProcessUtilityVMImage(filepath.Join(w.root, "UtilityVM"))
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
}
|
||||
return w.err
|
||||
}
|
|
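The ordering constraint documented on reapplyDirectoryTimes above (input ordered parent-first, applied in reverse) means children are restored before their parents, so if touching a child bumps the parent's timestamps, the parent is restored afterwards and still ends up with the intended values. A small standalone sketch of the same reverse-order idea using only the standard library; the paths and times are illustrative, this is not hcsshim's own code:

package main

import (
    "os"
    "time"
)

// restoreDirTimes applies saved modification times to directories in reverse
// order: dirs is assumed to be ordered parent-first, as in reapplyDirectoryTimes.
func restoreDirTimes(dirs []string, times []time.Time) error {
    for i := len(dirs) - 1; i >= 0; i-- {
        if err := os.Chtimes(dirs[i], times[i], times[i]); err != nil {
            return err
        }
    }
    return nil
}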
@@ -0,0 +1,79 @@
package hcsshim

import (
    "sync"
    "syscall"
)

var (
    nextCallback    uintptr
    callbackMap     = map[uintptr]*notifcationWatcherContext{}
    callbackMapLock = sync.RWMutex{}

    notificationWatcherCallback = syscall.NewCallback(notificationWatcher)

    // Notifications for HCS_SYSTEM handles
    hcsNotificationSystemExited          hcsNotification = 0x00000001
    hcsNotificationSystemCreateCompleted hcsNotification = 0x00000002
    hcsNotificationSystemStartCompleted  hcsNotification = 0x00000003
    hcsNotificationSystemPauseCompleted  hcsNotification = 0x00000004
    hcsNotificationSystemResumeCompleted hcsNotification = 0x00000005

    // Notifications for HCS_PROCESS handles
    hcsNotificationProcessExited hcsNotification = 0x00010000

    // Common notifications
    hcsNotificationInvalid           hcsNotification = 0x00000000
    hcsNotificationServiceDisconnect hcsNotification = 0x01000000
)

type hcsNotification uint32
type notificationChannel chan error

type notifcationWatcherContext struct {
    channels notificationChannels
    handle   hcsCallback
}

type notificationChannels map[hcsNotification]notificationChannel

func newChannels() notificationChannels {
    channels := make(notificationChannels)

    channels[hcsNotificationSystemExited] = make(notificationChannel, 1)
    channels[hcsNotificationSystemCreateCompleted] = make(notificationChannel, 1)
    channels[hcsNotificationSystemStartCompleted] = make(notificationChannel, 1)
    channels[hcsNotificationSystemPauseCompleted] = make(notificationChannel, 1)
    channels[hcsNotificationSystemResumeCompleted] = make(notificationChannel, 1)
    channels[hcsNotificationProcessExited] = make(notificationChannel, 1)
    channels[hcsNotificationServiceDisconnect] = make(notificationChannel, 1)
    return channels
}

func closeChannels(channels notificationChannels) {
    close(channels[hcsNotificationSystemExited])
    close(channels[hcsNotificationSystemCreateCompleted])
    close(channels[hcsNotificationSystemStartCompleted])
    close(channels[hcsNotificationSystemPauseCompleted])
    close(channels[hcsNotificationSystemResumeCompleted])
    close(channels[hcsNotificationProcessExited])
    close(channels[hcsNotificationServiceDisconnect])
}

func notificationWatcher(notificationType hcsNotification, callbackNumber uintptr, notificationStatus uintptr, notificationData *uint16) uintptr {
    var result error
    if int32(notificationStatus) < 0 {
        result = syscall.Errno(win32FromHresult(notificationStatus))
    }

    callbackMapLock.RLock()
    context := callbackMap[callbackNumber]
    callbackMapLock.RUnlock()

    if context == nil {
        return 0
    }

    context.channels[notificationType] <- result

    return 0
}
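Each notification type gets a buffered channel of size one, so notificationWatcher can hand a single pending result back from the C callback thread without blocking. A hypothetical sketch of how a consumer might wait on one of these channels with a timeout; the package's real helper (waitForNotification) is defined elsewhere and is not shown in this change:

package hcsshim

import (
    "errors"
    "time"
)

// waitOnChannel is an illustrative helper, not part of hcsshim's API: it
// blocks until the watcher delivers a result on ch, the channel is closed
// by closeChannels, or the timeout elapses.
func waitOnChannel(ch notificationChannel, timeout time.Duration) error {
    select {
    case err, ok := <-ch:
        if !ok {
            return errors.New("notification channel closed")
        }
        return err
    case <-time.After(timeout):
        return errors.New("timed out waiting for notification")
    }
}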
@@ -0,0 +1,7 @@
package hcsshim

import "C"

// This import is needed to make the library compile as CGO because HCSSHIM
// only works with CGO due to callbacks from HCS coming back from a C thread
// which is not supported without CGO. See https://github.com/golang/go/issues/10973
@@ -0,0 +1,794 @@
package hcsshim
|
||||
|
||||
import (
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
"os"
|
||||
"sync"
|
||||
"syscall"
|
||||
"time"
|
||||
|
||||
"github.com/sirupsen/logrus"
|
||||
)
|
||||
|
||||
var (
|
||||
defaultTimeout = time.Minute * 4
|
||||
)
|
||||
|
||||
const (
|
||||
pendingUpdatesQuery = `{ "PropertyTypes" : ["PendingUpdates"]}`
|
||||
statisticsQuery = `{ "PropertyTypes" : ["Statistics"]}`
|
||||
processListQuery = `{ "PropertyTypes" : ["ProcessList"]}`
|
||||
mappedVirtualDiskQuery = `{ "PropertyTypes" : ["MappedVirtualDisk"]}`
|
||||
)
|
||||
|
||||
type container struct {
|
||||
handleLock sync.RWMutex
|
||||
handle hcsSystem
|
||||
id string
|
||||
callbackNumber uintptr
|
||||
}
|
||||
|
||||
// ContainerProperties holds the properties for a container and the processes running in that container
|
||||
type ContainerProperties struct {
|
||||
ID string `json:"Id"`
|
||||
Name string
|
||||
SystemType string
|
||||
Owner string
|
||||
SiloGUID string `json:"SiloGuid,omitempty"`
|
||||
RuntimeID string `json:"RuntimeId,omitempty"`
|
||||
IsRuntimeTemplate bool `json:",omitempty"`
|
||||
RuntimeImagePath string `json:",omitempty"`
|
||||
Stopped bool `json:",omitempty"`
|
||||
ExitType string `json:",omitempty"`
|
||||
AreUpdatesPending bool `json:",omitempty"`
|
||||
ObRoot string `json:",omitempty"`
|
||||
Statistics Statistics `json:",omitempty"`
|
||||
ProcessList []ProcessListItem `json:",omitempty"`
|
||||
MappedVirtualDiskControllers map[int]MappedVirtualDiskController `json:",omitempty"`
|
||||
}
|
||||
|
||||
// MemoryStats holds the memory statistics for a container
|
||||
type MemoryStats struct {
|
||||
UsageCommitBytes uint64 `json:"MemoryUsageCommitBytes,omitempty"`
|
||||
UsageCommitPeakBytes uint64 `json:"MemoryUsageCommitPeakBytes,omitempty"`
|
||||
UsagePrivateWorkingSetBytes uint64 `json:"MemoryUsagePrivateWorkingSetBytes,omitempty"`
|
||||
}
|
||||
|
||||
// ProcessorStats holds the processor statistics for a container
|
||||
type ProcessorStats struct {
|
||||
TotalRuntime100ns uint64 `json:",omitempty"`
|
||||
RuntimeUser100ns uint64 `json:",omitempty"`
|
||||
RuntimeKernel100ns uint64 `json:",omitempty"`
|
||||
}
|
||||
|
||||
// StorageStats holds the storage statistics for a container
|
||||
type StorageStats struct {
|
||||
ReadCountNormalized uint64 `json:",omitempty"`
|
||||
ReadSizeBytes uint64 `json:",omitempty"`
|
||||
WriteCountNormalized uint64 `json:",omitempty"`
|
||||
WriteSizeBytes uint64 `json:",omitempty"`
|
||||
}
|
||||
|
||||
// NetworkStats holds the network statistics for a container
|
||||
type NetworkStats struct {
|
||||
BytesReceived uint64 `json:",omitempty"`
|
||||
BytesSent uint64 `json:",omitempty"`
|
||||
PacketsReceived uint64 `json:",omitempty"`
|
||||
PacketsSent uint64 `json:",omitempty"`
|
||||
DroppedPacketsIncoming uint64 `json:",omitempty"`
|
||||
DroppedPacketsOutgoing uint64 `json:",omitempty"`
|
||||
EndpointId string `json:",omitempty"`
|
||||
InstanceId string `json:",omitempty"`
|
||||
}
|
||||
|
||||
// Statistics is the structure returned by a statistics call on a container
|
||||
type Statistics struct {
|
||||
Timestamp time.Time `json:",omitempty"`
|
||||
ContainerStartTime time.Time `json:",omitempty"`
|
||||
Uptime100ns uint64 `json:",omitempty"`
|
||||
Memory MemoryStats `json:",omitempty"`
|
||||
Processor ProcessorStats `json:",omitempty"`
|
||||
Storage StorageStats `json:",omitempty"`
|
||||
Network []NetworkStats `json:",omitempty"`
|
||||
}
|
||||
|
||||
// ProcessList is the structure of an item returned by a ProcessList call on a container
|
||||
type ProcessListItem struct {
|
||||
CreateTimestamp time.Time `json:",omitempty"`
|
||||
ImageName string `json:",omitempty"`
|
||||
KernelTime100ns uint64 `json:",omitempty"`
|
||||
MemoryCommitBytes uint64 `json:",omitempty"`
|
||||
MemoryWorkingSetPrivateBytes uint64 `json:",omitempty"`
|
||||
MemoryWorkingSetSharedBytes uint64 `json:",omitempty"`
|
||||
ProcessId uint32 `json:",omitempty"`
|
||||
UserTime100ns uint64 `json:",omitempty"`
|
||||
}
|
||||
|
||||
// MappedVirtualDiskController is the structure of an item returned by a MappedVirtualDiskList call on a container
|
||||
type MappedVirtualDiskController struct {
|
||||
MappedVirtualDisks map[int]MappedVirtualDisk `json:",omitempty"`
|
||||
}
|
||||
|
||||
// RequestType is the type of request supported in ModifySystem
|
||||
type RequestType string
|
||||
|
||||
// ResourceType is the type of resource supported in ModifySystem
|
||||
type ResourceType string
|
||||
|
||||
// RequestType const
|
||||
const (
|
||||
Add RequestType = "Add"
|
||||
Remove RequestType = "Remove"
|
||||
Network ResourceType = "Network"
|
||||
)
|
||||
|
||||
// ResourceModificationRequestResponse is the structure used to send request to the container to modify the system
|
||||
// Supported resource types are Network and Request Types are Add/Remove
|
||||
type ResourceModificationRequestResponse struct {
|
||||
Resource ResourceType `json:"ResourceType"`
|
||||
Data interface{} `json:"Settings"`
|
||||
Request RequestType `json:"RequestType,omitempty"`
|
||||
}
|
||||
|
||||
// createContainerAdditionalJSON is read from the environment at initialisation
|
||||
// time. It allows an environment variable to define additional JSON which
|
||||
// is merged in the CreateContainer call to HCS.
|
||||
var createContainerAdditionalJSON string
|
||||
|
||||
func init() {
|
||||
createContainerAdditionalJSON = os.Getenv("HCSSHIM_CREATECONTAINER_ADDITIONALJSON")
|
||||
}
|
||||
|
||||
// CreateContainer creates a new container with the given configuration but does not start it.
|
||||
func CreateContainer(id string, c *ContainerConfig) (Container, error) {
|
||||
return createContainerWithJSON(id, c, "")
|
||||
}
|
||||
|
||||
// CreateContainerWithJSON creates a new container with the given configuration but does not start it.
|
||||
// It is identical to CreateContainer except that optional additional JSON can be merged before passing to HCS.
|
||||
func CreateContainerWithJSON(id string, c *ContainerConfig, additionalJSON string) (Container, error) {
|
||||
return createContainerWithJSON(id, c, additionalJSON)
|
||||
}
|
||||
|
||||
func createContainerWithJSON(id string, c *ContainerConfig, additionalJSON string) (Container, error) {
|
||||
operation := "CreateContainer"
|
||||
title := "HCSShim::" + operation
|
||||
|
||||
container := &container{
|
||||
id: id,
|
||||
}
|
||||
|
||||
configurationb, err := json.Marshal(c)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
configuration := string(configurationb)
|
||||
logrus.Debugf(title+" id=%s config=%s", id, configuration)
|
||||
|
||||
// Merge any additional JSON. Priority is given to what is passed in explicitly,
|
||||
// falling back to what's set in the environment.
|
||||
if additionalJSON == "" && createContainerAdditionalJSON != "" {
|
||||
additionalJSON = createContainerAdditionalJSON
|
||||
}
|
||||
if additionalJSON != "" {
|
||||
configurationMap := map[string]interface{}{}
|
||||
if err := json.Unmarshal([]byte(configuration), &configurationMap); err != nil {
|
||||
return nil, fmt.Errorf("failed to unmarshal %s: %s", configuration, err)
|
||||
}
|
||||
|
||||
additionalMap := map[string]interface{}{}
|
||||
if err := json.Unmarshal([]byte(additionalJSON), &additionalMap); err != nil {
|
||||
return nil, fmt.Errorf("failed to unmarshal %s: %s", additionalJSON, err)
|
||||
}
|
||||
|
||||
mergedMap := mergeMaps(additionalMap, configurationMap)
|
||||
mergedJSON, err := json.Marshal(mergedMap)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("failed to marshal merged configuration map %+v: %s", mergedMap, err)
|
||||
}
|
||||
|
||||
configuration = string(mergedJSON)
|
||||
logrus.Debugf(title+" id=%s merged config=%s", id, configuration)
|
||||
}
|
||||
|
||||
var (
|
||||
resultp *uint16
|
||||
identity syscall.Handle
|
||||
)
|
||||
createError := hcsCreateComputeSystem(id, configuration, identity, &container.handle, &resultp)
|
||||
|
||||
if createError == nil || IsPending(createError) {
|
||||
if err := container.registerCallback(); err != nil {
|
||||
return nil, makeContainerError(container, operation, "", err)
|
||||
}
|
||||
}
|
||||
|
||||
err = processAsyncHcsResult(createError, resultp, container.callbackNumber, hcsNotificationSystemCreateCompleted, &defaultTimeout)
|
||||
if err != nil {
|
||||
return nil, makeContainerError(container, operation, configuration, err)
|
||||
}
|
||||
|
||||
logrus.Debugf(title+" succeeded id=%s handle=%d", id, container.handle)
|
||||
return container, nil
|
||||
}
|
||||
|
||||
// mergeMaps recursively merges the contents of ToMap into fromMap and returns
// fromMap. Where both maps define the same key, the value from fromMap wins.
|
||||
// From http://stackoverflow.com/questions/40491438/merging-two-json-strings-in-golang
|
||||
func mergeMaps(fromMap, ToMap interface{}) interface{} {
|
||||
switch fromMap := fromMap.(type) {
|
||||
case map[string]interface{}:
|
||||
ToMap, ok := ToMap.(map[string]interface{})
|
||||
if !ok {
|
||||
return fromMap
|
||||
}
|
||||
for keyToMap, valueToMap := range ToMap {
|
||||
if valueFromMap, ok := fromMap[keyToMap]; ok {
|
||||
fromMap[keyToMap] = mergeMaps(valueFromMap, valueToMap)
|
||||
} else {
|
||||
fromMap[keyToMap] = valueToMap
|
||||
}
|
||||
}
|
||||
case nil:
|
||||
// merge(nil, map[string]interface{...}) -> map[string]interface{...}
|
||||
ToMap, ok := ToMap.(map[string]interface{})
|
||||
if ok {
|
||||
return ToMap
|
||||
}
|
||||
}
|
||||
return fromMap
|
||||
}
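A short worked example of the merge priority this gives createContainerWithJSON: keys present in both documents keep the value from the additional JSON (passed as fromMap), while keys only present in the marshalled ContainerConfig are carried over. Illustrative only, written as if inside the package:

func exampleMergePriority() {
    base := map[string]interface{}{"Name": "demo", "Owner": "docker"}
    extra := map[string]interface{}{"Owner": "custom-owner"}

    merged := mergeMaps(extra, base).(map[string]interface{})

    // merged keeps Name from the base config and Owner from the additional
    // JSON: map[Name:demo Owner:custom-owner]
    fmt.Println(merged)
}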
|
||||
|
||||
// OpenContainer opens an existing container by ID.
|
||||
func OpenContainer(id string) (Container, error) {
|
||||
operation := "OpenContainer"
|
||||
title := "HCSShim::" + operation
|
||||
logrus.Debugf(title+" id=%s", id)
|
||||
|
||||
container := &container{
|
||||
id: id,
|
||||
}
|
||||
|
||||
var (
|
||||
handle hcsSystem
|
||||
resultp *uint16
|
||||
)
|
||||
err := hcsOpenComputeSystem(id, &handle, &resultp)
|
||||
err = processHcsResult(err, resultp)
|
||||
if err != nil {
|
||||
return nil, makeContainerError(container, operation, "", err)
|
||||
}
|
||||
|
||||
container.handle = handle
|
||||
|
||||
if err := container.registerCallback(); err != nil {
|
||||
return nil, makeContainerError(container, operation, "", err)
|
||||
}
|
||||
|
||||
logrus.Debugf(title+" succeeded id=%s handle=%d", id, handle)
|
||||
return container, nil
|
||||
}
|
||||
|
||||
// GetContainers gets a list of the containers on the system that match the query
|
||||
func GetContainers(q ComputeSystemQuery) ([]ContainerProperties, error) {
|
||||
operation := "GetContainers"
|
||||
title := "HCSShim::" + operation
|
||||
|
||||
queryb, err := json.Marshal(q)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
query := string(queryb)
|
||||
logrus.Debugf(title+" query=%s", query)
|
||||
|
||||
var (
|
||||
resultp *uint16
|
||||
computeSystemsp *uint16
|
||||
)
|
||||
err = hcsEnumerateComputeSystems(query, &computeSystemsp, &resultp)
|
||||
err = processHcsResult(err, resultp)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
if computeSystemsp == nil {
|
||||
return nil, ErrUnexpectedValue
|
||||
}
|
||||
computeSystemsRaw := convertAndFreeCoTaskMemBytes(computeSystemsp)
|
||||
computeSystems := []ContainerProperties{}
|
||||
if err := json.Unmarshal(computeSystemsRaw, &computeSystems); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
logrus.Debugf(title + " succeeded")
|
||||
return computeSystems, nil
|
||||
}
|
||||
|
||||
// Start synchronously starts the container.
|
||||
func (container *container) Start() error {
|
||||
container.handleLock.RLock()
|
||||
defer container.handleLock.RUnlock()
|
||||
operation := "Start"
|
||||
title := "HCSShim::Container::" + operation
|
||||
logrus.Debugf(title+" id=%s", container.id)
|
||||
|
||||
if container.handle == 0 {
|
||||
return makeContainerError(container, operation, "", ErrAlreadyClosed)
|
||||
}
|
||||
|
||||
var resultp *uint16
|
||||
err := hcsStartComputeSystem(container.handle, "", &resultp)
|
||||
err = processAsyncHcsResult(err, resultp, container.callbackNumber, hcsNotificationSystemStartCompleted, &defaultTimeout)
|
||||
if err != nil {
|
||||
return makeContainerError(container, operation, "", err)
|
||||
}
|
||||
|
||||
logrus.Debugf(title+" succeeded id=%s", container.id)
|
||||
return nil
|
||||
}
|
||||
|
||||
// Shutdown requests a container shutdown, if IsPending() on the error returned is true,
|
||||
// it may not actually be shut down until Wait() succeeds.
|
||||
func (container *container) Shutdown() error {
|
||||
container.handleLock.RLock()
|
||||
defer container.handleLock.RUnlock()
|
||||
operation := "Shutdown"
|
||||
title := "HCSShim::Container::" + operation
|
||||
logrus.Debugf(title+" id=%s", container.id)
|
||||
|
||||
if container.handle == 0 {
|
||||
return makeContainerError(container, operation, "", ErrAlreadyClosed)
|
||||
}
|
||||
|
||||
var resultp *uint16
|
||||
err := hcsShutdownComputeSystem(container.handle, "", &resultp)
|
||||
err = processHcsResult(err, resultp)
|
||||
if err != nil {
|
||||
return makeContainerError(container, operation, "", err)
|
||||
}
|
||||
|
||||
logrus.Debugf(title+" succeeded id=%s", container.id)
|
||||
return nil
|
||||
}
|
||||
|
||||
// Terminate requests a container terminate, if IsPending() on the error returned is true,
|
||||
// it may not actually be shut down until Wait() succeeds.
|
||||
func (container *container) Terminate() error {
|
||||
container.handleLock.RLock()
|
||||
defer container.handleLock.RUnlock()
|
||||
operation := "Terminate"
|
||||
title := "HCSShim::Container::" + operation
|
||||
logrus.Debugf(title+" id=%s", container.id)
|
||||
|
||||
if container.handle == 0 {
|
||||
return makeContainerError(container, operation, "", ErrAlreadyClosed)
|
||||
}
|
||||
|
||||
var resultp *uint16
|
||||
err := hcsTerminateComputeSystem(container.handle, "", &resultp)
|
||||
err = processHcsResult(err, resultp)
|
||||
if err != nil {
|
||||
return makeContainerError(container, operation, "", err)
|
||||
}
|
||||
|
||||
logrus.Debugf(title+" succeeded id=%s", container.id)
|
||||
return nil
|
||||
}
|
||||
|
||||
// Wait synchronously waits for the container to shutdown or terminate.
|
||||
func (container *container) Wait() error {
|
||||
operation := "Wait"
|
||||
title := "HCSShim::Container::" + operation
|
||||
logrus.Debugf(title+" id=%s", container.id)
|
||||
|
||||
err := waitForNotification(container.callbackNumber, hcsNotificationSystemExited, nil)
|
||||
if err != nil {
|
||||
return makeContainerError(container, operation, "", err)
|
||||
}
|
||||
|
||||
logrus.Debugf(title+" succeeded id=%s", container.id)
|
||||
return nil
|
||||
}
|
||||
|
||||
// WaitTimeout synchronously waits for the container to terminate or the duration to elapse.
|
||||
// If the timeout expires, IsTimeout(err) == true
|
||||
func (container *container) WaitTimeout(timeout time.Duration) error {
|
||||
operation := "WaitTimeout"
|
||||
title := "HCSShim::Container::" + operation
|
||||
logrus.Debugf(title+" id=%s", container.id)
|
||||
|
||||
err := waitForNotification(container.callbackNumber, hcsNotificationSystemExited, &timeout)
|
||||
if err != nil {
|
||||
return makeContainerError(container, operation, "", err)
|
||||
}
|
||||
|
||||
logrus.Debugf(title+" succeeded id=%s", container.id)
|
||||
return nil
|
||||
}
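Taken together, the functions above describe the basic lifecycle a caller drives: create, start, request shutdown, then wait for the exit notification. A minimal sketch of that flow, assuming a ContainerConfig already populated by the caller:

package main

import "github.com/Microsoft/hcsshim"

// runAndWait is an illustrative lifecycle sketch, not hcsshim API. Shutdown
// may complete asynchronously; IsPending signals that Wait must be used to
// observe the actual exit.
func runAndWait(id string, cfg *hcsshim.ContainerConfig) error {
    c, err := hcsshim.CreateContainer(id, cfg)
    if err != nil {
        return err
    }
    defer c.Close()

    if err := c.Start(); err != nil {
        return err
    }

    if err := c.Shutdown(); err != nil && !hcsshim.IsPending(err) {
        return err
    }
    return c.Wait()
}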
|
||||
|
||||
func (container *container) properties(query string) (*ContainerProperties, error) {
|
||||
var (
|
||||
resultp *uint16
|
||||
propertiesp *uint16
|
||||
)
|
||||
err := hcsGetComputeSystemProperties(container.handle, query, &propertiesp, &resultp)
|
||||
err = processHcsResult(err, resultp)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
if propertiesp == nil {
|
||||
return nil, ErrUnexpectedValue
|
||||
}
|
||||
propertiesRaw := convertAndFreeCoTaskMemBytes(propertiesp)
|
||||
properties := &ContainerProperties{}
|
||||
if err := json.Unmarshal(propertiesRaw, properties); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return properties, nil
|
||||
}
|
||||
|
||||
// HasPendingUpdates returns true if the container has updates pending to install
|
||||
func (container *container) HasPendingUpdates() (bool, error) {
|
||||
container.handleLock.RLock()
|
||||
defer container.handleLock.RUnlock()
|
||||
operation := "HasPendingUpdates"
|
||||
title := "HCSShim::Container::" + operation
|
||||
logrus.Debugf(title+" id=%s", container.id)
|
||||
|
||||
if container.handle == 0 {
|
||||
return false, makeContainerError(container, operation, "", ErrAlreadyClosed)
|
||||
}
|
||||
|
||||
properties, err := container.properties(pendingUpdatesQuery)
|
||||
if err != nil {
|
||||
return false, makeContainerError(container, operation, "", err)
|
||||
}
|
||||
|
||||
logrus.Debugf(title+" succeeded id=%s", container.id)
|
||||
return properties.AreUpdatesPending, nil
|
||||
}
|
||||
|
||||
// Statistics returns statistics for the container
|
||||
func (container *container) Statistics() (Statistics, error) {
|
||||
container.handleLock.RLock()
|
||||
defer container.handleLock.RUnlock()
|
||||
operation := "Statistics"
|
||||
title := "HCSShim::Container::" + operation
|
||||
logrus.Debugf(title+" id=%s", container.id)
|
||||
|
||||
if container.handle == 0 {
|
||||
return Statistics{}, makeContainerError(container, operation, "", ErrAlreadyClosed)
|
||||
}
|
||||
|
||||
properties, err := container.properties(statisticsQuery)
|
||||
if err != nil {
|
||||
return Statistics{}, makeContainerError(container, operation, "", err)
|
||||
}
|
||||
|
||||
logrus.Debugf(title+" succeeded id=%s", container.id)
|
||||
return properties.Statistics, nil
|
||||
}
|
||||
|
||||
// ProcessList returns an array of ProcessListItems for the container
|
||||
func (container *container) ProcessList() ([]ProcessListItem, error) {
|
||||
container.handleLock.RLock()
|
||||
defer container.handleLock.RUnlock()
|
||||
operation := "ProcessList"
|
||||
title := "HCSShim::Container::" + operation
|
||||
logrus.Debugf(title+" id=%s", container.id)
|
||||
|
||||
if container.handle == 0 {
|
||||
return nil, makeContainerError(container, operation, "", ErrAlreadyClosed)
|
||||
}
|
||||
|
||||
properties, err := container.properties(processListQuery)
|
||||
if err != nil {
|
||||
return nil, makeContainerError(container, operation, "", err)
|
||||
}
|
||||
|
||||
logrus.Debugf(title+" succeeded id=%s", container.id)
|
||||
return properties.ProcessList, nil
|
||||
}
|
||||
|
||||
// MappedVirtualDisks returns a map of the controllers and the disks mapped
|
||||
// to a container.
|
||||
//
|
||||
// Example of JSON returned by the query.
|
||||
//{
|
||||
// "Id":"1126e8d7d279c707a666972a15976371d365eaf622c02cea2c442b84f6f550a3_svm",
|
||||
// "SystemType":"Container",
|
||||
// "RuntimeOsType":"Linux",
|
||||
// "RuntimeId":"00000000-0000-0000-0000-000000000000",
|
||||
// "State":"Running",
|
||||
// "MappedVirtualDiskControllers":{
|
||||
// "0":{
|
||||
// "MappedVirtualDisks":{
|
||||
// "2":{
|
||||
// "HostPath":"C:\\lcow\\lcow\\scratch\\1126e8d7d279c707a666972a15976371d365eaf622c02cea2c442b84f6f550a3.vhdx",
|
||||
// "ContainerPath":"/mnt/gcs/LinuxServiceVM/scratch",
|
||||
// "Lun":2,
|
||||
// "CreateInUtilityVM":true
|
||||
// },
|
||||
// "3":{
|
||||
// "HostPath":"C:\\lcow\\lcow\\1126e8d7d279c707a666972a15976371d365eaf622c02cea2c442b84f6f550a3\\sandbox.vhdx",
|
||||
// "Lun":3,
|
||||
// "CreateInUtilityVM":true,
|
||||
// "AttachOnly":true
|
||||
// }
|
||||
// }
|
||||
// }
|
||||
// }
|
||||
//}
|
||||
func (container *container) MappedVirtualDisks() (map[int]MappedVirtualDiskController, error) {
|
||||
container.handleLock.RLock()
|
||||
defer container.handleLock.RUnlock()
|
||||
operation := "MappedVirtualDiskList"
|
||||
title := "HCSShim::Container::" + operation
|
||||
logrus.Debugf(title+" id=%s", container.id)
|
||||
|
||||
if container.handle == 0 {
|
||||
return nil, makeContainerError(container, operation, "", ErrAlreadyClosed)
|
||||
}
|
||||
|
||||
properties, err := container.properties(mappedVirtualDiskQuery)
|
||||
if err != nil {
|
||||
return nil, makeContainerError(container, operation, "", err)
|
||||
}
|
||||
|
||||
logrus.Debugf(title+" succeeded id=%s", container.id)
|
||||
return properties.MappedVirtualDiskControllers, nil
|
||||
}
|
||||
|
||||
// Pause pauses the execution of the container. This feature is not enabled in TP5.
|
||||
func (container *container) Pause() error {
|
||||
container.handleLock.RLock()
|
||||
defer container.handleLock.RUnlock()
|
||||
operation := "Pause"
|
||||
title := "HCSShim::Container::" + operation
|
||||
logrus.Debugf(title+" id=%s", container.id)
|
||||
|
||||
if container.handle == 0 {
|
||||
return makeContainerError(container, operation, "", ErrAlreadyClosed)
|
||||
}
|
||||
|
||||
var resultp *uint16
|
||||
err := hcsPauseComputeSystem(container.handle, "", &resultp)
|
||||
err = processAsyncHcsResult(err, resultp, container.callbackNumber, hcsNotificationSystemPauseCompleted, &defaultTimeout)
|
||||
if err != nil {
|
||||
return makeContainerError(container, operation, "", err)
|
||||
}
|
||||
|
||||
logrus.Debugf(title+" succeeded id=%s", container.id)
|
||||
return nil
|
||||
}
|
||||
|
||||
// Resume resumes the execution of the container. This feature is not enabled in TP5.
|
||||
func (container *container) Resume() error {
|
||||
container.handleLock.RLock()
|
||||
defer container.handleLock.RUnlock()
|
||||
operation := "Resume"
|
||||
title := "HCSShim::Container::" + operation
|
||||
logrus.Debugf(title+" id=%s", container.id)
|
||||
|
||||
if container.handle == 0 {
|
||||
return makeContainerError(container, operation, "", ErrAlreadyClosed)
|
||||
}
|
||||
|
||||
var resultp *uint16
|
||||
err := hcsResumeComputeSystem(container.handle, "", &resultp)
|
||||
err = processAsyncHcsResult(err, resultp, container.callbackNumber, hcsNotificationSystemResumeCompleted, &defaultTimeout)
|
||||
if err != nil {
|
||||
return makeContainerError(container, operation, "", err)
|
||||
}
|
||||
|
||||
logrus.Debugf(title+" succeeded id=%s", container.id)
|
||||
return nil
|
||||
}
|
||||
|
||||
// CreateProcess launches a new process within the container.
|
||||
func (container *container) CreateProcess(c *ProcessConfig) (Process, error) {
|
||||
container.handleLock.RLock()
|
||||
defer container.handleLock.RUnlock()
|
||||
operation := "CreateProcess"
|
||||
title := "HCSShim::Container::" + operation
|
||||
var (
|
||||
processInfo hcsProcessInformation
|
||||
processHandle hcsProcess
|
||||
resultp *uint16
|
||||
)
|
||||
|
||||
if container.handle == 0 {
|
||||
return nil, makeContainerError(container, operation, "", ErrAlreadyClosed)
|
||||
}
|
||||
|
||||
// If we are not emulating a console, ignore any console size passed to us
|
||||
if !c.EmulateConsole {
|
||||
c.ConsoleSize[0] = 0
|
||||
c.ConsoleSize[1] = 0
|
||||
}
|
||||
|
||||
configurationb, err := json.Marshal(c)
|
||||
if err != nil {
|
||||
return nil, makeContainerError(container, operation, "", err)
|
||||
}
|
||||
|
||||
configuration := string(configurationb)
|
||||
logrus.Debugf(title+" id=%s config=%s", container.id, configuration)
|
||||
|
||||
err = hcsCreateProcess(container.handle, configuration, &processInfo, &processHandle, &resultp)
|
||||
err = processHcsResult(err, resultp)
|
||||
if err != nil {
|
||||
return nil, makeContainerError(container, operation, configuration, err)
|
||||
}
|
||||
|
||||
process := &process{
|
||||
handle: processHandle,
|
||||
processID: int(processInfo.ProcessId),
|
||||
container: container,
|
||||
cachedPipes: &cachedPipes{
|
||||
stdIn: processInfo.StdInput,
|
||||
stdOut: processInfo.StdOutput,
|
||||
stdErr: processInfo.StdError,
|
||||
},
|
||||
}
|
||||
|
||||
if err := process.registerCallback(); err != nil {
|
||||
return nil, makeContainerError(container, operation, "", err)
|
||||
}
|
||||
|
||||
logrus.Debugf(title+" succeeded id=%s processid=%d", container.id, process.processID)
|
||||
return process, nil
|
||||
}
|
||||
|
||||
// OpenProcess gets an interface to an existing process within the container.
|
||||
func (container *container) OpenProcess(pid int) (Process, error) {
|
||||
container.handleLock.RLock()
|
||||
defer container.handleLock.RUnlock()
|
||||
operation := "OpenProcess"
|
||||
title := "HCSShim::Container::" + operation
|
||||
logrus.Debugf(title+" id=%s, processid=%d", container.id, pid)
|
||||
var (
|
||||
processHandle hcsProcess
|
||||
resultp *uint16
|
||||
)
|
||||
|
||||
if container.handle == 0 {
|
||||
return nil, makeContainerError(container, operation, "", ErrAlreadyClosed)
|
||||
}
|
||||
|
||||
err := hcsOpenProcess(container.handle, uint32(pid), &processHandle, &resultp)
|
||||
err = processHcsResult(err, resultp)
|
||||
if err != nil {
|
||||
return nil, makeContainerError(container, operation, "", err)
|
||||
}
|
||||
|
||||
process := &process{
|
||||
handle: processHandle,
|
||||
processID: pid,
|
||||
container: container,
|
||||
}
|
||||
|
||||
if err := process.registerCallback(); err != nil {
|
||||
return nil, makeContainerError(container, operation, "", err)
|
||||
}
|
||||
|
||||
logrus.Debugf(title+" succeeded id=%s processid=%d", container.id, process.processID)
|
||||
return process, nil
|
||||
}
|
||||
|
||||
// Close cleans up any state associated with the container but does not terminate or wait for it.
|
||||
func (container *container) Close() error {
|
||||
container.handleLock.Lock()
|
||||
defer container.handleLock.Unlock()
|
||||
operation := "Close"
|
||||
title := "HCSShim::Container::" + operation
|
||||
logrus.Debugf(title+" id=%s", container.id)
|
||||
|
||||
// Don't double free this
|
||||
if container.handle == 0 {
|
||||
return nil
|
||||
}
|
||||
|
||||
if err := container.unregisterCallback(); err != nil {
|
||||
return makeContainerError(container, operation, "", err)
|
||||
}
|
||||
|
||||
if err := hcsCloseComputeSystem(container.handle); err != nil {
|
||||
return makeContainerError(container, operation, "", err)
|
||||
}
|
||||
|
||||
container.handle = 0
|
||||
|
||||
logrus.Debugf(title+" succeeded id=%s", container.id)
|
||||
return nil
|
||||
}
|
||||
|
||||
func (container *container) registerCallback() error {
|
||||
context := ¬ifcationWatcherContext{
|
||||
channels: newChannels(),
|
||||
}
|
||||
|
||||
callbackMapLock.Lock()
|
||||
callbackNumber := nextCallback
|
||||
nextCallback++
|
||||
callbackMap[callbackNumber] = context
|
||||
callbackMapLock.Unlock()
|
||||
|
||||
var callbackHandle hcsCallback
|
||||
err := hcsRegisterComputeSystemCallback(container.handle, notificationWatcherCallback, callbackNumber, &callbackHandle)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
context.handle = callbackHandle
|
||||
container.callbackNumber = callbackNumber
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func (container *container) unregisterCallback() error {
|
||||
callbackNumber := container.callbackNumber
|
||||
|
||||
callbackMapLock.RLock()
|
||||
context := callbackMap[callbackNumber]
|
||||
callbackMapLock.RUnlock()
|
||||
|
||||
if context == nil {
|
||||
return nil
|
||||
}
|
||||
|
||||
handle := context.handle
|
||||
|
||||
if handle == 0 {
|
||||
return nil
|
||||
}
|
||||
|
||||
// hcsUnregisterComputeSystemCallback has its own synchronization
|
||||
// to wait for all callbacks to complete. We must NOT hold the callbackMapLock.
|
||||
err := hcsUnregisterComputeSystemCallback(handle)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
closeChannels(context.channels)
|
||||
|
||||
callbackMapLock.Lock()
|
||||
callbackMap[callbackNumber] = nil
|
||||
callbackMapLock.Unlock()
|
||||
|
||||
handle = 0
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// Modify sends a resource modification request for this container to HCS
|
||||
func (container *container) Modify(config *ResourceModificationRequestResponse) error {
|
||||
container.handleLock.RLock()
|
||||
defer container.handleLock.RUnlock()
|
||||
operation := "Modify"
|
||||
title := "HCSShim::Container::" + operation
|
||||
|
||||
if container.handle == 0 {
|
||||
return makeContainerError(container, operation, "", ErrAlreadyClosed)
|
||||
}
|
||||
|
||||
requestJSON, err := json.Marshal(config)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
requestString := string(requestJSON)
|
||||
logrus.Debugf(title+" id=%s request=%s", container.id, requestString)
|
||||
|
||||
var resultp *uint16
|
||||
err = hcsModifyComputeSystem(container.handle, requestString, &resultp)
|
||||
err = processHcsResult(err, resultp)
|
||||
if err != nil {
|
||||
return makeContainerError(container, operation, "", err)
|
||||
}
|
||||
logrus.Debugf(title+" succeeded id=%s", container.id)
|
||||
return nil
|
||||
}
|
|
@@ -0,0 +1,27 @@
package hcsshim

import "github.com/sirupsen/logrus"

// CreateLayer creates a new, empty, read-only layer on the filesystem based on
// the parent layer provided.
func CreateLayer(info DriverInfo, id, parent string) error {
    title := "hcsshim::CreateLayer "
    logrus.Debugf(title+"Flavour %d ID %s parent %s", info.Flavour, id, parent)

    // Convert info to API calling convention
    infop, err := convertDriverInfo(info)
    if err != nil {
        logrus.Error(err)
        return err
    }

    err = createLayer(&infop, id, parent)
    if err != nil {
        err = makeErrorf(err, title, "id=%s parent=%s flavour=%d", id, parent, info.Flavour)
        logrus.Error(err)
        return err
    }

    logrus.Debugf(title+" - succeeded id=%s parent=%s flavour=%d", id, parent, info.Flavour)
    return nil
}
@@ -0,0 +1,35 @@
package hcsshim

import "github.com/sirupsen/logrus"

// CreateSandboxLayer creates and populates a new read-write layer for use by a container.
// This requires both the id of the direct parent layer, as well as the full list
// of paths to all parent layers up to the base (and including the direct parent
// whose id was provided).
func CreateSandboxLayer(info DriverInfo, layerId, parentId string, parentLayerPaths []string) error {
    title := "hcsshim::CreateSandboxLayer "
    logrus.Debugf(title+"layerId %s parentId %s", layerId, parentId)

    // Generate layer descriptors
    layers, err := layerPathsToDescriptors(parentLayerPaths)
    if err != nil {
        return err
    }

    // Convert info to API calling convention
    infop, err := convertDriverInfo(info)
    if err != nil {
        logrus.Error(err)
        return err
    }

    err = createSandboxLayer(&infop, layerId, parentId, layers)
    if err != nil {
        err = makeErrorf(err, title, "layerId=%s parentId=%s", layerId, parentId)
        logrus.Error(err)
        return err
    }

    logrus.Debugf(title+"- succeeded layerId=%s parentId=%s", layerId, parentId)
    return nil
}
@@ -0,0 +1,26 @@
package hcsshim

import "github.com/sirupsen/logrus"

// DeactivateLayer will dismount a layer that was mounted via ActivateLayer.
func DeactivateLayer(info DriverInfo, id string) error {
    title := "hcsshim::DeactivateLayer "
    logrus.Debugf(title+"Flavour %d ID %s", info.Flavour, id)

    // Convert info to API calling convention
    infop, err := convertDriverInfo(info)
    if err != nil {
        logrus.Error(err)
        return err
    }

    err = deactivateLayer(&infop, id)
    if err != nil {
        err = makeErrorf(err, title, "id=%s flavour=%d", id, info.Flavour)
        logrus.Error(err)
        return err
    }

    logrus.Debugf(title+"succeeded flavour=%d id=%s", info.Flavour, id)
    return nil
}
@@ -0,0 +1,27 @@
package hcsshim

import "github.com/sirupsen/logrus"

// DestroyLayer will remove the on-disk files representing the layer with the given
// id, including that layer's containing folder, if any.
func DestroyLayer(info DriverInfo, id string) error {
    title := "hcsshim::DestroyLayer "
    logrus.Debugf(title+"Flavour %d ID %s", info.Flavour, id)

    // Convert info to API calling convention
    infop, err := convertDriverInfo(info)
    if err != nil {
        logrus.Error(err)
        return err
    }

    err = destroyLayer(&infop, id)
    if err != nil {
        err = makeErrorf(err, title, "id=%s flavour=%d", id, info.Flavour)
        logrus.Error(err)
        return err
    }

    logrus.Debugf(title+"succeeded flavour=%d id=%s", info.Flavour, id)
    return nil
}
@@ -0,0 +1,239 @@
package hcsshim
|
||||
|
||||
import (
|
||||
"errors"
|
||||
"fmt"
|
||||
"syscall"
|
||||
)
|
||||
|
||||
var (
|
||||
// ErrComputeSystemDoesNotExist is an error encountered when the container being operated on no longer exists
|
||||
ErrComputeSystemDoesNotExist = syscall.Errno(0xc037010e)
|
||||
|
||||
// ErrElementNotFound is an error encountered when the object being referenced does not exist
|
||||
ErrElementNotFound = syscall.Errno(0x490)
|
||||
|
||||
// ErrNotSupported is an error encountered when the requested operation is not supported
|
||||
ErrNotSupported = syscall.Errno(0x32)
|
||||
|
||||
// ErrInvalidData is an error encountered when the request being sent to hcs is invalid/unsupported
|
||||
// decimal -2147024883 / hex 0x8007000d
|
||||
ErrInvalidData = syscall.Errno(0xd)
|
||||
|
||||
// ErrHandleClose is an error encountered when the handle generating the notification being waited on has been closed
|
||||
ErrHandleClose = errors.New("hcsshim: the handle generating this notification has been closed")
|
||||
|
||||
// ErrAlreadyClosed is an error encountered when using a handle that has been closed by the Close method
|
||||
ErrAlreadyClosed = errors.New("hcsshim: the handle has already been closed")
|
||||
|
||||
// ErrInvalidNotificationType is an error encountered when an invalid notification type is used
|
||||
ErrInvalidNotificationType = errors.New("hcsshim: invalid notification type")
|
||||
|
||||
// ErrInvalidProcessState is an error encountered when the process is not in a valid state for the requested operation
|
||||
ErrInvalidProcessState = errors.New("the process is in an invalid state for the attempted operation")
|
||||
|
||||
// ErrTimeout is an error encountered when waiting on a notification times out
|
||||
ErrTimeout = errors.New("hcsshim: timeout waiting for notification")
|
||||
|
||||
// ErrUnexpectedContainerExit is the error encountered when a container exits while waiting for
|
||||
// a different expected notification
|
||||
ErrUnexpectedContainerExit = errors.New("unexpected container exit")
|
||||
|
||||
// ErrUnexpectedProcessAbort is the error encountered when communication with the compute service
|
||||
// is lost while waiting for a notification
|
||||
ErrUnexpectedProcessAbort = errors.New("lost communication with compute service")
|
||||
|
||||
// ErrUnexpectedValue is an error encountered when hcs returns an invalid value
|
||||
ErrUnexpectedValue = errors.New("unexpected value returned from hcs")
|
||||
|
||||
// ErrVmcomputeAlreadyStopped is an error encountered when a shutdown or terminate request is made on a stopped container
|
||||
ErrVmcomputeAlreadyStopped = syscall.Errno(0xc0370110)
|
||||
|
||||
// ErrVmcomputeOperationPending is an error encountered when the operation is being completed asynchronously
|
||||
ErrVmcomputeOperationPending = syscall.Errno(0xC0370103)
|
||||
|
||||
// ErrVmcomputeOperationInvalidState is an error encountered when the compute system is not in a valid state for the requested operation
|
||||
ErrVmcomputeOperationInvalidState = syscall.Errno(0xc0370105)
|
||||
|
||||
// ErrProcNotFound is an error encountered when the process cannot be found
|
||||
ErrProcNotFound = syscall.Errno(0x7f)
|
||||
|
||||
// ErrVmcomputeOperationAccessIsDenied is an error which can be encountered when enumerating compute systems in RS1/RS2
|
||||
// builds when the underlying silo might be in the process of terminating. HCS was fixed in RS3.
|
||||
ErrVmcomputeOperationAccessIsDenied = syscall.Errno(0x5)
|
||||
|
||||
// ErrVmcomputeInvalidJSON is an error encountered when the compute system does not support/understand the messages sent by management
|
||||
ErrVmcomputeInvalidJSON = syscall.Errno(0xc037010d)
|
||||
|
||||
// ErrVmcomputeUnknownMessage is an error encountered when the guest compute system doesn't support the message
|
||||
ErrVmcomputeUnknownMessage = syscall.Errno(0xc037010b)
|
||||
|
||||
// ErrPlatformNotSupported is an error encountered when hcs doesn't support the request
|
||||
ErrPlatformNotSupported = errors.New("unsupported platform request")
|
||||
)
|
||||
|
||||
// ProcessError is an error encountered in HCS during an operation on a Process object
|
||||
type ProcessError struct {
|
||||
Process *process
|
||||
Operation string
|
||||
ExtraInfo string
|
||||
Err error
|
||||
}
|
||||
|
||||
// ContainerError is an error encountered in HCS during an operation on a Container object
|
||||
type ContainerError struct {
|
||||
Container *container
|
||||
Operation string
|
||||
ExtraInfo string
|
||||
Err error
|
||||
}
|
||||
|
||||
func (e *ContainerError) Error() string {
|
||||
if e == nil {
|
||||
return "<nil>"
|
||||
}
|
||||
|
||||
if e.Container == nil {
|
||||
return "unexpected nil container for error: " + e.Err.Error()
|
||||
}
|
||||
|
||||
s := "container " + e.Container.id
|
||||
|
||||
if e.Operation != "" {
|
||||
s += " encountered an error during " + e.Operation
|
||||
}
|
||||
|
||||
switch e.Err.(type) {
|
||||
case nil:
|
||||
break
|
||||
case syscall.Errno:
|
||||
s += fmt.Sprintf(": failure in a Windows system call: %s (0x%x)", e.Err, win32FromError(e.Err))
|
||||
default:
|
||||
s += fmt.Sprintf(": %s", e.Err.Error())
|
||||
}
|
||||
|
||||
if e.ExtraInfo != "" {
|
||||
s += " extra info: " + e.ExtraInfo
|
||||
}
|
||||
|
||||
return s
|
||||
}
|
||||
|
||||
func makeContainerError(container *container, operation string, extraInfo string, err error) error {
|
||||
// Don't double wrap errors
|
||||
if _, ok := err.(*ContainerError); ok {
|
||||
return err
|
||||
}
|
||||
containerError := &ContainerError{Container: container, Operation: operation, ExtraInfo: extraInfo, Err: err}
|
||||
return containerError
|
||||
}
|
||||
|
||||
func (e *ProcessError) Error() string {
|
||||
if e == nil {
|
||||
return "<nil>"
|
||||
}
|
||||
|
||||
if e.Process == nil {
|
||||
return "Unexpected nil process for error: " + e.Err.Error()
|
||||
}
|
||||
|
||||
s := fmt.Sprintf("process %d", e.Process.processID)
|
||||
|
||||
if e.Process.container != nil {
|
||||
s += " in container " + e.Process.container.id
|
||||
}
|
||||
|
||||
if e.Operation != "" {
|
||||
s += " encountered an error during " + e.Operation
|
||||
}
|
||||
|
||||
switch e.Err.(type) {
|
||||
case nil:
|
||||
break
|
||||
case syscall.Errno:
|
||||
s += fmt.Sprintf(": failure in a Windows system call: %s (0x%x)", e.Err, win32FromError(e.Err))
|
||||
default:
|
||||
s += fmt.Sprintf(": %s", e.Err.Error())
|
||||
}
|
||||
|
||||
return s
|
||||
}
|
||||
|
||||
func makeProcessError(process *process, operation string, extraInfo string, err error) error {
|
||||
// Don't double wrap errors
|
||||
if _, ok := err.(*ProcessError); ok {
|
||||
return err
|
||||
}
|
||||
processError := &ProcessError{Process: process, Operation: operation, ExtraInfo: extraInfo, Err: err}
|
||||
return processError
|
||||
}
|
||||
|
||||
// IsNotExist checks if an error is caused by the Container or Process not existing.
|
||||
// Note: Currently, ErrElementNotFound can mean that a Process has either
|
||||
// already exited, or does not exist. Both IsAlreadyStopped and IsNotExist
|
||||
// will currently return true when the error is ErrElementNotFound or ErrProcNotFound.
|
||||
func IsNotExist(err error) bool {
|
||||
err = getInnerError(err)
|
||||
return err == ErrComputeSystemDoesNotExist ||
|
||||
err == ErrElementNotFound ||
|
||||
err == ErrProcNotFound
|
||||
}
|
||||
|
||||
// IsAlreadyClosed checks if an error is caused by the Container or Process having been
|
||||
// already closed by a call to the Close() method.
|
||||
func IsAlreadyClosed(err error) bool {
|
||||
err = getInnerError(err)
|
||||
return err == ErrAlreadyClosed
|
||||
}
|
||||
|
||||
// IsPending returns a boolean indicating whether the error is that
|
||||
// the requested operation is being completed in the background.
|
||||
func IsPending(err error) bool {
|
||||
err = getInnerError(err)
|
||||
return err == ErrVmcomputeOperationPending
|
||||
}
|
||||
|
||||
// IsTimeout returns a boolean indicating whether the error is caused by
|
||||
// a timeout waiting for the operation to complete.
|
||||
func IsTimeout(err error) bool {
|
||||
err = getInnerError(err)
|
||||
return err == ErrTimeout
|
||||
}
|
||||
|
||||
// IsAlreadyStopped returns a boolean indicating whether the error is caused by
|
||||
// a Container or Process being already stopped.
|
||||
// Note: Currently, ErrElementNotFound can mean that a Process has either
|
||||
// already exited, or does not exist. Both IsAlreadyStopped and IsNotExist
|
||||
// will currently return true when the error is ErrElementNotFound or ErrProcNotFound.
|
||||
func IsAlreadyStopped(err error) bool {
|
||||
err = getInnerError(err)
|
||||
return err == ErrVmcomputeAlreadyStopped ||
|
||||
err == ErrElementNotFound ||
|
||||
err == ErrProcNotFound
|
||||
}
|
||||
|
||||
// IsNotSupported returns a boolean indicating whether the error is caused by
|
||||
// unsupported platform requests
|
||||
// Note: Currently an unsupported platform request can surface as any of
// ErrVmcomputeInvalidJSON, ErrInvalidData, ErrNotSupported or
// ErrVmcomputeUnknownMessage returned from the platform.
|
||||
func IsNotSupported(err error) bool {
|
||||
err = getInnerError(err)
|
||||
// If Platform doesn't recognize or support the request sent, below errors are seen
|
||||
return err == ErrVmcomputeInvalidJSON ||
|
||||
err == ErrInvalidData ||
|
||||
err == ErrNotSupported ||
|
||||
err == ErrVmcomputeUnknownMessage
|
||||
}
|
||||
|
||||
func getInnerError(err error) error {
|
||||
switch pe := err.(type) {
|
||||
case nil:
|
||||
return nil
|
||||
case *ContainerError:
|
||||
err = pe.Err
|
||||
case *ProcessError:
|
||||
err = pe.Err
|
||||
}
|
||||
return err
|
||||
}
|
|
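The Is* helpers above are meant to be combined when driving a container. For example, a graceful stop that tolerates already-stopped containers and escalates to Terminate on timeout might look like the following sketch; the Container value and grace period come from the caller, and the helper itself is illustrative, not hcsshim API:

package main

import (
    "time"

    "github.com/Microsoft/hcsshim"
)

// stopWithGrace asks for a graceful shutdown, waits up to grace for the exit
// notification, and falls back to Terminate if the container does not stop
// in time.
func stopWithGrace(c hcsshim.Container, grace time.Duration) error {
    if err := c.Shutdown(); err != nil {
        if hcsshim.IsAlreadyStopped(err) || hcsshim.IsNotExist(err) {
            return nil
        }
        if !hcsshim.IsPending(err) {
            return err
        }
    }
    if err := c.WaitTimeout(grace); err != nil {
        if hcsshim.IsTimeout(err) {
            return c.Terminate()
        }
        return err
    }
    return nil
}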
@@ -0,0 +1,26 @@
package hcsshim

import "github.com/sirupsen/logrus"

// ExpandSandboxSize expands the size of a layer to at least size bytes.
func ExpandSandboxSize(info DriverInfo, layerId string, size uint64) error {
    title := "hcsshim::ExpandSandboxSize "
    logrus.Debugf(title+"layerId=%s size=%d", layerId, size)

    // Convert info to API calling convention
    infop, err := convertDriverInfo(info)
    if err != nil {
        logrus.Error(err)
        return err
    }

    err = expandSandboxSize(&infop, layerId, size)
    if err != nil {
        err = makeErrorf(err, title, "layerId=%s size=%d", layerId, size)
        logrus.Error(err)
        return err
    }

    logrus.Debugf(title+"- succeeded layerId=%s size=%d", layerId, size)
    return nil
}
@@ -0,0 +1,156 @@
package hcsshim
|
||||
|
||||
import (
|
||||
"io"
|
||||
"io/ioutil"
|
||||
"os"
|
||||
"syscall"
|
||||
|
||||
"github.com/Microsoft/go-winio"
|
||||
"github.com/sirupsen/logrus"
|
||||
)
|
||||
|
||||
// ExportLayer will create a folder at exportFolderPath and fill that folder with
|
||||
// the transport format version of the layer identified by layerId. This transport
|
||||
// format includes any metadata required for later importing the layer (using
|
||||
// ImportLayer), and requires the full list of parent layer paths in order to
|
||||
// perform the export.
|
||||
func ExportLayer(info DriverInfo, layerId string, exportFolderPath string, parentLayerPaths []string) error {
|
||||
title := "hcsshim::ExportLayer "
|
||||
logrus.Debugf(title+"flavour %d layerId %s folder %s", info.Flavour, layerId, exportFolderPath)
|
||||
|
||||
// Generate layer descriptors
|
||||
layers, err := layerPathsToDescriptors(parentLayerPaths)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
// Convert info to API calling convention
|
||||
infop, err := convertDriverInfo(info)
|
||||
if err != nil {
|
||||
logrus.Error(err)
|
||||
return err
|
||||
}
|
||||
|
||||
err = exportLayer(&infop, layerId, exportFolderPath, layers)
|
||||
if err != nil {
|
||||
err = makeErrorf(err, title, "layerId=%s flavour=%d folder=%s", layerId, info.Flavour, exportFolderPath)
|
||||
logrus.Error(err)
|
||||
return err
|
||||
}
|
||||
|
||||
logrus.Debugf(title+"succeeded flavour=%d layerId=%s folder=%s", info.Flavour, layerId, exportFolderPath)
|
||||
return nil
|
||||
}
|
||||
|
||||
type LayerReader interface {
|
||||
Next() (string, int64, *winio.FileBasicInfo, error)
|
||||
Read(b []byte) (int, error)
|
||||
Close() error
|
||||
}
|
||||
|
||||
// FilterLayerReader provides an interface for extracting the contents of an on-disk layer.
|
||||
type FilterLayerReader struct {
|
||||
context uintptr
|
||||
}
|
||||
|
||||
// Next reads the next available file from a layer, ensuring that parent directories are always read
|
||||
// before child files and directories.
|
||||
//
|
||||
// Next returns the file's relative path, size, and basic file metadata. Read() should be used to
|
||||
// extract a Win32 backup stream with the remainder of the metadata and the data.
|
||||
func (r *FilterLayerReader) Next() (string, int64, *winio.FileBasicInfo, error) {
|
||||
var fileNamep *uint16
|
||||
fileInfo := &winio.FileBasicInfo{}
|
||||
var deleted uint32
|
||||
var fileSize int64
|
||||
err := exportLayerNext(r.context, &fileNamep, fileInfo, &fileSize, &deleted)
|
||||
if err != nil {
|
||||
if err == syscall.ERROR_NO_MORE_FILES {
|
||||
err = io.EOF
|
||||
} else {
|
||||
err = makeError(err, "ExportLayerNext", "")
|
||||
}
|
||||
return "", 0, nil, err
|
||||
}
|
||||
fileName := convertAndFreeCoTaskMemString(fileNamep)
|
||||
if deleted != 0 {
|
||||
fileInfo = nil
|
||||
}
|
||||
if fileName[0] == '\\' {
|
||||
fileName = fileName[1:]
|
||||
}
|
||||
return fileName, fileSize, fileInfo, nil
|
||||
}
|
||||
|
||||
// Read reads from the current file's Win32 backup stream.
|
||||
func (r *FilterLayerReader) Read(b []byte) (int, error) {
|
||||
var bytesRead uint32
|
||||
err := exportLayerRead(r.context, b, &bytesRead)
|
||||
if err != nil {
|
||||
return 0, makeError(err, "ExportLayerRead", "")
|
||||
}
|
||||
if bytesRead == 0 {
|
||||
return 0, io.EOF
|
||||
}
|
||||
return int(bytesRead), nil
|
||||
}
|
||||
|
||||
// Close frees resources associated with the layer reader. It will return an
|
||||
// error if there was an error while reading the layer or if the layer was not
|
||||
// completely read.
|
||||
func (r *FilterLayerReader) Close() (err error) {
|
||||
if r.context != 0 {
|
||||
err = exportLayerEnd(r.context)
|
||||
if err != nil {
|
||||
err = makeError(err, "ExportLayerEnd", "")
|
||||
}
|
||||
r.context = 0
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
// NewLayerReader returns a new layer reader for reading the contents of an on-disk layer.
|
||||
// The caller must have taken the SeBackupPrivilege privilege
|
||||
// to call this and any methods on the resulting LayerReader.
|
||||
func NewLayerReader(info DriverInfo, layerID string, parentLayerPaths []string) (LayerReader, error) {
|
||||
if procExportLayerBegin.Find() != nil {
|
||||
// The new layer reader is not available on this Windows build. Fall back to the
|
||||
// legacy export code path.
|
||||
path, err := ioutil.TempDir("", "hcs")
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
err = ExportLayer(info, layerID, path, parentLayerPaths)
|
||||
if err != nil {
|
||||
os.RemoveAll(path)
|
||||
return nil, err
|
||||
}
|
||||
return &legacyLayerReaderWrapper{newLegacyLayerReader(path)}, nil
|
||||
}
|
||||
|
||||
layers, err := layerPathsToDescriptors(parentLayerPaths)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
infop, err := convertDriverInfo(info)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
r := &FilterLayerReader{}
|
||||
err = exportLayerBegin(&infop, layerID, layers, &r.context)
|
||||
if err != nil {
|
||||
return nil, makeError(err, "ExportLayerBegin", "")
|
||||
}
|
||||
return r, err
|
||||
}
|
||||
|
||||
type legacyLayerReaderWrapper struct {
|
||||
*legacyLayerReader
|
||||
}
|
||||
|
||||
func (r *legacyLayerReaderWrapper) Close() error {
|
||||
err := r.legacyLayerReader.Close()
|
||||
os.RemoveAll(r.root)
|
||||
return err
|
||||
}
|
|
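A minimal sketch of driving the LayerReader interface above: call Next until io.EOF and drain each entry's Win32 backup stream through Read. A real exporter would write tar headers and stream the contents somewhere useful; here everything is discarded. The DriverInfo, layer ID and parent paths are assumed to come from the caller, which must also hold SeBackupPrivilege as documented on NewLayerReader:

package main

import (
    "io"
    "io/ioutil"

    "github.com/Microsoft/hcsshim"
)

// drainLayer is an illustrative sketch, not hcsshim API.
func drainLayer(info hcsshim.DriverInfo, layerID string, parents []string) error {
    r, err := hcsshim.NewLayerReader(info, layerID, parents)
    if err != nil {
        return err
    }
    defer r.Close()

    for {
        name, _, _, err := r.Next()
        if err == io.EOF {
            break // no more files in the layer
        }
        if err != nil {
            return err
        }
        _ = name // a real exporter would record the relative path here
        if _, err := io.Copy(ioutil.Discard, r); err != nil {
            return err
        }
    }
    return nil
}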
@@ -0,0 +1,55 @@
package hcsshim

import (
    "syscall"

    "github.com/sirupsen/logrus"
)

// GetLayerMountPath will look for a mounted layer with the given id and return
// the path at which that layer can be accessed. This path may be a volume path
// if the layer is a mounted read-write layer, otherwise it is expected to be the
// folder path at which the layer is stored.
func GetLayerMountPath(info DriverInfo, id string) (string, error) {
    title := "hcsshim::GetLayerMountPath "
    logrus.Debugf(title+"Flavour %d ID %s", info.Flavour, id)

    // Convert info to API calling convention
    infop, err := convertDriverInfo(info)
    if err != nil {
        logrus.Error(err)
        return "", err
    }

    var mountPathLength uintptr
    mountPathLength = 0

    // Call the procedure itself.
    logrus.Debugf("Calling proc (1)")
    err = getLayerMountPath(&infop, id, &mountPathLength, nil)
    if err != nil {
        err = makeErrorf(err, title, "(first call) id=%s flavour=%d", id, info.Flavour)
        logrus.Error(err)
        return "", err
    }

    // Allocate a mount path of the returned length.
    if mountPathLength == 0 {
        return "", nil
    }
    mountPathp := make([]uint16, mountPathLength)
    mountPathp[0] = 0

    // Call the procedure again
    logrus.Debugf("Calling proc (2)")
    err = getLayerMountPath(&infop, id, &mountPathLength, &mountPathp[0])
    if err != nil {
        err = makeErrorf(err, title, "(second call) id=%s flavour=%d", id, info.Flavour)
        logrus.Error(err)
        return "", err
    }

    path := syscall.UTF16ToString(mountPathp[0:])
    logrus.Debugf(title+"succeeded flavour=%d id=%s path=%s", info.Flavour, id, path)
    return path, nil
}
@ -0,0 +1,22 @@
|
|||
package hcsshim
|
||||
|
||||
import "github.com/sirupsen/logrus"
|
||||
|
||||
// GetSharedBaseImages will enumerate the images stored in the common central
|
||||
// image store and return descriptive info about those images for the purpose
|
||||
// of registering them with the graphdriver, graph, and tagstore.
|
||||
func GetSharedBaseImages() (imageData string, err error) {
|
||||
title := "hcsshim::GetSharedBaseImages "
|
||||
|
||||
logrus.Debugf("Calling proc")
|
||||
var buffer *uint16
|
||||
err = getBaseImages(&buffer)
|
||||
if err != nil {
|
||||
err = makeError(err, title, "")
|
||||
logrus.Error(err)
|
||||
return
|
||||
}
|
||||
imageData = convertAndFreeCoTaskMemString(buffer)
|
||||
logrus.Debugf(title+" - succeeded output=%s", imageData)
|
||||
return
|
||||
}
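// Illustrative usage sketch (editorial example, not part of the vendored
// source); the returned string is typically a JSON document describing the
// shared base images:
//
//    imageData, err := GetSharedBaseImages()
//    if err != nil {
//        return err
//    }
//    var images interface{}
//    _ = json.Unmarshal([]byte(imageData), &images) // inspect the image metadata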
|
|
@@ -0,0 +1,19 @@
|
|||
package hcsshim
|
||||
|
||||
import (
|
||||
"crypto/sha1"
|
||||
"fmt"
|
||||
)
|
||||
|
||||
type GUID [16]byte
|
||||
|
||||
func NewGUID(source string) *GUID {
|
||||
h := sha1.Sum([]byte(source))
|
||||
var g GUID
|
||||
copy(g[0:], h[0:16])
|
||||
return &g
|
||||
}
|
||||
|
||||
func (g *GUID) ToString() string {
|
||||
return fmt.Sprintf("%02x%02x%02x%02x-%02x%02x-%02x%02x-%02x-%02x", g[3], g[2], g[1], g[0], g[5], g[4], g[7], g[6], g[8:10], g[10:])
|
||||
}
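// Illustrative sketch (editorial example, not part of the vendored source):
// NewGUID hashes the source string, so the same input always yields the same
// GUID:
//
//    g := NewGUID("base-layer-folder")
//    logrus.Debugf("derived GUID: %s", g.ToString())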
|
|
@@ -0,0 +1,166 @@
|
|||
// Shim for the Host Compute Service (HCS) to manage Windows Server
|
||||
// containers and Hyper-V containers.
|
||||
|
||||
package hcsshim
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"syscall"
|
||||
"unsafe"
|
||||
|
||||
"github.com/sirupsen/logrus"
|
||||
)
|
||||
|
||||
//go:generate go run mksyscall_windows.go -output zhcsshim.go hcsshim.go
|
||||
|
||||
//sys coTaskMemFree(buffer unsafe.Pointer) = ole32.CoTaskMemFree
|
||||
//sys SetCurrentThreadCompartmentId(compartmentId uint32) (hr error) = iphlpapi.SetCurrentThreadCompartmentId
|
||||
|
||||
//sys activateLayer(info *driverInfo, id string) (hr error) = vmcompute.ActivateLayer?
|
||||
//sys copyLayer(info *driverInfo, srcId string, dstId string, descriptors []WC_LAYER_DESCRIPTOR) (hr error) = vmcompute.CopyLayer?
|
||||
//sys createLayer(info *driverInfo, id string, parent string) (hr error) = vmcompute.CreateLayer?
|
||||
//sys createSandboxLayer(info *driverInfo, id string, parent string, descriptors []WC_LAYER_DESCRIPTOR) (hr error) = vmcompute.CreateSandboxLayer?
|
||||
//sys expandSandboxSize(info *driverInfo, id string, size uint64) (hr error) = vmcompute.ExpandSandboxSize?
|
||||
//sys deactivateLayer(info *driverInfo, id string) (hr error) = vmcompute.DeactivateLayer?
|
||||
//sys destroyLayer(info *driverInfo, id string) (hr error) = vmcompute.DestroyLayer?
|
||||
//sys exportLayer(info *driverInfo, id string, path string, descriptors []WC_LAYER_DESCRIPTOR) (hr error) = vmcompute.ExportLayer?
|
||||
//sys getLayerMountPath(info *driverInfo, id string, length *uintptr, buffer *uint16) (hr error) = vmcompute.GetLayerMountPath?
|
||||
//sys getBaseImages(buffer **uint16) (hr error) = vmcompute.GetBaseImages?
|
||||
//sys importLayer(info *driverInfo, id string, path string, descriptors []WC_LAYER_DESCRIPTOR) (hr error) = vmcompute.ImportLayer?
|
||||
//sys layerExists(info *driverInfo, id string, exists *uint32) (hr error) = vmcompute.LayerExists?
|
||||
//sys nameToGuid(name string, guid *GUID) (hr error) = vmcompute.NameToGuid?
|
||||
//sys prepareLayer(info *driverInfo, id string, descriptors []WC_LAYER_DESCRIPTOR) (hr error) = vmcompute.PrepareLayer?
|
||||
//sys unprepareLayer(info *driverInfo, id string) (hr error) = vmcompute.UnprepareLayer?
|
||||
//sys processBaseImage(path string) (hr error) = vmcompute.ProcessBaseImage?
|
||||
//sys processUtilityImage(path string) (hr error) = vmcompute.ProcessUtilityImage?
|
||||
|
||||
//sys importLayerBegin(info *driverInfo, id string, descriptors []WC_LAYER_DESCRIPTOR, context *uintptr) (hr error) = vmcompute.ImportLayerBegin?
|
||||
//sys importLayerNext(context uintptr, fileName string, fileInfo *winio.FileBasicInfo) (hr error) = vmcompute.ImportLayerNext?
|
||||
//sys importLayerWrite(context uintptr, buffer []byte) (hr error) = vmcompute.ImportLayerWrite?
|
||||
//sys importLayerEnd(context uintptr) (hr error) = vmcompute.ImportLayerEnd?
|
||||
|
||||
//sys exportLayerBegin(info *driverInfo, id string, descriptors []WC_LAYER_DESCRIPTOR, context *uintptr) (hr error) = vmcompute.ExportLayerBegin?
|
||||
//sys exportLayerNext(context uintptr, fileName **uint16, fileInfo *winio.FileBasicInfo, fileSize *int64, deleted *uint32) (hr error) = vmcompute.ExportLayerNext?
|
||||
//sys exportLayerRead(context uintptr, buffer []byte, bytesRead *uint32) (hr error) = vmcompute.ExportLayerRead?
|
||||
//sys exportLayerEnd(context uintptr) (hr error) = vmcompute.ExportLayerEnd?
|
||||
|
||||
//sys hcsEnumerateComputeSystems(query string, computeSystems **uint16, result **uint16) (hr error) = vmcompute.HcsEnumerateComputeSystems?
|
||||
//sys hcsCreateComputeSystem(id string, configuration string, identity syscall.Handle, computeSystem *hcsSystem, result **uint16) (hr error) = vmcompute.HcsCreateComputeSystem?
|
||||
//sys hcsOpenComputeSystem(id string, computeSystem *hcsSystem, result **uint16) (hr error) = vmcompute.HcsOpenComputeSystem?
|
||||
//sys hcsCloseComputeSystem(computeSystem hcsSystem) (hr error) = vmcompute.HcsCloseComputeSystem?
|
||||
//sys hcsStartComputeSystem(computeSystem hcsSystem, options string, result **uint16) (hr error) = vmcompute.HcsStartComputeSystem?
|
||||
//sys hcsShutdownComputeSystem(computeSystem hcsSystem, options string, result **uint16) (hr error) = vmcompute.HcsShutdownComputeSystem?
|
||||
//sys hcsTerminateComputeSystem(computeSystem hcsSystem, options string, result **uint16) (hr error) = vmcompute.HcsTerminateComputeSystem?
|
||||
//sys hcsPauseComputeSystem(computeSystem hcsSystem, options string, result **uint16) (hr error) = vmcompute.HcsPauseComputeSystem?
|
||||
//sys hcsResumeComputeSystem(computeSystem hcsSystem, options string, result **uint16) (hr error) = vmcompute.HcsResumeComputeSystem?
|
||||
//sys hcsGetComputeSystemProperties(computeSystem hcsSystem, propertyQuery string, properties **uint16, result **uint16) (hr error) = vmcompute.HcsGetComputeSystemProperties?
|
||||
//sys hcsModifyComputeSystem(computeSystem hcsSystem, configuration string, result **uint16) (hr error) = vmcompute.HcsModifyComputeSystem?
|
||||
//sys hcsRegisterComputeSystemCallback(computeSystem hcsSystem, callback uintptr, context uintptr, callbackHandle *hcsCallback) (hr error) = vmcompute.HcsRegisterComputeSystemCallback?
|
||||
//sys hcsUnregisterComputeSystemCallback(callbackHandle hcsCallback) (hr error) = vmcompute.HcsUnregisterComputeSystemCallback?
|
||||
|
||||
//sys hcsCreateProcess(computeSystem hcsSystem, processParameters string, processInformation *hcsProcessInformation, process *hcsProcess, result **uint16) (hr error) = vmcompute.HcsCreateProcess?
|
||||
//sys hcsOpenProcess(computeSystem hcsSystem, pid uint32, process *hcsProcess, result **uint16) (hr error) = vmcompute.HcsOpenProcess?
|
||||
//sys hcsCloseProcess(process hcsProcess) (hr error) = vmcompute.HcsCloseProcess?
|
||||
//sys hcsTerminateProcess(process hcsProcess, result **uint16) (hr error) = vmcompute.HcsTerminateProcess?
|
||||
//sys hcsGetProcessInfo(process hcsProcess, processInformation *hcsProcessInformation, result **uint16) (hr error) = vmcompute.HcsGetProcessInfo?
|
||||
//sys hcsGetProcessProperties(process hcsProcess, processProperties **uint16, result **uint16) (hr error) = vmcompute.HcsGetProcessProperties?
|
||||
//sys hcsModifyProcess(process hcsProcess, settings string, result **uint16) (hr error) = vmcompute.HcsModifyProcess?
|
||||
//sys hcsGetServiceProperties(propertyQuery string, properties **uint16, result **uint16) (hr error) = vmcompute.HcsGetServiceProperties?
|
||||
//sys hcsRegisterProcessCallback(process hcsProcess, callback uintptr, context uintptr, callbackHandle *hcsCallback) (hr error) = vmcompute.HcsRegisterProcessCallback?
|
||||
//sys hcsUnregisterProcessCallback(callbackHandle hcsCallback) (hr error) = vmcompute.HcsUnregisterProcessCallback?
|
||||
|
||||
//sys hcsModifyServiceSettings(settings string, result **uint16) (hr error) = vmcompute.HcsModifyServiceSettings?
|
||||
|
||||
//sys _hnsCall(method string, path string, object string, response **uint16) (hr error) = vmcompute.HNSCall?
|
||||
|
||||
const (
|
||||
// Specific user-visible exit codes
|
||||
WaitErrExecFailed = 32767
|
||||
|
||||
ERROR_GEN_FAILURE = syscall.Errno(31)
|
||||
ERROR_SHUTDOWN_IN_PROGRESS = syscall.Errno(1115)
|
||||
WSAEINVAL = syscall.Errno(10022)
|
||||
|
||||
// Timeout on wait calls
|
||||
TimeoutInfinite = 0xFFFFFFFF
|
||||
)
|
||||
|
||||
type HcsError struct {
|
||||
title string
|
||||
rest string
|
||||
Err error
|
||||
}
|
||||
|
||||
type hcsSystem syscall.Handle
|
||||
type hcsProcess syscall.Handle
|
||||
type hcsCallback syscall.Handle
|
||||
|
||||
type hcsProcessInformation struct {
|
||||
ProcessId uint32
|
||||
Reserved uint32
|
||||
StdInput syscall.Handle
|
||||
StdOutput syscall.Handle
|
||||
StdError syscall.Handle
|
||||
}
|
||||
|
||||
func makeError(err error, title, rest string) error {
|
||||
// Pass through DLL errors directly since they do not originate from HCS.
|
||||
if _, ok := err.(*syscall.DLLError); ok {
|
||||
return err
|
||||
}
|
||||
return &HcsError{title, rest, err}
|
||||
}
|
||||
|
||||
func makeErrorf(err error, title, format string, a ...interface{}) error {
|
||||
return makeError(err, title, fmt.Sprintf(format, a...))
|
||||
}
|
||||
|
||||
func win32FromError(err error) uint32 {
|
||||
if herr, ok := err.(*HcsError); ok {
|
||||
return win32FromError(herr.Err)
|
||||
}
|
||||
if code, ok := err.(syscall.Errno); ok {
|
||||
return uint32(code)
|
||||
}
|
||||
return uint32(ERROR_GEN_FAILURE)
|
||||
}
|
||||
|
||||
func win32FromHresult(hr uintptr) uintptr {
|
||||
if hr&0x1fff0000 == 0x00070000 {
|
||||
return hr & 0xffff
|
||||
}
|
||||
return hr
|
||||
}
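// Worked example (editorial note, not part of the vendored source): an HRESULT
// in the Win32 facility, such as 0x80070005 (E_ACCESSDENIED), satisfies
// hr&0x1fff0000 == 0x00070000, so win32FromHresult returns 0x80070005&0xffff,
// which is 5, the underlying Win32 code ERROR_ACCESS_DENIED. HRESULTs from
// other facilities are passed through unchanged.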
|
||||
|
||||
func (e *HcsError) Error() string {
|
||||
s := e.title
|
||||
if len(s) > 0 && s[len(s)-1] != ' ' {
|
||||
s += " "
|
||||
}
|
||||
s += fmt.Sprintf("failed in Win32: %s (0x%x)", e.Err, win32FromError(e.Err))
|
||||
if e.rest != "" {
|
||||
if e.rest[0] != ' ' {
|
||||
s += " "
|
||||
}
|
||||
s += e.rest
|
||||
}
|
||||
return s
|
||||
}
|
||||
|
||||
func convertAndFreeCoTaskMemString(buffer *uint16) string {
|
||||
str := syscall.UTF16ToString((*[1 << 30]uint16)(unsafe.Pointer(buffer))[:])
|
||||
coTaskMemFree(unsafe.Pointer(buffer))
|
||||
return str
|
||||
}
|
||||
|
||||
func convertAndFreeCoTaskMemBytes(buffer *uint16) []byte {
|
||||
return []byte(convertAndFreeCoTaskMemString(buffer))
|
||||
}
|
||||
|
||||
func processHcsResult(err error, resultp *uint16) error {
|
||||
if resultp != nil {
|
||||
result := convertAndFreeCoTaskMemString(resultp)
|
||||
logrus.Debugf("Result: %s", result)
|
||||
}
|
||||
return err
|
||||
}
|
|
@@ -0,0 +1,318 @@
|
|||
package hcsshim
|
||||
|
||||
import (
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
"net"
|
||||
|
||||
"github.com/sirupsen/logrus"
|
||||
)
|
||||
|
||||
// HNSEndpoint represents a network endpoint in HNS
|
||||
type HNSEndpoint struct {
|
||||
Id string `json:"ID,omitempty"`
|
||||
Name string `json:",omitempty"`
|
||||
VirtualNetwork string `json:",omitempty"`
|
||||
VirtualNetworkName string `json:",omitempty"`
|
||||
Policies []json.RawMessage `json:",omitempty"`
|
||||
MacAddress string `json:",omitempty"`
|
||||
IPAddress net.IP `json:",omitempty"`
|
||||
DNSSuffix string `json:",omitempty"`
|
||||
DNSServerList string `json:",omitempty"`
|
||||
GatewayAddress string `json:",omitempty"`
|
||||
EnableInternalDNS bool `json:",omitempty"`
|
||||
DisableICC bool `json:",omitempty"`
|
||||
PrefixLength uint8 `json:",omitempty"`
|
||||
IsRemoteEndpoint bool `json:",omitempty"`
|
||||
}
|
||||
|
||||
// SystemType represents the type of the system on which actions are done
|
||||
type SystemType string
|
||||
|
||||
// SystemType const
|
||||
const (
|
||||
ContainerType SystemType = "Container"
|
||||
VirtualMachineType SystemType = "VirtualMachine"
|
||||
HostType SystemType = "Host"
|
||||
)
|
||||
|
||||
// EndpointAttachDetachRequest is the structure used to send a request to the container to modify the system
|
||||
// Supported resource types are Network and Request Types are Add/Remove
|
||||
type EndpointAttachDetachRequest struct {
|
||||
ContainerID string `json:"ContainerId,omitempty"`
|
||||
SystemType SystemType `json:"SystemType"`
|
||||
CompartmentID uint16 `json:"CompartmentId,omitempty"`
|
||||
VirtualNICName string `json:"VirtualNicName,omitempty"`
|
||||
}
|
||||
|
||||
// EndpointResquestResponse is the object returned in response to an endpoint request
|
||||
type EndpointResquestResponse struct {
|
||||
Success bool
|
||||
Error string
|
||||
}
|
||||
|
||||
// HNSEndpointRequest makes a HNS call to modify/query a network endpoint
|
||||
func HNSEndpointRequest(method, path, request string) (*HNSEndpoint, error) {
|
||||
endpoint := &HNSEndpoint{}
|
||||
err := hnsCall(method, "/endpoints/"+path, request, &endpoint)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
return endpoint, nil
|
||||
}
|
||||
|
||||
// HNSListEndpointRequest makes a HNS call to query the list of available endpoints
|
||||
func HNSListEndpointRequest() ([]HNSEndpoint, error) {
|
||||
var endpoint []HNSEndpoint
|
||||
err := hnsCall("GET", "/endpoints/", "", &endpoint)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
return endpoint, nil
|
||||
}
|
||||
|
||||
// HotAttachEndpoint makes a HCS Call to attach the endpoint to the container
|
||||
func HotAttachEndpoint(containerID string, endpointID string) error {
|
||||
return modifyNetworkEndpoint(containerID, endpointID, Add)
|
||||
}
|
||||
|
||||
// HotDetachEndpoint makes a HCS Call to detach the endpoint from the container
|
||||
func HotDetachEndpoint(containerID string, endpointID string) error {
|
||||
return modifyNetworkEndpoint(containerID, endpointID, Remove)
|
||||
}
|
||||
|
||||
// modifyContainer modifies the container matching the given id by sending the supplied request
|
||||
func modifyContainer(id string, request *ResourceModificationRequestResponse) error {
|
||||
container, err := OpenContainer(id)
|
||||
if err != nil {
|
||||
if IsNotExist(err) {
|
||||
return ErrComputeSystemDoesNotExist
|
||||
}
|
||||
return getInnerError(err)
|
||||
}
|
||||
defer container.Close()
|
||||
err = container.Modify(request)
|
||||
if err != nil {
|
||||
if IsNotSupported(err) {
|
||||
return ErrPlatformNotSupported
|
||||
}
|
||||
return getInnerError(err)
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func modifyNetworkEndpoint(containerID string, endpointID string, request RequestType) error {
|
||||
requestMessage := &ResourceModificationRequestResponse{
|
||||
Resource: Network,
|
||||
Request: request,
|
||||
Data: endpointID,
|
||||
}
|
||||
err := modifyContainer(containerID, requestMessage)
|
||||
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// GetHNSEndpointByID get the Endpoint by ID
|
||||
func GetHNSEndpointByID(endpointID string) (*HNSEndpoint, error) {
|
||||
return HNSEndpointRequest("GET", endpointID, "")
|
||||
}
|
||||
|
||||
// GetHNSEndpointByName gets the endpoint filtered by Name
|
||||
func GetHNSEndpointByName(endpointName string) (*HNSEndpoint, error) {
|
||||
hnsResponse, err := HNSListEndpointRequest()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
for _, hnsEndpoint := range hnsResponse {
|
||||
if hnsEndpoint.Name == endpointName {
|
||||
return &hnsEndpoint, nil
|
||||
}
|
||||
}
|
||||
return nil, fmt.Errorf("Endpoint %v not found", endpointName)
|
||||
}
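// Illustrative usage sketch (editorial example; the endpoint name is an
// assumption, not something this file defines):
//
//    endpoint, err := GetHNSEndpointByName("containerNIC")
//    if err != nil {
//        return err
//    }
//    logrus.Debugf("endpoint %s has IP %s", endpoint.Id, endpoint.IPAddress)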
|
||||
|
||||
// Create Endpoint by sending EndpointRequest to HNS. TODO: Create a separate HNS interface to place all these methods
|
||||
func (endpoint *HNSEndpoint) Create() (*HNSEndpoint, error) {
|
||||
operation := "Create"
|
||||
title := "HCSShim::HNSEndpoint::" + operation
|
||||
logrus.Debugf(title+" id=%s", endpoint.Id)
|
||||
|
||||
jsonString, err := json.Marshal(endpoint)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return HNSEndpointRequest("POST", "", string(jsonString))
|
||||
}
|
||||
|
||||
// Delete Endpoint by sending EndpointRequest to HNS
|
||||
func (endpoint *HNSEndpoint) Delete() (*HNSEndpoint, error) {
|
||||
operation := "Delete"
|
||||
title := "HCSShim::HNSEndpoint::" + operation
|
||||
logrus.Debugf(title+" id=%s", endpoint.Id)
|
||||
|
||||
return HNSEndpointRequest("DELETE", endpoint.Id, "")
|
||||
}
|
||||
|
||||
// Update Endpoint
|
||||
func (endpoint *HNSEndpoint) Update() (*HNSEndpoint, error) {
|
||||
operation := "Update"
|
||||
title := "HCSShim::HNSEndpoint::" + operation
|
||||
logrus.Debugf(title+" id=%s", endpoint.Id)
|
||||
jsonString, err := json.Marshal(endpoint)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
err = hnsCall("POST", "/endpoints/"+endpoint.Id, string(jsonString), &endpoint)
|
||||
|
||||
return endpoint, err
|
||||
}
|
||||
|
||||
// ContainerHotAttach attaches an endpoint to a running container
|
||||
func (endpoint *HNSEndpoint) ContainerHotAttach(containerID string) error {
|
||||
operation := "ContainerHotAttach"
|
||||
title := "HCSShim::HNSEndpoint::" + operation
|
||||
logrus.Debugf(title+" id=%s, containerId=%s", endpoint.Id, containerID)
|
||||
|
||||
return modifyNetworkEndpoint(containerID, endpoint.Id, Add)
|
||||
}
|
||||
|
||||
// ContainerHotDetach detaches an endpoint from a running container
|
||||
func (endpoint *HNSEndpoint) ContainerHotDetach(containerID string) error {
|
||||
operation := "ContainerHotDetach"
|
||||
title := "HCSShim::HNSEndpoint::" + operation
|
||||
logrus.Debugf(title+" id=%s, containerId=%s", endpoint.Id, containerID)
|
||||
|
||||
return modifyNetworkEndpoint(containerID, endpoint.Id, Remove)
|
||||
}
|
||||
|
||||
// ApplyACLPolicy applies Acl Policy on the Endpoint
|
||||
func (endpoint *HNSEndpoint) ApplyACLPolicy(policy *ACLPolicy) error {
|
||||
operation := "ApplyACLPolicy"
|
||||
title := "HCSShim::HNSEndpoint::" + operation
|
||||
logrus.Debugf(title+" id=%s", endpoint.Id)
|
||||
|
||||
jsonString, err := json.Marshal(policy)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
endpoint.Policies[0] = jsonString
|
||||
_, err = endpoint.Update()
|
||||
return err
|
||||
}
|
||||
|
||||
// ContainerAttach attaches an endpoint to container
|
||||
func (endpoint *HNSEndpoint) ContainerAttach(containerID string, compartmentID uint16) error {
|
||||
operation := "ContainerAttach"
|
||||
title := "HCSShim::HNSEndpoint::" + operation
|
||||
logrus.Debugf(title+" id=%s", endpoint.Id)
|
||||
|
||||
requestMessage := &EndpointAttachDetachRequest{
|
||||
ContainerID: containerID,
|
||||
CompartmentID: compartmentID,
|
||||
SystemType: ContainerType,
|
||||
}
|
||||
response := &EndpointResquestResponse{}
|
||||
jsonString, err := json.Marshal(requestMessage)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
return hnsCall("POST", "/endpoints/"+endpoint.Id+"/attach", string(jsonString), &response)
|
||||
}
|
||||
|
||||
// ContainerDetach detaches an endpoint from container
|
||||
func (endpoint *HNSEndpoint) ContainerDetach(containerID string) error {
|
||||
operation := "ContainerDetach"
|
||||
title := "HCSShim::HNSEndpoint::" + operation
|
||||
logrus.Debugf(title+" id=%s", endpoint.Id)
|
||||
|
||||
requestMessage := &EndpointAttachDetachRequest{
|
||||
ContainerID: containerID,
|
||||
SystemType: ContainerType,
|
||||
}
|
||||
response := &EndpointResquestResponse{}
|
||||
|
||||
jsonString, err := json.Marshal(requestMessage)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
return hnsCall("POST", "/endpoints/"+endpoint.Id+"/detach", string(jsonString), &response)
|
||||
}
|
||||
|
||||
// HostAttach attaches a nic on the host
|
||||
func (endpoint *HNSEndpoint) HostAttach(compartmentID uint16) error {
|
||||
operation := "HostAttach"
|
||||
title := "HCSShim::HNSEndpoint::" + operation
|
||||
logrus.Debugf(title+" id=%s", endpoint.Id)
|
||||
requestMessage := &EndpointAttachDetachRequest{
|
||||
CompartmentID: compartmentID,
|
||||
SystemType: HostType,
|
||||
}
|
||||
response := &EndpointResquestResponse{}
|
||||
|
||||
jsonString, err := json.Marshal(requestMessage)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
return hnsCall("POST", "/endpoints/"+endpoint.Id+"/attach", string(jsonString), &response)
|
||||
|
||||
}
|
||||
|
||||
// HostDetach detaches a nic on the host
|
||||
func (endpoint *HNSEndpoint) HostDetach() error {
|
||||
operation := "HostDetach"
|
||||
title := "HCSShim::HNSEndpoint::" + operation
|
||||
logrus.Debugf(title+" id=%s", endpoint.Id)
|
||||
requestMessage := &EndpointAttachDetachRequest{
|
||||
SystemType: HostType,
|
||||
}
|
||||
response := &EndpointResquestResponse{}
|
||||
|
||||
jsonString, err := json.Marshal(requestMessage)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
return hnsCall("POST", "/endpoints/"+endpoint.Id+"/detach", string(jsonString), &response)
|
||||
}
|
||||
|
||||
// VirtualMachineNICAttach attaches an endpoint to a virtual machine
|
||||
func (endpoint *HNSEndpoint) VirtualMachineNICAttach(virtualMachineNICName string) error {
|
||||
operation := "VirtualMachineNicAttach"
|
||||
title := "HCSShim::HNSEndpoint::" + operation
|
||||
logrus.Debugf(title+" id=%s", endpoint.Id)
|
||||
requestMessage := &EndpointAttachDetachRequest{
|
||||
VirtualNICName: virtualMachineNICName,
|
||||
SystemType: VirtualMachineType,
|
||||
}
|
||||
response := &EndpointResquestResponse{}
|
||||
|
||||
jsonString, err := json.Marshal(requestMessage)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
return hnsCall("POST", "/endpoints/"+endpoint.Id+"/attach", string(jsonString), &response)
|
||||
}
|
||||
|
||||
// VirtualMachineNICDetach detaches an endpoint from a virtual machine
|
||||
func (endpoint *HNSEndpoint) VirtualMachineNICDetach() error {
|
||||
operation := "VirtualMachineNicDetach"
|
||||
title := "HCSShim::HNSEndpoint::" + operation
|
||||
logrus.Debugf(title+" id=%s", endpoint.Id)
|
||||
|
||||
requestMessage := &EndpointAttachDetachRequest{
|
||||
SystemType: VirtualMachineType,
|
||||
}
|
||||
response := &EndpointResquestResponse{}
|
||||
|
||||
jsonString, err := json.Marshal(requestMessage)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
return hnsCall("POST", "/endpoints/"+endpoint.Id+"/detach", string(jsonString), &response)
|
||||
}
|
|
@@ -0,0 +1,40 @@
|
|||
package hcsshim
|
||||
|
||||
import (
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
|
||||
"github.com/sirupsen/logrus"
|
||||
)
|
||||
|
||||
func hnsCall(method, path, request string, returnResponse interface{}) error {
|
||||
var responseBuffer *uint16
|
||||
logrus.Debugf("[%s]=>[%s] Request : %s", method, path, request)
|
||||
|
||||
err := _hnsCall(method, path, request, &responseBuffer)
|
||||
if err != nil {
|
||||
return makeError(err, "hnsCall ", "")
|
||||
}
|
||||
response := convertAndFreeCoTaskMemString(responseBuffer)
|
||||
|
||||
hnsresponse := &hnsResponse{}
|
||||
if err = json.Unmarshal([]byte(response), &hnsresponse); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
if !hnsresponse.Success {
|
||||
return fmt.Errorf("HNS failed with error : %s", hnsresponse.Error)
|
||||
}
|
||||
|
||||
if len(hnsresponse.Output) == 0 {
|
||||
return nil
|
||||
}
|
||||
|
||||
logrus.Debugf("Network Response : %s", hnsresponse.Output)
|
||||
err = json.Unmarshal(hnsresponse.Output, returnResponse)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
|
@@ -0,0 +1,142 @@
|
|||
package hcsshim
|
||||
|
||||
import (
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
"net"
|
||||
|
||||
"github.com/sirupsen/logrus"
|
||||
)
|
||||
|
||||
// Subnet is associated with a network and represents a list
|
||||
// of subnets available to the network
|
||||
type Subnet struct {
|
||||
AddressPrefix string `json:",omitempty"`
|
||||
GatewayAddress string `json:",omitempty"`
|
||||
Policies []json.RawMessage `json:",omitempty"`
|
||||
}
|
||||
|
||||
// MacPool is associated with a network and represents a list
|
||||
// of MAC addresses available to the network
|
||||
type MacPool struct {
|
||||
StartMacAddress string `json:",omitempty"`
|
||||
EndMacAddress string `json:",omitempty"`
|
||||
}
|
||||
|
||||
// HNSNetwork represents a network in HNS
|
||||
type HNSNetwork struct {
|
||||
Id string `json:"ID,omitempty"`
|
||||
Name string `json:",omitempty"`
|
||||
Type string `json:",omitempty"`
|
||||
NetworkAdapterName string `json:",omitempty"`
|
||||
SourceMac string `json:",omitempty"`
|
||||
Policies []json.RawMessage `json:",omitempty"`
|
||||
MacPools []MacPool `json:",omitempty"`
|
||||
Subnets []Subnet `json:",omitempty"`
|
||||
DNSSuffix string `json:",omitempty"`
|
||||
DNSServerList string `json:",omitempty"`
|
||||
DNSServerCompartment uint32 `json:",omitempty"`
|
||||
ManagementIP string `json:",omitempty"`
|
||||
AutomaticDNS bool `json:",omitempty"`
|
||||
}
|
||||
|
||||
type hnsNetworkResponse struct {
|
||||
Success bool
|
||||
Error string
|
||||
Output HNSNetwork
|
||||
}
|
||||
|
||||
type hnsResponse struct {
|
||||
Success bool
|
||||
Error string
|
||||
Output json.RawMessage
|
||||
}
|
||||
|
||||
// HNSNetworkRequest makes a call into HNS to update/query a single network
|
||||
func HNSNetworkRequest(method, path, request string) (*HNSNetwork, error) {
|
||||
var network HNSNetwork
|
||||
err := hnsCall(method, "/networks/"+path, request, &network)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
return &network, nil
|
||||
}
|
||||
|
||||
// HNSListNetworkRequest makes a HNS call to query the list of available networks
|
||||
func HNSListNetworkRequest(method, path, request string) ([]HNSNetwork, error) {
|
||||
var network []HNSNetwork
|
||||
err := hnsCall(method, "/networks/"+path, request, &network)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
return network, nil
|
||||
}
|
||||
|
||||
// GetHNSNetworkByID gets the network by ID
|
||||
func GetHNSNetworkByID(networkID string) (*HNSNetwork, error) {
|
||||
return HNSNetworkRequest("GET", networkID, "")
|
||||
}
|
||||
|
||||
// GetHNSNetworkByName gets the network filtered by Name
|
||||
func GetHNSNetworkByName(networkName string) (*HNSNetwork, error) {
|
||||
hsnnetworks, err := HNSListNetworkRequest("GET", "", "")
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
for _, hnsnetwork := range hsnnetworks {
|
||||
if hnsnetwork.Name == networkName {
|
||||
return &hnsnetwork, nil
|
||||
}
|
||||
}
|
||||
return nil, fmt.Errorf("Network %v not found", networkName)
|
||||
}
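// Illustrative usage sketch (editorial example; the "nat" network name and the
// endpoint name are assumptions, "nat" being a common default on Docker for
// Windows hosts):
//
//    network, err := GetHNSNetworkByName("nat")
//    if err != nil {
//        return err
//    }
//    endpoint, err := network.CreateEndpoint(&HNSEndpoint{Name: "ep1"})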
|
||||
|
||||
// Create Network by sending NetworkRequest to HNS.
|
||||
func (network *HNSNetwork) Create() (*HNSNetwork, error) {
|
||||
operation := "Create"
|
||||
title := "HCSShim::HNSNetwork::" + operation
|
||||
logrus.Debugf(title+" id=%s", network.Id)
|
||||
|
||||
jsonString, err := json.Marshal(network)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return HNSNetworkRequest("POST", "", string(jsonString))
|
||||
}
|
||||
|
||||
// Delete Network by sending NetworkRequest to HNS
|
||||
func (network *HNSNetwork) Delete() (*HNSNetwork, error) {
|
||||
operation := "Delete"
|
||||
title := "HCSShim::HNSNetwork::" + operation
|
||||
logrus.Debugf(title+" id=%s", network.Id)
|
||||
|
||||
return HNSNetworkRequest("DELETE", network.Id, "")
|
||||
}
|
||||
|
||||
// NewEndpoint creates an endpoint on the network.
|
||||
func (network *HNSNetwork) NewEndpoint(ipAddress net.IP, macAddress net.HardwareAddr) *HNSEndpoint {
|
||||
return &HNSEndpoint{
|
||||
VirtualNetwork: network.Id,
|
||||
IPAddress: ipAddress,
|
||||
MacAddress: string(macAddress),
|
||||
}
|
||||
}
|
||||
|
||||
func (network *HNSNetwork) CreateEndpoint(endpoint *HNSEndpoint) (*HNSEndpoint, error) {
|
||||
operation := "CreateEndpoint"
|
||||
title := "HCSShim::HNSNetwork::" + operation
|
||||
logrus.Debugf(title+" id=%s, endpointId=%s", network.Id, endpoint.Id)
|
||||
|
||||
endpoint.VirtualNetwork = network.Id
|
||||
return endpoint.Create()
|
||||
}
|
||||
|
||||
func (network *HNSNetwork) CreateRemoteEndpoint(endpoint *HNSEndpoint) (*HNSEndpoint, error) {
|
||||
operation := "CreateRemoteEndpoint"
|
||||
title := "HCSShim::HNSNetwork::" + operation
|
||||
logrus.Debugf(title+" id=%s", network.Id)
|
||||
endpoint.IsRemoteEndpoint = true
|
||||
return network.CreateEndpoint(endpoint)
|
||||
}
|
|
@@ -0,0 +1,95 @@
|
|||
package hcsshim
|
||||
|
||||
// PolicyType is the type of request supported in ModifySystem
|
||||
type PolicyType string
|
||||
|
||||
// PolicyType const
|
||||
const (
|
||||
Nat PolicyType = "NAT"
|
||||
ACL PolicyType = "ACL"
|
||||
PA PolicyType = "PA"
|
||||
VLAN PolicyType = "VLAN"
|
||||
VSID PolicyType = "VSID"
|
||||
VNet PolicyType = "VNET"
|
||||
L2Driver PolicyType = "L2Driver"
|
||||
Isolation PolicyType = "Isolation"
|
||||
QOS PolicyType = "QOS"
|
||||
OutboundNat PolicyType = "OutBoundNAT"
|
||||
ExternalLoadBalancer PolicyType = "ELB"
|
||||
Route PolicyType = "ROUTE"
|
||||
)
|
||||
|
||||
type NatPolicy struct {
|
||||
Type PolicyType `json:"Type"`
|
||||
Protocol string
|
||||
InternalPort uint16
|
||||
ExternalPort uint16
|
||||
}
|
||||
|
||||
type QosPolicy struct {
|
||||
Type PolicyType `json:"Type"`
|
||||
MaximumOutgoingBandwidthInBytes uint64
|
||||
}
|
||||
|
||||
type IsolationPolicy struct {
|
||||
Type PolicyType `json:"Type"`
|
||||
VLAN uint
|
||||
VSID uint
|
||||
InDefaultIsolation bool
|
||||
}
|
||||
|
||||
type VlanPolicy struct {
|
||||
Type PolicyType `json:"Type"`
|
||||
VLAN uint
|
||||
}
|
||||
|
||||
type VsidPolicy struct {
|
||||
Type PolicyType `json:"Type"`
|
||||
VSID uint
|
||||
}
|
||||
|
||||
type PaPolicy struct {
|
||||
Type PolicyType `json:"Type"`
|
||||
PA string `json:"PA"`
|
||||
}
|
||||
|
||||
type OutboundNatPolicy struct {
|
||||
Policy
|
||||
VIP string `json:"VIP,omitempty"`
|
||||
Exceptions []string `json:"ExceptionList,omitempty"`
|
||||
}
|
||||
|
||||
type ActionType string
|
||||
type DirectionType string
|
||||
type RuleType string
|
||||
|
||||
const (
|
||||
Allow ActionType = "Allow"
|
||||
Block ActionType = "Block"
|
||||
|
||||
In DirectionType = "In"
|
||||
Out DirectionType = "Out"
|
||||
|
||||
Host RuleType = "Host"
|
||||
Switch RuleType = "Switch"
|
||||
)
|
||||
|
||||
type ACLPolicy struct {
|
||||
Type PolicyType `json:"Type"`
|
||||
Protocol uint16
|
||||
InternalPort uint16
|
||||
Action ActionType
|
||||
Direction DirectionType
|
||||
LocalAddress string
|
||||
RemoteAddress string
|
||||
LocalPort uint16
|
||||
RemotePort uint16
|
||||
RuleType RuleType `json:"RuleType,omitempty"`
|
||||
|
||||
Priority uint16
|
||||
ServiceName string
|
||||
}
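// Illustrative sketch (editorial example, not part of the vendored source):
// building an ACL rule that blocks inbound TCP traffic to port 80. Note that
// ApplyACLPolicy, defined earlier in this package, writes into
// endpoint.Policies[0], so the endpoint is assumed to already carry at least
// one policy entry:
//
//    acl := &ACLPolicy{
//        Type:      ACL,
//        Protocol:  6, // TCP
//        LocalPort: 80,
//        Action:    Block,
//        Direction: In,
//        RuleType:  Switch,
//    }
//    err := endpoint.ApplyACLPolicy(acl)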
|
||||
|
||||
type Policy struct {
|
||||
Type PolicyType `json:"Type"`
|
||||
}
|
|
@@ -0,0 +1,200 @@
|
|||
package hcsshim
|
||||
|
||||
import (
|
||||
"encoding/json"
|
||||
|
||||
"github.com/sirupsen/logrus"
|
||||
)
|
||||
|
||||
// RoutePolicy is a structure defining schema for Route based Policy
|
||||
type RoutePolicy struct {
|
||||
Policy
|
||||
DestinationPrefix string `json:"DestinationPrefix,omitempty"`
|
||||
NextHop string `json:"NextHop,omitempty"`
|
||||
EncapEnabled bool `json:"NeedEncap,omitempty"`
|
||||
}
|
||||
|
||||
// ELBPolicy is a structure defining schema for ELB LoadBalancing based Policy
|
||||
type ELBPolicy struct {
|
||||
LBPolicy
|
||||
SourceVIP string `json:"SourceVIP,omitempty"`
|
||||
VIPs []string `json:"VIPs,omitempty"`
|
||||
ILB bool `json:"ILB,omitempty"`
|
||||
}
|
||||
|
||||
// LBPolicy is a structure defining schema for LoadBalancing based Policy
|
||||
type LBPolicy struct {
|
||||
Policy
|
||||
Protocol uint16 `json:"Protocol,omitempty"`
|
||||
InternalPort uint16
|
||||
ExternalPort uint16
|
||||
}
|
||||
|
||||
// PolicyList is a structure defining schema for Policy list request
|
||||
type PolicyList struct {
|
||||
ID string `json:"ID,omitempty"`
|
||||
EndpointReferences []string `json:"References,omitempty"`
|
||||
Policies []json.RawMessage `json:"Policies,omitempty"`
|
||||
}
|
||||
|
||||
// HNSPolicyListRequest makes a call into HNS to update/query a single network
|
||||
func HNSPolicyListRequest(method, path, request string) (*PolicyList, error) {
|
||||
var policy PolicyList
|
||||
err := hnsCall(method, "/policylists/"+path, request, &policy)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
return &policy, nil
|
||||
}
|
||||
|
||||
// HNSListPolicyListRequest gets all policy lists
|
||||
func HNSListPolicyListRequest() ([]PolicyList, error) {
|
||||
var plist []PolicyList
|
||||
err := hnsCall("GET", "/policylists/", "", &plist)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
return plist, nil
|
||||
}
|
||||
|
||||
// PolicyListRequest makes a HNS call to modify/query a network policy list
|
||||
func PolicyListRequest(method, path, request string) (*PolicyList, error) {
|
||||
policylist := &PolicyList{}
|
||||
err := hnsCall(method, "/policylists/"+path, request, &policylist)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
return policylist, nil
|
||||
}
|
||||
|
||||
// GetPolicyListByID get the policy list by ID
|
||||
func GetPolicyListByID(policyListID string) (*PolicyList, error) {
|
||||
return PolicyListRequest("GET", policyListID, "")
|
||||
}
|
||||
|
||||
// Create PolicyList by sending PolicyListRequest to HNS.
|
||||
func (policylist *PolicyList) Create() (*PolicyList, error) {
|
||||
operation := "Create"
|
||||
title := "HCSShim::PolicyList::" + operation
|
||||
logrus.Debugf(title+" id=%s", policylist.ID)
|
||||
jsonString, err := json.Marshal(policylist)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return PolicyListRequest("POST", "", string(jsonString))
|
||||
}
|
||||
|
||||
// Delete deletes PolicyList
|
||||
func (policylist *PolicyList) Delete() (*PolicyList, error) {
|
||||
operation := "Delete"
|
||||
title := "HCSShim::PolicyList::" + operation
|
||||
logrus.Debugf(title+" id=%s", policylist.ID)
|
||||
|
||||
return PolicyListRequest("DELETE", policylist.ID, "")
|
||||
}
|
||||
|
||||
// AddEndpoint adds an endpoint to a Policy List
|
||||
func (policylist *PolicyList) AddEndpoint(endpoint *HNSEndpoint) (*PolicyList, error) {
|
||||
operation := "AddEndpoint"
|
||||
title := "HCSShim::PolicyList::" + operation
|
||||
logrus.Debugf(title+" id=%s, endpointId:%s", policylist.ID, endpoint.Id)
|
||||
|
||||
_, err := policylist.Delete()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
// Add Endpoint to the Existing List
|
||||
policylist.EndpointReferences = append(policylist.EndpointReferences, "/endpoints/"+endpoint.Id)
|
||||
|
||||
return policylist.Create()
|
||||
}
|
||||
|
||||
// RemoveEndpoint removes an endpoint from the Policy List
|
||||
func (policylist *PolicyList) RemoveEndpoint(endpoint *HNSEndpoint) (*PolicyList, error) {
|
||||
operation := "RemoveEndpoint"
|
||||
title := "HCSShim::PolicyList::" + operation
|
||||
logrus.Debugf(title+" id=%s, endpointId:%s", policylist.ID, endpoint.Id)
|
||||
|
||||
_, err := policylist.Delete()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
elementToRemove := "/endpoints/" + endpoint.Id
|
||||
|
||||
var references []string
|
||||
|
||||
for _, endpointReference := range policylist.EndpointReferences {
|
||||
if endpointReference == elementToRemove {
|
||||
continue
|
||||
}
|
||||
references = append(references, endpointReference)
|
||||
}
|
||||
policylist.EndpointReferences = references
|
||||
return policylist.Create()
|
||||
}
|
||||
|
||||
// AddLoadBalancer adds a load balancer policy list for the specified endpoints
|
||||
func AddLoadBalancer(endpoints []HNSEndpoint, isILB bool, sourceVIP, vip string, protocol uint16, internalPort uint16, externalPort uint16) (*PolicyList, error) {
|
||||
operation := "AddLoadBalancer"
|
||||
title := "HCSShim::PolicyList::" + operation
|
||||
logrus.Debugf(title+" endpointId=%v, isILB=%v, sourceVIP=%s, vip=%s, protocol=%v, internalPort=%v, externalPort=%v", endpoints, isILB, sourceVIP, vip, protocol, internalPort, externalPort)
|
||||
|
||||
policylist := &PolicyList{}
|
||||
|
||||
elbPolicy := &ELBPolicy{
|
||||
SourceVIP: sourceVIP,
|
||||
ILB: isILB,
|
||||
}
|
||||
|
||||
if len(vip) > 0 {
|
||||
elbPolicy.VIPs = []string{vip}
|
||||
}
|
||||
elbPolicy.Type = ExternalLoadBalancer
|
||||
elbPolicy.Protocol = protocol
|
||||
elbPolicy.InternalPort = internalPort
|
||||
elbPolicy.ExternalPort = externalPort
|
||||
|
||||
for _, endpoint := range endpoints {
|
||||
policylist.EndpointReferences = append(policylist.EndpointReferences, "/endpoints/"+endpoint.Id)
|
||||
}
|
||||
|
||||
jsonString, err := json.Marshal(elbPolicy)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
policylist.Policies = append(policylist.Policies, jsonString)
|
||||
return policylist.Create()
|
||||
}
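// Illustrative usage sketch (editorial example; the VIP addresses and ports
// are assumptions): publish a set of endpoints behind an external load
// balancer policy mapping external port 80 to internal port 8080 over TCP:
//
//    plist, err := AddLoadBalancer(endpoints, false, "10.0.0.2", "10.0.0.100", 6, 8080, 80)
//    if err != nil {
//        return err
//    }
//    logrus.Debugf("created policy list %s", plist.ID)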
|
||||
|
||||
// AddRoute adds route policy list for the specified endpoints
|
||||
func AddRoute(endpoints []HNSEndpoint, destinationPrefix string, nextHop string, encapEnabled bool) (*PolicyList, error) {
|
||||
operation := "AddRoute"
|
||||
title := "HCSShim::PolicyList::" + operation
|
||||
logrus.Debugf(title+" destinationPrefix:%s", destinationPrefix)
|
||||
|
||||
policylist := &PolicyList{}
|
||||
|
||||
rPolicy := &RoutePolicy{
|
||||
DestinationPrefix: destinationPrefix,
|
||||
NextHop: nextHop,
|
||||
EncapEnabled: encapEnabled,
|
||||
}
|
||||
rPolicy.Type = Route
|
||||
|
||||
for _, endpoint := range endpoints {
|
||||
policylist.EndpointReferences = append(policylist.EndpointReferences, "/endpoints/"+endpoint.Id)
|
||||
}
|
||||
|
||||
jsonString, err := json.Marshal(rPolicy)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
policylist.Policies = append(policylist.Policies, jsonString)
|
||||
return policylist.Create()
|
||||
}
|
|
@@ -0,0 +1,212 @@
|
|||
package hcsshim
|
||||
|
||||
import (
|
||||
"errors"
|
||||
"io/ioutil"
|
||||
"os"
|
||||
"path/filepath"
|
||||
|
||||
"github.com/Microsoft/go-winio"
|
||||
"github.com/sirupsen/logrus"
|
||||
)
|
||||
|
||||
// ImportLayer will take the contents of the folder at importFolderPath and import
|
||||
// that into a layer with the id layerId. Note that in order to correctly populate
|
||||
// the layer and interpret the transport format, all parent layers must already
|
||||
// be present on the system at the paths provided in parentLayerPaths.
|
||||
func ImportLayer(info DriverInfo, layerID string, importFolderPath string, parentLayerPaths []string) error {
|
||||
title := "hcsshim::ImportLayer "
|
||||
logrus.Debugf(title+"flavour %d layerId %s folder %s", info.Flavour, layerID, importFolderPath)
|
||||
|
||||
// Generate layer descriptors
|
||||
layers, err := layerPathsToDescriptors(parentLayerPaths)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
// Convert info to API calling convention
|
||||
infop, err := convertDriverInfo(info)
|
||||
if err != nil {
|
||||
logrus.Error(err)
|
||||
return err
|
||||
}
|
||||
|
||||
err = importLayer(&infop, layerID, importFolderPath, layers)
|
||||
if err != nil {
|
||||
err = makeErrorf(err, title, "layerId=%s flavour=%d folder=%s", layerID, info.Flavour, importFolderPath)
|
||||
logrus.Error(err)
|
||||
return err
|
||||
}
|
||||
|
||||
logrus.Debugf(title+"succeeded flavour=%d layerId=%s folder=%s", info.Flavour, layerID, importFolderPath)
|
||||
return nil
|
||||
}
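// Illustrative usage sketch (editorial example; the paths are assumptions):
// importing an exported layer folder on top of a single parent layer:
//
//    info := DriverInfo{Flavour: 1, HomeDir: `C:\ProgramData\docker\windowsfilter`}
//    parents := []string{`C:\ProgramData\docker\windowsfilter\base`}
//    if err := ImportLayer(info, newLayerID, `C:\tmp\exported-layer`, parents); err != nil {
//        return err
//    }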
|
||||
|
||||
// LayerWriter is an interface that supports writing a new container image layer.
|
||||
type LayerWriter interface {
|
||||
// Add adds a file to the layer with given metadata.
|
||||
Add(name string, fileInfo *winio.FileBasicInfo) error
|
||||
// AddLink adds a hard link to the layer. The target must already have been added.
|
||||
AddLink(name string, target string) error
|
||||
// Remove removes a file that was present in a parent layer from the layer.
|
||||
Remove(name string) error
|
||||
// Write writes data to the current file. The data must be in the format of a Win32
|
||||
// backup stream.
|
||||
Write(b []byte) (int, error)
|
||||
// Close finishes the layer writing process and releases any resources.
|
||||
Close() error
|
||||
}
|
||||
|
||||
// FilterLayerWriter provides an interface to write the contents of a layer to the file system.
|
||||
type FilterLayerWriter struct {
|
||||
context uintptr
|
||||
}
|
||||
|
||||
// Add adds a file or directory to the layer. The file's parent directory must have already been added.
|
||||
//
|
||||
// name contains the file's relative path. fileInfo contains file times and file attributes; the rest
|
||||
// of the file metadata and the file data must be written as a Win32 backup stream to the Write() method.
|
||||
// winio.BackupStreamWriter can be used to facilitate this.
|
||||
func (w *FilterLayerWriter) Add(name string, fileInfo *winio.FileBasicInfo) error {
|
||||
if name[0] != '\\' {
|
||||
name = `\` + name
|
||||
}
|
||||
err := importLayerNext(w.context, name, fileInfo)
|
||||
if err != nil {
|
||||
return makeError(err, "ImportLayerNext", "")
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// AddLink adds a hard link to the layer. The target of the link must have already been added.
|
||||
func (w *FilterLayerWriter) AddLink(name string, target string) error {
|
||||
return errors.New("hard links not yet supported")
|
||||
}
|
||||
|
||||
// Remove removes a file from the layer. The file must have been present in the parent layer.
|
||||
//
|
||||
// name contains the file's relative path.
|
||||
func (w *FilterLayerWriter) Remove(name string) error {
|
||||
if name[0] != '\\' {
|
||||
name = `\` + name
|
||||
}
|
||||
err := importLayerNext(w.context, name, nil)
|
||||
if err != nil {
|
||||
return makeError(err, "ImportLayerNext", "")
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// Write writes more backup stream data to the current file.
|
||||
func (w *FilterLayerWriter) Write(b []byte) (int, error) {
|
||||
err := importLayerWrite(w.context, b)
|
||||
if err != nil {
|
||||
err = makeError(err, "ImportLayerWrite", "")
|
||||
return 0, err
|
||||
}
|
||||
return len(b), err
|
||||
}
|
||||
|
||||
// Close completes the layer write operation. The error must be checked to ensure that the
|
||||
// operation was successful.
|
||||
func (w *FilterLayerWriter) Close() (err error) {
|
||||
if w.context != 0 {
|
||||
err = importLayerEnd(w.context)
|
||||
if err != nil {
|
||||
err = makeError(err, "ImportLayerEnd", "")
|
||||
}
|
||||
w.context = 0
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
type legacyLayerWriterWrapper struct {
|
||||
*legacyLayerWriter
|
||||
info DriverInfo
|
||||
layerID string
|
||||
path string
|
||||
parentLayerPaths []string
|
||||
}
|
||||
|
||||
func (r *legacyLayerWriterWrapper) Close() error {
|
||||
defer os.RemoveAll(r.root)
|
||||
err := r.legacyLayerWriter.Close()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
// Use the original path here because ImportLayer does not support long paths for the source in TP5.
|
||||
// But do use a long path for the destination to work around another bug with directories
|
||||
// with MAX_PATH - 12 < length < MAX_PATH.
|
||||
info := r.info
|
||||
fullPath, err := makeLongAbsPath(filepath.Join(info.HomeDir, r.layerID))
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
info.HomeDir = ""
|
||||
if err = ImportLayer(info, fullPath, r.path, r.parentLayerPaths); err != nil {
|
||||
return err
|
||||
}
|
||||
// Add any hard links that were collected.
|
||||
for _, lnk := range r.PendingLinks {
|
||||
if err = os.Remove(lnk.Path); err != nil && !os.IsNotExist(err) {
|
||||
return err
|
||||
}
|
||||
if err = os.Link(lnk.Target, lnk.Path); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
// Prepare the utility VM for use if one is present in the layer.
|
||||
if r.HasUtilityVM {
|
||||
err = ProcessUtilityVMImage(filepath.Join(fullPath, "UtilityVM"))
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// NewLayerWriter returns a new layer writer for creating a layer on disk.
|
||||
// The caller must have taken the SeBackupPrivilege and SeRestorePrivilege privileges
|
||||
// to call this and any methods on the resulting LayerWriter.
|
||||
func NewLayerWriter(info DriverInfo, layerID string, parentLayerPaths []string) (LayerWriter, error) {
|
||||
if len(parentLayerPaths) == 0 {
|
||||
// This is a base layer. It gets imported differently.
|
||||
return &baseLayerWriter{
|
||||
root: filepath.Join(info.HomeDir, layerID),
|
||||
}, nil
|
||||
}
|
||||
|
||||
if procImportLayerBegin.Find() != nil {
|
||||
// The new layer writer is not available on this Windows build. Fall back to the
|
||||
// legacy import code path.
|
||||
path, err := ioutil.TempDir("", "hcs")
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return &legacyLayerWriterWrapper{
|
||||
legacyLayerWriter: newLegacyLayerWriter(path, parentLayerPaths, filepath.Join(info.HomeDir, layerID)),
|
||||
info: info,
|
||||
layerID: layerID,
|
||||
path: path,
|
||||
parentLayerPaths: parentLayerPaths,
|
||||
}, nil
|
||||
}
|
||||
layers, err := layerPathsToDescriptors(parentLayerPaths)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
infop, err := convertDriverInfo(info)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
w := &FilterLayerWriter{}
|
||||
err = importLayerBegin(&infop, layerID, layers, &w.context)
|
||||
if err != nil {
|
||||
return nil, makeError(err, "ImportLayerStart", "")
|
||||
}
|
||||
return w, nil
|
||||
}
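// Illustrative usage sketch (editorial example, not part of the vendored
// source): the typical sequence is Add (or AddLink/Remove) for each entry,
// Write for that entry's Win32 backup-stream data, then Close. The file name,
// fileInfo, and backupStreamBytes below are assumptions:
//
//    w, err := NewLayerWriter(info, layerID, parentLayerPaths)
//    if err != nil {
//        return err
//    }
//    if err := w.Add(`Files\hello.txt`, fileInfo); err != nil { // fileInfo is a *winio.FileBasicInfo
//        return err
//    }
//    if _, err := w.Write(backupStreamBytes); err != nil {
//        return err
//    }
//    return w.Close()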
|
|
@@ -0,0 +1,187 @@
|
|||
package hcsshim
|
||||
|
||||
import (
|
||||
"encoding/json"
|
||||
"io"
|
||||
"time"
|
||||
)
|
||||
|
||||
// ProcessConfig is used as both the input of Container.CreateProcess
|
||||
// and to convert the parameters to JSON for passing onto the HCS
|
||||
type ProcessConfig struct {
|
||||
ApplicationName string `json:",omitempty"`
|
||||
CommandLine string `json:",omitempty"`
|
||||
CommandArgs []string `json:",omitempty"` // Used by Linux Containers on Windows
|
||||
User string `json:",omitempty"`
|
||||
WorkingDirectory string `json:",omitempty"`
|
||||
Environment map[string]string `json:",omitempty"`
|
||||
EmulateConsole bool `json:",omitempty"`
|
||||
CreateStdInPipe bool `json:",omitempty"`
|
||||
CreateStdOutPipe bool `json:",omitempty"`
|
||||
CreateStdErrPipe bool `json:",omitempty"`
|
||||
ConsoleSize [2]uint `json:",omitempty"`
|
||||
CreateInUtilityVm bool `json:",omitempty"` // Used by Linux Containers on Windows
|
||||
OCISpecification *json.RawMessage `json:",omitempty"` // Used by Linux Containers on Windows
|
||||
}
|
||||
|
||||
type Layer struct {
|
||||
ID string
|
||||
Path string
|
||||
}
|
||||
|
||||
type MappedDir struct {
|
||||
HostPath string
|
||||
ContainerPath string
|
||||
ReadOnly bool
|
||||
BandwidthMaximum uint64
|
||||
IOPSMaximum uint64
|
||||
}
|
||||
|
||||
type MappedPipe struct {
|
||||
HostPath string
|
||||
ContainerPipeName string
|
||||
}
|
||||
|
||||
type HvRuntime struct {
|
||||
ImagePath string `json:",omitempty"`
|
||||
SkipTemplate bool `json:",omitempty"`
|
||||
LinuxInitrdFile string `json:",omitempty"` // File under ImagePath on host containing an initrd image for starting a Linux utility VM
|
||||
LinuxKernelFile string `json:",omitempty"` // File under ImagePath on host containing a kernel for starting a Linux utility VM
|
||||
LinuxBootParameters string `json:",omitempty"` // Additional boot parameters for starting a Linux Utility VM in initrd mode
|
||||
BootSource string `json:",omitempty"` // "Vhd" for Linux Utility VM booting from VHD
|
||||
WritableBootSource bool `json:",omitempty"` // Linux Utility VM booting from VHD
|
||||
}
|
||||
|
||||
type MappedVirtualDisk struct {
|
||||
HostPath string `json:",omitempty"` // Path to VHD on the host
|
||||
ContainerPath string // Platform-specific mount point path in the container
|
||||
CreateInUtilityVM bool `json:",omitempty"`
|
||||
ReadOnly bool `json:",omitempty"`
|
||||
Cache string `json:",omitempty"` // "" (Unspecified); "Disabled"; "Enabled"; "Private"; "PrivateAllowSharing"
|
||||
AttachOnly bool `json:",omitempty"`
|
||||
}
|
||||
|
||||
// ContainerConfig is used as both the input of CreateContainer
|
||||
// and to convert the parameters to JSON for passing onto the HCS
|
||||
type ContainerConfig struct {
|
||||
SystemType string // HCS requires this to be hard-coded to "Container"
|
||||
Name string // Name of the container. We use the docker ID.
|
||||
Owner string `json:",omitempty"` // The management platform that created this container
|
||||
VolumePath string `json:",omitempty"` // Windows volume path for scratch space. Used by Windows Server Containers only. Format \\?\\Volume{GUID}
|
||||
IgnoreFlushesDuringBoot bool `json:",omitempty"` // Optimization hint for container startup in Windows
|
||||
LayerFolderPath string `json:",omitempty"` // Where the layer folders are located. Used by Windows Server Containers only. Format %root%\windowsfilter\containerID
|
||||
Layers []Layer // List of storage layers. Required for Windows Server and Hyper-V Containers. Format ID=GUID;Path=%root%\windowsfilter\layerID
|
||||
Credentials string `json:",omitempty"` // Credentials information
|
||||
ProcessorCount uint32 `json:",omitempty"` // Number of processors to assign to the container.
|
||||
ProcessorWeight uint64 `json:",omitempty"` // CPU shares (relative weight to other containers with cpu shares). Range is from 1 to 10000. A value of 0 results in default shares.
|
||||
ProcessorMaximum int64 `json:",omitempty"` // Specifies the portion of processor cycles that this container can use as a percentage times 100. Range is from 1 to 10000. A value of 0 results in no limit.
|
||||
StorageIOPSMaximum uint64 `json:",omitempty"` // Maximum Storage IOPS
|
||||
StorageBandwidthMaximum uint64 `json:",omitempty"` // Maximum Storage Bandwidth in bytes per second
|
||||
StorageSandboxSize uint64 `json:",omitempty"` // Size in bytes that the container system drive should be expanded to if smaller
|
||||
MemoryMaximumInMB int64 `json:",omitempty"` // Maximum memory available to the container in Megabytes
|
||||
HostName string `json:",omitempty"` // Hostname
|
||||
MappedDirectories []MappedDir `json:",omitempty"` // List of mapped directories (volumes/mounts)
|
||||
MappedPipes []MappedPipe `json:",omitempty"` // List of mapped Windows named pipes
|
||||
HvPartition bool // True if it is a Hyper-V Container
|
||||
NetworkSharedContainerName string `json:",omitempty"` // Name (ID) of the container that we will share the network stack with.
|
||||
EndpointList []string `json:",omitempty"` // List of networking endpoints to be attached to container
|
||||
HvRuntime *HvRuntime `json:",omitempty"` // Hyper-V container settings. Used by Hyper-V containers only. Format ImagePath=%root%\BaseLayerID\UtilityVM
|
||||
Servicing bool `json:",omitempty"` // True if this container is for servicing
|
||||
AllowUnqualifiedDNSQuery bool `json:",omitempty"` // True to allow unqualified DNS name resolution
|
||||
DNSSearchList string `json:",omitempty"` // Comma-separated list of DNS suffixes to use for name resolution
|
||||
ContainerType string `json:",omitempty"` // "Linux" for Linux containers on Windows. Omitted otherwise.
|
||||
TerminateOnLastHandleClosed bool `json:",omitempty"` // Should HCS terminate the container once all handles have been closed
|
||||
MappedVirtualDisks []MappedVirtualDisk `json:",omitempty"` // Array of virtual disks to mount at start
|
||||
}
|
||||
|
||||
type ComputeSystemQuery struct {
|
||||
IDs []string `json:"Ids,omitempty"`
|
||||
Types []string `json:",omitempty"`
|
||||
Names []string `json:",omitempty"`
|
||||
Owners []string `json:",omitempty"`
|
||||
}
|
||||
|
||||
// Container represents a created (but not necessarily running) container.
|
||||
type Container interface {
|
||||
// Start synchronously starts the container.
|
||||
Start() error
|
||||
|
||||
// Shutdown requests a container shutdown, but it may not actually be shutdown until Wait() succeeds.
|
||||
Shutdown() error
|
||||
|
||||
// Terminate requests a container terminate, but it may not actually be terminated until Wait() succeeds.
|
||||
Terminate() error
|
||||
|
||||
// Wait synchronously waits for the container to shut down or terminate.
|
||||
Wait() error
|
||||
|
||||
// WaitTimeout synchronously waits for the container to terminate or the duration to elapse. It
|
||||
// returns an error if the timeout occurs.
|
||||
WaitTimeout(time.Duration) error
|
||||
|
||||
// Pause pauses the execution of a container.
|
||||
Pause() error
|
||||
|
||||
// Resume resumes the execution of a container.
|
||||
Resume() error
|
||||
|
||||
// HasPendingUpdates returns true if the container has updates pending to install.
|
||||
HasPendingUpdates() (bool, error)
|
||||
|
||||
// Statistics returns statistics for a container.
|
||||
Statistics() (Statistics, error)
|
||||
|
||||
// ProcessList returns details for the processes in a container.
|
||||
ProcessList() ([]ProcessListItem, error)
|
||||
|
||||
// MappedVirtualDisks returns virtual disks mapped to a utility VM, indexed by controller
|
||||
MappedVirtualDisks() (map[int]MappedVirtualDiskController, error)
|
||||
|
||||
// CreateProcess launches a new process within the container.
|
||||
CreateProcess(c *ProcessConfig) (Process, error)
|
||||
|
||||
// OpenProcess gets an interface to an existing process within the container.
|
||||
OpenProcess(pid int) (Process, error)
|
||||
|
||||
// Close cleans up any state associated with the container but does not terminate or wait for it.
|
||||
Close() error
|
||||
|
||||
// Modify the System
|
||||
Modify(config *ResourceModificationRequestResponse) error
|
||||
}
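// Illustrative lifecycle sketch (editorial example; it assumes the
// package-level CreateContainer constructor defined elsewhere in this
// package and a minimal configuration):
//
//    container, err := CreateContainer(id, &ContainerConfig{SystemType: "Container", Name: id})
//    if err != nil {
//        return err
//    }
//    defer container.Close()
//    if err := container.Start(); err != nil {
//        return err
//    }
//    process, err := container.CreateProcess(&ProcessConfig{CommandLine: "cmd /c echo hello"})
//    if err != nil {
//        return err
//    }
//    _ = process.Wait()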
|
||||
|
||||
// Process represents a running or exited process.
|
||||
type Process interface {
|
||||
// Pid returns the process ID of the process within the container.
|
||||
Pid() int
|
||||
|
||||
// Kill signals the process to terminate but does not wait for it to finish terminating.
|
||||
Kill() error
|
||||
|
||||
// Wait waits for the process to exit.
|
||||
Wait() error
|
||||
|
||||
// WaitTimeout waits for the process to exit or the duration to elapse. It returns
|
||||
// an error if the timeout occurs.
|
||||
WaitTimeout(time.Duration) error
|
||||
|
||||
// ExitCode returns the exit code of the process. The process must have
|
||||
// already terminated.
|
||||
ExitCode() (int, error)
|
||||
|
||||
// ResizeConsole resizes the console of the process.
|
||||
ResizeConsole(width, height uint16) error
|
||||
|
||||
// Stdio returns the stdin, stdout, and stderr pipes, respectively. Closing
|
||||
// these pipes does not close the underlying pipes; it should be possible to
|
||||
// call this multiple times to get multiple interfaces.
|
||||
Stdio() (io.WriteCloser, io.ReadCloser, io.ReadCloser, error)
|
||||
|
||||
// CloseStdin closes the write side of the stdin pipe so that the process is
|
||||
// notified on the read side that there is no more data in stdin.
|
||||
CloseStdin() error
|
||||
|
||||
// Close cleans up any state associated with the process but does not kill
|
||||
// or wait on it.
|
||||
Close() error
|
||||
}
|
|
@@ -0,0 +1,30 @@
|
|||
package hcsshim
|
||||
|
||||
import "github.com/sirupsen/logrus"
|
||||
|
||||
// LayerExists will return true if a layer with the given id exists and is known
|
||||
// to the system.
|
||||
func LayerExists(info DriverInfo, id string) (bool, error) {
|
||||
title := "hcsshim::LayerExists "
|
||||
logrus.Debugf(title+"Flavour %d ID %s", info.Flavour, id)
|
||||
|
||||
// Convert info to API calling convention
|
||||
infop, err := convertDriverInfo(info)
|
||||
if err != nil {
|
||||
logrus.Error(err)
|
||||
return false, err
|
||||
}
|
||||
|
||||
// Call the procedure itself.
|
||||
var exists uint32
|
||||
|
||||
err = layerExists(&infop, id, &exists)
|
||||
if err != nil {
|
||||
err = makeErrorf(err, title, "id=%s flavour=%d", id, info.Flavour)
|
||||
logrus.Error(err)
|
||||
return false, err
|
||||
}
|
||||
|
||||
logrus.Debugf(title+"succeeded flavour=%d id=%s exists=%d", info.Flavour, id, exists)
|
||||
return exists != 0, nil
|
||||
}
|
|
@@ -0,0 +1,111 @@
|
|||
package hcsshim
|
||||
|
||||
// This file contains utility functions to support storage (graph) related
|
||||
// functionality.
|
||||
|
||||
import (
|
||||
"path/filepath"
|
||||
"syscall"
|
||||
|
||||
"github.com/sirupsen/logrus"
|
||||
)
|
||||
|
||||
/* To pass into syscall, we need a struct matching the following:
|
||||
enum GraphDriverType
|
||||
{
|
||||
DiffDriver,
|
||||
FilterDriver
|
||||
};
|
||||
|
||||
struct DriverInfo {
|
||||
GraphDriverType Flavour;
|
||||
LPCWSTR HomeDir;
|
||||
};
|
||||
*/
|
||||
type DriverInfo struct {
|
||||
Flavour int
|
||||
HomeDir string
|
||||
}
|
||||
|
||||
type driverInfo struct {
|
||||
Flavour int
|
||||
HomeDirp *uint16
|
||||
}
|
||||
|
||||
func convertDriverInfo(info DriverInfo) (driverInfo, error) {
|
||||
homedirp, err := syscall.UTF16PtrFromString(info.HomeDir)
|
||||
if err != nil {
|
||||
logrus.Debugf("Failed conversion of home to pointer for driver info: %s", err.Error())
|
||||
return driverInfo{}, err
|
||||
}
|
||||
|
||||
return driverInfo{
|
||||
Flavour: info.Flavour,
|
||||
HomeDirp: homedirp,
|
||||
}, nil
|
||||
}
|
||||
|
||||
/* To pass into syscall, we need a struct matching the following:
|
||||
typedef struct _WC_LAYER_DESCRIPTOR {
|
||||
|
||||
//
|
||||
// The ID of the layer
|
||||
//
|
||||
|
||||
GUID LayerId;
|
||||
|
||||
//
|
||||
// Additional flags
|
||||
//
|
||||
|
||||
union {
|
||||
struct {
|
||||
ULONG Reserved : 31;
|
||||
ULONG Dirty : 1; // Created from sandbox as a result of snapshot
|
||||
};
|
||||
ULONG Value;
|
||||
} Flags;
|
||||
|
||||
//
|
||||
// Path to the layer root directory, null-terminated
|
||||
//
|
||||
|
||||
PCWSTR Path;
|
||||
|
||||
} WC_LAYER_DESCRIPTOR, *PWC_LAYER_DESCRIPTOR;
|
||||
*/
|
||||
type WC_LAYER_DESCRIPTOR struct {
|
||||
LayerId GUID
|
||||
Flags uint32
|
||||
Pathp *uint16
|
||||
}
|
||||
|
||||
func layerPathsToDescriptors(parentLayerPaths []string) ([]WC_LAYER_DESCRIPTOR, error) {
|
||||
// Array of descriptors that gets constructed.
|
||||
var layers []WC_LAYER_DESCRIPTOR
|
||||
|
||||
for i := 0; i < len(parentLayerPaths); i++ {
|
||||
// Create a layer descriptor, using the folder name
|
||||
// as the source for a GUID LayerId
|
||||
_, folderName := filepath.Split(parentLayerPaths[i])
|
||||
g, err := NameToGuid(folderName)
|
||||
if err != nil {
|
||||
logrus.Debugf("Failed to convert name to guid %s", err)
|
||||
return nil, err
|
||||
}
|
||||
|
||||
p, err := syscall.UTF16PtrFromString(parentLayerPaths[i])
|
||||
if err != nil {
|
||||
logrus.Debugf("Failed conversion of parentLayerPath to pointer %s", err)
|
||||
return nil, err
|
||||
}
|
||||
|
||||
layers = append(layers, WC_LAYER_DESCRIPTOR{
|
||||
LayerId: g,
|
||||
Flags: 0,
|
||||
Pathp: p,
|
||||
})
|
||||
}
|
||||
|
||||
return layers, nil
|
||||
}
|
|
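A hedged sketch of how these helpers fit together inside the package; buildDescriptors is an illustrative helper (not part of hcsshim) and the parent layer paths are assumptions.

package hcsshim

// buildDescriptors shows how layerPathsToDescriptors is driven with a list of
// parent layer folders, each of which becomes a WC_LAYER_DESCRIPTOR.
func buildDescriptors() ([]WC_LAYER_DESCRIPTOR, error) {
	parents := []string{
		`C:\ProgramData\docker\windowsfilter\base-layer`, // illustrative paths
		`C:\ProgramData\docker\windowsfilter\app-layer`,
	}
	return layerPathsToDescriptors(parents)
}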
@ -0,0 +1,741 @@
|
|||
package hcsshim
|
||||
|
||||
import (
|
||||
"bufio"
|
||||
"encoding/binary"
|
||||
"errors"
|
||||
"fmt"
|
||||
"io"
|
||||
"os"
|
||||
"path/filepath"
|
||||
"strings"
|
||||
"syscall"
|
||||
|
||||
"github.com/Microsoft/go-winio"
|
||||
)
|
||||
|
||||
var errorIterationCanceled = errors.New("")
|
||||
|
||||
var mutatedUtilityVMFiles = map[string]bool{
|
||||
`EFI\Microsoft\Boot\BCD`: true,
|
||||
`EFI\Microsoft\Boot\BCD.LOG`: true,
|
||||
`EFI\Microsoft\Boot\BCD.LOG1`: true,
|
||||
`EFI\Microsoft\Boot\BCD.LOG2`: true,
|
||||
}
|
||||
|
||||
const (
|
||||
filesPath = `Files`
|
||||
hivesPath = `Hives`
|
||||
utilityVMPath = `UtilityVM`
|
||||
utilityVMFilesPath = `UtilityVM\Files`
|
||||
)
|
||||
|
||||
func openFileOrDir(path string, mode uint32, createDisposition uint32) (file *os.File, err error) {
|
||||
return winio.OpenForBackup(path, mode, syscall.FILE_SHARE_READ, createDisposition)
|
||||
}
|
||||
|
||||
func makeLongAbsPath(path string) (string, error) {
|
||||
if strings.HasPrefix(path, `\\?\`) || strings.HasPrefix(path, `\\.\`) {
|
||||
return path, nil
|
||||
}
|
||||
if !filepath.IsAbs(path) {
|
||||
absPath, err := filepath.Abs(path)
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
path = absPath
|
||||
}
|
||||
if strings.HasPrefix(path, `\\`) {
|
||||
return `\\?\UNC\` + path[2:], nil
|
||||
}
|
||||
return `\\?\` + path, nil
|
||||
}
|
||||
|
||||
func hasPathPrefix(p, prefix string) bool {
|
||||
return strings.HasPrefix(p, prefix) && len(p) > len(prefix) && p[len(prefix)] == '\\'
|
||||
}
|
||||
|
||||
type fileEntry struct {
|
||||
path string
|
||||
fi os.FileInfo
|
||||
err error
|
||||
}
|
||||
|
||||
type legacyLayerReader struct {
|
||||
root string
|
||||
result chan *fileEntry
|
||||
proceed chan bool
|
||||
currentFile *os.File
|
||||
backupReader *winio.BackupFileReader
|
||||
}
|
||||
|
||||
// newLegacyLayerReader returns a new LayerReader that can read the Windows
|
||||
// container layer transport format from disk.
|
||||
func newLegacyLayerReader(root string) *legacyLayerReader {
|
||||
r := &legacyLayerReader{
|
||||
root: root,
|
||||
result: make(chan *fileEntry),
|
||||
proceed: make(chan bool),
|
||||
}
|
||||
go r.walk()
|
||||
return r
|
||||
}
|
||||
|
||||
func readTombstones(path string) (map[string]([]string), error) {
|
||||
tf, err := os.Open(filepath.Join(path, "tombstones.txt"))
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
defer tf.Close()
|
||||
s := bufio.NewScanner(tf)
|
||||
if !s.Scan() || s.Text() != "\xef\xbb\xbfVersion 1.0" {
|
||||
return nil, errors.New("Invalid tombstones file")
|
||||
}
|
||||
|
||||
ts := make(map[string]([]string))
|
||||
for s.Scan() {
|
||||
t := filepath.Join(filesPath, s.Text()[1:]) // skip leading `\`
|
||||
dir := filepath.Dir(t)
|
||||
ts[dir] = append(ts[dir], t)
|
||||
}
|
||||
if err = s.Err(); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
return ts, nil
|
||||
}
|
||||
|
||||
func (r *legacyLayerReader) walkUntilCancelled() error {
|
||||
root, err := makeLongAbsPath(r.root)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
r.root = root
|
||||
ts, err := readTombstones(r.root)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
err = filepath.Walk(r.root, func(path string, info os.FileInfo, err error) error {
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if path == r.root || path == filepath.Join(r.root, "tombstones.txt") || strings.HasSuffix(path, ".$wcidirs$") {
|
||||
return nil
|
||||
}
|
||||
|
||||
r.result <- &fileEntry{path, info, nil}
|
||||
if !<-r.proceed {
|
||||
return errorIterationCanceled
|
||||
}
|
||||
|
||||
// List all the tombstones.
|
||||
if info.IsDir() {
|
||||
relPath, err := filepath.Rel(r.root, path)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if dts, ok := ts[relPath]; ok {
|
||||
for _, t := range dts {
|
||||
r.result <- &fileEntry{filepath.Join(r.root, t), nil, nil}
|
||||
if !<-r.proceed {
|
||||
return errorIterationCanceled
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
return nil
|
||||
})
|
||||
if err == errorIterationCanceled {
|
||||
return nil
|
||||
}
|
||||
if err == nil {
|
||||
return io.EOF
|
||||
}
|
||||
return err
|
||||
}
|
||||
|
||||
func (r *legacyLayerReader) walk() {
|
||||
defer close(r.result)
|
||||
if !<-r.proceed {
|
||||
return
|
||||
}
|
||||
|
||||
err := r.walkUntilCancelled()
|
||||
if err != nil {
|
||||
for {
|
||||
r.result <- &fileEntry{err: err}
|
||||
if !<-r.proceed {
|
||||
return
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func (r *legacyLayerReader) reset() {
|
||||
if r.backupReader != nil {
|
||||
r.backupReader.Close()
|
||||
r.backupReader = nil
|
||||
}
|
||||
if r.currentFile != nil {
|
||||
r.currentFile.Close()
|
||||
r.currentFile = nil
|
||||
}
|
||||
}
|
||||
|
||||
func findBackupStreamSize(r io.Reader) (int64, error) {
|
||||
br := winio.NewBackupStreamReader(r)
|
||||
for {
|
||||
hdr, err := br.Next()
|
||||
if err != nil {
|
||||
if err == io.EOF {
|
||||
err = nil
|
||||
}
|
||||
return 0, err
|
||||
}
|
||||
if hdr.Id == winio.BackupData {
|
||||
return hdr.Size, nil
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func (r *legacyLayerReader) Next() (path string, size int64, fileInfo *winio.FileBasicInfo, err error) {
|
||||
r.reset()
|
||||
r.proceed <- true
|
||||
fe := <-r.result
|
||||
if fe == nil {
|
||||
err = errors.New("LegacyLayerReader closed")
|
||||
return
|
||||
}
|
||||
if fe.err != nil {
|
||||
err = fe.err
|
||||
return
|
||||
}
|
||||
|
||||
path, err = filepath.Rel(r.root, fe.path)
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
|
||||
if fe.fi == nil {
|
||||
// This is a tombstone. Return a nil fileInfo.
|
||||
return
|
||||
}
|
||||
|
||||
if fe.fi.IsDir() && hasPathPrefix(path, filesPath) {
|
||||
fe.path += ".$wcidirs$"
|
||||
}
|
||||
|
||||
f, err := openFileOrDir(fe.path, syscall.GENERIC_READ, syscall.OPEN_EXISTING)
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
defer func() {
|
||||
if f != nil {
|
||||
f.Close()
|
||||
}
|
||||
}()
|
||||
|
||||
fileInfo, err = winio.GetFileBasicInfo(f)
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
|
||||
if !hasPathPrefix(path, filesPath) {
|
||||
size = fe.fi.Size()
|
||||
r.backupReader = winio.NewBackupFileReader(f, false)
|
||||
if path == hivesPath || path == filesPath {
|
||||
// The Hives directory has a non-deterministic file time because of the
|
||||
// nature of the import process. Use the times from System_Delta.
|
||||
var g *os.File
|
||||
g, err = os.Open(filepath.Join(r.root, hivesPath, `System_Delta`))
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
attr := fileInfo.FileAttributes
|
||||
fileInfo, err = winio.GetFileBasicInfo(g)
|
||||
g.Close()
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
fileInfo.FileAttributes = attr
|
||||
}
|
||||
|
||||
// The creation time and access time get reset for files outside of the Files path.
|
||||
fileInfo.CreationTime = fileInfo.LastWriteTime
|
||||
fileInfo.LastAccessTime = fileInfo.LastWriteTime
|
||||
|
||||
} else {
|
||||
// The file attributes are written before the backup stream.
|
||||
var attr uint32
|
||||
err = binary.Read(f, binary.LittleEndian, &attr)
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
fileInfo.FileAttributes = uintptr(attr)
|
||||
beginning := int64(4)
|
||||
|
||||
// Find the accurate file size.
|
||||
if !fe.fi.IsDir() {
|
||||
size, err = findBackupStreamSize(f)
|
||||
if err != nil {
|
||||
err = &os.PathError{Op: "findBackupStreamSize", Path: fe.path, Err: err}
|
||||
return
|
||||
}
|
||||
}
|
||||
|
||||
// Return back to the beginning of the backup stream.
|
||||
_, err = f.Seek(beginning, 0)
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
}
|
||||
|
||||
r.currentFile = f
|
||||
f = nil
|
||||
return
|
||||
}
|
||||
|
||||
func (r *legacyLayerReader) Read(b []byte) (int, error) {
|
||||
if r.backupReader == nil {
|
||||
if r.currentFile == nil {
|
||||
return 0, io.EOF
|
||||
}
|
||||
return r.currentFile.Read(b)
|
||||
}
|
||||
return r.backupReader.Read(b)
|
||||
}
|
||||
|
||||
func (r *legacyLayerReader) Seek(offset int64, whence int) (int64, error) {
|
||||
if r.backupReader == nil {
|
||||
if r.currentFile == nil {
|
||||
return 0, errors.New("no current file")
|
||||
}
|
||||
return r.currentFile.Seek(offset, whence)
|
||||
}
|
||||
return 0, errors.New("seek not supported on this stream")
|
||||
}
|
||||
|
||||
func (r *legacyLayerReader) Close() error {
|
||||
r.proceed <- false
|
||||
<-r.result
|
||||
r.reset()
|
||||
return nil
|
||||
}
|
||||
|
||||
type pendingLink struct {
|
||||
Path, Target string
|
||||
}
|
||||
|
||||
type legacyLayerWriter struct {
|
||||
root string
|
||||
parentRoots []string
|
||||
destRoot string
|
||||
currentFile *os.File
|
||||
backupWriter *winio.BackupFileWriter
|
||||
tombstones []string
|
||||
pathFixed bool
|
||||
HasUtilityVM bool
|
||||
uvmDi []dirInfo
|
||||
addedFiles map[string]bool
|
||||
PendingLinks []pendingLink
|
||||
}
|
||||
|
||||
// newLegacyLayerWriter returns a LayerWriter that can write the container layer
|
||||
// transport format to disk.
|
||||
func newLegacyLayerWriter(root string, parentRoots []string, destRoot string) *legacyLayerWriter {
|
||||
return &legacyLayerWriter{
|
||||
root: root,
|
||||
parentRoots: parentRoots,
|
||||
destRoot: destRoot,
|
||||
addedFiles: make(map[string]bool),
|
||||
}
|
||||
}
|
||||
|
||||
func (w *legacyLayerWriter) init() error {
|
||||
if !w.pathFixed {
|
||||
path, err := makeLongAbsPath(w.root)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
for i, p := range w.parentRoots {
|
||||
w.parentRoots[i], err = makeLongAbsPath(p)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
destPath, err := makeLongAbsPath(w.destRoot)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
w.root = path
|
||||
w.destRoot = destPath
|
||||
w.pathFixed = true
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (w *legacyLayerWriter) initUtilityVM() error {
|
||||
if !w.HasUtilityVM {
|
||||
err := os.Mkdir(filepath.Join(w.destRoot, utilityVMPath), 0)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
// Server 2016 does not support multiple layers for the utility VM, so
|
||||
// clone the utility VM from the parent layer into this layer. Use hard
|
||||
// links to avoid unnecessary copying, since most of the files are
|
||||
// immutable.
|
||||
err = cloneTree(filepath.Join(w.parentRoots[0], utilityVMFilesPath), filepath.Join(w.destRoot, utilityVMFilesPath), mutatedUtilityVMFiles)
|
||||
if err != nil {
|
||||
return fmt.Errorf("cloning the parent utility VM image failed: %s", err)
|
||||
}
|
||||
w.HasUtilityVM = true
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (w *legacyLayerWriter) reset() {
|
||||
if w.backupWriter != nil {
|
||||
w.backupWriter.Close()
|
||||
w.backupWriter = nil
|
||||
}
|
||||
if w.currentFile != nil {
|
||||
w.currentFile.Close()
|
||||
w.currentFile = nil
|
||||
}
|
||||
}
|
||||
|
||||
// copyFileWithMetadata copies a file using the backup/restore APIs in order to preserve metadata
|
||||
func copyFileWithMetadata(srcPath, destPath string, isDir bool) (fileInfo *winio.FileBasicInfo, err error) {
|
||||
createDisposition := uint32(syscall.CREATE_NEW)
|
||||
if isDir {
|
||||
err = os.Mkdir(destPath, 0)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
createDisposition = syscall.OPEN_EXISTING
|
||||
}
|
||||
|
||||
src, err := openFileOrDir(srcPath, syscall.GENERIC_READ|winio.ACCESS_SYSTEM_SECURITY, syscall.OPEN_EXISTING)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
defer src.Close()
|
||||
srcr := winio.NewBackupFileReader(src, true)
|
||||
defer srcr.Close()
|
||||
|
||||
fileInfo, err = winio.GetFileBasicInfo(src)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
dest, err := openFileOrDir(destPath, syscall.GENERIC_READ|syscall.GENERIC_WRITE|winio.WRITE_DAC|winio.WRITE_OWNER|winio.ACCESS_SYSTEM_SECURITY, createDisposition)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
defer dest.Close()
|
||||
|
||||
err = winio.SetFileBasicInfo(dest, fileInfo)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
destw := winio.NewBackupFileWriter(dest, true)
|
||||
defer func() {
|
||||
cerr := destw.Close()
|
||||
if err == nil {
|
||||
err = cerr
|
||||
}
|
||||
}()
|
||||
|
||||
_, err = io.Copy(destw, srcr)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
return fileInfo, nil
|
||||
}
|
||||
|
||||
// cloneTree clones a directory tree using hard links. It skips hard links for
|
||||
// the file names in the provided map and just copies those files.
|
||||
func cloneTree(srcPath, destPath string, mutatedFiles map[string]bool) error {
|
||||
var di []dirInfo
|
||||
err := filepath.Walk(srcPath, func(srcFilePath string, info os.FileInfo, err error) error {
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
relPath, err := filepath.Rel(srcPath, srcFilePath)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
destFilePath := filepath.Join(destPath, relPath)
|
||||
|
||||
// Directories, reparse points, and files that will be mutated during
|
||||
// utility VM import must be copied. All other files can be hard linked.
|
||||
isReparsePoint := info.Sys().(*syscall.Win32FileAttributeData).FileAttributes&syscall.FILE_ATTRIBUTE_REPARSE_POINT != 0
|
||||
if info.IsDir() || isReparsePoint || mutatedFiles[relPath] {
|
||||
fi, err := copyFileWithMetadata(srcFilePath, destFilePath, info.IsDir())
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if info.IsDir() && !isReparsePoint {
|
||||
di = append(di, dirInfo{path: destFilePath, fileInfo: *fi})
|
||||
}
|
||||
} else {
|
||||
err = os.Link(srcFilePath, destFilePath)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
|
||||
// Don't recurse on reparse points.
|
||||
if info.IsDir() && isReparsePoint {
|
||||
return filepath.SkipDir
|
||||
}
|
||||
|
||||
return nil
|
||||
})
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
return reapplyDirectoryTimes(di)
|
||||
}
|
||||
|
||||
func (w *legacyLayerWriter) Add(name string, fileInfo *winio.FileBasicInfo) error {
|
||||
w.reset()
|
||||
err := w.init()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
if name == utilityVMPath {
|
||||
return w.initUtilityVM()
|
||||
}
|
||||
|
||||
if hasPathPrefix(name, utilityVMPath) {
|
||||
if !w.HasUtilityVM {
|
||||
return errors.New("missing UtilityVM directory")
|
||||
}
|
||||
if !hasPathPrefix(name, utilityVMFilesPath) && name != utilityVMFilesPath {
|
||||
return errors.New("invalid UtilityVM layer")
|
||||
}
|
||||
path := filepath.Join(w.destRoot, name)
|
||||
createDisposition := uint32(syscall.OPEN_EXISTING)
|
||||
if (fileInfo.FileAttributes & syscall.FILE_ATTRIBUTE_DIRECTORY) != 0 {
|
||||
st, err := os.Lstat(path)
|
||||
if err != nil && !os.IsNotExist(err) {
|
||||
return err
|
||||
}
|
||||
if st != nil {
|
||||
// Delete the existing file/directory if it is not the same type as this directory.
|
||||
existingAttr := st.Sys().(*syscall.Win32FileAttributeData).FileAttributes
|
||||
if (uint32(fileInfo.FileAttributes)^existingAttr)&(syscall.FILE_ATTRIBUTE_DIRECTORY|syscall.FILE_ATTRIBUTE_REPARSE_POINT) != 0 {
|
||||
if err = os.RemoveAll(path); err != nil {
|
||||
return err
|
||||
}
|
||||
st = nil
|
||||
}
|
||||
}
|
||||
if st == nil {
|
||||
if err = os.Mkdir(path, 0); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
if fileInfo.FileAttributes&syscall.FILE_ATTRIBUTE_REPARSE_POINT == 0 {
|
||||
w.uvmDi = append(w.uvmDi, dirInfo{path: path, fileInfo: *fileInfo})
|
||||
}
|
||||
} else {
|
||||
// Overwrite any existing hard link.
|
||||
err = os.Remove(path)
|
||||
if err != nil && !os.IsNotExist(err) {
|
||||
return err
|
||||
}
|
||||
createDisposition = syscall.CREATE_NEW
|
||||
}
|
||||
|
||||
f, err := openFileOrDir(path, syscall.GENERIC_READ|syscall.GENERIC_WRITE|winio.WRITE_DAC|winio.WRITE_OWNER|winio.ACCESS_SYSTEM_SECURITY, createDisposition)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
defer func() {
|
||||
if f != nil {
|
||||
f.Close()
|
||||
os.Remove(path)
|
||||
}
|
||||
}()
|
||||
|
||||
err = winio.SetFileBasicInfo(f, fileInfo)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
w.backupWriter = winio.NewBackupFileWriter(f, true)
|
||||
w.currentFile = f
|
||||
w.addedFiles[name] = true
|
||||
f = nil
|
||||
return nil
|
||||
}
|
||||
|
||||
path := filepath.Join(w.root, name)
|
||||
if (fileInfo.FileAttributes & syscall.FILE_ATTRIBUTE_DIRECTORY) != 0 {
|
||||
err := os.Mkdir(path, 0)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
path += ".$wcidirs$"
|
||||
}
|
||||
|
||||
f, err := openFileOrDir(path, syscall.GENERIC_READ|syscall.GENERIC_WRITE, syscall.CREATE_NEW)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
defer func() {
|
||||
if f != nil {
|
||||
f.Close()
|
||||
os.Remove(path)
|
||||
}
|
||||
}()
|
||||
|
||||
strippedFi := *fileInfo
|
||||
strippedFi.FileAttributes = 0
|
||||
err = winio.SetFileBasicInfo(f, &strippedFi)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
if hasPathPrefix(name, hivesPath) {
|
||||
w.backupWriter = winio.NewBackupFileWriter(f, false)
|
||||
} else {
|
||||
// The file attributes are written before the stream.
|
||||
err = binary.Write(f, binary.LittleEndian, uint32(fileInfo.FileAttributes))
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
|
||||
w.currentFile = f
|
||||
w.addedFiles[name] = true
|
||||
f = nil
|
||||
return nil
|
||||
}
|
||||
|
||||
func (w *legacyLayerWriter) AddLink(name string, target string) error {
|
||||
w.reset()
|
||||
err := w.init()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
var roots []string
|
||||
if hasPathPrefix(target, filesPath) {
|
||||
// Look for cross-layer hard link targets in the parent layers, since
|
||||
// nothing is in the destination path yet.
|
||||
roots = w.parentRoots
|
||||
} else if hasPathPrefix(target, utilityVMFilesPath) {
|
||||
// Since the utility VM is fully cloned into the destination path
|
||||
// already, look for cross-layer hard link targets directly in the
|
||||
// destination path.
|
||||
roots = []string{w.destRoot}
|
||||
}
|
||||
|
||||
if roots == nil || (!hasPathPrefix(name, filesPath) && !hasPathPrefix(name, utilityVMFilesPath)) {
|
||||
return errors.New("invalid hard link in layer")
|
||||
}
|
||||
|
||||
// First try to find the target of the link in a previously added file. If that
|
||||
// fails, search in parent layers.
|
||||
var selectedRoot string
|
||||
if _, ok := w.addedFiles[target]; ok {
|
||||
selectedRoot = w.destRoot
|
||||
} else {
|
||||
for _, r := range roots {
|
||||
if _, err = os.Lstat(filepath.Join(r, target)); err != nil {
|
||||
if !os.IsNotExist(err) {
|
||||
return err
|
||||
}
|
||||
} else {
|
||||
selectedRoot = r
|
||||
break
|
||||
}
|
||||
}
|
||||
if selectedRoot == "" {
|
||||
return fmt.Errorf("failed to find link target for '%s' -> '%s'", name, target)
|
||||
}
|
||||
}
|
||||
// The link can't be written until after the ImportLayer call.
|
||||
w.PendingLinks = append(w.PendingLinks, pendingLink{
|
||||
Path: filepath.Join(w.destRoot, name),
|
||||
Target: filepath.Join(selectedRoot, target),
|
||||
})
|
||||
w.addedFiles[name] = true
|
||||
return nil
|
||||
}
|
||||
|
||||
func (w *legacyLayerWriter) Remove(name string) error {
|
||||
if hasPathPrefix(name, filesPath) {
|
||||
w.tombstones = append(w.tombstones, name[len(filesPath)+1:])
|
||||
} else if hasPathPrefix(name, utilityVMFilesPath) {
|
||||
err := w.initUtilityVM()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
// Make sure the path exists; os.RemoveAll will not fail if the file is
|
||||
// already gone, and this needs to be a fatal error for diagnostics
|
||||
// purposes.
|
||||
path := filepath.Join(w.destRoot, name)
|
||||
if _, err := os.Lstat(path); err != nil {
|
||||
return err
|
||||
}
|
||||
err = os.RemoveAll(path)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
} else {
|
||||
return fmt.Errorf("invalid tombstone %s", name)
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func (w *legacyLayerWriter) Write(b []byte) (int, error) {
|
||||
if w.backupWriter == nil {
|
||||
if w.currentFile == nil {
|
||||
return 0, errors.New("closed")
|
||||
}
|
||||
return w.currentFile.Write(b)
|
||||
}
|
||||
return w.backupWriter.Write(b)
|
||||
}
|
||||
|
||||
func (w *legacyLayerWriter) Close() error {
|
||||
w.reset()
|
||||
err := w.init()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
tf, err := os.Create(filepath.Join(w.root, "tombstones.txt"))
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
defer tf.Close()
|
||||
_, err = tf.Write([]byte("\xef\xbb\xbfVersion 1.0\n"))
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
for _, t := range w.tombstones {
|
||||
_, err = tf.Write([]byte(filepath.Join(`\`, t) + "\n"))
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
if w.HasUtilityVM {
|
||||
err = reapplyDirectoryTimes(w.uvmDi)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
|
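The reader above follows a Next/Read iteration shape; below is a hedged sketch of driving such a reader. The layerReader interface is defined locally for illustration and is not an exported hcsshim type.

package example

import (
	"io"

	"github.com/Microsoft/go-winio"
)

// layerReader mirrors the Next/Read shape of legacyLayerReader above.
type layerReader interface {
	Next() (path string, size int64, fileInfo *winio.FileBasicInfo, err error)
	io.Reader
	Close() error
}

// countEntries walks every entry in the layer, discarding file contents.
func countEntries(r layerReader) (int, error) {
	defer r.Close()
	n := 0
	for {
		_, _, _, err := r.Next()
		if err == io.EOF {
			return n, nil // Next reports io.EOF when the walk is complete
		}
		if err != nil {
			return n, err
		}
		// Read drains the current entry's data stream.
		if _, err := io.Copy(io.Discard, r); err != nil {
			return n, err
		}
		n++
	}
}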
@ -0,0 +1,934 @@
|
|||
// Copyright 2013 The Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
// +build ignore
|
||||
|
||||
/*
|
||||
mksyscall_windows generates windows system call bodies
|
||||
|
||||
It parses all files specified on command line containing function
|
||||
prototypes (like syscall_windows.go) and prints system call bodies
|
||||
to standard output.
|
||||
|
||||
The prototypes are marked by lines beginning with "//sys" and read
|
||||
like func declarations if //sys is replaced by func, but:
|
||||
|
||||
* The parameter lists must give a name for each argument. This
|
||||
includes return parameters.
|
||||
|
||||
* The parameter lists must give a type for each argument:
|
||||
the (x, y, z int) shorthand is not allowed.
|
||||
|
||||
* If the return parameter is an error number, it must be named err.
|
||||
|
||||
* If the Go func name needs to be different from its winapi dll name,
|
||||
the winapi name can be specified at the end, after the "=" sign, like
|
||||
//sys LoadLibrary(libname string) (handle uint32, err error) = LoadLibraryA
|
||||
|
||||
* Each function that returns err needs to supply a condition that the
|
||||
return value of winapi will be tested against to detect failure.
|
||||
This would set err to windows "last-error", otherwise it will be nil.
|
||||
The value can be provided at end of //sys declaration, like
|
||||
//sys LoadLibrary(libname string) (handle uint32, err error) [failretval==-1] = LoadLibraryA
|
||||
and is [failretval==0] by default.
|
||||
|
||||
Usage:
|
||||
mksyscall_windows [flags] [path ...]
|
||||
|
||||
The flags are:
|
||||
-output
|
||||
Specify output file name (outputs to console if blank).
|
||||
-trace
|
||||
Generate print statement after every syscall.
|
||||
*/
|
||||
package main
|
||||
|
||||
import (
|
||||
"bufio"
|
||||
"bytes"
|
||||
"errors"
|
||||
"flag"
|
||||
"fmt"
|
||||
"go/format"
|
||||
"go/parser"
|
||||
"go/token"
|
||||
"io"
|
||||
"io/ioutil"
|
||||
"log"
|
||||
"os"
|
||||
"path/filepath"
|
||||
"runtime"
|
||||
"sort"
|
||||
"strconv"
|
||||
"strings"
|
||||
"text/template"
|
||||
)
|
||||
|
||||
var (
|
||||
filename = flag.String("output", "", "output file name (standard output if omitted)")
|
||||
printTraceFlag = flag.Bool("trace", false, "generate print statement after every syscall")
|
||||
systemDLL = flag.Bool("systemdll", true, "whether all DLLs should be loaded from the Windows system directory")
|
||||
)
|
||||
|
||||
func trim(s string) string {
|
||||
return strings.Trim(s, " \t")
|
||||
}
|
||||
|
||||
var packageName string
|
||||
|
||||
func packagename() string {
|
||||
return packageName
|
||||
}
|
||||
|
||||
func syscalldot() string {
|
||||
if packageName == "syscall" {
|
||||
return ""
|
||||
}
|
||||
return "syscall."
|
||||
}
|
||||
|
||||
// Param is function parameter
|
||||
type Param struct {
|
||||
Name string
|
||||
Type string
|
||||
fn *Fn
|
||||
tmpVarIdx int
|
||||
}
|
||||
|
||||
// tmpVar returns temp variable name that will be used to represent p during syscall.
|
||||
func (p *Param) tmpVar() string {
|
||||
if p.tmpVarIdx < 0 {
|
||||
p.tmpVarIdx = p.fn.curTmpVarIdx
|
||||
p.fn.curTmpVarIdx++
|
||||
}
|
||||
return fmt.Sprintf("_p%d", p.tmpVarIdx)
|
||||
}
|
||||
|
||||
// BoolTmpVarCode returns source code for bool temp variable.
|
||||
func (p *Param) BoolTmpVarCode() string {
|
||||
const code = `var %s uint32
|
||||
if %s {
|
||||
%s = 1
|
||||
} else {
|
||||
%s = 0
|
||||
}`
|
||||
tmp := p.tmpVar()
|
||||
return fmt.Sprintf(code, tmp, p.Name, tmp, tmp)
|
||||
}
|
||||
|
||||
// SliceTmpVarCode returns source code for slice temp variable.
|
||||
func (p *Param) SliceTmpVarCode() string {
|
||||
const code = `var %s *%s
|
||||
if len(%s) > 0 {
|
||||
%s = &%s[0]
|
||||
}`
|
||||
tmp := p.tmpVar()
|
||||
return fmt.Sprintf(code, tmp, p.Type[2:], p.Name, tmp, p.Name)
|
||||
}
|
||||
|
||||
// StringTmpVarCode returns source code for string temp variable.
|
||||
func (p *Param) StringTmpVarCode() string {
|
||||
errvar := p.fn.Rets.ErrorVarName()
|
||||
if errvar == "" {
|
||||
errvar = "_"
|
||||
}
|
||||
tmp := p.tmpVar()
|
||||
const code = `var %s %s
|
||||
%s, %s = %s(%s)`
|
||||
s := fmt.Sprintf(code, tmp, p.fn.StrconvType(), tmp, errvar, p.fn.StrconvFunc(), p.Name)
|
||||
if errvar == "-" {
|
||||
return s
|
||||
}
|
||||
const morecode = `
|
||||
if %s != nil {
|
||||
return
|
||||
}`
|
||||
return s + fmt.Sprintf(morecode, errvar)
|
||||
}
|
||||
|
||||
// TmpVarCode returns source code for temp variable.
|
||||
func (p *Param) TmpVarCode() string {
|
||||
switch {
|
||||
case p.Type == "bool":
|
||||
return p.BoolTmpVarCode()
|
||||
case strings.HasPrefix(p.Type, "[]"):
|
||||
return p.SliceTmpVarCode()
|
||||
default:
|
||||
return ""
|
||||
}
|
||||
}
|
||||
|
||||
// TmpVarHelperCode returns source code for helper's temp variable.
|
||||
func (p *Param) TmpVarHelperCode() string {
|
||||
if p.Type != "string" {
|
||||
return ""
|
||||
}
|
||||
return p.StringTmpVarCode()
|
||||
}
|
||||
|
||||
// SyscallArgList returns source code fragments representing p parameter
|
||||
// in syscall. Slices are translated into 2 syscall parameters: pointer to
|
||||
// the first element and length.
|
||||
func (p *Param) SyscallArgList() []string {
|
||||
t := p.HelperType()
|
||||
var s string
|
||||
switch {
|
||||
case t[0] == '*':
|
||||
s = fmt.Sprintf("unsafe.Pointer(%s)", p.Name)
|
||||
case t == "bool":
|
||||
s = p.tmpVar()
|
||||
case strings.HasPrefix(t, "[]"):
|
||||
return []string{
|
||||
fmt.Sprintf("uintptr(unsafe.Pointer(%s))", p.tmpVar()),
|
||||
fmt.Sprintf("uintptr(len(%s))", p.Name),
|
||||
}
|
||||
default:
|
||||
s = p.Name
|
||||
}
|
||||
return []string{fmt.Sprintf("uintptr(%s)", s)}
|
||||
}
|
||||
|
||||
// IsError determines if p parameter is used to return error.
|
||||
func (p *Param) IsError() bool {
|
||||
return p.Name == "err" && p.Type == "error"
|
||||
}
|
||||
|
||||
// HelperType returns type of parameter p used in helper function.
|
||||
func (p *Param) HelperType() string {
|
||||
if p.Type == "string" {
|
||||
return p.fn.StrconvType()
|
||||
}
|
||||
return p.Type
|
||||
}
|
||||
|
||||
// join concatenates parameters ps into a string with sep separator.
|
||||
// Each parameter is converted into string by applying fn to it
|
||||
// before conversion.
|
||||
func join(ps []*Param, fn func(*Param) string, sep string) string {
|
||||
if len(ps) == 0 {
|
||||
return ""
|
||||
}
|
||||
a := make([]string, 0)
|
||||
for _, p := range ps {
|
||||
a = append(a, fn(p))
|
||||
}
|
||||
return strings.Join(a, sep)
|
||||
}
|
||||
|
||||
// Rets describes function return parameters.
|
||||
type Rets struct {
|
||||
Name string
|
||||
Type string
|
||||
ReturnsError bool
|
||||
FailCond string
|
||||
}
|
||||
|
||||
// ErrorVarName returns error variable name for r.
|
||||
func (r *Rets) ErrorVarName() string {
|
||||
if r.ReturnsError {
|
||||
return "err"
|
||||
}
|
||||
if r.Type == "error" {
|
||||
return r.Name
|
||||
}
|
||||
return ""
|
||||
}
|
||||
|
||||
// ToParams converts r into slice of *Param.
|
||||
func (r *Rets) ToParams() []*Param {
|
||||
ps := make([]*Param, 0)
|
||||
if len(r.Name) > 0 {
|
||||
ps = append(ps, &Param{Name: r.Name, Type: r.Type})
|
||||
}
|
||||
if r.ReturnsError {
|
||||
ps = append(ps, &Param{Name: "err", Type: "error"})
|
||||
}
|
||||
return ps
|
||||
}
|
||||
|
||||
// List returns source code of syscall return parameters.
|
||||
func (r *Rets) List() string {
|
||||
s := join(r.ToParams(), func(p *Param) string { return p.Name + " " + p.Type }, ", ")
|
||||
if len(s) > 0 {
|
||||
s = "(" + s + ")"
|
||||
}
|
||||
return s
|
||||
}
|
||||
|
||||
// PrintList returns source code of the trace printing part corresponding
|
||||
// to syscall return values.
|
||||
func (r *Rets) PrintList() string {
|
||||
return join(r.ToParams(), func(p *Param) string { return fmt.Sprintf(`"%s=", %s, `, p.Name, p.Name) }, `", ", `)
|
||||
}
|
||||
|
||||
// SetReturnValuesCode returns source code that accepts syscall return values.
|
||||
func (r *Rets) SetReturnValuesCode() string {
|
||||
if r.Name == "" && !r.ReturnsError {
|
||||
return ""
|
||||
}
|
||||
retvar := "r0"
|
||||
if r.Name == "" {
|
||||
retvar = "r1"
|
||||
}
|
||||
errvar := "_"
|
||||
if r.ReturnsError {
|
||||
errvar = "e1"
|
||||
}
|
||||
return fmt.Sprintf("%s, _, %s := ", retvar, errvar)
|
||||
}
|
||||
|
||||
func (r *Rets) useLongHandleErrorCode(retvar string) string {
|
||||
const code = `if %s {
|
||||
if e1 != 0 {
|
||||
err = errnoErr(e1)
|
||||
} else {
|
||||
err = %sEINVAL
|
||||
}
|
||||
}`
|
||||
cond := retvar + " == 0"
|
||||
if r.FailCond != "" {
|
||||
cond = strings.Replace(r.FailCond, "failretval", retvar, 1)
|
||||
}
|
||||
return fmt.Sprintf(code, cond, syscalldot())
|
||||
}
|
||||
|
||||
// SetErrorCode returns source code that sets return parameters.
|
||||
func (r *Rets) SetErrorCode() string {
|
||||
const code = `if r0 != 0 {
|
||||
%s = %sErrno(r0)
|
||||
}`
|
||||
const hrCode = `if int32(r0) < 0 {
|
||||
%s = %sErrno(win32FromHresult(r0))
|
||||
}`
|
||||
if r.Name == "" && !r.ReturnsError {
|
||||
return ""
|
||||
}
|
||||
if r.Name == "" {
|
||||
return r.useLongHandleErrorCode("r1")
|
||||
}
|
||||
if r.Type == "error" {
|
||||
if r.Name == "hr" {
|
||||
return fmt.Sprintf(hrCode, r.Name, syscalldot())
|
||||
} else {
|
||||
return fmt.Sprintf(code, r.Name, syscalldot())
|
||||
}
|
||||
}
|
||||
s := ""
|
||||
switch {
|
||||
case r.Type[0] == '*':
|
||||
s = fmt.Sprintf("%s = (%s)(unsafe.Pointer(r0))", r.Name, r.Type)
|
||||
case r.Type == "bool":
|
||||
s = fmt.Sprintf("%s = r0 != 0", r.Name)
|
||||
default:
|
||||
s = fmt.Sprintf("%s = %s(r0)", r.Name, r.Type)
|
||||
}
|
||||
if !r.ReturnsError {
|
||||
return s
|
||||
}
|
||||
return s + "\n\t" + r.useLongHandleErrorCode(r.Name)
|
||||
}
|
||||
|
||||
// Fn describes syscall function.
|
||||
type Fn struct {
|
||||
Name string
|
||||
Params []*Param
|
||||
Rets *Rets
|
||||
PrintTrace bool
|
||||
confirmproc bool
|
||||
dllname string
|
||||
dllfuncname string
|
||||
src string
|
||||
// TODO: get rid of this field and just use parameter index instead
|
||||
curTmpVarIdx int // ensure tmp variables have unique names
|
||||
}
|
||||
|
||||
// extractParams parses s to extract function parameters.
|
||||
func extractParams(s string, f *Fn) ([]*Param, error) {
|
||||
s = trim(s)
|
||||
if s == "" {
|
||||
return nil, nil
|
||||
}
|
||||
a := strings.Split(s, ",")
|
||||
ps := make([]*Param, len(a))
|
||||
for i := range ps {
|
||||
s2 := trim(a[i])
|
||||
b := strings.Split(s2, " ")
|
||||
if len(b) != 2 {
|
||||
b = strings.Split(s2, "\t")
|
||||
if len(b) != 2 {
|
||||
return nil, errors.New("Could not extract function parameter from \"" + s2 + "\"")
|
||||
}
|
||||
}
|
||||
ps[i] = &Param{
|
||||
Name: trim(b[0]),
|
||||
Type: trim(b[1]),
|
||||
fn: f,
|
||||
tmpVarIdx: -1,
|
||||
}
|
||||
}
|
||||
return ps, nil
|
||||
}
|
||||
|
||||
// extractSection extracts text out of string s starting after start
|
||||
// and ending just before end. found return value will indicate success,
|
||||
// and prefix, body and suffix will contain the corresponding parts of string s.
|
||||
func extractSection(s string, start, end rune) (prefix, body, suffix string, found bool) {
|
||||
s = trim(s)
|
||||
if strings.HasPrefix(s, string(start)) {
|
||||
// no prefix
|
||||
body = s[1:]
|
||||
} else {
|
||||
a := strings.SplitN(s, string(start), 2)
|
||||
if len(a) != 2 {
|
||||
return "", "", s, false
|
||||
}
|
||||
prefix = a[0]
|
||||
body = a[1]
|
||||
}
|
||||
a := strings.SplitN(body, string(end), 2)
|
||||
if len(a) != 2 {
|
||||
return "", "", "", false
|
||||
}
|
||||
return prefix, a[0], a[1], true
|
||||
}
|
||||
|
||||
// newFn parses string s and returns the created function Fn.
|
||||
func newFn(s string) (*Fn, error) {
|
||||
s = trim(s)
|
||||
f := &Fn{
|
||||
Rets: &Rets{},
|
||||
src: s,
|
||||
PrintTrace: *printTraceFlag,
|
||||
}
|
||||
// function name and args
|
||||
prefix, body, s, found := extractSection(s, '(', ')')
|
||||
if !found || prefix == "" {
|
||||
return nil, errors.New("Could not extract function name and parameters from \"" + f.src + "\"")
|
||||
}
|
||||
f.Name = prefix
|
||||
var err error
|
||||
f.Params, err = extractParams(body, f)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
// return values
|
||||
_, body, s, found = extractSection(s, '(', ')')
|
||||
if found {
|
||||
r, err := extractParams(body, f)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
switch len(r) {
|
||||
case 0:
|
||||
case 1:
|
||||
if r[0].IsError() {
|
||||
f.Rets.ReturnsError = true
|
||||
} else {
|
||||
f.Rets.Name = r[0].Name
|
||||
f.Rets.Type = r[0].Type
|
||||
}
|
||||
case 2:
|
||||
if !r[1].IsError() {
|
||||
return nil, errors.New("Only last windows error is allowed as second return value in \"" + f.src + "\"")
|
||||
}
|
||||
f.Rets.ReturnsError = true
|
||||
f.Rets.Name = r[0].Name
|
||||
f.Rets.Type = r[0].Type
|
||||
default:
|
||||
return nil, errors.New("Too many return values in \"" + f.src + "\"")
|
||||
}
|
||||
}
|
||||
// fail condition
|
||||
_, body, s, found = extractSection(s, '[', ']')
|
||||
if found {
|
||||
f.Rets.FailCond = body
|
||||
}
|
||||
// dll and dll function names
|
||||
s = trim(s)
|
||||
if s == "" {
|
||||
return f, nil
|
||||
}
|
||||
if !strings.HasPrefix(s, "=") {
|
||||
return nil, errors.New("Could not extract dll name from \"" + f.src + "\"")
|
||||
}
|
||||
s = trim(s[1:])
|
||||
a := strings.Split(s, ".")
|
||||
switch len(a) {
|
||||
case 1:
|
||||
f.dllfuncname = a[0]
|
||||
case 2:
|
||||
f.dllname = a[0]
|
||||
f.dllfuncname = a[1]
|
||||
default:
|
||||
return nil, errors.New("Could not extract dll name from \"" + f.src + "\"")
|
||||
}
|
||||
if f.dllfuncname[len(f.dllfuncname)-1] == '?' {
|
||||
f.confirmproc = true
|
||||
f.dllfuncname = f.dllfuncname[0 : len(f.dllfuncname)-1]
|
||||
}
|
||||
return f, nil
|
||||
}
|
||||
|
||||
// DLLName returns DLL name for function f.
|
||||
func (f *Fn) DLLName() string {
|
||||
if f.dllname == "" {
|
||||
return "kernel32"
|
||||
}
|
||||
return f.dllname
|
||||
}
|
||||
|
||||
// DLLFuncName returns DLL function name for function f.
|
||||
func (f *Fn) DLLFuncName() string {
|
||||
if f.dllfuncname == "" {
|
||||
return f.Name
|
||||
}
|
||||
return f.dllfuncname
|
||||
}
|
||||
|
||||
func (f *Fn) ConfirmProc() bool {
|
||||
return f.confirmproc
|
||||
}
|
||||
|
||||
// ParamList returns source code for function f parameters.
|
||||
func (f *Fn) ParamList() string {
|
||||
return join(f.Params, func(p *Param) string { return p.Name + " " + p.Type }, ", ")
|
||||
}
|
||||
|
||||
// HelperParamList returns source code for helper function f parameters.
|
||||
func (f *Fn) HelperParamList() string {
|
||||
return join(f.Params, func(p *Param) string { return p.Name + " " + p.HelperType() }, ", ")
|
||||
}
|
||||
|
||||
// ParamPrintList returns source code of the trace printing part corresponding
|
||||
// to syscall input parameters.
|
||||
func (f *Fn) ParamPrintList() string {
|
||||
return join(f.Params, func(p *Param) string { return fmt.Sprintf(`"%s=", %s, `, p.Name, p.Name) }, `", ", `)
|
||||
}
|
||||
|
||||
// ParamCount return number of syscall parameters for function f.
|
||||
func (f *Fn) ParamCount() int {
|
||||
n := 0
|
||||
for _, p := range f.Params {
|
||||
n += len(p.SyscallArgList())
|
||||
}
|
||||
return n
|
||||
}
|
||||
|
||||
// SyscallParamCount determines which version of Syscall/Syscall6/Syscall9/...
|
||||
// to use. It returns the parameter count for the corresponding SyscallX function.
|
||||
func (f *Fn) SyscallParamCount() int {
|
||||
n := f.ParamCount()
|
||||
switch {
|
||||
case n <= 3:
|
||||
return 3
|
||||
case n <= 6:
|
||||
return 6
|
||||
case n <= 9:
|
||||
return 9
|
||||
case n <= 12:
|
||||
return 12
|
||||
case n <= 15:
|
||||
return 15
|
||||
default:
|
||||
panic("too many arguments to system call")
|
||||
}
|
||||
}
|
||||
|
||||
// Syscall determines which SyscallX function to use for function f.
|
||||
func (f *Fn) Syscall() string {
|
||||
c := f.SyscallParamCount()
|
||||
if c == 3 {
|
||||
return syscalldot() + "Syscall"
|
||||
}
|
||||
return syscalldot() + "Syscall" + strconv.Itoa(c)
|
||||
}
|
||||
|
||||
// SyscallParamList returns source code for SyscallX parameters for function f.
|
||||
func (f *Fn) SyscallParamList() string {
|
||||
a := make([]string, 0)
|
||||
for _, p := range f.Params {
|
||||
a = append(a, p.SyscallArgList()...)
|
||||
}
|
||||
for len(a) < f.SyscallParamCount() {
|
||||
a = append(a, "0")
|
||||
}
|
||||
return strings.Join(a, ", ")
|
||||
}
|
||||
|
||||
// HelperCallParamList returns source code of call into function f helper.
|
||||
func (f *Fn) HelperCallParamList() string {
|
||||
a := make([]string, 0, len(f.Params))
|
||||
for _, p := range f.Params {
|
||||
s := p.Name
|
||||
if p.Type == "string" {
|
||||
s = p.tmpVar()
|
||||
}
|
||||
a = append(a, s)
|
||||
}
|
||||
return strings.Join(a, ", ")
|
||||
}
|
||||
|
||||
// IsUTF16 is true if f is a W (utf16) function. It is false
|
||||
// for all A (ascii) functions.
|
||||
func (_ *Fn) IsUTF16() bool {
|
||||
return true
|
||||
}
|
||||
|
||||
// StrconvFunc returns name of Go string to OS string function for f.
|
||||
func (f *Fn) StrconvFunc() string {
|
||||
if f.IsUTF16() {
|
||||
return syscalldot() + "UTF16PtrFromString"
|
||||
}
|
||||
return syscalldot() + "BytePtrFromString"
|
||||
}
|
||||
|
||||
// StrconvType returns Go type name used for OS string for f.
|
||||
func (f *Fn) StrconvType() string {
|
||||
if f.IsUTF16() {
|
||||
return "*uint16"
|
||||
}
|
||||
return "*byte"
|
||||
}
|
||||
|
||||
// HasStringParam is true if f has at least one string parameter.
|
||||
// Otherwise it is false.
|
||||
func (f *Fn) HasStringParam() bool {
|
||||
for _, p := range f.Params {
|
||||
if p.Type == "string" {
|
||||
return true
|
||||
}
|
||||
}
|
||||
return false
|
||||
}
|
||||
|
||||
var uniqDllFuncName = make(map[string]bool)
|
||||
|
||||
// IsNotDuplicate is true if f is not a duplicated function
|
||||
func (f *Fn) IsNotDuplicate() bool {
|
||||
funcName := f.DLLFuncName()
|
||||
if !uniqDllFuncName[funcName] {
|
||||
uniqDllFuncName[funcName] = true
|
||||
return true
|
||||
}
|
||||
return false
|
||||
}
|
||||
|
||||
// HelperName returns name of function f helper.
|
||||
func (f *Fn) HelperName() string {
|
||||
if !f.HasStringParam() {
|
||||
return f.Name
|
||||
}
|
||||
return "_" + f.Name
|
||||
}
|
||||
|
||||
// Source files and functions.
|
||||
type Source struct {
|
||||
Funcs []*Fn
|
||||
Files []string
|
||||
StdLibImports []string
|
||||
ExternalImports []string
|
||||
}
|
||||
|
||||
func (src *Source) Import(pkg string) {
|
||||
src.StdLibImports = append(src.StdLibImports, pkg)
|
||||
sort.Strings(src.StdLibImports)
|
||||
}
|
||||
|
||||
func (src *Source) ExternalImport(pkg string) {
|
||||
src.ExternalImports = append(src.ExternalImports, pkg)
|
||||
sort.Strings(src.ExternalImports)
|
||||
}
|
||||
|
||||
// ParseFiles parses files listed in fs and extracts all syscall
|
||||
// functions listed in sys comments. It returns source files
|
||||
// and functions collection *Source if successful.
|
||||
func ParseFiles(fs []string) (*Source, error) {
|
||||
src := &Source{
|
||||
Funcs: make([]*Fn, 0),
|
||||
Files: make([]string, 0),
|
||||
StdLibImports: []string{
|
||||
"unsafe",
|
||||
},
|
||||
ExternalImports: make([]string, 0),
|
||||
}
|
||||
for _, file := range fs {
|
||||
if err := src.ParseFile(file); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
}
|
||||
return src, nil
|
||||
}
|
||||
|
||||
// DLLs returns dll names for a source set src.
|
||||
func (src *Source) DLLs() []string {
|
||||
uniq := make(map[string]bool)
|
||||
r := make([]string, 0)
|
||||
for _, f := range src.Funcs {
|
||||
name := f.DLLName()
|
||||
if _, found := uniq[name]; !found {
|
||||
uniq[name] = true
|
||||
r = append(r, name)
|
||||
}
|
||||
}
|
||||
return r
|
||||
}
|
||||
|
||||
// ParseFile adds additional file path to a source set src.
|
||||
func (src *Source) ParseFile(path string) error {
|
||||
file, err := os.Open(path)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
defer file.Close()
|
||||
|
||||
s := bufio.NewScanner(file)
|
||||
for s.Scan() {
|
||||
t := trim(s.Text())
|
||||
if len(t) < 7 {
|
||||
continue
|
||||
}
|
||||
if !strings.HasPrefix(t, "//sys") {
|
||||
continue
|
||||
}
|
||||
t = t[5:]
|
||||
if !(t[0] == ' ' || t[0] == '\t') {
|
||||
continue
|
||||
}
|
||||
f, err := newFn(t[1:])
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
src.Funcs = append(src.Funcs, f)
|
||||
}
|
||||
if err := s.Err(); err != nil {
|
||||
return err
|
||||
}
|
||||
src.Files = append(src.Files, path)
|
||||
|
||||
// get package name
|
||||
fset := token.NewFileSet()
|
||||
_, err = file.Seek(0, 0)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
pkg, err := parser.ParseFile(fset, "", file, parser.PackageClauseOnly)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
packageName = pkg.Name.Name
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// IsStdRepo returns true if src is part of standard library.
|
||||
func (src *Source) IsStdRepo() (bool, error) {
|
||||
if len(src.Files) == 0 {
|
||||
return false, errors.New("no input files provided")
|
||||
}
|
||||
abspath, err := filepath.Abs(src.Files[0])
|
||||
if err != nil {
|
||||
return false, err
|
||||
}
|
||||
goroot := runtime.GOROOT()
|
||||
if runtime.GOOS == "windows" {
|
||||
abspath = strings.ToLower(abspath)
|
||||
goroot = strings.ToLower(goroot)
|
||||
}
|
||||
sep := string(os.PathSeparator)
|
||||
if !strings.HasSuffix(goroot, sep) {
|
||||
goroot += sep
|
||||
}
|
||||
return strings.HasPrefix(abspath, goroot), nil
|
||||
}
|
||||
|
||||
// Generate output source file from a source set src.
|
||||
func (src *Source) Generate(w io.Writer) error {
|
||||
const (
|
||||
pkgStd = iota // any package in std library
|
||||
pkgXSysWindows // x/sys/windows package
|
||||
pkgOther
|
||||
)
|
||||
isStdRepo, err := src.IsStdRepo()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
var pkgtype int
|
||||
switch {
|
||||
case isStdRepo:
|
||||
pkgtype = pkgStd
|
||||
case packageName == "windows":
|
||||
// TODO: this needs better logic than just using package name
|
||||
pkgtype = pkgXSysWindows
|
||||
default:
|
||||
pkgtype = pkgOther
|
||||
}
|
||||
if *systemDLL {
|
||||
switch pkgtype {
|
||||
case pkgStd:
|
||||
src.Import("internal/syscall/windows/sysdll")
|
||||
case pkgXSysWindows:
|
||||
default:
|
||||
src.ExternalImport("golang.org/x/sys/windows")
|
||||
}
|
||||
}
|
||||
src.ExternalImport("github.com/Microsoft/go-winio")
|
||||
if packageName != "syscall" {
|
||||
src.Import("syscall")
|
||||
}
|
||||
funcMap := template.FuncMap{
|
||||
"packagename": packagename,
|
||||
"syscalldot": syscalldot,
|
||||
"newlazydll": func(dll string) string {
|
||||
arg := "\"" + dll + ".dll\""
|
||||
if !*systemDLL {
|
||||
return syscalldot() + "NewLazyDLL(" + arg + ")"
|
||||
}
|
||||
switch pkgtype {
|
||||
case pkgStd:
|
||||
return syscalldot() + "NewLazyDLL(sysdll.Add(" + arg + "))"
|
||||
case pkgXSysWindows:
|
||||
return "NewLazySystemDLL(" + arg + ")"
|
||||
default:
|
||||
return "windows.NewLazySystemDLL(" + arg + ")"
|
||||
}
|
||||
},
|
||||
}
|
||||
t := template.Must(template.New("main").Funcs(funcMap).Parse(srcTemplate))
|
||||
err = t.Execute(w, src)
|
||||
if err != nil {
|
||||
return errors.New("Failed to execute template: " + err.Error())
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func usage() {
|
||||
fmt.Fprintf(os.Stderr, "usage: mksyscall_windows [flags] [path ...]\n")
|
||||
flag.PrintDefaults()
|
||||
os.Exit(1)
|
||||
}
|
||||
|
||||
func main() {
|
||||
flag.Usage = usage
|
||||
flag.Parse()
|
||||
if len(flag.Args()) <= 0 {
|
||||
fmt.Fprintf(os.Stderr, "no files to parse provided\n")
|
||||
usage()
|
||||
}
|
||||
|
||||
src, err := ParseFiles(flag.Args())
|
||||
if err != nil {
|
||||
log.Fatal(err)
|
||||
}
|
||||
|
||||
var buf bytes.Buffer
|
||||
if err := src.Generate(&buf); err != nil {
|
||||
log.Fatal(err)
|
||||
}
|
||||
|
||||
data, err := format.Source(buf.Bytes())
|
||||
if err != nil {
|
||||
log.Fatal(err)
|
||||
}
|
||||
if *filename == "" {
|
||||
_, err = os.Stdout.Write(data)
|
||||
} else {
|
||||
err = ioutil.WriteFile(*filename, data, 0644)
|
||||
}
|
||||
if err != nil {
|
||||
log.Fatal(err)
|
||||
}
|
||||
}
|
||||
|
||||
// TODO: use println instead to print in the following template
|
||||
const srcTemplate = `
|
||||
|
||||
{{define "main"}}// MACHINE GENERATED BY 'go generate' COMMAND; DO NOT EDIT
|
||||
|
||||
package {{packagename}}
|
||||
|
||||
import (
|
||||
{{range .StdLibImports}}"{{.}}"
|
||||
{{end}}
|
||||
|
||||
{{range .ExternalImports}}"{{.}}"
|
||||
{{end}}
|
||||
)
|
||||
|
||||
var _ unsafe.Pointer
|
||||
|
||||
// Do the interface allocations only once for common
|
||||
// Errno values.
|
||||
const (
|
||||
errnoERROR_IO_PENDING = 997
|
||||
)
|
||||
|
||||
var (
|
||||
errERROR_IO_PENDING error = {{syscalldot}}Errno(errnoERROR_IO_PENDING)
|
||||
)
|
||||
|
||||
// errnoErr returns common boxed Errno values, to prevent
|
||||
// allocations at runtime.
|
||||
func errnoErr(e {{syscalldot}}Errno) error {
|
||||
switch e {
|
||||
case 0:
|
||||
return nil
|
||||
case errnoERROR_IO_PENDING:
|
||||
return errERROR_IO_PENDING
|
||||
}
|
||||
// TODO: add more here, after collecting data on the common
|
||||
// error values seen on Windows. (perhaps when running
|
||||
// all.bat?)
|
||||
return e
|
||||
}
|
||||
|
||||
var (
|
||||
{{template "dlls" .}}
|
||||
{{template "funcnames" .}})
|
||||
{{range .Funcs}}{{if .HasStringParam}}{{template "helperbody" .}}{{end}}{{template "funcbody" .}}{{end}}
|
||||
{{end}}
|
||||
|
||||
{{/* help functions */}}
|
||||
|
||||
{{define "dlls"}}{{range .DLLs}} mod{{.}} = {{newlazydll .}}
|
||||
{{end}}{{end}}
|
||||
|
||||
{{define "funcnames"}}{{range .Funcs}}{{if .IsNotDuplicate}} proc{{.DLLFuncName}} = mod{{.DLLName}}.NewProc("{{.DLLFuncName}}"){{end}}
|
||||
{{end}}{{end}}
|
||||
|
||||
{{define "helperbody"}}
|
||||
func {{.Name}}({{.ParamList}}) {{template "results" .}}{
|
||||
{{template "helpertmpvars" .}} return {{.HelperName}}({{.HelperCallParamList}})
|
||||
}
|
||||
{{end}}
|
||||
|
||||
{{define "funcbody"}}
|
||||
func {{.HelperName}}({{.HelperParamList}}) {{template "results" .}}{
|
||||
{{template "tmpvars" .}} {{template "syscallcheck" .}}{{template "syscall" .}}
|
||||
{{template "seterror" .}}{{template "printtrace" .}} return
|
||||
}
|
||||
{{end}}
|
||||
|
||||
{{define "helpertmpvars"}}{{range .Params}}{{if .TmpVarHelperCode}} {{.TmpVarHelperCode}}
|
||||
{{end}}{{end}}{{end}}
|
||||
|
||||
{{define "tmpvars"}}{{range .Params}}{{if .TmpVarCode}} {{.TmpVarCode}}
|
||||
{{end}}{{end}}{{end}}
|
||||
|
||||
{{define "results"}}{{if .Rets.List}}{{.Rets.List}} {{end}}{{end}}
|
||||
|
||||
{{define "syscall"}}{{.Rets.SetReturnValuesCode}}{{.Syscall}}(proc{{.DLLFuncName}}.Addr(), {{.ParamCount}}, {{.SyscallParamList}}){{end}}
|
||||
|
||||
{{define "syscallcheck"}}{{if .ConfirmProc}}if {{.Rets.ErrorVarName}} = proc{{.DLLFuncName}}.Find(); {{.Rets.ErrorVarName}} != nil {
|
||||
return
|
||||
}
|
||||
{{end}}{{end}}
|
||||
|
||||
|
||||
{{define "seterror"}}{{if .Rets.SetErrorCode}} {{.Rets.SetErrorCode}}
|
||||
{{end}}{{end}}
|
||||
|
||||
{{define "printtrace"}}{{if .PrintTrace}} print("SYSCALL: {{.Name}}(", {{.ParamPrintList}}") (", {{.Rets.PrintList}}")\n")
|
||||
{{end}}{{end}}
|
||||
|
||||
`
|
|
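To tie the generator above back to its input format, here is a hedged example of the //sys prototypes it consumes. The go:generate invocation and file names are illustrative assumptions, the //sys lines are comments read by the generator (so the type names need not resolve in this snippet), and the signatures are modelled on the format documented above rather than copied from hcsshim.

package example

// Typical invocation of the generator; output and input file names are assumed.
//go:generate go run mksyscall_windows.go -output zsyscall_windows.go syscall_example.go

// Prototype declarations in the documented format, with an "hr" error return
// and a trailing "?" to request a Find check before the first call.
//sys nameToGuid(name string, guid *GUID) (hr error) = vmcompute.NameToGuid?
//sys layerExists(info *driverInfo, id string, exists *uint32) (hr error) = vmcompute.LayerExists?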
@ -0,0 +1,20 @@
|
|||
package hcsshim
|
||||
|
||||
import "github.com/sirupsen/logrus"
|
||||
|
||||
// NameToGuid converts the given string into a GUID using the algorithm in the
|
||||
// Host Compute Service, ensuring GUIDs generated with the same string are common
|
||||
// across all clients.
|
||||
func NameToGuid(name string) (id GUID, err error) {
|
||||
title := "hcsshim::NameToGuid "
|
||||
logrus.Debugf(title+"Name %s", name)
|
||||
|
||||
err = nameToGuid(name, &id)
|
||||
if err != nil {
|
||||
err = makeErrorf(err, title, "name=%s", name)
|
||||
logrus.Error(err)
|
||||
return
|
||||
}
|
||||
|
||||
return
|
||||
}
|
|
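A short, hedged usage sketch of NameToGuid; the folder name is illustrative.

package example

import (
	"fmt"

	"github.com/Microsoft/hcsshim"
)

// guidForLayerFolder derives the deterministic GUID for a layer folder name,
// the same call layerPathsToDescriptors makes internally.
func guidForLayerFolder(folder string) error {
	g, err := hcsshim.NameToGuid(folder)
	if err != nil {
		return err
	}
	fmt.Printf("GUID for %q: %+v\n", folder, g)
	return nil
}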
@ -0,0 +1,46 @@
|
|||
package hcsshim
|
||||
|
||||
import (
|
||||
"sync"
|
||||
|
||||
"github.com/sirupsen/logrus"
|
||||
)
|
||||
|
||||
var prepareLayerLock sync.Mutex
|
||||
|
||||
// PrepareLayer finds a mounted read-write layer matching layerId and enables
|
||||
// the filesystem filter for use on that layer. This requires the paths to all
|
||||
// parent layers, and is necessary in order to view or interact with the layer
|
||||
// as an actual filesystem (reading and writing files, creating directories, etc).
|
||||
// Disabling the filter must be done via UnprepareLayer.
|
||||
func PrepareLayer(info DriverInfo, layerId string, parentLayerPaths []string) error {
|
||||
title := "hcsshim::PrepareLayer "
|
||||
logrus.Debugf(title+"flavour %d layerId %s", info.Flavour, layerId)
|
||||
|
||||
// Generate layer descriptors
|
||||
layers, err := layerPathsToDescriptors(parentLayerPaths)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
// Convert info to API calling convention
|
||||
infop, err := convertDriverInfo(info)
|
||||
if err != nil {
|
||||
logrus.Error(err)
|
||||
return err
|
||||
}
|
||||
|
||||
// This lock is a temporary workaround for a Windows bug. Only allowing one
|
||||
// call to prepareLayer at a time vastly reduces the chance of a timeout.
|
||||
prepareLayerLock.Lock()
|
||||
defer prepareLayerLock.Unlock()
|
||||
err = prepareLayer(&infop, layerId, layers)
|
||||
if err != nil {
|
||||
err = makeErrorf(err, title, "layerId=%s flavour=%d", layerId, info.Flavour)
|
||||
logrus.Error(err)
|
||||
return err
|
||||
}
|
||||
|
||||
logrus.Debugf(title+"succeeded flavour=%d layerId=%s", info.Flavour, layerId)
|
||||
return nil
|
||||
}
|
|
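A hedged sketch of the prepare/unprepare pairing described above. UnprepareLayer is the counterpart named in the comment and is assumed here to take (DriverInfo, layerId); the values passed in by callers are illustrative.

package example

import "github.com/Microsoft/hcsshim"

// withPreparedLayer enables the filesystem filter on a read-write layer, runs
// fn, and then disables the filter again.
func withPreparedLayer(info hcsshim.DriverInfo, layerID string, parents []string, fn func() error) error {
	if err := hcsshim.PrepareLayer(info, layerID, parents); err != nil {
		return err
	}
	defer hcsshim.UnprepareLayer(info, layerID)
	return fn()
}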
@ -0,0 +1,384 @@
|
|||
package hcsshim
|
||||
|
||||
import (
|
||||
"encoding/json"
|
||||
"io"
|
||||
"sync"
|
||||
"syscall"
|
||||
"time"
|
||||
|
||||
"github.com/sirupsen/logrus"
|
||||
)
|
||||
|
||||
// process represents a process running in a container managed by HCS.
|
||||
type process struct {
|
||||
handleLock sync.RWMutex
|
||||
handle hcsProcess
|
||||
processID int
|
||||
container *container
|
||||
cachedPipes *cachedPipes
|
||||
callbackNumber uintptr
|
||||
}
|
||||
|
||||
type cachedPipes struct {
|
||||
stdIn syscall.Handle
|
||||
stdOut syscall.Handle
|
||||
stdErr syscall.Handle
|
||||
}
|
||||
|
||||
type processModifyRequest struct {
|
||||
Operation string
|
||||
ConsoleSize *consoleSize `json:",omitempty"`
|
||||
CloseHandle *closeHandle `json:",omitempty"`
|
||||
}
|
||||
|
||||
type consoleSize struct {
|
||||
Height uint16
|
||||
Width uint16
|
||||
}
|
||||
|
||||
type closeHandle struct {
|
||||
Handle string
|
||||
}
|
||||
|
||||
type processStatus struct {
|
||||
ProcessID uint32
|
||||
Exited bool
|
||||
ExitCode uint32
|
||||
LastWaitResult int32
|
||||
}
|
||||
|
||||
const (
|
||||
stdIn string = "StdIn"
|
||||
stdOut string = "StdOut"
|
||||
stdErr string = "StdErr"
|
||||
)
|
||||
|
||||
const (
|
||||
modifyConsoleSize string = "ConsoleSize"
|
||||
modifyCloseHandle string = "CloseHandle"
|
||||
)
|
||||
|
||||
// Pid returns the process ID of the process within the container.
|
||||
func (process *process) Pid() int {
|
||||
return process.processID
|
||||
}
|
||||
|
||||
// Kill signals the process to terminate but does not wait for it to finish terminating.
|
||||
func (process *process) Kill() error {
|
||||
process.handleLock.RLock()
|
||||
defer process.handleLock.RUnlock()
|
||||
operation := "Kill"
|
||||
title := "HCSShim::Process::" + operation
|
||||
logrus.Debugf(title+" processid=%d", process.processID)
|
||||
|
||||
if process.handle == 0 {
|
||||
return makeProcessError(process, operation, "", ErrAlreadyClosed)
|
||||
}
|
||||
|
||||
var resultp *uint16
|
||||
err := hcsTerminateProcess(process.handle, &resultp)
|
||||
err = processHcsResult(err, resultp)
|
||||
if err != nil {
|
||||
return makeProcessError(process, operation, "", err)
|
||||
}
|
||||
|
||||
logrus.Debugf(title+" succeeded processid=%d", process.processID)
|
||||
return nil
|
||||
}
|
||||
|
||||
// Wait waits for the process to exit.
|
||||
func (process *process) Wait() error {
|
||||
operation := "Wait"
|
||||
title := "HCSShim::Process::" + operation
|
||||
logrus.Debugf(title+" processid=%d", process.processID)
|
||||
|
||||
err := waitForNotification(process.callbackNumber, hcsNotificationProcessExited, nil)
|
||||
if err != nil {
|
||||
return makeProcessError(process, operation, "", err)
|
||||
}
|
||||
|
||||
logrus.Debugf(title+" succeeded processid=%d", process.processID)
|
||||
return nil
|
||||
}
|
||||
|
||||
// WaitTimeout waits for the process to exit or the duration to elapse. It returns
|
||||
// an error if the timeout occurs.
|
||||
func (process *process) WaitTimeout(timeout time.Duration) error {
|
||||
operation := "WaitTimeout"
|
||||
title := "HCSShim::Process::" + operation
|
||||
logrus.Debugf(title+" processid=%d", process.processID)
|
||||
|
||||
err := waitForNotification(process.callbackNumber, hcsNotificationProcessExited, &timeout)
|
||||
if err != nil {
|
||||
return makeProcessError(process, operation, "", err)
|
||||
}
|
||||
|
||||
logrus.Debugf(title+" succeeded processid=%d", process.processID)
|
||||
return nil
|
||||
}
|
||||
|
||||
// ExitCode returns the exit code of the process. The process must have
|
||||
// already terminated.
|
||||
func (process *process) ExitCode() (int, error) {
|
||||
process.handleLock.RLock()
|
||||
defer process.handleLock.RUnlock()
|
||||
operation := "ExitCode"
|
||||
title := "HCSShim::Process::" + operation
|
||||
logrus.Debugf(title+" processid=%d", process.processID)
|
||||
|
||||
if process.handle == 0 {
|
||||
return 0, makeProcessError(process, operation, "", ErrAlreadyClosed)
|
||||
}
|
||||
|
||||
properties, err := process.properties()
|
||||
if err != nil {
|
||||
return 0, makeProcessError(process, operation, "", err)
|
||||
}
|
||||
|
||||
if properties.Exited == false {
|
||||
return 0, makeProcessError(process, operation, "", ErrInvalidProcessState)
|
||||
}
|
||||
|
||||
if properties.LastWaitResult != 0 {
|
||||
return 0, makeProcessError(process, operation, "", syscall.Errno(properties.LastWaitResult))
|
||||
}
|
||||
|
||||
logrus.Debugf(title+" succeeded processid=%d exitCode=%d", process.processID, properties.ExitCode)
|
||||
return int(properties.ExitCode), nil
|
||||
}
|
||||
|
||||
// ResizeConsole resizes the console of the process.
|
||||
func (process *process) ResizeConsole(width, height uint16) error {
|
||||
process.handleLock.RLock()
|
||||
defer process.handleLock.RUnlock()
|
||||
operation := "ResizeConsole"
|
||||
title := "HCSShim::Process::" + operation
|
||||
logrus.Debugf(title+" processid=%d", process.processID)
|
||||
|
||||
if process.handle == 0 {
|
||||
return makeProcessError(process, operation, "", ErrAlreadyClosed)
|
||||
}
|
||||
|
||||
modifyRequest := processModifyRequest{
|
||||
Operation: modifyConsoleSize,
|
||||
ConsoleSize: &consoleSize{
|
||||
Height: height,
|
||||
Width: width,
|
||||
},
|
||||
}
|
||||
|
||||
modifyRequestb, err := json.Marshal(modifyRequest)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
modifyRequestStr := string(modifyRequestb)
|
||||
|
||||
var resultp *uint16
|
||||
err = hcsModifyProcess(process.handle, modifyRequestStr, &resultp)
|
||||
err = processHcsResult(err, resultp)
|
||||
if err != nil {
|
||||
return makeProcessError(process, operation, "", err)
|
||||
}
|
||||
|
||||
logrus.Debugf(title+" succeeded processid=%d", process.processID)
|
||||
return nil
|
||||
}
|
||||
|
||||
func (process *process) properties() (*processStatus, error) {
|
||||
operation := "properties"
|
||||
title := "HCSShim::Process::" + operation
|
||||
logrus.Debugf(title+" processid=%d", process.processID)
|
||||
|
||||
var (
|
||||
resultp *uint16
|
||||
propertiesp *uint16
|
||||
)
|
||||
err := hcsGetProcessProperties(process.handle, &propertiesp, &resultp)
|
||||
err = processHcsResult(err, resultp)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
if propertiesp == nil {
|
||||
return nil, ErrUnexpectedValue
|
||||
}
|
||||
propertiesRaw := convertAndFreeCoTaskMemBytes(propertiesp)
|
||||
|
||||
properties := &processStatus{}
|
||||
if err := json.Unmarshal(propertiesRaw, properties); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
logrus.Debugf(title+" succeeded processid=%d, properties=%s", process.processID, propertiesRaw)
|
||||
return properties, nil
|
||||
}
|
||||
|
||||
// Stdio returns the stdin, stdout, and stderr pipes, respectively. Closing
|
||||
// these pipes does not close the underlying pipes; it should be possible to
|
||||
// call this multiple times to get multiple interfaces.
|
||||
func (process *process) Stdio() (io.WriteCloser, io.ReadCloser, io.ReadCloser, error) {
|
||||
process.handleLock.RLock()
|
||||
defer process.handleLock.RUnlock()
|
||||
operation := "Stdio"
|
||||
title := "HCSShim::Process::" + operation
|
||||
logrus.Debugf(title+" processid=%d", process.processID)
|
||||
|
||||
if process.handle == 0 {
|
||||
return nil, nil, nil, makeProcessError(process, operation, "", ErrAlreadyClosed)
|
||||
}
|
||||
|
||||
var stdIn, stdOut, stdErr syscall.Handle
|
||||
|
||||
if process.cachedPipes == nil {
|
||||
var (
|
||||
processInfo hcsProcessInformation
|
||||
resultp *uint16
|
||||
)
|
||||
err := hcsGetProcessInfo(process.handle, &processInfo, &resultp)
|
||||
err = processHcsResult(err, resultp)
|
||||
if err != nil {
|
||||
return nil, nil, nil, makeProcessError(process, operation, "", err)
|
||||
}
|
||||
|
||||
stdIn, stdOut, stdErr = processInfo.StdInput, processInfo.StdOutput, processInfo.StdError
|
||||
} else {
|
||||
// Use cached pipes
|
||||
stdIn, stdOut, stdErr = process.cachedPipes.stdIn, process.cachedPipes.stdOut, process.cachedPipes.stdErr
|
||||
|
||||
// Invalidate the cache
|
||||
process.cachedPipes = nil
|
||||
}
|
||||
|
||||
pipes, err := makeOpenFiles([]syscall.Handle{stdIn, stdOut, stdErr})
|
||||
if err != nil {
|
||||
return nil, nil, nil, makeProcessError(process, operation, "", err)
|
||||
}
|
||||
|
||||
logrus.Debugf(title+" succeeded processid=%d", process.processID)
|
||||
return pipes[0], pipes[1], pipes[2], nil
|
||||
}
|
||||
|
||||
// CloseStdin closes the write side of the stdin pipe so that the process is
|
||||
// notified on the read side that there is no more data in stdin.
|
||||
func (process *process) CloseStdin() error {
|
||||
process.handleLock.RLock()
|
||||
defer process.handleLock.RUnlock()
|
||||
operation := "CloseStdin"
|
||||
title := "HCSShim::Process::" + operation
|
||||
logrus.Debugf(title+" processid=%d", process.processID)
|
||||
|
||||
if process.handle == 0 {
|
||||
return makeProcessError(process, operation, "", ErrAlreadyClosed)
|
||||
}
|
||||
|
||||
modifyRequest := processModifyRequest{
|
||||
Operation: modifyCloseHandle,
|
||||
CloseHandle: &closeHandle{
|
||||
Handle: stdIn,
|
||||
},
|
||||
}
|
||||
|
||||
modifyRequestb, err := json.Marshal(modifyRequest)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
modifyRequestStr := string(modifyRequestb)
|
||||
|
||||
var resultp *uint16
|
||||
err = hcsModifyProcess(process.handle, modifyRequestStr, &resultp)
|
||||
err = processHcsResult(err, resultp)
|
||||
if err != nil {
|
||||
return makeProcessError(process, operation, "", err)
|
||||
}
|
||||
|
||||
logrus.Debugf(title+" succeeded processid=%d", process.processID)
|
||||
return nil
|
||||
}
|
||||
|
||||
// Close cleans up any state associated with the process but does not kill
|
||||
// or wait on it.
|
||||
func (process *process) Close() error {
|
||||
process.handleLock.Lock()
|
||||
defer process.handleLock.Unlock()
|
||||
operation := "Close"
|
||||
title := "HCSShim::Process::" + operation
|
||||
logrus.Debugf(title+" processid=%d", process.processID)
|
||||
|
||||
// Don't double free this
|
||||
if process.handle == 0 {
|
||||
return nil
|
||||
}
|
||||
|
||||
if err := process.unregisterCallback(); err != nil {
|
||||
return makeProcessError(process, operation, "", err)
|
||||
}
|
||||
|
||||
if err := hcsCloseProcess(process.handle); err != nil {
|
||||
return makeProcessError(process, operation, "", err)
|
||||
}
|
||||
|
||||
process.handle = 0
|
||||
|
||||
logrus.Debugf(title+" succeeded processid=%d", process.processID)
|
||||
return nil
|
||||
}
|
||||
|
||||
func (process *process) registerCallback() error {
|
||||
context := ¬ifcationWatcherContext{
|
||||
channels: newChannels(),
|
||||
}
|
||||
|
||||
callbackMapLock.Lock()
|
||||
callbackNumber := nextCallback
|
||||
nextCallback++
|
||||
callbackMap[callbackNumber] = context
|
||||
callbackMapLock.Unlock()
|
||||
|
||||
var callbackHandle hcsCallback
|
||||
err := hcsRegisterProcessCallback(process.handle, notificationWatcherCallback, callbackNumber, &callbackHandle)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
context.handle = callbackHandle
|
||||
process.callbackNumber = callbackNumber
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func (process *process) unregisterCallback() error {
|
||||
callbackNumber := process.callbackNumber
|
||||
|
||||
callbackMapLock.RLock()
|
||||
context := callbackMap[callbackNumber]
|
||||
callbackMapLock.RUnlock()
|
||||
|
||||
if context == nil {
|
||||
return nil
|
||||
}
|
||||
|
||||
handle := context.handle
|
||||
|
||||
if handle == 0 {
|
||||
return nil
|
||||
}
|
||||
|
||||
// hcsUnregisterProcessCallback has its own syncronization
|
||||
// to wait for all callbacks to complete. We must NOT hold the callbackMapLock.
|
||||
err := hcsUnregisterProcessCallback(handle)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
closeChannels(context.channels)
|
||||
|
||||
callbackMapLock.Lock()
|
||||
callbackMap[callbackNumber] = nil
|
||||
callbackMapLock.Unlock()
|
||||
|
||||
handle = 0
|
||||
|
||||
return nil
|
||||
}
|
|
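The unexported process type above backs the package's exported Process interface, so a caller drives the lifecycle roughly as in the following sketch. This is illustrative only and not part of this commit; it assumes hcsshim.Process exposes the methods implemented above and that the value came from the container side of the package (CreateProcess), which is not shown in this excerpt. The package name hcsshimutil is invented.

// Consumer-side sketch, assuming the exported hcsshim.Process interface.
package hcsshimutil

import (
	"io"

	"github.com/Microsoft/hcsshim"
)

// RunAndCollect drains the process's stdout, waits for it to exit, and
// returns its exit code. The stdin and stderr wrappers returned by Stdio
// are ignored here for brevity.
func RunAndCollect(p hcsshim.Process, out io.Writer) (int, error) {
	defer p.Close() // releases the HCS handle and callback registration

	_, stdout, _, err := p.Stdio()
	if err != nil {
		return 0, err
	}

	// Signal EOF on the process's stdin (CloseStdin above sends the
	// CloseHandle/StdIn modify request to HCS).
	if err := p.CloseStdin(); err != nil {
		return 0, err
	}

	// Copy output while the process runs.
	done := make(chan error, 1)
	go func() {
		_, copyErr := io.Copy(out, stdout)
		done <- copyErr
	}()

	// Wait blocks until HCS delivers hcsNotificationProcessExited.
	if err := p.Wait(); err != nil {
		return 0, err
	}
	<-done

	// ExitCode is only valid once the process has exited.
	return p.ExitCode()
}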
@ -0,0 +1,23 @@
package hcsshim

import "os"

// ProcessBaseLayer post-processes a base layer that has had its files extracted.
// The files should have been extracted to <path>\Files.
func ProcessBaseLayer(path string) error {
	err := processBaseImage(path)
	if err != nil {
		return &os.PathError{Op: "ProcessBaseLayer", Path: path, Err: err}
	}
	return nil
}

// ProcessUtilityVMImage post-processes a utility VM image that has had its files extracted.
// The files should have been extracted to <path>\Files.
func ProcessUtilityVMImage(path string) error {
	err := processUtilityImage(path)
	if err != nil {
		return &os.PathError{Op: "ProcessUtilityVMImage", Path: path, Err: err}
	}
	return nil
}
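Both functions wrap failures in *os.PathError, so a caller can recover which layer path failed. A minimal sketch (not part of this commit; the path is an invented example):

package main

import (
	"log"
	"os"

	"github.com/Microsoft/hcsshim"
)

func main() {
	// Invented path for illustration; in practice this is the directory the
	// layer was extracted into, containing a Files\ subdirectory.
	layerPath := `C:\layers\base`

	if err := hcsshim.ProcessBaseLayer(layerPath); err != nil {
		// The error carries the failing path via *os.PathError.
		if perr, ok := err.(*os.PathError); ok {
			log.Fatalf("post-processing %s failed: %v", perr.Path, perr.Err)
		}
		log.Fatal(err)
	}
	log.Printf("base layer at %s post-processed", layerPath)
}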
@ -0,0 +1,27 @@
package hcsshim

import "github.com/sirupsen/logrus"

// UnprepareLayer disables the filesystem filter for the read-write layer with
// the given id.
func UnprepareLayer(info DriverInfo, layerId string) error {
	title := "hcsshim::UnprepareLayer "
	logrus.Debugf(title+"flavour %d layerId %s", info.Flavour, layerId)

	// Convert info to API calling convention
	infop, err := convertDriverInfo(info)
	if err != nil {
		logrus.Error(err)
		return err
	}

	err = unprepareLayer(&infop, layerId)
	if err != nil {
		err = makeErrorf(err, title, "layerId=%s flavour=%d", layerId, info.Flavour)
		logrus.Error(err)
		return err
	}

	logrus.Debugf(title+"succeeded flavour %d layerId=%s", info.Flavour, layerId)
	return nil
}
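UnprepareLayer is normally paired with the layer setup calls elsewhere in this package. The sketch below is illustrative only and assumes PrepareLayer, ActivateLayer, and DeactivateLayer exist as companions to UnprepareLayer (they are not shown in this excerpt); the package name hcsshimutil is invented.

package hcsshimutil

import "github.com/Microsoft/hcsshim"

// TeardownLayer reverses the usual mount sequence for a read-write layer that
// was earlier prepared with PrepareLayer and activated with ActivateLayer.
func TeardownLayer(info hcsshim.DriverInfo, layerID string) error {
	// Remove the filesystem filter first...
	if err := hcsshim.UnprepareLayer(info, layerID); err != nil {
		return err
	}
	// ...then deactivate the layer itself.
	return hcsshim.DeactivateLayer(info, layerID)
}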
@ -0,0 +1,33 @@
package hcsshim

import (
	"io"
	"syscall"

	"github.com/Microsoft/go-winio"
)

// makeOpenFiles calls winio.MakeOpenFile for each handle in a slice but closes all the handles
// if there is an error.
func makeOpenFiles(hs []syscall.Handle) (_ []io.ReadWriteCloser, err error) {
	fs := make([]io.ReadWriteCloser, len(hs))
	for i, h := range hs {
		if h != syscall.Handle(0) {
			if err == nil {
				fs[i], err = winio.MakeOpenFile(h)
			}
			if err != nil {
				syscall.Close(h)
			}
		}
	}
	if err != nil {
		for _, f := range fs {
			if f != nil {
				f.Close()
			}
		}
		return nil, err
	}
	return fs, nil
}
@ -0,0 +1,7 @@
package hcsshim

// IsTP4 returns whether the currently running Windows build is at most TP4.
func IsTP4() bool {
	// HNSCall was not present in TP4
	return procHNSCall.Find() != nil
}
@ -0,0 +1,63 @@
package hcsshim

import (
	"time"

	"github.com/sirupsen/logrus"
)

func processAsyncHcsResult(err error, resultp *uint16, callbackNumber uintptr, expectedNotification hcsNotification, timeout *time.Duration) error {
	err = processHcsResult(err, resultp)
	if IsPending(err) {
		return waitForNotification(callbackNumber, expectedNotification, timeout)
	}

	return err
}

func waitForNotification(callbackNumber uintptr, expectedNotification hcsNotification, timeout *time.Duration) error {
	callbackMapLock.RLock()
	channels := callbackMap[callbackNumber].channels
	callbackMapLock.RUnlock()

	expectedChannel := channels[expectedNotification]
	if expectedChannel == nil {
		logrus.Errorf("unknown notification type in waitForNotification %x", expectedNotification)
		return ErrInvalidNotificationType
	}

	var c <-chan time.Time
	if timeout != nil {
		timer := time.NewTimer(*timeout)
		c = timer.C
		defer timer.Stop()
	}

	select {
	case err, ok := <-expectedChannel:
		if !ok {
			return ErrHandleClose
		}
		return err
	case err, ok := <-channels[hcsNotificationSystemExited]:
		if !ok {
			return ErrHandleClose
		}
		// If the expected notification is hcsNotificationSystemExited, which of
		// the two cases fires is random. Return the raw error if
		// hcsNotificationSystemExited is the expected notification.
		if channels[hcsNotificationSystemExited] == expectedChannel {
			return err
		}
		return ErrUnexpectedContainerExit
	case _, ok := <-channels[hcsNotificationServiceDisconnect]:
		if !ok {
			return ErrHandleClose
		}
		// hcsNotificationServiceDisconnect should never be an expected notification;
		// it does not need the same handling as hcsNotificationSystemExited.
		return ErrUnexpectedProcessAbort
	case <-c:
		return ErrTimeout
	}
	return nil
}
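A short package-internal sketch of how the helper above is consumed: a bounded wait returns ErrTimeout when the duration elapses before the expected notification arrives. The wrapper below is illustrative and not part of this commit; it would live in this package, and callbackNumber is assumed to have been assigned by a registerCallback call such as the one in process.go above.

// exitedWithin reports whether the process behind callbackNumber exited
// within d, treating a timeout as a non-error "not yet" result.
func exitedWithin(callbackNumber uintptr, d time.Duration) (bool, error) {
	err := waitForNotification(callbackNumber, hcsNotificationProcessExited, &d)
	switch {
	case err == nil:
		return true, nil
	case err == ErrTimeout:
		// The timer case fired before hcsNotificationProcessExited arrived.
		return false, nil
	default:
		return false, err
	}
}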
File diff suppressed because it is too large