Merge pull request #816 from galal-hussein/default_local_storage

Add default storage class
Erik Wilson (committed via GitHub)
commit cac41db0e1

@@ -0,0 +1,101 @@
apiVersion: v1
kind: ServiceAccount
metadata:
  name: local-path-provisioner-service-account
  namespace: kube-system
---
apiVersion: rbac.authorization.k8s.io/v1beta1
kind: ClusterRole
metadata:
  name: local-path-provisioner-role
  namespace: kube-system
rules:
- apiGroups: [""]
  resources: ["nodes", "persistentvolumeclaims"]
  verbs: ["get", "list", "watch"]
- apiGroups: [""]
  resources: ["endpoints", "persistentvolumes", "pods"]
  verbs: ["*"]
- apiGroups: [""]
  resources: ["events"]
  verbs: ["create", "patch"]
- apiGroups: ["storage.k8s.io"]
  resources: ["storageclasses"]
  verbs: ["get", "list", "watch"]
---
apiVersion: rbac.authorization.k8s.io/v1beta1
kind: ClusterRoleBinding
metadata:
  name: local-path-provisioner-bind
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: ClusterRole
  name: local-path-provisioner-role
subjects:
- kind: ServiceAccount
  name: local-path-provisioner-service-account
  namespace: kube-system
---
apiVersion: apps/v1
kind: Deployment
metadata:
  name: local-path-provisioner
  namespace: kube-system
spec:
  replicas: 1
  selector:
    matchLabels:
      app: local-path-provisioner
  template:
    metadata:
      labels:
        app: local-path-provisioner
    spec:
      serviceAccountName: local-path-provisioner-service-account
      containers:
      - name: local-path-provisioner
        image: rancher/local-path-provisioner:v0.0.11
        imagePullPolicy: IfNotPresent
        command:
        - local-path-provisioner
        - start
        - --config
        - /etc/config/config.json
        volumeMounts:
        - name: config-volume
          mountPath: /etc/config/
        env:
        - name: POD_NAMESPACE
          valueFrom:
            fieldRef:
              fieldPath: metadata.namespace
      volumes:
      - name: config-volume
        configMap:
          name: local-path-config
---
apiVersion: storage.k8s.io/v1
kind: StorageClass
metadata:
  name: local-path
  annotations:
    storageclass.kubernetes.io/is-default-class: "true"
provisioner: rancher.io/local-path
volumeBindingMode: WaitForFirstConsumer
reclaimPolicy: Delete
---
kind: ConfigMap
apiVersion: v1
metadata:
  name: local-path-config
  namespace: kube-system
data:
  config.json: |-
    {
      "nodePathMap":[
        {
          "node":"DEFAULT_PATH_FOR_NON_LISTED_NODES",
          "paths":["%{DEFAULT_LOCAL_STORAGE_PATH}%"]
        }
      ]
    }
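For reference, the config.json embedded in the ConfigMap of the new manifests/local-storage.yaml follows the local-path provisioner's nodePathMap layout: once k3s substitutes %{DEFAULT_LOCAL_STORAGE_PATH}%, any node without an explicit entry provisions volumes under that path. A minimal Go sketch of that structure (the type names and example path are illustrative, not the provisioner's own):

```go
// Minimal sketch of the nodePathMap structure in the ConfigMap's config.json.
// Type names are illustrative, not the provisioner's own; the path shown
// assumes the template variable has already been substituted.
package main

import (
    "encoding/json"
    "fmt"
)

type nodePathEntry struct {
    Node  string   `json:"node"`
    Paths []string `json:"paths"`
}

type provisionerConfig struct {
    NodePathMap []nodePathEntry `json:"nodePathMap"`
}

func main() {
    raw := `{"nodePathMap":[{"node":"DEFAULT_PATH_FOR_NON_LISTED_NODES","paths":["/var/lib/rancher/k3s/storage"]}]}`

    var cfg provisionerConfig
    if err := json.Unmarshal([]byte(raw), &cfg); err != nil {
        panic(err)
    }
    // Volumes on nodes without an explicit entry land under the default path.
    fmt.Println(cfg.NodePathMap[0].Paths[0])
}
```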

@@ -8,32 +8,33 @@ import (
)

type Server struct {
    ClusterCIDR             string
    ClusterSecret           string
    ServiceCIDR             string
    ClusterDNS              string
    ClusterDomain           string
    HTTPSPort               int
    HTTPPort                int
    DataDir                 string
    DisableAgent            bool
    KubeConfigOutput        string
    KubeConfigMode          string
    TLSSan                  cli.StringSlice
    BindAddress             string
    ExtraAPIArgs            cli.StringSlice
    ExtraSchedulerArgs      cli.StringSlice
    ExtraControllerArgs     cli.StringSlice
    Rootless                bool
    StoreBootstrap          bool
    StorageEndpoint         string
    StorageCAFile           string
    StorageCertFile         string
    StorageKeyFile          string
    AdvertiseIP             string
    AdvertisePort           int
    DisableScheduler        bool
    FlannelBackend          string
    DefaultLocalStoragePath string
}
var ServerConfig Server
@@ -199,6 +200,11 @@ func NewServerCommand(action func(*cli.Context) error) cli.Command {
            Destination: &ServerConfig.FlannelBackend,
            Value:       config.FlannelBackendVXLAN,
        },
        cli.StringFlag{
            Name:        "default-local-storage-path",
            Usage:       "Default local storage path for local provisioner storage class",
            Destination: &ServerConfig.DefaultLocalStoragePath,
        },
        NodeIPFlag,
        NodeNameFlag,
        DockerFlag,
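The new cli.StringFlag binds straight into ServerConfig via its Destination field; urfave/cli (v1) writes the parsed value into that pointer before the command's Action runs. A self-contained sketch of that binding pattern, assuming the v1 github.com/urfave/cli API (the example app and config struct are hypothetical):

```go
// Self-contained sketch of Destination-based flag binding with urfave/cli v1,
// the pattern used by the --default-local-storage-path flag above. The app
// and config struct here are hypothetical.
package main

import (
    "fmt"
    "os"

    "github.com/urfave/cli"
)

type serverConfig struct {
    DefaultLocalStoragePath string
}

var cfg serverConfig

func main() {
    app := cli.NewApp()
    app.Name = "example-server"
    app.Flags = []cli.Flag{
        cli.StringFlag{
            Name:        "default-local-storage-path",
            Usage:       "Default local storage path for local provisioner storage class",
            Destination: &cfg.DefaultLocalStoragePath,
        },
    }
    app.Action = func(c *cli.Context) error {
        // The parsed value is already written into cfg before Action runs.
        fmt.Println("storage path:", cfg.DefaultLocalStoragePath)
        return nil
    }
    if err := app.Run(os.Args); err != nil {
        fmt.Fprintln(os.Stderr, err)
        os.Exit(1)
    }
}
```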

@@ -122,6 +122,16 @@ func run(app *cli.Context, cfg *cmds.Server) error {
        serverConfig.ControlConfig.ClusterDNS = net2.ParseIP(cfg.ClusterDNS)
    }

    if cfg.DefaultLocalStoragePath == "" {
        dataDir, err := datadir.LocalHome(cfg.DataDir, false)
        if err != nil {
            return err
        }
        serverConfig.ControlConfig.DefaultLocalStoragePath = filepath.Join(dataDir, "/storage")
    } else {
        serverConfig.ControlConfig.DefaultLocalStoragePath = cfg.DefaultLocalStoragePath
    }

    for _, noDeploy := range app.StringSlice("no-deploy") {
        if noDeploy == "servicelb" {
            serverConfig.DisableServiceLB = true
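The block above picks the provisioner's host path: an explicit --default-local-storage-path wins, otherwise the path is derived from the resolved data directory. An illustrative reduction of that fallback, assuming a root install whose data directory resolves to /var/lib/rancher/k3s (resolveStoragePath is a stand-in helper, not k3s code):

```go
// Illustrative reduction of the fallback above: an explicit flag value wins,
// otherwise the storage path lives under the server's data directory.
// resolveStoragePath and the example data dir are assumptions for this sketch.
package main

import (
    "fmt"
    "path/filepath"
)

func resolveStoragePath(flagValue, dataDir string) string {
    if flagValue != "" {
        return flagValue
    }
    return filepath.Join(dataDir, "storage")
}

func main() {
    fmt.Println(resolveStoragePath("", "/var/lib/rancher/k3s"))
    // /var/lib/rancher/k3s/storage
    fmt.Println(resolveStoragePath("/opt/local-path", "/var/lib/rancher/k3s"))
    // /opt/local-path
}
```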

@@ -79,29 +79,30 @@ type Agent struct {
}

type Control struct {
    AdvertisePort           int
    AdvertiseIP             string
    ListenPort              int
    HTTPSPort               int
    ClusterSecret           string
    ClusterIPRange          *net.IPNet
    ServiceIPRange          *net.IPNet
    ClusterDNS              net.IP
    ClusterDomain           string
    NoCoreDNS               bool
    KubeConfigOutput        string
    KubeConfigMode          string
    DataDir                 string
    Skips                   []string
    BootstrapReadOnly       bool
    Storage                 endpoint.Config
    NoScheduler             bool
    ExtraAPIArgs            []string
    ExtraControllerArgs     []string
    ExtraSchedulerAPIArgs   []string
    NoLeaderElect           bool
    FlannelBackend          string
    IPSECPSK                string
    DefaultLocalStoragePath string

    Runtime *ControlRuntime `json:"-"`
}

@@ -1,6 +1,7 @@
// Code generated for package deploy by go-bindata DO NOT EDIT. (@generated)
// sources:
// manifests/coredns.yaml
// manifests/local-storage.yaml
// manifests/rolebindings.yaml
// manifests/traefik.yaml
package deploy
@@ -99,6 +100,26 @@ func corednsYaml() (*asset, error) {
    return a, nil
}
var _localStorageYaml = []byte("\x1f\x8b\x08\x00\x00\x00\x00\x00\x00\xff\xb4\x56\x4b\x6f\xe3\x36\x10\xbe\xeb\x57\x10\x02\xf6\x52\x2c\xed\xf5\xad\xe0\xcd\xf5\x63\xbb\x80\x5f\x88\xd3\xf6\x10\x2c\x8c\x31\x35\xb6\xb9\xa1\x48\x81\x1c\xa9\x48\xd3\xfc\xf7\x82\x7a\x45\x8a\x93\xd8\x41\xb1\xbc\x28\x19\xce\xcc\xf7\x71\xf8\xcd\xd0\x90\xa9\x3f\xd1\x79\x65\x8d\x60\xc5\x28\xba\x57\x26\x11\x6c\x8b\xae\x50\x12\xc7\x52\xda\xdc\x50\x94\x22\x41\x02\x04\x22\x62\xcc\x40\x8a\x82\x69\x2b\x41\xf3\x0c\xe8\xc4\x33\x67\x0b\x15\xe2\xd1\x71\x5f\xc5\x71\xa8\x03\x2b\x77\x9f\x81\x44\xc1\xee\xf3\x3d\x72\xff\xe0\x09\xd3\x88\x73\x1e\x75\x91\xdd\x1e\xe4\x00\x72\x3a\x59\xa7\xfe\x01\x52\xd6\x0c\xee\x7f\xf5\x03\x65\x87\xc5\x68\x8f\x04\x0d\xb1\x89\xce\x3d\xa1\xbb\xb1\x1a\xaf\x67\xe5\x82\xf7\x9b\x54\x5c\xae\xd1\x8b\x88\x33\xc8\xd4\x57\x67\xf3\xcc\x0b\x76\x17\xc7\xdf\x23\xc6\x1c\x7a\x9b\x3b\x89\xa5\xc5\xd8\x04\x7d\xfc\x99\xc5\x59\x20\xed\x09\x0d\x15\x56\xe7\x29\x4a\x0d\x2a\xf5\x65\x40\x81\x6e\x5f\x3a\x1f\x91\x82\xab\x56\xbe\xfc\xfe\x0d\x24\x4f\xf1\xf7\xcb\x20\x68\x92\xcc\x2a\x43\xaf\x02\x55\x46\x9b\xbc\xc0\xfa\xe5\xaa\xc4\x05\x86\xac\xbd\x40\xe9\x10\x08\xcb\xa4\xaf\xf3\xf3\x64\x1d\x1c\xb1\xbe\x8a\xf3\xa4\xf5\xbe\xd4\xe0\x3d\x5e\x59\x81\xff\x7f\xf1\xbf\x29\x93\x28\x73\xbc\xfe\xfe\xf7\xca\x24\x51\x10\xc1\x0d\x1e\x82\x73\x73\xc6\x77\xd0\x23\xc6\xce\x05\x77\x8d\xcc\x7c\xbe\xff\x81\x92\x4a\x41\xbd\xda\x4b\x3f\xab\x83\x20\xcb\xfc\xb0\x6d\xe0\x29\x66\xda\x3e\xa4\xf8\x81\xe6\x7d\x1b\xca\x67\x28\x45\x79\xf7\x99\x56\x12\xbc\x60\xa3\x88\x31\x8f\x1a\x25\x59\x17\x76\x18\x4b\xc3\xfd\x2e\x60\x8f\xda\x57\x86\x50\xe6\xec\x1d\x2c\xc2\x34\xd3\x40\x58\x87\x77\x48\x86\xa5\x7b\x99\x2e\xe5\x62\xac\xa1\x58\xfe\xdd\x2b\xf8\xea\x63\xd5\x0e\x4b\x5a\x43\xa0\x0c\xba\x96\x00\xbf\x54\xb9\x6a\xa9\x14\x8e\x28\x98\x03\x23\x4f\xe8\x86\xaf\x7b\x8b\xe2\xcb\xe0\xcb\x60\x34\xea\x47\x6d\x72\xad\x37\x56\x2b\xf9\x20\xd8\xb7\xc3\xca\xd2\xc6\xa1\xc7\x96\x52\x20\x95\xa6\x60\x92\xe7\x92\xf0\x4b\x6c\x38\xf3\x04\x8e\x3a\xff\x73\x2e\xad\x39\xa8\x63\xc7\x34\x44\x92\xc3\xca\x5a\x7f\x06\x3f\xbc\x35\xad\x47\x35\x79\x96\xa1\x3a\xbe\x8b\x5d\xd5\xa3\x8a\xe0\x95\x53\xbb\xcb\x58\x1a\xfc\x37\x40\x27\xd1\x03\x68\x3d\xd0\x14\xe7\xc9\x36\xeb\xe9\x6e\x35\x5e\xce\xb6\x9b\xf1\x64\xd6\x49\x56\x80\xce\x71\xee\x6c\x2a\x3a\x46\xc6\x0e\x0a\x75\x52\x77\xf4\x99\xbd\xc2\x6e\x54\x35\x68\x85\x1d\x75\x4f\xf5\x81\x03\x55\xf6\x25\x64\x7d\xb4\x33\x55\xd4\xf5\x7d\xd9\x9c\xfd\x41\xfa\xdc\xa6\xdb\xca\x3e\x09\x03\xf4\xdd\x46\x0d\x53\xcb\x18\x4b\xe5\x8c\xaa\x79\x77\xa7\xef\x20\xf4\xab\x33\x48\x58\x02\x28\xcf\x13\x3c\x40\xae\x89\x97\xdb\x82\xc5\xe4\x72\x8c\xa3\xae\x0e\x1b\x9d\x86\x80\x0e\x52\x75\xf6\x7a\xc8\x2e\x6d\x82\x82\xfd\x05\x8a\xe6\xd6\xcd\x95\xf3\x34\xb1\xc6\xe7\x29\xba\xc8\x55\x0f\x5f\x23\xda\x29\x6a\x24\x2c\x4f\x5e\x4f\xce\xa6\x64\xd1\x8b\x9f\x18\xef\x0e\xa4\x56\xa0\x6f\xcc\xa2\x26\xb0\xa3\x55\xc1\xfe\xe5\xed\xad\x3c\xbe\x50\x03\x63\xe5\xc3\x1d\xf4\xb0\x84\x2c\x16\x77\x67\xfb\xe7\x11\xbd\xc8\x58\xc4\xd3\xd9\x7c\xfc\xc7\xe2\x76\xb7\x19\xdf\xfe\xbe\x9b\xaf\x6f\x76\xab\xf5\x6a\xb7\xf8\xb6\xbd\x9d\x4d\x77\xab\xf5\x74\xb6\x8d\x3f\xbf\x9d\x23\x9c\xca\xc7\xe2\x2e\xfe\xf4\xd8\xe4\x59\xac\x27\xe3\xc5\x6e\x7b\xbb\xbe\x19\x7f\x9d\x95\x59\x9f\x3e\x95\x8f\x67\x7f\x3d\x9d\x59\x9e\x7d\x9e\xa2\xff\x02\x00\x00\xff\xff\x99\xfd\xbf\x2f\xb8\x09\x00\x00")
func localStorageYamlBytes() ([]byte, error) {
    return bindataRead(
        _localStorageYaml,
        "local-storage.yaml",
    )
}

func localStorageYaml() (*asset, error) {
    bytes, err := localStorageYamlBytes()
    if err != nil {
        return nil, err
    }

    info := bindataFileInfo{name: "local-storage.yaml", size: 0, mode: os.FileMode(0), modTime: time.Unix(0, 0)}
    a := &asset{bytes: bytes, info: info}
    return a, nil
}
var _rolebindingsYaml = []byte("\x1f\x8b\x08\x00\x00\x00\x00\x00\x00\xff\x94\xcf\xbd\x0a\xc2\x40\x10\x04\xe0\xfe\x9e\xe2\x5e\xe0\x22\x76\x72\xa5\x16\xf6\x01\xed\x37\xb9\x55\xd7\xdc\x1f\xbb\x7b\x01\x7d\x7a\x09\x48\x1a\x51\xb0\x1c\x18\xe6\x63\xa0\xd2\x19\x59\xa8\x64\x6f\x79\x80\xb1\x83\xa6\xb7\xc2\xf4\x04\xa5\x92\xbb\x69\x27\x1d\x95\xcd\xbc\x35\x13\xe5\xe0\xed\x21\x36\x51\xe4\xbe\x44\xdc\x53\x0e\x94\xaf\x26\xa1\x42\x00\x05\x6f\xac\xcd\x90\xd0\xdb\xa9\x0d\xe8\xa0\x92\x20\xcf\xc8\x6e\x89\x11\xd5\x41\x48\x94\x0d\x97\x88\x3d\x5e\x96\x36\x54\x3a\x72\x69\xf5\x87\x6c\xac\xfd\x80\x57\x47\x1e\xa2\x98\xfc\xba\x5f\xe9\x6d\x48\x1b\xee\x38\xaa\x78\xe3\xfe\x42\x4e\x82\xfc\xe5\x85\x79\x05\x00\x00\xff\xff\x54\xf2\x55\xe2\x29\x01\x00\x00")
func rolebindingsYamlBytes() ([]byte, error) {
@@ -191,9 +212,10 @@ func AssetNames() []string {
// _bindata is a table, holding each asset generator, mapped to its name.
var _bindata = map[string]func() (*asset, error){
    "coredns.yaml":       corednsYaml,
    "local-storage.yaml": localStorageYaml,
    "rolebindings.yaml":  rolebindingsYaml,
    "traefik.yaml":       traefikYaml,
}
// AssetDir returns the file names below a certain
@@ -237,9 +259,10 @@ type bintree struct {
}

var _bintree = &bintree{nil, map[string]*bintree{
    "coredns.yaml":       &bintree{corednsYaml, map[string]*bintree{}},
    "local-storage.yaml": &bintree{localStorageYaml, map[string]*bintree{}},
    "rolebindings.yaml":  &bintree{rolebindingsYaml, map[string]*bintree{}},
    "traefik.yaml":       &bintree{traefikYaml, map[string]*bintree{}},
}}
// RestoreAsset restores an asset under the given directory
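The generated byte slices above begin with 0x1f 0x8b, the gzip magic bytes: go-bindata stores each manifest compressed, and the generated bindataRead helper inflates it on access. A rough sketch of that round trip using only the standard library (decompress is an illustrative stand-in, not the generated helper):

```go
// Rough sketch of the gzip round trip behind the embedded assets: the
// generated byte slices start with the gzip magic 0x1f 0x8b, and bindataRead
// inflates them on access. decompress is an illustrative stand-in, not the
// generated helper.
package main

import (
    "bytes"
    "compress/gzip"
    "fmt"
    "io/ioutil"
)

func decompress(data []byte) ([]byte, error) {
    gz, err := gzip.NewReader(bytes.NewReader(data))
    if err != nil {
        return nil, err
    }
    defer gz.Close()
    return ioutil.ReadAll(gz)
}

func main() {
    // Compress a small manifest snippet, then inflate it the way the embedded
    // assets are inflated at runtime.
    var buf bytes.Buffer
    zw := gzip.NewWriter(&buf)
    zw.Write([]byte("kind: StorageClass\n"))
    zw.Close()

    out, err := decompress(buf.Bytes())
    if err != nil {
        panic(err)
    }
    fmt.Print(string(out))
}
```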

@@ -176,8 +176,9 @@ func stageFiles(ctx context.Context, sc *Context, controlConfig *config.Control)
    dataDir = filepath.Join(controlConfig.DataDir, "manifests")
    templateVars := map[string]string{
        "%{CLUSTER_DNS}%":                controlConfig.ClusterDNS.String(),
        "%{CLUSTER_DOMAIN}%":             controlConfig.ClusterDomain,
        "%{DEFAULT_LOCAL_STORAGE_PATH}%": controlConfig.DefaultLocalStoragePath,
    }

    if err := deploy.Stage(dataDir, templateVars, controlConfig.Skips); err != nil {
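The keys in templateVars include the %{...}% delimiters, which implies staging is a literal string replacement over each bundled manifest before it lands in the manifests directory. A minimal sketch of that substitution under that assumption (renderManifest is illustrative; the real logic lives in deploy.Stage):

```go
// Minimal sketch of the %{...}% substitution implied by the templateVars keys
// above: each key, delimiters included, is replaced literally in the bundled
// manifests before they are staged. renderManifest is illustrative; the real
// logic lives in deploy.Stage.
package main

import (
    "fmt"
    "strings"
)

func renderManifest(manifest string, vars map[string]string) string {
    for key, value := range vars {
        manifest = strings.ReplaceAll(manifest, key, value)
    }
    return manifest
}

func main() {
    snippet := `"paths":["%{DEFAULT_LOCAL_STORAGE_PATH}%"]`
    vars := map[string]string{
        "%{DEFAULT_LOCAL_STORAGE_PATH}%": "/var/lib/rancher/k3s/storage",
    }
    fmt.Println(renderManifest(snippet, vars))
    // "paths":["/var/lib/rancher/k3s/storage"]
}
```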
