Revert "Revert "Add Mesos hyperkube minion server ""

This reverts commit 8372b0d927.
pull/6/head
Dr. Stefan Schimanski 2015-07-31 20:33:04 +02:00
parent eff78cce8d
commit 76dc61bcfb
31 changed files with 2167 additions and 222 deletions

Godeps/Godeps.json (generated)
View File

@ -577,6 +577,10 @@
"ImportPath": "gopkg.in/yaml.v2",
"Rev": "d466437aa4adc35830964cffc5b5f262c63ddcb4"
},
{
"ImportPath": "gopkg.in/natefinch/lumberjack.v2/",
"Rev": "20b71e5b60d756d3d2f80def009790325acc2b23"
},
{
"ImportPath": "speter.net/go/exp/math/dec/inf",
"Rev": "42ca6cd68aa922bc3f32f1e056e61b65945d9ad7"

View File

@ -0,0 +1,23 @@
# Compiled Object files, Static and Dynamic libs (Shared Objects)
*.o
*.a
*.so
# Folders
_obj
_test
# Architecture specific extensions/prefixes
*.[568vq]
[568vq].out
*.cgo1.go
*.cgo2.c
_cgo_defun.c
_cgo_gotypes.go
_cgo_export.*
_testmain.go
*.exe
*.test

View File

@ -0,0 +1,21 @@
The MIT License (MIT)
Copyright (c) 2014 Nate Finch
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.

View File

@ -0,0 +1,166 @@
# lumberjack [![GoDoc](https://godoc.org/gopkg.in/natefinch/lumberjack.v2?status.png)](https://godoc.org/gopkg.in/natefinch/lumberjack.v2) [![Build Status](https://drone.io/github.com/natefinch/lumberjack/status.png)](https://drone.io/github.com/natefinch/lumberjack/latest) [![Build status](https://ci.appveyor.com/api/projects/status/00gchpxtg4gkrt5d)](https://ci.appveyor.com/project/natefinch/lumberjack) [![Coverage Status](https://coveralls.io/repos/natefinch/lumberjack/badge.svg?branch=v2.0)](https://coveralls.io/r/natefinch/lumberjack?branch=v2.0)
### Lumberjack is a Go package for writing logs to rolling files.
Package lumberjack provides a rolling logger.
Note that this is v2.0 of lumberjack, and should be imported using gopkg.in
thusly:
import "gopkg.in/natefinch/lumberjack.v2"
The package name remains simply lumberjack, and the code resides at
https://github.com/natefinch/lumberjack under the v2.0 branch.
Lumberjack is intended to be one part of a logging infrastructure.
It is not an all-in-one solution, but instead is a pluggable
component at the bottom of the logging stack that simply controls the files
to which logs are written.
Lumberjack plays well with any logging package that can write to an
io.Writer, including the standard library's log package.
Lumberjack assumes that only one process is writing to the output files.
Using the same lumberjack configuration from multiple processes on the same
machine will result in improper behavior.
**Example**
To use lumberjack with the standard library's log package, just pass it into the SetOutput function when your application starts.
Code:
```go
log.SetOutput(&lumberjack.Logger{
Filename: "/var/log/myapp/foo.log",
MaxSize: 500, // megabytes
MaxBackups: 3,
MaxAge: 28, //days
})
```
## type Logger
``` go
type Logger struct {
// Filename is the file to write logs to. Backup log files will be retained
// in the same directory. It uses <processname>-lumberjack.log in
// os.TempDir() if empty.
Filename string `json:"filename" yaml:"filename"`
// MaxSize is the maximum size in megabytes of the log file before it gets
// rotated. It defaults to 100 megabytes.
MaxSize int `json:"maxsize" yaml:"maxsize"`
// MaxAge is the maximum number of days to retain old log files based on the
// timestamp encoded in their filename. Note that a day is defined as 24
// hours and may not exactly correspond to calendar days due to daylight
// savings, leap seconds, etc. The default is not to remove old log files
// based on age.
MaxAge int `json:"maxage" yaml:"maxage"`
// MaxBackups is the maximum number of old log files to retain. The default
// is to retain all old log files (though MaxAge may still cause them to get
// deleted.)
MaxBackups int `json:"maxbackups" yaml:"maxbackups"`
// LocalTime determines if the time used for formatting the timestamps in
// backup files is the computer's local time. The default is to use UTC
// time.
LocalTime bool `json:"localtime" yaml:"localtime"`
// contains filtered or unexported fields
}
```
Logger is an io.WriteCloser that writes to the specified filename.
Logger opens or creates the logfile on first Write. If the file exists and
is less than MaxSize megabytes, lumberjack will open and append to that file.
If the file exists and its size is >= MaxSize megabytes, the file is renamed
by putting the current time in a timestamp in the name immediately before the
file's extension (or the end of the filename if there's no extension). A new
log file is then created using the original filename.
Whenever a write would cause the current log file to exceed MaxSize megabytes,
the current file is closed, renamed, and a new log file created with the
original name. Thus, the filename you give Logger is always the "current" log
file.
### Cleaning Up Old Log Files
Whenever a new logfile gets created, old log files may be deleted. The most
recent files according to the encoded timestamp will be retained, up to a
number equal to MaxBackups (or all of them if MaxBackups is 0). Any files
with an encoded timestamp older than MaxAge days are deleted, regardless of
MaxBackups. Note that the time encoded in the timestamp is the rotation
time, which may differ from the last time that file was written to.
If MaxBackups and MaxAge are both 0, no old log files will be deleted.
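For instance, a logger that rotates at 100 megabytes, keeps at most three rotated files, and deletes backups older than a week could be configured as follows (a minimal sketch combining the fields documented above):
```go
log.SetOutput(&lumberjack.Logger{
	Filename:   "/var/log/myapp/foo.log",
	MaxSize:    100, // megabytes before rotation
	MaxBackups: 3,   // keep at most three rotated files
	MaxAge:     7,   // delete backups older than seven days
})
```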
### func (\*Logger) Close
``` go
func (l *Logger) Close() error
```
Close implements io.Closer, and closes the current logfile.
### func (\*Logger) Rotate
``` go
func (l *Logger) Rotate() error
```
Rotate causes Logger to close the existing log file and immediately create a
new one. This is a helper function for applications that want to initiate
rotations outside of the normal rotation rules, such as in response to
SIGHUP. After rotating, this initiates a cleanup of old log files according
to the normal rules.
**Example**
Example of how to rotate in response to SIGHUP.
Code:
```go
l := &lumberjack.Logger{}
log.SetOutput(l)
c := make(chan os.Signal, 1)
signal.Notify(c, syscall.SIGHUP)
go func() {
for {
<-c
l.Rotate()
}
}()
```
### func (\*Logger) Write
``` go
func (l *Logger) Write(p []byte) (n int, err error)
```
Write implements io.Writer. If a write would cause the log file to be larger
than MaxSize, the file is closed, renamed to include a timestamp of the
current time, and a new log file is created using the original log file name.
If the length of the write is greater than MaxSize, an error is returned.
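As a sketch of using Logger directly as an io.Writer (rather than through log.SetOutput), note that a single write larger than MaxSize is refused with an error instead of triggering a rotation:
```go
l := &lumberjack.Logger{
	Filename: "/var/log/myapp/foo.log",
	MaxSize:  1, // megabyte
}
defer l.Close()
if _, err := l.Write(make([]byte, 2*1024*1024)); err != nil {
	// the oversized write is rejected; the current log file is left untouched
	fmt.Println(err)
}
```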
- - -
Generated by [godoc2md](http://godoc.org/github.com/davecheney/godoc2md)

View File

@ -0,0 +1,11 @@
// +build !linux
package lumberjack
import (
"os"
)
func chown(_ string, _ os.FileInfo) error {
return nil
}

View File

@ -0,0 +1,19 @@
package lumberjack
import (
"os"
"syscall"
)
// os_Chown is a var so we can mock it out during tests.
var os_Chown = os.Chown
func chown(name string, info os.FileInfo) error {
f, err := os.OpenFile(name, os.O_CREATE|os.O_WRONLY|os.O_TRUNC, info.Mode())
if err != nil {
return err
}
f.Close()
stat := info.Sys().(*syscall.Stat_t)
return os_Chown(name, int(stat.Uid), int(stat.Gid))
}

View File

@ -0,0 +1,18 @@
package lumberjack_test
import (
"log"
"gopkg.in/natefinch/lumberjack.v2"
)
// To use lumberjack with the standard library's log package, just pass it into
// the SetOutput function when your application starts.
func Example() {
log.SetOutput(&lumberjack.Logger{
Filename: "/var/log/myapp/foo.log",
MaxSize: 500, // megabytes
MaxBackups: 3,
MaxAge: 28, // days
})
}

View File

@ -0,0 +1,104 @@
// +build linux
package lumberjack
import (
"os"
"syscall"
"testing"
)
func TestMaintainMode(t *testing.T) {
currentTime = fakeTime
dir := makeTempDir("TestMaintainMode", t)
defer os.RemoveAll(dir)
filename := logFile(dir)
mode := os.FileMode(0770)
f, err := os.OpenFile(filename, os.O_CREATE|os.O_RDWR, mode)
isNil(err, t)
f.Close()
l := &Logger{
Filename: filename,
MaxBackups: 1,
MaxSize: 100, // megabytes
}
defer l.Close()
b := []byte("boo!")
n, err := l.Write(b)
isNil(err, t)
equals(len(b), n, t)
newFakeTime()
err = l.Rotate()
isNil(err, t)
filename2 := backupFile(dir)
info, err := os.Stat(filename)
isNil(err, t)
info2, err := os.Stat(filename2)
isNil(err, t)
equals(mode, info.Mode(), t)
equals(mode, info2.Mode(), t)
}
func TestMaintainOwner(t *testing.T) {
fakeC := fakeChown{}
os_Chown = fakeC.Set
os_Stat = fakeStat
defer func() {
os_Chown = os.Chown
os_Stat = os.Stat
}()
currentTime = fakeTime
dir := makeTempDir("TestMaintainOwner", t)
defer os.RemoveAll(dir)
filename := logFile(dir)
l := &Logger{
Filename: filename,
MaxBackups: 1,
MaxSize: 100, // megabytes
}
defer l.Close()
b := []byte("boo!")
n, err := l.Write(b)
isNil(err, t)
equals(len(b), n, t)
newFakeTime()
err = l.Rotate()
isNil(err, t)
equals(555, fakeC.uid, t)
equals(666, fakeC.gid, t)
}
type fakeChown struct {
name string
uid int
gid int
}
func (f *fakeChown) Set(name string, uid, gid int) error {
f.name = name
f.uid = uid
f.gid = gid
return nil
}
func fakeStat(name string) (os.FileInfo, error) {
info, err := os.Stat(name)
if err != nil {
return info, err
}
stat := info.Sys().(*syscall.Stat_t)
stat.Uid = 555
stat.Gid = 666
return info, nil
}

View File

@ -0,0 +1,417 @@
// Package lumberjack provides a rolling logger.
//
// Note that this is v2.0 of lumberjack, and should be imported using gopkg.in
// thusly:
//
// import "gopkg.in/natefinch/lumberjack.v2"
//
// The package name remains simply lumberjack, and the code resides at
// https://github.com/natefinch/lumberjack under the v2.0 branch.
//
// Lumberjack is intended to be one part of a logging infrastructure.
// It is not an all-in-one solution, but instead is a pluggable
// component at the bottom of the logging stack that simply controls the files
// to which logs are written.
//
// Lumberjack plays well with any logging package that can write to an
// io.Writer, including the standard library's log package.
//
// Lumberjack assumes that only one process is writing to the output files.
// Using the same lumberjack configuration from multiple processes on the same
// machine will result in improper behavior.
package lumberjack
import (
"fmt"
"io"
"io/ioutil"
"os"
"path/filepath"
"sort"
"strings"
"sync"
"time"
)
const (
backupTimeFormat = "2006-01-02T15-04-05.000"
defaultMaxSize = 100
)
// ensure we always implement io.WriteCloser
var _ io.WriteCloser = (*Logger)(nil)
// Logger is an io.WriteCloser that writes to the specified filename.
//
// Logger opens or creates the logfile on first Write. If the file exists and
// is less than MaxSize megabytes, lumberjack will open and append to that file.
// If the file exists and its size is >= MaxSize megabytes, the file is renamed
// by putting the current time in a timestamp in the name immediately before the
// file's extension (or the end of the filename if there's no extension). A new
// log file is then created using the original filename.
//
// Whenever a write would cause the current log file to exceed MaxSize megabytes,
// the current file is closed, renamed, and a new log file created with the
// original name. Thus, the filename you give Logger is always the "current" log
// file.
//
// Cleaning Up Old Log Files
//
// Whenever a new logfile gets created, old log files may be deleted. The most
// recent files according to the encoded timestamp will be retained, up to a
// number equal to MaxBackups (or all of them if MaxBackups is 0). Any files
// with an encoded timestamp older than MaxAge days are deleted, regardless of
// MaxBackups. Note that the time encoded in the timestamp is the rotation
// time, which may differ from the last time that file was written to.
//
// If MaxBackups and MaxAge are both 0, no old log files will be deleted.
type Logger struct {
// Filename is the file to write logs to. Backup log files will be retained
// in the same directory. It uses <processname>-lumberjack.log in
// os.TempDir() if empty.
Filename string `json:"filename" yaml:"filename"`
// MaxSize is the maximum size in megabytes of the log file before it gets
// rotated. It defaults to 100 megabytes.
MaxSize int `json:"maxsize" yaml:"maxsize"`
// MaxAge is the maximum number of days to retain old log files based on the
// timestamp encoded in their filename. Note that a day is defined as 24
// hours and may not exactly correspond to calendar days due to daylight
// savings, leap seconds, etc. The default is not to remove old log files
// based on age.
MaxAge int `json:"maxage" yaml:"maxage"`
// MaxBackups is the maximum number of old log files to retain. The default
// is to retain all old log files (though MaxAge may still cause them to get
// deleted.)
MaxBackups int `json:"maxbackups" yaml:"maxbackups"`
// LocalTime determines if the time used for formatting the timestamps in
// backup files is the computer's local time. The default is to use UTC
// time.
LocalTime bool `json:"localtime" yaml:"localtime"`
size int64
file *os.File
mu sync.Mutex
}
var (
// currentTime exists so it can be mocked out by tests.
currentTime = time.Now
// os_Stat exists so it can be mocked out by tests.
os_Stat = os.Stat
// megabyte is the conversion factor between MaxSize and bytes. It is a
// variable so tests can mock it out and not need to write megabytes of data
// to disk.
megabyte = 1024 * 1024
)
// Write implements io.Writer. If a write would cause the log file to be larger
// than MaxSize, the file is closed, renamed to include a timestamp of the
// current time, and a new log file is created using the original log file name.
// If the length of the write is greater than MaxSize, an error is returned.
func (l *Logger) Write(p []byte) (n int, err error) {
l.mu.Lock()
defer l.mu.Unlock()
writeLen := int64(len(p))
if writeLen > l.max() {
return 0, fmt.Errorf(
"write length %d exceeds maximum file size %d", writeLen, l.max(),
)
}
if l.file == nil {
if err = l.openExistingOrNew(len(p)); err != nil {
return 0, err
}
}
if l.size+writeLen > l.max() {
if err := l.rotate(); err != nil {
return 0, err
}
}
n, err = l.file.Write(p)
l.size += int64(n)
return n, err
}
// Close implements io.Closer, and closes the current logfile.
func (l *Logger) Close() error {
l.mu.Lock()
defer l.mu.Unlock()
return l.close()
}
// close closes the file if it is open.
func (l *Logger) close() error {
if l.file == nil {
return nil
}
err := l.file.Close()
l.file = nil
return err
}
// Rotate causes Logger to close the existing log file and immediately create a
// new one. This is a helper function for applications that want to initiate
// rotations outside of the normal rotation rules, such as in response to
// SIGHUP. After rotating, this initiates a cleanup of old log files according
// to the normal rules.
func (l *Logger) Rotate() error {
l.mu.Lock()
defer l.mu.Unlock()
return l.rotate()
}
// rotate closes the current file, moves it aside with a timestamp in the name,
// (if it exists), opens a new file with the original filename, and then runs
// cleanup.
func (l *Logger) rotate() error {
if err := l.close(); err != nil {
return err
}
if err := l.openNew(); err != nil {
return err
}
return l.cleanup()
}
// openNew opens a new log file for writing, moving any old log file out of the
// way. This method assumes the file has already been closed.
func (l *Logger) openNew() error {
err := os.MkdirAll(l.dir(), 0744)
if err != nil {
return fmt.Errorf("can't make directories for new logfile: %s", err)
}
name := l.filename()
mode := os.FileMode(0644)
info, err := os_Stat(name)
if err == nil {
// Copy the mode off the old logfile.
mode = info.Mode()
// move the existing file
newname := backupName(name, l.LocalTime)
if err := os.Rename(name, newname); err != nil {
return fmt.Errorf("can't rename log file: %s", err)
}
// this is a no-op anywhere but linux
if err := chown(name, info); err != nil {
return err
}
}
// we use truncate here because this should only get called when we've moved
// the file ourselves. if someone else creates the file in the meantime,
// just wipe out the contents.
f, err := os.OpenFile(name, os.O_CREATE|os.O_WRONLY|os.O_TRUNC, mode)
if err != nil {
return fmt.Errorf("can't open new logfile: %s", err)
}
l.file = f
l.size = 0
return nil
}
// backupName creates a new filename from the given name, inserting a timestamp
// between the filename and the extension, using the local time if requested
// (otherwise UTC).
func backupName(name string, local bool) string {
dir := filepath.Dir(name)
filename := filepath.Base(name)
ext := filepath.Ext(filename)
prefix := filename[:len(filename)-len(ext)]
t := currentTime()
if !local {
t = t.UTC()
}
timestamp := t.Format(backupTimeFormat)
return filepath.Join(dir, fmt.Sprintf("%s-%s%s", prefix, timestamp, ext))
}
// openExistingOrNew opens the logfile if it exists and if the current write
// would not put it over MaxSize. If there is no such file or the write would
// put it over the MaxSize, a new file is created.
func (l *Logger) openExistingOrNew(writeLen int) error {
filename := l.filename()
info, err := os_Stat(filename)
if os.IsNotExist(err) {
return l.openNew()
}
if err != nil {
return fmt.Errorf("error getting log file info: %s", err)
}
if info.Size()+int64(writeLen) >= l.max() {
return l.rotate()
}
file, err := os.OpenFile(filename, os.O_APPEND|os.O_WRONLY, 0644)
if err != nil {
// if we fail to open the old log file for some reason, just ignore
// it and open a new log file.
return l.openNew()
}
l.file = file
l.size = info.Size()
return nil
}
// filename returns the configured Filename, or a default of
// <processname>-lumberjack.log in os.TempDir() if none is set.
func (l *Logger) filename() string {
if l.Filename != "" {
return l.Filename
}
name := filepath.Base(os.Args[0]) + "-lumberjack.log"
return filepath.Join(os.TempDir(), name)
}
// cleanup deletes old log files, keeping at most l.MaxBackups files and
// removing any that are older than MaxAge days.
func (l *Logger) cleanup() error {
if l.MaxBackups == 0 && l.MaxAge == 0 {
return nil
}
files, err := l.oldLogFiles()
if err != nil {
return err
}
var deletes []logInfo
if l.MaxBackups > 0 && l.MaxBackups < len(files) {
deletes = files[l.MaxBackups:]
files = files[:l.MaxBackups]
}
if l.MaxAge > 0 {
diff := time.Duration(int64(24*time.Hour) * int64(l.MaxAge))
cutoff := currentTime().Add(-1 * diff)
for _, f := range files {
if f.timestamp.Before(cutoff) {
deletes = append(deletes, f)
}
}
}
if len(deletes) == 0 {
return nil
}
go deleteAll(l.dir(), deletes)
return nil
}
func deleteAll(dir string, files []logInfo) {
// remove files on a separate goroutine
for _, f := range files {
// what am I going to do, log this?
_ = os.Remove(filepath.Join(dir, f.Name()))
}
}
// oldLogFiles returns the list of backup log files stored in the same
// directory as the current log file, sorted by the timestamp encoded in
// their filenames, newest first.
func (l *Logger) oldLogFiles() ([]logInfo, error) {
files, err := ioutil.ReadDir(l.dir())
if err != nil {
return nil, fmt.Errorf("can't read log file directory: %s", err)
}
logFiles := []logInfo{}
prefix, ext := l.prefixAndExt()
for _, f := range files {
if f.IsDir() {
continue
}
name := l.timeFromName(f.Name(), prefix, ext)
if name == "" {
continue
}
t, err := time.Parse(backupTimeFormat, name)
if err == nil {
logFiles = append(logFiles, logInfo{t, f})
}
// error parsing means that the suffix at the end was not generated
// by lumberjack, and therefore it's not a backup file.
}
sort.Sort(byFormatTime(logFiles))
return logFiles, nil
}
// timeFromName extracts the formatted time from the filename by stripping off
// the filename's prefix and extension. This prevents someone's filename from
// confusing time.Parse.
func (l *Logger) timeFromName(filename, prefix, ext string) string {
if !strings.HasPrefix(filename, prefix) {
return ""
}
filename = filename[len(prefix):]
if !strings.HasSuffix(filename, ext) {
return ""
}
filename = filename[:len(filename)-len(ext)]
return filename
}
// max returns the maximum size in bytes of log files before rolling.
func (l *Logger) max() int64 {
if l.MaxSize == 0 {
return int64(defaultMaxSize * megabyte)
}
return int64(l.MaxSize) * int64(megabyte)
}
// dir returns the directory for the current filename.
func (l *Logger) dir() string {
return filepath.Dir(l.filename())
}
// prefixAndExt returns the filename part and extension part from the Logger's
// filename.
func (l *Logger) prefixAndExt() (prefix, ext string) {
filename := filepath.Base(l.filename())
ext = filepath.Ext(filename)
prefix = filename[:len(filename)-len(ext)] + "-"
return prefix, ext
}
// logInfo is a convenience struct to return the filename and its embedded
// timestamp.
type logInfo struct {
timestamp time.Time
os.FileInfo
}
// byFormatTime sorts by newest time formatted in the name.
type byFormatTime []logInfo
func (b byFormatTime) Less(i, j int) bool {
return b[i].timestamp.After(b[j].timestamp)
}
func (b byFormatTime) Swap(i, j int) {
b[i], b[j] = b[j], b[i]
}
func (b byFormatTime) Len() int {
return len(b)
}

View File

@ -0,0 +1,690 @@
package lumberjack
import (
"encoding/json"
"fmt"
"io/ioutil"
"os"
"path/filepath"
"testing"
"time"
"github.com/BurntSushi/toml"
"gopkg.in/yaml.v1"
)
// !!!NOTE!!!
//
// Running these tests in parallel will almost certainly cause sporadic (or even
// regular) failures, because they're all messing with the same global variable
// that controls the logic's mocked time.Now. So... don't do that.
// Since all the tests use the time to determine filenames etc., we need to
// control the wall clock as much as possible, which means having a wall clock
// that doesn't change unless we want it to.
var fakeCurrentTime = time.Now()
func fakeTime() time.Time {
return fakeCurrentTime
}
func TestNewFile(t *testing.T) {
currentTime = fakeTime
dir := makeTempDir("TestNewFile", t)
defer os.RemoveAll(dir)
l := &Logger{
Filename: logFile(dir),
}
defer l.Close()
b := []byte("boo!")
n, err := l.Write(b)
isNil(err, t)
equals(len(b), n, t)
existsWithLen(logFile(dir), n, t)
fileCount(dir, 1, t)
}
func TestOpenExisting(t *testing.T) {
currentTime = fakeTime
dir := makeTempDir("TestOpenExisting", t)
defer os.RemoveAll(dir)
filename := logFile(dir)
data := []byte("foo!")
err := ioutil.WriteFile(filename, data, 0644)
isNil(err, t)
existsWithLen(filename, len(data), t)
l := &Logger{
Filename: filename,
}
defer l.Close()
b := []byte("boo!")
n, err := l.Write(b)
isNil(err, t)
equals(len(b), n, t)
// make sure the file got appended
existsWithLen(filename, len(data)+n, t)
// make sure no other files were created
fileCount(dir, 1, t)
}
func TestWriteTooLong(t *testing.T) {
currentTime = fakeTime
megabyte = 1
dir := makeTempDir("TestWriteTooLong", t)
defer os.RemoveAll(dir)
l := &Logger{
Filename: logFile(dir),
MaxSize: 5,
}
defer l.Close()
b := []byte("booooooooooooooo!")
n, err := l.Write(b)
notNil(err, t)
equals(0, n, t)
equals(err.Error(),
fmt.Sprintf("write length %d exceeds maximum file size %d", len(b), l.MaxSize), t)
_, err = os.Stat(logFile(dir))
assert(os.IsNotExist(err), t, "File exists, but should not have been created")
}
func TestMakeLogDir(t *testing.T) {
currentTime = fakeTime
dir := time.Now().Format("TestMakeLogDir" + backupTimeFormat)
dir = filepath.Join(os.TempDir(), dir)
defer os.RemoveAll(dir)
filename := logFile(dir)
l := &Logger{
Filename: filename,
}
defer l.Close()
b := []byte("boo!")
n, err := l.Write(b)
isNil(err, t)
equals(len(b), n, t)
existsWithLen(logFile(dir), n, t)
fileCount(dir, 1, t)
}
func TestDefaultFilename(t *testing.T) {
currentTime = fakeTime
dir := os.TempDir()
filename := filepath.Join(dir, filepath.Base(os.Args[0])+"-lumberjack.log")
defer os.Remove(filename)
l := &Logger{}
defer l.Close()
b := []byte("boo!")
n, err := l.Write(b)
isNil(err, t)
equals(len(b), n, t)
existsWithLen(filename, n, t)
}
func TestAutoRotate(t *testing.T) {
currentTime = fakeTime
megabyte = 1
dir := makeTempDir("TestAutoRotate", t)
defer os.RemoveAll(dir)
filename := logFile(dir)
l := &Logger{
Filename: filename,
MaxSize: 10,
}
defer l.Close()
b := []byte("boo!")
n, err := l.Write(b)
isNil(err, t)
equals(len(b), n, t)
existsWithLen(filename, n, t)
fileCount(dir, 1, t)
newFakeTime()
b2 := []byte("foooooo!")
n, err = l.Write(b2)
isNil(err, t)
equals(len(b2), n, t)
// the old logfile should be moved aside and the main logfile should have
// only the last write in it.
existsWithLen(filename, n, t)
// the backup file will use the current fake time and have the old contents.
existsWithLen(backupFile(dir), len(b), t)
fileCount(dir, 2, t)
}
func TestFirstWriteRotate(t *testing.T) {
currentTime = fakeTime
megabyte = 1
dir := makeTempDir("TestFirstWriteRotate", t)
defer os.RemoveAll(dir)
filename := logFile(dir)
l := &Logger{
Filename: filename,
MaxSize: 10,
}
defer l.Close()
start := []byte("boooooo!")
err := ioutil.WriteFile(filename, start, 0600)
isNil(err, t)
newFakeTime()
// this would make us rotate
b := []byte("fooo!")
n, err := l.Write(b)
isNil(err, t)
equals(len(b), n, t)
existsWithLen(filename, n, t)
existsWithLen(backupFile(dir), len(start), t)
fileCount(dir, 2, t)
}
func TestMaxBackups(t *testing.T) {
currentTime = fakeTime
megabyte = 1
dir := makeTempDir("TestMaxBackups", t)
defer os.RemoveAll(dir)
filename := logFile(dir)
l := &Logger{
Filename: filename,
MaxSize: 10,
MaxBackups: 1,
}
defer l.Close()
b := []byte("boo!")
n, err := l.Write(b)
isNil(err, t)
equals(len(b), n, t)
existsWithLen(filename, n, t)
fileCount(dir, 1, t)
newFakeTime()
// this will put us over the max
b2 := []byte("foooooo!")
n, err = l.Write(b2)
isNil(err, t)
equals(len(b2), n, t)
// this will use the new fake time
secondFilename := backupFile(dir)
existsWithLen(secondFilename, len(b), t)
// make sure the old file still exists with the same size.
existsWithLen(filename, n, t)
fileCount(dir, 2, t)
newFakeTime()
// this will make us rotate again
n, err = l.Write(b2)
isNil(err, t)
equals(len(b2), n, t)
// this will use the new fake time
thirdFilename := backupFile(dir)
existsWithLen(thirdFilename, len(b2), t)
existsWithLen(filename, n, t)
// we need to wait a little bit since the files get deleted on a different
// goroutine.
<-time.After(time.Millisecond * 10)
// should only have two files in the dir still
fileCount(dir, 2, t)
// second file name should still exist
existsWithLen(thirdFilename, len(b2), t)
// should have deleted the first backup
notExist(secondFilename, t)
// now test that we don't delete directories or non-logfile files
newFakeTime()
// create a file that is close to but different from the logfile name.
// It shouldn't get caught by our deletion filters.
notlogfile := logFile(dir) + ".foo"
err = ioutil.WriteFile(notlogfile, []byte("data"), 0644)
isNil(err, t)
// Make a directory that exactly matches our log file filters... it still
// shouldn't get caught by the deletion filter since it's a directory.
notlogfiledir := backupFile(dir)
err = os.Mkdir(notlogfiledir, 0700)
isNil(err, t)
newFakeTime()
// this will make us rotate again
n, err = l.Write(b2)
isNil(err, t)
equals(len(b2), n, t)
// this will use the new fake time
fourthFilename := backupFile(dir)
existsWithLen(fourthFilename, len(b2), t)
// we need to wait a little bit since the files get deleted on a different
// goroutine.
<-time.After(time.Millisecond * 10)
// We should have four things in the directory now - the 2 log files, the
// not log file, and the directory
fileCount(dir, 4, t)
// third file name should still exist
existsWithLen(filename, n, t)
existsWithLen(fourthFilename, len(b2), t)
// should have deleted the first filename
notExist(thirdFilename, t)
// the not-a-logfile should still exist
exists(notlogfile, t)
// the directory
exists(notlogfiledir, t)
}
func TestCleanupExistingBackups(t *testing.T) {
// test that if we start with more backup files than we're supposed to have
// in total, that extra ones get cleaned up when we rotate.
currentTime = fakeTime
megabyte = 1
dir := makeTempDir("TestCleanupExistingBackups", t)
defer os.RemoveAll(dir)
// make 3 backup files
data := []byte("data")
backup := backupFile(dir)
err := ioutil.WriteFile(backup, data, 0644)
isNil(err, t)
newFakeTime()
backup = backupFile(dir)
err = ioutil.WriteFile(backup, data, 0644)
isNil(err, t)
newFakeTime()
backup = backupFile(dir)
err = ioutil.WriteFile(backup, data, 0644)
isNil(err, t)
// now create a primary log file with some data
filename := logFile(dir)
err = ioutil.WriteFile(filename, data, 0644)
isNil(err, t)
l := &Logger{
Filename: filename,
MaxSize: 10,
MaxBackups: 1,
}
defer l.Close()
newFakeTime()
b2 := []byte("foooooo!")
n, err := l.Write(b2)
isNil(err, t)
equals(len(b2), n, t)
// we need to wait a little bit since the files get deleted on a different
// goroutine.
<-time.After(time.Millisecond * 10)
// now we should only have 2 files left - the primary and one backup
fileCount(dir, 2, t)
}
func TestMaxAge(t *testing.T) {
currentTime = fakeTime
megabyte = 1
dir := makeTempDir("TestMaxAge", t)
defer os.RemoveAll(dir)
filename := logFile(dir)
l := &Logger{
Filename: filename,
MaxSize: 10,
MaxAge: 1,
}
defer l.Close()
b := []byte("boo!")
n, err := l.Write(b)
isNil(err, t)
equals(len(b), n, t)
existsWithLen(filename, n, t)
fileCount(dir, 1, t)
// two days later
newFakeTime()
b2 := []byte("foooooo!")
n, err = l.Write(b2)
isNil(err, t)
equals(len(b2), n, t)
existsWithLen(backupFile(dir), len(b), t)
// we need to wait a little bit since the files get deleted on a different
// goroutine.
<-time.After(10 * time.Millisecond)
// We should still have 2 log files, since the most recent backup was just
// created.
fileCount(dir, 2, t)
existsWithLen(filename, len(b2), t)
// we should have deleted the old file due to being too old
existsWithLen(backupFile(dir), len(b), t)
// two days later
newFakeTime()
b3 := []byte("foooooo!")
n, err = l.Write(b2)
isNil(err, t)
equals(len(b3), n, t)
existsWithLen(backupFile(dir), len(b2), t)
// we need to wait a little bit since the files get deleted on a different
// goroutine.
<-time.After(10 * time.Millisecond)
// We should have 2 log files - the main log file, and the most recent
// backup. The earlier backup is past the cutoff and should be gone.
fileCount(dir, 2, t)
existsWithLen(filename, len(b3), t)
// we should have deleted the old file due to being too old
existsWithLen(backupFile(dir), len(b2), t)
}
func TestOldLogFiles(t *testing.T) {
currentTime = fakeTime
megabyte = 1
dir := makeTempDir("TestOldLogFiles", t)
defer os.RemoveAll(dir)
filename := logFile(dir)
data := []byte("data")
err := ioutil.WriteFile(filename, data, 07)
isNil(err, t)
// This gives us a time with the same precision as the time we get from the
// timestamp in the name.
t1, err := time.Parse(backupTimeFormat, fakeTime().UTC().Format(backupTimeFormat))
isNil(err, t)
backup := backupFile(dir)
err = ioutil.WriteFile(backup, data, 07)
isNil(err, t)
newFakeTime()
t2, err := time.Parse(backupTimeFormat, fakeTime().UTC().Format(backupTimeFormat))
isNil(err, t)
backup2 := backupFile(dir)
err = ioutil.WriteFile(backup2, data, 07)
isNil(err, t)
l := &Logger{Filename: filename}
files, err := l.oldLogFiles()
isNil(err, t)
equals(2, len(files), t)
// should be sorted by newest file first, which would be t2
equals(t2, files[0].timestamp, t)
equals(t1, files[1].timestamp, t)
}
func TestTimeFromName(t *testing.T) {
l := &Logger{Filename: "/var/log/myfoo/foo.log"}
prefix, ext := l.prefixAndExt()
val := l.timeFromName("foo-2014-05-04T14-44-33.555.log", prefix, ext)
equals("2014-05-04T14-44-33.555", val, t)
val = l.timeFromName("foo-2014-05-04T14-44-33.555", prefix, ext)
equals("", val, t)
val = l.timeFromName("2014-05-04T14-44-33.555.log", prefix, ext)
equals("", val, t)
val = l.timeFromName("foo.log", prefix, ext)
equals("", val, t)
}
func TestLocalTime(t *testing.T) {
currentTime = fakeTime
megabyte = 1
dir := makeTempDir("TestLocalTime", t)
defer os.RemoveAll(dir)
l := &Logger{
Filename: logFile(dir),
MaxSize: 10,
LocalTime: true,
}
defer l.Close()
b := []byte("boo!")
n, err := l.Write(b)
isNil(err, t)
equals(len(b), n, t)
b2 := []byte("fooooooo!")
n2, err := l.Write(b2)
isNil(err, t)
equals(len(b2), n2, t)
existsWithLen(logFile(dir), n2, t)
existsWithLen(backupFileLocal(dir), n, t)
}
func TestRotate(t *testing.T) {
currentTime = fakeTime
dir := makeTempDir("TestRotate", t)
defer os.RemoveAll(dir)
filename := logFile(dir)
l := &Logger{
Filename: filename,
MaxBackups: 1,
MaxSize: 100, // megabytes
}
defer l.Close()
b := []byte("boo!")
n, err := l.Write(b)
isNil(err, t)
equals(len(b), n, t)
existsWithLen(filename, n, t)
fileCount(dir, 1, t)
newFakeTime()
err = l.Rotate()
isNil(err, t)
// we need to wait a little bit since the files get deleted on a different
// goroutine.
<-time.After(10 * time.Millisecond)
filename2 := backupFile(dir)
existsWithLen(filename2, n, t)
existsWithLen(filename, 0, t)
fileCount(dir, 2, t)
newFakeTime()
err = l.Rotate()
isNil(err, t)
// we need to wait a little bit since the files get deleted on a different
// goroutine.
<-time.After(10 * time.Millisecond)
filename3 := backupFile(dir)
existsWithLen(filename3, 0, t)
existsWithLen(filename, 0, t)
fileCount(dir, 2, t)
b2 := []byte("foooooo!")
n, err = l.Write(b2)
isNil(err, t)
equals(len(b2), n, t)
// this will use the new fake time
existsWithLen(filename, n, t)
}
func TestJson(t *testing.T) {
data := []byte(`
{
"filename": "foo",
"maxsize": 5,
"maxage": 10,
"maxbackups": 3,
"localtime": true
}`[1:])
l := Logger{}
err := json.Unmarshal(data, &l)
isNil(err, t)
equals("foo", l.Filename, t)
equals(5, l.MaxSize, t)
equals(10, l.MaxAge, t)
equals(3, l.MaxBackups, t)
equals(true, l.LocalTime, t)
}
func TestYaml(t *testing.T) {
data := []byte(`
filename: foo
maxsize: 5
maxage: 10
maxbackups: 3
localtime: true`[1:])
l := Logger{}
err := yaml.Unmarshal(data, &l)
isNil(err, t)
equals("foo", l.Filename, t)
equals(5, l.MaxSize, t)
equals(10, l.MaxAge, t)
equals(3, l.MaxBackups, t)
equals(true, l.LocalTime, t)
}
func TestToml(t *testing.T) {
data := `
filename = "foo"
maxsize = 5
maxage = 10
maxbackups = 3
localtime = true`[1:]
l := Logger{}
md, err := toml.Decode(data, &l)
isNil(err, t)
equals("foo", l.Filename, t)
equals(5, l.MaxSize, t)
equals(10, l.MaxAge, t)
equals(3, l.MaxBackups, t)
equals(true, l.LocalTime, t)
equals(0, len(md.Undecoded()), t)
}
// makeTempDir creates a directory with a semi-unique name in the OS temp directory.
// It should be based on the name of the test, to keep parallel tests from
// colliding, and must be cleaned up after the test is finished.
func makeTempDir(name string, t testing.TB) string {
dir := time.Now().Format(name + backupTimeFormat)
dir = filepath.Join(os.TempDir(), dir)
isNilUp(os.Mkdir(dir, 0777), t, 1)
return dir
}
// existsWithLen checks that the given file exists and has the correct length.
func existsWithLen(path string, length int, t testing.TB) {
info, err := os.Stat(path)
isNilUp(err, t, 1)
equalsUp(int64(length), info.Size(), t, 1)
}
// logFile returns the primary log file name in the given directory.
func logFile(dir string) string {
return filepath.Join(dir, "foobar.log")
}
func backupFile(dir string) string {
return filepath.Join(dir, "foobar-"+fakeTime().UTC().Format(backupTimeFormat)+".log")
}
func backupFileLocal(dir string) string {
return filepath.Join(dir, "foobar-"+fakeTime().Format(backupTimeFormat)+".log")
}
// logFileLocal returns the log file name in the given directory for the current
// fake time using the local timezone.
func logFileLocal(dir string) string {
return filepath.Join(dir, fakeTime().Format(backupTimeFormat))
}
// fileCount checks that the number of files in the directory is exp.
func fileCount(dir string, exp int, t testing.TB) {
files, err := ioutil.ReadDir(dir)
isNilUp(err, t, 1)
// Make sure no other files were created.
equalsUp(exp, len(files), t, 1)
}
// newFakeTime sets the fake "current time" to two days later.
func newFakeTime() {
fakeCurrentTime = fakeCurrentTime.Add(time.Hour * 24 * 2)
}
func notExist(path string, t testing.TB) {
_, err := os.Stat(path)
assertUp(os.IsNotExist(err), t, 1, "expected to get os.IsNotExist, but instead got %v", err)
}
func exists(path string, t testing.TB) {
_, err := os.Stat(path)
assertUp(err == nil, t, 1, "expected file to exist, but got error from os.Stat: %v", err)
}

View File

@ -0,0 +1,27 @@
// +build linux
package lumberjack_test
import (
"log"
"os"
"os/signal"
"syscall"
"github.com/natefinch/lumberjack"
)
// Example of how to rotate in response to SIGHUP.
func ExampleLogger_Rotate() {
l := &lumberjack.Logger{}
log.SetOutput(l)
c := make(chan os.Signal, 1)
signal.Notify(c, syscall.SIGHUP)
go func() {
for {
<-c
l.Rotate()
}
}()
}

View File

@ -0,0 +1,91 @@
package lumberjack
import (
"fmt"
"path/filepath"
"reflect"
"runtime"
"testing"
)
// assert will log the given message if condition is false.
func assert(condition bool, t testing.TB, msg string, v ...interface{}) {
assertUp(condition, t, 1, msg, v...)
}
// assertUp is like assert, but used inside helper functions, to ensure that
// the file and line number reported by failures corresponds to one or more
// levels up the stack.
func assertUp(condition bool, t testing.TB, caller int, msg string, v ...interface{}) {
if !condition {
_, file, line, _ := runtime.Caller(caller + 1)
v = append([]interface{}{filepath.Base(file), line}, v...)
fmt.Printf("%s:%d: "+msg+"\n", v...)
t.FailNow()
}
}
// equals tests that the two values are equal according to reflect.DeepEqual.
func equals(exp, act interface{}, t testing.TB) {
equalsUp(exp, act, t, 1)
}
// equalsUp is like equals, but used inside helper functions, to ensure that the
// file and line number reported by failures corresponds to one or more levels
// up the stack.
func equalsUp(exp, act interface{}, t testing.TB, caller int) {
if !reflect.DeepEqual(exp, act) {
_, file, line, _ := runtime.Caller(caller + 1)
fmt.Printf("%s:%d: exp: %v (%T), got: %v (%T)\n",
filepath.Base(file), line, exp, exp, act, act)
t.FailNow()
}
}
// isNil reports a failure if the given value is not nil. Note that values
// which cannot be nil will always fail this check.
func isNil(obtained interface{}, t testing.TB) {
isNilUp(obtained, t, 1)
}
// isNilUp is like isNil, but used inside helper functions, to ensure that the
// file and line number reported by failures corresponds to one or more levels
// up the stack.
func isNilUp(obtained interface{}, t testing.TB, caller int) {
if !_isNil(obtained) {
_, file, line, _ := runtime.Caller(caller + 1)
fmt.Printf("%s:%d: expected nil, got: %v\n", filepath.Base(file), line, obtained)
t.FailNow()
}
}
// notNil reports a failure if the given value is nil.
func notNil(obtained interface{}, t testing.TB) {
notNilUp(obtained, t, 1)
}
// notNilUp is like notNil, but used inside helper functions, to ensure that the
// file and line number reported by failures corresponds to one or more levels
// up the stack.
func notNilUp(obtained interface{}, t testing.TB, caller int) {
if _isNil(obtained) {
_, file, line, _ := runtime.Caller(caller + 1)
fmt.Printf("%s:%d: expected non-nil, got: %v\n", filepath.Base(file), line, obtained)
t.FailNow()
}
}
// _isNil is a helper function for isNil and notNil, and should not be used
// directly.
func _isNil(obtained interface{}) bool {
if obtained == nil {
return true
}
switch v := reflect.ValueOf(obtained); v.Kind() {
case reflect.Chan, reflect.Func, reflect.Interface, reflect.Map, reflect.Ptr, reflect.Slice:
return v.IsNil()
}
return false
}

View File

@ -20,6 +20,7 @@ package main
import (
kubeproxy "github.com/GoogleCloudPlatform/kubernetes/cmd/kube-proxy/app"
"github.com/GoogleCloudPlatform/kubernetes/contrib/mesos/pkg/hyperkube"
)
// NewKubeProxy creates a new hyperkube Server object that includes the
@ -28,7 +29,7 @@ func NewKubeProxy() *Server {
s := kubeproxy.NewProxyServer()
hks := Server{
SimpleUsage: "proxy",
SimpleUsage: hyperkube.CommandProxy,
Long: `The Kubernetes proxy server is responsible for taking traffic directed at
services and forwarding it to the appropriate pods. It generally runs on
nodes next to the Kubelet and proxies traffic from local pods to remote pods.

View File

@ -19,6 +19,7 @@ limitations under the License.
package main
import (
"github.com/GoogleCloudPlatform/kubernetes/contrib/mesos/pkg/hyperkube"
scheduler "github.com/GoogleCloudPlatform/kubernetes/plugin/cmd/kube-scheduler/app"
)
@ -28,7 +29,7 @@ func NewScheduler() *Server {
s := scheduler.NewSchedulerServer()
hks := Server{
SimpleUsage: "scheduler",
SimpleUsage: hyperkube.CommandScheduler,
Long: "Implements a Kubernetes scheduler. This will assign pods to kubelets based on capacity and constraints.",
Run: func(_ *Server, args []string) error {
return s.Run(args)

View File

@ -32,7 +32,7 @@ func main() {
runtime.GOMAXPROCS(runtime.NumCPU())
s := service.NewKubeletExecutorServer()
s.AddStandaloneFlags(pflag.CommandLine)
s.AddFlags(pflag.CommandLine)
util.InitFlags()
util.InitLogs()

View File

@ -19,6 +19,7 @@ package main
import (
"github.com/GoogleCloudPlatform/kubernetes/contrib/mesos/pkg/controllermanager"
"github.com/GoogleCloudPlatform/kubernetes/contrib/mesos/pkg/hyperkube"
)
// NewHyperkubeServer creates a new hyperkube Server object that includes the
@ -27,7 +28,7 @@ func NewControllerManager() *Server {
s := controllermanager.NewCMServer()
hks := Server{
SimpleUsage: "controller-manager",
SimpleUsage: hyperkube.CommandControllerManager,
Long: "A server that runs a set of active components. This includes replication controllers, service endpoints and nodes.",
Run: func(_ *Server, args []string) error {
return s.Run(args)

View File

@ -18,14 +18,15 @@ package main
import (
"github.com/GoogleCloudPlatform/kubernetes/contrib/mesos/pkg/executor/service"
"github.com/GoogleCloudPlatform/kubernetes/contrib/mesos/pkg/hyperkube"
)
// NewHyperkubeServer creates a new hyperkube Server object that includes the
// description and flags.
func NewKubeletExecutor() *Server {
s := service.NewHyperKubeletExecutorServer()
s := service.NewKubeletExecutorServer()
hks := Server{
SimpleUsage: "executor",
SimpleUsage: hyperkube.CommandExecutor,
Long: `The kubelet-executor binary is responsible for maintaining a set of containers
on a particular node. It syncs data from a specialized Mesos source that tracks
task launches and kills. It then queries Docker to see what is currently
@ -35,6 +36,6 @@ containers by starting or stopping Docker containers.`,
return s.Run(hks, args)
},
}
s.AddHyperkubeFlags(hks.Flags())
s.AddFlags(hks.Flags())
return &hks
}

View File

@ -0,0 +1,39 @@
/*
Copyright 2015 The Kubernetes Authors All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package main
import (
"github.com/GoogleCloudPlatform/kubernetes/contrib/mesos/pkg/hyperkube"
"github.com/GoogleCloudPlatform/kubernetes/contrib/mesos/pkg/minion"
)
// NewMinion creates a new hyperkube Server object that includes the
// description and flags.
func NewMinion() *Server {
s := minion.NewMinionServer()
hks := Server{
SimpleUsage: hyperkube.CommandMinion,
Long: `Implements a Kubernetes minion. This will launch the proxy and executor.`,
Run: func(hks *Server, args []string) error {
return s.Run(hks, args)
},
}
s.AddMinionFlags(hks.Flags())
s.AddExecutorFlags(hks.Flags())
return &hks
}

View File

@ -18,6 +18,7 @@ limitations under the License.
package main
import (
"github.com/GoogleCloudPlatform/kubernetes/contrib/mesos/pkg/hyperkube"
"github.com/GoogleCloudPlatform/kubernetes/contrib/mesos/pkg/scheduler/service"
)
@ -27,7 +28,7 @@ func NewScheduler() *Server {
s := service.NewSchedulerServer()
hks := Server{
SimpleUsage: "scheduler",
SimpleUsage: hyperkube.CommandScheduler,
Long: `Implements the Kubernetes-Mesos scheduler. This will launch Mesos tasks which
result in pods assigned to kubelets based on capacity and constraints.`,
Run: func(hks *Server, args []string) error {

View File

@ -32,6 +32,7 @@ func main() {
hk.AddServer(NewScheduler())
hk.AddServer(NewKubeletExecutor())
hk.AddServer(NewKubeProxy())
hk.AddServer(NewMinion())
hk.RunToExit(os.Args)
}

View File

@ -19,6 +19,7 @@ package main
import (
kubeapiserver "github.com/GoogleCloudPlatform/kubernetes/cmd/kube-apiserver/app"
"github.com/GoogleCloudPlatform/kubernetes/contrib/mesos/pkg/hyperkube"
)
// NewKubeAPIServer creates a new hyperkube Server object that includes the
@ -27,7 +28,7 @@ func NewKubeAPIServer() *Server {
s := kubeapiserver.NewAPIServer()
hks := Server{
SimpleUsage: "apiserver",
SimpleUsage: hyperkube.CommandApiserver,
Long: "The main API entrypoint and interface to the storage system. The API server is also the focal point for all authorization decisions.",
Run: func(_ *Server, args []string) error {
return s.Run(args)

View File

@ -19,6 +19,7 @@ package main
import (
kubeproxy "github.com/GoogleCloudPlatform/kubernetes/cmd/kube-proxy/app"
"github.com/GoogleCloudPlatform/kubernetes/contrib/mesos/pkg/hyperkube"
)
// NewKubeProxy creates a new hyperkube Server object that includes the
@ -27,7 +28,7 @@ func NewKubeProxy() *Server {
s := kubeproxy.NewProxyServer()
hks := Server{
SimpleUsage: "proxy",
SimpleUsage: hyperkube.CommandProxy,
Long: `The Kubernetes proxy server is responsible for taking traffic directed at
services and forwarding it to the appropriate pods. It generally runs on
nodes next to the Kubelet and proxies traffic from local pods to remote pods.

View File

@ -17,14 +17,12 @@ limitations under the License.
package service
import (
"bufio"
"fmt"
"io"
"math/rand"
"net"
"net/http"
"os"
"os/exec"
"path"
"path/filepath"
"strconv"
@ -37,7 +35,6 @@ import (
"github.com/GoogleCloudPlatform/kubernetes/contrib/mesos/pkg/executor/config"
"github.com/GoogleCloudPlatform/kubernetes/contrib/mesos/pkg/hyperkube"
"github.com/GoogleCloudPlatform/kubernetes/contrib/mesos/pkg/redirfd"
"github.com/GoogleCloudPlatform/kubernetes/contrib/mesos/pkg/runtime"
"github.com/GoogleCloudPlatform/kubernetes/pkg/api"
"github.com/GoogleCloudPlatform/kubernetes/pkg/client"
"github.com/GoogleCloudPlatform/kubernetes/pkg/credentialprovider"
@ -49,7 +46,6 @@ import (
"github.com/GoogleCloudPlatform/kubernetes/pkg/util"
"github.com/GoogleCloudPlatform/kubernetes/pkg/util/mount"
log "github.com/golang/glog"
"github.com/kardianos/osext"
bindings "github.com/mesos/mesos-go/executor"
"github.com/spf13/pflag"
@ -63,11 +59,6 @@ const (
type KubeletExecutorServer struct {
*app.KubeletServer
RunProxy bool
ProxyLogV int
ProxyExec string
ProxyLogfile string
ProxyBindall bool
SuicideTimeout time.Duration
ShutdownFD int
ShutdownFIFO string
@ -96,9 +87,6 @@ func findMesosCgroup(prefix string) string {
func NewKubeletExecutorServer() *KubeletExecutorServer {
k := &KubeletExecutorServer{
KubeletServer: app.NewKubeletServer(),
RunProxy: true,
ProxyExec: "./kube-proxy",
ProxyLogfile: "./proxy-log",
SuicideTimeout: config.DefaultSuicideTimeout,
cgroupPrefix: config.DefaultCgroupPrefix,
}
@ -113,40 +101,14 @@ func NewKubeletExecutorServer() *KubeletExecutorServer {
return k
}
func NewHyperKubeletExecutorServer() *KubeletExecutorServer {
s := NewKubeletExecutorServer()
// cache this for later use
binary, err := osext.Executable()
if err != nil {
log.Fatalf("failed to determine currently running executable: %v", err)
}
s.ProxyExec = binary
return s
}
func (s *KubeletExecutorServer) addCoreFlags(fs *pflag.FlagSet) {
func (s *KubeletExecutorServer) AddFlags(fs *pflag.FlagSet) {
s.KubeletServer.AddFlags(fs)
fs.BoolVar(&s.RunProxy, "run-proxy", s.RunProxy, "Maintain a running kube-proxy instance as a child proc of this kubelet-executor.")
fs.IntVar(&s.ProxyLogV, "proxy-logv", s.ProxyLogV, "Log verbosity of the child kube-proxy.")
fs.StringVar(&s.ProxyLogfile, "proxy-logfile", s.ProxyLogfile, "Path to the kube-proxy log file.")
fs.BoolVar(&s.ProxyBindall, "proxy-bindall", s.ProxyBindall, "When true will cause kube-proxy to bind to 0.0.0.0.")
fs.DurationVar(&s.SuicideTimeout, "suicide-timeout", s.SuicideTimeout, "Self-terminate after this period of inactivity. Zero disables suicide watch.")
fs.IntVar(&s.ShutdownFD, "shutdown-fd", s.ShutdownFD, "File descriptor used to signal shutdown to external watchers, requires shutdown-fifo flag")
fs.StringVar(&s.ShutdownFIFO, "shutdown-fifo", s.ShutdownFIFO, "FIFO used to signal shutdown to external watchers, requires shutdown-fd flag")
fs.StringVar(&s.cgroupPrefix, "cgroup-prefix", s.cgroupPrefix, "The cgroup prefix concatenated with MESOS_DIRECTORY must give the executor cgroup set by Mesos")
}
func (s *KubeletExecutorServer) AddStandaloneFlags(fs *pflag.FlagSet) {
s.addCoreFlags(fs)
fs.StringVar(&s.ProxyExec, "proxy-exec", s.ProxyExec, "Path to the kube-proxy executable.")
}
func (s *KubeletExecutorServer) AddHyperkubeFlags(fs *pflag.FlagSet) {
s.addCoreFlags(fs)
}
// returns a Closer that should be closed to signal impending shutdown, but only if ShutdownFD
// and ShutdownFIFO were specified. if they are specified, then this func blocks until there's
// a reader on the FIFO stream.
@ -429,11 +391,6 @@ func (ks *KubeletExecutorServer) createAndInitKubelet(
k := &kubeletExecutor{
Kubelet: klet,
runProxy: ks.RunProxy,
proxyLogV: ks.ProxyLogV,
proxyExec: ks.ProxyExec,
proxyLogfile: ks.ProxyLogfile,
proxyBindall: ks.ProxyBindall,
address: ks.Address,
dockerClient: kc.DockerClient,
hks: hks,
@ -468,11 +425,6 @@ type kubeletExecutor struct {
*kubelet.Kubelet
initialize sync.Once
driver bindings.ExecutorDriver
runProxy bool
proxyLogV int
proxyExec string
proxyLogfile string
proxyBindall bool
address util.IP
dockerClient dockertools.DockerInterface
hks hyperkube.Interface
@ -485,9 +437,6 @@ func (kl *kubeletExecutor) ListenAndServe(address net.IP, port uint, tlsOptions
// this func could be called many times, depending how often the HTTP server crashes,
// so only execute certain initialization procs once
kl.initialize.Do(func() {
if kl.runProxy {
go runtime.Until(kl.runProxyService, 5*time.Second, kl.executorDone)
}
go func() {
if _, err := kl.driver.Run(); err != nil {
log.Fatalf("executor driver failed: %v", err)
@ -499,103 +448,6 @@ func (kl *kubeletExecutor) ListenAndServe(address net.IP, port uint, tlsOptions
kubelet.ListenAndServeKubeletServer(kl, address, port, tlsOptions, enableDebuggingHandlers)
}
// this function blocks as long as the proxy service is running; intended to be
// executed asynchronously.
func (kl *kubeletExecutor) runProxyService() {
log.Infof("Starting proxy process...")
const KM_PROXY = "proxy" //TODO(jdef) constant should be shared with km package
args := []string{}
if kl.hks.FindServer(KM_PROXY) {
args = append(args, KM_PROXY)
log.V(1).Infof("attempting to using km proxy service")
} else if _, err := os.Stat(kl.proxyExec); os.IsNotExist(err) {
log.Errorf("failed to locate proxy executable at '%v' and km not present: %v", kl.proxyExec, err)
return
}
bindAddress := "0.0.0.0"
if !kl.proxyBindall {
bindAddress = kl.address.String()
}
args = append(args,
fmt.Sprintf("--bind-address=%s", bindAddress),
fmt.Sprintf("--v=%d", kl.proxyLogV),
"--logtostderr=true",
)
// add client.Config args here. proxy still calls client.BindClientConfigFlags
appendStringArg := func(name, value string) {
if value != "" {
args = append(args, fmt.Sprintf("--%s=%s", name, value))
}
}
appendStringArg("master", kl.clientConfig.Host)
/* TODO(jdef) move these flags to a config file pointed to by --kubeconfig
appendStringArg("api-version", kl.clientConfig.Version)
appendStringArg("client-certificate", kl.clientConfig.CertFile)
appendStringArg("client-key", kl.clientConfig.KeyFile)
appendStringArg("certificate-authority", kl.clientConfig.CAFile)
args = append(args, fmt.Sprintf("--insecure-skip-tls-verify=%t", kl.clientConfig.Insecure))
*/
log.Infof("Spawning process executable %s with args '%+v'", kl.proxyExec, args)
cmd := exec.Command(kl.proxyExec, args...)
if _, err := cmd.StdoutPipe(); err != nil {
log.Fatal(err)
}
proxylogs, err := cmd.StderrPipe()
if err != nil {
log.Fatal(err)
}
//TODO(jdef) append instead of truncate? what if the disk is full?
logfile, err := os.Create(kl.proxyLogfile)
if err != nil {
log.Fatal(err)
}
defer logfile.Close()
ch := make(chan struct{})
go func() {
defer func() {
select {
case <-ch:
log.Infof("killing proxy process..")
if err = cmd.Process.Kill(); err != nil {
log.Errorf("failed to kill proxy process: %v", err)
}
default:
}
}()
writer := bufio.NewWriter(logfile)
defer writer.Flush()
<-ch
written, err := io.Copy(writer, proxylogs)
if err != nil {
log.Errorf("error writing data to proxy log: %v", err)
}
log.Infof("wrote %d bytes to proxy log", written)
}()
// if the proxy fails to start then we exit the executor, otherwise
// wait for the proxy process to end (and release resources after).
if err := cmd.Start(); err != nil {
log.Fatal(err)
}
close(ch)
if err := cmd.Wait(); err != nil {
log.Error(err)
}
}
// runs the main kubelet loop, closing the kubeletFinished chan when the loop exits.
// never returns.
func (kl *kubeletExecutor) Run(updates <-chan kubelet.PodUpdate) {

View File

@ -0,0 +1,26 @@
/*
Copyright 2015 The Kubernetes Authors All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package hyperkube
const (
CommandApiserver = "apiserver"
CommandControllerManager = "controller-manager"
CommandExecutor = "executor"
CommandMinion = "minion"
CommandProxy = "proxy"
CommandScheduler = "scheduler"
)

View File

@ -0,0 +1,31 @@
/*
Copyright 2015 The Kubernetes Authors All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package config
import (
"github.com/GoogleCloudPlatform/kubernetes/pkg/api/resource"
)
const (
DefaultLogMaxBackups = 5 // how many backups to keep
DefaultLogMaxAgeInDays = 7 // how many days to retain old log files at most
)
// DefaultLogMaxSize returns the maximum log file size before rotation
func DefaultLogMaxSize() resource.Quantity {
return *resource.NewQuantity(10*1024*1024, resource.BinarySI)
}
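These defaults presumably feed the minion's log rotation via the newly vendored lumberjack package; the following is a minimal sketch of that wiring, not part of this change, with the import path and the newRollingLogger helper name being assumptions:
```go
package minion

import (
	"gopkg.in/natefinch/lumberjack.v2"

	"github.com/GoogleCloudPlatform/kubernetes/contrib/mesos/pkg/minion/config"
)

// newRollingLogger (hypothetical helper) converts the configured maximum size,
// a resource.Quantity in bytes, into lumberjack's megabyte-based MaxSize and
// applies the backup and age defaults from the config package.
func newRollingLogger(path string) *lumberjack.Logger {
	maxSize := config.DefaultLogMaxSize()
	return &lumberjack.Logger{
		Filename:   path,
		MaxBackups: config.DefaultLogMaxBackups,
		MaxAge:     config.DefaultLogMaxAgeInDays,
		MaxSize:    int(maxSize.Value() / (1024 * 1024)), // bytes -> megabytes
	}
}
```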

View File

@ -0,0 +1,18 @@
/*
Copyright 2015 The Kubernetes Authors All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
// Package config contains minion configuration constants.
package config

View File

@ -0,0 +1,18 @@
/*
Copyright 2015 The Kubernetes Authors All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
// Package minion contains the executor and proxy bootstrap code for a Mesos slave
package minion

View File

@ -0,0 +1,25 @@
/*
Copyright 2015 The Kubernetes Authors All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package minion
import (
log "github.com/golang/glog"
)
func enterPrivateMountNamespace() {
log.Info("Skipping mount namespace, only available on Linux")
}

View File

@ -0,0 +1,55 @@
/*
Copyright 2015 The Kubernetes Authors All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package minion
import (
"syscall"
log "github.com/golang/glog"
)
// enterPrivateMountNamespace does just that: the current mount ns is unshared (isolated)
// and then made a slave to the root mount / of the parent mount ns (mount events from /
// or its children that happen in the parent ns propagate to us).
//
// This is not yet compatible with volume plugins as implemented by the kubelet, which
// depends on using host-volume args to 'docker run' to attach plugin volumes to containers
// at runtime. As such, docker needs to be able to see the volumes mounted by k8s plugins,
// which is impossible if the k8s volume plugins are running in an isolated mount ns.
//
// An alternative approach would be to always run the kubelet in the host's mount ns and
// rely upon mesos to forcibly unmount bindings in the task sandbox before rmdir'ing it:
// https://issues.apache.org/jira/browse/MESOS-349.
//
// Use at your own risk.
func enterPrivateMountNamespace() {
log.Warningln("EXPERIMENTAL FEATURE: entering private mount ns")
// enter a new mount NS, useful for isolating changes to the mount table
// that are made by the kubelet for storage volumes.
err := syscall.Unshare(syscall.CLONE_NEWNS)
if err != nil {
log.Fatalf("failed to enter private mount NS: %v", err)
}
// make the rootfs / rslave to the parent mount NS so that we
// pick up on any changes made there
err = syscall.Mount("", "/", "dontcare", syscall.MS_REC|syscall.MS_SLAVE, "")
if err != nil {
log.Fatalf("failed to mark / rslave: %v", err)
}
}

View File

@ -0,0 +1,271 @@
/*
Copyright 2015 The Kubernetes Authors All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package minion
import (
"fmt"
"io"
"io/ioutil"
"os"
"os/exec"
"time"
exservice "github.com/GoogleCloudPlatform/kubernetes/contrib/mesos/pkg/executor/service"
"github.com/GoogleCloudPlatform/kubernetes/contrib/mesos/pkg/hyperkube"
"github.com/GoogleCloudPlatform/kubernetes/contrib/mesos/pkg/minion/config"
"github.com/GoogleCloudPlatform/kubernetes/contrib/mesos/pkg/runtime"
"github.com/GoogleCloudPlatform/kubernetes/pkg/api/resource"
"github.com/GoogleCloudPlatform/kubernetes/pkg/client"
log "github.com/golang/glog"
"github.com/kardianos/osext"
"github.com/spf13/pflag"
"gopkg.in/natefinch/lumberjack.v2"
)
type MinionServer struct {
// embed the executor server to be able to use its flags
// TODO(sttts): get rid of this mixing of the minion and the executor server with a multiflags implementation for km
KubeletExecutorServer *exservice.KubeletExecutorServer
privateMountNS bool
hks hyperkube.Interface
clientConfig *client.Config
kmBinary string
done chan struct{} // closed when shutting down
exit chan error // to signal fatal errors
logMaxSize resource.Quantity
logMaxBackups int
logMaxAgeInDays int
runProxy bool
proxyLogV int
proxyBindall bool
}
// NewMinionServer creates the MinionServer struct with default values to be used by hyperkube
func NewMinionServer() *MinionServer {
s := &MinionServer{
KubeletExecutorServer: exservice.NewKubeletExecutorServer(),
privateMountNS: false, // disabled until Docker supports customization of the parent mount namespace
done: make(chan struct{}),
exit: make(chan error),
logMaxSize: config.DefaultLogMaxSize(),
logMaxBackups: config.DefaultLogMaxBackups,
logMaxAgeInDays: config.DefaultLogMaxAgeInDays,
runProxy: true,
}
// cache this for later use
binary, err := osext.Executable()
if err != nil {
log.Fatalf("failed to determine currently running executable: %v", err)
}
s.kmBinary = binary
return s
}
// filterArgsByFlagSet returns the args that are accepted by the given flag set
// and, as a second list, those that are not.
func filterArgsByFlagSet(args []string, flags *pflag.FlagSet) ([]string, []string) {
matched := []string{}
notMatched := []string{}
for _, arg := range args {
err := flags.Parse([]string{arg})
if err != nil {
notMatched = append(notMatched, arg)
} else {
matched = append(matched, arg)
}
}
return matched, notMatched
}
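For illustration only (not part of this commit), a minimal standalone sketch of how this splitting behaves; the concrete flag names below are made up for the example:

package main

import (
	"fmt"
	"io/ioutil"

	"github.com/spf13/pflag"
)

// filterArgsByFlagSet is reproduced here as in the minion package above:
// args accepted by the flag set go into the first list, the rest into the second.
func filterArgsByFlagSet(args []string, flags *pflag.FlagSet) ([]string, []string) {
	matched := []string{}
	notMatched := []string{}
	for _, arg := range args {
		if err := flags.Parse([]string{arg}); err != nil {
			notMatched = append(notMatched, arg)
		} else {
			matched = append(matched, arg)
		}
	}
	return matched, notMatched
}

func main() {
	executorFlags := pflag.NewFlagSet("executor", pflag.ContinueOnError)
	executorFlags.SetOutput(ioutil.Discard)
	executorFlags.String("hostname-override", "", "example executor flag")

	args := []string{"--hostname-override=slave1", "--run-proxy=false"}
	matched, rest := filterArgsByFlagSet(args, executorFlags)
	fmt.Println(matched) // [--hostname-override=slave1]
	fmt.Println(rest)    // [--run-proxy=false]
}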
func (ms *MinionServer) launchProxyServer() {
bindAddress := "0.0.0.0"
if !ms.proxyBindall {
bindAddress = ms.KubeletExecutorServer.Address.String()
}
args := []string{
fmt.Sprintf("--bind-address=%s", bindAddress),
fmt.Sprintf("--v=%d", ms.proxyLogV),
"--logtostderr=true",
}
if ms.clientConfig.Host != "" {
args = append(args, fmt.Sprintf("--master=%s", ms.clientConfig.Host))
}
ms.launchHyperkubeServer(hyperkube.CommandProxy, &args, "proxy.log")
}
func (ms *MinionServer) launchExecutorServer() {
allArgs := os.Args[1:]
// filter out minion flags, leaving those for the executor
executorFlags := pflag.NewFlagSet("executor", pflag.ContinueOnError)
executorFlags.SetOutput(ioutil.Discard)
ms.AddExecutorFlags(executorFlags)
executorArgs, _ := filterArgsByFlagSet(allArgs, executorFlags)
// run executor and quit minion server when this exits cleanly
err := ms.launchHyperkubeServer(hyperkube.CommandExecutor, &executorArgs, "executor.log")
if err != nil {
// just return, executor will be restarted on error
log.Error(err)
return
}
log.Info("Executor exited cleanly, stopping the minion")
ms.exit <- nil
}
func (ms *MinionServer) launchHyperkubeServer(server string, args *[]string, logFileName string) error {
log.V(2).Infof("Spawning hyperkube %v with args '%+v'", server, args)
// prepare parameters
kmArgs := []string{server}
for _, arg := range *args {
kmArgs = append(kmArgs, arg)
}
// create command
cmd := exec.Command(ms.kmBinary, kmArgs...)
if _, err := cmd.StdoutPipe(); err != nil {
// fatal error => terminate minion
err = fmt.Errorf("error getting stdout of %v: %v", server, err)
ms.exit <- err
return err
}
stderrLogs, err := cmd.StderrPipe()
if err != nil {
// fatal error => terminate minion
err = fmt.Errorf("error getting stderr of %v: %v", server, err)
ms.exit <- err
return err
}
ch := make(chan struct{})
go func() {
defer func() {
select {
case <-ch:
log.Infof("killing %v process...", server)
if err = cmd.Process.Kill(); err != nil {
log.Errorf("failed to kill %v process: %v", server, err)
}
default:
}
}()
maxSize := ms.logMaxSize.Value()
if maxSize > 0 {
// convert to MB
maxSize = maxSize / 1024 / 1024
if maxSize == 0 {
log.Warning("maximal log file size is rounded to 1 MB")
maxSize = 1
}
}
writer := &lumberjack.Logger{
Filename: logFileName,
MaxSize: int(maxSize),
MaxBackups: ms.logMaxBackups,
MaxAge: ms.logMaxAgeInDays,
}
defer writer.Close()
log.V(2).Infof("Starting logging for %v: max log file size %d MB, keeping %d backups, for %d days", server, maxSize, ms.logMaxBackups, ms.logMaxAgeInDays)
<-ch
written, err := io.Copy(writer, stderrLogs)
if err != nil {
log.Errorf("error writing data to %v: %v", logFileName, err)
}
log.Infof("wrote %d bytes to %v", written, logFileName)
}()
// if the server fails to start then we exit the executor, otherwise
// wait for the spawned process to end (and release resources after).
if err := cmd.Start(); err != nil {
// fatal error => terminate minion
err = fmt.Errorf("error starting %v: %v", server, err)
ms.exit <- err
return err
}
close(ch)
if err := cmd.Wait(); err != nil {
log.Errorf("%v exited with error: %v", server, err)
err = fmt.Errorf("%v exited with error: %v", server, err)
return err
}
return nil
}
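As an aside (not part of this commit): the MaxSize handed to lumberjack above is in megabytes. A minimal sketch of the rounding performed in launchHyperkubeServer, assuming resource.MustParse is available as elsewhere in this tree:

package main

import (
	"fmt"

	"github.com/GoogleCloudPlatform/kubernetes/pkg/api/resource"
)

// quantityToMB mirrors launchHyperkubeServer: a byte quantity is converted to
// whole megabytes, and any non-zero size is bumped up to at least 1 MB.
func quantityToMB(q resource.Quantity) int {
	maxSize := q.Value()
	if maxSize > 0 {
		maxSize = maxSize / 1024 / 1024
		if maxSize == 0 {
			maxSize = 1
		}
	}
	return int(maxSize)
}

func main() {
	fmt.Println(quantityToMB(resource.MustParse("10Mi")))  // 10
	fmt.Println(quantityToMB(resource.MustParse("512Ki"))) // 1
}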
// Run launches the proxy (optionally) and executor subprocesses, restarting them on failure,
// and blocks until a fatal error occurs or the executor exits cleanly.
func (ms *MinionServer) Run(hks hyperkube.Interface, _ []string) error {
if ms.privateMountNS {
// only the Linux version will do anything
enterPrivateMountNamespace()
}
// create apiserver client
clientConfig, err := ms.KubeletExecutorServer.CreateAPIServerClientConfig()
if err != nil {
// required for k8sm since we need to send api.Binding information
// back to the apiserver
log.Fatalf("No API client: %v", err)
}
ms.clientConfig = clientConfig
// run subprocesses until ms.done is closed on return of this function
defer close(ms.done)
if ms.runProxy {
go runtime.Until(ms.launchProxyServer, 5*time.Second, ms.done)
}
go runtime.Until(ms.launchExecutorServer, 5*time.Second, ms.done)
// wait until minion exit is requested
// don't close ms.exit here to avoid panics of goroutines writing an error to it
return <-ms.exit
}
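As an aside (not part of this commit), Run relies on an Until-style loop to keep the proxy and executor restarting until ms.done is closed. A minimal sketch of the contract assumed of runtime.Until here, not its actual implementation:

package main

import "time"

// until re-runs f, waiting period between invocations, and returns once stopCh
// is closed; an illustration only of the restart semantics assumed above.
func until(f func(), period time.Duration, stopCh <-chan struct{}) {
	for {
		select {
		case <-stopCh:
			return
		default:
		}
		f()
		select {
		case <-stopCh:
			return
		case <-time.After(period):
		}
	}
}

func main() {
	stop := make(chan struct{})
	go until(func() { /* e.g. launchProxyServer */ }, 5*time.Second, stop)
	time.Sleep(12 * time.Second)
	close(stop)
}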
func (ms *MinionServer) AddExecutorFlags(fs *pflag.FlagSet) {
ms.KubeletExecutorServer.AddFlags(fs)
}
func (ms *MinionServer) AddMinionFlags(fs *pflag.FlagSet) {
// general minion flags
fs.BoolVar(&ms.privateMountNS, "private-mountns", ms.privateMountNS, "Enter a private mount NS before spawning procs (linux only). Experimental, not yet compatible with k8s volumes.")
// log file flags
fs.Var(resource.NewQuantityFlagValue(&ms.logMaxSize), "max-log-size", "Maximum log file size for the executor and proxy before rotation")
fs.IntVar(&ms.logMaxAgeInDays, "max-log-age", ms.logMaxAgeInDays, "Maximum log file age of the executor and proxy in days")
fs.IntVar(&ms.logMaxBackups, "max-log-backups", ms.logMaxBackups, "Maximum log file backups of the executor and proxy to keep after rotation")
// proxy flags
fs.BoolVar(&ms.runProxy, "run-proxy", ms.runProxy, "Maintain a running kube-proxy instance as a child proc of this kubelet-executor.")
fs.IntVar(&ms.proxyLogV, "proxy-logv", ms.proxyLogV, "Log verbosity of the child kube-proxy.")
fs.BoolVar(&ms.proxyBindall, "proxy-bindall", ms.proxyBindall, "When true will cause kube-proxy to bind to 0.0.0.0.")
}
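For completeness, a hedged sketch (not part of this commit) of how a caller such as hyperkube might wire these flag helpers before invoking Run; the import path mirrors the files above, and the surrounding hyperkube plumbing is omitted:

package main

import (
	"os"

	"github.com/GoogleCloudPlatform/kubernetes/contrib/mesos/pkg/minion"
	"github.com/spf13/pflag"
)

func main() {
	ms := minion.NewMinionServer()
	fs := pflag.NewFlagSet("minion", pflag.ExitOnError)
	ms.AddMinionFlags(fs)
	ms.AddExecutorFlags(fs)
	fs.Parse(os.Args[1:])
	// hyperkube would then call ms.Run(hks, args) with its hyperkube.Interface value.
}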

View File

@ -36,6 +36,7 @@ import (
"github.com/GoogleCloudPlatform/kubernetes/contrib/mesos/pkg/election"
execcfg "github.com/GoogleCloudPlatform/kubernetes/contrib/mesos/pkg/executor/config"
"github.com/GoogleCloudPlatform/kubernetes/contrib/mesos/pkg/hyperkube"
minioncfg "github.com/GoogleCloudPlatform/kubernetes/contrib/mesos/pkg/minion/config"
"github.com/GoogleCloudPlatform/kubernetes/contrib/mesos/pkg/profile"
"github.com/GoogleCloudPlatform/kubernetes/contrib/mesos/pkg/runtime"
"github.com/GoogleCloudPlatform/kubernetes/contrib/mesos/pkg/scheduler"
@ -46,6 +47,7 @@ import (
mresource "github.com/GoogleCloudPlatform/kubernetes/contrib/mesos/pkg/scheduler/resource"
"github.com/GoogleCloudPlatform/kubernetes/contrib/mesos/pkg/scheduler/uid"
"github.com/GoogleCloudPlatform/kubernetes/pkg/api"
"github.com/GoogleCloudPlatform/kubernetes/pkg/api/resource"
"github.com/GoogleCloudPlatform/kubernetes/pkg/client"
"github.com/GoogleCloudPlatform/kubernetes/pkg/clientauth"
"github.com/GoogleCloudPlatform/kubernetes/pkg/master/ports"
@ -79,29 +81,37 @@ const (
)
type SchedulerServer struct {
Port int
Address util.IP
EnableProfiling bool
AuthPath string
APIServerList util.StringList
EtcdServerList util.StringList
EtcdConfigFile string
AllowPrivileged bool
ExecutorPath string
ProxyPath string
MesosMaster string
MesosUser string
MesosRole string
MesosAuthPrincipal string
MesosAuthSecretFile string
Checkpoint bool
FailoverTimeout float64
ExecutorBindall bool
ExecutorRunProxy bool
ExecutorProxyBindall bool
ExecutorLogV int
ExecutorSuicideTimeout time.Duration
ExecutorCgroupPrefix string
Port int
Address util.IP
EnableProfiling bool
AuthPath string
APIServerList util.StringList
EtcdServerList util.StringList
EtcdConfigFile string
AllowPrivileged bool
ExecutorPath string
ProxyPath string
MesosMaster string
MesosUser string
MesosRole string
MesosAuthPrincipal string
MesosAuthSecretFile string
Checkpoint bool
FailoverTimeout float64
ExecutorLogV int
ExecutorBindall bool
ExecutorSuicideTimeout time.Duration
ExecutorCgroupPrefix string
RunProxy bool
ProxyBindall bool
ProxyLogV int
MinionLogMaxSize resource.Quantity
MinionLogMaxBackups int
MinionLogMaxAgeInDays int
MesosAuthProvider string
DriverPort uint
HostnameOverride string
@ -146,23 +156,29 @@ type schedulerProcessInterface interface {
// NewSchedulerServer creates a new SchedulerServer with default parameters
func NewSchedulerServer() *SchedulerServer {
s := SchedulerServer{
Port: ports.SchedulerPort,
Address: util.IP(net.ParseIP("127.0.0.1")),
FailoverTimeout: time.Duration((1 << 62) - 1).Seconds(),
ExecutorRunProxy: true,
Port: ports.SchedulerPort,
Address: util.IP(net.ParseIP("127.0.0.1")),
FailoverTimeout: time.Duration((1 << 62) - 1).Seconds(),
RunProxy: true,
ExecutorSuicideTimeout: execcfg.DefaultSuicideTimeout,
ExecutorCgroupPrefix: execcfg.DefaultCgroupPrefix,
MesosAuthProvider: sasl.ProviderName,
MesosMaster: defaultMesosMaster,
MesosUser: defaultMesosUser,
ReconcileInterval: defaultReconcileInterval,
ReconcileCooldown: defaultReconcileCooldown,
Checkpoint: true,
FrameworkName: defaultFrameworkName,
HA: false,
mux: http.NewServeMux(),
KubeletCadvisorPort: 4194, // copied from github.com/GoogleCloudPlatform/kubernetes/blob/release-0.14/cmd/kubelet/app/server.go
KubeletSyncFrequency: 10 * time.Second,
MinionLogMaxSize: minioncfg.DefaultLogMaxSize(),
MinionLogMaxBackups: minioncfg.DefaultLogMaxBackups,
MinionLogMaxAgeInDays: minioncfg.DefaultLogMaxAgeInDays,
MesosAuthProvider: sasl.ProviderName,
MesosMaster: defaultMesosMaster,
MesosUser: defaultMesosUser,
ReconcileInterval: defaultReconcileInterval,
ReconcileCooldown: defaultReconcileCooldown,
Checkpoint: true,
FrameworkName: defaultFrameworkName,
HA: false,
mux: http.NewServeMux(),
KubeletCadvisorPort: 4194, // copied from github.com/GoogleCloudPlatform/kubernetes/blob/release-0.14/cmd/kubelet/app/server.go
KubeletSyncFrequency: 10 * time.Second,
}
// cache this for later use. also useful in case the original binary gets deleted, e.g.
// during upgrades, development deployments, etc.
@ -211,13 +227,19 @@ func (s *SchedulerServer) addCoreFlags(fs *pflag.FlagSet) {
fs.Var(&s.DefaultContainerCPULimit, "default-container-cpu-limit", "Containers without a CPU resource limit are admitted this much CPU shares")
fs.Var(&s.DefaultContainerMemLimit, "default-container-mem-limit", "Containers without a memory resource limit are admitted this much amount of memory in MB")
fs.IntVar(&s.ExecutorLogV, "executor-logv", s.ExecutorLogV, "Logging verbosity of spawned minion and executor processes.")
fs.BoolVar(&s.ExecutorBindall, "executor-bindall", s.ExecutorBindall, "When true will set -address of the executor to 0.0.0.0.")
fs.IntVar(&s.ExecutorLogV, "executor-logv", s.ExecutorLogV, "Logging verbosity of spawned executor processes.")
fs.BoolVar(&s.ExecutorProxyBindall, "executor-proxy-bindall", s.ExecutorProxyBindall, "When true pass -proxy-bindall to the executor.")
fs.BoolVar(&s.ExecutorRunProxy, "executor-run-proxy", s.ExecutorRunProxy, "Run the kube-proxy as a child process of the executor.")
fs.DurationVar(&s.ExecutorSuicideTimeout, "executor-suicide-timeout", s.ExecutorSuicideTimeout, "Executor self-terminates after this period of inactivity. Zero disables suicide watch.")
fs.StringVar(&s.ExecutorCgroupPrefix, "executor-cgroup-prefix", s.ExecutorCgroupPrefix, "The cgroup prefix concatenated with MESOS_DIRECTORY must give the executor cgroup set by Mesos")
fs.BoolVar(&s.ProxyBindall, "proxy-bindall", s.ProxyBindall, "When true pass -proxy-bindall to the executor.")
fs.BoolVar(&s.RunProxy, "run-proxy", s.RunProxy, "Run the kube-proxy as a side process of the executor.")
fs.IntVar(&s.ProxyLogV, "proxy-logv", s.ProxyLogV, "Logging verbosity of spawned minion proxy processes.")
fs.Var(resource.NewQuantityFlagValue(&s.MinionLogMaxSize), "minion-max-log-size", "Maximum log file size for the executor and proxy before rotation")
fs.IntVar(&s.MinionLogMaxAgeInDays, "minion-max-log-age", s.MinionLogMaxAgeInDays, "Maximum log file age of the executor and proxy in days")
fs.IntVar(&s.MinionLogMaxBackups, "minion-max-log-backups", s.MinionLogMaxBackups, "Maximum log file backups of the executor and proxy to keep after rotation")
fs.StringVar(&s.KubeletRootDirectory, "kubelet-root-dir", s.KubeletRootDirectory, "Directory path for managing kubelet files (volume mounts,etc). Defaults to executor sandbox.")
fs.StringVar(&s.KubeletDockerEndpoint, "kubelet-docker-endpoint", s.KubeletDockerEndpoint, "If non-empty, kubelet will use this for the docker endpoint to communicate with.")
fs.StringVar(&s.KubeletPodInfraContainerImage, "kubelet-pod-infra-container-image", s.KubeletPodInfraContainerImage, "The image whose network/ipc namespaces containers in each pod will use.")
@ -233,7 +255,6 @@ func (s *SchedulerServer) addCoreFlags(fs *pflag.FlagSet) {
func (s *SchedulerServer) AddStandaloneFlags(fs *pflag.FlagSet) {
s.addCoreFlags(fs)
fs.StringVar(&s.ExecutorPath, "executor-path", s.ExecutorPath, "Location of the kubernetes executor executable")
fs.StringVar(&s.ProxyPath, "proxy-path", s.ProxyPath, "Location of the kubernetes proxy executable")
}
func (s *SchedulerServer) AddHyperkubeFlags(fs *pflag.FlagSet) {
@ -277,17 +298,11 @@ func (s *SchedulerServer) prepareExecutorInfo(hks hyperkube.Interface) (*mesos.E
Shell: proto.Bool(false),
}
//TODO(jdef) these should be shared constants with km
const (
KM_EXECUTOR = "executor"
KM_PROXY = "proxy"
)
if s.ExecutorPath != "" {
uri, executorCmd := s.serveFrameworkArtifact(s.ExecutorPath)
ci.Uris = append(ci.Uris, &mesos.CommandInfo_URI{Value: proto.String(uri), Executable: proto.Bool(true)})
ci.Value = proto.String(fmt.Sprintf("./%s", executorCmd))
} else if !hks.FindServer(KM_EXECUTOR) {
} else if !hks.FindServer(hyperkube.CommandMinion) {
return nil, nil, fmt.Errorf("either run this scheduler via km or else --executor-path is required")
} else {
if strings.Index(s.KMPath, "://") > 0 {
@ -305,18 +320,16 @@ func (s *SchedulerServer) prepareExecutorInfo(hks hyperkube.Interface) (*mesos.E
ci.Uris = append(ci.Uris, &mesos.CommandInfo_URI{Value: proto.String(uri), Executable: proto.Bool(true)})
ci.Value = proto.String(fmt.Sprintf("./%s", kmCmd))
}
ci.Arguments = append(ci.Arguments, KM_EXECUTOR)
}
ci.Arguments = append(ci.Arguments, hyperkube.CommandMinion)
if s.ProxyPath != "" {
uri, proxyCmd := s.serveFrameworkArtifact(s.ProxyPath)
ci.Uris = append(ci.Uris, &mesos.CommandInfo_URI{Value: proto.String(uri), Executable: proto.Bool(true)})
ci.Arguments = append(ci.Arguments, fmt.Sprintf("--proxy-exec=./%s", proxyCmd))
} else if !hks.FindServer(KM_PROXY) {
return nil, nil, fmt.Errorf("either run this scheduler via km or else --proxy-path is required")
} else if s.ExecutorPath != "" {
return nil, nil, fmt.Errorf("proxy can only use km binary if executor does the same")
} // else, executor is smart enough to know when proxy-path is required, or to use km
ci.Arguments = append(ci.Arguments, fmt.Sprintf("--run-proxy=%v", s.RunProxy))
ci.Arguments = append(ci.Arguments, fmt.Sprintf("--proxy-bindall=%v", s.ProxyBindall))
ci.Arguments = append(ci.Arguments, fmt.Sprintf("--proxy-logv=%d", s.ProxyLogV))
ci.Arguments = append(ci.Arguments, fmt.Sprintf("--max-log-size=%v", s.MinionLogMaxSize.String()))
ci.Arguments = append(ci.Arguments, fmt.Sprintf("--max-log-backups=%d", s.MinionLogMaxBackups))
ci.Arguments = append(ci.Arguments, fmt.Sprintf("--max-log-age=%d", s.MinionLogMaxAgeInDays))
}
//TODO(jdef): provide some way (env var?) for users to customize executor config
//TODO(jdef): set -address to 127.0.0.1 if `address` is 127.0.0.1
@ -324,7 +337,7 @@ func (s *SchedulerServer) prepareExecutorInfo(hks hyperkube.Interface) (*mesos.E
apiServerArgs := strings.Join(s.APIServerList, ",")
ci.Arguments = append(ci.Arguments, fmt.Sprintf("--api-servers=%s", apiServerArgs))
ci.Arguments = append(ci.Arguments, fmt.Sprintf("--v=%d", s.ExecutorLogV))
ci.Arguments = append(ci.Arguments, fmt.Sprintf("--v=%d", s.ExecutorLogV)) // this also applies to the minion
ci.Arguments = append(ci.Arguments, fmt.Sprintf("--allow-privileged=%t", s.AllowPrivileged))
ci.Arguments = append(ci.Arguments, fmt.Sprintf("--suicide-timeout=%v", s.ExecutorSuicideTimeout))
@ -336,8 +349,6 @@ func (s *SchedulerServer) prepareExecutorInfo(hks hyperkube.Interface) (*mesos.E
}
ci.Arguments = append(ci.Arguments, fmt.Sprintf("--cgroup-prefix=%v", s.ExecutorCgroupPrefix))
ci.Arguments = append(ci.Arguments, fmt.Sprintf("--proxy-bindall=%v", s.ExecutorProxyBindall))
ci.Arguments = append(ci.Arguments, fmt.Sprintf("--run-proxy=%v", s.ExecutorRunProxy))
ci.Arguments = append(ci.Arguments, fmt.Sprintf("--cadvisor-port=%v", s.KubeletCadvisorPort))
ci.Arguments = append(ci.Arguments, fmt.Sprintf("--sync-frequency=%v", s.KubeletSyncFrequency))