mirror of https://github.com/k3s-io/k3s
Update more packages, tests, binaries for quantity

make etcd registry pass test
fix kubelet config for quantity
fix openstack for quantity
fix controller for quantity
fix last tests for quantity
wire into binaries
fix controller manager
fix build for 32 bit systems
parent 0d628b3bff
commit 35f54addca
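The changes below all follow one pattern: raw int64 values and util.IntOrString resource fields are replaced with typed resource.Quantity values. A minimal sketch of that pattern, assuming the constructors that appear in the hunks below (NewMilliQuantity, NewQuantity) and the import paths used by this tree; it is an illustration, not part of the commit:

package main

import (
	"fmt"

	"github.com/GoogleCloudPlatform/kubernetes/pkg/api"
	"github.com/GoogleCloudPlatform/kubernetes/pkg/api/resource"
)

func main() {
	// Node capacity expressed as typed quantities instead of raw integers.
	capacity := api.ResourceList{
		api.ResourceCPU:    *resource.NewMilliQuantity(1000, resource.DecimalSI),       // 1 CPU
		api.ResourceMemory: *resource.NewQuantity(3*1024*1024*1024, resource.BinarySI), // 3Gi
	}
	fmt.Printf("capacity: %v\n", capacity)
}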
@@ -22,20 +22,19 @@ package main

 import (
 	"flag"
-	"math"
 	"net"
 	"net/http"
 	"strconv"
 	"time"

 	"github.com/GoogleCloudPlatform/kubernetes/pkg/api"
+	"github.com/GoogleCloudPlatform/kubernetes/pkg/api/resource"
 	"github.com/GoogleCloudPlatform/kubernetes/pkg/client"
 	"github.com/GoogleCloudPlatform/kubernetes/pkg/cloudprovider"
 	minionControllerPkg "github.com/GoogleCloudPlatform/kubernetes/pkg/cloudprovider/controller"
 	replicationControllerPkg "github.com/GoogleCloudPlatform/kubernetes/pkg/controller"
 	_ "github.com/GoogleCloudPlatform/kubernetes/pkg/healthz"
 	"github.com/GoogleCloudPlatform/kubernetes/pkg/master/ports"
-	"github.com/GoogleCloudPlatform/kubernetes/pkg/resources"
 	"github.com/GoogleCloudPlatform/kubernetes/pkg/service"
 	"github.com/GoogleCloudPlatform/kubernetes/pkg/util"
 	"github.com/GoogleCloudPlatform/kubernetes/pkg/version/verflag"
@@ -51,8 +50,9 @@ var (
 	minionRegexp = flag.String("minion_regexp", "", "If non empty, and -cloud_provider is specified, a regular expression for matching minion VMs.")
 	machineList util.StringList
 	// TODO: Discover these by pinging the host machines, and rip out these flags.
+	// TODO: in the meantime, use resource.QuantityFlag() instead of these
 	nodeMilliCPU = flag.Int64("node_milli_cpu", 1000, "The amount of MilliCPU provisioned on each node")
-	nodeMemory   = flag.Int64("node_memory", 3*1024*1024*1024, "The amount of memory (in bytes) provisioned on each node")
+	nodeMemory   = resource.QuantityFlag("node_memory", "3Gi", "The amount of memory (in bytes) provisioned on each node")
 )

 func init() {
@@ -90,18 +90,6 @@ func main() {
 		glog.Fatalf("Invalid API configuration: %v", err)
 	}

-	if int64(int(*nodeMilliCPU)) != *nodeMilliCPU {
-		glog.Warningf("node_milli_cpu is too big for platform. Clamping: %d -> %d",
-			*nodeMilliCPU, math.MaxInt32)
-		*nodeMilliCPU = math.MaxInt32
-	}
-
-	if int64(int(*nodeMemory)) != *nodeMemory {
-		glog.Warningf("node_memory is too big for platform. Clamping: %d -> %d",
-			*nodeMemory, math.MaxInt32)
-		*nodeMemory = math.MaxInt32
-	}
-
 	go http.ListenAndServe(net.JoinHostPort(address.String(), strconv.Itoa(*port)), nil)

 	endpoints := service.NewEndpointController(kubeClient)
@@ -113,8 +101,8 @@ func main() {
 	cloud := cloudprovider.InitCloudProvider(*cloudProvider, *cloudConfigFile)
 	nodeResources := &api.NodeResources{
 		Capacity: api.ResourceList{
-			resources.CPU:    util.NewIntOrStringFromInt(int(*nodeMilliCPU)),
-			resources.Memory: util.NewIntOrStringFromInt(int(*nodeMemory)),
+			api.ResourceCPU:    *resource.NewMilliQuantity(*nodeMilliCPU, resource.DecimalSI),
+			api.ResourceMemory: *nodeMemory,
 		},
 	}
 	minionController := minionControllerPkg.NewMinionController(cloud, *minionRegexp, machineList, nodeResources, kubeClient)
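The node_memory flag above now goes through resource.QuantityFlag, which, as used in this hunk, hands back a *resource.Quantity that the flag package fills in. A short sketch of how such a flag might be read, assuming only the helpers visible in this diff (QuantityFlag and Quantity.Value); the program is illustrative, not part of the commit:

package main

import (
	"flag"
	"fmt"

	"github.com/GoogleCloudPlatform/kubernetes/pkg/api/resource"
)

// Mirrors the controller-manager flag: the default is a human-readable
// quantity string rather than a raw byte count.
var nodeMemory = resource.QuantityFlag("node_memory", "3Gi", "The amount of memory (in bytes) provisioned on each node")

func main() {
	flag.Parse()
	// Value() yields an int64 byte count; the old per-platform clamping is
	// gone because Quantity keeps 64-bit precision regardless of int size.
	fmt.Printf("node memory: %d bytes\n", nodeMemory.Value())
}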
@@ -18,7 +18,6 @@ package latest

 import (
 	"encoding/json"
-	"reflect"
 	"strconv"
 	"testing"

@@ -167,7 +166,7 @@ func TestInternalRoundTrip(t *testing.T) {
 			continue
 		}

-		if !reflect.DeepEqual(obj, actual) {
+		if !internal.Semantic.DeepEqual(obj, actual) {
 			t.Errorf("%s: diff %s", k, util.ObjectDiff(obj, actual))
 		}
 	}
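Most of the test churn in this commit is the same substitution: reflect.DeepEqual becomes api.Semantic.DeepEqual (or the per-package internal/newer alias). The reason is that two quantities can be numerically equal while differing in internal representation, for example 1 versus 1000m. A minimal illustration, assuming Semantic compares quantities by value as the updated tests rely on:

package main

import (
	"fmt"
	"reflect"

	"github.com/GoogleCloudPlatform/kubernetes/pkg/api"
	"github.com/GoogleCloudPlatform/kubernetes/pkg/api/resource"
)

func main() {
	// Both describe one CPU; one of them is stored in milli-units.
	a := *resource.NewMilliQuantity(1000, resource.DecimalSI)
	b := *resource.NewQuantity(1, resource.DecimalSI)

	fmt.Println(reflect.DeepEqual(a, b))      // false: internal fields differ
	fmt.Println(api.Semantic.DeepEqual(a, b)) // expected true: compares the value
}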
@@ -148,7 +148,7 @@ var (
 	// The maximum value we can represent milli-units for.
 	// Compare with the return value of Quantity.Value() to
 	// see if it's safe to use Quantity.MilliValue().
-	MaxMilliValue = ((1 << 63) - 1) / 1000
+	MaxMilliValue = int64(((1 << 63) - 1) / 1000)
 )

 // ParseQuantity turns str into a Quantity, or returns an error.
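The int64 conversion is the "32 bit systems" fix from the commit message: without it, ((1 << 63) - 1) / 1000 is an untyped constant that defaults to int, which overflows and fails to compile on 32-bit platforms. A short sketch of how the constant is meant to be used, following the comment above; the helper name is made up for illustration:

package quantityexample

import "github.com/GoogleCloudPlatform/kubernetes/pkg/api/resource"

// milliOrValue returns q in milli-units when that is safe, otherwise the
// plain value, using the guard described in the quantity.go comment.
func milliOrValue(q resource.Quantity) (n int64, isMilli bool) {
	if q.Value() > resource.MaxMilliValue {
		return q.Value(), false // too large to express in milli-units
	}
	return q.MilliValue(), true
}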
@@ -201,7 +201,7 @@ func TestMinionListConversionToNew(t *testing.T) {
 		if err != nil {
 			t.Errorf("Unexpected error: %v", err)
 		}
-		if e, a := item.newML, got; !reflect.DeepEqual(e, a) {
+		if e, a := item.newML, got; !newer.Semantic.DeepEqual(e, a) {
 			t.Errorf("Expected: %#v, got %#v", e, a)
 		}
 	}

@@ -234,7 +234,7 @@ func TestMinionListConversionToOld(t *testing.T) {
 	if err != nil {
 		t.Errorf("Unexpected error: %v", err)
 	}
-	if e, a := oldML, got; !reflect.DeepEqual(e, a) {
+	if e, a := oldML, got; !newer.Semantic.DeepEqual(e, a) {
 		t.Errorf("Expected: %#v, got %#v", e, a)
 	}
 }
@@ -36,8 +36,8 @@ import (
 	"github.com/rackspace/gophercloud/pagination"

 	"github.com/GoogleCloudPlatform/kubernetes/pkg/api"
+	"github.com/GoogleCloudPlatform/kubernetes/pkg/api/resource"
 	"github.com/GoogleCloudPlatform/kubernetes/pkg/cloudprovider"
-	"github.com/GoogleCloudPlatform/kubernetes/pkg/util"
 	"github.com/golang/glog"
 )

@@ -169,11 +169,11 @@ func (os *OpenStack) Instances() (cloudprovider.Instances, bool) {
 	for _, flavor := range flavorList {
 		rsrc := api.NodeResources{
 			Capacity: api.ResourceList{
-				"cpu":                      util.NewIntOrStringFromInt(flavor.VCPUs),
-				"memory":                   util.NewIntOrStringFromString(fmt.Sprintf("%dMiB", flavor.RAM)),
-				"openstack.org/disk":       util.NewIntOrStringFromString(fmt.Sprintf("%dGB", flavor.Disk)),
-				"openstack.org/rxTxFactor": util.NewIntOrStringFromInt(int(flavor.RxTxFactor * 1000)),
-				"openstack.org/swap":       util.NewIntOrStringFromString(fmt.Sprintf("%dMiB", flavor.Swap)),
+				api.ResourceCPU:            *resource.NewMilliQuantity(int64(flavor.VCPUs*1000), resource.DecimalSI),
+				api.ResourceMemory:         *resource.Q(fmt.Sprintf("%dMi", flavor.RAM)),
+				"openstack.org/disk":       *resource.Q(fmt.Sprintf("%dG", flavor.Disk)),
+				"openstack.org/rxTxFactor": *resource.NewQuantity(int64(flavor.RxTxFactor*1000), resource.DecimalSI),
+				"openstack.org/swap":       *resource.Q(fmt.Sprintf("%dMiB", flavor.Swap)),
 			},
 		}
 		flavor_to_resource[flavor.ID] = &rsrc
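The OpenStack flavor strings also move from ad-hoc "MiB"/"GB" spellings to the suffixes the resource package parses, binary ("Mi", "Gi") versus decimal ("M", "G"). A small hedged sketch of the distinction; the rendered strings assume the canonical formatting implied by defaults like "3Gi" elsewhere in this commit:

package main

import (
	"fmt"

	"github.com/GoogleCloudPlatform/kubernetes/pkg/api/resource"
)

func main() {
	binary := resource.NewQuantity(3*1024*1024*1024, resource.BinarySI)   // 3 * 2^30 bytes
	decimal := resource.NewQuantity(3*1000*1000*1000, resource.DecimalSI) // 3 * 10^9 bytes

	// Expected to print "3Gi 3G" if canonical suffixes are used.
	fmt.Println(binary.String(), decimal.String())
}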
@@ -21,7 +21,6 @@ import (
 	"net/http"
 	"net/http/httptest"
 	"path"
-	"reflect"
 	"sync"
 	"testing"
 	"time"
@@ -233,7 +232,7 @@ func TestCreateReplica(t *testing.T) {
 	if err != nil {
 		t.Errorf("Unexpected error: %#v", err)
 	}
-	if !reflect.DeepEqual(&expectedPod, actualPod) {
+	if !api.Semantic.DeepEqual(&expectedPod, actualPod) {
 		t.Logf("Body: %s", fakeHandler.RequestBody)
 		t.Errorf("Unexpected mismatch. Expected\n %#v,\n Got:\n %#v", &expectedPod, actualPod)
 	}

@@ -345,7 +344,7 @@ func TestWatchControllers(t *testing.T) {
 	var testControllerSpec api.ReplicationController
 	received := make(chan struct{})
 	manager.syncHandler = func(controllerSpec api.ReplicationController) error {
-		if !reflect.DeepEqual(controllerSpec, testControllerSpec) {
+		if !api.Semantic.DeepEqual(controllerSpec, testControllerSpec) {
 			t.Errorf("Expected %#v, but got %#v", testControllerSpec, controllerSpec)
 		}
 		close(received)
@@ -17,7 +17,6 @@ limitations under the License.

 package config

 import (
-	"reflect"
 	"sort"
 	"testing"
@@ -65,13 +64,7 @@ func CreateValidPod(name, namespace, source string) api.BoundPod {
 }

 func CreatePodUpdate(op kubelet.PodOperation, source string, pods ...api.BoundPod) kubelet.PodUpdate {
-	// We deliberately return an empty slice instead of a nil pointer here
-	// because reflect.DeepEqual differentiates between the two and we need to
-	// pick one for consistency.
 	newPods := make([]api.BoundPod, len(pods))
-	if len(pods) == 0 {
-		return kubelet.PodUpdate{newPods, op, source}
-	}
 	for i := range pods {
 		newPods[i] = pods[i]
 	}

@@ -89,7 +82,7 @@ func expectPodUpdate(t *testing.T, ch <-chan kubelet.PodUpdate, expected ...kubelet.PodUpdate) {
 	for i := range expected {
 		update := <-ch
 		sort.Sort(sortedPods(update.Pods))
-		if !reflect.DeepEqual(expected[i], update) {
+		if !api.Semantic.DeepEqual(expected[i], update) {
 			t.Fatalf("Expected %#v, Got %#v", expected[i], update)
 		}
 	}
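The comment deleted from CreatePodUpdate existed only because reflect.DeepEqual distinguishes a nil slice from an empty one, so the helper had to pick a canonical form. With the tests now going through api.Semantic.DeepEqual, the special case is dropped; the underlying reflect behaviour is standard Go and easy to demonstrate:

package main

import (
	"fmt"
	"reflect"
)

func main() {
	var nilPods []string           // nil slice
	emptyPods := make([]string, 0) // empty, non-nil slice

	fmt.Println(reflect.DeepEqual(nilPods, emptyPods)) // false: nil and empty differ
	fmt.Println(len(nilPods) == len(emptyPods))        // true: both have length zero
}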
@@ -20,7 +20,6 @@ import (
 	"encoding/json"
 	"io/ioutil"
 	"os"
-	"reflect"
 	"sort"
 	"testing"
 	"time"
@@ -130,7 +129,7 @@ func TestReadFromFile(t *testing.T) {
 			Containers: []api.Container{{Image: "test/image", TerminationMessagePath: "/dev/termination-log"}},
 		},
 	})
-	if !reflect.DeepEqual(expected, update) {
+	if !api.Semantic.DeepEqual(expected, update) {
 		t.Fatalf("Expected %#v, Got %#v", expected, update)
 	}

@@ -171,7 +170,7 @@ func TestExtractFromValidDataFile(t *testing.T) {
 	}
 	update := (<-ch).(kubelet.PodUpdate)
 	expected := CreatePodUpdate(kubelet.SET, kubelet.FileSource, expectedPod)
-	if !reflect.DeepEqual(expected, update) {
+	if !api.Semantic.DeepEqual(expected, update) {
 		t.Errorf("Expected %#v, Got %#v", expected, update)
 	}
 }

@@ -192,7 +191,7 @@ func TestExtractFromEmptyDir(t *testing.T) {

 	update := (<-ch).(kubelet.PodUpdate)
 	expected := CreatePodUpdate(kubelet.SET, kubelet.FileSource)
-	if !reflect.DeepEqual(expected, update) {
+	if !api.Semantic.DeepEqual(expected, update) {
 		t.Errorf("Expected %#v, Got %#v", expected, update)
 	}
 }

@@ -242,7 +241,7 @@ func TestExtractFromDir(t *testing.T) {
 	expected := CreatePodUpdate(kubelet.SET, kubelet.FileSource, pods...)
 	sort.Sort(sortedPods(update.Pods))
 	sort.Sort(sortedPods(expected.Pods))
-	if !reflect.DeepEqual(expected, update) {
+	if !api.Semantic.DeepEqual(expected, update) {
 		t.Fatalf("Expected %#v, Got %#v", expected, update)
 	}
 	for i := range update.Pods {
@@ -19,7 +19,6 @@ package config

 import (
 	"encoding/json"
 	"net/http/httptest"
-	"reflect"
 	"testing"
 	"time"
@@ -193,7 +192,7 @@ func TestExtractFromHTTP(t *testing.T) {
 			continue
 		}
 		update := (<-ch).(kubelet.PodUpdate)
-		if !reflect.DeepEqual(testCase.expected, update) {
+		if !api.Semantic.DeepEqual(testCase.expected, update) {
 			t.Errorf("%s: Expected: %#v, Got: %#v", testCase.desc, testCase.expected, update)
 		}
 		for i := range update.Pods {
@@ -20,7 +20,6 @@ import (
 	"encoding/json"
 	"fmt"
 	"io/ioutil"
-	"reflect"
 	"strings"
 	"testing"
 	"time"
@@ -136,7 +135,7 @@ func TestControllerDecode(t *testing.T) {
 		t.Errorf("unexpected error: %v", err)
 	}

-	if !reflect.DeepEqual(controller, controllerOut) {
+	if !api.Semantic.DeepEqual(controller, controllerOut) {
 		t.Errorf("Expected %#v, found %#v", controller, controllerOut)
 	}
 }

@@ -208,7 +207,7 @@ func TestControllerParsing(t *testing.T) {
 		t.Errorf("unexpected error: %v", err)
 	}

-	if !reflect.DeepEqual(controller, expectedController) {
+	if !api.Semantic.DeepEqual(controller, expectedController) {
 		t.Errorf("Parsing failed: %s %#v %#v", string(data), controller, expectedController)
 	}
 }

@@ -375,7 +374,7 @@ func TestFillCurrentState(t *testing.T) {
 	if controller.Status.Replicas != 2 {
 		t.Errorf("expected 2, got: %d", controller.Status.Replicas)
 	}
-	if !reflect.DeepEqual(fakeLister.s, labels.Set(controller.Spec.Selector).AsSelector()) {
+	if !api.Semantic.DeepEqual(fakeLister.s, labels.Set(controller.Spec.Selector).AsSelector()) {
 		t.Errorf("unexpected output: %#v %#v", labels.Set(controller.Spec.Selector).AsSelector(), fakeLister.s)
 	}
 }
@@ -448,7 +448,7 @@ func TestEtcdUpdatePodNotScheduled(t *testing.T) {
 	}
 	var podOut api.Pod
 	latest.Codec.DecodeInto([]byte(response.Node.Value), &podOut)
-	if !reflect.DeepEqual(podOut, podIn) {
+	if !api.Semantic.DeepEqual(podOut, podIn) {
 		t.Errorf("expected: %v, got: %v", podOut, podIn)
 	}
 }

@@ -529,7 +529,7 @@ func TestEtcdUpdatePodScheduled(t *testing.T) {
 	}
 	var podOut api.Pod
 	latest.Codec.DecodeInto([]byte(response.Node.Value), &podOut)
-	if !reflect.DeepEqual(podOut, podIn) {
+	if !api.Semantic.DeepEqual(podOut, podIn) {
 		t.Errorf("expected: %#v, got: %#v", podOut, podIn)
 	}

@@ -542,7 +542,7 @@ func TestEtcdUpdatePodScheduled(t *testing.T) {
 		t.Fatalf("unexpected error decoding response: %v", err)
 	}

-	if len(list.Items) != 2 || !reflect.DeepEqual(list.Items[0].Spec, podIn.Spec) {
+	if len(list.Items) != 2 || !api.Semantic.DeepEqual(list.Items[0].Spec, podIn.Spec) {
 		t.Errorf("unexpected container list: %d\n items[0] - %#v\n podin.spec - %#v\n", len(list.Items), list.Items[0].Spec, podIn.Spec)
 	}
 }
@@ -18,12 +18,12 @@ package standalone

 import (
 	"fmt"
-	"math"
 	"net"
 	"net/http"
 	"time"

 	"github.com/GoogleCloudPlatform/kubernetes/pkg/api"
+	"github.com/GoogleCloudPlatform/kubernetes/pkg/api/resource"
 	"github.com/GoogleCloudPlatform/kubernetes/pkg/apiserver"
 	"github.com/GoogleCloudPlatform/kubernetes/pkg/client"
 	minionControllerPkg "github.com/GoogleCloudPlatform/kubernetes/pkg/cloudprovider/controller"
@@ -32,7 +32,6 @@ import (
 	"github.com/GoogleCloudPlatform/kubernetes/pkg/kubelet/config"
 	"github.com/GoogleCloudPlatform/kubernetes/pkg/kubelet/dockertools"
 	"github.com/GoogleCloudPlatform/kubernetes/pkg/master"
-	"github.com/GoogleCloudPlatform/kubernetes/pkg/resources"
 	"github.com/GoogleCloudPlatform/kubernetes/pkg/service"
 	"github.com/GoogleCloudPlatform/kubernetes/pkg/tools"
 	"github.com/GoogleCloudPlatform/kubernetes/pkg/util"

@@ -99,22 +98,10 @@ func RunScheduler(cl *client.Client) {

 // RunControllerManager starts a controller
 func RunControllerManager(machineList []string, cl *client.Client, nodeMilliCPU, nodeMemory int64) {
-	if int64(int(nodeMilliCPU)) != nodeMilliCPU {
-		glog.Warningf("node_milli_cpu is too big for platform. Clamping: %d -> %d",
-			nodeMilliCPU, math.MaxInt32)
-		nodeMilliCPU = math.MaxInt32
-	}
-
-	if int64(int(nodeMemory)) != nodeMemory {
-		glog.Warningf("node_memory is too big for platform. Clamping: %d -> %d",
-			nodeMemory, math.MaxInt32)
-		nodeMemory = math.MaxInt32
-	}
-
 	nodeResources := &api.NodeResources{
 		Capacity: api.ResourceList{
-			resources.CPU:    util.NewIntOrStringFromInt(int(nodeMilliCPU)),
-			resources.Memory: util.NewIntOrStringFromInt(int(nodeMemory)),
+			api.ResourceCPU:    *resource.NewMilliQuantity(nodeMilliCPU, resource.DecimalSI),
+			api.ResourceMemory: *resource.NewQuantity(nodeMemory, resource.BinarySI),
 		},
 	}
 	minionController := minionControllerPkg.NewMinionController(nil, "", machineList, nodeResources, cl)