mirror of https://github.com/k3s-io/k3s
Merge pull request #50738 from sttts/sttts-deepcopy-calls-controllers
Automatic merge from submit-queue (batch tested with PRs 49961, 50005, 50738, 51045, 49927). controllers: simplify deepcopy calls
commit b4a2c09ede
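The change repeated throughout the diff below is mechanical: the reflection-based scheme.Scheme.DeepCopy(obj), which returns an interface{} plus an error and forces a type assertion, gives way to the generated, typed obj.DeepCopy() method, which cannot fail and needs no cast. A minimal before/after sketch, written against today's k8s.io/api/core/v1 package for illustration (the ConfigMap literal and its data are made up, not taken from the PR):

```go
package main

import (
	"fmt"

	corev1 "k8s.io/api/core/v1"
)

func main() {
	// Pretend this object came out of a shared informer cache: the cache owns
	// it, so it must never be mutated in place.
	orig := &corev1.ConfigMap{Data: map[string]string{"kubeconfig": "original"}}

	// Old pattern removed by this PR (reflection-based, fallible, needs a cast):
	//   obj, err := scheme.Scheme.DeepCopy(orig)
	//   if err != nil { ... }
	//   cm := obj.(*corev1.ConfigMap)

	// New pattern: the generated DeepCopy is typed and cannot fail.
	cm := orig.DeepCopy()
	cm.Data["kubeconfig"] = "updated"

	fmt.Println(orig.Data["kubeconfig"], cm.Data["kubeconfig"]) // original updated
}
```

The same substitution covers the Copy and DeepCopy calls on templates and statuses below, and it lets the diff delete the one-off helpers (copyConfigMap, DeploymentDeepCopy, copyPod, copySet) outright.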
@@ -54,7 +54,6 @@ go_library(
"//vendor/k8s.io/apimachinery/pkg/util/wait:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/watch:go_default_library",
"//vendor/k8s.io/client-go/kubernetes:go_default_library",
"//vendor/k8s.io/client-go/kubernetes/scheme:go_default_library",
"//vendor/k8s.io/client-go/tools/cache:go_default_library",
"//vendor/k8s.io/client-go/util/workqueue:go_default_library",
],

@@ -31,7 +31,6 @@ import (
"k8s.io/apimachinery/pkg/util/wait"
"k8s.io/apimachinery/pkg/watch"
clientset "k8s.io/client-go/kubernetes"
"k8s.io/client-go/kubernetes/scheme"
"k8s.io/client-go/tools/cache"
"k8s.io/client-go/util/workqueue"
"k8s.io/kubernetes/pkg/api"

@@ -179,11 +178,7 @@ func (e *BootstrapSigner) signConfigMap() {
var needUpdate = false

newCM, err := copyConfigMap(origCM)
if err != nil {
utilruntime.HandleError(err)
return
}
newCM := origCM.DeepCopy()

// First capture the config we are signing
content, ok := newCM.Data[bootstrapapi.KubeConfigKey]

@@ -290,11 +285,3 @@ func (e *BootstrapSigner) getTokens() map[string]string {
return ret
}

func copyConfigMap(orig *v1.ConfigMap) (*v1.ConfigMap, error) {
newCMObj, err := scheme.Scheme.DeepCopy(orig)
if err != nil {
return nil, err
}
return newCMObj.(*v1.ConfigMap), nil
}

@@ -23,7 +23,6 @@ go_library(
"//vendor/k8s.io/apimachinery/pkg/util/wait:go_default_library",
"//vendor/k8s.io/client-go/informers/certificates/v1beta1:go_default_library",
"//vendor/k8s.io/client-go/kubernetes:go_default_library",
"//vendor/k8s.io/client-go/kubernetes/scheme:go_default_library",
"//vendor/k8s.io/client-go/kubernetes/typed/core/v1:go_default_library",
"//vendor/k8s.io/client-go/listers/certificates/v1beta1:go_default_library",
"//vendor/k8s.io/client-go/tools/cache:go_default_library",

@@ -28,7 +28,6 @@ import (
"k8s.io/apimachinery/pkg/util/wait"
certificatesinformers "k8s.io/client-go/informers/certificates/v1beta1"
clientset "k8s.io/client-go/kubernetes"
"k8s.io/client-go/kubernetes/scheme"
v1core "k8s.io/client-go/kubernetes/typed/core/v1"
certificateslisters "k8s.io/client-go/listers/certificates/v1beta1"
"k8s.io/client-go/tools/cache"

@@ -179,11 +178,7 @@ func (cc *CertificateController) syncFunc(key string) error {
}

// need to operate on a copy so we don't mutate the csr in the shared cache
copy, err := scheme.Scheme.DeepCopy(csr)
if err != nil {
return err
}
csr = copy.(*certificates.CertificateSigningRequest)
csr = csr.DeepCopy()

return cc.handler(csr)
}

@@ -178,12 +178,7 @@ func (cnc *CloudNodeController) updateNodeAddress(node *v1.Node, instances cloud
}
nodeAddresses = []v1.NodeAddress{*nodeIP}
}
nodeCopy, err := scheme.Scheme.DeepCopy(node)
if err != nil {
glog.Errorf("failed to copy node to a new object")
return
}
newNode := nodeCopy.(*v1.Node)
newNode := node.DeepCopy()
newNode.Status.Addresses = nodeAddresses
if !nodeAddressesChangeDetected(node.Status.Addresses, newNode.Status.Addresses) {
return

@@ -559,11 +559,7 @@ func GetPodFromTemplate(template *v1.PodTemplateSpec, parentObject runtime.Objec
if controllerRef != nil {
pod.OwnerReferences = append(pod.OwnerReferences, *controllerRef)
}
clone, err := scheme.Scheme.DeepCopy(&template.Spec)
if err != nil {
return nil, err
}
pod.Spec = *clone.(*v1.PodSpec)
pod.Spec = *template.Spec.DeepCopy()
return pod, nil
}

@@ -991,18 +987,11 @@ func PatchNodeTaints(c clientset.Interface, nodeName string, oldNode *v1.Node, n
}

newTaints := newNode.Spec.Taints
objCopy, err := scheme.Scheme.DeepCopy(oldNode)
newNodeClone := oldNode.DeepCopy()
newNodeClone.Spec.Taints = newTaints
newData, err := json.Marshal(newNodeClone)
if err != nil {
return fmt.Errorf("failed to copy node object %#v: %v", oldNode, err)
}
newNode, ok := (objCopy).(*v1.Node)
if !ok {
return fmt.Errorf("failed to cast copy onto node object %#v: %v", newNode, err)
}
newNode.Spec.Taints = newTaints
newData, err := json.Marshal(newNode)
if err != nil {
return fmt.Errorf("failed to marshal new node %#v for node %q: %v", newNode, nodeName, err)
return fmt.Errorf("failed to marshal new node %#v for node %q: %v", newNodeClone, nodeName, err)
}

patchBytes, err := strategicpatch.CreateTwoWayMergePatch(oldData, newData, v1.Node{})

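The PatchNodeTaints hunk above keeps the surrounding patch flow intact: the cloned node gets the new taints, both the old and the modified object are marshalled to JSON, and the pair is handed to CreateTwoWayMergePatch. A self-contained sketch of that flow, with a made-up taint key used purely for illustration:

```go
package main

import (
	"encoding/json"
	"fmt"

	corev1 "k8s.io/api/core/v1"
	"k8s.io/apimachinery/pkg/util/strategicpatch"
)

func main() {
	oldNode := &corev1.Node{}

	// Copy-then-modify, as in the hunk above: never touch the original object.
	newNode := oldNode.DeepCopy()
	newNode.Spec.Taints = []corev1.Taint{{Key: "example.com/unreachable", Effect: corev1.TaintEffectNoSchedule}}

	oldData, err := json.Marshal(oldNode)
	if err != nil {
		panic(err)
	}
	newData, err := json.Marshal(newNode)
	if err != nil {
		panic(err)
	}

	// The two-way merge patch contains only the taints delta and can be sent
	// as a PATCH instead of a full node update.
	patchBytes, err := strategicpatch.CreateTwoWayMergePatch(oldData, newData, corev1.Node{})
	if err != nil {
		panic(err)
	}
	fmt.Println(string(patchBytes))
}
```
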
@@ -967,12 +967,7 @@ func storeDaemonSetStatus(dsClient unversionedextensions.DaemonSetInterface, ds
return nil
}

clone, err := scheme.Scheme.DeepCopy(ds)
if err != nil {
return err
}

toUpdate := clone.(*extensions.DaemonSet)
toUpdate := ds.DeepCopy()

var updateErr, getErr error
for i := 0; i < StatusUpdateRetries; i++ {

@@ -32,7 +32,6 @@ import (
"k8s.io/apimachinery/pkg/runtime"
intstrutil "k8s.io/apimachinery/pkg/util/intstr"
"k8s.io/apimachinery/pkg/util/json"
"k8s.io/client-go/kubernetes/scheme"
podutil "k8s.io/kubernetes/pkg/api/v1/pod"
"k8s.io/kubernetes/pkg/controller"
"k8s.io/kubernetes/pkg/controller/daemon/util"

@@ -93,12 +92,7 @@ func (dsc *DaemonSetsController) constructHistory(ds *extensions.DaemonSet) (cur
// Add the unique label if it's not already added to the history
// We use history name instead of computing hash, so that we don't need to worry about hash collision
if _, ok := history.Labels[extensions.DefaultDaemonSetUniqueLabelKey]; !ok {
var clone interface{}
clone, err = scheme.Scheme.DeepCopy(history)
if err != nil {
return nil, nil, err
}
toUpdate := clone.(*apps.ControllerRevision)
toUpdate := history.DeepCopy()
toUpdate.Labels[extensions.DefaultDaemonSetUniqueLabelKey] = toUpdate.Name
history, err = dsc.kubeClient.AppsV1beta1().ControllerRevisions(ds.Namespace).Update(toUpdate)
if err != nil {

@@ -133,12 +127,7 @@ func (dsc *DaemonSetsController) constructHistory(ds *extensions.DaemonSet) (cur
}
// Update revision number if necessary
if cur.Revision < currRevision {
var clone interface{}
clone, err = scheme.Scheme.DeepCopy(cur)
if err != nil {
return nil, nil, err
}
toUpdate := clone.(*apps.ControllerRevision)
toUpdate := cur.DeepCopy()
toUpdate.Revision = currRevision
_, err = dsc.kubeClient.AppsV1beta1().ControllerRevisions(ds.Namespace).Update(toUpdate)
if err != nil {

@@ -233,11 +222,7 @@ func (dsc *DaemonSetsController) dedupCurHistories(ds *extensions.DaemonSet, cur
}
for _, pod := range pods {
if pod.Labels[extensions.DefaultDaemonSetUniqueLabelKey] != keepCur.Labels[extensions.DefaultDaemonSetUniqueLabelKey] {
clone, err := scheme.Scheme.DeepCopy(pod)
if err != nil {
return nil, err
}
toUpdate := clone.(*v1.Pod)
toUpdate := pod.DeepCopy()
if toUpdate.Labels == nil {
toUpdate.Labels = make(map[string]string)
}

@@ -20,7 +20,6 @@ go_library(
"//vendor/k8s.io/api/extensions/v1beta1:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
"//vendor/k8s.io/apiserver/pkg/util/feature:go_default_library",
"//vendor/k8s.io/client-go/kubernetes/scheme:go_default_library",
],
)

@@ -23,7 +23,6 @@ import (
extensions "k8s.io/api/extensions/v1beta1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
utilfeature "k8s.io/apiserver/pkg/util/feature"
"k8s.io/client-go/kubernetes/scheme"
v1helper "k8s.io/kubernetes/pkg/api/v1/helper"
podutil "k8s.io/kubernetes/pkg/api/v1/pod"
"k8s.io/kubernetes/pkg/features"

@@ -36,8 +35,7 @@ import (
// label which contains templateGeneration (for backward compatibility),
// hash of provided template and sets default daemon tolerations.
func CreatePodTemplate(template v1.PodTemplateSpec, generation int64, hash string) v1.PodTemplateSpec {
obj, _ := scheme.Scheme.DeepCopy(template)
newTemplate := obj.(v1.PodTemplateSpec)
newTemplate := *template.DeepCopy()
// DaemonSet pods shouldn't be deleted by NodeController in case of node problems.
// Add infinite toleration for taint notReady:NoExecute here
// to survive taint-based eviction enforced by NodeController

@@ -578,10 +578,7 @@ func (dc *DeploymentController) syncDeployment(key string) error {
// Deep-copy otherwise we are mutating our cache.
// TODO: Deep-copy only when needed.
d, err := util.DeploymentDeepCopy(deployment)
if err != nil {
return err
}
d := deployment.DeepCopy()

everything := metav1.LabelSelector{}
if reflect.DeepEqual(d.Spec.Selector, &everything) {

@ -27,7 +27,6 @@ import (
|
|||
"k8s.io/client-go/informers"
|
||||
"k8s.io/client-go/kubernetes/fake"
|
||||
"k8s.io/client-go/tools/record"
|
||||
"k8s.io/kubernetes/pkg/api"
|
||||
"k8s.io/kubernetes/pkg/controller"
|
||||
)
|
||||
|
||||
|
@ -53,12 +52,7 @@ func TestScaleDownOldReplicaSets(t *testing.T) {
|
|||
rs := newReplicaSet(test.d, fmt.Sprintf("%s-%d", test.d.Name, n), size)
|
||||
oldRSs = append(oldRSs, rs)
|
||||
|
||||
objCopy, err := api.Scheme.Copy(rs)
|
||||
if err != nil {
|
||||
t.Errorf("unexpected error while deep-copying: %v", err)
|
||||
continue
|
||||
}
|
||||
rsCopy := objCopy.(*extensions.ReplicaSet)
|
||||
rsCopy := rs.DeepCopy()
|
||||
|
||||
zero := int32(0)
|
||||
rsCopy.Spec.Replicas = &zero
|
||||
|
|
|
@ -29,7 +29,6 @@ import (
|
|||
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
"k8s.io/apimachinery/pkg/types"
|
||||
utilerrors "k8s.io/apimachinery/pkg/util/errors"
|
||||
"k8s.io/client-go/kubernetes/scheme"
|
||||
"k8s.io/kubernetes/pkg/controller"
|
||||
deploymentutil "k8s.io/kubernetes/pkg/controller/deployment/util"
|
||||
labelsutil "k8s.io/kubernetes/pkg/util/labels"
|
||||
|
@ -250,11 +249,7 @@ func (dc *DeploymentController) getNewReplicaSet(d *extensions.Deployment, rsLis
|
|||
// and maxReplicas) and also update the revision annotation in the deployment with the
|
||||
// latest revision.
|
||||
if existingNewRS != nil {
|
||||
objCopy, err := scheme.Scheme.Copy(existingNewRS)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
rsCopy := objCopy.(*extensions.ReplicaSet)
|
||||
rsCopy := existingNewRS.DeepCopy()
|
||||
|
||||
// Set existing new replica set's annotation
|
||||
annotationsUpdated := deploymentutil.SetNewReplicaSetAnnotations(d, rsCopy, newRevision, true)
|
||||
|
@ -290,11 +285,7 @@ func (dc *DeploymentController) getNewReplicaSet(d *extensions.Deployment, rsLis
|
|||
}
|
||||
|
||||
// new ReplicaSet does not exist, create one.
|
||||
templateCopy, err := scheme.Scheme.DeepCopy(d.Spec.Template)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
newRSTemplate := templateCopy.(v1.PodTemplateSpec)
|
||||
newRSTemplate := *d.Spec.Template.DeepCopy()
|
||||
podTemplateSpecHash := fmt.Sprintf("%d", controller.ComputeHash(&newRSTemplate, d.Status.CollisionCount))
|
||||
newRSTemplate.Labels = labelsutil.CloneAndAddLabel(d.Spec.Template.Labels, extensions.DefaultDeploymentUniqueLabelKey, podTemplateSpecHash)
|
||||
// Add podTemplateHash label to selector.
|
||||
|
@ -513,11 +504,7 @@ func (dc *DeploymentController) scaleReplicaSetAndRecordEvent(rs *extensions.Rep
|
|||
}
|
||||
|
||||
func (dc *DeploymentController) scaleReplicaSet(rs *extensions.ReplicaSet, newScale int32, deployment *extensions.Deployment, scalingOperation string) (bool, *extensions.ReplicaSet, error) {
|
||||
objCopy, err := scheme.Scheme.Copy(rs)
|
||||
if err != nil {
|
||||
return false, nil, err
|
||||
}
|
||||
rsCopy := objCopy.(*extensions.ReplicaSet)
|
||||
rsCopy := rs.DeepCopy()
|
||||
|
||||
sizeNeedsUpdate := *(rsCopy.Spec.Replicas) != newScale
|
||||
// TODO: Do not mutate the replica set here, instead simply compare the annotation and if they mismatch
|
||||
|
@ -526,6 +513,7 @@ func (dc *DeploymentController) scaleReplicaSet(rs *extensions.ReplicaSet, newSc
|
|||
annotationsNeedUpdate := deploymentutil.SetReplicasAnnotations(rsCopy, *(deployment.Spec.Replicas), *(deployment.Spec.Replicas)+deploymentutil.MaxSurge(*deployment))
|
||||
|
||||
scaled := false
|
||||
var err error
|
||||
if sizeNeedsUpdate || annotationsNeedUpdate {
|
||||
*(rsCopy.Spec.Replicas) = newScale
|
||||
rs, err = dc.client.Extensions().ReplicaSets(rsCopy.Namespace).Update(rsCopy)
|
||||
|
|
|
@ -29,7 +29,6 @@ go_library(
|
|||
"//vendor/k8s.io/apimachinery/pkg/util/intstr:go_default_library",
|
||||
"//vendor/k8s.io/apimachinery/pkg/util/wait:go_default_library",
|
||||
"//vendor/k8s.io/client-go/kubernetes:go_default_library",
|
||||
"//vendor/k8s.io/client-go/kubernetes/scheme:go_default_library",
|
||||
"//vendor/k8s.io/client-go/kubernetes/typed/core/v1:go_default_library",
|
||||
"//vendor/k8s.io/client-go/kubernetes/typed/extensions/v1beta1:go_default_library",
|
||||
"//vendor/k8s.io/client-go/listers/core/v1:go_default_library",
|
||||
|
@ -58,7 +57,6 @@ go_test(
|
|||
"//vendor/k8s.io/apimachinery/pkg/types:go_default_library",
|
||||
"//vendor/k8s.io/apimachinery/pkg/util/intstr:go_default_library",
|
||||
"//vendor/k8s.io/client-go/kubernetes/fake:go_default_library",
|
||||
"//vendor/k8s.io/client-go/kubernetes/scheme:go_default_library",
|
||||
"//vendor/k8s.io/client-go/testing:go_default_library",
|
||||
],
|
||||
)
|
||||
|
|
|
@ -36,7 +36,6 @@ import (
|
|||
intstrutil "k8s.io/apimachinery/pkg/util/intstr"
|
||||
"k8s.io/apimachinery/pkg/util/wait"
|
||||
clientset "k8s.io/client-go/kubernetes"
|
||||
"k8s.io/client-go/kubernetes/scheme"
|
||||
extensionsv1beta1 "k8s.io/client-go/kubernetes/typed/extensions/v1beta1"
|
||||
corelisters "k8s.io/client-go/listers/core/v1"
|
||||
extensionslisters "k8s.io/client-go/listers/extensions/v1beta1"
|
||||
|
@ -649,16 +648,8 @@ func ListPods(deployment *extensions.Deployment, rsList []*extensions.ReplicaSet
|
|||
// (e.g. the addition of a new field will cause the hash code to change)
|
||||
// Note that we assume input podTemplateSpecs contain non-empty labels
|
||||
func EqualIgnoreHash(template1, template2 *v1.PodTemplateSpec) (bool, error) {
|
||||
cp, err := scheme.Scheme.DeepCopy(template1)
|
||||
if err != nil {
|
||||
return false, err
|
||||
}
|
||||
t1Copy := cp.(*v1.PodTemplateSpec)
|
||||
cp, err = scheme.Scheme.DeepCopy(template2)
|
||||
if err != nil {
|
||||
return false, err
|
||||
}
|
||||
t2Copy := cp.(*v1.PodTemplateSpec)
|
||||
t1Copy := template1.DeepCopy()
|
||||
t2Copy := template2.DeepCopy()
|
||||
// First, compare template.Labels (ignoring hash)
|
||||
labels1, labels2 := t1Copy.Labels, t2Copy.Labels
|
||||
if len(labels1) > len(labels2) {
|
||||
|
@ -990,15 +981,3 @@ func ResolveFenceposts(maxSurge, maxUnavailable *intstrutil.IntOrString, desired
|
|||
|
||||
return int32(surge), int32(unavailable), nil
|
||||
}
|
||||
|
||||
func DeploymentDeepCopy(deployment *extensions.Deployment) (*extensions.Deployment, error) {
|
||||
objCopy, err := scheme.Scheme.DeepCopy(deployment)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
copied, ok := objCopy.(*extensions.Deployment)
|
||||
if !ok {
|
||||
return nil, fmt.Errorf("expected Deployment, got %#v", objCopy)
|
||||
}
|
||||
return copied, nil
|
||||
}
|
||||
|
|
|
@ -33,7 +33,6 @@ import (
|
|||
"k8s.io/apimachinery/pkg/types"
|
||||
"k8s.io/apimachinery/pkg/util/intstr"
|
||||
"k8s.io/client-go/kubernetes/fake"
|
||||
"k8s.io/client-go/kubernetes/scheme"
|
||||
core "k8s.io/client-go/testing"
|
||||
apiv1 "k8s.io/kubernetes/pkg/api/v1"
|
||||
"k8s.io/kubernetes/pkg/controller"
|
||||
|
@ -185,8 +184,7 @@ func newDControllerRef(d *extensions.Deployment) *metav1.OwnerReference {
|
|||
|
||||
// generateRS creates a replica set, with the input deployment's template as its template
|
||||
func generateRS(deployment extensions.Deployment) extensions.ReplicaSet {
|
||||
cp, _ := scheme.Scheme.DeepCopy(deployment.Spec.Template)
|
||||
template := cp.(v1.PodTemplateSpec)
|
||||
template := deployment.Spec.Template.DeepCopy()
|
||||
return extensions.ReplicaSet{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
UID: randomUID(),
|
||||
|
@ -196,7 +194,7 @@ func generateRS(deployment extensions.Deployment) extensions.ReplicaSet {
|
|||
},
|
||||
Spec: extensions.ReplicaSetSpec{
|
||||
Replicas: new(int32),
|
||||
Template: template,
|
||||
Template: *template,
|
||||
Selector: &metav1.LabelSelector{MatchLabels: template.Labels},
|
||||
},
|
||||
}
|
||||
|
|
|
@ -21,7 +21,6 @@ import (
|
|||
|
||||
"k8s.io/api/core/v1"
|
||||
errorsutil "k8s.io/apimachinery/pkg/util/errors"
|
||||
"k8s.io/client-go/kubernetes/scheme"
|
||||
v1core "k8s.io/client-go/kubernetes/typed/core/v1"
|
||||
corelisters "k8s.io/client-go/listers/core/v1"
|
||||
"k8s.io/client-go/util/retry"
|
||||
|
@ -42,11 +41,7 @@ func UpdatePodWithRetries(podClient v1core.PodInterface, podLister corelisters.P
|
|||
if err != nil {
|
||||
return err
|
||||
}
|
||||
obj, deepCopyErr := scheme.Scheme.DeepCopy(pod)
|
||||
if deepCopyErr != nil {
|
||||
return deepCopyErr
|
||||
}
|
||||
pod = obj.(*v1.Pod)
|
||||
pod = pod.DeepCopy()
|
||||
// Apply the update, then attempt to push it to the apiserver.
|
||||
if applyErr := applyUpdate(pod); applyErr != nil {
|
||||
return applyErr
|
||||
|
|
|
@ -21,10 +21,8 @@ import (
|
|||
|
||||
"github.com/golang/glog"
|
||||
|
||||
"k8s.io/api/core/v1"
|
||||
extensions "k8s.io/api/extensions/v1beta1"
|
||||
errorsutil "k8s.io/apimachinery/pkg/util/errors"
|
||||
"k8s.io/client-go/kubernetes/scheme"
|
||||
unversionedextensions "k8s.io/client-go/kubernetes/typed/extensions/v1beta1"
|
||||
extensionslisters "k8s.io/client-go/listers/extensions/v1beta1"
|
||||
"k8s.io/client-go/util/retry"
|
||||
|
@ -47,11 +45,7 @@ func UpdateRSWithRetries(rsClient unversionedextensions.ReplicaSetInterface, rsL
|
|||
if err != nil {
|
||||
return err
|
||||
}
|
||||
obj, deepCopyErr := scheme.Scheme.DeepCopy(rs)
|
||||
if deepCopyErr != nil {
|
||||
return deepCopyErr
|
||||
}
|
||||
rs = obj.(*extensions.ReplicaSet)
|
||||
rs = rs.DeepCopy()
|
||||
// Apply the update, then attempt to push it to the apiserver.
|
||||
if applyErr := applyUpdate(rs); applyErr != nil {
|
||||
return applyErr
|
||||
|
@ -71,11 +65,7 @@ func UpdateRSWithRetries(rsClient unversionedextensions.ReplicaSetInterface, rsL
|
|||
|
||||
// GetReplicaSetHash returns the pod template hash of a ReplicaSet's pod template space
|
||||
func GetReplicaSetHash(rs *extensions.ReplicaSet, uniquifier *int32) (string, error) {
|
||||
template, err := scheme.Scheme.DeepCopy(rs.Spec.Template)
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
rsTemplate := template.(v1.PodTemplateSpec)
|
||||
rsTemplate := rs.Spec.Template.DeepCopy()
|
||||
rsTemplate.Labels = labelsutil.CloneAndRemoveLabel(rsTemplate.Labels, extensions.DefaultDeploymentUniqueLabelKey)
|
||||
return fmt.Sprintf("%d", controller.ComputeHash(&rsTemplate, uniquifier)), nil
|
||||
return fmt.Sprintf("%d", controller.ComputeHash(rsTemplate, uniquifier)), nil
|
||||
}
|
||||
|
|
|
@ -58,7 +58,6 @@ go_test(
|
|||
"//vendor/k8s.io/apimachinery/pkg/util/intstr:go_default_library",
|
||||
"//vendor/k8s.io/apimachinery/pkg/util/uuid:go_default_library",
|
||||
"//vendor/k8s.io/client-go/informers:go_default_library",
|
||||
"//vendor/k8s.io/client-go/kubernetes/scheme:go_default_library",
|
||||
"//vendor/k8s.io/client-go/tools/cache:go_default_library",
|
||||
"//vendor/k8s.io/client-go/util/workqueue:go_default_library",
|
||||
],
|
||||
|
|
|
@ -676,13 +676,8 @@ func (dc *DisruptionController) buildDisruptedPodMap(pods []*v1.Pod, pdb *policy
|
|||
// this field correctly, we will prevent the /evict handler from approving an
|
||||
// eviction when it may be unsafe to do so.
|
||||
func (dc *DisruptionController) failSafe(pdb *policy.PodDisruptionBudget) error {
|
||||
obj, err := scheme.Scheme.DeepCopy(pdb)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
newPdb := obj.(*policy.PodDisruptionBudget)
|
||||
newPdb := pdb.DeepCopy()
|
||||
newPdb.Status.PodDisruptionsAllowed = 0
|
||||
|
||||
return dc.getUpdater()(newPdb)
|
||||
}
|
||||
|
||||
|
@ -707,12 +702,7 @@ func (dc *DisruptionController) updatePdbStatus(pdb *policy.PodDisruptionBudget,
|
|||
return nil
|
||||
}
|
||||
|
||||
obj, err := scheme.Scheme.DeepCopy(pdb)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
newPdb := obj.(*policy.PodDisruptionBudget)
|
||||
|
||||
newPdb := pdb.DeepCopy()
|
||||
newPdb.Status = policy.PodDisruptionBudgetStatus{
|
||||
CurrentHealthy: currentHealthy,
|
||||
DesiredHealthy: desiredHealthy,
|
||||
|
|
|
@ -31,7 +31,6 @@ import (
|
|||
"k8s.io/apimachinery/pkg/util/intstr"
|
||||
"k8s.io/apimachinery/pkg/util/uuid"
|
||||
"k8s.io/client-go/informers"
|
||||
"k8s.io/client-go/kubernetes/scheme"
|
||||
"k8s.io/client-go/tools/cache"
|
||||
"k8s.io/client-go/util/workqueue"
|
||||
"k8s.io/kubernetes/pkg/api"
|
||||
|
@ -50,12 +49,7 @@ func (ps *pdbStates) Set(pdb *policy.PodDisruptionBudget) error {
|
|||
if err != nil {
|
||||
return err
|
||||
}
|
||||
obj, err := scheme.Scheme.DeepCopy(pdb)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
(*ps)[key] = *obj.(*policy.PodDisruptionBudget)
|
||||
|
||||
(*ps)[key] = *pdb.DeepCopy()
|
||||
return nil
|
||||
}
|
||||
|
||||
|
|
|
@ -29,7 +29,6 @@ go_library(
|
|||
"//vendor/k8s.io/apimachinery/pkg/util/wait:go_default_library",
|
||||
"//vendor/k8s.io/client-go/informers/core/v1:go_default_library",
|
||||
"//vendor/k8s.io/client-go/kubernetes:go_default_library",
|
||||
"//vendor/k8s.io/client-go/kubernetes/scheme:go_default_library",
|
||||
"//vendor/k8s.io/client-go/listers/core/v1:go_default_library",
|
||||
"//vendor/k8s.io/client-go/tools/cache:go_default_library",
|
||||
"//vendor/k8s.io/client-go/tools/leaderelection/resourcelock:go_default_library",
|
||||
|
|
|
@ -32,7 +32,6 @@ import (
|
|||
"k8s.io/apimachinery/pkg/util/wait"
|
||||
coreinformers "k8s.io/client-go/informers/core/v1"
|
||||
clientset "k8s.io/client-go/kubernetes"
|
||||
"k8s.io/client-go/kubernetes/scheme"
|
||||
corelisters "k8s.io/client-go/listers/core/v1"
|
||||
"k8s.io/client-go/tools/cache"
|
||||
"k8s.io/client-go/tools/leaderelection/resourcelock"
|
||||
|
@ -499,11 +498,7 @@ func (e *EndpointController) syncService(key string) error {
|
|||
glog.V(5).Infof("endpoints are equal for %s/%s, skipping update", service.Namespace, service.Name)
|
||||
return nil
|
||||
}
|
||||
copy, err := scheme.Scheme.DeepCopy(currentEndpoints)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
newEndpoints := copy.(*v1.Endpoints)
|
||||
newEndpoints := currentEndpoints.DeepCopy()
|
||||
newEndpoints.Subsets = subsets
|
||||
newEndpoints.Labels = service.Labels
|
||||
if newEndpoints.Annotations == nil {
|
||||
|
|
|
@ -43,7 +43,6 @@ go_library(
|
|||
"//vendor/k8s.io/apimachinery/pkg/types:go_default_library",
|
||||
"//vendor/k8s.io/client-go/informers/apps/v1beta1:go_default_library",
|
||||
"//vendor/k8s.io/client-go/kubernetes:go_default_library",
|
||||
"//vendor/k8s.io/client-go/kubernetes/scheme:go_default_library",
|
||||
"//vendor/k8s.io/client-go/listers/apps/v1beta1:go_default_library",
|
||||
"//vendor/k8s.io/client-go/tools/cache:go_default_library",
|
||||
"//vendor/k8s.io/client-go/util/retry:go_default_library",
|
||||
|
|
|
@ -37,7 +37,6 @@ import (
|
|||
"k8s.io/apimachinery/pkg/types"
|
||||
|
||||
"k8s.io/apimachinery/pkg/runtime/schema"
|
||||
"k8s.io/client-go/kubernetes/scheme"
|
||||
"k8s.io/client-go/tools/cache"
|
||||
"k8s.io/client-go/util/retry"
|
||||
)
|
||||
|
@ -243,11 +242,7 @@ func (rh *realHistory) CreateControllerRevision(parent metav1.Object, revision *
|
|||
}
|
||||
|
||||
// Clone the input
|
||||
any, err := scheme.Scheme.DeepCopy(revision)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
clone := any.(*apps.ControllerRevision)
|
||||
clone := revision.DeepCopy()
|
||||
|
||||
// Continue to attempt to create the revision updating the name with a new hash on each iteration
|
||||
for {
|
||||
|
@ -264,12 +259,8 @@ func (rh *realHistory) CreateControllerRevision(parent metav1.Object, revision *
|
|||
}
|
||||
|
||||
func (rh *realHistory) UpdateControllerRevision(revision *apps.ControllerRevision, newRevision int64) (*apps.ControllerRevision, error) {
|
||||
obj, err := scheme.Scheme.DeepCopy(revision)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
clone := obj.(*apps.ControllerRevision)
|
||||
err = retry.RetryOnConflict(retry.DefaultBackoff, func() error {
|
||||
clone := revision.DeepCopy()
|
||||
err := retry.RetryOnConflict(retry.DefaultBackoff, func() error {
|
||||
if clone.Revision == newRevision {
|
||||
return nil
|
||||
}
|
||||
|
@ -283,11 +274,7 @@ func (rh *realHistory) UpdateControllerRevision(revision *apps.ControllerRevisio
|
|||
}
|
||||
if updated, err := rh.lister.ControllerRevisions(clone.Namespace).Get(clone.Name); err == nil {
|
||||
// make a copy so we don't mutate the shared cache
|
||||
obj, err := scheme.Scheme.DeepCopy(updated)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
clone = obj.(*apps.ControllerRevision)
|
||||
clone = updated.DeepCopy()
|
||||
}
|
||||
return updateErr
|
||||
})
|
||||
|
@ -375,11 +362,7 @@ func (fh *fakeHistory) CreateControllerRevision(parent metav1.Object, revision *
|
|||
}
|
||||
|
||||
// Clone the input
|
||||
any, err := scheme.Scheme.DeepCopy(revision)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
clone := any.(*apps.ControllerRevision)
|
||||
clone := revision.DeepCopy()
|
||||
clone.Namespace = parent.GetNamespace()
|
||||
|
||||
// Continue to attempt to create the revision updating the name with a new hash on each iteration
|
||||
|
@ -412,11 +395,7 @@ func (fh *fakeHistory) DeleteControllerRevision(revision *apps.ControllerRevisio
|
|||
}
|
||||
|
||||
func (fh *fakeHistory) UpdateControllerRevision(revision *apps.ControllerRevision, newRevision int64) (*apps.ControllerRevision, error) {
|
||||
obj, err := scheme.Scheme.DeepCopy(revision)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
clone := obj.(*apps.ControllerRevision)
|
||||
clone := revision.DeepCopy()
|
||||
clone.Revision = newRevision
|
||||
return clone, fh.indexer.Update(clone)
|
||||
}
|
||||
|
@ -438,11 +417,7 @@ func (fh *fakeHistory) AdoptControllerRevision(parent metav1.Object, parentKind
|
|||
if !found {
|
||||
return nil, errors.NewNotFound(apps.Resource("controllerrevisions"), revision.Name)
|
||||
}
|
||||
obj2, err := scheme.Scheme.DeepCopy(revision)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
clone := obj2.(*apps.ControllerRevision)
|
||||
clone := revision.DeepCopy()
|
||||
clone.OwnerReferences = append(clone.OwnerReferences, metav1.OwnerReference{
|
||||
APIVersion: parentKind.GroupVersion().String(),
|
||||
Kind: parentKind.Kind,
|
||||
|
@ -467,11 +442,7 @@ func (fh *fakeHistory) ReleaseControllerRevision(parent metav1.Object, revision
|
|||
if !found {
|
||||
return nil, nil
|
||||
}
|
||||
obj2, err := scheme.Scheme.DeepCopy(revision)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
clone := obj2.(*apps.ControllerRevision)
|
||||
clone := revision.DeepCopy()
|
||||
refs := clone.OwnerReferences
|
||||
clone.OwnerReferences = nil
|
||||
for i := range refs {
|
||||
|
|
|
@ -559,12 +559,7 @@ func (nc *NodeController) monitorNodeStatus() error {
|
|||
var gracePeriod time.Duration
|
||||
var observedReadyCondition v1.NodeCondition
|
||||
var currentReadyCondition *v1.NodeCondition
|
||||
nodeCopy, err := scheme.Scheme.DeepCopy(nodes[i])
|
||||
if err != nil {
|
||||
utilruntime.HandleError(err)
|
||||
continue
|
||||
}
|
||||
node := nodeCopy.(*v1.Node)
|
||||
node := nodes[i].DeepCopy()
|
||||
if err := wait.PollImmediate(retrySleepTime, retrySleepTime*scheduler.NodeStatusUpdateRetry, func() (bool, error) {
|
||||
gracePeriod, observedReadyCondition, currentReadyCondition, err = nc.tryUpdateNodeStatus(node)
|
||||
if err == nil {
|
||||
|
|
|
@ -24,7 +24,6 @@ go_library(
|
|||
"//vendor/k8s.io/apimachinery/pkg/util/errors:go_default_library",
|
||||
"//vendor/k8s.io/apimachinery/pkg/util/runtime:go_default_library",
|
||||
"//vendor/k8s.io/client-go/kubernetes:go_default_library",
|
||||
"//vendor/k8s.io/client-go/kubernetes/scheme:go_default_library",
|
||||
"//vendor/k8s.io/client-go/listers/extensions/v1beta1:go_default_library",
|
||||
"//vendor/k8s.io/client-go/tools/cache:go_default_library",
|
||||
"//vendor/k8s.io/client-go/tools/record:go_default_library",
|
||||
|
|
|
@ -27,8 +27,6 @@ import (
|
|||
"k8s.io/apimachinery/pkg/types"
|
||||
utilerrors "k8s.io/apimachinery/pkg/util/errors"
|
||||
utilruntime "k8s.io/apimachinery/pkg/util/runtime"
|
||||
|
||||
"k8s.io/client-go/kubernetes/scheme"
|
||||
"k8s.io/client-go/tools/cache"
|
||||
"k8s.io/client-go/tools/record"
|
||||
|
||||
|
@ -287,13 +285,7 @@ func SwapNodeControllerTaint(kubeClient clientset.Interface, taintToAdd, taintTo
|
|||
// CreateAddNodeHandler creates an add node handler.
|
||||
func CreateAddNodeHandler(f func(node *v1.Node) error) func(obj interface{}) {
|
||||
return func(originalObj interface{}) {
|
||||
obj, err := scheme.Scheme.DeepCopy(originalObj)
|
||||
if err != nil {
|
||||
utilruntime.HandleError(err)
|
||||
return
|
||||
}
|
||||
node := obj.(*v1.Node)
|
||||
|
||||
node := originalObj.(*v1.Node).DeepCopy()
|
||||
if err := f(node); err != nil {
|
||||
utilruntime.HandleError(fmt.Errorf("Error while processing Node Delete: %v", err))
|
||||
}
|
||||
|
@ -303,18 +295,8 @@ func CreateAddNodeHandler(f func(node *v1.Node) error) func(obj interface{}) {
|
|||
// CreateUpdateNodeHandler creates a node update handler.
|
||||
func CreateUpdateNodeHandler(f func(oldNode, newNode *v1.Node) error) func(oldObj, newObj interface{}) {
|
||||
return func(origOldObj, origNewObj interface{}) {
|
||||
oldObj, err := scheme.Scheme.DeepCopy(origOldObj)
|
||||
if err != nil {
|
||||
utilruntime.HandleError(err)
|
||||
return
|
||||
}
|
||||
newObj, err := scheme.Scheme.DeepCopy(origNewObj)
|
||||
if err != nil {
|
||||
utilruntime.HandleError(err)
|
||||
return
|
||||
}
|
||||
node := newObj.(*v1.Node)
|
||||
prevNode := oldObj.(*v1.Node)
|
||||
node := origNewObj.(*v1.Node).DeepCopy()
|
||||
prevNode := origOldObj.(*v1.Node).DeepCopy()
|
||||
|
||||
if err := f(prevNode, node); err != nil {
|
||||
utilruntime.HandleError(fmt.Errorf("Error while processing Node Add/Delete: %v", err))
|
||||
|
@ -325,28 +307,22 @@ func CreateUpdateNodeHandler(f func(oldNode, newNode *v1.Node) error) func(oldOb
|
|||
// CreateDeleteNodeHandler creates a delete node handler.
|
||||
func CreateDeleteNodeHandler(f func(node *v1.Node) error) func(obj interface{}) {
|
||||
return func(originalObj interface{}) {
|
||||
obj, err := scheme.Scheme.DeepCopy(originalObj)
|
||||
if err != nil {
|
||||
utilruntime.HandleError(err)
|
||||
return
|
||||
}
|
||||
|
||||
node, isNode := obj.(*v1.Node)
|
||||
originalNode, isNode := originalObj.(*v1.Node)
|
||||
// We can get DeletedFinalStateUnknown instead of *v1.Node here and
|
||||
// we need to handle that correctly. #34692
|
||||
if !isNode {
|
||||
deletedState, ok := obj.(cache.DeletedFinalStateUnknown)
|
||||
deletedState, ok := originalObj.(cache.DeletedFinalStateUnknown)
|
||||
if !ok {
|
||||
glog.Errorf("Received unexpected object: %v", obj)
|
||||
glog.Errorf("Received unexpected object: %v", originalObj)
|
||||
return
|
||||
}
|
||||
node, ok = deletedState.Obj.(*v1.Node)
|
||||
originalNode, ok = deletedState.Obj.(*v1.Node)
|
||||
if !ok {
|
||||
glog.Errorf("DeletedFinalStateUnknown contained non-Node object: %v", deletedState.Obj)
|
||||
return
|
||||
}
|
||||
}
|
||||
|
||||
node := originalNode.DeepCopy()
|
||||
if err := f(node); err != nil {
|
||||
utilruntime.HandleError(fmt.Errorf("Error while processing Node Add/Delete: %v", err))
|
||||
}
|
||||
|
|
|
@ -358,26 +358,15 @@ func (a *HorizontalController) reconcileKey(key string) error {
|
|||
|
||||
func (a *HorizontalController) reconcileAutoscaler(hpav1Shared *autoscalingv1.HorizontalPodAutoscaler) error {
|
||||
// make a copy so that we never mutate the shared informer cache (conversion can mutate the object)
|
||||
hpav1Raw, err := scheme.Scheme.DeepCopy(hpav1Shared)
|
||||
if err != nil {
|
||||
a.eventRecorder.Event(hpav1Shared, v1.EventTypeWarning, "FailedConvertHPA", err.Error())
|
||||
return fmt.Errorf("failed to deep-copy the HPA: %v", err)
|
||||
}
|
||||
|
||||
hpav1 := hpav1Shared.DeepCopy()
|
||||
// then, convert to autoscaling/v2, which makes our lives easier when calculating metrics
|
||||
hpav1 := hpav1Raw.(*autoscalingv1.HorizontalPodAutoscaler)
|
||||
hpaRaw, err := UnsafeConvertToVersionVia(hpav1, autoscalingv2.SchemeGroupVersion)
|
||||
if err != nil {
|
||||
a.eventRecorder.Event(hpav1, v1.EventTypeWarning, "FailedConvertHPA", err.Error())
|
||||
return fmt.Errorf("failed to convert the given HPA to %s: %v", autoscalingv2.SchemeGroupVersion.String(), err)
|
||||
}
|
||||
hpa := hpaRaw.(*autoscalingv2.HorizontalPodAutoscaler)
|
||||
hpaStatusOriginalRaw, err := scheme.Scheme.DeepCopy(&hpa.Status)
|
||||
if err != nil {
|
||||
a.eventRecorder.Event(hpav1Shared, v1.EventTypeWarning, "FailedConvertHPA", err.Error())
|
||||
return fmt.Errorf("failed to deep-copy the HPA status: %v", err)
|
||||
}
|
||||
hpaStatusOriginal := hpaStatusOriginalRaw.(*autoscalingv2.HorizontalPodAutoscalerStatus)
|
||||
hpaStatusOriginal := hpa.Status.DeepCopy()
|
||||
|
||||
reference := fmt.Sprintf("%s/%s/%s", hpa.Spec.ScaleTargetRef.Kind, hpa.Namespace, hpa.Spec.ScaleTargetRef.Name)
|
||||
|
||||
|
|
|
@ -606,11 +606,7 @@ func (rsc *ReplicaSetController) syncReplicaSet(key string) error {
|
|||
manageReplicasErr = rsc.manageReplicas(filteredPods, rs)
|
||||
}
|
||||
|
||||
copy, err := scheme.Scheme.DeepCopy(rs)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
rs = copy.(*extensions.ReplicaSet)
|
||||
rs = rs.DeepCopy()
|
||||
|
||||
newStatus := calculateStatus(rs, filteredPods, manageReplicasErr)
|
||||
|
||||
|
|
|
@ -621,11 +621,7 @@ func (rm *ReplicationManager) syncReplicationController(key string) error {
|
|||
}
|
||||
trace.Step("manageReplicas done")
|
||||
|
||||
copy, err := scheme.Scheme.DeepCopy(rc)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
rc = copy.(*v1.ReplicationController)
|
||||
rc = rc.DeepCopy()
|
||||
|
||||
newStatus := calculateStatus(rc, filteredPods, manageReplicasErr)
|
||||
|
||||
|
|
|
@ -47,7 +47,6 @@ go_test(
|
|||
"//vendor/k8s.io/apimachinery/pkg/types:go_default_library",
|
||||
"//vendor/k8s.io/client-go/informers:go_default_library",
|
||||
"//vendor/k8s.io/client-go/kubernetes/fake:go_default_library",
|
||||
"//vendor/k8s.io/client-go/kubernetes/scheme:go_default_library",
|
||||
"//vendor/k8s.io/client-go/tools/record:go_default_library",
|
||||
],
|
||||
)
|
||||
|
|
|
@ -299,11 +299,7 @@ func (s *ServiceController) createLoadBalancerIfNeeded(key string, service *v1.S
|
|||
// TODO: Be careful here ... what if there were other changes to the service?
|
||||
if !v1helper.LoadBalancerStatusEqual(previousState, newState) {
|
||||
// Make a copy so we don't mutate the shared informer cache
|
||||
copy, err := scheme.Scheme.DeepCopy(service)
|
||||
if err != nil {
|
||||
return err, retryable
|
||||
}
|
||||
service = copy.(*v1.Service)
|
||||
service = service.DeepCopy()
|
||||
|
||||
// Update the status on the copy
|
||||
service.Status.LoadBalancer = *newState
|
||||
|
|
|
@ -27,7 +27,6 @@ import (
|
|||
"k8s.io/apimachinery/pkg/types"
|
||||
"k8s.io/client-go/informers"
|
||||
"k8s.io/client-go/kubernetes/fake"
|
||||
"k8s.io/client-go/kubernetes/scheme"
|
||||
"k8s.io/client-go/tools/record"
|
||||
"k8s.io/kubernetes/pkg/api/testapi"
|
||||
fakecloud "k8s.io/kubernetes/pkg/cloudprovider/providers/fake"
|
||||
|
@ -367,11 +366,7 @@ func TestProcessServiceUpdate(t *testing.T) {
|
|||
t.Fatalf("get service key error, expected: %s, got: %s", keyExpected, keyGot.(string))
|
||||
}
|
||||
|
||||
copy, err := scheme.Scheme.DeepCopy(svc)
|
||||
if err != nil {
|
||||
t.Fatalf("copy service error: %v", err)
|
||||
}
|
||||
newService := copy.(*v1.Service)
|
||||
newService := svc.DeepCopy()
|
||||
|
||||
newService.Spec.LoadBalancerIP = newLBIP
|
||||
return newService
|
||||
|
|
|
@ -16,7 +16,6 @@ go_library(
|
|||
"stateful_set_utils.go",
|
||||
],
|
||||
deps = [
|
||||
"//pkg/api:go_default_library",
|
||||
"//pkg/api/v1/pod:go_default_library",
|
||||
"//pkg/controller:go_default_library",
|
||||
"//pkg/controller/history:go_default_library",
|
||||
|
@ -56,7 +55,6 @@ go_test(
|
|||
],
|
||||
library = ":go_default_library",
|
||||
deps = [
|
||||
"//pkg/api:go_default_library",
|
||||
"//pkg/api/install:go_default_library",
|
||||
"//pkg/api/v1/pod:go_default_library",
|
||||
"//pkg/apis/apps/install:go_default_library",
|
||||
|
@ -81,7 +79,6 @@ go_test(
|
|||
"//vendor/k8s.io/client-go/testing:go_default_library",
|
||||
"//vendor/k8s.io/client-go/tools/cache:go_default_library",
|
||||
"//vendor/k8s.io/client-go/tools/record:go_default_library",
|
||||
"//vendor/k8s.io/metrics/pkg/client/clientset_generated/clientset/scheme:go_default_library",
|
||||
],
|
||||
)
|
||||
|
||||
|
|
|
@ -26,7 +26,6 @@ import (
|
|||
errorutils "k8s.io/apimachinery/pkg/util/errors"
|
||||
utilruntime "k8s.io/apimachinery/pkg/util/runtime"
|
||||
clientset "k8s.io/client-go/kubernetes"
|
||||
"k8s.io/client-go/kubernetes/scheme"
|
||||
appslisters "k8s.io/client-go/listers/apps/v1beta1"
|
||||
corelisters "k8s.io/client-go/listers/core/v1"
|
||||
"k8s.io/client-go/tools/record"
|
||||
|
@ -121,11 +120,7 @@ func (spc *realStatefulPodControl) UpdateStatefulPod(set *apps.StatefulSet, pod
|
|||
|
||||
if updated, err := spc.podLister.Pods(set.Namespace).Get(pod.Name); err == nil {
|
||||
// make a copy so we don't mutate the shared cache
|
||||
if copy, err := scheme.Scheme.DeepCopy(updated); err == nil {
|
||||
pod = copy.(*v1.Pod)
|
||||
} else {
|
||||
utilruntime.HandleError(fmt.Errorf("error copying updated Pod: %v", err))
|
||||
}
|
||||
pod = updated.DeepCopy()
|
||||
} else {
|
||||
utilruntime.HandleError(fmt.Errorf("error getting updated Pod %s/%s from lister: %v", set.Namespace, pod.Name, err))
|
||||
}
|
||||
|
|
|
@ -38,7 +38,6 @@ import (
|
|||
"k8s.io/client-go/tools/cache"
|
||||
"k8s.io/client-go/tools/record"
|
||||
"k8s.io/client-go/util/workqueue"
|
||||
"k8s.io/kubernetes/pkg/api"
|
||||
"k8s.io/kubernetes/pkg/controller"
|
||||
"k8s.io/kubernetes/pkg/controller/history"
|
||||
|
||||
|
@ -451,11 +450,7 @@ func (ssc *StatefulSetController) sync(key string) error {
|
|||
func (ssc *StatefulSetController) syncStatefulSet(set *apps.StatefulSet, pods []*v1.Pod) error {
|
||||
glog.V(4).Infof("Syncing StatefulSet %v/%v with %d pods", set.Namespace, set.Name, len(pods))
|
||||
// TODO: investigate where we mutate the set during the update as it is not obvious.
|
||||
setCopy, err := api.Scheme.DeepCopy(set)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if err := ssc.control.UpdateStatefulSet(setCopy.(*apps.StatefulSet), pods); err != nil {
|
||||
if err := ssc.control.UpdateStatefulSet(set.DeepCopy(), pods); err != nil {
|
||||
return err
|
||||
}
|
||||
glog.V(4).Infof("Successfully synced StatefulSet %s/%s successful", set.Namespace, set.Name)
|
||||
|
|
|
@ -20,13 +20,12 @@ import (
|
|||
"math"
|
||||
"sort"
|
||||
|
||||
"github.com/golang/glog"
|
||||
|
||||
apps "k8s.io/api/apps/v1beta1"
|
||||
"k8s.io/api/core/v1"
|
||||
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
"k8s.io/client-go/kubernetes/scheme"
|
||||
"k8s.io/kubernetes/pkg/controller/history"
|
||||
|
||||
"github.com/golang/glog"
|
||||
)
|
||||
|
||||
// StatefulSetControl implements the control logic for updating StatefulSets and their children Pods. It is implemented
|
||||
|
@ -433,11 +432,7 @@ func (ssc *defaultStatefulSetControl) updateStatefulSet(
|
|||
continue
|
||||
}
|
||||
// Make a deep copy so we don't mutate the shared cache
|
||||
copy, err := scheme.Scheme.DeepCopy(replicas[i])
|
||||
if err != nil {
|
||||
return &status, err
|
||||
}
|
||||
replica := copy.(*v1.Pod)
|
||||
replica := replicas[i].DeepCopy()
|
||||
if err := ssc.podControl.UpdateStatefulPod(updateSet, replica); err != nil {
|
||||
return &status, err
|
||||
}
|
||||
|
@ -543,11 +538,7 @@ func (ssc *defaultStatefulSetControl) updateStatefulSetStatus(
|
|||
}
|
||||
|
||||
// copy set and update its status
|
||||
obj, err := scheme.Scheme.Copy(set)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
set = obj.(*apps.StatefulSet)
|
||||
set = set.DeepCopy()
|
||||
if err := ssc.statusUpdater.UpdateStatefulSetStatus(set, status); err != nil {
|
||||
return err
|
||||
}
|
||||
|
|
|
@ -41,11 +41,9 @@ import (
|
|||
appslisters "k8s.io/client-go/listers/apps/v1beta1"
|
||||
corelisters "k8s.io/client-go/listers/core/v1"
|
||||
"k8s.io/client-go/tools/cache"
|
||||
"k8s.io/kubernetes/pkg/api"
|
||||
podutil "k8s.io/kubernetes/pkg/api/v1/pod"
|
||||
"k8s.io/kubernetes/pkg/controller"
|
||||
"k8s.io/kubernetes/pkg/controller/history"
|
||||
"k8s.io/metrics/pkg/client/clientset_generated/clientset/scheme"
|
||||
)
|
||||
|
||||
type invariantFunc func(set *apps.StatefulSet, spc *fakeStatefulPodControl) error
|
||||
|
@ -502,11 +500,7 @@ func TestStatefulSetControl_getSetRevisions(t *testing.T) {
|
|||
}
|
||||
|
||||
updateRevision := func(cr *apps.ControllerRevision, revision int64) *apps.ControllerRevision {
|
||||
obj, err := scheme.Scheme.DeepCopy(cr)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
clone := obj.(*apps.ControllerRevision)
|
||||
clone := cr.DeepCopy()
|
||||
clone.Revision = revision
|
||||
return clone
|
||||
}
|
||||
|
@ -514,12 +508,12 @@ func TestStatefulSetControl_getSetRevisions(t *testing.T) {
|
|||
set := newStatefulSet(3)
|
||||
set.Status.CollisionCount = new(int32)
|
||||
rev0 := newRevisionOrDie(set, 1)
|
||||
set1 := copySet(set)
|
||||
set1 := set.DeepCopy()
|
||||
set1.Spec.Template.Spec.Containers[0].Image = "foo"
|
||||
set1.Status.CurrentRevision = rev0.Name
|
||||
set1.Status.CollisionCount = new(int32)
|
||||
rev1 := newRevisionOrDie(set1, 2)
|
||||
set2 := copySet(set1)
|
||||
set2 := set1.DeepCopy()
|
||||
set2.Spec.Template.Labels["new"] = "label"
|
||||
set2.Status.CurrentRevision = rev0.Name
|
||||
set2.Status.CollisionCount = new(int32)
|
||||
|
@ -1549,22 +1543,6 @@ func (spc *fakeStatefulPodControl) SetDeleteStatefulPodError(err error, after in
|
|||
spc.deletePodTracker.after = after
|
||||
}
|
||||
|
||||
func copyPod(pod *v1.Pod) *v1.Pod {
|
||||
obj, err := api.Scheme.Copy(pod)
|
||||
if err != nil {
|
||||
panic(err)
|
||||
}
|
||||
return obj.(*v1.Pod)
|
||||
}
|
||||
|
||||
func copySet(set *apps.StatefulSet) *apps.StatefulSet {
|
||||
obj, err := scheme.Scheme.Copy(set)
|
||||
if err != nil {
|
||||
panic(err)
|
||||
}
|
||||
return obj.(*apps.StatefulSet)
|
||||
}
|
||||
|
||||
func (spc *fakeStatefulPodControl) setPodPending(set *apps.StatefulSet, ordinal int) ([]*v1.Pod, error) {
|
||||
selector, err := metav1.LabelSelectorAsSelector(set.Spec.Selector)
|
||||
if err != nil {
|
||||
|
@ -1578,7 +1556,7 @@ func (spc *fakeStatefulPodControl) setPodPending(set *apps.StatefulSet, ordinal
|
|||
return nil, fmt.Errorf("ordinal %d out of range [0,%d)", ordinal, len(pods))
|
||||
}
|
||||
sort.Sort(ascendingOrdinal(pods))
|
||||
pod := copyPod(pods[ordinal])
|
||||
pod := pods[ordinal].DeepCopy()
|
||||
pod.Status.Phase = v1.PodPending
|
||||
fakeResourceVersion(pod)
|
||||
spc.podsIndexer.Update(pod)
|
||||
|
@ -1598,7 +1576,7 @@ func (spc *fakeStatefulPodControl) setPodRunning(set *apps.StatefulSet, ordinal
|
|||
return nil, fmt.Errorf("ordinal %d out of range [0,%d)", ordinal, len(pods))
|
||||
}
|
||||
sort.Sort(ascendingOrdinal(pods))
|
||||
pod := copyPod(pods[ordinal])
|
||||
pod := pods[ordinal].DeepCopy()
|
||||
pod.Status.Phase = v1.PodRunning
|
||||
fakeResourceVersion(pod)
|
||||
spc.podsIndexer.Update(pod)
|
||||
|
@ -1618,7 +1596,7 @@ func (spc *fakeStatefulPodControl) setPodReady(set *apps.StatefulSet, ordinal in
|
|||
return nil, fmt.Errorf("ordinal %d out of range [0,%d)", ordinal, len(pods))
|
||||
}
|
||||
sort.Sort(ascendingOrdinal(pods))
|
||||
pod := copyPod(pods[ordinal])
|
||||
pod := pods[ordinal].DeepCopy()
|
||||
condition := v1.PodCondition{Type: v1.PodReady, Status: v1.ConditionTrue}
|
||||
podutil.UpdatePodCondition(&pod.Status, &condition)
|
||||
fakeResourceVersion(pod)
|
||||
|
|
|
@ -22,7 +22,6 @@ import (
|
|||
apps "k8s.io/api/apps/v1beta1"
|
||||
utilruntime "k8s.io/apimachinery/pkg/util/runtime"
|
||||
clientset "k8s.io/client-go/kubernetes"
|
||||
"k8s.io/client-go/kubernetes/scheme"
|
||||
appslisters "k8s.io/client-go/listers/apps/v1beta1"
|
||||
"k8s.io/client-go/util/retry"
|
||||
)
|
||||
|
@ -60,11 +59,7 @@ func (ssu *realStatefulSetStatusUpdater) UpdateStatefulSetStatus(
|
|||
}
|
||||
if updated, err := ssu.setLister.StatefulSets(set.Namespace).Get(set.Name); err == nil {
|
||||
// make a copy so we don't mutate the shared cache
|
||||
if copy, err := scheme.Scheme.DeepCopy(updated); err == nil {
|
||||
set = copy.(*apps.StatefulSet)
|
||||
} else {
|
||||
utilruntime.HandleError(fmt.Errorf("error copying updated StatefulSet: %v", err))
|
||||
}
|
||||
set = updated.DeepCopy()
|
||||
} else {
|
||||
utilruntime.HandleError(fmt.Errorf("error getting updated StatefulSet %s/%s from lister: %v", set.Namespace, set.Name, err))
|
||||
}
|
||||
|
|
|
@ -332,11 +332,7 @@ func newRevision(set *apps.StatefulSet, revision int64, collisionCount *int32) (
|
|||
// ApplyRevision returns a new StatefulSet constructed by restoring the state in revision to set. If the returned error
|
||||
// is nil, the returned StatefulSet is valid.
|
||||
func ApplyRevision(set *apps.StatefulSet, revision *apps.ControllerRevision) (*apps.StatefulSet, error) {
|
||||
obj, err := scheme.Scheme.DeepCopy(set)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
clone := obj.(*apps.StatefulSet)
|
||||
clone := set.DeepCopy()
|
||||
patched, err := strategicpatch.StrategicMergePatch([]byte(runtime.EncodeOrDie(patchCodec, clone)), revision.Data.Raw, clone)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
|
|
|
@ -21,7 +21,6 @@ go_library(
|
|||
"//vendor/k8s.io/apimachinery/pkg/util/wait:go_default_library",
|
||||
"//vendor/k8s.io/client-go/informers/core/v1:go_default_library",
|
||||
"//vendor/k8s.io/client-go/kubernetes:go_default_library",
|
||||
"//vendor/k8s.io/client-go/kubernetes/scheme:go_default_library",
|
||||
"//vendor/k8s.io/client-go/listers/core/v1:go_default_library",
|
||||
"//vendor/k8s.io/client-go/tools/cache:go_default_library",
|
||||
"//vendor/k8s.io/client-go/util/workqueue:go_default_library",
|
||||
|
|
|
@ -42,7 +42,6 @@ import (
|
|||
"k8s.io/apimachinery/pkg/util/wait"
|
||||
informers "k8s.io/client-go/informers/core/v1"
|
||||
clientset "k8s.io/client-go/kubernetes"
|
||||
"k8s.io/client-go/kubernetes/scheme"
|
||||
listers "k8s.io/client-go/listers/core/v1"
|
||||
"k8s.io/client-go/tools/cache"
|
||||
"k8s.io/client-go/util/workqueue"
|
||||
|
@ -288,9 +287,5 @@ func (ttlc *TTLController) updateNodeIfNeeded(key string) error {
|
|||
return nil
|
||||
}
|
||||
|
||||
objCopy, err := scheme.Scheme.DeepCopy(node)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
return ttlc.patchNodeWithAnnotation(objCopy.(*v1.Node), v1.ObjectTTLAnnotationKey, desiredTTL)
|
||||
return ttlc.patchNodeWithAnnotation(node.DeepCopy(), v1.ObjectTTLAnnotationKey, desiredTTL)
|
||||
}
|
||||
|
|
|
@ -19,7 +19,6 @@ go_library(
|
|||
"//vendor/k8s.io/apimachinery/pkg/types:go_default_library",
|
||||
"//vendor/k8s.io/apimachinery/pkg/util/strategicpatch:go_default_library",
|
||||
"//vendor/k8s.io/client-go/kubernetes:go_default_library",
|
||||
"//vendor/k8s.io/client-go/kubernetes/scheme:go_default_library",
|
||||
"//vendor/k8s.io/client-go/listers/core/v1:go_default_library",
|
||||
],
|
||||
)
|
||||
|
|
|
@ -29,7 +29,6 @@ import (
|
|||
"k8s.io/apimachinery/pkg/types"
|
||||
"k8s.io/apimachinery/pkg/util/strategicpatch"
|
||||
clientset "k8s.io/client-go/kubernetes"
|
||||
"k8s.io/client-go/kubernetes/scheme"
|
||||
corelisters "k8s.io/client-go/listers/core/v1"
|
||||
"k8s.io/kubernetes/pkg/controller/volume/attachdetach/cache"
|
||||
)
|
||||
|
@ -100,20 +99,7 @@ func (nsu *nodeStatusUpdater) UpdateNodeStatuses() error {
|
|||
}
|
||||
|
||||
func (nsu *nodeStatusUpdater) updateNodeStatus(nodeName types.NodeName, nodeObj *v1.Node, attachedVolumes []v1.AttachedVolume) error {
|
||||
clonedNode, err := scheme.Scheme.DeepCopy(nodeObj)
|
||||
if err != nil {
|
||||
return fmt.Errorf("error cloning node %q: %v",
|
||||
nodeName,
|
||||
err)
|
||||
}
|
||||
|
||||
node, ok := clonedNode.(*v1.Node)
|
||||
if !ok || node == nil {
|
||||
return fmt.Errorf(
|
||||
"failed to cast %q object %#v to Node",
|
||||
nodeName,
|
||||
clonedNode)
|
||||
}
|
||||
node := nodeObj.DeepCopy()
|
||||
|
||||
// TODO: Change to pkg/util/node.UpdateNodeStatus.
|
||||
oldData, err := json.Marshal(node)
|
||||
|
|
|
@ -15,7 +15,6 @@ go_library(
|
|||
"//vendor/github.com/golang/glog:go_default_library",
|
||||
"//vendor/k8s.io/api/core/v1:go_default_library",
|
||||
"//vendor/k8s.io/apimachinery/pkg/types:go_default_library",
|
||||
"//vendor/k8s.io/client-go/kubernetes/scheme:go_default_library",
|
||||
"//vendor/k8s.io/client-go/listers/core/v1:go_default_library",
|
||||
],
|
||||
)
|
||||
|
|
|
@ -22,7 +22,6 @@ import (
|
|||
"github.com/golang/glog"
|
||||
"k8s.io/api/core/v1"
|
||||
"k8s.io/apimachinery/pkg/types"
|
||||
"k8s.io/client-go/kubernetes/scheme"
|
||||
corelisters "k8s.io/client-go/listers/core/v1"
|
||||
"k8s.io/kubernetes/pkg/controller/volume/attachdetach/cache"
|
||||
"k8s.io/kubernetes/pkg/volume"
|
||||
|
@ -80,16 +79,7 @@ func CreateVolumeSpec(podVolume v1.Volume, podNamespace string, pvcLister coreli
|
|||
|
||||
// Do not return the original volume object, since it's from the shared
|
||||
// informer it may be mutated by another consumer.
|
||||
clonedPodVolumeObj, err := scheme.Scheme.DeepCopy(&podVolume)
|
||||
if err != nil || clonedPodVolumeObj == nil {
|
||||
return nil, fmt.Errorf(
|
||||
"failed to deep copy %q volume object. err=%v", podVolume.Name, err)
|
||||
}
|
||||
|
||||
clonedPodVolume, ok := clonedPodVolumeObj.(*v1.Volume)
|
||||
if !ok {
|
||||
return nil, fmt.Errorf("failed to cast clonedPodVolume %#v to v1.Volume", clonedPodVolumeObj)
|
||||
}
|
||||
clonedPodVolume := podVolume.DeepCopy()
|
||||
|
||||
return volume.NewSpecFromVolume(clonedPodVolume), nil
|
||||
}
|
||||
|
@ -146,15 +136,7 @@ func getPVSpecFromCache(name string, pvcReadOnly bool, expectedClaimUID types.UI
|
|||
|
||||
// Do not return the object from the informer, since the store is shared it
|
||||
// may be mutated by another consumer.
|
||||
clonedPVObj, err := scheme.Scheme.DeepCopy(pv)
|
||||
if err != nil || clonedPVObj == nil {
|
||||
return nil, fmt.Errorf("failed to deep copy %q PV object. err=%v", name, err)
|
||||
}
|
||||
|
||||
clonedPV, ok := clonedPVObj.(*v1.PersistentVolume)
|
||||
if !ok {
|
||||
return nil, fmt.Errorf("failed to cast %q clonedPV %#v to PersistentVolume", name, pv)
|
||||
}
|
||||
clonedPV := pv.DeepCopy()
|
||||
|
||||
return volume.NewSpecFromPersistentVolume(clonedPV, pvcReadOnly), nil
|
||||
}
|
||||
|
|
|
@ -41,7 +41,6 @@ import (
|
|||
"k8s.io/client-go/informers"
|
||||
clientset "k8s.io/client-go/kubernetes"
|
||||
"k8s.io/client-go/kubernetes/fake"
|
||||
"k8s.io/client-go/kubernetes/scheme"
|
||||
storagelisters "k8s.io/client-go/listers/storage/v1"
|
||||
core "k8s.io/client-go/testing"
|
||||
"k8s.io/client-go/tools/cache"
|
||||
|
@ -308,8 +307,7 @@ func (r *volumeReactor) checkVolumes(expectedVolumes []*v1.PersistentVolume) err
|
|||
for _, v := range r.volumes {
|
||||
// We must clone the volume because of golang race check - it was
|
||||
// written by the controller without any locks on it.
|
||||
clone, _ := scheme.Scheme.DeepCopy(v)
|
||||
v = clone.(*v1.PersistentVolume)
|
||||
v := v.DeepCopy()
|
||||
v.ResourceVersion = ""
|
||||
if v.Spec.ClaimRef != nil {
|
||||
v.Spec.ClaimRef.ResourceVersion = ""
|
||||
|
@ -339,8 +337,7 @@ func (r *volumeReactor) checkClaims(expectedClaims []*v1.PersistentVolumeClaim)
|
|||
for _, c := range r.claims {
|
||||
// We must clone the claim because of golang race check - it was
|
||||
// written by the controller without any locks on it.
|
||||
clone, _ := scheme.Scheme.DeepCopy(c)
|
||||
c = clone.(*v1.PersistentVolumeClaim)
|
||||
c = c.DeepCopy()
|
||||
c.ResourceVersion = ""
|
||||
gotMap[c.Name] = c
|
||||
}
|
||||
|
@ -508,9 +505,7 @@ func (r *volumeReactor) deleteVolumeEvent(volume *v1.PersistentVolume) {
|
|||
// Generate deletion event. Cloned volume is needed to prevent races (and we
|
||||
// would get a clone from etcd too).
|
||||
if r.fakeVolumeWatch != nil {
|
||||
clone, _ := scheme.Scheme.DeepCopy(volume)
|
||||
volumeClone := clone.(*v1.PersistentVolume)
|
||||
r.fakeVolumeWatch.Delete(volumeClone)
|
||||
r.fakeVolumeWatch.Delete(volume.DeepCopy())
|
||||
}
|
||||
}
|
||||
|
||||
|
@ -526,9 +521,7 @@ func (r *volumeReactor) deleteClaimEvent(claim *v1.PersistentVolumeClaim) {
|
|||
// Generate deletion event. Cloned volume is needed to prevent races (and we
|
||||
// would get a clone from etcd too).
|
||||
if r.fakeClaimWatch != nil {
|
||||
clone, _ := scheme.Scheme.DeepCopy(claim)
|
||||
claimClone := clone.(*v1.PersistentVolumeClaim)
|
||||
r.fakeClaimWatch.Delete(claimClone)
|
||||
r.fakeClaimWatch.Delete(claim.DeepCopy())
|
||||
}
|
||||
}
|
||||
|
||||
|
@ -556,9 +549,7 @@ func (r *volumeReactor) modifyVolumeEvent(volume *v1.PersistentVolume) {
|
|||
// Generate deletion event. Cloned volume is needed to prevent races (and we
|
||||
// would get a clone from etcd too).
|
||||
if r.fakeVolumeWatch != nil {
|
||||
clone, _ := scheme.Scheme.DeepCopy(volume)
|
||||
volumeClone := clone.(*v1.PersistentVolume)
|
||||
r.fakeVolumeWatch.Modify(volumeClone)
|
||||
r.fakeVolumeWatch.Modify(volume.DeepCopy())
|
||||
}
|
||||
}
|
||||
|
||||
|
|
|
@ -596,15 +596,7 @@ func (ctrl *PersistentVolumeController) updateClaimStatus(claim *v1.PersistentVo
|
|||
|
||||
dirty := false
|
||||
|
||||
clone, err := scheme.Scheme.DeepCopy(claim)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("Error cloning claim: %v", err)
|
||||
}
|
||||
claimClone, ok := clone.(*v1.PersistentVolumeClaim)
|
||||
if !ok {
|
||||
return nil, fmt.Errorf("Unexpected claim cast error : %v", claimClone)
|
||||
}
|
||||
|
||||
claimClone := claim.DeepCopy()
|
||||
if claim.Status.Phase != phase {
|
||||
claimClone.Status.Phase = phase
|
||||
dirty = true
|
||||
|
@ -701,15 +693,7 @@ func (ctrl *PersistentVolumeController) updateVolumePhase(volume *v1.PersistentV
|
|||
return volume, nil
|
||||
}
|
||||
|
||||
clone, err := scheme.Scheme.DeepCopy(volume)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("Error cloning claim: %v", err)
|
||||
}
|
||||
volumeClone, ok := clone.(*v1.PersistentVolume)
|
||||
if !ok {
|
||||
return nil, fmt.Errorf("Unexpected volume cast error : %v", volumeClone)
|
||||
}
|
||||
|
||||
volumeClone := volume.DeepCopy()
|
||||
volumeClone.Status.Phase = phase
|
||||
volumeClone.Status.Message = message
|
||||
|
||||
|
@ -766,14 +750,7 @@ func (ctrl *PersistentVolumeController) bindVolumeToClaim(volume *v1.PersistentV
|
|||
|
||||
// The volume from method args can be pointing to watcher cache. We must not
|
||||
// modify these, therefore create a copy.
|
||||
clone, err := scheme.Scheme.DeepCopy(volume)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("Error cloning pv: %v", err)
|
||||
}
|
||||
volumeClone, ok := clone.(*v1.PersistentVolume)
|
||||
if !ok {
|
||||
return nil, fmt.Errorf("Unexpected volume cast error : %v", volumeClone)
|
||||
}
|
||||
volumeClone := volume.DeepCopy()
|
||||
|
||||
// Bind the volume to the claim if it is not bound yet
|
||||
if volume.Spec.ClaimRef == nil ||
|
||||
|
@ -831,14 +808,7 @@ func (ctrl *PersistentVolumeController) bindClaimToVolume(claim *v1.PersistentVo
|
|||
|
||||
// The claim from method args can be pointing to watcher cache. We must not
|
||||
// modify these, therefore create a copy.
|
||||
clone, err := scheme.Scheme.DeepCopy(claim)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("Error cloning claim: %v", err)
|
||||
}
|
||||
claimClone, ok := clone.(*v1.PersistentVolumeClaim)
|
||||
if !ok {
|
||||
return nil, fmt.Errorf("Unexpected claim cast error : %v", claimClone)
|
||||
}
|
||||
claimClone := claim.DeepCopy()
|
||||
|
||||
if shouldBind {
|
||||
dirty = true
|
||||
|
@ -929,15 +899,8 @@ func (ctrl *PersistentVolumeController) bind(volume *v1.PersistentVolume, claim
|
|||
func (ctrl *PersistentVolumeController) unbindVolume(volume *v1.PersistentVolume) error {
|
||||
glog.V(4).Infof("updating PersistentVolume[%s]: rolling back binding from %q", volume.Name, claimrefToClaimKey(volume.Spec.ClaimRef))
|
||||
|
||||
// Save the PV only when any modification is necessary.
|
||||
clone, err := scheme.Scheme.DeepCopy(volume)
|
||||
if err != nil {
|
||||
return fmt.Errorf("Error cloning pv: %v", err)
|
||||
}
|
||||
volumeClone, ok := clone.(*v1.PersistentVolume)
|
||||
if !ok {
|
||||
return fmt.Errorf("Unexpected volume cast error : %v", volumeClone)
|
||||
}
|
||||
// Save the PV only when any modification is neccessary.
|
||||
volumeClone := volume.DeepCopy()
|
||||
|
||||
if metav1.HasAnnotation(volume.ObjectMeta, annBoundByController) {
|
||||
// The volume was bound by the controller.
|
||||
|
|
|
@ -130,12 +130,7 @@ func (ctrl *PersistentVolumeController) initializeCaches(volumeLister corelister
|
|||
return
|
||||
}
|
||||
for _, volume := range volumeList {
|
||||
clone, err := scheme.Scheme.DeepCopy(volume)
|
||||
if err != nil {
|
||||
glog.Errorf("error cloning volume %q: %v", volume.Name, err)
|
||||
continue
|
||||
}
|
||||
volumeClone := clone.(*v1.PersistentVolume)
|
||||
volumeClone := volume.DeepCopy()
|
||||
if _, err = ctrl.storeVolumeUpdate(volumeClone); err != nil {
|
||||
glog.Errorf("error updating volume cache: %v", err)
|
||||
}
|
||||
|
@ -147,13 +142,7 @@ func (ctrl *PersistentVolumeController) initializeCaches(volumeLister corelister
|
|||
return
|
||||
}
|
||||
for _, claim := range claimList {
|
||||
clone, err := scheme.Scheme.DeepCopy(claim)
|
||||
if err != nil {
|
||||
glog.Errorf("error cloning claim %q: %v", claimToClaimKey(claim), err)
|
||||
continue
|
||||
}
|
||||
claimClone := clone.(*v1.PersistentVolumeClaim)
|
||||
if _, err = ctrl.storeClaimUpdate(claimClone); err != nil {
|
||||
if _, err = ctrl.storeClaimUpdate(claim.DeepCopy()); err != nil {
|
||||
glog.Errorf("error updating claim cache: %v", err)
|
||||
}
|
||||
}
|
||||
|
@ -433,14 +422,7 @@ func (ctrl *PersistentVolumeController) setClaimProvisioner(claim *v1.Persistent
|
|||
|
||||
// The volume from method args can be pointing to watcher cache. We must not
|
||||
// modify these, therefore create a copy.
|
||||
clone, err := scheme.Scheme.DeepCopy(claim)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("Error cloning pv: %v", err)
|
||||
}
|
||||
claimClone, ok := clone.(*v1.PersistentVolumeClaim)
|
||||
if !ok {
|
||||
return nil, fmt.Errorf("Unexpected claim cast error : %v", claimClone)
|
||||
}
|
||||
claimClone := claim.DeepCopy()
|
||||
metav1.SetMetaDataAnnotation(&claimClone.ObjectMeta, annStorageProvisioner, class.Provisioner)
|
||||
newClaim, err := ctrl.kubeClient.Core().PersistentVolumeClaims(claim.Namespace).Update(claimClone)
|
||||
if err != nil {
|
||||
|
|
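Nearly every call site in this diff follows the copy-before-mutate rule spelled out in its in-line comments ("make a copy so we don't mutate the shared cache"): an object read from a lister or shared informer is deep-copied before any field is touched, and only the copy is handed to the API client. A hedged sketch of that rule with a hypothetical annotation update (the helper name and annotation key are illustrative, not from the PR):

```go
package main

import (
	corev1 "k8s.io/api/core/v1"
)

// prepareNodeUpdate illustrates the rule the controllers above rely on: never
// mutate an object owned by the shared cache; DeepCopy it first and send the
// copy to the API server. Helper name and annotation key are hypothetical.
func prepareNodeUpdate(cached *corev1.Node, ttl string) *corev1.Node {
	node := cached.DeepCopy() // typed, infallible copy of the cached object
	if node.Annotations == nil {
		node.Annotations = map[string]string{}
	}
	node.Annotations["example.com/ttl"] = ttl
	return node
}

func main() {
	_ = prepareNodeUpdate(&corev1.Node{}, "30s")
}
```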