mirror of https://github.com/k3s-io/k3s
Use PATCHs instead of PUTs in CIDR allocator
parent 028c4c9399
commit 19e56eb42d
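For orientation before the hunks: the allocators previously wrote Node.Spec.PodCIDR by fetching the whole Node and writing it back with Update, which issues an HTTP PUT and can fail on resourceVersion conflicts; after this commit they send a strategic merge PATCH that names only that one field. A minimal sketch of the two shapes, assuming the client-go vintage used in this diff (the clientset c, function names, and values are illustrative, not the committed code):

package sketch

import (
	"fmt"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/types"
	"k8s.io/client-go/kubernetes"
)

// putPodCIDR: the old shape. Read the full Node, mutate one field, and
// write the whole object back; the PUT carries a resourceVersion, so a
// concurrent writer turns this into a conflict error and forces a retry.
func putPodCIDR(c kubernetes.Interface, name, cidr string) error {
	node, err := c.CoreV1().Nodes().Get(name, metav1.GetOptions{})
	if err != nil {
		return err
	}
	node.Spec.PodCIDR = cidr
	_, err = c.CoreV1().Nodes().Update(node)
	return err
}

// patchPodCIDR: the new shape. One strategic-merge-patch request that
// touches only spec.podCIDR and has no resourceVersion to race on.
// (The committed helper builds the same body with json.Marshal.)
func patchPodCIDR(c kubernetes.Interface, name, cidr string) error {
	patch := []byte(fmt.Sprintf(`{"spec":{"podCIDR":%q}}`, cidr))
	_, err := c.CoreV1().Nodes().Patch(name, types.StrategicMergePatchType, patch)
	return err
}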
@@ -201,7 +201,6 @@ func (ca *cloudCIDRAllocator) updateCIDRAllocation(nodeName string) error {
 	podCIDR := cidr.String()
 
 	for rep := 0; rep < cidrUpdateRetries; rep++ {
-		// TODO: change it to using PATCH instead of full Node updates.
 		node, err = ca.nodeLister.Get(nodeName)
 		if err != nil {
 			glog.Errorf("Failed while getting node %v to retry updating Node.Spec.PodCIDR: %v", nodeName, err)
@@ -220,8 +219,7 @@
 			//
 			// See https://github.com/kubernetes/kubernetes/pull/42147#discussion_r103357248
 		}
-		node.Spec.PodCIDR = podCIDR
-		if _, err = ca.client.CoreV1().Nodes().Update(node); err == nil {
+		if err = nodeutil.PatchNodeCIDR(ca.client, types.NodeName(node.Name), podCIDR); err == nil {
 			glog.Infof("Set node %v PodCIDR to %v", node.Name, podCIDR)
 			break
 		}
@@ -25,6 +25,7 @@ import (
 
 	"k8s.io/api/core/v1"
 	apierrors "k8s.io/apimachinery/pkg/api/errors"
+	"k8s.io/apimachinery/pkg/types"
 	utilruntime "k8s.io/apimachinery/pkg/util/runtime"
 	"k8s.io/apimachinery/pkg/util/sets"
 	informers "k8s.io/client-go/informers/core/v1"
@@ -37,6 +38,7 @@ import (
 	"k8s.io/kubernetes/pkg/controller"
 	"k8s.io/kubernetes/pkg/controller/node/ipam/cidrset"
 	"k8s.io/kubernetes/pkg/controller/node/util"
+	nodeutil "k8s.io/kubernetes/pkg/util/node"
 )
 
 type rangeAllocator struct {
@@ -282,7 +284,6 @@ func (r *rangeAllocator) updateCIDRAllocation(data nodeAndCIDR) error {
 
 	podCIDR := data.cidr.String()
 	for rep := 0; rep < cidrUpdateRetries; rep++ {
-		// TODO: change it to using PATCH instead of full Node updates.
 		node, err = r.nodeLister.Get(data.nodeName)
 		if err != nil {
 			glog.Errorf("Failed while getting node %v to retry updating Node.Spec.PodCIDR: %v", data.nodeName, err)
@@ -299,8 +300,7 @@
 			}
 			return nil
 		}
-		node.Spec.PodCIDR = podCIDR
-		if _, err = r.client.CoreV1().Nodes().Update(node); err == nil {
+		if err = nodeutil.PatchNodeCIDR(r.client, types.NodeName(node.Name), podCIDR); err == nil {
 			glog.Infof("Set node %v PodCIDR to %v", node.Name, podCIDR)
 			break
 		}
@@ -333,6 +333,7 @@ func TestReleaseCIDRSuccess(t *testing.T) {
 			}(),
 			serviceCIDR:    nil,
 			subNetMaskSize: 30,
 			allocatedCIDRs: []string{"127.123.234.4/30", "127.123.234.8/30", "127.123.234.12/30"},
+			expectedAllocatedCIDRFirstRound: "127.123.234.0/30",
 			cidrsToRelease: []string{"127.123.234.0/30"},
 			expectedAllocatedCIDRSecondRound: "127.123.234.0/30",
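The expectations in this test case are plain /30 arithmetic: with the .4, .8, and .12 blocks pre-allocated, the first free /30 is 127.123.234.0/30, and releasing it lets the second round hand out the same block again. A standalone sketch of that first-fit step (the parent range here is an assumption, since the test's clusterCIDR is built inside the fakeNodeHandler constructor not shown in this hunk):

package main

import (
	"fmt"
	"net"
)

func main() {
	// Assumed parent range; the test's real clusterCIDR lives outside this hunk.
	_, parent, _ := net.ParseCIDR("127.123.234.0/28")
	taken := map[string]bool{
		"127.123.234.4/30":  true,
		"127.123.234.8/30":  true,
		"127.123.234.12/30": true,
	}
	base := parent.IP.To4()
	// A /30 holds 4 addresses, so candidate blocks start at every 4th IP.
	for off := 0; off < 16; off += 4 {
		cidr := fmt.Sprintf("%s/30", net.IPv4(base[0], base[1], base[2], base[3]+byte(off)))
		if !taken[cidr] {
			fmt.Println("first free /30:", cidr) // prints 127.123.234.0/30
			break
		}
	}
}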
@@ -150,6 +150,21 @@ func SetNodeCondition(c clientset.Interface, node types.NodeName, condition v1.NodeCondition) error {
 	return err
 }
 
+// PatchNodeCIDR patches the specified node's CIDR to the given value.
+func PatchNodeCIDR(c clientset.Interface, node types.NodeName, cidr string) error {
+	raw, err := json.Marshal(cidr)
+	if err != nil {
+		return fmt.Errorf("failed to json.Marshal CIDR: %v", err)
+	}
+
+	patchBytes := []byte(fmt.Sprintf(`{"spec":{"podCIDR":%s}}`, raw))
+
+	if _, err := c.CoreV1().Nodes().Patch(string(node), types.StrategicMergePatchType, patchBytes); err != nil {
+		return fmt.Errorf("failed to patch node CIDR: %v", err)
+	}
+	return nil
+}
+
 // PatchNodeStatus patches node status.
 func PatchNodeStatus(c v1core.CoreV1Interface, nodeName types.NodeName, oldNode *v1.Node, newNode *v1.Node) (*v1.Node, error) {
 	oldData, err := json.Marshal(oldNode)
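To make the new helper concrete, this standalone sketch mirrors its body construction and prints the exact bytes PatchNodeCIDR puts on the wire; the CIDR value is made up:

package main

import (
	"encoding/json"
	"fmt"
)

func main() {
	// Mirror PatchNodeCIDR: JSON-escape the CIDR string, then embed it in
	// a strategic-merge-patch body that names only spec.podCIDR, leaving
	// every other Node field untouched.
	cidr := "10.244.1.0/24" // illustrative value
	raw, err := json.Marshal(cidr)
	if err != nil {
		panic(err)
	}
	fmt.Printf(`{"spec":{"podCIDR":%s}}`+"\n", raw)
	// Output: {"spec":{"podCIDR":"10.244.1.0/24"}}
}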