mirror of https://github.com/k3s-io/k3s
add FullyLabeledReplicas in Replicaset Status and ReplicationController Status
parent 0787b313eb
commit a6240c1ab8
@@ -7877,7 +7877,8 @@
    "id": "v1beta1.ReplicaSetStatus",
    "description": "ReplicaSetStatus represents the current status of a ReplicaSet.",
    "required": [
-    "replicas"
+    "replicas",
+    "fullyLabeledReplicas"
    ],
    "properties": {
     "replicas": {
@@ -7885,6 +7886,11 @@
      "format": "int32",
      "description": "Replicas is the most recently oberved number of replicas. More info: http://releases.k8s.io/HEAD/docs/user-guide/replication-controller.md#what-is-a-replication-controller"
     },
+    "fullyLabeledReplicas": {
+     "type": "integer",
+     "format": "int32",
+     "description": "The number of pods that have labels matching the labels of the pod template of the replicaset."
+    },
     "observedGeneration": {
      "type": "integer",
      "format": "int64",
@@ -17617,7 +17617,8 @@
    "id": "v1.ReplicationControllerStatus",
    "description": "ReplicationControllerStatus represents the current status of a replication controller.",
    "required": [
-    "replicas"
+    "replicas",
+    "fullyLabeledReplicas"
    ],
    "properties": {
     "replicas": {
@@ -17625,6 +17626,11 @@
      "format": "int32",
      "description": "Replicas is the most recently oberved number of replicas. More info: http://releases.k8s.io/HEAD/docs/user-guide/replication-controller.md#what-is-a-replication-controller"
     },
+    "fullyLabeledReplicas": {
+     "type": "integer",
+     "format": "int32",
+     "description": "The number of pods that have labels matching the labels of the pod template of the replication controller."
+    },
     "observedGeneration": {
      "type": "integer",
      "format": "int64",
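The effect of the two schema changes above is easiest to see on the wire: a status object from either API group now carries the new counter next to replicas. A minimal sketch of a serialized ReplicaSet status (the values are hypothetical, not taken from this commit):

  {
    "replicas": 3,
    "fullyLabeledReplicas": 2,
    "observedGeneration": 1
  }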
@@ -4581,6 +4581,13 @@ Both these may change in the future. Incoming requests are matched against the h
 <td class="tableblock halign-left valign-top"></td>
 </tr>
+<tr>
+<td class="tableblock halign-left valign-top"><p class="tableblock">fullyLabeledReplicas</p></td>
+<td class="tableblock halign-left valign-top"><p class="tableblock">The number of pods that have labels matching the labels of the pod template of the replicaset.</p></td>
+<td class="tableblock halign-left valign-top"><p class="tableblock">true</p></td>
+<td class="tableblock halign-left valign-top"><p class="tableblock">integer (int32)</p></td>
+<td class="tableblock halign-left valign-top"></td>
+</tr>
 <tr>
 <td class="tableblock halign-left valign-top"><p class="tableblock">observedGeneration</p></td>
 <td class="tableblock halign-left valign-top"><p class="tableblock">ObservedGeneration reflects the generation of the most recently observed ReplicaSet.</p></td>
 <td class="tableblock halign-left valign-top"><p class="tableblock">false</p></td>
@@ -5598,7 +5605,7 @@ Both these may change in the future. Incoming requests are matched against the h
 </div>
 <div id="footer">
 <div id="footer-text">
-Last updated 2016-03-09 19:21:59 UTC
+Last updated 2016-03-11 23:39:49 UTC
 </div>
 </div>
 </body>
@@ -5563,6 +5563,13 @@ The resulting set of endpoints can be viewed as:<br>
 <td class="tableblock halign-left valign-top"></td>
 </tr>
+<tr>
+<td class="tableblock halign-left valign-top"><p class="tableblock">fullyLabeledReplicas</p></td>
+<td class="tableblock halign-left valign-top"><p class="tableblock">The number of pods that have labels matching the labels of the pod template of the replication controller.</p></td>
+<td class="tableblock halign-left valign-top"><p class="tableblock">true</p></td>
+<td class="tableblock halign-left valign-top"><p class="tableblock">integer (int32)</p></td>
+<td class="tableblock halign-left valign-top"></td>
+</tr>
 <tr>
 <td class="tableblock halign-left valign-top"><p class="tableblock">observedGeneration</p></td>
 <td class="tableblock halign-left valign-top"><p class="tableblock">ObservedGeneration reflects the generation of the most recently observed replication controller.</p></td>
 <td class="tableblock halign-left valign-top"><p class="tableblock">false</p></td>
@@ -7625,7 +7632,7 @@ The resulting set of endpoints can be viewed as:<br>
 </div>
 <div id="footer">
 <div id="footer-text">
-Last updated 2016-03-03 20:47:22 UTC
+Last updated 2016-03-11 23:39:43 UTC
 </div>
 </div>
 </body>
@@ -2400,6 +2400,7 @@ func DeepCopy_api_ReplicationControllerSpec(in ReplicationControllerSpec, out *R
 
 func DeepCopy_api_ReplicationControllerStatus(in ReplicationControllerStatus, out *ReplicationControllerStatus, c *conversion.Cloner) error {
   out.Replicas = in.Replicas
+  out.FullyLabeledReplicas = in.FullyLabeledReplicas
   out.ObservedGeneration = in.ObservedGeneration
   return nil
 }
@@ -26953,15 +26953,15 @@ func (x *ReplicationControllerStatus) CodecEncodeSelf(e *codec1978.Encoder) {
 } else {
   yysep2 := !z.EncBinary()
   yy2arr2 := z.EncBasicHandle().StructToArray
-  var yyq2 [2]bool
+  var yyq2 [3]bool
   _, _, _ = yysep2, yyq2, yy2arr2
   const yyr2 bool = false
-  yyq2[1] = x.ObservedGeneration != 0
+  yyq2[2] = x.ObservedGeneration != 0
   var yynn2 int
   if yyr2 || yy2arr2 {
-    r.EncodeArrayStart(2)
+    r.EncodeArrayStart(3)
   } else {
-    yynn2 = 1
+    yynn2 = 2
     for _, b := range yyq2 {
       if b {
         yynn2++
@@ -26991,9 +26991,28 @@ func (x *ReplicationControllerStatus) CodecEncodeSelf(e *codec1978.Encoder) {
   }
   if yyr2 || yy2arr2 {
     z.EncSendContainerState(codecSelfer_containerArrayElem1234)
-    if yyq2[1] {
-      yym7 := z.EncBinary()
-      _ = yym7
+    yym7 := z.EncBinary()
+    _ = yym7
+    if false {
+    } else {
+      r.EncodeInt(int64(x.FullyLabeledReplicas))
+    }
+  } else {
+    z.EncSendContainerState(codecSelfer_containerMapKey1234)
+    r.EncodeString(codecSelferC_UTF81234, string("fullyLabeledReplicas"))
+    z.EncSendContainerState(codecSelfer_containerMapValue1234)
+    yym8 := z.EncBinary()
+    _ = yym8
+    if false {
+    } else {
+      r.EncodeInt(int64(x.FullyLabeledReplicas))
+    }
+  }
+  if yyr2 || yy2arr2 {
+    z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+    if yyq2[2] {
+      yym10 := z.EncBinary()
+      _ = yym10
       if false {
       } else {
         r.EncodeInt(int64(x.ObservedGeneration))
@@ -27002,12 +27021,12 @@ func (x *ReplicationControllerStatus) CodecEncodeSelf(e *codec1978.Encoder) {
       r.EncodeInt(0)
     }
   } else {
-    if yyq2[1] {
+    if yyq2[2] {
       z.EncSendContainerState(codecSelfer_containerMapKey1234)
       r.EncodeString(codecSelferC_UTF81234, string("observedGeneration"))
       z.EncSendContainerState(codecSelfer_containerMapValue1234)
-      yym8 := z.EncBinary()
-      _ = yym8
+      yym11 := z.EncBinary()
+      _ = yym11
       if false {
       } else {
         r.EncodeInt(int64(x.ObservedGeneration))
@@ -27081,6 +27100,12 @@ func (x *ReplicationControllerStatus) codecDecodeSelfFromMap(l int, d *codec1978
   } else {
     x.Replicas = int(r.DecodeInt(codecSelferBitsize1234))
   }
+case "fullyLabeledReplicas":
+  if r.TryDecodeAsNil() {
+    x.FullyLabeledReplicas = 0
+  } else {
+    x.FullyLabeledReplicas = int(r.DecodeInt(codecSelferBitsize1234))
+  }
 case "observedGeneration":
   if r.TryDecodeAsNil() {
     x.ObservedGeneration = 0
@@ -27098,16 +27123,16 @@ func (x *ReplicationControllerStatus) codecDecodeSelfFromArray(l int, d *codec19
 var h codecSelfer1234
 z, r := codec1978.GenHelperDecoder(d)
 _, _, _ = h, z, r
-var yyj6 int
-var yyb6 bool
-var yyhl6 bool = l >= 0
-yyj6++
-if yyhl6 {
-  yyb6 = yyj6 > l
+var yyj7 int
+var yyb7 bool
+var yyhl7 bool = l >= 0
+yyj7++
+if yyhl7 {
+  yyb7 = yyj7 > l
 } else {
-  yyb6 = r.CheckBreak()
+  yyb7 = r.CheckBreak()
 }
-if yyb6 {
+if yyb7 {
   z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
   return
 }
@@ -27117,13 +27142,29 @@ func (x *ReplicationControllerStatus) codecDecodeSelfFromArray(l int, d *codec19
 } else {
   x.Replicas = int(r.DecodeInt(codecSelferBitsize1234))
 }
-yyj6++
-if yyhl6 {
-  yyb6 = yyj6 > l
+yyj7++
+if yyhl7 {
+  yyb7 = yyj7 > l
 } else {
-  yyb6 = r.CheckBreak()
+  yyb7 = r.CheckBreak()
 }
-if yyb6 {
+if yyb7 {
   z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
   return
 }
+z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+if r.TryDecodeAsNil() {
+  x.FullyLabeledReplicas = 0
+} else {
+  x.FullyLabeledReplicas = int(r.DecodeInt(codecSelferBitsize1234))
+}
+yyj7++
+if yyhl7 {
+  yyb7 = yyj7 > l
+} else {
+  yyb7 = r.CheckBreak()
+}
+if yyb7 {
+  z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+  return
+}
@@ -27134,17 +27175,17 @@ func (x *ReplicationControllerStatus) codecDecodeSelfFromArray(l int, d *codec19
   x.ObservedGeneration = int64(r.DecodeInt(64))
 }
 for {
-  yyj6++
-  if yyhl6 {
-    yyb6 = yyj6 > l
+  yyj7++
+  if yyhl7 {
+    yyb7 = yyj7 > l
   } else {
-    yyb6 = r.CheckBreak()
+    yyb7 = r.CheckBreak()
   }
-  if yyb6 {
+  if yyb7 {
     break
   }
   z.DecSendContainerState(codecSelfer_containerArrayElem1234)
-  z.DecStructFieldNotFound(yyj6-1, "")
+  z.DecStructFieldNotFound(yyj7-1, "")
 }
 z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
 }
@@ -51389,7 +51430,7 @@ func (x codecSelfer1234) decSliceReplicationController(v *[]ReplicationControlle
 
 yyrg1 := len(yyv1) > 0
 yyv21 := yyv1
-yyrl1, yyrt1 = z.DecInferLen(yyl1, z.DecBasicHandle().MaxInitLen, 232)
+yyrl1, yyrt1 = z.DecInferLen(yyl1, z.DecBasicHandle().MaxInitLen, 240)
 if yyrt1 {
   if yyrl1 <= cap(yyv1) {
     yyv1 = yyv1[:yyrl1]
@@ -1380,6 +1380,9 @@ type ReplicationControllerStatus struct {
   // Replicas is the number of actual replicas.
   Replicas int `json:"replicas"`
 
+  // The number of pods that have labels matching the labels of the pod template of the replication controller.
+  FullyLabeledReplicas int `json:"fullyLabeledReplicas"`
+
   // ObservedGeneration is the most recent generation observed by the controller.
   ObservedGeneration int64 `json:"observedGeneration,omitempty"`
 }
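For orientation, a hedged Go sketch of how a consumer of the internal type might read the new field once this struct change lands; the values and the comparison are illustrative only, not taken from the commit:

  status := api.ReplicationControllerStatus{Replicas: 4, FullyLabeledReplicas: 2}
  if status.FullyLabeledReplicas < status.Replicas {
      // Some running pods match the selector but are missing part of the
      // template's labels, e.g. pods created before a template label was added.
  }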
@@ -2598,6 +2598,7 @@ func autoConvert_api_ReplicationControllerStatus_To_v1_ReplicationControllerStat
     defaulting.(func(*api.ReplicationControllerStatus))(in)
   }
   out.Replicas = int32(in.Replicas)
+  out.FullyLabeledReplicas = int32(in.FullyLabeledReplicas)
   out.ObservedGeneration = in.ObservedGeneration
   return nil
 }
@@ -5836,6 +5837,7 @@ func autoConvert_v1_ReplicationControllerStatus_To_api_ReplicationControllerStat
     defaulting.(func(*ReplicationControllerStatus))(in)
   }
   out.Replicas = int(in.Replicas)
+  out.FullyLabeledReplicas = int(in.FullyLabeledReplicas)
   out.ObservedGeneration = in.ObservedGeneration
   return nil
 }
@@ -2031,6 +2031,7 @@ func deepCopy_v1_ReplicationControllerSpec(in ReplicationControllerSpec, out *Re
 
 func deepCopy_v1_ReplicationControllerStatus(in ReplicationControllerStatus, out *ReplicationControllerStatus, c *conversion.Cloner) error {
   out.Replicas = in.Replicas
+  out.FullyLabeledReplicas = in.FullyLabeledReplicas
   out.ObservedGeneration = in.ObservedGeneration
   return nil
 }
@@ -26683,15 +26683,15 @@ func (x *ReplicationControllerStatus) CodecEncodeSelf(e *codec1978.Encoder) {
 } else {
   yysep2 := !z.EncBinary()
   yy2arr2 := z.EncBasicHandle().StructToArray
-  var yyq2 [2]bool
+  var yyq2 [3]bool
   _, _, _ = yysep2, yyq2, yy2arr2
   const yyr2 bool = false
-  yyq2[1] = x.ObservedGeneration != 0
+  yyq2[2] = x.ObservedGeneration != 0
   var yynn2 int
   if yyr2 || yy2arr2 {
-    r.EncodeArrayStart(2)
+    r.EncodeArrayStart(3)
   } else {
-    yynn2 = 1
+    yynn2 = 2
     for _, b := range yyq2 {
       if b {
         yynn2++
@@ -26721,9 +26721,28 @@ func (x *ReplicationControllerStatus) CodecEncodeSelf(e *codec1978.Encoder) {
   }
   if yyr2 || yy2arr2 {
     z.EncSendContainerState(codecSelfer_containerArrayElem1234)
-    if yyq2[1] {
-      yym7 := z.EncBinary()
-      _ = yym7
+    yym7 := z.EncBinary()
+    _ = yym7
+    if false {
+    } else {
+      r.EncodeInt(int64(x.FullyLabeledReplicas))
+    }
+  } else {
+    z.EncSendContainerState(codecSelfer_containerMapKey1234)
+    r.EncodeString(codecSelferC_UTF81234, string("fullyLabeledReplicas"))
+    z.EncSendContainerState(codecSelfer_containerMapValue1234)
+    yym8 := z.EncBinary()
+    _ = yym8
+    if false {
+    } else {
+      r.EncodeInt(int64(x.FullyLabeledReplicas))
+    }
+  }
+  if yyr2 || yy2arr2 {
+    z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+    if yyq2[2] {
+      yym10 := z.EncBinary()
+      _ = yym10
       if false {
       } else {
         r.EncodeInt(int64(x.ObservedGeneration))
@@ -26732,12 +26751,12 @@ func (x *ReplicationControllerStatus) CodecEncodeSelf(e *codec1978.Encoder) {
       r.EncodeInt(0)
     }
   } else {
-    if yyq2[1] {
+    if yyq2[2] {
       z.EncSendContainerState(codecSelfer_containerMapKey1234)
       r.EncodeString(codecSelferC_UTF81234, string("observedGeneration"))
       z.EncSendContainerState(codecSelfer_containerMapValue1234)
-      yym8 := z.EncBinary()
-      _ = yym8
+      yym11 := z.EncBinary()
+      _ = yym11
       if false {
       } else {
         r.EncodeInt(int64(x.ObservedGeneration))
@@ -26811,6 +26830,12 @@ func (x *ReplicationControllerStatus) codecDecodeSelfFromMap(l int, d *codec1978
   } else {
     x.Replicas = int32(r.DecodeInt(32))
   }
+case "fullyLabeledReplicas":
+  if r.TryDecodeAsNil() {
+    x.FullyLabeledReplicas = 0
+  } else {
+    x.FullyLabeledReplicas = int32(r.DecodeInt(32))
+  }
 case "observedGeneration":
   if r.TryDecodeAsNil() {
     x.ObservedGeneration = 0
@@ -26828,16 +26853,16 @@ func (x *ReplicationControllerStatus) codecDecodeSelfFromArray(l int, d *codec19
 var h codecSelfer1234
 z, r := codec1978.GenHelperDecoder(d)
 _, _, _ = h, z, r
-var yyj6 int
-var yyb6 bool
-var yyhl6 bool = l >= 0
-yyj6++
-if yyhl6 {
-  yyb6 = yyj6 > l
+var yyj7 int
+var yyb7 bool
+var yyhl7 bool = l >= 0
+yyj7++
+if yyhl7 {
+  yyb7 = yyj7 > l
 } else {
-  yyb6 = r.CheckBreak()
+  yyb7 = r.CheckBreak()
 }
-if yyb6 {
+if yyb7 {
   z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
   return
 }
@@ -26847,13 +26872,29 @@ func (x *ReplicationControllerStatus) codecDecodeSelfFromArray(l int, d *codec19
 } else {
   x.Replicas = int32(r.DecodeInt(32))
 }
-yyj6++
-if yyhl6 {
-  yyb6 = yyj6 > l
+yyj7++
+if yyhl7 {
+  yyb7 = yyj7 > l
 } else {
-  yyb6 = r.CheckBreak()
+  yyb7 = r.CheckBreak()
 }
-if yyb6 {
+if yyb7 {
   z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
   return
 }
+z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+if r.TryDecodeAsNil() {
+  x.FullyLabeledReplicas = 0
+} else {
+  x.FullyLabeledReplicas = int32(r.DecodeInt(32))
+}
+yyj7++
+if yyhl7 {
+  yyb7 = yyj7 > l
+} else {
+  yyb7 = r.CheckBreak()
+}
+if yyb7 {
+  z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+  return
+}
@@ -26864,17 +26905,17 @@ func (x *ReplicationControllerStatus) codecDecodeSelfFromArray(l int, d *codec19
   x.ObservedGeneration = int64(r.DecodeInt(64))
 }
 for {
-  yyj6++
-  if yyhl6 {
-    yyb6 = yyj6 > l
+  yyj7++
+  if yyhl7 {
+    yyb7 = yyj7 > l
   } else {
-    yyb6 = r.CheckBreak()
+    yyb7 = r.CheckBreak()
  }
-  if yyb6 {
+  if yyb7 {
     break
   }
   z.DecSendContainerState(codecSelfer_containerArrayElem1234)
-  z.DecStructFieldNotFound(yyj6-1, "")
+  z.DecStructFieldNotFound(yyj7-1, "")
 }
 z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
 }
@@ -1679,6 +1679,9 @@ type ReplicationControllerStatus struct {
   // More info: http://releases.k8s.io/HEAD/docs/user-guide/replication-controller.md#what-is-a-replication-controller
   Replicas int32 `json:"replicas"`
 
+  // The number of pods that have labels matching the labels of the pod template of the replication controller.
+  FullyLabeledReplicas int32 `json:"fullyLabeledReplicas"`
+
   // ObservedGeneration reflects the generation of the most recently observed replication controller.
   ObservedGeneration int64 `json:"observedGeneration,omitempty"`
 }
@@ -1320,9 +1320,10 @@ func (ReplicationControllerSpec) SwaggerDoc() map[string]string {
 }
 
 var map_ReplicationControllerStatus = map[string]string{
-  "":                   "ReplicationControllerStatus represents the current status of a replication controller.",
-  "replicas":           "Replicas is the most recently oberved number of replicas. More info: http://releases.k8s.io/HEAD/docs/user-guide/replication-controller.md#what-is-a-replication-controller",
-  "observedGeneration": "ObservedGeneration reflects the generation of the most recently observed replication controller.",
+  "":                     "ReplicationControllerStatus represents the current status of a replication controller.",
+  "replicas":             "Replicas is the most recently oberved number of replicas. More info: http://releases.k8s.io/HEAD/docs/user-guide/replication-controller.md#what-is-a-replication-controller",
+  "fullyLabeledReplicas": "The number of pods that have labels matching the labels of the pod template of the replication controller.",
+  "observedGeneration":   "ObservedGeneration reflects the generation of the most recently observed replication controller.",
 }
 
 func (ReplicationControllerStatus) SwaggerDoc() map[string]string {
@@ -1858,6 +1858,7 @@ func ValidateReplicationControllerStatusUpdate(controller, oldController *api.Re
   allErrs := ValidateObjectMetaUpdate(&controller.ObjectMeta, &oldController.ObjectMeta, field.NewPath("metadata"))
   statusPath := field.NewPath("status")
   allErrs = append(allErrs, ValidateNonnegativeField(int64(controller.Status.Replicas), statusPath.Child("replicas"))...)
+  allErrs = append(allErrs, ValidateNonnegativeField(int64(controller.Status.FullyLabeledReplicas), statusPath.Child("fullyLabeledReplicas"))...)
   allErrs = append(allErrs, ValidateNonnegativeField(int64(controller.Status.ObservedGeneration), statusPath.Child("observedGeneration"))...)
   return allErrs
 }
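The added line makes a negative counter fail status-update validation just like the existing fields. A test-style sketch, under the assumption that validController() is a hypothetical fixture returning a valid ReplicationController:

  oldRC := validController() // hypothetical fixture
  rc := oldRC
  rc.Status.FullyLabeledReplicas = -1
  if errs := ValidateReplicationControllerStatusUpdate(&rc, &oldRC); len(errs) == 0 {
      // a field error on status.fullyLabeledReplicas was expected here
  }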
@@ -14889,15 +14889,15 @@ func (x *ReplicaSetStatus) CodecEncodeSelf(e *codec1978.Encoder) {
 } else {
   yysep2 := !z.EncBinary()
   yy2arr2 := z.EncBasicHandle().StructToArray
-  var yyq2 [2]bool
+  var yyq2 [3]bool
   _, _, _ = yysep2, yyq2, yy2arr2
   const yyr2 bool = false
-  yyq2[1] = x.ObservedGeneration != 0
+  yyq2[2] = x.ObservedGeneration != 0
   var yynn2 int
   if yyr2 || yy2arr2 {
-    r.EncodeArrayStart(2)
+    r.EncodeArrayStart(3)
   } else {
-    yynn2 = 1
+    yynn2 = 2
     for _, b := range yyq2 {
       if b {
         yynn2++
@@ -14927,9 +14927,28 @@ func (x *ReplicaSetStatus) CodecEncodeSelf(e *codec1978.Encoder) {
   }
   if yyr2 || yy2arr2 {
     z.EncSendContainerState(codecSelfer_containerArrayElem1234)
-    if yyq2[1] {
-      yym7 := z.EncBinary()
-      _ = yym7
+    yym7 := z.EncBinary()
+    _ = yym7
+    if false {
+    } else {
+      r.EncodeInt(int64(x.FullyLabeledReplicas))
+    }
+  } else {
+    z.EncSendContainerState(codecSelfer_containerMapKey1234)
+    r.EncodeString(codecSelferC_UTF81234, string("fullyLabeledReplicas"))
+    z.EncSendContainerState(codecSelfer_containerMapValue1234)
+    yym8 := z.EncBinary()
+    _ = yym8
+    if false {
+    } else {
+      r.EncodeInt(int64(x.FullyLabeledReplicas))
+    }
+  }
+  if yyr2 || yy2arr2 {
+    z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+    if yyq2[2] {
+      yym10 := z.EncBinary()
+      _ = yym10
       if false {
       } else {
         r.EncodeInt(int64(x.ObservedGeneration))
@@ -14938,12 +14957,12 @@ func (x *ReplicaSetStatus) CodecEncodeSelf(e *codec1978.Encoder) {
       r.EncodeInt(0)
     }
   } else {
-    if yyq2[1] {
+    if yyq2[2] {
       z.EncSendContainerState(codecSelfer_containerMapKey1234)
       r.EncodeString(codecSelferC_UTF81234, string("observedGeneration"))
       z.EncSendContainerState(codecSelfer_containerMapValue1234)
-      yym8 := z.EncBinary()
-      _ = yym8
+      yym11 := z.EncBinary()
+      _ = yym11
       if false {
       } else {
         r.EncodeInt(int64(x.ObservedGeneration))
@@ -15017,6 +15036,12 @@ func (x *ReplicaSetStatus) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) {
   } else {
     x.Replicas = int(r.DecodeInt(codecSelferBitsize1234))
   }
+case "fullyLabeledReplicas":
+  if r.TryDecodeAsNil() {
+    x.FullyLabeledReplicas = 0
+  } else {
+    x.FullyLabeledReplicas = int(r.DecodeInt(codecSelferBitsize1234))
+  }
 case "observedGeneration":
   if r.TryDecodeAsNil() {
     x.ObservedGeneration = 0
@@ -15034,16 +15059,16 @@ func (x *ReplicaSetStatus) codecDecodeSelfFromArray(l int, d *codec1978.Decoder)
 var h codecSelfer1234
 z, r := codec1978.GenHelperDecoder(d)
 _, _, _ = h, z, r
-var yyj6 int
-var yyb6 bool
-var yyhl6 bool = l >= 0
-yyj6++
-if yyhl6 {
-  yyb6 = yyj6 > l
+var yyj7 int
+var yyb7 bool
+var yyhl7 bool = l >= 0
+yyj7++
+if yyhl7 {
+  yyb7 = yyj7 > l
 } else {
-  yyb6 = r.CheckBreak()
+  yyb7 = r.CheckBreak()
 }
-if yyb6 {
+if yyb7 {
   z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
   return
 }
@@ -15053,13 +15078,29 @@ func (x *ReplicaSetStatus) codecDecodeSelfFromArray(l int, d *codec1978.Decoder)
 } else {
   x.Replicas = int(r.DecodeInt(codecSelferBitsize1234))
 }
-yyj6++
-if yyhl6 {
-  yyb6 = yyj6 > l
+yyj7++
+if yyhl7 {
+  yyb7 = yyj7 > l
 } else {
-  yyb6 = r.CheckBreak()
+  yyb7 = r.CheckBreak()
 }
-if yyb6 {
+if yyb7 {
   z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
   return
 }
+z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+if r.TryDecodeAsNil() {
+  x.FullyLabeledReplicas = 0
+} else {
+  x.FullyLabeledReplicas = int(r.DecodeInt(codecSelferBitsize1234))
+}
+yyj7++
+if yyhl7 {
+  yyb7 = yyj7 > l
+} else {
+  yyb7 = r.CheckBreak()
+}
+if yyb7 {
+  z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+  return
+}
@@ -15070,17 +15111,17 @@ func (x *ReplicaSetStatus) codecDecodeSelfFromArray(l int, d *codec1978.Decoder)
   x.ObservedGeneration = int64(r.DecodeInt(64))
 }
 for {
-  yyj6++
-  if yyhl6 {
-    yyb6 = yyj6 > l
+  yyj7++
+  if yyhl7 {
+    yyb7 = yyj7 > l
   } else {
-    yyb6 = r.CheckBreak()
+    yyb7 = r.CheckBreak()
   }
-  if yyb6 {
+  if yyb7 {
     break
   }
   z.DecSendContainerState(codecSelfer_containerArrayElem1234)
-  z.DecStructFieldNotFound(yyj6-1, "")
+  z.DecStructFieldNotFound(yyj7-1, "")
 }
 z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
 }
@@ -18948,7 +18989,7 @@ func (x codecSelfer1234) decSliceReplicaSet(v *[]ReplicaSet, d *codec1978.Decode
 
 yyrg1 := len(yyv1) > 0
 yyv21 := yyv1
-yyrl1, yyrt1 = z.DecInferLen(yyl1, z.DecBasicHandle().MaxInitLen, 552)
+yyrl1, yyrt1 = z.DecInferLen(yyl1, z.DecBasicHandle().MaxInitLen, 560)
 if yyrt1 {
   if yyrl1 <= cap(yyv1) {
     yyv1 = yyv1[:yyrl1]
@@ -815,6 +815,9 @@ type ReplicaSetStatus struct {
   // Replicas is the number of actual replicas.
   Replicas int `json:"replicas"`
 
+  // The number of pods that have labels matching the labels of the pod template of the replicaset.
+  FullyLabeledReplicas int `json:"fullyLabeledReplicas"`
+
   // ObservedGeneration is the most recent generation observed by the controller.
   ObservedGeneration int64 `json:"observedGeneration,omitempty"`
 }
@@ -3610,6 +3610,7 @@ func autoConvert_extensions_ReplicaSetStatus_To_v1beta1_ReplicaSetStatus(in *ext
     defaulting.(func(*extensions.ReplicaSetStatus))(in)
   }
   out.Replicas = int32(in.Replicas)
+  out.FullyLabeledReplicas = int32(in.FullyLabeledReplicas)
   out.ObservedGeneration = in.ObservedGeneration
   return nil
 }
@@ -4853,6 +4854,7 @@ func autoConvert_v1beta1_ReplicaSetStatus_To_extensions_ReplicaSetStatus(in *Rep
     defaulting.(func(*ReplicaSetStatus))(in)
   }
   out.Replicas = int(in.Replicas)
+  out.FullyLabeledReplicas = int(in.FullyLabeledReplicas)
   out.ObservedGeneration = in.ObservedGeneration
   return nil
 }
@@ -1746,6 +1746,7 @@ func deepCopy_v1beta1_ReplicaSetSpec(in ReplicaSetSpec, out *ReplicaSetSpec, c *
 
 func deepCopy_v1beta1_ReplicaSetStatus(in ReplicaSetStatus, out *ReplicaSetStatus, c *conversion.Cloner) error {
   out.Replicas = in.Replicas
+  out.FullyLabeledReplicas = in.FullyLabeledReplicas
   out.ObservedGeneration = in.ObservedGeneration
   return nil
 }
@@ -16254,15 +16254,15 @@ func (x *ReplicaSetStatus) CodecEncodeSelf(e *codec1978.Encoder) {
 } else {
   yysep2 := !z.EncBinary()
   yy2arr2 := z.EncBasicHandle().StructToArray
-  var yyq2 [2]bool
+  var yyq2 [3]bool
   _, _, _ = yysep2, yyq2, yy2arr2
   const yyr2 bool = false
-  yyq2[1] = x.ObservedGeneration != 0
+  yyq2[2] = x.ObservedGeneration != 0
   var yynn2 int
   if yyr2 || yy2arr2 {
-    r.EncodeArrayStart(2)
+    r.EncodeArrayStart(3)
   } else {
-    yynn2 = 1
+    yynn2 = 2
     for _, b := range yyq2 {
       if b {
         yynn2++
@@ -16292,9 +16292,28 @@ func (x *ReplicaSetStatus) CodecEncodeSelf(e *codec1978.Encoder) {
   }
   if yyr2 || yy2arr2 {
     z.EncSendContainerState(codecSelfer_containerArrayElem1234)
-    if yyq2[1] {
-      yym7 := z.EncBinary()
-      _ = yym7
+    yym7 := z.EncBinary()
+    _ = yym7
+    if false {
+    } else {
+      r.EncodeInt(int64(x.FullyLabeledReplicas))
+    }
+  } else {
+    z.EncSendContainerState(codecSelfer_containerMapKey1234)
+    r.EncodeString(codecSelferC_UTF81234, string("fullyLabeledReplicas"))
+    z.EncSendContainerState(codecSelfer_containerMapValue1234)
+    yym8 := z.EncBinary()
+    _ = yym8
+    if false {
+    } else {
+      r.EncodeInt(int64(x.FullyLabeledReplicas))
+    }
+  }
+  if yyr2 || yy2arr2 {
+    z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+    if yyq2[2] {
+      yym10 := z.EncBinary()
+      _ = yym10
       if false {
       } else {
         r.EncodeInt(int64(x.ObservedGeneration))
@@ -16303,12 +16322,12 @@ func (x *ReplicaSetStatus) CodecEncodeSelf(e *codec1978.Encoder) {
       r.EncodeInt(0)
     }
   } else {
-    if yyq2[1] {
+    if yyq2[2] {
       z.EncSendContainerState(codecSelfer_containerMapKey1234)
       r.EncodeString(codecSelferC_UTF81234, string("observedGeneration"))
       z.EncSendContainerState(codecSelfer_containerMapValue1234)
-      yym8 := z.EncBinary()
-      _ = yym8
+      yym11 := z.EncBinary()
+      _ = yym11
       if false {
       } else {
         r.EncodeInt(int64(x.ObservedGeneration))
@@ -16382,6 +16401,12 @@ func (x *ReplicaSetStatus) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) {
   } else {
     x.Replicas = int32(r.DecodeInt(32))
   }
+case "fullyLabeledReplicas":
+  if r.TryDecodeAsNil() {
+    x.FullyLabeledReplicas = 0
+  } else {
+    x.FullyLabeledReplicas = int32(r.DecodeInt(32))
+  }
 case "observedGeneration":
   if r.TryDecodeAsNil() {
     x.ObservedGeneration = 0
@@ -16399,16 +16424,16 @@ func (x *ReplicaSetStatus) codecDecodeSelfFromArray(l int, d *codec1978.Decoder)
 var h codecSelfer1234
 z, r := codec1978.GenHelperDecoder(d)
 _, _, _ = h, z, r
-var yyj6 int
-var yyb6 bool
-var yyhl6 bool = l >= 0
-yyj6++
-if yyhl6 {
-  yyb6 = yyj6 > l
+var yyj7 int
+var yyb7 bool
+var yyhl7 bool = l >= 0
+yyj7++
+if yyhl7 {
+  yyb7 = yyj7 > l
 } else {
-  yyb6 = r.CheckBreak()
+  yyb7 = r.CheckBreak()
 }
-if yyb6 {
+if yyb7 {
   z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
   return
 }
@@ -16418,13 +16443,29 @@ func (x *ReplicaSetStatus) codecDecodeSelfFromArray(l int, d *codec1978.Decoder)
 } else {
   x.Replicas = int32(r.DecodeInt(32))
 }
-yyj6++
-if yyhl6 {
-  yyb6 = yyj6 > l
+yyj7++
+if yyhl7 {
+  yyb7 = yyj7 > l
 } else {
-  yyb6 = r.CheckBreak()
+  yyb7 = r.CheckBreak()
 }
-if yyb6 {
+if yyb7 {
   z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
   return
 }
+z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+if r.TryDecodeAsNil() {
+  x.FullyLabeledReplicas = 0
+} else {
+  x.FullyLabeledReplicas = int32(r.DecodeInt(32))
+}
+yyj7++
+if yyhl7 {
+  yyb7 = yyj7 > l
+} else {
+  yyb7 = r.CheckBreak()
+}
+if yyb7 {
+  z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+  return
+}
@@ -16435,17 +16476,17 @@ func (x *ReplicaSetStatus) codecDecodeSelfFromArray(l int, d *codec1978.Decoder)
   x.ObservedGeneration = int64(r.DecodeInt(64))
 }
 for {
-  yyj6++
-  if yyhl6 {
-    yyb6 = yyj6 > l
+  yyj7++
+  if yyhl7 {
+    yyb7 = yyj7 > l
   } else {
-    yyb6 = r.CheckBreak()
+    yyb7 = r.CheckBreak()
   }
-  if yyb6 {
+  if yyb7 {
     break
   }
   z.DecSendContainerState(codecSelfer_containerArrayElem1234)
-  z.DecStructFieldNotFound(yyj6-1, "")
+  z.DecStructFieldNotFound(yyj7-1, "")
 }
 z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
 }
@@ -907,6 +907,9 @@ type ReplicaSetStatus struct {
   // More info: http://releases.k8s.io/HEAD/docs/user-guide/replication-controller.md#what-is-a-replication-controller
   Replicas int32 `json:"replicas"`
 
+  // The number of pods that have labels matching the labels of the pod template of the replicaset.
+  FullyLabeledReplicas int32 `json:"fullyLabeledReplicas"`
+
   // ObservedGeneration reflects the generation of the most recently observed ReplicaSet.
   ObservedGeneration int64 `json:"observedGeneration,omitempty"`
 }
@@ -517,9 +517,10 @@ func (ReplicaSetSpec) SwaggerDoc() map[string]string {
 }
 
 var map_ReplicaSetStatus = map[string]string{
-  "":                   "ReplicaSetStatus represents the current status of a ReplicaSet.",
-  "replicas":           "Replicas is the most recently oberved number of replicas. More info: http://releases.k8s.io/HEAD/docs/user-guide/replication-controller.md#what-is-a-replication-controller",
-  "observedGeneration": "ObservedGeneration reflects the generation of the most recently observed ReplicaSet.",
+  "":                     "ReplicaSetStatus represents the current status of a ReplicaSet.",
+  "replicas":             "Replicas is the most recently oberved number of replicas. More info: http://releases.k8s.io/HEAD/docs/user-guide/replication-controller.md#what-is-a-replication-controller",
+  "fullyLabeledReplicas": "The number of pods that have labels matching the labels of the pod template of the replicaset.",
+  "observedGeneration":   "ObservedGeneration reflects the generation of the most recently observed ReplicaSet.",
 }
 
 func (ReplicaSetStatus) SwaggerDoc() map[string]string {
@@ -687,6 +687,7 @@ func ValidateReplicaSetStatusUpdate(rs, oldRs *extensions.ReplicaSet) field.Erro
   allErrs := field.ErrorList{}
   allErrs = append(allErrs, apivalidation.ValidateObjectMetaUpdate(&rs.ObjectMeta, &oldRs.ObjectMeta, field.NewPath("metadata"))...)
   allErrs = append(allErrs, apivalidation.ValidateNonnegativeField(int64(rs.Status.Replicas), field.NewPath("status", "replicas"))...)
+  allErrs = append(allErrs, apivalidation.ValidateNonnegativeField(int64(rs.Status.FullyLabeledReplicas), field.NewPath("status", "fullyLabeledReplicas"))...)
   allErrs = append(allErrs, apivalidation.ValidateNonnegativeField(int64(rs.Status.ObservedGeneration), field.NewPath("status", "observedGeneration"))...)
   return allErrs
 }
@@ -153,12 +153,18 @@ func JobHasDesiredParallelism(c ExtensionsInterface, job *extensions.Job) wait.C
 // the desired replica count for a deployment equals its updated replicas count.
 // (non-terminated pods that have the desired template spec).
 func DeploymentHasDesiredReplicas(c ExtensionsInterface, deployment *extensions.Deployment) wait.ConditionFunc {
+  // If we're given a deployment where the status lags the spec, it either
+  // means that the deployment is stale, or that the deployment manager hasn't
+  // noticed the update yet. Polling status.Replicas is not safe in the latter
+  // case.
+  desiredGeneration := deployment.Generation
+
   return func() (bool, error) {
     deployment, err := c.Deployments(deployment.Namespace).Get(deployment.Name)
     if err != nil {
       return false, err
     }
-    return deployment.Status.UpdatedReplicas == deployment.Spec.Replicas, nil
+    return deployment.Status.ObservedGeneration >= desiredGeneration &&
+      deployment.Status.UpdatedReplicas == deployment.Spec.Replicas, nil
   }
 }
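The new ObservedGeneration guard matters when this condition is polled: without it, a stale status whose UpdatedReplicas happens to equal the spec could report success before the deployment manager has even seen the update. A hedged usage sketch with the wait helpers of this era, assuming the client package is imported under the usual alias and the interval/timeout are arbitrary:

  cond := client.DeploymentHasDesiredReplicas(c.Extensions(), deployment)
  if err := wait.Poll(time.Second, 2*time.Minute, cond); err != nil {
      // the deployment did not converge in time
  }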
@@ -547,8 +547,21 @@ func (rsc *ReplicaSetController) syncReplicaSet(key string) error {
     rsc.manageReplicas(filteredPods, &rs)
   }
 
+  // Count the number of pods that have labels matching the labels of the pod
+  // template of the replicaSet, the matching pods may have more labels than
+  // are in the template. Because the label of podTemplateSpec is a superset
+  // of the selector of the replicaset, so the possible matching pods must be
+  // part of the filteredPods.
+  fullyLabeledReplicasCount := 0
+  templateLabel := labels.Set(rs.Spec.Template.Labels).AsSelector()
+  for _, pod := range filteredPods {
+    if templateLabel.Matches(labels.Set(pod.Labels)) {
+      fullyLabeledReplicasCount++
+    }
+  }
+
   // Always updates status as pods come up or die.
-  if err := updateReplicaCount(rsc.kubeClient.Extensions().ReplicaSets(rs.Namespace), rs, len(filteredPods)); err != nil {
+  if err := updateReplicaCount(rsc.kubeClient.Extensions().ReplicaSets(rs.Namespace), rs, len(filteredPods), fullyLabeledReplicasCount); err != nil {
     // Multiple things could lead to this update failing. Requeuing the replica set ensures
     // we retry with some fairness.
     glog.V(2).Infof("Failed to update replica count for controller %v/%v; requeuing; error: %v", rs.Namespace, rs.Name, err)
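The counting loop relies on Kubernetes label-selector semantics: turning the template's labels into a selector and matching it against each pod's labels means a pod with extra labels still counts, while a pod missing any template label does not. A self-contained sketch using the same labels package the hunk itself uses (the label values here are made up for illustration):

  package main

  import (
      "fmt"

      "k8s.io/kubernetes/pkg/labels"
  )

  func main() {
      // The pod template's labels become the selector.
      templateLabel := labels.Set(map[string]string{"app": "demo"}).AsSelector()

      // Extra labels on the pod do not prevent a match.
      fmt.Println(templateLabel.Matches(labels.Set(map[string]string{"app": "demo", "extraKey": "extraValue"}))) // true

      // A missing or different value does.
      fmt.Println(templateLabel.Matches(labels.Set(map[string]string{"app": "other"}))) // false
  }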
@@ -98,12 +98,12 @@ func newReplicaSet(replicas int, selectorMap map[string]string) *extensions.Repl
 }
 
 // create count pods with the given phase for the given ReplicaSet (same selectors and namespace), and add them to the store.
-func newPodList(store cache.Store, count int, status api.PodPhase, labelMap map[string]string, rs *extensions.ReplicaSet) *api.PodList {
+func newPodList(store cache.Store, count int, status api.PodPhase, labelMap map[string]string, rs *extensions.ReplicaSet, name string) *api.PodList {
   pods := []api.Pod{}
   for i := 0; i < count; i++ {
     newPod := api.Pod{
       ObjectMeta: api.ObjectMeta{
-        Name:      fmt.Sprintf("pod%d", i),
+        Name:      fmt.Sprintf("%s%d", name, i),
         Labels:    labelMap,
         Namespace: rs.Namespace,
       },
@@ -147,7 +147,7 @@ func TestSyncReplicaSetDoesNothing(t *testing.T) {
   labelMap := map[string]string{"foo": "bar"}
   rsSpec := newReplicaSet(2, labelMap)
   manager.rsStore.Store.Add(rsSpec)
-  newPodList(manager.podStore.Store, 2, api.PodRunning, labelMap, rsSpec)
+  newPodList(manager.podStore.Store, 2, api.PodRunning, labelMap, rsSpec, "pod")
 
   manager.podControl = &fakePodControl
   manager.syncReplicaSet(getKey(rsSpec, t))
@@ -165,7 +165,7 @@ func TestSyncReplicaSetDeletes(t *testing.T) {
   labelMap := map[string]string{"foo": "bar"}
   rsSpec := newReplicaSet(1, labelMap)
   manager.rsStore.Store.Add(rsSpec)
-  newPodList(manager.podStore.Store, 2, api.PodRunning, labelMap, rsSpec)
+  newPodList(manager.podStore.Store, 2, api.PodRunning, labelMap, rsSpec, "pod")
 
   manager.syncReplicaSet(getKey(rsSpec, t))
   validateSyncReplicaSet(t, &fakePodControl, 0, 1)
@@ -189,7 +189,7 @@ func TestDeleteFinalStateUnknown(t *testing.T) {
   labelMap := map[string]string{"foo": "bar"}
   rsSpec := newReplicaSet(1, labelMap)
   manager.rsStore.Store.Add(rsSpec)
-  pods := newPodList(nil, 1, api.PodRunning, labelMap, rsSpec)
+  pods := newPodList(nil, 1, api.PodRunning, labelMap, rsSpec, "pod")
   manager.deletePod(cache.DeletedFinalStateUnknown{Key: "foo", Obj: &pods.Items[0]})
 
   go manager.worker()
@@ -239,7 +239,7 @@ func TestStatusUpdatesWithoutReplicasChange(t *testing.T) {
   rs := newReplicaSet(activePods, labelMap)
   manager.rsStore.Store.Add(rs)
   rs.Status = extensions.ReplicaSetStatus{Replicas: activePods}
-  newPodList(manager.podStore.Store, activePods, api.PodRunning, labelMap, rs)
+  newPodList(manager.podStore.Store, activePods, api.PodRunning, labelMap, rs, "pod")
 
   fakePodControl := controller.FakePodControl{}
   manager.podControl = &fakePodControl
@@ -279,11 +279,14 @@ func TestControllerUpdateReplicas(t *testing.T) {
   // Insufficient number of pods in the system, and Status.Replicas is wrong;
   // Status.Replica should update to match number of pods in system, 1 new pod should be created.
   labelMap := map[string]string{"foo": "bar"}
+  extraLabelMap := map[string]string{"foo": "bar", "extraKey": "extraValue"}
   rs := newReplicaSet(5, labelMap)
+  rs.Spec.Template.Labels = extraLabelMap
   manager.rsStore.Store.Add(rs)
-  rs.Status = extensions.ReplicaSetStatus{Replicas: 2, ObservedGeneration: 0}
+  rs.Status = extensions.ReplicaSetStatus{Replicas: 2, FullyLabeledReplicas: 6, ObservedGeneration: 0}
   rs.Generation = 1
-  newPodList(manager.podStore.Store, 4, api.PodRunning, labelMap, rs)
+  newPodList(manager.podStore.Store, 2, api.PodRunning, labelMap, rs, "pod")
+  newPodList(manager.podStore.Store, 2, api.PodRunning, extraLabelMap, rs, "podWithExtraLabel")
 
   // This response body is just so we don't err out decoding the http response
   response := runtime.EncodeOrDie(testapi.Extensions.Codec(), &extensions.ReplicaSet{})
@@ -295,8 +298,10 @@
   manager.syncReplicaSet(getKey(rs, t))
 
   // 1. Status.Replicas should go up from 2->4 even though we created 5-4=1 pod.
-  // 2. Every update to the status should include the Generation of the spec.
-  rs.Status = extensions.ReplicaSetStatus{Replicas: 4, ObservedGeneration: 1}
+  // 2. Status.FullyLabeledReplicas should equal to the number of pods that
+  // has the extra labels, i.e., 2.
+  // 3. Every update to the status should include the Generation of the spec.
+  rs.Status = extensions.ReplicaSetStatus{Replicas: 4, FullyLabeledReplicas: 2, ObservedGeneration: 1}
 
   decRc := runtime.EncodeOrDie(testapi.Extensions.Codec(), rs)
   fakeHandler.ValidateRequest(t, testapi.Extensions.ResourcePath(replicaSetResourceName(), rs.Namespace, rs.Name)+"/status", "PUT", &decRc)
@@ -321,7 +326,7 @@ func TestSyncReplicaSetDormancy(t *testing.T) {
   labelMap := map[string]string{"foo": "bar"}
   rsSpec := newReplicaSet(2, labelMap)
   manager.rsStore.Store.Add(rsSpec)
-  newPodList(manager.podStore.Store, 1, api.PodRunning, labelMap, rsSpec)
+  newPodList(manager.podStore.Store, 1, api.PodRunning, labelMap, rsSpec, "pod")
 
   // Creates a replica and sets expectations
   rsSpec.Status.Replicas = 1
@@ -502,7 +507,7 @@ func TestWatchPods(t *testing.T) {
   go manager.podController.Run(stopCh)
   go wait.Until(manager.worker, 10*time.Millisecond, stopCh)
 
-  pods := newPodList(nil, 1, api.PodRunning, labelMap, testRSSpec)
+  pods := newPodList(nil, 1, api.PodRunning, labelMap, testRSSpec, "pod")
   testPod := pods.Items[0]
   testPod.Status.Phase = api.PodFailed
   fakeWatch.Add(&testPod)
@@ -544,7 +549,7 @@ func TestUpdatePods(t *testing.T) {
   manager.rsStore.Store.Add(&testRSSpec2)
 
   // Put one pod in the podStore
-  pod1 := newPodList(manager.podStore.Store, 1, api.PodRunning, labelMap1, testRSSpec1).Items[0]
+  pod1 := newPodList(manager.podStore.Store, 1, api.PodRunning, labelMap1, testRSSpec1, "pod").Items[0]
   pod2 := pod1
   pod2.Labels = labelMap2
 
@@ -583,7 +588,7 @@ func TestControllerUpdateRequeue(t *testing.T) {
   rs := newReplicaSet(1, labelMap)
   manager.rsStore.Store.Add(rs)
   rs.Status = extensions.ReplicaSetStatus{Replicas: 2}
-  newPodList(manager.podStore.Store, 1, api.PodRunning, labelMap, rs)
+  newPodList(manager.podStore.Store, 1, api.PodRunning, labelMap, rs, "pod")
 
   fakePodControl := controller.FakePodControl{}
   manager.podControl = &fakePodControl
@@ -618,7 +623,7 @@ func TestControllerUpdateStatusWithFailure(t *testing.T) {
   })
   fakeRSClient := fakeClient.Extensions().ReplicaSets("default")
   numReplicas := 10
-  updateReplicaCount(fakeRSClient, *rs, numReplicas)
+  updateReplicaCount(fakeRSClient, *rs, numReplicas, 0)
   updates, gets := 0, 0
   for _, a := range fakeClient.Actions() {
     if a.GetResource() != "replicasets" {
@@ -666,7 +671,7 @@ func doTestControllerBurstReplicas(t *testing.T, burstReplicas, numReplicas int)
   manager.rsStore.Store.Add(rsSpec)
 
   expectedPods := 0
-  pods := newPodList(nil, numReplicas, api.PodPending, labelMap, rsSpec)
+  pods := newPodList(nil, numReplicas, api.PodPending, labelMap, rsSpec, "pod")
 
   rsKey, err := controller.KeyFunc(rsSpec)
   if err != nil {
@@ -783,7 +788,7 @@ func doTestControllerBurstReplicas(t *testing.T, burstReplicas, numReplicas int)
       t.Fatalf("Unexpected number of active pods, expected %d, got %d", rsSpec.Spec.Replicas, activePods)
     }
     // Replenish the pod list, since we cut it down sizing up
-    pods = newPodList(nil, replicas, api.PodRunning, labelMap, rsSpec)
+    pods = newPodList(nil, replicas, api.PodRunning, labelMap, rsSpec, "pod")
   }
 }
 
@@ -816,7 +821,7 @@ func TestRSSyncExpectations(t *testing.T) {
   labelMap := map[string]string{"foo": "bar"}
   rsSpec := newReplicaSet(2, labelMap)
   manager.rsStore.Store.Add(rsSpec)
-  pods := newPodList(nil, 2, api.PodPending, labelMap, rsSpec)
+  pods := newPodList(nil, 2, api.PodPending, labelMap, rsSpec, "pod")
   manager.podStore.Store.Add(&pods.Items[0])
   postExpectationsPod := pods.Items[1]
 
@@ -932,7 +937,7 @@ func TestOverlappingRSs(t *testing.T) {
       manager.rsStore.Store.Add(shuffledControllers[j])
     }
     // Add a pod and make sure only the oldest ReplicaSet is synced
-    pods := newPodList(nil, 1, api.PodPending, labelMap, controllers[0])
+    pods := newPodList(nil, 1, api.PodPending, labelMap, controllers[0], "pod")
     rsKey := getKey(controllers[0], t)
 
     manager.addPod(&pods.Items[0])
@@ -955,7 +960,7 @@ func TestDeletionTimestamp(t *testing.T) {
   if err != nil {
     t.Errorf("Couldn't get key for object %+v: %v", rs, err)
   }
-  pod := newPodList(nil, 1, api.PodPending, labelMap, rs).Items[0]
+  pod := newPodList(nil, 1, api.PodPending, labelMap, rs, "pod").Items[0]
   pod.DeletionTimestamp = &unversioned.Time{time.Now()}
   manager.expectations.ExpectDeletions(rsKey, []string{controller.PodKey(&pod)})
 
@@ -975,7 +980,7 @@ func TestDeletionTimestamp(t *testing.T) {
 
   // An update from no deletion timestamp to having one should be treated
   // as a deletion.
-  oldPod := newPodList(nil, 1, api.PodPending, labelMap, rs).Items[0]
+  oldPod := newPodList(nil, 1, api.PodPending, labelMap, rs, "pod").Items[0]
   manager.expectations.ExpectDeletions(rsKey, []string{controller.PodKey(&pod)})
   manager.updatePod(&oldPod, &pod)
 
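The extra name argument to newPodList exists presumably because the fake pod store keys pods by name, so two batches created with the same fixed "pod" prefix would collide; TestControllerUpdateReplicas now needs two distinct batches. The call shapes, taken from the updated test above:

  newPodList(manager.podStore.Store, 2, api.PodRunning, labelMap, rs, "pod")
  newPodList(manager.podStore.Store, 2, api.PodRunning, extraLabelMap, rs, "podWithExtraLabel")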
@@ -19,17 +19,20 @@ limitations under the License.
 package replicaset
 
 import (
+  "fmt"
+
   "github.com/golang/glog"
   "k8s.io/kubernetes/pkg/apis/extensions"
   client "k8s.io/kubernetes/pkg/client/unversioned"
 )
 
 // updateReplicaCount attempts to update the Status.Replicas of the given ReplicaSet, with a single GET/PUT retry.
-func updateReplicaCount(rsClient client.ReplicaSetInterface, rs extensions.ReplicaSet, numReplicas int) (updateErr error) {
+func updateReplicaCount(rsClient client.ReplicaSetInterface, rs extensions.ReplicaSet, numReplicas, numFullyLabeledReplicas int) (updateErr error) {
   // This is the steady state. It happens when the ReplicaSet doesn't have any expectations, since
   // we do a periodic relist every 30s. If the generations differ but the replicas are
   // the same, a caller might've resized to the same replica count.
   if rs.Status.Replicas == numReplicas &&
+    rs.Status.FullyLabeledReplicas == numFullyLabeledReplicas &&
     rs.Generation == rs.Status.ObservedGeneration {
     return nil
   }
@@ -41,10 +44,12 @@ func updateReplicaCount(rsClient client.ReplicaSetInterface, rs extensions.Repli
 
   var getErr error
   for i, rs := 0, &rs; ; i++ {
-    glog.V(4).Infof("Updating replica count for ReplicaSet: %v, %d->%d (need %d), sequence No: %v->%v",
-      rs.Name, rs.Status.Replicas, numReplicas, rs.Spec.Replicas, rs.Status.ObservedGeneration, generation)
+    glog.V(4).Infof(fmt.Sprintf("Updating replica count for ReplicaSet: %s/%s, ", rs.Namespace, rs.Name) +
+      fmt.Sprintf("replicas %d->%d (need %d), ", rs.Status.Replicas, numReplicas, rs.Spec.Replicas) +
+      fmt.Sprintf("fullyLabeledReplicas %d->%d, ", rs.Status.FullyLabeledReplicas, numFullyLabeledReplicas) +
+      fmt.Sprintf("sequence No: %v->%v", rs.Status.ObservedGeneration, generation))
 
-    rs.Status = extensions.ReplicaSetStatus{Replicas: numReplicas, ObservedGeneration: generation}
+    rs.Status = extensions.ReplicaSetStatus{Replicas: numReplicas, FullyLabeledReplicas: numFullyLabeledReplicas, ObservedGeneration: generation}
     _, updateErr = rsClient.UpdateStatus(rs)
     if updateErr == nil || i >= statusUpdateRetries {
       return updateErr
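The steady-state check above now treats status as converged only when both counters and the observed generation all match; a change to any one of them forces a status PUT. Condensed, using the names from the hunk:

  converged := rs.Status.Replicas == numReplicas &&
      rs.Status.FullyLabeledReplicas == numFullyLabeledReplicas &&
      rs.Generation == rs.Status.ObservedGeneration
  if converged {
      return nil // skip the UpdateStatus call entirely
  }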
@@ -540,8 +540,21 @@ func (rm *ReplicationManager) syncReplicationController(key string) error {
     rm.manageReplicas(filteredPods, &rc)
   }
 
+  // Count the number of pods that have labels matching the labels of the pod
+  // template of the replication controller, the matching pods may have more
+  // labels than are in the template. Because the label of podTemplateSpec is
+  // a superset of the selector of the replication controller, so the possible
+  // matching pods must be part of the filteredPods.
+  fullyLabeledReplicasCount := 0
+  templateLabel := labels.Set(rc.Spec.Template.Labels).AsSelector()
+  for _, pod := range filteredPods {
+    if templateLabel.Matches(labels.Set(pod.Labels)) {
+      fullyLabeledReplicasCount++
+    }
+  }
+
   // Always updates status as pods come up or die.
-  if err := updateReplicaCount(rm.kubeClient.Core().ReplicationControllers(rc.Namespace), rc, len(filteredPods)); err != nil {
+  if err := updateReplicaCount(rm.kubeClient.Core().ReplicationControllers(rc.Namespace), rc, len(filteredPods), fullyLabeledReplicasCount); err != nil {
     // Multiple things could lead to this update failing. Requeuing the controller ensures
     // we retry with some fairness.
     glog.V(2).Infof("Failed to update replica count for controller %v/%v; requeuing; error: %v", rc.Namespace, rc.Name, err)
@@ -96,12 +96,12 @@ func newReplicationController(replicas int) *api.ReplicationController {
 }
 
 // create count pods with the given phase for the given rc (same selectors and namespace), and add them to the store.
-func newPodList(store cache.Store, count int, status api.PodPhase, rc *api.ReplicationController) *api.PodList {
+func newPodList(store cache.Store, count int, status api.PodPhase, rc *api.ReplicationController, name string) *api.PodList {
   pods := []api.Pod{}
   for i := 0; i < count; i++ {
     newPod := api.Pod{
       ObjectMeta: api.ObjectMeta{
-        Name:      fmt.Sprintf("pod%d", i),
+        Name:      fmt.Sprintf("%s%d", name, i),
         Labels:    rc.Spec.Selector,
         Namespace: rc.Namespace,
       },
@@ -144,7 +144,7 @@ func TestSyncReplicationControllerDoesNothing(t *testing.T) {
   // 2 running pods, a controller with 2 replicas, sync is a no-op
   controllerSpec := newReplicationController(2)
   manager.rcStore.Store.Add(controllerSpec)
-  newPodList(manager.podStore.Store, 2, api.PodRunning, controllerSpec)
+  newPodList(manager.podStore.Store, 2, api.PodRunning, controllerSpec, "pod")
 
   manager.podControl = &fakePodControl
   manager.syncReplicationController(getKey(controllerSpec, t))
@@ -161,7 +161,7 @@ func TestSyncReplicationControllerDeletes(t *testing.T) {
   // 2 running pods and a controller with 1 replica, one pod delete expected
   controllerSpec := newReplicationController(1)
   manager.rcStore.Store.Add(controllerSpec)
-  newPodList(manager.podStore.Store, 2, api.PodRunning, controllerSpec)
+  newPodList(manager.podStore.Store, 2, api.PodRunning, controllerSpec, "pod")
 
   manager.syncReplicationController(getKey(controllerSpec, t))
   validateSyncReplication(t, &fakePodControl, 0, 1)
@@ -184,7 +184,7 @@ func TestDeleteFinalStateUnknown(t *testing.T) {
   // the controller matching the selectors of the deleted pod into the work queue.
   controllerSpec := newReplicationController(1)
   manager.rcStore.Store.Add(controllerSpec)
-  pods := newPodList(nil, 1, api.PodRunning, controllerSpec)
+  pods := newPodList(nil, 1, api.PodRunning, controllerSpec, "pod")
   manager.deletePod(cache.DeletedFinalStateUnknown{Key: "foo", Obj: &pods.Items[0]})
 
   go manager.worker()
@@ -233,7 +233,7 @@ func TestStatusUpdatesWithoutReplicasChange(t *testing.T) {
   rc := newReplicationController(activePods)
   manager.rcStore.Store.Add(rc)
   rc.Status = api.ReplicationControllerStatus{Replicas: activePods}
-  newPodList(manager.podStore.Store, activePods, api.PodRunning, rc)
+  newPodList(manager.podStore.Store, activePods, api.PodRunning, rc, "pod")
 
   fakePodControl := controller.FakePodControl{}
   manager.podControl = &fakePodControl
@@ -274,9 +274,13 @@ func TestControllerUpdateReplicas(t *testing.T) {
   // Status.Replica should update to match number of pods in system, 1 new pod should be created.
   rc := newReplicationController(5)
   manager.rcStore.Store.Add(rc)
-  rc.Status = api.ReplicationControllerStatus{Replicas: 2, ObservedGeneration: 0}
+  rc.Status = api.ReplicationControllerStatus{Replicas: 2, FullyLabeledReplicas: 6, ObservedGeneration: 0}
   rc.Generation = 1
-  newPodList(manager.podStore.Store, 4, api.PodRunning, rc)
+  newPodList(manager.podStore.Store, 2, api.PodRunning, rc, "pod")
+  rcCopy := *rc
+  extraLabelMap := map[string]string{"foo": "bar", "extraKey": "extraValue"}
+  rcCopy.Spec.Selector = extraLabelMap
+  newPodList(manager.podStore.Store, 2, api.PodRunning, &rcCopy, "podWithExtraLabel")
 
   // This response body is just so we don't err out decoding the http response
   response := runtime.EncodeOrDie(testapi.Default.Codec(), &api.ReplicationController{})
@@ -288,7 +292,9 @@
   manager.syncReplicationController(getKey(rc, t))
 
   // 1. Status.Replicas should go up from 2->4 even though we created 5-4=1 pod.
-  // 2. Every update to the status should include the Generation of the spec.
+  // 2. Status.FullyLabeledReplicas should equal to the number of pods that
+  // has the extra labels, i.e., 2.
+  // 3. Every update to the status should include the Generation of the spec.
   rc.Status = api.ReplicationControllerStatus{Replicas: 4, ObservedGeneration: 1}
 
   decRc := runtime.EncodeOrDie(testapi.Default.Codec(), rc)
@@ -313,7 +319,7 @@ func TestSyncReplicationControllerDormancy(t *testing.T) {
 
   controllerSpec := newReplicationController(2)
   manager.rcStore.Store.Add(controllerSpec)
-  newPodList(manager.podStore.Store, 1, api.PodRunning, controllerSpec)
+  newPodList(manager.podStore.Store, 1, api.PodRunning, controllerSpec, "pod")
 
   // Creates a replica and sets expectations
   controllerSpec.Status.Replicas = 1
@@ -488,7 +494,7 @@ func TestWatchPods(t *testing.T) {
   go manager.podController.Run(stopCh)
   go wait.Until(manager.worker, 10*time.Millisecond, stopCh)
 
-  pods := newPodList(nil, 1, api.PodRunning, testControllerSpec)
+  pods := newPodList(nil, 1, api.PodRunning, testControllerSpec, "pod")
   testPod := pods.Items[0]
   testPod.Status.Phase = api.PodFailed
   fakeWatch.Add(&testPod)
@@ -528,7 +534,7 @@ func TestUpdatePods(t *testing.T) {
   manager.rcStore.Store.Add(&testControllerSpec2)
 
   // Put one pod in the podStore
-  pod1 := newPodList(manager.podStore.Store, 1, api.PodRunning, testControllerSpec1).Items[0]
+  pod1 := newPodList(manager.podStore.Store, 1, api.PodRunning, testControllerSpec1, "pod").Items[0]
   pod2 := pod1
   pod2.Labels = testControllerSpec2.Spec.Selector
 
@@ -567,7 +573,7 @@ func TestControllerUpdateRequeue(t *testing.T) {
   rc := newReplicationController(1)
   manager.rcStore.Store.Add(rc)
   rc.Status = api.ReplicationControllerStatus{Replicas: 2}
-  newPodList(manager.podStore.Store, 1, api.PodRunning, rc)
+  newPodList(manager.podStore.Store, 1, api.PodRunning, rc, "pod")
 
   fakePodControl := controller.FakePodControl{}
   manager.podControl = &fakePodControl
@@ -604,7 +610,7 @@ func TestControllerUpdateStatusWithFailure(t *testing.T) {
   })
   fakeRCClient := c.Core().ReplicationControllers("default")
   numReplicas := 10
-  updateReplicaCount(fakeRCClient, *rc, numReplicas)
+  updateReplicaCount(fakeRCClient, *rc, numReplicas, 0)
   updates, gets := 0, 0
   for _, a := range c.Actions() {
     if a.GetResource() != "replicationcontrollers" {
@@ -651,7 +657,7 @@ func doTestControllerBurstReplicas(t *testing.T, burstReplicas, numReplicas int)
   manager.rcStore.Store.Add(controllerSpec)
 
   expectedPods := 0
-  pods := newPodList(nil, numReplicas, api.PodPending, controllerSpec)
+  pods := newPodList(nil, numReplicas, api.PodPending, controllerSpec, "pod")
 
   rcKey, err := controller.KeyFunc(controllerSpec)
   if err != nil {
@@ -767,7 +773,7 @@ func doTestControllerBurstReplicas(t *testing.T, burstReplicas, numReplicas int)
       t.Fatalf("Unexpected number of active pods, expected %d, got %d", controllerSpec.Spec.Replicas, activePods)
     }
     // Replenish the pod list, since we cut it down sizing up
-    pods = newPodList(nil, replicas, api.PodRunning, controllerSpec)
+    pods = newPodList(nil, replicas, api.PodRunning, controllerSpec, "pod")
   }
 }
 
@@ -799,7 +805,7 @@ func TestRCSyncExpectations(t *testing.T) {
 
   controllerSpec := newReplicationController(2)
   manager.rcStore.Store.Add(controllerSpec)
-  pods := newPodList(nil, 2, api.PodPending, controllerSpec)
+  pods := newPodList(nil, 2, api.PodPending, controllerSpec, "pod")
   manager.podStore.Store.Add(&pods.Items[0])
   postExpectationsPod := pods.Items[1]
 
@@ -914,7 +920,7 @@ func TestOverlappingRCs(t *testing.T) {
      manager.rcStore.Store.Add(shuffledControllers[j])
    }
    // Add a pod and make sure only the oldest rc is synced
-   pods := newPodList(nil, 1, api.PodPending, controllers[0])
+   pods := newPodList(nil, 1, api.PodPending, controllers[0], "pod")
   rcKey := getKey(controllers[0], t)
 
   manager.addPod(&pods.Items[0])
@@ -936,7 +942,7 @@ func TestDeletionTimestamp(t *testing.T) {
   if err != nil {
     t.Errorf("Couldn't get key for object %+v: %v", controllerSpec, err)
   }
-  pod := newPodList(nil, 1, api.PodPending, controllerSpec).Items[0]
+  pod := newPodList(nil, 1, api.PodPending, controllerSpec, "pod").Items[0]
   pod.DeletionTimestamp = &unversioned.Time{time.Now()}
   manager.expectations.ExpectDeletions(rcKey, []string{controller.PodKey(&pod)})
 
@@ -956,7 +962,7 @@ func TestDeletionTimestamp(t *testing.T) {
 
   // An update from no deletion timestamp to having one should be treated
   // as a deletion.
-  oldPod := newPodList(nil, 1, api.PodPending, controllerSpec).Items[0]
+  oldPod := newPodList(nil, 1, api.PodPending, controllerSpec, "pod").Items[0]
   manager.expectations.ExpectDeletions(rcKey, []string{controller.PodKey(&pod)})
   manager.updatePod(&oldPod, &pod)
 
@@ -19,17 +19,20 @@ limitations under the License.
 package replication

 import (
+	"fmt"
+
 	"github.com/golang/glog"
 	"k8s.io/kubernetes/pkg/api"
 	unversionedcore "k8s.io/kubernetes/pkg/client/typed/generated/core/unversioned"
 )

 // updateReplicaCount attempts to update the Status.Replicas of the given controller, with a single GET/PUT retry.
-func updateReplicaCount(rcClient unversionedcore.ReplicationControllerInterface, controller api.ReplicationController, numReplicas int) (updateErr error) {
+func updateReplicaCount(rcClient unversionedcore.ReplicationControllerInterface, controller api.ReplicationController, numReplicas, numFullyLabeledReplicas int) (updateErr error) {
 	// This is the steady state. It happens when the rc doesn't have any expectations, since
 	// we do a periodic relist every 30s. If the generations differ but the replicas are
 	// the same, a caller might've resized to the same replica count.
 	if controller.Status.Replicas == numReplicas &&
+		controller.Status.FullyLabeledReplicas == numFullyLabeledReplicas &&
 		controller.Generation == controller.Status.ObservedGeneration {
 		return nil
 	}

@@ -41,10 +44,12 @@ func updateReplicaCount(rcClient unversionedcore.ReplicationControllerInterface,

 	var getErr error
 	for i, rc := 0, &controller; ; i++ {
-		glog.V(4).Infof("Updating replica count for rc: %v, %d->%d (need %d), sequence No: %v->%v",
-			controller.Name, controller.Status.Replicas, numReplicas, controller.Spec.Replicas, controller.Status.ObservedGeneration, generation)
+		glog.V(4).Infof(fmt.Sprintf("Updating replica count for rc: %s/%s, ", controller.Namespace, controller.Name) +
+			fmt.Sprintf("replicas %d->%d (need %d), ", controller.Status.Replicas, numReplicas, controller.Spec.Replicas) +
+			fmt.Sprintf("fullyLabeledReplicas %d->%d, ", controller.Status.FullyLabeledReplicas, numFullyLabeledReplicas) +
+			fmt.Sprintf("sequence No: %v->%v", controller.Status.ObservedGeneration, generation))

-		rc.Status = api.ReplicationControllerStatus{Replicas: numReplicas, ObservedGeneration: generation}
+		rc.Status = api.ReplicationControllerStatus{Replicas: numReplicas, FullyLabeledReplicas: numFullyLabeledReplicas, ObservedGeneration: generation}
 		_, updateErr = rcClient.UpdateStatus(rc)
 		if updateErr == nil || i >= statusUpdateRetries {
 			return updateErr
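The function's shape is a common status-writer pattern: short-circuit when the observed state already matches, otherwise PUT the new status and, on failure, re-GET the object so the next attempt works from fresh state. A minimal, self-contained sketch of that retry skeleton (hypothetical put/get stand-ins, not the real client calls):

package main

import "errors"

// updateWithRetry is a hypothetical rendering of the GET/PUT loop in
// updateReplicaCount: try the status write, and after each failure fetch
// the latest object before trying again, up to the given retry budget.
func updateWithRetry(put func() error, get func() error, retries int) error {
	var updateErr error
	for i := 0; ; i++ {
		updateErr = put()
		if updateErr == nil || i >= retries {
			// Success, or out of retries: surface the last PUT error (if any).
			return updateErr
		}
		if getErr := get(); getErr != nil {
			// If the refresh GET fails too, stop and report that instead.
			return getErr
		}
	}
}

func main() {
	attempts := 0
	put := func() error {
		attempts++
		if attempts < 2 {
			return errors.New("conflict") // first PUT loses a write race
		}
		return nil
	}
	get := func() error { return nil } // pretend the re-read always succeeds
	if err := updateWithRetry(put, get, 1); err != nil {
		panic(err)
	}
}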
@@ -172,7 +172,7 @@ func rsAndPodsWithHashKeySynced(deployment *extensions.Deployment, c clientset.I

 // addHashKeyToRSAndPods adds pod-template-hash information to the given rs, if it's not already there, with the following steps:
 // 1. Add hash label to the rs's pod template, and make sure the controller sees this update so that no orphaned pods will be created
-// 2. Add hash label to all pods this rs owns
+// 2. Add hash label to all pods this rs owns, wait until replicaset controller reports rs.Status.FullyLabeledReplicas equal to the desired number of replicas
 // 3. Add hash label to the rs's label and selector
 func addHashKeyToRSAndPods(deployment *extensions.Deployment, c clientset.Interface, rs extensions.ReplicaSet, getPodList podListFunc) (updatedRS *extensions.ReplicaSet, err error) {
 	updatedRS = &rs

@@ -236,6 +236,15 @@ func addHashKeyToRSAndPods(deployment *extensions.Deployment, c clientset.Interf
 		return updatedRS, nil
 	}

+	// We need to wait for the replicaset controller to observe the pods being
+	// labeled with pod template hash. Because previously we've called
+	// waitForReplicaSetUpdated, the replicaset controller should have dropped
+	// FullyLabeledReplicas to 0 already, we only need to wait it to increase
+	// back to the number of replicas in the spec.
+	if err = waitForPodsHashPopulated(c, updatedRS.Generation, namespace, updatedRS.Name); err != nil {
+		return nil, fmt.Errorf("%s %s/%s: error waiting for replicaset controller to observe pods being labeled with template hash: %v", updatedRS.Kind, updatedRS.Namespace, updatedRS.Name, err)
+	}
+
 	// 3. Update rs label and selector to include the new hash label
 	// Copy the old selector, so that we can scrub out any orphaned pods
 	if updatedRS, rsUpdated, err = rsutil.UpdateRSWithRetries(c.Extensions().ReplicaSets(namespace), updatedRS,

@@ -270,6 +279,17 @@ func waitForReplicaSetUpdated(c clientset.Interface, desiredGeneration int64, na
 	})
 }

+func waitForPodsHashPopulated(c clientset.Interface, desiredGeneration int64, namespace, name string) error {
+	return wait.Poll(1*time.Second, 1*time.Minute, func() (bool, error) {
+		rs, err := c.Extensions().ReplicaSets(namespace).Get(name)
+		if err != nil {
+			return false, err
+		}
+		return rs.Status.ObservedGeneration >= desiredGeneration &&
+			rs.Status.FullyLabeledReplicas == rs.Spec.Replicas, nil
+	})
+}
+
 // labelPodsWithHash labels all pods in the given podList with the new hash label.
 // The returned bool value can be used to tell if all pods are actually labeled.
 func labelPodsWithHash(podList *api.PodList, rs *extensions.ReplicaSet, c clientset.Interface, namespace, hash string) (bool, error) {
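waitForPodsHashPopulated leans on the wait.Poll contract: a condition function that reports done (true, nil), not-yet (false, nil), or a terminal error. A self-contained sketch of those semantics, with a hypothetical poll helper standing in for k8s.io/kubernetes/pkg/util/wait.Poll:

package main

import (
	"errors"
	"fmt"
	"time"
)

// poll is a hypothetical stand-in for wait.Poll: invoke cond every interval
// until it reports done, fails permanently, or the timeout elapses.
func poll(interval, timeout time.Duration, cond func() (bool, error)) error {
	deadline := time.Now().Add(timeout)
	for {
		done, err := cond()
		if err != nil {
			return err // terminal error from the condition
		}
		if done {
			return nil
		}
		if time.Now().After(deadline) {
			return errors.New("timed out waiting for the condition")
		}
		time.Sleep(interval)
	}
}

func main() {
	observed := 0
	// Condition mimicking the wait in the diff: done once the (faked)
	// FullyLabeledReplicas count climbs back to the desired replica count.
	err := poll(10*time.Millisecond, time.Second, func() (bool, error) {
		observed++ // pretend one more labeled pod was observed
		return observed >= 3, nil
	})
	fmt.Println(err) // <nil>
}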
@@ -291,21 +291,25 @@ func TestGetNewRC(t *testing.T) {
 func TestGetOldRCs(t *testing.T) {
 	newDeployment := generateDeployment("nginx")
 	newRS := generateRS(newDeployment)
+	newRS.Status.FullyLabeledReplicas = newRS.Spec.Replicas
 	newPod := generatePodFromRS(newRS)

 	// create 2 old deployments and related replica sets/pods, with the same labels but different template
 	oldDeployment := generateDeployment("nginx")
 	oldDeployment.Spec.Template.Spec.Containers[0].Name = "nginx-old-1"
 	oldRS := generateRS(oldDeployment)
+	oldRS.Status.FullyLabeledReplicas = oldRS.Spec.Replicas
 	oldPod := generatePodFromRS(oldRS)
 	oldDeployment2 := generateDeployment("nginx")
 	oldDeployment2.Spec.Template.Spec.Containers[0].Name = "nginx-old-2"
 	oldRS2 := generateRS(oldDeployment2)
+	oldRS2.Status.FullyLabeledReplicas = oldRS2.Spec.Replicas
 	oldPod2 := generatePodFromRS(oldRS2)

 	// create 1 ReplicaSet that existed before the deployment, with the same labels as the deployment
 	existedPod := generatePod(newDeployment.Spec.Template.Labels, "foo")
 	existedRS := generateRSWithLabel(newDeployment.Spec.Template.Labels, "foo")
+	existedRS.Status.FullyLabeledReplicas = existedRS.Spec.Replicas

 	tests := []struct {
 		test string
@@ -27,10 +27,12 @@ import (
 	clientset "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset"
 	client "k8s.io/kubernetes/pkg/client/unversioned"
 	"k8s.io/kubernetes/pkg/kubectl"
+	"k8s.io/kubernetes/pkg/labels"
 	"k8s.io/kubernetes/pkg/util"
 	deploymentutil "k8s.io/kubernetes/pkg/util/deployment"
 	"k8s.io/kubernetes/pkg/util/intstr"
 	"k8s.io/kubernetes/pkg/util/wait"
+	"k8s.io/kubernetes/pkg/watch"

 	. "github.com/onsi/ginkgo"
 	. "github.com/onsi/gomega"

@@ -434,12 +436,49 @@ func testDeploymentCleanUpPolicy(f *Framework) {
 	// Create a deployment to delete nginx pods and instead bring up redis pods.
 	deploymentName := "test-cleanup-deployment"
 	Logf("Creating deployment %s", deploymentName)
+
+	pods, err := c.Pods(ns).List(api.ListOptions{LabelSelector: labels.Everything()})
+	if err != nil {
+		Expect(err).NotTo(HaveOccurred(), "Failed to query for pods: %v", err)
+	}
+	options := api.ListOptions{
+		ResourceVersion: pods.ListMeta.ResourceVersion,
+	}
+	stopCh := make(chan struct{})
+	w, err := c.Pods(ns).Watch(options)
+	go func() {
+		// There should be only one pod being created, which is the pod with the redis image.
+		// The old RS shouldn't create new pod when deployment controller adding pod template hash label to its selector.
+		numPodCreation := 1
+		for {
+			select {
+			case event, _ := <-w.ResultChan():
+				if event.Type != watch.Added {
+					continue
+				}
+				numPodCreation--
+				if numPodCreation < 0 {
+					Failf("Expect only one pod creation, the second creation event: %#v\n", event)
+				}
+				pod, ok := event.Object.(*api.Pod)
+				if !ok {
+					Fail("Expect event Object to be a pod")
+				}
+				if pod.Spec.Containers[0].Name != redisImageName {
+					Failf("Expect the created pod to have container name %s, got pod %#v\n", redisImageName, pod)
+				}
+			case <-stopCh:
+				return
+			}
+		}
+	}()
-	_, err := c.Extensions().Deployments(ns).Create(newDeployment(deploymentName, replicas, deploymentPodLabels, redisImageName, redisImage, extensions.RollingUpdateDeploymentStrategyType, revisionHistoryLimit))
+	_, err = c.Extensions().Deployments(ns).Create(newDeployment(deploymentName, replicas, deploymentPodLabels, redisImageName, redisImage, extensions.RollingUpdateDeploymentStrategyType, revisionHistoryLimit))
 	Expect(err).NotTo(HaveOccurred())
 	defer stopDeployment(c, f.Client, ns, deploymentName)

 	err = waitForDeploymentOldRSsNum(c, ns, deploymentName, *revisionHistoryLimit)
 	Expect(err).NotTo(HaveOccurred())
+	close(stopCh)
 }

 // testRolloverDeployment tests that deployment supports rollover.
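The goroutine above is a standard watch-based assertion: consume events from a channel, react only to Added, and bail out once a stop channel closes. A self-contained sketch of the same select/loop shape with plain Go channels (hypothetical event type, no Kubernetes client involved):

package main

import "fmt"

type eventType string

const added eventType = "ADDED" // stands in for watch.Added

type event struct {
	Type eventType
	Name string
}

// countAdded drains events until stop closes, counting only Added events,
// mirroring the structure of the e2e goroutine above.
func countAdded(events <-chan event, stop <-chan struct{}, counted chan<- int) {
	n := 0
	for {
		select {
		case ev := <-events:
			if ev.Type != added {
				continue
			}
			n++
		case <-stop:
			counted <- n
			return
		}
	}
}

func main() {
	events := make(chan event)
	stop := make(chan struct{})
	counted := make(chan int)
	go countAdded(events, stop, counted)

	events <- event{Type: added, Name: "redis-pod"}
	events <- event{Type: "MODIFIED", Name: "redis-pod"} // non-Added events are ignored
	close(stop)
	fmt.Println(<-counted) // 1
}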