Merge pull request #15999 from aveshagarwal/master-issue-15624

Auto commit by PR queue bot
k8s-merge-robot 2015-11-24 17:07:10 -08:00
commit d42030170b
44 changed files with 8781 additions and 8587 deletions

View File

@ -12037,6 +12037,10 @@
"type": "integer",
"format": "int32",
"description": "The number of times this event has occurred."
},
"type": {
"type": "string",
"description": "Type of this event (Normal, Warning), new types could be added in the future"
}
}
},

View File

@ -389,5 +389,5 @@ func tryIptablesProxy(iptver iptables.IptablesVersioner) string {
}
func (s *ProxyServer) birthCry() {
s.Recorder.Eventf(s.Config.NodeRef, "Starting", "Starting kube-proxy.")
s.Recorder.Eventf(s.Config.NodeRef, api.EventTypeNormal, "Starting", "Starting kube-proxy.")
}

View File

@ -86,7 +86,7 @@ func (s *controller) scheduleOne() {
dest, err := s.algorithm.Schedule(pod)
if err != nil {
log.V(1).Infof("Failed to schedule: %+v", pod)
s.recorder.Eventf(pod, FailedScheduling, "Error scheduling: %v", err)
s.recorder.Eventf(pod, api.EventTypeWarning, FailedScheduling, "Error scheduling: %v", err)
s.error(pod, err)
return
}
@ -99,9 +99,9 @@ func (s *controller) scheduleOne() {
}
if err := s.binder.Bind(b); err != nil {
log.V(1).Infof("Failed to bind pod: %+v", err)
s.recorder.Eventf(pod, FailedScheduling, "Binding rejected: %v", err)
s.recorder.Eventf(pod, api.EventTypeWarning, FailedScheduling, "Binding rejected: %v", err)
s.error(pod, err)
return
}
s.recorder.Eventf(pod, Scheduled, "Successfully assigned %v to %v", pod.Name, dest)
s.recorder.Eventf(pod, api.EventTypeNormal, Scheduled, "Successfully assigned %v to %v", pod.Name, dest)
}

View File

@ -313,6 +313,7 @@ func NewTestOffer(id string) *mesos.Offer {
// Add assertions to reason about event streams
type Event struct {
Object runtime.Object
Type string
Reason string
Message string
}
@ -334,15 +335,15 @@ func NewEventObserver() *EventObserver {
}
}
func (o *EventObserver) Event(object runtime.Object, reason, message string) {
o.fifo <- Event{Object: object, Reason: reason, Message: message}
func (o *EventObserver) Event(object runtime.Object, eventtype, reason, message string) {
o.fifo <- Event{Object: object, Type: eventtype, Reason: reason, Message: message}
}
func (o *EventObserver) Eventf(object runtime.Object, reason, messageFmt string, args ...interface{}) {
o.fifo <- Event{Object: object, Reason: reason, Message: fmt.Sprintf(messageFmt, args...)}
func (o *EventObserver) Eventf(object runtime.Object, eventtype, reason, messageFmt string, args ...interface{}) {
o.fifo <- Event{Object: object, Type: eventtype, Reason: reason, Message: fmt.Sprintf(messageFmt, args...)}
}
func (o *EventObserver) PastEventf(object runtime.Object, timestamp unversioned.Time, reason, messageFmt string, args ...interface{}) {
o.fifo <- Event{Object: object, Reason: reason, Message: fmt.Sprintf(messageFmt, args...)}
func (o *EventObserver) PastEventf(object runtime.Object, timestamp unversioned.Time, eventtype, reason, messageFmt string, args ...interface{}) {
o.fifo <- Event{Object: object, Type: eventtype, Reason: reason, Message: fmt.Sprintf(messageFmt, args...)}
}
func (a *EventAssertions) Event(observer *EventObserver, pred EventPredicate, msgAndArgs ...interface{}) bool {

View File

@ -5095,7 +5095,7 @@ span.icon > [class^="icon-"], span.icon > [class*=" icon-"] { cursor: default; }
</div>
<div id="footer">
<div id="footer-text">
Last updated 2015-11-06 18:46:07 UTC
Last updated 2015-11-13 20:43:27 UTC
</div>
</div>
</body>

View File

@ -5891,6 +5891,13 @@ The resulting set of endpoints can be viewed as:<br>
<td class="tableblock halign-left valign-top"><p class="tableblock">integer (int32)</p></td>
<td class="tableblock halign-left valign-top"></td>
</tr>
<tr>
<td class="tableblock halign-left valign-top"><p class="tableblock">type</p></td>
<td class="tableblock halign-left valign-top"><p class="tableblock">Type of this event (Normal, Warning), new types could be added in the future</p></td>
<td class="tableblock halign-left valign-top"><p class="tableblock">false</p></td>
<td class="tableblock halign-left valign-top"><p class="tableblock">string</p></td>
<td class="tableblock halign-left valign-top"></td>
</tr>
</tbody>
</table>

View File

@ -23664,7 +23664,7 @@ span.icon > [class^="icon-"], span.icon > [class*=" icon-"] { cursor: default; }
</div>
<div id="footer">
<div id="footer-text">
Last updated 2015-11-06 18:46:00 UTC
Last updated 2015-11-13 20:43:21 UTC
</div>
</div>
</body>

View File

@ -511,6 +511,7 @@ func deepCopy_api_Event(in Event, out *Event, c *conversion.Cloner) error {
return err
}
out.Count = in.Count
out.Type = in.Type
return nil
}

File diff suppressed because it is too large

View File

@ -1781,6 +1781,14 @@ type EventSource struct {
Host string `json:"host,omitempty"`
}
// Valid values for event types (new types could be added in the future)
const (
// Information only and will not cause any problems
EventTypeNormal string = "Normal"
// These events are to warn that something might go wrong
EventTypeWarning string = "Warning"
)
// Event is a report of an event somewhere in the cluster.
// TODO: Decide whether to store these separately or with the object they apply to.
type Event struct {
@ -1811,6 +1819,9 @@ type Event struct {
// The number of times this event has occurred.
Count int `json:"count,omitempty"`
// Type of this event (Normal, Warning), new types could be added in the future.
Type string `json:"type,omitempty"`
}
// EventList is a list of events.
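
With the new field and constants in place, a fully populated event looks like the sketch below. This is a hypothetical literal for illustration only (it is not part of this change), assuming the k8s.io/kubernetes/pkg/api and pkg/api/unversioned import paths of this tree:

	now := unversioned.Now()
	event := api.Event{
		ObjectMeta:     api.ObjectMeta{Name: "mypod.14123456789abcdef", Namespace: "default"},
		InvolvedObject: api.ObjectReference{Kind: "Pod", Namespace: "default", Name: "mypod"},
		Reason:         "Started",
		Message:        "Started container",
		Source:         api.EventSource{Component: "kubelet", Host: "node-1"},
		FirstTimestamp: now,
		LastTimestamp:  now,
		Count:          1,
		// New in this change: one of api.EventTypeNormal or api.EventTypeWarning.
		Type: api.EventTypeNormal,
	}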

View File

@ -698,6 +698,7 @@ func autoconvert_api_Event_To_v1_Event(in *api.Event, out *Event, s conversion.S
return err
}
out.Count = in.Count
out.Type = in.Type
return nil
}
@ -3722,6 +3723,7 @@ func autoconvert_v1_Event_To_api_Event(in *Event, out *api.Event, s conversion.S
return err
}
out.Count = in.Count
out.Type = in.Type
return nil
}

View File

@ -545,6 +545,7 @@ func deepCopy_v1_Event(in Event, out *Event, c *conversion.Cloner) error {
return err
}
out.Count = in.Count
out.Type = in.Type
return nil
}

File diff suppressed because it is too large

View File

@ -2220,6 +2220,14 @@ type EventSource struct {
Host string `json:"host,omitempty"`
}
// Valid values for event types (new types could be added in the future)
const (
// Information only and will not cause any problems
EventTypeNormal string = "Normal"
// These events are to warn that something might go wrong
EventTypeWarning string = "Warning"
)
// Event is a report of an event somewhere in the cluster.
// TODO: Decide whether to store these separately or with the object they apply to.
type Event struct {
@ -2251,6 +2259,9 @@ type Event struct {
// The number of times this event has occurred.
Count int `json:"count,omitempty"`
// Type of this event (Normal, Warning), new types could be added in the future
Type string `json:"type,omitempty"`
}
// EventList is a list of events.

View File

@ -343,6 +343,7 @@ var map_Event = map[string]string{
"firstTimestamp": "The time at which the event was first recorded. (Time of server receipt is in TypeMeta.)",
"lastTimestamp": "The time at which the most recent occurrence of this event was recorded.",
"count": "The number of times this event has occurred.",
"type": "Type of this event (Normal, Warning), new types could be added in the future",
}
func (Event) SwaggerDoc() map[string]string {

View File

@ -53,6 +53,7 @@ type EventRecorder interface {
// Event constructs an event from the given information and puts it in the queue for sending.
// 'object' is the object this event is about. Event will make a reference-- or you may also
// pass a reference to the object directly.
// 'eventtype' is the type of this event, and can be one of Normal or Warning. New types could be added in the future.
// 'reason' is the reason this event is generated. 'reason' should be short and unique; it
// should be in UpperCamelCase format (starting with a capital letter). "reason" will be used
// to automate handling of events, so imagine people writing switch statements to handle them.
@ -60,13 +61,13 @@ type EventRecorder interface {
// 'message' is intended to be human readable.
//
// The resulting event will be created in the same namespace as the reference object.
Event(object runtime.Object, reason, message string)
Event(object runtime.Object, eventtype, reason, message string)
// Eventf is just like Event, but with Sprintf for the message field.
Eventf(object runtime.Object, reason, messageFmt string, args ...interface{})
Eventf(object runtime.Object, eventtype, reason, messageFmt string, args ...interface{})
// PastEventf is just like Eventf, but with an option to specify the event's 'timestamp' field.
PastEventf(object runtime.Object, timestamp unversioned.Time, reason, messageFmt string, args ...interface{})
PastEventf(object runtime.Object, timestamp unversioned.Time, eventtype, reason, messageFmt string, args ...interface{})
}
// EventBroadcaster knows how to receive events and send them to any EventSink, watcher, or log.
@ -202,7 +203,7 @@ func recordEvent(sink EventSink, event *api.Event, patch []byte, updateExistingE
func (eventBroadcaster *eventBroadcasterImpl) StartLogging(logf func(format string, args ...interface{})) watch.Interface {
return eventBroadcaster.StartEventWatcher(
func(e *api.Event) {
logf("Event(%#v): reason: '%v' %v", e.InvolvedObject, e.Reason, e.Message)
logf("Event(%#v): type: '%v' reason: '%v' %v", e.InvolvedObject, e.Type, e.Reason, e.Message)
})
}
@ -240,14 +241,19 @@ type recorderImpl struct {
clock util.Clock
}
func (recorder *recorderImpl) generateEvent(object runtime.Object, timestamp unversioned.Time, reason, message string) {
func (recorder *recorderImpl) generateEvent(object runtime.Object, timestamp unversioned.Time, eventtype, reason, message string) {
ref, err := api.GetReference(object)
if err != nil {
glog.Errorf("Could not construct reference to: '%#v' due to: '%v'. Will not report event: '%v' '%v'", object, err, reason, message)
glog.Errorf("Could not construct reference to: '%#v' due to: '%v'. Will not report event: '%v' '%v' '%v'", object, err, eventtype, reason, message)
return
}
event := recorder.makeEvent(ref, reason, message)
if !validateEventType(eventtype) {
glog.Errorf("Unsupported event type: '%v'", eventtype)
return
}
event := recorder.makeEvent(ref, eventtype, reason, message)
event.Source = recorder.source
go func() {
@ -256,19 +262,27 @@ func (recorder *recorderImpl) generateEvent(object runtime.Object, timestamp unv
}()
}
func (recorder *recorderImpl) Event(object runtime.Object, reason, message string) {
recorder.generateEvent(object, unversioned.Now(), reason, message)
func validateEventType(eventtype string) bool {
switch eventtype {
case api.EventTypeNormal, api.EventTypeWarning:
return true
}
return false
}
func (recorder *recorderImpl) Eventf(object runtime.Object, reason, messageFmt string, args ...interface{}) {
recorder.Event(object, reason, fmt.Sprintf(messageFmt, args...))
func (recorder *recorderImpl) Event(object runtime.Object, eventtype, reason, message string) {
recorder.generateEvent(object, unversioned.Now(), eventtype, reason, message)
}
func (recorder *recorderImpl) PastEventf(object runtime.Object, timestamp unversioned.Time, reason, messageFmt string, args ...interface{}) {
recorder.generateEvent(object, timestamp, reason, fmt.Sprintf(messageFmt, args...))
func (recorder *recorderImpl) Eventf(object runtime.Object, eventtype, reason, messageFmt string, args ...interface{}) {
recorder.Event(object, eventtype, reason, fmt.Sprintf(messageFmt, args...))
}
func (recorder *recorderImpl) makeEvent(ref *api.ObjectReference, reason, message string) *api.Event {
func (recorder *recorderImpl) PastEventf(object runtime.Object, timestamp unversioned.Time, eventtype, reason, messageFmt string, args ...interface{}) {
recorder.generateEvent(object, timestamp, eventtype, reason, fmt.Sprintf(messageFmt, args...))
}
func (recorder *recorderImpl) makeEvent(ref *api.ObjectReference, eventtype, reason, message string) *api.Event {
t := unversioned.Time{recorder.clock.Now()}
namespace := ref.Namespace
if namespace == "" {
@ -285,5 +299,6 @@ func (recorder *recorderImpl) makeEvent(ref *api.ObjectReference, reason, messag
FirstTimestamp: t,
LastTimestamp: t,
Count: 1,
Type: eventtype,
}
}
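
Every call site now names the event type first. A minimal wiring sketch, under the assumption that record.NewBroadcaster and EventBroadcaster.NewRecorder keep the signatures this file already uses elsewhere (pod is a placeholder *api.Pod):

	broadcaster := record.NewBroadcaster()
	// StartLogging now prints the type as well: "type: 'Normal' reason: ..."
	broadcaster.StartLogging(glog.Infof)
	recorder := broadcaster.NewRecorder(api.EventSource{Component: "my-controller"})

	// Anything other than api.EventTypeNormal or api.EventTypeWarning fails
	// validateEventType, and generateEvent drops the event with an error log.
	recorder.Eventf(pod, api.EventTypeNormal, "Started", "Started container %s", "main")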

View File

@ -127,6 +127,7 @@ func TestEventf(t *testing.T) {
}
table := []struct {
obj k8sruntime.Object
eventtype string
reason string
messageFmt string
elements []interface{}
@ -136,6 +137,7 @@ func TestEventf(t *testing.T) {
}{
{
obj: testRef,
eventtype: api.EventTypeNormal,
reason: "Started",
messageFmt: "some verbose message: %v",
elements: []interface{}{1},
@ -156,12 +158,14 @@ func TestEventf(t *testing.T) {
Message: "some verbose message: 1",
Source: api.EventSource{Component: "eventTest"},
Count: 1,
Type: api.EventTypeNormal,
},
expectLog: `Event(api.ObjectReference{Kind:"Pod", Namespace:"baz", Name:"foo", UID:"bar", APIVersion:"version", ResourceVersion:"", FieldPath:"spec.containers[2]"}): reason: 'Started' some verbose message: 1`,
expectLog: `Event(api.ObjectReference{Kind:"Pod", Namespace:"baz", Name:"foo", UID:"bar", APIVersion:"version", ResourceVersion:"", FieldPath:"spec.containers[2]"}): type: 'Normal' reason: 'Started' some verbose message: 1`,
expectUpdate: false,
},
{
obj: testPod,
eventtype: api.EventTypeNormal,
reason: "Killed",
messageFmt: "some other verbose message: %v",
elements: []interface{}{1},
@ -181,12 +185,14 @@ func TestEventf(t *testing.T) {
Message: "some other verbose message: 1",
Source: api.EventSource{Component: "eventTest"},
Count: 1,
Type: api.EventTypeNormal,
},
expectLog: `Event(api.ObjectReference{Kind:"Pod", Namespace:"baz", Name:"foo", UID:"bar", APIVersion:"version", ResourceVersion:"", FieldPath:""}): reason: 'Killed' some other verbose message: 1`,
expectLog: `Event(api.ObjectReference{Kind:"Pod", Namespace:"baz", Name:"foo", UID:"bar", APIVersion:"version", ResourceVersion:"", FieldPath:""}): type: 'Normal' reason: 'Killed' some other verbose message: 1`,
expectUpdate: false,
},
{
obj: testRef,
eventtype: api.EventTypeNormal,
reason: "Started",
messageFmt: "some verbose message: %v",
elements: []interface{}{1},
@ -207,12 +213,14 @@ func TestEventf(t *testing.T) {
Message: "some verbose message: 1",
Source: api.EventSource{Component: "eventTest"},
Count: 2,
Type: api.EventTypeNormal,
},
expectLog: `Event(api.ObjectReference{Kind:"Pod", Namespace:"baz", Name:"foo", UID:"bar", APIVersion:"version", ResourceVersion:"", FieldPath:"spec.containers[2]"}): reason: 'Started' some verbose message: 1`,
expectLog: `Event(api.ObjectReference{Kind:"Pod", Namespace:"baz", Name:"foo", UID:"bar", APIVersion:"version", ResourceVersion:"", FieldPath:"spec.containers[2]"}): type: 'Normal' reason: 'Started' some verbose message: 1`,
expectUpdate: true,
},
{
obj: testRef2,
eventtype: api.EventTypeNormal,
reason: "Started",
messageFmt: "some verbose message: %v",
elements: []interface{}{1},
@ -233,12 +241,14 @@ func TestEventf(t *testing.T) {
Message: "some verbose message: 1",
Source: api.EventSource{Component: "eventTest"},
Count: 1,
Type: api.EventTypeNormal,
},
expectLog: `Event(api.ObjectReference{Kind:"Pod", Namespace:"baz", Name:"foo", UID:"differentUid", APIVersion:"version", ResourceVersion:"", FieldPath:"spec.containers[3]"}): reason: 'Started' some verbose message: 1`,
expectLog: `Event(api.ObjectReference{Kind:"Pod", Namespace:"baz", Name:"foo", UID:"differentUid", APIVersion:"version", ResourceVersion:"", FieldPath:"spec.containers[3]"}): type: 'Normal' reason: 'Started' some verbose message: 1`,
expectUpdate: false,
},
{
obj: testRef,
eventtype: api.EventTypeNormal,
reason: "Started",
messageFmt: "some verbose message: %v",
elements: []interface{}{1},
@ -259,12 +269,14 @@ func TestEventf(t *testing.T) {
Message: "some verbose message: 1",
Source: api.EventSource{Component: "eventTest"},
Count: 3,
Type: api.EventTypeNormal,
},
expectLog: `Event(api.ObjectReference{Kind:"Pod", Namespace:"baz", Name:"foo", UID:"bar", APIVersion:"version", ResourceVersion:"", FieldPath:"spec.containers[2]"}): reason: 'Started' some verbose message: 1`,
expectLog: `Event(api.ObjectReference{Kind:"Pod", Namespace:"baz", Name:"foo", UID:"bar", APIVersion:"version", ResourceVersion:"", FieldPath:"spec.containers[2]"}): type: 'Normal' reason: 'Started' some verbose message: 1`,
expectUpdate: true,
},
{
obj: testRef2,
eventtype: api.EventTypeNormal,
reason: "Stopped",
messageFmt: "some verbose message: %v",
elements: []interface{}{1},
@ -285,12 +297,14 @@ func TestEventf(t *testing.T) {
Message: "some verbose message: 1",
Source: api.EventSource{Component: "eventTest"},
Count: 1,
Type: api.EventTypeNormal,
},
expectLog: `Event(api.ObjectReference{Kind:"Pod", Namespace:"baz", Name:"foo", UID:"differentUid", APIVersion:"version", ResourceVersion:"", FieldPath:"spec.containers[3]"}): reason: 'Stopped' some verbose message: 1`,
expectLog: `Event(api.ObjectReference{Kind:"Pod", Namespace:"baz", Name:"foo", UID:"differentUid", APIVersion:"version", ResourceVersion:"", FieldPath:"spec.containers[3]"}): type: 'Normal' reason: 'Stopped' some verbose message: 1`,
expectUpdate: false,
},
{
obj: testRef2,
eventtype: api.EventTypeNormal,
reason: "Stopped",
messageFmt: "some verbose message: %v",
elements: []interface{}{1},
@ -311,8 +325,9 @@ func TestEventf(t *testing.T) {
Message: "some verbose message: 1",
Source: api.EventSource{Component: "eventTest"},
Count: 2,
Type: api.EventTypeNormal,
},
expectLog: `Event(api.ObjectReference{Kind:"Pod", Namespace:"baz", Name:"foo", UID:"differentUid", APIVersion:"version", ResourceVersion:"", FieldPath:"spec.containers[3]"}): reason: 'Stopped' some verbose message: 1`,
expectLog: `Event(api.ObjectReference{Kind:"Pod", Namespace:"baz", Name:"foo", UID:"differentUid", APIVersion:"version", ResourceVersion:"", FieldPath:"spec.containers[3]"}): type: 'Normal' reason: 'Stopped' some verbose message: 1`,
expectUpdate: true,
},
}
@ -344,7 +359,7 @@ func TestEventf(t *testing.T) {
}
logCalled <- struct{}{}
})
recorder.Eventf(item.obj, item.reason, item.messageFmt, item.elements...)
recorder.Eventf(item.obj, item.eventtype, item.reason, item.messageFmt, item.elements...)
<-logCalled
@ -434,10 +449,10 @@ func TestWriteEventError(t *testing.T) {
recorder := recorderWithFakeClock(api.EventSource{Component: "eventTest"}, eventBroadcaster, clock)
for caseName := range table {
clock.Step(1 * time.Second)
recorder.Event(ref, "Reason", caseName)
recorder.Event(ref, api.EventTypeNormal, "Reason", caseName)
runtime.Gosched()
}
recorder.Event(ref, "Reason", "finished")
recorder.Event(ref, api.EventTypeNormal, "Reason", "finished")
<-done
for caseName, item := range table {
@ -484,7 +499,7 @@ func TestLotsOfEvents(t *testing.T) {
}
for i := 0; i < maxQueuedEvents; i++ {
// we need to vary the reason to prevent aggregation
go recorder.Eventf(ref, "Reason-"+string(i), strconv.Itoa(i))
go recorder.Eventf(ref, api.EventTypeNormal, "Reason-"+string(i), strconv.Itoa(i))
}
// Make sure no events were dropped by either of the listeners.
for i := 0; i < maxQueuedEvents; i++ {
@ -515,6 +530,7 @@ func TestEventfNoNamespace(t *testing.T) {
}
table := []struct {
obj k8sruntime.Object
eventtype string
reason string
messageFmt string
elements []interface{}
@ -524,6 +540,7 @@ func TestEventfNoNamespace(t *testing.T) {
}{
{
obj: testRef,
eventtype: api.EventTypeNormal,
reason: "Started",
messageFmt: "some verbose message: %v",
elements: []interface{}{1},
@ -544,8 +561,9 @@ func TestEventfNoNamespace(t *testing.T) {
Message: "some verbose message: 1",
Source: api.EventSource{Component: "eventTest"},
Count: 1,
Type: api.EventTypeNormal,
},
expectLog: `Event(api.ObjectReference{Kind:"Pod", Namespace:"", Name:"foo", UID:"bar", APIVersion:"version", ResourceVersion:"", FieldPath:"spec.containers[2]"}): reason: 'Started' some verbose message: 1`,
expectLog: `Event(api.ObjectReference{Kind:"Pod", Namespace:"", Name:"foo", UID:"bar", APIVersion:"version", ResourceVersion:"", FieldPath:"spec.containers[2]"}): type: 'Normal' reason: 'Started' some verbose message: 1`,
expectUpdate: false,
},
}
@ -578,7 +596,7 @@ func TestEventfNoNamespace(t *testing.T) {
}
logCalled <- struct{}{}
})
recorder.Eventf(item.obj, item.reason, item.messageFmt, item.elements...)
recorder.Eventf(item.obj, item.eventtype, item.reason, item.messageFmt, item.elements...)
<-logCalled
@ -621,6 +639,7 @@ func TestMultiSinkCache(t *testing.T) {
}
table := []struct {
obj k8sruntime.Object
eventtype string
reason string
messageFmt string
elements []interface{}
@ -630,6 +649,7 @@ func TestMultiSinkCache(t *testing.T) {
}{
{
obj: testRef,
eventtype: api.EventTypeNormal,
reason: "Started",
messageFmt: "some verbose message: %v",
elements: []interface{}{1},
@ -650,12 +670,14 @@ func TestMultiSinkCache(t *testing.T) {
Message: "some verbose message: 1",
Source: api.EventSource{Component: "eventTest"},
Count: 1,
Type: api.EventTypeNormal,
},
expectLog: `Event(api.ObjectReference{Kind:"Pod", Namespace:"baz", Name:"foo", UID:"bar", APIVersion:"version", ResourceVersion:"", FieldPath:"spec.containers[2]"}): reason: 'Started' some verbose message: 1`,
expectLog: `Event(api.ObjectReference{Kind:"Pod", Namespace:"baz", Name:"foo", UID:"bar", APIVersion:"version", ResourceVersion:"", FieldPath:"spec.containers[2]"}): type: 'Normal' reason: 'Started' some verbose message: 1`,
expectUpdate: false,
},
{
obj: testPod,
eventtype: api.EventTypeNormal,
reason: "Killed",
messageFmt: "some other verbose message: %v",
elements: []interface{}{1},
@ -675,12 +697,14 @@ func TestMultiSinkCache(t *testing.T) {
Message: "some other verbose message: 1",
Source: api.EventSource{Component: "eventTest"},
Count: 1,
Type: api.EventTypeNormal,
},
expectLog: `Event(api.ObjectReference{Kind:"Pod", Namespace:"baz", Name:"foo", UID:"bar", APIVersion:"version", ResourceVersion:"", FieldPath:""}): reason: 'Killed' some other verbose message: 1`,
expectLog: `Event(api.ObjectReference{Kind:"Pod", Namespace:"baz", Name:"foo", UID:"bar", APIVersion:"version", ResourceVersion:"", FieldPath:""}): type: 'Normal' reason: 'Killed' some other verbose message: 1`,
expectUpdate: false,
},
{
obj: testRef,
eventtype: api.EventTypeNormal,
reason: "Started",
messageFmt: "some verbose message: %v",
elements: []interface{}{1},
@ -701,12 +725,14 @@ func TestMultiSinkCache(t *testing.T) {
Message: "some verbose message: 1",
Source: api.EventSource{Component: "eventTest"},
Count: 2,
Type: api.EventTypeNormal,
},
expectLog: `Event(api.ObjectReference{Kind:"Pod", Namespace:"baz", Name:"foo", UID:"bar", APIVersion:"version", ResourceVersion:"", FieldPath:"spec.containers[2]"}): reason: 'Started' some verbose message: 1`,
expectLog: `Event(api.ObjectReference{Kind:"Pod", Namespace:"baz", Name:"foo", UID:"bar", APIVersion:"version", ResourceVersion:"", FieldPath:"spec.containers[2]"}): type: 'Normal' reason: 'Started' some verbose message: 1`,
expectUpdate: true,
},
{
obj: testRef2,
eventtype: api.EventTypeNormal,
reason: "Started",
messageFmt: "some verbose message: %v",
elements: []interface{}{1},
@ -727,12 +753,14 @@ func TestMultiSinkCache(t *testing.T) {
Message: "some verbose message: 1",
Source: api.EventSource{Component: "eventTest"},
Count: 1,
Type: api.EventTypeNormal,
},
expectLog: `Event(api.ObjectReference{Kind:"Pod", Namespace:"baz", Name:"foo", UID:"differentUid", APIVersion:"version", ResourceVersion:"", FieldPath:"spec.containers[3]"}): reason: 'Started' some verbose message: 1`,
expectLog: `Event(api.ObjectReference{Kind:"Pod", Namespace:"baz", Name:"foo", UID:"differentUid", APIVersion:"version", ResourceVersion:"", FieldPath:"spec.containers[3]"}): type: 'Normal' reason: 'Started' some verbose message: 1`,
expectUpdate: false,
},
{
obj: testRef,
eventtype: api.EventTypeNormal,
reason: "Started",
messageFmt: "some verbose message: %v",
elements: []interface{}{1},
@ -753,12 +781,14 @@ func TestMultiSinkCache(t *testing.T) {
Message: "some verbose message: 1",
Source: api.EventSource{Component: "eventTest"},
Count: 3,
Type: api.EventTypeNormal,
},
expectLog: `Event(api.ObjectReference{Kind:"Pod", Namespace:"baz", Name:"foo", UID:"bar", APIVersion:"version", ResourceVersion:"", FieldPath:"spec.containers[2]"}): reason: 'Started' some verbose message: 1`,
expectLog: `Event(api.ObjectReference{Kind:"Pod", Namespace:"baz", Name:"foo", UID:"bar", APIVersion:"version", ResourceVersion:"", FieldPath:"spec.containers[2]"}): type: 'Normal' reason: 'Started' some verbose message: 1`,
expectUpdate: true,
},
{
obj: testRef2,
eventtype: api.EventTypeNormal,
reason: "Stopped",
messageFmt: "some verbose message: %v",
elements: []interface{}{1},
@ -779,12 +809,14 @@ func TestMultiSinkCache(t *testing.T) {
Message: "some verbose message: 1",
Source: api.EventSource{Component: "eventTest"},
Count: 1,
Type: api.EventTypeNormal,
},
expectLog: `Event(api.ObjectReference{Kind:"Pod", Namespace:"baz", Name:"foo", UID:"differentUid", APIVersion:"version", ResourceVersion:"", FieldPath:"spec.containers[3]"}): reason: 'Stopped' some verbose message: 1`,
expectLog: `Event(api.ObjectReference{Kind:"Pod", Namespace:"baz", Name:"foo", UID:"differentUid", APIVersion:"version", ResourceVersion:"", FieldPath:"spec.containers[3]"}): type: 'Normal' reason: 'Stopped' some verbose message: 1`,
expectUpdate: false,
},
{
obj: testRef2,
eventtype: api.EventTypeNormal,
reason: "Stopped",
messageFmt: "some verbose message: %v",
elements: []interface{}{1},
@ -805,8 +837,9 @@ func TestMultiSinkCache(t *testing.T) {
Message: "some verbose message: 1",
Source: api.EventSource{Component: "eventTest"},
Count: 2,
Type: api.EventTypeNormal,
},
expectLog: `Event(api.ObjectReference{Kind:"Pod", Namespace:"baz", Name:"foo", UID:"differentUid", APIVersion:"version", ResourceVersion:"", FieldPath:"spec.containers[3]"}): reason: 'Stopped' some verbose message: 1`,
expectLog: `Event(api.ObjectReference{Kind:"Pod", Namespace:"baz", Name:"foo", UID:"differentUid", APIVersion:"version", ResourceVersion:"", FieldPath:"spec.containers[3]"}): type: 'Normal' reason: 'Stopped' some verbose message: 1`,
expectUpdate: true,
},
}
@ -844,7 +877,7 @@ func TestMultiSinkCache(t *testing.T) {
sinkWatcher := eventBroadcaster.StartRecordingToSink(&testEvents)
for index, item := range table {
clock.Step(1 * time.Second)
recorder.Eventf(item.obj, item.reason, item.messageFmt, item.elements...)
recorder.Eventf(item.obj, item.eventtype, item.reason, item.messageFmt, item.elements...)
// validate event
if item.expectUpdate {
@ -860,7 +893,7 @@ func TestMultiSinkCache(t *testing.T) {
sinkWatcher2 := eventBroadcaster.StartRecordingToSink(&testEvents2)
for index, item := range table {
clock.Step(1 * time.Second)
recorder.Eventf(item.obj, item.reason, item.messageFmt, item.elements...)
recorder.Eventf(item.obj, item.eventtype, item.reason, item.messageFmt, item.elements...)
// validate event
if item.expectUpdate {

View File

@ -51,6 +51,7 @@ func getEventKey(event *api.Event) string {
event.InvolvedObject.Name,
string(event.InvolvedObject.UID),
event.InvolvedObject.APIVersion,
event.Type,
event.Reason,
event.Message,
},
@ -71,7 +72,7 @@ func DefaultEventFilterFunc(event *api.Event) bool {
// localKey - key that makes this event in the local group
type EventAggregatorKeyFunc func(event *api.Event) (aggregateKey string, localKey string)
// EventAggregatorByReasonFunc aggregates events by exact match on event.Source, event.InvolvedObject, and event.Reason
// EventAggregatorByReasonFunc aggregates events by exact match on event.Source, event.InvolvedObject, event.Type and event.Reason
func EventAggregatorByReasonFunc(event *api.Event) (string, string) {
return strings.Join([]string{
event.Source.Component,
@ -81,6 +82,7 @@ func EventAggregatorByReasonFunc(event *api.Event) (string, string) {
event.InvolvedObject.Name,
string(event.InvolvedObject.UID),
event.InvolvedObject.APIVersion,
event.Type,
event.Reason,
},
""), event.Message
@ -179,6 +181,7 @@ func (e *EventAggregator) EventAggregate(newEvent *api.Event) (*api.Event, error
InvolvedObject: newEvent.InvolvedObject,
LastTimestamp: now,
Message: e.messageFunc(newEvent),
Type: newEvent.Type,
Reason: newEvent.Reason,
Source: newEvent.Source,
}
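
A consequence of including event.Type in both keys: two events that differ only in type no longer share a dedup or aggregation key, so a Warning will not be folded into the count of an otherwise identical Normal event. A sketch of a same-package test (makeEvent is the test helper shown in the next file; ref is a placeholder api.ObjectReference):

	warning := makeEvent("FailedScheduling", "no nodes available", ref)
	warning.Type = api.EventTypeWarning
	normal := warning
	normal.Type = api.EventTypeNormal
	if getEventKey(&warning) == getEventKey(&normal) {
		t.Errorf("events differing only in Type should not share a dedup key")
	}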

View File

@ -50,6 +50,7 @@ func makeEvent(reason, message string, involvedObject api.ObjectReference) api.E
Count: 1,
FirstTimestamp: eventTime,
LastTimestamp: eventTime,
Type: api.EventTypeNormal,
}
return event
}

View File

@ -28,13 +28,13 @@ type FakeRecorder struct {
Events []string
}
func (f *FakeRecorder) Event(object runtime.Object, reason, message string) {
f.Events = append(f.Events, fmt.Sprintf("%s %s", reason, message))
func (f *FakeRecorder) Event(object runtime.Object, eventtype, reason, message string) {
f.Events = append(f.Events, fmt.Sprintf("%s %s %s", eventtype, reason, message))
}
func (f *FakeRecorder) Eventf(object runtime.Object, reason, messageFmt string, args ...interface{}) {
f.Events = append(f.Events, fmt.Sprintf(reason+" "+messageFmt, args...))
func (f *FakeRecorder) Eventf(object runtime.Object, eventtype, reason, messageFmt string, args ...interface{}) {
f.Events = append(f.Events, fmt.Sprintf(eventtype+" "+reason+" "+messageFmt, args...))
}
func (f *FakeRecorder) PastEventf(object runtime.Object, timestamp unversioned.Time, reason, messageFmt string, args ...interface{}) {
func (f *FakeRecorder) PastEventf(object runtime.Object, timestamp unversioned.Time, eventtype, reason, messageFmt string, args ...interface{}) {
}
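
Tests that drive code through the fake can now assert on the recorded type, since Eventf prepends it to each entry. A minimal sketch (obj is a placeholder runtime.Object):

	f := &record.FakeRecorder{}
	f.Eventf(obj, api.EventTypeWarning, "FailedCreate", "Error creating: %v", "quota exceeded")
	// f.Events[0] == "Warning FailedCreate Error creating: quota exceeded"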

View File

@ -74,6 +74,7 @@ func TestEventCreate(t *testing.T) {
FirstTimestamp: timeStamp,
LastTimestamp: timeStamp,
Count: 1,
Type: api.EventTypeNormal,
}
c := &testClient{
Request: testRequest{
@ -113,6 +114,7 @@ func TestEventGet(t *testing.T) {
FirstTimestamp: timeStamp,
LastTimestamp: timeStamp,
Count: 1,
Type: api.EventTypeNormal,
}
c := &testClient{
Request: testRequest{
@ -152,6 +154,7 @@ func TestEventList(t *testing.T) {
FirstTimestamp: timeStamp,
LastTimestamp: timeStamp,
Count: 1,
Type: api.EventTypeNormal,
},
},
}

View File

@ -308,11 +308,11 @@ func (r RealPodControl) createPods(nodeName, namespace string, template *api.Pod
return fmt.Errorf("unable to create pods, no labels")
}
if newPod, err := r.KubeClient.Pods(namespace).Create(pod); err != nil {
r.Recorder.Eventf(object, "FailedCreate", "Error creating: %v", err)
r.Recorder.Eventf(object, api.EventTypeWarning, "FailedCreate", "Error creating: %v", err)
return fmt.Errorf("unable to create pods: %v", err)
} else {
glog.V(4).Infof("Controller %v created pod %v", meta.Name, newPod.Name)
r.Recorder.Eventf(object, "SuccessfulCreate", "Created pod: %v", newPod.Name)
r.Recorder.Eventf(object, api.EventTypeNormal, "SuccessfulCreate", "Created pod: %v", newPod.Name)
}
return nil
}

View File

@ -261,7 +261,7 @@ func (d *DeploymentController) scaleRCAndRecordEvent(rc *api.ReplicationControll
}
newRC, err := d.scaleRC(rc, newScale)
if err == nil {
d.eventRecorder.Eventf(&deployment, "ScalingRC", "Scaled %s rc %s to %d", scalingOperation, rc.Name, newScale)
d.eventRecorder.Eventf(&deployment, api.EventTypeNormal, "ScalingRC", "Scaled %s rc %s to %d", scalingOperation, rc.Name, newScale)
}
return newRC, err
}

View File

@ -247,7 +247,7 @@ func (nc *NodeController) Run(period time.Duration) {
if completed {
glog.Infof("All pods terminated on %s", value.Value)
nc.recordNodeEvent(value.Value, "TerminatedAllPods", fmt.Sprintf("Terminated all Pods on Node %s.", value.Value))
nc.recordNodeEvent(value.Value, api.EventTypeNormal, "TerminatedAllPods", fmt.Sprintf("Terminated all Pods on Node %s.", value.Value))
return true, 0
}
@ -354,7 +354,7 @@ func (nc *NodeController) monitorNodeStatus() error {
for _, node := range nodes.Items {
if !nc.knownNodeSet.Has(node.Name) {
glog.V(1).Infof("NodeController observed a new Node: %#v", node)
nc.recordNodeEvent(node.Name, "RegisteredNode", fmt.Sprintf("Registered Node %v in NodeController", node.Name))
nc.recordNodeEvent(node.Name, api.EventTypeNormal, "RegisteredNode", fmt.Sprintf("Registered Node %v in NodeController", node.Name))
nc.cancelPodEviction(node.Name)
nc.knownNodeSet.Insert(node.Name)
}
@ -369,7 +369,7 @@ func (nc *NodeController) monitorNodeStatus() error {
deleted := nc.knownNodeSet.Difference(observedSet)
for nodeName := range deleted {
glog.V(1).Infof("NodeController observed a Node deletion: %v", nodeName)
nc.recordNodeEvent(nodeName, "RemovingNode", fmt.Sprintf("Removing Node %v from NodeController", nodeName))
nc.recordNodeEvent(nodeName, api.EventTypeNormal, "RemovingNode", fmt.Sprintf("Removing Node %v from NodeController", nodeName))
nc.evictPods(nodeName)
nc.knownNodeSet.Delete(nodeName)
}
@ -440,7 +440,7 @@ func (nc *NodeController) monitorNodeStatus() error {
}
if _, err := instances.ExternalID(node.Name); err != nil && err == cloudprovider.InstanceNotFound {
glog.Infof("Deleting node (no longer present in cloud provider): %s", node.Name)
nc.recordNodeEvent(node.Name, "DeletingNode", fmt.Sprintf("Deleting Node %v because it's not present according to cloud provider", node.Name))
nc.recordNodeEvent(node.Name, api.EventTypeNormal, "DeletingNode", fmt.Sprintf("Deleting Node %v because it's not present according to cloud provider", node.Name))
remaining, err := nc.hasPods(node.Name)
if err != nil {
@ -494,7 +494,7 @@ func (nc *NodeController) reconcileNodeCIDRs(nodes *api.NodeList) {
}
}
func (nc *NodeController) recordNodeEvent(nodeName string, reason string, event string) {
func (nc *NodeController) recordNodeEvent(nodeName, eventtype, reason, event string) {
ref := &api.ObjectReference{
Kind: "Node",
Name: nodeName,
@ -502,7 +502,7 @@ func (nc *NodeController) recordNodeEvent(nodeName string, reason string, event
Namespace: "",
}
glog.V(2).Infof("Recording %s event message for node %s", event, nodeName)
nc.recorder.Eventf(ref, reason, "Node %s event: %s", nodeName, event)
nc.recorder.Eventf(ref, eventtype, reason, "Node %s event: %s", nodeName, event)
}
func (nc *NodeController) recordNodeStatusChange(node *api.Node, new_status string) {
@ -515,7 +515,7 @@ func (nc *NodeController) recordNodeStatusChange(node *api.Node, new_status stri
glog.V(2).Infof("Recording status change %s event message for node %s", new_status, node.Name)
// TODO: This requires a transaction, either both node status is updated
// and event is recorded or neither should happen, see issue #6055.
nc.recorder.Eventf(ref, new_status, "Node %s status is now: %s", node.Name, new_status)
nc.recorder.Eventf(ref, api.EventTypeNormal, new_status, "Node %s status is now: %s", node.Name, new_status)
}
// For a given node checks its conditions and tries to update it. Returns grace period to which given node
@ -723,7 +723,7 @@ func (nc *NodeController) deletePods(nodeName string) (bool, error) {
}
if len(pods.Items) > 0 {
nc.recordNodeEvent(nodeName, "DeletingAllPods", fmt.Sprintf("Deleting all Pods from Node %v.", nodeName))
nc.recordNodeEvent(nodeName, api.EventTypeNormal, "DeletingAllPods", fmt.Sprintf("Deleting all Pods from Node %v.", nodeName))
}
for _, pod := range pods.Items {
@ -737,7 +737,7 @@ func (nc *NodeController) deletePods(nodeName string) (bool, error) {
}
glog.V(2).Infof("Starting deletion of pod %v", pod.Name)
nc.recorder.Eventf(&pod, "NodeControllerEviction", "Marking for deletion Pod %s from Node %s", pod.Name, nodeName)
nc.recorder.Eventf(&pod, api.EventTypeNormal, "NodeControllerEviction", "Marking for deletion Pod %s from Node %s", pod.Name, nodeName)
if err := nc.kubeClient.Pods(pod.Namespace).Delete(pod.Name, nil); err != nil {
return false, err
}
@ -784,7 +784,7 @@ func (nc *NodeController) terminatePods(nodeName string, since time.Time) (bool,
if remaining < 0 {
remaining = 0
glog.V(2).Infof("Removing pod %v after %s grace period", pod.Name, grace)
nc.recordNodeEvent(nodeName, "TerminatingEvictedPod", fmt.Sprintf("Pod %s has exceeded the grace period for deletion after being evicted from Node %q and is being force killed", pod.Name, nodeName))
nc.recordNodeEvent(nodeName, api.EventTypeNormal, "TerminatingEvictedPod", fmt.Sprintf("Pod %s has exceeded the grace period for deletion after being evicted from Node %q and is being force killed", pod.Name, nodeName))
if err := nc.kubeClient.Pods(pod.Namespace).Delete(pod.Name, api.NewDeleteOptions(0)); err != nil {
glog.Errorf("Error completing deletion of pod %s: %v", pod.Name, err)
complete = false

View File

@ -80,7 +80,7 @@ func (a *HorizontalController) computeReplicasForCPUUtilization(hpa extensions.H
// TODO: what to do on partial errors (like metrics obtained for 75% of pods).
if err != nil {
a.eventRecorder.Event(&hpa, "FailedGetMetrics", err.Error())
a.eventRecorder.Event(&hpa, api.EventTypeWarning, "FailedGetMetrics", err.Error())
return 0, nil, fmt.Errorf("failed to get cpu utilization: %v", err)
}
@ -97,14 +97,14 @@ func (a *HorizontalController) reconcileAutoscaler(hpa extensions.HorizontalPodA
scale, err := a.client.Extensions().Scales(hpa.Namespace).Get(hpa.Spec.ScaleRef.Kind, hpa.Spec.ScaleRef.Name)
if err != nil {
a.eventRecorder.Event(&hpa, "FailedGetScale", err.Error())
a.eventRecorder.Event(&hpa, api.EventTypeWarning, "FailedGetScale", err.Error())
return fmt.Errorf("failed to query scale subresource for %s: %v", reference, err)
}
currentReplicas := scale.Status.Replicas
desiredReplicas, currentUtilization, err := a.computeReplicasForCPUUtilization(hpa, scale)
if err != nil {
a.eventRecorder.Event(&hpa, "FailedComputeReplicas", err.Error())
a.eventRecorder.Event(&hpa, api.EventTypeWarning, "FailedComputeReplicas", err.Error())
return fmt.Errorf("failed to compute desired number of replicas based on CPU utilization for %s: %v", reference, err)
}
@ -145,10 +145,10 @@ func (a *HorizontalController) reconcileAutoscaler(hpa extensions.HorizontalPodA
scale.Spec.Replicas = desiredReplicas
_, err = a.client.Extensions().Scales(hpa.Namespace).Update(hpa.Spec.ScaleRef.Kind, scale)
if err != nil {
a.eventRecorder.Eventf(&hpa, "FailedRescale", "New size: %d; error: %v", desiredReplicas, err.Error())
a.eventRecorder.Eventf(&hpa, api.EventTypeWarning, "FailedRescale", "New size: %d; error: %v", desiredReplicas, err.Error())
return fmt.Errorf("failed to rescale %s: %v", reference, err)
}
a.eventRecorder.Eventf(&hpa, "SuccessfulRescale", "New size: %d", desiredReplicas)
a.eventRecorder.Eventf(&hpa, api.EventTypeNormal, "SuccessfulRescale", "New size: %d", desiredReplicas)
glog.Infof("Successfull rescale of %s, old size: %d, new size: %d",
hpa.Name, currentReplicas, desiredReplicas)
} else {
@ -168,7 +168,7 @@ func (a *HorizontalController) reconcileAutoscaler(hpa extensions.HorizontalPodA
_, err = a.client.Extensions().HorizontalPodAutoscalers(hpa.Namespace).UpdateStatus(&hpa)
if err != nil {
a.eventRecorder.Event(&hpa, "FailedUpdateStatus", err.Error())
a.eventRecorder.Event(&hpa, api.EventTypeWarning, "FailedUpdateStatus", err.Error())
return fmt.Errorf("failed to update status for %s: %v", hpa.Name, err)
}
return nil

View File

@ -245,7 +245,7 @@ func (s *ServiceController) processDelta(delta *cache.Delta) (error, bool) {
message += " (will not retry): "
}
message += err.Error()
s.eventRecorder.Event(service, "CreatingLoadBalancerFailed", message)
s.eventRecorder.Event(service, api.EventTypeWarning, "CreatingLoadBalancerFailed", message)
return err, retry
}
// Always update the cache upon success.
@ -255,14 +255,14 @@ func (s *ServiceController) processDelta(delta *cache.Delta) (error, bool) {
cachedService.appliedState = service
s.cache.set(namespacedName.String(), cachedService)
case cache.Deleted:
s.eventRecorder.Event(service, "DeletingLoadBalancer", "Deleting load balancer")
s.eventRecorder.Event(service, api.EventTypeNormal, "DeletingLoadBalancer", "Deleting load balancer")
err := s.balancer.EnsureTCPLoadBalancerDeleted(s.loadBalancerName(service), s.zone.Region)
if err != nil {
message := "Error deleting load balancer (will retry): " + err.Error()
s.eventRecorder.Event(service, "DeletingLoadBalancerFailed", message)
s.eventRecorder.Event(service, api.EventTypeWarning, "DeletingLoadBalancerFailed", message)
return err, retryable
}
s.eventRecorder.Event(service, "DeletedLoadBalancer", "Deleted load balancer")
s.eventRecorder.Event(service, api.EventTypeNormal, "DeletedLoadBalancer", "Deleted load balancer")
s.cache.delete(namespacedName.String())
default:
glog.Errorf("Unexpected delta type: %v", delta.Type)
@ -305,11 +305,11 @@ func (s *ServiceController) createLoadBalancerIfNeeded(namespacedName types.Name
if needDelete {
glog.Infof("Deleting existing load balancer for service %s that no longer needs a load balancer.", namespacedName)
s.eventRecorder.Event(service, "DeletingLoadBalancer", "Deleting load balancer")
s.eventRecorder.Event(service, api.EventTypeNormal, "DeletingLoadBalancer", "Deleting load balancer")
if err := s.balancer.EnsureTCPLoadBalancerDeleted(s.loadBalancerName(service), s.zone.Region); err != nil {
return err, retryable
}
s.eventRecorder.Event(service, "DeletedLoadBalancer", "Deleted load balancer")
s.eventRecorder.Event(service, api.EventTypeNormal, "DeletedLoadBalancer", "Deleted load balancer")
}
service.Status.LoadBalancer = api.LoadBalancerStatus{}
@ -319,12 +319,12 @@ func (s *ServiceController) createLoadBalancerIfNeeded(namespacedName types.Name
// TODO: We could do a dry-run here if wanted to avoid the spurious cloud-calls & events when we restart
// The load balancer doesn't exist yet, so create it.
s.eventRecorder.Event(service, "CreatingLoadBalancer", "Creating load balancer")
s.eventRecorder.Event(service, api.EventTypeNormal, "CreatingLoadBalancer", "Creating load balancer")
err := s.createLoadBalancer(service)
if err != nil {
return fmt.Errorf("Failed to create load balancer for service %s: %v", namespacedName, err), retryable
}
s.eventRecorder.Event(service, "CreatedLoadBalancer", "Created load balancer")
s.eventRecorder.Event(service, api.EventTypeNormal, "CreatedLoadBalancer", "Created load balancer")
}
// Write the state if changed
@ -686,7 +686,7 @@ func (s *ServiceController) lockedUpdateLoadBalancerHosts(service *api.Service,
name := cloudprovider.GetLoadBalancerName(service)
err := s.balancer.UpdateTCPLoadBalancer(name, s.zone.Region, hosts)
if err == nil {
s.eventRecorder.Event(service, "UpdatedLoadBalancer", "Updated load balancer with new hosts")
s.eventRecorder.Event(service, api.EventTypeNormal, "UpdatedLoadBalancer", "Updated load balancer with new hosts")
return nil
}
@ -697,7 +697,7 @@ func (s *ServiceController) lockedUpdateLoadBalancerHosts(service *api.Service,
return nil
}
s.eventRecorder.Eventf(service, "LoadBalancerUpdateFailed", "Error updating load balancer with new hosts %v: %v", hosts, err)
s.eventRecorder.Eventf(service, api.EventTypeWarning, "LoadBalancerUpdateFailed", "Error updating load balancer with new hosts %v: %v", hosts, err)
return err
}

View File

@ -1512,15 +1512,16 @@ func DescribeEvents(el *api.EventList, w io.Writer) {
return
}
sort.Sort(SortableEvents(el.Items))
fmt.Fprint(w, "Events:\n FirstSeen\tLastSeen\tCount\tFrom\tSubobjectPath\tReason\tMessage\n")
fmt.Fprint(w, " ─────────\t────────\t─────\t────\t─────────────\t──────\t───────\n")
fmt.Fprint(w, "Events:\n FirstSeen\tLastSeen\tCount\tFrom\tSubobjectPath\tType\tReason\tMessage\n")
fmt.Fprint(w, " ─────────\t────────\t─────\t────\t─────────────\t────────\t──────\t───────\n")
for _, e := range el.Items {
fmt.Fprintf(w, " %s\t%s\t%d\t%v\t%v\t%v\t%v\n",
fmt.Fprintf(w, " %s\t%s\t%d\t%v\t%v\t%v\t%v\t%v\n",
translateTimestamp(e.FirstTimestamp),
translateTimestamp(e.LastTimestamp),
e.Count,
e.Source,
e.InvolvedObject.FieldPath,
e.Type,
e.Reason,
e.Message)
}
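
Rendered, the describe table now carries the type between SubobjectPath and Reason; an illustrative row (values are hypothetical):

	Events:
	  FirstSeen   LastSeen   Count   From               SubobjectPath   Type     Reason   Message
	  ─────────   ────────   ─────   ────               ─────────────   ────     ──────   ───────
	  2m          2m         1       {kubelet node-1}                   Normal   Pulled   Successfully pulled image "nginx"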

View File

@ -85,6 +85,7 @@ func TestPodDescribeResultsSorted(t *testing.T) {
FirstTimestamp: unversioned.NewTime(time.Date(2014, time.January, 15, 0, 0, 0, 0, time.UTC)),
LastTimestamp: unversioned.NewTime(time.Date(2014, time.January, 15, 0, 0, 0, 0, time.UTC)),
Count: 1,
Type: api.EventTypeNormal,
},
{
Source: api.EventSource{Component: "scheduler"},
@ -92,6 +93,7 @@ func TestPodDescribeResultsSorted(t *testing.T) {
FirstTimestamp: unversioned.NewTime(time.Date(1987, time.June, 17, 0, 0, 0, 0, time.UTC)),
LastTimestamp: unversioned.NewTime(time.Date(1987, time.June, 17, 0, 0, 0, 0, time.UTC)),
Count: 1,
Type: api.EventTypeNormal,
},
{
Source: api.EventSource{Component: "kubelet"},
@ -99,6 +101,7 @@ func TestPodDescribeResultsSorted(t *testing.T) {
FirstTimestamp: unversioned.NewTime(time.Date(2002, time.December, 25, 0, 0, 0, 0, time.UTC)),
LastTimestamp: unversioned.NewTime(time.Date(2002, time.December, 25, 0, 0, 0, 0, time.UTC)),
Count: 1,
Type: api.EventTypeNormal,
},
},
})

View File

@ -394,7 +394,7 @@ var ingressColumns = []string{"NAME", "RULE", "BACKEND", "ADDRESS"}
var endpointColumns = []string{"NAME", "ENDPOINTS", "AGE"}
var nodeColumns = []string{"NAME", "LABELS", "STATUS", "AGE"}
var daemonSetColumns = []string{"NAME", "CONTAINER(S)", "IMAGE(S)", "SELECTOR", "NODE-SELECTOR"}
var eventColumns = []string{"FIRSTSEEN", "LASTSEEN", "COUNT", "NAME", "KIND", "SUBOBJECT", "REASON", "SOURCE", "MESSAGE"}
var eventColumns = []string{"FIRSTSEEN", "LASTSEEN", "COUNT", "NAME", "KIND", "SUBOBJECT", "TYPE", "REASON", "SOURCE", "MESSAGE"}
var limitRangeColumns = []string{"NAME", "AGE"}
var resourceQuotaColumns = []string{"NAME", "AGE"}
var namespaceColumns = []string{"NAME", "LABELS", "STATUS", "AGE"}
@ -1203,13 +1203,14 @@ func printEvent(event *api.Event, w io.Writer, withNamespace bool, wide bool, sh
}
}
if _, err := fmt.Fprintf(
w, "%s\t%s\t%d\t%s\t%s\t%s\t%s\t%s\t%s",
w, "%s\t%s\t%d\t%s\t%s\t%s\t%s\t%s\t%s\t%s",
translateTimestamp(event.FirstTimestamp),
translateTimestamp(event.LastTimestamp),
event.Count,
event.InvolvedObject.Name,
event.InvolvedObject.Kind,
event.InvolvedObject.FieldPath,
event.Type,
event.Reason,
event.Source,
event.Message,
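
Correspondingly, kubectl get events gains a TYPE column between SUBOBJECT and REASON; an illustrative row (values are hypothetical):

	FIRSTSEEN   LASTSEEN   COUNT   NAME    KIND   SUBOBJECT   TYPE     REASON    SOURCE             MESSAGE
	2m          2m         1       mypod   Pod                Normal   Started   {kubelet node-1}   Started container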

View File

@ -507,6 +507,7 @@ func TestPrintEventsResultSorted(t *testing.T) {
FirstTimestamp: unversioned.NewTime(time.Date(2014, time.January, 15, 0, 0, 0, 0, time.UTC)),
LastTimestamp: unversioned.NewTime(time.Date(2014, time.January, 15, 0, 0, 0, 0, time.UTC)),
Count: 1,
Type: api.EventTypeNormal,
},
{
Source: api.EventSource{Component: "scheduler"},
@ -514,6 +515,7 @@ func TestPrintEventsResultSorted(t *testing.T) {
FirstTimestamp: unversioned.NewTime(time.Date(1987, time.June, 17, 0, 0, 0, 0, time.UTC)),
LastTimestamp: unversioned.NewTime(time.Date(1987, time.June, 17, 0, 0, 0, 0, time.UTC)),
Count: 1,
Type: api.EventTypeNormal,
},
{
Source: api.EventSource{Component: "kubelet"},
@ -521,6 +523,7 @@ func TestPrintEventsResultSorted(t *testing.T) {
FirstTimestamp: unversioned.NewTime(time.Date(2002, time.December, 25, 0, 0, 0, 0, time.UTC)),
LastTimestamp: unversioned.NewTime(time.Date(2002, time.December, 25, 0, 0, 0, 0, time.UTC)),
Count: 1,
Type: api.EventTypeNormal,
},
},
}
@ -902,6 +905,7 @@ func TestPrintHumanReadableWithNamespace(t *testing.T) {
FirstTimestamp: unversioned.NewTime(time.Date(2014, time.January, 15, 0, 0, 0, 0, time.UTC)),
LastTimestamp: unversioned.NewTime(time.Date(2014, time.January, 15, 0, 0, 0, 0, time.UTC)),
Count: 1,
Type: api.EventTypeNormal,
},
isNamespaced: true,
},

View File

@ -60,6 +60,7 @@ func TestSortableEvents(t *testing.T) {
FirstTimestamp: unversioned.NewTime(time.Date(2014, time.January, 15, 0, 0, 0, 0, time.UTC)),
LastTimestamp: unversioned.NewTime(time.Date(2014, time.January, 15, 0, 0, 0, 0, time.UTC)),
Count: 1,
Type: api.EventTypeNormal,
},
{
Source: api.EventSource{Component: "scheduler"},
@ -67,6 +68,7 @@ func TestSortableEvents(t *testing.T) {
FirstTimestamp: unversioned.NewTime(time.Date(1987, time.June, 17, 0, 0, 0, 0, time.UTC)),
LastTimestamp: unversioned.NewTime(time.Date(1987, time.June, 17, 0, 0, 0, 0, time.UTC)),
Count: 1,
Type: api.EventTypeNormal,
},
{
Source: api.EventSource{Component: "kubelet"},
@ -74,6 +76,7 @@ func TestSortableEvents(t *testing.T) {
FirstTimestamp: unversioned.NewTime(time.Date(2002, time.December, 25, 0, 0, 0, 0, time.UTC)),
LastTimestamp: unversioned.NewTime(time.Date(2002, time.December, 25, 0, 0, 0, 0, time.UTC)),
Count: 1,
Type: api.EventTypeNormal,
},
})

View File

@ -327,7 +327,7 @@ func filterInvalidPods(pods []*api.Pod, source string, recorder record.EventReco
name := bestPodIdentString(pod)
err := utilerrors.NewAggregate(errlist)
glog.Warningf("Pod[%d] (%s) from %s failed validation, ignoring: %v", i+1, name, source, err)
recorder.Eventf(pod, kubecontainer.FailedValidation, "Error validating pod %s from %s, ignoring: %v", name, source, err)
recorder.Eventf(pod, api.EventTypeWarning, kubecontainer.FailedValidation, "Error validating pod %s from %s, ignoring: %v", name, source, err)
continue
}
filtered = append(filtered, pod)

View File

@ -132,21 +132,21 @@ func (irecorder *innerEventRecorder) shouldRecordEvent(object runtime.Object) (*
return nil, false
}
func (irecorder *innerEventRecorder) Event(object runtime.Object, reason, message string) {
func (irecorder *innerEventRecorder) Event(object runtime.Object, eventtype, reason, message string) {
if ref, ok := irecorder.shouldRecordEvent(object); ok {
irecorder.recorder.Event(ref, reason, message)
irecorder.recorder.Event(ref, eventtype, reason, message)
}
}
func (irecorder *innerEventRecorder) Eventf(object runtime.Object, reason, messageFmt string, args ...interface{}) {
func (irecorder *innerEventRecorder) Eventf(object runtime.Object, eventtype, reason, messageFmt string, args ...interface{}) {
if ref, ok := irecorder.shouldRecordEvent(object); ok {
irecorder.recorder.Eventf(ref, reason, messageFmt, args...)
irecorder.recorder.Eventf(ref, eventtype, reason, messageFmt, args...)
}
}
func (irecorder *innerEventRecorder) PastEventf(object runtime.Object, timestamp unversioned.Time, reason, messageFmt string, args ...interface{}) {
func (irecorder *innerEventRecorder) PastEventf(object runtime.Object, timestamp unversioned.Time, eventtype, reason, messageFmt string, args ...interface{}) {
if ref, ok := irecorder.shouldRecordEvent(object); ok {
irecorder.recorder.PastEventf(ref, timestamp, reason, messageFmt, args...)
irecorder.recorder.PastEventf(ref, timestamp, eventtype, reason, messageFmt, args...)
}
}

View File

@ -63,9 +63,9 @@ func shouldPullImage(container *api.Container, imagePresent bool) bool {
}
// logIt records an event using ref and msg; if ref is nil, it logs prefix and msg to glog via logFn.
func (puller *imagePuller) logIt(ref *api.ObjectReference, event, prefix, msg string, logFn func(args ...interface{})) {
func (puller *imagePuller) logIt(ref *api.ObjectReference, eventtype, event, prefix, msg string, logFn func(args ...interface{})) {
if ref != nil {
puller.recorder.Event(ref, event, msg)
puller.recorder.Event(ref, eventtype, event, msg)
} else {
logFn(fmt.Sprint(prefix, " ", msg))
}
@ -83,18 +83,18 @@ func (puller *imagePuller) PullImage(pod *api.Pod, container *api.Container, pul
present, err := puller.runtime.IsImagePresent(spec)
if err != nil {
msg := fmt.Sprintf("Failed to inspect image %q: %v", container.Image, err)
puller.logIt(ref, FailedToInspectImage, logPrefix, msg, glog.Warning)
puller.logIt(ref, api.EventTypeWarning, FailedToInspectImage, logPrefix, msg, glog.Warning)
return ErrImageInspect, msg
}
if !shouldPullImage(container, present) {
if present {
msg := fmt.Sprintf("Container image %q already present on machine", container.Image)
puller.logIt(ref, "Pulled", logPrefix, msg, glog.Info)
puller.logIt(ref, api.EventTypeNormal, "Pulled", logPrefix, msg, glog.Info)
return nil, ""
} else {
msg := fmt.Sprintf("Container image %q is not present with pull policy of Never", container.Image)
puller.logIt(ref, ErrImageNeverPullPolicy, logPrefix, msg, glog.Warning)
puller.logIt(ref, api.EventTypeWarning, ErrImageNeverPullPolicy, logPrefix, msg, glog.Warning)
return ErrImageNeverPull, msg
}
}
@ -102,12 +102,12 @@ func (puller *imagePuller) PullImage(pod *api.Pod, container *api.Container, pul
backOffKey := fmt.Sprintf("%s_%s", pod.Name, container.Image)
if puller.backOff.IsInBackOffSinceUpdate(backOffKey, puller.backOff.Clock.Now()) {
msg := fmt.Sprintf("Back-off pulling image %q", container.Image)
puller.logIt(ref, BackOffPullImage, logPrefix, msg, glog.Info)
puller.logIt(ref, api.EventTypeNormal, BackOffPullImage, logPrefix, msg, glog.Info)
return ErrImagePullBackOff, msg
}
puller.logIt(ref, "Pulling", logPrefix, fmt.Sprintf("pulling image %q", container.Image), glog.Info)
puller.logIt(ref, api.EventTypeNormal, "Pulling", logPrefix, fmt.Sprintf("pulling image %q", container.Image), glog.Info)
if err := puller.runtime.PullImage(spec, pullSecrets); err != nil {
puller.logIt(ref, "Failed", logPrefix, fmt.Sprintf("Failed to pull image %q: %v", container.Image, err), glog.Warning)
puller.logIt(ref, api.EventTypeWarning, "Failed", logPrefix, fmt.Sprintf("Failed to pull image %q: %v", container.Image, err), glog.Warning)
puller.backOff.Next(backOffKey, puller.backOff.Clock.Now())
if err == RegistryUnavailable {
msg := fmt.Sprintf("image pull failed for %s because the registry is temporarily unavailable.", container.Image)
@ -116,7 +116,7 @@ func (puller *imagePuller) PullImage(pod *api.Pod, container *api.Container, pul
return ErrImagePull, err.Error()
}
}
puller.logIt(ref, "Pulled", logPrefix, fmt.Sprintf("Successfully pulled image %q", container.Image), glog.Info)
puller.logIt(ref, api.EventTypeNormal, "Pulled", logPrefix, fmt.Sprintf("Successfully pulled image %q", container.Image), glog.Info)
puller.backOff.GC()
return nil, ""
}

View File

@ -64,9 +64,9 @@ func NewSerializedImagePuller(recorder record.EventRecorder, runtime Runtime, im
}
// logIt records an event using ref and msg; if ref is nil, it logs prefix and msg to glog via logFn.
func (puller *serializedImagePuller) logIt(ref *api.ObjectReference, event, prefix, msg string, logFn func(args ...interface{})) {
func (puller *serializedImagePuller) logIt(ref *api.ObjectReference, eventtype, event, prefix, msg string, logFn func(args ...interface{})) {
if ref != nil {
puller.recorder.Event(ref, event, msg)
puller.recorder.Event(ref, eventtype, event, msg)
} else {
logFn(fmt.Sprint(prefix, " ", msg))
}
@ -84,18 +84,18 @@ func (puller *serializedImagePuller) PullImage(pod *api.Pod, container *api.Cont
present, err := puller.runtime.IsImagePresent(spec)
if err != nil {
msg := fmt.Sprintf("Failed to inspect image %q: %v", container.Image, err)
puller.logIt(ref, FailedToInspectImage, logPrefix, msg, glog.Warning)
puller.logIt(ref, api.EventTypeWarning, FailedToInspectImage, logPrefix, msg, glog.Warning)
return ErrImageInspect, msg
}
if !shouldPullImage(container, present) {
if present {
msg := fmt.Sprintf("Container image %q already present on machine", container.Image)
puller.logIt(ref, PulledImage, logPrefix, msg, glog.Info)
puller.logIt(ref, api.EventTypeNormal, PulledImage, logPrefix, msg, glog.Info)
return nil, ""
} else {
msg := fmt.Sprintf("Container image %q is not present with pull policy of Never", container.Image)
puller.logIt(ref, ErrImageNeverPullPolicy, logPrefix, msg, glog.Warning)
puller.logIt(ref, api.EventTypeWarning, ErrImageNeverPullPolicy, logPrefix, msg, glog.Warning)
return ErrImageNeverPull, msg
}
}
@ -103,7 +103,7 @@ func (puller *serializedImagePuller) PullImage(pod *api.Pod, container *api.Cont
backOffKey := fmt.Sprintf("%s_%s", pod.Name, container.Image)
if puller.backOff.IsInBackOffSinceUpdate(backOffKey, puller.backOff.Clock.Now()) {
msg := fmt.Sprintf("Back-off pulling image %q", container.Image)
puller.logIt(ref, BackOffPullImage, logPrefix, msg, glog.Info)
puller.logIt(ref, api.EventTypeNormal, BackOffPullImage, logPrefix, msg, glog.Info)
return ErrImagePullBackOff, msg
}
@ -118,7 +118,7 @@ func (puller *serializedImagePuller) PullImage(pod *api.Pod, container *api.Cont
returnChan: returnChan,
}
if err = <-returnChan; err != nil {
puller.logIt(ref, FailedToPullImage, logPrefix, fmt.Sprintf("Failed to pull image %q: %v", container.Image, err), glog.Warning)
puller.logIt(ref, api.EventTypeWarning, FailedToPullImage, logPrefix, fmt.Sprintf("Failed to pull image %q: %v", container.Image, err), glog.Warning)
puller.backOff.Next(backOffKey, puller.backOff.Clock.Now())
if err == RegistryUnavailable {
msg := fmt.Sprintf("image pull failed for %s because the registry is temporarily unavailable.", container.Image)
@ -127,14 +127,14 @@ func (puller *serializedImagePuller) PullImage(pod *api.Pod, container *api.Cont
return ErrImagePull, err.Error()
}
}
puller.logIt(ref, PulledImage, logPrefix, fmt.Sprintf("Successfully pulled image %q", container.Image), glog.Info)
puller.logIt(ref, api.EventTypeNormal, PulledImage, logPrefix, fmt.Sprintf("Successfully pulled image %q", container.Image), glog.Info)
puller.backOff.GC()
return nil, ""
}
func (puller *serializedImagePuller) pullImages() {
for pullRequest := range puller.pullRequests {
puller.logIt(pullRequest.ref, PullingImage, pullRequest.logPrefix, fmt.Sprintf("pulling image %q", pullRequest.container.Image), glog.Info)
puller.logIt(pullRequest.ref, api.EventTypeNormal, PullingImage, pullRequest.logPrefix, fmt.Sprintf("pulling image %q", pullRequest.container.Image), glog.Info)
pullRequest.returnChan <- puller.runtime.PullImage(pullRequest.spec, pullRequest.pullSecrets)
}
}
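
Every hunk in this commit follows the same mechanical pattern seen in the image puller above: each recorder call gains an eventtype argument ahead of the reason, and helpers such as logIt simply thread it through. A minimal, self-contained sketch of the resulting call shape, using local stand-ins (an EventRecorder over plain interface{} objects, a printing logRecorder, and two string constants) rather than the real pkg/api and pkg/client/record types:

package main

import "fmt"

// Stand-ins for api.EventTypeNormal and api.EventTypeWarning; the swagger
// change at the top of this commit documents the values "Normal" and "Warning".
const (
	EventTypeNormal  = "Normal"
	EventTypeWarning = "Warning"
)

// EventRecorder mirrors the updated method shapes: eventtype now precedes the
// reason in every variant. The real interface takes a runtime.Object.
type EventRecorder interface {
	Event(object interface{}, eventtype, reason, message string)
	Eventf(object interface{}, eventtype, reason, messageFmt string, args ...interface{})
}

// logRecorder just prints; it stands in for a recorder wired to the API server.
type logRecorder struct{}

func (logRecorder) Event(object interface{}, eventtype, reason, message string) {
	fmt.Printf("%s  %s  %s\n", eventtype, reason, message)
}

func (r logRecorder) Eventf(object interface{}, eventtype, reason, messageFmt string, args ...interface{}) {
	r.Event(object, eventtype, reason, fmt.Sprintf(messageFmt, args...))
}

func main() {
	var rec EventRecorder = logRecorder{}
	rec.Eventf(nil, EventTypeWarning, "FailedToInspectImage", "Failed to inspect image %q: %v", "busybox", "timeout")
	rec.Event(nil, EventTypeNormal, "PulledImage", `Container image "busybox" already present on machine`)
}

The later sketches in this section reuse EventRecorder and the two constants declared here.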

View File

@@ -757,11 +757,11 @@ func (dm *DockerManager) runContainer(
securityContextProvider.ModifyContainerConfig(pod, container, dockerOpts.Config)
dockerContainer, err := dm.client.CreateContainer(dockerOpts)
if err != nil {
dm.recorder.Eventf(ref, kubecontainer.FailedToCreateContainer, "Failed to create docker container with error: %v", err)
dm.recorder.Eventf(ref, api.EventTypeWarning, kubecontainer.FailedToCreateContainer, "Failed to create docker container with error: %v", err)
return kubecontainer.ContainerID{}, err
}
dm.recorder.Eventf(ref, kubecontainer.CreatedContainer, "Created container with docker id %v", util.ShortenString(dockerContainer.ID, 12))
dm.recorder.Eventf(ref, api.EventTypeNormal, kubecontainer.CreatedContainer, "Created container with docker id %v", util.ShortenString(dockerContainer.ID, 12))
podHasSELinuxLabel := pod.Spec.SecurityContext != nil && pod.Spec.SecurityContext.SELinuxOptions != nil
binds := makeMountBindings(opts.Mounts, podHasSELinuxLabel)
@@ -817,11 +817,11 @@ func (dm *DockerManager) runContainer(
securityContextProvider.ModifyHostConfig(pod, container, hc)
if err = dm.client.StartContainer(dockerContainer.ID, hc); err != nil {
dm.recorder.Eventf(ref, kubecontainer.FailedToStartContainer,
dm.recorder.Eventf(ref, api.EventTypeWarning, kubecontainer.FailedToStartContainer,
"Failed to start container with docker id %v with error: %v", util.ShortenString(dockerContainer.ID, 12), err)
return kubecontainer.ContainerID{}, err
}
dm.recorder.Eventf(ref, kubecontainer.StartedContainer, "Started container with docker id %v", util.ShortenString(dockerContainer.ID, 12))
dm.recorder.Eventf(ref, api.EventTypeNormal, kubecontainer.StartedContainer, "Started container with docker id %v", util.ShortenString(dockerContainer.ID, 12))
return kubetypes.DockerID(dockerContainer.ID).ContainerID(), nil
}
@@ -1437,7 +1437,7 @@ func (dm *DockerManager) killContainer(containerID kubecontainer.ContainerID, co
if reason != "" {
message = fmt.Sprint(message, ": ", reason)
}
dm.recorder.Event(ref, kubecontainer.KillingContainer, message)
dm.recorder.Event(ref, api.EventTypeNormal, kubecontainer.KillingContainer, message)
dm.containerRefManager.ClearRef(containerID)
}
return err
@@ -1817,7 +1817,7 @@ func (dm *DockerManager) SyncPod(pod *api.Pod, runningPod kubecontainer.Pod, pod
if err != nil {
glog.Errorf("Couldn't make a ref to pod %q: '%v'", podFullName, err)
}
dm.recorder.Eventf(ref, "InfraChanged", "Pod infrastructure changed, it will be killed and re-created.")
dm.recorder.Eventf(ref, api.EventTypeNormal, "InfraChanged", "Pod infrastructure changed, it will be killed and re-created.")
}
if containerChanges.StartInfraContainer || (len(containerChanges.ContainersToKeep) == 0 && len(containerChanges.ContainersToStart) == 0) {
if len(containerChanges.ContainersToKeep) == 0 && len(containerChanges.ContainersToStart) == 0 {
@@ -2049,7 +2049,7 @@ func (dm *DockerManager) doBackOff(pod *api.Pod, container *api.Container, podSt
stableName, _ := BuildDockerName(dockerName, container)
if backOff.IsInBackOffSince(stableName, ts.Time) {
if ref, err := kubecontainer.GenerateContainerRef(pod, container); err == nil {
dm.recorder.Eventf(ref, kubecontainer.BackOffStartContainer, "Back-off restarting failed docker container")
dm.recorder.Eventf(ref, api.EventTypeWarning, kubecontainer.BackOffStartContainer, "Back-off restarting failed docker container")
}
err := fmt.Errorf("Back-off %s restarting failed container=%s pod=%s", backOff.Get(stableName), container.Name, kubecontainer.GetPodFullName(pod))
dm.updateReasonCache(pod, container, kubecontainer.ErrCrashLoopBackOff.Error(), err)
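
The runContainer hunks above settle into a consistent pairing: a Warning event on each failure path (FailedToCreateContainer, FailedToStartContainer) and a Normal event on the matching success (CreatedContainer, StartedContainer). The same shape as a standalone helper; runStep and its parameters are illustrative, not part of the commit, and EventRecorder and the constants come from the earlier sketch:

// runStep records a Warning event when op fails and a Normal event when it
// succeeds, mirroring the create/start pairing in runContainer above.
func runStep(rec EventRecorder, ref interface{}, failReason, okReason string, op func() error) error {
	if err := op(); err != nil {
		rec.Eventf(ref, EventTypeWarning, failReason, "failed: %v", err)
		return err
	}
	rec.Eventf(ref, EventTypeNormal, okReason, "succeeded")
	return nil
}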

View File

@@ -195,7 +195,7 @@ func (im *realImageManager) GarbageCollect() error {
// Check valid capacity.
if capacity == 0 {
err := fmt.Errorf("invalid capacity %d on device %q at mount point %q", capacity, fsInfo.Device, fsInfo.Mountpoint)
im.recorder.Eventf(im.nodeRef, container.InvalidDiskCapacity, err.Error())
im.recorder.Eventf(im.nodeRef, api.EventTypeWarning, container.InvalidDiskCapacity, err.Error())
return err
}
@@ -211,7 +211,7 @@ func (im *realImageManager) GarbageCollect() error {
if freed < amountToFree {
err := fmt.Errorf("failed to garbage collect required amount of images. Wanted to free %d, but freed %d", amountToFree, freed)
im.recorder.Eventf(im.nodeRef, container.FreeDiskSpaceFailed, err.Error())
im.recorder.Eventf(im.nodeRef, api.EventTypeWarning, container.FreeDiskSpaceFailed, err.Error())
return err
}
}

View File

@@ -882,7 +882,7 @@ func (kl *Kubelet) Run(updates <-chan kubetypes.PodUpdate) {
glog.Warning("No api server defined - no node status update will be sent.")
}
if err := kl.initializeModules(); err != nil {
kl.recorder.Eventf(kl.nodeRef, kubecontainer.KubeletSetupFailed, err.Error())
kl.recorder.Eventf(kl.nodeRef, api.EventTypeWarning, kubecontainer.KubeletSetupFailed, err.Error())
glog.Error(err)
kl.runtimeState.setInitError(err)
}
@@ -1635,7 +1635,7 @@ func (kl *Kubelet) syncPod(pod *api.Pod, mirrorPod *api.Pod, runningPod kubecont
// Mount volumes.
podVolumes, err := kl.mountExternalVolumes(pod)
if err != nil {
kl.recorder.Eventf(ref, kubecontainer.FailedMountVolume, "Unable to mount volumes for pod %q: %v", podFullName, err)
kl.recorder.Eventf(ref, api.EventTypeWarning, kubecontainer.FailedMountVolume, "Unable to mount volumes for pod %q: %v", podFullName, err)
glog.Errorf("Unable to mount volumes for pod %q: %v; skipping pod", podFullName, err)
return err
}
@@ -1700,7 +1700,7 @@ func (kl *Kubelet) syncPod(pod *api.Pod, mirrorPod *api.Pod, runningPod kubecont
}
if egress != nil || ingress != nil {
if podUsesHostNetwork(pod) {
kl.recorder.Event(pod, kubecontainer.HostNetworkNotSupported, "Bandwidth shaping is not currently supported on the host network")
kl.recorder.Event(pod, api.EventTypeWarning, kubecontainer.HostNetworkNotSupported, "Bandwidth shaping is not currently supported on the host network")
} else if kl.shaper != nil {
status, found := kl.statusManager.GetPodStatus(pod.UID)
if !found {
@@ -1715,7 +1715,7 @@ func (kl *Kubelet) syncPod(pod *api.Pod, mirrorPod *api.Pod, runningPod kubecont
err = kl.shaper.ReconcileCIDR(fmt.Sprintf("%s/32", status.PodIP), egress, ingress)
}
} else {
kl.recorder.Event(pod, kubecontainer.UndefinedShaper, "Pod requests bandwidth shaping, but the shaper is undefined")
kl.recorder.Event(pod, api.EventTypeWarning, kubecontainer.UndefinedShaper, "Pod requests bandwidth shaping, but the shaper is undefined")
}
}
@@ -2205,7 +2205,7 @@ func (kl *Kubelet) matchesNodeSelector(pod *api.Pod) bool {
}
func (kl *Kubelet) rejectPod(pod *api.Pod, reason, message string) {
kl.recorder.Eventf(pod, reason, message)
kl.recorder.Eventf(pod, api.EventTypeWarning, reason, message)
kl.statusManager.SetPodStatus(pod, api.PodStatus{
Phase: api.PodFailed,
Reason: reason,
@@ -2606,11 +2606,11 @@ func (kl *Kubelet) updateNodeStatus() error {
return fmt.Errorf("update node status exceeds retry count")
}
func (kl *Kubelet) recordNodeStatusEvent(event string) {
func (kl *Kubelet) recordNodeStatusEvent(eventtype, event string) {
glog.V(2).Infof("Recording %s event message for node %s", event, kl.nodeName)
// TODO: This requires a transaction, either both node status is updated
// and event is recorded or neither should happen, see issue #6055.
kl.recorder.Eventf(kl.nodeRef, event, "Node %s status is now: %s", kl.nodeName, event)
kl.recorder.Eventf(kl.nodeRef, eventtype, event, "Node %s status is now: %s", kl.nodeName, event)
}
// Maintains Node.Spec.Unschedulable value from previous run of tryUpdateNodeStatus()
@@ -2721,7 +2721,7 @@ func (kl *Kubelet) setNodeStatus(node *api.Node) error {
node.Status.NodeInfo.BootID != info.BootID {
// TODO: This requires a transaction, either both node status is updated
// and event is recorded or neither should happen, see issue #6055.
kl.recorder.Eventf(kl.nodeRef, kubecontainer.NodeRebooted,
kl.recorder.Eventf(kl.nodeRef, api.EventTypeWarning, kubecontainer.NodeRebooted,
"Node %s has been rebooted, boot id: %s", kl.nodeName, info.BootID)
}
node.Status.NodeInfo.BootID = info.BootID
@@ -2783,9 +2783,9 @@ func (kl *Kubelet) setNodeStatus(node *api.Node) error {
}
if !updated || oldNodeReadyConditionStatus != newNodeReadyCondition.Status {
if newNodeReadyCondition.Status == api.ConditionTrue {
kl.recordNodeStatusEvent(kubecontainer.NodeReady)
kl.recordNodeStatusEvent(api.EventTypeNormal, kubecontainer.NodeReady)
} else {
kl.recordNodeStatusEvent(kubecontainer.NodeNotReady)
kl.recordNodeStatusEvent(api.EventTypeNormal, kubecontainer.NodeNotReady)
}
}
@@ -2827,7 +2827,7 @@ func (kl *Kubelet) setNodeStatus(node *api.Node) error {
nodeOODCondition.Reason = "KubeletOutOfDisk"
nodeOODCondition.Message = "out of disk space"
nodeOODCondition.LastTransitionTime = currentTime
kl.recordNodeStatusEvent("NodeOutOfDisk")
kl.recordNodeStatusEvent(api.EventTypeNormal, "NodeOutOfDisk")
}
} else {
if nodeOODCondition.Status != api.ConditionFalse {
@@ -2835,7 +2835,7 @@ func (kl *Kubelet) setNodeStatus(node *api.Node) error {
nodeOODCondition.Reason = "KubeletHasSufficientDisk"
nodeOODCondition.Message = "kubelet has sufficient disk space available"
nodeOODCondition.LastTransitionTime = currentTime
kl.recordNodeStatusEvent("NodeHasSufficientDisk")
kl.recordNodeStatusEvent(api.EventTypeNormal, "NodeHasSufficientDisk")
}
}
@@ -2845,9 +2845,9 @@ func (kl *Kubelet) setNodeStatus(node *api.Node) error {
if oldNodeUnschedulable != node.Spec.Unschedulable {
if node.Spec.Unschedulable {
kl.recordNodeStatusEvent(kubecontainer.NodeNotSchedulable)
kl.recordNodeStatusEvent(api.EventTypeNormal, kubecontainer.NodeNotSchedulable)
} else {
kl.recordNodeStatusEvent(kubecontainer.NodeSchedulable)
kl.recordNodeStatusEvent(api.EventTypeNormal, kubecontainer.NodeSchedulable)
}
oldNodeUnschedulable = node.Spec.Unschedulable
}
@@ -3032,7 +3032,7 @@ func (kl *Kubelet) generatePodStatus(pod *api.Pod) (api.PodStatus, error) {
// TODO: Consider include the container information.
if kl.pastActiveDeadline(pod) {
reason := "DeadlineExceeded"
kl.recorder.Eventf(pod, reason, "Pod was active on the node longer than specified deadline")
kl.recorder.Eventf(pod, api.EventTypeNormal, reason, "Pod was active on the node longer than specified deadline")
return api.PodStatus{
Phase: api.PodFailed,
Reason: reason,
@@ -3157,7 +3157,7 @@ func (kl *Kubelet) PortForward(podFullName string, podUID types.UID, port uint16
// BirthCry sends an event that the kubelet has started up.
func (kl *Kubelet) BirthCry() {
// Make an event that kubelet restarted.
kl.recorder.Eventf(kl.nodeRef, kubecontainer.StartingKubelet, "Starting kubelet.")
kl.recorder.Eventf(kl.nodeRef, api.EventTypeNormal, kubecontainer.StartingKubelet, "Starting kubelet.")
}
func (kl *Kubelet) StreamingConnectionIdleTimeout() time.Duration {
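
recordNodeStatusEvent is the one signature change inside kubelet.go itself: instead of the helper implying a type, every caller now states one. A sketch of the refactored pattern; kubeletSketch and onReadyChange are illustrative stand-ins, while the reasons and the choice of api.EventTypeNormal for both ready transitions are taken directly from the hunks above:

// kubeletSketch stands in for the Kubelet struct with only the fields the
// helper needs; EventRecorder is the stand-in from the first sketch.
type kubeletSketch struct {
	recorder EventRecorder
	nodeName string
}

// recordNodeStatusEvent forwards the caller-chosen event type, as in the diff.
func (kl *kubeletSketch) recordNodeStatusEvent(eventtype, event string) {
	kl.recorder.Eventf(nil, eventtype, event, "Node %s status is now: %s", kl.nodeName, event)
}

func (kl *kubeletSketch) onReadyChange(ready bool) {
	if ready {
		kl.recordNodeStatusEvent(EventTypeNormal, "NodeReady")
	} else {
		// The commit records NodeNotReady as Normal too; only the
		// failure-style kubelet events above use Warning.
		kl.recordNodeStatusEvent(EventTypeNormal, "NodeNotReady")
	}
}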

View File

@@ -64,7 +64,7 @@ func (ow *realOOMWatcher) Start(ref *api.ObjectReference) error {
for event := range eventChannel.GetChannel() {
glog.V(2).Infof("Got sys oom event from cadvisor: %v", event)
ow.recorder.PastEventf(ref, unversioned.Time{Time: event.Timestamp}, systemOOMEvent, "System OOM encountered")
ow.recorder.PastEventf(ref, unversioned.Time{Time: event.Timestamp}, api.EventTypeWarning, systemOOMEvent, "System OOM encountered")
}
glog.Errorf("Unexpectedly stopped receiving OOM notifications from cAdvisor")
}()
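
The OOM watcher is the one caller here of the timestamped variant: the recorded event carries the time cAdvisor observed the OOM rather than the time the recorder ran. The parameter order after this commit, object, then timestamp, then eventtype, reason, and format, can be read off the PastEventf call in the hunk above. As a stand-in declaration (add import "time" alongside the first sketch; time.Time is a placeholder for unversioned.Time):

// PastEventRecorder mirrors the updated PastEventf shape: the timestamp comes
// before the eventtype, which in turn precedes the reason.
type PastEventRecorder interface {
	PastEventf(object interface{}, timestamp time.Time, eventtype, reason, messageFmt string, args ...interface{})
}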

View File

@@ -123,7 +123,7 @@ func (p *podWorkers) managePodLoop(podUpdates <-chan workUpdate) {
minRuntimeCacheTime = time.Now()
if err != nil {
glog.Errorf("Error syncing pod %s, skipping: %v", newWork.pod.UID, err)
p.recorder.Eventf(newWork.pod, kubecontainer.FailedSync, "Error syncing pod, skipping: %v", err)
p.recorder.Eventf(newWork.pod, api.EventTypeWarning, kubecontainer.FailedSync, "Error syncing pod, skipping: %v", err)
return err
}
newWork.updateCompleteFn()

View File

@@ -96,12 +96,12 @@ func (pb *prober) probe(probeType probeType, pod *api.Pod, status api.PodStatus,
if err != nil {
glog.V(1).Infof("%s probe for %q errored: %v", probeType, ctrName, err)
if hasRef {
pb.recorder.Eventf(ref, kubecontainer.ContainerUnhealthy, "%s probe errored: %v", probeType, err)
pb.recorder.Eventf(ref, api.EventTypeWarning, kubecontainer.ContainerUnhealthy, "%s probe errored: %v", probeType, err)
}
} else { // result != probe.Success
glog.V(1).Infof("%s probe for %q failed (%v): %s", probeType, ctrName, result, output)
if hasRef {
pb.recorder.Eventf(ref, kubecontainer.ContainerUnhealthy, "%s probe failed: %s", probeType, output)
pb.recorder.Eventf(ref, api.EventTypeWarning, kubecontainer.ContainerUnhealthy, "%s probe failed: %s", probeType, output)
}
}
return results.Failure, err

View File

@@ -676,13 +676,13 @@ func (r *Runtime) generateEvents(runtimePod *kubecontainer.Pod, reason string, f
uuid := util.ShortenString(id.uuid, 8)
switch reason {
case "Created":
r.recorder.Eventf(ref, kubecontainer.CreatedContainer, "Created with rkt id %v", uuid)
r.recorder.Eventf(ref, api.EventTypeNormal, kubecontainer.CreatedContainer, "Created with rkt id %v", uuid)
case "Started":
r.recorder.Eventf(ref, kubecontainer.StartedContainer, "Started with rkt id %v", uuid)
r.recorder.Eventf(ref, api.EventTypeNormal, kubecontainer.StartedContainer, "Started with rkt id %v", uuid)
case "Failed":
r.recorder.Eventf(ref, kubecontainer.FailedToStartContainer, "Failed to start with rkt id %v with error %v", uuid, failure)
r.recorder.Eventf(ref, api.EventTypeWarning, kubecontainer.FailedToStartContainer, "Failed to start with rkt id %v with error %v", uuid, failure)
case "Killing":
r.recorder.Eventf(ref, kubecontainer.KillingContainer, "Killing with rkt id %v", uuid)
r.recorder.Eventf(ref, api.EventTypeNormal, kubecontainer.KillingContainer, "Killing with rkt id %v", uuid)
default:
glog.Errorf("rkt: Unexpected event %q", reason)
}
@@ -707,7 +707,7 @@ func (r *Runtime) RunPod(pod *api.Pod, pullSecrets []api.Secret) error {
continue
}
if prepareErr != nil {
r.recorder.Eventf(ref, kubecontainer.FailedToCreateContainer, "Failed to create rkt container with error: %v", prepareErr)
r.recorder.Eventf(ref, api.EventTypeWarning, kubecontainer.FailedToCreateContainer, "Failed to create rkt container with error: %v", prepareErr)
continue
}
containerID := runtimePod.Containers[i].ID
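
generateEvents above writes the reason-to-type mapping out explicitly: Failed becomes a Warning, the other rkt lifecycle reasons stay Normal, and anything unexpected is only logged. The same mapping as a small helper; eventTypeFor is illustrative, not a function in the commit:

// eventTypeFor reproduces the switch in generateEvents.
func eventTypeFor(reason string) string {
	switch reason {
	case "Created", "Started", "Killing":
		return "Normal" // api.EventTypeNormal
	case "Failed":
		return "Warning" // api.EventTypeWarning
	default:
		// generateEvents logs unexpected reasons instead of recording them.
		return ""
	}
}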

View File

@@ -125,7 +125,7 @@ func (s *Scheduler) scheduleOne() {
metrics.SchedulingAlgorithmLatency.Observe(metrics.SinceInMicroseconds(start))
if err != nil {
glog.V(1).Infof("Failed to schedule: %+v", pod)
s.config.Recorder.Eventf(pod, "FailedScheduling", "%v", err)
s.config.Recorder.Eventf(pod, api.EventTypeWarning, "FailedScheduling", "%v", err)
s.config.Error(pod, err)
return
}
@@ -145,11 +145,11 @@ func (s *Scheduler) scheduleOne() {
metrics.BindingLatency.Observe(metrics.SinceInMicroseconds(bindingStart))
if err != nil {
glog.V(1).Infof("Failed to bind pod: %+v", err)
s.config.Recorder.Eventf(pod, "FailedScheduling", "Binding rejected: %v", err)
s.config.Recorder.Eventf(pod, api.EventTypeNormal, "FailedScheduling", "Binding rejected: %v", err)
s.config.Error(pod, err)
return
}
s.config.Recorder.Eventf(pod, "Scheduled", "Successfully assigned %v to %v", pod.Name, dest)
s.config.Recorder.Eventf(pod, api.EventTypeNormal, "Scheduled", "Successfully assigned %v to %v", pod.Name, dest)
// tell the model to assume that this binding took effect.
assumed := *pod
assumed.Spec.NodeName = dest

View File

@@ -1665,8 +1665,8 @@ func dumpNodeDebugInfo(c *client.Client, nodeNames []string) {
Logf("\nLogging kubelet events for node %v", n)
for _, e := range getNodeEvents(c, n) {
Logf("source %v message %v reason %v first ts %v last ts %v, involved obj %+v",
e.Source, e.Message, e.Reason, e.FirstTimestamp, e.LastTimestamp, e.InvolvedObject)
Logf("source %v type %v message %v reason %v first ts %v last ts %v, involved obj %+v",
e.Source, e.Type, e.Message, e.Reason, e.FirstTimestamp, e.LastTimestamp, e.InvolvedObject)
}
Logf("\nLogging pods the kubelet thinks is on node %v", n)
podList, err := GetKubeletPods(c, n)
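
The e2e change closes the loop on the consumer side: the new Type field is now logged next to Reason and Message when node events are dumped. Once the field is populated, tests and tooling can also filter on it; a pared-down illustration, where this Event struct carries only the fields the hunk above logs and is not the real api.Event:

// Event carries only the fields the e2e hunk above logs; the real api.Event
// has many more.
type Event struct {
	Type, Reason, Message string
}

// warningsOnly keeps just the events recorded with type Warning.
func warningsOnly(events []Event) []Event {
	var out []Event
	for _, e := range events {
		if e.Type == "Warning" { // api.EventTypeWarning
			out = append(out, e)
		}
	}
	return out
}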