Merge pull request #31659 from timstclair/aa-nodestatus

Automatic merge from submit-queue

Append "AppArmor enabled" to the Node ready condition message

As discussed, append an "AppArmor enabled" message to the node ready condition message. This is a temporary solution for surfacing the AppArmor status until node feature reporting is added.

Example:
```
$ kubectl get nodes e2e-test-stclair-minion-group-lmvk -o yaml
...
  - lastHeartbeatTime: 2016-08-30T00:52:11Z
    lastTransitionTime: 2016-08-30T00:43:28Z
    message: kubelet is posting ready status. AppArmor enabled
    reason: KubeletReady
    status: "True"
    type: Ready
...
```
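
Since the only signal is this free-form condition message, a consumer has to string-match on it. Below is a minimal sketch of that check, assuming the Ready condition's status and message have already been extracted from the node object; the helper name `nodeHasAppArmor` is hypothetical, not part of any Kubernetes API.

```go
package main

import (
	"fmt"
	"strings"
)

// nodeHasAppArmor is a hypothetical helper: it reports whether a node's
// Ready condition indicates AppArmor support, by matching the marker this
// change appends. There is no structured field for this until node feature
// reporting is added, so string matching is the only option.
func nodeHasAppArmor(readyStatus, readyMessage string) bool {
	return readyStatus == "True" && strings.Contains(readyMessage, "AppArmor enabled")
}

func main() {
	// Values taken from the example output above.
	fmt.Println(nodeHasAppArmor("True", "kubelet is posting ready status. AppArmor enabled")) // true
	fmt.Println(nodeHasAppArmor("True", "kubelet is posting ready status"))                   // false
}
```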

---

1.4 justification:

- Risk: Low. This is a small change that appends a human-readable message.
- Rollback: Nothing depends on this functionality.
- Cost (if not merged): Not knowing whether AppArmor is actually supported by a node. Although pods should be rejected if AppArmor is not enabled, we can't do anything for older (< v1.4) nodes. This positive affirmation confirms that AppArmor is enabled on nodes running the current version.
Kubernetes Submit Queue 2016-08-31 12:35:00 -07:00 committed by GitHub
commit e693a61991
4 changed files with 21 additions and 4 deletions

```diff
@@ -77,6 +77,7 @@ import (
 	"k8s.io/kubernetes/pkg/kubelet/util/sliceutils"
 	"k8s.io/kubernetes/pkg/kubelet/volumemanager"
 	"k8s.io/kubernetes/pkg/runtime"
+	"k8s.io/kubernetes/pkg/security/apparmor"
 	"k8s.io/kubernetes/pkg/types"
 	"k8s.io/kubernetes/pkg/util/bandwidth"
 	"k8s.io/kubernetes/pkg/util/clock"
@@ -736,7 +737,8 @@ func NewMainKubelet(kubeCfg *componentconfig.KubeletConfiguration, kubeDeps *Kub
 	klet.AddPodSyncLoopHandler(activeDeadlineHandler)
 	klet.AddPodSyncHandler(activeDeadlineHandler)
-	klet.AddPodAdmitHandler(lifecycle.NewAppArmorAdmitHandler(kubeCfg.ContainerRuntime))
+	klet.appArmorValidator = apparmor.NewValidator(kubeCfg.ContainerRuntime)
+	klet.AddPodAdmitHandler(lifecycle.NewAppArmorAdmitHandler(klet.appArmorValidator))
 
 	// apply functional Option's
 	for _, opt := range kubeDeps.Options {
@@ -1041,6 +1043,9 @@ type Kubelet struct {
 	// The bit of the fwmark space to mark packets for dropping.
 	iptablesDropBit int
+
+	// The AppArmor validator for checking whether AppArmor is supported.
+	appArmorValidator apparmor.Validator
 }
 
 // setupDataDirs creates:
```
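
The wiring above creates one validator in `NewMainKubelet` and stores it on the `Kubelet`, so the admit handler and the node-status path consult the same host check. Below is a minimal sketch of that sharing pattern; `hostValidator`, `statusReporter`, and `admit` are simplified stand-ins, not the real kubelet types.

```go
package main

import (
	"errors"
	"fmt"
)

// Validator mirrors the shape of the pkg/security/apparmor interface.
type Validator interface {
	ValidateHost() error
}

// hostValidator is an illustrative stand-in holding one host-check result.
type hostValidator struct{ hostErr error }

func (v *hostValidator) ValidateHost() error { return v.hostErr }

// admit stands in for the AppArmor admit handler, consuming the shared instance.
func admit(v Validator) bool { return v.ValidateHost() == nil }

// statusReporter stands in for the node-status path: it appends the
// AppArmor marker only when the shared validator says the host is OK.
type statusReporter struct{ validator Validator }

func (r *statusReporter) readyMessage() string {
	msg := "kubelet is posting ready status"
	if r.validator != nil && r.validator.ValidateHost() == nil {
		msg += ". AppArmor enabled"
	}
	return msg
}

func main() {
	shared := &hostValidator{hostErr: nil} // pretend the host check passed
	fmt.Println(admit(shared))             // true: both consumers agree
	fmt.Println((&statusReporter{validator: shared}).readyMessage())

	broken := &hostValidator{hostErr: errors.New("AppArmor not enabled")}
	fmt.Println(admit(broken)) // false
	fmt.Println((&statusReporter{validator: broken}).readyMessage())
}
```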

```diff
@@ -489,6 +489,13 @@ func (kl *Kubelet) setNodeReadyCondition(node *api.Node) {
 		}
 	}
 
+	// Append AppArmor status if it's enabled.
+	// TODO(timstclair): This is a temporary message until node feature reporting is added.
+	if newNodeReadyCondition.Status == api.ConditionTrue &&
+		kl.appArmorValidator != nil && kl.appArmorValidator.ValidateHost() == nil {
+		newNodeReadyCondition.Message = fmt.Sprintf("%s. AppArmor enabled", newNodeReadyCondition.Message)
+	}
+
 	// Record any soft requirements that were not met in the container manager.
 	status := kl.containerManager.Status()
 	if status.SoftRequirements != nil {
```

```diff
@@ -144,9 +144,9 @@ func getHttpRespBody(resp *http.Response) string {
 	return ""
 }
 
-func NewAppArmorAdmitHandler(runtime string) PodAdmitHandler {
+func NewAppArmorAdmitHandler(validator apparmor.Validator) PodAdmitHandler {
 	return &appArmorAdmitHandler{
-		Validator: apparmor.NewValidator(runtime),
+		Validator: validator,
 	}
}
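```

Injecting the validator (instead of constructing it from the runtime name inside the handler) also makes the admit handler easy to exercise with a fake. A minimal sketch, assuming a `Validator` interface like the one above; `fakeValidator` and the simplified `appArmorAdmitHandler` here are illustrative stand-ins for the real lifecycle types.

```go
package main

import (
	"errors"
	"fmt"
)

type Validator interface {
	ValidateHost() error
}

// fakeValidator lets a test force either outcome of the host check.
type fakeValidator struct{ err error }

func (f *fakeValidator) ValidateHost() error { return f.err }

// appArmorAdmitHandler is a simplified stand-in for the real handler: it
// admits only when the injected validator reports a working host.
type appArmorAdmitHandler struct{ validator Validator }

func (h *appArmorAdmitHandler) admit() bool { return h.validator.ValidateHost() == nil }

func main() {
	ok := &appArmorAdmitHandler{validator: &fakeValidator{}}
	bad := &appArmorAdmitHandler{validator: &fakeValidator{err: errors.New("no AppArmor")}}
	fmt.Println(ok.admit(), bad.admit()) // true false
}
```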

```diff
@@ -37,6 +37,7 @@ var isDisabledBuild bool
 // Interface for validating that a pod with an AppArmor profile can be run by a Node.
 type Validator interface {
 	Validate(pod *api.Pod) error
+	ValidateHost() error
 }
 
 func NewValidator(runtime string) Validator {
@@ -64,7 +65,7 @@ func (v *validator) Validate(pod *api.Pod) error {
 		return nil
 	}
 
-	if v.validateHostErr != nil {
+	if v.ValidateHost() != nil {
 		return v.validateHostErr
 	}
@@ -87,6 +88,10 @@ func (v *validator) Validate(pod *api.Pod) error {
 	return nil
 }
 
+func (v *validator) ValidateHost() error {
+	return v.validateHostErr
+}
+
 // Verify that the host and runtime are capable of enforcing AppArmor profiles.
 func validateHost(runtime string) error {
 	// Check feature-gates
```
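
The validator runs the host check once and caches the resulting error in `validateHostErr`; `ValidateHost` just returns the cached value, so the node-status loop can call it cheaply on every sync. A minimal sketch of that pattern, with a boolean probe standing in for the real `validateHost(runtime)` logic:

```go
package main

import (
	"errors"
	"fmt"
)

type validator struct {
	validateHostErr error // result of the one-time host check
}

// newValidator stands in for NewValidator: the (potentially expensive)
// host probe runs once here, and the error is cached for later calls.
func newValidator(appArmorSupported bool) *validator {
	v := &validator{}
	if !appArmorSupported {
		v.validateHostErr = errors.New("AppArmor is not enabled on the host")
	}
	return v
}

// ValidateHost returns the cached result of the construction-time check.
func (v *validator) ValidateHost() error {
	return v.validateHostErr
}

func main() {
	fmt.Println(newValidator(true).ValidateHost())  // <nil>
	fmt.Println(newValidator(false).ValidateHost()) // AppArmor is not enabled on the host
}
```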