From ad743a922aae18baba9a1907d4d1a7b37bf781af Mon Sep 17 00:00:00 2001 From: Vishnu kannan Date: Sun, 12 Mar 2017 11:26:27 -0700 Subject: [PATCH] remove dead code in gpu manager Signed-off-by: Vishnu kannan --- pkg/kubelet/gpu/nvidia/nvidia_gpu_manager.go | 15 ++++----------- pkg/kubelet/gpu/nvidia/nvidia_gpu_manager_test.go | 9 +++------ 2 files changed, 7 insertions(+), 17 deletions(-) diff --git a/pkg/kubelet/gpu/nvidia/nvidia_gpu_manager.go b/pkg/kubelet/gpu/nvidia/nvidia_gpu_manager.go index 8f4b5c6437..d83c83d386 100644 --- a/pkg/kubelet/gpu/nvidia/nvidia_gpu_manager.go +++ b/pkg/kubelet/gpu/nvidia/nvidia_gpu_manager.go @@ -102,10 +102,7 @@ func (ngm *nvidiaGPUManager) Start() error { return err } // It's possible that the runtime isn't available now. - allocatedGPUs, err := ngm.gpusInUse() - if err == nil { - ngm.allocated = allocatedGPUs - } + ngm.allocated = ngm.gpusInUse() // We ignore errors when identifying allocated GPUs because it is possible that the runtime interfaces may not be logically up. return nil } @@ -141,11 +138,7 @@ func (ngm *nvidiaGPUManager) AllocateGPU(pod *v1.Pod, container *v1.Container) ( defer ngm.Unlock() if ngm.allocated == nil { // Initialization is not complete. Try now. Failures can no longer be tolerated. - allocated, err := ngm.gpusInUse() - if err != nil { - return nil, fmt.Errorf("Failed to allocate GPUs because of issues identifying GPUs in use: %v", err) - } - ngm.allocated = allocated + ngm.allocated = ngm.gpusInUse() } else { // update internal list of GPUs in use prior to allocating new GPUs. ngm.updateAllocatedGPUs() } @@ -217,7 +210,7 @@ func (ngm *nvidiaGPUManager) discoverGPUs() error { } // gpusInUse returns a list of GPUs in use along with the respective pods that are using them. 
-func (ngm *nvidiaGPUManager) gpusInUse() (*podGPUs, error) { +func (ngm *nvidiaGPUManager) gpusInUse() *podGPUs { pods := ngm.activePodsLister.GetActivePods() type containerIdentifier struct { id string @@ -274,7 +267,7 @@ func (ngm *nvidiaGPUManager) gpusInUse() (*podGPUs, error) { } } } - return ret, nil + return ret } func isValidPath(path string) bool { diff --git a/pkg/kubelet/gpu/nvidia/nvidia_gpu_manager_test.go b/pkg/kubelet/gpu/nvidia/nvidia_gpu_manager_test.go index db80f03cf1..6c89383994 100644 --- a/pkg/kubelet/gpu/nvidia/nvidia_gpu_manager_test.go +++ b/pkg/kubelet/gpu/nvidia/nvidia_gpu_manager_test.go @@ -70,9 +70,8 @@ func TestMultiContainerPodGPUAllocation(t *testing.T) { } // Expect that no devices are in use. - gpusInUse, err := testGpuManager.gpusInUse() + gpusInUse := testGpuManager.gpusInUse() as := assert.New(t) - as.Nil(err) as.Equal(len(gpusInUse.devices()), 0) // Allocated GPUs for a pod with two containers. @@ -121,9 +120,8 @@ func TestMultiPodGPUAllocation(t *testing.T) { } // Expect that no devices are in use. - gpusInUse, err := testGpuManager.gpusInUse() + gpusInUse := testGpuManager.gpusInUse() as := assert.New(t) - as.Nil(err) as.Equal(len(gpusInUse.devices()), 0) // Allocated GPUs for a pod with two containers. @@ -155,9 +153,8 @@ func TestPodContainerRestart(t *testing.T) { } // Expect that no devices are in use. - gpusInUse, err := testGpuManager.gpusInUse() + gpusInUse := testGpuManager.gpusInUse() as := assert.New(t) - as.Nil(err) as.Equal(len(gpusInUse.devices()), 0) // Make a pod with one containers that requests two GPUs.