Deep Dive into kubelet (7): Selecting Which GPUs to Mount
The earlier article 深入浅出kubernetes之device-plugins mainly analyzed how a device plugin reports its resources; this article focuses on the allocation process.
device-plugin
kubelet is too complex to approach head-on, so we work backwards from the device plugin.
interface
kubernetes\pkg\kubelet\apis\deviceplugin\v1beta1\api.pb.go
type DevicePluginServer interface {
	GetDevicePluginOptions(context.Context, *Empty) (*DevicePluginOptions, error)
	ListAndWatch(*Empty, DevicePlugin_ListAndWatchServer) error
	Allocate(context.Context, *AllocateRequest) (*AllocateResponse, error)
	PreStartContainer(context.Context, *PreStartContainerRequest) (*PreStartContainerResponse, error)
}
The two methods that matter are ListAndWatch() and Allocate(); the other two simply return canned results with no logic, as sketched below.
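For completeness, here is roughly what those two look like in the NVIDIA plugin (a sketch; treat the exact bodies as approximate):

func (m *NvidiaDevicePlugin) GetDevicePluginOptions(ctx context.Context, e *pluginapi.Empty) (*pluginapi.DevicePluginOptions, error) {
	return &pluginapi.DevicePluginOptions{}, nil
}

func (m *NvidiaDevicePlugin) PreStartContainer(ctx context.Context, r *pluginapi.PreStartContainerRequest) (*pluginapi.PreStartContainerResponse, error) {
	return &pluginapi.PreStartContainerResponse{}, nil
}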
ListAndWatch
k8s-device-plugin\server.go
func (m *NvidiaDevicePlugin) ListAndWatch(e *pluginapi.Empty, s pluginapi.DevicePlugin_ListAndWatchServer) error {
	// Send the full device list once up front.
	s.Send(&pluginapi.ListAndWatchResponse{Devices: m.devs})

	for {
		select {
		case <-m.stop:
			return nil
		case d := <-m.health:
			// A device turned unhealthy: mark it and resend the whole list.
			d.Health = pluginapi.Unhealthy
			s.Send(&pluginapi.ListAndWatchResponse{Devices: m.devs})
		}
	}
}
An old acquaintance: it lists all devices, then keeps a long-lived stream open (gRPC server-side streaming, in the spirit of HTTP streaming) and pushes changes to the client.
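How m.health gets fed is up to the plugin (the real NVIDIA plugin watches NVML events); a purely hypothetical polling feeder, reusing the stop/health/devs fields from the excerpt above and an assumed checkDevice helper, might look like:

// Hypothetical sketch: periodically poll devices and push failures onto
// m.health; ListAndWatch then flips them to Unhealthy and resends the list.
// checkDevice is an assumed helper, not part of the real plugin.
func (m *NvidiaDevicePlugin) healthcheck(interval time.Duration) {
	ticker := time.NewTicker(interval)
	defer ticker.Stop()
	for {
		select {
		case <-m.stop:
			return
		case <-ticker.C:
			for _, d := range m.devs {
				if d.Health == pluginapi.Healthy && !checkDevice(d) {
					m.health <- d
				}
			}
		}
	}
}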
// E.g:
// struct Device {
//    ID: "GPU-fef8089b-4820-abfc-e83e-94318197576e",
//    State: "Healthy",
// }
type Device struct {
	ID     string `protobuf:"bytes,1,opt,name=ID,json=iD,proto3" json:"ID,omitempty"`
	Health string `protobuf:"bytes,2,opt,name=health,proto3" json:"health,omitempty"`
}
At present a device carries only its ID and health state, with no room for extension, so kubelet has no way to learn the GPU topology; in other words, only GPU count is supported today.
Allocate
func (m *NvidiaDevicePlugin) Allocate(ctx context.Context, reqs *pluginapi.AllocateRequest) (*pluginapi.AllocateResponse, error) {
	devs := m.devs
	responses := pluginapi.AllocateResponse{}
	for _, req := range reqs.ContainerRequests {
		// Expose exactly the requested devices to the container runtime.
		response := pluginapi.ContainerAllocateResponse{
			Envs: map[string]string{
				"NVIDIA_VISIBLE_DEVICES": strings.Join(req.DevicesIDs, ","),
			},
		}

		// Reject IDs that this plugin never advertised.
		for _, id := range req.DevicesIDs {
			if !deviceExists(devs, id) {
				return nil, fmt.Errorf("invalid allocation request: unknown device: %s", id)
			}
		}

		responses.ContainerResponses = append(responses.ContainerResponses, &response)
	}
	return &responses, nil
}
Allocate does two things: it builds the NVIDIA_VISIBLE_DEVICES environment variable from the requested IDs, and it checks that each requested device actually exists.
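The deviceExists helper used above is just a linear scan over the advertised devices; in k8s-device-plugin it is roughly:

func deviceExists(devs []*pluginapi.Device, id string) bool {
	for _, d := range devs {
		if d.ID == id {
			return true
		}
	}
	return false
}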
Note:
- This already reveals the allocation logic: kubelet itself chooses which GPU cards to mount based on the limit, then sends the chosen device IDs to the device plugin and gets the env back;
- Choosing GPU cards by topology inside the scheduler would be hard to implement: the scheduler only creates a Bind and fills in the node name, and wedging device IDs into that flow is difficult.
kubelet
From the above we know Allocate is the key method, so we start by finding where kubelet calls it.
kubernetes\pkg\kubelet\cm\devicemanager\endpoint.go
type endpoint interface {
	run()
	stop()
	allocate(devs []string) (*pluginapi.AllocateResponse, error)
	preStartContainer(devs []string) (*pluginapi.PreStartContainerResponse, error)
	callback(resourceName string, devices []pluginapi.Device)
	isStopped() bool
	stopGracePeriodExpired() bool
}
The most important methods here are run and allocate, which call the device plugin's ListAndWatch() and Allocate() respectively.
run
func (e *endpointImpl) run() {
	// Open the long-lived ListAndWatch stream (error handling elided in this excerpt).
	stream, err := e.client.ListAndWatch(context.Background(), &pluginapi.Empty{})

	for {
		response, err := stream.Recv()

		devs := response.Devices
		var newDevs []pluginapi.Device
		for _, d := range devs {
			newDevs = append(newDevs, *d)
		}

		// Hand the fresh device list to the manager.
		e.callback(e.resourceName, newDevs)
	}
}
It calls ListAndWatch, then invokes the callback to process the devices:
kubernetes\pkg\kubelet\cm\devicemanager\manager.go
func (m *ManagerImpl) genericDeviceUpdateCallback(resourceName string, devices []pluginapi.Device) {
	m.mutex.Lock()
	// Rebuild the healthy/unhealthy sets for this resource from scratch.
	m.healthyDevices[resourceName] = sets.NewString()
	m.unhealthyDevices[resourceName] = sets.NewString()
	for _, dev := range devices {
		if dev.Health == pluginapi.Healthy {
			m.healthyDevices[resourceName].Insert(dev.ID)
		} else {
			m.unhealthyDevices[resourceName].Insert(dev.ID)
		}
	}
	m.mutex.Unlock()
	m.writeCheckpoint()
}
So the device IDs are kept in kubelet.ContainerManager.deviceManager, keyed by resource name in a map[string]sets.String.
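A minimal illustration of that layout, with one real-looking and one made-up device ID:

package main

import (
	"fmt"

	"k8s.io/apimachinery/pkg/util/sets"
)

func main() {
	// One entry per extended resource name, mapping to a set of device IDs.
	healthyDevices := map[string]sets.String{
		"nvidia.com/gpu": sets.NewString(
			"GPU-fef8089b-4820-abfc-e83e-94318197576e", // ID from the example above
			"GPU-aaaaaaaa-1111-2222-3333-444444444444", // made-up ID
		),
	}
	fmt.Println(healthyDevices["nvidia.com/gpu"].List()) // sorted slice of IDs
}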
allocate
kubernetes\pkg\kubelet\cm\devicemanager\endpoint.go
func (e *endpointImpl) allocate(devs []string) (*pluginapi.AllocateResponse, error) {
	return e.client.Allocate(context.Background(), &pluginapi.AllocateRequest{
		ContainerRequests: []*pluginapi.ContainerAllocateRequest{
			{DevicesIDs: devs},
		},
	})
}
This just fires the gRPC request directly; let's look at the caller to see how the device IDs are picked.
kubernetes\pkg\kubelet\cm\devicemanager\manager.go
func (m *ManagerImpl) allocateContainerResources(pod *v1.Pod, container *v1.Container, devicesToReuse map[string]sets.String) error {
	podUID := string(pod.UID)
	contName := container.Name
	allocatedDevicesUpdated := false
	for k, v := range container.Resources.Limits {
		resource := string(k)
		needed := int(v.Value())

		// Pick the concrete device IDs to hand to this container.
		allocDevices, err := m.devicesToAllocate(podUID, contName, resource, needed, devicesToReuse[resource])

		startRPCTime := time.Now()
		m.mutex.Lock()
		e, ok := m.endpoints[resource]
		m.mutex.Unlock()

		// Ask the device plugin for the container runtime settings (envs etc.).
		devs := allocDevices.UnsortedList()
		resp, err := e.allocate(devs)

		// Update internal cached podDevices state.
		m.mutex.Lock()
		m.podDevices.insert(podUID, contName, resource, allocDevices, resp.ContainerResponses[0])
		m.mutex.Unlock()
	}
	// Checkpoints device to container allocation information.
	return m.writeCheckpoint()
}
- Obtain the device IDs to allocate via devicesToAllocate (the limit-to-needed step is illustrated below)
- Call allocate to get the env in the response
- Update the devicemanager.podDevices cache
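To make the limit handling concrete, here is a tiny standalone example of how a limit of two GPUs becomes resource and needed in that loop (the resource name is illustrative):

package main

import (
	"fmt"

	v1 "k8s.io/api/core/v1"
	"k8s.io/apimachinery/pkg/api/resource"
)

func main() {
	limits := v1.ResourceList{
		"nvidia.com/gpu": resource.MustParse("2"),
	}
	for k, v := range limits {
		// Mirrors the loop in allocateContainerResources.
		fmt.Printf("resource=%s needed=%d\n", string(k), int(v.Value()))
	}
}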
func (m *ManagerImpl) devicesToAllocate(podUID, contName, resource string, required int, reusableDevices sets.String) (sets.String, error) {
	m.mutex.Lock()
	defer m.mutex.Unlock()
	needed := required
	devices := sets.NewString()

	// Available = healthy devices minus those already allocated.
	devicesInUse := m.allocatedDevices[resource]
	available := m.healthyDevices[resource].Difference(devicesInUse)

	// Take the first `needed` entries of an unsorted list, i.e. an arbitrary pick.
	allocated := available.UnsortedList()[:needed]
	for _, device := range allocated {
		m.allocatedDevices[resource].Insert(device)
		devices.Insert(device)
	}
	return devices, nil
}
The allocation logic:
- get the resources already allocated to the container
- fetch the devices already in use from the cache
- diff all healthy devices against the in-use ones to get the available set
- pick device IDs from the available set at random
- update the in-use device cache
- return the chosen device IDs
And here the truth comes out: kubelet picks GPUs to mount at random. UnsortedList iterates a Go map, so the returned order is arbitrary and carries no topology information.
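A quick way to convince yourself: run the snippet below a few times and the printed order changes, because sets.String is backed by a Go map.

package main

import (
	"fmt"

	"k8s.io/apimachinery/pkg/util/sets"
)

func main() {
	healthy := sets.NewString("GPU-0", "GPU-1", "GPU-2", "GPU-3")
	inUse := sets.NewString("GPU-1")

	available := healthy.Difference(inUse)
	// UnsortedList iterates the underlying map, so across runs the first
	// `needed` entries amount to a random pick, exactly as in devicesToAllocate.
	needed := 2
	fmt.Println(available.UnsortedList()[:needed])
}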
Saving the resource allocation state
kubernetes\pkg\kubelet\cm\devicemanager\pod_devices.go
func (pdev podDevices) insert(podUID, contName, resource string, devices sets.String, resp *pluginapi.ContainerAllocateResponse) {
	if _, podExists := pdev[podUID]; !podExists {
		pdev[podUID] = make(containerDevices)
	}
	if _, contExists := pdev[podUID][contName]; !contExists {
		pdev[podUID][contName] = make(resourceAllocateInfo)
	}
	pdev[podUID][contName][resource] = deviceAllocateInfo{
		deviceIds: devices,
		allocResp: resp,
	}
}
This records, for every pod, the per-container, per-resource device usage.
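For reference, podDevices nests three maps; from pod_devices.go the types are roughly:

type deviceAllocateInfo struct {
	// deviceIds contains the device IDs allocated to this container for the resource.
	deviceIds sets.String
	// allocResp caches the AllocateResponse from the device plugin.
	allocResp *pluginapi.ContainerAllocateResponse
}

type resourceAllocateInfo map[string]deviceAllocateInfo // keyed by resource name
type containerDevices map[string]resourceAllocateInfo   // keyed by container name
type podDevices map[string]containerDevices             // keyed by pod UID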
// Returns combined container runtime settings to consume the container's allocated devices.
func (pdev podDevices) deviceRunContainerOptions(podUID, contName string) *DeviceRunContainerOptions {}
The deviceRunContainerOptions method returns the device-related configuration needed to create the container.
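For context, DeviceRunContainerOptions (defined in the same package) bundles roughly these runtime settings:

// DeviceRunContainerOptions contains the combined container runtime settings
// to consume the container's allocated devices.
type DeviceRunContainerOptions struct {
	Envs        []kubecontainer.EnvVar     // environment variables (e.g. NVIDIA_VISIBLE_DEVICES)
	Mounts      []kubecontainer.Mount      // mounts for the container
	Devices     []kubecontainer.DeviceInfo // host devices mapped into the container
	Annotations []kubecontainer.Annotation // annotations for the container
}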
P.S. As a rule, information is not stored in two places, so the allocation state should live only in the deviceManager; whenever it is needed, the corresponding options are simply returned.