daemonset controller analysis
daemonset controller overview
The daemonset controller is one of the many controllers in the kube-controller-manager component and is the controller for the daemonset resource object. It watches four kinds of resources: daemonset, pod, node and ControllerRevision. Whenever one of these resources changes, the daemonset controller is triggered to reconcile the corresponding daemonset object, thereby creating daemonset pods on suitable nodes, deleting daemonset pods from unsuitable nodes, performing rolling updates of the daemonset, updating the daemonset status, cleaning up old daemonset revisions, and so on.
daemonset controller architecture diagram
The overall composition and processing flow of the daemonset controller is shown in the diagram below. The daemonset controller registers event handlers for the daemonset, pod, node and ControllerRevision objects; when an event occurs it is watched and the corresponding daemonset object is put into the queue, and the syncDaemonSet method, which contains the core reconciliation logic of the daemonset controller, takes the daemonset object out of the queue and reconciles it.
daemonset update strategies
(1) OnDelete: with the OnDelete update strategy, after the DaemonSet pod template is updated, new DaemonSet pods are only created automatically once you manually delete the old DaemonSet pods.
(2) RollingUpdate: the default update strategy. With the RollingUpdate update strategy, after the DaemonSet pod template is updated, old DaemonSet pods are deleted and new DaemonSet pods are created automatically according to the rolling update configuration. During a rolling update, at most one pod of the DaemonSet runs on each node (a minimal sketch of the corresponding API fields follows this list).
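As a hedged illustration (this snippet is not taken from the kubernetes source tree), the following minimal Go sketch shows how the two strategies are expressed with the apps/v1 API types; the maxUnavailable value of 1 is only an example, although it also happens to be the default:
// illustrative example, not part of the kubernetes source tree
package main

import (
	"fmt"

	apps "k8s.io/api/apps/v1"
	"k8s.io/apimachinery/pkg/util/intstr"
)

func main() {
	// OnDelete: new pods are only created after old pods are deleted manually.
	onDelete := apps.DaemonSetUpdateStrategy{Type: apps.OnDeleteDaemonSetStrategyType}

	// RollingUpdate: old pods are deleted automatically; maxUnavailable limits how
	// many nodes may be left without an available daemon pod at the same time.
	maxUnavailable := intstr.FromInt(1)
	rollingUpdate := apps.DaemonSetUpdateStrategy{
		Type:          apps.RollingUpdateDaemonSetStrategyType,
		RollingUpdate: &apps.RollingUpdateDaemonSet{MaxUnavailable: &maxUnavailable},
	}

	fmt.Println(onDelete.Type, rollingUpdate.Type, rollingUpdate.RollingUpdate.MaxUnavailable)
}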
The daemonset controller analysis is divided into two parts:
(1) daemonset controller initialization and startup analysis;
(2) daemonset controller processing logic analysis.
1. daemonset controller initialization and startup analysis
Based on tag v1.17.4
https://github.com/kubernetes/kubernetes/releases/tag/v1.17.4
We go straight to the startDaemonSetController function, which is the entry point for analyzing the initialization and startup of the daemonset controller.
startDaemonSetController
The main logic of startDaemonSetController:
(1) call daemon.NewDaemonSetsController to create and initialize the DaemonSetsController;
(2) start a goroutine to run the DaemonSetsController's Run method.
// cmd/kube-controller-manager/app/apps.go
func startDaemonSetController(ctx ControllerContext) (http.Handler, bool, error) {
if !ctx.AvailableResources[schema.GroupVersionResource{Group: "apps", Version: "v1", Resource: "daemonsets"}] {
return nil, false, nil
}
dsc, err := daemon.NewDaemonSetsController(
ctx.InformerFactory.Apps().V1().DaemonSets(),
ctx.InformerFactory.Apps().V1().ControllerRevisions(),
ctx.InformerFactory.Core().V1().Pods(),
ctx.InformerFactory.Core().V1().Nodes(),
ctx.ClientBuilder.ClientOrDie("daemon-set-controller"),
flowcontrol.NewBackOff(1*time.Second, 15*time.Minute),
)
if err != nil {
return nil, true, fmt.Errorf("error creating DaemonSets controller: %v", err)
}
go dsc.Run(int(ctx.ComponentConfig.DaemonSetController.ConcurrentDaemonSetSyncs), ctx.Stop)
return nil, true, nil
}
1.1 daemon.NewDaemonSetsController
From the code of the daemon.NewDaemonSetsController function we can see that the daemonset controller registers EventHandlers for the daemonset, node, pod and ControllerRevision objects, i.e. it listens for events on these objects, puts the events into the event queue and processes them. It also assigns the dsc.syncDaemonSet method to dsc.syncHandler, i.e. registers it as the core processing method; dsc.Run calls this core processing method to reconcile daemonset objects (the core processing method is analyzed in detail later).
// pkg/controller/daemon/daemon_controller.go
func NewDaemonSetsController(
daemonSetInformer appsinformers.DaemonSetInformer,
historyInformer appsinformers.ControllerRevisionInformer,
podInformer coreinformers.PodInformer,
nodeInformer coreinformers.NodeInformer,
kubeClient clientset.Interface,
failedPodsBackoff *flowcontrol.Backoff,
) (*DaemonSetsController, error) {
eventBroadcaster := record.NewBroadcaster()
eventBroadcaster.StartLogging(klog.Infof)
eventBroadcaster.StartRecordingToSink(&v1core.EventSinkImpl{Interface: kubeClient.CoreV1().Events("")})
if kubeClient != nil && kubeClient.CoreV1().RESTClient().GetRateLimiter() != nil {
if err := ratelimiter.RegisterMetricAndTrackRateLimiterUsage("daemon_controller", kubeClient.CoreV1().RESTClient().GetRateLimiter()); err != nil {
return nil, err
}
}
dsc := &DaemonSetsController{
kubeClient: kubeClient,
eventRecorder: eventBroadcaster.NewRecorder(scheme.Scheme, v1.EventSource{Component: "daemonset-controller"}),
podControl: controller.RealPodControl{
KubeClient: kubeClient,
Recorder: eventBroadcaster.NewRecorder(scheme.Scheme, v1.EventSource{Component: "daemonset-controller"}),
},
crControl: controller.RealControllerRevisionControl{
KubeClient: kubeClient,
},
burstReplicas: BurstReplicas,
expectations: controller.NewControllerExpectations(),
queue: workqueue.NewNamedRateLimitingQueue(workqueue.DefaultControllerRateLimiter(), "daemonset"),
}
daemonSetInformer.Informer().AddEventHandler(cache.ResourceEventHandlerFuncs{
AddFunc: func(obj interface{}) {
ds := obj.(*apps.DaemonSet)
klog.V(4).Infof("Adding daemon set %s", ds.Name)
dsc.enqueueDaemonSet(ds)
},
UpdateFunc: func(old, cur interface{}) {
oldDS := old.(*apps.DaemonSet)
curDS := cur.(*apps.DaemonSet)
klog.V(4).Infof("Updating daemon set %s", oldDS.Name)
dsc.enqueueDaemonSet(curDS)
},
DeleteFunc: dsc.deleteDaemonset,
})
dsc.dsLister = daemonSetInformer.Lister()
dsc.dsStoreSynced = daemonSetInformer.Informer().HasSynced
historyInformer.Informer().AddEventHandler(cache.ResourceEventHandlerFuncs{
AddFunc: dsc.addHistory,
UpdateFunc: dsc.updateHistory,
DeleteFunc: dsc.deleteHistory,
})
dsc.historyLister = historyInformer.Lister()
dsc.historyStoreSynced = historyInformer.Informer().HasSynced
// Watch for creation/deletion of pods. The reason we watch is that we don't want a daemon set to create/delete
// more pods until all the effects (expectations) of a daemon set's create/delete have been observed.
podInformer.Informer().AddEventHandler(cache.ResourceEventHandlerFuncs{
AddFunc: dsc.addPod,
UpdateFunc: dsc.updatePod,
DeleteFunc: dsc.deletePod,
})
dsc.podLister = podInformer.Lister()
// This custom indexer will index pods based on their NodeName which will decrease the amount of pods we need to get in simulate() call.
podInformer.Informer().GetIndexer().AddIndexers(cache.Indexers{
"nodeName": indexByPodNodeName,
})
dsc.podNodeIndex = podInformer.Informer().GetIndexer()
dsc.podStoreSynced = podInformer.Informer().HasSynced
nodeInformer.Informer().AddEventHandler(cache.ResourceEventHandlerFuncs{
AddFunc: dsc.addNode,
UpdateFunc: dsc.updateNode,
},
)
dsc.nodeStoreSynced = nodeInformer.Informer().HasSynced
dsc.nodeLister = nodeInformer.Lister()
dsc.syncHandler = dsc.syncDaemonSet
dsc.enqueueDaemonSet = dsc.enqueue
dsc.failedPodsBackoff = failedPodsBackoff
return dsc, nil
}
1.2 dsc.Run
Look mainly at the for loop: according to the value of workers (default 2), it starts the corresponding number of goroutines running the dsc.runWorker method, whose main job is to call the daemonset controller core processing method dsc.syncDaemonSet described earlier.
// pkg/controller/daemon/daemon_controller.go
func (dsc *DaemonSetsController) Run(workers int, stopCh <-chan struct{}) {
defer utilruntime.HandleCrash()
defer dsc.queue.ShutDown()
klog.Infof("Starting daemon sets controller")
defer klog.Infof("Shutting down daemon sets controller")
if !cache.WaitForNamedCacheSync("daemon sets", stopCh, dsc.podStoreSynced, dsc.nodeStoreSynced, dsc.historyStoreSynced, dsc.dsStoreSynced) {
return
}
for i := 0; i < workers; i++ {
go wait.Until(dsc.runWorker, time.Second, stopCh)
}
go wait.Until(dsc.failedPodsBackoff.GC, BackoffGCInterval, stopCh)
<-stopCh
}
1.2.1 dsc.runWorker
runWorker takes an event key from the queue and calls dsc.syncHandler, i.e. dsc.syncDaemonSet, to reconcile it. As described earlier, the events in the queue come from the EventHandlers the daemonset controller registered for the daemonset, node, pod and ControllerRevision objects; their change events are watched and then put into the queue.
// pkg/controller/daemon/daemon_controller.go
func (dsc *DaemonSetsController) runWorker() {
for dsc.processNextWorkItem() {
}
}
func (dsc *DaemonSetsController) processNextWorkItem() bool {
dsKey, quit := dsc.queue.Get()
if quit {
return false
}
defer dsc.queue.Done(dsKey)
err := dsc.syncHandler(dsKey.(string))
if err == nil {
dsc.queue.Forget(dsKey)
return true
}
utilruntime.HandleError(fmt.Errorf("%v failed with : %v", dsKey, err))
dsc.queue.AddRateLimited(dsKey)
return true
}
2. daemonset controller core processing logic analysis
syncDaemonSet
Let's go straight to syncDaemonSet, the daemonset controller's core processing method.
Main logic:
(1) record the current time when the method starts executing and define a defer function that computes the method's total execution time, i.e. how long it takes to reconcile one daemonset;
(2) get the daemonset object by its namespace and name;
(3) get the list of all node objects;
(4) check whether the daemonset object's DeletionTimestamp is non-nil; if it is non-nil, return directly, since the daemonset object is being deleted and no further reconciliation is needed;
(5) call dsc.constructHistory to get the daemonset's current and old revisions;
(6) call dsc.expectations.SatisfiedExpectations to check whether the daemonset object satisfies the expectations mechanism (the expectations mechanism serves the same purpose as in the replicaset controller analysis and is not expanded here); if not, call dsc.updateDaemonSetStatus to update the daemonset status and return;
(7) call dsc.manage; the dsc.manage method does not distinguish between pods of old and new daemonset revisions, it only ensures that the daemonset's pods run on every node that meets the conditions: it creates a pod on a suitable node that has none, and deletes the daemonset's pods from nodes that do not meet the conditions;
(8) call dsc.expectations.SatisfiedExpectations again to check the expectations mechanism; if satisfied, check the daemonset's configured update strategy: if it is rolling update, call dsc.rollingUpdate, which handles the rolling update of the daemonset object and deletes old pods according to the configured rolling update settings (pod creation is done in the dsc.manage method);
when the daemonset update strategy is OnDelete, nothing extra is done here, because new DaemonSet pods are only created automatically after the old DaemonSet pods are deleted manually; once an old pod is deleted manually, the new-revision pod is created in the dsc.manage method;
(9) call dsc.cleanupHistory to clean up the daemonset's historical revisions that no longer have pods, according to the daemonset's spec.revisionHistoryLimit configuration and revision age (the oldest revisions are cleaned up first);
(10) finally, call dsc.updateDaemonSetStatus to update the daemonset's status based on the deployment situation of the existing daemonset pods, the pods' states, whether the nodes satisfy the pods' run conditions, and other information.
// pkg/controller/daemon/daemon_controller.go
func (dsc *DaemonSetsController) syncDaemonSet(key string) error {
startTime := time.Now()
defer func() {
klog.V(4).Infof("Finished syncing daemon set %q (%v)", key, time.Since(startTime))
}()
namespace, name, err := cache.SplitMetaNamespaceKey(key)
if err != nil {
return err
}
ds, err := dsc.dsLister.DaemonSets(namespace).Get(name)
if errors.IsNotFound(err) {
klog.V(3).Infof("daemon set has been deleted %v", key)
dsc.expectations.DeleteExpectations(key)
return nil
}
if err != nil {
return fmt.Errorf("unable to retrieve ds %v from store: %v", key, err)
}
nodeList, err := dsc.nodeLister.List(labels.Everything())
if err != nil {
return fmt.Errorf("couldn't get list of nodes when syncing daemon set %#v: %v", ds, err)
}
everything := metav1.LabelSelector{}
if reflect.DeepEqual(ds.Spec.Selector, &everything) {
dsc.eventRecorder.Eventf(ds, v1.EventTypeWarning, SelectingAllReason, "This daemon set is selecting all pods. A non-empty selector is required.")
return nil
}
// Don't process a daemon set until all its creations and deletions have been processed.
// For example if daemon set foo asked for 3 new daemon pods in the previous call to manage,
// then we do not want to call manage on foo until the daemon pods have been created.
dsKey, err := controller.KeyFunc(ds)
if err != nil {
return fmt.Errorf("couldn't get key for object %#v: %v", ds, err)
}
// If the DaemonSet is being deleted (either by foreground deletion or
// orphan deletion), we cannot be sure if the DaemonSet history objects
// it owned still exist -- those history objects can either be deleted
// or orphaned. Garbage collector doesn't guarantee that it will delete
// DaemonSet pods before deleting DaemonSet history objects, because
// DaemonSet history doesn't own DaemonSet pods. We cannot reliably
// calculate the status of a DaemonSet being deleted. Therefore, return
// here without updating status for the DaemonSet being deleted.
if ds.DeletionTimestamp != nil {
return nil
}
// Construct histories of the DaemonSet, and get the hash of current history
cur, old, err := dsc.constructHistory(ds)
if err != nil {
return fmt.Errorf("failed to construct revisions of DaemonSet: %v", err)
}
hash := cur.Labels[apps.DefaultDaemonSetUniqueLabelKey]
if !dsc.expectations.SatisfiedExpectations(dsKey) {
// Only update status. Don't raise observedGeneration since controller didn't process object of that generation.
return dsc.updateDaemonSetStatus(ds, nodeList, hash, false)
}
err = dsc.manage(ds, nodeList, hash)
if err != nil {
return err
}
// Process rolling updates if we're ready.
if dsc.expectations.SatisfiedExpectations(dsKey) {
switch ds.Spec.UpdateStrategy.Type {
case apps.OnDeleteDaemonSetStrategyType:
case apps.RollingUpdateDaemonSetStrategyType:
err = dsc.rollingUpdate(ds, nodeList, hash)
}
if err != nil {
return err
}
}
err = dsc.cleanupHistory(ds, old)
if err != nil {
return fmt.Errorf("failed to clean up revisions of DaemonSet: %v", err)
}
return dsc.updateDaemonSetStatus(ds, nodeList, hash, true)
}
2.1 dsc.manage
The dsc.manage method does not distinguish between pods of old and new daemonset revisions; it mainly ensures that the daemonset's pods run on every node that meets the conditions, creating a pod on a suitable node that has no daemonset pod and deleting the daemonset's pods from nodes that do not meet the conditions.
(1) call dsc.getNodesToDaemonPods, which gets all of the daemonset's pods according to its Selector and returns a map associating nodes with their pods;
(2) iterate over the node list obtained earlier and run dsc.podsShouldBeOnNode, which matches whether the pod specifies a nodeName, nodeSelector, ToleratesNodeTaints, etc. against the node object's information to determine whether the daemonset's pod already exists on a node and whether a pod should be created for the daemonset or deleted from it;
(3) call getUnscheduledPodsWithoutNode, which compares each pod's nodeName with the node list obtained earlier and adds pods whose nodeName no longer exists to the list of pods to delete;
(4) call dsc.syncNodes to perform the corresponding pod creations and deletions based on the list of nodes needing a pod and the list of pods to delete obtained above.
// pkg/controller/daemon/daemon_controller.go
func (dsc *DaemonSetsController) manage(ds *apps.DaemonSet, nodeList []*v1.Node, hash string) error {
// Find out the pods which are created for the nodes by DaemonSet.
nodeToDaemonPods, err := dsc.getNodesToDaemonPods(ds)
if err != nil {
return fmt.Errorf("couldn't get node to daemon pod mapping for daemon set %q: %v", ds.Name, err)
}
// For each node, if the node is running the daemon pod but isn't supposed to, kill the daemon
// pod. If the node is supposed to run the daemon pod, but isn't, create the daemon pod on the node.
var nodesNeedingDaemonPods, podsToDelete []string
for _, node := range nodeList {
nodesNeedingDaemonPodsOnNode, podsToDeleteOnNode, err := dsc.podsShouldBeOnNode(
node, nodeToDaemonPods, ds)
if err != nil {
continue
}
nodesNeedingDaemonPods = append(nodesNeedingDaemonPods, nodesNeedingDaemonPodsOnNode...)
podsToDelete = append(podsToDelete, podsToDeleteOnNode...)
}
// Remove unscheduled pods assigned to not existing nodes when daemonset pods are scheduled by scheduler.
// If node doesn't exist then pods are never scheduled and can't be deleted by PodGCController.
podsToDelete = append(podsToDelete, getUnscheduledPodsWithoutNode(nodeList, nodeToDaemonPods)...)
// Label new pods using the hash label value of the current history when creating them
if err = dsc.syncNodes(ds, podsToDelete, nodesNeedingDaemonPods, hash); err != nil {
return err
}
return nil
}
2.1.1 dsc.podsShouldBeOnNode
The dsc.podsShouldBeOnNode method decides whether a node needs to run a daemonset pod; it returns nodesNeedingDaemonPods and podsToDelete, the nodes that need to run a daemonset pod and the list of pods to delete, respectively.
(1) call dsc.nodeShouldRunDaemonPod, which returns shouldSchedule and shouldContinueRunning, indicating whether the daemonset pod should be scheduled to the node and whether a daemonset pod on the node may keep running;
(2) when shouldSchedule is true, i.e. a pod should be scheduled to the node but none exists there yet, add the node to nodesNeedingDaemonPods;
(3) when shouldContinueRunning is true, delete any failed daemonset pods on the node (subject to a backoff, to avoid hot-looping when the kubelet keeps rejecting the pod), then take the daemonset pods still running on the node, sort them (ready pods first, then by creation time) and keep only one pod (per the code comment, the oldest is preserved), adding the rest to podsToDelete;
(4) when shouldContinueRunning is false, i.e. the daemonset pod should not keep running on the node but the daemonset's pods already exist there, add all of the daemonset's pods on that node to podsToDelete;
(5) return nodesNeedingDaemonPods and podsToDelete, i.e. the nodes that need to run a daemonset pod and the list of pods to delete.
// pkg/controller/daemon/daemon_controller.go
func (dsc *DaemonSetsController) podsShouldBeOnNode(
node *v1.Node,
nodeToDaemonPods map[string][]*v1.Pod,
ds *apps.DaemonSet,
) (nodesNeedingDaemonPods, podsToDelete []string, err error) {
_, shouldSchedule, shouldContinueRunning, err := dsc.nodeShouldRunDaemonPod(node, ds)
if err != nil {
return
}
daemonPods, exists := nodeToDaemonPods[node.Name]
switch {
case shouldSchedule && !exists:
// If daemon pod is supposed to be running on node, but isn't, create daemon pod.
nodesNeedingDaemonPods = append(nodesNeedingDaemonPods, node.Name)
case shouldContinueRunning:
// If a daemon pod failed, delete it
// If there's non-daemon pods left on this node, we will create it in the next sync loop
var daemonPodsRunning []*v1.Pod
for _, pod := range daemonPods {
if pod.DeletionTimestamp != nil {
continue
}
if pod.Status.Phase == v1.PodFailed {
// This is a critical place where DS is often fighting with kubelet that rejects pods.
// We need to avoid hot looping and backoff.
backoffKey := failedPodsBackoffKey(ds, node.Name)
now := dsc.failedPodsBackoff.Clock.Now()
inBackoff := dsc.failedPodsBackoff.IsInBackOffSinceUpdate(backoffKey, now)
if inBackoff {
delay := dsc.failedPodsBackoff.Get(backoffKey)
klog.V(4).Infof("Deleting failed pod %s/%s on node %s has been limited by backoff - %v remaining",
pod.Namespace, pod.Name, node.Name, delay)
dsc.enqueueDaemonSetAfter(ds, delay)
continue
}
dsc.failedPodsBackoff.Next(backoffKey, now)
msg := fmt.Sprintf("Found failed daemon pod %s/%s on node %s, will try to kill it", pod.Namespace, pod.Name, node.Name)
klog.V(2).Infof(msg)
// Emit an event so that it's discoverable to users.
dsc.eventRecorder.Eventf(ds, v1.EventTypeWarning, FailedDaemonPodReason, msg)
podsToDelete = append(podsToDelete, pod.Name)
} else {
daemonPodsRunning = append(daemonPodsRunning, pod)
}
}
// If daemon pod is supposed to be running on node, but more than 1 daemon pod is running, delete the excess daemon pods.
// Sort the daemon pods by creation time, so the oldest is preserved.
if len(daemonPodsRunning) > 1 {
sort.Sort(podByCreationTimestampAndPhase(daemonPodsRunning))
for i := 1; i < len(daemonPodsRunning); i++ {
podsToDelete = append(podsToDelete, daemonPodsRunning[i].Name)
}
}
case !shouldContinueRunning && exists:
// If daemon pod isn't supposed to run on node, but it is, delete all daemon pods on node.
for _, pod := range daemonPods {
if pod.DeletionTimestamp != nil {
continue
}
podsToDelete = append(podsToDelete, pod.Name)
}
}
return nodesNeedingDaemonPods, podsToDelete, nil
}
dsc.nodeShouldRunDaemonPod
The dsc.nodeShouldRunDaemonPod method is not analyzed in depth here. It mainly calls dsc.simulate to run the Predicates (scheduling predicate) checks against a node to see whether the node satisfies the pod's run conditions; if the predicates fail, it uses the failure reasons to compute wantToRun, shouldSchedule and shouldContinueRunning, which indicate, respectively, whether the node matches the pod's selector, taints and so on (without considering whether the node's resources are sufficient), whether the daemonset pod should be scheduled to the node, and whether a daemonset pod on the node may keep running; if the predicates succeed, all three are returned as true.
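The following is a simplified, hypothetical sketch of only the taint-related part of that decision; the function below is illustrative and is not the controller's actual simulate/Predicates-based implementation. The idea it shows: an untolerated NoSchedule taint only blocks new scheduling, while an untolerated NoExecute taint also means an existing daemon pod should not keep running.
// simplified illustration only; not the actual nodeShouldRunDaemonPod code
package main

import (
	"fmt"

	v1 "k8s.io/api/core/v1"
)

// shouldRunOnNode is a hypothetical helper: it inspects only the node's taints
// and the pod's tolerations, whereas the real code runs full scheduling predicates.
func shouldRunOnNode(node *v1.Node, tolerations []v1.Toleration) (shouldSchedule, shouldContinueRunning bool) {
	shouldSchedule, shouldContinueRunning = true, true
	for _, taint := range node.Spec.Taints {
		tolerated := false
		for _, t := range tolerations {
			if t.ToleratesTaint(&taint) {
				tolerated = true
				break
			}
		}
		if tolerated {
			continue
		}
		switch taint.Effect {
		case v1.TaintEffectNoSchedule:
			shouldSchedule = false // do not place a new daemon pod here
		case v1.TaintEffectNoExecute:
			shouldSchedule, shouldContinueRunning = false, false // an existing pod must be removed too
		}
	}
	return shouldSchedule, shouldContinueRunning
}

func main() {
	node := &v1.Node{Spec: v1.NodeSpec{Taints: []v1.Taint{
		{Key: "node.kubernetes.io/unreachable", Effect: v1.TaintEffectNoExecute},
	}}}
	fmt.Println(shouldRunOnNode(node, nil)) // false false
}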
2.1.2 dsc.syncNodes
dsc.syncNodes is the method in which the daemonset controller actually creates and deletes pods.
This method also involves the expectations mechanism, which serves the same purpose and is used in essentially the same way as in the replicaset controller; if you have forgotten it, look back at the expectations analysis in the replicaset controller analysis, it is not expanded here.
(1) compute the numbers of pods to create and to delete, capped at dsc.burstReplicas (250), i.e. each sync of a daemonset object can create/delete at most 250 pods, and anything beyond that has to wait for the next sync;
(2) call dsc.expectations.SetExpectations to set the expectations;
(3) call util.CreatePodTemplate to compute the podTemplate to create;
(4) do the pod creations first: as in the replicaset controller, pod creation uses a slow-start algorithm and proceeds in batches, 1 pod in the first batch, 2 in the second, 4 in the third, doubling each time until the desired number is reached; each batch starts as many goroutines as there are pods to create in that batch, each goroutine creating one pod, and a WaitGroup waits for all creations of the batch to finish before the next batch starts;
(5) then do the pod deletions: for each pod to delete, a goroutine is started to perform the deletion, and a WaitGroup waits for all goroutines to finish.
// pkg/controller/daemon/daemon_controller.go
func (dsc *DaemonSetsController) syncNodes(ds *apps.DaemonSet, podsToDelete, nodesNeedingDaemonPods []string, hash string) error {
// We need to set expectations before creating/deleting pods to avoid race conditions.
dsKey, err := controller.KeyFunc(ds)
if err != nil {
return fmt.Errorf("couldn't get key for object %#v: %v", ds, err)
}
createDiff := len(nodesNeedingDaemonPods)
deleteDiff := len(podsToDelete)
if createDiff > dsc.burstReplicas {
createDiff = dsc.burstReplicas
}
if deleteDiff > dsc.burstReplicas {
deleteDiff = dsc.burstReplicas
}
dsc.expectations.SetExpectations(dsKey, createDiff, deleteDiff)
// error channel to communicate back failures. make the buffer big enough to avoid any blocking
errCh := make(chan error, createDiff+deleteDiff)
klog.V(4).Infof("Nodes needing daemon pods for daemon set %s: %+v, creating %d", ds.Name, nodesNeedingDaemonPods, createDiff)
createWait := sync.WaitGroup{}
// If the returned error is not nil we have a parse error.
// The controller handles this via the hash.
generation, err := util.GetTemplateGeneration(ds)
if err != nil {
generation = nil
}
template := util.CreatePodTemplate(ds.Spec.Template, generation, hash)
// Batch the pod creates. Batch sizes start at SlowStartInitialBatchSize
// and double with each successful iteration in a kind of "slow start".
// This handles attempts to start large numbers of pods that would
// likely all fail with the same error. For example a project with a
// low quota that attempts to create a large number of pods will be
// prevented from spamming the API service with the pod create requests
// after one of its pods fails. Conveniently, this also prevents the
// event spam that those failures would generate.
batchSize := integer.IntMin(createDiff, controller.SlowStartInitialBatchSize)
for pos := 0; createDiff > pos; batchSize, pos = integer.IntMin(2*batchSize, createDiff-(pos+batchSize)), pos+batchSize {
errorCount := len(errCh)
createWait.Add(batchSize)
for i := pos; i < pos+batchSize; i++ {
go func(ix int) {
defer createWait.Done()
podTemplate := template.DeepCopy()
// The pod's NodeAffinity will be updated to make sure the Pod is bound
// to the target node by default scheduler. It is safe to do so because there
// should be no conflicting node affinity with the target node.
podTemplate.Spec.Affinity = util.ReplaceDaemonSetPodNodeNameNodeAffinity(
podTemplate.Spec.Affinity, nodesNeedingDaemonPods[ix])
err := dsc.podControl.CreatePodsWithControllerRef(ds.Namespace, podTemplate,
ds, metav1.NewControllerRef(ds, controllerKind))
if err != nil {
if errors.HasStatusCause(err, v1.NamespaceTerminatingCause) {
// If the namespace is being torn down, we can safely ignore
// this error since all subsequent creations will fail.
return
}
}
if err != nil {
klog.V(2).Infof("Failed creation, decrementing expectations for set %q/%q", ds.Namespace, ds.Name)
dsc.expectations.CreationObserved(dsKey)
errCh <- err
utilruntime.HandleError(err)
}
}(i)
}
createWait.Wait()
// any skipped pods that we never attempted to start shouldn't be expected.
skippedPods := createDiff - (batchSize + pos)
if errorCount < len(errCh) && skippedPods > 0 {
klog.V(2).Infof("Slow-start failure. Skipping creation of %d pods, decrementing expectations for set %q/%q", skippedPods, ds.Namespace, ds.Name)
dsc.expectations.LowerExpectations(dsKey, skippedPods, 0)
// The skipped pods will be retried later. The next controller resync will
// retry the slow start process.
break
}
}
klog.V(4).Infof("Pods to delete for daemon set %s: %+v, deleting %d", ds.Name, podsToDelete, deleteDiff)
deleteWait := sync.WaitGroup{}
deleteWait.Add(deleteDiff)
for i := 0; i < deleteDiff; i++ {
go func(ix int) {
defer deleteWait.Done()
if err := dsc.podControl.DeletePod(ds.Namespace, podsToDelete[ix], ds); err != nil {
klog.V(2).Infof("Failed deletion, decrementing expectations for set %q/%q", ds.Namespace, ds.Name)
dsc.expectations.DeletionObserved(dsKey)
errCh <- err
utilruntime.HandleError(err)
}
}(i)
}
deleteWait.Wait()
// collect errors if any for proper reporting/retry logic in the controller
errors := []error{}
close(errCh)
for err := range errCh {
errors = append(errors, err)
}
return utilerrors.NewAggregate(errors)
}
2.2 dsc.rollingUpdate
The dsc.rollingUpdate method handles the rolling update of a daemonset object: according to the configured rolling update settings, it deletes old pods (pod creation is done in the dsc.manage method).
(1) call dsc.getNodesToDaemonPods to get the map associating the daemonset's pods with nodes;
(2) call dsc.getAllDaemonSetPods to get all pods belonging to old daemonset revisions;
(3) call dsc.getUnavailableNumbers, which reads maxUnavailable from the daemonset's rolling update configuration and computes numUnavailable, the number of eligible nodes that have no daemonset pod or whose daemonset pod is unavailable;
(4) call util.SplitByAvailablePods to split all old-revision daemonset pods into an oldAvailablePods list and an oldUnavailablePods list;
(5) define a string slice oldPodsToDelete to hold the pods that are to be deleted;
(6) add all oldUnavailablePods to oldPodsToDelete;
(7) iterate over oldAvailablePods: while numUnavailable is less than maxUnavailable, add the pod to oldPodsToDelete and increment numUnavailable by one;
(8) call dsc.syncNodes to delete the pods in oldPodsToDelete.
// pkg/controller/daemon/update.go
func (dsc *DaemonSetsController) rollingUpdate(ds *apps.DaemonSet, nodeList []*v1.Node, hash string) error {
nodeToDaemonPods, err := dsc.getNodesToDaemonPods(ds)
if err != nil {
return fmt.Errorf("couldn't get node to daemon pod mapping for daemon set %q: %v", ds.Name, err)
}
_, oldPods := dsc.getAllDaemonSetPods(ds, nodeToDaemonPods, hash)
maxUnavailable, numUnavailable, err := dsc.getUnavailableNumbers(ds, nodeList, nodeToDaemonPods)
if err != nil {
return fmt.Errorf("couldn't get unavailable numbers: %v", err)
}
oldAvailablePods, oldUnavailablePods := util.SplitByAvailablePods(ds.Spec.MinReadySeconds, oldPods)
// for oldPods delete all not running pods
var oldPodsToDelete []string
klog.V(4).Infof("Marking all unavailable old pods for deletion")
for _, pod := range oldUnavailablePods {
// Skip terminating pods. We won't delete them again
if pod.DeletionTimestamp != nil {
continue
}
klog.V(4).Infof("Marking pod %s/%s for deletion", ds.Name, pod.Name)
oldPodsToDelete = append(oldPodsToDelete, pod.Name)
}
klog.V(4).Infof("Marking old pods for deletion")
for _, pod := range oldAvailablePods {
if numUnavailable >= maxUnavailable {
klog.V(4).Infof("Number of unavailable DaemonSet pods: %d, is equal to or exceeds allowed maximum: %d", numUnavailable, maxUnavailable)
break
}
klog.V(4).Infof("Marking pod %s/%s for deletion", ds.Name, pod.Name)
oldPodsToDelete = append(oldPodsToDelete, pod.Name)
numUnavailable++
}
return dsc.syncNodes(ds, oldPodsToDelete, []string{}, hash)
}
2.3 dsc.updateDaemonSetStatus
The dsc.updateDaemonSetStatus method updates the daemonset's status based on the deployment situation of the existing daemonset pods, the pods' states, whether the nodes satisfy the pods' run conditions, and other information. The code is not analyzed here; only the meaning of each field in the daemonset status is explained (a small illustrative example follows the list).
(1) currentNumberScheduled: the number of nodes on which a daemonset pod has been scheduled;
(2) desiredNumberScheduled: the number of nodes on which a daemonset pod is expected to be scheduled;
(3) numberMisscheduled: the number of nodes that should not run a daemonset pod but on which one has been scheduled;
(4) numberAvailable: the number of pods that have reached the Available state (a pod is considered Available once it has been Ready for MinReadySeconds);
(5) numberReady: the number of pods that have reached the Ready state;
(6) numberUnavailable: desiredNumberScheduled - numberAvailable;
(7) updatedNumberScheduled: the number of nodes on which a daemonset pod of the latest revision has been scheduled.
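A small, hypothetical example of these fields (the numbers are made up for illustration) using the apps/v1 DaemonSetStatus type; the real values are computed by dsc.updateDaemonSetStatus:
// illustrative example only; these values are normally filled in by the controller
package main

import (
	"fmt"

	apps "k8s.io/api/apps/v1"
)

func main() {
	// Assume 5 nodes should run the daemon pod, all 5 pods are Ready, but one has
	// not yet been Ready for MinReadySeconds, and one extra pod runs on a node
	// that should not have one.
	status := apps.DaemonSetStatus{
		DesiredNumberScheduled: 5,
		CurrentNumberScheduled: 5,
		UpdatedNumberScheduled: 5,
		NumberMisscheduled:     1,
		NumberReady:            5,
		NumberAvailable:        4,
	}
	// numberUnavailable = desiredNumberScheduled - numberAvailable
	status.NumberUnavailable = status.DesiredNumberScheduled - status.NumberAvailable
	fmt.Printf("unavailable: %d\n", status.NumberUnavailable) // 1
}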
Summary
The flow by which the daemonset controller creates pods is similar to that of the replicaset controller: both use the expectations mechanism and both limit how many pods can be created or deleted in one reconciliation. Like the statefulset, the daemonset has two update strategies, OnDelete and RollingUpdate: with OnDelete, the corresponding pods have to be deleted manually before the daemonset controller creates new ones; RollingUpdate differs from the statefulset and deployment in that the update deletes a pod first and then creates a new one, unlike a deployment, which can create the new pod before deleting the old one.
daemonset controller architecture
(figure: daemonset controller architecture / syncDaemonSet flow)
daemonset controller core processing logic
The core processing logic of the daemonset controller is to reconcile daemonset objects so that the daemonset's pods are created on suitable nodes and deleted from unsuitable nodes, that when a rolling update is triggered old pods are deleted and new pods are created according to the configured rolling update strategy, that the daemonset's historical revisions are cleaned up according to the revision history limit configuration, and that finally the daemonset object's status is updated.
daemonset controller pod creation algorithm
The daemonset controller's pod creation algorithm is almost identical to the replicaset controller's: pods are created in multiple batches that grow as 1, 2, 4, 8, ... (at most 250 pods can be created per reconciliation; anything above that limit is created in the next reconciliation). If any pod creation in a batch fails (for example because the apiserver rate-limits and drops the request; note that timeouts are excluded, because initialization handling may time out), the remaining batches are not attempted; the creation algorithm runs again at the daemonset object's next reconciliation, until every eligible node has a pod of the daemonset.
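The batch-size progression can be illustrated with the following small standalone sketch, which approximates the loop in syncNodes and assumes the initial batch size is 1 (matching controller.SlowStartInitialBatchSize):
// standalone illustration of the slow-start batch sizes used when creating pods
package main

import "fmt"

// slowStartBatches returns the sizes of the creation batches for `total` pods:
// start at 1 and double each round, never exceeding what remains to be created.
func slowStartBatches(total int) []int {
	var batches []int
	batchSize := 1
	for pos := 0; pos < total; {
		if batchSize > total-pos {
			batchSize = total - pos
		}
		batches = append(batches, batchSize)
		pos += batchSize
		batchSize *= 2
	}
	return batches
}

func main() {
	// For 10 pods the batches are [1 2 4 3]; if a creation in some batch fails,
	// the remaining batches are skipped until the next reconciliation.
	fmt.Println(slowStartBatches(10))
}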
daemonset controller pod deletion algorithm
The daemonset controller's pod deletion algorithm starts one goroutine per pod to delete (at most 250 pods can be deleted per reconciliation) and waits for all of the goroutines to finish. If any deletion fails (for example because the apiserver rate-limits and drops the request) or some pods exceeded the 250 cap, the deletion algorithm runs again at the daemonset object's next reconciliation, until all pods expected to be deleted are gone.