Pod anti-affinity to keep the backup pod from being scheduled on the same node as the target pod
This commit is contained in: parent bf2d50c41a · commit 72390d9564
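For context on what the change produces, here is a minimal, self-contained sketch (not the operator's code; the label key and value are placeholders for formolv1alpha1.FORMOL_LABEL and target.TargetName) that builds the same preferred anti-affinity and prints the JSON that ends up in the backup Job's pod template:

package main

import (
	"encoding/json"
	"fmt"

	corev1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

func main() {
	affinity := corev1.Affinity{
		PodAntiAffinity: &corev1.PodAntiAffinity{
			// "Preferred" is a soft rule: the scheduler avoids nodes that
			// already run a matching pod when it can, but will still
			// co-locate if no other node fits.
			PreferredDuringSchedulingIgnoredDuringExecution: []corev1.WeightedPodAffinityTerm{
				{
					Weight: 50, // 1-100; how strongly the scheduler favors honoring this term
					PodAffinityTerm: corev1.PodAffinityTerm{
						LabelSelector: &metav1.LabelSelector{
							// placeholder for formolv1alpha1.FORMOL_LABEL: target.TargetName
							MatchLabels: map[string]string{"backup-target": "demo"},
						},
						// "same node" is defined by this label: one hostname = one node
						TopologyKey: "kubernetes.io/hostname",
					},
				},
			},
		},
	}
	out, _ := json.MarshalIndent(&affinity, "", "  ")
	fmt.Println(string(out))
}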
@@ -41,7 +41,7 @@ func (r *BackupSessionReconciler) backupJob(target formolv1alpha1.Target) (resul
 }
 
 func (r *BackupSessionReconciler) backupSnapshot(target formolv1alpha1.Target) (e error) {
-	targetObject, targetPodSpec := formolv1alpha1.GetTargetObjects(target.TargetKind)
+	targetObject, targetPodSpec, _ := formolv1alpha1.GetTargetObjects(target.TargetKind)
 	if err := r.Get(r.Context, client.ObjectKey{
 		Namespace: r.Namespace,
 		Name:      target.TargetName,
@@ -81,6 +81,25 @@ func (r *BackupSessionReconciler) backupSnapshot(target formolv1alpha1.Target) (
 		Name:  formolv1alpha1.BACKUP_PATHS,
 		Value: strings.Join(paths, string(os.PathListSeparator)),
 	})
+	// This is to make sure the backup job pod won't be scheduled on the same host as the target.
+	// Some filesystems don't accept being mounted twice on the same node (XFS for instance).
+	affinity := corev1.Affinity{
+		PodAntiAffinity: &corev1.PodAntiAffinity{
+			PreferredDuringSchedulingIgnoredDuringExecution: []corev1.WeightedPodAffinityTerm{
+				corev1.WeightedPodAffinityTerm{
+					Weight: 50,
+					PodAffinityTerm: corev1.PodAffinityTerm{
+						LabelSelector: &metav1.LabelSelector{
+							MatchLabels: map[string]string{
+								formolv1alpha1.FORMOL_LABEL: target.TargetName,
+							},
+						},
+						TopologyKey: "kubernetes.io/hostname",
+					},
+				},
+			},
+		},
+	}
 	job := batchv1.Job{
 		ObjectMeta: metav1.ObjectMeta{
 			Namespace: r.Namespace,
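A note on the design choice: PreferredDuringSchedulingIgnoredDuringExecution is a soft constraint, so on a single-node cluster, or when every other node is full, the backup pod can still land next to the target and the XFS double-mount would still fail. If the separation had to be absolute, a hard rule would look like the sketch below (hypothetical, not in this commit; it reuses the identifiers in scope in backupSnapshot above):

	// Hypothetical hard variant: the Job stays Pending rather than ever
	// sharing a node with the target pod.
	affinity := corev1.Affinity{
		PodAntiAffinity: &corev1.PodAntiAffinity{
			RequiredDuringSchedulingIgnoredDuringExecution: []corev1.PodAffinityTerm{
				{
					LabelSelector: &metav1.LabelSelector{
						MatchLabels: map[string]string{
							formolv1alpha1.FORMOL_LABEL: target.TargetName,
						},
					},
					TopologyKey: "kubernetes.io/hostname",
				},
			},
		},
	}

The committed soft form trades guaranteed separation for a Job that always schedules; when co-location does happen, the backup fails at mount time instead of sitting Pending.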
@@ -95,6 +114,7 @@ func (r *BackupSessionReconciler) backupSnapshot(target formolv1alpha1.Target) (
 					sidecar,
 				},
 				RestartPolicy: corev1.RestartPolicyNever,
+				Affinity:      &affinity,
 			},
 		},
 	},
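Since the hunks only show fragments, here is a schematic of the nesting path from the Job down to where &affinity lands (an assumption about the surrounding structure, not a copy of the file; metadata is placeholder and container details are elided):

	job := batchv1.Job{
		ObjectMeta: metav1.ObjectMeta{Namespace: "ns", Name: "backup-demo"}, // placeholders
		Spec: batchv1.JobSpec{
			Template: corev1.PodTemplateSpec{
				Spec: corev1.PodSpec{
					Containers:    []corev1.Container{ /* sidecar elided */ },
					RestartPolicy: corev1.RestartPolicyNever,
					Affinity:      &affinity, // the anti-affinity built earlier
				},
			},
		},
	}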
@@ -11,7 +11,7 @@ func (r *RestoreSessionReconciler) restoreInitContainer(target formolv1alpha1.Ta
 	// The restore has to be done by an initContainer since the data is mounted RO
 	// We create the initContainer here
 	// Once the container has rebooted and the initContainer has done its job, it will change the restoreTargetStatus to Waiting.
-	targetObject, targetPodSpec := formolv1alpha1.GetTargetObjects(target.TargetKind)
+	targetObject, targetPodSpec, _ := formolv1alpha1.GetTargetObjects(target.TargetKind)
 	if err := r.Get(r.Context, client.ObjectKey{
 		Namespace: r.backupConf.Namespace,
 		Name:      target.TargetName,
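The comments in this hunk carry the restore design: the running container mounts the data read-only, so the restore writes must happen in an initContainer, which runs to completion before the app container starts. A schematic sketch of injecting such an initContainer into a pod spec (name, image, and volume are hypothetical; the real controller works on the pod spec returned by GetTargetObjects):

	podSpec := corev1.PodSpec{} // stands in for the target's pod template spec
	podSpec.InitContainers = append(podSpec.InitContainers, corev1.Container{
		Name:  "formol-restore",          // hypothetical
		Image: "example.org/restore:dev", // hypothetical
		VolumeMounts: []corev1.VolumeMount{
			// the same volume the app container mounts read-only; the
			// initContainer can still write to it before the RO consumer starts
			{Name: "data", MountPath: "/data"},
		},
	})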
@@ -154,7 +154,7 @@ func StartRestore(
 		return
 	}
 	log.V(0).Info("restore over. removing the initContainer")
-	targetObject, targetPodSpec := formolv1alpha1.GetTargetObjects(target.TargetKind)
+	targetObject, targetPodSpec, _ := formolv1alpha1.GetTargetObjects(target.TargetKind)
 	if err := session.Get(session.Context, client.ObjectKey{
 		Namespace: restoreSessionNamespace,
 		Name:      target.TargetName,