Compare commits

...

7 Commits

Author SHA1 Message Date
Jean-Marc ANDRE
d850651344 make the submodule point to the git repo 2023-07-24 10:43:20 +02:00
Jean-Marc ANDRE
910dbbbbd5 Restic repository now includes the targetName to avoid concurrency when multiple targets are doing backup simultaneously 2023-04-28 15:05:54 +02:00
Jean-Marc ANDRE
05cb6bd1cb formolcli backup not null exit code when it fails 2023-04-28 14:24:42 +02:00
Jean-Marc ANDRE
c415a956ed sync with formol 2023-04-27 15:43:04 +02:00
Jean-Marc ANDRE
72390d9564 Pod anti affinity to avoid backup pod to be scheduled on the same node as the target pod 2023-04-27 15:41:00 +02:00
Jean-Marc ANDRE
bf2d50c41a We don't want to snapshot the same volume multiple times (subpath) 2023-04-27 15:37:15 +02:00
Jean-Marc ANDRE
e19e1775e8 sub module again... 2023-04-25 16:38:02 +02:00
9 changed files with 51 additions and 20 deletions

4
.gitmodules vendored
View File

@ -1,3 +1,5 @@
[submodule "formol"] [submodule "formol"]
path = formol path = formol
url = ../formol # url = ../formol
url = ssh://git@git.desmojim.fr:2222/jandre/formol.git
# url = git@github.com:desmo999r/formol.git

View File

@ -29,11 +29,14 @@ var createBackupSessionCmd = &cobra.Command{
var backupCmd = &cobra.Command{ var backupCmd = &cobra.Command{
Use: "backup", Use: "backup",
Short: "Backup paths", Short: "Backup paths",
Run: func(cmd *cobra.Command, args []string) { RunE: func(cmd *cobra.Command, args []string) error {
backupSessionName, _ := cmd.Flags().GetString("name") backupSessionName, _ := cmd.Flags().GetString("name")
backupSessionNamespace, _ := cmd.Flags().GetString("namespace") backupSessionNamespace, _ := cmd.Flags().GetString("namespace")
targetName, _ := cmd.Flags().GetString("target-name") targetName, _ := cmd.Flags().GetString("target-name")
standalone.BackupPaths(backupSessionName, backupSessionNamespace, targetName, args...) if err := standalone.BackupPaths(backupSessionName, backupSessionNamespace, targetName, args...); err != nil {
return err
}
return nil
}, },
} }
@ -78,8 +81,9 @@ var deleteSnapshotCmd = &cobra.Command{
Run: func(cmd *cobra.Command, args []string) { Run: func(cmd *cobra.Command, args []string) {
name, _ := cmd.Flags().GetString("name") name, _ := cmd.Flags().GetString("name")
namespace, _ := cmd.Flags().GetString("namespace") namespace, _ := cmd.Flags().GetString("namespace")
targetName, _ := cmd.Flags().GetString("target-name")
snapshotId, _ := cmd.Flags().GetString("snapshot-id") snapshotId, _ := cmd.Flags().GetString("snapshot-id")
standalone.DeleteSnapshot(namespace, name, snapshotId) standalone.DeleteSnapshot(namespace, name, targetName, snapshotId)
}, },
} }
@ -135,7 +139,9 @@ func init() {
deleteSnapshotCmd.Flags().String("snapshot-id", "", "The snapshot id to delete") deleteSnapshotCmd.Flags().String("snapshot-id", "", "The snapshot id to delete")
deleteSnapshotCmd.Flags().String("namespace", "", "The namespace of the BackupConfiguration containing the information about the backup.") deleteSnapshotCmd.Flags().String("namespace", "", "The namespace of the BackupConfiguration containing the information about the backup.")
deleteSnapshotCmd.Flags().String("name", "", "The name of the BackupConfiguration containing the information about the backup.") deleteSnapshotCmd.Flags().String("name", "", "The name of the BackupConfiguration containing the information about the backup.")
deleteSnapshotCmd.Flags().String("target-name", "", "The name of the backup target.")
deleteSnapshotCmd.MarkFlagRequired("snapshot-id") deleteSnapshotCmd.MarkFlagRequired("snapshot-id")
deleteSnapshotCmd.MarkFlagRequired("namespace") deleteSnapshotCmd.MarkFlagRequired("namespace")
deleteSnapshotCmd.MarkFlagRequired("name") deleteSnapshotCmd.MarkFlagRequired("name")
deleteSnapshotCmd.MarkFlagRequired("target-name")
} }

View File

@ -72,7 +72,7 @@ func (r *BackupSessionReconciler) Reconcile(ctx context.Context, req ctrl.Reques
} }
// Do preliminary checks with the repository // Do preliminary checks with the repository
if err = r.SetResticEnv(backupConf); err != nil { if err = r.SetResticEnv(backupConf, target.TargetName); err != nil {
r.Log.Error(err, "unable to set restic env") r.Log.Error(err, "unable to set restic env")
return ctrl.Result{}, err return ctrl.Result{}, err
} }

View File

@ -41,7 +41,7 @@ func (r *BackupSessionReconciler) backupJob(target formolv1alpha1.Target) (resul
} }
func (r *BackupSessionReconciler) backupSnapshot(target formolv1alpha1.Target) (e error) { func (r *BackupSessionReconciler) backupSnapshot(target formolv1alpha1.Target) (e error) {
targetObject, targetPodSpec := formolv1alpha1.GetTargetObjects(target.TargetKind) targetObject, targetPodSpec, _ := formolv1alpha1.GetTargetObjects(target.TargetKind)
if err := r.Get(r.Context, client.ObjectKey{ if err := r.Get(r.Context, client.ObjectKey{
Namespace: r.Namespace, Namespace: r.Namespace,
Name: target.TargetName, Name: target.TargetName,
@ -71,7 +71,7 @@ func (r *BackupSessionReconciler) backupSnapshot(target formolv1alpha1.Target) (
sidecar := formolv1alpha1.GetSidecar(r.backupConf, target) sidecar := formolv1alpha1.GetSidecar(r.backupConf, target)
sidecar.Args = append([]string{"backupsession", "backup", "--namespace", r.Namespace, "--name", r.Name, "--target-name", target.TargetName}, paths...) sidecar.Args = append([]string{"backupsession", "backup", "--namespace", r.Namespace, "--name", r.Name, "--target-name", target.TargetName}, paths...)
sidecar.VolumeMounts = vms sidecar.VolumeMounts = vms
if env, err := r.getResticEnv(r.backupConf); err != nil { if env, err := r.getResticEnv(r.backupConf, target.TargetName); err != nil {
r.Log.Error(err, "unable to get restic env") r.Log.Error(err, "unable to get restic env")
return err return err
} else { } else {
@ -81,6 +81,25 @@ func (r *BackupSessionReconciler) backupSnapshot(target formolv1alpha1.Target) (
Name: formolv1alpha1.BACKUP_PATHS, Name: formolv1alpha1.BACKUP_PATHS,
Value: strings.Join(paths, string(os.PathListSeparator)), Value: strings.Join(paths, string(os.PathListSeparator)),
}) })
// This is to make sure the backup job pod won't be scheduled on the same host as the target
// Some filesystem don't accept to be mounted twice on the same node (XFS for instance).
affinity := corev1.Affinity{
PodAntiAffinity: &corev1.PodAntiAffinity{
PreferredDuringSchedulingIgnoredDuringExecution: []corev1.WeightedPodAffinityTerm{
corev1.WeightedPodAffinityTerm{
Weight: 50,
PodAffinityTerm: corev1.PodAffinityTerm{
LabelSelector: &metav1.LabelSelector{
MatchLabels: map[string]string{
formolv1alpha1.FORMOL_LABEL: target.TargetName,
},
},
TopologyKey: "kubernetes.io/hostname",
},
},
},
},
}
job := batchv1.Job{ job := batchv1.Job{
ObjectMeta: metav1.ObjectMeta{ ObjectMeta: metav1.ObjectMeta{
Namespace: r.Namespace, Namespace: r.Namespace,
@ -95,6 +114,7 @@ func (r *BackupSessionReconciler) backupSnapshot(target formolv1alpha1.Target) (
sidecar, sidecar,
}, },
RestartPolicy: corev1.RestartPolicyNever, RestartPolicy: corev1.RestartPolicyNever,
Affinity: &affinity,
}, },
}, },
}, },
@ -257,9 +277,10 @@ func (r *BackupSessionReconciler) createVolumeFromSnapshot(vs *volumesnapshotv1.
func (r *BackupSessionReconciler) snapshotVolumes(vms []corev1.VolumeMount, podSpec *corev1.PodSpec) (err error) { func (r *BackupSessionReconciler) snapshotVolumes(vms []corev1.VolumeMount, podSpec *corev1.PodSpec) (err error) {
// We snapshot/check all the volumes. If at least one of the snapshot is not ready to use. We reschedule. // We snapshot/check all the volumes. If at least one of the snapshot is not ready to use. We reschedule.
alreadySnapshoted := make(map[string]struct{})
for _, vm := range vms { for _, vm := range vms {
for i, volume := range podSpec.Volumes { for i, volume := range podSpec.Volumes {
if vm.Name == volume.Name { if _, done := alreadySnapshoted[volume.Name]; done == false && vm.Name == volume.Name {
var vs *volumesnapshotv1.VolumeSnapshot var vs *volumesnapshotv1.VolumeSnapshot
vs, err = r.snapshotVolume(volume) vs, err = r.snapshotVolume(volume)
if IsNotReadyToUse(err) { if IsNotReadyToUse(err) {
@ -273,6 +294,7 @@ func (r *BackupSessionReconciler) snapshotVolumes(vms []corev1.VolumeMount, podS
} }
if vs != nil { if vs != nil {
// The snapshot is ready. We create a PVC from it. // The snapshot is ready. We create a PVC from it.
alreadySnapshoted[volume.Name] = struct{}{}
backupPVCName, err := r.createVolumeFromSnapshot(vs) backupPVCName, err := r.createVolumeFromSnapshot(vs)
if err != nil { if err != nil {
r.Log.Error(err, "unable to create volume from snapshot", "vs", vs) r.Log.Error(err, "unable to create volume from snapshot", "vs", vs)

View File

@ -69,7 +69,7 @@ func (r *RestoreSessionReconciler) Reconcile(ctx context.Context, req ctrl.Reque
} }
// Do preliminary checks with the repository // Do preliminary checks with the repository
if err = r.SetResticEnv(backupConf); err != nil { if err = r.SetResticEnv(backupConf, target.TargetName); err != nil {
r.Log.Error(err, "unable to set restic env") r.Log.Error(err, "unable to set restic env")
return ctrl.Result{}, err return ctrl.Result{}, err
} }

View File

@ -11,7 +11,7 @@ func (r *RestoreSessionReconciler) restoreInitContainer(target formolv1alpha1.Ta
// The restore has to be done by an initContainer since the data is mounted RO // The restore has to be done by an initContainer since the data is mounted RO
// We create the initContainer here // We create the initContainer here
// Once the the container has rebooted and the initContainer has done its job, it will change the restoreTargetStatus to Waiting. // Once the the container has rebooted and the initContainer has done its job, it will change the restoreTargetStatus to Waiting.
targetObject, targetPodSpec := formolv1alpha1.GetTargetObjects(target.TargetKind) targetObject, targetPodSpec, _ := formolv1alpha1.GetTargetObjects(target.TargetKind)
if err := r.Get(r.Context, client.ObjectKey{ if err := r.Get(r.Context, client.ObjectKey{
Namespace: r.backupConf.Namespace, Namespace: r.backupConf.Namespace,
Name: target.TargetName, Name: target.TargetName,
@ -34,7 +34,7 @@ func (r *RestoreSessionReconciler) restoreInitContainer(target formolv1alpha1.Ta
for i, _ := range initContainer.VolumeMounts { for i, _ := range initContainer.VolumeMounts {
initContainer.VolumeMounts[i].ReadOnly = false initContainer.VolumeMounts[i].ReadOnly = false
} }
if env, err := r.getResticEnv(r.backupConf); err != nil { if env, err := r.getResticEnv(r.backupConf, target.TargetName); err != nil {
r.Log.Error(err, "unable to get restic env") r.Log.Error(err, "unable to get restic env")
return err return err
} else { } else {

View File

@ -40,7 +40,7 @@ const (
RESTIC_EXEC = "/usr/bin/restic" RESTIC_EXEC = "/usr/bin/restic"
) )
func (s Session) getResticEnv(backupConf formolv1alpha1.BackupConfiguration) (envs []corev1.EnvVar, err error) { func (s Session) getResticEnv(backupConf formolv1alpha1.BackupConfiguration, targetName string) (envs []corev1.EnvVar, err error) {
repo := formolv1alpha1.Repo{} repo := formolv1alpha1.Repo{}
if err = s.Get(s.Context, client.ObjectKey{ if err = s.Get(s.Context, client.ObjectKey{
Namespace: backupConf.Namespace, Namespace: backupConf.Namespace,
@ -52,11 +52,12 @@ func (s Session) getResticEnv(backupConf formolv1alpha1.BackupConfiguration) (en
if repo.Spec.Backend.S3 != nil { if repo.Spec.Backend.S3 != nil {
envs = append(envs, corev1.EnvVar{ envs = append(envs, corev1.EnvVar{
Name: formolv1alpha1.RESTIC_REPOSITORY, Name: formolv1alpha1.RESTIC_REPOSITORY,
Value: fmt.Sprintf("s3:http://%s/%s/%s-%s", Value: fmt.Sprintf("s3:http://%s/%s/%s-%s/%s",
repo.Spec.Backend.S3.Server, repo.Spec.Backend.S3.Server,
repo.Spec.Backend.S3.Bucket, repo.Spec.Backend.S3.Bucket,
strings.ToUpper(backupConf.Namespace), strings.ToUpper(backupConf.Namespace),
strings.ToLower(backupConf.Name)), strings.ToLower(backupConf.Name),
targetName),
}) })
data := s.getSecretData(repo.Spec.RepositorySecrets) data := s.getSecretData(repo.Spec.RepositorySecrets)
@ -76,8 +77,8 @@ func (s Session) getResticEnv(backupConf formolv1alpha1.BackupConfiguration) (en
return return
} }
func (s Session) SetResticEnv(backupConf formolv1alpha1.BackupConfiguration) error { func (s Session) SetResticEnv(backupConf formolv1alpha1.BackupConfiguration, targetName string) error {
envs, err := s.getResticEnv(backupConf) envs, err := s.getResticEnv(backupConf, targetName)
for _, env := range envs { for _, env := range envs {
os.Setenv(env.Name, env.Value) os.Setenv(env.Name, env.Value)
} }

2
formol

@ -1 +1 @@
Subproject commit ea1c1bd2e31cc6f67621ed71659e738ca5f5d8c8 Subproject commit 6f150cc36de7f879e2ddd89d126de84b77af0651

View File

@ -154,7 +154,7 @@ func StartRestore(
return return
} }
log.V(0).Info("restore over. removing the initContainer") log.V(0).Info("restore over. removing the initContainer")
targetObject, targetPodSpec := formolv1alpha1.GetTargetObjects(target.TargetKind) targetObject, targetPodSpec, _ := formolv1alpha1.GetTargetObjects(target.TargetKind)
if err := session.Get(session.Context, client.ObjectKey{ if err := session.Get(session.Context, client.ObjectKey{
Namespace: restoreSessionNamespace, Namespace: restoreSessionNamespace,
Name: target.TargetName, Name: target.TargetName,
@ -199,7 +199,7 @@ func CreateBackupSession(ref corev1.ObjectReference) {
} }
} }
func DeleteSnapshot(namespace string, name string, snapshotId string) { func DeleteSnapshot(namespace string, name string, targetName string, snapshotId string) {
log := session.Log.WithName("DeleteSnapshot") log := session.Log.WithName("DeleteSnapshot")
session.Namespace = namespace session.Namespace = namespace
backupConf := formolv1alpha1.BackupConfiguration{} backupConf := formolv1alpha1.BackupConfiguration{}
@ -210,7 +210,7 @@ func DeleteSnapshot(namespace string, name string, snapshotId string) {
log.Error(err, "unable to get the BackupConf") log.Error(err, "unable to get the BackupConf")
return return
} }
if err := session.SetResticEnv(backupConf); err != nil { if err := session.SetResticEnv(backupConf, targetName); err != nil {
log.Error(err, "unable to set the restic env") log.Error(err, "unable to set the restic env")
return return
} }