Compare commits

..

No commits in common. "master" and "v1.0.0" have entirely different histories.

9 changed files with 20 additions and 51 deletions

4
.gitmodules vendored
View File

@@ -1,5 +1,3 @@
[submodule "formol"]
path = formol
# url = ../formol
url = ssh://git@git.desmojim.fr:2222/jandre/formol.git
# url = git@github.com:desmo999r/formol.git
url = ../formol

View File

@@ -29,14 +29,11 @@ var createBackupSessionCmd = &cobra.Command{
var backupCmd = &cobra.Command{
Use: "backup",
Short: "Backup paths",
RunE: func(cmd *cobra.Command, args []string) error {
Run: func(cmd *cobra.Command, args []string) {
backupSessionName, _ := cmd.Flags().GetString("name")
backupSessionNamespace, _ := cmd.Flags().GetString("namespace")
targetName, _ := cmd.Flags().GetString("target-name")
if err := standalone.BackupPaths(backupSessionName, backupSessionNamespace, targetName, args...); err != nil {
return err
}
return nil
standalone.BackupPaths(backupSessionName, backupSessionNamespace, targetName, args...)
},
}
@@ -81,9 +78,8 @@ var deleteSnapshotCmd = &cobra.Command{
Run: func(cmd *cobra.Command, args []string) {
name, _ := cmd.Flags().GetString("name")
namespace, _ := cmd.Flags().GetString("namespace")
targetName, _ := cmd.Flags().GetString("target-name")
snapshotId, _ := cmd.Flags().GetString("snapshot-id")
standalone.DeleteSnapshot(namespace, name, targetName, snapshotId)
standalone.DeleteSnapshot(namespace, name, snapshotId)
},
}
@@ -139,9 +135,7 @@ func init() {
deleteSnapshotCmd.Flags().String("snapshot-id", "", "The snapshot id to delete")
deleteSnapshotCmd.Flags().String("namespace", "", "The namespace of the BackupConfiguration containing the information about the backup.")
deleteSnapshotCmd.Flags().String("name", "", "The name of the BackupConfiguration containing the information about the backup.")
deleteSnapshotCmd.Flags().String("target-name", "", "The name of the backup target.")
deleteSnapshotCmd.MarkFlagRequired("snapshot-id")
deleteSnapshotCmd.MarkFlagRequired("namespace")
deleteSnapshotCmd.MarkFlagRequired("name")
deleteSnapshotCmd.MarkFlagRequired("target-name")
}

View File

@@ -72,7 +72,7 @@ func (r *BackupSessionReconciler) Reconcile(ctx context.Context, req ctrl.Reques
}
// Do preliminary checks with the repository
if err = r.SetResticEnv(backupConf, target.TargetName); err != nil {
if err = r.SetResticEnv(backupConf); err != nil {
r.Log.Error(err, "unable to set restic env")
return ctrl.Result{}, err
}

View File

@@ -41,7 +41,7 @@ func (r *BackupSessionReconciler) backupJob(target formolv1alpha1.Target) (resul
}
func (r *BackupSessionReconciler) backupSnapshot(target formolv1alpha1.Target) (e error) {
targetObject, targetPodSpec, _ := formolv1alpha1.GetTargetObjects(target.TargetKind)
targetObject, targetPodSpec := formolv1alpha1.GetTargetObjects(target.TargetKind)
if err := r.Get(r.Context, client.ObjectKey{
Namespace: r.Namespace,
Name: target.TargetName,
@@ -71,7 +71,7 @@ func (r *BackupSessionReconciler) backupSnapshot(target formolv1alpha1.Target) (
sidecar := formolv1alpha1.GetSidecar(r.backupConf, target)
sidecar.Args = append([]string{"backupsession", "backup", "--namespace", r.Namespace, "--name", r.Name, "--target-name", target.TargetName}, paths...)
sidecar.VolumeMounts = vms
if env, err := r.getResticEnv(r.backupConf, target.TargetName); err != nil {
if env, err := r.getResticEnv(r.backupConf); err != nil {
r.Log.Error(err, "unable to get restic env")
return err
} else {
@@ -81,25 +81,6 @@ func (r *BackupSessionReconciler) backupSnapshot(target formolv1alpha1.Target) (
Name: formolv1alpha1.BACKUP_PATHS,
Value: strings.Join(paths, string(os.PathListSeparator)),
})
// This is to make sure the backup job pod won't be scheduled on the same host as the target
// Some filesystem don't accept to be mounted twice on the same node (XFS for instance).
affinity := corev1.Affinity{
PodAntiAffinity: &corev1.PodAntiAffinity{
PreferredDuringSchedulingIgnoredDuringExecution: []corev1.WeightedPodAffinityTerm{
corev1.WeightedPodAffinityTerm{
Weight: 50,
PodAffinityTerm: corev1.PodAffinityTerm{
LabelSelector: &metav1.LabelSelector{
MatchLabels: map[string]string{
formolv1alpha1.FORMOL_LABEL: target.TargetName,
},
},
TopologyKey: "kubernetes.io/hostname",
},
},
},
},
}
job := batchv1.Job{
ObjectMeta: metav1.ObjectMeta{
Namespace: r.Namespace,
@@ -114,7 +95,6 @@ func (r *BackupSessionReconciler) backupSnapshot(target formolv1alpha1.Target) (
sidecar,
},
RestartPolicy: corev1.RestartPolicyNever,
Affinity: &affinity,
},
},
},
@@ -277,10 +257,9 @@ func (r *BackupSessionReconciler) createVolumeFromSnapshot(vs *volumesnapshotv1.
func (r *BackupSessionReconciler) snapshotVolumes(vms []corev1.VolumeMount, podSpec *corev1.PodSpec) (err error) {
// We snapshot/check all the volumes. If at least one of the snapshot is not ready to use. We reschedule.
alreadySnapshoted := make(map[string]struct{})
for _, vm := range vms {
for i, volume := range podSpec.Volumes {
if _, done := alreadySnapshoted[volume.Name]; done == false && vm.Name == volume.Name {
if vm.Name == volume.Name {
var vs *volumesnapshotv1.VolumeSnapshot
vs, err = r.snapshotVolume(volume)
if IsNotReadyToUse(err) {
@ -294,7 +273,6 @@ func (r *BackupSessionReconciler) snapshotVolumes(vms []corev1.VolumeMount, podS
}
if vs != nil {
// The snapshot is ready. We create a PVC from it.
alreadySnapshoted[volume.Name] = struct{}{}
backupPVCName, err := r.createVolumeFromSnapshot(vs)
if err != nil {
r.Log.Error(err, "unable to create volume from snapshot", "vs", vs)

View File

@@ -69,7 +69,7 @@ func (r *RestoreSessionReconciler) Reconcile(ctx context.Context, req ctrl.Reque
}
// Do preliminary checks with the repository
if err = r.SetResticEnv(backupConf, target.TargetName); err != nil {
if err = r.SetResticEnv(backupConf); err != nil {
r.Log.Error(err, "unable to set restic env")
return ctrl.Result{}, err
}

View File

@@ -11,7 +11,7 @@ func (r *RestoreSessionReconciler) restoreInitContainer(target formolv1alpha1.Ta
// The restore has to be done by an initContainer since the data is mounted RO
// We create the initContainer here
// Once the container has rebooted and the initContainer has done its job, it will change the restoreTargetStatus to Waiting.
targetObject, targetPodSpec, _ := formolv1alpha1.GetTargetObjects(target.TargetKind)
targetObject, targetPodSpec := formolv1alpha1.GetTargetObjects(target.TargetKind)
if err := r.Get(r.Context, client.ObjectKey{
Namespace: r.backupConf.Namespace,
Name: target.TargetName,
@@ -34,7 +34,7 @@ func (r *RestoreSessionReconciler) restoreInitContainer(target formolv1alpha1.Ta
for i, _ := range initContainer.VolumeMounts {
initContainer.VolumeMounts[i].ReadOnly = false
}
if env, err := r.getResticEnv(r.backupConf, target.TargetName); err != nil {
if env, err := r.getResticEnv(r.backupConf); err != nil {
r.Log.Error(err, "unable to get restic env")
return err
} else {

View File

@@ -40,7 +40,7 @@ const (
RESTIC_EXEC = "/usr/bin/restic"
)
func (s Session) getResticEnv(backupConf formolv1alpha1.BackupConfiguration, targetName string) (envs []corev1.EnvVar, err error) {
func (s Session) getResticEnv(backupConf formolv1alpha1.BackupConfiguration) (envs []corev1.EnvVar, err error) {
repo := formolv1alpha1.Repo{}
if err = s.Get(s.Context, client.ObjectKey{
Namespace: backupConf.Namespace,
@@ -52,12 +52,11 @@ func (s Session) getResticEnv(backupConf formolv1alpha1.BackupConfiguration, tar
if repo.Spec.Backend.S3 != nil {
envs = append(envs, corev1.EnvVar{
Name: formolv1alpha1.RESTIC_REPOSITORY,
Value: fmt.Sprintf("s3:http://%s/%s/%s-%s/%s",
Value: fmt.Sprintf("s3:http://%s/%s/%s-%s",
repo.Spec.Backend.S3.Server,
repo.Spec.Backend.S3.Bucket,
strings.ToUpper(backupConf.Namespace),
strings.ToLower(backupConf.Name),
targetName),
strings.ToLower(backupConf.Name)),
})
data := s.getSecretData(repo.Spec.RepositorySecrets)
@@ -77,8 +76,8 @@ func (s Session) getResticEnv(backupConf formolv1alpha1.BackupConfiguration, tar
return
}
func (s Session) SetResticEnv(backupConf formolv1alpha1.BackupConfiguration, targetName string) error {
envs, err := s.getResticEnv(backupConf, targetName)
func (s Session) SetResticEnv(backupConf formolv1alpha1.BackupConfiguration) error {
envs, err := s.getResticEnv(backupConf)
for _, env := range envs {
os.Setenv(env.Name, env.Value)
}

2
formol

@@ -1 +1 @@
Subproject commit 6f150cc36de7f879e2ddd89d126de84b77af0651
Subproject commit ea1c1bd2e31cc6f67621ed71659e738ca5f5d8c8

View File

@@ -154,7 +154,7 @@ func StartRestore(
return
}
log.V(0).Info("restore over. removing the initContainer")
targetObject, targetPodSpec, _ := formolv1alpha1.GetTargetObjects(target.TargetKind)
targetObject, targetPodSpec := formolv1alpha1.GetTargetObjects(target.TargetKind)
if err := session.Get(session.Context, client.ObjectKey{
Namespace: restoreSessionNamespace,
Name: target.TargetName,
@@ -199,7 +199,7 @@ func CreateBackupSession(ref corev1.ObjectReference) {
}
}
func DeleteSnapshot(namespace string, name string, targetName string, snapshotId string) {
func DeleteSnapshot(namespace string, name string, snapshotId string) {
log := session.Log.WithName("DeleteSnapshot")
session.Namespace = namespace
backupConf := formolv1alpha1.BackupConfiguration{}
@@ -210,7 +210,7 @@ func DeleteSnapshot(namespace string, name string, targetName string, snapshotId
log.Error(err, "unable to get the BackupConf")
return
}
if err := session.SetResticEnv(backupConf, targetName); err != nil {
if err := session.SetResticEnv(backupConf); err != nil {
log.Error(err, "unable to set the restic env")
return
}