Restore OnlineKind
parent 86417391d7
commit 8ea4e3bffe

cmd/root.go | 28
@@ -6,7 +6,7 @@ package cmd
 import (
     "fmt"
     "github.com/desmo999r/formolcli/controllers"
-    "github.com/desmo999r/formolcli/session"
+    "github.com/desmo999r/formolcli/standalone"
     "github.com/spf13/cobra"
     corev1 "k8s.io/api/core/v1"
     "os"
@@ -19,13 +19,24 @@ var createBackupSessionCmd = &cobra.Command{
         name, _ := cmd.Flags().GetString("name")
         namespace, _ := cmd.Flags().GetString("namespace")
         fmt.Println("create backupsession called")
-        session.CreateBackupSession(corev1.ObjectReference{
+        standalone.CreateBackupSession(corev1.ObjectReference{
             Namespace: namespace,
             Name:      name,
         })
     },
 }
 
+var startRestoreSessionCmd = &cobra.Command{
+    Use:   "start",
+    Short: "Restore a restic snapshot",
+    Run: func(cmd *cobra.Command, args []string) {
+        restoreSessionName, _ := cmd.Flags().GetString("name")
+        restoreSessionNamespace, _ := cmd.Flags().GetString("namespace")
+        targetName, _ := cmd.Flags().GetString("target-name")
+        standalone.StartRestore(restoreSessionName, restoreSessionNamespace, targetName)
+    },
+}
+
 var startServerCmd = &cobra.Command{
     Use:   "server",
     Short: "Start a BackupSession / RestoreSession controller",
@@ -35,6 +46,11 @@ var startServerCmd = &cobra.Command{
     },
 }
 
+var restoreSessionCmd = &cobra.Command{
+    Use:   "restoresession",
+    Short: "All the RestoreSession related commands",
+}
+
 var backupSessionCmd = &cobra.Command{
     Use:   "backupsession",
     Short: "All the BackupSession related commands",
@@ -66,10 +82,18 @@ func Execute() {
 
 func init() {
     rootCmd.AddCommand(backupSessionCmd)
+    rootCmd.AddCommand(restoreSessionCmd)
     backupSessionCmd.AddCommand(createBackupSessionCmd)
+    restoreSessionCmd.AddCommand(startRestoreSessionCmd)
     rootCmd.AddCommand(startServerCmd)
     createBackupSessionCmd.Flags().String("namespace", "", "The namespace of the BackupConfiguration containing the information about the backup.")
     createBackupSessionCmd.Flags().String("name", "", "The name of the BackupConfiguration containing the information about the backup.")
     createBackupSessionCmd.MarkFlagRequired("namespace")
     createBackupSessionCmd.MarkFlagRequired("name")
+    startRestoreSessionCmd.Flags().String("namespace", "", "The namespace of RestoreSession")
+    startRestoreSessionCmd.Flags().String("name", "", "The name of RestoreSession")
+    startRestoreSessionCmd.Flags().String("target-name", "", "The name of target being restored")
+    startRestoreSessionCmd.MarkFlagRequired("namespace")
+    startRestoreSessionCmd.MarkFlagRequired("name")
+    startRestoreSessionCmd.MarkFlagRequired("target-name")
 }
@@ -15,7 +15,7 @@ type BackupResult struct {
 }
 
 func (r *BackupSessionReconciler) backupPaths(tag string, paths []string) (result BackupResult, err error) {
-    if err = r.checkRepo(); err != nil {
+    if err = r.CheckRepo(); err != nil {
         r.Log.Error(err, "unable to setup repo", "repo", os.Getenv(formolv1alpha1.RESTIC_REPOSITORY))
         return
     }
@@ -12,6 +12,8 @@ import (
 
 type RestoreSessionReconciler struct {
     Session
+    backupConf     formolv1alpha1.BackupConfiguration
+    restoreSession formolv1alpha1.RestoreSession
 }
 
 func (r *RestoreSessionReconciler) Reconcile(ctx context.Context, req ctrl.Request) (ctrl.Result, error) {
@@ -30,6 +32,7 @@ func (r *RestoreSessionReconciler) Reconcile(ctx context.Context, req ctrl.Request) (ctrl.Result, error) {
         r.Log.V(0).Info("RestoreSession still being initialized by the main controller. Wait for the next update...")
         return ctrl.Result{}, nil
     }
+    r.restoreSession = restoreSession
     // We need the BackupConfiguration to get information about our restore target
     backupSession := formolv1alpha1.BackupSession{
         Spec: restoreSession.Spec.BackupSessionRef.Spec,
@@ -47,16 +50,17 @@ func (r *RestoreSessionReconciler) Reconcile(ctx context.Context, req ctrl.Request) (ctrl.Result, error) {
         return ctrl.Result{}, err
     }
     r.Namespace = backupConf.Namespace
+    r.backupConf = backupConf
 
     // we don't want a copy because we will modify and update it.
     var target formolv1alpha1.Target
-    var targetStatus *formolv1alpha1.TargetStatus
+    var restoreTargetStatus *formolv1alpha1.TargetStatus
     targetName := os.Getenv(formolv1alpha1.TARGET_NAME)
 
     for i, t := range backupConf.Spec.Targets {
         if t.TargetName == targetName {
             target = t
-            targetStatus = &(restoreSession.Status.Targets[i])
+            restoreTargetStatus = &(restoreSession.Status.Targets[i])
             break
         }
     }
@@ -68,7 +72,7 @@ func (r *RestoreSessionReconciler) Reconcile(ctx context.Context, req ctrl.Request) (ctrl.Result, error) {
     }
 
     var newSessionState formolv1alpha1.SessionState
-    switch targetStatus.SessionState {
+    switch restoreTargetStatus.SessionState {
     case formolv1alpha1.New:
         // New session move to Initializing
         r.Log.V(0).Info("New session. Move to Initializing state")
@@ -90,27 +94,14 @@ func (r *RestoreSessionReconciler) Reconcile(ctx context.Context, req ctrl.Request) (ctrl.Result, error) {
         switch target.BackupType {
         case formolv1alpha1.JobKind:
         case formolv1alpha1.OnlineKind:
-            // The restore has to be done by an initContainer since the data is mounted RO
-            // We create the initContainer here
-            // Once the the container has rebooted and the initContainer has done its job, it will change the targetStatus to Waiting.
-            targetObject, targetPodSpec := formolv1alpha1.GetTargetObjects(target.TargetKind)
-            if err := r.Get(r.Context, client.ObjectKey{
-                Namespace: backupConf.Namespace,
-                Name:      target.TargetName,
-            }, targetObject); err != nil {
-                r.Log.Error(err, "unable to get target objects", "target", target.TargetName)
-                return ctrl.Result{}, err
-            }
-            initContainer := corev1.Container{}
-            targetPodSpec.InitContainers = append(targetPodSpec.InitContainers, initContainer)
-            if err := r.Update(r.Context, targetObject); err != nil {
-                r.Log.Error(err, "unable to add the restore init container", "targetObject", targetObject)
+            if err := r.restoreInitContainer(target); err != nil {
+                r.Log.Error(err, "unable to create restore initContainer", "target", target)
                 return ctrl.Result{}, err
             }
         }
     }
     if newSessionState != "" {
-        targetStatus.SessionState = newSessionState
+        restoreTargetStatus.SessionState = newSessionState
         err := r.Status().Update(ctx, &restoreSession)
         if err != nil {
             r.Log.Error(err, "unable to update RestoreSession status")
controllers/restoresession_controller_helper.go | 56 (new file)
@@ -0,0 +1,56 @@
+package controllers
+
+import (
+    formolv1alpha1 "github.com/desmo999r/formol/api/v1alpha1"
+    corev1 "k8s.io/api/core/v1"
+    "sigs.k8s.io/controller-runtime/pkg/client"
+)
+
+func (r *RestoreSessionReconciler) restoreInitContainer(target formolv1alpha1.Target) error {
+    // The restore has to be done by an initContainer since the data is mounted RO.
+    // We create the initContainer here.
+    // Once the container has rebooted and the initContainer has done its job, it will change the restoreTargetStatus to Waiting.
+    targetObject, targetPodSpec := formolv1alpha1.GetTargetObjects(target.TargetKind)
+    if err := r.Get(r.Context, client.ObjectKey{
+        Namespace: r.backupConf.Namespace,
+        Name:      target.TargetName,
+    }, targetObject); err != nil {
+        r.Log.Error(err, "unable to get target objects", "target", target.TargetName)
+        return err
+    }
+    initContainer := corev1.Container{}
+    for _, c := range targetPodSpec.Containers {
+        if c.Name == formolv1alpha1.SIDECARCONTAINER_NAME {
+            // We copy the existing formol sidecar container to keep the VolumeMounts.
+            // We just have to change the name,
+            // change the VolumeMounts to RW,
+            // and change the command so the initContainer restores the snapshot.
+            c.DeepCopyInto(&initContainer)
+            break
+        }
+    }
+    initContainer.Name = formolv1alpha1.RESTORECONTAINER_NAME
+    for i := range initContainer.VolumeMounts {
+        initContainer.VolumeMounts[i].ReadOnly = false
+    }
+    if env, err := r.getResticEnv(r.backupConf); err != nil {
+        r.Log.Error(err, "unable to get restic env")
+        return err
+    } else {
+        initContainer.Env = append(initContainer.Env, env...)
+    }
+    initContainer.Args = []string{"restoresession", "start",
+        "--name", r.restoreSession.Name,
+        "--namespace", r.restoreSession.Namespace,
+        "--target-name", target.TargetName,
+    }
+    targetPodSpec.InitContainers = append(targetPodSpec.InitContainers, initContainer)
+    // This will kill this Pod and start a new one with the initContainer.
+    // The initContainer will restore the snapshot.
+    // If everything goes well, the initContainer will change the restoreTargetStatus to Waiting.
+    if err := r.Update(r.Context, targetObject); err != nil {
+        r.Log.Error(err, "unable to add the restore init container", "targetObject", targetObject)
+        return err
+    }
+    return nil
+}
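The Args above point the copied sidecar container at the CLI's new restoresession subcommand (added in cmd/root.go in this commit). Inside the restarted Pod, the init container therefore runs something equivalent to the following invocation; the binary name formolcli is an assumption here, since the actual entrypoint comes from the copied sidecar container spec:

    formolcli restoresession start --name <restoresession-name> --namespace <restoresession-namespace> --target-name <target-name>

All three flags are marked required in cmd/root.go, and they end up in standalone.StartRestore.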
@@ -33,30 +33,51 @@ const (
     RESTIC_EXEC = "/usr/bin/restic"
 )
 
-func (s Session) setResticEnv(backupConf formolv1alpha1.BackupConfiguration) error {
+func (s Session) getResticEnv(backupConf formolv1alpha1.BackupConfiguration) (envs []corev1.EnvVar, err error) {
     repo := formolv1alpha1.Repo{}
-    if err := s.Get(s.Context, client.ObjectKey{
+    if err = s.Get(s.Context, client.ObjectKey{
         Namespace: backupConf.Namespace,
         Name:      backupConf.Spec.Repository,
     }, &repo); err != nil {
         s.Log.Error(err, "unable to get repo")
-        return err
+        return
     }
     if repo.Spec.Backend.S3 != nil {
-        os.Setenv(formolv1alpha1.RESTIC_REPOSITORY, fmt.Sprintf("s3:http://%s/%s/%s-%s",
-            repo.Spec.Backend.S3.Server,
-            repo.Spec.Backend.S3.Bucket,
-            strings.ToUpper(backupConf.Namespace),
-            strings.ToLower(backupConf.Name)))
+        envs = append(envs, corev1.EnvVar{
+            Name: formolv1alpha1.RESTIC_REPOSITORY,
+            Value: fmt.Sprintf("s3:http://%s/%s/%s-%s",
+                repo.Spec.Backend.S3.Server,
+                repo.Spec.Backend.S3.Bucket,
+                strings.ToUpper(backupConf.Namespace),
+                strings.ToLower(backupConf.Name)),
+        })
+
         data := s.getSecretData(repo.Spec.RepositorySecrets)
-        os.Setenv(formolv1alpha1.AWS_SECRET_ACCESS_KEY, string(data[formolv1alpha1.AWS_SECRET_ACCESS_KEY]))
-        os.Setenv(formolv1alpha1.AWS_ACCESS_KEY_ID, string(data[formolv1alpha1.AWS_ACCESS_KEY_ID]))
-        os.Setenv(formolv1alpha1.RESTIC_PASSWORD, string(data[formolv1alpha1.RESTIC_PASSWORD]))
+        envs = append(envs, corev1.EnvVar{
+            Name:  formolv1alpha1.AWS_ACCESS_KEY_ID,
+            Value: string(data[formolv1alpha1.AWS_ACCESS_KEY_ID]),
+        })
+        envs = append(envs, corev1.EnvVar{
+            Name:  formolv1alpha1.AWS_SECRET_ACCESS_KEY,
+            Value: string(data[formolv1alpha1.AWS_SECRET_ACCESS_KEY]),
+        })
+        envs = append(envs, corev1.EnvVar{
+            Name:  formolv1alpha1.RESTIC_PASSWORD,
+            Value: string(data[formolv1alpha1.RESTIC_PASSWORD]),
+        })
     }
-    return nil
+    return
 }
 
-func (s Session) checkRepo() error {
+func (s Session) setResticEnv(backupConf formolv1alpha1.BackupConfiguration) error {
+    envs, err := s.getResticEnv(backupConf)
+    for _, env := range envs {
+        os.Setenv(env.Name, env.Value)
+    }
+    return err
+}
+
+func (s Session) CheckRepo() error {
     s.Log.V(0).Info("Checking repo")
     if err := exec.Command(RESTIC_EXEC, "unlock").Run(); err != nil {
         s.Log.Error(err, "unable to unlock repo", "repo", os.Getenv(formolv1alpha1.RESTIC_REPOSITORY))
formol | 2

@@ -1 +1 @@
-Subproject commit 7e007bfd44cc0041a74760c82b752aa2a93242fb
+Subproject commit b2d80d66ae6bca9e1bc5d04737998806296f8a93
@@ -1,41 +0,0 @@
-package session
-
-import (
-    formolv1alpha1 "github.com/desmo999r/formol/api/v1alpha1"
-    corev1 "k8s.io/api/core/v1"
-    metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
-    "k8s.io/apimachinery/pkg/types"
-    "os"
-    "strconv"
-    "strings"
-    "time"
-)
-
-func CreateBackupSession(ref corev1.ObjectReference) {
-    log := logger.WithName("CreateBackupSession")
-    log.V(0).Info("CreateBackupSession called")
-    backupConf := formolv1alpha1.BackupConfiguration{}
-    if err := cl.Get(ctx, types.NamespacedName{
-        Namespace: ref.Namespace,
-        Name:      ref.Name,
-    }, &backupConf); err != nil {
-        log.Error(err, "unable to get backupconf")
-        os.Exit(1)
-    }
-    log.V(0).Info("got backupConf", "backupConf", backupConf)
-
-    backupSession := &formolv1alpha1.BackupSession{
-        ObjectMeta: metav1.ObjectMeta{
-            Name:      strings.Join([]string{"backupsession", ref.Name, strconv.FormatInt(time.Now().Unix(), 10)}, "-"),
-            Namespace: ref.Namespace,
-        },
-        Spec: formolv1alpha1.BackupSessionSpec{
-            Ref: ref,
-        },
-    }
-    log.V(1).Info("create backupsession", "backupSession", backupSession)
-    if err := cl.Create(ctx, backupSession); err != nil {
-        log.Error(err, "unable to create backupsession")
-        os.Exit(1)
-    }
-}
@@ -1,47 +0,0 @@
-package session
-
-import (
-    "context"
-    formolv1alpha1 "github.com/desmo999r/formol/api/v1alpha1"
-    "github.com/go-logr/logr"
-    "k8s.io/apimachinery/pkg/runtime"
-    clientgoscheme "k8s.io/client-go/kubernetes/scheme"
-    "k8s.io/client-go/rest"
-    "k8s.io/client-go/tools/clientcmd"
-    "os"
-    "path/filepath"
-    ctrl "sigs.k8s.io/controller-runtime"
-    "sigs.k8s.io/controller-runtime/pkg/client"
-    "sigs.k8s.io/controller-runtime/pkg/log/zap"
-)
-
-var (
-    config *rest.Config
-    scheme *runtime.Scheme
-    cl     client.Client
-    logger logr.Logger
-    ctx    context.Context
-)
-
-func init() {
-    logger = zap.New(zap.UseDevMode(true))
-    ctx = context.Background()
-    log := logger.WithName("InitBackupSession")
-    ctrl.SetLogger(logger)
-    config, err := rest.InClusterConfig()
-    if err != nil {
-        config, err = clientcmd.BuildConfigFromFlags("", filepath.Join(os.Getenv("HOME"), ".kube", "config"))
-        if err != nil {
-            log.Error(err, "unable to get config")
-            os.Exit(1)
-        }
-    }
-    scheme = runtime.NewScheme()
-    _ = formolv1alpha1.AddToScheme(scheme)
-    _ = clientgoscheme.AddToScheme(scheme)
-    cl, err = client.New(config, client.Options{Scheme: scheme})
-    if err != nil {
-        log.Error(err, "unable to get client")
-        os.Exit(1)
-    }
-}
standalone/root.go | 122 (new file)
@@ -0,0 +1,122 @@
+package standalone
+
+import (
+    "context"
+    formolv1alpha1 "github.com/desmo999r/formol/api/v1alpha1"
+    "github.com/desmo999r/formolcli/controllers"
+    corev1 "k8s.io/api/core/v1"
+    metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+    "k8s.io/apimachinery/pkg/runtime"
+    "k8s.io/apimachinery/pkg/types"
+    clientgoscheme "k8s.io/client-go/kubernetes/scheme"
+    "k8s.io/client-go/rest"
+    "k8s.io/client-go/tools/clientcmd"
+    "os"
+    "os/exec"
+    "path/filepath"
+    ctrl "sigs.k8s.io/controller-runtime"
+    "sigs.k8s.io/controller-runtime/pkg/client"
+    "sigs.k8s.io/controller-runtime/pkg/log/zap"
+    "strconv"
+    "strings"
+    "time"
+)
+
+var (
+    session controllers.Session
+)
+
+func init() {
+    session.Log = zap.New(zap.UseDevMode(true))
+    session.Context = context.Background()
+    log := session.Log.WithName("InitBackupSession")
+    ctrl.SetLogger(session.Log)
+    config, err := rest.InClusterConfig()
+    if err != nil {
+        config, err = clientcmd.BuildConfigFromFlags("", filepath.Join(os.Getenv("HOME"), ".kube", "config"))
+        if err != nil {
+            log.Error(err, "unable to get config")
+            os.Exit(1)
+        }
+    }
+    session.Scheme = runtime.NewScheme()
+    _ = formolv1alpha1.AddToScheme(session.Scheme)
+    _ = clientgoscheme.AddToScheme(session.Scheme)
+    session.Client, err = client.New(config, client.Options{Scheme: session.Scheme})
+    if err != nil {
+        log.Error(err, "unable to get client")
+        os.Exit(1)
+    }
+}
+
+func StartRestore(
+    restoreSessionName string,
+    restoreSessionNamespace string,
+    targetName string) {
+    log := session.Log.WithName("StartRestore")
+    if err := session.CheckRepo(); err != nil {
+        log.Error(err, "unable to check Repo")
+        return
+    }
+    restoreSession := formolv1alpha1.RestoreSession{}
+    if err := session.Get(session.Context, client.ObjectKey{
+        Name:      restoreSessionName,
+        Namespace: restoreSessionNamespace,
+    }, &restoreSession); err != nil {
+        log.Error(err, "unable to get restoresession", "name", restoreSessionName, "namespace", restoreSessionNamespace)
+        return
+    }
+    backupSession := formolv1alpha1.BackupSession{
+        Spec:   restoreSession.Spec.BackupSessionRef.Spec,
+        Status: restoreSession.Spec.BackupSessionRef.Status,
+    }
+    for i, target := range backupSession.Status.Targets {
+        if target.TargetName == targetName {
+
+            log.V(0).Info("StartRestore called", "restoring snapshot", target.SnapshotId)
+            cmd := exec.Command(controllers.RESTIC_EXEC, "restore", target.SnapshotId, "--target", "/")
+            // the restic restore command does not support JSON output
+            if output, err := cmd.CombinedOutput(); err != nil {
+                log.Error(err, "unable to restore snapshot", "output", output)
+                restoreSession.Status.Targets[i].SessionState = formolv1alpha1.Failure
+            } else {
+                restoreSession.Status.Targets[i].SessionState = formolv1alpha1.Waiting
+                log.V(0).Info("restore was a success. Moving to waiting state", "target", target.TargetName)
+            }
+            if err := session.Status().Update(session.Context, &restoreSession); err != nil {
+                log.Error(err, "unable to update RestoreSession", "restoreSession", restoreSession)
+                return
+            }
+            break
+        }
+    }
+}
+
+func CreateBackupSession(ref corev1.ObjectReference) {
+    log := session.Log.WithName("CreateBackupSession")
+    log.V(0).Info("CreateBackupSession called")
+    backupConf := formolv1alpha1.BackupConfiguration{}
+    if err := session.Get(session.Context, types.NamespacedName{
+        Namespace: ref.Namespace,
+        Name:      ref.Name,
+    }, &backupConf); err != nil {
+        log.Error(err, "unable to get backupconf")
+        os.Exit(1)
+    }
+    log.V(0).Info("got backupConf", "backupConf", backupConf)
+
+    backupSession := &formolv1alpha1.BackupSession{
+        ObjectMeta: metav1.ObjectMeta{
+            Name:      strings.Join([]string{"backupsession", ref.Name, strconv.FormatInt(time.Now().Unix(), 10)}, "-"),
+            Namespace: ref.Namespace,
+        },
+        Spec: formolv1alpha1.BackupSessionSpec{
+            Ref: ref,
+        },
+    }
+    log.V(1).Info("create backupsession", "backupSession", backupSession)
+    if err := session.Create(session.Context, backupSession); err != nil {
+        log.Error(err, "unable to create backupsession")
+        os.Exit(1)
+    }
+}
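Note on the restore path in StartRestore above: with the environment produced by getResticEnv (RESTIC_REPOSITORY, RESTIC_PASSWORD, AWS_ACCESS_KEY_ID, AWS_SECRET_ACCESS_KEY) set on the init container, the exec.Command call amounts to running restic directly, roughly:

    /usr/bin/restic restore <snapshot-id> --target /

The <snapshot-id> placeholder is illustrative; at runtime it is target.SnapshotId taken from the matching TargetStatus of the referenced BackupSession.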