Removed ~ files
This commit is contained in:
parent
7d9b4100fb
commit
912f3bb06a
@ -1,129 +0,0 @@
|
||||
/*
|
||||
Copyright 2023.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
package controllers
|
||||
|
||||
import (
|
||||
"context"
|
||||
|
||||
"github.com/go-logr/logr"
|
||||
//appsv1 "k8s.io/api/apps/v1"
|
||||
//batchv1 "k8s.io/api/batch/v1"
|
||||
//corev1 "k8s.io/api/core/v1"
|
||||
"k8s.io/apimachinery/pkg/api/errors"
|
||||
//metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
"k8s.io/apimachinery/pkg/runtime"
|
||||
ctrl "sigs.k8s.io/controller-runtime"
|
||||
"sigs.k8s.io/controller-runtime/pkg/client"
|
||||
"sigs.k8s.io/controller-runtime/pkg/log"
|
||||
|
||||
formolv1alpha1 "github.com/desmo999r/formol/api/v1alpha1"
|
||||
formolutils "github.com/desmo999r/formol/pkg/utils"
|
||||
)
|
||||
|
||||
// BackupConfigurationReconciler reconciles a BackupConfiguration object
type BackupConfigurationReconciler struct {
	// Embedded client used for all API server reads/writes (Get/Update/Create/Delete).
	client.Client
	// Scheme maps Go types to Kubernetes GroupVersionKinds; used when setting
	// owner references on created CronJobs.
	Scheme *runtime.Scheme
	// Log is (re)assigned from the request context at the top of every Reconcile call.
	Log logr.Logger
	// NOTE(review): embedding context.Context in a struct is discouraged by Go
	// guidelines (contexts should be passed per call); here it is assigned by
	// Reconcile and read by the helper methods — confirm no concurrent
	// reconciles share this struct.
	context.Context
}
|
||||
|
||||
//+kubebuilder:rbac:groups=formol.desmojim.fr,resources=backupconfigurations,verbs=get;list;watch;create;update;patch;delete
|
||||
//+kubebuilder:rbac:groups=formol.desmojim.fr,resources=backupconfigurations/status,verbs=get;update;patch
|
||||
//+kubebuilder:rbac:groups=formol.desmojim.fr,resources=backupconfigurations/finalizers,verbs=update
|
||||
|
||||
// Reconcile is part of the main kubernetes reconciliation loop which aims to
|
||||
// move the current state of the cluster closer to the desired state.
|
||||
// TODO(user): Modify the Reconcile function to compare the state specified by
|
||||
// the BackupConfiguration object against the actual cluster state, and then
|
||||
// perform operations to make the cluster state reflect the state specified by
|
||||
// the user.
|
||||
//
|
||||
// For more details, check Reconcile and its Result here:
|
||||
// - https://pkg.go.dev/sigs.k8s.io/controller-runtime@v0.13.1/pkg/reconcile
|
||||
func (r *BackupConfigurationReconciler) Reconcile(ctx context.Context, req ctrl.Request) (ctrl.Result, error) {
|
||||
r.Context = ctx
|
||||
r.Log = log.FromContext(ctx)
|
||||
|
||||
r.Log.V(1).Info("Enter Reconcile with req", "req", req, "reconciler", r)
|
||||
|
||||
backupConf := formolv1alpha1.BackupConfiguration{}
|
||||
err := r.Get(ctx, req.NamespacedName, &backupConf)
|
||||
if err != nil {
|
||||
if errors.IsNotFound(err) {
|
||||
return ctrl.Result{}, nil
|
||||
}
|
||||
return ctrl.Result{}, err
|
||||
}
|
||||
|
||||
finalizerName := "finalizer.backupconfiguration.formol.desmojim.fr"
|
||||
|
||||
if !backupConf.ObjectMeta.DeletionTimestamp.IsZero() {
|
||||
r.Log.V(0).Info("backupconf being deleted", "backupconf", backupConf.ObjectMeta.Finalizers)
|
||||
if formolutils.ContainsString(backupConf.ObjectMeta.Finalizers, finalizerName) {
|
||||
_ = r.DeleteSidecar(backupConf)
|
||||
_ = r.DeleteCronJob(backupConf)
|
||||
backupConf.ObjectMeta.Finalizers = formolutils.RemoveString(backupConf.ObjectMeta.Finalizers, finalizerName)
|
||||
if err := r.Update(ctx, &backupConf); err != nil {
|
||||
r.Log.Error(err, "unable to remove finalizer")
|
||||
return ctrl.Result{}, err
|
||||
}
|
||||
}
|
||||
// We have been deleted. Return here
|
||||
r.Log.V(0).Info("backupconf deleted", "backupconf", backupConf.Name)
|
||||
return ctrl.Result{}, nil
|
||||
}
|
||||
|
||||
// Add finalizer
|
||||
if !formolutils.ContainsString(backupConf.ObjectMeta.Finalizers, finalizerName) {
|
||||
r.Log.V(0).Info("adding finalizer", "backupconf", backupConf)
|
||||
backupConf.ObjectMeta.Finalizers = append(backupConf.ObjectMeta.Finalizers, finalizerName)
|
||||
if err := r.Update(ctx, &backupConf); err != nil {
|
||||
r.Log.Error(err, "unable to append finalizer")
|
||||
return ctrl.Result{}, err
|
||||
}
|
||||
// backupConf has been updated. Exit here. The reconciler will be called again so we can finish the job.
|
||||
return ctrl.Result{}, nil
|
||||
}
|
||||
|
||||
if err := r.AddCronJob(backupConf); err != nil {
|
||||
return ctrl.Result{}, err
|
||||
} else {
|
||||
backupConf.Status.ActiveCronJob = true
|
||||
}
|
||||
|
||||
if err := r.AddSidecar(backupConf); err != nil {
|
||||
r.Log.Error(err, "unable to add sidecar container")
|
||||
return ctrl.Result{}, err
|
||||
} else {
|
||||
backupConf.Status.ActiveSidecar = true
|
||||
}
|
||||
|
||||
if err := r.Status().Update(ctx, &backupConf); err != nil {
|
||||
r.Log.Error(err, "Unable to update BackupConfiguration status")
|
||||
return ctrl.Result{}, err
|
||||
}
|
||||
|
||||
return ctrl.Result{}, nil
|
||||
}
|
||||
|
||||
// SetupWithManager sets up the controller with the Manager.
func (r *BackupConfigurationReconciler) SetupWithManager(mgr ctrl.Manager) error {
	// Watch BackupConfiguration objects; each create/update/delete event
	// triggers a call to Reconcile.
	return ctrl.NewControllerManagedBy(mgr).
		For(&formolv1alpha1.BackupConfiguration{}).
		Complete(r)
}
|
||||
@ -1,102 +0,0 @@
|
||||
package controllers
|
||||
|
||||
import (
|
||||
formolv1alpha1 "github.com/desmo999r/formol/api/v1alpha1"
|
||||
batchv1 "k8s.io/api/batch/v1"
|
||||
corev1 "k8s.io/api/core/v1"
|
||||
"k8s.io/apimachinery/pkg/api/errors"
|
||||
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
ctrl "sigs.k8s.io/controller-runtime"
|
||||
"sigs.k8s.io/controller-runtime/pkg/client"
|
||||
)
|
||||
|
||||
func (r *BackupConfigurationReconciler) DeleteCronJob(backupConf formolv1alpha1.BackupConfiguration) error {
|
||||
cronjob := &batchv1.CronJob{}
|
||||
if err := r.Get(r.Context, client.ObjectKey{
|
||||
Namespace: backupConf.Namespace,
|
||||
Name: "backup-" + backupConf.Name,
|
||||
}, cronjob); err == nil {
|
||||
r.Log.V(0).Info("Deleting cronjob", "cronjob", cronjob.Name)
|
||||
return r.Delete(r.Context, cronjob)
|
||||
} else {
|
||||
return err
|
||||
}
|
||||
}
|
||||
|
||||
func (r *BackupConfigurationReconciler) AddCronJob(backupConf formolv1alpha1.BackupConfiguration) error {
|
||||
cronjob := &batchv1.CronJob{}
|
||||
if err := r.Get(r.Context, client.ObjectKey{
|
||||
Namespace: backupConf.Namespace,
|
||||
Name: "backup-" + backupConf.Name,
|
||||
}, cronjob); err == nil {
|
||||
r.Log.V(0).Info("there is already a cronjob")
|
||||
var changed bool
|
||||
if backupConf.Spec.Schedule != cronjob.Spec.Schedule {
|
||||
r.Log.V(0).Info("cronjob schedule has changed", "old schedule", cronjob.Spec.Schedule, "new schedule", backupConf.Spec.Schedule)
|
||||
cronjob.Spec.Schedule = backupConf.Spec.Schedule
|
||||
changed = true
|
||||
}
|
||||
if backupConf.Spec.Suspend != nil && backupConf.Spec.Suspend != cronjob.Spec.Suspend {
|
||||
r.Log.V(0).Info("cronjob suspend has changed", "before", cronjob.Spec.Suspend, "new", backupConf.Spec.Suspend)
|
||||
cronjob.Spec.Suspend = backupConf.Spec.Suspend
|
||||
changed = true
|
||||
}
|
||||
if changed == true {
|
||||
if err := r.Update(r.Context, cronjob); err != nil {
|
||||
r.Log.Error(err, "unable to update cronjob definition")
|
||||
return err
|
||||
}
|
||||
backupConf.Status.Suspended = *backupConf.Spec.Suspend
|
||||
}
|
||||
return nil
|
||||
} else if errors.IsNotFound(err) == false {
|
||||
r.Log.Error(err, "something went wrong")
|
||||
return err
|
||||
}
|
||||
|
||||
cronjob = &batchv1.CronJob{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: "backup-" + backupConf.Name,
|
||||
Namespace: backupConf.Namespace,
|
||||
},
|
||||
Spec: batchv1.CronJobSpec{
|
||||
Suspend: backupConf.Spec.Suspend,
|
||||
Schedule: backupConf.Spec.Schedule,
|
||||
JobTemplate: batchv1.JobTemplateSpec{
|
||||
Spec: batchv1.JobSpec{
|
||||
Template: corev1.PodTemplateSpec{
|
||||
Spec: corev1.PodSpec{
|
||||
RestartPolicy: corev1.RestartPolicyOnFailure,
|
||||
ServiceAccountName: "backupsession-creator",
|
||||
Containers: []corev1.Container{
|
||||
corev1.Container{
|
||||
Name: "job-createbackupsession-" + backupConf.Name,
|
||||
Image: backupConf.Spec.Image,
|
||||
Args: []string{
|
||||
"backupsession",
|
||||
"create",
|
||||
"--namespace",
|
||||
backupConf.Namespace,
|
||||
"--name",
|
||||
backupConf.Name,
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
}
|
||||
if err := ctrl.SetControllerReference(&backupConf, cronjob, r.Scheme); err != nil {
|
||||
r.Log.Error(err, "unable to set controller on job", "cronjob", cronjob, "backupconf", backupConf)
|
||||
return err
|
||||
}
|
||||
r.Log.V(0).Info("creating the cronjob")
|
||||
if err := r.Create(r.Context, cronjob); err != nil {
|
||||
r.Log.Error(err, "unable to create the cronjob", "cronjob", cronjob)
|
||||
return err
|
||||
} else {
|
||||
return nil
|
||||
}
|
||||
}
|
||||
@ -1,134 +0,0 @@
|
||||
package controllers
|
||||
|
||||
import (
|
||||
appsv1 "k8s.io/api/apps/v1"
|
||||
corev1 "k8s.io/api/core/v1"
|
||||
"sigs.k8s.io/controller-runtime/pkg/client"
|
||||
|
||||
formolv1alpha1 "github.com/desmo999r/formol/api/v1alpha1"
|
||||
)
|
||||
|
||||
func (r *BackupConfigurationReconciler) DeleteSidecar(backupConf formolv1alpha1.BackupConfiguration) error {
|
||||
removeTags := func(podSpec *corev1.PodSpec, target formolv1alpha1.Target) {
|
||||
for i, container := range podSpec.Containers {
|
||||
for _, targetContainer := range target.Containers {
|
||||
if targetContainer.Name == container.Name {
|
||||
if container.Env[len(container.Env)-1].Name == formolv1alpha1.TARGETCONTAINER_TAG {
|
||||
podSpec.Containers[i].Env = container.Env[:len(container.Env)-1]
|
||||
} else {
|
||||
for j, e := range container.Env {
|
||||
if e.Name == formolv1alpha1.TARGETCONTAINER_TAG {
|
||||
container.Env[j] = container.Env[len(container.Env)-1]
|
||||
podSpec.Containers[i].Env = container.Env[:len(container.Env)-1]
|
||||
break
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
for _, target := range backupConf.Spec.Targets {
|
||||
switch target.TargetKind {
|
||||
case formolv1alpha1.Deployment:
|
||||
deployment := &appsv1.Deployment{}
|
||||
if err := r.Get(r.Context, client.ObjectKey{
|
||||
Namespace: backupConf.Namespace,
|
||||
Name: target.TargetName,
|
||||
}, deployment); err != nil {
|
||||
r.Log.Error(err, "cannot get deployment", "Deployment", target.TargetName)
|
||||
return err
|
||||
}
|
||||
restoreContainers := []corev1.Container{}
|
||||
for _, container := range deployment.Spec.Template.Spec.Containers {
|
||||
if container.Name == formolv1alpha1.SIDECARCONTAINER_NAME {
|
||||
continue
|
||||
}
|
||||
restoreContainers = append(restoreContainers, container)
|
||||
}
|
||||
deployment.Spec.Template.Spec.Containers = restoreContainers
|
||||
removeTags(&deployment.Spec.Template.Spec, target)
|
||||
return r.Update(r.Context, deployment)
|
||||
}
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// AddSidecar injects the formol backup sidecar container into the workloads
// targeted by backupConf, and tags each target container with a
// TARGETCONTAINER_TAG env var so the sidecar can identify them.
func (r *BackupConfigurationReconciler) AddSidecar(backupConf formolv1alpha1.BackupConfiguration) error {
	// Go through all the 'targets'
	// the backupType: Online needs a sidecar container for every single listed 'container'
	// if the backupType is something else than Online, the 'container' will still need a sidecar
	// if it has 'steps'
	// addTags marks the target containers with TARGETCONTAINER_TAG and reports
	// whether the sidecar still needs to be appended (false if already present).
	addTags := func(podSpec *corev1.PodSpec, target formolv1alpha1.Target) bool {
		for i, container := range podSpec.Containers {
			if container.Name == formolv1alpha1.SIDECARCONTAINER_NAME {
				// Sidecar already installed: skip this pod spec entirely.
				return false
			}
			for _, targetContainer := range target.Containers {
				if targetContainer.Name == container.Name {
					podSpec.Containers[i].Env = append(container.Env, corev1.EnvVar{
						Name:  formolv1alpha1.TARGETCONTAINER_TAG,
						Value: container.Name,
					})
				}
			}
		}
		return true
	}

	for _, target := range backupConf.Spec.Targets {
		// A sidecar is needed when any container declares backup steps...
		addSidecar := false
		for _, targetContainer := range target.Containers {
			if len(targetContainer.Steps) > 0 {
				addSidecar = true
			}
		}
		// ...or when the backup type is Online.
		if target.BackupType == formolv1alpha1.OnlineKind {
			addSidecar = true
		}
		if addSidecar {
			// Fetch the Repo so the sidecar gets the restic credentials/env.
			repo := formolv1alpha1.Repo{}
			if err := r.Get(r.Context, client.ObjectKey{
				Namespace: backupConf.Namespace,
				Name:      backupConf.Spec.Repository,
			}, &repo); err != nil {
				r.Log.Error(err, "unable to get Repo")
				return err
			}
			r.Log.V(1).Info("Got Repository", "repo", repo)
			env := repo.GetResticEnv(backupConf)
			// The sidecar runs the formolcli backup-session server.
			sideCar := corev1.Container{
				Name:  formolv1alpha1.SIDECARCONTAINER_NAME,
				Image: backupConf.Spec.Image,
				Args:  []string{"backupsession", "server"},
				Env: append(env, corev1.EnvVar{
					Name:  formolv1alpha1.TARGET_NAME,
					Value: target.TargetName,
				}),
				VolumeMounts: []corev1.VolumeMount{},
			}
			switch target.TargetKind {
			case formolv1alpha1.Deployment:
				deployment := &appsv1.Deployment{}
				if err := r.Get(r.Context, client.ObjectKey{
					Namespace: backupConf.Namespace,
					Name:      target.TargetName,
				}, deployment); err != nil {
					r.Log.Error(err, "cannot get deployment", "Deployment", target.TargetName)
					return err
				}
				// Only update the deployment when the sidecar is not there yet.
				if addTags(&deployment.Spec.Template.Spec, target) {
					deployment.Spec.Template.Spec.Containers = append(deployment.Spec.Template.Spec.Containers, sideCar)
					r.Log.V(1).Info("Updating deployment", "deployment", deployment, "containers", deployment.Spec.Template.Spec.Containers)
					if err := r.Update(r.Context, deployment); err != nil {
						r.Log.Error(err, "cannot update deployment", "Deployment", deployment)
						return err
					}
				}
			}
		}
	}

	return nil
}
|
||||
@ -1,165 +0,0 @@
|
||||
/*
|
||||
Copyright 2023.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
package controllers
|
||||
|
||||
import (
|
||||
"context"
|
||||
formolv1alpha1 "github.com/desmo999r/formol/api/v1alpha1"
|
||||
. "github.com/onsi/ginkgo/v2"
|
||||
. "github.com/onsi/gomega"
|
||||
batchv1 "k8s.io/api/batch/v1"
|
||||
//"time"
|
||||
//appsv1 "k8s.io/api/apps/v1"
|
||||
//corev1 "k8s.io/api/core/v1"
|
||||
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
"k8s.io/apimachinery/pkg/types"
|
||||
)
|
||||
|
||||
// Integration specs for the BackupConfiguration controller, run against the
// envtest control plane started in BeforeSuite (Ginkgo BDD style).
var _ = Describe("BackupConfiguration controller", func() {
	const BACKUPCONF_NAME = "test-backupconf-controller"

	var (
		backupConf *formolv1alpha1.BackupConfiguration
		ctx        = context.Background()
		// key locates the BackupConfiguration created by each spec.
		key = types.NamespacedName{
			Name:      BACKUPCONF_NAME,
			Namespace: NAMESPACE_NAME,
		}
	)

	// Build a fresh BackupConfiguration fixture before every spec.
	BeforeEach(func() {
		backupConf = &formolv1alpha1.BackupConfiguration{
			ObjectMeta: metav1.ObjectMeta{
				Name:      BACKUPCONF_NAME,
				Namespace: NAMESPACE_NAME,
			},
			Spec: formolv1alpha1.BackupConfigurationSpec{
				Repository: REPO_NAME,
				Schedule:   "1 * * * *",
				Image:      "desmo999r/formolcli:v0.3.2",
				Targets: []formolv1alpha1.Target{
					formolv1alpha1.Target{
						BackupType: formolv1alpha1.OnlineKind,
						TargetKind: formolv1alpha1.Deployment,
						TargetName: DEPLOYMENT_NAME,
						// NOTE(review): slice element type is TargetContainer but the
						// literal uses formolv1alpha1.Container — presumably an alias
						// in the API package; verify it compiles.
						Containers: []formolv1alpha1.TargetContainer{
							formolv1alpha1.Container{
								Name: CONTAINER_NAME,
							},
						},
					},
				},
			},
		}
	})

	Context("Creating a BackupConf", func() {
		// Create the fixture after BeforeEach has populated it.
		JustBeforeEach(func() {
			Eventually(func() error {
				return k8sClient.Create(ctx, backupConf)
			}, timeout, interval).Should(Succeed())
		})
		AfterEach(func() {
			Expect(k8sClient.Delete(ctx, backupConf)).Should(Succeed())
		})
		It("Has a schedule", func() {
			realBackupConf := &formolv1alpha1.BackupConfiguration{}
			// Wait until the object is visible via the API server.
			Eventually(func() bool {
				if err := k8sClient.Get(ctx, key, realBackupConf); err != nil {
					return false
				}
				return true
			}, timeout, interval).Should(BeTrue())
			Expect(realBackupConf.Spec.Schedule).Should(Equal("1 * * * *"))
		})
		It("Should create a CronJob", func() {
			realBackupConf := &formolv1alpha1.BackupConfiguration{}
			// Wait for the controller to flag the CronJob as active in status.
			Eventually(func() bool {
				if err := k8sClient.Get(ctx, key, realBackupConf); err != nil {
					return false
				}
				return realBackupConf.Status.ActiveCronJob
			}, timeout, interval).Should(BeTrue())
			cronJob := &batchv1.CronJob{}
			// The controller names the CronJob "backup-" + conf name.
			Eventually(func() bool {
				if err := k8sClient.Get(ctx, types.NamespacedName{
					Name:      "backup-" + BACKUPCONF_NAME,
					Namespace: NAMESPACE_NAME,
				}, cronJob); err != nil {
					return false
				}
				return true
			}, timeout, interval).Should(BeTrue())
			Expect(cronJob.Spec.Schedule).Should(Equal("1 * * * *"))
		})
		It("Should update the CronJob", func() {
			realBackupConf := &formolv1alpha1.BackupConfiguration{}
			Eventually(func() bool {
				if err := k8sClient.Get(ctx, key, realBackupConf); err != nil {
					return false
				}
				return realBackupConf.Status.ActiveCronJob
			}, timeout, interval).Should(BeTrue())
			// Change schedule and suspend, then expect the controller to
			// propagate both to the CronJob.
			realBackupConf.Spec.Schedule = "1 0 * * *"
			suspend := true
			realBackupConf.Spec.Suspend = &suspend
			Expect(k8sClient.Update(ctx, realBackupConf)).Should(Succeed())
			cronJob := &batchv1.CronJob{}
			Eventually(func() string {
				if err := k8sClient.Get(ctx, types.NamespacedName{
					Name:      "backup-" + BACKUPCONF_NAME,
					Namespace: NAMESPACE_NAME,
				}, cronJob); err != nil {
					return ""
				}
				return cronJob.Spec.Schedule
			}, timeout, interval).Should(Equal("1 0 * * *"))
			Expect(*cronJob.Spec.Suspend).Should(BeTrue())
		})
	})
	Context("Deleting a BackupConf", func() {
		JustBeforeEach(func() {
			Eventually(func() error {
				return k8sClient.Create(ctx, backupConf)
			}, timeout, interval).Should(Succeed())
		})
		It("Should delete the CronJob", func() {
			cronJob := &batchv1.CronJob{}
			Eventually(func() bool {
				if err := k8sClient.Get(ctx, types.NamespacedName{
					Name:      "backup-" + BACKUPCONF_NAME,
					Namespace: NAMESPACE_NAME,
				}, cronJob); err != nil {
					return false
				}
				return true
			}, timeout, interval).Should(BeTrue())
			By("The CronJob has been created. Now deleting the BackupConfiguration")
			Expect(k8sClient.Delete(ctx, backupConf)).Should(Succeed())
			// Passes once Get starts failing, i.e. the CronJob is gone.
			Eventually(func() bool {
				if err := k8sClient.Get(ctx, types.NamespacedName{
					Name:      "backup-" + BACKUPCONF_NAME,
					Namespace: NAMESPACE_NAME,
				}, cronJob); err != nil {
					return false
				}
				return true
			}, timeout, interval).Should(BeFalse())

		})
	})
})
|
||||
@ -1,62 +0,0 @@
|
||||
/*
|
||||
Copyright 2023.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
package controllers
|
||||
|
||||
import (
|
||||
"context"
|
||||
|
||||
"k8s.io/apimachinery/pkg/runtime"
|
||||
ctrl "sigs.k8s.io/controller-runtime"
|
||||
"sigs.k8s.io/controller-runtime/pkg/client"
|
||||
"sigs.k8s.io/controller-runtime/pkg/log"
|
||||
|
||||
formolv1alpha1 "github.com/desmo999r/formol/api/v1alpha1"
|
||||
)
|
||||
|
||||
// BackupSessionReconciler reconciles a BackupSession object
type BackupSessionReconciler struct {
	// Embedded client used for API server access.
	client.Client
	// Scheme maps Go types to Kubernetes GroupVersionKinds.
	Scheme *runtime.Scheme
}
|
||||
|
||||
//+kubebuilder:rbac:groups=formol.desmojim.fr,resources=backupsessions,verbs=get;list;watch;create;update;patch;delete
|
||||
//+kubebuilder:rbac:groups=formol.desmojim.fr,resources=backupsessions/status,verbs=get;update;patch
|
||||
//+kubebuilder:rbac:groups=formol.desmojim.fr,resources=backupsessions/finalizers,verbs=update
|
||||
|
||||
// Reconcile is part of the main kubernetes reconciliation loop which aims to
// move the current state of the cluster closer to the desired state.
// TODO(user): Modify the Reconcile function to compare the state specified by
// the BackupSession object against the actual cluster state, and then
// perform operations to make the cluster state reflect the state specified by
// the user.
//
// For more details, check Reconcile and its Result here:
// - https://pkg.go.dev/sigs.k8s.io/controller-runtime@v0.13.1/pkg/reconcile
func (r *BackupSessionReconciler) Reconcile(ctx context.Context, req ctrl.Request) (ctrl.Result, error) {
	// Kubebuilder scaffold stub: no reconciliation logic implemented yet.
	_ = log.FromContext(ctx)

	// TODO(user): your logic here

	return ctrl.Result{}, nil
}
|
||||
|
||||
// SetupWithManager sets up the controller with the Manager.
func (r *BackupSessionReconciler) SetupWithManager(mgr ctrl.Manager) error {
	// Watch BackupSession objects; events trigger Reconcile.
	return ctrl.NewControllerManagedBy(mgr).
		For(&formolv1alpha1.BackupSession{}).
		Complete(r)
}
|
||||
@ -1,155 +0,0 @@
|
||||
/*
|
||||
Copyright 2023.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
package controllers
|
||||
|
||||
import (
	"context"
	"path/filepath"
	"testing"
	"time"

	. "github.com/onsi/ginkgo/v2"
	. "github.com/onsi/gomega"

	appsv1 "k8s.io/api/apps/v1"
	corev1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/client-go/kubernetes/scheme"
	"k8s.io/client-go/rest"
	ctrl "sigs.k8s.io/controller-runtime"
	"sigs.k8s.io/controller-runtime/pkg/client"
	"sigs.k8s.io/controller-runtime/pkg/envtest"
	logf "sigs.k8s.io/controller-runtime/pkg/log"
	"sigs.k8s.io/controller-runtime/pkg/log/zap"

	formolv1alpha1 "github.com/desmo999r/formol/api/v1alpha1"
	//+kubebuilder:scaffold:imports
)
|
||||
|
||||
// These tests use Ginkgo (BDD-style Go testing framework). Refer to
|
||||
// http://onsi.github.io/ginkgo/ to learn more about Ginkgo.
|
||||
|
||||
// Shared fixture names and Eventually polling parameters for the test suite.
const (
	NAMESPACE_NAME  = "test-namespace"
	REPO_NAME       = "test-repo"
	DEPLOYMENT_NAME = "test-deployment"
	CONTAINER_NAME  = "test-container"
	DATAVOLUME_NAME = "data"
	// timeout/interval bound every Eventually() poll in the suite.
	timeout  = time.Second * 10
	interval = time.Millisecond * 250
)
|
||||
|
||||
// Package-level fixtures and handles shared by the whole suite.
var (
	// namespace hosts every object the specs create.
	namespace = &corev1.Namespace{
		ObjectMeta: metav1.ObjectMeta{
			Name: NAMESPACE_NAME,
		},
	}
	// deployment is the workload the controller is expected to attach the
	// backup sidecar to. NOTE(review): requires the appsv1
	// ("k8s.io/api/apps/v1") import to be present in the import block.
	deployment = &appsv1.Deployment{
		ObjectMeta: metav1.ObjectMeta{
			Namespace: NAMESPACE_NAME,
			Name:      DEPLOYMENT_NAME,
		},
		Spec: appsv1.DeploymentSpec{
			Selector: &metav1.LabelSelector{
				MatchLabels: map[string]string{"app": "test-deployment"},
			},
			Template: corev1.PodTemplateSpec{
				ObjectMeta: metav1.ObjectMeta{
					Labels: map[string]string{"app": "test-deployment"},
				},
				Spec: corev1.PodSpec{
					Containers: []corev1.Container{
						corev1.Container{
							Name:  "test-container",
							Image: "test-image",
						},
					},
					Volumes: []corev1.Volume{
						corev1.Volume{
							Name: DATAVOLUME_NAME,
						},
					},
				},
			},
		},
	}
	// Populated by BeforeSuite; torn down in AfterSuite.
	cfg       *rest.Config
	k8sClient client.Client
	testEnv   *envtest.Environment
	ctx       context.Context
	cancel    context.CancelFunc
)
|
||||
|
||||
// TestAPIs is the single go-test entry point that runs the whole Ginkgo suite.
func TestAPIs(t *testing.T) {
	RegisterFailHandler(Fail)

	RunSpecs(t, "Controller Suite")
}
|
||||
|
||||
// BeforeSuite boots an envtest control plane, registers the formol API types,
// creates the shared namespace/deployment fixtures, and starts a manager
// running the BackupConfiguration controller in the background.
var _ = BeforeSuite(func() {
	logf.SetLogger(zap.New(zap.WriteTo(GinkgoWriter), zap.UseDevMode(true)))

	ctx, cancel = context.WithCancel(context.TODO())
	By("bootstrapping test environment")
	testEnv = &envtest.Environment{
		// CRDs generated by kubebuilder live under config/crd/bases.
		CRDDirectoryPaths:     []string{filepath.Join("..", "config", "crd", "bases")},
		ErrorIfCRDPathMissing: true,
	}

	var err error
	// cfg is defined in this file globally.
	cfg, err = testEnv.Start()
	Expect(err).NotTo(HaveOccurred())
	Expect(cfg).NotTo(BeNil())

	err = formolv1alpha1.AddToScheme(scheme.Scheme)
	Expect(err).NotTo(HaveOccurred())

	//+kubebuilder:scaffold:scheme

	k8sClient, err = client.New(cfg, client.Options{Scheme: scheme.Scheme})
	Expect(err).NotTo(HaveOccurred())
	Expect(k8sClient).NotTo(BeNil())
	// Create the fixtures the specs operate on.
	Expect(k8sClient.Create(ctx, namespace)).Should(Succeed())
	Expect(k8sClient.Create(ctx, deployment)).Should(Succeed())

	k8sManager, err := ctrl.NewManager(cfg, ctrl.Options{
		Scheme: scheme.Scheme,
	})
	Expect(err).NotTo(HaveOccurred())

	err = (&BackupConfigurationReconciler{
		Client: k8sManager.GetClient(),
		Scheme: k8sManager.GetScheme(),
	}).SetupWithManager(k8sManager)
	Expect(err).NotTo(HaveOccurred())

	// Run the manager until AfterSuite cancels ctx; GinkgoRecover turns
	// panics in this goroutine into spec failures.
	go func() {
		defer GinkgoRecover()
		err = k8sManager.Start(ctx)
		Expect(err).ToNot(HaveOccurred(), "failed to run manager")
	}()
})
|
||||
|
||||
// AfterSuite stops the manager (by cancelling its context) and tears down
// the envtest control plane.
var _ = AfterSuite(func() {
	cancel()
	By("tearing down the test environment")
	err := testEnv.Stop()
	Expect(err).NotTo(HaveOccurred())
})
|
||||
Loading…
Reference in New Issue
Block a user