Creates dummy backup sidecar
parent 26464d0588
commit 6f95be9c73
@@ -17,6 +17,7 @@ limitations under the License.
 package v1alpha1
 
 import (
+    corev1 "k8s.io/api/core/v1"
     metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 )
 
@@ -28,6 +29,12 @@ type Repository struct {
     Name string `json:"name"`
 }
 
+type Target struct {
+    ApiVersion string `json:"apiVersion"`
+    Name       string `json:"name"`
+    Kind       string `json:"kind"`
+}
+
 // BackupConfigurationSpec defines the desired state of BackupConfiguration
 type BackupConfigurationSpec struct {
     // INSERT ADDITIONAL SPEC FIELDS - desired state of cluster
@@ -37,6 +44,9 @@ type BackupConfigurationSpec struct {
     Repository `json:"repository"`
     Task       string `json:"task,omitempty"`
     Schedule   string `json:"schedule"`
+    Target     `json:"target"`
+    // +optional
+    VolumeMounts []corev1.VolumeMount `json:"volumeMounts,omitempty"`
     // +optional
     Suspend *bool `json:"suspend,omitempty"`
 }
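With the new Target and VolumeMounts fields, a BackupConfiguration can now point the operator at a Deployment and at the paths to back up. The following is a minimal sketch, assuming the usual kubebuilder scaffolding for the BackupConfiguration type; the object name, schedule, and mount values are illustrative placeholders, not taken from the commit.

package main

import (
    corev1 "k8s.io/api/core/v1"
    metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"

    formolv1alpha1 "github.com/desmo999r/formol/api/v1alpha1"
)

// buildExampleBackupConf shows how the new Target and VolumeMounts fields
// slot into BackupConfigurationSpec; every concrete value here is made up.
func buildExampleBackupConf() formolv1alpha1.BackupConfiguration {
    return formolv1alpha1.BackupConfiguration{
        ObjectMeta: metav1.ObjectMeta{Name: "demo-backup", Namespace: "default"},
        Spec: formolv1alpha1.BackupConfigurationSpec{
            Repository: formolv1alpha1.Repository{Name: "demo-repo"},
            Schedule:   "0 2 * * *",
            Target: formolv1alpha1.Target{
                ApiVersion: "apps/v1",
                Kind:       "Deployment",
                Name:       "demo-app",
            },
            // Mounted read-only into the backup sidecar by the controller.
            VolumeMounts: []corev1.VolumeMount{
                {Name: "data", MountPath: "/data"},
            },
        },
    }
}

func main() { _ = buildExampleBackupConf() }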
@@ -24,8 +24,7 @@ import (
 // NOTE: json tags are required. Any new fields you add must have json tags for the fields to be serialized.
 
 type Ref struct {
     Name string `json:"name"`
-    Namespace string `json:"namespace"`
 }
 
 // BackupSessionSpec defines the desired state of BackupSession
api/v1alpha1/repo_types.go (new file, 75 lines)
@@ -0,0 +1,75 @@
+/*
+
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package v1alpha1
+
+import (
+    metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+)
+
+// EDIT THIS FILE! THIS IS SCAFFOLDING FOR YOU TO OWN!
+// NOTE: json tags are required. Any new fields you add must have json tags for the fields to be serialized.
+
+type S3 struct {
+    Server string `json:"server"`
+    Bucket string `json:"bucket"`
+    // +optional
+    Prefix string `json:"prefix,omitempty"`
+}
+
+type Backend struct {
+    S3 `json:"s3"`
+}
+
+// RepoSpec defines the desired state of Repo
+type RepoSpec struct {
+    // INSERT ADDITIONAL SPEC FIELDS - desired state of cluster
+    // Important: Run "make" to regenerate code after modifying this file
+
+    // Foo is an example field of Repo. Edit Repo_types.go to remove/update
+    Backend           `json:"backend"`
+    RepositorySecrets string `json:"repositorySecrets"`
+}
+
+// RepoStatus defines the observed state of Repo
+type RepoStatus struct {
+    // INSERT ADDITIONAL STATUS FIELD - define observed state of cluster
+    // Important: Run "make" to regenerate code after modifying this file
+}
+
+// +kubebuilder:object:root=true
+
+// Repo is the Schema for the repoes API
+type Repo struct {
+    metav1.TypeMeta   `json:",inline"`
+    metav1.ObjectMeta `json:"metadata,omitempty"`
+
+    Spec   RepoSpec   `json:"spec,omitempty"`
+    Status RepoStatus `json:"status,omitempty"`
+}
+
+// +kubebuilder:object:root=true
+
+// RepoList contains a list of Repo
+type RepoList struct {
+    metav1.TypeMeta `json:",inline"`
+    metav1.ListMeta `json:"metadata,omitempty"`
+    Items           []Repo `json:"items"`
+}
+
+func init() {
+    SchemeBuilder.Register(&Repo{}, &RepoList{})
+}
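For context, a Repo wired to an S3-style endpoint could be declared as below. This is a sketch using the types added above; the server, bucket, and secret names are placeholders. The namespace "backup" matches where the controller later looks the Repo up, and the referenced Secret is expected to hold AWS_ACCESS_KEY_ID, AWS_SECRET_ACCESS_KEY, and RESTIC_PASSWORD, which is exactly what addSidecarContainer maps into the sidecar.

package main

import (
    metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"

    formolv1alpha1 "github.com/desmo999r/formol/api/v1alpha1"
)

// exampleRepo sketches a Repo backed by S3; every concrete value is a placeholder.
var exampleRepo = formolv1alpha1.Repo{
    ObjectMeta: metav1.ObjectMeta{Name: "demo-repo", Namespace: "backup"},
    Spec: formolv1alpha1.RepoSpec{
        Backend: formolv1alpha1.Backend{
            S3: formolv1alpha1.S3{
                Server: "minio.backup.svc:9000",
                Bucket: "backups",
            },
        },
        // Name of the Secret holding the restic and S3 credentials.
        RepositorySecrets: "demo-repo-secrets",
    },
}

func main() { _ = exampleRepo }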
@@ -20,7 +20,10 @@ import (
     "context"
 
     "github.com/go-logr/logr"
+    appsv1 "k8s.io/api/apps/v1"
     kbatch_beta1 "k8s.io/api/batch/v1beta1"
+    corev1 "k8s.io/api/core/v1"
+    // metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
     "k8s.io/apimachinery/pkg/runtime"
     ctrl "sigs.k8s.io/controller-runtime"
     "sigs.k8s.io/controller-runtime/pkg/client"
@@ -38,6 +41,89 @@ type BackupConfigurationReconciler struct {
 // +kubebuilder:rbac:groups=formol.desmojim.fr,resources=backupconfigurations,verbs=get;list;watch;create;update;patch;delete
 // +kubebuilder:rbac:groups=formol.desmojim.fr,resources=backupconfigurations/status,verbs=get;update;patch
 
+func (r *BackupConfigurationReconciler) addSidecarContainer(backupConf *formolv1alpha1.BackupConfiguration) error {
+    log := r.Log.WithValues("Repository", backupConf.Spec.Repository.Name)
+    repo := &formolv1alpha1.Repo{}
+    sidecar := corev1.Container{
+        Name:    "backup",
+        Image:   "busybox",
+        Command: []string{"sh", "-c", "echo Toto; sleep 3600"},
+        Env: []corev1.EnvVar{
+            corev1.EnvVar{
+                Name: "POD_NAME",
+                ValueFrom: &corev1.EnvVarSource{
+                    FieldRef: &corev1.ObjectFieldSelector{
+                        FieldPath: "metadata.name",
+                    },
+                },
+            },
+        },
+        VolumeMounts: []corev1.VolumeMount{},
+    }
+
+    // Gather information from the repo
+    if err := r.Get(context.Background(), client.ObjectKey{
+        Namespace: "backup",
+        Name:      backupConf.Spec.Repository.Name,
+    }, repo); err != nil {
+        log.Error(err, "unable to get Repo from BackupConfiguration")
+        return err
+    }
+    // S3 backing storage
+    if (formolv1alpha1.S3{}) != repo.Spec.Backend.S3 {
+        url := "s3:http://" + repo.Spec.Backend.S3.Server + "/" + repo.Spec.Backend.S3.Bucket + "/" + backupConf.Spec.Target.Name
+        sidecar.Env = append(sidecar.Env, corev1.EnvVar{
+            Name:  "RESTIC_REPOSITORY",
+            Value: url,
+        })
+        for _, key := range []string{
+            "AWS_ACCESS_KEY_ID",
+            "AWS_SECRET_ACCESS_KEY",
+            "RESTIC_PASSWORD",
+        } {
+            sidecar.Env = append(sidecar.Env, corev1.EnvVar{
+                Name: key,
+                ValueFrom: &corev1.EnvVarSource{
+                    SecretKeyRef: &corev1.SecretKeySelector{
+                        LocalObjectReference: corev1.LocalObjectReference{
+                            Name: repo.Spec.RepositorySecrets,
+                        },
+                        Key: key,
+                    },
+                },
+            })
+        }
+    }
+
+    log.WithValues("Deployment", backupConf.Spec.Target.Name)
+    deployment := &appsv1.Deployment{}
+    if err := r.Get(context.Background(), client.ObjectKey{
+        Namespace: backupConf.Namespace,
+        Name:      backupConf.Spec.Target.Name,
+    }, deployment); err != nil {
+        log.Error(err, "unable to fetch Deployment")
+        return client.IgnoreNotFound(err)
+    }
+    for _, container := range deployment.Spec.Template.Spec.Containers {
+        if container.Name == "backup" {
+            log.V(0).Info("There is already a backup sidecar container. Skipping", "container", container)
+            return nil
+        }
+    }
+    for _, volumemount := range backupConf.Spec.VolumeMounts {
+        log.V(1).Info("mounts", "volumemount", volumemount)
+        volumemount.ReadOnly = true
+        sidecar.VolumeMounts = append(sidecar.VolumeMounts, *volumemount.DeepCopy())
+    }
+    deployment.Spec.Template.Spec.Containers = append(deployment.Spec.Template.Spec.Containers, sidecar)
+    log.V(0).Info("Adding a sidecar container")
+    if err := r.Update(context.Background(), deployment); err != nil {
+        log.Error(err, "unable to update the Deployment")
+        return err
+    }
+    return nil
+}
+
 func (r *BackupConfigurationReconciler) Reconcile(req ctrl.Request) (ctrl.Result, error) {
     ctx := context.Background()
     log := r.Log.WithValues("backupconfiguration", req.NamespacedName)
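The dummy sidecar does not run restic yet (it only echoes and sleeps), but it already receives the environment a restic-based sidecar would need. Under the S3 layout above, the repository URL works out as in this sketch; the helper name and sample values are illustrative, not part of the commit.

package main

import "fmt"

// resticRepositoryURL mirrors how addSidecarContainer assembles RESTIC_REPOSITORY:
// s3:http://<server>/<bucket>/<target deployment name>.
func resticRepositoryURL(server, bucket, target string) string {
    return fmt.Sprintf("s3:http://%s/%s/%s", server, bucket, target)
}

func main() {
    // With the placeholder Repo above and a Deployment target "demo-app",
    // this prints "s3:http://minio.backup.svc:9000/backups/demo-app".
    fmt.Println(resticRepositoryURL("minio.backup.svc:9000", "backups", "demo-app"))
}

Since the sidecar is appended to the pod template, the Update call triggers a rollout of the target Deployment; the check for an existing "backup" container keeps the injection idempotent across reconciles.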
@@ -45,18 +131,27 @@ func (r *BackupConfigurationReconciler) Reconcile(req ctrl.Request) (ctrl.Result, error) {
     log.V(1).Info("Enter Reconcile with req", "req", req)
 
     // your logic here
-    var backupConf formolv1alpha1.BackupConfiguration
-    if err := r.Get(ctx, req.NamespacedName, &backupConf); err != nil {
+    backupConf := &formolv1alpha1.BackupConfiguration{}
+    if err := r.Get(ctx, req.NamespacedName, backupConf); err != nil {
         log.Error(err, "unable to fetch BackupConfiguration")
         return ctrl.Result{}, client.IgnoreNotFound(err)
     }
 
+    switch backupConf.Spec.Target.Kind {
+    case "Deployment":
+        if err := r.addSidecarContainer(backupConf); err != nil {
+            return ctrl.Result{}, nil
+        }
+    case "PersistentVolumeClaim":
+        log.V(0).Info("TODO backup PVC")
+        return ctrl.Result{}, nil
+    }
+
     if backupConf.Spec.Suspend != nil && *backupConf.Spec.Suspend == true {
         log.V(0).Info("We are suspended return and wait for the next event")
         // TODO Suspend the CronJob
         return ctrl.Result{}, nil
     }
-    backupConf.Status.Suspended = backupConf.Spec.Suspend
 
     return ctrl.Result{}, nil
 }
@@ -18,8 +18,6 @@ package controllers
 
 import (
     "context"
-    "fmt"
-    "time"
 
     "github.com/go-logr/logr"
     "k8s.io/apimachinery/pkg/runtime"
@@ -27,9 +25,6 @@ import (
     "sigs.k8s.io/controller-runtime/pkg/client"
 
     formolv1alpha1 "github.com/desmo999r/formol/api/v1alpha1"
-    batchv1 "k8s.io/api/batch/v1"
-    corev1 "k8s.io/api/core/v1"
-    metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 )
 
 // BackupSessionReconciler reconciles a BackupSession object
@@ -43,140 +38,43 @@ type BackupSessionReconciler struct {
 // +kubebuilder:rbac:groups=formol.desmojim.fr,resources=backupsessions/status,verbs=get;update;patch
 
 func (r *BackupSessionReconciler) Reconcile(req ctrl.Request) (ctrl.Result, error) {
-    ctx := context.Background()
     log := r.Log.WithValues("backupsession", req.NamespacedName)
+    ctx := context.Background()
 
     // your logic here
-    var backupSession formolv1alpha1.BackupSession
-    if err := r.Get(ctx, req.NamespacedName, &backupSession); err != nil {
+    backupSession := &formolv1alpha1.BackupSession{}
+    if err := r.Get(ctx, req.NamespacedName, backupSession); err != nil {
         log.Error(err, "unable to get backupsession")
         return ctrl.Result{}, client.IgnoreNotFound(err)
     }
 
-    var backupConf formolv1alpha1.BackupConfiguration
+    log.V(1).Info("backupSession.Namespace", "namespace", backupSession.Namespace)
+    log.V(1).Info("backupSession.Spec.Ref.Name", "name", backupSession.Spec.Ref.Name)
+    backupConf := &formolv1alpha1.BackupConfiguration{}
     if err := r.Get(ctx, client.ObjectKey{
-        Namespace: backupSession.Spec.Ref.Namespace,
-        Name:      backupSession.Spec.Ref.Name}, &backupConf); err != nil {
+        Namespace: backupSession.Namespace,
+        Name:      backupSession.Spec.Ref.Name}, backupConf); err != nil {
         log.Error(err, "unable to get backupConfiguration")
         return ctrl.Result{}, client.IgnoreNotFound(err)
     }
 
     log.V(1).Info("Found BackupConfiguration", "BackupConfiguration", backupConf)
-    var childJobs batchv1.JobList
-    if err := r.List(ctx, &childJobs, client.InNamespace(req.Namespace), client.MatchingFields{jobOwnerKey: backupSession.Spec.Ref.Name}); err != nil {
-        log.Error(err, "unable to list child jobs")
-        return ctrl.Result{}, err
-    }
 
-    var activeJobs []*batchv1.Job
-    var successfulJobs []*batchv1.Job
-    var failedJobs []*batchv1.Job
-
-    isJobFinished := func(job *batchv1.Job) (bool, batchv1.JobConditionType) {
-        for _, c := range job.Status.Conditions {
-            if (c.Type == batchv1.JobComplete || c.Type == batchv1.JobFailed) && c.Status == corev1.ConditionTrue {
-                return true, c.Type
-            }
-        }
-        return false, ""
-    }
-
-    for i, job := range childJobs.Items {
-        _, finishedType := isJobFinished(&job)
-        switch finishedType {
-        case "": // running
-            activeJobs = append(activeJobs, &childJobs.Items[i])
-        case batchv1.JobFailed:
-            failedJobs = append(failedJobs, &childJobs.Items[i])
-        case batchv1.JobComplete:
-            successfulJobs = append(successfulJobs, &childJobs.Items[i])
-        }
-    }
-
-    if len(activeJobs) > 0 {
-        log.V(0).Info("A backup job is already running. Skipping")
+    // Found the BackupConfiguration.
+    switch backupConf.Spec.Target.Kind {
+    case "PersistentVolumeClaim":
+        return r.CreateJob()
+    default:
         return ctrl.Result{}, nil
     }
+}
 
-    constructJobForBackupConfiguration := func(backupConf formolv1alpha1.BackupConfiguration) (*batchv1.Job, error) {
-        name := fmt.Sprintf("%s-%d", backupConf.Name, time.Now().Unix())
-        log.V(1).Info("constructing a new Job", "name", name)
-        task := &formolv1alpha1.Task{}
-        if err := r.Get(ctx, client.ObjectKey{
-            Namespace: "backup",
-            Name:      backupConf.Spec.Task,
-        }, task); err != nil {
-            log.Error(err, "unable to get Task from BackupConfiguration")
-            return nil, err
-        }
-        log.V(1).Info("found task", "task", task.Name)
-        containers := []corev1.Container{}
-        for _, step := range task.Spec.Steps {
-            function := &formolv1alpha1.Function{}
-            if err := r.Get(ctx, client.ObjectKey{
-                Namespace: "backup",
-                Name:      step.Name,
-            }, function); err != nil {
-                log.Error(err, "unable to get Function")
-                return nil, err
-            }
-            log.V(1).Info("found function", "function", function.Name)
-            containers = append(containers, *function.Spec.DeepCopy())
-        }
-        job := &batchv1.Job{
-            ObjectMeta: metav1.ObjectMeta{
-                Labels:      make(map[string]string),
-                Annotations: make(map[string]string),
-                Name:        name,
-                Namespace:   backupConf.Namespace,
-            },
-            Spec: batchv1.JobSpec{
-                Template: corev1.PodTemplateSpec{
-                    Spec: corev1.PodSpec{
-                        Containers:    containers,
-                        RestartPolicy: corev1.RestartPolicyOnFailure,
-                    },
-                },
-            },
-        }
-        return job, nil
-
-    }
-    job, err := constructJobForBackupConfiguration(backupConf)
-    if err != nil {
-        log.Error(err, "unable to construct job")
-        return ctrl.Result{}, nil
-    }
-
-    if err := r.Create(ctx, job); err != nil {
-        log.Error(err, "unable to create job")
-        return ctrl.Result{}, err
-    }
-
+func (r *BackupSessionReconciler) CreateJob() (ctrl.Result, error) {
     return ctrl.Result{}, nil
 }
 
-var (
-    jobOwnerKey = ".metadata.controller"
-    apiGVStr    = formolv1alpha1.GroupVersion.String()
-)
-
 func (r *BackupSessionReconciler) SetupWithManager(mgr ctrl.Manager) error {
-    if err := mgr.GetFieldIndexer().IndexField(&batchv1.Job{}, jobOwnerKey, func(rawObj runtime.Object) []string {
-        job := rawObj.(*batchv1.Job)
-        owner := metav1.GetControllerOf(job)
-        if owner == nil {
-            return nil
-        }
-        if owner.APIVersion != apiGVStr || owner.Kind != "BackupSession" {
-            return nil
-        }
-        return []string{owner.Name}
-    }); err != nil {
-        return err
-    }
     return ctrl.NewControllerManagedBy(mgr).
         For(&formolv1alpha1.BackupSession{}).
-        Owns(&batchv1.Job{}).
         Complete(r)
 }
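The Ref change above means a BackupSession now always points at a BackupConfiguration in its own namespace, which is why the controller resolves it with backupSession.Namespace instead of a Ref.Namespace field. A minimal object exercising that lookup path might look like this sketch; the names are placeholders, not values from the commit.

package main

import (
    metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"

    formolv1alpha1 "github.com/desmo999r/formol/api/v1alpha1"
)

// exampleSession references a BackupConfiguration by name only; the controller
// resolves it in the session's own namespace now that Ref has no Namespace field.
var exampleSession = formolv1alpha1.BackupSession{
    ObjectMeta: metav1.ObjectMeta{Name: "demo-backup-session", Namespace: "default"},
    Spec: formolv1alpha1.BackupSessionSpec{
        Ref: formolv1alpha1.Ref{Name: "demo-backup"},
    },
}

func main() { _ = exampleSession }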