First volume snapshot created

parent 06b372765b
commit 8b2aaf7211
@@ -115,6 +115,21 @@ func (r *BackupSessionReconciler) Reconcile(ctx context.Context, req ctrl.Request
				targetStatus.SnapshotId = backupResult.SnapshotId
				targetStatus.Duration = &metav1.Duration{Duration: time.Since(targetStatus.StartTime.Time)}
			}
		case formolv1alpha1.SnapshotKind:
			if err := r.backupSnapshot(target); err != nil {
				switch err.(type) {
				case *NotReadyToUseError:
					r.Log.V(0).Info("Volume snapshots are not ready. Requeueing")
					return ctrl.Result{
						Requeue: true,
					}, nil
				default:
					r.Log.Error(err, "unable to do snapshot backup")
					// TODO: clean up existing snapshots
					r.deleteVolumeSnapshots(target)
					newSessionState = formolv1alpha1.Failure
				}
			}
		}
		r.Log.V(0).Info("Backup is over and is a success. Move to Waiting state")
	case formolv1alpha1.Finalize:
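The type switch above matches only a *NotReadyToUseError returned as the concrete error value. A minimal, self-contained sketch of the same requeue decision written with errors.As, which also matches wrapped errors (the shouldRequeue helper is hypothetical, not part of this commit):

package main

import (
	"errors"
	"fmt"
)

type NotReadyToUseError struct{}

func (e *NotReadyToUseError) Error() string { return "Snapshot is not ready to use" }

// shouldRequeue reports whether a backup error means "retry later"
// rather than "fail the session". errors.As unwraps fmt.Errorf("%w", ...)
// chains, which a switch on the concrete type would miss.
func shouldRequeue(err error) bool {
	var notReady *NotReadyToUseError
	return errors.As(err, &notReady)
}

func main() {
	err := fmt.Errorf("backup of target: %w", &NotReadyToUseError{})
	fmt.Println(shouldRequeue(err)) // true
}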
@@ -4,9 +4,18 @@ import (
	"bufio"
	"encoding/json"
	formolv1alpha1 "github.com/desmo999r/formol/api/v1alpha1"
	volumesnapshotv1 "github.com/kubernetes-csi/external-snapshotter/client/v6/apis/volumesnapshot/v1"
	"io"
	corev1 "k8s.io/api/core/v1"
	"k8s.io/apimachinery/pkg/api/errors"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"os"
	"os/exec"
	"sigs.k8s.io/controller-runtime/pkg/client"
)

const (
	SNAPSHOT_PREFIX = "formol-"
)

type BackupResult struct {
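SNAPSHOT_PREFIX above gives every snapshot a name derived deterministically from its PersistentVolume, which is what lets snapshotVolume further down look up an existing snapshot with a single Get instead of listing and filtering. A one-line illustration (this helper is hypothetical, not part of the commit):

func snapshotName(pvName string) string {
	// e.g. "formol-" + "pvc-0b1c2d3e" -> "formol-pvc-0b1c2d3e"
	return SNAPSHOT_PREFIX + pvName
}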
@@ -69,3 +78,137 @@ func (r *BackupSessionReconciler) backupJob(tag string, target formolv1alpha1.Target
	result, err = r.backupPaths(tag, paths)
	return
}

func (r *BackupSessionReconciler) backupSnapshot(target formolv1alpha1.Target) error {
	targetObject, targetPodSpec := formolv1alpha1.GetTargetObjects(target.TargetKind)
	if err := r.Get(r.Context, client.ObjectKey{
		Namespace: r.Namespace,
		Name:      target.TargetName,
	}, targetObject); err != nil {
		r.Log.Error(err, "cannot get target", "target", target.TargetName)
		return err
	}
	for _, container := range targetPodSpec.Containers {
		for _, targetContainer := range target.Containers {
			if targetContainer.Name == container.Name {
				// Snapshot all the container PVCs that support snapshots,
				// then create new volumes from the snapshots, replace the
				// volumes in the container struct with the snapshot volumes,
				// and use formolv1alpha1.GetVolumeMounts to get the volume
				// mounts for the Job.
				// sidecar := formolv1alpha1.GetSidecar(backupConf, target)
				_, vms := formolv1alpha1.GetVolumeMounts(container, targetContainer)
				if err := r.snapshotVolumes(vms, targetPodSpec); err != nil {
					r.Log.Error(err, "volume snapshot not ready")
					return err
				}
			}
		}
	}
	return nil
}

type NotReadyToUseError struct{}

func (e *NotReadyToUseError) Error() string {
	return "Snapshot is not ready to use"
}

func (r *BackupSessionReconciler) snapshotVolume(volume corev1.Volume) error {
	r.Log.V(0).Info("Preparing snapshot", "volume", volume.Name)
	if volume.VolumeSource.PersistentVolumeClaim != nil {
		pvc := corev1.PersistentVolumeClaim{}
		if err := r.Get(r.Context, client.ObjectKey{
			Namespace: r.Namespace,
			Name:      volume.VolumeSource.PersistentVolumeClaim.ClaimName,
		}, &pvc); err != nil {
			r.Log.Error(err, "unable to get pvc", "volume", volume)
			return err
		}
		pv := corev1.PersistentVolume{}
		if err := r.Get(r.Context, client.ObjectKey{
			Name: pvc.Spec.VolumeName,
		}, &pv); err != nil {
			r.Log.Error(err, "unable to get pv", "volume", pvc.Spec.VolumeName)
			return err
		}
		if pv.Spec.PersistentVolumeSource.CSI != nil {
			// This volume is backed by a CSI driver. Let's see if we can snapshot it.
			volumeSnapshotClassList := volumesnapshotv1.VolumeSnapshotClassList{}
			if err := r.List(r.Context, &volumeSnapshotClassList); err != nil {
				r.Log.Error(err, "unable to get VolumeSnapshotClass list")
				return err
			}
			for _, volumeSnapshotClass := range volumeSnapshotClassList.Items {
				if volumeSnapshotClass.Driver == pv.Spec.PersistentVolumeSource.CSI.Driver {
					// Check whether a snapshot already exists.
					volumeSnapshot := volumesnapshotv1.VolumeSnapshot{}
					if err := r.Get(r.Context, client.ObjectKey{
						Namespace: r.Namespace,
						Name:      SNAPSHOT_PREFIX + pv.Name,
					}, &volumeSnapshot); errors.IsNotFound(err) {
						// No snapshot found. Create a new one with this VolumeSnapshotClass.
						r.Log.V(0).Info("Create a volume snapshot", "pvc", pvc.Name)
						volumeSnapshot = volumesnapshotv1.VolumeSnapshot{
							ObjectMeta: metav1.ObjectMeta{
								Namespace: r.Namespace,
								Name:      SNAPSHOT_PREFIX + pv.Name,
							},
							Spec: volumesnapshotv1.VolumeSnapshotSpec{
								VolumeSnapshotClassName: &volumeSnapshotClass.Name,
								Source: volumesnapshotv1.VolumeSnapshotSource{
									PersistentVolumeClaimName: &pvc.Name,
								},
							},
						}
						if err := r.Create(r.Context, &volumeSnapshot); err != nil {
							r.Log.Error(err, "unable to create the snapshot", "pvc", pvc.Name)
							return err
						}
						// We just created the snapshot. Assume it is not yet ready and reschedule.
						return &NotReadyToUseError{}
					} else {
						if err != nil {
							r.Log.Error(err, "Something went very wrong here")
							return err
						}
						// The VolumeSnapshot exists. Is it ReadyToUse?
						if volumeSnapshot.Status == nil || volumeSnapshot.Status.ReadyToUse == nil || !*volumeSnapshot.Status.ReadyToUse {
							r.Log.V(0).Info("Volume snapshot exists but it is not ready", "volume", volumeSnapshot.Name)
							return &NotReadyToUseError{}
						}
						r.Log.V(0).Info("Volume snapshot is ready to use", "volume", volumeSnapshot.Name)
					}
				}
			}
		}
	}
	return nil
}

func (r *BackupSessionReconciler) snapshotVolumes(vms []corev1.VolumeMount, podSpec *corev1.PodSpec) (err error) {
	// Snapshot/check all of the volumes. If at least one of the snapshots is
	// not ready to use, we reschedule.
	for _, vm := range vms {
		for _, volume := range podSpec.Volumes {
			if vm.Name == volume.Name {
				err = r.snapshotVolume(volume)
				if err != nil {
					switch err.(type) {
					case *NotReadyToUseError:
						// Keep snapshotting the remaining volumes, but make sure the
						// named return value is NotReadyToUseError once we are done.
						defer func() {
							err = &NotReadyToUseError{}
						}()
					default:
						return
					}
				}
			}
		}
	}
	return
}

func (r *BackupSessionReconciler) deleteVolumeSnapshots(target formolv1alpha1.Target) error {
	// TODO: stub; snapshot cleanup is not implemented yet.
	return nil
}

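The comment block in backupSnapshot above sketches the intended next step: create fresh volumes from the ready snapshots and mount them in the backup Job. A minimal sketch of that step, assuming the standard CSI snapshot data source and the k8s.io/api v0.26 types pinned in go.mod; the helper name, the "-restore" suffix, and the 1Gi size are placeholders, not from this commit:

// Assumed imports: corev1 "k8s.io/api/core/v1",
// metav1 "k8s.io/apimachinery/pkg/apis/meta/v1",
// resource "k8s.io/apimachinery/pkg/api/resource".
func newPVCFromSnapshot(namespace, snapshotName string) *corev1.PersistentVolumeClaim {
	apiGroup := "snapshot.storage.k8s.io"
	return &corev1.PersistentVolumeClaim{
		ObjectMeta: metav1.ObjectMeta{
			Namespace: namespace,
			Name:      snapshotName + "-restore", // placeholder naming scheme
		},
		Spec: corev1.PersistentVolumeClaimSpec{
			AccessModes: []corev1.PersistentVolumeAccessMode{corev1.ReadWriteOnce},
			// Provision the new volume from the VolumeSnapshot created above.
			DataSource: &corev1.TypedLocalObjectReference{
				APIGroup: &apiGroup,
				Kind:     "VolumeSnapshot",
				Name:     snapshotName,
			},
			Resources: corev1.ResourceRequirements{
				Requests: corev1.ResourceList{
					// Placeholder: must be at least the size of the source volume.
					corev1.ResourceStorage: resource.MustParse("1Gi"),
				},
			},
		},
	}
}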
@@ -1,6 +1,8 @@
package controllers

import (
	volumesnapshotv1 "github.com/kubernetes-csi/external-snapshotter/client/v6/apis/volumesnapshot/v1"
	corev1 "k8s.io/api/core/v1"
	"k8s.io/apimachinery/pkg/runtime"
	utilruntime "k8s.io/apimachinery/pkg/util/runtime"
	clientgoscheme "k8s.io/client-go/kubernetes/scheme"
@@ -20,6 +22,8 @@ var (
func init() {
	utilruntime.Must(clientgoscheme.AddToScheme(scheme))
	utilruntime.Must(formolv1alpha1.AddToScheme(scheme))
	utilruntime.Must(volumesnapshotv1.AddToScheme(scheme))
	utilruntime.Must(corev1.AddToScheme(scheme))
}

func StartServer() {
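Registering volumesnapshotv1 and corev1 in the scheme is what lets the controller-runtime client used throughout this commit (r.Get, r.List, r.Create) map VolumeSnapshot and core Go types to their API groups; without AddToScheme those calls fail with "no kind is registered" errors. A fragment showing a client built against this scheme (assumes the usual controller-runtime imports; illustration only, not part of the commit):

// Assumed imports: "context",
// "sigs.k8s.io/controller-runtime/pkg/client",
// config "sigs.k8s.io/controller-runtime/pkg/client/config".
cfg := config.GetConfigOrDie()
c, err := client.New(cfg, client.Options{Scheme: scheme})
if err != nil {
	panic(err)
}
snaps := volumesnapshotv1.VolumeSnapshotList{}
// This List only decodes because volumesnapshotv1.AddToScheme registered the types.
if err := c.List(context.TODO(), &snaps); err != nil {
	panic(err)
}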
formol (submodule)
@@ -1 +1 @@
-Subproject commit f890962221bf05fb54c9d46d839957a59d644cc8
+Subproject commit d8b685c1ab88f0edf34b17850e6359b1eb632d25
go.mod
@@ -5,6 +5,7 @@ go 1.19
require (
	github.com/desmo999r/formol v0.8.0
	github.com/go-logr/logr v1.2.3
+	github.com/kubernetes-csi/external-snapshotter/client/v6 v6.2.0
	github.com/spf13/cobra v1.6.1
	k8s.io/api v0.26.1
	k8s.io/apimachinery v0.26.1
@@ -22,11 +23,11 @@ require (
	github.com/go-logr/zapr v1.2.3 // indirect
	github.com/go-openapi/jsonpointer v0.19.5 // indirect
	github.com/go-openapi/jsonreference v0.20.0 // indirect
-	github.com/go-openapi/swag v0.19.14 // indirect
+	github.com/go-openapi/swag v0.22.3 // indirect
	github.com/gogo/protobuf v1.3.2 // indirect
	github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da // indirect
	github.com/golang/protobuf v1.5.2 // indirect
-	github.com/google/gnostic v0.5.7-v3refs // indirect
+	github.com/google/gnostic v0.6.9 // indirect
	github.com/google/go-cmp v0.5.9 // indirect
	github.com/google/gofuzz v1.1.0 // indirect
	github.com/google/uuid v1.1.2 // indirect
@@ -34,7 +35,7 @@ require (
	github.com/inconshreveable/mousetrap v1.1.0 // indirect
	github.com/josharian/intern v1.0.0 // indirect
	github.com/json-iterator/go v1.1.12 // indirect
-	github.com/mailru/easyjson v0.7.6 // indirect
+	github.com/mailru/easyjson v0.7.7 // indirect
	github.com/matttproud/golang_protobuf_extensions v1.0.2 // indirect
	github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd // indirect
	github.com/modern-go/reflect2 v1.0.2 // indirect
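For reference, the new direct dependency above would normally be added with the Go toolchain, which also refreshes the indirect entries seen in these hunks:

go get github.com/kubernetes-csi/external-snapshotter/client/v6@v6.2.0
go mod tidy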