snapshot initial commit with some BackupConfiguration and BackupSession controllers

This commit is contained in:
parent d018b81655
commit 6d83e59171
.gitignore (vendored): 2 lines changed
@@ -1,3 +1,5 @@
*~

# Binaries for programs and plugins
*.exe
*.exe~
Dockerfile: 13 lines changed
@@ -1,5 +1,7 @@
# Build the manager binary
FROM golang:alpine as builder
FROM golang:1.19 as builder
ARG TARGETOS
ARG TARGETARCH

WORKDIR /workspace
# Copy the Go Modules manifests
@@ -12,17 +14,20 @@ RUN go mod download
# Copy the go source
COPY main.go main.go
COPY api/ api/
COPY pkg/ pkg/
COPY controllers/ controllers/

# Build
RUN CGO_ENABLED=0 GOOS=linux GOARCH=arm64 GO111MODULE=on go build -a -o manager main.go
# The GOARCH has no default value, so that the binary is built for the host where the command
# was called. For example, if we call make docker-build in a local env on an Apple Silicon (M1) machine,
# the docker BUILDPLATFORM arg will be linux/arm64, while for an x86 Apple it will be linux/amd64. Therefore,
# by leaving it empty we ensure that the container and the binary shipped in it have the same platform.
RUN CGO_ENABLED=0 GOOS=${TARGETOS:-linux} GOARCH=${TARGETARCH} go build -a -o manager main.go

# Use distroless as minimal base image to package the manager binary
# Refer to https://github.com/GoogleContainerTools/distroless for more details
FROM gcr.io/distroless/static:nonroot
WORKDIR /
COPY --from=builder /workspace/manager .
USER nonroot:nonroot
USER 65532:65532

ENTRYPOINT ["/manager"]
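As a quick illustration of the comment above (not part of this commit), the platform a Go binary was compiled for is visible at runtime, which makes it easy to confirm that the image and the manager binary really do share a platform:

```go
package main

import (
    "fmt"
    "runtime"
)

func main() {
    // runtime.GOOS and runtime.GOARCH are fixed at compile time by the
    // GOOS/GOARCH (here TARGETOS/TARGETARCH) values passed to `go build`.
    fmt.Printf("manager built for %s/%s\n", runtime.GOOS, runtime.GOARCH)
}
```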
LICENSE: 201 lines changed
@@ -1,201 +0,0 @@
(The repository's LICENSE file, the full standard Apache License, Version 2.0 text, is deleted by this commit.)
Makefile: 121 lines changed
@@ -1,8 +1,8 @@

# Image URL to use all building/pushing image targets
IMG ?= desmo999r/formolcontroller:0.3.0
# Produce CRDs that work back to Kubernetes 1.11 (no version conversion)
CRD_OPTIONS ?= "crd:trivialVersions=true,preserveUnknownFields=false,crdVersions=v1"
IMG ?= controller:latest
# ENVTEST_K8S_VERSION refers to the version of kubebuilder assets to be downloaded by envtest binary.
ENVTEST_K8S_VERSION = 1.25.0

# Get the currently used golang install path (in GOPATH/bin, unless GOBIN is set)
ifeq (,$(shell go env GOBIN))
@@ -12,11 +12,11 @@ GOBIN=$(shell go env GOBIN)
endif

# Setting SHELL to bash allows bash commands to be executed by recipes.
# This is a requirement for 'setup-envtest.sh' in the test target.
# Options are set to exit when a recipe line exits non-zero or a piped command fails.
SHELL = /usr/bin/env bash -o pipefail
.SHELLFLAGS = -ec

.PHONY: all
all: build

##@ General
@@ -32,79 +32,126 @@ all: build
# More info on the awk command:
# http://linuxcommand.org/lc3_adv_awk.php

.PHONY: help
help: ## Display this help.
    @awk 'BEGIN {FS = ":.*##"; printf "\nUsage:\n make \033[36m<target>\033[0m\n"} /^[a-zA-Z_0-9-]+:.*?##/ { printf " \033[36m%-15s\033[0m %s\n", $$1, $$2 } /^##@/ { printf "\n\033[1m%s\033[0m\n", substr($$0, 5) } ' $(MAKEFILE_LIST)

##@ Development

.PHONY: manifests
manifests: controller-gen ## Generate WebhookConfiguration, ClusterRole and CustomResourceDefinition objects.
    $(CONTROLLER_GEN) $(CRD_OPTIONS) rbac:roleName=manager-role webhook paths="./..." output:crd:artifacts:config=config/crd/bases
    $(CONTROLLER_GEN) rbac:roleName=manager-role crd webhook paths="./..." output:crd:artifacts:config=config/crd/bases

.PHONY: generate
generate: controller-gen ## Generate code containing DeepCopy, DeepCopyInto, and DeepCopyObject method implementations.
    $(CONTROLLER_GEN) object:headerFile="hack/boilerplate.go.txt" paths="./..."

.PHONY: fmt
fmt: ## Run go fmt against code.
    go fmt ./...

.PHONY: vet
vet: ## Run go vet against code.
    go vet ./...

ENVTEST_ASSETS_DIR=$(shell pwd)/testbin
test: manifests generate fmt vet ## Run tests.
    mkdir -p ${ENVTEST_ASSETS_DIR}
    test -f ${ENVTEST_ASSETS_DIR}/setup-envtest.sh || curl -sSLo ${ENVTEST_ASSETS_DIR}/setup-envtest.sh https://raw.githubusercontent.com/kubernetes-sigs/controller-runtime/v0.8.3/hack/setup-envtest.sh
    source ${ENVTEST_ASSETS_DIR}/setup-envtest.sh; fetch_envtest_tools $(ENVTEST_ASSETS_DIR); setup_envtest_env $(ENVTEST_ASSETS_DIR); go test ./... -coverprofile cover.out
.PHONY: test
test: manifests generate fmt vet envtest ## Run tests.
    KUBEBUILDER_ASSETS="$(shell $(ENVTEST) use $(ENVTEST_K8S_VERSION) --bin-dir $(LOCALBIN) -p path)" go test ./... -coverprofile cover.out

##@ Build

build: generate fmt vet ## Build manager binary.
.PHONY: build
build: manifests generate fmt vet ## Build manager binary.
    go build -o bin/manager main.go

.PHONY: run
run: manifests generate fmt vet ## Run a controller from your host.
    go run ./main.go

# If you wish to build the manager image targeting other platforms you can use the --platform flag.
# (i.e. docker build --platform linux/arm64). However, you must enable Docker BuildKit for it.
# More info: https://docs.docker.com/develop/develop-images/build_enhancements/
.PHONY: docker-build
docker-build: test ## Build docker image with the manager.
    podman build --disable-compression --format=docker . -t ${IMG}
    docker build -t ${IMG} .

.PHONY: docker-push
docker-push: ## Push docker image with the manager.
    podman push ${IMG}
    docker push ${IMG}

docker: docker-build docker-push
# PLATFORMS defines the target platforms for the manager image, to provide support for multiple
# architectures (i.e. make docker-buildx IMG=myregistry/myoperator:0.0.1). To use this option you need to:
# - be able to use docker buildx. More info: https://docs.docker.com/build/buildx/
# - have BuildKit enabled. More info: https://docs.docker.com/develop/develop-images/build_enhancements/
# - be able to push the image to your registry (i.e. if you do not provide a valid value via IMG=<myregistry/image:<tag>> the export will fail)
# To properly support more than one platform you should use this option.
PLATFORMS ?= linux/arm64,linux/amd64,linux/s390x,linux/ppc64le
.PHONY: docker-buildx
docker-buildx: test ## Build and push docker image for the manager for cross-platform support
    # copy existing Dockerfile and insert --platform=${BUILDPLATFORM} into Dockerfile.cross, and preserve the original Dockerfile
    sed -e '1 s/\(^FROM\)/FROM --platform=\$$\{BUILDPLATFORM\}/; t' -e ' 1,// s//FROM --platform=\$$\{BUILDPLATFORM\}/' Dockerfile > Dockerfile.cross
    - docker buildx create --name project-v3-builder
    docker buildx use project-v3-builder
    - docker buildx build --push --platform=$(PLATFORMS) --tag ${IMG} -f Dockerfile.cross .
    - docker buildx rm project-v3-builder
    rm Dockerfile.cross

##@ Deployment

ifndef ignore-not-found
  ignore-not-found = false
endif

.PHONY: install
install: manifests kustomize ## Install CRDs into the K8s cluster specified in ~/.kube/config.
    $(KUSTOMIZE) build config/crd | kubectl apply -f -

uninstall: manifests kustomize ## Uninstall CRDs from the K8s cluster specified in ~/.kube/config.
    $(KUSTOMIZE) build config/crd | kubectl delete -f -
.PHONY: uninstall
uninstall: manifests kustomize ## Uninstall CRDs from the K8s cluster specified in ~/.kube/config. Call with ignore-not-found=true to ignore resource not found errors during deletion.
    $(KUSTOMIZE) build config/crd | kubectl delete --ignore-not-found=$(ignore-not-found) -f -

.PHONY: deploy
deploy: manifests kustomize ## Deploy controller to the K8s cluster specified in ~/.kube/config.
    cd config/manager && $(KUSTOMIZE) edit set image controller=${IMG}
    $(KUSTOMIZE) build config/default | kubectl apply -f -

undeploy: ## Undeploy controller from the K8s cluster specified in ~/.kube/config.
    $(KUSTOMIZE) build config/default | kubectl delete -f -
.PHONY: undeploy
undeploy: ## Undeploy controller from the K8s cluster specified in ~/.kube/config. Call with ignore-not-found=true to ignore resource not found errors during deletion.
    $(KUSTOMIZE) build config/default | kubectl delete --ignore-not-found=$(ignore-not-found) -f -

##@ Build Dependencies

CONTROLLER_GEN = $(shell pwd)/bin/controller-gen
controller-gen: ## Download controller-gen locally if necessary.
    $(call go-get-tool,$(CONTROLLER_GEN),sigs.k8s.io/controller-tools/cmd/controller-gen@v0.4.1)
## Location to install dependencies to
LOCALBIN ?= $(shell pwd)/bin
$(LOCALBIN):
    mkdir -p $(LOCALBIN)

KUSTOMIZE = $(shell pwd)/bin/kustomize
kustomize: ## Download kustomize locally if necessary.
    $(call go-get-tool,$(KUSTOMIZE),sigs.k8s.io/kustomize/kustomize/v3@v3.8.7)
## Tool Binaries
KUSTOMIZE ?= $(LOCALBIN)/kustomize
CONTROLLER_GEN ?= $(LOCALBIN)/controller-gen
ENVTEST ?= $(LOCALBIN)/setup-envtest

# go-get-tool will 'go get' any package $2 and install it to $1.
PROJECT_DIR := $(shell dirname $(abspath $(lastword $(MAKEFILE_LIST))))
define go-get-tool
@[ -f $(1) ] || { \
set -e ;\
TMP_DIR=$$(mktemp -d) ;\
cd $$TMP_DIR ;\
go mod init tmp ;\
echo "Downloading $(2)" ;\
GOBIN=$(PROJECT_DIR)/bin go get $(2) ;\
rm -rf $$TMP_DIR ;\
}
endef
## Tool Versions
KUSTOMIZE_VERSION ?= v3.8.7
CONTROLLER_TOOLS_VERSION ?= v0.10.0

KUSTOMIZE_INSTALL_SCRIPT ?= "https://raw.githubusercontent.com/kubernetes-sigs/kustomize/master/hack/install_kustomize.sh"
.PHONY: kustomize
kustomize: $(KUSTOMIZE) ## Download kustomize locally if necessary. If wrong version is installed, it will be removed before downloading.
$(KUSTOMIZE): $(LOCALBIN)
    @if test -x $(LOCALBIN)/kustomize && ! $(LOCALBIN)/kustomize version | grep -q $(KUSTOMIZE_VERSION); then \
        echo "$(LOCALBIN)/kustomize version is not expected $(KUSTOMIZE_VERSION). Removing it before installing."; \
        rm -rf $(LOCALBIN)/kustomize; \
    fi
    test -s $(LOCALBIN)/kustomize || { curl -Ss $(KUSTOMIZE_INSTALL_SCRIPT) | bash -s -- $(subst v,,$(KUSTOMIZE_VERSION)) $(LOCALBIN); }

.PHONY: controller-gen
controller-gen: $(CONTROLLER_GEN) ## Download controller-gen locally if necessary. If wrong version is installed, it will be overwritten.
$(CONTROLLER_GEN): $(LOCALBIN)
    test -s $(LOCALBIN)/controller-gen && $(LOCALBIN)/controller-gen --version | grep -q $(CONTROLLER_TOOLS_VERSION) || \
    GOBIN=$(LOCALBIN) go install sigs.k8s.io/controller-tools/cmd/controller-gen@$(CONTROLLER_TOOLS_VERSION)

.PHONY: envtest
envtest: $(ENVTEST) ## Download envtest-setup locally if necessary.
$(ENVTEST): $(LOCALBIN)
    test -s $(LOCALBIN)/setup-envtest || GOBIN=$(LOCALBIN) go install sigs.k8s.io/controller-runtime/tools/setup-envtest@latest
README.md: 93 lines changed
@@ -1,3 +1,94 @@
# formol
// TODO(user): Add simple overview of use/purpose

## Description
// TODO(user): An in-depth paragraph about your project and overview of use

## Getting Started
You’ll need a Kubernetes cluster to run against. You can use [KIND](https://sigs.k8s.io/kind) to get a local cluster for testing, or run against a remote cluster.
**Note:** Your controller will automatically use the current context in your kubeconfig file (i.e. whatever cluster `kubectl cluster-info` shows).

### Running on the cluster
1. Install Instances of Custom Resources:

```sh
kubectl apply -f config/samples/
```

2. Build and push your image to the location specified by `IMG`:

```sh
make docker-build docker-push IMG=<some-registry>/formol:tag
```

3. Deploy the controller to the cluster with the image specified by `IMG`:

```sh
make deploy IMG=<some-registry>/formol:tag
```

### Uninstall CRDs
To delete the CRDs from the cluster:

```sh
make uninstall
```

### Undeploy controller
Undeploy the controller from the cluster:

```sh
make undeploy
```

## Contributing
// TODO(user): Add detailed information on how you would like others to contribute to this project

### How it works
This project aims to follow the Kubernetes [Operator pattern](https://kubernetes.io/docs/concepts/extend-kubernetes/operator/).

It uses [Controllers](https://kubernetes.io/docs/concepts/architecture/controller/),
which provide a reconcile function responsible for synchronizing resources until the desired state is reached on the cluster (a minimal reconciler skeleton is sketched just after this README section).

### Test It Out
1. Install the CRDs into the cluster:

```sh
make install
```

2. Run your controller (this will run in the foreground, so switch to a new terminal if you want to leave it running):

```sh
make run
```

**NOTE:** You can also run this in one step by running: `make install run`

### Modifying the API definitions
If you are editing the API definitions, generate the manifests such as CRs or CRDs using:

```sh
make manifests
```

**NOTE:** Run `make --help` for more information on all potential `make` targets

More information can be found via the [Kubebuilder Documentation](https://book.kubebuilder.io/introduction.html)

## License

Copyright 2023.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.

My k8s backup solution
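To make the reconcile function mentioned under "How it works" concrete, here is a minimal, hypothetical skeleton of what one of this project's controllers could look like with controller-runtime. The type name and the steps in the comments are illustrative assumptions, not code from this commit:

```go
package controllers

import (
    "context"

    ctrl "sigs.k8s.io/controller-runtime"
    "sigs.k8s.io/controller-runtime/pkg/client"
)

// BackupConfigurationReconciler is a sketch of a controller-runtime reconciler.
type BackupConfigurationReconciler struct {
    client.Client
}

// Reconcile is called whenever a watched BackupConfiguration changes; it must
// drive the cluster toward the state described by the resource's spec.
func (r *BackupConfigurationReconciler) Reconcile(ctx context.Context, req ctrl.Request) (ctrl.Result, error) {
    // 1. Fetch the object named by req.NamespacedName with r.Get(ctx, ...).
    // 2. Compare the observed state of its dependents with the desired spec.
    // 3. Create, update, or delete dependents to converge, then update Status.
    return ctrl.Result{}, nil // empty Result: no requeue requested
}
```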
BIN api/v1alpha1/.backupconfiguration_types.go.un~ (new file, binary file not shown)
BIN api/v1alpha1/.backupsession_types.go.un~ (new file, binary file not shown)
BIN api/v1alpha1/.common.go.un~ (new file, binary file not shown)
BIN api/v1alpha1/.function_types.go.un~ (new file, binary file not shown)
BIN api/v1alpha1/.repo_types.go.un~ (new file, binary file not shown)
api/v1alpha1/backupconfiguration_types.go
@@ -1,5 +1,5 @@
/*

Copyright 2023.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
@@ -17,87 +17,82 @@ limitations under the License.
package v1alpha1

import (
    corev1 "k8s.io/api/core/v1"
    metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

// +kubebuilder:validation:Enum=Deployment;StatefulSet;Pod
type TargetKind string

const (
    SidecarKind   string = "Sidecar"
    JobKind       string = "Job"
    BackupVolumes string = "Volumes"
    Deployment  TargetKind = "Deployment"
    StatefulSet TargetKind = "StatefulSet"
    Pod         TargetKind = "Pod"
)

// +kubebuilder:validation:Enum=Online;Snapshot;Job
type BackupType string

const (
    SnapshotKind BackupType = "Snapshot"
    OnlineKind   BackupType = "Online"
    JobKind      BackupType = "Job"
)

type Step struct {
    Name string `json:"name"`
    // +optional
    Finalize *bool `json:"finalize,omitempty"`
    Finalize *bool `json:"finalize"`
}

type Hook struct {
    Cmd string `json:"cmd"`
    // +optional
    Args []string `json:"args,omitempty"`
type TargetContainer struct {
    Name  string   `json:"name"`
    Paths []string `json:"paths,omitempty"`
    // +kubebuilder:default:=2
    Retry int    `json:"retry"`
    Steps []Step `json:"steps,omitempty"`
}

type Target struct {
    // +kubebuilder:validation:Enum=Sidecar;Job
    Kind string `json:"kind"`
    Name string `json:"name"`
    // +optional
    ContainerName string `json:"containerName"`
    // +optional
    ApiVersion string `json:"apiVersion,omitempty"`
    // +optional
    VolumeMounts []corev1.VolumeMount `json:"volumeMounts,omitempty"`
    // +optional
    Paths []string `json:"paths,omitempty"`
    // +optional
    // +kubebuilder:validation:MinItems=1
    Steps []Step `json:"steps,omitempty"`
    // +kubebuilder:default:=2
    Retry int `json:"retry,omitempty"`
    BackupType `json:"backupType"`
    TargetKind `json:"targetKind"`
    TargetName string            `json:"targetName"`
    Containers []TargetContainer `json:"containers"`
}

type Keep struct {
    Last    int32 `json:"last,omitempty"`
    Daily   int32 `json:"daily,omitempty"`
    Weekly  int32 `json:"weekly,omitempty"`
    Monthly int32 `json:"monthly,omitempty"`
    Yearly  int32 `json:"yearly,omitempty"`
    Last    int32 `json:"last"`
    Daily   int32 `json:"daily"`
    Weekly  int32 `json:"weekly"`
    Monthly int32 `json:"monthly"`
    Yearly  int32 `json:"yearly"`
}

// BackupConfigurationSpec defines the desired state of BackupConfiguration
type BackupConfigurationSpec struct {
    Repository string `json:"repository"`
    Image      string `json:"image"`

    // +optional
    Suspend *bool `json:"suspend,omitempty"`

    // +optional
    Schedule string `json:"schedule,omitempty"`
    // +kubebuilder:validation:MinItems=1
    Targets []Target `json:"targets"`
    // +optional
    Keep `json:"keep,omitempty"`
    // +kubebuilder:default:=false
    Suspend  *bool  `json:"suspend"`
    Schedule string `json:"schedule"`
    Keep     `json:"keep"`
    Targets  []Target `json:"targets"`
}

// BackupConfigurationStatus defines the observed state of BackupConfiguration
type BackupConfigurationStatus struct {
    // INSERT ADDITIONAL STATUS FIELD - define observed state of cluster
    // Important: Run "make" to regenerate code after modifying this file
    LastBackupTime *metav1.Time `json:"lastBackupTime,omitempty"`
    Suspended      bool         `json:"suspended"`
    ActiveCronJob  bool         `json:"activeCronJob"`
    ActiveSidecar  bool         `json:"activeSidecar"`
}

//+kubebuilder:object:root=true
//+kubebuilder:subresource:status
//+kubebuilder:resource:shortName="bc"
//+kubebuilder:printcolumn:name="Suspended",type=boolean,JSONPath=`.spec.suspend`
//+kubebuilder:printcolumn:name="Schedule",type=string,JSONPath=`.spec.schedule`

// BackupConfiguration is the Schema for the backupconfigurations API
// +kubebuilder:object:root=true
// +kubebuilder:resource:shortName="bc"
// +kubebuilder:subresource:status
// +kubebuilder:printcolumn:name="Suspended",type=boolean,JSONPath=`.spec.suspend`
// +kubebuilder:printcolumn:name="Schedule",type=string,JSONPath=`.spec.schedule`
type BackupConfiguration struct {
    metav1.TypeMeta   `json:",inline"`
    metav1.ObjectMeta `json:"metadata,omitempty"`
@@ -106,7 +101,7 @@ type BackupConfiguration struct {
    Status BackupConfigurationStatus `json:"status,omitempty"`
}

// +kubebuilder:object:root=true
//+kubebuilder:object:root=true

// BackupConfigurationList contains a list of BackupConfiguration
type BackupConfigurationList struct {
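A hedged sketch of what a BackupConfiguration built against the reworked types above could look like in Go. The module import path and the image value are assumptions (neither is shown in this commit); the field names come straight from the structs in the diff:

```go
package examples

import (
    metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"

    formolv1alpha1 "github.com/desmo999r/formol/api/v1alpha1" // assumed module path
)

func exampleBackupConfiguration() formolv1alpha1.BackupConfiguration {
    return formolv1alpha1.BackupConfiguration{
        ObjectMeta: metav1.ObjectMeta{Name: "demo", Namespace: "default"},
        Spec: formolv1alpha1.BackupConfigurationSpec{
            Repository: "demo-repo",             // name of a Repo resource
            Image:      "backup-sidecar:latest", // hypothetical sidecar image
            Schedule:   "0 3 * * *",             // cron syntax
            Targets: []formolv1alpha1.Target{{
                BackupType: formolv1alpha1.OnlineKind, // embedded BackupType field
                TargetKind: formolv1alpha1.Deployment, // embedded TargetKind field
                TargetName: "my-app",
                Containers: []formolv1alpha1.TargetContainer{{
                    Name:  "app",
                    Paths: []string{"/data"},
                }},
            }},
        },
    }
}
```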
api/v1alpha1/backupconfiguration_types.go~: 115 lines (new file)
@@ -0,0 +1,115 @@
/*
Copyright 2023.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package v1alpha1

import (
    metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

// +kubebuilder:validation:Enum=Deployment;StatefulSet;Pod
type TargetKind string

const (
    Deployment  TargetKind = "Deployment"
    StatefulSet TargetKind = "StatefulSet"
    Pod         TargetKind = "Pod"
)

// +kubebuilder:validation:Enum=Online;Snapshot;Job
type BackupType string

const (
    SnapshotKind BackupType = "Snapshot"
    OnlineKind   BackupType = "Online"
    JobKind      BackupType = "Job"
)

type Step struct {
    Name string `json:"name"`
    // +optional
    Finalize *bool `json:"finalize"`
}

type TargetContainer struct {
    Name  string   `json:"name"`
    Paths []string `json:"paths,omitempty"`
    // +kubebuilder:default:=2
    Retry int    `json:"retry"`
    Steps []Step `json:"steps,omitempty"`
}

type Target struct {
    BackupType `json:"backupType"`
    TargetKind `json:"targetKind"`
    TargetName string            `json:"targetName"`
    Containers []TargetContainer `json:"containers"`
}

type Keep struct {
    Last    int32 `json:"last"`
    Daily   int32 `json:"daily"`
    Weekly  int32 `json:"weekly"`
    Monthly int32 `json:"monthly"`
    Yearly  int32 `json:"yearly"`
}

// BackupConfigurationSpec defines the desired state of BackupConfiguration
type BackupConfigurationSpec struct {
    Repo  string `json:"repo"`
    Image string `json:"image"`
    // +kubebuilder:default:=false
    Suspend  *bool  `json:"suspend"`
    Schedule string `json:"schedule"`
    Keep     `json:"keep"`
    Targets  []Target `json:"targets"`
}

// BackupConfigurationStatus defines the observed state of BackupConfiguration
type BackupConfigurationStatus struct {
    LastBackupTime *metav1.Time `json:"lastBackupTime,omitempty"`
    Suspended      bool         `json:"suspended"`
    ActiveCronJob  bool         `json:"activeCronJob"`
    ActiveSidecar  bool         `json:"activeSidecar"`
}

//+kubebuilder:object:root=true
//+kubebuilder:subresource:status
//+kubebuilder:resource:shortName="bc"
//+kubebuilder:printcolumn:name="Suspended",type=boolean,JSONPath=`.spec.suspend`
//+kubebuilder:printcolumn:name="Schedule",type=string,JSONPath=`.spec.schedule`

// BackupConfiguration is the Schema for the backupconfigurations API
type BackupConfiguration struct {
    metav1.TypeMeta   `json:",inline"`
    metav1.ObjectMeta `json:"metadata,omitempty"`

    Spec   BackupConfigurationSpec   `json:"spec,omitempty"`
    Status BackupConfigurationStatus `json:"status,omitempty"`
}

//+kubebuilder:object:root=true

// BackupConfigurationList contains a list of BackupConfiguration
type BackupConfigurationList struct {
    metav1.TypeMeta `json:",inline"`
    metav1.ListMeta `json:"metadata,omitempty"`
    Items           []BackupConfiguration `json:"items"`
}

func init() {
    SchemeBuilder.Register(&BackupConfiguration{}, &BackupConfigurationList{})
}
api/v1alpha1/backupsession_types.go
@@ -1,5 +1,5 @@
/*

Copyright 2023.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
@@ -21,8 +21,28 @@ import (
    metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

// EDIT THIS FILE! THIS IS SCAFFOLDING FOR YOU TO OWN!
// NOTE: json tags are required. Any new fields you add must have json tags for the fields to be serialized.
type SessionState string

const (
    New      SessionState = "New"
    Init     SessionState = "Initializing"
    Running  SessionState = "Running"
    Waiting  SessionState = "Waiting"
    Finalize SessionState = "Finalizing"
    Success  SessionState = "Success"
    Failure  SessionState = "Failure"
    Deleted  SessionState = "Deleted"
)

type TargetStatus struct {
    Name         string `json:"name"`
    Kind         string `json:"kind"`
    SessionState `json:"state"`
    SnapshotId   string           `json:"snapshotId"`
    StartTime    *metav1.Time     `json:"startTime"`
    Duration     *metav1.Duration `json:"duration"`
    Try          int              `json:"try"`
}

// BackupSessionSpec defines the desired state of BackupSession
type BackupSessionSpec struct {
@@ -31,21 +51,15 @@ type BackupSessionSpec struct {

// BackupSessionStatus defines the observed state of BackupSession
type BackupSessionStatus struct {
    // INSERT ADDITIONAL STATUS FIELD - define observed state of cluster
    // Important: Run "make" to regenerate code after modifying this file
    // +optional
    SessionState `json:"state,omitempty"`
    // +optional
    StartTime *metav1.Time `json:"startTime,omitempty"`
    // +optional
    Targets []TargetStatus `json:"target,omitempty"`
    // +optional
    Keep string `json:"keep,omitempty"`
    SessionState `json:"state"`
    StartTime    *metav1.Time   `json:"startTime"`
    Targets      []TargetStatus `json:"target"`
    Keep         string         `json:"keep"`
}

// +kubebuilder:object:root=true
//+kubebuilder:object:root=true
//+kubebuilder:subresource:status
// +kubebuilder:resource:shortName="bs"
// +kubebuilder:subresource:status
// +kubebuilder:printcolumn:name="Ref",type=string,JSONPath=`.spec.ref.name`
// +kubebuilder:printcolumn:name="State",type=string,JSONPath=`.status.state`
// +kubebuilder:printcolumn:name="Started",type=string,format=date-time,JSONPath=`.status.startTime`
@@ -60,7 +74,7 @@ type BackupSession struct {
    Status BackupSessionStatus `json:"status,omitempty"`
}

// +kubebuilder:object:root=true
//+kubebuilder:object:root=true

// BackupSessionList contains a list of BackupSession
type BackupSessionList struct {
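The SessionState values above form a small state machine (New, Initializing, Running, Finalizing, Success/Failure). The real transition logic lives in the controllers and is not part of this diff; the following is only an illustrative sketch of a happy-path progression, again assuming the module import path:

```go
package examples

import formolv1alpha1 "github.com/desmo999r/formol/api/v1alpha1" // assumed module path

// nextState sketches a happy-path walk through the session states; a real
// controller would branch to Waiting, Failure, or Deleted on errors/events.
func nextState(s formolv1alpha1.SessionState) formolv1alpha1.SessionState {
    switch s {
    case formolv1alpha1.New:
        return formolv1alpha1.Init // "Initializing"
    case formolv1alpha1.Init:
        return formolv1alpha1.Running
    case formolv1alpha1.Running:
        return formolv1alpha1.Finalize // "Finalizing"
    case formolv1alpha1.Finalize:
        return formolv1alpha1.Success
    default:
        return s // terminal or out-of-band states stay put
    }
}
```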
api/v1alpha1/backupsession_types.go~: 89 lines (new file)
@@ -0,0 +1,89 @@
/*
Copyright 2023.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package v1alpha1

import (
    corev1 "k8s.io/api/core/v1"
    metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
    //"k8s.io/apimachinery/pkg/types" (unused import, commented out so the file compiles)
)

type SessionState string

const (
    New      SessionState = "New"
    Init     SessionState = "Initializing"
    Running  SessionState = "Running"
    Waiting  SessionState = "Waiting"
    Finalize SessionState = "Finalizing"
    Success  SessionState = "Success"
    Failure  SessionState = "Failure"
    Deleted  SessionState = "Deleted"
)

type TargetStatus struct {
    Name         string `json:"name"`
    Kind         string `json:"kind"`
    SessionState `json:"state"`
    SnapshotId   string           `json:"snapshotId"`
    StartTime    *metav1.Time     `json:"startTime"`
    Duration     *metav1.Duration `json:"duration"`
    Try          int              `json:"try"`
}

// BackupSessionSpec defines the desired state of BackupSession
type BackupSessionSpec struct {
    Ref corev1.ObjectReference `json:"ref"`
}

// BackupSessionStatus defines the observed state of BackupSession
type BackupSessionStatus struct {
    SessionState `json:"state"`
    StartTime    *metav1.Time   `json:"startTime"`
    Targets      []TargetStatus `json:"target"`
    Keep         string         `json:"keep"`
}

//+kubebuilder:object:root=true
//+kubebuilder:subresource:status
// +kubebuilder:resource:shortName="bs"
// +kubebuilder:printcolumn:name="Ref",type=string,JSONPath=`.spec.ref.name`
// +kubebuilder:printcolumn:name="State",type=string,JSONPath=`.status.state`
// +kubebuilder:printcolumn:name="Started",type=string,format=date-time,JSONPath=`.status.startTime`
// +kubebuilder:printcolumn:name="Keep",type=string,JSONPath=`.status.keep`

// BackupSession is the Schema for the backupsessions API
type BackupSession struct {
    metav1.TypeMeta   `json:",inline"`
    metav1.ObjectMeta `json:"metadata,omitempty"`

    Spec   BackupSessionSpec   `json:"spec,omitempty"`
    Status BackupSessionStatus `json:"status,omitempty"`
}

//+kubebuilder:object:root=true

// BackupSessionList contains a list of BackupSession
type BackupSessionList struct {
    metav1.TypeMeta `json:",inline"`
    metav1.ListMeta `json:"metadata,omitempty"`
    Items           []BackupSession `json:"items"`
}

func init() {
    SchemeBuilder.Register(&BackupSession{}, &BackupSessionList{})
}
api/v1alpha1/common.go
@@ -1,47 +1,13 @@
package v1alpha1

import (
    metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

type SessionState string

const (
    New      SessionState = "New"
    Init     SessionState = "Initializing"
    Running  SessionState = "Running"
    Waiting  SessionState = "Waiting"
    Finalize SessionState = "Finalizing"
    Success  SessionState = "Success"
    Failure  SessionState = "Failure"
    Deleted  SessionState = "Deleted"
    // Environment variables used by the sidecar container
    RESTORE_ANNOTATION = "restore"
    // the name of the sidecar container
    SIDECARCONTAINER_NAME string = "formol"
    // the name of the container we back up when there is more than one container in the pod
    TARGETCONTAINER_TAG string = "FORMOL_TARGET"
    // Used by both the backupsession and restoresession controllers to identify the target deployment
    TARGET_NAME string = "TARGET_NAME"
    // Used by the restoresession controller
    RESTORESESSION_NAMESPACE string = "RESTORESESSION_NAMESPACE"
    RESTORESESSION_NAME      string = "RESTORESESSION_NAME"
    // Used by the backupsession controller
    POD_NAME      string = "POD_NAME"
    POD_NAMESPACE string = "POD_NAMESPACE"
)

type TargetStatus struct {
    Name string `json:"name"`
    Kind string `json:"kind"`
    // +optional
    SessionState `json:"state,omitempty"`
    // +optional
    SnapshotId string `json:"snapshotId,omitempty"`
    // +optional
    StartTime *metav1.Time `json:"startTime,omitempty"`
    // +optional
    Duration *metav1.Duration `json:"duration,omitempty"`
    // +optional
    Try int `json:"try,omitempty"`
}
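These constants double as the names of environment variables that the controllers set on the sidecar and job containers. How they are injected is not shown in this commit, but consumption on the sidecar side would plausibly look like this hypothetical sketch:

```go
package main

import (
    "fmt"
    "os"
)

func main() {
    // Values are expected to be set by the backupsession/restoresession
    // controllers on the container spec (assumption; not shown in this diff).
    target := os.Getenv("TARGET_NAME")
    ns := os.Getenv("RESTORESESSION_NAMESPACE")
    name := os.Getenv("RESTORESESSION_NAME")
    fmt.Printf("restoring target %s from RestoreSession %s/%s\n", target, ns, name)
}
```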
api/v1alpha1/common.go~: 6 lines (new file)
@@ -0,0 +1,6 @@
package v1alpha1

const (
    SIDECARCONTAINER_NAME string = "formol"
    TARGETCONTAINER_TAG   string = "FORMOL_TARGET"
)
api/v1alpha1/function_types.go
@@ -1,5 +1,5 @@
/*

Copyright 2023.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
@@ -21,20 +21,25 @@ import (
    metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

// EDIT THIS FILE! THIS IS SCAFFOLDING FOR YOU TO OWN!
// NOTE: json tags are required. Any new fields you add must have json tags for the fields to be serialized.
// FunctionStatus defines the observed state of Function
type FunctionStatus struct {
    // INSERT ADDITIONAL STATUS FIELD - define observed state of cluster
    // Important: Run "make" to regenerate code after modifying this file
}

// +kubebuilder:object:root=true
//+kubebuilder:object:root=true
//+kubebuilder:subresource:status

// Function is the Schema for the functions API
type Function struct {
    metav1.TypeMeta   `json:",inline"`
    metav1.ObjectMeta `json:"metadata,omitempty"`

    Spec corev1.Container `json:"spec"`
    Spec corev1.Container `json:"spec,omitempty"`
    Status FunctionStatus `json:"status,omitempty"`
}

// +kubebuilder:object:root=true
//+kubebuilder:object:root=true

// FunctionList contains a list of Function
type FunctionList struct {
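The notable design choice here is that Function's Spec is a plain corev1.Container rather than a custom spec type, so a Function is literally "a container to run as a backup/restore step". A hypothetical instance (names and image are illustrative, not from this commit):

```go
package examples

import (
    corev1 "k8s.io/api/core/v1"
    metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"

    formolv1alpha1 "github.com/desmo999r/formol/api/v1alpha1" // assumed module path
)

func exampleFunction() formolv1alpha1.Function {
    return formolv1alpha1.Function{
        ObjectMeta: metav1.ObjectMeta{Name: "pg-dump", Namespace: "default"},
        // Spec is a full corev1.Container: image, command, env, mounts, etc.
        Spec: corev1.Container{
            Name:    "pg-dump",
            Image:   "postgres:15", // hypothetical image
            Command: []string{"pg_dump", "-f", "/backups/db.sql", "mydb"},
        },
    }
}
```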
api/v1alpha1/function_types.go~: 64 lines (new file)
@@ -0,0 +1,64 @@
/*
Copyright 2023.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package v1alpha1

import (
    metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

// EDIT THIS FILE! THIS IS SCAFFOLDING FOR YOU TO OWN!
// NOTE: json tags are required. Any new fields you add must have json tags for the fields to be serialized.

// FunctionSpec defines the desired state of Function
type FunctionSpec struct {
    // INSERT ADDITIONAL SPEC FIELDS - desired state of cluster
    // Important: Run "make" to regenerate code after modifying this file

    // Foo is an example field of Function. Edit function_types.go to remove/update
    Foo string `json:"foo,omitempty"`
}

// FunctionStatus defines the observed state of Function
type FunctionStatus struct {
    // INSERT ADDITIONAL STATUS FIELD - define observed state of cluster
    // Important: Run "make" to regenerate code after modifying this file
}

//+kubebuilder:object:root=true
//+kubebuilder:subresource:status

// Function is the Schema for the functions API
type Function struct {
    metav1.TypeMeta   `json:",inline"`
    metav1.ObjectMeta `json:"metadata,omitempty"`

    Spec   FunctionSpec   `json:"spec,omitempty"`
    Status FunctionStatus `json:"status,omitempty"`
}

//+kubebuilder:object:root=true

// FunctionList contains a list of Function
type FunctionList struct {
    metav1.TypeMeta `json:",inline"`
    metav1.ListMeta `json:"metadata,omitempty"`
    Items           []Function `json:"items"`
}

func init() {
    SchemeBuilder.Register(&Function{}, &FunctionList{})
}
(unnamed file: the file header was not captured in this render; only a license-header change is shown)
@@ -1,5 +1,5 @@
/*

Copyright 2023.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
api/v1alpha1/repo_types.go
@@ -1,5 +1,5 @@
/*

Copyright 2023.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
@@ -17,12 +17,14 @@ limitations under the License.
package v1alpha1

import (
    "fmt"
    corev1 "k8s.io/api/core/v1"
    metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
    "strings"
)

// EDIT THIS FILE! THIS IS SCAFFOLDING FOR YOU TO OWN!
// NOTE: json tags are required. Any new fields you add must have json tags for the fields to be serialized.

type S3 struct {
    Server string `json:"server"`
    Bucket string `json:"bucket"`
@@ -31,26 +33,24 @@
}

type Backend struct {
    S3 `json:"s3"`
    // +optional
    S3 *S3 `json:"s3,omitempty"`
    // +optional
    Nfs *string `json:"nfs,omitempty"`
}

// RepoSpec defines the desired state of Repo
type RepoSpec struct {
    // INSERT ADDITIONAL SPEC FIELDS - desired state of cluster
    // Important: Run "make" to regenerate code after modifying this file

    // Foo is an example field of Repo. Edit Repo_types.go to remove/update
    Backend           `json:"backend"`
    RepositorySecrets string `json:"repositorySecrets"`
}

// RepoStatus defines the observed state of Repo
type RepoStatus struct {
    // INSERT ADDITIONAL STATUS FIELD - define observed state of cluster
    // Important: Run "make" to regenerate code after modifying this file
}

// +kubebuilder:object:root=true
//+kubebuilder:object:root=true
//+kubebuilder:subresource:status

// Repo is the Schema for the repoes API
type Repo struct {
@@ -61,7 +61,7 @@ type Repo struct {
    Status RepoStatus `json:"status,omitempty"`
}

// +kubebuilder:object:root=true
//+kubebuilder:object:root=true

// RepoList contains a list of Repo
type RepoList struct {
@@ -73,3 +73,37 @@ type RepoList struct {
func init() {
    SchemeBuilder.Register(&Repo{}, &RepoList{})
}

func (repo *Repo) GetResticEnv(backupConf BackupConfiguration) []corev1.EnvVar {
    env := []corev1.EnvVar{}
    if repo.Spec.Backend.S3 != nil {
        url := fmt.Sprintf("s3:http://%s/%s/%s-%s",
            repo.Spec.Backend.S3.Server,
            repo.Spec.Backend.S3.Bucket,
            strings.ToUpper(backupConf.Namespace),
            strings.ToLower(backupConf.Name))
        env = append(env, corev1.EnvVar{
            Name:  "RESTIC_REPOSITORY",
            Value: url,
        })
        for _, key := range []string{
            "AWS_ACCESS_KEY_ID",
            "AWS_SECRET_ACCESS_KEY",
            "RESTIC_PASSWORD",
        } {
            env = append(env, corev1.EnvVar{
                Name: key,
                ValueFrom: &corev1.EnvVarSource{
                    SecretKeyRef: &corev1.SecretKeySelector{
                        LocalObjectReference: corev1.LocalObjectReference{
                            Name: repo.Spec.RepositorySecrets,
                        },
                        Key: key,
                    },
                },
            })
        }
    }

    return env
}
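GetResticEnv assembles the environment restic needs: a repository URL derived from the S3 backend plus the backup target's namespace and name, and credentials pulled from a Secret. A small usage sketch (all values are made up, and the module import path is assumed):

```go
package examples

import (
    "fmt"

    formolv1alpha1 "github.com/desmo999r/formol/api/v1alpha1" // assumed module path
)

func exampleResticEnv() {
    repo := formolv1alpha1.Repo{
        Spec: formolv1alpha1.RepoSpec{
            Backend: formolv1alpha1.Backend{
                S3: &formolv1alpha1.S3{Server: "minio:9000", Bucket: "backups"},
            },
            RepositorySecrets: "repo-secrets", // Secret holding AWS_* and RESTIC_PASSWORD
        },
    }
    backupConf := formolv1alpha1.BackupConfiguration{}
    backupConf.Namespace = "default"
    backupConf.Name = "demo"

    env := repo.GetResticEnv(backupConf)
    // env[0] is RESTIC_REPOSITORY=s3:http://minio:9000/backups/DEFAULT-demo
    // (namespace upper-cased, name lower-cased, per the Sprintf above); the
    // remaining entries reference keys in the "repo-secrets" Secret.
    fmt.Println(env[0].Value)
}
```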
api/v1alpha1/repo_types.go~: 109 lines (new file)
@@ -0,0 +1,109 @@
/*
Copyright 2023.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package v1alpha1

import (
    "fmt"
    corev1 "k8s.io/api/core/v1"
    metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
    "strings"
)

// EDIT THIS FILE! THIS IS SCAFFOLDING FOR YOU TO OWN!
// NOTE: json tags are required. Any new fields you add must have json tags for the fields to be serialized.
type S3 struct {
    Server string `json:"server"`
    Bucket string `json:"bucket"`
    // +optional
    Prefix string `json:"prefix,omitempty"`
}

type Backend struct {
    // +optional
    S3 *S3 `json:"s3,omitempty"`
    // +optional
    Nfs *string `json:"nfs,omitempty"`
}

// RepoSpec defines the desired state of Repo
type RepoSpec struct {
    Backend           `json:"backend"`
    RepositorySecrets string `json:"repositorySecrets"`
}

// RepoStatus defines the observed state of Repo
type RepoStatus struct {
}

//+kubebuilder:object:root=true
//+kubebuilder:subresource:status

// Repo is the Schema for the repoes API
type Repo struct {
    metav1.TypeMeta   `json:",inline"`
    metav1.ObjectMeta `json:"metadata,omitempty"`

    Spec   RepoSpec   `json:"spec,omitempty"`
    Status RepoStatus `json:"status,omitempty"`
}

//+kubebuilder:object:root=true

// RepoList contains a list of Repo
type RepoList struct {
    metav1.TypeMeta `json:",inline"`
    metav1.ListMeta `json:"metadata,omitempty"`
    Items           []Repo `json:"items"`
}

func init() {
    SchemeBuilder.Register(&Repo{}, &RepoList{})
}

func (repo *Repo) GetResticEnv(backupConf BackupConfiguration) []corev1.EnvVar {
    env := []corev1.EnvVar{}
    if repo.Spec.Backend.S3 != nil {
        url := fmt.Sprintf("s3:http://%s/%s/%s-%s",
            repo.Spec.Backend.S3.Server,
            repo.Spec.Backend.S3.Bucket,
            strings.ToUpper(backupConf.Namespace),
            strings.ToLower(backupConf.Name))
        env = append(env, corev1.EnvVar{
            Name:  "RESTIC_REPOSITORY",
            Value: url,
        })
        for _, key := range []string{
            "AWS_ACCESS_KEY_ID",
            "AWS_SECRET_ACCESS_KEY",
            "RESTIC_PASSWORD",
        } {
            env = append(env, corev1.EnvVar{
                Name: key,
                ValueFrom: &corev1.EnvVarSource{
                    SecretKeyRef: &corev1.SecretKeySelector{
                        LocalObjectReference: corev1.LocalObjectReference{
                            Name: repo.Spec.RepositorySecrets,
                        },
                        Key: key,
                    },
                },
            })
        }
    }

    return env
}
@@ -1,5 +1,5 @@
/*

Copyright 2023.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
@@ -17,41 +17,29 @@ limitations under the License.
package v1alpha1

import (
	corev1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	//"k8s.io/apimachinery/pkg/types"
)

type BackupSessionRef struct {
	// +optional
	Ref corev1.ObjectReference `json:"ref,omitempty"`
	// +optional
	Spec BackupSessionSpec `json:"spec,omitempty"`
	// +optional
	Status BackupSessionStatus `json:"status,omitempty"`
}
// EDIT THIS FILE!  THIS IS SCAFFOLDING FOR YOU TO OWN!
// NOTE: json tags are required.  Any new fields you add must have json tags for the fields to be serialized.

// RestoreSessionSpec defines the desired state of RestoreSession
type RestoreSessionSpec struct {
	BackupSessionRef `json:"backupSession"`
	//Ref string `json:"backupSessionRef"`
	// +optional
	//Targets []TargetStatus `json:"target,omitempty"`
	// INSERT ADDITIONAL SPEC FIELDS - desired state of cluster
	// Important: Run "make" to regenerate code after modifying this file

	// Foo is an example field of RestoreSession. Edit restoresession_types.go to remove/update
	Foo string `json:"foo,omitempty"`
}

// RestoreSessionStatus defines the observed state of RestoreSession
type RestoreSessionStatus struct {
	// +optional
	SessionState `json:"state,omitempty"`
	// +optional
	StartTime *metav1.Time `json:"startTime,omitempty"`
	// +optional
	Targets []TargetStatus `json:"target,omitempty"`
	// INSERT ADDITIONAL STATUS FIELD - define observed state of cluster
	// Important: Run "make" to regenerate code after modifying this file
}

// +kubebuilder:object:root=true
// +kubebuilder:resource:shortName="rs"
// +kubebuilder:subresource:status
//+kubebuilder:object:root=true
//+kubebuilder:subresource:status

// RestoreSession is the Schema for the restoresessions API
type RestoreSession struct {
@@ -62,7 +50,7 @@ type RestoreSession struct {
	Status RestoreSessionStatus `json:"status,omitempty"`
}

// +kubebuilder:object:root=true
//+kubebuilder:object:root=true

// RestoreSessionList contains a list of RestoreSession
type RestoreSessionList struct {
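Given these types, a RestoreSession points back at the BackupSession it replays through the embedded BackupSessionRef. A sketch (object names are hypothetical, borrowed from the samples in this commit):

restore := RestoreSession{
	ObjectMeta: metav1.ObjectMeta{Name: "restoresession-sample", Namespace: "default"},
	Spec: RestoreSessionSpec{
		BackupSessionRef: BackupSessionRef{
			// Spec and Status can carry a snapshot of the BackupSession
			// in case the original object no longer exists at restore time.
			Ref: corev1.ObjectReference{Name: "backupsession-nginx", Namespace: "default"},
		},
	},
}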
@@ -1,7 +1,8 @@
//go:build !ignore_autogenerated
// +build !ignore_autogenerated

/*

Copyright 2023.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
@@ -21,15 +22,23 @@ limitations under the License.
package v1alpha1

import (
	"k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/apis/meta/v1"
	runtime "k8s.io/apimachinery/pkg/runtime"
)

// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *Backend) DeepCopyInto(out *Backend) {
	*out = *in
	out.S3 = in.S3
	if in.S3 != nil {
		in, out := &in.S3, &out.S3
		*out = new(S3)
		**out = **in
	}
	if in.Nfs != nil {
		in, out := &in.Nfs, &out.Nfs
		*out = new(string)
		**out = **in
	}
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Backend.
@@ -109,6 +118,7 @@ func (in *BackupConfigurationSpec) DeepCopyInto(out *BackupConfigurationSpec) {
		*out = new(bool)
		**out = **in
	}
	out.Keep = in.Keep
	if in.Targets != nil {
		in, out := &in.Targets, &out.Targets
		*out = make([]Target, len(*in))
@@ -116,7 +126,6 @@ func (in *BackupConfigurationSpec) DeepCopyInto(out *BackupConfigurationSpec) {
			(*in)[i].DeepCopyInto(&(*out)[i])
		}
	}
	out.Keep = in.Keep
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new BackupConfigurationSpec.
@@ -207,24 +216,6 @@ func (in *BackupSessionList) DeepCopyObject() runtime.Object {
	return nil
}

// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *BackupSessionRef) DeepCopyInto(out *BackupSessionRef) {
	*out = *in
	out.Ref = in.Ref
	out.Spec = in.Spec
	in.Status.DeepCopyInto(&out.Status)
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new BackupSessionRef.
func (in *BackupSessionRef) DeepCopy() *BackupSessionRef {
	if in == nil {
		return nil
	}
	out := new(BackupSessionRef)
	in.DeepCopyInto(out)
	return out
}

// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *BackupSessionSpec) DeepCopyInto(out *BackupSessionSpec) {
	*out = *in
@@ -273,6 +264,7 @@ func (in *Function) DeepCopyInto(out *Function) {
	out.TypeMeta = in.TypeMeta
	in.ObjectMeta.DeepCopyInto(&out.ObjectMeta)
	in.Spec.DeepCopyInto(&out.Spec)
	out.Status = in.Status
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Function.
@@ -326,21 +318,16 @@ func (in *FunctionList) DeepCopyObject() runtime.Object {
}

// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *Hook) DeepCopyInto(out *Hook) {
func (in *FunctionStatus) DeepCopyInto(out *FunctionStatus) {
	*out = *in
	if in.Args != nil {
		in, out := &in.Args, &out.Args
		*out = make([]string, len(*in))
		copy(*out, *in)
	}
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Hook.
func (in *Hook) DeepCopy() *Hook {
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new FunctionStatus.
func (in *FunctionStatus) DeepCopy() *FunctionStatus {
	if in == nil {
		return nil
	}
	out := new(Hook)
	out := new(FunctionStatus)
	in.DeepCopyInto(out)
	return out
}
@@ -365,7 +352,7 @@ func (in *Repo) DeepCopyInto(out *Repo) {
	*out = *in
	out.TypeMeta = in.TypeMeta
	in.ObjectMeta.DeepCopyInto(&out.ObjectMeta)
	out.Spec = in.Spec
	in.Spec.DeepCopyInto(&out.Spec)
	out.Status = in.Status
}

@@ -422,7 +409,7 @@ func (in *RepoList) DeepCopyObject() runtime.Object {
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *RepoSpec) DeepCopyInto(out *RepoSpec) {
	*out = *in
	out.Backend = in.Backend
	in.Backend.DeepCopyInto(&out.Backend)
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RepoSpec.
@@ -455,8 +442,8 @@ func (in *RestoreSession) DeepCopyInto(out *RestoreSession) {
	*out = *in
	out.TypeMeta = in.TypeMeta
	in.ObjectMeta.DeepCopyInto(&out.ObjectMeta)
	in.Spec.DeepCopyInto(&out.Spec)
	in.Status.DeepCopyInto(&out.Status)
	out.Spec = in.Spec
	out.Status = in.Status
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RestoreSession.
@@ -512,7 +499,6 @@ func (in *RestoreSessionList) DeepCopyObject() runtime.Object {
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *RestoreSessionSpec) DeepCopyInto(out *RestoreSessionSpec) {
	*out = *in
	in.BackupSessionRef.DeepCopyInto(&out.BackupSessionRef)
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RestoreSessionSpec.
@@ -528,17 +514,6 @@ func (in *RestoreSessionSpec) DeepCopy() *RestoreSessionSpec {
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *RestoreSessionStatus) DeepCopyInto(out *RestoreSessionStatus) {
	*out = *in
	if in.StartTime != nil {
		in, out := &in.StartTime, &out.StartTime
		*out = (*in).DeepCopy()
	}
	if in.Targets != nil {
		in, out := &in.Targets, &out.Targets
		*out = make([]TargetStatus, len(*in))
		for i := range *in {
			(*in)[i].DeepCopyInto(&(*out)[i])
		}
	}
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RestoreSessionStatus.
@@ -589,21 +564,9 @@ func (in *Step) DeepCopy() *Step {
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *Target) DeepCopyInto(out *Target) {
	*out = *in
	if in.VolumeMounts != nil {
		in, out := &in.VolumeMounts, &out.VolumeMounts
		*out = make([]v1.VolumeMount, len(*in))
		for i := range *in {
			(*in)[i].DeepCopyInto(&(*out)[i])
		}
	}
	if in.Paths != nil {
		in, out := &in.Paths, &out.Paths
		*out = make([]string, len(*in))
		copy(*out, *in)
	}
	if in.Steps != nil {
		in, out := &in.Steps, &out.Steps
		*out = make([]Step, len(*in))
	if in.Containers != nil {
		in, out := &in.Containers, &out.Containers
		*out = make([]TargetContainer, len(*in))
		for i := range *in {
			(*in)[i].DeepCopyInto(&(*out)[i])
		}
@@ -620,6 +583,33 @@ func (in *Target) DeepCopy() *Target {
	return out
}

// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *TargetContainer) DeepCopyInto(out *TargetContainer) {
	*out = *in
	if in.Paths != nil {
		in, out := &in.Paths, &out.Paths
		*out = make([]string, len(*in))
		copy(*out, *in)
	}
	if in.Steps != nil {
		in, out := &in.Steps, &out.Steps
		*out = make([]Step, len(*in))
		for i := range *in {
			(*in)[i].DeepCopyInto(&(*out)[i])
		}
	}
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TargetContainer.
func (in *TargetContainer) DeepCopy() *TargetContainer {
	if in == nil {
		return nil
	}
	out := new(TargetContainer)
	in.DeepCopyInto(out)
	return out
}

// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *TargetStatus) DeepCopyInto(out *TargetStatus) {
	*out = *in
@@ -629,7 +619,7 @@ func (in *TargetStatus) DeepCopyInto(out *TargetStatus) {
	}
	if in.Duration != nil {
		in, out := &in.Duration, &out.Duration
		*out = new(metav1.Duration)
		*out = new(v1.Duration)
		**out = **in
	}
}
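A practical note on these generated helpers: objects handed out by a controller-runtime client may be shared with the informer cache, so a reconciler should deep-copy before mutating. A sketch of the usual pattern (assuming a controller-runtime client r):

// take a private copy, mutate it, then write the status back
patched := backupConf.DeepCopy()
patched.Status.ActiveCronJob = true
if err := r.Status().Update(ctx, patched); err != nil {
	// requeue and retry on conflict
}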
@@ -1,26 +0,0 @@
# The following manifests contain a self-signed issuer CR and a certificate CR.
# More documentation can be found at https://docs.cert-manager.io
# WARNING: Targets CertManager 0.11; check https://docs.cert-manager.io/en/latest/tasks/upgrading/index.html for
# breaking changes
apiVersion: cert-manager.io/v1alpha2
kind: Issuer
metadata:
  name: selfsigned-issuer
  namespace: system
spec:
  selfSigned: {}
---
apiVersion: cert-manager.io/v1alpha2
kind: Certificate
metadata:
  name: serving-cert  # this name should match the one that appears in kustomizeconfig.yaml
  namespace: system
spec:
  # $(SERVICE_NAME) and $(SERVICE_NAMESPACE) will be substituted by kustomize
  dnsNames:
  - $(SERVICE_NAME).$(SERVICE_NAMESPACE).svc
  - $(SERVICE_NAME).$(SERVICE_NAMESPACE).svc.cluster.local
  issuerRef:
    kind: Issuer
    name: selfsigned-issuer
  secretName: webhook-server-cert  # this secret will not be prefixed, since it's not managed by kustomize
@@ -1,5 +0,0 @@
resources:
- certificate.yaml

configurations:
- kustomizeconfig.yaml
@@ -1,16 +0,0 @@
# This configuration is for teaching kustomize how to update name ref and var substitution
nameReference:
- kind: Issuer
  group: cert-manager.io
  fieldSpecs:
  - kind: Certificate
    group: cert-manager.io
    path: spec/issuerRef/name

varReference:
- kind: Certificate
  group: cert-manager.io
  path: spec/commonName
- kind: Certificate
  group: cert-manager.io
  path: spec/dnsNames
@@ -2,32 +2,31 @@
# since it depends on service name and namespace that are out of this kustomize package.
# It should be run by config/default
resources:
- bases/formol.desmojim.fr_functions.yaml
- bases/formol.desmojim.fr_backupconfigurations.yaml
- bases/formol.desmojim.fr_backupsessions.yaml
- bases/formol.desmojim.fr_functions.yaml
- bases/formol.desmojim.fr_repoes.yaml
- bases/formol.desmojim.fr_backupsessions.yaml
- bases/formol.desmojim.fr_restoresessions.yaml
# +kubebuilder:scaffold:crdkustomizeresource
#+kubebuilder:scaffold:crdkustomizeresource

patchesStrategicMerge:
# [WEBHOOK] To enable webhook, uncomment all the sections with [WEBHOOK] prefix.
# patches here are for enabling the conversion webhook for each CRD
#- patches/webhook_in_tasks.yaml
#- patches/webhook_in_functions.yaml
#- patches/webhook_in_backupconfigurations.yaml
#- patches/webhook_in_backupsessions.yaml
#- patches/webhook_in_functions.yaml
#- patches/webhook_in_repoes.yaml
#- patches/webhook_in_backupsessions.yaml
#- patches/webhook_in_restoresessions.yaml
# +kubebuilder:scaffold:crdkustomizewebhookpatch
#+kubebuilder:scaffold:crdkustomizewebhookpatch

# [CERTMANAGER] To enable webhook, uncomment all the sections with [CERTMANAGER] prefix.
# [CERTMANAGER] To enable cert-manager, uncomment all the sections with [CERTMANAGER] prefix.
# patches here are for enabling the CA injection for each CRD
#- patches/cainjection_in_functions.yaml
#- patches/cainjection_in_backupconfigurations.yaml
#- patches/cainjection_in_backupsessions.yaml
#- patches/cainjection_in_functions.yaml
#- patches/cainjection_in_repoes.yaml
#- patches/cainjection_in_backupsessions.yaml
#- patches/cainjection_in_restoresessions.yaml
# +kubebuilder:scaffold:crdkustomizecainjectionpatch
#+kubebuilder:scaffold:crdkustomizecainjectionpatch

# the following config is for teaching kustomize how to do kustomization for CRDs.
configurations:
@@ -4,13 +4,15 @@ nameReference:
  version: v1
fieldSpecs:
- kind: CustomResourceDefinition
  version: v1
  group: apiextensions.k8s.io
  path: spec/conversion/webhookClientConfig/service/name
  path: spec/conversion/webhook/clientConfig/service/name

namespace:
- kind: CustomResourceDefinition
  version: v1
  group: apiextensions.k8s.io
  path: spec/conversion/webhookClientConfig/service/namespace
  path: spec/conversion/webhook/clientConfig/service/namespace
  create: false

varReference:
@@ -1,5 +1,4 @@
# The following patch adds a directive for certmanager to inject CA into the CRD
# CRD conversion requires k8s 1.13 or later.
apiVersion: apiextensions.k8s.io/v1
kind: CustomResourceDefinition
metadata:
@@ -1,5 +1,4 @@
# The following patch adds a directive for certmanager to inject CA into the CRD
# CRD conversion requires k8s 1.13 or later.
apiVersion: apiextensions.k8s.io/v1
kind: CustomResourceDefinition
metadata:
@@ -1,5 +1,4 @@
# The following patch adds a directive for certmanager to inject CA into the CRD
# CRD conversion requires k8s 1.13 or later.
apiVersion: apiextensions.k8s.io/v1
kind: CustomResourceDefinition
metadata:
7  config/crd/patches/cainjection_in_repoes.yaml  Normal file
@@ -0,0 +1,7 @@
# The following patch adds a directive for certmanager to inject CA into the CRD
apiVersion: apiextensions.k8s.io/v1
kind: CustomResourceDefinition
metadata:
  annotations:
    cert-manager.io/inject-ca-from: $(CERTIFICATE_NAMESPACE)/$(CERTIFICATE_NAME)
  name: repoes.formol.desmojim.fr
@@ -1,8 +1,7 @@
# The following patch adds a directive for certmanager to inject CA into the CRD
# CRD conversion requires k8s 1.13 or later.
apiVersion: apiextensions.k8s.io/v1
kind: CustomResourceDefinition
metadata:
  annotations:
    cert-manager.io/inject-ca-from: $(CERTIFICATE_NAMESPACE)/$(CERTIFICATE_NAME)
  name: restoresessions.formol.desmojim.fr.desmojim.fr
  name: restoresessions.formol.desmojim.fr
@@ -1,20 +1,16 @@
# The following patch enables conversion webhook for CRD
# CRD conversion requires k8s 1.13 or later.
# The following patch enables a conversion webhook for the CRD
apiVersion: apiextensions.k8s.io/v1
kind: CustomResourceDefinition
metadata:
  name: backupconfigurations.formol.desmojim.fr
spec:
  preserveUnknownFields: false
  conversion:
    strategy: Webhook
    webhook:
      conversionReviewVersions: ["v1", "v1beta1", "v1alpha1"]
      clientConfig:
        # this is "\n" used as a placeholder, otherwise it will be rejected by the apiserver for being blank,
        # but we're going to set it later using the cert-manager (or potentially a patch if not using cert-manager)
        caBundle: Cg==
        service:
          namespace: system
          name: webhook-service
          path: /convert
      conversionReviewVersions:
      - v1
@@ -1,18 +1,16 @@
# The following patch enables conversion webhook for CRD
# CRD conversion requires k8s 1.13 or later.
# The following patch enables a conversion webhook for the CRD
apiVersion: apiextensions.k8s.io/v1
kind: CustomResourceDefinition
metadata:
  name: backupsessions.formol.desmojim.fr
spec:
  preserveUnknownFields: false
  conversion:
    strategy: Webhook
    webhookClientConfig:
      # this is "\n" used as a placeholder, otherwise it will be rejected by the apiserver for being blank,
      # but we're going to set it later using the cert-manager (or potentially a patch if not using cert-manager)
      caBundle: Cg==
      service:
        namespace: system
        name: webhook-service
        path: /convert
    webhook:
      clientConfig:
        service:
          namespace: system
          name: webhook-service
          path: /convert
      conversionReviewVersions:
      - v1
@@ -1,5 +1,4 @@
# The following patch enables conversion webhook for CRD
# CRD conversion requires k8s 1.13 or later.
# The following patch enables a conversion webhook for the CRD
apiVersion: apiextensions.k8s.io/v1
kind: CustomResourceDefinition
metadata:
@@ -7,11 +6,11 @@ metadata:
spec:
  conversion:
    strategy: Webhook
    webhookClientConfig:
      # this is "\n" used as a placeholder, otherwise it will be rejected by the apiserver for being blank,
      # but we're going to set it later using the cert-manager (or potentially a patch if not using cert-manager)
      caBundle: Cg==
      service:
        namespace: system
        name: webhook-service
        path: /convert
    webhook:
      clientConfig:
        service:
          namespace: system
          name: webhook-service
          path: /convert
      conversionReviewVersions:
      - v1
16  config/crd/patches/webhook_in_repoes.yaml  Normal file
@@ -0,0 +1,16 @@
# The following patch enables a conversion webhook for the CRD
apiVersion: apiextensions.k8s.io/v1
kind: CustomResourceDefinition
metadata:
  name: repoes.formol.desmojim.fr
spec:
  conversion:
    strategy: Webhook
    webhook:
      clientConfig:
        service:
          namespace: system
          name: webhook-service
          path: /convert
      conversionReviewVersions:
      - v1
@@ -1,17 +1,16 @@
# The following patch enables conversion webhook for CRD
# CRD conversion requires k8s 1.13 or later.
# The following patch enables a conversion webhook for the CRD
apiVersion: apiextensions.k8s.io/v1
kind: CustomResourceDefinition
metadata:
  name: restoresessions.formol.desmojim.fr.desmojim.fr
  name: restoresessions.formol.desmojim.fr
spec:
  conversion:
    strategy: Webhook
    webhookClientConfig:
      # this is "\n" used as a placeholder, otherwise it will be rejected by the apiserver for being blank,
      # but we're going to set it later using the cert-manager (or potentially a patch if not using cert-manager)
      caBundle: Cg==
      service:
        namespace: system
        name: webhook-service
        path: /convert
    webhook:
      clientConfig:
        service:
          namespace: system
          name: webhook-service
          path: /convert
      conversionReviewVersions:
      - v1
@@ -16,21 +16,23 @@ bases:
- ../crd
- ../rbac
- ../manager
# [WEBHOOK] To enable webhook, uncomment all the sections with [WEBHOOK] prefix including the one in
# [WEBHOOK] To enable webhook, uncomment all the sections with [WEBHOOK] prefix including the one in
# crd/kustomization.yaml
#- ../webhook
# [CERTMANAGER] To enable cert-manager, uncomment all sections with 'CERTMANAGER'. 'WEBHOOK' components are required.
#- ../certmanager
# [PROMETHEUS] To enable prometheus monitor, uncomment all sections with 'PROMETHEUS'.
- ../prometheus
# [PROMETHEUS] To enable prometheus monitor, uncomment all sections with 'PROMETHEUS'.
#- ../prometheus

patchesStrategicMerge:
# Protect the /metrics endpoint by putting it behind auth.
# If you want your controller-manager to expose the /metrics
# endpoint w/o any authn/z, please comment the following line.
# Protect the /metrics endpoint by putting it behind auth.
# If you want your controller-manager to expose the /metrics
# endpoint w/o any authn/z, please comment the following line.
- manager_auth_proxy_patch.yaml

# [WEBHOOK] To enable webhook, uncomment all the sections with [WEBHOOK] prefix including the one in

# [WEBHOOK] To enable webhook, uncomment all the sections with [WEBHOOK] prefix including the one in
# crd/kustomization.yaml
#- manager_webhook_patch.yaml

@@ -46,7 +48,7 @@ vars:
#   objref:
#     kind: Certificate
#     group: cert-manager.io
#     version: v1alpha2
#     version: v1
#     name: serving-cert # this name should match the one in certificate.yaml
#   fieldref:
#     fieldpath: metadata.namespace
@@ -54,7 +56,7 @@ vars:
#   objref:
#     kind: Certificate
#     group: cert-manager.io
#     version: v1alpha2
#     version: v1
#     name: serving-cert # this name should match the one in certificate.yaml
#- name: SERVICE_NAMESPACE # namespace of the service
#   objref:
@@ -1,4 +1,4 @@
# This patch injects a sidecar container which is an HTTP proxy for the
# This patch injects a sidecar container which is an HTTP proxy for the
# controller manager, it performs RBAC authorization against the Kubernetes API using SubjectAccessReviews.
apiVersion: apps/v1
kind: Deployment
@@ -8,18 +8,48 @@ metadata:
spec:
  template:
    spec:
      affinity:
        nodeAffinity:
          requiredDuringSchedulingIgnoredDuringExecution:
            nodeSelectorTerms:
            - matchExpressions:
              - key: kubernetes.io/arch
                operator: In
                values:
                - amd64
                - arm64
                - ppc64le
                - s390x
              - key: kubernetes.io/os
                operator: In
                values:
                - linux
      containers:
      - name: kube-rbac-proxy
        image: quay.io/brancz/kube-rbac-proxy:v0.8.0-arm
        securityContext:
          allowPrivilegeEscalation: false
          capabilities:
            drop:
            - "ALL"
        image: gcr.io/kubebuilder/kube-rbac-proxy:v0.13.1
        args:
        - "--secure-listen-address=0.0.0.0:8443"
        - "--upstream=http://127.0.0.1:8080/"
        - "--logtostderr=true"
        - "--v=10"
        - "--v=0"
        ports:
        - containerPort: 8443
          protocol: TCP
          name: https
        resources:
          limits:
            cpu: 500m
            memory: 128Mi
          requests:
            cpu: 5m
            memory: 64Mi
      - name: manager
        args:
        - "--metrics-addr=127.0.0.1:8080"
        - "--enable-leader-election"
        - "--health-probe-bind-address=:8081"
        - "--metrics-bind-address=127.0.0.1:8080"
        - "--leader-elect"
10  config/default/manager_config_patch.yaml  Normal file
@@ -0,0 +1,10 @@
apiVersion: apps/v1
kind: Deployment
metadata:
  name: controller-manager
  namespace: system
spec:
  template:
    spec:
      containers:
      - name: manager
@@ -1,23 +0,0 @@
apiVersion: apps/v1
kind: Deployment
metadata:
  name: controller-manager
  namespace: system
spec:
  template:
    spec:
      containers:
      - name: manager
        ports:
        - containerPort: 9443
          name: webhook-server
          protocol: TCP
        volumeMounts:
        - mountPath: /tmp/k8s-webhook-server/serving-certs
          name: cert
          readOnly: true
      volumes:
      - name: cert
        secret:
          defaultMode: 420
          secretName: webhook-server-cert
@@ -1,15 +0,0 @@
# This patch adds annotations to the admission webhook config; the variables
# $(CERTIFICATE_NAMESPACE) and $(CERTIFICATE_NAME) will be substituted by kustomize.
apiVersion: admissionregistration.k8s.io/v1beta1
kind: MutatingWebhookConfiguration
metadata:
  name: mutating-webhook-configuration
  annotations:
    cert-manager.io/inject-ca-from: $(CERTIFICATE_NAMESPACE)/$(CERTIFICATE_NAME)
---
apiVersion: admissionregistration.k8s.io/v1beta1
kind: ValidatingWebhookConfiguration
metadata:
  name: validating-webhook-configuration
  annotations:
    cert-manager.io/inject-ca-from: $(CERTIFICATE_NAMESPACE)/$(CERTIFICATE_NAME)
@@ -1,8 +0,0 @@
resources:
- manager.yaml
apiVersion: kustomize.config.k8s.io/v1beta1
kind: Kustomization
images:
- name: controller
  newName: desmo999r/formolcontroller
  newTag: 0.3.0
@@ -1,39 +0,0 @@
apiVersion: v1
kind: Namespace
metadata:
  labels:
    control-plane: controller-manager
  name: system
---
apiVersion: apps/v1
kind: Deployment
metadata:
  name: controller-manager
  namespace: system
  labels:
    control-plane: controller-manager
spec:
  selector:
    matchLabels:
      control-plane: controller-manager
  replicas: 1
  template:
    metadata:
      labels:
        control-plane: controller-manager
    spec:
      containers:
      - command:
        - /manager
        args:
        - --enable-leader-election
        image: controller:latest
        name: manager
        resources:
          limits:
            cpu: 100m
            memory: 30Mi
          requests:
            cpu: 100m
            memory: 20Mi
      terminationGracePeriodSeconds: 10
@@ -1,2 +0,0 @@
resources:
- monitor.yaml
@@ -1,16 +0,0 @@

# Prometheus Monitor Service (Metrics)
apiVersion: monitoring.coreos.com/v1
kind: ServiceMonitor
metadata:
  labels:
    control-plane: controller-manager
  name: controller-manager-metrics-monitor
  namespace: system
spec:
  endpoints:
  - path: /metrics
    port: https
  selector:
    matchLabels:
      control-plane: controller-manager
@@ -1,7 +0,0 @@
apiVersion: rbac.authorization.k8s.io/v1beta1
kind: ClusterRole
metadata:
  name: metrics-reader
rules:
- nonResourceURLs: ["/metrics"]
  verbs: ["get"]
@@ -1,13 +0,0 @@
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
  name: proxy-role
rules:
- apiGroups: ["authentication.k8s.io"]
  resources:
  - tokenreviews
  verbs: ["create"]
- apiGroups: ["authorization.k8s.io"]
  resources:
  - subjectaccessreviews
  verbs: ["create"]
@@ -1,12 +0,0 @@
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
  name: proxy-rolebinding
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: ClusterRole
  name: proxy-role
subjects:
- kind: ServiceAccount
  name: default
  namespace: system
@@ -1,14 +0,0 @@
apiVersion: v1
kind: Service
metadata:
  labels:
    control-plane: controller-manager
  name: controller-manager-metrics-service
  namespace: system
spec:
  ports:
  - name: https
    port: 8443
    targetPort: https
  selector:
    control-plane: controller-manager
@@ -1,24 +0,0 @@
# permissions for end users to edit backupconfigurations.
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
  name: backupconfiguration-editor-role
rules:
- apiGroups:
  - formol.desmojim.fr
  resources:
  - backupconfigurations
  verbs:
  - create
  - delete
  - get
  - list
  - patch
  - update
  - watch
- apiGroups:
  - formol.desmojim.fr
  resources:
  - backupconfigurations/status
  verbs:
  - get
@@ -1,20 +0,0 @@
# permissions for end users to view backupconfigurations.
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
  name: backupconfiguration-viewer-role
rules:
- apiGroups:
  - formol.desmojim.fr
  resources:
  - backupconfigurations
  verbs:
  - get
  - list
  - watch
- apiGroups:
  - formol.desmojim.fr
  resources:
  - backupconfigurations/status
  verbs:
  - get
@@ -1,24 +0,0 @@
# permissions for end users to edit backupsessions.
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
  name: backupsession-editor-role
rules:
- apiGroups:
  - formol.desmojim.fr
  resources:
  - backupsessions
  verbs:
  - create
  - delete
  - get
  - list
  - patch
  - update
  - watch
- apiGroups:
  - formol.desmojim.fr
  resources:
  - backupsessions/status
  verbs:
  - get
@@ -1,20 +0,0 @@
# permissions for end users to view backupsessions.
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
  name: backupsession-viewer-role
rules:
- apiGroups:
  - formol.desmojim.fr
  resources:
  - backupsessions
  verbs:
  - get
  - list
  - watch
- apiGroups:
  - formol.desmojim.fr
  resources:
  - backupsessions/status
  verbs:
  - get
@@ -1,24 +0,0 @@
# permissions for end users to edit functions.
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
  name: function-editor-role
rules:
- apiGroups:
  - formol.desmojim.fr
  resources:
  - functions
  verbs:
  - create
  - delete
  - get
  - list
  - patch
  - update
  - watch
- apiGroups:
  - formol.desmojim.fr
  resources:
  - functions/status
  verbs:
  - get
@@ -1,20 +0,0 @@
# permissions for end users to view functions.
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
  name: function-viewer-role
rules:
- apiGroups:
  - formol.desmojim.fr
  resources:
  - functions
  verbs:
  - get
  - list
  - watch
- apiGroups:
  - formol.desmojim.fr
  resources:
  - functions/status
  verbs:
  - get
@@ -1,12 +0,0 @@
resources:
- role.yaml
- role_binding.yaml
- leader_election_role.yaml
- leader_election_role_binding.yaml
# Comment the following 4 lines if you want to disable
# the auth proxy (https://github.com/brancz/kube-rbac-proxy)
# which protects your /metrics endpoint.
#- auth_proxy_service.yaml
#- auth_proxy_role.yaml
#- auth_proxy_role_binding.yaml
#- auth_proxy_client_clusterrole.yaml
@@ -1,32 +0,0 @@
# permissions to do leader election.
apiVersion: rbac.authorization.k8s.io/v1
kind: Role
metadata:
  name: leader-election-role
rules:
- apiGroups:
  - ""
  resources:
  - configmaps
  verbs:
  - get
  - list
  - watch
  - create
  - update
  - patch
  - delete
- apiGroups:
  - ""
  resources:
  - configmaps/status
  verbs:
  - get
  - update
  - patch
- apiGroups:
  - ""
  resources:
  - events
  verbs:
  - create
@@ -1,12 +0,0 @@
apiVersion: rbac.authorization.k8s.io/v1
kind: RoleBinding
metadata:
  name: leader-election-rolebinding
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: Role
  name: leader-election-role
subjects:
- kind: ServiceAccount
  name: default
  namespace: system
@@ -1,12 +0,0 @@
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
  name: manager-rolebinding
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: ClusterRole
  name: manager-role
subjects:
- kind: ServiceAccount
  name: default
  namespace: system
@@ -1,7 +0,0 @@
apiVersion: formol.desmojim.fr.desmojim.fr/v1alpha1
kind: RestoreSession
metadata:
  name: restoresession-sample
spec:
  # Add fields here
  foo: bar
@@ -1,17 +1,12 @@
apiVersion: formol.desmojim.fr/v1alpha1
kind: BackupConfiguration
metadata:
  name: backupconf-nginx
  labels:
    app.kubernetes.io/name: backupconfiguration
    app.kubernetes.io/instance: backupconfiguration-sample
    app.kubernetes.io/part-of: formol
    app.kubernetes.io/managed-by: kustomize
    app.kubernetes.io/created-by: formol
  name: backupconfiguration-sample
spec:
  repository:
    name: repo-minio
  schedule: "*/1 * * * *"
  target:
    apiVersion: v1
    kind: Deployment
    name: nginx-deployment
  volumeMounts:
  - name: empty
    mountPath: /data
  paths:
  - /data
  # TODO(user): Add fields here
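In Go, the removed sample corresponds roughly to the following BackupConfiguration value (a sketch only; the field shapes are inferred from how the controller code further down reads Spec.Repository, Spec.Schedule and Spec.Targets, and may not match the API exactly):

backupConf := formolv1alpha1.BackupConfiguration{
	ObjectMeta: metav1.ObjectMeta{Name: "backupconf-nginx", Namespace: "default"},
	Spec: formolv1alpha1.BackupConfigurationSpec{
		Repository: "repo-minio",  // the controller resolves this name to a Repo object
		Schedule:   "*/1 * * * *", // standard cron syntax
		Targets: []formolv1alpha1.Target{{
			Kind:         formolv1alpha1.SidecarKind,
			Name:         "nginx-deployment",
			VolumeMounts: []corev1.VolumeMount{{Name: "empty", MountPath: "/data"}},
		}},
	},
}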
@@ -1,8 +1,12 @@
apiVersion: formol.desmojim.fr/v1alpha1
kind: BackupSession
metadata:
  name: backupsession-nginx
  labels:
    app.kubernetes.io/name: backupsession
    app.kubernetes.io/instance: backupsession-sample
    app.kubernetes.io/part-of: formol
    app.kubernetes.io/managed-by: kustomize
    app.kubernetes.io/created-by: formol
  name: backupsession-sample
spec:
  # Add fields here
  ref:
    name: backupconf-nginx
  # TODO(user): Add fields here
@@ -1,11 +1,12 @@
apiVersion: formol.desmojim.fr/v1alpha1
kind: Function
metadata:
  name: function-backup-pvc
  namespace: backup
  labels:
    app.kubernetes.io/name: function
    app.kubernetes.io/instance: function-sample
    app.kubernetes.io/part-of: formol
    app.kubernetes.io/managed-by: kustomize
    app.kubernetes.io/created-by: formol
  name: function-sample
spec:
  name: function-backup-pvc
  image: desmo999r/formolcli
  args:
  - backup
  - volume
  # TODO(user): Add fields here
@@ -1,11 +1,12 @@
apiVersion: formol.desmojim.fr/v1alpha1
kind: Repo
metadata:
  name: repo-minio
  namespace: backup
  labels:
    app.kubernetes.io/name: repo
    app.kubernetes.io/instance: repo-sample
    app.kubernetes.io/part-of: formol
    app.kubernetes.io/managed-by: kustomize
    app.kubernetes.io/created-by: formol
  name: repo-sample
spec:
  backend:
    s3:
      server: raid5.desmojim.fr:9000
      bucket: testbucket2
  repositorySecrets: secret-minio
  # TODO(user): Add fields here
12  config/samples/formol_v1alpha1_restoresession.yaml  Normal file
@@ -0,0 +1,12 @@
apiVersion: formol.desmojim.fr/v1alpha1
kind: RestoreSession
metadata:
  labels:
    app.kubernetes.io/name: restoresession
    app.kubernetes.io/instance: restoresession-sample
    app.kubernetes.io/part-of: formol
    app.kubernetes.io/managed-by: kustomize
    app.kubernetes.io/created-by: formol
  name: restoresession-sample
spec:
  # TODO(user): Add fields here
@@ -1,28 +0,0 @@
apiVersion: apps/v1
kind: Deployment
metadata:
  name: nginx-deployment
  labels:
    app: nginx
spec:
  replicas: 1
  selector:
    matchLabels:
      app: nginx
  template:
    metadata:
      labels:
        app: nginx
    spec:
      containers:
      - name: nginx
        image: nginx:1.14.2
        ports:
        - containerPort: 80
        volumeMounts:
        - name: empty
          mountPath: /data
      volumes:
      - name: empty
        emptyDir: {}
@@ -1,6 +0,0 @@
resources:
- manifests.yaml
- service.yaml

configurations:
- kustomizeconfig.yaml
@@ -1,25 +0,0 @@
# the following config is for teaching kustomize where to look at when substituting vars.
# It requires kustomize v2.1.0 or newer to work properly.
nameReference:
- kind: Service
  version: v1
  fieldSpecs:
  - kind: MutatingWebhookConfiguration
    group: admissionregistration.k8s.io
    path: webhooks/clientConfig/service/name
  - kind: ValidatingWebhookConfiguration
    group: admissionregistration.k8s.io
    path: webhooks/clientConfig/service/name

namespace:
- kind: MutatingWebhookConfiguration
  group: admissionregistration.k8s.io
  path: webhooks/clientConfig/service/namespace
  create: true
- kind: ValidatingWebhookConfiguration
  group: admissionregistration.k8s.io
  path: webhooks/clientConfig/service/namespace
  create: true

varReference:
- path: metadata/annotations
@@ -1,12 +0,0 @@

apiVersion: v1
kind: Service
metadata:
  name: webhook-service
  namespace: system
spec:
  ports:
  - port: 443
    targetPort: 9443
  selector:
    control-plane: controller-manager
BIN  controllers/.backupconfiguration_controller.go.un~  Normal file
Binary file not shown.
BIN  controllers/.backupconfiguration_controller_cronjob.go.un~  Normal file
Binary file not shown.
BIN  controllers/.backupconfiguration_controller_sidecar.go.un~  Normal file
Binary file not shown.
BIN  controllers/.backupconfiguration_controller_test.go.un~  Normal file
Binary file not shown.
BIN  controllers/.backupsession_controller.go.un~  Normal file
Binary file not shown.
BIN  controllers/.suite_test.go.un~  Normal file
Binary file not shown.
@@ -1,5 +1,5 @@
/*

Copyright 2023.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
@@ -18,360 +18,108 @@ package controllers

import (
	"context"
	//"time"

	formolrbac "github.com/desmo999r/formol/pkg/rbac"
	formolutils "github.com/desmo999r/formol/pkg/utils"
	"github.com/go-logr/logr"
	appsv1 "k8s.io/api/apps/v1"
	batchv1 "k8s.io/api/batch/v1"
	kbatch_beta1 "k8s.io/api/batch/v1beta1"
	corev1 "k8s.io/api/core/v1"
	"k8s.io/apimachinery/pkg/api/errors"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/runtime"
	ctrl "sigs.k8s.io/controller-runtime"
	"sigs.k8s.io/controller-runtime/pkg/client"
	"sigs.k8s.io/controller-runtime/pkg/controller"
	"sigs.k8s.io/controller-runtime/pkg/reconcile"
	"sigs.k8s.io/controller-runtime/pkg/log"

	formolv1alpha1 "github.com/desmo999r/formol/api/v1alpha1"
	formolutils "github.com/desmo999r/formol/pkg/utils"
)

// BackupConfigurationReconciler reconciles a BackupConfiguration object
type BackupConfigurationReconciler struct {
	client.Client
	Log    logr.Logger
	Scheme *runtime.Scheme
	Log    logr.Logger
	context.Context
}
var _ reconcile.Reconciler = &BackupConfigurationReconciler{}
//+kubebuilder:rbac:groups=formol.desmojim.fr,resources=backupconfigurations,verbs=get;list;watch;create;update;patch;delete
//+kubebuilder:rbac:groups=formol.desmojim.fr,resources=backupconfigurations/status,verbs=get;update;patch
//+kubebuilder:rbac:groups=formol.desmojim.fr,resources=backupconfigurations/finalizers,verbs=update

// +kubebuilder:rbac:groups=formol.desmojim.fr,resources=*,verbs=*
// +kubebuilder:rbac:groups=apps,resources=deployments,verbs=get;list;watch;create;update;patch;delete
// +kubebuilder:rbac:groups=apps,resources=replicasets,verbs=get;list;watch;create;update;patch;delete
// +kubebuilder:rbac:groups=core,resources=pods,verbs=get;list;watch
// +kubebuilder:rbac:groups=core,resources=secrets,verbs=get;list;watch
// +kubebuilder:rbac:groups=core,resources=configmaps,verbs=get;list;watch
// +kubebuilder:rbac:groups=core,resources=serviceaccounts,verbs=get;list;watch;create;update;patch;delete
// +kubebuilder:rbac:groups=rbac.authorization.k8s.io,resources=roles,verbs=get;list;watch;create;update;patch;delete
// +kubebuilder:rbac:groups=rbac.authorization.k8s.io,resources=rolebindings,verbs=get;list;watch;create;update;patch;delete
// +kubebuilder:rbac:groups=rbac.authorization.k8s.io,resources=clusterroles,verbs=get;list;watch;create;update;patch;delete
// +kubebuilder:rbac:groups=rbac.authorization.k8s.io,resources=clusterrolebindings,verbs=get;list;watch;create;update;patch;delete
// +kubebuilder:rbac:groups=batch,resources=cronjobs,verbs=get;list;watch;create;update;patch;delete
// +kubebuilder:rbac:groups=batch,resources=cronjobs/status,verbs=get
// +kubebuilder:rbac:groups=coordination.k8s.io,resources=leases,verbs=get;list;create;update
// Reconcile is part of the main kubernetes reconciliation loop which aims to
// move the current state of the cluster closer to the desired state.
// TODO(user): Modify the Reconcile function to compare the state specified by
// the BackupConfiguration object against the actual cluster state, and then
// perform operations to make the cluster state reflect the state specified by
// the user.
//
// For more details, check Reconcile and its Result here:
// - https://pkg.go.dev/sigs.k8s.io/controller-runtime@v0.13.1/pkg/reconcile
func (r *BackupConfigurationReconciler) Reconcile(ctx context.Context, req ctrl.Request) (ctrl.Result, error) {
	r.Context = ctx
	r.Log = log.FromContext(ctx)
func (r *BackupConfigurationReconciler) Reconcile(ctx context.Context, req reconcile.Request) (reconcile.Result, error) {
	var changed bool
	log := r.Log.WithValues("backupconfiguration", req.NamespacedName)
	//time.Sleep(300 * time.Millisecond)
	r.Log.V(1).Info("Enter Reconcile with req", "req", req, "reconciler", r)

	log.V(1).Info("Enter Reconcile with req", "req", req, "reconciler", r)

	backupConf := &formolv1alpha1.BackupConfiguration{}
	if err := r.Get(ctx, req.NamespacedName, backupConf); err != nil {
		return reconcile.Result{}, client.IgnoreNotFound(err)
	}

	getDeployment := func(namespace string, name string) (*appsv1.Deployment, error) {
		deployment := &appsv1.Deployment{}
		err := r.Get(context.Background(), client.ObjectKey{
			Namespace: namespace,
			Name:      name,
		}, deployment)
		return deployment, err
	}

	deleteCronJob := func() error {
		_ = formolrbac.DeleteFormolRBAC(r.Client, "default", backupConf.Namespace)
		_ = formolrbac.DeleteBackupSessionCreatorRBAC(r.Client, backupConf.Namespace)
		cronjob := &kbatch_beta1.CronJob{}
		if err := r.Get(context.Background(), client.ObjectKey{
			Namespace: backupConf.Namespace,
			Name:      "backup-" + backupConf.Name,
		}, cronjob); err == nil {
			log.V(0).Info("Deleting cronjob", "cronjob", cronjob.Name)
			return r.Delete(context.TODO(), cronjob)
		} else {
			return err
	backupConf := formolv1alpha1.BackupConfiguration{}
	err := r.Get(ctx, req.NamespacedName, &backupConf)
	if err != nil {
		if errors.IsNotFound(err) {
			return ctrl.Result{}, nil
		}
	}

	addCronJob := func() error {
		if err := formolrbac.CreateFormolRBAC(r.Client, "default", backupConf.Namespace); err != nil {
			log.Error(err, "unable to create backupsessionlistener RBAC")
			return nil
		}

		if err := formolrbac.CreateBackupSessionCreatorRBAC(r.Client, backupConf.Namespace); err != nil {
			log.Error(err, "unable to create backupsession-creator RBAC")
			return nil
		}

		cronjob := &kbatch_beta1.CronJob{}
		if err := r.Get(context.Background(), client.ObjectKey{
			Namespace: backupConf.Namespace,
			Name:      "backup-" + backupConf.Name,
		}, cronjob); err == nil {
			log.V(0).Info("there is already a cronjob")
			if backupConf.Spec.Schedule != cronjob.Spec.Schedule {
				log.V(0).Info("cronjob schedule has changed", "old schedule", cronjob.Spec.Schedule, "new schedule", backupConf.Spec.Schedule)
				cronjob.Spec.Schedule = backupConf.Spec.Schedule
				changed = true
			}
			if backupConf.Spec.Suspend != nil && backupConf.Spec.Suspend != cronjob.Spec.Suspend {
				log.V(0).Info("cronjob suspend has changed", "before", cronjob.Spec.Suspend, "new", backupConf.Spec.Suspend)
				cronjob.Spec.Suspend = backupConf.Spec.Suspend
				changed = true
			}
			if changed == true {
				if err := r.Update(context.TODO(), cronjob); err != nil {
					log.Error(err, "unable to update cronjob definition")
					return err
				}
			}
			return nil
		} else if errors.IsNotFound(err) == false {
			log.Error(err, "something went wrong")
			return err
		}

		cronjob = &kbatch_beta1.CronJob{
			ObjectMeta: metav1.ObjectMeta{
				Name:      "backup-" + backupConf.Name,
				Namespace: backupConf.Namespace,
			},
			Spec: kbatch_beta1.CronJobSpec{
				Suspend:  backupConf.Spec.Suspend,
				Schedule: backupConf.Spec.Schedule,
				JobTemplate: kbatch_beta1.JobTemplateSpec{
					Spec: batchv1.JobSpec{
						Template: corev1.PodTemplateSpec{
							Spec: corev1.PodSpec{
								RestartPolicy:      corev1.RestartPolicyOnFailure,
								ServiceAccountName: "backupsession-creator",
								Containers: []corev1.Container{
									corev1.Container{
										Name:  "job-createbackupsession-" + backupConf.Name,
										Image: backupConf.Spec.Image,
										Args: []string{
											"backupsession",
											"create",
											"--namespace",
											backupConf.Namespace,
											"--name",
											backupConf.Name,
										},
									},
								},
							},
						},
					},
				},
			},
		}
		if err := ctrl.SetControllerReference(backupConf, cronjob, r.Scheme); err != nil {
			log.Error(err, "unable to set controller on job", "cronjob", cronjob, "backupconf", backupConf)
			return err
		}
		log.V(0).Info("creating the cronjob")
		if err := r.Create(context.Background(), cronjob); err != nil {
			log.Error(err, "unable to create the cronjob", "cronjob", cronjob)
			return err
		} else {
			changed = true
			return nil
		}
	}

	deleteSidecarContainer := func(target formolv1alpha1.Target) error {
		deployment, err := getDeployment(backupConf.Namespace, target.Name)
		if err != nil {
			return err
		}
		restorecontainers := []corev1.Container{}
		for _, container := range deployment.Spec.Template.Spec.Containers {
			if container.Name == formolv1alpha1.SIDECARCONTAINER_NAME {
				continue
			}
			restorecontainers = append(restorecontainers, container)
		}
		deployment.Spec.Template.Spec.Containers = restorecontainers
		if err := r.Update(context.Background(), deployment); err != nil {
			return err
		}
		if err := formolrbac.DeleteFormolRBAC(r.Client, deployment.Spec.Template.Spec.ServiceAccountName, deployment.Namespace); err != nil {
			return err
		}
		return nil
	}

	addSidecarContainer := func(target formolv1alpha1.Target) error {
		deployment, err := getDeployment(backupConf.Namespace, target.Name)
		if err != nil {
			log.Error(err, "unable to get Deployment")
			return err
		}
		log.V(1).Info("got deployment", "Deployment", deployment)
		for i, container := range deployment.Spec.Template.Spec.Containers {
			if container.Name == formolv1alpha1.SIDECARCONTAINER_NAME {
				log.V(0).Info("There is already a backup sidecar container. Skipping", "container", container)
				return nil
			}
			if target.ContainerName != "" && target.ContainerName == container.Name {
				// Put a tag so we can find what container we are supposed to backup
				// and what process we are supposed to chroot to run the init steps
				deployment.Spec.Template.Spec.Containers[i].Env = append(container.Env, corev1.EnvVar{
					Name:  formolv1alpha1.TARGETCONTAINER_TAG,
					Value: "True",
				})
			}
		}
		sidecar := corev1.Container{
			Name: formolv1alpha1.SIDECARCONTAINER_NAME,
			// TODO: Put the image in the BackupConfiguration YAML file
			Image: backupConf.Spec.Image,
			Args:  []string{"backupsession", "server"},
			//Image: "busybox",
			//Command: []string{
			//	"sh",
			//	"-c",
			//	"sleep 3600; echo done",
			//},
			Env: []corev1.EnvVar{
				corev1.EnvVar{
					Name: formolv1alpha1.POD_NAME,
					ValueFrom: &corev1.EnvVarSource{
						FieldRef: &corev1.ObjectFieldSelector{
							FieldPath: "metadata.name",
						},
					},
				},
				corev1.EnvVar{
					Name: formolv1alpha1.POD_NAMESPACE,
					ValueFrom: &corev1.EnvVarSource{
						FieldRef: &corev1.ObjectFieldSelector{
							FieldPath: "metadata.namespace",
						},
					},
				},
				corev1.EnvVar{
					Name:  formolv1alpha1.TARGET_NAME,
					Value: target.Name,
				},
			},
			VolumeMounts: []corev1.VolumeMount{},
		}

		// Gather information from the repo
		repo := &formolv1alpha1.Repo{}
		if err := r.Get(context.Background(), client.ObjectKey{
			Namespace: backupConf.Namespace,
			Name:      backupConf.Spec.Repository,
		}, repo); err != nil {
			log.Error(err, "unable to get Repo from BackupConfiguration")
			return err
		}
		sidecar.Env = append(sidecar.Env, formolutils.ConfigureResticEnvVar(backupConf, repo)...)

		for _, volumemount := range target.VolumeMounts {
			log.V(1).Info("mounts", "volumemount", volumemount)
			volumemount.ReadOnly = true
			sidecar.VolumeMounts = append(sidecar.VolumeMounts, *volumemount.DeepCopy())
		}
		deployment.Spec.Template.Spec.Containers = append(deployment.Spec.Template.Spec.Containers, sidecar)
		deployment.Spec.Template.Spec.ShareProcessNamespace = func() *bool { b := true; return &b }()

		if err := formolrbac.CreateFormolRBAC(r.Client, deployment.Spec.Template.Spec.ServiceAccountName, deployment.Namespace); err != nil {
			log.Error(err, "unable to create backupsessionlistener RBAC")
			return nil
		}

		log.V(0).Info("Adding a sidecar container")
		if err := r.Update(context.Background(), deployment); err != nil {
			log.Error(err, "unable to update the Deployment")
			return err
		} else {
			changed = true
			return nil
		}
	}

	deleteExternalResources := func() error {
		for _, target := range backupConf.Spec.Targets {
			switch target.Kind {
			case formolv1alpha1.SidecarKind:
				_ = deleteSidecarContainer(target)
			}
		}
		// TODO: remove the hardcoded "default"
		_ = deleteCronJob()
		return nil
		return ctrl.Result{}, err
	}
finalizerName := "finalizer.backupconfiguration.formol.desmojim.fr"
|
||||
|
||||
if !backupConf.ObjectMeta.DeletionTimestamp.IsZero() {
|
||||
log.V(0).Info("backupconf being deleted", "backupconf", backupConf.Name)
|
||||
r.Log.V(0).Info("backupconf being deleted", "backupconf", backupConf.ObjectMeta.Finalizers)
|
||||
if formolutils.ContainsString(backupConf.ObjectMeta.Finalizers, finalizerName) {
|
||||
_ = deleteExternalResources()
|
||||
_ = r.DeleteSidecar(backupConf)
|
||||
_ = r.DeleteCronJob(backupConf)
|
||||
backupConf.ObjectMeta.Finalizers = formolutils.RemoveString(backupConf.ObjectMeta.Finalizers, finalizerName)
|
||||
if err := r.Update(context.Background(), backupConf); err != nil {
|
||||
log.Error(err, "unable to remove finalizer")
|
||||
return reconcile.Result{}, err
|
||||
if err := r.Update(ctx, &backupConf); err != nil {
|
||||
r.Log.Error(err, "unable to remove finalizer")
|
||||
return ctrl.Result{}, err
|
||||
}
|
||||
}
|
||||
// We have been deleted. Return here
|
||||
log.V(0).Info("backupconf deleted", "backupconf", backupConf.Name)
|
||||
return reconcile.Result{}, nil
|
||||
r.Log.V(0).Info("backupconf deleted", "backupconf", backupConf.Name)
|
||||
return ctrl.Result{}, nil
|
||||
}
|
||||
|
||||
// Add finalizer
|
||||
if !formolutils.ContainsString(backupConf.ObjectMeta.Finalizers, finalizerName) {
|
||||
r.Log.V(0).Info("adding finalizer", "backupconf", backupConf)
|
||||
backupConf.ObjectMeta.Finalizers = append(backupConf.ObjectMeta.Finalizers, finalizerName)
|
||||
err := r.Update(context.Background(), backupConf)
|
||||
if err != nil {
|
||||
log.Error(err, "unable to append finalizer")
|
||||
if err := r.Update(ctx, &backupConf); err != nil {
|
||||
r.Log.Error(err, "unable to append finalizer")
|
||||
return ctrl.Result{}, err
|
||||
}
|
||||
return reconcile.Result{}, err
|
||||
// backupConf has been updated. Exit here. The reconciler will be called again so we can finish the job.
|
||||
return ctrl.Result{}, nil
|
||||
}
|
||||
|
||||
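The old/new finalizer handling interleaved above follows the standard pattern: persist the finalizer before doing real work, and only remove it once external resources are cleaned up. Recent controller-runtime versions ship helpers in sigs.k8s.io/controller-runtime/pkg/controller/controllerutil that express the same logic more compactly; a sketch, not part of this commit:

if backupConf.ObjectMeta.DeletionTimestamp.IsZero() {
	if !controllerutil.ContainsFinalizer(&backupConf, finalizerName) {
		controllerutil.AddFinalizer(&backupConf, finalizerName)
		return ctrl.Result{}, r.Update(ctx, &backupConf) // persist, then reconcile again
	}
} else if controllerutil.ContainsFinalizer(&backupConf, finalizerName) {
	// delete the external resources (cronjob, sidecar) first, then release the object
	controllerutil.RemoveFinalizer(&backupConf, finalizerName)
	return ctrl.Result{}, r.Update(ctx, &backupConf)
}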
if err := addCronJob(); err != nil {
|
||||
return reconcile.Result{}, nil
|
||||
if err := r.AddCronJob(backupConf); err != nil {
|
||||
return ctrl.Result{}, err
|
||||
} else {
|
||||
backupConf.Status.ActiveCronJob = true
|
||||
}
|
||||
|
||||
for _, target := range backupConf.Spec.Targets {
|
||||
switch target.Kind {
|
||||
case formolv1alpha1.SidecarKind:
|
||||
if err := addSidecarContainer(target); err != nil {
|
||||
return reconcile.Result{}, client.IgnoreNotFound(err)
|
||||
} else {
|
||||
backupConf.Status.ActiveSidecar = true
|
||||
}
|
||||
}
|
||||
if err := r.AddSidecar(backupConf); err != nil {
|
||||
r.Log.Error(err, "unable to add sidecar container")
|
||||
return ctrl.Result{}, err
|
||||
} else {
|
||||
backupConf.Status.ActiveSidecar = true
|
||||
}
|
||||
|
||||
//backupConf.Status.Suspended = false
|
||||
if changed == true {
|
||||
log.V(1).Info("updating backupconf")
|
||||
if err := r.Status().Update(ctx, backupConf); err != nil {
|
||||
log.Error(err, "unable to update backupconf", "backupconf", backupConf)
|
||||
return reconcile.Result{}, err
|
||||
}
|
||||
if err := r.Status().Update(ctx, &backupConf); err != nil {
|
||||
r.Log.Error(err, "Unable to update BackupConfiguration status")
|
||||
return ctrl.Result{}, err
|
||||
}
|
||||
|
||||
return reconcile.Result{}, nil
|
||||
return ctrl.Result{}, nil
|
||||
}
|
||||
|
||||
// SetupWithManager sets up the controller with the Manager.
|
||||
func (r *BackupConfigurationReconciler) SetupWithManager(mgr ctrl.Manager) error {
|
||||
return ctrl.NewControllerManagedBy(mgr).
|
||||
For(&formolv1alpha1.BackupConfiguration{}).
|
||||
WithOptions(controller.Options{MaxConcurrentReconciles: 3}).
|
||||
//WithEventFilter(predicate.GenerationChangedPredicate{}). // Don't reconcile when status gets updated
|
||||
//Owns(&formolv1alpha1.BackupSession{}).
|
||||
Owns(&kbatch_beta1.CronJob{}).
|
||||
Complete(r)
|
||||
}
|
||||
|
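formolutils.ContainsString and formolutils.RemoveString do the finalizer bookkeeping above; their definitions live in pkg/utils and are not part of this hunk. A minimal sketch of what such helpers conventionally look like (inferred from the call sites, not the committed implementation):

package utils

// ContainsString reports whether slice contains s.
func ContainsString(slice []string, s string) bool {
	for _, item := range slice {
		if item == s {
			return true
		}
	}
	return false
}

// RemoveString returns a copy of slice with every occurrence of s removed.
func RemoveString(slice []string, s string) (result []string) {
	for _, item := range slice {
		if item == s {
			continue
		}
		result = append(result, item)
	}
	return
}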
103
controllers/backupconfiguration_controller_cronjob.go
Normal file
@ -0,0 +1,103 @@
package controllers

import (
formolv1alpha1 "github.com/desmo999r/formol/api/v1alpha1"
batchv1 "k8s.io/api/batch/v1"
corev1 "k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/api/errors"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
ctrl "sigs.k8s.io/controller-runtime"
"sigs.k8s.io/controller-runtime/pkg/client"
)

func (r *BackupConfigurationReconciler) DeleteCronJob(backupConf formolv1alpha1.BackupConfiguration) error {
cronjob := &batchv1.CronJob{}
if err := r.Get(r.Context, client.ObjectKey{
Namespace: backupConf.Namespace,
Name: "backup-" + backupConf.Name,
}, cronjob); err == nil {
r.Log.V(0).Info("Deleting cronjob", "cronjob", cronjob.Name)
return r.Delete(r.Context, cronjob)
} else {
return err
}
}

func (r *BackupConfigurationReconciler) AddCronJob(backupConf formolv1alpha1.BackupConfiguration) error {
cronjob := &batchv1.CronJob{}
if err := r.Get(r.Context, client.ObjectKey{
Namespace: backupConf.Namespace,
Name: "backup-" + backupConf.Name,
}, cronjob); err == nil {
r.Log.V(0).Info("there is already a cronjob")
var changed bool
if backupConf.Spec.Schedule != cronjob.Spec.Schedule {
r.Log.V(0).Info("cronjob schedule has changed", "old schedule", cronjob.Spec.Schedule, "new schedule", backupConf.Spec.Schedule)
cronjob.Spec.Schedule = backupConf.Spec.Schedule
changed = true
}
// Suspend is a *bool: compare the pointed-to values, not the pointers.
if backupConf.Spec.Suspend != nil && (cronjob.Spec.Suspend == nil || *cronjob.Spec.Suspend != *backupConf.Spec.Suspend) {
r.Log.V(0).Info("cronjob suspend has changed", "before", cronjob.Spec.Suspend, "new", backupConf.Spec.Suspend)
cronjob.Spec.Suspend = backupConf.Spec.Suspend
changed = true
}
if changed == true {
if err := r.Update(r.Context, cronjob); err != nil {
r.Log.Error(err, "unable to update cronjob definition")
return err
}
if backupConf.Spec.Suspend != nil {
// Guard the dereference: Suspend is optional in the spec.
backupConf.Status.Suspended = *backupConf.Spec.Suspend
}
}
return nil
} else if errors.IsNotFound(err) == false {
r.Log.Error(err, "something went wrong")
return err
}

cronjob = &batchv1.CronJob{
ObjectMeta: metav1.ObjectMeta{
Name: "backup-" + backupConf.Name,
Namespace: backupConf.Namespace,
},
Spec: batchv1.CronJobSpec{
Suspend: backupConf.Spec.Suspend,
Schedule: backupConf.Spec.Schedule,
JobTemplate: batchv1.JobTemplateSpec{
Spec: batchv1.JobSpec{
Template: corev1.PodTemplateSpec{
Spec: corev1.PodSpec{
RestartPolicy: corev1.RestartPolicyOnFailure,
ServiceAccountName: "backupsession-creator",
Containers: []corev1.Container{
corev1.Container{
Name: "job-createbackupsession-" + backupConf.Name,
Image: backupConf.Spec.Image,
Args: []string{
"backupsession",
"create",
"--namespace",
backupConf.Namespace,
"--name",
backupConf.Name,
},
},
},
},
},
},
},
},
}
if err := ctrl.SetControllerReference(&backupConf, cronjob, r.Scheme); err != nil {
r.Log.Error(err, "unable to set controller on job", "cronjob", cronjob, "backupconf", backupConf)
return err
}
r.Log.V(0).Info("creating the cronjob")
if err := r.Create(r.Context, cronjob); err != nil {
r.Log.Error(err, "unable to create the cronjob", "cronjob", cronjob)
return err
} else {
if backupConf.Spec.Suspend != nil {
// Same guard: Suspend may be nil on a brand new configuration.
backupConf.Status.Suspended = *backupConf.Spec.Suspend
}
return nil
}
}
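Suspend is a *bool on both the BackupConfiguration spec and the CronJob spec, so a direct != between the two fields compares pointer identity rather than the suspended/not-suspended value; that is why the comparison above dereferences. A minimal standalone demonstration of the pitfall (illustrative only, not project code):

package main

import "fmt"

func main() {
	a, b := true, true
	pa, pb := &a, &b
	fmt.Println(pa == pb)   // false: distinct pointers, even though both point at true
	fmt.Println(*pa == *pb) // true: comparing the pointed-to values
}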
137
controllers/backupconfiguration_controller_sidecar.go
Normal file
@ -0,0 +1,137 @@
package controllers

import (
appsv1 "k8s.io/api/apps/v1"
corev1 "k8s.io/api/core/v1"
"sigs.k8s.io/controller-runtime/pkg/client"

formolv1alpha1 "github.com/desmo999r/formol/api/v1alpha1"
)

func (r *BackupConfigurationReconciler) DeleteSidecar(backupConf formolv1alpha1.BackupConfiguration) error {
removeTags := func(podSpec *corev1.PodSpec, target formolv1alpha1.Target) {
for i, container := range podSpec.Containers {
for _, targetContainer := range target.Containers {
if targetContainer.Name == container.Name {
if container.Env[len(container.Env)-1].Name == formolv1alpha1.TARGETCONTAINER_TAG {
podSpec.Containers[i].Env = container.Env[:len(container.Env)-1]
} else {
for j, e := range container.Env {
if e.Name == formolv1alpha1.TARGETCONTAINER_TAG {
container.Env[j] = container.Env[len(container.Env)-1]
podSpec.Containers[i].Env = container.Env[:len(container.Env)-1]
break
}
}
}
}
}
}
}
for _, target := range backupConf.Spec.Targets {
switch target.TargetKind {
case formolv1alpha1.Deployment:
deployment := &appsv1.Deployment{}
if err := r.Get(r.Context, client.ObjectKey{
Namespace: backupConf.Namespace,
Name: target.TargetName,
}, deployment); err != nil {
r.Log.Error(err, "cannot get deployment", "Deployment", target.TargetName)
return err
}
restoreContainers := []corev1.Container{}
for _, container := range deployment.Spec.Template.Spec.Containers {
if container.Name == formolv1alpha1.SIDECARCONTAINER_NAME {
continue
}
restoreContainers = append(restoreContainers, container)
}
deployment.Spec.Template.Spec.Containers = restoreContainers
removeTags(&deployment.Spec.Template.Spec, target)
if err := r.Update(r.Context, deployment); err != nil {
r.Log.Error(err, "unable to update deployment", "deployment", deployment)
return err
}
}
}

return nil
}

func (r *BackupConfigurationReconciler) AddSidecar(backupConf formolv1alpha1.BackupConfiguration) error {
// Go through all the 'targets'
// the backupType: Online needs a sidecar container for every single listed 'container'
// if the backupType is something else than Online, the 'container' will still need a sidecar
// if it has 'steps'
addTags := func(podSpec *corev1.PodSpec, target formolv1alpha1.Target) bool {
for i, container := range podSpec.Containers {
if container.Name == formolv1alpha1.SIDECARCONTAINER_NAME {
return false
}
for _, targetContainer := range target.Containers {
if targetContainer.Name == container.Name {
podSpec.Containers[i].Env = append(container.Env, corev1.EnvVar{
Name: formolv1alpha1.TARGETCONTAINER_TAG,
Value: container.Name,
})
}
}
}
return true
}

for _, target := range backupConf.Spec.Targets {
addSidecar := false
for _, targetContainer := range target.Containers {
if len(targetContainer.Steps) > 0 {
addSidecar = true
}
}
if target.BackupType == formolv1alpha1.OnlineKind {
addSidecar = true
}
if addSidecar {
repo := formolv1alpha1.Repo{}
if err := r.Get(r.Context, client.ObjectKey{
Namespace: backupConf.Namespace,
Name: backupConf.Spec.Repository,
}, &repo); err != nil {
r.Log.Error(err, "unable to get Repo")
return err
}
r.Log.V(1).Info("Got Repository", "repo", repo)
env := repo.GetResticEnv(backupConf)
sideCar := corev1.Container{
Name: formolv1alpha1.SIDECARCONTAINER_NAME,
Image: backupConf.Spec.Image,
Args: []string{"backupsession", "server"},
Env: append(env, corev1.EnvVar{
Name: formolv1alpha1.TARGET_NAME,
Value: target.TargetName,
}),
VolumeMounts: []corev1.VolumeMount{},
}
switch target.TargetKind {
case formolv1alpha1.Deployment:
deployment := &appsv1.Deployment{}
if err := r.Get(r.Context, client.ObjectKey{
Namespace: backupConf.Namespace,
Name: target.TargetName,
}, deployment); err != nil {
r.Log.Error(err, "cannot get deployment", "Deployment", target.TargetName)
return err
}
if addTags(&deployment.Spec.Template.Spec, target) {
deployment.Spec.Template.Spec.Containers = append(deployment.Spec.Template.Spec.Containers, sideCar)
r.Log.V(1).Info("Updating deployment", "deployment", deployment, "containers", deployment.Spec.Template.Spec.Containers)
if err := r.Update(r.Context, deployment); err != nil {
r.Log.Error(err, "cannot update deployment", "Deployment", deployment)
return err
}
}
}
}
}

return nil
}
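removeTags above deletes the TARGETCONTAINER_TAG entry from a container's Env by overwriting it with the last element and truncating the slice, an O(1) unordered remove. The same technique in isolation (a standalone sketch, not project code):

package main

import "fmt"

// removeAt deletes the element at index i without preserving order:
// the last element is moved into slot i and the slice is shortened by one.
func removeAt(s []string, i int) []string {
	s[i] = s[len(s)-1]
	return s[:len(s)-1]
}

func main() {
	env := []string{"PATH", "TARGETCONTAINER_TAG", "HOME"}
	for i, v := range env {
		if v == "TARGETCONTAINER_TAG" {
			env = removeAt(env, i)
			break
		}
	}
	fmt.Println(env) // [PATH HOME]
}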
controllers/backupconfiguration_controller_test.go
@ -1,67 +1,64 @@
/*
Copyright 2023.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package controllers

import (
"context"
//"k8s.io/apimachinery/pkg/types"
//"reflect"
//"fmt"
"time"

. "github.com/onsi/ginkgo"
. "github.com/onsi/gomega"
//batchv1 "k8s.io/api/batch/v1"
formolv1alpha1 "github.com/desmo999r/formol/api/v1alpha1"
appsv1 "k8s.io/api/apps/v1"
batchv1beta1 "k8s.io/api/batch/v1beta1"
corev1 "k8s.io/api/core/v1"
//"k8s.io/apimachinery/pkg/api/errors"
. "github.com/onsi/ginkgo/v2"
. "github.com/onsi/gomega"
batchv1 "k8s.io/api/batch/v1"
//"time"
//appsv1 "k8s.io/api/apps/v1"
//corev1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/types"
)

var _ = Describe("Testing BackupConf controller", func() {
const (
BCBackupConfName = "test-backupconf-controller"
)
var _ = Describe("BackupConfiguration controller", func() {
const BACKUPCONF_NAME = "test-backupconf-controller"

var (
key = types.NamespacedName{
Name: BCBackupConfName,
Namespace: TestNamespace,
}
backupConf *formolv1alpha1.BackupConfiguration
ctx = context.Background()
backupConf = &formolv1alpha1.BackupConfiguration{}
key = types.NamespacedName{
Name: BACKUPCONF_NAME,
Namespace: NAMESPACE_NAME,
}
)

BeforeEach(func() {
backupConf = &formolv1alpha1.BackupConfiguration{
ObjectMeta: metav1.ObjectMeta{
Name: BCBackupConfName,
Namespace: TestNamespace,
Name: BACKUPCONF_NAME,
Namespace: NAMESPACE_NAME,
},
Spec: formolv1alpha1.BackupConfigurationSpec{
Repository: TestRepoName,
Repository: REPO_NAME,
Schedule: "1 * * * *",
Image: "desmo999r/formolcli:latest",
Image: "desmo999r/formolcli:v0.3.2",
Targets: []formolv1alpha1.Target{
formolv1alpha1.Target{
Kind: formolv1alpha1.SidecarKind,
Name: TestDeploymentName,
VolumeMounts: []corev1.VolumeMount{
corev1.VolumeMount{
Name: TestDataVolume,
MountPath: TestDataMountPath,
},
},
Paths: []string{
TestDataMountPath,
},
},
formolv1alpha1.Target{
Kind: formolv1alpha1.JobKind,
Name: TestBackupFuncName,
Steps: []formolv1alpha1.Step{
formolv1alpha1.Step{
Name: TestBackupFuncName,
BackupType: formolv1alpha1.OnlineKind,
TargetKind: formolv1alpha1.Deployment,
TargetName: DEPLOYMENT_NAME,
Containers: []formolv1alpha1.TargetContainer{
formolv1alpha1.TargetContainer{
Name: CONTAINER_NAME,
},
},
},
@ -69,7 +66,8 @@ var _ = Describe("Testing BackupConf controller", func() {
},
}
})
Context("Creating a backupconf", func() {

Context("Creating a BackupConf", func() {
JustBeforeEach(func() {
Eventually(func() error {
return k8sClient.Create(ctx, backupConf)
@ -81,97 +79,87 @@ var _ = Describe("Testing BackupConf controller", func() {
It("Has a schedule", func() {
realBackupConf := &formolv1alpha1.BackupConfiguration{}
Eventually(func() bool {
err := k8sClient.Get(ctx, key, realBackupConf)
if err != nil {
if err := k8sClient.Get(ctx, key, realBackupConf); err != nil {
return false
}
return true
}, timeout, interval).Should(BeTrue())
Expect(realBackupConf.Spec.Schedule).Should(Equal("1 * * * *"))
Expect(realBackupConf.Spec.Targets[0].Retry).Should(Equal(2))
})
It("Should also create a CronJob", func() {
cronJob := &batchv1beta1.CronJob{}
Eventually(func() bool {
err := k8sClient.Get(ctx, types.NamespacedName{
Name: "backup-" + BCBackupConfName,
Namespace: TestNamespace,
}, cronJob)
return err == nil
}, timeout, interval).Should(BeTrue())
Expect(cronJob.Spec.Schedule).Should(Equal("1 * * * *"))
})
It("Should also create a sidecar container", func() {
realDeployment := &appsv1.Deployment{}
Eventually(func() (int, error) {
err := k8sClient.Get(ctx, types.NamespacedName{
Name: TestDeploymentName,
Namespace: TestNamespace,
}, realDeployment)
if err != nil {
return 0, err
}
return len(realDeployment.Spec.Template.Spec.Containers), nil
}, timeout, interval).Should(Equal(2))
})
It("Should also update the CronJob", func() {
It("Should create a CronJob", func() {
realBackupConf := &formolv1alpha1.BackupConfiguration{}
time.Sleep(300 * time.Millisecond)
Eventually(func() bool {
err := k8sClient.Get(ctx, key, realBackupConf)
if err != nil {
if err := k8sClient.Get(ctx, key, realBackupConf); err != nil {
return false
}
return realBackupConf.Status.ActiveCronJob
}, timeout, interval).Should(BeTrue())
cronJob := &batchv1.CronJob{}
Eventually(func() bool {
if err := k8sClient.Get(ctx, types.NamespacedName{
Name: "backup-" + BACKUPCONF_NAME,
Namespace: NAMESPACE_NAME,
}, cronJob); err != nil {
return false
}
return true
}, timeout, interval).Should(BeTrue())
Expect(cronJob.Spec.Schedule).Should(Equal("1 * * * *"))
})
It("Should update the CronJob", func() {
realBackupConf := &formolv1alpha1.BackupConfiguration{}
Eventually(func() bool {
if err := k8sClient.Get(ctx, key, realBackupConf); err != nil {
return false
}
return realBackupConf.Status.ActiveCronJob
}, timeout, interval).Should(BeTrue())
realBackupConf.Spec.Schedule = "1 0 * * *"
suspend := true
realBackupConf.Spec.Suspend = &suspend
Expect(k8sClient.Update(ctx, realBackupConf)).Should(Succeed())
cronJob := &batchv1beta1.CronJob{}
Eventually(func() (string, error) {
err := k8sClient.Get(ctx, types.NamespacedName{
Name: "backup-" + BCBackupConfName,
Namespace: TestNamespace,
}, cronJob)
if err != nil {
return "", err
cronJob := &batchv1.CronJob{}
Eventually(func() string {
if err := k8sClient.Get(ctx, types.NamespacedName{
Name: "backup-" + BACKUPCONF_NAME,
Namespace: NAMESPACE_NAME,
}, cronJob); err != nil {
return ""
}
return cronJob.Spec.Schedule, nil
return cronJob.Spec.Schedule
}, timeout, interval).Should(Equal("1 0 * * *"))
Eventually(func() (bool, error) {
err := k8sClient.Get(ctx, types.NamespacedName{
Name: "backup-" + BCBackupConfName,
Namespace: TestNamespace,
}, cronJob)
if err != nil {
return false, err
}
return *cronJob.Spec.Suspend == true, nil
}, timeout, interval).Should(BeTrue())
Expect(*cronJob.Spec.Suspend).Should(BeTrue())
})
})
Context("Deleting a backupconf", func() {
Context("Deleting a BackupConf", func() {
JustBeforeEach(func() {
Eventually(func() error {
return k8sClient.Create(ctx, backupConf)
}, timeout, interval).Should(Succeed())
})
It("Should also delete the sidecar container", func() {
Expect(k8sClient.Delete(ctx, backupConf)).Should(Succeed())
realDeployment := &appsv1.Deployment{}
Eventually(func() (int, error) {
err := k8sClient.Get(ctx, types.NamespacedName{
Name: TestDeploymentName,
Namespace: TestNamespace,
}, realDeployment)
if err != nil {
return 0, err
It("Should delete the CronJob", func() {
cronJob := &batchv1.CronJob{}
Eventually(func() bool {
if err := k8sClient.Get(ctx, types.NamespacedName{
Name: "backup-" + BACKUPCONF_NAME,
Namespace: NAMESPACE_NAME,
}, cronJob); err != nil {
return false
}
return len(realDeployment.Spec.Template.Spec.Containers), nil
}, timeout, interval).Should(Equal(1))
return true
}, timeout, interval).Should(BeTrue())
By("The CronJob has been created. Now deleting the BackupConfiguration")
Expect(k8sClient.Delete(ctx, backupConf)).Should(Succeed())
Eventually(func() bool {
if err := k8sClient.Get(ctx, types.NamespacedName{
Name: "backup-" + BACKUPCONF_NAME,
Namespace: NAMESPACE_NAME,
}, cronJob); err != nil {
return false
}
return true
}, timeout, interval).Should(BeFalse())

})
})

})
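The rewritten assertions above all follow the same gomega polling idiom: the polled function swallows transient Get errors by returning a zero value, so Eventually keeps retrying until the controller has converged or the timeout elapses. A minimal sketch of the idiom (k8sClient, timeout, and interval come from the suite setup, which is not part of this hunk; key here is a placeholder NamespacedName):

Eventually(func() bool {
	// Return false (the zero value) on any error so the poll retries
	// instead of aborting; the matcher only sees the returned value.
	var cj batchv1.CronJob
	if err := k8sClient.Get(ctx, key, &cj); err != nil {
		return false
	}
	return cj.Spec.Suspend != nil && *cj.Spec.Suspend
}, timeout, interval).Should(BeTrue())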
controllers/backupsession_controller.go
@ -1,5 +1,5 @@
/*

Copyright 2023.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
@ -18,458 +18,49 @@ package controllers

import (
"context"
"fmt"
"sort"
"strings"
"time"

"github.com/go-logr/logr"
batchv1 "k8s.io/api/batch/v1"
corev1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/fields"
"k8s.io/apimachinery/pkg/runtime"
ctrl "sigs.k8s.io/controller-runtime"
"sigs.k8s.io/controller-runtime/pkg/client"
"sigs.k8s.io/controller-runtime/pkg/controller/controllerutil"
"sigs.k8s.io/controller-runtime/pkg/reconcile"
"sigs.k8s.io/controller-runtime/pkg/log"

formolv1alpha1 "github.com/desmo999r/formol/api/v1alpha1"
formolutils "github.com/desmo999r/formol/pkg/utils"
)

const (
sessionState string = ".metadata.state"
finalizerName string = "finalizer.backupsession.formol.desmojim.fr"
JOBTTL int32 = 7200
)

// BackupSessionReconciler reconciles a BackupSession object
type BackupSessionReconciler struct {
client.Client
Log logr.Logger
Scheme *runtime.Scheme
Log logr.Logger
context.Context
}

var _ reconcile.Reconciler = &BackupSessionReconciler{}
//+kubebuilder:rbac:groups=formol.desmojim.fr,resources=backupsessions,verbs=get;list;watch;create;update;patch;delete
//+kubebuilder:rbac:groups=formol.desmojim.fr,resources=backupsessions/status,verbs=get;update;patch
//+kubebuilder:rbac:groups=formol.desmojim.fr,resources=backupsessions/finalizers,verbs=update

// +kubebuilder:rbac:groups=formol.desmojim.fr,resources=backupsessions,verbs=get;list;watch;create;update;patch;delete
// +kubebuilder:rbac:groups=formol.desmojim.fr,resources=backupsessions/status,verbs=get;update;patch;create;delete
// +kubebuilder:rbac:groups=formol.desmojim.fr,resources=functions,verbs=get;list;watch
// +kubebuilder:rbac:groups=batch,resources=jobs,verbs=get;list;create;update;patch;delete;watch
// +kubebuilder:rbac:groups=coordination.k8s.io,resources=leases,verbs=get;list;create;update
// Reconcile is part of the main kubernetes reconciliation loop which aims to
// move the current state of the cluster closer to the desired state.
// TODO(user): Modify the Reconcile function to compare the state specified by
// the BackupSession object against the actual cluster state, and then
// perform operations to make the cluster state reflect the state specified by
// the user.
//
// For more details, check Reconcile and its Result here:
// - https://pkg.go.dev/sigs.k8s.io/controller-runtime@v0.13.1/pkg/reconcile
func (r *BackupSessionReconciler) Reconcile(ctx context.Context, req ctrl.Request) (ctrl.Result, error) {
r.Log = log.FromContext(ctx)
r.Context = ctx

func (r *BackupSessionReconciler) Reconcile(ctx context.Context, req reconcile.Request) (reconcile.Result, error) {
log := r.Log.WithValues("backupsession", req.NamespacedName)
r.Log.V(1).Info("Enter Reconcile with req", "req", req, "reconciler", r)

backupSession := &formolv1alpha1.BackupSession{}
if err := r.Get(ctx, req.NamespacedName, backupSession); err != nil {
log.Error(err, "unable to get backupsession")
return reconcile.Result{}, client.IgnoreNotFound(err)
}
backupConf := &formolv1alpha1.BackupConfiguration{}
if err := r.Get(ctx, client.ObjectKey{
Namespace: backupSession.Namespace,
Name: backupSession.Spec.Ref.Name,
}, backupConf); err != nil {
log.Error(err, "unable to get backupConfiguration")
return reconcile.Result{}, client.IgnoreNotFound(err)
}

// helper functions
// is there a backup operation ongoing
isBackupOngoing := func() bool {
backupSessionList := &formolv1alpha1.BackupSessionList{}
if err := r.List(ctx, backupSessionList, client.InNamespace(backupConf.Namespace), client.MatchingFieldsSelector{Selector: fields.SelectorFromSet(fields.Set{sessionState: "Running"})}); err != nil {
log.Error(err, "unable to get backupsessionlist")
return true
}
return len(backupSessionList.Items) > 0
}
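isBackupOngoing filters with a MatchingFieldsSelector on the custom sessionState field (".metadata.state"). A field selector like this only works if the index was registered with the manager beforehand; that registration is not visible in this hunk. A sketch of what it would plausibly look like (assumed to live in SetupWithManager or main.go, using the controller-runtime FieldIndexer API):

if err := mgr.GetFieldIndexer().IndexField(context.Background(), &formolv1alpha1.BackupSession{}, sessionState,
	func(obj client.Object) []string {
		// Index each BackupSession under its current session state so
		// List can select on ".metadata.state".
		session := obj.(*formolv1alpha1.BackupSession)
		return []string{string(session.Status.SessionState)}
	}); err != nil {
	return err
}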
|
||||
|
||||
// delete session specific backup resources
|
||||
deleteExternalResources := func() error {
|
||||
log := r.Log.WithValues("deleteExternalResources", backupSession.Name)
|
||||
// Gather information from the repo
|
||||
repo := &formolv1alpha1.Repo{}
|
||||
if err := r.Get(ctx, client.ObjectKey{
|
||||
Namespace: backupConf.Namespace,
|
||||
Name: backupConf.Spec.Repository,
|
||||
}, repo); err != nil {
|
||||
log.Error(err, "unable to get Repo from BackupConfiguration")
|
||||
return err
|
||||
}
|
||||
env := formolutils.ConfigureResticEnvVar(backupConf, repo)
|
||||
// container that will delete the restic snapshot(s) matching the backupsession
|
||||
deleteSnapshots := []corev1.Container{}
|
||||
for _, target := range backupSession.Status.Targets {
|
||||
if target.SessionState == formolv1alpha1.Success {
|
||||
deleteSnapshots = append(deleteSnapshots, corev1.Container{
|
||||
Name: target.Name,
|
||||
Image: backupConf.Spec.Image,
|
||||
Args: []string{"snapshot", "delete", "--snapshot-id", target.SnapshotId},
|
||||
Env: env,
|
||||
})
|
||||
}
|
||||
}
|
||||
// create a job to delete the restic snapshot(s) with the backupsession name tag
|
||||
if len(deleteSnapshots) > 0 {
|
||||
jobTtl := JOBTTL
|
||||
job := &batchv1.Job{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
GenerateName: fmt.Sprintf("delete-%s-", backupSession.Name),
|
||||
Namespace: backupSession.Namespace,
|
||||
},
|
||||
Spec: batchv1.JobSpec{
|
||||
TTLSecondsAfterFinished: &jobTtl,
|
||||
Template: corev1.PodTemplateSpec{
|
||||
Spec: corev1.PodSpec{
|
||||
InitContainers: deleteSnapshots[1:],
|
||||
Containers: []corev1.Container{deleteSnapshots[0]},
|
||||
RestartPolicy: corev1.RestartPolicyOnFailure,
|
||||
},
|
||||
},
|
||||
},
|
||||
}
|
||||
log.V(0).Info("creating a job to delete restic snapshots")
|
||||
if err := r.Create(ctx, job); err != nil {
|
||||
log.Error(err, "unable to delete job", "job", job)
|
||||
return err
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// create a backup job
|
||||
createBackupJob := func(target formolv1alpha1.Target) error {
|
||||
log := r.Log.WithValues("createbackupjob", target.Name)
|
||||
ctx := context.Background()
|
||||
backupSessionEnv := []corev1.EnvVar{
|
||||
corev1.EnvVar{
|
||||
Name: "TARGET_NAME",
|
||||
Value: target.Name,
|
||||
},
|
||||
corev1.EnvVar{
|
||||
Name: "BACKUPSESSION_NAME",
|
||||
Value: backupSession.Name,
|
||||
},
|
||||
corev1.EnvVar{
|
||||
Name: "BACKUPSESSION_NAMESPACE",
|
||||
Value: backupSession.Namespace,
|
||||
},
|
||||
}
|
||||
|
||||
output := corev1.VolumeMount{
|
||||
Name: "output",
|
||||
MountPath: "/output",
|
||||
}
|
||||
restic := corev1.Container{
|
||||
Name: "restic",
|
||||
Image: backupConf.Spec.Image,
|
||||
Args: []string{"volume", "backup", "--tag", backupSession.Name, "--path", "/output"},
|
||||
VolumeMounts: []corev1.VolumeMount{output},
|
||||
Env: backupSessionEnv,
|
||||
}
|
||||
log.V(1).Info("creating a tagged backup job", "container", restic)
|
||||
// Gather information from the repo
|
||||
repo := &formolv1alpha1.Repo{}
|
||||
if err := r.Get(ctx, client.ObjectKey{
|
||||
Namespace: backupConf.Namespace,
|
||||
Name: backupConf.Spec.Repository,
|
||||
}, repo); err != nil {
|
||||
log.Error(err, "unable to get Repo from BackupConfiguration")
|
||||
return err
|
||||
}
|
||||
// S3 backing storage
|
||||
restic.Env = append(restic.Env, formolutils.ConfigureResticEnvVar(backupConf, repo)...)
|
||||
jobTtl := JOBTTL
|
||||
job := &batchv1.Job{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
GenerateName: fmt.Sprintf("%s-%s-", backupSession.Name, target.Name),
|
||||
Namespace: backupConf.Namespace,
|
||||
},
|
||||
Spec: batchv1.JobSpec{
|
||||
TTLSecondsAfterFinished: &jobTtl,
|
||||
Template: corev1.PodTemplateSpec{
|
||||
Spec: corev1.PodSpec{
|
||||
InitContainers: []corev1.Container{},
|
||||
Containers: []corev1.Container{restic},
|
||||
Volumes: []corev1.Volume{
|
||||
corev1.Volume{Name: "output"},
|
||||
},
|
||||
RestartPolicy: corev1.RestartPolicyOnFailure,
|
||||
},
|
||||
},
|
||||
},
|
||||
}
|
||||
for _, step := range target.Steps {
|
||||
function := &formolv1alpha1.Function{}
|
||||
if err := r.Get(ctx, client.ObjectKey{
|
||||
Namespace: backupConf.Namespace,
|
||||
Name: step.Name,
|
||||
}, function); err != nil {
|
||||
log.Error(err, "unable to get function", "Function", step)
|
||||
return err
|
||||
}
|
||||
function.Spec.Name = function.Name
|
||||
function.Spec.Env = append(function.Spec.Env, backupSessionEnv...)
|
||||
function.Spec.VolumeMounts = append(function.Spec.VolumeMounts, output)
|
||||
job.Spec.Template.Spec.InitContainers = append(job.Spec.Template.Spec.InitContainers, function.Spec)
|
||||
}
|
||||
if err := ctrl.SetControllerReference(backupConf, job, r.Scheme); err != nil {
|
||||
log.Error(err, "unable to set controller on job", "job", job, "backupconf", backupConf)
|
||||
return err
|
||||
}
|
||||
log.V(0).Info("creating a backup job", "target", target)
|
||||
if err := r.Create(ctx, job); err != nil {
|
||||
log.Error(err, "unable to create job", "job", job)
|
||||
return err
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// start the next task
|
||||
startNextTask := func() (*formolv1alpha1.TargetStatus, error) {
|
||||
nextTarget := len(backupSession.Status.Targets)
|
||||
if nextTarget < len(backupConf.Spec.Targets) {
|
||||
target := backupConf.Spec.Targets[nextTarget]
|
||||
targetStatus := formolv1alpha1.TargetStatus{
|
||||
Name: target.Name,
|
||||
Kind: target.Kind,
|
||||
SessionState: formolv1alpha1.New,
|
||||
StartTime: &metav1.Time{Time: time.Now()},
|
||||
Try: 1,
|
||||
}
|
||||
backupSession.Status.Targets = append(backupSession.Status.Targets, targetStatus)
|
||||
switch target.Kind {
|
||||
case formolv1alpha1.JobKind:
|
||||
if err := createBackupJob(target); err != nil {
|
||||
log.V(0).Info("unable to create task", "task", target)
|
||||
targetStatus.SessionState = formolv1alpha1.Failure
|
||||
return nil, err
|
||||
}
|
||||
}
|
||||
return &targetStatus, nil
|
||||
} else {
|
||||
return nil, nil
|
||||
}
|
||||
}
|
||||
|
||||
// cleanup existing backupsessions
|
||||
cleanupSessions := func() {
|
||||
backupSessionList := &formolv1alpha1.BackupSessionList{}
|
||||
if err := r.List(ctx, backupSessionList, client.InNamespace(backupConf.Namespace), client.MatchingFieldsSelector{Selector: fields.SelectorFromSet(fields.Set{sessionState: string(formolv1alpha1.Success)})}); err != nil {
|
||||
log.Error(err, "unable to get backupsessionlist")
|
||||
return
|
||||
}
|
||||
if len(backupSessionList.Items) < 2 {
|
||||
// Not enough backupSession to proceed
|
||||
log.V(1).Info("Not enough successful backup jobs")
|
||||
return
|
||||
}
|
||||
|
||||
sort.Slice(backupSessionList.Items, func(i, j int) bool {
|
||||
return backupSessionList.Items[i].Status.StartTime.Time.Unix() > backupSessionList.Items[j].Status.StartTime.Time.Unix()
|
||||
})
|
||||
|
||||
type KeepBackup struct {
|
||||
Counter int32
|
||||
Last time.Time
|
||||
}
|
||||
|
||||
var lastBackups, dailyBackups, weeklyBackups, monthlyBackups, yearlyBackups KeepBackup
|
||||
lastBackups.Counter = backupConf.Spec.Keep.Last
|
||||
dailyBackups.Counter = backupConf.Spec.Keep.Daily
|
||||
weeklyBackups.Counter = backupConf.Spec.Keep.Weekly
|
||||
monthlyBackups.Counter = backupConf.Spec.Keep.Monthly
|
||||
yearlyBackups.Counter = backupConf.Spec.Keep.Yearly
|
||||
for _, session := range backupSessionList.Items {
|
||||
if session.Spec.Ref.Name != backupConf.Name {
|
||||
continue
|
||||
}
|
||||
deleteSession := true
|
||||
keep := []string{}
|
||||
if lastBackups.Counter > 0 {
|
||||
log.V(1).Info("Keep backup", "last", session.Status.StartTime)
|
||||
lastBackups.Counter--
|
||||
keep = append(keep, "last")
|
||||
deleteSession = false
|
||||
}
|
||||
if dailyBackups.Counter > 0 {
|
||||
if session.Status.StartTime.Time.YearDay() != dailyBackups.Last.YearDay() {
|
||||
log.V(1).Info("Keep backup", "daily", session.Status.StartTime)
|
||||
dailyBackups.Counter--
|
||||
dailyBackups.Last = session.Status.StartTime.Time
|
||||
keep = append(keep, "daily")
|
||||
deleteSession = false
|
||||
}
|
||||
}
|
||||
if weeklyBackups.Counter > 0 {
|
||||
if session.Status.StartTime.Time.Weekday().String() == "Sunday" && session.Status.StartTime.Time.YearDay() != weeklyBackups.Last.YearDay() {
|
||||
log.V(1).Info("Keep backup", "weekly", session.Status.StartTime)
|
||||
weeklyBackups.Counter--
|
||||
weeklyBackups.Last = session.Status.StartTime.Time
|
||||
keep = append(keep, "weekly")
|
||||
deleteSession = false
|
||||
}
|
||||
}
|
||||
if monthlyBackups.Counter > 0 {
|
||||
if session.Status.StartTime.Time.Day() == 1 && session.Status.StartTime.Time.Month() != monthlyBackups.Last.Month() {
|
||||
log.V(1).Info("Keep backup", "monthly", session.Status.StartTime)
|
||||
monthlyBackups.Counter--
|
||||
monthlyBackups.Last = session.Status.StartTime.Time
|
||||
keep = append(keep, "monthly")
|
||||
deleteSession = false
|
||||
}
|
||||
}
|
||||
if yearlyBackups.Counter > 0 {
|
||||
if session.Status.StartTime.Time.YearDay() == 1 && session.Status.StartTime.Time.Year() != yearlyBackups.Last.Year() {
|
||||
log.V(1).Info("Keep backup", "yearly", session.Status.StartTime)
|
||||
yearlyBackups.Counter--
|
||||
yearlyBackups.Last = session.Status.StartTime.Time
|
||||
keep = append(keep, "yearly")
|
||||
deleteSession = false
|
||||
}
|
||||
}
|
||||
if deleteSession {
|
||||
log.V(1).Info("Delete session", "delete", session.Status.StartTime)
|
||||
if err := r.Delete(ctx, &session); err != nil {
|
||||
log.Error(err, "unable to delete backupsession", "session", session.Name)
|
||||
// we don't return anything, we keep going
|
||||
}
|
||||
} else {
|
||||
session.Status.Keep = strings.Join(keep, ",") // + " " + time.Now().Format("2006 Jan 02 15:04:05 -0700 MST")
|
||||
if err := r.Status().Update(ctx, &session); err != nil {
|
||||
log.Error(err, "unable to update session status", "session", session)
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
// end helper functions

	log.V(0).Info("backupSession", "backupSession.ObjectMeta", backupSession.ObjectMeta, "backupSession.Status", backupSession.Status)
	if backupSession.ObjectMeta.DeletionTimestamp.IsZero() {
		switch backupSession.Status.SessionState {
		case formolv1alpha1.New:
			// Check if the finalizer has been registered
			if !controllerutil.ContainsFinalizer(backupSession, finalizerName) {
				controllerutil.AddFinalizer(backupSession, finalizerName)
				// We update the BackupSession to add the finalizer.
				// Reconcile will be called again, so return now.
				err := r.Update(ctx, backupSession)
				if err != nil {
					log.Error(err, "unable to add finalizer")
				}
				return reconcile.Result{}, err
			}
			// Brand new backupsession
			if isBackupOngoing() {
				log.V(0).Info("There is an ongoing backup. Let's reschedule this operation")
				return reconcile.Result{RequeueAfter: 30 * time.Second}, nil
			}
			// start the first task
			backupSession.Status.SessionState = formolv1alpha1.Running
			targetStatus, err := startNextTask()
			if err != nil {
				return reconcile.Result{}, err
			}
			log.V(0).Info("New backup. Start the first task", "task", targetStatus)
			if err := r.Status().Update(ctx, backupSession); err != nil {
				log.Error(err, "unable to update BackupSession status")
				return reconcile.Result{}, err
			}
		case formolv1alpha1.Running:
			// Backup ongoing. Check the status of the last task to decide what to do.
			currentTargetStatus := &backupSession.Status.Targets[len(backupSession.Status.Targets)-1]
			switch currentTargetStatus.SessionState {
			case formolv1alpha1.Running:
				// The current task is still running. Nothing to do.
				log.V(0).Info("task is still running", "targetStatus", currentTargetStatus)
			case formolv1alpha1.Success:
				// The last task succeeded. Try to start the next one.
				targetStatus, err := startNextTask()
				log.V(0).Info("last task was a success. start a new one", "currentTargetStatus", currentTargetStatus, "targetStatus", targetStatus)
				if err != nil {
					return reconcile.Result{}, err
				}
				if targetStatus == nil {
					// No more tasks to start. The backup is a success.
					backupSession.Status.SessionState = formolv1alpha1.Success
					log.V(0).Info("Backup is successful. Let's try to do some cleanup")
					cleanupSessions()
				}
				if err := r.Status().Update(ctx, backupSession); err != nil {
					log.Error(err, "unable to update BackupSession status")
					return reconcile.Result{}, err
				}
			case formolv1alpha1.Failure:
				// The last task failed. Try to run it again.
				currentTarget := backupConf.Spec.Targets[len(backupSession.Status.Targets)-1]
				if currentTargetStatus.Try < currentTarget.Retry {
					log.V(0).Info("last task was a failure. try again", "currentTargetStatus", currentTargetStatus)
					currentTargetStatus.Try++
					currentTargetStatus.SessionState = formolv1alpha1.New
					currentTargetStatus.StartTime = &metav1.Time{Time: time.Now()}
					switch currentTarget.Kind {
					case formolv1alpha1.JobKind:
						if err := createBackupJob(currentTarget); err != nil {
							log.V(0).Info("unable to create task", "task", currentTarget)
							currentTargetStatus.SessionState = formolv1alpha1.Failure
							return reconcile.Result{}, err
						}
					}
				} else {
					log.V(0).Info("task failed again and for the last time", "currentTargetStatus", currentTargetStatus)
					backupSession.Status.SessionState = formolv1alpha1.Failure
				}
				if err := r.Status().Update(ctx, backupSession); err != nil {
					log.Error(err, "unable to update BackupSession status")
					return reconcile.Result{}, err
				}
			}
		case formolv1alpha1.Success:
			// Should never get here
		case formolv1alpha1.Failure:
			// The backup failed
		case "":
			// BackupSession has just been created
			backupSession.Status.SessionState = formolv1alpha1.New
			backupSession.Status.StartTime = &metav1.Time{Time: time.Now()}
			if err := r.Status().Update(ctx, backupSession); err != nil {
				log.Error(err, "unable to update backupSession")
				return reconcile.Result{}, err
			}
		}
	} else {
		log.V(0).Info("backupsession being deleted", "backupsession", backupSession.Name)
		if controllerutil.ContainsFinalizer(backupSession, finalizerName) {
			if err := deleteExternalResources(); err != nil {
				return reconcile.Result{}, err
			}
		}
		controllerutil.RemoveFinalizer(backupSession, finalizerName)
		if err := r.Update(ctx, backupSession); err != nil {
			log.Error(err, "unable to remove finalizer")
			return reconcile.Result{}, err
		}
		// We have been deleted. Return here.
		return reconcile.Result{}, nil
	}
	return reconcile.Result{}, nil
	return ctrl.Result{}, nil
}

// SetupWithManager sets up the controller with the Manager.
func (r *BackupSessionReconciler) SetupWithManager(mgr ctrl.Manager) error {
	if err := mgr.GetFieldIndexer().IndexField(context.TODO(), &formolv1alpha1.BackupSession{}, sessionState, func(rawObj client.Object) []string {
		session := rawObj.(*formolv1alpha1.BackupSession)
		return []string{string(session.Status.SessionState)}
	}); err != nil {
		return err
	}

	return ctrl.NewControllerManagedBy(mgr).
		For(&formolv1alpha1.BackupSession{}).
		//WithEventFilter(predicate.GenerationChangedPredicate{}). // Don't reconcile when status gets updated
		Owns(&batchv1.Job{}).
		Complete(r)
}
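
The field index registered above is what makes a check such as isBackupOngoing cheap: the cached client can list BackupSessions by Status.SessionState instead of scanning every object. A minimal usage sketch (not part of this diff), assuming sessionState is the same index key registered in SetupWithManager:

	// List only the BackupSessions currently in the Running state.
	var running formolv1alpha1.BackupSessionList
	if err := r.List(ctx, &running,
		client.MatchingFields{sessionState: string(formolv1alpha1.Running)},
	); err != nil {
		return err
	}
	// len(running.Items) > 0 means another backup is ongoing.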
62
controllers/backupsession_controller.go~
Normal file
62
controllers/backupsession_controller.go~
Normal file
@ -0,0 +1,62 @@
/*
Copyright 2023.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package controllers

import (
	"context"

	"k8s.io/apimachinery/pkg/runtime"
	ctrl "sigs.k8s.io/controller-runtime"
	"sigs.k8s.io/controller-runtime/pkg/client"
	"sigs.k8s.io/controller-runtime/pkg/log"

	formolv1alpha1 "github.com/desmo999r/formol/api/v1alpha1"
)

// BackupSessionReconciler reconciles a BackupSession object
type BackupSessionReconciler struct {
	client.Client
	Scheme *runtime.Scheme
}

//+kubebuilder:rbac:groups=formol.desmojim.fr,resources=backupsessions,verbs=get;list;watch;create;update;patch;delete
//+kubebuilder:rbac:groups=formol.desmojim.fr,resources=backupsessions/status,verbs=get;update;patch
//+kubebuilder:rbac:groups=formol.desmojim.fr,resources=backupsessions/finalizers,verbs=update

// Reconcile is part of the main kubernetes reconciliation loop which aims to
// move the current state of the cluster closer to the desired state.
// TODO(user): Modify the Reconcile function to compare the state specified by
// the BackupSession object against the actual cluster state, and then
// perform operations to make the cluster state reflect the state specified by
// the user.
//
// For more details, check Reconcile and its Result here:
// - https://pkg.go.dev/sigs.k8s.io/controller-runtime@v0.13.1/pkg/reconcile
func (r *BackupSessionReconciler) Reconcile(ctx context.Context, req ctrl.Request) (ctrl.Result, error) {
	_ = log.FromContext(ctx)

	// TODO(user): your logic here

	return ctrl.Result{}, nil
}

// SetupWithManager sets up the controller with the Manager.
func (r *BackupSessionReconciler) SetupWithManager(mgr ctrl.Manager) error {
	return ctrl.NewControllerManagedBy(mgr).
		For(&formolv1alpha1.BackupSession{}).
		Complete(r)
}
@ -1,147 +0,0 @@
package controllers

import (
	"context"
	formolv1alpha1 "github.com/desmo999r/formol/api/v1alpha1"
	. "github.com/onsi/ginkgo"
	. "github.com/onsi/gomega"
	//corev1 "k8s.io/api/core/v1"
	corev1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/types"
)

var _ = Describe("Testing BackupSession controller", func() {
	const (
		BSBackupSessionName = "test-backupsession-controller"
	)
	var (
		ctx = context.Background()
		key = types.NamespacedName{
			Name:      BSBackupSessionName,
			Namespace: TestNamespace,
		}
		backupSession = &formolv1alpha1.BackupSession{}
	)
	BeforeEach(func() {
		backupSession = &formolv1alpha1.BackupSession{
			ObjectMeta: metav1.ObjectMeta{
				Name:      BSBackupSessionName,
				Namespace: TestNamespace,
			},
			Spec: formolv1alpha1.BackupSessionSpec{
				Ref: corev1.ObjectReference{
					Name: TestBackupConfName,
				},
			},
		}
	})
	Context("Creating a backupsession", func() {
		JustBeforeEach(func() {
			Eventually(func() error {
				return k8sClient.Create(ctx, backupSession)
			}, timeout, interval).Should(Succeed())
			realBackupSession := &formolv1alpha1.BackupSession{}
			Eventually(func() error {
				err := k8sClient.Get(ctx, key, realBackupSession)
				return err
			}, timeout, interval).Should(Succeed())
			Eventually(func() formolv1alpha1.SessionState {
				if err := k8sClient.Get(ctx, key, realBackupSession); err != nil {
					return ""
				} else {
					return realBackupSession.Status.SessionState
				}
			}, timeout, interval).Should(Equal(formolv1alpha1.Running))
		})
		AfterEach(func() {
			Expect(k8sClient.Delete(ctx, backupSession)).Should(Succeed())
		})

		It("Should have a new task", func() {
			realBackupSession := &formolv1alpha1.BackupSession{}
			_ = k8sClient.Get(ctx, key, realBackupSession)
			Expect(realBackupSession.Status.Targets[0].Name).Should(Equal(TestDeploymentName))
			Expect(realBackupSession.Status.Targets[0].SessionState).Should(Equal(formolv1alpha1.New))
			Expect(realBackupSession.Status.Targets[0].Kind).Should(Equal(formolv1alpha1.SidecarKind))
			Expect(realBackupSession.Status.Targets[0].Try).Should(Equal(1))
		})

		It("Should move to the next task when the first one is a success", func() {
			realBackupSession := &formolv1alpha1.BackupSession{}
			Expect(k8sClient.Get(ctx, key, realBackupSession)).Should(Succeed())
			realBackupSession.Status.Targets[0].SessionState = formolv1alpha1.Success
			Expect(k8sClient.Status().Update(ctx, realBackupSession)).Should(Succeed())
			Eventually(func() int {
				_ = k8sClient.Get(ctx, key, realBackupSession)
				return len(realBackupSession.Status.Targets)
			}, timeout, interval).Should(Equal(2))
			Expect(k8sClient.Get(ctx, key, realBackupSession)).Should(Succeed())
			Expect(realBackupSession.Status.Targets[1].Name).Should(Equal(TestBackupFuncName))
			Expect(realBackupSession.Status.Targets[1].SessionState).Should(Equal(formolv1alpha1.New))
			Expect(realBackupSession.Status.Targets[1].Kind).Should(Equal(formolv1alpha1.JobKind))
		})

		It("Should be a success when the last task is a success", func() {
			realBackupSession := &formolv1alpha1.BackupSession{}
			Expect(k8sClient.Get(ctx, key, realBackupSession)).Should(Succeed())
			realBackupSession.Status.Targets[0].SessionState = formolv1alpha1.Success
			Expect(k8sClient.Status().Update(ctx, realBackupSession)).Should(Succeed())
			Eventually(func() int {
				_ = k8sClient.Get(ctx, key, realBackupSession)
				return len(realBackupSession.Status.Targets)
			}, timeout, interval).Should(Equal(2))
			Expect(k8sClient.Get(ctx, key, realBackupSession)).Should(Succeed())
			realBackupSession.Status.Targets[1].SessionState = formolv1alpha1.Success
			Expect(k8sClient.Status().Update(ctx, realBackupSession)).Should(Succeed())
			Expect(k8sClient.Get(ctx, key, realBackupSession)).Should(Succeed())
			Eventually(func() formolv1alpha1.SessionState {
				_ = k8sClient.Get(ctx, key, realBackupSession)
				return realBackupSession.Status.SessionState
			}, timeout, interval).Should(Equal(formolv1alpha1.Success))
		})

		It("Should retry when the task is a failure", func() {
			realBackupSession := &formolv1alpha1.BackupSession{}
			Expect(k8sClient.Get(ctx, key, realBackupSession)).Should(Succeed())
			realBackupSession.Status.Targets[0].SessionState = formolv1alpha1.Success
			Expect(k8sClient.Status().Update(ctx, realBackupSession)).Should(Succeed())
			Eventually(func() int {
				_ = k8sClient.Get(ctx, key, realBackupSession)
				return len(realBackupSession.Status.Targets)
			}, timeout, interval).Should(Equal(2))
			Expect(k8sClient.Get(ctx, key, realBackupSession)).Should(Succeed())
			realBackupSession.Status.Targets[1].SessionState = formolv1alpha1.Failure
			Expect(k8sClient.Status().Update(ctx, realBackupSession)).Should(Succeed())
			Eventually(func() int {
				_ = k8sClient.Get(ctx, key, realBackupSession)
				return realBackupSession.Status.Targets[1].Try
			}, timeout, interval).Should(Equal(2))
			Expect(k8sClient.Get(ctx, key, realBackupSession)).Should(Succeed())
			Expect(realBackupSession.Status.Targets[1].SessionState).Should(Equal(formolv1alpha1.New))
			realBackupSession.Status.Targets[1].SessionState = formolv1alpha1.Failure
			Expect(k8sClient.Status().Update(ctx, realBackupSession)).Should(Succeed())
			Eventually(func() formolv1alpha1.SessionState {
				_ = k8sClient.Get(ctx, key, realBackupSession)
				return realBackupSession.Status.SessionState
			}, timeout, interval).Should(Equal(formolv1alpha1.Failure))
		})

		It("should create a backup job", func() {
		})
	})
	Context("When other BackupSession exist", func() {
		const (
			bs1Name = "test-backupsession-controller1"
			bs2Name = "test-backupsession-controller2"
			bs3Name = "test-backupsession-controller3"
		)
		var ()
		BeforeEach(func() {
		})
		JustBeforeEach(func() {
		})
		It("Should clean up old sessions", func() {
		})
	})
})
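
The removed specs above lean throughout on Gomega's asynchronous assertions: the reconciler runs in a separate goroutine, so every observation of controller-driven state is polled with Eventually using the shared timeout and interval constants. The pattern in isolation (a sketch, not part of the diff; "my-session" is a placeholder name):

	Eventually(func() formolv1alpha1.SessionState {
		got := &formolv1alpha1.BackupSession{}
		if err := k8sClient.Get(ctx, types.NamespacedName{
			Name:      "my-session",
			Namespace: TestNamespace,
		}, got); err != nil {
			return "" // keep polling until the object exists
		}
		return got.Status.SessionState
	}, timeout, interval).Should(Equal(formolv1alpha1.Running))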
@ -1,5 +1,5 @@
/*

Copyright 2023.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
@ -18,429 +18,45 @@ package controllers

import (
	"context"
	"fmt"
	"strings"
	"time"

	"github.com/go-logr/logr"
	appsv1 "k8s.io/api/apps/v1"
	batchv1 "k8s.io/api/batch/v1"
	corev1 "k8s.io/api/core/v1"
	"k8s.io/apimachinery/pkg/api/errors"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/runtime"
	ctrl "sigs.k8s.io/controller-runtime"
	"sigs.k8s.io/controller-runtime/pkg/client"
	"sigs.k8s.io/controller-runtime/pkg/log"
	"sigs.k8s.io/controller-runtime/pkg/reconcile"

	formolv1alpha1 "github.com/desmo999r/formol/api/v1alpha1"
	formolutils "github.com/desmo999r/formol/pkg/utils"
)

const (
	RESTORESESSION string = "restoresession"
	UPDATESTATUS   string = "updatestatus"
	jobOwnerKey    string = ".metadata.controller"
)

// RestoreSessionReconciler reconciles a RestoreSession object
type RestoreSessionReconciler struct {
	client.Client
	Log    logr.Logger
	Scheme *runtime.Scheme
}

var _ reconcile.Reconciler = &RestoreSessionReconciler{}
//+kubebuilder:rbac:groups=formol.desmojim.fr,resources=restoresessions,verbs=get;list;watch;create;update;patch;delete
//+kubebuilder:rbac:groups=formol.desmojim.fr,resources=restoresessions/status,verbs=get;update;patch
//+kubebuilder:rbac:groups=formol.desmojim.fr,resources=restoresessions/finalizers,verbs=update

// +kubebuilder:rbac:groups=formol.desmojim.fr,resources=restoresessions,verbs=get;list;watch;create;update;patch;delete
// +kubebuilder:rbac:groups=formol.desmojim.fr,resources=restoresessions/status,verbs=get;update;patch
// +kubebuilder:rbac:groups=coordination.k8s.io,resources=leases,verbs=get;list;create;update
// Reconcile is part of the main kubernetes reconciliation loop which aims to
// move the current state of the cluster closer to the desired state.
// TODO(user): Modify the Reconcile function to compare the state specified by
// the RestoreSession object against the actual cluster state, and then
// perform operations to make the cluster state reflect the state specified by
// the user.
//
// For more details, check Reconcile and its Result here:
// - https://pkg.go.dev/sigs.k8s.io/controller-runtime@v0.13.1/pkg/reconcile
func (r *RestoreSessionReconciler) Reconcile(ctx context.Context, req ctrl.Request) (ctrl.Result, error) {
	_ = log.FromContext(ctx)

func (r *RestoreSessionReconciler) Reconcile(ctx context.Context, req reconcile.Request) (reconcile.Result, error) {
	log := log.FromContext(ctx).WithValues("restoresession", req.NamespacedName)
	// TODO(user): your logic here

	// Get the RestoreSession
	restoreSession := &formolv1alpha1.RestoreSession{}
	if err := r.Get(ctx, req.NamespacedName, restoreSession); err != nil {
		log.Error(err, "unable to get restoresession")
		return reconcile.Result{}, client.IgnoreNotFound(err)
	}
	log = r.Log.WithValues("restoresession", req.NamespacedName, "version", restoreSession.ObjectMeta.ResourceVersion)
	// Get the BackupSession the RestoreSession references
	backupSession := &formolv1alpha1.BackupSession{}
	if err := r.Get(ctx, client.ObjectKey{
		Namespace: restoreSession.Namespace,
		Name:      restoreSession.Spec.BackupSessionRef.Ref.Name,
	}, backupSession); err != nil {
		if errors.IsNotFound(err) {
			backupSession = &formolv1alpha1.BackupSession{
				Spec:   restoreSession.Spec.BackupSessionRef.Spec,
				Status: restoreSession.Spec.BackupSessionRef.Status,
			}
			log.V(1).Info("generated backupsession", "spec", backupSession.Spec, "status", backupSession.Status)
		} else {
			log.Error(err, "unable to get backupsession", "restoresession", restoreSession.Spec)
			return reconcile.Result{}, client.IgnoreNotFound(err)
		}
	}
	// Get the BackupConfiguration linked to the BackupSession
	backupConf := &formolv1alpha1.BackupConfiguration{}
	if err := r.Get(ctx, client.ObjectKey{
		Namespace: backupSession.Spec.Ref.Namespace,
		Name:      backupSession.Spec.Ref.Name,
	}, backupConf); err != nil {
		log.Error(err, "unable to get backupConfiguration", "name", backupSession.Spec.Ref, "namespace", backupSession.Namespace)
		return reconcile.Result{}, client.IgnoreNotFound(err)
	}

	// Helper functions
	createRestoreJob := func(target formolv1alpha1.Target, snapshotId string) error {
		// TODO: Get the list of existing jobs and see if there is already one scheduled for the target
		var jobList batchv1.JobList
		if err := r.List(ctx, &jobList, client.InNamespace(restoreSession.Namespace), client.MatchingFields{jobOwnerKey: restoreSession.Name}); err != nil {
			log.Error(err, "unable to get job list")
			return err
		}
		log.V(1).Info("Found jobs", "jobs", jobList.Items)
		for _, job := range jobList.Items {
			if job.Annotations["targetName"] == target.Name && job.Annotations["snapshotId"] == snapshotId {
				log.V(0).Info("there is already a job to restore that target", "targetName", target.Name, "snapshotId", snapshotId)
				return nil
			}
		}
		restoreSessionEnv := []corev1.EnvVar{
			corev1.EnvVar{
				Name:  "TARGET_NAME",
				Value: target.Name,
			},
			corev1.EnvVar{
				Name:  "RESTORESESSION_NAME",
				Value: restoreSession.Name,
			},
			corev1.EnvVar{
				Name:  "RESTORESESSION_NAMESPACE",
				Value: restoreSession.Namespace,
			},
		}

		output := corev1.VolumeMount{
			Name:      "output",
			MountPath: "/output",
		}
		restic := corev1.Container{
			Name:         "restic",
			Image:        backupConf.Spec.Image,
			Args:         []string{"volume", "restore", "--snapshot-id", snapshotId},
			VolumeMounts: []corev1.VolumeMount{output},
			Env:          restoreSessionEnv,
		}
		finalizer := corev1.Container{
			Name:         "finalizer",
			Image:        backupConf.Spec.Image,
			Args:         []string{"target", "finalize"},
			VolumeMounts: []corev1.VolumeMount{output},
			Env:          restoreSessionEnv,
		}
		repo := &formolv1alpha1.Repo{}
		if err := r.Get(ctx, client.ObjectKey{
			Namespace: backupConf.Namespace,
			Name:      backupConf.Spec.Repository,
		}, repo); err != nil {
			log.Error(err, "unable to get Repo from BackupConfiguration")
			return err
		}
		// S3 backing storage
		var ttl int32 = 300
		restic.Env = append(restic.Env, formolutils.ConfigureResticEnvVar(backupConf, repo)...)
		job := &batchv1.Job{
			ObjectMeta: metav1.ObjectMeta{
				GenerateName: fmt.Sprintf("%s-%s-", restoreSession.Name, target.Name),
				Namespace:    restoreSession.Namespace,
				Annotations: map[string]string{
					"targetName": target.Name,
					"snapshotId": snapshotId,
				},
			},
			Spec: batchv1.JobSpec{
				TTLSecondsAfterFinished: &ttl,
				Template: corev1.PodTemplateSpec{
					Spec: corev1.PodSpec{
						InitContainers: []corev1.Container{restic},
						Containers:     []corev1.Container{finalizer},
						Volumes: []corev1.Volume{
							corev1.Volume{Name: "output"},
						},
						RestartPolicy: corev1.RestartPolicyOnFailure,
					},
				},
			},
		}
		for _, step := range target.Steps {
			function := &formolv1alpha1.Function{}
			// get the backup function
			if err := r.Get(ctx, client.ObjectKey{
				Namespace: restoreSession.Namespace,
				Name:      step.Name,
			}, function); err != nil {
				log.Error(err, "unable to get backup function", "name", step.Name)
				return err
			}
			var restoreName string
			if function.Annotations["restoreFunction"] != "" {
				restoreName = function.Annotations["restoreFunction"]
			} else {
				restoreName = strings.Replace(step.Name, "backup", "restore", 1)
			}
			if err := r.Get(ctx, client.ObjectKey{
				Namespace: restoreSession.Namespace,
				Name:      restoreName,
			}, function); err != nil {
				log.Error(err, "unable to get function", "function", step)
				return err
			}
			function.Spec.Name = function.Name
			function.Spec.Env = append(function.Spec.Env, restoreSessionEnv...)
			function.Spec.VolumeMounts = append(function.Spec.VolumeMounts, output)
			job.Spec.Template.Spec.InitContainers = append(job.Spec.Template.Spec.InitContainers, function.Spec)
		}
		if err := ctrl.SetControllerReference(restoreSession, job, r.Scheme); err != nil {
			log.Error(err, "unable to set controller on job", "job", job, "restoresession", restoreSession)
			return err
		}
		log.V(0).Info("creating a restore job", "target", target.Name)
		if err := r.Create(ctx, job); err != nil {
			log.Error(err, "unable to create job", "job", job)
			return err
		}
		return nil
	}

	deleteRestoreInitContainer := func(target formolv1alpha1.Target) (err error) {
		deployment := &appsv1.Deployment{}
		if err = r.Get(context.Background(), client.ObjectKey{
			Namespace: backupConf.Namespace,
			Name:      target.Name,
		}, deployment); err != nil {
			log.Error(err, "unable to get deployment")
			return err
		}
		log.V(1).Info("got deployment", "namespace", deployment.Namespace, "name", deployment.Name)
		newInitContainers := []corev1.Container{}
		for _, initContainer := range deployment.Spec.Template.Spec.InitContainers {
			if initContainer.Name == RESTORESESSION {
				log.V(0).Info("Found our restoresession container. Removing it from the list of init containers", "container", initContainer)
				// The deferred Update runs when the closure returns, i.e. after
				// the InitContainers slice has been replaced below.
				defer func() {
					if err = r.Update(ctx, deployment); err != nil {
						log.Error(err, "unable to update deployment")
					}
				}()
			} else {
				newInitContainers = append(newInitContainers, initContainer)
			}
		}
		deployment.Spec.Template.Spec.InitContainers = newInitContainers
		return nil
	}

	createRestoreInitContainer := func(target formolv1alpha1.Target, snapshotId string) error {
		deployment := &appsv1.Deployment{}
		if err := r.Get(context.Background(), client.ObjectKey{
			Namespace: restoreSession.Namespace,
			Name:      target.Name,
		}, deployment); err != nil {
			log.Error(err, "unable to get deployment")
			return err
		}
		log.V(1).Info("got deployment", "namespace", deployment.Namespace, "name", deployment.Name)
		for _, initContainer := range deployment.Spec.Template.Spec.InitContainers {
			if initContainer.Name == RESTORESESSION {
				log.V(0).Info("there is already a restoresession initcontainer", "deployment", deployment.Spec.Template.Spec.InitContainers)
				return nil
			}
		}
		restoreSessionEnv := []corev1.EnvVar{
			corev1.EnvVar{
				Name:  formolv1alpha1.TARGET_NAME,
				Value: target.Name,
			},
			corev1.EnvVar{
				Name:  formolv1alpha1.RESTORESESSION_NAME,
				Value: restoreSession.Name,
			},
			corev1.EnvVar{
				Name:  formolv1alpha1.RESTORESESSION_NAMESPACE,
				Value: restoreSession.Namespace,
			},
		}
		initContainer := corev1.Container{
			Name:         RESTORESESSION,
			Image:        backupConf.Spec.Image,
			Args:         []string{"volume", "restore", "--snapshot-id", snapshotId},
			VolumeMounts: target.VolumeMounts,
			Env:          restoreSessionEnv,
		}
		repo := &formolv1alpha1.Repo{}
		if err := r.Get(ctx, client.ObjectKey{
			Namespace: backupConf.Namespace,
			Name:      backupConf.Spec.Repository,
		}, repo); err != nil {
			log.Error(err, "unable to get Repo from BackupConfiguration")
			return err
		}
		// S3 backing storage
		initContainer.Env = append(initContainer.Env, formolutils.ConfigureResticEnvVar(backupConf, repo)...)
		deployment.Spec.Template.Spec.InitContainers = append([]corev1.Container{initContainer},
			deployment.Spec.Template.Spec.InitContainers...)
		if err := r.Update(ctx, deployment); err != nil {
			log.Error(err, "unable to update deployment")
			return err
		}

		return nil
	}

	startNextTask := func() (*formolv1alpha1.TargetStatus, error) {
		nextTarget := len(restoreSession.Status.Targets)
		if nextTarget < len(backupConf.Spec.Targets) {
			target := backupConf.Spec.Targets[nextTarget]
			targetStatus := formolv1alpha1.TargetStatus{
				Name:         target.Name,
				Kind:         target.Kind,
				SessionState: formolv1alpha1.New,
				StartTime:    &metav1.Time{Time: time.Now()},
			}
			restoreSession.Status.Targets = append(restoreSession.Status.Targets, targetStatus)
			switch target.Kind {
			case formolv1alpha1.SidecarKind:
				log.V(0).Info("Next task is a Sidecar restore", "target", target)
				if err := createRestoreInitContainer(target, backupSession.Status.Targets[nextTarget].SnapshotId); err != nil {
					log.V(0).Info("unable to create restore init container", "task", target)
					targetStatus.SessionState = formolv1alpha1.Failure
					return nil, err
				}
			case formolv1alpha1.JobKind:
				log.V(0).Info("Next task is a Job restore", "target", target)
				if err := createRestoreJob(target, backupSession.Status.Targets[nextTarget].SnapshotId); err != nil {
					log.V(0).Info("unable to create restore job", "task", target)
					targetStatus.SessionState = formolv1alpha1.Failure
					return nil, err
				}
			}
			return &targetStatus, nil
		} else {
			return nil, nil
		}
	}

	endTask := func() error {
		target := backupConf.Spec.Targets[len(restoreSession.Status.Targets)-1]
		switch target.Kind {
		case formolv1alpha1.SidecarKind:
			if err := deleteRestoreInitContainer(target); err != nil {
				log.Error(err, "unable to delete restore init container")
				return err
			}
		}
		return nil
	}

	switch restoreSession.Status.SessionState {
	case formolv1alpha1.New:
		restoreSession.Status.SessionState = formolv1alpha1.Running
		if targetStatus, err := startNextTask(); err != nil {
			log.Error(err, "unable to start next restore task")
			return reconcile.Result{}, err
		} else {
			log.V(0).Info("New restore. Start the first task", "task", targetStatus.Name)
			if err := r.Status().Update(ctx, restoreSession); err != nil {
				log.Error(err, "unable to update restoresession")
				return reconcile.Result{}, err
			}
		}
	case formolv1alpha1.Running:
		currentTargetStatus := &restoreSession.Status.Targets[len(restoreSession.Status.Targets)-1]
		switch currentTargetStatus.SessionState {
		case formolv1alpha1.Failure:
			log.V(0).Info("last restore task failed. Stop here", "target", currentTargetStatus.Name)
			restoreSession.Status.SessionState = formolv1alpha1.Failure
			if err := r.Status().Update(ctx, restoreSession); err != nil {
				log.Error(err, "unable to update restoresession")
				return reconcile.Result{}, err
			}
		case formolv1alpha1.Running:
			log.V(0).Info("task is still running", "target", currentTargetStatus.Name)
			return reconcile.Result{}, nil
		case formolv1alpha1.Waiting:
			target := backupConf.Spec.Targets[len(restoreSession.Status.Targets)-1]
			if target.Kind == formolv1alpha1.SidecarKind {
				deployment := &appsv1.Deployment{}
				if err := r.Get(context.Background(), client.ObjectKey{
					Namespace: restoreSession.Namespace,
					Name:      target.Name,
				}, deployment); err != nil {
					log.Error(err, "unable to get deployment")
					return reconcile.Result{}, err
				}

				if deployment.Status.ReadyReplicas == *deployment.Spec.Replicas {
					log.V(0).Info("The deployment is ready. We can resume the backup")
					currentTargetStatus.SessionState = formolv1alpha1.Finalize
					if err := r.Status().Update(ctx, restoreSession); err != nil {
						log.Error(err, "unable to update restoresession")
						return reconcile.Result{}, err
					}
				} else {
					log.V(0).Info("Waiting for the sidecar to come back")
					return reconcile.Result{RequeueAfter: 10 * time.Second}, nil
				}
			} else {
				log.V(0).Info("not a SidecarKind. Ignoring Waiting")
			}
		case formolv1alpha1.Success:
			_ = endTask()
			log.V(0).Info("last task was a success. start a new one", "target", currentTargetStatus, "restoreSession version", restoreSession.ObjectMeta.ResourceVersion)
			targetStatus, err := startNextTask()
			if err != nil {
				return reconcile.Result{}, err
			}
			if targetStatus == nil {
				// No more tasks to start. The restore is over.
				restoreSession.Status.SessionState = formolv1alpha1.Success
			}
			if err := r.Status().Update(ctx, restoreSession); err != nil {
				log.Error(err, "unable to update restoresession")
				return reconcile.Result{RequeueAfter: 300 * time.Millisecond}, nil
			}
		}
	case "":
		// Restore session has just been created
		restoreSession.Status.SessionState = formolv1alpha1.New
		restoreSession.Status.StartTime = &metav1.Time{Time: time.Now()}
		if err := r.Status().Update(ctx, restoreSession); err != nil {
			log.Error(err, "unable to update restoreSession")
			return reconcile.Result{}, err
		}
	}
	return reconcile.Result{}, nil
	return ctrl.Result{}, nil
}

// SetupWithManager sets up the controller with the Manager.
func (r *RestoreSessionReconciler) SetupWithManager(mgr ctrl.Manager) error {
	if err := mgr.GetFieldIndexer().IndexField(context.Background(), &batchv1.Job{}, jobOwnerKey, func(rawObj client.Object) []string {
		job := rawObj.(*batchv1.Job)
		owner := metav1.GetControllerOf(job)
		if owner == nil {
			return nil
		}
		if owner.APIVersion != formolv1alpha1.GroupVersion.String() || owner.Kind != "RestoreSession" {
			return nil
		}
		return []string{owner.Name}
	}); err != nil {
		return err
	}
	return ctrl.NewControllerManagedBy(mgr).
		For(&formolv1alpha1.RestoreSession{}).
		Owns(&batchv1.Job{}).
		Complete(r)
}
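
Both createRestoreJob and createRestoreInitContainer above delegate the restic repository wiring to formolutils.ConfigureResticEnvVar, whose source is not part of this diff. Judging from its call sites (an S3 backend plus a RepositorySecrets reference on the Repo), it plausibly assembles the environment along these lines; this is an assumption about its shape, not the actual implementation:

	// Hypothetical sketch of ConfigureResticEnvVar; the real code lives in
	// pkg/utils and may differ.
	func configureResticEnvVarSketch(backupConf *formolv1alpha1.BackupConfiguration, repo *formolv1alpha1.Repo) []corev1.EnvVar {
		env := []corev1.EnvVar{{
			// Repository URL built from the S3 backend definition.
			Name:  "RESTIC_REPOSITORY",
			Value: fmt.Sprintf("s3:http://%s/%s", repo.Spec.Backend.S3.Server, repo.Spec.Backend.S3.Bucket),
		}}
		// Credentials come from the secret referenced by the Repo; the test
		// suite below seeds RESTIC_PASSWORD and the AWS key pair.
		for _, key := range []string{"RESTIC_PASSWORD", "AWS_ACCESS_KEY_ID", "AWS_SECRET_ACCESS_KEY"} {
			env = append(env, corev1.EnvVar{
				Name: key,
				ValueFrom: &corev1.EnvVarSource{
					SecretKeyRef: &corev1.SecretKeySelector{
						LocalObjectReference: corev1.LocalObjectReference{Name: repo.Spec.RepositorySecrets},
						Key:                  key,
					},
				},
			})
		}
		return env
	}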
@ -1,95 +0,0 @@
package controllers

import (
	"context"
	formolv1alpha1 "github.com/desmo999r/formol/api/v1alpha1"
	. "github.com/onsi/ginkgo"
	. "github.com/onsi/gomega"
	corev1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/types"
)

var _ = Describe("Testing RestoreSession controller", func() {
	const (
		RSRestoreSessionName = "test-restoresession-controller"
	)
	var (
		ctx = context.Background()
		key = types.NamespacedName{
			Name:      RSRestoreSessionName,
			Namespace: TestNamespace,
		}
		restoreSession = &formolv1alpha1.RestoreSession{}
	)
	BeforeEach(func() {
		restoreSession = &formolv1alpha1.RestoreSession{
			ObjectMeta: metav1.ObjectMeta{
				Name:      RSRestoreSessionName,
				Namespace: TestNamespace,
			},
			Spec: formolv1alpha1.RestoreSessionSpec{
				BackupSessionRef: formolv1alpha1.BackupSessionRef{
					Ref: corev1.ObjectReference{
						Name: TestBackupSessionName,
					},
				},
			},
		}
	})
	Context("Creating a RestoreSession", func() {
		JustBeforeEach(func() {
			Eventually(func() error {
				return k8sClient.Create(ctx, restoreSession)
			}, timeout, interval).Should(Succeed())
			realRestoreSession := &formolv1alpha1.RestoreSession{}
			Eventually(func() error {
				return k8sClient.Get(ctx, key, realRestoreSession)
			}, timeout, interval).Should(Succeed())
			Eventually(func() formolv1alpha1.SessionState {
				_ = k8sClient.Get(ctx, key, realRestoreSession)
				return realRestoreSession.Status.SessionState
			}, timeout, interval).Should(Equal(formolv1alpha1.Running))
		})
		AfterEach(func() {
			Expect(k8sClient.Delete(ctx, restoreSession)).Should(Succeed())
		})
		It("Should have a new task and should fail if the task fails", func() {
			restoreSession := &formolv1alpha1.RestoreSession{}
			Expect(k8sClient.Get(ctx, key, restoreSession)).Should(Succeed())
			Expect(len(restoreSession.Status.Targets)).Should(Equal(1))
			Expect(restoreSession.Status.Targets[0].SessionState).Should(Equal(formolv1alpha1.New))
			restoreSession.Status.Targets[0].SessionState = formolv1alpha1.Running
			Expect(k8sClient.Status().Update(ctx, restoreSession)).Should(Succeed())
			Expect(k8sClient.Get(ctx, key, restoreSession)).Should(Succeed())
			Eventually(func() formolv1alpha1.SessionState {
				_ = k8sClient.Get(ctx, key, restoreSession)
				return restoreSession.Status.Targets[0].SessionState
			}, timeout, interval).Should(Equal(formolv1alpha1.Running))
			restoreSession.Status.Targets[0].SessionState = formolv1alpha1.Failure
			Expect(k8sClient.Status().Update(ctx, restoreSession)).Should(Succeed())
			Expect(k8sClient.Get(ctx, key, restoreSession)).Should(Succeed())
			Eventually(func() formolv1alpha1.SessionState {
				_ = k8sClient.Get(ctx, key, restoreSession)
				return restoreSession.Status.SessionState
			}, timeout, interval).Should(Equal(formolv1alpha1.Failure))
		})
		It("Should move to the new task if the first one is a success and be a success if all the tasks succeed", func() {
			restoreSession := &formolv1alpha1.RestoreSession{}
			Expect(k8sClient.Get(ctx, key, restoreSession)).Should(Succeed())
			Expect(len(restoreSession.Status.Targets)).Should(Equal(1))
			restoreSession.Status.Targets[0].SessionState = formolv1alpha1.Success
			Expect(k8sClient.Status().Update(ctx, restoreSession)).Should(Succeed())
			Eventually(func() int {
				_ = k8sClient.Get(ctx, key, restoreSession)
				return len(restoreSession.Status.Targets)
			}, timeout, interval).Should(Equal(2))
			restoreSession.Status.Targets[1].SessionState = formolv1alpha1.Success
			Expect(k8sClient.Status().Update(ctx, restoreSession)).Should(Succeed())
			Eventually(func() formolv1alpha1.SessionState {
				_ = k8sClient.Get(ctx, key, restoreSession)
				return restoreSession.Status.SessionState
			}, timeout, interval).Should(Equal(formolv1alpha1.Success))
		})
	})
})
@ -1,5 +1,5 @@
/*

Copyright 2023.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
@ -22,55 +22,48 @@ import (
	"testing"
	"time"

	. "github.com/onsi/ginkgo"
	. "github.com/onsi/ginkgo/v2"
	. "github.com/onsi/gomega"
	appsv1 "k8s.io/api/apps/v1"
	corev1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"

	"k8s.io/client-go/kubernetes/scheme"
	"k8s.io/client-go/rest"
	ctrl "sigs.k8s.io/controller-runtime"
	"sigs.k8s.io/controller-runtime/pkg/client"
	"sigs.k8s.io/controller-runtime/pkg/envtest"
	"sigs.k8s.io/controller-runtime/pkg/envtest/printer"
	logf "sigs.k8s.io/controller-runtime/pkg/log"
	"sigs.k8s.io/controller-runtime/pkg/log/zap"

	formolv1alpha1 "github.com/desmo999r/formol/api/v1alpha1"
	// +kubebuilder:scaffold:imports
	//+kubebuilder:scaffold:imports

	appsv1 "k8s.io/api/apps/v1"
	corev1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	ctrl "sigs.k8s.io/controller-runtime"
)

// These tests use Ginkgo (BDD-style Go testing framework). Refer to
// http://onsi.github.io/ginkgo/ to learn more about Ginkgo.
const (
	TestBackupFuncName    = "test-backup-func"
	TestFunc              = "test-norestore-func"
	TestRestoreFuncName   = "test-restore-func"
	TestNamespace         = "test-namespace"
	TestRepoName          = "test-repo"
	TestDeploymentName    = "test-deployment"
	TestBackupConfName    = "test-backupconf"
	TestBackupSessionName = "test-backupsession"
	TestDataVolume        = "data"
	TestDataMountPath     = "/data"
	timeout               = time.Second * 10
	interval              = time.Millisecond * 250
)

var cfg *rest.Config
var k8sClient client.Client
var testEnv *envtest.Environment
const (
	NAMESPACE_NAME  = "test-namespace"
	REPO_NAME       = "test-repo"
	DEPLOYMENT_NAME = "test-deployment"
	CONTAINER_NAME  = "test-container"
	DATAVOLUME_NAME = "data"
	timeout         = time.Second * 10
	interval        = time.Millisecond * 250
)

var (
	namespace = &corev1.Namespace{
		ObjectMeta: metav1.ObjectMeta{
			Name: TestNamespace,
			Name: NAMESPACE_NAME,
		},
	}
	deployment = &appsv1.Deployment{
		ObjectMeta: metav1.ObjectMeta{
			Name:      TestDeploymentName,
			Namespace: TestNamespace,
			Namespace: NAMESPACE_NAME,
			Name:      DEPLOYMENT_NAME,
		},
		Spec: appsv1.DeploymentSpec{
			Selector: &metav1.LabelSelector{
@ -89,239 +82,74 @@ var (
			},
			Volumes: []corev1.Volume{
				corev1.Volume{
					Name: TestDataVolume,
					Name: DATAVOLUME_NAME,
				},
			},
		},
	},
	},
	}
	sa = &corev1.ServiceAccount{
		ObjectMeta: metav1.ObjectMeta{
			Name:      "default",
			Namespace: TestNamespace,
		},
	}
	secret = &corev1.Secret{
		ObjectMeta: metav1.ObjectMeta{
			Name:      "test-secret",
			Namespace: TestNamespace,
		},
		Data: map[string][]byte{
			"RESTIC_PASSWORD":       []byte("toto"),
			"AWS_ACCESS_KEY_ID":     []byte("titi"),
			"AWS_SECRET_ACCESS_KEY": []byte("tata"),
		},
	}
	repo = &formolv1alpha1.Repo{
		ObjectMeta: metav1.ObjectMeta{
			Name:      TestRepoName,
			Namespace: TestNamespace,
		},
		Spec: formolv1alpha1.RepoSpec{
			Backend: formolv1alpha1.Backend{
				S3: formolv1alpha1.S3{
					Server: "raid5.desmojim.fr:9000",
					Bucket: "testbucket2",
				},
			},
			RepositorySecrets: "test-secret",
		},
	}
	function = &formolv1alpha1.Function{
		ObjectMeta: metav1.ObjectMeta{
			Name:      TestFunc,
			Namespace: TestNamespace,
		},
		Spec: corev1.Container{
			Name:  "norestore-func",
			Image: "myimage",
			Args:  []string{"a", "set", "of", "args"},
		},
	}
	backupFunc = &formolv1alpha1.Function{
		ObjectMeta: metav1.ObjectMeta{
			Name:      TestRestoreFuncName,
			Namespace: TestNamespace,
		},
		Spec: corev1.Container{
			Name:  "restore-func",
			Image: "myimage",
			Args:  []string{"a", "set", "of", "args"},
		},
	}
	restoreFunc = &formolv1alpha1.Function{
		ObjectMeta: metav1.ObjectMeta{
			Name:      TestBackupFuncName,
			Namespace: TestNamespace,
		},
		Spec: corev1.Container{
			Name:  "backup-func",
			Image: "myimage",
			Args:  []string{"a", "set", "of", "args"},
			Env: []corev1.EnvVar{
				corev1.EnvVar{
					Name:  "foo",
					Value: "bar",
				},
			},
		},
	}
	testBackupConf = &formolv1alpha1.BackupConfiguration{
		ObjectMeta: metav1.ObjectMeta{
			Name:      TestBackupConfName,
			Namespace: TestNamespace,
		},
		Spec: formolv1alpha1.BackupConfigurationSpec{
			Repository: TestRepoName,
			Image:      "desmo999r/formolcli:latest",
			Schedule:   "1 * * * *",
			Keep: formolv1alpha1.Keep{
				Last: 2,
			},
			Targets: []formolv1alpha1.Target{
				formolv1alpha1.Target{
					Kind: formolv1alpha1.SidecarKind,
					Name: TestDeploymentName,
					Steps: []formolv1alpha1.Step{
						formolv1alpha1.Step{
							Name: TestFunc,
						},
					},
					Paths: []string{
						TestDataMountPath,
					},
					VolumeMounts: []corev1.VolumeMount{
						corev1.VolumeMount{
							Name:      TestDataVolume,
							MountPath: TestDataMountPath,
						},
					},
				},
				formolv1alpha1.Target{
					Kind: formolv1alpha1.JobKind,
					Name: TestBackupFuncName,
					Steps: []formolv1alpha1.Step{
						formolv1alpha1.Step{
							Name: TestFunc,
						},
						formolv1alpha1.Step{
							Name: TestBackupFuncName,
						},
					},
				},
			},
		},
	}
	testBackupSession = &formolv1alpha1.BackupSession{
		ObjectMeta: metav1.ObjectMeta{
			Name:      TestBackupSessionName,
			Namespace: TestNamespace,
		},
		Spec: formolv1alpha1.BackupSessionSpec{
			Ref: corev1.ObjectReference{
				Name:      TestBackupConfName,
				Namespace: TestNamespace,
			},
		},
	}
	cfg       *rest.Config
	k8sClient client.Client
	testEnv   *envtest.Environment
	ctx       context.Context
	cancel    context.CancelFunc
)

func TestAPIs(t *testing.T) {
	RegisterFailHandler(Fail)

	RunSpecsWithDefaultAndCustomReporters(t,
		"Controller Suite",
		[]Reporter{printer.NewlineReporter{}})
	RunSpecs(t, "Controller Suite")
}

var _ = BeforeSuite(func() {
	logf.SetLogger(zap.New(zap.WriteTo(GinkgoWriter), zap.UseDevMode(true)))

	ctx, cancel = context.WithCancel(context.TODO())
	By("bootstrapping test environment")
	testEnv = &envtest.Environment{
		CRDDirectoryPaths: []string{filepath.Join("..", "config", "crd", "bases")},
		CRDDirectoryPaths:     []string{filepath.Join("..", "config", "crd", "bases")},
		ErrorIfCRDPathMissing: true,
	}

	cfg, err := testEnv.Start()
	Expect(err).ToNot(HaveOccurred())
	Expect(cfg).ToNot(BeNil())
	var err error
	// cfg is defined in this file globally.
	cfg, err = testEnv.Start()
	Expect(err).NotTo(HaveOccurred())
	Expect(cfg).NotTo(BeNil())

	err = formolv1alpha1.AddToScheme(scheme.Scheme)
	Expect(err).NotTo(HaveOccurred())

	// +kubebuilder:scaffold:scheme
	//+kubebuilder:scaffold:scheme

	k8sClient, err = client.New(cfg, client.Options{Scheme: scheme.Scheme})
	Expect(err).NotTo(HaveOccurred())
	Expect(k8sClient).NotTo(BeNil())
	Expect(k8sClient.Create(ctx, namespace)).Should(Succeed())
	Expect(k8sClient.Create(ctx, deployment)).Should(Succeed())

	k8sManager, err := ctrl.NewManager(cfg, ctrl.Options{
		Scheme: scheme.Scheme,
	})
	Expect(err).ToNot(HaveOccurred())
	Expect(err).NotTo(HaveOccurred())

	err = (&BackupConfigurationReconciler{
		Client: k8sManager.GetClient(),
		Scheme: k8sManager.GetScheme(),
		Log:    ctrl.Log.WithName("controllers").WithName("BackupConfiguration"),
	}).SetupWithManager(k8sManager)
	Expect(err).ToNot(HaveOccurred())

	err = (&BackupSessionReconciler{
		Client: k8sManager.GetClient(),
		Scheme: k8sManager.GetScheme(),
		Log:    ctrl.Log.WithName("controllers").WithName("BackupSession"),
	}).SetupWithManager(k8sManager)
	Expect(err).ToNot(HaveOccurred())

	err = (&RestoreSessionReconciler{
		Client: k8sManager.GetClient(),
		Scheme: k8sManager.GetScheme(),
		Log:    ctrl.Log.WithName("controllers").WithName("RestoreSession"),
	}).SetupWithManager(k8sManager)
	Expect(err).ToNot(HaveOccurred())
	Expect(err).NotTo(HaveOccurred())

	go func() {
		err = k8sManager.Start(ctrl.SetupSignalHandler())
		Expect(err).ToNot(HaveOccurred())
		defer GinkgoRecover()
		err = k8sManager.Start(ctx)
		Expect(err).ToNot(HaveOccurred(), "failed to run manager")
	}()

	k8sClient = k8sManager.GetClient()
	ctx := context.Background()
	Expect(k8sClient).ToNot(BeNil())
	Expect(k8sClient.Create(ctx, namespace)).Should(Succeed())
	Expect(k8sClient.Create(ctx, sa)).Should(Succeed())
	Expect(k8sClient.Create(ctx, secret)).Should(Succeed())
	Expect(k8sClient.Create(ctx, repo)).Should(Succeed())
	Expect(k8sClient.Create(ctx, deployment)).Should(Succeed())
	Expect(k8sClient.Create(ctx, function)).Should(Succeed())
	Expect(k8sClient.Create(ctx, backupFunc)).Should(Succeed())
	Expect(k8sClient.Create(ctx, restoreFunc)).Should(Succeed())
	Expect(k8sClient.Create(ctx, testBackupConf)).Should(Succeed())
	Expect(k8sClient.Create(ctx, testBackupSession)).Should(Succeed())
	Eventually(func() error {
		return k8sClient.Get(ctx, client.ObjectKey{
			Name:      TestBackupSessionName,
			Namespace: TestNamespace,
		}, testBackupSession)
	}, timeout, interval).Should(Succeed())
	testBackupSession.Status.SessionState = formolv1alpha1.Success
	testBackupSession.Status.Targets = []formolv1alpha1.TargetStatus{
		formolv1alpha1.TargetStatus{
			Name:         TestDeploymentName,
			Kind:         formolv1alpha1.SidecarKind,
			SessionState: formolv1alpha1.Success,
			SnapshotId:   "12345abcdef",
		},
		formolv1alpha1.TargetStatus{
			Name:         TestBackupFuncName,
			Kind:         formolv1alpha1.JobKind,
			SessionState: formolv1alpha1.Success,
			SnapshotId:   "67890ghijk",
		},
	}
	Expect(k8sClient.Status().Update(ctx, testBackupSession)).Should(Succeed())
}, 60)
})

var _ = AfterSuite(func() {
	cancel()
	By("tearing down the test environment")
	err := testEnv.Stop()
	Expect(err).ToNot(HaveOccurred())
	Expect(err).NotTo(HaveOccurred())
})
155
controllers/suite_test.go~
Normal file
155
controllers/suite_test.go~
Normal file
@ -0,0 +1,155 @@
/*
Copyright 2023.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package controllers

import (
	"context"
	"path/filepath"
	"testing"
	"time"

	. "github.com/onsi/ginkgo/v2"
	. "github.com/onsi/gomega"

	"k8s.io/client-go/kubernetes/scheme"
	"k8s.io/client-go/rest"
	"sigs.k8s.io/controller-runtime/pkg/client"
	"sigs.k8s.io/controller-runtime/pkg/envtest"
	logf "sigs.k8s.io/controller-runtime/pkg/log"
	"sigs.k8s.io/controller-runtime/pkg/log/zap"

	formolv1alpha1 "github.com/desmo999r/formol/api/v1alpha1"
	//+kubebuilder:scaffold:imports

	appsv1 "k8s.io/api/apps/v1"
	corev1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	ctrl "sigs.k8s.io/controller-runtime"
)

// These tests use Ginkgo (BDD-style Go testing framework). Refer to
// http://onsi.github.io/ginkgo/ to learn more about Ginkgo.

const (
	NAMESPACE_NAME  = "test-namespace"
	REPO_NAME       = "test-repo"
	DEPLOYMENT_NAME = "test-deployment"
	CONTAINER_NAME  = "test-container"
	DATAVOLUME_NAME = "data"
	timeout         = time.Second * 10
	interval        = time.Millisecond * 250
)

var (
	namespace = &corev1.Namespace{
		ObjectMeta: metav1.ObjectMeta{
			Name: NAMESPACE_NAME,
		},
	}
	deployment = &appsv1.Deployment{
		ObjectMeta: metav1.ObjectMeta{
			Namespace: NAMESPACE_NAME,
			Name:      DEPLOYMENT_NAME,
		},
		Spec: appsv1.DeploymentSpec{
			Selector: &metav1.LabelSelector{
				MatchLabels: map[string]string{"app": "test-deployment"},
			},
			Template: corev1.PodTemplateSpec{
				ObjectMeta: metav1.ObjectMeta{
					Labels: map[string]string{"app": "test-deployment"},
				},
				Spec: corev1.PodSpec{
					Containers: []corev1.Container{
						corev1.Container{
							Name:  "test-container",
							Image: "test-image",
						},
					},
					Volumes: []corev1.Volume{
						corev1.Volume{
							Name: DATAVOLUME_NAME,
						},
					},
				},
			},
		},
	}
	cfg       *rest.Config
	k8sClient client.Client
	testEnv   *envtest.Environment
	ctx       context.Context
	cancel    context.CancelFunc
)

func TestAPIs(t *testing.T) {
	RegisterFailHandler(Fail)

	RunSpecs(t, "Controller Suite")
}

var _ = BeforeSuite(func() {
	logf.SetLogger(zap.New(zap.WriteTo(GinkgoWriter), zap.UseDevMode(true)))

	ctx, cancel = context.WithCancel(context.TODO())
	By("bootstrapping test environment")
	testEnv = &envtest.Environment{
		CRDDirectoryPaths:     []string{filepath.Join("..", "config", "crd", "bases")},
		ErrorIfCRDPathMissing: true,
	}

	var err error
	// cfg is defined in this file globally.
	cfg, err = testEnv.Start()
	Expect(err).NotTo(HaveOccurred())
	Expect(cfg).NotTo(BeNil())

	err = formolv1alpha1.AddToScheme(scheme.Scheme)
	Expect(err).NotTo(HaveOccurred())

	//+kubebuilder:scaffold:scheme

	k8sClient, err = client.New(cfg, client.Options{Scheme: scheme.Scheme})
	Expect(err).NotTo(HaveOccurred())
	Expect(k8sClient).NotTo(BeNil())
	Expect(k8sClient.Create(ctx, namespace)).Should(Succeed())
	Expect(k8sClient.Create(ctx, deployment)).Should(Succeed())

	k8sManager, err := ctrl.NewManager(cfg, ctrl.Options{
		Scheme: scheme.Scheme,
	})
	Expect(err).NotTo(HaveOccurred())

	err = (&BackupConfigurationReconciler{
		Client: k8sManager.GetClient(),
		Scheme: k8sManager.GetScheme(),
	}).SetupWithManager(k8sManager)
	Expect(err).NotTo(HaveOccurred())

	go func() {
		defer GinkgoRecover()
		err = k8sManager.Start(ctx)
		Expect(err).ToNot(HaveOccurred(), "failed to run manager")
	}()
})

var _ = AfterSuite(func() {
	cancel()
	By("tearing down the test environment")
	err := testEnv.Stop()
	Expect(err).NotTo(HaveOccurred())
})
84
go.mod
84
go.mod
@ -1,13 +1,81 @@
module github.com/desmo999r/formol

go 1.13
go 1.19

require (
	github.com/go-logr/logr v0.3.0
	github.com/onsi/ginkgo v1.14.1
	github.com/onsi/gomega v1.10.2
	k8s.io/api v0.20.2
	k8s.io/apimachinery v0.20.2
	k8s.io/client-go v0.20.2
	sigs.k8s.io/controller-runtime v0.8.3
	github.com/go-logr/logr v1.2.3
	github.com/onsi/ginkgo/v2 v2.1.4
	github.com/onsi/gomega v1.19.0
	k8s.io/api v0.25.0
	k8s.io/apimachinery v0.25.0
	k8s.io/client-go v0.25.0
	sigs.k8s.io/controller-runtime v0.13.1
)

require (
	cloud.google.com/go v0.97.0 // indirect
	github.com/Azure/go-autorest v14.2.0+incompatible // indirect
	github.com/Azure/go-autorest/autorest v0.11.27 // indirect
	github.com/Azure/go-autorest/autorest/adal v0.9.20 // indirect
	github.com/Azure/go-autorest/autorest/date v0.3.0 // indirect
	github.com/Azure/go-autorest/logger v0.2.1 // indirect
	github.com/Azure/go-autorest/tracing v0.6.0 // indirect
	github.com/PuerkitoBio/purell v1.1.1 // indirect
	github.com/PuerkitoBio/urlesc v0.0.0-20170810143723-de5bf2ad4578 // indirect
	github.com/beorn7/perks v1.0.1 // indirect
	github.com/cespare/xxhash/v2 v2.1.2 // indirect
	github.com/davecgh/go-spew v1.1.1 // indirect
	github.com/emicklei/go-restful/v3 v3.8.0 // indirect
	github.com/evanphx/json-patch/v5 v5.6.0 // indirect
	github.com/fsnotify/fsnotify v1.5.4 // indirect
	github.com/go-logr/zapr v1.2.3 // indirect
	github.com/go-openapi/jsonpointer v0.19.5 // indirect
	github.com/go-openapi/jsonreference v0.19.5 // indirect
	github.com/go-openapi/swag v0.19.14 // indirect
	github.com/gogo/protobuf v1.3.2 // indirect
	github.com/golang-jwt/jwt/v4 v4.2.0 // indirect
	github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da // indirect
	github.com/golang/protobuf v1.5.2 // indirect
	github.com/google/gnostic v0.5.7-v3refs // indirect
	github.com/google/go-cmp v0.5.8 // indirect
	github.com/google/gofuzz v1.1.0 // indirect
	github.com/google/uuid v1.1.2 // indirect
	github.com/imdario/mergo v0.3.12 // indirect
	github.com/josharian/intern v1.0.0 // indirect
	github.com/json-iterator/go v1.1.12 // indirect
	github.com/mailru/easyjson v0.7.6 // indirect
	github.com/matttproud/golang_protobuf_extensions v1.0.2-0.20181231171920-c182affec369 // indirect
	github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd // indirect
	github.com/modern-go/reflect2 v1.0.2 // indirect
	github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 // indirect
	github.com/pkg/errors v0.9.1 // indirect
	github.com/prometheus/client_golang v1.12.2 // indirect
	github.com/prometheus/client_model v0.2.0 // indirect
	github.com/prometheus/common v0.32.1 // indirect
	github.com/prometheus/procfs v0.7.3 // indirect
	github.com/spf13/pflag v1.0.5 // indirect
	go.uber.org/atomic v1.7.0 // indirect
	go.uber.org/multierr v1.6.0 // indirect
	go.uber.org/zap v1.21.0 // indirect
	golang.org/x/crypto v0.0.0-20220315160706-3147a52a75dd // indirect
	golang.org/x/net v0.0.0-20220722155237-a158d28d115b // indirect
	golang.org/x/oauth2 v0.0.0-20211104180415-d3ed0bb246c8 // indirect
	golang.org/x/sys v0.0.0-20220722155257-8c9f86f7a55f // indirect
	golang.org/x/term v0.0.0-20210927222741-03fcf44c2211 // indirect
	golang.org/x/text v0.3.7 // indirect
	golang.org/x/time v0.0.0-20220609170525-579cf78fd858 // indirect
	gomodules.xyz/jsonpatch/v2 v2.2.0 // indirect
	google.golang.org/appengine v1.6.7 // indirect
	google.golang.org/protobuf v1.28.0 // indirect
	gopkg.in/inf.v0 v0.9.1 // indirect
	gopkg.in/yaml.v2 v2.4.0 // indirect
	gopkg.in/yaml.v3 v3.0.1 // indirect
	k8s.io/apiextensions-apiserver v0.25.0 // indirect
	k8s.io/component-base v0.25.0 // indirect
	k8s.io/klog/v2 v2.70.1 // indirect
	k8s.io/kube-openapi v0.0.0-20220803162953-67bda5d908f1 // indirect
	k8s.io/utils v0.0.0-20220728103510-ee6ede2d64ed // indirect
	sigs.k8s.io/json v0.0.0-20220713155537-f223a00ba0e2 // indirect
	sigs.k8s.io/structured-merge-diff/v4 v4.2.3 // indirect
	sigs.k8s.io/yaml v1.3.0 // indirect
)
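Note that the require block above crosses a Ginkgo major version: the import path changes from github.com/onsi/ginkgo to github.com/onsi/ginkgo/v2, and RunSpecs drops the custom-reporters argument. A minimal suite entry point under the new API (a sketch, not the project's actual suite file):

package controllers_test

import (
	"testing"

	. "github.com/onsi/ginkgo/v2" // v2 path replaces github.com/onsi/ginkgo
	. "github.com/onsi/gomega"
)

// TestAPIs hands the go test runner over to Ginkgo. In v2, RunSpecs no
// longer accepts a reporters slice; reporting is configured through flags
// or ReportAfterSuite nodes instead.
func TestAPIs(t *testing.T) {
	RegisterFailHandler(Fail)
	RunSpecs(t, "Controller Suite")
}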
@ -1,5 +1,5 @@
/*

Copyright 2023.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
81
main.go
@ -1,5 +1,5 @@
/*

Copyright 2023.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.

@ -20,16 +20,20 @@ import (
	"flag"
	"os"

	// Import all Kubernetes client auth plugins (e.g. Azure, GCP, OIDC, etc.)
	// to ensure that exec-entrypoint and run can make use of them.
	_ "k8s.io/client-go/plugin/pkg/client/auth"

	"k8s.io/apimachinery/pkg/runtime"
	utilruntime "k8s.io/apimachinery/pkg/util/runtime"
	clientgoscheme "k8s.io/client-go/kubernetes/scheme"
	_ "k8s.io/client-go/plugin/pkg/client/auth/gcp"
	ctrl "sigs.k8s.io/controller-runtime"
	"sigs.k8s.io/controller-runtime/pkg/healthz"
	"sigs.k8s.io/controller-runtime/pkg/log/zap"

	formoldesmojimfrv1alpha1 "github.com/desmo999r/formol/api/v1alpha1"
	formolv1alpha1 "github.com/desmo999r/formol/api/v1alpha1"
	"github.com/desmo999r/formol/controllers"
	// +kubebuilder:scaffold:imports
	//+kubebuilder:scaffold:imports
)

var (
@ -38,30 +42,47 @@ var (
)

func init() {
	_ = clientgoscheme.AddToScheme(scheme)
	utilruntime.Must(clientgoscheme.AddToScheme(scheme))

	_ = formolv1alpha1.AddToScheme(scheme)
	_ = formoldesmojimfrv1alpha1.AddToScheme(scheme)
	// +kubebuilder:scaffold:scheme
	utilruntime.Must(formolv1alpha1.AddToScheme(scheme))
	//+kubebuilder:scaffold:scheme
}

func main() {
	var metricsAddr string
	var enableLeaderElection bool
	flag.StringVar(&metricsAddr, "metrics-addr", ":8080", "The address the metric endpoint binds to.")
	flag.BoolVar(&enableLeaderElection, "enable-leader-election", false,
	var probeAddr string
	flag.StringVar(&metricsAddr, "metrics-bind-address", ":8080", "The address the metric endpoint binds to.")
	flag.StringVar(&probeAddr, "health-probe-bind-address", ":8081", "The address the probe endpoint binds to.")
	flag.BoolVar(&enableLeaderElection, "leader-elect", false,
		"Enable leader election for controller manager. "+
			"Enabling this will ensure there is only one active controller manager.")
	opts := zap.Options{
		Development: true,
	}
	opts.BindFlags(flag.CommandLine)
	flag.Parse()

	ctrl.SetLogger(zap.New(zap.UseDevMode(true)))
	ctrl.SetLogger(zap.New(zap.UseFlagOptions(&opts)))

	mgr, err := ctrl.NewManager(ctrl.GetConfigOrDie(), ctrl.Options{
		Scheme:             scheme,
		MetricsBindAddress: metricsAddr,
		Port:               9443,
		LeaderElection:     enableLeaderElection,
		LeaderElectionID:   "6846258d.desmojim.fr",
		Scheme:                 scheme,
		MetricsBindAddress:     metricsAddr,
		Port:                   9443,
		HealthProbeBindAddress: probeAddr,
		LeaderElection:         enableLeaderElection,
		LeaderElectionID:       "6846258d.desmojim.fr",
		// LeaderElectionReleaseOnCancel defines if the leader should step down voluntarily
		// when the Manager ends. This requires the binary to end immediately when the
		// Manager is stopped; otherwise this setting is unsafe. Setting it significantly
		// speeds up voluntary leader transitions, as the new leader doesn't have to wait
		// the LeaseDuration time first.
		//
		// In the default scaffold provided, the program ends immediately after
		// the manager stops, so it would be fine to enable this option. However,
		// if you are doing, or intend to do, any operation such as performing cleanups
		// after the manager stops, then enabling it might be unsafe.
		// LeaderElectionReleaseOnCancel: true,
	})
	if err != nil {
		setupLog.Error(err, "unable to start manager")
@ -70,7 +91,6 @@ func main() {

	if err = (&controllers.BackupConfigurationReconciler{
		Client: mgr.GetClient(),
		Log:    ctrl.Log.WithName("controllers").WithName("BackupConfiguration"),
		Scheme: mgr.GetScheme(),
	}).SetupWithManager(mgr); err != nil {
		setupLog.Error(err, "unable to create controller", "controller", "BackupConfiguration")
@ -78,7 +98,6 @@ func main() {
	}
	if err = (&controllers.BackupSessionReconciler{
		Client: mgr.GetClient(),
		Log:    ctrl.Log.WithName("controllers").WithName("BackupSession"),
		Scheme: mgr.GetScheme(),
	}).SetupWithManager(mgr); err != nil {
		setupLog.Error(err, "unable to create controller", "controller", "BackupSession")
@ -86,27 +105,21 @@ func main() {
	}
	if err = (&controllers.RestoreSessionReconciler{
		Client: mgr.GetClient(),
		Log:    ctrl.Log.WithName("controllers").WithName("RestoreSession"),
		Scheme: mgr.GetScheme(),
	}).SetupWithManager(mgr); err != nil {
		setupLog.Error(err, "unable to create controller", "controller", "RestoreSession")
		os.Exit(1)
	}
	// if os.Getenv("ENABLE_WEBHOOKS") != "false" {
	// 	if err = (&formolv1alpha1.BackupSession{}).SetupWebhookWithManager(mgr); err != nil {
	// 		setupLog.Error(err, "unable to create webhook", "webhook", "BackupSession")
	// 		os.Exit(1)
	// 	}
	// 	if err = (&formolv1alpha1.BackupConfiguration{}).SetupWebhookWithManager(mgr); err != nil {
	// 		setupLog.Error(err, "unable to create webhook", "webhook", "BackupConfiguration")
	// 		os.Exit(1)
	// 	}
	// 	if err = (&formoldesmojimfrv1alpha1.Function{}).SetupWebhookWithManager(mgr); err != nil {
	// 		setupLog.Error(err, "unable to create webhook", "webhook", "Function")
	// 		os.Exit(1)
	// 	}
	// }
	// +kubebuilder:scaffold:builder
	//+kubebuilder:scaffold:builder

	if err := mgr.AddHealthzCheck("healthz", healthz.Ping); err != nil {
		setupLog.Error(err, "unable to set up health check")
		os.Exit(1)
	}
	if err := mgr.AddReadyzCheck("readyz", healthz.Ping); err != nil {
		setupLog.Error(err, "unable to set up ready check")
		os.Exit(1)
	}

	setupLog.Info("starting manager")
	if err := mgr.Start(ctrl.SetupSignalHandler()); err != nil {
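The new AddHealthzCheck/AddReadyzCheck calls above accept any healthz.Checker, which is simply a func(*http.Request) error. Where healthz.Ping is too permissive, a custom check can gate readiness; a hedged sketch follows (the ready flag and its wiring are illustrative, not part of this commit):

package main

import (
	"errors"
	"net/http"
	"sync/atomic"

	"sigs.k8s.io/controller-runtime/pkg/healthz"
)

// ready is a hypothetical flag a controller could flip once its caches are warm.
var ready atomic.Bool

// readyCheck satisfies healthz.Checker: it fails until ready is set, so the
// kubelet keeps the Pod out of Service endpoints during startup.
func readyCheck(_ *http.Request) error {
	if !ready.Load() {
		return errors.New("manager not ready yet")
	}
	return nil
}

// Compile-time check that readyCheck matches the Checker signature.
// Wiring it in would replace healthz.Ping above:
//	mgr.AddReadyzCheck("readyz", readyCheck)
var _ healthz.Checker = readyCheck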
@ -1,438 +0,0 @@
package rbac

import (
	"context"
	corev1 "k8s.io/api/core/v1"
	rbacv1 "k8s.io/api/rbac/v1"
	"k8s.io/apimachinery/pkg/api/errors"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"sigs.k8s.io/controller-runtime/pkg/client"
)

const (
	formolRole                            = "formol-sidecar-role"
	backupListenerRole                    = "backup-listener-role"
	backupListenerRoleBinding             = "backup-listener-rolebinding"
	backupSessionCreatorSA                = "backupsession-creator"
	backupSessionCreatorRole              = "backupsession-creator-role"
	backupSessionCreatorRoleBinding       = "backupsession-creator-rolebinding"
	backupSessionStatusUpdaterRole        = "backupsession-statusupdater-role"
	backupSessionStatusUpdaterRoleBinding = "backupsession-statusupdater-rolebinding"
)

func DeleteBackupSessionCreatorRBAC(cl client.Client, namespace string) error {
	serviceaccount := &corev1.ServiceAccount{}
	if err := cl.Get(context.Background(), client.ObjectKey{
		Namespace: namespace,
		Name:      backupSessionCreatorSA,
	}, serviceaccount); err == nil {
		if err = cl.Delete(context.Background(), serviceaccount); err != nil {
			return err
		}
	}
	role := &rbacv1.Role{}
	if err := cl.Get(context.Background(), client.ObjectKey{
		Namespace: namespace,
		Name:      backupSessionCreatorRole,
	}, role); err == nil {
		if err = cl.Delete(context.Background(), role); err != nil {
			return err
		}
	}

	rolebinding := &rbacv1.RoleBinding{}
	if err := cl.Get(context.Background(), client.ObjectKey{
		Namespace: namespace,
		Name:      backupSessionCreatorRoleBinding,
	}, rolebinding); err == nil {
		if err = cl.Delete(context.Background(), rolebinding); err != nil {
			return err
		}
	}

	return nil
}

func CreateBackupSessionCreatorRBAC(cl client.Client, namespace string) error {
	serviceaccount := &corev1.ServiceAccount{
		ObjectMeta: metav1.ObjectMeta{
			Namespace: namespace,
			Name:      backupSessionCreatorSA,
		},
	}
	if err := cl.Get(context.Background(), client.ObjectKey{
		Namespace: namespace,
		Name:      backupSessionCreatorSA,
	}, serviceaccount); err != nil && errors.IsNotFound(err) {
		if err = cl.Create(context.Background(), serviceaccount); err != nil {
			return err
		}
	}
	role := &rbacv1.Role{
		ObjectMeta: metav1.ObjectMeta{
			Namespace: namespace,
			Name:      backupSessionCreatorRole,
		},
		Rules: []rbacv1.PolicyRule{
			{
				Verbs:     []string{"get", "list", "watch", "create", "update", "patch", "delete"},
				APIGroups: []string{"formol.desmojim.fr"},
				Resources: []string{"backupsessions"},
			},
			{
				Verbs:     []string{"get", "list", "watch", "create", "update", "patch", "delete"},
				APIGroups: []string{"formol.desmojim.fr"},
				Resources: []string{"backupsessions/status"},
			},
			{
				Verbs:     []string{"get", "list", "watch"},
				APIGroups: []string{"formol.desmojim.fr"},
				Resources: []string{"backupconfigurations"},
			},
		},
	}
	if err := cl.Get(context.Background(), client.ObjectKey{
		Namespace: namespace,
		Name:      backupSessionCreatorRole,
	}, role); err != nil && errors.IsNotFound(err) {
		if err = cl.Create(context.Background(), role); err != nil {
			return err
		}
	}
	rolebinding := &rbacv1.RoleBinding{
		ObjectMeta: metav1.ObjectMeta{
			Namespace: namespace,
			Name:      backupSessionCreatorRoleBinding,
		},
		Subjects: []rbacv1.Subject{
			{
				Kind: "ServiceAccount",
				Name: backupSessionCreatorSA,
			},
		},
		RoleRef: rbacv1.RoleRef{
			APIGroup: "rbac.authorization.k8s.io",
			Kind:     "Role",
			Name:     backupSessionCreatorRole,
		},
	}
	if err := cl.Get(context.Background(), client.ObjectKey{
		Namespace: namespace,
		Name:      backupSessionCreatorRoleBinding,
	}, rolebinding); err != nil && errors.IsNotFound(err) {
		if err = cl.Create(context.Background(), rolebinding); err != nil {
			return err
		}
	}

	return nil
}
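The get-then-create-if-not-found sequences above silently skip any error other than NotFound and can race with concurrent reconcilers. controller-runtime also ships controllerutil.CreateOrUpdate, which folds the read-modify-write into one idempotent call; the following is a sketch of the ServiceAccount case under that assumption (ensureServiceAccount is an illustrative helper, not from this commit):

package rbac

import (
	"context"

	corev1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"sigs.k8s.io/controller-runtime/pkg/client"
	"sigs.k8s.io/controller-runtime/pkg/controller/controllerutil"
)

// ensureServiceAccount is a hypothetical alternative to the get-then-create
// sequence above: CreateOrUpdate fetches the object, applies the mutate
// callback, then creates or updates it as needed.
func ensureServiceAccount(ctx context.Context, cl client.Client, namespace string) error {
	sa := &corev1.ServiceAccount{
		ObjectMeta: metav1.ObjectMeta{
			Namespace: namespace,
			Name:      backupSessionCreatorSA,
		},
	}
	_, err := controllerutil.CreateOrUpdate(ctx, cl, sa, func() error {
		// Nothing to reconcile beyond existence for a bare ServiceAccount.
		return nil
	})
	return err
}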
func DeleteBackupSessionListenerRBAC(cl client.Client, saName string, namespace string) error {
	if saName == "" {
		saName = "default"
	}
	sa := &corev1.ServiceAccount{}
	if err := cl.Get(context.Background(), client.ObjectKey{
		Namespace: namespace,
		Name:      saName,
	}, sa); err != nil {
		return err
	}

	role := &rbacv1.Role{}
	if err := cl.Get(context.Background(), client.ObjectKey{
		Namespace: namespace,
		Name:      backupListenerRole,
	}, role); err == nil {
		if err = cl.Delete(context.Background(), role); err != nil {
			return err
		}
	}

	rolebinding := &rbacv1.RoleBinding{}
	if err := cl.Get(context.Background(), client.ObjectKey{
		Namespace: namespace,
		Name:      backupListenerRoleBinding,
	}, rolebinding); err == nil {
		if err = cl.Delete(context.Background(), rolebinding); err != nil {
			return err
		}
	}

	return nil
}
func DeleteFormolRBAC(cl client.Client, saName string, namespace string) error {
	if saName == "" {
		saName = "default"
	}
	formolRoleBinding := namespace + "-" + saName + "-formol-sidecar-rolebinding"
	clusterRoleBinding := &rbacv1.ClusterRoleBinding{
		ObjectMeta: metav1.ObjectMeta{
			Name: formolRoleBinding,
		},
		Subjects: []rbacv1.Subject{
			{
				Kind:      "ServiceAccount",
				Namespace: namespace,
				Name:      saName,
			},
		},
		RoleRef: rbacv1.RoleRef{
			APIGroup: "rbac.authorization.k8s.io",
			Kind:     "ClusterRole",
			Name:     formolRole,
		},
	}
	if err := cl.Delete(context.Background(), clusterRoleBinding); err != nil {
		return client.IgnoreNotFound(err)
	}
	return nil
}

func CreateFormolRBAC(cl client.Client, saName string, namespace string) error {
	if saName == "" {
		saName = "default"
	}
	sa := &corev1.ServiceAccount{}
	if err := cl.Get(context.Background(), client.ObjectKey{
		Namespace: namespace,
		Name:      saName,
	}, sa); err != nil {
		return err
	}
	clusterRole := &rbacv1.ClusterRole{
		ObjectMeta: metav1.ObjectMeta{
			Name: formolRole,
		},
		Rules: []rbacv1.PolicyRule{
			{
				Verbs:     []string{"*"},
				APIGroups: []string{"formol.desmojim.fr"},
				Resources: []string{"*"},
				//APIGroups: []string{"formol.desmojim.fr"},
				//Resources: []string{"restoresessions", "backupsessions", "backupconfigurations"},
			},
			{
				Verbs:     []string{"get", "list", "watch"},
				APIGroups: []string{""},
				Resources: []string{"pods", "secrets", "configmaps"},
			},
			{
				Verbs:     []string{"get", "list", "watch"},
				APIGroups: []string{"apps"},
				Resources: []string{"deployments", "replicasets"},
			},
		},
	}
	if err := cl.Get(context.Background(), client.ObjectKey{
		Name: formolRole,
	}, clusterRole); err != nil && errors.IsNotFound(err) {
		if err = cl.Create(context.Background(), clusterRole); err != nil {
			return err
		}
	}
	// Must match the binding name that DeleteFormolRBAC tears down above.
	formolRoleBinding := namespace + "-" + saName + "-formol-sidecar-rolebinding"
	clusterRoleBinding := &rbacv1.ClusterRoleBinding{
		ObjectMeta: metav1.ObjectMeta{
			Name: formolRoleBinding,
		},
		Subjects: []rbacv1.Subject{
			{
				Kind:      "ServiceAccount",
				Namespace: namespace,
				Name:      saName,
			},
		},
		RoleRef: rbacv1.RoleRef{
			APIGroup: "rbac.authorization.k8s.io",
			Kind:     "ClusterRole",
			Name:     formolRole,
		},
	}
	if err := cl.Get(context.Background(), client.ObjectKey{
		Name: formolRoleBinding,
	}, clusterRoleBinding); err != nil && errors.IsNotFound(err) {
		if err = cl.Create(context.Background(), clusterRoleBinding); err != nil {
			return err
		}
	}
	return nil
}
func CreateBackupSessionListenerRBAC(cl client.Client, saName string, namespace string) error {
	if saName == "" {
		saName = "default"
	}
	sa := &corev1.ServiceAccount{}
	if err := cl.Get(context.Background(), client.ObjectKey{
		Namespace: namespace,
		Name:      saName,
	}, sa); err != nil {
		return err
	}
	role := &rbacv1.Role{
		ObjectMeta: metav1.ObjectMeta{
			Namespace: namespace,
			Name:      backupListenerRole,
		},
		Rules: []rbacv1.PolicyRule{
			{
				Verbs:     []string{"get", "list", "watch"},
				APIGroups: []string{""},
				Resources: []string{"pods", "secrets", "configmaps"},
			},
			{
				Verbs:     []string{"get", "list", "watch"},
				APIGroups: []string{"apps"},
				Resources: []string{"deployments", "replicasets"},
			},
			{
				Verbs:     []string{"get", "list", "watch"},
				APIGroups: []string{"formol.desmojim.fr"},
				Resources: []string{"restoresessions", "backupsessions", "backupconfigurations"},
			},
			{
				Verbs:     []string{"update", "delete"},
				APIGroups: []string{"formol.desmojim.fr"},
				Resources: []string{"restoresessions", "backupsessions"},
			},
		},
	}
	if err := cl.Get(context.Background(), client.ObjectKey{
		Namespace: namespace,
		Name:      backupListenerRole,
	}, role); err != nil && errors.IsNotFound(err) {
		if err = cl.Create(context.Background(), role); err != nil {
			return err
		}
	}
	rolebinding := &rbacv1.RoleBinding{
		ObjectMeta: metav1.ObjectMeta{
			Namespace: namespace,
			Name:      backupListenerRoleBinding,
		},
		Subjects: []rbacv1.Subject{
			{
				Kind: "ServiceAccount",
				Name: saName,
			},
		},
		RoleRef: rbacv1.RoleRef{
			APIGroup: "rbac.authorization.k8s.io",
			Kind:     "Role",
			Name:     backupListenerRole,
		},
	}
	if err := cl.Get(context.Background(), client.ObjectKey{
		Namespace: namespace,
		Name:      backupListenerRoleBinding,
	}, rolebinding); err != nil && errors.IsNotFound(err) {
		if err = cl.Create(context.Background(), rolebinding); err != nil {
			return err
		}
	}

	return nil
}
func DeleteBackupSessionStatusUpdaterRBAC(cl client.Client, saName string, namespace string) error {
	if saName == "" {
		saName = "default"
	}
	sa := &corev1.ServiceAccount{}
	if err := cl.Get(context.Background(), client.ObjectKey{
		Namespace: namespace,
		Name:      saName,
	}, sa); err != nil {
		return err
	}

	role := &rbacv1.Role{}
	if err := cl.Get(context.Background(), client.ObjectKey{
		Namespace: namespace,
		Name:      backupSessionStatusUpdaterRole,
	}, role); err == nil {
		if err = cl.Delete(context.Background(), role); err != nil {
			return err
		}
	}

	rolebinding := &rbacv1.RoleBinding{}
	if err := cl.Get(context.Background(), client.ObjectKey{
		Namespace: namespace,
		Name:      backupSessionStatusUpdaterRoleBinding,
	}, rolebinding); err == nil {
		if err = cl.Delete(context.Background(), rolebinding); err != nil {
			return err
		}
	}

	return nil
}

func CreateBackupSessionStatusUpdaterRBAC(cl client.Client, saName string, namespace string) error {
	if saName == "" {
		saName = "default"
	}
	sa := &corev1.ServiceAccount{}
	if err := cl.Get(context.Background(), client.ObjectKey{
		Namespace: namespace,
		Name:      saName,
	}, sa); err != nil {
		return err
	}
	role := &rbacv1.Role{
		ObjectMeta: metav1.ObjectMeta{
			Namespace: namespace,
			Name:      backupSessionStatusUpdaterRole,
		},
		Rules: []rbacv1.PolicyRule{
			{
				Verbs:     []string{"get", "list", "watch", "patch", "update"},
				APIGroups: []string{"formol.desmojim.fr"},
				Resources: []string{"restoresessions/status", "backupsessions/status"},
			},
			{
				Verbs:     []string{"get", "list", "watch"},
				APIGroups: []string{"formol.desmojim.fr"},
				Resources: []string{"restoresessions", "backupsessions"},
			},
		},
	}
	if err := cl.Get(context.Background(), client.ObjectKey{
		Namespace: namespace,
		Name:      backupSessionStatusUpdaterRole,
	}, role); err != nil && errors.IsNotFound(err) {
		if err = cl.Create(context.Background(), role); err != nil {
			return err
		}
	}
	rolebinding := &rbacv1.RoleBinding{
		ObjectMeta: metav1.ObjectMeta{
			Namespace: namespace,
			Name:      backupSessionStatusUpdaterRoleBinding,
		},
		Subjects: []rbacv1.Subject{
			{
				Kind: "ServiceAccount",
				Name: saName,
			},
		},
		RoleRef: rbacv1.RoleRef{
			APIGroup: "rbac.authorization.k8s.io",
			Kind:     "Role",
			Name:     backupSessionStatusUpdaterRole,
		},
	}
	if err := cl.Get(context.Background(), client.ObjectKey{
		Namespace: namespace,
		Name:      backupSessionStatusUpdaterRoleBinding,
	}, rolebinding); err != nil && errors.IsNotFound(err) {
		if err = cl.Create(context.Background(), rolebinding); err != nil {
			return err
		}
	}
	return nil
}
BIN
pkg/utils/.root.go.un~
Normal file
Binary file not shown.
@ -1,10 +1,8 @@
package utils

import (
	"fmt"
	formolv1alpha1 "github.com/desmo999r/formol/api/v1alpha1"
	corev1 "k8s.io/api/core/v1"
	"strings"
)

func ContainsString(slice []string, s string) bool {
@ -29,29 +27,5 @@ func RemoveString(slice []string, s string) (result []string) {
func ConfigureResticEnvVar(backupConf *formolv1alpha1.BackupConfiguration, repo *formolv1alpha1.Repo) []corev1.EnvVar {
	env := []corev1.EnvVar{}
	// S3 backing storage
	if (formolv1alpha1.S3{}) != repo.Spec.Backend.S3 {
		url := fmt.Sprintf("s3:http://%s/%s/%s-%s", repo.Spec.Backend.S3.Server, repo.Spec.Backend.S3.Bucket, strings.ToUpper(backupConf.Namespace), strings.ToLower(backupConf.Name))
		env = append(env, corev1.EnvVar{
			Name:  "RESTIC_REPOSITORY",
			Value: url,
		})
		for _, key := range []string{
			"AWS_ACCESS_KEY_ID",
			"AWS_SECRET_ACCESS_KEY",
			"RESTIC_PASSWORD",
		} {
			env = append(env, corev1.EnvVar{
				Name: key,
				ValueFrom: &corev1.EnvVarSource{
					SecretKeyRef: &corev1.SecretKeySelector{
						LocalObjectReference: corev1.LocalObjectReference{
							Name: repo.Spec.RepositorySecrets,
						},
						Key: key,
					},
				},
			})
		}
	}
	return env
}
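For reference, the environment slice built by ConfigureResticEnvVar is meant to land on a container spec. A hypothetical usage sketch follows; the helper name, container name, and image are illustrative assumptions, not taken from this commit:

package controllers

import (
	corev1 "k8s.io/api/core/v1"

	formolv1alpha1 "github.com/desmo999r/formol/api/v1alpha1"
	"github.com/desmo999r/formol/pkg/utils"
)

// buildSidecar wires the restic environment (repository URL plus credentials
// sourced from the Repo's secret) into a backup sidecar container spec.
func buildSidecar(backupConf *formolv1alpha1.BackupConfiguration, repo *formolv1alpha1.Repo) corev1.Container {
	return corev1.Container{
		Name:  "formol-sidecar",             // assumed name
		Image: "desmo999r/formolcli:latest", // assumed image
		Env:   utils.ConfigureResticEnvVar(backupConf, repo),
	}
}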
33
pkg/utils/root.go~
Normal file
@ -0,0 +1,33 @@
package utils

import (
	"fmt"
	formolv1alpha1 "github.com/desmo999r/formol/api/v1alpha1"
	corev1 "k8s.io/api/core/v1"
	"strings"
)

func ContainsString(slice []string, s string) bool {
	for _, item := range slice {
		if item == s {
			return true
		}
	}
	return false
}

func RemoveString(slice []string, s string) (result []string) {
	for _, item := range slice {
		if item == s {
			continue
		}
		result = append(result, item)
	}
	return
}

func ConfigureResticEnvVar(backupConf *formolv1alpha1.BackupConfiguration, repo *formolv1alpha1.Repo) []corev1.EnvVar {
	env := []corev1.EnvVar{}
	// S3 backing storage
	return env
}
BIN
test/.00-setup.yaml.un~
Normal file
Binary file not shown.
Some files were not shown because too many files have changed in this diff.