From 6d83e5917168be8b43dd4558556571f198932e0f Mon Sep 17 00:00:00 2001 From: Jean-Marc ANDRE Date: Fri, 3 Feb 2023 18:23:41 +0100 Subject: [PATCH] snapshot initial commit with some BackupConfiguration and BackupSession controllers --- .gitignore | 2 + Dockerfile | 13 +- LICENSE | 201 -------- Makefile | 121 +++-- README.md | 93 +++- .../.backupconfiguration_types.go.un~ | Bin 0 -> 6508 bytes api/v1alpha1/.backupsession_types.go.un~ | Bin 0 -> 7478 bytes api/v1alpha1/.common.go.un~ | Bin 0 -> 5425 bytes api/v1alpha1/.function_types.go.un~ | Bin 0 -> 7007 bytes api/v1alpha1/.repo_types.go.un~ | Bin 0 -> 1033 bytes api/v1alpha1/backupconfiguration_types.go | 93 ++-- api/v1alpha1/backupconfiguration_types.go~ | 115 +++++ api/v1alpha1/backupsession_types.go | 46 +- api/v1alpha1/backupsession_types.go~ | 89 ++++ api/v1alpha1/common.go | 34 -- api/v1alpha1/common.go~ | 6 + api/v1alpha1/function_types.go | 17 +- api/v1alpha1/function_types.go~ | 64 +++ api/v1alpha1/groupversion_info.go | 2 +- api/v1alpha1/repo_types.go | 56 ++- api/v1alpha1/repo_types.go~ | 109 +++++ api/v1alpha1/restoresession_types.go | 38 +- api/v1alpha1/zz_generated.deepcopy.go | 118 +++-- config/certmanager/certificate.yaml | 26 - config/certmanager/kustomization.yaml | 5 - config/certmanager/kustomizeconfig.yaml | 16 - config/crd/kustomization.yaml | 21 +- config/crd/kustomizeconfig.yaml | 6 +- .../cainjection_in_backupconfigurations.yaml | 1 - .../cainjection_in_backupsessions.yaml | 1 - .../crd/patches/cainjection_in_functions.yaml | 1 - config/crd/patches/cainjection_in_repoes.yaml | 7 + .../cainjection_in_restoresessions.yaml | 3 +- .../webhook_in_backupconfigurations.yaml | 10 +- .../patches/webhook_in_backupsessions.yaml | 20 +- config/crd/patches/webhook_in_functions.yaml | 19 +- config/crd/patches/webhook_in_repoes.yaml | 16 + .../patches/webhook_in_restoresessions.yaml | 21 +- config/default/kustomization.yaml | 20 +- config/default/manager_auth_proxy_patch.yaml | 40 +- config/default/manager_config_patch.yaml | 10 + config/default/manager_webhook_patch.yaml | 23 - config/default/webhookcainjection_patch.yaml | 15 - config/manager/kustomization.yaml | 8 - config/manager/manager.yaml | 39 -- config/prometheus/kustomization.yaml | 2 - config/prometheus/monitor.yaml | 16 - .../rbac/auth_proxy_client_clusterrole.yaml | 7 - config/rbac/auth_proxy_role.yaml | 13 - config/rbac/auth_proxy_role_binding.yaml | 12 - config/rbac/auth_proxy_service.yaml | 14 - .../rbac/backupconfiguration_editor_role.yaml | 24 - .../rbac/backupconfiguration_viewer_role.yaml | 20 - config/rbac/backupsession_editor_role.yaml | 24 - config/rbac/backupsession_viewer_role.yaml | 20 - config/rbac/function_editor_role.yaml | 24 - config/rbac/function_viewer_role.yaml | 20 - config/rbac/kustomization.yaml | 12 - config/rbac/leader_election_role.yaml | 32 -- config/rbac/leader_election_role_binding.yaml | 12 - config/rbac/role_binding.yaml | 12 - ...l.desmojim.fr_v1alpha1_restoresession.yaml | 7 - .../formol_v1alpha1_backupconfiguration.yaml | 21 +- .../formol_v1alpha1_backupsession.yaml | 12 +- config/samples/formol_v1alpha1_function.yaml | 15 +- config/samples/formol_v1alpha1_repo.yaml | 15 +- .../formol_v1alpha1_restoresession.yaml | 12 + config/samples/test_deployment.yaml | 28 -- config/webhook/kustomization.yaml | 6 - config/webhook/kustomizeconfig.yaml | 25 - config/webhook/service.yaml | 12 - .../.backupconfiguration_controller.go.un~ | Bin 0 -> 35950 bytes ...kupconfiguration_controller_cronjob.go.un~ | Bin 0 -> 7322 bytes 
...kupconfiguration_controller_sidecar.go.un~ | Bin 0 -> 26628 bytes ...backupconfiguration_controller_test.go.un~ | Bin 0 -> 15460 bytes controllers/.backupsession_controller.go.un~ | Bin 0 -> 2760 bytes controllers/.suite_test.go.un~ | Bin 0 -> 7724 bytes controllers/backupconfiguration_controller.go | 358 ++------------ .../backupconfiguration_controller.go~ | 129 +++++ .../backupconfiguration_controller_cronjob.go | 103 ++++ ...backupconfiguration_controller_cronjob.go~ | 102 ++++ .../backupconfiguration_controller_sidecar.go | 137 ++++++ ...backupconfiguration_controller_sidecar.go~ | 134 ++++++ .../backupconfiguration_controller_test.go | 204 ++++---- .../backupconfiguration_controller_test.go~ | 165 +++++++ controllers/backupsession_controller.go | 453 +----------------- controllers/backupsession_controller.go~ | 62 +++ controllers/backupsession_controller_test.go | 147 ------ controllers/restoresession_controller.go | 420 +--------------- controllers/restoresession_controller_test.go | 95 ---- controllers/suite_test.go | 274 ++--------- controllers/suite_test.go~ | 155 ++++++ go.mod | 84 +++- hack/boilerplate.go.txt | 2 +- main.go | 81 ++-- pkg/rbac/backupconfiguration.go | 438 ----------------- pkg/utils/.root.go.un~ | Bin 0 -> 2166 bytes pkg/utils/root.go | 26 - pkg/utils/root.go~ | 33 ++ test/.00-setup.yaml.un~ | Bin 0 -> 12972 bytes test/.01-deployment.yaml.un~ | Bin 0 -> 1928 bytes test/.02-backupconf.yaml.un~ | Bin 0 -> 9847 bytes test/00-setup.yaml | 32 +- test/00-setup.yaml~ | 173 +++++++ test/01-deployment.yaml | 4 +- test/01-deployment.yaml~ | 92 ++++ test/02-backupconf.yaml | 39 +- test/02-backupconf.yaml~ | 35 ++ 108 files changed, 2665 insertions(+), 3274 deletions(-) delete mode 100644 LICENSE create mode 100644 api/v1alpha1/.backupconfiguration_types.go.un~ create mode 100644 api/v1alpha1/.backupsession_types.go.un~ create mode 100644 api/v1alpha1/.common.go.un~ create mode 100644 api/v1alpha1/.function_types.go.un~ create mode 100644 api/v1alpha1/.repo_types.go.un~ create mode 100644 api/v1alpha1/backupconfiguration_types.go~ create mode 100644 api/v1alpha1/backupsession_types.go~ create mode 100644 api/v1alpha1/common.go~ create mode 100644 api/v1alpha1/function_types.go~ create mode 100644 api/v1alpha1/repo_types.go~ delete mode 100644 config/certmanager/certificate.yaml delete mode 100644 config/certmanager/kustomization.yaml delete mode 100644 config/certmanager/kustomizeconfig.yaml create mode 100644 config/crd/patches/cainjection_in_repoes.yaml create mode 100644 config/crd/patches/webhook_in_repoes.yaml create mode 100644 config/default/manager_config_patch.yaml delete mode 100644 config/default/manager_webhook_patch.yaml delete mode 100644 config/default/webhookcainjection_patch.yaml delete mode 100644 config/manager/kustomization.yaml delete mode 100644 config/manager/manager.yaml delete mode 100644 config/prometheus/kustomization.yaml delete mode 100644 config/prometheus/monitor.yaml delete mode 100644 config/rbac/auth_proxy_client_clusterrole.yaml delete mode 100644 config/rbac/auth_proxy_role.yaml delete mode 100644 config/rbac/auth_proxy_role_binding.yaml delete mode 100644 config/rbac/auth_proxy_service.yaml delete mode 100644 config/rbac/backupconfiguration_editor_role.yaml delete mode 100644 config/rbac/backupconfiguration_viewer_role.yaml delete mode 100644 config/rbac/backupsession_editor_role.yaml delete mode 100644 config/rbac/backupsession_viewer_role.yaml delete mode 100644 config/rbac/function_editor_role.yaml delete mode 100644 
config/rbac/function_viewer_role.yaml delete mode 100644 config/rbac/kustomization.yaml delete mode 100644 config/rbac/leader_election_role.yaml delete mode 100644 config/rbac/leader_election_role_binding.yaml delete mode 100644 config/rbac/role_binding.yaml delete mode 100644 config/samples/formol.desmojim.fr_v1alpha1_restoresession.yaml create mode 100644 config/samples/formol_v1alpha1_restoresession.yaml delete mode 100644 config/samples/test_deployment.yaml delete mode 100644 config/webhook/kustomization.yaml delete mode 100644 config/webhook/kustomizeconfig.yaml delete mode 100644 config/webhook/service.yaml create mode 100644 controllers/.backupconfiguration_controller.go.un~ create mode 100644 controllers/.backupconfiguration_controller_cronjob.go.un~ create mode 100644 controllers/.backupconfiguration_controller_sidecar.go.un~ create mode 100644 controllers/.backupconfiguration_controller_test.go.un~ create mode 100644 controllers/.backupsession_controller.go.un~ create mode 100644 controllers/.suite_test.go.un~ create mode 100644 controllers/backupconfiguration_controller.go~ create mode 100644 controllers/backupconfiguration_controller_cronjob.go create mode 100644 controllers/backupconfiguration_controller_cronjob.go~ create mode 100644 controllers/backupconfiguration_controller_sidecar.go create mode 100644 controllers/backupconfiguration_controller_sidecar.go~ create mode 100644 controllers/backupconfiguration_controller_test.go~ create mode 100644 controllers/backupsession_controller.go~ delete mode 100644 controllers/backupsession_controller_test.go delete mode 100644 controllers/restoresession_controller_test.go create mode 100644 controllers/suite_test.go~ delete mode 100644 pkg/rbac/backupconfiguration.go create mode 100644 pkg/utils/.root.go.un~ create mode 100644 pkg/utils/root.go~ create mode 100644 test/.00-setup.yaml.un~ create mode 100644 test/.01-deployment.yaml.un~ create mode 100644 test/.02-backupconf.yaml.un~ create mode 100644 test/00-setup.yaml~ create mode 100644 test/01-deployment.yaml~ create mode 100644 test/02-backupconf.yaml~ diff --git a/.gitignore b/.gitignore index 871807a..d181f1b 100644 --- a/.gitignore +++ b/.gitignore @@ -1,3 +1,5 @@ +*~ + # Binaries for programs and plugins *.exe *.exe~ diff --git a/Dockerfile b/Dockerfile index f8b22c7..8f9cca1 100644 --- a/Dockerfile +++ b/Dockerfile @@ -1,5 +1,7 @@ # Build the manager binary -FROM golang:alpine as builder +FROM golang:1.19 as builder +ARG TARGETOS +ARG TARGETARCH WORKDIR /workspace # Copy the Go Modules manifests @@ -12,17 +14,20 @@ RUN go mod download # Copy the go source COPY main.go main.go COPY api/ api/ -COPY pkg/ pkg/ COPY controllers/ controllers/ # Build -RUN CGO_ENABLED=0 GOOS=linux GOARCH=arm64 GO111MODULE=on go build -a -o manager main.go +# the GOARCH has not a default value to allow the binary be built according to the host where the command +# was called. For example, if we call make docker-build in a local env which has the Apple Silicon M1 SO +# the docker BUILDPLATFORM arg will be linux/arm64 when for Apple x86 it will be linux/amd64. Therefore, +# by leaving it empty we can ensure that the container and binary shipped on it will have the same platform. 
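+# For example, running docker buildx build --platform linux/arm64 . makes BuildKit set TARGETOS=linux and TARGETARCH=arm64 for this build stage automatically.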
+RUN CGO_ENABLED=0 GOOS=${TARGETOS:-linux} GOARCH=${TARGETARCH} go build -a -o manager main.go # Use distroless as minimal base image to package the manager binary # Refer to https://github.com/GoogleContainerTools/distroless for more details FROM gcr.io/distroless/static:nonroot WORKDIR / COPY --from=builder /workspace/manager . -USER nonroot:nonroot +USER 65532:65532 ENTRYPOINT ["/manager"] diff --git a/LICENSE b/LICENSE deleted file mode 100644 index 261eeb9..0000000 --- a/LICENSE +++ /dev/null @@ -1,201 +0,0 @@ - Apache License - Version 2.0, January 2004 - http://www.apache.org/licenses/ - - TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION - - 1. Definitions. - - "License" shall mean the terms and conditions for use, reproduction, - and distribution as defined by Sections 1 through 9 of this document. - - "Licensor" shall mean the copyright owner or entity authorized by - the copyright owner that is granting the License. - - "Legal Entity" shall mean the union of the acting entity and all - other entities that control, are controlled by, or are under common - control with that entity. For the purposes of this definition, - "control" means (i) the power, direct or indirect, to cause the - direction or management of such entity, whether by contract or - otherwise, or (ii) ownership of fifty percent (50%) or more of the - outstanding shares, or (iii) beneficial ownership of such entity. - - "You" (or "Your") shall mean an individual or Legal Entity - exercising permissions granted by this License. - - "Source" form shall mean the preferred form for making modifications, - including but not limited to software source code, documentation - source, and configuration files. - - "Object" form shall mean any form resulting from mechanical - transformation or translation of a Source form, including but - not limited to compiled object code, generated documentation, - and conversions to other media types. - - "Work" shall mean the work of authorship, whether in Source or - Object form, made available under the License, as indicated by a - copyright notice that is included in or attached to the work - (an example is provided in the Appendix below). - - "Derivative Works" shall mean any work, whether in Source or Object - form, that is based on (or derived from) the Work and for which the - editorial revisions, annotations, elaborations, or other modifications - represent, as a whole, an original work of authorship. For the purposes - of this License, Derivative Works shall not include works that remain - separable from, or merely link (or bind by name) to the interfaces of, - the Work and Derivative Works thereof. - - "Contribution" shall mean any work of authorship, including - the original version of the Work and any modifications or additions - to that Work or Derivative Works thereof, that is intentionally - submitted to Licensor for inclusion in the Work by the copyright owner - or by an individual or Legal Entity authorized to submit on behalf of - the copyright owner. 
For the purposes of this definition, "submitted" - means any form of electronic, verbal, or written communication sent - to the Licensor or its representatives, including but not limited to - communication on electronic mailing lists, source code control systems, - and issue tracking systems that are managed by, or on behalf of, the - Licensor for the purpose of discussing and improving the Work, but - excluding communication that is conspicuously marked or otherwise - designated in writing by the copyright owner as "Not a Contribution." - - "Contributor" shall mean Licensor and any individual or Legal Entity - on behalf of whom a Contribution has been received by Licensor and - subsequently incorporated within the Work. - - 2. Grant of Copyright License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - copyright license to reproduce, prepare Derivative Works of, - publicly display, publicly perform, sublicense, and distribute the - Work and such Derivative Works in Source or Object form. - - 3. Grant of Patent License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - (except as stated in this section) patent license to make, have made, - use, offer to sell, sell, import, and otherwise transfer the Work, - where such license applies only to those patent claims licensable - by such Contributor that are necessarily infringed by their - Contribution(s) alone or by combination of their Contribution(s) - with the Work to which such Contribution(s) was submitted. If You - institute patent litigation against any entity (including a - cross-claim or counterclaim in a lawsuit) alleging that the Work - or a Contribution incorporated within the Work constitutes direct - or contributory patent infringement, then any patent licenses - granted to You under this License for that Work shall terminate - as of the date such litigation is filed. - - 4. Redistribution. You may reproduce and distribute copies of the - Work or Derivative Works thereof in any medium, with or without - modifications, and in Source or Object form, provided that You - meet the following conditions: - - (a) You must give any other recipients of the Work or - Derivative Works a copy of this License; and - - (b) You must cause any modified files to carry prominent notices - stating that You changed the files; and - - (c) You must retain, in the Source form of any Derivative Works - that You distribute, all copyright, patent, trademark, and - attribution notices from the Source form of the Work, - excluding those notices that do not pertain to any part of - the Derivative Works; and - - (d) If the Work includes a "NOTICE" text file as part of its - distribution, then any Derivative Works that You distribute must - include a readable copy of the attribution notices contained - within such NOTICE file, excluding those notices that do not - pertain to any part of the Derivative Works, in at least one - of the following places: within a NOTICE text file distributed - as part of the Derivative Works; within the Source form or - documentation, if provided along with the Derivative Works; or, - within a display generated by the Derivative Works, if and - wherever such third-party notices normally appear. 
The contents - of the NOTICE file are for informational purposes only and - do not modify the License. You may add Your own attribution - notices within Derivative Works that You distribute, alongside - or as an addendum to the NOTICE text from the Work, provided - that such additional attribution notices cannot be construed - as modifying the License. - - You may add Your own copyright statement to Your modifications and - may provide additional or different license terms and conditions - for use, reproduction, or distribution of Your modifications, or - for any such Derivative Works as a whole, provided Your use, - reproduction, and distribution of the Work otherwise complies with - the conditions stated in this License. - - 5. Submission of Contributions. Unless You explicitly state otherwise, - any Contribution intentionally submitted for inclusion in the Work - by You to the Licensor shall be under the terms and conditions of - this License, without any additional terms or conditions. - Notwithstanding the above, nothing herein shall supersede or modify - the terms of any separate license agreement you may have executed - with Licensor regarding such Contributions. - - 6. Trademarks. This License does not grant permission to use the trade - names, trademarks, service marks, or product names of the Licensor, - except as required for reasonable and customary use in describing the - origin of the Work and reproducing the content of the NOTICE file. - - 7. Disclaimer of Warranty. Unless required by applicable law or - agreed to in writing, Licensor provides the Work (and each - Contributor provides its Contributions) on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or - implied, including, without limitation, any warranties or conditions - of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A - PARTICULAR PURPOSE. You are solely responsible for determining the - appropriateness of using or redistributing the Work and assume any - risks associated with Your exercise of permissions under this License. - - 8. Limitation of Liability. In no event and under no legal theory, - whether in tort (including negligence), contract, or otherwise, - unless required by applicable law (such as deliberate and grossly - negligent acts) or agreed to in writing, shall any Contributor be - liable to You for damages, including any direct, indirect, special, - incidental, or consequential damages of any character arising as a - result of this License or out of the use or inability to use the - Work (including but not limited to damages for loss of goodwill, - work stoppage, computer failure or malfunction, or any and all - other commercial damages or losses), even if such Contributor - has been advised of the possibility of such damages. - - 9. Accepting Warranty or Additional Liability. While redistributing - the Work or Derivative Works thereof, You may choose to offer, - and charge a fee for, acceptance of support, warranty, indemnity, - or other liability obligations and/or rights consistent with this - License. However, in accepting such obligations, You may act only - on Your own behalf and on Your sole responsibility, not on behalf - of any other Contributor, and only if You agree to indemnify, - defend, and hold each Contributor harmless for any liability - incurred by, or claims asserted against, such Contributor by reason - of your accepting any such warranty or additional liability. 
- - END OF TERMS AND CONDITIONS - - APPENDIX: How to apply the Apache License to your work. - - To apply the Apache License to your work, attach the following - boilerplate notice, with the fields enclosed by brackets "[]" - replaced with your own identifying information. (Don't include - the brackets!) The text should be enclosed in the appropriate - comment syntax for the file format. We also recommend that a - file or class name and description of purpose be included on the - same "printed page" as the copyright notice for easier - identification within third-party archives. - - Copyright [yyyy] [name of copyright owner] - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. diff --git a/Makefile b/Makefile index 1998ac6..9116f85 100644 --- a/Makefile +++ b/Makefile @@ -1,8 +1,8 @@ # Image URL to use all building/pushing image targets -IMG ?= desmo999r/formolcontroller:0.3.0 -# Produce CRDs that work back to Kubernetes 1.11 (no version conversion) -CRD_OPTIONS ?= "crd:trivialVersions=true,preserveUnknownFields=false,crdVersions=v1" +IMG ?= controller:latest +# ENVTEST_K8S_VERSION refers to the version of kubebuilder assets to be downloaded by envtest binary. +ENVTEST_K8S_VERSION = 1.25.0 # Get the currently used golang install path (in GOPATH/bin, unless GOBIN is set) ifeq (,$(shell go env GOBIN)) @@ -12,11 +12,11 @@ GOBIN=$(shell go env GOBIN) endif # Setting SHELL to bash allows bash commands to be executed by recipes. -# This is a requirement for 'setup-envtest.sh' in the test target. # Options are set to exit when a recipe line exits non-zero or a piped command fails. SHELL = /usr/bin/env bash -o pipefail .SHELLFLAGS = -ec +.PHONY: all all: build ##@ General @@ -32,79 +32,126 @@ all: build # More info on the awk command: # http://linuxcommand.org/lc3_adv_awk.php +.PHONY: help help: ## Display this help. @awk 'BEGIN {FS = ":.*##"; printf "\nUsage:\n make \033[36m\033[0m\n"} /^[a-zA-Z_0-9-]+:.*?##/ { printf " \033[36m%-15s\033[0m %s\n", $$1, $$2 } /^##@/ { printf "\n\033[1m%s\033[0m\n", substr($$0, 5) } ' $(MAKEFILE_LIST) ##@ Development +.PHONY: manifests manifests: controller-gen ## Generate WebhookConfiguration, ClusterRole and CustomResourceDefinition objects. - $(CONTROLLER_GEN) $(CRD_OPTIONS) rbac:roleName=manager-role webhook paths="./..." output:crd:artifacts:config=config/crd/bases + $(CONTROLLER_GEN) rbac:roleName=manager-role crd webhook paths="./..." output:crd:artifacts:config=config/crd/bases +.PHONY: generate generate: controller-gen ## Generate code containing DeepCopy, DeepCopyInto, and DeepCopyObject method implementations. $(CONTROLLER_GEN) object:headerFile="hack/boilerplate.go.txt" paths="./..." +.PHONY: fmt fmt: ## Run go fmt against code. go fmt ./... +.PHONY: vet vet: ## Run go vet against code. go vet ./... -ENVTEST_ASSETS_DIR=$(shell pwd)/testbin -test: manifests generate fmt vet ## Run tests. 
- mkdir -p ${ENVTEST_ASSETS_DIR} - test -f ${ENVTEST_ASSETS_DIR}/setup-envtest.sh || curl -sSLo ${ENVTEST_ASSETS_DIR}/setup-envtest.sh https://raw.githubusercontent.com/kubernetes-sigs/controller-runtime/v0.8.3/hack/setup-envtest.sh - source ${ENVTEST_ASSETS_DIR}/setup-envtest.sh; fetch_envtest_tools $(ENVTEST_ASSETS_DIR); setup_envtest_env $(ENVTEST_ASSETS_DIR); go test ./... -coverprofile cover.out +.PHONY: test +test: manifests generate fmt vet envtest ## Run tests. + KUBEBUILDER_ASSETS="$(shell $(ENVTEST) use $(ENVTEST_K8S_VERSION) --bin-dir $(LOCALBIN) -p path)" go test ./... -coverprofile cover.out ##@ Build -build: generate fmt vet ## Build manager binary. +.PHONY: build +build: manifests generate fmt vet ## Build manager binary. go build -o bin/manager main.go +.PHONY: run run: manifests generate fmt vet ## Run a controller from your host. go run ./main.go +# If you wish to build the manager image for other platforms, you can use the --platform flag. +# (i.e. docker build --platform linux/arm64). However, you must enable Docker BuildKit for it. +# More info: https://docs.docker.com/develop/develop-images/build_enhancements/ +.PHONY: docker-build docker-build: test ## Build docker image with the manager. - podman build --disable-compression --format=docker . -t ${IMG} + docker build -t ${IMG} . +.PHONY: docker-push docker-push: ## Push docker image with the manager. - podman push ${IMG} + docker push ${IMG} -docker: docker-build docker-push +# PLATFORMS defines the target platforms for which the manager image will be built, to provide support for multiple +# architectures. (i.e. make docker-buildx IMG=myregistry/mypoperator:0.0.1). To use this option you need to: +# - be able to use docker buildx. More info: https://docs.docker.com/build/buildx/ +# - have BuildKit enabled. More info: https://docs.docker.com/develop/develop-images/build_enhancements/ +# - be able to push the image to your registry (i.e. if you do not provide a valid value via IMG= then the export will fail) +# To properly provide solutions that support more than one platform, you should use this option. +PLATFORMS ?= linux/arm64,linux/amd64,linux/s390x,linux/ppc64le +.PHONY: docker-buildx +docker-buildx: test ## Build and push docker image for the manager for cross-platform support + # copy existing Dockerfile and insert --platform=${BUILDPLATFORM} into Dockerfile.cross, and preserve the original Dockerfile + sed -e '1 s/\(^FROM\)/FROM --platform=\$$\{BUILDPLATFORM\}/; t' -e ' 1,// s//FROM --platform=\$$\{BUILDPLATFORM\}/' Dockerfile > Dockerfile.cross + - docker buildx create --name project-v3-builder + docker buildx use project-v3-builder + - docker buildx build --push --platform=$(PLATFORMS) --tag ${IMG} -f Dockerfile.cross . + - docker buildx rm project-v3-builder + rm Dockerfile.cross ##@ Deployment +ifndef ignore-not-found + ignore-not-found = false +endif + +.PHONY: install install: manifests kustomize ## Install CRDs into the K8s cluster specified in ~/.kube/config. $(KUSTOMIZE) build config/crd | kubectl apply -f - -uninstall: manifests kustomize ## Uninstall CRDs from the K8s cluster specified in ~/.kube/config. - $(KUSTOMIZE) build config/crd | kubectl delete -f - +.PHONY: uninstall +uninstall: manifests kustomize ## Uninstall CRDs from the K8s cluster specified in ~/.kube/config. Call with ignore-not-found=true to ignore resource not found errors during deletion.
+ $(KUSTOMIZE) build config/crd | kubectl delete --ignore-not-found=$(ignore-not-found) -f - +.PHONY: deploy deploy: manifests kustomize ## Deploy controller to the K8s cluster specified in ~/.kube/config. cd config/manager && $(KUSTOMIZE) edit set image controller=${IMG} $(KUSTOMIZE) build config/default | kubectl apply -f - -undeploy: ## Undeploy controller from the K8s cluster specified in ~/.kube/config. - $(KUSTOMIZE) build config/default | kubectl delete -f - +.PHONY: undeploy +undeploy: ## Undeploy controller from the K8s cluster specified in ~/.kube/config. Call with ignore-not-found=true to ignore resource not found errors during deletion. + $(KUSTOMIZE) build config/default | kubectl delete --ignore-not-found=$(ignore-not-found) -f - +##@ Build Dependencies -CONTROLLER_GEN = $(shell pwd)/bin/controller-gen -controller-gen: ## Download controller-gen locally if necessary. - $(call go-get-tool,$(CONTROLLER_GEN),sigs.k8s.io/controller-tools/cmd/controller-gen@v0.4.1) +## Location to install dependencies to +LOCALBIN ?= $(shell pwd)/bin +$(LOCALBIN): + mkdir -p $(LOCALBIN) -KUSTOMIZE = $(shell pwd)/bin/kustomize -kustomize: ## Download kustomize locally if necessary. - $(call go-get-tool,$(KUSTOMIZE),sigs.k8s.io/kustomize/kustomize/v3@v3.8.7) +## Tool Binaries +KUSTOMIZE ?= $(LOCALBIN)/kustomize +CONTROLLER_GEN ?= $(LOCALBIN)/controller-gen +ENVTEST ?= $(LOCALBIN)/setup-envtest -# go-get-tool will 'go get' any package $2 and install it to $1. -PROJECT_DIR := $(shell dirname $(abspath $(lastword $(MAKEFILE_LIST)))) -define go-get-tool -@[ -f $(1) ] || { \ -set -e ;\ -TMP_DIR=$$(mktemp -d) ;\ -cd $$TMP_DIR ;\ -go mod init tmp ;\ -echo "Downloading $(2)" ;\ -GOBIN=$(PROJECT_DIR)/bin go get $(2) ;\ -rm -rf $$TMP_DIR ;\ -} -endef +## Tool Versions +KUSTOMIZE_VERSION ?= v3.8.7 +CONTROLLER_TOOLS_VERSION ?= v0.10.0 + +KUSTOMIZE_INSTALL_SCRIPT ?= "https://raw.githubusercontent.com/kubernetes-sigs/kustomize/master/hack/install_kustomize.sh" +.PHONY: kustomize +kustomize: $(KUSTOMIZE) ## Download kustomize locally if necessary. If wrong version is installed, it will be removed before downloading. +$(KUSTOMIZE): $(LOCALBIN) + @if test -x $(LOCALBIN)/kustomize && ! $(LOCALBIN)/kustomize version | grep -q $(KUSTOMIZE_VERSION); then \ + echo "$(LOCALBIN)/kustomize version is not expected $(KUSTOMIZE_VERSION). Removing it before installing."; \ + rm -rf $(LOCALBIN)/kustomize; \ + fi + test -s $(LOCALBIN)/kustomize || { curl -Ss $(KUSTOMIZE_INSTALL_SCRIPT) | bash -s -- $(subst v,,$(KUSTOMIZE_VERSION)) $(LOCALBIN); } + +.PHONY: controller-gen +controller-gen: $(CONTROLLER_GEN) ## Download controller-gen locally if necessary. If wrong version is installed, it will be overwritten. +$(CONTROLLER_GEN): $(LOCALBIN) + test -s $(LOCALBIN)/controller-gen && $(LOCALBIN)/controller-gen --version | grep -q $(CONTROLLER_TOOLS_VERSION) || \ + GOBIN=$(LOCALBIN) go install sigs.k8s.io/controller-tools/cmd/controller-gen@$(CONTROLLER_TOOLS_VERSION) + +.PHONY: envtest +envtest: $(ENVTEST) ## Download envtest-setup locally if necessary. +$(ENVTEST): $(LOCALBIN) + test -s $(LOCALBIN)/setup-envtest || GOBIN=$(LOCALBIN) go install sigs.k8s.io/controller-runtime/tools/setup-envtest@latest diff --git a/README.md b/README.md index aa6fa67..6864950 100644 --- a/README.md +++ b/README.md @@ -1,3 +1,94 @@ # formol +// TODO(user): Add simple overview of use/purpose + +## Description +// TODO(user): An in-depth paragraph about your project and overview of use + +## Getting Started +You’ll need a Kubernetes cluster to run against. 
You can use [KIND](https://sigs.k8s.io/kind) to get a local cluster for testing, or run against a remote cluster. +**Note:** Your controller will automatically use the current context in your kubeconfig file (i.e. whatever cluster `kubectl cluster-info` shows). + +### Running on the cluster +1. Install instances of the custom resources: + +```sh +kubectl apply -f config/samples/ +``` + +2. Build and push your image to the location specified by `IMG`: + +```sh +make docker-build docker-push IMG=<some-registry>/formol:tag +``` + +3. Deploy the controller to the cluster with the image specified by `IMG`: + +```sh +make deploy IMG=<some-registry>/formol:tag +``` + +### Uninstall CRDs +To delete the CRDs from the cluster: + +```sh +make uninstall +``` + +### Undeploy controller +Undeploy the controller from the cluster: + +```sh +make undeploy +``` + +## Contributing +// TODO(user): Add detailed information on how you would like others to contribute to this project + +### How it works +This project aims to follow the Kubernetes [Operator pattern](https://kubernetes.io/docs/concepts/extend-kubernetes/operator/). + +It uses [Controllers](https://kubernetes.io/docs/concepts/architecture/controller/) +which provide a reconcile function responsible for synchronizing resources until the desired state is reached on the cluster. + +### Test It Out +1. Install the CRDs into the cluster: + +```sh +make install +``` + +2. Run your controller (this will run in the foreground, so switch to a new terminal if you want to leave it running): + +```sh +make run +``` + +**NOTE:** You can also run this in one step by running: `make install run` + +### Modifying the API definitions +If you are editing the API definitions, generate the manifests such as CRs or CRDs using: + +```sh +make manifests +``` + +**NOTE:** Run `make --help` for more information on all potential `make` targets + +More information can be found via the [Kubebuilder Documentation](https://book.kubebuilder.io/introduction.html) + +## License + +Copyright 2023. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License.
-My k8s backup solution diff --git a/api/v1alpha1/.backupconfiguration_types.go.un~ b/api/v1alpha1/.backupconfiguration_types.go.un~ new file mode 100644 index 0000000000000000000000000000000000000000..8e215c26710e52b56efa013a3dd498ef89a91286 GIT binary patch literal 6508 zcmeI$ze~eF6bJBYtUs!4aTK*$W3{dhF3y4#6hx@21(j04rZ#nObksq25p?#S@L%XC zxF{(72Xxl=JumeQ&0Po2=?-4=AZd8K4|lm_w|0D2uCE2}!n-NHW^zm0{%t4sbaDSy zI(QsU7q*tm#m$#Ow)jyMLWC5Hshz_{5Y}2j^DG~>nzed0UpWec`l{dTp71MlWr<>t zB0-U*z`!vIvH#q@PU5U5_S!g0!}~eNU!00=BED$ITd@mV`j;Ibj)vqA;&Xy-yiTz} z0d-LHDZg?qG4YQ9if91VVMg7sel%e&$Z2ev#mW+PNmepkoh56C-PB}Rkd}r8Sy6%x zi*hu;>L{abSU(cwiEfRZmz4TPtCED=5Gxp}PLegmZfcVBrKEO`q#yw|%!&o7gJcb{ zo0=f!B&2Sys~q7r!fJ&n9I=;j+gCKf>LACKZfVU+N6jmXVgy@?RSHta$QoidH8IY{ zG0MHHUr~xs8)cP3)G4xt*iB7}GjWR8#dZ;aHpU7Cs6%88v74F@b8(2=5N2ssCp?`Y zYlz*{WH=pX$fhC4GOSE+IzrYEyD1T#%8gB;_jkFeIKtnB6k^dQSQHCQCx}k`%OJr^ p-PUw6PO$qVk9UjC!#5~wWL^LO literal 0 HcmV?d00001 diff --git a/api/v1alpha1/.backupsession_types.go.un~ b/api/v1alpha1/.backupsession_types.go.un~ new file mode 100644 index 0000000000000000000000000000000000000000..a0433fe1ba5ebb94a6e220659ba125b7020a746f GIT binary patch literal 7478 zcmeI1&ubGw6vsDd)Q(yeY^(TlyI^g#&6E*?oJx9zDMnUuu4K z%5h_4>D!yN*SBvyem4E&?Ob{=UA(e(|G|q_7eAee=D#|QbAw#jCJM^ryJP~RawlBB z@Uc>&OvSnLPJUsCMxQQ53=kNd}{4KD-f=qKO}HC&iB1EH>BS~E_h~{ag{ms zNI(Om3b`@>#CHTl3r)#lY9wfG9<-YhL6t?k6QniQTREFl>rFps)uSN1SC4sn)o-ob zMI!O>B|ois5-K2~bd>?Hg{ssrKK6q;%QnCi3^!{_2Vcun6U1!TkLe66UZy~}MO!xT zTBMpFWzf zbFvrCqpZ+l3+QdfywX5+l-xdxDn$kv|ApqPPII@EA3q8N2Uyr_^WRTUA1Uq06Uatz zfi&Q(k&CBMTaZeY4UUpsxhKgLxk*uo zoz)gks`Ej_lU5M&B}92;2zY3_D+5?80nlw3LIk5mFy9N9L-{bh++8&fq+XF&GN9sI zws$yC!?@8Ks}BNX<<~0`$PWVo_wV^#~D+8r9=|{aA$b az$nf?aU$X#MP&fTr`ITMp$6@jmp=jh6MG5( literal 0 HcmV?d00001 diff --git a/api/v1alpha1/.common.go.un~ b/api/v1alpha1/.common.go.un~ new file mode 100644 index 0000000000000000000000000000000000000000..7bba67b2a3a39314247af8f6bf37603553553965 GIT binary patch literal 5425 zcmeI0ze^lJ6vxNAON_s&AW0zLKnh7^B`TOMAf!vcl*--Ss7uaWxm^Vd69iGaR4GD= z^x_|r`bt5BbSaXQ=6&D3*?X75%IW68+{}D;cIWXv^WHA^KHdGkp1sbmO=EfK-P`fa z{gp3&e?ETx_4CQEXRW!}=987RkC&g`G_Q_~F*DS(jnQOU1oXa{WN`MXg{w`o`9l^o zq(_HKXl%D+v@lw*2qc4(4aQ;2@2CIm!Wb5h8B;}UxB$pTG1ar-qBy#*2oR>NJ25f5 z5d;DbpjyfeWK*4QoL?h%sg-ef4-p`62O3-*dU`eop22C=2snUxpgaJ?8e{~gA`@gt z;LaJ`SXCH34o1MxY8cpGph0`6`~g5{j5UNfB*mh literal 0 HcmV?d00001 diff --git a/api/v1alpha1/.function_types.go.un~ b/api/v1alpha1/.function_types.go.un~ new file mode 100644 index 0000000000000000000000000000000000000000..09f8d8013fce7f131c216e83502f45bc7a0fbe25 GIT binary patch literal 7007 zcmeI0&u`N(6vxwTFbV?>fIytydf)_|1{z2c2dXuJN>ww~4upi*LgRE{O-c@7jTC6GN+gyS}tTKTeAG(xVG~?{@Sm;;s-Al!z%x3 zjI5sh*qe?%oER-UdiL`A^YH`LjaN@zznOUd>GGGS#dj-)Va#AwQvn|i$_gseY_vab zJ{+UB1!HMbHW-m574+&*`uqb^Qj=Eb@;`e3arg-i!}xv-`zF^P!F&PpubRVY&CKHj z8bKPR7BUgQ)0nB=(9p;Wc_xImu0or}!Omc=(4YxMf-$U-&KI zLK~>iQbaT;At)D{J=O*Qs~@(7&qMjRd@JVRY$?WdOaz#8 zoFuKp)DwA?2=%@S^(YRMx}Tt)o0AGF*p%}h~4h{5p&A` zm2dzdTL;f)T{ngS?}OvI5cFdKE6y5Ie(CWD#0pEB3Jy0J#4Pfiu74LhlWT(3z_{|g zv_ABjP#lu4iECnGJH;Ck6Jgn-OofnXOo~JDHM20ypmcvbXkF&{A&V0Qx-2I7LM$A? 
zfP2H1k}a_;D)T4KnyuoHe9bJj$Hkz>hRl*MDi%?{E#RI6NWk_glO&h~P=~^bQ}Q#j z0G=c)cMj4$0Tdf}O@hgk`@n6+)&psW>EGGYiu^Y0}~xP+U$3?2Z%ky7K>s z8U>ZXbbSGyyr)>A<2EjL_?B`5zQ(5dJr~6*)k&BSD4#}4Kx0=NlCO!_A8hxJrBo#5 z7$MQlDsP)S&i`@QMBvg(eVQ{t)!Qt2Vn(7u(3ilP}U!aC4iU_h?#&G1VA(lGbGcRmzbNXscG5>j!BSmWB`g)SaKMRN#u~B d2^$!bLePu_j!8eDEdf9bi%C5B*0k~YDgfF$N@4&2 literal 0 HcmV?d00001 diff --git a/api/v1alpha1/backupconfiguration_types.go b/api/v1alpha1/backupconfiguration_types.go index b460607..701ac03 100644 --- a/api/v1alpha1/backupconfiguration_types.go +++ b/api/v1alpha1/backupconfiguration_types.go @@ -1,5 +1,5 @@ /* - +Copyright 2023. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -17,87 +17,82 @@ limitations under the License. package v1alpha1 import ( - corev1 "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" ) +// +kubebuilder:validation:Enum=Deployment;StatefulSet;Pod +type TargetKind string + const ( - SidecarKind string = "Sidecar" - JobKind string = "Job" - BackupVolumes string = "Volumes" + Deployment TargetKind = "Deployment" + StatefulSet TargetKind = "StatefulSet" + Pod TargetKind = "Pod" +) + +// +kubebuilder:validation:Enum=Online;Snapshot;Job +type BackupType string + +const ( + SnapshotKind BackupType = "Snapshot" + OnlineKind BackupType = "Online" + JobKind BackupType = "Job" ) type Step struct { Name string `json:"name"` // +optional - Finalize *bool `json:"finalize,omitempty"` + Finalize *bool `json:"finalize"` } -type Hook struct { - Cmd string `json:"cmd"` - // +optional - Args []string `json:"args,omitempty"` +type TargetContainer struct { + Name string `json:"name"` + Paths []string `json:"paths,omitempty"` + // +kubebuilder:default:=2 + Retry int `json:"retry"` + Steps []Step `json:"steps,omitempty"` } type Target struct { - // +kubebuilder:validation:Enum=Sidecar;Job - Kind string `json:"kind"` - Name string `json:"name"` - // +optional - ContainerName string `json:"containerName"` - // +optional - ApiVersion string `json:"apiVersion,omitempty"` - // +optional - VolumeMounts []corev1.VolumeMount `json:"volumeMounts,omitempty"` - // +optional - Paths []string `json:"paths,omitempty"` - // +optional - // +kubebuilder:validation:MinItems=1 - Steps []Step `json:"steps,omitempty"` - // +kubebuilder:default:=2 - Retry int `json:"retry,omitempty"` + BackupType `json:"backupType"` + TargetKind `json:"targetKind"` + TargetName string `json:"targetName"` + Containers []TargetContainer `json:"containers"` } type Keep struct { - Last int32 `json:"last,omitempty"` - Daily int32 `json:"daily,omitempty"` - Weekly int32 `json:"weekly,omitempty"` - Monthly int32 `json:"monthly,omitempty"` - Yearly int32 `json:"yearly,omitempty"` + Last int32 `json:"last"` + Daily int32 `json:"daily"` + Weekly int32 `json:"weekly"` + Monthly int32 `json:"monthly"` + Yearly int32 `json:"yearly"` } // BackupConfigurationSpec defines the desired state of BackupConfiguration type BackupConfigurationSpec struct { Repository string `json:"repository"` Image string `json:"image"` - - // +optional - Suspend *bool `json:"suspend,omitempty"` - - // +optional - Schedule string `json:"schedule,omitempty"` - // +kubebuilder:validation:MinItems=1 - Targets []Target `json:"targets"` - // +optional - Keep `json:"keep,omitempty"` + // +kubebuilder:default:=false + Suspend *bool `json:"suspend"` + Schedule string `json:"schedule"` + Keep `json:"keep"` + Targets []Target `json:"targets"` } // BackupConfigurationStatus defines the observed state 
of BackupConfiguration type BackupConfigurationStatus struct { - // INSERT ADDITIONAL STATUS FIELD - define observed state of cluster - // Important: Run "make" to regenerate code after modifying this file LastBackupTime *metav1.Time `json:"lastBackupTime,omitempty"` Suspended bool `json:"suspended"` ActiveCronJob bool `json:"activeCronJob"` ActiveSidecar bool `json:"activeSidecar"` } +//+kubebuilder:object:root=true +//+kubebuilder:subresource:status +//+kubebuilder:resource:shortName="bc" +//+kubebuilder:printcolumn:name="Suspended",type=boolean,JSONPath=`.spec.suspend` +//+kubebuilder:printcolumn:name="Schedule",type=string,JSONPath=`.spec.schedule` + // BackupConfiguration is the Schema for the backupconfigurations API -// +kubebuilder:object:root=true -// +kubebuilder:resource:shortName="bc" -// +kubebuilder:subresource:status -// +kubebuilder:printcolumn:name="Suspended",type=boolean,JSONPath=`.spec.suspend` -// +kubebuilder:printcolumn:name="Schedule",type=string,JSONPath=`.spec.schedule` type BackupConfiguration struct { metav1.TypeMeta `json:",inline"` metav1.ObjectMeta `json:"metadata,omitempty"` @@ -106,7 +101,7 @@ type BackupConfiguration struct { Status BackupConfigurationStatus `json:"status,omitempty"` } -// +kubebuilder:object:root=true +//+kubebuilder:object:root=true // BackupConfigurationList contains a list of BackupConfiguration type BackupConfigurationList struct { diff --git a/api/v1alpha1/backupconfiguration_types.go~ b/api/v1alpha1/backupconfiguration_types.go~ new file mode 100644 index 0000000..823e177 --- /dev/null +++ b/api/v1alpha1/backupconfiguration_types.go~ @@ -0,0 +1,115 @@ +/* +Copyright 2023. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package v1alpha1 + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" +) + +// +kubebuilder:validation:Enum=Deployment;StatefulSet;Pod +type TargetKind string + +const ( + Deployment TargetKind = "Deployment" + StatefulSet TargetKind = "StatefulSet" + Pod TargetKind = "Pod" +) + +// +kubebuilder:validation:Enum=Online;Snapshot;Job +type BackupType string + +const ( + SnapshotKind BackupType = "Snapshot" + OnlineKind BackupType = "Online" + JobKind BackupType = "Job" +) + +type Step struct { + Name string `json:"name"` + // +optional + Finalize *bool `json:"finalize"` +} + +type TargetContainer struct { + Name string `json:"name"` + Paths []string `json:"paths,omitempty"` + // +kubebuilder:default:=2 + Retry int `json:"retry"` + Steps []Step `json:"steps,omitempty"` +} + +type Target struct { + BackupType `json:"backupType"` + TargetKind `json:"targetKind"` + TargetName string `json:"targetName"` + Containers []TargetContainer `json:"containers"` +} + +type Keep struct { + Last int32 `json:"last"` + Daily int32 `json:"daily"` + Weekly int32 `json:"weekly"` + Monthly int32 `json:"monthly"` + Yearly int32 `json:"yearly"` +} + +// BackupConfigurationSpec defines the desired state of BackupConfiguration +type BackupConfigurationSpec struct { + Repo string `json:"repo"` + Image string `json:"image"` + // +kubebuilder:default:=false + Suspend *bool `json:"suspend"` + Schedule string `json:"schedule"` + Keep `json:"keep"` + Targets []Target `json:"targets"` +} + +// BackupConfigurationStatus defines the observed state of BackupConfiguration +type BackupConfigurationStatus struct { + LastBackupTime *metav1.Time `json:"lastBackupTime,omitempty"` + Suspended bool `json:"suspended"` + ActiveCronJob bool `json:"activeCronJob"` + ActiveSidecar bool `json:"activeSidecar"` +} + +//+kubebuilder:object:root=true +//+kubebuilder:subresource:status +//+kubebuilder:resource:shortName="bc" +//+kubebuilder:printcolumn:name="Suspended",type=boolean,JSONPath=`.spec.suspend` +//+kubebuilder:printcolumn:name="Schedule",type=string,JSONPath=`.spec.schedule` + +// BackupConfiguration is the Schema for the backupconfigurations API +type BackupConfiguration struct { + metav1.TypeMeta `json:",inline"` + metav1.ObjectMeta `json:"metadata,omitempty"` + + Spec BackupConfigurationSpec `json:"spec,omitempty"` + Status BackupConfigurationStatus `json:"status,omitempty"` +} + +//+kubebuilder:object:root=true + +// BackupConfigurationList contains a list of BackupConfiguration +type BackupConfigurationList struct { + metav1.TypeMeta `json:",inline"` + metav1.ListMeta `json:"metadata,omitempty"` + Items []BackupConfiguration `json:"items"` +} + +func init() { + SchemeBuilder.Register(&BackupConfiguration{}, &BackupConfigurationList{}) +} diff --git a/api/v1alpha1/backupsession_types.go b/api/v1alpha1/backupsession_types.go index 8e6d2f1..0650e80 100644 --- a/api/v1alpha1/backupsession_types.go +++ b/api/v1alpha1/backupsession_types.go @@ -1,5 +1,5 @@ /* - +Copyright 2023. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -21,8 +21,28 @@ import ( metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" ) -// EDIT THIS FILE! THIS IS SCAFFOLDING FOR YOU TO OWN! -// NOTE: json tags are required. Any new fields you add must have json tags for the fields to be serialized. 
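+// SessionState tracks where a backup or restore attempt is in its lifecycle: New, Initializing, Running, Waiting, Finalizing, Success, Failure or Deleted.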
+type SessionState string + +const ( + New SessionState = "New" + Init SessionState = "Initializing" + Running SessionState = "Running" + Waiting SessionState = "Waiting" + Finalize SessionState = "Finalizing" + Success SessionState = "Success" + Failure SessionState = "Failure" + Deleted SessionState = "Deleted" +) + +type TargetStatus struct { + Name string `json:"name"` + Kind string `json:"kind"` + SessionState `json:"state"` + SnapshotId string `json:"snapshotId"` + StartTime *metav1.Time `json:"startTime"` + Duration *metav1.Duration `json:"duration"` + Try int `json:"try"` +} // BackupSessionSpec defines the desired state of BackupSession type BackupSessionSpec struct { @@ -31,21 +51,15 @@ type BackupSessionSpec struct { // BackupSessionStatus defines the observed state of BackupSession type BackupSessionStatus struct { - // INSERT ADDITIONAL STATUS FIELD - define observed state of cluster - // Important: Run "make" to regenerate code after modifying this file - // +optional - SessionState `json:"state,omitempty"` - // +optional - StartTime *metav1.Time `json:"startTime,omitempty"` - // +optional - Targets []TargetStatus `json:"target,omitempty"` - // +optional - Keep string `json:"keep,omitempty"` + SessionState `json:"state"` + StartTime *metav1.Time `json:"startTime"` + Targets []TargetStatus `json:"target"` + Keep string `json:"keep"` } -// +kubebuilder:object:root=true +//+kubebuilder:object:root=true +//+kubebuilder:subresource:status // +kubebuilder:resource:shortName="bs" -// +kubebuilder:subresource:status // +kubebuilder:printcolumn:name="Ref",type=string,JSONPath=`.spec.ref.name` // +kubebuilder:printcolumn:name="State",type=string,JSONPath=`.status.state` // +kubebuilder:printcolumn:name="Started",type=string,format=date-time,JSONPath=`.status.startTime` @@ -60,7 +74,7 @@ type BackupSession struct { Status BackupSessionStatus `json:"status,omitempty"` } -// +kubebuilder:object:root=true +//+kubebuilder:object:root=true // BackupSessionList contains a list of BackupSession type BackupSessionList struct { diff --git a/api/v1alpha1/backupsession_types.go~ b/api/v1alpha1/backupsession_types.go~ new file mode 100644 index 0000000..06e0ca1 --- /dev/null +++ b/api/v1alpha1/backupsession_types.go~ @@ -0,0 +1,89 @@ +/* +Copyright 2023. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package v1alpha1 + +import ( + corev1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/types" +) + +type SessionState string + +const ( + New SessionState = "New" + Init SessionState = "Initializing" + Running SessionState = "Running" + Waiting SessionState = "Waiting" + Finalize SessionState = "Finalizing" + Success SessionState = "Success" + Failure SessionState = "Failure" + Deleted SessionState = "Deleted" +) + +type TargetStatus struct { + Name string `json:"name"` + Kind string `json:"kind"` + SessionState `json:"state"` + SnapshotId string `json:"snapshotId"` + StartTime *metav1.Time `json:"startTime"` + Duration *metav1.Duration `json:"duration"` + Try int `json:"try"` +} + +// BackupSessionSpec defines the desired state of BackupSession +type BackupSessionSpec struct { + Ref corev1.ObjectReference `json:"ref"` +} + +// BackupSessionStatus defines the observed state of BackupSession +type BackupSessionStatus struct { + SessionState `json:"state"` + StartTime *metav1.Time `json:"startTime"` + Targets []TargetStatus `json:"target"` + Keep string `json:"keep"` +} + +//+kubebuilder:object:root=true +//+kubebuilder:subresource:status +// +kubebuilder:resource:shortName="bs" +// +kubebuilder:printcolumn:name="Ref",type=string,JSONPath=`.spec.ref.name` +// +kubebuilder:printcolumn:name="State",type=string,JSONPath=`.status.state` +// +kubebuilder:printcolumn:name="Started",type=string,format=date-time,JSONPath=`.status.startTime` +// +kubebuilder:printcolumn:name="Keep",type=string,JSONPath=`.status.keep` + +// BackupSession is the Schema for the backupsessions API +type BackupSession struct { + metav1.TypeMeta `json:",inline"` + metav1.ObjectMeta `json:"metadata,omitempty"` + + Spec BackupSessionSpec `json:"spec,omitempty"` + Status BackupSessionStatus `json:"status,omitempty"` +} + +//+kubebuilder:object:root=true + +// BackupSessionList contains a list of BackupSession +type BackupSessionList struct { + metav1.TypeMeta `json:",inline"` + metav1.ListMeta `json:"metadata,omitempty"` + Items []BackupSession `json:"items"` +} + +func init() { + SchemeBuilder.Register(&BackupSession{}, &BackupSessionList{}) +} diff --git a/api/v1alpha1/common.go b/api/v1alpha1/common.go index d3b88d4..9c21ad6 100644 --- a/api/v1alpha1/common.go +++ b/api/v1alpha1/common.go @@ -1,47 +1,13 @@ package v1alpha1 -import ( - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" -) - -type SessionState string - const ( - New SessionState = "New" - Init SessionState = "Initializing" - Running SessionState = "Running" - Waiting SessionState = "Waiting" - Finalize SessionState = "Finalizing" - Success SessionState = "Success" - Failure SessionState = "Failure" - Deleted SessionState = "Deleted" - // Environment variables used by the sidecar container - RESTORE_ANNOTATION = "restore" // the name of the sidecar container SIDECARCONTAINER_NAME string = "formol" // the name of the container we backup when there are more than 1 container in the pod TARGETCONTAINER_TAG string = "FORMOL_TARGET" // Used by both the backupsession and restoresession controllers to identified the target deployment TARGET_NAME string = "TARGET_NAME" - // Used by restoresession controller - RESTORESESSION_NAMESPACE string = "RESTORESESSION_NAMESPACE" - RESTORESESSION_NAME string = "RESTORESESSION_NAME" // Used by the backupsession controller POD_NAME string = "POD_NAME" POD_NAMESPACE string = "POD_NAMESPACE" ) - -type TargetStatus struct { - Name string `json:"name"` - Kind string `json:"kind"` - // 
+optional - SessionState `json:"state,omitempty"` - // +optional - SnapshotId string `json:"snapshotId,omitempty"` - // +optional - StartTime *metav1.Time `json:"startTime,omitempty"` - // +optional - Duration *metav1.Duration `json:"duration,omitempty"` - // +optional - Try int `json:"try,omitemmpty"` -} diff --git a/api/v1alpha1/common.go~ b/api/v1alpha1/common.go~ new file mode 100644 index 0000000..ebcd0cd --- /dev/null +++ b/api/v1alpha1/common.go~ @@ -0,0 +1,6 @@ +package v1alpha1 + +const ( + SIDECARCONTAINER_NAME string = "formol" + TARGETCONTAINER_TAG string = "FORMOL_TARGET" +) diff --git a/api/v1alpha1/function_types.go b/api/v1alpha1/function_types.go index f79d058..0f281ad 100644 --- a/api/v1alpha1/function_types.go +++ b/api/v1alpha1/function_types.go @@ -1,5 +1,5 @@ /* - +Copyright 2023. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -21,20 +21,25 @@ import ( metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" ) -// EDIT THIS FILE! THIS IS SCAFFOLDING FOR YOU TO OWN! -// NOTE: json tags are required. Any new fields you add must have json tags for the fields to be serialized. +// FunctionStatus defines the observed state of Function +type FunctionStatus struct { + // INSERT ADDITIONAL STATUS FIELD - define observed state of cluster + // Important: Run "make" to regenerate code after modifying this file +} -// +kubebuilder:object:root=true +//+kubebuilder:object:root=true +//+kubebuilder:subresource:status // Function is the Schema for the functions API type Function struct { metav1.TypeMeta `json:",inline"` metav1.ObjectMeta `json:"metadata,omitempty"` - Spec corev1.Container `json:"spec"` + Spec corev1.Container `json:"spec,omitempty"` + Status FunctionStatus `json:"status,omitempty"` } -// +kubebuilder:object:root=true +//+kubebuilder:object:root=true // FunctionList contains a list of Function type FunctionList struct { diff --git a/api/v1alpha1/function_types.go~ b/api/v1alpha1/function_types.go~ new file mode 100644 index 0000000..32607dd --- /dev/null +++ b/api/v1alpha1/function_types.go~ @@ -0,0 +1,64 @@ +/* +Copyright 2023. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1alpha1 + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" +) + +// EDIT THIS FILE! THIS IS SCAFFOLDING FOR YOU TO OWN! +// NOTE: json tags are required. Any new fields you add must have json tags for the fields to be serialized. + +// FunctionSpec defines the desired state of Function +type FunctionSpec struct { + // INSERT ADDITIONAL SPEC FIELDS - desired state of cluster + // Important: Run "make" to regenerate code after modifying this file + + // Foo is an example field of Function. 
Edit function_types.go to remove/update + Foo string `json:"foo,omitempty"` +} + +// FunctionStatus defines the observed state of Function +type FunctionStatus struct { + // INSERT ADDITIONAL STATUS FIELD - define observed state of cluster + // Important: Run "make" to regenerate code after modifying this file +} + +//+kubebuilder:object:root=true +//+kubebuilder:subresource:status + +// Function is the Schema for the functions API +type Function struct { + metav1.TypeMeta `json:",inline"` + metav1.ObjectMeta `json:"metadata,omitempty"` + + Spec FunctionSpec `json:"spec,omitempty"` + Status FunctionStatus `json:"status,omitempty"` +} + +//+kubebuilder:object:root=true + +// FunctionList contains a list of Function +type FunctionList struct { + metav1.TypeMeta `json:",inline"` + metav1.ListMeta `json:"metadata,omitempty"` + Items []Function `json:"items"` +} + +func init() { + SchemeBuilder.Register(&Function{}, &FunctionList{}) +} diff --git a/api/v1alpha1/groupversion_info.go b/api/v1alpha1/groupversion_info.go index 029f41b..7d7aee0 100644 --- a/api/v1alpha1/groupversion_info.go +++ b/api/v1alpha1/groupversion_info.go @@ -1,5 +1,5 @@ /* - +Copyright 2023. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. diff --git a/api/v1alpha1/repo_types.go b/api/v1alpha1/repo_types.go index e66dea1..156d9e5 100644 --- a/api/v1alpha1/repo_types.go +++ b/api/v1alpha1/repo_types.go @@ -1,5 +1,5 @@ /* - +Copyright 2023. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -17,12 +17,14 @@ limitations under the License. package v1alpha1 import ( + "fmt" + corev1 "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "strings" ) // EDIT THIS FILE! THIS IS SCAFFOLDING FOR YOU TO OWN! // NOTE: json tags are required. Any new fields you add must have json tags for the fields to be serialized. - type S3 struct { Server string `json:"server"` Bucket string `json:"bucket"` @@ -31,26 +33,24 @@ type S3 struct { } type Backend struct { - S3 `json:"s3"` + // +optional + S3 *S3 `json:"s3,omitempty"` + // +optional + Nfs *string `json:"nfs,omitempty"` } // RepoSpec defines the desired state of Repo type RepoSpec struct { - // INSERT ADDITIONAL SPEC FIELDS - desired state of cluster - // Important: Run "make" to regenerate code after modifying this file - - // Foo is an example field of Repo. 
Edit Repo_types.go to remove/update Backend `json:"backend"` RepositorySecrets string `json:"repositorySecrets"` } // RepoStatus defines the observed state of Repo type RepoStatus struct { - // INSERT ADDITIONAL STATUS FIELD - define observed state of cluster - // Important: Run "make" to regenerate code after modifying this file } -// +kubebuilder:object:root=true +//+kubebuilder:object:root=true +//+kubebuilder:subresource:status // Repo is the Schema for the repoes API type Repo struct { @@ -61,7 +61,7 @@ type Repo struct { Status RepoStatus `json:"status,omitempty"` } -// +kubebuilder:object:root=true +//+kubebuilder:object:root=true // RepoList contains a list of Repo type RepoList struct { @@ -73,3 +73,37 @@ type RepoList struct { func init() { SchemeBuilder.Register(&Repo{}, &RepoList{}) } + +func (repo *Repo) GetResticEnv(backupConf BackupConfiguration) []corev1.EnvVar { + env := []corev1.EnvVar{} + if repo.Spec.Backend.S3 != nil { + url := fmt.Sprintf("s3:http://%s/%s/%s-%s", + repo.Spec.Backend.S3.Server, + repo.Spec.Backend.S3.Bucket, + strings.ToUpper(backupConf.Namespace), + strings.ToLower(backupConf.Name)) + env = append(env, corev1.EnvVar{ + Name: "RESTIC_REPOSITORY", + Value: url, + }) + for _, key := range []string{ + "AWS_ACCESS_KEY_ID", + "AWS_SECRET_ACCESS_KEY", + "RESTIC_PASSWORD", + } { + env = append(env, corev1.EnvVar{ + Name: key, + ValueFrom: &corev1.EnvVarSource{ + SecretKeyRef: &corev1.SecretKeySelector{ + LocalObjectReference: corev1.LocalObjectReference{ + Name: repo.Spec.RepositorySecrets, + }, + Key: key, + }, + }, + }) + } + } + + return env +} diff --git a/api/v1alpha1/repo_types.go~ b/api/v1alpha1/repo_types.go~ new file mode 100644 index 0000000..868a06c --- /dev/null +++ b/api/v1alpha1/repo_types.go~ @@ -0,0 +1,109 @@ +/* +Copyright 2023. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1alpha1 + +import ( + "fmt" + corev1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "strings" +) + +// EDIT THIS FILE! THIS IS SCAFFOLDING FOR YOU TO OWN! +// NOTE: json tags are required. Any new fields you add must have json tags for the fields to be serialized. 
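The GetResticEnv helper added to repo_types.go above maps a Repo's S3 backend and its RepositorySecrets Secret onto the environment restic needs (RESTIC_REPOSITORY plus the AWS and restic credential variables). A minimal sketch of how a controller could attach that environment to the injected backup sidecar; the helper name, image argument, and container name literal are illustrative assumptions, not part of this patch:

```go
package controllers // placement is illustrative

import (
	corev1 "k8s.io/api/core/v1"

	formolv1alpha1 "github.com/desmo999r/formol/api/v1alpha1"
)

// buildSidecar is a hypothetical helper: given the Repo and BackupConfiguration
// a controller has already fetched, it returns the backup sidecar container
// with the restic environment produced by GetResticEnv wired in.
func buildSidecar(repo formolv1alpha1.Repo, backupConf formolv1alpha1.BackupConfiguration, image string) corev1.Container {
	return corev1.Container{
		Name:  "formol", // matches the SIDECARCONTAINER_NAME constant shown earlier in this patch
		Image: image,    // the actual sidecar image is not defined in this hunk
		Env:   repo.GetResticEnv(backupConf),
	}
}
```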
+type S3 struct { + Server string `json:"server"` + Bucket string `json:"bucket"` + // +optional + Prefix string `json:"prefix,omitempty"` +} + +type Backend struct { + // +optional + S3 *S3 `json:"s3,omitempty"` + // +optional + Nfs *string `json:"nfs,omitempty"` +} + +// RepoSpec defines the desired state of Repo +type RepoSpec struct { + Backend `json:"backend"` + RepositorySecrets string `json:"repositorySecrets"` +} + +// RepoStatus defines the observed state of Repo +type RepoStatus struct { +} + +//+kubebuilder:object:root=true +//+kubebuilder:subresource:status + +// Repo is the Schema for the repoes API +type Repo struct { + metav1.TypeMeta `json:",inline"` + metav1.ObjectMeta `json:"metadata,omitempty"` + + Spec RepoSpec `json:"spec,omitempty"` + Status RepoStatus `json:"status,omitempty"` +} + +//+kubebuilder:object:root=true + +// RepoList contains a list of Repo +type RepoList struct { + metav1.TypeMeta `json:",inline"` + metav1.ListMeta `json:"metadata,omitempty"` + Items []Repo `json:"items"` +} + +func init() { + SchemeBuilder.Register(&Repo{}, &RepoList{}) +} + +func (repo *Repo) GetResticEnv(backupConf BackupConfiguration) []corev1.EnvVar { + env := []corev1.EnvVar{} + if repo.Spec.Backend.S3 { + url := fmt.Sprintf("s3:http://%s/%s/%s-%s", + repo.Spec.Backend.S3.Server, + repo.Spec.Backend.S3.Bucket, + strings.ToUpper(backupConf.Namespace), + stringsToLower(backupConf.Name)) + env = append(env, corev1.EnvVar{ + Name: "RESTIC_REPOSITORY", + Value: url, + }) + for _, key := range []string{ + "AWS_ACCESS_KEY_ID", + "AWS_SECRET_ACCESS_KEY", + "RESTIC_PASSWORD", + } { + env = append(env, corev1.EnvVar{ + Name: key, + ValueFrom: &corev1.EnvVarSource{ + SecretKeyRef: &corev1.SecretKeySelector{ + LocalObjectReference: corev1.LocalObjectReference{ + Name: repo.Spec.RepositorySecrets, + }, + Key: key, + }, + }, + }) + } + } + + return env +} diff --git a/api/v1alpha1/restoresession_types.go b/api/v1alpha1/restoresession_types.go index 00aa941..462bd3c 100644 --- a/api/v1alpha1/restoresession_types.go +++ b/api/v1alpha1/restoresession_types.go @@ -1,5 +1,5 @@ /* - +Copyright 2023. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -17,41 +17,29 @@ limitations under the License. package v1alpha1 import ( - corev1 "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - //"k8s.io/apimachinery/pkg/types" ) -type BackupSessionRef struct { - // +optional - Ref corev1.ObjectReference `json:"ref,omitempty"` - // +optional - Spec BackupSessionSpec `json:"spec,omitempty"` - // +optional - Status BackupSessionStatus `json:"status,omitempty"` -} +// EDIT THIS FILE! THIS IS SCAFFOLDING FOR YOU TO OWN! +// NOTE: json tags are required. Any new fields you add must have json tags for the fields to be serialized. // RestoreSessionSpec defines the desired state of RestoreSession type RestoreSessionSpec struct { - BackupSessionRef `json:"backupSession"` - //Ref string `json:"backupSessionRef"` - // +optional - //Targets []TargetStatus `json:"target,omitempty"` + // INSERT ADDITIONAL SPEC FIELDS - desired state of cluster + // Important: Run "make" to regenerate code after modifying this file + + // Foo is an example field of RestoreSession. 
Edit restoresession_types.go to remove/update + Foo string `json:"foo,omitempty"` } // RestoreSessionStatus defines the observed state of RestoreSession type RestoreSessionStatus struct { - // +optional - SessionState `json:"state,omitempty"` - // +optional - StartTime *metav1.Time `json:"startTime,omitempty"` - // +optional - Targets []TargetStatus `json:"target,omitempty"` + // INSERT ADDITIONAL STATUS FIELD - define observed state of cluster + // Important: Run "make" to regenerate code after modifying this file } -// +kubebuilder:object:root=true -// +kubebuilder:resource:shortName="rs" -// +kubebuilder:subresource:status +//+kubebuilder:object:root=true +//+kubebuilder:subresource:status // RestoreSession is the Schema for the restoresessions API type RestoreSession struct { @@ -62,7 +50,7 @@ type RestoreSession struct { Status RestoreSessionStatus `json:"status,omitempty"` } -// +kubebuilder:object:root=true +//+kubebuilder:object:root=true // RestoreSessionList contains a list of RestoreSession type RestoreSessionList struct { diff --git a/api/v1alpha1/zz_generated.deepcopy.go b/api/v1alpha1/zz_generated.deepcopy.go index c96ed6f..3cd2fac 100644 --- a/api/v1alpha1/zz_generated.deepcopy.go +++ b/api/v1alpha1/zz_generated.deepcopy.go @@ -1,7 +1,8 @@ +//go:build !ignore_autogenerated // +build !ignore_autogenerated /* - +Copyright 2023. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -21,15 +22,23 @@ limitations under the License. package v1alpha1 import ( - "k8s.io/api/core/v1" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/apis/meta/v1" runtime "k8s.io/apimachinery/pkg/runtime" ) // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *Backend) DeepCopyInto(out *Backend) { *out = *in - out.S3 = in.S3 + if in.S3 != nil { + in, out := &in.S3, &out.S3 + *out = new(S3) + **out = **in + } + if in.Nfs != nil { + in, out := &in.Nfs, &out.Nfs + *out = new(string) + **out = **in + } } // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Backend. @@ -109,6 +118,7 @@ func (in *BackupConfigurationSpec) DeepCopyInto(out *BackupConfigurationSpec) { *out = new(bool) **out = **in } + out.Keep = in.Keep if in.Targets != nil { in, out := &in.Targets, &out.Targets *out = make([]Target, len(*in)) @@ -116,7 +126,6 @@ func (in *BackupConfigurationSpec) DeepCopyInto(out *BackupConfigurationSpec) { (*in)[i].DeepCopyInto(&(*out)[i]) } } - out.Keep = in.Keep } // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new BackupConfigurationSpec. @@ -207,24 +216,6 @@ func (in *BackupSessionList) DeepCopyObject() runtime.Object { return nil } -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *BackupSessionRef) DeepCopyInto(out *BackupSessionRef) { - *out = *in - out.Ref = in.Ref - out.Spec = in.Spec - in.Status.DeepCopyInto(&out.Status) -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new BackupSessionRef. -func (in *BackupSessionRef) DeepCopy() *BackupSessionRef { - if in == nil { - return nil - } - out := new(BackupSessionRef) - in.DeepCopyInto(out) - return out -} - // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
func (in *BackupSessionSpec) DeepCopyInto(out *BackupSessionSpec) { *out = *in @@ -273,6 +264,7 @@ func (in *Function) DeepCopyInto(out *Function) { out.TypeMeta = in.TypeMeta in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) in.Spec.DeepCopyInto(&out.Spec) + out.Status = in.Status } // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Function. @@ -326,21 +318,16 @@ func (in *FunctionList) DeepCopyObject() runtime.Object { } // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *Hook) DeepCopyInto(out *Hook) { +func (in *FunctionStatus) DeepCopyInto(out *FunctionStatus) { *out = *in - if in.Args != nil { - in, out := &in.Args, &out.Args - *out = make([]string, len(*in)) - copy(*out, *in) - } } -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Hook. -func (in *Hook) DeepCopy() *Hook { +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new FunctionStatus. +func (in *FunctionStatus) DeepCopy() *FunctionStatus { if in == nil { return nil } - out := new(Hook) + out := new(FunctionStatus) in.DeepCopyInto(out) return out } @@ -365,7 +352,7 @@ func (in *Repo) DeepCopyInto(out *Repo) { *out = *in out.TypeMeta = in.TypeMeta in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) - out.Spec = in.Spec + in.Spec.DeepCopyInto(&out.Spec) out.Status = in.Status } @@ -422,7 +409,7 @@ func (in *RepoList) DeepCopyObject() runtime.Object { // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *RepoSpec) DeepCopyInto(out *RepoSpec) { *out = *in - out.Backend = in.Backend + in.Backend.DeepCopyInto(&out.Backend) } // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RepoSpec. @@ -455,8 +442,8 @@ func (in *RestoreSession) DeepCopyInto(out *RestoreSession) { *out = *in out.TypeMeta = in.TypeMeta in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) - in.Spec.DeepCopyInto(&out.Spec) - in.Status.DeepCopyInto(&out.Status) + out.Spec = in.Spec + out.Status = in.Status } // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RestoreSession. @@ -512,7 +499,6 @@ func (in *RestoreSessionList) DeepCopyObject() runtime.Object { // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *RestoreSessionSpec) DeepCopyInto(out *RestoreSessionSpec) { *out = *in - in.BackupSessionRef.DeepCopyInto(&out.BackupSessionRef) } // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RestoreSessionSpec. @@ -528,17 +514,6 @@ func (in *RestoreSessionSpec) DeepCopy() *RestoreSessionSpec { // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *RestoreSessionStatus) DeepCopyInto(out *RestoreSessionStatus) { *out = *in - if in.StartTime != nil { - in, out := &in.StartTime, &out.StartTime - *out = (*in).DeepCopy() - } - if in.Targets != nil { - in, out := &in.Targets, &out.Targets - *out = make([]TargetStatus, len(*in)) - for i := range *in { - (*in)[i].DeepCopyInto(&(*out)[i]) - } - } } // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RestoreSessionStatus. @@ -589,21 +564,9 @@ func (in *Step) DeepCopy() *Step { // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
func (in *Target) DeepCopyInto(out *Target) { *out = *in - if in.VolumeMounts != nil { - in, out := &in.VolumeMounts, &out.VolumeMounts - *out = make([]v1.VolumeMount, len(*in)) - for i := range *in { - (*in)[i].DeepCopyInto(&(*out)[i]) - } - } - if in.Paths != nil { - in, out := &in.Paths, &out.Paths - *out = make([]string, len(*in)) - copy(*out, *in) - } - if in.Steps != nil { - in, out := &in.Steps, &out.Steps - *out = make([]Step, len(*in)) + if in.Containers != nil { + in, out := &in.Containers, &out.Containers + *out = make([]TargetContainer, len(*in)) for i := range *in { (*in)[i].DeepCopyInto(&(*out)[i]) } @@ -620,6 +583,33 @@ func (in *Target) DeepCopy() *Target { return out } +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *TargetContainer) DeepCopyInto(out *TargetContainer) { + *out = *in + if in.Paths != nil { + in, out := &in.Paths, &out.Paths + *out = make([]string, len(*in)) + copy(*out, *in) + } + if in.Steps != nil { + in, out := &in.Steps, &out.Steps + *out = make([]Step, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TargetContainer. +func (in *TargetContainer) DeepCopy() *TargetContainer { + if in == nil { + return nil + } + out := new(TargetContainer) + in.DeepCopyInto(out) + return out +} + // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *TargetStatus) DeepCopyInto(out *TargetStatus) { *out = *in @@ -629,7 +619,7 @@ func (in *TargetStatus) DeepCopyInto(out *TargetStatus) { } if in.Duration != nil { in, out := &in.Duration, &out.Duration - *out = new(metav1.Duration) + *out = new(v1.Duration) **out = **in } } diff --git a/config/certmanager/certificate.yaml b/config/certmanager/certificate.yaml deleted file mode 100644 index 58db114..0000000 --- a/config/certmanager/certificate.yaml +++ /dev/null @@ -1,26 +0,0 @@ -# The following manifests contain a self-signed issuer CR and a certificate CR. 
-# More document can be found at https://docs.cert-manager.io -# WARNING: Targets CertManager 0.11 check https://docs.cert-manager.io/en/latest/tasks/upgrading/index.html for -# breaking changes -apiVersion: cert-manager.io/v1alpha2 -kind: Issuer -metadata: - name: selfsigned-issuer - namespace: system -spec: - selfSigned: {} ---- -apiVersion: cert-manager.io/v1alpha2 -kind: Certificate -metadata: - name: serving-cert # this name should match the one appeared in kustomizeconfig.yaml - namespace: system -spec: - # $(SERVICE_NAME) and $(SERVICE_NAMESPACE) will be substituted by kustomize - dnsNames: - - $(SERVICE_NAME).$(SERVICE_NAMESPACE).svc - - $(SERVICE_NAME).$(SERVICE_NAMESPACE).svc.cluster.local - issuerRef: - kind: Issuer - name: selfsigned-issuer - secretName: webhook-server-cert # this secret will not be prefixed, since it's not managed by kustomize diff --git a/config/certmanager/kustomization.yaml b/config/certmanager/kustomization.yaml deleted file mode 100644 index bebea5a..0000000 --- a/config/certmanager/kustomization.yaml +++ /dev/null @@ -1,5 +0,0 @@ -resources: -- certificate.yaml - -configurations: -- kustomizeconfig.yaml diff --git a/config/certmanager/kustomizeconfig.yaml b/config/certmanager/kustomizeconfig.yaml deleted file mode 100644 index 90d7c31..0000000 --- a/config/certmanager/kustomizeconfig.yaml +++ /dev/null @@ -1,16 +0,0 @@ -# This configuration is for teaching kustomize how to update name ref and var substitution -nameReference: -- kind: Issuer - group: cert-manager.io - fieldSpecs: - - kind: Certificate - group: cert-manager.io - path: spec/issuerRef/name - -varReference: -- kind: Certificate - group: cert-manager.io - path: spec/commonName -- kind: Certificate - group: cert-manager.io - path: spec/dnsNames diff --git a/config/crd/kustomization.yaml b/config/crd/kustomization.yaml index d08e34e..151eb0e 100644 --- a/config/crd/kustomization.yaml +++ b/config/crd/kustomization.yaml @@ -2,32 +2,31 @@ # since it depends on service name and namespace that are out of this kustomize package. # It should be run by config/default resources: -- bases/formol.desmojim.fr_functions.yaml - bases/formol.desmojim.fr_backupconfigurations.yaml -- bases/formol.desmojim.fr_backupsessions.yaml +- bases/formol.desmojim.fr_functions.yaml - bases/formol.desmojim.fr_repoes.yaml +- bases/formol.desmojim.fr_backupsessions.yaml - bases/formol.desmojim.fr_restoresessions.yaml -# +kubebuilder:scaffold:crdkustomizeresource +#+kubebuilder:scaffold:crdkustomizeresource patchesStrategicMerge: # [WEBHOOK] To enable webhook, uncomment all the sections with [WEBHOOK] prefix. # patches here are for enabling the conversion webhook for each CRD -#- patches/webhook_in_tasks.yaml -#- patches/webhook_in_functions.yaml #- patches/webhook_in_backupconfigurations.yaml -#- patches/webhook_in_backupsessions.yaml +#- patches/webhook_in_functions.yaml #- patches/webhook_in_repoes.yaml +#- patches/webhook_in_backupsessions.yaml #- patches/webhook_in_restoresessions.yaml -# +kubebuilder:scaffold:crdkustomizewebhookpatch +#+kubebuilder:scaffold:crdkustomizewebhookpatch -# [CERTMANAGER] To enable webhook, uncomment all the sections with [CERTMANAGER] prefix. +# [CERTMANAGER] To enable cert-manager, uncomment all the sections with [CERTMANAGER] prefix. 
# patches here are for enabling the CA injection for each CRD -#- patches/cainjection_in_functions.yaml #- patches/cainjection_in_backupconfigurations.yaml -#- patches/cainjection_in_backupsessions.yaml +#- patches/cainjection_in_functions.yaml #- patches/cainjection_in_repoes.yaml +#- patches/cainjection_in_backupsessions.yaml #- patches/cainjection_in_restoresessions.yaml -# +kubebuilder:scaffold:crdkustomizecainjectionpatch +#+kubebuilder:scaffold:crdkustomizecainjectionpatch # the following config is for teaching kustomize how to do kustomization for CRDs. configurations: diff --git a/config/crd/kustomizeconfig.yaml b/config/crd/kustomizeconfig.yaml index 6f83d9a..ec5c150 100644 --- a/config/crd/kustomizeconfig.yaml +++ b/config/crd/kustomizeconfig.yaml @@ -4,13 +4,15 @@ nameReference: version: v1 fieldSpecs: - kind: CustomResourceDefinition + version: v1 group: apiextensions.k8s.io - path: spec/conversion/webhookClientConfig/service/name + path: spec/conversion/webhook/clientConfig/service/name namespace: - kind: CustomResourceDefinition + version: v1 group: apiextensions.k8s.io - path: spec/conversion/webhookClientConfig/service/namespace + path: spec/conversion/webhook/clientConfig/service/namespace create: false varReference: diff --git a/config/crd/patches/cainjection_in_backupconfigurations.yaml b/config/crd/patches/cainjection_in_backupconfigurations.yaml index ba16473..30c2d80 100644 --- a/config/crd/patches/cainjection_in_backupconfigurations.yaml +++ b/config/crd/patches/cainjection_in_backupconfigurations.yaml @@ -1,5 +1,4 @@ # The following patch adds a directive for certmanager to inject CA into the CRD -# CRD conversion requires k8s 1.13 or later. apiVersion: apiextensions.k8s.io/v1 kind: CustomResourceDefinition metadata: diff --git a/config/crd/patches/cainjection_in_backupsessions.yaml b/config/crd/patches/cainjection_in_backupsessions.yaml index f395951..d89ee98 100644 --- a/config/crd/patches/cainjection_in_backupsessions.yaml +++ b/config/crd/patches/cainjection_in_backupsessions.yaml @@ -1,5 +1,4 @@ # The following patch adds a directive for certmanager to inject CA into the CRD -# CRD conversion requires k8s 1.13 or later. apiVersion: apiextensions.k8s.io/v1 kind: CustomResourceDefinition metadata: diff --git a/config/crd/patches/cainjection_in_functions.yaml b/config/crd/patches/cainjection_in_functions.yaml index c8c1091..faa8295 100644 --- a/config/crd/patches/cainjection_in_functions.yaml +++ b/config/crd/patches/cainjection_in_functions.yaml @@ -1,5 +1,4 @@ # The following patch adds a directive for certmanager to inject CA into the CRD -# CRD conversion requires k8s 1.13 or later. 
apiVersion: apiextensions.k8s.io/v1 kind: CustomResourceDefinition metadata: diff --git a/config/crd/patches/cainjection_in_repoes.yaml b/config/crd/patches/cainjection_in_repoes.yaml new file mode 100644 index 0000000..c8dd2d8 --- /dev/null +++ b/config/crd/patches/cainjection_in_repoes.yaml @@ -0,0 +1,7 @@ +# The following patch adds a directive for certmanager to inject CA into the CRD +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + annotations: + cert-manager.io/inject-ca-from: $(CERTIFICATE_NAMESPACE)/$(CERTIFICATE_NAME) + name: repoes.formol.desmojim.fr diff --git a/config/crd/patches/cainjection_in_restoresessions.yaml b/config/crd/patches/cainjection_in_restoresessions.yaml index cfed67d..d8747fa 100644 --- a/config/crd/patches/cainjection_in_restoresessions.yaml +++ b/config/crd/patches/cainjection_in_restoresessions.yaml @@ -1,8 +1,7 @@ # The following patch adds a directive for certmanager to inject CA into the CRD -# CRD conversion requires k8s 1.13 or later. apiVersion: apiextensions.k8s.io/v1 kind: CustomResourceDefinition metadata: annotations: cert-manager.io/inject-ca-from: $(CERTIFICATE_NAMESPACE)/$(CERTIFICATE_NAME) - name: restoresessions.formol.desmojim.fr.desmojim.fr + name: restoresessions.formol.desmojim.fr diff --git a/config/crd/patches/webhook_in_backupconfigurations.yaml b/config/crd/patches/webhook_in_backupconfigurations.yaml index e08ff07..c882396 100644 --- a/config/crd/patches/webhook_in_backupconfigurations.yaml +++ b/config/crd/patches/webhook_in_backupconfigurations.yaml @@ -1,20 +1,16 @@ -# The following patch enables conversion webhook for CRD -# CRD conversion requires k8s 1.13 or later. +# The following patch enables a conversion webhook for the CRD apiVersion: apiextensions.k8s.io/v1 kind: CustomResourceDefinition metadata: name: backupconfigurations.formol.desmojim.fr spec: - preserveUnknownFields: false conversion: strategy: Webhook webhook: - conversionReviewVersions: ["v1", "v1beta1", "v1alpha1"] clientConfig: - # this is "\n" used as a placeholder, otherwise it will be rejected by the apiserver for being blank, - # but we're going to set it later using the cert-manager (or potentially a patch if not using cert-manager) - caBundle: Cg== service: namespace: system name: webhook-service path: /convert + conversionReviewVersions: + - v1 diff --git a/config/crd/patches/webhook_in_backupsessions.yaml b/config/crd/patches/webhook_in_backupsessions.yaml index 7ae00b1..1b94114 100644 --- a/config/crd/patches/webhook_in_backupsessions.yaml +++ b/config/crd/patches/webhook_in_backupsessions.yaml @@ -1,18 +1,16 @@ -# The following patch enables conversion webhook for CRD -# CRD conversion requires k8s 1.13 or later. 
+# The following patch enables a conversion webhook for the CRD apiVersion: apiextensions.k8s.io/v1 kind: CustomResourceDefinition metadata: name: backupsessions.formol.desmojim.fr spec: - preserveUnknownFields: false conversion: strategy: Webhook - webhookClientConfig: - # this is "\n" used as a placeholder, otherwise it will be rejected by the apiserver for being blank, - # but we're going to set it later using the cert-manager (or potentially a patch if not using cert-manager) - caBundle: Cg== - service: - namespace: system - name: webhook-service - path: /convert + webhook: + clientConfig: + service: + namespace: system + name: webhook-service + path: /convert + conversionReviewVersions: + - v1 diff --git a/config/crd/patches/webhook_in_functions.yaml b/config/crd/patches/webhook_in_functions.yaml index e969e6f..0e4e73b 100644 --- a/config/crd/patches/webhook_in_functions.yaml +++ b/config/crd/patches/webhook_in_functions.yaml @@ -1,5 +1,4 @@ -# The following patch enables conversion webhook for CRD -# CRD conversion requires k8s 1.13 or later. +# The following patch enables a conversion webhook for the CRD apiVersion: apiextensions.k8s.io/v1 kind: CustomResourceDefinition metadata: @@ -7,11 +6,11 @@ metadata: spec: conversion: strategy: Webhook - webhookClientConfig: - # this is "\n" used as a placeholder, otherwise it will be rejected by the apiserver for being blank, - # but we're going to set it later using the cert-manager (or potentially a patch if not using cert-manager) - caBundle: Cg== - service: - namespace: system - name: webhook-service - path: /convert + webhook: + clientConfig: + service: + namespace: system + name: webhook-service + path: /convert + conversionReviewVersions: + - v1 diff --git a/config/crd/patches/webhook_in_repoes.yaml b/config/crd/patches/webhook_in_repoes.yaml new file mode 100644 index 0000000..898f6c1 --- /dev/null +++ b/config/crd/patches/webhook_in_repoes.yaml @@ -0,0 +1,16 @@ +# The following patch enables a conversion webhook for the CRD +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + name: repoes.formol.desmojim.fr +spec: + conversion: + strategy: Webhook + webhook: + clientConfig: + service: + namespace: system + name: webhook-service + path: /convert + conversionReviewVersions: + - v1 diff --git a/config/crd/patches/webhook_in_restoresessions.yaml b/config/crd/patches/webhook_in_restoresessions.yaml index 1dc3e58..fa17921 100644 --- a/config/crd/patches/webhook_in_restoresessions.yaml +++ b/config/crd/patches/webhook_in_restoresessions.yaml @@ -1,17 +1,16 @@ -# The following patch enables conversion webhook for CRD -# CRD conversion requires k8s 1.13 or later. 
+# The following patch enables a conversion webhook for the CRD apiVersion: apiextensions.k8s.io/v1 kind: CustomResourceDefinition metadata: - name: restoresessions.formol.desmojim.fr.desmojim.fr + name: restoresessions.formol.desmojim.fr spec: conversion: strategy: Webhook - webhookClientConfig: - # this is "\n" used as a placeholder, otherwise it will be rejected by the apiserver for being blank, - # but we're going to set it later using the cert-manager (or potentially a patch if not using cert-manager) - caBundle: Cg== - service: - namespace: system - name: webhook-service - path: /convert + webhook: + clientConfig: + service: + namespace: system + name: webhook-service + path: /convert + conversionReviewVersions: + - v1 diff --git a/config/default/kustomization.yaml b/config/default/kustomization.yaml index dbee156..961519a 100644 --- a/config/default/kustomization.yaml +++ b/config/default/kustomization.yaml @@ -16,21 +16,23 @@ bases: - ../crd - ../rbac - ../manager -# [WEBHOOK] To enable webhook, uncomment all the sections with [WEBHOOK] prefix including the one in +# [WEBHOOK] To enable webhook, uncomment all the sections with [WEBHOOK] prefix including the one in # crd/kustomization.yaml #- ../webhook # [CERTMANAGER] To enable cert-manager, uncomment all sections with 'CERTMANAGER'. 'WEBHOOK' components are required. #- ../certmanager -# [PROMETHEUS] To enable prometheus monitor, uncomment all sections with 'PROMETHEUS'. -- ../prometheus +# [PROMETHEUS] To enable prometheus monitor, uncomment all sections with 'PROMETHEUS'. +#- ../prometheus patchesStrategicMerge: - # Protect the /metrics endpoint by putting it behind auth. - # If you want your controller-manager to expose the /metrics - # endpoint w/o any authn/z, please comment the following line. +# Protect the /metrics endpoint by putting it behind auth. +# If you want your controller-manager to expose the /metrics +# endpoint w/o any authn/z, please comment the following line. - manager_auth_proxy_patch.yaml -# [WEBHOOK] To enable webhook, uncomment all the sections with [WEBHOOK] prefix including the one in + + +# [WEBHOOK] To enable webhook, uncomment all the sections with [WEBHOOK] prefix including the one in # crd/kustomization.yaml #- manager_webhook_patch.yaml @@ -46,7 +48,7 @@ vars: # objref: # kind: Certificate # group: cert-manager.io -# version: v1alpha2 +# version: v1 # name: serving-cert # this name should match the one in certificate.yaml # fieldref: # fieldpath: metadata.namespace @@ -54,7 +56,7 @@ vars: # objref: # kind: Certificate # group: cert-manager.io -# version: v1alpha2 +# version: v1 # name: serving-cert # this name should match the one in certificate.yaml #- name: SERVICE_NAMESPACE # namespace of the service # objref: diff --git a/config/default/manager_auth_proxy_patch.yaml b/config/default/manager_auth_proxy_patch.yaml index e44a8d4..b751266 100644 --- a/config/default/manager_auth_proxy_patch.yaml +++ b/config/default/manager_auth_proxy_patch.yaml @@ -1,4 +1,4 @@ -# This patch inject a sidecar container which is a HTTP proxy for the +# This patch inject a sidecar container which is a HTTP proxy for the # controller manager, it performs RBAC authorization against the Kubernetes API using SubjectAccessReviews. 
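The auth-proxy patch below also switches the manager to the kubebuilder v3 flag names (--health-probe-bind-address, --metrics-bind-address, --leader-elect). A rough sketch of how those flags are typically consumed in main.go with controller-runtime; the project's actual main.go is not part of this hunk, and the leader-election ID is a placeholder:

```go
package main

import (
	"flag"

	ctrl "sigs.k8s.io/controller-runtime"
)

func main() {
	// Flag names mirror the manager arguments set in manager_auth_proxy_patch.yaml.
	var metricsAddr, probeAddr string
	var enableLeaderElection bool
	flag.StringVar(&metricsAddr, "metrics-bind-address", ":8080", "The address the metrics endpoint binds to.")
	flag.StringVar(&probeAddr, "health-probe-bind-address", ":8081", "The address the health probe endpoint binds to.")
	flag.BoolVar(&enableLeaderElection, "leader-elect", false, "Enable leader election for the controller manager.")
	flag.Parse()

	mgr, err := ctrl.NewManager(ctrl.GetConfigOrDie(), ctrl.Options{
		MetricsBindAddress:     metricsAddr,
		HealthProbeBindAddress: probeAddr,
		LeaderElection:         enableLeaderElection,
		LeaderElectionID:       "formol-leader-election", // placeholder ID, not taken from this patch
	})
	if err != nil {
		panic(err)
	}
	if err := mgr.Start(ctrl.SetupSignalHandler()); err != nil {
		panic(err)
	}
}
```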
apiVersion: apps/v1 kind: Deployment @@ -8,18 +8,48 @@ metadata: spec: template: spec: + affinity: + nodeAffinity: + requiredDuringSchedulingIgnoredDuringExecution: + nodeSelectorTerms: + - matchExpressions: + - key: kubernetes.io/arch + operator: In + values: + - amd64 + - arm64 + - ppc64le + - s390x + - key: kubernetes.io/os + operator: In + values: + - linux containers: - name: kube-rbac-proxy - image: quay.io/brancz/kube-rbac-proxy:v0.8.0-arm + securityContext: + allowPrivilegeEscalation: false + capabilities: + drop: + - "ALL" + image: gcr.io/kubebuilder/kube-rbac-proxy:v0.13.1 args: - "--secure-listen-address=0.0.0.0:8443" - "--upstream=http://127.0.0.1:8080/" - "--logtostderr=true" - - "--v=10" + - "--v=0" ports: - containerPort: 8443 + protocol: TCP name: https + resources: + limits: + cpu: 500m + memory: 128Mi + requests: + cpu: 5m + memory: 64Mi - name: manager args: - - "--metrics-addr=127.0.0.1:8080" - - "--enable-leader-election" + - "--health-probe-bind-address=:8081" + - "--metrics-bind-address=127.0.0.1:8080" + - "--leader-elect" diff --git a/config/default/manager_config_patch.yaml b/config/default/manager_config_patch.yaml new file mode 100644 index 0000000..f6f5891 --- /dev/null +++ b/config/default/manager_config_patch.yaml @@ -0,0 +1,10 @@ +apiVersion: apps/v1 +kind: Deployment +metadata: + name: controller-manager + namespace: system +spec: + template: + spec: + containers: + - name: manager diff --git a/config/default/manager_webhook_patch.yaml b/config/default/manager_webhook_patch.yaml deleted file mode 100644 index 738de35..0000000 --- a/config/default/manager_webhook_patch.yaml +++ /dev/null @@ -1,23 +0,0 @@ -apiVersion: apps/v1 -kind: Deployment -metadata: - name: controller-manager - namespace: system -spec: - template: - spec: - containers: - - name: manager - ports: - - containerPort: 9443 - name: webhook-server - protocol: TCP - volumeMounts: - - mountPath: /tmp/k8s-webhook-server/serving-certs - name: cert - readOnly: true - volumes: - - name: cert - secret: - defaultMode: 420 - secretName: webhook-server-cert diff --git a/config/default/webhookcainjection_patch.yaml b/config/default/webhookcainjection_patch.yaml deleted file mode 100644 index 7e79bf9..0000000 --- a/config/default/webhookcainjection_patch.yaml +++ /dev/null @@ -1,15 +0,0 @@ -# This patch add annotation to admission webhook config and -# the variables $(CERTIFICATE_NAMESPACE) and $(CERTIFICATE_NAME) will be substituted by kustomize. 
-apiVersion: admissionregistration.k8s.io/v1beta1 -kind: MutatingWebhookConfiguration -metadata: - name: mutating-webhook-configuration - annotations: - cert-manager.io/inject-ca-from: $(CERTIFICATE_NAMESPACE)/$(CERTIFICATE_NAME) ---- -apiVersion: admissionregistration.k8s.io/v1beta1 -kind: ValidatingWebhookConfiguration -metadata: - name: validating-webhook-configuration - annotations: - cert-manager.io/inject-ca-from: $(CERTIFICATE_NAMESPACE)/$(CERTIFICATE_NAME) diff --git a/config/manager/kustomization.yaml b/config/manager/kustomization.yaml deleted file mode 100644 index 881467f..0000000 --- a/config/manager/kustomization.yaml +++ /dev/null @@ -1,8 +0,0 @@ -resources: -- manager.yaml -apiVersion: kustomize.config.k8s.io/v1beta1 -kind: Kustomization -images: -- name: controller - newName: desmo999r/formolcontroller - newTag: 0.3.0 diff --git a/config/manager/manager.yaml b/config/manager/manager.yaml deleted file mode 100644 index b6c85a5..0000000 --- a/config/manager/manager.yaml +++ /dev/null @@ -1,39 +0,0 @@ -apiVersion: v1 -kind: Namespace -metadata: - labels: - control-plane: controller-manager - name: system ---- -apiVersion: apps/v1 -kind: Deployment -metadata: - name: controller-manager - namespace: system - labels: - control-plane: controller-manager -spec: - selector: - matchLabels: - control-plane: controller-manager - replicas: 1 - template: - metadata: - labels: - control-plane: controller-manager - spec: - containers: - - command: - - /manager - args: - - --enable-leader-election - image: controller:latest - name: manager - resources: - limits: - cpu: 100m - memory: 30Mi - requests: - cpu: 100m - memory: 20Mi - terminationGracePeriodSeconds: 10 diff --git a/config/prometheus/kustomization.yaml b/config/prometheus/kustomization.yaml deleted file mode 100644 index ed13716..0000000 --- a/config/prometheus/kustomization.yaml +++ /dev/null @@ -1,2 +0,0 @@ -resources: -- monitor.yaml diff --git a/config/prometheus/monitor.yaml b/config/prometheus/monitor.yaml deleted file mode 100644 index 9b8047b..0000000 --- a/config/prometheus/monitor.yaml +++ /dev/null @@ -1,16 +0,0 @@ - -# Prometheus Monitor Service (Metrics) -apiVersion: monitoring.coreos.com/v1 -kind: ServiceMonitor -metadata: - labels: - control-plane: controller-manager - name: controller-manager-metrics-monitor - namespace: system -spec: - endpoints: - - path: /metrics - port: https - selector: - matchLabels: - control-plane: controller-manager diff --git a/config/rbac/auth_proxy_client_clusterrole.yaml b/config/rbac/auth_proxy_client_clusterrole.yaml deleted file mode 100644 index 7d62534..0000000 --- a/config/rbac/auth_proxy_client_clusterrole.yaml +++ /dev/null @@ -1,7 +0,0 @@ -apiVersion: rbac.authorization.k8s.io/v1beta1 -kind: ClusterRole -metadata: - name: metrics-reader -rules: -- nonResourceURLs: ["/metrics"] - verbs: ["get"] diff --git a/config/rbac/auth_proxy_role.yaml b/config/rbac/auth_proxy_role.yaml deleted file mode 100644 index 618f5e4..0000000 --- a/config/rbac/auth_proxy_role.yaml +++ /dev/null @@ -1,13 +0,0 @@ -apiVersion: rbac.authorization.k8s.io/v1 -kind: ClusterRole -metadata: - name: proxy-role -rules: -- apiGroups: ["authentication.k8s.io"] - resources: - - tokenreviews - verbs: ["create"] -- apiGroups: ["authorization.k8s.io"] - resources: - - subjectaccessreviews - verbs: ["create"] diff --git a/config/rbac/auth_proxy_role_binding.yaml b/config/rbac/auth_proxy_role_binding.yaml deleted file mode 100644 index 48ed1e4..0000000 --- a/config/rbac/auth_proxy_role_binding.yaml +++ /dev/null @@ 
-1,12 +0,0 @@ -apiVersion: rbac.authorization.k8s.io/v1 -kind: ClusterRoleBinding -metadata: - name: proxy-rolebinding -roleRef: - apiGroup: rbac.authorization.k8s.io - kind: ClusterRole - name: proxy-role -subjects: -- kind: ServiceAccount - name: default - namespace: system diff --git a/config/rbac/auth_proxy_service.yaml b/config/rbac/auth_proxy_service.yaml deleted file mode 100644 index 6cf656b..0000000 --- a/config/rbac/auth_proxy_service.yaml +++ /dev/null @@ -1,14 +0,0 @@ -apiVersion: v1 -kind: Service -metadata: - labels: - control-plane: controller-manager - name: controller-manager-metrics-service - namespace: system -spec: - ports: - - name: https - port: 8443 - targetPort: https - selector: - control-plane: controller-manager diff --git a/config/rbac/backupconfiguration_editor_role.yaml b/config/rbac/backupconfiguration_editor_role.yaml deleted file mode 100644 index 423efa0..0000000 --- a/config/rbac/backupconfiguration_editor_role.yaml +++ /dev/null @@ -1,24 +0,0 @@ -# permissions for end users to edit backupconfigurations. -apiVersion: rbac.authorization.k8s.io/v1 -kind: ClusterRole -metadata: - name: backupconfiguration-editor-role -rules: -- apiGroups: - - formol.desmojim.fr - resources: - - backupconfigurations - verbs: - - create - - delete - - get - - list - - patch - - update - - watch -- apiGroups: - - formol.desmojim.fr - resources: - - backupconfigurations/status - verbs: - - get diff --git a/config/rbac/backupconfiguration_viewer_role.yaml b/config/rbac/backupconfiguration_viewer_role.yaml deleted file mode 100644 index 60fef40..0000000 --- a/config/rbac/backupconfiguration_viewer_role.yaml +++ /dev/null @@ -1,20 +0,0 @@ -# permissions for end users to view backupconfigurations. -apiVersion: rbac.authorization.k8s.io/v1 -kind: ClusterRole -metadata: - name: backupconfiguration-viewer-role -rules: -- apiGroups: - - formol.desmojim.fr - resources: - - backupconfigurations - verbs: - - get - - list - - watch -- apiGroups: - - formol.desmojim.fr - resources: - - backupconfigurations/status - verbs: - - get diff --git a/config/rbac/backupsession_editor_role.yaml b/config/rbac/backupsession_editor_role.yaml deleted file mode 100644 index d884f01..0000000 --- a/config/rbac/backupsession_editor_role.yaml +++ /dev/null @@ -1,24 +0,0 @@ -# permissions for end users to edit backupsessions. -apiVersion: rbac.authorization.k8s.io/v1 -kind: ClusterRole -metadata: - name: backupsession-editor-role -rules: -- apiGroups: - - formol.desmojim.fr - resources: - - backupsessions - verbs: - - create - - delete - - get - - list - - patch - - update - - watch -- apiGroups: - - formol.desmojim.fr - resources: - - backupsessions/status - verbs: - - get diff --git a/config/rbac/backupsession_viewer_role.yaml b/config/rbac/backupsession_viewer_role.yaml deleted file mode 100644 index 8817113..0000000 --- a/config/rbac/backupsession_viewer_role.yaml +++ /dev/null @@ -1,20 +0,0 @@ -# permissions for end users to view backupsessions. 
-apiVersion: rbac.authorization.k8s.io/v1 -kind: ClusterRole -metadata: - name: backupsession-viewer-role -rules: -- apiGroups: - - formol.desmojim.fr - resources: - - backupsessions - verbs: - - get - - list - - watch -- apiGroups: - - formol.desmojim.fr - resources: - - backupsessions/status - verbs: - - get diff --git a/config/rbac/function_editor_role.yaml b/config/rbac/function_editor_role.yaml deleted file mode 100644 index 963b8c2..0000000 --- a/config/rbac/function_editor_role.yaml +++ /dev/null @@ -1,24 +0,0 @@ -# permissions for end users to edit functions. -apiVersion: rbac.authorization.k8s.io/v1 -kind: ClusterRole -metadata: - name: function-editor-role -rules: -- apiGroups: - - formol.desmojim.fr - resources: - - functions - verbs: - - create - - delete - - get - - list - - patch - - update - - watch -- apiGroups: - - formol.desmojim.fr - resources: - - functions/status - verbs: - - get diff --git a/config/rbac/function_viewer_role.yaml b/config/rbac/function_viewer_role.yaml deleted file mode 100644 index 27bcc02..0000000 --- a/config/rbac/function_viewer_role.yaml +++ /dev/null @@ -1,20 +0,0 @@ -# permissions for end users to view functions. -apiVersion: rbac.authorization.k8s.io/v1 -kind: ClusterRole -metadata: - name: function-viewer-role -rules: -- apiGroups: - - formol.desmojim.fr - resources: - - functions - verbs: - - get - - list - - watch -- apiGroups: - - formol.desmojim.fr - resources: - - functions/status - verbs: - - get diff --git a/config/rbac/kustomization.yaml b/config/rbac/kustomization.yaml deleted file mode 100644 index dbcbe1b..0000000 --- a/config/rbac/kustomization.yaml +++ /dev/null @@ -1,12 +0,0 @@ -resources: -- role.yaml -- role_binding.yaml -- leader_election_role.yaml -- leader_election_role_binding.yaml -# Comment the following 4 lines if you want to disable -# the auth proxy (https://github.com/brancz/kube-rbac-proxy) -# which protects your /metrics endpoint. -#- auth_proxy_service.yaml -#- auth_proxy_role.yaml -#- auth_proxy_role_binding.yaml -#- auth_proxy_client_clusterrole.yaml diff --git a/config/rbac/leader_election_role.yaml b/config/rbac/leader_election_role.yaml deleted file mode 100644 index eaa7915..0000000 --- a/config/rbac/leader_election_role.yaml +++ /dev/null @@ -1,32 +0,0 @@ -# permissions to do leader election. 
-apiVersion: rbac.authorization.k8s.io/v1 -kind: Role -metadata: - name: leader-election-role -rules: -- apiGroups: - - "" - resources: - - configmaps - verbs: - - get - - list - - watch - - create - - update - - patch - - delete -- apiGroups: - - "" - resources: - - configmaps/status - verbs: - - get - - update - - patch -- apiGroups: - - "" - resources: - - events - verbs: - - create diff --git a/config/rbac/leader_election_role_binding.yaml b/config/rbac/leader_election_role_binding.yaml deleted file mode 100644 index eed1690..0000000 --- a/config/rbac/leader_election_role_binding.yaml +++ /dev/null @@ -1,12 +0,0 @@ -apiVersion: rbac.authorization.k8s.io/v1 -kind: RoleBinding -metadata: - name: leader-election-rolebinding -roleRef: - apiGroup: rbac.authorization.k8s.io - kind: Role - name: leader-election-role -subjects: -- kind: ServiceAccount - name: default - namespace: system diff --git a/config/rbac/role_binding.yaml b/config/rbac/role_binding.yaml deleted file mode 100644 index 8f26587..0000000 --- a/config/rbac/role_binding.yaml +++ /dev/null @@ -1,12 +0,0 @@ -apiVersion: rbac.authorization.k8s.io/v1 -kind: ClusterRoleBinding -metadata: - name: manager-rolebinding -roleRef: - apiGroup: rbac.authorization.k8s.io - kind: ClusterRole - name: manager-role -subjects: -- kind: ServiceAccount - name: default - namespace: system diff --git a/config/samples/formol.desmojim.fr_v1alpha1_restoresession.yaml b/config/samples/formol.desmojim.fr_v1alpha1_restoresession.yaml deleted file mode 100644 index f6782a5..0000000 --- a/config/samples/formol.desmojim.fr_v1alpha1_restoresession.yaml +++ /dev/null @@ -1,7 +0,0 @@ -apiVersion: formol.desmojim.fr.desmojim.fr/v1alpha1 -kind: RestoreSession -metadata: - name: restoresession-sample -spec: - # Add fields here - foo: bar diff --git a/config/samples/formol_v1alpha1_backupconfiguration.yaml b/config/samples/formol_v1alpha1_backupconfiguration.yaml index cc33bb6..fe70b83 100644 --- a/config/samples/formol_v1alpha1_backupconfiguration.yaml +++ b/config/samples/formol_v1alpha1_backupconfiguration.yaml @@ -1,17 +1,12 @@ apiVersion: formol.desmojim.fr/v1alpha1 kind: BackupConfiguration metadata: - name: backupconf-nginx + labels: + app.kubernetes.io/name: backupconfiguration + app.kubernetes.io/instance: backupconfiguration-sample + app.kubernetes.io/part-of: formol + app.kubernetes.io/managed-by: kustomize + app.kubernetes.io/created-by: formol + name: backupconfiguration-sample spec: - repository: - name: repo-minio - schedule: "*/1 * * * *" - target: - apiVersion: v1 - kind: Deployment - name: nginx-deployment - volumeMounts: - - name: empty - mountPath: /data - paths: - - /data + # TODO(user): Add fields here diff --git a/config/samples/formol_v1alpha1_backupsession.yaml b/config/samples/formol_v1alpha1_backupsession.yaml index f953734..77128f4 100644 --- a/config/samples/formol_v1alpha1_backupsession.yaml +++ b/config/samples/formol_v1alpha1_backupsession.yaml @@ -1,8 +1,12 @@ apiVersion: formol.desmojim.fr/v1alpha1 kind: BackupSession metadata: - name: backupsession-nginx + labels: + app.kubernetes.io/name: backupsession + app.kubernetes.io/instance: backupsession-sample + app.kubernetes.io/part-of: formol + app.kubernetes.io/managed-by: kustomize + app.kubernetes.io/created-by: formol + name: backupsession-sample spec: - # Add fields here - ref: - name: backupconf-nginx + # TODO(user): Add fields here diff --git a/config/samples/formol_v1alpha1_function.yaml b/config/samples/formol_v1alpha1_function.yaml index 18c29a3..df74db6 100644 --- 
a/config/samples/formol_v1alpha1_function.yaml +++ b/config/samples/formol_v1alpha1_function.yaml @@ -1,11 +1,12 @@ apiVersion: formol.desmojim.fr/v1alpha1 kind: Function metadata: - name: function-backup-pvc - namespace: backup + labels: + app.kubernetes.io/name: function + app.kubernetes.io/instance: function-sample + app.kubernetes.io/part-of: formol + app.kubernetes.io/managed-by: kustomize + app.kubernetes.io/created-by: formol + name: function-sample spec: - name: function-backup-pvc - image: desmo999r/formolcli - args: - - backup - - volume + # TODO(user): Add fields here diff --git a/config/samples/formol_v1alpha1_repo.yaml b/config/samples/formol_v1alpha1_repo.yaml index f7942a0..e1eb2a3 100644 --- a/config/samples/formol_v1alpha1_repo.yaml +++ b/config/samples/formol_v1alpha1_repo.yaml @@ -1,11 +1,12 @@ apiVersion: formol.desmojim.fr/v1alpha1 kind: Repo metadata: - name: repo-minio - namespace: backup + labels: + app.kubernetes.io/name: repo + app.kubernetes.io/instance: repo-sample + app.kubernetes.io/part-of: formol + app.kubernetes.io/managed-by: kustomize + app.kubernetes.io/created-by: formol + name: repo-sample spec: - backend: - s3: - server: raid5.desmojim.fr:9000 - bucket: testbucket2 - repositorySecrets: secret-minio + # TODO(user): Add fields here diff --git a/config/samples/formol_v1alpha1_restoresession.yaml b/config/samples/formol_v1alpha1_restoresession.yaml new file mode 100644 index 0000000..24e7093 --- /dev/null +++ b/config/samples/formol_v1alpha1_restoresession.yaml @@ -0,0 +1,12 @@ +apiVersion: formol.desmojim.fr/v1alpha1 +kind: RestoreSession +metadata: + labels: + app.kubernetes.io/name: restoresession + app.kubernetes.io/instance: restoresession-sample + app.kubernetes.io/part-of: formol + app.kubernetes.io/managed-by: kustomize + app.kubernetes.io/created-by: formol + name: restoresession-sample +spec: + # TODO(user): Add fields here diff --git a/config/samples/test_deployment.yaml b/config/samples/test_deployment.yaml deleted file mode 100644 index 8b54688..0000000 --- a/config/samples/test_deployment.yaml +++ /dev/null @@ -1,28 +0,0 @@ -apiVersion: apps/v1 -kind: Deployment -metadata: - name: nginx-deployment - labels: - app: nginx -spec: - replicas: 1 - selector: - matchLabels: - app: nginx - template: - metadata: - labels: - app: nginx - spec: - containers: - - name: nginx - image: nginx:1.14.2 - ports: - - containerPort: 80 - volumeMounts: - - name: empty - mountPath: /data - volumes: - - name: empty - emptyDir: {} - diff --git a/config/webhook/kustomization.yaml b/config/webhook/kustomization.yaml deleted file mode 100644 index 9cf2613..0000000 --- a/config/webhook/kustomization.yaml +++ /dev/null @@ -1,6 +0,0 @@ -resources: -- manifests.yaml -- service.yaml - -configurations: -- kustomizeconfig.yaml diff --git a/config/webhook/kustomizeconfig.yaml b/config/webhook/kustomizeconfig.yaml deleted file mode 100644 index 25e21e3..0000000 --- a/config/webhook/kustomizeconfig.yaml +++ /dev/null @@ -1,25 +0,0 @@ -# the following config is for teaching kustomize where to look at when substituting vars. -# It requires kustomize v2.1.0 or newer to work properly. 
-nameReference: -- kind: Service - version: v1 - fieldSpecs: - - kind: MutatingWebhookConfiguration - group: admissionregistration.k8s.io - path: webhooks/clientConfig/service/name - - kind: ValidatingWebhookConfiguration - group: admissionregistration.k8s.io - path: webhooks/clientConfig/service/name - -namespace: -- kind: MutatingWebhookConfiguration - group: admissionregistration.k8s.io - path: webhooks/clientConfig/service/namespace - create: true -- kind: ValidatingWebhookConfiguration - group: admissionregistration.k8s.io - path: webhooks/clientConfig/service/namespace - create: true - -varReference: -- path: metadata/annotations diff --git a/config/webhook/service.yaml b/config/webhook/service.yaml deleted file mode 100644 index 31e0f82..0000000 --- a/config/webhook/service.yaml +++ /dev/null @@ -1,12 +0,0 @@ - -apiVersion: v1 -kind: Service -metadata: - name: webhook-service - namespace: system -spec: - ports: - - port: 443 - targetPort: 9443 - selector: - control-plane: controller-manager diff --git a/controllers/.backupconfiguration_controller.go.un~ b/controllers/.backupconfiguration_controller.go.un~ new file mode 100644 index 0000000000000000000000000000000000000000..beb12d25232aea0bde1357d84862cb0ac41688a4 GIT binary patch literal 35950 zcmeI5+ix4k6^CW{qR>jyCcee-C2RRc$_iy!w&U7WV);@#jT;w{HLK!5^8fGP#jqUcMX3go3P?fK2lA&*FkyAnIj z%N$@hXK%B!=R32rJ3BMHUY`HaOTmTkAASA9uU()0%QJm{_|B!1`|H#9r~dQH-M{_x zuYdfc{;!|?c;KU7-|Fk@yF-CxJt_SZF6$MDIrMoSpWD7+9jzJcoBmMmfCI1!f&<@B z8vXp2q_Krp+Bmxf8kVF(-W7*kCw&0R-qMy26^_2XPamh{cPPAv4J1IaF*iDzyQ4O} zO^cicHXXsH+>GnptXI#6!EA9VcH?@jIO)aZ1%Iks@;x_lCY(5``znFA=pdvIK>`n} zv>m@v^PNSt$D6bkr-5LPz3LQ^AJ?P6@#3gbe9^DfEAir;5yy`rbqp>MDMN6~spOdA zm2j?jF^a+{PkW9yL-oL&srXJDI&P`t)O7Z|Fo@l9;73CRmD=l+0&)RCYR8dUc@}4L zPER;dak5m>mzHl{df|-PVc&!klq=5SiF?Ym;mRA3kQTT(a*OrWwPA!_1g@woNpZuf zq{FqpKmo4(XXV=K64wS=xb{ydY@%>1w`#C-)X7{f7p>m8=*ZnTawS)$?VEGCJGwB? 
zQi1kvI#{=|MLVyjL$%Yv$u?48fdk&70B5#N2O}62Z)w}Ka=QM+>F(0m?o;?F1t93p z<aOH{7w}xyI8=d9EJ0aXAbY@hCf@Ef2H_>JMY)3 zuIHDajW~yzx9Ea)2snVs8pRMe2QEX^Rn{S3k{%?BL+osI2zU-00-khSR;yV(LZb!B z90e9&M%orFTcnpb3))>0Ee^m9TA*EXsRhc-BKN=yw5XHqV-t+fi)2B&ryE*&R-aDZ zzgBxY2FO8?cR;pag+Y2!vw+>Fz^+dop?)QI`367^oh`vKC-OdAGM) zwusT#TiTJl%IK$GAcFZJK=*Zi5x$-uD-@p#X2blD7tUAR$alg@$#H{{6Zo$VQQJ%N zDT$Vv3$$grxkQ`95`sZ*;EpP~Uopl1Xh|!pwz?C;#@s}>%I7Jt zd;}?J0cXpWtzkSSwlj+DhUKrfI5F6cq0&X0=W6R*aQr+{(E`RJykK8}*b=5Zg78+f)wyQBa9lKGT;o4 zQec6j+@ql0hOKk3Sxm*`9DkS2ahFbXp8`9_2z@L_Zq@1Z`N0YZAEUqmyl4}}2LxN* zVFaV%E$teRNYOOSP0}du`w8hXrX9XI>N_{cZ4sFT)e{tuj3o&p7!@L!lO$pKrz!jf zXH_42u;wJM(a->CD+Lw+#6Gsci7jF@CTN>GLHmHty2|1Qa@#~^K@}wJjZ_%LoJu4I zyOA8qd>;k4?INRq3Xry`K^VoHng()T*7^|Kc8Gj}=_oJThgG)38e>>eJy}qUijDQ_ zq^s|q#4-6KyD_Q#@D{`7#}~A*9DRg!rbnFEjpqDVHS4>Yb+f?>Y1IRp0$DG|KuFY+DG6EPY4shPpoeUgY=|Ie~@}+--sWyZ-}Nd zxWX=R`EV2K8p0qwsnQ*!p4l~O57;$?XTTG7i>rs1SkDj!=}DE|Aa&^(d*zE`>(fT` zfIXwx+73t9BQ73JVjV*mq$gE6gVZxS#w!olF&bZC;0JrfwZlioD5zI3`+6cQvldnQ zg4Cs749OeDQEh&l3i%HiBamAkshJ89o1cijSt=xTU^-+qy2u)WI5 z_PK{GvBnsdL>B@`Es4}Gc%og4##rtCo#Hu=@O**RvH1^@zLmyEJD2@R)sGxJ8eVYOeb)L4l;yVYuq z&eq&hjS!nic9lcJRX0f*PwwA})#r)1D69vid||}#DrGuD@#@UWz8635-$tHxCkf;l zL&{D!FC4F;JH+#Cqpwn*KbqadWF+G(kQ_+@3bOO!c#$(I( z73u14G5&nD;>NyOA|T}oB{QItA@!hAJ6RaB0{FA_%9O86Tu6?o>9w`w1CX|{*!MFx za^f>N7+GYtH!B}&v9d@wo0<2lXl5!5@Zg=v8LD9kuG=gqpSd};yThE6x!%mkMMpa} z5?skIHz|AID_iAwGFJ$gUH&3D-k3o zYny+o+UB98wt?x(6uwOX2zUfNum2XUV{|};8i@rMuTij_s@Nh%V{hq`Z^+l}DLUIr z6c}Be9{g7G+y{JzM2>+LIMH{`mP8oAs1VMVyWymz?lr33d z5VL9;SsEqZR%p8q1!AxOIV>^{$QGoaERDo$1)Gg4I35vs2V=`s7{sia#`S1B z*RCV?xVQo%8iH7W*kWaC7?0DiQaeLyaqTu9lc|Ctx_PjGu%*h@FdnB-Ew)qbGRT&p z0wVe_vjDJ#%GNL*r$K$H9qP&hrVpdIKie4Qp?Gh`Fg=eHdi2gh* zz-y7RHH^n;NYSgK1?ft&8!%B2MCUjb(6vO_8ph)^qUhDpLUe`cDi|mL9uxTnTnkhf z#H^YIw9pQ;^YkOk6a2L(b;{?9rBFjK)aSDT&Rnu@D zZZDpl<_2P#poh`DEFf!XvNepyX*3VD)9f(O6T<|&lOoRmYhen5m{rqY9&Cr%K4}-T z1Ut+w!ve0BC0oOIoW}A%JIl8D*BB+xof0_)R*O;?#H^Zz5;JhMyrHztq{k#d?u#O~ zplV49gP2v*NMZ`07LqMfA~HyTLpMqmK(!#*8ph)^kTlCnE6C+jTQWy*J0r3Srk0~H zh*_05R=V{ra@sjAo1v630^65FR)N%F6b3P?reS=t9iyCQmMMbTgvcp~T8hFTW>umX zsKS^_I#k2-=xzNP1$3yUS+J7-B;yU}uFE~D!Ri?-YXL{>X9qB|MU2MY(%&XK-AI|; z-u4nGCVJ)q2x!lWd;%!?Wm|~CDCSgxcv(W^a@Gmr`6vv&63%ph2xwzgXwBF#Aww`25tTAp_l8%2@q=82B0W5n!kOPfDkMngX-Y^pAY8si9^RNm7H3+D^cw&G literal 0 HcmV?d00001 diff --git a/controllers/.backupconfiguration_controller_cronjob.go.un~ b/controllers/.backupconfiguration_controller_cronjob.go.un~ new file mode 100644 index 0000000000000000000000000000000000000000..e7f4016c07d2360a9728533c190df255a5a3b6b4 GIT binary patch literal 7322 zcmeI1?`s@I7{@n1+PIBBuxe{#)akY0-FYrXEY`LWYa-I5c#unjz6zV!xyz=T*<)@t zZ3!ZMBMANpg71CrTVE9a0Q*WX0Y#BQUx{GpJAJ;hJDW|7aP9Tp?E~M;&dlyi_W3;X zYi}jo{N{?BkN#gU0IsNIEUptP| zA-(Im?wZf9Cf&JMw!N12b<*=%Nv|tpAObea>c#4=;I@(;4oG995z^Bn7*Z>T@BZE# zN9}~O`Xg$|M$^RnMb(R>7m)jh%ru2eX+!V;|M}K}!7<|e4YwaBoggJ7pg88P5^&|s zi%4iOr((GfuT^VjZW?&F1GKFHWg#j%0 zB(;HSuz~^T>$o7GM~gsTrZP~sK+n2in+c`ZIgH7VgwA<>KofRH#fc0I)hHBFd(E{C z!S$lJv7L=q;5tQ@`Y!T{C} z-gk<4C3V&%S-kJ>8@+Q%_*(qm0M4f6^JcW4oMkeMm>tEyI#GBLAiqF5hO!bfyfA>} zo}@N#jg=RMN%|}dIjbsG$3`hV*@m4&`8tf{k|3BojD)JO^qlkIbuE(Z_<7jKfkGkvrjCVkbHB8&F%4cju!x54-M9NE~izq0g16~-WQFQ!OjE*Vl zY?@@FWAovn!=8c%iwVTQWZ|t5A63$OC@W(EUKplPOx!auF`iEGuTf_;l8uRteq-Wd z5ImlMKmcIgvWDOYN%+sofPfQ*YJ6wwHxm$t(tuc}-Zn@!AXIiMVA^EZ+3b%PJe9d1r^*y#t4^6vn?^>w& zdiGSd+gZNR6{h~A>vF#**aPp?lu;=A~l5;3MxW^7kB|7Uf>xCA@PKGK#0)tBZ5*>2?62(UO`Aa!1?CR**zPtcei_e z#mgLNcIM9gn|r={=KszvE-ioixysS%UvndqGspk&$tOpzojUTZUta(DV_$pezHh#g 
zzxKk*wXgnb*H6Fk!~f)Rx!1%U%;)p8P?)j@IPVed5N&?GSfJ+#c@}CO%hM|I@YY+a$0fckWFhL~a#F z6vBxQx{&<*SERK^BZEJ<21eyj)$F@Xcu~STxlC#O{x=%{|Y2<*MtmvjC+zs8mH>0~(4g_6d zhQLsm3Jplup$Y&w?gK;wdqWE;1Wn%J&wrl@lT%VLk(~1S)XmU9^b$K=6-A-B7+<^r z$ypUgi2xAPBWwg{Xd#85e}#rtHEf|Bw9q&RdwE%0NA*&7T$LSaa9ZdQg~0JW;;428 zb|@i@48ZQjWy_BC!#(0YFAjD)q)3vzd+V;hzN?cz#vvjr~?Sp7KD%NUo<-%A=n*DAU z(GyXrT8~#J2`?7wYd)8SE$@H|spDoeE&^+mkVXb!{VU5FGei9dtoJ(B`MKh9d^n0O zR_n{v^6F%Er!vam2Se{6Q)$jP~dnP=u^=g%$79Gmc1xiJ~83Wx(%)ka$1Vx>&% zX2LzOK9FEtES8(XwQZ+xR)m@}Yhu9(!cc~)M$-5e;rr!6-4MRf%QA0s2(M^o40On- zRVY)T04Tx)K+iYxS19QcB%1@#j?xRc%VhR3M46;Lt8^NlE%06HnX+2 zEj{v7*K!n}Qt-~!phlzKjC~QO)gnz9{HVD;OGL0Zw2(puVfJUsOq+b#KKz7?LoHWV z!tM%$Qux-r=_jur=~u) zh1A>7fvm+pN#z9y(9y(JNNVUOwh(%&REP0>=JDW9t=x?Uh1#6a%rLt69;-l|~_K|iOcj@r| z0txa}YG~chwiif3IdnCtM$-6}9-8e!Z+*9=r+rCZl!IyhTI17wuaY8u->$=XjZEF>8suC3OX5RiJqT`VmAS=Sh*CNK6-}O*5pX0+KMb)jN$_Q_R+L- za`YXbai=DI$N4LAmBhC7<=|y_NSq2U3jG}+BUX*1@$JiD!}jIq7&j*qrDE#JN>}#* z2Cs}QJ%Pl;TNMCuoReVDs)-@}*6T0l(=hC2(Vw#xtX{Y*F2i5uh2R3>~!4$X&Zx^y}TAy(R}! zAO3h!^7fhm(;iZn;`+?ta$M=04Kx14+X>CBF??<80_+jCy#SL2aHt}y zM$-5e;AZWoA6bBO9ie^%fDbu1D4)};%vV{)$| zQh;}99j3$TfHW>ng-cC}qe2?24ke_K0ieBIuvUla_leUsW$SBs-2qKCI9Tmr+Y20@ z635U$V22XY$RO;JKKmU>myIG)cKgNP+LH~(+KXHi+!rzn@p7#!3t_k|oocA_wn6#0 zNj;eNi=*c-phXFpN^4CK-y4TXPU#(eeFY>F8Ye!uIL-~kg(kbvOeR4=^lO`QlJQ|7 zTHJj^tWU9jvU070)R)KS1 ztJ=ui7q-9YRjZ;+Gb~zAQcF|O_EM-Yt41>Q1?_Kpq1`8FhpZ|qNlibLgVCxsGWYwD zeM-m<*9Ou?l|BMc16b8Y=6*l0ljO$3v~6M7>d%lc4d=i$bDnyGeq32zWf-hrJfJrb zAH)HxQAoBy!TP&iCG4=PAo~s}XdX0A0BMMa`jb&gc0r+g-O}yILp>sp95P7&S3qPG zl5J2B{h=QsYeA$2T%cscC5)_~giJ*c{m~)Xo(!644f9pGE7J_dDf$LlDc+kLsU6`* zgfVe0;y&*rA`HWzg)Bvud;g9#IX;MYj|RoSwtOQdUz|c7DQ^mg%x34A)`YHX#|ZsO^V;rs)QHC?G(rV zT72IkBBP7MeMa0p;$(7adxi&Hq`PIg(-m3R^l0nPP1g9bvXFe6siPnA7Yg>DP)g+` z`4^PJ$@9<0rN*=Ii+s`fuoQ`W$+=Q1mSkDR^TpB&&6@mo%0>5mr;BAUGxCSYf}BXQ zbX8%&tySd~aFk%bi==6RF4vL>nbL$F9w&OGA&bAtqa^FWx&^sfle1gVPHpmW4PU>WW4%~^ zxa)^@U%|H1)MaXoS&G(u<@zy6kbE86y3A%@$)0}Z=wQ8$&y=uKH@zNkgXewx8f>a99Xh7TAZ1y(I)y?kpvUhE5 z)6(=GRT~i6T2$JI6fuJIML}PDRY4Hn;+ujL^i_S;?{{`48G{X*TlQ^!;mgeYW_M@# zee#>xox2+*H9 z%MUV{%neA{I#gCbIq6`jlyQEZ{U1Ca0;Xeumem4G%Yph-jweHl6p2{hSk zR)Hx|;oVgRdIy8pRoI@dn2CSvWosH0<6U4bq_ z(b`8C1Svdnsw5<_&Y`3c;AO~6m5fGGiv#V74($>~WcX>!w;sG9^dG6XPMCM9fxj~t zDGs7l9CXIqQq6M<#lf+l=9TjOhibN$&vjc{qQ6|BAPJM`oxGkj(&iB2pUI@93QL1! 
z9Ku%@6TXVUC=zZ~?|?+le0>j6Qz}`lWLp|J@)%);or*+*CFeph&!J@WRh?6+e4RMC zUT4S9&p_`((Uhuy36D%TLb{7bNF%`OP>yW_jZsKoaiHxG(3&Dd*D%-&^fAPQD(KUMwAZ0oh)K8x zNtn%6-LO{mtx?;Xa3wc#x*JkPhCGpbG=>|4ki_C}dsgSh&<{i879|@!`;|vRBP6vi zhpGU`W4I4vpfLzZEDp5)?0PC_pJ7xcTbG_}*g^9a_Rflfp&iyHiqAP=x$gEwuNaal z)9@bc#z}Y>GO4HqrQvEEs(pGYGNFd$aHP6gn?XZ7ut;j8v{oL*f&|UNrv)TYH4e2G zbZUziz6NuV6z|pUoP_z=lXxyQ&!Adl8mY#idQ7Lv&>w~tmPLB%$|F;eknV$&5g?B< z6R3(vk}?joNgY}!!_PsNm_SKs9gmSlf)%KlZIfz@LIR6JZAzyWV6a1wdUJRj%O2m| zi-Vr)R`O46-HHuQ;n-F7yz+$W1b#{FeUjb%+TD`tSxD@M8kSmQ8mY!%`H9Z*3`RKz zDVDc+gdrud|0ZvW1bLdpwxPx-B(ONtzSgNRVa`K}S^*ijtPM2sdr-S$vgHNMjH(5t z;c6V7-|IY?N(?`Z>)`S~W8##HzlG13NOV1~CylfxAwyV2BuN>E@DGA;bF@cu9HX6p zK7~klS!*WQZGe=KD}B6>nSB|AR1MqRzw6nmQcJOH&*u2Flauclr1Ow=LnL<^GW*Z9hiq>uSxyk8a?Y_v6DJ$L=%`=V`nN+II-1+E3rPhcW7ps$y z;|FrTv$UHg**77RsajCdtAb{`PV=Y?KSxZQsbu*muO!VvY1pc0 zq+Wt;hhWE-Z;tjK%3Y$PeqJEnugzeek*9UM>Mx%upQ71h?)8% z=3N+nj$xO5w>q)i+OuyosE!9-z1#LGV|I6OzEk}eL(fiJL-6HA#U8t1%8;aKVET(- W+7U5jIz0m^rdGr>y9CBtv;P7hlF@De literal 0 HcmV?d00001 diff --git a/controllers/.backupsession_controller.go.un~ b/controllers/.backupsession_controller.go.un~ new file mode 100644 index 0000000000000000000000000000000000000000..fd13c2cd717ac1fb29facda0e4de87811002221f GIT binary patch literal 2760 zcmeH|KTE?<6vba_Rf>W*_-_zHQD{Z%V$=$4H5Lb#B32hI1w&{Hwt?6}9R&M19Nfjv zB7&2OlZv>wh_jya`d*12AX^R`ZgO(TgZtwqhxNwwL9^h#i|E}*`Jr6R<{rO}Z%4;Z zmOh_q=I-#(+hYFm;_CJLSqNdlS`l2(Ln>g^7WRu(iuF1w_o^YPDhnS?sw#dVnea)d z044w&vl538Kht>s1Uv=jc$Q2Eqo0^v35}2j*r7y#37B~ZjY9}jfR^w>d`n8eoMG2Vpa zIE5eua(jMqq?Un6uJ@xOjDDil&A)1FBtZ6%bqI|^2vmT!FQG;LBjTY%Fc(j!jZ(2t aOt#M)FSTKuyKRr3&$?~Y9M3VkA2)wBlWA@M literal 0 HcmV?d00001 diff --git a/controllers/.suite_test.go.un~ b/controllers/.suite_test.go.un~ new file mode 100644 index 0000000000000000000000000000000000000000..9a5f9f305922288e7f91e62f1fcc4057c0092bc9 GIT binary patch literal 7724 zcmeI1Pe>F|9LL9X`}55Hg5jZT6tdlr6$v3UYi+fg8mvlTuuS7nTf6Skj$)L7)hRu9 zu%JV?ZV`0p-q}M1-nvv^=OBcz-}lY??LH-JaI(|;!gt@i-|XxgexLol`SZr}#TR2! z1Enuk?8xU_;_LgqU+cY%_UP*A<(H{<4JVq#(MwCo4{IM^|FkUY8M#Gtb~@#9Wv0t+ zyM4Bj%9p5{m!Zy?t~UBcJGoBzFtrd`w&kuYevjiDaVxisy`)&s$WQF*Bcrv}Nx>F+ zM*lto?#jX^iNmsfG}F%`^%ux$U+2#LHJZhh}3tJ z`C?cXye8u?$ntA4kdhN7VkA&}NXFYXLk>xZqFy=}b0JiHZA{zRtCqz#Iyt2NF&X57 zr*=dhaYQ{r0MbD+3jpz*76eSWAqx={2HIN*EmlJWXwS)@-75xX%k%`1phbvGAy02O zQ<*!S<9i9$pY5=x_(mp~Ol5DDrVEpa?6m88-ee+~RD?nF5Sazq*U0kEWC(LwcB-Xi zpcP5DJ|H}y(vR_gASUJ%D1hOWBYy02-JpUhLbefDKh3yfcndlpn|AZ f8XY8o8i>fme^C`;JgBzK5RZ$kmUX-5zrFem$?I1Y literal 0 HcmV?d00001 diff --git a/controllers/backupconfiguration_controller.go b/controllers/backupconfiguration_controller.go index 8aaedf4..37c88d3 100644 --- a/controllers/backupconfiguration_controller.go +++ b/controllers/backupconfiguration_controller.go @@ -1,5 +1,5 @@ /* - +Copyright 2023. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. 
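Before the controller rewrite in the next hunk, a quick recap of the new Repo/Backend shape from repo_types.go: S3 and Nfs are now optional pointers, so a Repo selects exactly one backend, and GetResticEnv only emits variables for the S3 case. A hypothetical Repo built against those types; the endpoint and bucket values are placeholders:

```go
package controllers // placement is illustrative

import (
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"

	formolv1alpha1 "github.com/desmo999r/formol/api/v1alpha1"
)

// sampleRepo exercises the optional-pointer Backend introduced in this patch:
// the S3 backend is populated and Nfs is left nil.
var sampleRepo = formolv1alpha1.Repo{
	ObjectMeta: metav1.ObjectMeta{Name: "repo-sample", Namespace: "backup"},
	Spec: formolv1alpha1.RepoSpec{
		Backend: formolv1alpha1.Backend{
			S3: &formolv1alpha1.S3{
				Server: "minio.example.com:9000", // placeholder endpoint
				Bucket: "backups",                // placeholder bucket
			},
		},
		// Secret expected to hold AWS_ACCESS_KEY_ID, AWS_SECRET_ACCESS_KEY and RESTIC_PASSWORD.
		RepositorySecrets: "secret-minio",
	},
}
```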
@@ -18,360 +18,108 @@ package controllers import ( "context" - //"time" - formolrbac "github.com/desmo999r/formol/pkg/rbac" - formolutils "github.com/desmo999r/formol/pkg/utils" "github.com/go-logr/logr" - appsv1 "k8s.io/api/apps/v1" - batchv1 "k8s.io/api/batch/v1" - kbatch_beta1 "k8s.io/api/batch/v1beta1" - corev1 "k8s.io/api/core/v1" "k8s.io/apimachinery/pkg/api/errors" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/runtime" ctrl "sigs.k8s.io/controller-runtime" "sigs.k8s.io/controller-runtime/pkg/client" - "sigs.k8s.io/controller-runtime/pkg/controller" - "sigs.k8s.io/controller-runtime/pkg/reconcile" + "sigs.k8s.io/controller-runtime/pkg/log" formolv1alpha1 "github.com/desmo999r/formol/api/v1alpha1" + formolutils "github.com/desmo999r/formol/pkg/utils" ) // BackupConfigurationReconciler reconciles a BackupConfiguration object type BackupConfigurationReconciler struct { client.Client - Log logr.Logger Scheme *runtime.Scheme + Log logr.Logger + context.Context } -var _ reconcile.Reconciler = &BackupConfigurationReconciler{} +//+kubebuilder:rbac:groups=formol.desmojim.fr,resources=backupconfigurations,verbs=get;list;watch;create;update;patch;delete +//+kubebuilder:rbac:groups=formol.desmojim.fr,resources=backupconfigurations/status,verbs=get;update;patch +//+kubebuilder:rbac:groups=formol.desmojim.fr,resources=backupconfigurations/finalizers,verbs=update -// +kubebuilder:rbac:groups=formol.desmojim.fr,resources=*,verbs=* -// +kubebuilder:rbac:groups=apps,resources=deployments,verbs=get;list;watch;create;update;patch;delete -// +kubebuilder:rbac:groups=apps,resources=replicasets,verbs=get;list;watch;create;update;patch;delete -// +kubebuilder:rbac:groups=core,resources=pods,verbs=get;list;watch -// +kubebuilder:rbac:groups=core,resources=secrets,verbs=get;list;watch -// +kubebuilder:rbac:groups=core,resources=configmaps,verbs=get;list;watch -// +kubebuilder:rbac:groups=core,resources=serviceaccounts,verbs=get;list;watch;create;update;patch;delete -// +kubebuilder:rbac:groups=rbac.authorization.k8s.io,resources=roles,verbs=get;list;watch;create;update;patch;delete -// +kubebuilder:rbac:groups=rbac.authorization.k8s.io,resources=rolebindings,verbs=get;list;watch;create;update;patch;delete -// +kubebuilder:rbac:groups=rbac.authorization.k8s.io,resources=clusterroles,verbs=get;list;watch;create;update;patch;delete -// +kubebuilder:rbac:groups=rbac.authorization.k8s.io,resources=clusterrolebindings,verbs=get;list;watch;create;update;patch;delete -// +kubebuilder:rbac:groups=batch,resources=cronjobs,verbs=get;list;watch;create;update;patch;delete -// +kubebuilder:rbac:groups=batch,resources=cronjobs/status,verbs=get -// +kubebuilder:rbac:groups=coordination.k8s.io,resources=leases,verbs=get;list;create;update +// Reconcile is part of the main kubernetes reconciliation loop which aims to +// move the current state of the cluster closer to the desired state. +// TODO(user): Modify the Reconcile function to compare the state specified by +// the BackupConfiguration object against the actual cluster state, and then +// perform operations to make the cluster state reflect the state specified by +// the user. 
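+//
+// Purely as an illustration (not code from this patch): when a reconciler cannot
+// make progress yet, controller-runtime lets it ask to be called again later by
+// returning a Result with RequeueAfter set instead of an error, for example:
+//
+//	// retry in 30 seconds without recording a failure (the delay is arbitrary;
+//	// "time" would need to be imported)
+//	return ctrl.Result{RequeueAfter: 30 * time.Second}, nil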
+// +// For more details, check Reconcile and its Result here: +// - https://pkg.go.dev/sigs.k8s.io/controller-runtime@v0.13.1/pkg/reconcile +func (r *BackupConfigurationReconciler) Reconcile(ctx context.Context, req ctrl.Request) (ctrl.Result, error) { + r.Context = ctx + r.Log = log.FromContext(ctx) -func (r *BackupConfigurationReconciler) Reconcile(ctx context.Context, req reconcile.Request) (reconcile.Result, error) { - var changed bool - log := r.Log.WithValues("backupconfiguration", req.NamespacedName) - //time.Sleep(300 * time.Millisecond) + r.Log.V(1).Info("Enter Reconcile with req", "req", req, "reconciler", r) - log.V(1).Info("Enter Reconcile with req", "req", req, "reconciler", r) - - backupConf := &formolv1alpha1.BackupConfiguration{} - if err := r.Get(ctx, req.NamespacedName, backupConf); err != nil { - return reconcile.Result{}, client.IgnoreNotFound(err) - } - - getDeployment := func(namespace string, name string) (*appsv1.Deployment, error) { - deployment := &appsv1.Deployment{} - err := r.Get(context.Background(), client.ObjectKey{ - Namespace: namespace, - Name: name, - }, deployment) - return deployment, err - } - - deleteCronJob := func() error { - _ = formolrbac.DeleteFormolRBAC(r.Client, "default", backupConf.Namespace) - _ = formolrbac.DeleteBackupSessionCreatorRBAC(r.Client, backupConf.Namespace) - cronjob := &kbatch_beta1.CronJob{} - if err := r.Get(context.Background(), client.ObjectKey{ - Namespace: backupConf.Namespace, - Name: "backup-" + backupConf.Name, - }, cronjob); err == nil { - log.V(0).Info("Deleting cronjob", "cronjob", cronjob.Name) - return r.Delete(context.TODO(), cronjob) - } else { - return err + backupConf := formolv1alpha1.BackupConfiguration{} + err := r.Get(ctx, req.NamespacedName, &backupConf) + if err != nil { + if errors.IsNotFound(err) { + return ctrl.Result{}, nil } - } - - addCronJob := func() error { - if err := formolrbac.CreateFormolRBAC(r.Client, "default", backupConf.Namespace); err != nil { - log.Error(err, "unable to create backupsessionlistener RBAC") - return nil - } - - if err := formolrbac.CreateBackupSessionCreatorRBAC(r.Client, backupConf.Namespace); err != nil { - log.Error(err, "unable to create backupsession-creator RBAC") - return nil - } - - cronjob := &kbatch_beta1.CronJob{} - if err := r.Get(context.Background(), client.ObjectKey{ - Namespace: backupConf.Namespace, - Name: "backup-" + backupConf.Name, - }, cronjob); err == nil { - log.V(0).Info("there is already a cronjob") - if backupConf.Spec.Schedule != cronjob.Spec.Schedule { - log.V(0).Info("cronjob schedule has changed", "old schedule", cronjob.Spec.Schedule, "new schedule", backupConf.Spec.Schedule) - cronjob.Spec.Schedule = backupConf.Spec.Schedule - changed = true - } - if backupConf.Spec.Suspend != nil && backupConf.Spec.Suspend != cronjob.Spec.Suspend { - log.V(0).Info("cronjob suspend has changed", "before", cronjob.Spec.Suspend, "new", backupConf.Spec.Suspend) - cronjob.Spec.Suspend = backupConf.Spec.Suspend - changed = true - } - if changed == true { - if err := r.Update(context.TODO(), cronjob); err != nil { - log.Error(err, "unable to update cronjob definition") - return err - } - } - return nil - } else if errors.IsNotFound(err) == false { - log.Error(err, "something went wrong") - return err - } - - cronjob = &kbatch_beta1.CronJob{ - ObjectMeta: metav1.ObjectMeta{ - Name: "backup-" + backupConf.Name, - Namespace: backupConf.Namespace, - }, - Spec: kbatch_beta1.CronJobSpec{ - Suspend: backupConf.Spec.Suspend, - Schedule: backupConf.Spec.Schedule, - 
JobTemplate: kbatch_beta1.JobTemplateSpec{ - Spec: batchv1.JobSpec{ - Template: corev1.PodTemplateSpec{ - Spec: corev1.PodSpec{ - RestartPolicy: corev1.RestartPolicyOnFailure, - ServiceAccountName: "backupsession-creator", - Containers: []corev1.Container{ - corev1.Container{ - Name: "job-createbackupsession-" + backupConf.Name, - Image: backupConf.Spec.Image, - Args: []string{ - "backupsession", - "create", - "--namespace", - backupConf.Namespace, - "--name", - backupConf.Name, - }, - }, - }, - }, - }, - }, - }, - }, - } - if err := ctrl.SetControllerReference(backupConf, cronjob, r.Scheme); err != nil { - log.Error(err, "unable to set controller on job", "cronjob", cronjob, "backupconf", backupConf) - return err - } - log.V(0).Info("creating the cronjob") - if err := r.Create(context.Background(), cronjob); err != nil { - log.Error(err, "unable to create the cronjob", "cronjob", cronjob) - return err - } else { - changed = true - return nil - } - } - - deleteSidecarContainer := func(target formolv1alpha1.Target) error { - deployment, err := getDeployment(backupConf.Namespace, target.Name) - if err != nil { - return err - } - restorecontainers := []corev1.Container{} - for _, container := range deployment.Spec.Template.Spec.Containers { - if container.Name == formolv1alpha1.SIDECARCONTAINER_NAME { - continue - } - restorecontainers = append(restorecontainers, container) - } - deployment.Spec.Template.Spec.Containers = restorecontainers - if err := r.Update(context.Background(), deployment); err != nil { - return err - } - if err := formolrbac.DeleteFormolRBAC(r.Client, deployment.Spec.Template.Spec.ServiceAccountName, deployment.Namespace); err != nil { - return err - } - return nil - } - - addSidecarContainer := func(target formolv1alpha1.Target) error { - deployment, err := getDeployment(backupConf.Namespace, target.Name) - if err != nil { - log.Error(err, "unable to get Deployment") - return err - } - log.V(1).Info("got deployment", "Deployment", deployment) - for i, container := range deployment.Spec.Template.Spec.Containers { - if container.Name == formolv1alpha1.SIDECARCONTAINER_NAME { - log.V(0).Info("There is already a backup sidecar container. 
Skipping", "container", container) - return nil - } - if target.ContainerName != "" && target.ContainerName == container.Name { - // Put a tag so we can find what container we are supposed to backup - // and what process we are supposed to chroot to run the init steps - deployment.Spec.Template.Spec.Containers[i].Env = append(container.Env, corev1.EnvVar{ - Name: formolv1alpha1.TARGETCONTAINER_TAG, - Value: "True", - }) - } - } - sidecar := corev1.Container{ - Name: formolv1alpha1.SIDECARCONTAINER_NAME, - // TODO: Put the image in the BackupConfiguration YAML file - Image: backupConf.Spec.Image, - Args: []string{"backupsession", "server"}, - //Image: "busybox", - //Command: []string{ - // "sh", - // "-c", - // "sleep 3600; echo done", - //}, - Env: []corev1.EnvVar{ - corev1.EnvVar{ - Name: formolv1alpha1.POD_NAME, - ValueFrom: &corev1.EnvVarSource{ - FieldRef: &corev1.ObjectFieldSelector{ - FieldPath: "metadata.name", - }, - }, - }, - corev1.EnvVar{ - Name: formolv1alpha1.POD_NAMESPACE, - ValueFrom: &corev1.EnvVarSource{ - FieldRef: &corev1.ObjectFieldSelector{ - FieldPath: "metadata.namespace", - }, - }, - }, - corev1.EnvVar{ - Name: formolv1alpha1.TARGET_NAME, - Value: target.Name, - }, - }, - VolumeMounts: []corev1.VolumeMount{}, - } - - // Gather information from the repo - repo := &formolv1alpha1.Repo{} - if err := r.Get(context.Background(), client.ObjectKey{ - Namespace: backupConf.Namespace, - Name: backupConf.Spec.Repository, - }, repo); err != nil { - log.Error(err, "unable to get Repo from BackupConfiguration") - return err - } - sidecar.Env = append(sidecar.Env, formolutils.ConfigureResticEnvVar(backupConf, repo)...) - - for _, volumemount := range target.VolumeMounts { - log.V(1).Info("mounts", "volumemount", volumemount) - volumemount.ReadOnly = true - sidecar.VolumeMounts = append(sidecar.VolumeMounts, *volumemount.DeepCopy()) - } - deployment.Spec.Template.Spec.Containers = append(deployment.Spec.Template.Spec.Containers, sidecar) - deployment.Spec.Template.Spec.ShareProcessNamespace = func() *bool { b := true; return &b }() - - if err := formolrbac.CreateFormolRBAC(r.Client, deployment.Spec.Template.Spec.ServiceAccountName, deployment.Namespace); err != nil { - log.Error(err, "unable to create backupsessionlistener RBAC") - return nil - } - - log.V(0).Info("Adding a sicar container") - if err := r.Update(context.Background(), deployment); err != nil { - log.Error(err, "unable to update the Deployment") - return err - } else { - changed = true - return nil - } - } - - deleteExternalResources := func() error { - for _, target := range backupConf.Spec.Targets { - switch target.Kind { - case formolv1alpha1.SidecarKind: - _ = deleteSidecarContainer(target) - } - } - // TODO: remove the hardcoded "default" - _ = deleteCronJob() - return nil + return ctrl.Result{}, err } finalizerName := "finalizer.backupconfiguration.formol.desmojim.fr" if !backupConf.ObjectMeta.DeletionTimestamp.IsZero() { - log.V(0).Info("backupconf being deleted", "backupconf", backupConf.Name) + r.Log.V(0).Info("backupconf being deleted", "backupconf", backupConf.ObjectMeta.Finalizers) if formolutils.ContainsString(backupConf.ObjectMeta.Finalizers, finalizerName) { - _ = deleteExternalResources() + _ = r.DeleteSidecar(backupConf) + _ = r.DeleteCronJob(backupConf) backupConf.ObjectMeta.Finalizers = formolutils.RemoveString(backupConf.ObjectMeta.Finalizers, finalizerName) - if err := r.Update(context.Background(), backupConf); err != nil { - log.Error(err, "unable to remove finalizer") - return 
reconcile.Result{}, err + if err := r.Update(ctx, &backupConf); err != nil { + r.Log.Error(err, "unable to remove finalizer") + return ctrl.Result{}, err } } // We have been deleted. Return here - log.V(0).Info("backupconf deleted", "backupconf", backupConf.Name) - return reconcile.Result{}, nil + r.Log.V(0).Info("backupconf deleted", "backupconf", backupConf.Name) + return ctrl.Result{}, nil } // Add finalizer if !formolutils.ContainsString(backupConf.ObjectMeta.Finalizers, finalizerName) { + r.Log.V(0).Info("adding finalizer", "backupconf", backupConf) backupConf.ObjectMeta.Finalizers = append(backupConf.ObjectMeta.Finalizers, finalizerName) - err := r.Update(context.Background(), backupConf) - if err != nil { - log.Error(err, "unable to append finalizer") + if err := r.Update(ctx, &backupConf); err != nil { + r.Log.Error(err, "unable to append finalizer") + return ctrl.Result{}, err } - return reconcile.Result{}, err + // backupConf has been updated. Exit here. The reconciler will be called again so we can finish the job. + return ctrl.Result{}, nil } - if err := addCronJob(); err != nil { - return reconcile.Result{}, nil + if err := r.AddCronJob(backupConf); err != nil { + return ctrl.Result{}, err } else { backupConf.Status.ActiveCronJob = true } - for _, target := range backupConf.Spec.Targets { - switch target.Kind { - case formolv1alpha1.SidecarKind: - if err := addSidecarContainer(target); err != nil { - return reconcile.Result{}, client.IgnoreNotFound(err) - } else { - backupConf.Status.ActiveSidecar = true - } - } + if err := r.AddSidecar(backupConf); err != nil { + r.Log.Error(err, "unable to add sidecar container") + return ctrl.Result{}, err + } else { + backupConf.Status.ActiveSidecar = true } - //backupConf.Status.Suspended = false - if changed == true { - log.V(1).Info("updating backupconf") - if err := r.Status().Update(ctx, backupConf); err != nil { - log.Error(err, "unable to update backupconf", "backupconf", backupConf) - return reconcile.Result{}, err - } + if err := r.Status().Update(ctx, &backupConf); err != nil { + r.Log.Error(err, "Unable to update BackupConfiguration status") + return ctrl.Result{}, err } - return reconcile.Result{}, nil + return ctrl.Result{}, nil } +// SetupWithManager sets up the controller with the Manager. func (r *BackupConfigurationReconciler) SetupWithManager(mgr ctrl.Manager) error { return ctrl.NewControllerManagedBy(mgr). For(&formolv1alpha1.BackupConfiguration{}). - WithOptions(controller.Options{MaxConcurrentReconciles: 3}). - //WithEventFilter(predicate.GenerationChangedPredicate{}). // Don't reconcile when status gets updated - //Owns(&formolv1alpha1.BackupSession{}). - Owns(&kbatch_beta1.CronJob{}). Complete(r) } diff --git a/controllers/backupconfiguration_controller.go~ b/controllers/backupconfiguration_controller.go~ new file mode 100644 index 0000000..b983514 --- /dev/null +++ b/controllers/backupconfiguration_controller.go~ @@ -0,0 +1,129 @@ +/* +Copyright 2023. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package controllers + +import ( + "context" + + "github.com/go-logr/logr" + //appsv1 "k8s.io/api/apps/v1" + //batchv1 "k8s.io/api/batch/v1" + //corev1 "k8s.io/api/core/v1" + "k8s.io/apimachinery/pkg/api/errors" + //metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" + ctrl "sigs.k8s.io/controller-runtime" + "sigs.k8s.io/controller-runtime/pkg/client" + "sigs.k8s.io/controller-runtime/pkg/log" + + formolv1alpha1 "github.com/desmo999r/formol/api/v1alpha1" + formolutils "github.com/desmo999r/formol/pkg/utils" +) + +// BackupConfigurationReconciler reconciles a BackupConfiguration object +type BackupConfigurationReconciler struct { + client.Client + Scheme *runtime.Scheme + Log logr.Logger + context.Context +} + +//+kubebuilder:rbac:groups=formol.desmojim.fr,resources=backupconfigurations,verbs=get;list;watch;create;update;patch;delete +//+kubebuilder:rbac:groups=formol.desmojim.fr,resources=backupconfigurations/status,verbs=get;update;patch +//+kubebuilder:rbac:groups=formol.desmojim.fr,resources=backupconfigurations/finalizers,verbs=update + +// Reconcile is part of the main kubernetes reconciliation loop which aims to +// move the current state of the cluster closer to the desired state. +// TODO(user): Modify the Reconcile function to compare the state specified by +// the BackupConfiguration object against the actual cluster state, and then +// perform operations to make the cluster state reflect the state specified by +// the user. +// +// For more details, check Reconcile and its Result here: +// - https://pkg.go.dev/sigs.k8s.io/controller-runtime@v0.13.1/pkg/reconcile +func (r *BackupConfigurationReconciler) Reconcile(ctx context.Context, req ctrl.Request) (ctrl.Result, error) { + r.Context = ctx + r.Log = log.FromContext(ctx) + + r.Log.V(1).Info("Enter Reconcile with req", "req", req, "reconciler", r) + + backupConf := formolv1alpha1.BackupConfiguration{} + err := r.Get(ctx, req.NamespacedName, &backupConf) + if err != nil { + if errors.IsNotFound(err) { + return ctrl.Result{}, nil + } + return ctrl.Result{}, err + } + + finalizerName := "finalizer.backupconfiguration.formol.desmojim.fr" + + if !backupConf.ObjectMeta.DeletionTimestamp.IsZero() { + r.Log.V(0).Info("backupconf being deleted", "backupconf", backupConf.ObjectMeta.Finalizers) + if formolutils.ContainsString(backupConf.ObjectMeta.Finalizers, finalizerName) { + _ = r.DeleteSidecar(backupConf) + _ = r.DeleteCronJob(backupConf) + backupConf.ObjectMeta.Finalizers = formolutils.RemoveString(backupConf.ObjectMeta.Finalizers, finalizerName) + if err := r.Update(ctx, &backupConf); err != nil { + r.Log.Error(err, "unable to remove finalizer") + return ctrl.Result{}, err + } + } + // We have been deleted. Return here + r.Log.V(0).Info("backupconf deleted", "backupconf", backupConf.Name) + return ctrl.Result{}, nil + } + + // Add finalizer + if !formolutils.ContainsString(backupConf.ObjectMeta.Finalizers, finalizerName) { + r.Log.V(0).Info("adding finalizer", "backupconf", backupConf) + backupConf.ObjectMeta.Finalizers = append(backupConf.ObjectMeta.Finalizers, finalizerName) + if err := r.Update(ctx, &backupConf); err != nil { + r.Log.Error(err, "unable to append finalizer") + return ctrl.Result{}, err + } + // backupConf has been updated. Exit here. The reconciler will be called again so we can finish the job. 
+ return ctrl.Result{}, nil + } + + if err := r.AddCronJob(backupConf); err != nil { + return ctrl.Result{}, err + } else { + backupConf.Status.ActiveCronJob = true + } + + if err := r.AddSidecar(backupConf); err != nil { + r.Log.Error(err, "unable to add sidecar container") + return ctrl.Result{}, err + } else { + backupConf.Status.ActiveSidecar = true + } + + if err := r.Status().Update(ctx, &backupConf); err != nil { + r.Log.Error(err, "Unable to update BackupConfiguration status") + return ctrl.Result{}, err + } + + return ctrl.Result{}, nil +} + +// SetupWithManager sets up the controller with the Manager. +func (r *BackupConfigurationReconciler) SetupWithManager(mgr ctrl.Manager) error { + return ctrl.NewControllerManagedBy(mgr). + For(&formolv1alpha1.BackupConfiguration{}). + Complete(r) +} diff --git a/controllers/backupconfiguration_controller_cronjob.go b/controllers/backupconfiguration_controller_cronjob.go new file mode 100644 index 0000000..3a424fb --- /dev/null +++ b/controllers/backupconfiguration_controller_cronjob.go @@ -0,0 +1,103 @@ +package controllers + +import ( + formolv1alpha1 "github.com/desmo999r/formol/api/v1alpha1" + batchv1 "k8s.io/api/batch/v1" + corev1 "k8s.io/api/core/v1" + "k8s.io/apimachinery/pkg/api/errors" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + ctrl "sigs.k8s.io/controller-runtime" + "sigs.k8s.io/controller-runtime/pkg/client" +) + +func (r *BackupConfigurationReconciler) DeleteCronJob(backupConf formolv1alpha1.BackupConfiguration) error { + cronjob := &batchv1.CronJob{} + if err := r.Get(r.Context, client.ObjectKey{ + Namespace: backupConf.Namespace, + Name: "backup-" + backupConf.Name, + }, cronjob); err == nil { + r.Log.V(0).Info("Deleting cronjob", "cronjob", cronjob.Name) + return r.Delete(r.Context, cronjob) + } else { + return err + } +} + +func (r *BackupConfigurationReconciler) AddCronJob(backupConf formolv1alpha1.BackupConfiguration) error { + cronjob := &batchv1.CronJob{} + if err := r.Get(r.Context, client.ObjectKey{ + Namespace: backupConf.Namespace, + Name: "backup-" + backupConf.Name, + }, cronjob); err == nil { + r.Log.V(0).Info("there is already a cronjob") + var changed bool + if backupConf.Spec.Schedule != cronjob.Spec.Schedule { + r.Log.V(0).Info("cronjob schedule has changed", "old schedule", cronjob.Spec.Schedule, "new schedule", backupConf.Spec.Schedule) + cronjob.Spec.Schedule = backupConf.Spec.Schedule + changed = true + } + if backupConf.Spec.Suspend != nil && backupConf.Spec.Suspend != cronjob.Spec.Suspend { + r.Log.V(0).Info("cronjob suspend has changed", "before", cronjob.Spec.Suspend, "new", backupConf.Spec.Suspend) + cronjob.Spec.Suspend = backupConf.Spec.Suspend + changed = true + } + if changed == true { + if err := r.Update(r.Context, cronjob); err != nil { + r.Log.Error(err, "unable to update cronjob definition") + return err + } + backupConf.Status.Suspended = *backupConf.Spec.Suspend + } + return nil + } else if errors.IsNotFound(err) == false { + r.Log.Error(err, "something went wrong") + return err + } + + cronjob = &batchv1.CronJob{ + ObjectMeta: metav1.ObjectMeta{ + Name: "backup-" + backupConf.Name, + Namespace: backupConf.Namespace, + }, + Spec: batchv1.CronJobSpec{ + Suspend: backupConf.Spec.Suspend, + Schedule: backupConf.Spec.Schedule, + JobTemplate: batchv1.JobTemplateSpec{ + Spec: batchv1.JobSpec{ + Template: corev1.PodTemplateSpec{ + Spec: corev1.PodSpec{ + RestartPolicy: corev1.RestartPolicyOnFailure, + ServiceAccountName: "backupsession-creator", + Containers: []corev1.Container{ + 
corev1.Container{ + Name: "job-createbackupsession-" + backupConf.Name, + Image: backupConf.Spec.Image, + Args: []string{ + "backupsession", + "create", + "--namespace", + backupConf.Namespace, + "--name", + backupConf.Name, + }, + }, + }, + }, + }, + }, + }, + }, + } + if err := ctrl.SetControllerReference(&backupConf, cronjob, r.Scheme); err != nil { + r.Log.Error(err, "unable to set controller on job", "cronjob", cronjob, "backupconf", backupConf) + return err + } + r.Log.V(0).Info("creating the cronjob") + if err := r.Create(r.Context, cronjob); err != nil { + r.Log.Error(err, "unable to create the cronjob", "cronjob", cronjob) + return err + } else { + backupConf.Status.Suspended = *backupConf.Spec.Suspend + return nil + } +} diff --git a/controllers/backupconfiguration_controller_cronjob.go~ b/controllers/backupconfiguration_controller_cronjob.go~ new file mode 100644 index 0000000..459d613 --- /dev/null +++ b/controllers/backupconfiguration_controller_cronjob.go~ @@ -0,0 +1,102 @@ +package controllers + +import ( + formolv1alpha1 "github.com/desmo999r/formol/api/v1alpha1" + batchv1 "k8s.io/api/batch/v1" + corev1 "k8s.io/api/core/v1" + "k8s.io/apimachinery/pkg/api/errors" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + ctrl "sigs.k8s.io/controller-runtime" + "sigs.k8s.io/controller-runtime/pkg/client" +) + +func (r *BackupConfigurationReconciler) DeleteCronJob(backupConf formolv1alpha1.BackupConfiguration) error { + cronjob := &batchv1.CronJob{} + if err := r.Get(r.Context, client.ObjectKey{ + Namespace: backupConf.Namespace, + Name: "backup-" + backupConf.Name, + }, cronjob); err == nil { + r.Log.V(0).Info("Deleting cronjob", "cronjob", cronjob.Name) + return r.Delete(r.Context, cronjob) + } else { + return err + } +} + +func (r *BackupConfigurationReconciler) AddCronJob(backupConf formolv1alpha1.BackupConfiguration) error { + cronjob := &batchv1.CronJob{} + if err := r.Get(r.Context, client.ObjectKey{ + Namespace: backupConf.Namespace, + Name: "backup-" + backupConf.Name, + }, cronjob); err == nil { + r.Log.V(0).Info("there is already a cronjob") + var changed bool + if backupConf.Spec.Schedule != cronjob.Spec.Schedule { + r.Log.V(0).Info("cronjob schedule has changed", "old schedule", cronjob.Spec.Schedule, "new schedule", backupConf.Spec.Schedule) + cronjob.Spec.Schedule = backupConf.Spec.Schedule + changed = true + } + if backupConf.Spec.Suspend != nil && backupConf.Spec.Suspend != cronjob.Spec.Suspend { + r.Log.V(0).Info("cronjob suspend has changed", "before", cronjob.Spec.Suspend, "new", backupConf.Spec.Suspend) + cronjob.Spec.Suspend = backupConf.Spec.Suspend + changed = true + } + if changed == true { + if err := r.Update(r.Context, cronjob); err != nil { + r.Log.Error(err, "unable to update cronjob definition") + return err + } + backupConf.Status.Suspended = *backupConf.Spec.Suspend + } + return nil + } else if errors.IsNotFound(err) == false { + r.Log.Error(err, "something went wrong") + return err + } + + cronjob = &batchv1.CronJob{ + ObjectMeta: metav1.ObjectMeta{ + Name: "backup-" + backupConf.Name, + Namespace: backupConf.Namespace, + }, + Spec: batchv1.CronJobSpec{ + Suspend: backupConf.Spec.Suspend, + Schedule: backupConf.Spec.Schedule, + JobTemplate: batchv1.JobTemplateSpec{ + Spec: batchv1.JobSpec{ + Template: corev1.PodTemplateSpec{ + Spec: corev1.PodSpec{ + RestartPolicy: corev1.RestartPolicyOnFailure, + ServiceAccountName: "backupsession-creator", + Containers: []corev1.Container{ + corev1.Container{ + Name: "job-createbackupsession-" + backupConf.Name, + 
Image: backupConf.Spec.Image, + Args: []string{ + "backupsession", + "create", + "--namespace", + backupConf.Namespace, + "--name", + backupConf.Name, + }, + }, + }, + }, + }, + }, + }, + }, + } + if err := ctrl.SetControllerReference(&backupConf, cronjob, r.Scheme); err != nil { + r.Log.Error(err, "unable to set controller on job", "cronjob", cronjob, "backupconf", backupConf) + return err + } + r.Log.V(0).Info("creating the cronjob") + if err := r.Create(r.Context, cronjob); err != nil { + r.Log.Error(err, "unable to create the cronjob", "cronjob", cronjob) + return err + } else { + return nil + } +} diff --git a/controllers/backupconfiguration_controller_sidecar.go b/controllers/backupconfiguration_controller_sidecar.go new file mode 100644 index 0000000..688e339 --- /dev/null +++ b/controllers/backupconfiguration_controller_sidecar.go @@ -0,0 +1,137 @@ +package controllers + +import ( + appsv1 "k8s.io/api/apps/v1" + corev1 "k8s.io/api/core/v1" + "sigs.k8s.io/controller-runtime/pkg/client" + + formolv1alpha1 "github.com/desmo999r/formol/api/v1alpha1" +) + +func (r *BackupConfigurationReconciler) DeleteSidecar(backupConf formolv1alpha1.BackupConfiguration) error { + removeTags := func(podSpec *corev1.PodSpec, target formolv1alpha1.Target) { + for i, container := range podSpec.Containers { + for _, targetContainer := range target.Containers { + if targetContainer.Name == container.Name { + if container.Env[len(container.Env)-1].Name == formolv1alpha1.TARGETCONTAINER_TAG { + podSpec.Containers[i].Env = container.Env[:len(container.Env)-1] + } else { + for j, e := range container.Env { + if e.Name == formolv1alpha1.TARGETCONTAINER_TAG { + container.Env[j] = container.Env[len(container.Env)-1] + podSpec.Containers[i].Env = container.Env[:len(container.Env)-1] + break + } + } + } + } + } + } + } + for _, target := range backupConf.Spec.Targets { + switch target.TargetKind { + case formolv1alpha1.Deployment: + deployment := &appsv1.Deployment{} + if err := r.Get(r.Context, client.ObjectKey{ + Namespace: backupConf.Namespace, + Name: target.TargetName, + }, deployment); err != nil { + r.Log.Error(err, "cannot get deployment", "Deployment", target.TargetName) + return err + } + restoreContainers := []corev1.Container{} + for _, container := range deployment.Spec.Template.Spec.Containers { + if container.Name == formolv1alpha1.SIDECARCONTAINER_NAME { + continue + } + restoreContainers = append(restoreContainers, container) + } + deployment.Spec.Template.Spec.Containers = restoreContainers + removeTags(&deployment.Spec.Template.Spec, target) + if err := r.Update(r.Context, deployment); err != nil { + r.Log.Error(err, "unable to update deployment", "deployment", deployment) + return err + } + } + } + + return nil +} + +func (r *BackupConfigurationReconciler) AddSidecar(backupConf formolv1alpha1.BackupConfiguration) error { + // Go through all the 'targets' + // the backupType: Online needs a sidecar container for every single listed 'container' + // if the backupType is something else than Online, the 'container' will still need a sidecar + // if it has 'steps' + addTags := func(podSpec *corev1.PodSpec, target formolv1alpha1.Target) bool { + for i, container := range podSpec.Containers { + if container.Name == formolv1alpha1.SIDECARCONTAINER_NAME { + return false + } + for _, targetContainer := range target.Containers { + if targetContainer.Name == container.Name { + podSpec.Containers[i].Env = append(container.Env, corev1.EnvVar{ + Name: formolv1alpha1.TARGETCONTAINER_TAG, + Value: container.Name, 
+ }) + } + } + } + return true + } + + for _, target := range backupConf.Spec.Targets { + addSidecar := false + for _, targetContainer := range target.Containers { + if len(targetContainer.Steps) > 0 { + addSidecar = true + } + } + if target.BackupType == formolv1alpha1.OnlineKind { + addSidecar = true + } + if addSidecar { + repo := formolv1alpha1.Repo{} + if err := r.Get(r.Context, client.ObjectKey{ + Namespace: backupConf.Namespace, + Name: backupConf.Spec.Repository, + }, &repo); err != nil { + r.Log.Error(err, "unable to get Repo") + return err + } + r.Log.V(1).Info("Got Repository", "repo", repo) + env := repo.GetResticEnv(backupConf) + sideCar := corev1.Container{ + Name: formolv1alpha1.SIDECARCONTAINER_NAME, + Image: backupConf.Spec.Image, + Args: []string{"backupsession", "server"}, + Env: append(env, corev1.EnvVar{ + Name: formolv1alpha1.TARGET_NAME, + Value: target.TargetName, + }), + VolumeMounts: []corev1.VolumeMount{}, + } + switch target.TargetKind { + case formolv1alpha1.Deployment: + deployment := &appsv1.Deployment{} + if err := r.Get(r.Context, client.ObjectKey{ + Namespace: backupConf.Namespace, + Name: target.TargetName, + }, deployment); err != nil { + r.Log.Error(err, "cannot get deployment", "Deployment", target.TargetName) + return err + } + if addTags(&deployment.Spec.Template.Spec, target) { + deployment.Spec.Template.Spec.Containers = append(deployment.Spec.Template.Spec.Containers, sideCar) + r.Log.V(1).Info("Updating deployment", "deployment", deployment, "containers", deployment.Spec.Template.Spec.Containers) + if err := r.Update(r.Context, deployment); err != nil { + r.Log.Error(err, "cannot update deployment", "Deployment", deployment) + return err + } + } + } + } + } + + return nil +} diff --git a/controllers/backupconfiguration_controller_sidecar.go~ b/controllers/backupconfiguration_controller_sidecar.go~ new file mode 100644 index 0000000..817691b --- /dev/null +++ b/controllers/backupconfiguration_controller_sidecar.go~ @@ -0,0 +1,134 @@ +package controllers + +import ( + appsv1 "k8s.io/api/apps/v1" + corev1 "k8s.io/api/core/v1" + "sigs.k8s.io/controller-runtime/pkg/client" + + formolv1alpha1 "github.com/desmo999r/formol/api/v1alpha1" +) + +func (r *BackupConfigurationReconciler) DeleteSidecar(backupConf formolv1alpha1.BackupConfiguration) error { + removeTags := func(podSpec *corev1.PodSpec, target formolv1alpha1.Target) { + for i, container := range podSpec.Containers { + for _, targetContainer := range target.Containers { + if targetContainer.Name == container.Name { + if container.Env[len(container.Env)-1].Name == formolv1alpha1.TARGETCONTAINER_TAG { + podSpec.Containers[i].Env = container.Env[:len(container.Env)-1] + } else { + for j, e := range container.Env { + if e.Name == formolv1alpha1.TARGETCONTAINER_TAG { + container.Env[j] = container.Env[len(container.Env)-1] + podSpec.Containers[i].Env = container.Env[:len(container.Env)-1] + break + } + } + } + } + } + } + } + for _, target := range backupConf.Spec.Targets { + switch target.TargetKind { + case formolv1alpha1.Deployment: + deployment := &appsv1.Deployment{} + if err := r.Get(r.Context, client.ObjectKey{ + Namespace: backupConf.Namespace, + Name: target.TargetName, + }, deployment); err != nil { + r.Log.Error(err, "cannot get deployment", "Deployment", target.TargetName) + return err + } + restoreContainers := []corev1.Container{} + for _, container := range deployment.Spec.Template.Spec.Containers { + if container.Name == formolv1alpha1.SIDECARCONTAINER_NAME { + continue + } + 
restoreContainers = append(restoreContainers, container) + } + deployment.Spec.Template.Spec.Containers = restoreContainers + removeTags(&deployment.Spec.Template.Spec, target) + return r.Update(r.Context, deployment) + } + } + + return nil +} + +func (r *BackupConfigurationReconciler) AddSidecar(backupConf formolv1alpha1.BackupConfiguration) error { + // Go through all the 'targets' + // the backupType: Online needs a sidecar container for every single listed 'container' + // if the backupType is something else than Online, the 'container' will still need a sidecar + // if it has 'steps' + addTags := func(podSpec *corev1.PodSpec, target formolv1alpha1.Target) bool { + for i, container := range podSpec.Containers { + if container.Name == formolv1alpha1.SIDECARCONTAINER_NAME { + return false + } + for _, targetContainer := range target.Containers { + if targetContainer.Name == container.Name { + podSpec.Containers[i].Env = append(container.Env, corev1.EnvVar{ + Name: formolv1alpha1.TARGETCONTAINER_TAG, + Value: container.Name, + }) + } + } + } + return true + } + + for _, target := range backupConf.Spec.Targets { + addSidecar := false + for _, targetContainer := range target.Containers { + if len(targetContainer.Steps) > 0 { + addSidecar = true + } + } + if target.BackupType == formolv1alpha1.OnlineKind { + addSidecar = true + } + if addSidecar { + repo := formolv1alpha1.Repo{} + if err := r.Get(r.Context, client.ObjectKey{ + Namespace: backupConf.Namespace, + Name: backupConf.Spec.Repository, + }, &repo); err != nil { + r.Log.Error(err, "unable to get Repo") + return err + } + r.Log.V(1).Info("Got Repository", "repo", repo) + env := repo.GetResticEnv(backupConf) + sideCar := corev1.Container{ + Name: formolv1alpha1.SIDECARCONTAINER_NAME, + Image: backupConf.Spec.Image, + Args: []string{"backupsession", "server"}, + Env: append(env, corev1.EnvVar{ + Name: formolv1alpha1.TARGET_NAME, + Value: target.TargetName, + }), + VolumeMounts: []corev1.VolumeMount{}, + } + switch target.TargetKind { + case formolv1alpha1.Deployment: + deployment := &appsv1.Deployment{} + if err := r.Get(r.Context, client.ObjectKey{ + Namespace: backupConf.Namespace, + Name: target.TargetName, + }, deployment); err != nil { + r.Log.Error(err, "cannot get deployment", "Deployment", target.TargetName) + return err + } + if addTags(&deployment.Spec.Template.Spec, target) { + deployment.Spec.Template.Spec.Containers = append(deployment.Spec.Template.Spec.Containers, sideCar) + r.Log.V(1).Info("Updating deployment", "deployment", deployment, "containers", deployment.Spec.Template.Spec.Containers) + if err := r.Update(r.Context, deployment); err != nil { + r.Log.Error(err, "cannot update deployment", "Deployment", deployment) + return err + } + } + } + } + } + + return nil +} diff --git a/controllers/backupconfiguration_controller_test.go b/controllers/backupconfiguration_controller_test.go index 1bd6ac9..406027f 100644 --- a/controllers/backupconfiguration_controller_test.go +++ b/controllers/backupconfiguration_controller_test.go @@ -1,67 +1,64 @@ +/* +Copyright 2023. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+See the License for the specific language governing permissions and +limitations under the License. +*/ + package controllers import ( "context" - //"k8s.io/apimachinery/pkg/types" - //"reflect" - //"fmt" - "time" - - . "github.com/onsi/ginkgo" - . "github.com/onsi/gomega" - //batchv1 "k8s.io/api/batch/v1" formolv1alpha1 "github.com/desmo999r/formol/api/v1alpha1" - appsv1 "k8s.io/api/apps/v1" - batchv1beta1 "k8s.io/api/batch/v1beta1" - corev1 "k8s.io/api/core/v1" - //"k8s.io/apimachinery/pkg/api/errors" + . "github.com/onsi/ginkgo/v2" + . "github.com/onsi/gomega" + batchv1 "k8s.io/api/batch/v1" + //"time" + //appsv1 "k8s.io/api/apps/v1" + //corev1 "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/types" ) -var _ = Describe("Testing BackupConf controller", func() { - const ( - BCBackupConfName = "test-backupconf-controller" - ) +var _ = Describe("BackupConfiguration controller", func() { + const BACKUPCONF_NAME = "test-backupconf-controller" + var ( - key = types.NamespacedName{ - Name: BCBackupConfName, - Namespace: TestNamespace, - } + backupConf *formolv1alpha1.BackupConfiguration ctx = context.Background() - backupConf = &formolv1alpha1.BackupConfiguration{} + key = types.NamespacedName{ + Name: BACKUPCONF_NAME, + Namespace: NAMESPACE_NAME, + } ) BeforeEach(func() { backupConf = &formolv1alpha1.BackupConfiguration{ ObjectMeta: metav1.ObjectMeta{ - Name: BCBackupConfName, - Namespace: TestNamespace, + Name: BACKUPCONF_NAME, + Namespace: NAMESPACE_NAME, }, Spec: formolv1alpha1.BackupConfigurationSpec{ - Repository: TestRepoName, + Repository: REPO_NAME, Schedule: "1 * * * *", - Image: "desmo999r/formolcli:latest", + Image: "desmo999r/formolcli:v0.3.2", Targets: []formolv1alpha1.Target{ formolv1alpha1.Target{ - Kind: formolv1alpha1.SidecarKind, - Name: TestDeploymentName, - VolumeMounts: []corev1.VolumeMount{ - corev1.VolumeMount{ - Name: TestDataVolume, - MountPath: TestDataMountPath, - }, - }, - Paths: []string{ - TestDataMountPath, - }, - }, - formolv1alpha1.Target{ - Kind: formolv1alpha1.JobKind, - Name: TestBackupFuncName, - Steps: []formolv1alpha1.Step{ - formolv1alpha1.Step{ - Name: TestBackupFuncName, + BackupType: formolv1alpha1.OnlineKind, + TargetKind: formolv1alpha1.Deployment, + TargetName: DEPLOYMENT_NAME, + Containers: []formolv1alpha1.TargetContainer{ + formolv1alpha1.TargetContainer{ + Name: CONTAINER_NAME, }, }, }, @@ -69,7 +66,8 @@ var _ = Describe("Testing BackupConf controller", func() { }, } }) - Context("Creating a backupconf", func() { + + Context("Creating a BackupConf", func() { JustBeforeEach(func() { Eventually(func() error { return k8sClient.Create(ctx, backupConf) @@ -81,97 +79,87 @@ var _ = Describe("Testing BackupConf controller", func() { It("Has a schedule", func() { realBackupConf := &formolv1alpha1.BackupConfiguration{} Eventually(func() bool { - err := k8sClient.Get(ctx, key, realBackupConf) - if err != nil { + if err := k8sClient.Get(ctx, key, realBackupConf); err != nil { return false } return true }, timeout, interval).Should(BeTrue()) Expect(realBackupConf.Spec.Schedule).Should(Equal("1 * * * *")) - Expect(realBackupConf.Spec.Targets[0].Retry).Should(Equal(2)) }) - It("Should also create a CronJob", func() { - cronJob := &batchv1beta1.CronJob{} - Eventually(func() bool { - err := k8sClient.Get(ctx, types.NamespacedName{ - Name: "backup-" + BCBackupConfName, - Namespace: TestNamespace, - }, cronJob) - return err == nil - }, timeout, interval).Should(BeTrue()) - Expect(cronJob.Spec.Schedule).Should(Equal("1 * 
* * *")) - }) - It("Should also create a sidecar container", func() { - realDeployment := &appsv1.Deployment{} - Eventually(func() (int, error) { - err := k8sClient.Get(ctx, types.NamespacedName{ - Name: TestDeploymentName, - Namespace: TestNamespace, - }, realDeployment) - if err != nil { - return 0, err - } - return len(realDeployment.Spec.Template.Spec.Containers), nil - }, timeout, interval).Should(Equal(2)) - }) - It("Should also update the CronJob", func() { + It("Should create a CronJob", func() { realBackupConf := &formolv1alpha1.BackupConfiguration{} - time.Sleep(300 * time.Millisecond) Eventually(func() bool { - err := k8sClient.Get(ctx, key, realBackupConf) - if err != nil { + if err := k8sClient.Get(ctx, key, realBackupConf); err != nil { + return false + } + return realBackupConf.Status.ActiveCronJob + }, timeout, interval).Should(BeTrue()) + cronJob := &batchv1.CronJob{} + Eventually(func() bool { + if err := k8sClient.Get(ctx, types.NamespacedName{ + Name: "backup-" + BACKUPCONF_NAME, + Namespace: NAMESPACE_NAME, + }, cronJob); err != nil { return false } return true }, timeout, interval).Should(BeTrue()) + Expect(cronJob.Spec.Schedule).Should(Equal("1 * * * *")) + }) + It("Should update the CronJob", func() { + realBackupConf := &formolv1alpha1.BackupConfiguration{} + Eventually(func() bool { + if err := k8sClient.Get(ctx, key, realBackupConf); err != nil { + return false + } + return realBackupConf.Status.ActiveCronJob + }, timeout, interval).Should(BeTrue()) realBackupConf.Spec.Schedule = "1 0 * * *" suspend := true realBackupConf.Spec.Suspend = &suspend Expect(k8sClient.Update(ctx, realBackupConf)).Should(Succeed()) - cronJob := &batchv1beta1.CronJob{} - Eventually(func() (string, error) { - err := k8sClient.Get(ctx, types.NamespacedName{ - Name: "backup-" + BCBackupConfName, - Namespace: TestNamespace, - }, cronJob) - if err != nil { - return "", err + cronJob := &batchv1.CronJob{} + Eventually(func() string { + if err := k8sClient.Get(ctx, types.NamespacedName{ + Name: "backup-" + BACKUPCONF_NAME, + Namespace: NAMESPACE_NAME, + }, cronJob); err != nil { + return "" } - return cronJob.Spec.Schedule, nil + return cronJob.Spec.Schedule }, timeout, interval).Should(Equal("1 0 * * *")) - Eventually(func() (bool, error) { - err := k8sClient.Get(ctx, types.NamespacedName{ - Name: "backup-" + BCBackupConfName, - Namespace: TestNamespace, - }, cronJob) - if err != nil { - return false, err - } - return *cronJob.Spec.Suspend == true, nil - }, timeout, interval).Should(BeTrue()) + Expect(*cronJob.Spec.Suspend).Should(BeTrue()) }) }) - Context("Deleting a backupconf", func() { + Context("Deleting a BackupConf", func() { JustBeforeEach(func() { Eventually(func() error { return k8sClient.Create(ctx, backupConf) }, timeout, interval).Should(Succeed()) }) - It("Should also delete the sidecar container", func() { - Expect(k8sClient.Delete(ctx, backupConf)).Should(Succeed()) - realDeployment := &appsv1.Deployment{} - Eventually(func() (int, error) { - err := k8sClient.Get(ctx, types.NamespacedName{ - Name: TestDeploymentName, - Namespace: TestNamespace, - }, realDeployment) - if err != nil { - return 0, err + It("Should delete the CronJob", func() { + cronJob := &batchv1.CronJob{} + Eventually(func() bool { + if err := k8sClient.Get(ctx, types.NamespacedName{ + Name: "backup-" + BACKUPCONF_NAME, + Namespace: NAMESPACE_NAME, + }, cronJob); err != nil { + return false } - return len(realDeployment.Spec.Template.Spec.Containers), nil - }, timeout, interval).Should(Equal(1)) + return true + 
}, timeout, interval).Should(BeTrue()) + By("The CronJob has been created. Now deleting the BackupConfiguration") + Expect(k8sClient.Delete(ctx, backupConf)).Should(Succeed()) + Eventually(func() bool { + if err := k8sClient.Get(ctx, types.NamespacedName{ + Name: "backup-" + BACKUPCONF_NAME, + Namespace: NAMESPACE_NAME, + }, cronJob); err != nil { + return false + } + return true + }, timeout, interval).Should(BeFalse()) }) }) - }) diff --git a/controllers/backupconfiguration_controller_test.go~ b/controllers/backupconfiguration_controller_test.go~ new file mode 100644 index 0000000..44eb982 --- /dev/null +++ b/controllers/backupconfiguration_controller_test.go~ @@ -0,0 +1,165 @@ +/* +Copyright 2023. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package controllers + +import ( + "context" + formolv1alpha1 "github.com/desmo999r/formol/api/v1alpha1" + . "github.com/onsi/ginkgo/v2" + . "github.com/onsi/gomega" + batchv1 "k8s.io/api/batch/v1" + //"time" + //appsv1 "k8s.io/api/apps/v1" + //corev1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/types" +) + +var _ = Describe("BackupConfiguration controller", func() { + const BACKUPCONF_NAME = "test-backupconf-controller" + + var ( + backupConf *formolv1alpha1.BackupConfiguration + ctx = context.Background() + key = types.NamespacedName{ + Name: BACKUPCONF_NAME, + Namespace: NAMESPACE_NAME, + } + ) + + BeforeEach(func() { + backupConf = &formolv1alpha1.BackupConfiguration{ + ObjectMeta: metav1.ObjectMeta{ + Name: BACKUPCONF_NAME, + Namespace: NAMESPACE_NAME, + }, + Spec: formolv1alpha1.BackupConfigurationSpec{ + Repository: REPO_NAME, + Schedule: "1 * * * *", + Image: "desmo999r/formolcli:v0.3.2", + Targets: []formolv1alpha1.Target{ + formolv1alpha1.Target{ + BackupType: formolv1alpha1.OnlineKind, + TargetKind: formolv1alpha1.Deployment, + TargetName: DEPLOYMENT_NAME, + Containers: []formolv1alpha1.TargetContainer{ + formolv1alpha1.Container{ + Name: CONTAINER_NAME, + }, + }, + }, + }, + }, + } + }) + + Context("Creating a BackupConf", func() { + JustBeforeEach(func() { + Eventually(func() error { + return k8sClient.Create(ctx, backupConf) + }, timeout, interval).Should(Succeed()) + }) + AfterEach(func() { + Expect(k8sClient.Delete(ctx, backupConf)).Should(Succeed()) + }) + It("Has a schedule", func() { + realBackupConf := &formolv1alpha1.BackupConfiguration{} + Eventually(func() bool { + if err := k8sClient.Get(ctx, key, realBackupConf); err != nil { + return false + } + return true + }, timeout, interval).Should(BeTrue()) + Expect(realBackupConf.Spec.Schedule).Should(Equal("1 * * * *")) + }) + It("Should create a CronJob", func() { + realBackupConf := &formolv1alpha1.BackupConfiguration{} + Eventually(func() bool { + if err := k8sClient.Get(ctx, key, realBackupConf); err != nil { + return false + } + return realBackupConf.Status.ActiveCronJob + }, timeout, interval).Should(BeTrue()) + cronJob := &batchv1.CronJob{} + Eventually(func() bool { + if err := k8sClient.Get(ctx, types.NamespacedName{ + Name: "backup-" + 
BACKUPCONF_NAME, + Namespace: NAMESPACE_NAME, + }, cronJob); err != nil { + return false + } + return true + }, timeout, interval).Should(BeTrue()) + Expect(cronJob.Spec.Schedule).Should(Equal("1 * * * *")) + }) + It("Should update the CronJob", func() { + realBackupConf := &formolv1alpha1.BackupConfiguration{} + Eventually(func() bool { + if err := k8sClient.Get(ctx, key, realBackupConf); err != nil { + return false + } + return realBackupConf.Status.ActiveCronJob + }, timeout, interval).Should(BeTrue()) + realBackupConf.Spec.Schedule = "1 0 * * *" + suspend := true + realBackupConf.Spec.Suspend = &suspend + Expect(k8sClient.Update(ctx, realBackupConf)).Should(Succeed()) + cronJob := &batchv1.CronJob{} + Eventually(func() string { + if err := k8sClient.Get(ctx, types.NamespacedName{ + Name: "backup-" + BACKUPCONF_NAME, + Namespace: NAMESPACE_NAME, + }, cronJob); err != nil { + return "" + } + return cronJob.Spec.Schedule + }, timeout, interval).Should(Equal("1 0 * * *")) + Expect(*cronJob.Spec.Suspend).Should(BeTrue()) + }) + }) + Context("Deleting a BackupConf", func() { + JustBeforeEach(func() { + Eventually(func() error { + return k8sClient.Create(ctx, backupConf) + }, timeout, interval).Should(Succeed()) + }) + It("Should delete the CronJob", func() { + cronJob := &batchv1.CronJob{} + Eventually(func() bool { + if err := k8sClient.Get(ctx, types.NamespacedName{ + Name: "backup-" + BACKUPCONF_NAME, + Namespace: NAMESPACE_NAME, + }, cronJob); err != nil { + return false + } + return true + }, timeout, interval).Should(BeTrue()) + By("The CronJob has been created. Now deleting the BackupConfiguration") + Expect(k8sClient.Delete(ctx, backupConf)).Should(Succeed()) + Eventually(func() bool { + if err := k8sClient.Get(ctx, types.NamespacedName{ + Name: "backup-" + BACKUPCONF_NAME, + Namespace: NAMESPACE_NAME, + }, cronJob); err != nil { + return false + } + return true + }, timeout, interval).Should(BeFalse()) + + }) + }) +}) diff --git a/controllers/backupsession_controller.go b/controllers/backupsession_controller.go index 029a15c..79ba0c3 100644 --- a/controllers/backupsession_controller.go +++ b/controllers/backupsession_controller.go @@ -1,5 +1,5 @@ /* - +Copyright 2023. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. 
@@ -18,458 +18,49 @@ package controllers import ( "context" - "fmt" - "sort" - "strings" - "time" "github.com/go-logr/logr" - batchv1 "k8s.io/api/batch/v1" - corev1 "k8s.io/api/core/v1" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/apimachinery/pkg/fields" "k8s.io/apimachinery/pkg/runtime" ctrl "sigs.k8s.io/controller-runtime" "sigs.k8s.io/controller-runtime/pkg/client" - "sigs.k8s.io/controller-runtime/pkg/controller/controllerutil" - "sigs.k8s.io/controller-runtime/pkg/reconcile" + "sigs.k8s.io/controller-runtime/pkg/log" formolv1alpha1 "github.com/desmo999r/formol/api/v1alpha1" - formolutils "github.com/desmo999r/formol/pkg/utils" -) - -const ( - sessionState string = ".metadata.state" - finalizerName string = "finalizer.backupsession.formol.desmojim.fr" - JOBTTL int32 = 7200 ) // BackupSessionReconciler reconciles a BackupSession object type BackupSessionReconciler struct { client.Client - Log logr.Logger Scheme *runtime.Scheme + Log logr.Logger + context.Context } -var _ reconcile.Reconciler = &BackupSessionReconciler{} +//+kubebuilder:rbac:groups=formol.desmojim.fr,resources=backupsessions,verbs=get;list;watch;create;update;patch;delete +//+kubebuilder:rbac:groups=formol.desmojim.fr,resources=backupsessions/status,verbs=get;update;patch +//+kubebuilder:rbac:groups=formol.desmojim.fr,resources=backupsessions/finalizers,verbs=update -// +kubebuilder:rbac:groups=formol.desmojim.fr,resources=backupsessions,verbs=get;list;watch;create;update;patch;delete -// +kubebuilder:rbac:groups=formol.desmojim.fr,resources=backupsessions/status,verbs=get;update;patch;create;delete -// +kubebuilder:rbac:groups=formol.desmojim.fr,resources=functions,verbs=get;list;watch -// +kubebuilder:rbac:groups=batch,resources=jobs,verbs=get;list;create;update;patch;delete;watch -// +kubebuilder:rbac:groups=coordination.k8s.io,resources=leases,verbs=get;list;create;update +// Reconcile is part of the main kubernetes reconciliation loop which aims to +// move the current state of the cluster closer to the desired state. +// TODO(user): Modify the Reconcile function to compare the state specified by +// the BackupSession object against the actual cluster state, and then +// perform operations to make the cluster state reflect the state specified by +// the user. 
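+//
+// As a hedged sketch only (this snippet is not taken from the patch): the usual
+// first step of a reconciler is to fetch the object and ignore NotFound errors,
+// so that objects deleted in the meantime are not requeued. client.IgnoreNotFound
+// is provided by controller-runtime:
+//
+//	backupSession := formolv1alpha1.BackupSession{}
+//	if err := r.Get(ctx, req.NamespacedName, &backupSession); err != nil {
+//		return ctrl.Result{}, client.IgnoreNotFound(err)
+//	}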
+// +// For more details, check Reconcile and its Result here: +// - https://pkg.go.dev/sigs.k8s.io/controller-runtime@v0.13.1/pkg/reconcile +func (r *BackupSessionReconciler) Reconcile(ctx context.Context, req ctrl.Request) (ctrl.Result, error) { + r.Log = log.FromContext(ctx) + r.Context = ctx -func (r *BackupSessionReconciler) Reconcile(ctx context.Context, req reconcile.Request) (reconcile.Result, error) { - log := r.Log.WithValues("backupsession", req.NamespacedName) + r.Log.V(1).Info("Enter Reconcile with req", "req", req, "reconciler", r) - backupSession := &formolv1alpha1.BackupSession{} - if err := r.Get(ctx, req.NamespacedName, backupSession); err != nil { - log.Error(err, "unable to get backupsession") - return reconcile.Result{}, client.IgnoreNotFound(err) - } - backupConf := &formolv1alpha1.BackupConfiguration{} - if err := r.Get(ctx, client.ObjectKey{ - Namespace: backupSession.Namespace, - Name: backupSession.Spec.Ref.Name, - }, backupConf); err != nil { - log.Error(err, "unable to get backupConfiguration") - return reconcile.Result{}, client.IgnoreNotFound(err) - } - - // helper functions - // is there a backup operation ongoing - isBackupOngoing := func() bool { - backupSessionList := &formolv1alpha1.BackupSessionList{} - if err := r.List(ctx, backupSessionList, client.InNamespace(backupConf.Namespace), client.MatchingFieldsSelector{Selector: fields.SelectorFromSet(fields.Set{sessionState: "Running"})}); err != nil { - log.Error(err, "unable to get backupsessionlist") - return true - } - return len(backupSessionList.Items) > 0 - } - - // delete session specific backup resources - deleteExternalResources := func() error { - log := r.Log.WithValues("deleteExternalResources", backupSession.Name) - // Gather information from the repo - repo := &formolv1alpha1.Repo{} - if err := r.Get(ctx, client.ObjectKey{ - Namespace: backupConf.Namespace, - Name: backupConf.Spec.Repository, - }, repo); err != nil { - log.Error(err, "unable to get Repo from BackupConfiguration") - return err - } - env := formolutils.ConfigureResticEnvVar(backupConf, repo) - // container that will delete the restic snapshot(s) matching the backupsession - deleteSnapshots := []corev1.Container{} - for _, target := range backupSession.Status.Targets { - if target.SessionState == formolv1alpha1.Success { - deleteSnapshots = append(deleteSnapshots, corev1.Container{ - Name: target.Name, - Image: backupConf.Spec.Image, - Args: []string{"snapshot", "delete", "--snapshot-id", target.SnapshotId}, - Env: env, - }) - } - } - // create a job to delete the restic snapshot(s) with the backupsession name tag - if len(deleteSnapshots) > 0 { - jobTtl := JOBTTL - job := &batchv1.Job{ - ObjectMeta: metav1.ObjectMeta{ - GenerateName: fmt.Sprintf("delete-%s-", backupSession.Name), - Namespace: backupSession.Namespace, - }, - Spec: batchv1.JobSpec{ - TTLSecondsAfterFinished: &jobTtl, - Template: corev1.PodTemplateSpec{ - Spec: corev1.PodSpec{ - InitContainers: deleteSnapshots[1:], - Containers: []corev1.Container{deleteSnapshots[0]}, - RestartPolicy: corev1.RestartPolicyOnFailure, - }, - }, - }, - } - log.V(0).Info("creating a job to delete restic snapshots") - if err := r.Create(ctx, job); err != nil { - log.Error(err, "unable to delete job", "job", job) - return err - } - } - return nil - } - - // create a backup job - createBackupJob := func(target formolv1alpha1.Target) error { - log := r.Log.WithValues("createbackupjob", target.Name) - ctx := context.Background() - backupSessionEnv := []corev1.EnvVar{ - corev1.EnvVar{ - Name: 
"TARGET_NAME", - Value: target.Name, - }, - corev1.EnvVar{ - Name: "BACKUPSESSION_NAME", - Value: backupSession.Name, - }, - corev1.EnvVar{ - Name: "BACKUPSESSION_NAMESPACE", - Value: backupSession.Namespace, - }, - } - - output := corev1.VolumeMount{ - Name: "output", - MountPath: "/output", - } - restic := corev1.Container{ - Name: "restic", - Image: backupConf.Spec.Image, - Args: []string{"volume", "backup", "--tag", backupSession.Name, "--path", "/output"}, - VolumeMounts: []corev1.VolumeMount{output}, - Env: backupSessionEnv, - } - log.V(1).Info("creating a tagged backup job", "container", restic) - // Gather information from the repo - repo := &formolv1alpha1.Repo{} - if err := r.Get(ctx, client.ObjectKey{ - Namespace: backupConf.Namespace, - Name: backupConf.Spec.Repository, - }, repo); err != nil { - log.Error(err, "unable to get Repo from BackupConfiguration") - return err - } - // S3 backing storage - restic.Env = append(restic.Env, formolutils.ConfigureResticEnvVar(backupConf, repo)...) - jobTtl := JOBTTL - job := &batchv1.Job{ - ObjectMeta: metav1.ObjectMeta{ - GenerateName: fmt.Sprintf("%s-%s-", backupSession.Name, target.Name), - Namespace: backupConf.Namespace, - }, - Spec: batchv1.JobSpec{ - TTLSecondsAfterFinished: &jobTtl, - Template: corev1.PodTemplateSpec{ - Spec: corev1.PodSpec{ - InitContainers: []corev1.Container{}, - Containers: []corev1.Container{restic}, - Volumes: []corev1.Volume{ - corev1.Volume{Name: "output"}, - }, - RestartPolicy: corev1.RestartPolicyOnFailure, - }, - }, - }, - } - for _, step := range target.Steps { - function := &formolv1alpha1.Function{} - if err := r.Get(ctx, client.ObjectKey{ - Namespace: backupConf.Namespace, - Name: step.Name, - }, function); err != nil { - log.Error(err, "unable to get function", "Function", step) - return err - } - function.Spec.Name = function.Name - function.Spec.Env = append(function.Spec.Env, backupSessionEnv...) 
- function.Spec.VolumeMounts = append(function.Spec.VolumeMounts, output) - job.Spec.Template.Spec.InitContainers = append(job.Spec.Template.Spec.InitContainers, function.Spec) - } - if err := ctrl.SetControllerReference(backupConf, job, r.Scheme); err != nil { - log.Error(err, "unable to set controller on job", "job", job, "backupconf", backupConf) - return err - } - log.V(0).Info("creating a backup job", "target", target) - if err := r.Create(ctx, job); err != nil { - log.Error(err, "unable to create job", "job", job) - return err - } - return nil - } - - // start the next task - startNextTask := func() (*formolv1alpha1.TargetStatus, error) { - nextTarget := len(backupSession.Status.Targets) - if nextTarget < len(backupConf.Spec.Targets) { - target := backupConf.Spec.Targets[nextTarget] - targetStatus := formolv1alpha1.TargetStatus{ - Name: target.Name, - Kind: target.Kind, - SessionState: formolv1alpha1.New, - StartTime: &metav1.Time{Time: time.Now()}, - Try: 1, - } - backupSession.Status.Targets = append(backupSession.Status.Targets, targetStatus) - switch target.Kind { - case formolv1alpha1.JobKind: - if err := createBackupJob(target); err != nil { - log.V(0).Info("unable to create task", "task", target) - targetStatus.SessionState = formolv1alpha1.Failure - return nil, err - } - } - return &targetStatus, nil - } else { - return nil, nil - } - } - - // cleanup existing backupsessions - cleanupSessions := func() { - backupSessionList := &formolv1alpha1.BackupSessionList{} - if err := r.List(ctx, backupSessionList, client.InNamespace(backupConf.Namespace), client.MatchingFieldsSelector{Selector: fields.SelectorFromSet(fields.Set{sessionState: string(formolv1alpha1.Success)})}); err != nil { - log.Error(err, "unable to get backupsessionlist") - return - } - if len(backupSessionList.Items) < 2 { - // Not enough backupSession to proceed - log.V(1).Info("Not enough successful backup jobs") - return - } - - sort.Slice(backupSessionList.Items, func(i, j int) bool { - return backupSessionList.Items[i].Status.StartTime.Time.Unix() > backupSessionList.Items[j].Status.StartTime.Time.Unix() - }) - - type KeepBackup struct { - Counter int32 - Last time.Time - } - - var lastBackups, dailyBackups, weeklyBackups, monthlyBackups, yearlyBackups KeepBackup - lastBackups.Counter = backupConf.Spec.Keep.Last - dailyBackups.Counter = backupConf.Spec.Keep.Daily - weeklyBackups.Counter = backupConf.Spec.Keep.Weekly - monthlyBackups.Counter = backupConf.Spec.Keep.Monthly - yearlyBackups.Counter = backupConf.Spec.Keep.Yearly - for _, session := range backupSessionList.Items { - if session.Spec.Ref.Name != backupConf.Name { - continue - } - deleteSession := true - keep := []string{} - if lastBackups.Counter > 0 { - log.V(1).Info("Keep backup", "last", session.Status.StartTime) - lastBackups.Counter-- - keep = append(keep, "last") - deleteSession = false - } - if dailyBackups.Counter > 0 { - if session.Status.StartTime.Time.YearDay() != dailyBackups.Last.YearDay() { - log.V(1).Info("Keep backup", "daily", session.Status.StartTime) - dailyBackups.Counter-- - dailyBackups.Last = session.Status.StartTime.Time - keep = append(keep, "daily") - deleteSession = false - } - } - if weeklyBackups.Counter > 0 { - if session.Status.StartTime.Time.Weekday().String() == "Sunday" && session.Status.StartTime.Time.YearDay() != weeklyBackups.Last.YearDay() { - log.V(1).Info("Keep backup", "weekly", session.Status.StartTime) - weeklyBackups.Counter-- - weeklyBackups.Last = session.Status.StartTime.Time - keep = append(keep, 
"weekly") - deleteSession = false - } - } - if monthlyBackups.Counter > 0 { - if session.Status.StartTime.Time.Day() == 1 && session.Status.StartTime.Time.Month() != monthlyBackups.Last.Month() { - log.V(1).Info("Keep backup", "monthly", session.Status.StartTime) - monthlyBackups.Counter-- - monthlyBackups.Last = session.Status.StartTime.Time - keep = append(keep, "monthly") - deleteSession = false - } - } - if yearlyBackups.Counter > 0 { - if session.Status.StartTime.Time.YearDay() == 1 && session.Status.StartTime.Time.Year() != yearlyBackups.Last.Year() { - log.V(1).Info("Keep backup", "yearly", session.Status.StartTime) - yearlyBackups.Counter-- - yearlyBackups.Last = session.Status.StartTime.Time - keep = append(keep, "yearly") - deleteSession = false - } - } - if deleteSession { - log.V(1).Info("Delete session", "delete", session.Status.StartTime) - if err := r.Delete(ctx, &session); err != nil { - log.Error(err, "unable to delete backupsession", "session", session.Name) - // we don't return anything, we keep going - } - } else { - session.Status.Keep = strings.Join(keep, ",") // + " " + time.Now().Format("2006 Jan 02 15:04:05 -0700 MST") - if err := r.Status().Update(ctx, &session); err != nil { - log.Error(err, "unable to update session status", "session", session) - } - } - } - } - // end helper functions - - log.V(0).Info("backupSession", "backupSession.ObjectMeta", backupSession.ObjectMeta, "backupSession.Status", backupSession.Status) - if backupSession.ObjectMeta.DeletionTimestamp.IsZero() { - switch backupSession.Status.SessionState { - case formolv1alpha1.New: - // Check if the finalizer has been registered - if !controllerutil.ContainsFinalizer(backupSession, finalizerName) { - controllerutil.AddFinalizer(backupSession, finalizerName) - // We update the BackupSession to add the finalizer - // Reconcile will be called again - // return now - err := r.Update(ctx, backupSession) - if err != nil { - log.Error(err, "unable to add finalizer") - } - return reconcile.Result{}, err - } - // Brand new backupsession - if isBackupOngoing() { - log.V(0).Info("There is an ongoing backup. Let's reschedule this operation") - return reconcile.Result{RequeueAfter: 30 * time.Second}, nil - } - // start the first task - backupSession.Status.SessionState = formolv1alpha1.Running - targetStatus, err := startNextTask() - if err != nil { - return reconcile.Result{}, err - } - log.V(0).Info("New backup. Start the first task", "task", targetStatus) - if err := r.Status().Update(ctx, backupSession); err != nil { - log.Error(err, "unable to update BackupSession status") - return reconcile.Result{}, err - } - case formolv1alpha1.Running: - // Backup ongoing. Check the status of the last task to decide what to do - currentTargetStatus := &backupSession.Status.Targets[len(backupSession.Status.Targets)-1] - switch currentTargetStatus.SessionState { - case formolv1alpha1.Running: - // The current task is still running. Nothing to do - log.V(0).Info("task is still running", "targetStatus", currentTargetStatus) - case formolv1alpha1.Success: - // The last task succeed. Let's try to start the next one - targetStatus, err := startNextTask() - log.V(0).Info("last task was a success. start a new one", "currentTargetStatus", currentTargetStatus, "targetStatus", targetStatus) - if err != nil { - return reconcile.Result{}, err - } - if targetStatus == nil { - // No more task to start. The backup is a success - backupSession.Status.SessionState = formolv1alpha1.Success - log.V(0).Info("Backup is successful. 
Let's try to do some cleanup") - cleanupSessions() - } - if err := r.Status().Update(ctx, backupSession); err != nil { - log.Error(err, "unable to update BackupSession status") - return reconcile.Result{}, err - } - case formolv1alpha1.Failure: - // last task failed. Try to run it again - currentTarget := backupConf.Spec.Targets[len(backupSession.Status.Targets)-1] - if currentTargetStatus.Try < currentTarget.Retry { - log.V(0).Info("last task was a failure. try again", "currentTargetStatus", currentTargetStatus) - currentTargetStatus.Try++ - currentTargetStatus.SessionState = formolv1alpha1.New - currentTargetStatus.StartTime = &metav1.Time{Time: time.Now()} - switch currentTarget.Kind { - case formolv1alpha1.JobKind: - if err := createBackupJob(currentTarget); err != nil { - log.V(0).Info("unable to create task", "task", currentTarget) - currentTargetStatus.SessionState = formolv1alpha1.Failure - return reconcile.Result{}, err - } - } - } else { - log.V(0).Info("task failed again and for the last time", "currentTargetStatus", currentTargetStatus) - backupSession.Status.SessionState = formolv1alpha1.Failure - } - if err := r.Status().Update(ctx, backupSession); err != nil { - log.Error(err, "unable to update BackupSession status") - return reconcile.Result{}, err - } - } - case formolv1alpha1.Success: - // Should never go there - case formolv1alpha1.Failure: - // The backup failed - case "": - // BackupSession has just been created - backupSession.Status.SessionState = formolv1alpha1.New - backupSession.Status.StartTime = &metav1.Time{Time: time.Now()} - if err := r.Status().Update(ctx, backupSession); err != nil { - log.Error(err, "unable to update backupSession") - return reconcile.Result{}, err - } - } - } else { - log.V(0).Info("backupsession being deleted", "backupsession", backupSession.Name) - if controllerutil.ContainsFinalizer(backupSession, finalizerName) { - if err := deleteExternalResources(); err != nil { - return reconcile.Result{}, err - } - } - controllerutil.RemoveFinalizer(backupSession, finalizerName) - if err := r.Update(ctx, backupSession); err != nil { - log.Error(err, "unable to remove finalizer") - return reconcile.Result{}, err - } - // We have been deleted. Return here - return reconcile.Result{}, nil - } - return reconcile.Result{}, nil + return ctrl.Result{}, nil } +// SetupWithManager sets up the controller with the Manager. func (r *BackupSessionReconciler) SetupWithManager(mgr ctrl.Manager) error { - if err := mgr.GetFieldIndexer().IndexField(context.TODO(), &formolv1alpha1.BackupSession{}, sessionState, func(rawObj client.Object) []string { - session := rawObj.(*formolv1alpha1.BackupSession) - return []string{string(session.Status.SessionState)} - }); err != nil { - return err - } - return ctrl.NewControllerManagedBy(mgr). For(&formolv1alpha1.BackupSession{}). - //WithEventFilter(predicate.GenerationChangedPredicate{}). // Don't reconcile when status gets updated - Owns(&batchv1.Job{}). Complete(r) } diff --git a/controllers/backupsession_controller.go~ b/controllers/backupsession_controller.go~ new file mode 100644 index 0000000..64a8b06 --- /dev/null +++ b/controllers/backupsession_controller.go~ @@ -0,0 +1,62 @@ +/* +Copyright 2023. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package controllers
+
+import (
+	"context"
+
+	"k8s.io/apimachinery/pkg/runtime"
+	ctrl "sigs.k8s.io/controller-runtime"
+	"sigs.k8s.io/controller-runtime/pkg/client"
+	"sigs.k8s.io/controller-runtime/pkg/log"
+
+	formolv1alpha1 "github.com/desmo999r/formol/api/v1alpha1"
+)
+
+// BackupSessionReconciler reconciles a BackupSession object
+type BackupSessionReconciler struct {
+	client.Client
+	Scheme *runtime.Scheme
+}
+
+//+kubebuilder:rbac:groups=formol.desmojim.fr,resources=backupsessions,verbs=get;list;watch;create;update;patch;delete
+//+kubebuilder:rbac:groups=formol.desmojim.fr,resources=backupsessions/status,verbs=get;update;patch
+//+kubebuilder:rbac:groups=formol.desmojim.fr,resources=backupsessions/finalizers,verbs=update
+
+// Reconcile is part of the main kubernetes reconciliation loop which aims to
+// move the current state of the cluster closer to the desired state.
+// TODO(user): Modify the Reconcile function to compare the state specified by
+// the BackupSession object against the actual cluster state, and then
+// perform operations to make the cluster state reflect the state specified by
+// the user.
+//
+// For more details, check Reconcile and its Result here:
+// - https://pkg.go.dev/sigs.k8s.io/controller-runtime@v0.13.1/pkg/reconcile
+func (r *BackupSessionReconciler) Reconcile(ctx context.Context, req ctrl.Request) (ctrl.Result, error) {
+	_ = log.FromContext(ctx)
+
+	// TODO(user): your logic here
+
+	return ctrl.Result{}, nil
+}
+
+// SetupWithManager sets up the controller with the Manager.
+func (r *BackupSessionReconciler) SetupWithManager(mgr ctrl.Manager) error {
+	return ctrl.NewControllerManagedBy(mgr).
+		For(&formolv1alpha1.BackupSession{}).
+		Complete(r)
+}
diff --git a/controllers/backupsession_controller_test.go b/controllers/backupsession_controller_test.go
deleted file mode 100644
index b283522..0000000
--- a/controllers/backupsession_controller_test.go
+++ /dev/null
@@ -1,147 +0,0 @@
-package controllers
-
-import (
-	"context"
-	formolv1alpha1 "github.com/desmo999r/formol/api/v1alpha1"
-	. "github.com/onsi/ginkgo"
-	. 
"github.com/onsi/gomega" - //corev1 "k8s.io/api/core/v1" - corev1 "k8s.io/api/core/v1" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/apimachinery/pkg/types" -) - -var _ = Describe("Testing BackupSession controller", func() { - const ( - BSBackupSessionName = "test-backupsession-controller" - ) - var ( - ctx = context.Background() - key = types.NamespacedName{ - Name: BSBackupSessionName, - Namespace: TestNamespace, - } - backupSession = &formolv1alpha1.BackupSession{} - ) - BeforeEach(func() { - backupSession = &formolv1alpha1.BackupSession{ - ObjectMeta: metav1.ObjectMeta{ - Name: BSBackupSessionName, - Namespace: TestNamespace, - }, - Spec: formolv1alpha1.BackupSessionSpec{ - Ref: corev1.ObjectReference{ - Name: TestBackupConfName, - }, - }, - } - }) - Context("Creating a backupsession", func() { - JustBeforeEach(func() { - Eventually(func() error { - return k8sClient.Create(ctx, backupSession) - }, timeout, interval).Should(Succeed()) - realBackupSession := &formolv1alpha1.BackupSession{} - Eventually(func() error { - err := k8sClient.Get(ctx, key, realBackupSession) - return err - }, timeout, interval).Should(Succeed()) - Eventually(func() formolv1alpha1.SessionState { - if err := k8sClient.Get(ctx, key, realBackupSession); err != nil { - return "" - } else { - return realBackupSession.Status.SessionState - } - }, timeout, interval).Should(Equal(formolv1alpha1.Running)) - }) - AfterEach(func() { - Expect(k8sClient.Delete(ctx, backupSession)).Should(Succeed()) - }) - - It("Should have a new task", func() { - realBackupSession := &formolv1alpha1.BackupSession{} - _ = k8sClient.Get(ctx, key, realBackupSession) - Expect(realBackupSession.Status.Targets[0].Name).Should(Equal(TestDeploymentName)) - Expect(realBackupSession.Status.Targets[0].SessionState).Should(Equal(formolv1alpha1.New)) - Expect(realBackupSession.Status.Targets[0].Kind).Should(Equal(formolv1alpha1.SidecarKind)) - Expect(realBackupSession.Status.Targets[0].Try).Should(Equal(1)) - }) - - It("Should move to the next task when the first one is a success", func() { - realBackupSession := &formolv1alpha1.BackupSession{} - Expect(k8sClient.Get(ctx, key, realBackupSession)).Should(Succeed()) - realBackupSession.Status.Targets[0].SessionState = formolv1alpha1.Success - Expect(k8sClient.Status().Update(ctx, realBackupSession)).Should(Succeed()) - Eventually(func() int { - _ = k8sClient.Get(ctx, key, realBackupSession) - return len(realBackupSession.Status.Targets) - }, timeout, interval).Should(Equal(2)) - Expect(k8sClient.Get(ctx, key, realBackupSession)).Should(Succeed()) - Expect(realBackupSession.Status.Targets[1].Name).Should(Equal(TestBackupFuncName)) - Expect(realBackupSession.Status.Targets[1].SessionState).Should(Equal(formolv1alpha1.New)) - Expect(realBackupSession.Status.Targets[1].Kind).Should(Equal(formolv1alpha1.JobKind)) - }) - - It("Should be a success when the last task is a success", func() { - realBackupSession := &formolv1alpha1.BackupSession{} - Expect(k8sClient.Get(ctx, key, realBackupSession)).Should(Succeed()) - realBackupSession.Status.Targets[0].SessionState = formolv1alpha1.Success - Expect(k8sClient.Status().Update(ctx, realBackupSession)).Should(Succeed()) - Eventually(func() int { - _ = k8sClient.Get(ctx, key, realBackupSession) - return len(realBackupSession.Status.Targets) - }, timeout, interval).Should(Equal(2)) - Expect(k8sClient.Get(ctx, key, realBackupSession)).Should(Succeed()) - realBackupSession.Status.Targets[1].SessionState = formolv1alpha1.Success - 
Expect(k8sClient.Status().Update(ctx, realBackupSession)).Should(Succeed()) - Expect(k8sClient.Get(ctx, key, realBackupSession)).Should(Succeed()) - Eventually(func() formolv1alpha1.SessionState { - _ = k8sClient.Get(ctx, key, realBackupSession) - return realBackupSession.Status.SessionState - }, timeout, interval).Should(Equal(formolv1alpha1.Success)) - }) - - It("Should retry when the task is a failure", func() { - realBackupSession := &formolv1alpha1.BackupSession{} - Expect(k8sClient.Get(ctx, key, realBackupSession)).Should(Succeed()) - realBackupSession.Status.Targets[0].SessionState = formolv1alpha1.Success - Expect(k8sClient.Status().Update(ctx, realBackupSession)).Should(Succeed()) - Eventually(func() int { - _ = k8sClient.Get(ctx, key, realBackupSession) - return len(realBackupSession.Status.Targets) - }, timeout, interval).Should(Equal(2)) - Expect(k8sClient.Get(ctx, key, realBackupSession)).Should(Succeed()) - realBackupSession.Status.Targets[1].SessionState = formolv1alpha1.Failure - Expect(k8sClient.Status().Update(ctx, realBackupSession)).Should(Succeed()) - Eventually(func() int { - _ = k8sClient.Get(ctx, key, realBackupSession) - return realBackupSession.Status.Targets[1].Try - }, timeout, interval).Should(Equal(2)) - Expect(k8sClient.Get(ctx, key, realBackupSession)).Should(Succeed()) - Expect(realBackupSession.Status.Targets[1].SessionState).Should(Equal(formolv1alpha1.New)) - realBackupSession.Status.Targets[1].SessionState = formolv1alpha1.Failure - Expect(k8sClient.Status().Update(ctx, realBackupSession)).Should(Succeed()) - Eventually(func() formolv1alpha1.SessionState { - _ = k8sClient.Get(ctx, key, realBackupSession) - return realBackupSession.Status.SessionState - }, timeout, interval).Should(Equal(formolv1alpha1.Failure)) - }) - - It("should create a backup job", func() { - }) - }) - Context("When other BackupSession exist", func() { - const ( - bs1Name = "test-backupsession-controller1" - bs2Name = "test-backupsession-controller2" - bs3Name = "test-backupsession-controller3" - ) - var () - BeforeEach(func() { - }) - JustBeforeEach(func() { - }) - It("Should clean up old sessions", func() { - }) - }) -}) diff --git a/controllers/restoresession_controller.go b/controllers/restoresession_controller.go index 7a125d0..b9d8da1 100644 --- a/controllers/restoresession_controller.go +++ b/controllers/restoresession_controller.go @@ -1,5 +1,5 @@ /* - +Copyright 2023. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. 
@@ -18,429 +18,45 @@ package controllers import ( "context" - "fmt" - "strings" - "time" - "github.com/go-logr/logr" - appsv1 "k8s.io/api/apps/v1" - batchv1 "k8s.io/api/batch/v1" - corev1 "k8s.io/api/core/v1" - "k8s.io/apimachinery/pkg/api/errors" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/runtime" ctrl "sigs.k8s.io/controller-runtime" "sigs.k8s.io/controller-runtime/pkg/client" "sigs.k8s.io/controller-runtime/pkg/log" - "sigs.k8s.io/controller-runtime/pkg/reconcile" formolv1alpha1 "github.com/desmo999r/formol/api/v1alpha1" - formolutils "github.com/desmo999r/formol/pkg/utils" -) - -const ( - RESTORESESSION string = "restoresession" - UPDATESTATUS string = "updatestatus" - jobOwnerKey string = ".metadata.controller" ) // RestoreSessionReconciler reconciles a RestoreSession object type RestoreSessionReconciler struct { client.Client - Log logr.Logger Scheme *runtime.Scheme } -var _ reconcile.Reconciler = &RestoreSessionReconciler{} +//+kubebuilder:rbac:groups=formol.desmojim.fr,resources=restoresessions,verbs=get;list;watch;create;update;patch;delete +//+kubebuilder:rbac:groups=formol.desmojim.fr,resources=restoresessions/status,verbs=get;update;patch +//+kubebuilder:rbac:groups=formol.desmojim.fr,resources=restoresessions/finalizers,verbs=update -// +kubebuilder:rbac:groups=formol.desmojim.fr,resources=restoresessions,verbs=get;list;watch;create;update;patch;delete -// +kubebuilder:rbac:groups=formol.desmojim.fr,resources=restoresessions/status,verbs=get;update;patch -// +kubebuilder:rbac:groups=coordination.k8s.io,resources=leases,verbs=get;list;create;update +// Reconcile is part of the main kubernetes reconciliation loop which aims to +// move the current state of the cluster closer to the desired state. +// TODO(user): Modify the Reconcile function to compare the state specified by +// the RestoreSession object against the actual cluster state, and then +// perform operations to make the cluster state reflect the state specified by +// the user. 
+// +// For more details, check Reconcile and its Result here: +// - https://pkg.go.dev/sigs.k8s.io/controller-runtime@v0.13.1/pkg/reconcile +func (r *RestoreSessionReconciler) Reconcile(ctx context.Context, req ctrl.Request) (ctrl.Result, error) { + _ = log.FromContext(ctx) -func (r *RestoreSessionReconciler) Reconcile(ctx context.Context, req reconcile.Request) (reconcile.Result, error) { - log := log.FromContext(ctx).WithValues("restoresession", req.NamespacedName) + // TODO(user): your logic here - // Get the RestoreSession - restoreSession := &formolv1alpha1.RestoreSession{} - if err := r.Get(ctx, req.NamespacedName, restoreSession); err != nil { - log.Error(err, "unable to get restoresession") - return reconcile.Result{}, client.IgnoreNotFound(err) - } - log = r.Log.WithValues("restoresession", req.NamespacedName, "version", restoreSession.ObjectMeta.ResourceVersion) - // Get the BackupSession the RestoreSession references - backupSession := &formolv1alpha1.BackupSession{} - if err := r.Get(ctx, client.ObjectKey{ - Namespace: restoreSession.Namespace, - Name: restoreSession.Spec.BackupSessionRef.Ref.Name, - }, backupSession); err != nil { - if errors.IsNotFound(err) { - backupSession = &formolv1alpha1.BackupSession{ - Spec: restoreSession.Spec.BackupSessionRef.Spec, - Status: restoreSession.Spec.BackupSessionRef.Status, - } - log.V(1).Info("generated backupsession", "spec", backupSession.Spec, "status", backupSession.Status) - } else { - log.Error(err, "unable to get backupsession", "restoresession", restoreSession.Spec) - return reconcile.Result{}, client.IgnoreNotFound(err) - } - } - // Get the BackupConfiguration linked to the BackupSession - backupConf := &formolv1alpha1.BackupConfiguration{} - if err := r.Get(ctx, client.ObjectKey{ - Namespace: backupSession.Spec.Ref.Namespace, - Name: backupSession.Spec.Ref.Name, - }, backupConf); err != nil { - log.Error(err, "unable to get backupConfiguration", "name", backupSession.Spec.Ref, "namespace", backupSession.Namespace) - return reconcile.Result{}, client.IgnoreNotFound(err) - } - - // Helper functions - createRestoreJob := func(target formolv1alpha1.Target, snapshotId string) error { - // TODO: Get the list of existing jobs and see if there is already one scheduled for the target - var jobList batchv1.JobList - if err := r.List(ctx, &jobList, client.InNamespace(restoreSession.Namespace), client.MatchingFields{jobOwnerKey: restoreSession.Name}); err != nil { - log.Error(err, "unable to get job list") - return err - } - log.V(1).Info("Found jobs", "jobs", jobList.Items) - for _, job := range jobList.Items { - if job.Annotations["targetName"] == target.Name && job.Annotations["snapshotId"] == snapshotId { - log.V(0).Info("there is already a cronjob to restore that target", "targetName", target.Name, "snapshotId", snapshotId) - return nil - } - } - restoreSessionEnv := []corev1.EnvVar{ - corev1.EnvVar{ - Name: "TARGET_NAME", - Value: target.Name, - }, - corev1.EnvVar{ - Name: "RESTORESESSION_NAME", - Value: restoreSession.Name, - }, - corev1.EnvVar{ - Name: "RESTORESESSION_NAMESPACE", - Value: restoreSession.Namespace, - }, - } - - output := corev1.VolumeMount{ - Name: "output", - MountPath: "/output", - } - restic := corev1.Container{ - Name: "restic", - Image: backupConf.Spec.Image, - Args: []string{"volume", "restore", "--snapshot-id", snapshotId}, - VolumeMounts: []corev1.VolumeMount{output}, - Env: restoreSessionEnv, - } - finalizer := corev1.Container{ - Name: "finalizer", - Image: backupConf.Spec.Image, - Args: 
[]string{"target", "finalize"}, - VolumeMounts: []corev1.VolumeMount{output}, - Env: restoreSessionEnv, - } - repo := &formolv1alpha1.Repo{} - if err := r.Get(ctx, client.ObjectKey{ - Namespace: backupConf.Namespace, - Name: backupConf.Spec.Repository, - }, repo); err != nil { - log.Error(err, "unable to get Repo from BackupConfiguration") - return err - } - // S3 backing storage - var ttl int32 = 300 - restic.Env = append(restic.Env, formolutils.ConfigureResticEnvVar(backupConf, repo)...) - job := &batchv1.Job{ - ObjectMeta: metav1.ObjectMeta{ - GenerateName: fmt.Sprintf("%s-%s-", restoreSession.Name, target.Name), - Namespace: restoreSession.Namespace, - Annotations: map[string]string{ - "targetName": target.Name, - "snapshotId": snapshotId, - }, - }, - Spec: batchv1.JobSpec{ - TTLSecondsAfterFinished: &ttl, - Template: corev1.PodTemplateSpec{ - Spec: corev1.PodSpec{ - InitContainers: []corev1.Container{restic}, - Containers: []corev1.Container{finalizer}, - Volumes: []corev1.Volume{ - corev1.Volume{Name: "output"}, - }, - RestartPolicy: corev1.RestartPolicyOnFailure, - }, - }, - }, - } - for _, step := range target.Steps { - function := &formolv1alpha1.Function{} - // get the backup function - if err := r.Get(ctx, client.ObjectKey{ - Namespace: restoreSession.Namespace, - Name: step.Name, - }, function); err != nil { - log.Error(err, "unable to get backup function", "name", step.Name) - return err - } - var restoreName string - if function.Annotations["restoreFunction"] != "" { - restoreName = function.Annotations["restoreFunction"] - } else { - restoreName = strings.Replace(step.Name, "backup", "restore", 1) - } - if err := r.Get(ctx, client.ObjectKey{ - Namespace: restoreSession.Namespace, - Name: restoreName, - }, function); err != nil { - log.Error(err, "unable to get function", "function", step) - return err - } - function.Spec.Name = function.Name - function.Spec.Env = append(function.Spec.Env, restoreSessionEnv...) - function.Spec.VolumeMounts = append(function.Spec.VolumeMounts, output) - job.Spec.Template.Spec.InitContainers = append(job.Spec.Template.Spec.InitContainers, function.Spec) - } - if err := ctrl.SetControllerReference(restoreSession, job, r.Scheme); err != nil { - log.Error(err, "unable to set controller on job", "job", job, "restoresession", restoreSession) - return err - } - log.V(0).Info("creating a restore job", "target", target.Name) - if err := r.Create(ctx, job); err != nil { - log.Error(err, "unable to create job", "job", job) - return err - } - return nil - } - - deleteRestoreInitContainer := func(target formolv1alpha1.Target) (err error) { - deployment := &appsv1.Deployment{} - if err = r.Get(context.Background(), client.ObjectKey{ - Namespace: backupConf.Namespace, - Name: target.Name, - }, deployment); err != nil { - log.Error(err, "unable to get deployment") - return err - } - log.V(1).Info("got deployment", "namespace", deployment.Namespace, "name", deployment.Name) - newInitContainers := []corev1.Container{} - for _, initContainer := range deployment.Spec.Template.Spec.InitContainers { - if initContainer.Name == RESTORESESSION { - log.V(0).Info("Found our restoresession container. 
Removing it from the list of init containers", "container", initContainer) - defer func() { - if err = r.Update(ctx, deployment); err != nil { - log.Error(err, "unable to update deployment") - } - }() - } else { - newInitContainers = append(newInitContainers, initContainer) - } - } - deployment.Spec.Template.Spec.InitContainers = newInitContainers - return nil - } - - createRestoreInitContainer := func(target formolv1alpha1.Target, snapshotId string) error { - deployment := &appsv1.Deployment{} - if err := r.Get(context.Background(), client.ObjectKey{ - Namespace: restoreSession.Namespace, - Name: target.Name, - }, deployment); err != nil { - log.Error(err, "unable to get deployment") - return err - } - log.V(1).Info("got deployment", "namespace", deployment.Namespace, "name", deployment.Name) - for _, initContainer := range deployment.Spec.Template.Spec.InitContainers { - if initContainer.Name == RESTORESESSION { - log.V(0).Info("there is already a restoresession initcontainer", "deployment", deployment.Spec.Template.Spec.InitContainers) - return nil - } - } - restoreSessionEnv := []corev1.EnvVar{ - corev1.EnvVar{ - Name: formolv1alpha1.TARGET_NAME, - Value: target.Name, - }, - corev1.EnvVar{ - Name: formolv1alpha1.RESTORESESSION_NAME, - Value: restoreSession.Name, - }, - corev1.EnvVar{ - Name: formolv1alpha1.RESTORESESSION_NAMESPACE, - Value: restoreSession.Namespace, - }, - } - initContainer := corev1.Container{ - Name: RESTORESESSION, - Image: backupConf.Spec.Image, - Args: []string{"volume", "restore", "--snapshot-id", snapshotId}, - VolumeMounts: target.VolumeMounts, - Env: restoreSessionEnv, - } - repo := &formolv1alpha1.Repo{} - if err := r.Get(ctx, client.ObjectKey{ - Namespace: backupConf.Namespace, - Name: backupConf.Spec.Repository, - }, repo); err != nil { - log.Error(err, "unable to get Repo from BackupConfiguration") - return err - } - // S3 backing storage - initContainer.Env = append(initContainer.Env, formolutils.ConfigureResticEnvVar(backupConf, repo)...) - deployment.Spec.Template.Spec.InitContainers = append([]corev1.Container{initContainer}, - deployment.Spec.Template.Spec.InitContainers...) 
- if err := r.Update(ctx, deployment); err != nil { - log.Error(err, "unable to update deployment") - return err - } - - return nil - } - - startNextTask := func() (*formolv1alpha1.TargetStatus, error) { - nextTarget := len(restoreSession.Status.Targets) - if nextTarget < len(backupConf.Spec.Targets) { - target := backupConf.Spec.Targets[nextTarget] - targetStatus := formolv1alpha1.TargetStatus{ - Name: target.Name, - Kind: target.Kind, - SessionState: formolv1alpha1.New, - StartTime: &metav1.Time{Time: time.Now()}, - } - restoreSession.Status.Targets = append(restoreSession.Status.Targets, targetStatus) - switch target.Kind { - case formolv1alpha1.SidecarKind: - log.V(0).Info("Next task is a Sidecard restore", "target", target) - if err := createRestoreInitContainer(target, backupSession.Status.Targets[nextTarget].SnapshotId); err != nil { - log.V(0).Info("unable to create restore init container", "task", target) - targetStatus.SessionState = formolv1alpha1.Failure - return nil, err - } - case formolv1alpha1.JobKind: - log.V(0).Info("Next task is a Job restore", "target", target) - if err := createRestoreJob(target, backupSession.Status.Targets[nextTarget].SnapshotId); err != nil { - log.V(0).Info("unable to create restore job", "task", target) - targetStatus.SessionState = formolv1alpha1.Failure - return nil, err - } - } - return &targetStatus, nil - } else { - return nil, nil - } - } - - endTask := func() error { - target := backupConf.Spec.Targets[len(restoreSession.Status.Targets)-1] - switch target.Kind { - case formolv1alpha1.SidecarKind: - if err := deleteRestoreInitContainer(target); err != nil { - log.Error(err, "unable to delete restore init container") - return err - } - } - return nil - } - - switch restoreSession.Status.SessionState { - case formolv1alpha1.New: - restoreSession.Status.SessionState = formolv1alpha1.Running - if targetStatus, err := startNextTask(); err != nil { - log.Error(err, "unable to start next restore task") - return reconcile.Result{}, err - } else { - log.V(0).Info("New restore. Start the first task", "task", targetStatus.Name) - if err := r.Status().Update(ctx, restoreSession); err != nil { - log.Error(err, "unable to update restoresession") - return reconcile.Result{}, err - } - } - case formolv1alpha1.Running: - currentTargetStatus := &restoreSession.Status.Targets[len(restoreSession.Status.Targets)-1] - switch currentTargetStatus.SessionState { - case formolv1alpha1.Failure: - log.V(0).Info("last restore task failed. Stop here", "target", currentTargetStatus.Name) - restoreSession.Status.SessionState = formolv1alpha1.Failure - if err := r.Status().Update(ctx, restoreSession); err != nil { - log.Error(err, "unable to update restoresession") - return reconcile.Result{}, err - } - case formolv1alpha1.Running: - log.V(0).Info("task is still running", "target", currentTargetStatus.Name) - return reconcile.Result{}, nil - case formolv1alpha1.Waiting: - target := backupConf.Spec.Targets[len(restoreSession.Status.Targets)-1] - if target.Kind == formolv1alpha1.SidecarKind { - deployment := &appsv1.Deployment{} - if err := r.Get(context.Background(), client.ObjectKey{ - Namespace: restoreSession.Namespace, - Name: target.Name, - }, deployment); err != nil { - log.Error(err, "unable to get deployment") - return reconcile.Result{}, err - } - - if deployment.Status.ReadyReplicas == *deployment.Spec.Replicas { - log.V(0).Info("The deployment is ready. 
We can resume the backup") - currentTargetStatus.SessionState = formolv1alpha1.Finalize - if err := r.Status().Update(ctx, restoreSession); err != nil { - log.Error(err, "unable to update restoresession") - return reconcile.Result{}, err - } - } else { - log.V(0).Info("Waiting for the sidecar to come back") - return reconcile.Result{RequeueAfter: 10 * time.Second}, nil - } - } else { - log.V(0).Info("not a SidecarKind. Ignoring Waiting") - } - case formolv1alpha1.Success: - _ = endTask() - log.V(0).Info("last task was a success. start a new one", "target", currentTargetStatus, "restoreSession version", restoreSession.ObjectMeta.ResourceVersion) - targetStatus, err := startNextTask() - if err != nil { - return reconcile.Result{}, err - } - if targetStatus == nil { - // No more task to start. The restore is over - restoreSession.Status.SessionState = formolv1alpha1.Success - } - if err := r.Status().Update(ctx, restoreSession); err != nil { - log.Error(err, "unable to update restoresession") - return reconcile.Result{RequeueAfter: 300 * time.Millisecond}, nil - } - } - case "": - // Restore session has just been created - restoreSession.Status.SessionState = formolv1alpha1.New - restoreSession.Status.StartTime = &metav1.Time{Time: time.Now()} - if err := r.Status().Update(ctx, restoreSession); err != nil { - log.Error(err, "unable to update restoreSession") - return reconcile.Result{}, err - } - } - return reconcile.Result{}, nil + return ctrl.Result{}, nil } +// SetupWithManager sets up the controller with the Manager. func (r *RestoreSessionReconciler) SetupWithManager(mgr ctrl.Manager) error { - if err := mgr.GetFieldIndexer().IndexField(context.Background(), &batchv1.Job{}, jobOwnerKey, func(rawObj client.Object) []string { - job := rawObj.(*batchv1.Job) - owner := metav1.GetControllerOf(job) - if owner == nil { - return nil - } - if owner.APIVersion != formolv1alpha1.GroupVersion.String() || owner.Kind != "RestoreSession" { - return nil - } - return []string{owner.Name} - }); err != nil { - return err - } return ctrl.NewControllerManagedBy(mgr). For(&formolv1alpha1.RestoreSession{}). - Owns(&batchv1.Job{}). Complete(r) } diff --git a/controllers/restoresession_controller_test.go b/controllers/restoresession_controller_test.go deleted file mode 100644 index 2a3750c..0000000 --- a/controllers/restoresession_controller_test.go +++ /dev/null @@ -1,95 +0,0 @@ -package controllers - -import ( - "context" - formolv1alpha1 "github.com/desmo999r/formol/api/v1alpha1" - . "github.com/onsi/ginkgo" - . 
"github.com/onsi/gomega" - corev1 "k8s.io/api/core/v1" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/apimachinery/pkg/types" -) - -var _ = Describe("Testing RestoreSession controller", func() { - const ( - RSRestoreSessionName = "test-restoresession-controller" - ) - var ( - ctx = context.Background() - key = types.NamespacedName{ - Name: RSRestoreSessionName, - Namespace: TestNamespace, - } - restoreSession = &formolv1alpha1.RestoreSession{} - ) - BeforeEach(func() { - restoreSession = &formolv1alpha1.RestoreSession{ - ObjectMeta: metav1.ObjectMeta{ - Name: RSRestoreSessionName, - Namespace: TestNamespace, - }, - Spec: formolv1alpha1.RestoreSessionSpec{ - BackupSessionRef: formolv1alpha1.BackupSessionRef{ - Ref: corev1.ObjectReference{ - Name: TestBackupSessionName, - }, - }, - }, - } - }) - Context("Creating a RestoreSession", func() { - JustBeforeEach(func() { - Eventually(func() error { - return k8sClient.Create(ctx, restoreSession) - }, timeout, interval).Should(Succeed()) - realRestoreSession := &formolv1alpha1.RestoreSession{} - Eventually(func() error { - return k8sClient.Get(ctx, key, realRestoreSession) - }, timeout, interval).Should(Succeed()) - Eventually(func() formolv1alpha1.SessionState { - _ = k8sClient.Get(ctx, key, realRestoreSession) - return realRestoreSession.Status.SessionState - }, timeout, interval).Should(Equal(formolv1alpha1.Running)) - }) - AfterEach(func() { - Expect(k8sClient.Delete(ctx, restoreSession)).Should(Succeed()) - }) - It("Should have a new task and should fail if the task fails", func() { - restoreSession := &formolv1alpha1.RestoreSession{} - Expect(k8sClient.Get(ctx, key, restoreSession)).Should(Succeed()) - Expect(len(restoreSession.Status.Targets)).Should(Equal(1)) - Expect(restoreSession.Status.Targets[0].SessionState).Should(Equal(formolv1alpha1.New)) - restoreSession.Status.Targets[0].SessionState = formolv1alpha1.Running - Expect(k8sClient.Status().Update(ctx, restoreSession)).Should(Succeed()) - Expect(k8sClient.Get(ctx, key, restoreSession)).Should(Succeed()) - Eventually(func() formolv1alpha1.SessionState { - _ = k8sClient.Get(ctx, key, restoreSession) - return restoreSession.Status.Targets[0].SessionState - }, timeout, interval).Should(Equal(formolv1alpha1.Running)) - restoreSession.Status.Targets[0].SessionState = formolv1alpha1.Failure - Expect(k8sClient.Status().Update(ctx, restoreSession)).Should(Succeed()) - Expect(k8sClient.Get(ctx, key, restoreSession)).Should(Succeed()) - Eventually(func() formolv1alpha1.SessionState { - _ = k8sClient.Get(ctx, key, restoreSession) - return restoreSession.Status.SessionState - }, timeout, interval).Should(Equal(formolv1alpha1.Failure)) - }) - It("Should move to the new task if the first one is a success and be a success if all the tasks succeed", func() { - restoreSession := &formolv1alpha1.RestoreSession{} - Expect(k8sClient.Get(ctx, key, restoreSession)).Should(Succeed()) - Expect(len(restoreSession.Status.Targets)).Should(Equal(1)) - restoreSession.Status.Targets[0].SessionState = formolv1alpha1.Success - Expect(k8sClient.Status().Update(ctx, restoreSession)).Should(Succeed()) - Eventually(func() int { - _ = k8sClient.Get(ctx, key, restoreSession) - return len(restoreSession.Status.Targets) - }, timeout, interval).Should(Equal(2)) - restoreSession.Status.Targets[1].SessionState = formolv1alpha1.Success - Expect(k8sClient.Status().Update(ctx, restoreSession)).Should(Succeed()) - Eventually(func() formolv1alpha1.SessionState { - _ = k8sClient.Get(ctx, key, restoreSession) - return 
restoreSession.Status.SessionState - }, timeout, interval).Should(Equal(formolv1alpha1.Success)) - }) - }) -}) diff --git a/controllers/suite_test.go b/controllers/suite_test.go index 0eb694a..88ebe55 100644 --- a/controllers/suite_test.go +++ b/controllers/suite_test.go @@ -1,5 +1,5 @@ /* - +Copyright 2023. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -22,55 +22,48 @@ import ( "testing" "time" - . "github.com/onsi/ginkgo" + . "github.com/onsi/ginkgo/v2" . "github.com/onsi/gomega" - appsv1 "k8s.io/api/apps/v1" - corev1 "k8s.io/api/core/v1" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/client-go/kubernetes/scheme" "k8s.io/client-go/rest" - ctrl "sigs.k8s.io/controller-runtime" "sigs.k8s.io/controller-runtime/pkg/client" "sigs.k8s.io/controller-runtime/pkg/envtest" - "sigs.k8s.io/controller-runtime/pkg/envtest/printer" logf "sigs.k8s.io/controller-runtime/pkg/log" "sigs.k8s.io/controller-runtime/pkg/log/zap" formolv1alpha1 "github.com/desmo999r/formol/api/v1alpha1" - // +kubebuilder:scaffold:imports + //+kubebuilder:scaffold:imports + + appsv1 "k8s.io/api/apps/v1" + corev1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + ctrl "sigs.k8s.io/controller-runtime" ) // These tests use Ginkgo (BDD-style Go testing framework). Refer to // http://onsi.github.io/ginkgo/ to learn more about Ginkgo. -const ( - TestBackupFuncName = "test-backup-func" - TestFunc = "test-norestore-func" - TestRestoreFuncName = "test-restore-func" - TestNamespace = "test-namespace" - TestRepoName = "test-repo" - TestDeploymentName = "test-deployment" - TestBackupConfName = "test-backupconf" - TestBackupSessionName = "test-backupsession" - TestDataVolume = "data" - TestDataMountPath = "/data" - timeout = time.Second * 10 - interval = time.Millisecond * 250 -) -var cfg *rest.Config -var k8sClient client.Client -var testEnv *envtest.Environment +const ( + NAMESPACE_NAME = "test-namespace" + REPO_NAME = "test-repo" + DEPLOYMENT_NAME = "test-deployment" + CONTAINER_NAME = "test-container" + DATAVOLUME_NAME = "data" + timeout = time.Second * 10 + interval = time.Millisecond * 250 +) var ( namespace = &corev1.Namespace{ ObjectMeta: metav1.ObjectMeta{ - Name: TestNamespace, + Name: NAMESPACE_NAME, }, } deployment = &appsv1.Deployment{ ObjectMeta: metav1.ObjectMeta{ - Name: TestDeploymentName, - Namespace: TestNamespace, + Namespace: NAMESPACE_NAME, + Name: DEPLOYMENT_NAME, }, Spec: appsv1.DeploymentSpec{ Selector: &metav1.LabelSelector{ @@ -89,239 +82,74 @@ var ( }, Volumes: []corev1.Volume{ corev1.Volume{ - Name: TestDataVolume, + Name: DATAVOLUME_NAME, }, }, }, }, }, } - sa = &corev1.ServiceAccount{ - ObjectMeta: metav1.ObjectMeta{ - Name: "default", - Namespace: TestNamespace, - }, - } - secret = &corev1.Secret{ - ObjectMeta: metav1.ObjectMeta{ - Name: "test-secret", - Namespace: TestNamespace, - }, - Data: map[string][]byte{ - "RESTIC_PASSWORD": []byte("toto"), - "AWS_ACCESS_KEY_ID": []byte("titi"), - "AWS_SECRET_ACCESS_KEY": []byte("tata"), - }, - } - repo = &formolv1alpha1.Repo{ - ObjectMeta: metav1.ObjectMeta{ - Name: TestRepoName, - Namespace: TestNamespace, - }, - Spec: formolv1alpha1.RepoSpec{ - Backend: formolv1alpha1.Backend{ - S3: formolv1alpha1.S3{ - Server: "raid5.desmojim.fr:9000", - Bucket: "testbucket2", - }, - }, - RepositorySecrets: "test-secret", - }, - } - function = &formolv1alpha1.Function{ - ObjectMeta: metav1.ObjectMeta{ - Name: TestFunc, - Namespace: TestNamespace, - }, - Spec: 
corev1.Container{ - Name: "norestore-func", - Image: "myimage", - Args: []string{"a", "set", "of", "args"}, - }, - } - backupFunc = &formolv1alpha1.Function{ - ObjectMeta: metav1.ObjectMeta{ - Name: TestRestoreFuncName, - Namespace: TestNamespace, - }, - Spec: corev1.Container{ - Name: "restore-func", - Image: "myimage", - Args: []string{"a", "set", "of", "args"}, - }, - } - restoreFunc = &formolv1alpha1.Function{ - ObjectMeta: metav1.ObjectMeta{ - Name: TestBackupFuncName, - Namespace: TestNamespace, - }, - Spec: corev1.Container{ - Name: "backup-func", - Image: "myimage", - Args: []string{"a", "set", "of", "args"}, - Env: []corev1.EnvVar{ - corev1.EnvVar{ - Name: "foo", - Value: "bar", - }, - }, - }, - } - testBackupConf = &formolv1alpha1.BackupConfiguration{ - ObjectMeta: metav1.ObjectMeta{ - Name: TestBackupConfName, - Namespace: TestNamespace, - }, - Spec: formolv1alpha1.BackupConfigurationSpec{ - Repository: TestRepoName, - Image: "desmo999r/formolcli:latest", - Schedule: "1 * * * *", - Keep: formolv1alpha1.Keep{ - Last: 2, - }, - Targets: []formolv1alpha1.Target{ - formolv1alpha1.Target{ - Kind: formolv1alpha1.SidecarKind, - Name: TestDeploymentName, - Steps: []formolv1alpha1.Step{ - formolv1alpha1.Step{ - Name: TestFunc, - }, - }, - Paths: []string{ - TestDataMountPath, - }, - VolumeMounts: []corev1.VolumeMount{ - corev1.VolumeMount{ - Name: TestDataVolume, - MountPath: TestDataMountPath, - }, - }, - }, - formolv1alpha1.Target{ - Kind: formolv1alpha1.JobKind, - Name: TestBackupFuncName, - Steps: []formolv1alpha1.Step{ - formolv1alpha1.Step{ - Name: TestFunc, - }, - formolv1alpha1.Step{ - Name: TestBackupFuncName, - }, - }, - }, - }, - }, - } - testBackupSession = &formolv1alpha1.BackupSession{ - ObjectMeta: metav1.ObjectMeta{ - Name: TestBackupSessionName, - Namespace: TestNamespace, - }, - Spec: formolv1alpha1.BackupSessionSpec{ - Ref: corev1.ObjectReference{ - Name: TestBackupConfName, - Namespace: TestNamespace, - }, - }, - } + cfg *rest.Config + k8sClient client.Client + testEnv *envtest.Environment + ctx context.Context + cancel context.CancelFunc ) func TestAPIs(t *testing.T) { RegisterFailHandler(Fail) - RunSpecsWithDefaultAndCustomReporters(t, - "Controller Suite", - []Reporter{printer.NewlineReporter{}}) + RunSpecs(t, "Controller Suite") } var _ = BeforeSuite(func() { logf.SetLogger(zap.New(zap.WriteTo(GinkgoWriter), zap.UseDevMode(true))) + ctx, cancel = context.WithCancel(context.TODO()) By("bootstrapping test environment") testEnv = &envtest.Environment{ - CRDDirectoryPaths: []string{filepath.Join("..", "config", "crd", "bases")}, + CRDDirectoryPaths: []string{filepath.Join("..", "config", "crd", "bases")}, + ErrorIfCRDPathMissing: true, } - cfg, err := testEnv.Start() - Expect(err).ToNot(HaveOccurred()) - Expect(cfg).ToNot(BeNil()) + var err error + // cfg is defined in this file globally. 
+ cfg, err = testEnv.Start() + Expect(err).NotTo(HaveOccurred()) + Expect(cfg).NotTo(BeNil()) err = formolv1alpha1.AddToScheme(scheme.Scheme) Expect(err).NotTo(HaveOccurred()) - // +kubebuilder:scaffold:scheme + //+kubebuilder:scaffold:scheme + + k8sClient, err = client.New(cfg, client.Options{Scheme: scheme.Scheme}) + Expect(err).NotTo(HaveOccurred()) + Expect(k8sClient).NotTo(BeNil()) + Expect(k8sClient.Create(ctx, namespace)).Should(Succeed()) + Expect(k8sClient.Create(ctx, deployment)).Should(Succeed()) k8sManager, err := ctrl.NewManager(cfg, ctrl.Options{ Scheme: scheme.Scheme, }) - Expect(err).ToNot(HaveOccurred()) + Expect(err).NotTo(HaveOccurred()) err = (&BackupConfigurationReconciler{ Client: k8sManager.GetClient(), Scheme: k8sManager.GetScheme(), - Log: ctrl.Log.WithName("controllers").WithName("BackupConfiguration"), }).SetupWithManager(k8sManager) - Expect(err).ToNot(HaveOccurred()) - - err = (&BackupSessionReconciler{ - Client: k8sManager.GetClient(), - Scheme: k8sManager.GetScheme(), - Log: ctrl.Log.WithName("controllers").WithName("BackupSession"), - }).SetupWithManager(k8sManager) - Expect(err).ToNot(HaveOccurred()) - - err = (&RestoreSessionReconciler{ - Client: k8sManager.GetClient(), - Scheme: k8sManager.GetScheme(), - Log: ctrl.Log.WithName("controllers").WithName("RestoreSession"), - }).SetupWithManager(k8sManager) - Expect(err).ToNot(HaveOccurred()) + Expect(err).NotTo(HaveOccurred()) go func() { - err = k8sManager.Start(ctrl.SetupSignalHandler()) - Expect(err).ToNot(HaveOccurred()) + defer GinkgoRecover() + err = k8sManager.Start(ctx) + Expect(err).ToNot(HaveOccurred(), "failed to run manager") }() - - k8sClient = k8sManager.GetClient() - ctx := context.Background() - Expect(k8sClient).ToNot(BeNil()) - Expect(k8sClient.Create(ctx, namespace)).Should(Succeed()) - Expect(k8sClient.Create(ctx, sa)).Should(Succeed()) - Expect(k8sClient.Create(ctx, secret)).Should(Succeed()) - Expect(k8sClient.Create(ctx, repo)).Should(Succeed()) - Expect(k8sClient.Create(ctx, deployment)).Should(Succeed()) - Expect(k8sClient.Create(ctx, function)).Should(Succeed()) - Expect(k8sClient.Create(ctx, backupFunc)).Should(Succeed()) - Expect(k8sClient.Create(ctx, restoreFunc)).Should(Succeed()) - Expect(k8sClient.Create(ctx, testBackupConf)).Should(Succeed()) - Expect(k8sClient.Create(ctx, testBackupSession)).Should(Succeed()) - Eventually(func() error { - return k8sClient.Get(ctx, client.ObjectKey{ - Name: TestBackupSessionName, - Namespace: TestNamespace, - }, testBackupSession) - }, timeout, interval).Should(Succeed()) - testBackupSession.Status.SessionState = formolv1alpha1.Success - testBackupSession.Status.Targets = []formolv1alpha1.TargetStatus{ - formolv1alpha1.TargetStatus{ - Name: TestDeploymentName, - Kind: formolv1alpha1.SidecarKind, - SessionState: formolv1alpha1.Success, - SnapshotId: "12345abcdef", - }, - formolv1alpha1.TargetStatus{ - Name: TestBackupFuncName, - Kind: formolv1alpha1.JobKind, - SessionState: formolv1alpha1.Success, - SnapshotId: "67890ghijk", - }, - } - Expect(k8sClient.Status().Update(ctx, testBackupSession)).Should(Succeed()) -}, 60) +}) var _ = AfterSuite(func() { + cancel() By("tearing down the test environment") err := testEnv.Stop() - Expect(err).ToNot(HaveOccurred()) + Expect(err).NotTo(HaveOccurred()) }) diff --git a/controllers/suite_test.go~ b/controllers/suite_test.go~ new file mode 100644 index 0000000..762734a --- /dev/null +++ b/controllers/suite_test.go~ @@ -0,0 +1,155 @@ +/* +Copyright 2023. 
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package controllers
+
+import (
+	"context"
+	"path/filepath"
+	"testing"
+	"time"
+
+	. "github.com/onsi/ginkgo/v2"
+	. "github.com/onsi/gomega"
+
+	"k8s.io/client-go/kubernetes/scheme"
+	"k8s.io/client-go/rest"
+	"sigs.k8s.io/controller-runtime/pkg/client"
+	"sigs.k8s.io/controller-runtime/pkg/envtest"
+	logf "sigs.k8s.io/controller-runtime/pkg/log"
+	"sigs.k8s.io/controller-runtime/pkg/log/zap"
+
+	formolv1alpha1 "github.com/desmo999r/formol/api/v1alpha1"
+	//+kubebuilder:scaffold:imports
+
+	//appsv1 "k8s.io/api/apps/v1"
+	corev1 "k8s.io/api/core/v1"
+	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+	ctrl "sigs.k8s.io/controller-runtime"
+)
+
+// These tests use Ginkgo (BDD-style Go testing framework). Refer to
+// http://onsi.github.io/ginkgo/ to learn more about Ginkgo.
+
+const (
+	NAMESPACE_NAME  = "test-namespace"
+	REPO_NAME       = "test-repo"
+	DEPLOYMENT_NAME = "test-deployment"
+	CONTAINER_NAME  = "test-container"
+	DATAVOLUME_NAME = "data"
+	timeout         = time.Second * 10
+	interval        = time.Millisecond * 250
+)
+
+var (
+	namespace = &corev1.Namespace{
+		ObjectMeta: metav1.ObjectMeta{
+			Name: NAMESPACE_NAME,
+		},
+	}
+	deployment = &appsv1.Deployment{
+		ObjectMeta: metav1.ObjectMeta{
+			Namespace: NAMESPACE_NAME,
+			Name:      DEPLOYMENT_NAME,
+		},
+		Spec: appsv1.DeploymentSpec{
+			Selector: &metav1.LabelSelector{
+				MatchLabels: map[string]string{"app": "test-deployment"},
+			},
+			Template: corev1.PodTemplateSpec{
+				ObjectMeta: metav1.ObjectMeta{
+					Labels: map[string]string{"app": "test-deployment"},
+				},
+				Spec: corev1.PodSpec{
+					Containers: []corev1.Container{
+						corev1.Container{
+							Name:  "test-container",
+							Image: "test-image",
+						},
+					},
+					Volumes: []corev1.Volume{
+						corev1.Volume{
+							Name: DATAVOLUME_NAME,
+						},
+					},
+				},
+			},
+		},
+	}
+	cfg       *rest.Config
+	k8sClient client.Client
+	testEnv   *envtest.Environment
+	ctx       context.Context
+	cancel    context.CancelFunc
+)
+
+func TestAPIs(t *testing.T) {
+	RegisterFailHandler(Fail)
+
+	RunSpecs(t, "Controller Suite")
+}
+
+var _ = BeforeSuite(func() {
+	logf.SetLogger(zap.New(zap.WriteTo(GinkgoWriter), zap.UseDevMode(true)))
+
+	ctx, cancel = context.WithCancel(context.TODO())
+	By("bootstrapping test environment")
+	testEnv = &envtest.Environment{
+		CRDDirectoryPaths:     []string{filepath.Join("..", "config", "crd", "bases")},
+		ErrorIfCRDPathMissing: true,
+	}
+
+	var err error
+	// cfg is defined in this file globally. 
+ cfg, err = testEnv.Start() + Expect(err).NotTo(HaveOccurred()) + Expect(cfg).NotTo(BeNil()) + + err = formolv1alpha1.AddToScheme(scheme.Scheme) + Expect(err).NotTo(HaveOccurred()) + + //+kubebuilder:scaffold:scheme + + k8sClient, err = client.New(cfg, client.Options{Scheme: scheme.Scheme}) + Expect(err).NotTo(HaveOccurred()) + Expect(k8sClient).NotTo(BeNil()) + Expect(k8sClient.Create(ctx, namespace)).Should(Succeed()) + Expect(k8sClient.Create(ctx, deployment)).Should(Succeed()) + + k8sManager, err := ctrl.NewManager(cfg, ctrl.Options{ + Scheme: scheme.Scheme, + }) + Expect(err).NotTo(HaveOccurred()) + + err = (&BackupConfigurationReconciler{ + Client: k8sManager.GetClient(), + Scheme: k8sManager.GetScheme(), + }).SetupWithManager(k8sManager) + Expect(err).NotTo(HaveOccurred()) + + go func() { + defer GinkgoRecover() + err = k8sManager.Start(ctx) + Expect(err).ToNot(HaveOccurred(), "failed to run manager") + }() +}) + +var _ = AfterSuite(func() { + cancel() + By("tearing down the test environment") + err := testEnv.Stop() + Expect(err).NotTo(HaveOccurred()) +}) diff --git a/go.mod b/go.mod index 68f2be1..6eb9d3f 100644 --- a/go.mod +++ b/go.mod @@ -1,13 +1,81 @@ module github.com/desmo999r/formol -go 1.13 +go 1.19 require ( - github.com/go-logr/logr v0.3.0 - github.com/onsi/ginkgo v1.14.1 - github.com/onsi/gomega v1.10.2 - k8s.io/api v0.20.2 - k8s.io/apimachinery v0.20.2 - k8s.io/client-go v0.20.2 - sigs.k8s.io/controller-runtime v0.8.3 + github.com/go-logr/logr v1.2.3 + github.com/onsi/ginkgo/v2 v2.1.4 + github.com/onsi/gomega v1.19.0 + k8s.io/api v0.25.0 + k8s.io/apimachinery v0.25.0 + k8s.io/client-go v0.25.0 + sigs.k8s.io/controller-runtime v0.13.1 +) + +require ( + cloud.google.com/go v0.97.0 // indirect + github.com/Azure/go-autorest v14.2.0+incompatible // indirect + github.com/Azure/go-autorest/autorest v0.11.27 // indirect + github.com/Azure/go-autorest/autorest/adal v0.9.20 // indirect + github.com/Azure/go-autorest/autorest/date v0.3.0 // indirect + github.com/Azure/go-autorest/logger v0.2.1 // indirect + github.com/Azure/go-autorest/tracing v0.6.0 // indirect + github.com/PuerkitoBio/purell v1.1.1 // indirect + github.com/PuerkitoBio/urlesc v0.0.0-20170810143723-de5bf2ad4578 // indirect + github.com/beorn7/perks v1.0.1 // indirect + github.com/cespare/xxhash/v2 v2.1.2 // indirect + github.com/davecgh/go-spew v1.1.1 // indirect + github.com/emicklei/go-restful/v3 v3.8.0 // indirect + github.com/evanphx/json-patch/v5 v5.6.0 // indirect + github.com/fsnotify/fsnotify v1.5.4 // indirect + github.com/go-logr/zapr v1.2.3 // indirect + github.com/go-openapi/jsonpointer v0.19.5 // indirect + github.com/go-openapi/jsonreference v0.19.5 // indirect + github.com/go-openapi/swag v0.19.14 // indirect + github.com/gogo/protobuf v1.3.2 // indirect + github.com/golang-jwt/jwt/v4 v4.2.0 // indirect + github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da // indirect + github.com/golang/protobuf v1.5.2 // indirect + github.com/google/gnostic v0.5.7-v3refs // indirect + github.com/google/go-cmp v0.5.8 // indirect + github.com/google/gofuzz v1.1.0 // indirect + github.com/google/uuid v1.1.2 // indirect + github.com/imdario/mergo v0.3.12 // indirect + github.com/josharian/intern v1.0.0 // indirect + github.com/json-iterator/go v1.1.12 // indirect + github.com/mailru/easyjson v0.7.6 // indirect + github.com/matttproud/golang_protobuf_extensions v1.0.2-0.20181231171920-c182affec369 // indirect + github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd // indirect + 
github.com/modern-go/reflect2 v1.0.2 // indirect + github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 // indirect + github.com/pkg/errors v0.9.1 // indirect + github.com/prometheus/client_golang v1.12.2 // indirect + github.com/prometheus/client_model v0.2.0 // indirect + github.com/prometheus/common v0.32.1 // indirect + github.com/prometheus/procfs v0.7.3 // indirect + github.com/spf13/pflag v1.0.5 // indirect + go.uber.org/atomic v1.7.0 // indirect + go.uber.org/multierr v1.6.0 // indirect + go.uber.org/zap v1.21.0 // indirect + golang.org/x/crypto v0.0.0-20220315160706-3147a52a75dd // indirect + golang.org/x/net v0.0.0-20220722155237-a158d28d115b // indirect + golang.org/x/oauth2 v0.0.0-20211104180415-d3ed0bb246c8 // indirect + golang.org/x/sys v0.0.0-20220722155257-8c9f86f7a55f // indirect + golang.org/x/term v0.0.0-20210927222741-03fcf44c2211 // indirect + golang.org/x/text v0.3.7 // indirect + golang.org/x/time v0.0.0-20220609170525-579cf78fd858 // indirect + gomodules.xyz/jsonpatch/v2 v2.2.0 // indirect + google.golang.org/appengine v1.6.7 // indirect + google.golang.org/protobuf v1.28.0 // indirect + gopkg.in/inf.v0 v0.9.1 // indirect + gopkg.in/yaml.v2 v2.4.0 // indirect + gopkg.in/yaml.v3 v3.0.1 // indirect + k8s.io/apiextensions-apiserver v0.25.0 // indirect + k8s.io/component-base v0.25.0 // indirect + k8s.io/klog/v2 v2.70.1 // indirect + k8s.io/kube-openapi v0.0.0-20220803162953-67bda5d908f1 // indirect + k8s.io/utils v0.0.0-20220728103510-ee6ede2d64ed // indirect + sigs.k8s.io/json v0.0.0-20220713155537-f223a00ba0e2 // indirect + sigs.k8s.io/structured-merge-diff/v4 v4.2.3 // indirect + sigs.k8s.io/yaml v1.3.0 // indirect ) diff --git a/hack/boilerplate.go.txt b/hack/boilerplate.go.txt index 767efde..65b8622 100644 --- a/hack/boilerplate.go.txt +++ b/hack/boilerplate.go.txt @@ -1,5 +1,5 @@ /* - +Copyright 2023. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. diff --git a/main.go b/main.go index 88c58f6..0ae8782 100644 --- a/main.go +++ b/main.go @@ -1,5 +1,5 @@ /* - +Copyright 2023. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -20,16 +20,20 @@ import ( "flag" "os" + // Import all Kubernetes client auth plugins (e.g. Azure, GCP, OIDC, etc.) + // to ensure that exec-entrypoint and run can make use of them. 
+ _ "k8s.io/client-go/plugin/pkg/client/auth" + "k8s.io/apimachinery/pkg/runtime" + utilruntime "k8s.io/apimachinery/pkg/util/runtime" clientgoscheme "k8s.io/client-go/kubernetes/scheme" - _ "k8s.io/client-go/plugin/pkg/client/auth/gcp" ctrl "sigs.k8s.io/controller-runtime" + "sigs.k8s.io/controller-runtime/pkg/healthz" "sigs.k8s.io/controller-runtime/pkg/log/zap" - formoldesmojimfrv1alpha1 "github.com/desmo999r/formol/api/v1alpha1" formolv1alpha1 "github.com/desmo999r/formol/api/v1alpha1" "github.com/desmo999r/formol/controllers" - // +kubebuilder:scaffold:imports + //+kubebuilder:scaffold:imports ) var ( @@ -38,30 +42,47 @@ var ( ) func init() { - _ = clientgoscheme.AddToScheme(scheme) + utilruntime.Must(clientgoscheme.AddToScheme(scheme)) - _ = formolv1alpha1.AddToScheme(scheme) - _ = formoldesmojimfrv1alpha1.AddToScheme(scheme) - // +kubebuilder:scaffold:scheme + utilruntime.Must(formolv1alpha1.AddToScheme(scheme)) + //+kubebuilder:scaffold:scheme } func main() { var metricsAddr string var enableLeaderElection bool - flag.StringVar(&metricsAddr, "metrics-addr", ":8080", "The address the metric endpoint binds to.") - flag.BoolVar(&enableLeaderElection, "enable-leader-election", false, + var probeAddr string + flag.StringVar(&metricsAddr, "metrics-bind-address", ":8080", "The address the metric endpoint binds to.") + flag.StringVar(&probeAddr, "health-probe-bind-address", ":8081", "The address the probe endpoint binds to.") + flag.BoolVar(&enableLeaderElection, "leader-elect", false, "Enable leader election for controller manager. "+ "Enabling this will ensure there is only one active controller manager.") + opts := zap.Options{ + Development: true, + } + opts.BindFlags(flag.CommandLine) flag.Parse() - ctrl.SetLogger(zap.New(zap.UseDevMode(true))) + ctrl.SetLogger(zap.New(zap.UseFlagOptions(&opts))) mgr, err := ctrl.NewManager(ctrl.GetConfigOrDie(), ctrl.Options{ - Scheme: scheme, - MetricsBindAddress: metricsAddr, - Port: 9443, - LeaderElection: enableLeaderElection, - LeaderElectionID: "6846258d.desmojim.fr", + Scheme: scheme, + MetricsBindAddress: metricsAddr, + Port: 9443, + HealthProbeBindAddress: probeAddr, + LeaderElection: enableLeaderElection, + LeaderElectionID: "6846258d.desmojim.fr", + // LeaderElectionReleaseOnCancel defines if the leader should step down voluntarily + // when the Manager ends. This requires the binary to immediately end when the + // Manager is stopped; otherwise, this setting is unsafe. Setting this significantly + // speeds up voluntary leader transitions as the new leader doesn't have to wait + // LeaseDuration time first. + // + // In the default scaffold provided, the program ends immediately after + // the manager stops, so it would be fine to enable this option. However, + // if you are doing, or intend to do, any operation such as performing cleanups + // after the manager stops, then enabling this option might be unsafe. 
+ // LeaderElectionReleaseOnCancel: true, }) if err != nil { setupLog.Error(err, "unable to start manager") @@ -70,7 +91,6 @@ func main() { if err = (&controllers.BackupConfigurationReconciler{ Client: mgr.GetClient(), - Log: ctrl.Log.WithName("controllers").WithName("BackupConfiguration"), Scheme: mgr.GetScheme(), }).SetupWithManager(mgr); err != nil { setupLog.Error(err, "unable to create controller", "controller", "BackupConfiguration") @@ -78,7 +98,6 @@ func main() { } if err = (&controllers.BackupSessionReconciler{ Client: mgr.GetClient(), - Log: ctrl.Log.WithName("controllers").WithName("BackupSession"), Scheme: mgr.GetScheme(), }).SetupWithManager(mgr); err != nil { setupLog.Error(err, "unable to create controller", "controller", "BackupSession") @@ -86,27 +105,21 @@ func main() { } if err = (&controllers.RestoreSessionReconciler{ Client: mgr.GetClient(), - Log: ctrl.Log.WithName("controllers").WithName("RestoreSession"), Scheme: mgr.GetScheme(), }).SetupWithManager(mgr); err != nil { setupLog.Error(err, "unable to create controller", "controller", "RestoreSession") os.Exit(1) } - // if os.Getenv("ENABLE_WEBHOOKS") != "false" { - // if err = (&formolv1alpha1.BackupSession{}).SetupWebhookWithManager(mgr); err != nil { - // setupLog.Error(err, "unable to create webhook", "webhook", "BackupSession") - // os.Exit(1) - // } - // if err = (&formolv1alpha1.BackupConfiguration{}).SetupWebhookWithManager(mgr); err != nil { - // setupLog.Error(err, "unable to create webhook", "webhook", "BackupConfiguration") - // os.Exit(1) - // } - // if err = (&formoldesmojimfrv1alpha1.Function{}).SetupWebhookWithManager(mgr); err != nil { - // setupLog.Error(err, "unable to create webhook", "webhook", "Function") - // os.Exit(1) - // } - // } - // +kubebuilder:scaffold:builder + //+kubebuilder:scaffold:builder + + if err := mgr.AddHealthzCheck("healthz", healthz.Ping); err != nil { + setupLog.Error(err, "unable to set up health check") + os.Exit(1) + } + if err := mgr.AddReadyzCheck("readyz", healthz.Ping); err != nil { + setupLog.Error(err, "unable to set up ready check") + os.Exit(1) + } setupLog.Info("starting manager") if err := mgr.Start(ctrl.SetupSignalHandler()); err != nil { diff --git a/pkg/rbac/backupconfiguration.go b/pkg/rbac/backupconfiguration.go deleted file mode 100644 index a3d729d..0000000 --- a/pkg/rbac/backupconfiguration.go +++ /dev/null @@ -1,438 +0,0 @@ -package rbac - -import ( - "context" - corev1 "k8s.io/api/core/v1" - rbacv1 "k8s.io/api/rbac/v1" - "k8s.io/apimachinery/pkg/api/errors" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "sigs.k8s.io/controller-runtime/pkg/client" -) - -const ( - formolRole = "formol-sidecar-role" - backupListenerRole = "backup-listener-role" - backupListenerRoleBinding = "backup-listener-rolebinding" - backupSessionCreatorSA = "backupsession-creator" - backupSessionCreatorRole = "backupsession-creator-role" - backupSessionCreatorRoleBinding = "backupsession-creator-rolebinding" - backupSessionStatusUpdaterRole = "backupsession-statusupdater-role" - backupSessionStatusUpdaterRoleBinding = "backupsession-statusupdater-rolebinding" -) - -func DeleteBackupSessionCreatorRBAC(cl client.Client, namespace string) error { - serviceaccount := &corev1.ServiceAccount{} - if err := cl.Get(context.Background(), client.ObjectKey{ - Namespace: namespace, - Name: backupSessionCreatorSA, - }, serviceaccount); err == nil { - if err = cl.Delete(context.Background(), serviceaccount); err != nil { - return err - } - } - role := &rbacv1.Role{} - if err := 
cl.Get(context.Background(), client.ObjectKey{ - Namespace: namespace, - Name: backupSessionCreatorRole, - }, role); err == nil { - if err = cl.Delete(context.Background(), role); err != nil { - return err - } - } - - rolebinding := &rbacv1.RoleBinding{} - if err := cl.Get(context.Background(), client.ObjectKey{ - Namespace: namespace, - Name: backupSessionCreatorRoleBinding, - }, rolebinding); err == nil { - if err = cl.Delete(context.Background(), rolebinding); err != nil { - return err - } - } - - return nil -} - -func CreateBackupSessionCreatorRBAC(cl client.Client, namespace string) error { - serviceaccount := &corev1.ServiceAccount{ - ObjectMeta: metav1.ObjectMeta{ - Namespace: namespace, - Name: backupSessionCreatorSA, - }, - } - if err := cl.Get(context.Background(), client.ObjectKey{ - Namespace: namespace, - Name: backupSessionCreatorSA, - }, serviceaccount); err != nil && errors.IsNotFound(err) { - if err = cl.Create(context.Background(), serviceaccount); err != nil { - return err - } - } - role := &rbacv1.Role{ - ObjectMeta: metav1.ObjectMeta{ - Namespace: namespace, - Name: backupSessionCreatorRole, - }, - Rules: []rbacv1.PolicyRule{ - rbacv1.PolicyRule{ - Verbs: []string{"get", "list", "watch", "create", "update", "patch", "delete"}, - APIGroups: []string{"formol.desmojim.fr"}, - Resources: []string{"backupsessions"}, - }, - rbacv1.PolicyRule{ - Verbs: []string{"get", "list", "watch", "create", "update", "patch", "delete"}, - APIGroups: []string{"formol.desmojim.fr"}, - Resources: []string{"backupsessions/status"}, - }, - rbacv1.PolicyRule{ - Verbs: []string{"get", "list", "watch"}, - APIGroups: []string{"formol.desmojim.fr"}, - Resources: []string{"backupconfigurations"}, - }, - }, - } - if err := cl.Get(context.Background(), client.ObjectKey{ - Namespace: namespace, - Name: backupSessionCreatorRole, - }, role); err != nil && errors.IsNotFound(err) { - if err = cl.Create(context.Background(), role); err != nil { - return err - } - } - rolebinding := &rbacv1.RoleBinding{ - ObjectMeta: metav1.ObjectMeta{ - Namespace: namespace, - Name: backupSessionCreatorRoleBinding, - }, - Subjects: []rbacv1.Subject{ - rbacv1.Subject{ - Kind: "ServiceAccount", - Name: backupSessionCreatorSA, - }, - }, - RoleRef: rbacv1.RoleRef{ - APIGroup: "rbac.authorization.k8s.io", - Kind: "Role", - Name: backupSessionCreatorRole, - }, - } - if err := cl.Get(context.Background(), client.ObjectKey{ - Namespace: namespace, - Name: backupSessionCreatorRoleBinding, - }, rolebinding); err != nil && errors.IsNotFound(err) { - if err = cl.Create(context.Background(), rolebinding); err != nil { - return err - } - } - - return nil -} - -func DeleteBackupSessionListenerRBAC(cl client.Client, saName string, namespace string) error { - if saName == "" { - saName = "default" - } - sa := &corev1.ServiceAccount{} - if err := cl.Get(context.Background(), client.ObjectKey{ - Namespace: namespace, - Name: saName, - }, sa); err != nil { - return err - } - - role := &rbacv1.Role{} - if err := cl.Get(context.Background(), client.ObjectKey{ - Namespace: namespace, - Name: backupListenerRole, - }, role); err == nil { - if err = cl.Delete(context.Background(), role); err != nil { - return err - } - } - - rolebinding := &rbacv1.RoleBinding{} - if err := cl.Get(context.Background(), client.ObjectKey{ - Namespace: namespace, - Name: backupListenerRoleBinding, - }, rolebinding); err == nil { - if err = cl.Delete(context.Background(), rolebinding); err != nil { - return err - } - } - - return nil -} - -func DeleteFormolRBAC(cl 
client.Client, saName string, namespace string) error { - if saName == "" { - saName = "default" - } - formolRoleBinding := namespace + "-" + saName + "-formol-sidecar-rolebinding" - clusterRoleBinding := &rbacv1.ClusterRoleBinding{ - ObjectMeta: metav1.ObjectMeta{ - Name: formolRoleBinding, - }, - Subjects: []rbacv1.Subject{ - rbacv1.Subject{ - Kind: "ServiceAccount", - Namespace: namespace, - Name: saName, - }, - }, - RoleRef: rbacv1.RoleRef{ - APIGroup: "rbac.authorization.k8s.io", - Kind: "ClusterRole", - Name: formolRole, - }, - } - if err := cl.Delete(context.Background(), clusterRoleBinding); err != nil { - return client.IgnoreNotFound(err) - } - return nil -} - -func CreateFormolRBAC(cl client.Client, saName string, namespace string) error { - if saName == "" { - saName = "default" - } - sa := &corev1.ServiceAccount{} - if err := cl.Get(context.Background(), client.ObjectKey{ - Namespace: namespace, - Name: saName, - }, sa); err != nil { - return err - } - clusterRole := &rbacv1.ClusterRole{ - ObjectMeta: metav1.ObjectMeta{ - Name: formolRole, - }, - Rules: []rbacv1.PolicyRule{ - rbacv1.PolicyRule{ - Verbs: []string{"*"}, - APIGroups: []string{"formol.desmojim.fr"}, - Resources: []string{"*"}, - //APIGroups: []string{"formol.desmojim.fr"}, - //Resources: []string{"restoresessions", "backupsessions", "backupconfigurations"}, - }, - rbacv1.PolicyRule{ - Verbs: []string{"get", "list", "watch"}, - APIGroups: []string{""}, - Resources: []string{"pods", "secrets", "configmaps"}, - }, - rbacv1.PolicyRule{ - Verbs: []string{"get", "list", "watch"}, - APIGroups: []string{"apps"}, - Resources: []string{"deployments", "replicasets"}, - }, - }, - } - if err := cl.Get(context.Background(), client.ObjectKey{ - Name: formolRole, - }, clusterRole); err != nil && errors.IsNotFound(err) { - if err = cl.Create(context.Background(), clusterRole); err != nil { - return err - } - } - formolRoleBinding := namespace + "-" + saName + "-formol-rolebinding" - clusterRoleBinding := &rbacv1.ClusterRoleBinding{ - ObjectMeta: metav1.ObjectMeta{ - Name: formolRoleBinding, - }, - Subjects: []rbacv1.Subject{ - rbacv1.Subject{ - Kind: "ServiceAccount", - Namespace: namespace, - Name: saName, - }, - }, - RoleRef: rbacv1.RoleRef{ - APIGroup: "rbac.authorization.k8s.io", - Kind: "ClusterRole", - Name: formolRole, - }, - } - if err := cl.Get(context.Background(), client.ObjectKey{ - Name: formolRoleBinding, - }, clusterRoleBinding); err != nil && errors.IsNotFound(err) { - if err = cl.Create(context.Background(), clusterRoleBinding); err != nil { - return err - } - } - return nil -} - -func CreateBackupSessionListenerRBAC(cl client.Client, saName string, namespace string) error { - if saName == "" { - saName = "default" - } - sa := &corev1.ServiceAccount{} - if err := cl.Get(context.Background(), client.ObjectKey{ - Namespace: namespace, - Name: saName, - }, sa); err != nil { - return err - } - role := &rbacv1.Role{ - ObjectMeta: metav1.ObjectMeta{ - Namespace: namespace, - Name: backupListenerRole, - }, - Rules: []rbacv1.PolicyRule{ - rbacv1.PolicyRule{ - Verbs: []string{"get", "list", "watch"}, - APIGroups: []string{""}, - Resources: []string{"pods", "secrets", "configmaps"}, - }, - rbacv1.PolicyRule{ - Verbs: []string{"get", "list", "watch"}, - APIGroups: []string{"apps"}, - Resources: []string{"deployments", "replicasets"}, - }, - rbacv1.PolicyRule{ - Verbs: []string{"get", "list", "watch"}, - APIGroups: []string{"formol.desmojim.fr"}, - Resources: []string{"restoresessions", "backupsessions", 
"backupconfigurations"}, - }, - rbacv1.PolicyRule{ - Verbs: []string{"update", "delete"}, - APIGroups: []string{"formol.desmojim.fr"}, - Resources: []string{"restoresessions", "backupsessions"}, - }, - }, - } - if err := cl.Get(context.Background(), client.ObjectKey{ - Namespace: namespace, - Name: backupListenerRole, - }, role); err != nil && errors.IsNotFound(err) { - if err = cl.Create(context.Background(), role); err != nil { - return err - } - } - rolebinding := &rbacv1.RoleBinding{ - ObjectMeta: metav1.ObjectMeta{ - Namespace: namespace, - Name: backupListenerRoleBinding, - }, - Subjects: []rbacv1.Subject{ - rbacv1.Subject{ - Kind: "ServiceAccount", - Name: saName, - }, - }, - RoleRef: rbacv1.RoleRef{ - APIGroup: "rbac.authorization.k8s.io", - Kind: "Role", - Name: backupListenerRole, - }, - } - if err := cl.Get(context.Background(), client.ObjectKey{ - Namespace: namespace, - Name: backupListenerRoleBinding, - }, rolebinding); err != nil && errors.IsNotFound(err) { - if err = cl.Create(context.Background(), rolebinding); err != nil { - return err - } - } - - return nil -} - -func DeleteBackupSessionStatusUpdaterRBAC(cl client.Client, saName string, namespace string) error { - if saName == "" { - saName = "default" - } - sa := &corev1.ServiceAccount{} - if err := cl.Get(context.Background(), client.ObjectKey{ - Namespace: namespace, - Name: saName, - }, sa); err != nil { - return err - } - - role := &rbacv1.Role{} - if err := cl.Get(context.Background(), client.ObjectKey{ - Namespace: namespace, - Name: backupSessionStatusUpdaterRole, - }, role); err == nil { - if err = cl.Delete(context.Background(), role); err != nil { - return err - } - } - - rolebinding := &rbacv1.RoleBinding{} - if err := cl.Get(context.Background(), client.ObjectKey{ - Namespace: namespace, - Name: backupSessionStatusUpdaterRoleBinding, - }, rolebinding); err == nil { - if err = cl.Delete(context.Background(), rolebinding); err != nil { - return err - } - } - - return nil -} - -func CreateBackupSessionStatusUpdaterRBAC(cl client.Client, saName string, namespace string) error { - if saName == "" { - saName = "default" - } - sa := &corev1.ServiceAccount{} - if err := cl.Get(context.Background(), client.ObjectKey{ - Namespace: namespace, - Name: saName, - }, sa); err != nil { - return err - } - role := &rbacv1.Role{ - ObjectMeta: metav1.ObjectMeta{ - Namespace: namespace, - Name: backupSessionStatusUpdaterRole, - }, - Rules: []rbacv1.PolicyRule{ - rbacv1.PolicyRule{ - Verbs: []string{"get", "list", "watch", "patch", "update"}, - APIGroups: []string{"formol.desmojim.fr"}, - Resources: []string{"restoresessions/status", "backupsessions/status"}, - }, - rbacv1.PolicyRule{ - Verbs: []string{"get", "list", "watch"}, - APIGroups: []string{"formol.desmojim.fr"}, - Resources: []string{"restoresessions", "backupsessions"}, - }, - }, - } - if err := cl.Get(context.Background(), client.ObjectKey{ - Namespace: namespace, - Name: backupListenerRole, - }, role); err != nil && errors.IsNotFound(err) { - if err = cl.Create(context.Background(), role); err != nil { - return err - } - } - rolebinding := &rbacv1.RoleBinding{ - ObjectMeta: metav1.ObjectMeta{ - Namespace: namespace, - Name: backupListenerRoleBinding, - }, - Subjects: []rbacv1.Subject{ - rbacv1.Subject{ - Kind: "ServiceAccount", - Name: saName, - }, - }, - RoleRef: rbacv1.RoleRef{ - APIGroup: "rbac.authorization.k8s.io", - Kind: "Role", - Name: backupListenerRole, - }, - } - if err := cl.Get(context.Background(), client.ObjectKey{ - Namespace: namespace, - Name: 
backupListenerRoleBinding, - }, rolebinding); err != nil && errors.IsNotFound(err) { - if err = cl.Create(context.Background(), rolebinding); err != nil { - return err - } - } - return nil -} diff --git a/pkg/utils/.root.go.un~ b/pkg/utils/.root.go.un~ new file mode 100644 index 0000000000000000000000000000000000000000..d01deb2076c97073850e7880e7d159e40f367722 GIT binary patch literal 2166 zcmeHIPiqrF6yGMbc0A}w6oqzNtt60cDMgf&9MW`!(qh6+no>lWO&hV`v5AArR+A*RXTm~Wbk)}%A&ykKx z3$Bp9@Orbh7jE0L2AkU(6zR1Jl{^#%3%O$~*v>8wHbF)A!0b68UFp$sg}QzW2-yi@ zx1dLJXylxOSQITHtKAtS+~}8{41OXM9cQN0D&5yWR#ErMwlc7DoNWVy36}m9BBtzoZf$s zoC4in0%N_p#@f|d&19^-Y_7ML7M0(tLlrY?mf6f8l=>p7(~KHb#@6aqVs{3jtEOBB z#4%2Y`KL#OG%^1hC{ahcD}`UCH~**#6J6v78#^Ee8!ma^K=INbM=3E_F|kcEJ^T_a_$H$c1n4IrBRs))0v;fLA>kpx zGWG#QXO&24$BAeSbijI!8xL*V?j?Fmc3vQh0Pm92I=&YN$PNNZ)n*2^v+k8Ot$>kurZ>;H37}~ G^6VENZxDq5 literal 0 HcmV?d00001 diff --git a/pkg/utils/root.go b/pkg/utils/root.go index 565b692..027ca5b 100644 --- a/pkg/utils/root.go +++ b/pkg/utils/root.go @@ -1,10 +1,8 @@ package utils import ( - "fmt" formolv1alpha1 "github.com/desmo999r/formol/api/v1alpha1" corev1 "k8s.io/api/core/v1" - "strings" ) func ContainsString(slice []string, s string) bool { @@ -29,29 +27,5 @@ func RemoveString(slice []string, s string) (result []string) { func ConfigureResticEnvVar(backupConf *formolv1alpha1.BackupConfiguration, repo *formolv1alpha1.Repo) []corev1.EnvVar { env := []corev1.EnvVar{} // S3 backing storage - if (formolv1alpha1.S3{}) != repo.Spec.Backend.S3 { - url := fmt.Sprintf("s3:http://%s/%s/%s-%s", repo.Spec.Backend.S3.Server, repo.Spec.Backend.S3.Bucket, strings.ToUpper(backupConf.Namespace), strings.ToLower(backupConf.Name)) - env = append(env, corev1.EnvVar{ - Name: "RESTIC_REPOSITORY", - Value: url, - }) - for _, key := range []string{ - "AWS_ACCESS_KEY_ID", - "AWS_SECRET_ACCESS_KEY", - "RESTIC_PASSWORD", - } { - env = append(env, corev1.EnvVar{ - Name: key, - ValueFrom: &corev1.EnvVarSource{ - SecretKeyRef: &corev1.SecretKeySelector{ - LocalObjectReference: corev1.LocalObjectReference{ - Name: repo.Spec.RepositorySecrets, - }, - Key: key, - }, - }, - }) - } - } return env } diff --git a/pkg/utils/root.go~ b/pkg/utils/root.go~ new file mode 100644 index 0000000..dd17272 --- /dev/null +++ b/pkg/utils/root.go~ @@ -0,0 +1,33 @@ +package utils + +import ( + "fmt" + formolv1alpha1 "github.com/desmo999r/formol/api/v1alpha1" + corev1 "k8s.io/api/core/v1" + "strings" +) + +func ContainsString(slice []string, s string) bool { + for _, item := range slice { + if item == s { + return true + } + } + return false +} + +func RemoveString(slice []string, s string) (result []string) { + for _, item := range slice { + if item == s { + continue + } + result = append(result, item) + } + return +} + +func ConfigureResticEnvVar(backupConf *formolv1alpha1.BackupConfiguration, repo *formolv1alpha1.Repo) []corev1.EnvVar { + env := []corev1.EnvVar{} + // S3 backing storage + return env +} diff --git a/test/.00-setup.yaml.un~ b/test/.00-setup.yaml.un~ new file mode 100644 index 0000000000000000000000000000000000000000..343bbecbbf34a12beb19474c4bf80965c6dbe956 GIT binary patch literal 12972 zcmeI2%WoS+9LJqBp%0q$-BMDiD?mLU?AoS|Vsk(tPlv>5oOu1Kk4WBdGee|1| z-^{3%{Y9=m>HHDupW1mp_xvX>-+lG=OC$Y_?|=IJ?&aUgpZ=|lXKsIXa42Ev}imN1tNKpmBgHJGves)N@S9ql?|3?q# z+aKl`j!@{Ir|7+K{VCF~Ng0xE?GdW^iZqmXkAVXykmdr2C;1-*RosjuCW-n}KfURk2#$vtJ zPAL_w=9)e^ka|*R9Vkk2fMfGfs*H%KOWfsEv|-Ny?Wk0jj%9Nj4yMJ5dRayb-AA+aG_+!1Z^)@%hI$ z^i_a6--$c7q3INSt1V5}Y^TO&D}aU7X)XYn=6@tS?uo}NyPl-ySA=({+}7{?{FgY| 
z&C-P&Dd2T*;uUke#_uWt=`<-90L+jwkP;eq%VU=CKbN<-L8S=oUlG~@U0fvnkrWYk zYFESw%U|GghLj5!E|JQmXzr1Nl#jw~N914eEMV#mn`ydUUm=r}Lz8}P!W-SFBzJ0k42=UCwL|hvlqSH2?EQ`LC=Q= zSUASQ1t0;Bjf@f=cS}$u!F%>$p?B_qq35#$tBXR*z!B`&98|IsJQIl7i{04=vGdUZ z)zd<&fD!0e+9%Noo(aV4g^qUcu$bvDs&x8HpBzYy2#o?okVEn(kB7ktfl4oOr}vZd z!2uLMV2dUKB4#>6_#;COUI9}q{t4~L3VcN8BlNqNcrvoOfEPJMQjI(DfJa5X57ro2sp{*{qwiHW0C#tA;~M z1x?+lSFF{Wu2ZM`hdOsFivQIA>);|s)@xy+M1u3 z+bk9`TSh$DE={CI)3sb;iSActrW1Oun8?-UCX8q;z7(rQ)7GX{%E#8U{3hPBUdm6m z%K7Pbv7K1g^3hf?pQ!4#I$nyids@YK(!(>wf*RG>yBo7=oj`1@##Ulk*UTkdbw$nQ yrxLDOHD)sTjGA9j)$&whvNZXMyHK0DrEQH{l|n4Zp>X#oN;n**N{Y%qzxxliAamsa literal 0 HcmV?d00001 diff --git a/test/.01-deployment.yaml.un~ b/test/.01-deployment.yaml.un~ new file mode 100644 index 0000000000000000000000000000000000000000..9024c4b7a7103a605a5045a481e276da23514416 GIT binary patch literal 1928 zcmWH`%$*;a=aT=FfvKJUqHdPQH$^+&k8d~Li#cTfbH!JSGJC<#s|-EGxLk!97#L!J zSXu!JGIJBtQ>_&8(lhfatPJ%GP4tX_(vm>T55$Z>%mTz90HR@-A^Cc5Jqt*dnIZlH zNEQf~AQYGcNr}Mt|A7E(6ii^0rg8`*U+>WcM+YOrR|%lmpr98A;%FcSMFkrqw&8MO zK(QzwW-x68M;|06K>`o}ib{|g4iI-V`aq#TO8|_%tKjHkf<&Jv&@xc;fpRJ+YD8hV zH7_w2m|2Tb)02x*Q=oANiYr(i1u}s-5|MvL;|>%eGzGwtumT)+43M}3Wj`q(hQ%Ge zk`@|+pcq32px6YJz=|Nj(cA|L1zG}N4Aw#GGH?Nj9)qf|+y|{IQ}UCuQ;YO6^YxKy JP1DBbs{m=5U>yJe literal 0 HcmV?d00001 diff --git a/test/.02-backupconf.yaml.un~ b/test/.02-backupconf.yaml.un~ new file mode 100644 index 0000000000000000000000000000000000000000..9615446d689186f3bf6b31290ccdaddd340f89b0 GIT binary patch literal 9847 zcmeI2O^6&t6o4n2-Hj%je>E{NNtq}Ta12qS?A!#sDG2IbrR}NNZD#t%p5Be)O%OeL z@!&-S@g&B-ix)vbkAlJ?3VIU}JPBq`3hVo-t7dx!GYm8Itsc}=S66pU&->nc^{RT- zqWHZRk|)#O8w>wF`ugq@Z(rW|`K`{4?D0>&y7|#}PoKT_@;~4F_}ibo?o*#O8jVX* zPAiq`EbT`{nh#p$xEV)DluF-0DJP}ulX9mNgxZxx`0ImjF4DWyc<~2%MHb4+eA0DT z$|9{@5i=AGFEwZ&-2Qvl;Ak{kW1UbUK^Cp)ydR}WOKn}XZU?9(F0%qK56)?$_FBMQ90zw)zSV`xMZlRr3Q~HQ zFdG&gu$F0C0VLOWuX7rywH9~}RpH?`Oji!x8Q>*BEYUJqkCIoKZJqVfL9CO)0tDEB z$}<8JfMh*>(nATgiU7Pi2_XDb5#;N-n1~TRB_~ zFNN7}*>EhwD1FDJ;G-^#g6nqLRzS)9Mm#>Ji5Y7&>9os=;DjA#%^eHA_pA80;BF}o zNWrrXg0ElbtlyH>JtfSh4XkkxC529cB-G6`k^Z|z`>#jEMpGwS(qqCgX>94suBVfy zvGkrBy>DdPPG@gmr8-d(^rBZKsu%gDwvim0F;QD;gkQ`r=Lh%@fD-1BX>90&Nj9`n z(j8jS`Lexd4I{jTnRGKL`Yjs=;KZJX6Bu%%j{{C4^VD;5lWh#d z`E*;yX|o*^fu#p67Ftdq2|bhC`Aop6WyX3MdLK{nx?#|a@wsP%Vxy(5RPedYM)!Q6 zbYANxVW*W@Ycy%5AF0n&j!ERqc}e6x8^?E6I?1)!AT)_wCe%zKci6dNm7v?@kgJWb zr1s&{Dr4?y8iJosB52d7A;Y63Ljr1*qT2{@56q(Lf@}CN>NGK9jV4VxAvLb$b{_CU zb1Z)Nb(P|I>2T`j#sf-qwa!>g9Rgr4=oc+@O`3*pJBWJn8t!3f8y~%{b=RFZ!u4^Q z6dRm*%w00jLC%Rs>^$mL+0t)>U9n+Sz2&RA$ba*J~=WSfM%*St;j&`?R85^nFjW1uaRh)&6up?-uV$4OidA^MKyJ&?MC z>#RVL1>`x}o*Y`JQ^KF#{mYQsUy{QgSqJi}26C5_isE2hHjLW3AE&FUtNE2qn#XA` z>_x3!Q0TsUApkl4+CZv|K$W3P+ml2kbxQcl_g@;4izP|73*$)M@}DX;gjSu9=LDt4 zS0}NNvE{!&Zy0<_)8o@|Rtzk@>g)$fyw2krQE{CO@xCt%KAWUW<9#}d!S~09{{aog B-&z0w literal 0 HcmV?d00001 diff --git a/test/00-setup.yaml b/test/00-setup.yaml index f6d11b1..665603d 100644 --- a/test/00-setup.yaml +++ b/test/00-setup.yaml @@ -6,6 +6,15 @@ metadata: --- apiVersion: v1 kind: Secret +metadata: + name: regcred + namespace: demo +type: kubernetes.io/dockerconfigjson +data: + .dockerconfigjson: eyJhdXRocyI6eyJodHRwczovL2luZGV4LmRvY2tlci5pby92MS8iOnsidXNlcm5hbWUiOiJkZXNtbzk5OXIiLCJwYXNzd29yZCI6IlU5QXNlVGF5cUY5UlJCd0l2Q1k0IiwiZW1haWwiOiJqZWFubWFyYy5qaW0uYW5kcmVAZ21haWwuY29tIiwiYXV0aCI6IlpHVnpiVzg1T1RseU9sVTVRWE5sVkdGNWNVWTVVbEpDZDBsMlExazAifX19 +--- +apiVersion: v1 +kind: Secret metadata: namespace: demo name: demo-chap-secret @@ -35,16 +44,9 @@ spec: storage: 50Mi accessModes: - ReadWriteOnce - iscsi: - targetPortal: 
192.168.1.159 - iqn: iqn.2020-08.raid5:demo - lun: 1 - fsType: ext4 - readOnly: false - chapAuthDiscovery: true - chapAuthSession: true - secretRef: - name: demo-chap-secret + hostPath: + path: /tmp/demo + type: DirectoryOrCreate --- apiVersion: v1 kind: PersistentVolumeClaim @@ -71,6 +73,16 @@ data: --- apiVersion: formol.desmojim.fr/v1alpha1 kind: Repo +metadata: + name: repo-empty + namespace: demo +spec: + backend: + nfs: "toto" + repositorySecrets: secret-minio +--- +apiVersion: formol.desmojim.fr/v1alpha1 +kind: Repo metadata: name: repo-minio namespace: demo diff --git a/test/00-setup.yaml~ b/test/00-setup.yaml~ new file mode 100644 index 0000000..b62e6b9 --- /dev/null +++ b/test/00-setup.yaml~ @@ -0,0 +1,173 @@ +--- +apiVersion: v1 +kind: Namespace +metadata: + name: demo +--- +apiVersion: v1 +kind: Secret +metadata: + name: regcred + namespace: demo +type: kubernetes.io/dockerconfigjson +data: + .dockerconfigjson: eyJhdXRocyI6eyJodHRwczovL2luZGV4LmRvY2tlci5pby92MS8iOnsidXNlcm5hbWUiOiJkZXNtbzk5OXIiLCJwYXNzd29yZCI6IlU5QXNlVGF5cUY5UlJCd0l2Q1k0IiwiZW1haWwiOiJqZWFubWFyYy5qaW0uYW5kcmVAZ21haWwuY29tIiwiYXV0aCI6IlpHVnpiVzg1T1RseU9sVTVRWE5sVkdGNWNVWTVVbEpDZDBsMlExazAifX19 +--- +apiVersion: v1 +kind: Secret +metadata: + namespace: demo + name: demo-chap-secret +type: "kubernetes.io/iscsi-chap" +data: + discovery.sendtargets.auth.username: ZGVtbw== + discovery.sendtargets.auth.password: VHJtK1lZaXZvMUNZSGszcGFGVWMrcTdCMmdJPQo= + node.session.auth.username: ZGVtbw== + node.session.auth.password: VHJtK1lZaXZvMUNZSGszcGFGVWMrcTdCMmdJPQo= +--- +apiVersion: v1 +kind: Secret +metadata: + namespace: demo + name: with-envfrom-secret +data: + title: dmVyeXNlY3JldA== +--- +apiVersion: v1 +kind: PersistentVolume +metadata: + name: demo-pv + namespace: demo +spec: + storageClassName: manual + capacity: + storage: 50Mi + accessModes: + - ReadWriteOnce + hostPath: + path: /tmp/demo + type: DirectoryOrCreate +--- +apiVersion: v1 +kind: PersistentVolumeClaim +metadata: + name: demo-pvc + namespace: demo +spec: + storageClassName: manual + accessModes: + - ReadWriteOnce + resources: + requests: + storage: 50Mi +--- +apiVersion: v1 +kind: Secret +metadata: + name: secret-minio + namespace: demo +data: + RESTIC_PASSWORD: bHIyOXhtOTU= + AWS_ACCESS_KEY_ID: OWFTSXZBSEVzWlNVMmkyTU9zVGxWSk1lL1NjPQ== + AWS_SECRET_ACCESS_KEY: WVN5ck9ncVllcjBWNFNLdlVOcmx2OGhjTllhZGZuN2xaNjBIaXRlL3djWT0= +--- +apiVersion: formol.desmojim.fr/v1alpha1 +kind: Repo +metadata: + name: repo-empty + namespace: demo +spec: + backend: + repositorySecrets: secret-minio +--- +apiVersion: formol.desmojim.fr/v1alpha1 +kind: Repo +metadata: + name: repo-minio + namespace: demo +spec: + backend: + s3: + server: raid5.desmojim.fr:9000 + bucket: testbucket2 + repositorySecrets: secret-minio +--- +apiVersion: formol.desmojim.fr/v1alpha1 +kind: Function +metadata: + name: restore-pg + namespace: demo +spec: + name: restore-pg + image: desmo999r/formolcli:latest + args: ["postgres", "restore", "--hostname", $(PGHOST), "--database", $(PGDATABASE), "--username", $(PGUSER), "--password", $(PGPASSWD), "--file", "/output/backup-pg.sql"] + env: + - name: PGHOST + value: postgres + - name: PGDATABASE + value: demopostgres + - name: PGUSER + value: demopostgres + - name: PGPASSWD + value: password123! 
+--- +apiVersion: formol.desmojim.fr/v1alpha1 +kind: Function +metadata: + name: with-envfrom + namespace: demo +spec: + name: with-envfrom + command: ["touch", $(title)] + envFrom: + - secretRef: + name: with-envfrom-secret +--- +apiVersion: formol.desmojim.fr/v1alpha1 +kind: Function +metadata: + name: with-env + namespace: demo +spec: + name: with-env + command: ["touch", $(TESTFILE)] + env: + - name: TESTFILE + value: /data/testfile +--- +apiVersion: formol.desmojim.fr/v1alpha1 +kind: Function +metadata: + name: backup-pg + namespace: demo +spec: + name: backup-pg + image: desmo999r/formolcli:latest + args: ["postgres", "backup", "--hostname", $(PGHOST), "--database", $(PGDATABASE), "--username", $(PGUSER), "--password", $(PGPASSWD), "--file", "/output/backup-pg.sql"] + env: + - name: PGHOST + value: postgres + - name: PGDATABASE + value: demopostgres + - name: PGUSER + value: demopostgres + - name: PGPASSWD + value: password123! +--- +apiVersion: formol.desmojim.fr/v1alpha1 +kind: Function +metadata: + name: maintenance-off + namespace: demo +spec: + name: maintenance-off + command: ["/bin/bash", "-c", "echo $(date +%Y/%m/%d-%H:%M:%S) maintenance-off >> /data/logs.txt"] +--- +apiVersion: formol.desmojim.fr/v1alpha1 +kind: Function +metadata: + name: maintenance-on + namespace: demo +spec: + name: maintenance-on + command: ["/bin/bash", "-c", "echo $(date +%Y/%m/%d-%H:%M:%S) maintenance-on >> /data/logs.txt"] diff --git a/test/01-deployment.yaml b/test/01-deployment.yaml index eff5dd1..a08040e 100644 --- a/test/01-deployment.yaml +++ b/test/01-deployment.yaml @@ -18,9 +18,11 @@ spec: labels: app: nginx spec: + imagePullSecrets: + - name: regcred containers: - name: nginx - image: nginx:1.14.2 + image: docker.io/nginx:1.23.3 ports: - containerPort: 80 volumeMounts: diff --git a/test/01-deployment.yaml~ b/test/01-deployment.yaml~ new file mode 100644 index 0000000..f6e9cc3 --- /dev/null +++ b/test/01-deployment.yaml~ @@ -0,0 +1,92 @@ +--- +apiVersion: apps/v1 +kind: Deployment +metadata: + name: nginx-deployment + namespace: demo + labels: + app: nginx +spec: + replicas: 1 + strategy: + type: Recreate + selector: + matchLabels: + app: nginx + template: + metadata: + labels: + app: nginx + spec: + imagePullSecrets: + - name: regcred + containers: + - name: nginx + image: nginx:1.14.2 + ports: + - containerPort: 80 + volumeMounts: + - name: demo-data + mountPath: /data + volumes: + - name: demo-data + persistentVolumeClaim: + claimName: demo-pvc +--- +apiVersion: v1 +kind: ConfigMap +metadata: + name: postgres-config-demo + namespace: demo + labels: + app: postgres +data: + POSTGRES_DB: demopostgres + POSTGRES_USER: demopostgres + POSTGRES_PASSWORD: password123! 
+--- +apiVersion: v1 +kind: Service +metadata: + name: postgres + namespace: demo + labels: + app: postgres +spec: + ports: + - port: 5432 + name: postgres + clusterIP: None + selector: + app: postgres +--- +apiVersion: apps/v1 +kind: StatefulSet +metadata: + name: postgres-demo + namespace: demo +spec: + serviceName: "postgres" + replicas: 1 + selector: + matchLabels: + app: postgres + template: + metadata: + labels: + app: postgres + spec: + containers: + - name: postgres + image: postgres:12 + envFrom: + - configMapRef: + name: postgres-config-demo + ports: + - containerPort: 5432 + name: postgredb + volumeMounts: + - name: postgredb + mountPath: /var/lib/postgresql/data + volumes: + - name: postgredb diff --git a/test/02-backupconf.yaml b/test/02-backupconf.yaml index f037c82..2ef6b15 100644 --- a/test/02-backupconf.yaml +++ b/test/02-backupconf.yaml @@ -6,31 +6,30 @@ metadata: namespace: demo spec: suspend: true - image: desmo999r/formolcli:latest + image: desmo999r/formolcli:0.3.2 repository: repo-minio schedule: "15 * * * *" - targets: - - kind: Sidecar - apiVersion: v1 - name: nginx-deployment - steps: - - name: maintenance-on - - name: with-env - - name: with-envfrom - - name: maintenance-off - finalize: true - volumeMounts: - - name: demo-data - mountPath: /data - paths: - - /data -# - kind: Job -# name: backup-pg -# steps: -# - name: backup-pg keep: last: 5 daily: 2 weekly: 2 monthly: 6 yearly: 3 + targets: + - backupType: Online + targetKind: Deployment + targetName: nginx-deployment + containers: + - name: nginx + steps: + - name: maintenance-on + - name: with-env + - name: with-envfrom + - name: maintenance-off + finalize: true + paths: + - /data +# - kind: Job +# name: backup-pg +# steps: +# - name: backup-pg diff --git a/test/02-backupconf.yaml~ b/test/02-backupconf.yaml~ new file mode 100644 index 0000000..d9d4d03 --- /dev/null +++ b/test/02-backupconf.yaml~ @@ -0,0 +1,35 @@ +--- +apiVersion: formol.desmojim.fr/v1alpha1 +kind: BackupConfiguration +metadata: + name: backup-demo + namespace: demo +spec: + suspend: true + image: desmo999r/formolcli:0.3.2 + repository: repo-empty + schedule: "15 * * * *" + keep: + last: 5 + daily: 2 + weekly: 2 + monthly: 6 + yearly: 3 + targets: + - backupType: Online + targetKind: Deployment + targetName: nginx-deployment + containers: + - name: nginx + steps: + - name: maintenance-on + - name: with-env + - name: with-envfrom + - name: maintenance-off + finalize: true + paths: + - /data +# - kind: Job +# name: backup-pg +# steps: +# - name: backup-pg