From 6d83e5917168be8b43dd4558556571f198932e0f Mon Sep 17 00:00:00 2001 From: Jean-Marc ANDRE Date: Fri, 3 Feb 2023 18:23:41 +0100 Subject: [PATCH 01/69] snapshot initial commit with some BackupConfiguration and BackupSession controllers --- .gitignore | 2 + Dockerfile | 13 +- LICENSE | 201 -------- Makefile | 121 +++-- README.md | 93 +++- .../.backupconfiguration_types.go.un~ | Bin 0 -> 6508 bytes api/v1alpha1/.backupsession_types.go.un~ | Bin 0 -> 7478 bytes api/v1alpha1/.common.go.un~ | Bin 0 -> 5425 bytes api/v1alpha1/.function_types.go.un~ | Bin 0 -> 7007 bytes api/v1alpha1/.repo_types.go.un~ | Bin 0 -> 1033 bytes api/v1alpha1/backupconfiguration_types.go | 93 ++-- api/v1alpha1/backupconfiguration_types.go~ | 115 +++++ api/v1alpha1/backupsession_types.go | 46 +- api/v1alpha1/backupsession_types.go~ | 89 ++++ api/v1alpha1/common.go | 34 -- api/v1alpha1/common.go~ | 6 + api/v1alpha1/function_types.go | 17 +- api/v1alpha1/function_types.go~ | 64 +++ api/v1alpha1/groupversion_info.go | 2 +- api/v1alpha1/repo_types.go | 56 ++- api/v1alpha1/repo_types.go~ | 109 +++++ api/v1alpha1/restoresession_types.go | 38 +- api/v1alpha1/zz_generated.deepcopy.go | 118 +++-- config/certmanager/certificate.yaml | 26 - config/certmanager/kustomization.yaml | 5 - config/certmanager/kustomizeconfig.yaml | 16 - config/crd/kustomization.yaml | 21 +- config/crd/kustomizeconfig.yaml | 6 +- .../cainjection_in_backupconfigurations.yaml | 1 - .../cainjection_in_backupsessions.yaml | 1 - .../crd/patches/cainjection_in_functions.yaml | 1 - config/crd/patches/cainjection_in_repoes.yaml | 7 + .../cainjection_in_restoresessions.yaml | 3 +- .../webhook_in_backupconfigurations.yaml | 10 +- .../patches/webhook_in_backupsessions.yaml | 20 +- config/crd/patches/webhook_in_functions.yaml | 19 +- config/crd/patches/webhook_in_repoes.yaml | 16 + .../patches/webhook_in_restoresessions.yaml | 21 +- config/default/kustomization.yaml | 20 +- config/default/manager_auth_proxy_patch.yaml | 40 +- 
config/default/manager_config_patch.yaml | 10 + config/default/manager_webhook_patch.yaml | 23 - config/default/webhookcainjection_patch.yaml | 15 - config/manager/kustomization.yaml | 8 - config/manager/manager.yaml | 39 -- config/prometheus/kustomization.yaml | 2 - config/prometheus/monitor.yaml | 16 - .../rbac/auth_proxy_client_clusterrole.yaml | 7 - config/rbac/auth_proxy_role.yaml | 13 - config/rbac/auth_proxy_role_binding.yaml | 12 - config/rbac/auth_proxy_service.yaml | 14 - .../rbac/backupconfiguration_editor_role.yaml | 24 - .../rbac/backupconfiguration_viewer_role.yaml | 20 - config/rbac/backupsession_editor_role.yaml | 24 - config/rbac/backupsession_viewer_role.yaml | 20 - config/rbac/function_editor_role.yaml | 24 - config/rbac/function_viewer_role.yaml | 20 - config/rbac/kustomization.yaml | 12 - config/rbac/leader_election_role.yaml | 32 -- config/rbac/leader_election_role_binding.yaml | 12 - config/rbac/role_binding.yaml | 12 - ...l.desmojim.fr_v1alpha1_restoresession.yaml | 7 - .../formol_v1alpha1_backupconfiguration.yaml | 21 +- .../formol_v1alpha1_backupsession.yaml | 12 +- config/samples/formol_v1alpha1_function.yaml | 15 +- config/samples/formol_v1alpha1_repo.yaml | 15 +- .../formol_v1alpha1_restoresession.yaml | 12 + config/samples/test_deployment.yaml | 28 -- config/webhook/kustomization.yaml | 6 - config/webhook/kustomizeconfig.yaml | 25 - config/webhook/service.yaml | 12 - .../.backupconfiguration_controller.go.un~ | Bin 0 -> 35950 bytes ...kupconfiguration_controller_cronjob.go.un~ | Bin 0 -> 7322 bytes ...kupconfiguration_controller_sidecar.go.un~ | Bin 0 -> 26628 bytes ...backupconfiguration_controller_test.go.un~ | Bin 0 -> 15460 bytes controllers/.backupsession_controller.go.un~ | Bin 0 -> 2760 bytes controllers/.suite_test.go.un~ | Bin 0 -> 7724 bytes controllers/backupconfiguration_controller.go | 358 ++------------ .../backupconfiguration_controller.go~ | 129 +++++ .../backupconfiguration_controller_cronjob.go | 103 ++++ 
...backupconfiguration_controller_cronjob.go~ | 102 ++++ .../backupconfiguration_controller_sidecar.go | 137 ++++++ ...backupconfiguration_controller_sidecar.go~ | 134 ++++++ .../backupconfiguration_controller_test.go | 204 ++++---- .../backupconfiguration_controller_test.go~ | 165 +++++++ controllers/backupsession_controller.go | 453 +----------------- controllers/backupsession_controller.go~ | 62 +++ controllers/backupsession_controller_test.go | 147 ------ controllers/restoresession_controller.go | 420 +--------------- controllers/restoresession_controller_test.go | 95 ---- controllers/suite_test.go | 274 ++--------- controllers/suite_test.go~ | 155 ++++++ go.mod | 84 +++- hack/boilerplate.go.txt | 2 +- main.go | 81 ++-- pkg/rbac/backupconfiguration.go | 438 ----------------- pkg/utils/.root.go.un~ | Bin 0 -> 2166 bytes pkg/utils/root.go | 26 - pkg/utils/root.go~ | 33 ++ test/.00-setup.yaml.un~ | Bin 0 -> 12972 bytes test/.01-deployment.yaml.un~ | Bin 0 -> 1928 bytes test/.02-backupconf.yaml.un~ | Bin 0 -> 9847 bytes test/00-setup.yaml | 32 +- test/00-setup.yaml~ | 173 +++++++ test/01-deployment.yaml | 4 +- test/01-deployment.yaml~ | 92 ++++ test/02-backupconf.yaml | 39 +- test/02-backupconf.yaml~ | 35 ++ 108 files changed, 2665 insertions(+), 3274 deletions(-) delete mode 100644 LICENSE create mode 100644 api/v1alpha1/.backupconfiguration_types.go.un~ create mode 100644 api/v1alpha1/.backupsession_types.go.un~ create mode 100644 api/v1alpha1/.common.go.un~ create mode 100644 api/v1alpha1/.function_types.go.un~ create mode 100644 api/v1alpha1/.repo_types.go.un~ create mode 100644 api/v1alpha1/backupconfiguration_types.go~ create mode 100644 api/v1alpha1/backupsession_types.go~ create mode 100644 api/v1alpha1/common.go~ create mode 100644 api/v1alpha1/function_types.go~ create mode 100644 api/v1alpha1/repo_types.go~ delete mode 100644 config/certmanager/certificate.yaml delete mode 100644 config/certmanager/kustomization.yaml delete mode 100644 
config/certmanager/kustomizeconfig.yaml create mode 100644 config/crd/patches/cainjection_in_repoes.yaml create mode 100644 config/crd/patches/webhook_in_repoes.yaml create mode 100644 config/default/manager_config_patch.yaml delete mode 100644 config/default/manager_webhook_patch.yaml delete mode 100644 config/default/webhookcainjection_patch.yaml delete mode 100644 config/manager/kustomization.yaml delete mode 100644 config/manager/manager.yaml delete mode 100644 config/prometheus/kustomization.yaml delete mode 100644 config/prometheus/monitor.yaml delete mode 100644 config/rbac/auth_proxy_client_clusterrole.yaml delete mode 100644 config/rbac/auth_proxy_role.yaml delete mode 100644 config/rbac/auth_proxy_role_binding.yaml delete mode 100644 config/rbac/auth_proxy_service.yaml delete mode 100644 config/rbac/backupconfiguration_editor_role.yaml delete mode 100644 config/rbac/backupconfiguration_viewer_role.yaml delete mode 100644 config/rbac/backupsession_editor_role.yaml delete mode 100644 config/rbac/backupsession_viewer_role.yaml delete mode 100644 config/rbac/function_editor_role.yaml delete mode 100644 config/rbac/function_viewer_role.yaml delete mode 100644 config/rbac/kustomization.yaml delete mode 100644 config/rbac/leader_election_role.yaml delete mode 100644 config/rbac/leader_election_role_binding.yaml delete mode 100644 config/rbac/role_binding.yaml delete mode 100644 config/samples/formol.desmojim.fr_v1alpha1_restoresession.yaml create mode 100644 config/samples/formol_v1alpha1_restoresession.yaml delete mode 100644 config/samples/test_deployment.yaml delete mode 100644 config/webhook/kustomization.yaml delete mode 100644 config/webhook/kustomizeconfig.yaml delete mode 100644 config/webhook/service.yaml create mode 100644 controllers/.backupconfiguration_controller.go.un~ create mode 100644 controllers/.backupconfiguration_controller_cronjob.go.un~ create mode 100644 controllers/.backupconfiguration_controller_sidecar.go.un~ create mode 100644 
controllers/.backupconfiguration_controller_test.go.un~ create mode 100644 controllers/.backupsession_controller.go.un~ create mode 100644 controllers/.suite_test.go.un~ create mode 100644 controllers/backupconfiguration_controller.go~ create mode 100644 controllers/backupconfiguration_controller_cronjob.go create mode 100644 controllers/backupconfiguration_controller_cronjob.go~ create mode 100644 controllers/backupconfiguration_controller_sidecar.go create mode 100644 controllers/backupconfiguration_controller_sidecar.go~ create mode 100644 controllers/backupconfiguration_controller_test.go~ create mode 100644 controllers/backupsession_controller.go~ delete mode 100644 controllers/backupsession_controller_test.go delete mode 100644 controllers/restoresession_controller_test.go create mode 100644 controllers/suite_test.go~ delete mode 100644 pkg/rbac/backupconfiguration.go create mode 100644 pkg/utils/.root.go.un~ create mode 100644 pkg/utils/root.go~ create mode 100644 test/.00-setup.yaml.un~ create mode 100644 test/.01-deployment.yaml.un~ create mode 100644 test/.02-backupconf.yaml.un~ create mode 100644 test/00-setup.yaml~ create mode 100644 test/01-deployment.yaml~ create mode 100644 test/02-backupconf.yaml~ diff --git a/.gitignore b/.gitignore index 871807a..d181f1b 100644 --- a/.gitignore +++ b/.gitignore @@ -1,3 +1,5 @@ +*~ + # Binaries for programs and plugins *.exe *.exe~ diff --git a/Dockerfile b/Dockerfile index f8b22c7..8f9cca1 100644 --- a/Dockerfile +++ b/Dockerfile @@ -1,5 +1,7 @@ # Build the manager binary -FROM golang:alpine as builder +FROM golang:1.19 as builder +ARG TARGETOS +ARG TARGETARCH WORKDIR /workspace # Copy the Go Modules manifests @@ -12,17 +14,20 @@ RUN go mod download # Copy the go source COPY main.go main.go COPY api/ api/ -COPY pkg/ pkg/ COPY controllers/ controllers/ # Build -RUN CGO_ENABLED=0 GOOS=linux GOARCH=arm64 GO111MODULE=on go build -a -o manager main.go +# the GOARCH has not a default value to allow the binary be built 
according to the host where the command +# was called. For example, if we call make docker-build in a local env which has the Apple Silicon M1 SO +# the docker BUILDPLATFORM arg will be linux/arm64 when for Apple x86 it will be linux/amd64. Therefore, +# by leaving it empty we can ensure that the container and binary shipped on it will have the same platform. +RUN CGO_ENABLED=0 GOOS=${TARGETOS:-linux} GOARCH=${TARGETARCH} go build -a -o manager main.go # Use distroless as minimal base image to package the manager binary # Refer to https://github.com/GoogleContainerTools/distroless for more details FROM gcr.io/distroless/static:nonroot WORKDIR / COPY --from=builder /workspace/manager . -USER nonroot:nonroot +USER 65532:65532 ENTRYPOINT ["/manager"] diff --git a/LICENSE b/LICENSE deleted file mode 100644 index 261eeb9..0000000 --- a/LICENSE +++ /dev/null @@ -1,201 +0,0 @@ - Apache License - Version 2.0, January 2004 - http://www.apache.org/licenses/ - - TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION - - 1. Definitions. - - "License" shall mean the terms and conditions for use, reproduction, - and distribution as defined by Sections 1 through 9 of this document. - - "Licensor" shall mean the copyright owner or entity authorized by - the copyright owner that is granting the License. - - "Legal Entity" shall mean the union of the acting entity and all - other entities that control, are controlled by, or are under common - control with that entity. For the purposes of this definition, - "control" means (i) the power, direct or indirect, to cause the - direction or management of such entity, whether by contract or - otherwise, or (ii) ownership of fifty percent (50%) or more of the - outstanding shares, or (iii) beneficial ownership of such entity. - - "You" (or "Your") shall mean an individual or Legal Entity - exercising permissions granted by this License. 
- - "Source" form shall mean the preferred form for making modifications, - including but not limited to software source code, documentation - source, and configuration files. - - "Object" form shall mean any form resulting from mechanical - transformation or translation of a Source form, including but - not limited to compiled object code, generated documentation, - and conversions to other media types. - - "Work" shall mean the work of authorship, whether in Source or - Object form, made available under the License, as indicated by a - copyright notice that is included in or attached to the work - (an example is provided in the Appendix below). - - "Derivative Works" shall mean any work, whether in Source or Object - form, that is based on (or derived from) the Work and for which the - editorial revisions, annotations, elaborations, or other modifications - represent, as a whole, an original work of authorship. For the purposes - of this License, Derivative Works shall not include works that remain - separable from, or merely link (or bind by name) to the interfaces of, - the Work and Derivative Works thereof. - - "Contribution" shall mean any work of authorship, including - the original version of the Work and any modifications or additions - to that Work or Derivative Works thereof, that is intentionally - submitted to Licensor for inclusion in the Work by the copyright owner - or by an individual or Legal Entity authorized to submit on behalf of - the copyright owner. 
For the purposes of this definition, "submitted" - means any form of electronic, verbal, or written communication sent - to the Licensor or its representatives, including but not limited to - communication on electronic mailing lists, source code control systems, - and issue tracking systems that are managed by, or on behalf of, the - Licensor for the purpose of discussing and improving the Work, but - excluding communication that is conspicuously marked or otherwise - designated in writing by the copyright owner as "Not a Contribution." - - "Contributor" shall mean Licensor and any individual or Legal Entity - on behalf of whom a Contribution has been received by Licensor and - subsequently incorporated within the Work. - - 2. Grant of Copyright License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - copyright license to reproduce, prepare Derivative Works of, - publicly display, publicly perform, sublicense, and distribute the - Work and such Derivative Works in Source or Object form. - - 3. Grant of Patent License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - (except as stated in this section) patent license to make, have made, - use, offer to sell, sell, import, and otherwise transfer the Work, - where such license applies only to those patent claims licensable - by such Contributor that are necessarily infringed by their - Contribution(s) alone or by combination of their Contribution(s) - with the Work to which such Contribution(s) was submitted. 
If You - institute patent litigation against any entity (including a - cross-claim or counterclaim in a lawsuit) alleging that the Work - or a Contribution incorporated within the Work constitutes direct - or contributory patent infringement, then any patent licenses - granted to You under this License for that Work shall terminate - as of the date such litigation is filed. - - 4. Redistribution. You may reproduce and distribute copies of the - Work or Derivative Works thereof in any medium, with or without - modifications, and in Source or Object form, provided that You - meet the following conditions: - - (a) You must give any other recipients of the Work or - Derivative Works a copy of this License; and - - (b) You must cause any modified files to carry prominent notices - stating that You changed the files; and - - (c) You must retain, in the Source form of any Derivative Works - that You distribute, all copyright, patent, trademark, and - attribution notices from the Source form of the Work, - excluding those notices that do not pertain to any part of - the Derivative Works; and - - (d) If the Work includes a "NOTICE" text file as part of its - distribution, then any Derivative Works that You distribute must - include a readable copy of the attribution notices contained - within such NOTICE file, excluding those notices that do not - pertain to any part of the Derivative Works, in at least one - of the following places: within a NOTICE text file distributed - as part of the Derivative Works; within the Source form or - documentation, if provided along with the Derivative Works; or, - within a display generated by the Derivative Works, if and - wherever such third-party notices normally appear. The contents - of the NOTICE file are for informational purposes only and - do not modify the License. 
You may add Your own attribution - notices within Derivative Works that You distribute, alongside - or as an addendum to the NOTICE text from the Work, provided - that such additional attribution notices cannot be construed - as modifying the License. - - You may add Your own copyright statement to Your modifications and - may provide additional or different license terms and conditions - for use, reproduction, or distribution of Your modifications, or - for any such Derivative Works as a whole, provided Your use, - reproduction, and distribution of the Work otherwise complies with - the conditions stated in this License. - - 5. Submission of Contributions. Unless You explicitly state otherwise, - any Contribution intentionally submitted for inclusion in the Work - by You to the Licensor shall be under the terms and conditions of - this License, without any additional terms or conditions. - Notwithstanding the above, nothing herein shall supersede or modify - the terms of any separate license agreement you may have executed - with Licensor regarding such Contributions. - - 6. Trademarks. This License does not grant permission to use the trade - names, trademarks, service marks, or product names of the Licensor, - except as required for reasonable and customary use in describing the - origin of the Work and reproducing the content of the NOTICE file. - - 7. Disclaimer of Warranty. Unless required by applicable law or - agreed to in writing, Licensor provides the Work (and each - Contributor provides its Contributions) on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or - implied, including, without limitation, any warranties or conditions - of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A - PARTICULAR PURPOSE. You are solely responsible for determining the - appropriateness of using or redistributing the Work and assume any - risks associated with Your exercise of permissions under this License. - - 8. 
Limitation of Liability. In no event and under no legal theory, - whether in tort (including negligence), contract, or otherwise, - unless required by applicable law (such as deliberate and grossly - negligent acts) or agreed to in writing, shall any Contributor be - liable to You for damages, including any direct, indirect, special, - incidental, or consequential damages of any character arising as a - result of this License or out of the use or inability to use the - Work (including but not limited to damages for loss of goodwill, - work stoppage, computer failure or malfunction, or any and all - other commercial damages or losses), even if such Contributor - has been advised of the possibility of such damages. - - 9. Accepting Warranty or Additional Liability. While redistributing - the Work or Derivative Works thereof, You may choose to offer, - and charge a fee for, acceptance of support, warranty, indemnity, - or other liability obligations and/or rights consistent with this - License. However, in accepting such obligations, You may act only - on Your own behalf and on Your sole responsibility, not on behalf - of any other Contributor, and only if You agree to indemnify, - defend, and hold each Contributor harmless for any liability - incurred by, or claims asserted against, such Contributor by reason - of your accepting any such warranty or additional liability. - - END OF TERMS AND CONDITIONS - - APPENDIX: How to apply the Apache License to your work. - - To apply the Apache License to your work, attach the following - boilerplate notice, with the fields enclosed by brackets "[]" - replaced with your own identifying information. (Don't include - the brackets!) The text should be enclosed in the appropriate - comment syntax for the file format. We also recommend that a - file or class name and description of purpose be included on the - same "printed page" as the copyright notice for easier - identification within third-party archives. 
- - Copyright [yyyy] [name of copyright owner] - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. diff --git a/Makefile b/Makefile index 1998ac6..9116f85 100644 --- a/Makefile +++ b/Makefile @@ -1,8 +1,8 @@ # Image URL to use all building/pushing image targets -IMG ?= desmo999r/formolcontroller:0.3.0 -# Produce CRDs that work back to Kubernetes 1.11 (no version conversion) -CRD_OPTIONS ?= "crd:trivialVersions=true,preserveUnknownFields=false,crdVersions=v1" +IMG ?= controller:latest +# ENVTEST_K8S_VERSION refers to the version of kubebuilder assets to be downloaded by envtest binary. +ENVTEST_K8S_VERSION = 1.25.0 # Get the currently used golang install path (in GOPATH/bin, unless GOBIN is set) ifeq (,$(shell go env GOBIN)) @@ -12,11 +12,11 @@ GOBIN=$(shell go env GOBIN) endif # Setting SHELL to bash allows bash commands to be executed by recipes. -# This is a requirement for 'setup-envtest.sh' in the test target. # Options are set to exit when a recipe line exits non-zero or a piped command fails. SHELL = /usr/bin/env bash -o pipefail .SHELLFLAGS = -ec +.PHONY: all all: build ##@ General @@ -32,79 +32,126 @@ all: build # More info on the awk command: # http://linuxcommand.org/lc3_adv_awk.php +.PHONY: help help: ## Display this help. 
@awk 'BEGIN {FS = ":.*##"; printf "\nUsage:\n make \033[36m\033[0m\n"} /^[a-zA-Z_0-9-]+:.*?##/ { printf " \033[36m%-15s\033[0m %s\n", $$1, $$2 } /^##@/ { printf "\n\033[1m%s\033[0m\n", substr($$0, 5) } ' $(MAKEFILE_LIST) ##@ Development +.PHONY: manifests manifests: controller-gen ## Generate WebhookConfiguration, ClusterRole and CustomResourceDefinition objects. - $(CONTROLLER_GEN) $(CRD_OPTIONS) rbac:roleName=manager-role webhook paths="./..." output:crd:artifacts:config=config/crd/bases + $(CONTROLLER_GEN) rbac:roleName=manager-role crd webhook paths="./..." output:crd:artifacts:config=config/crd/bases +.PHONY: generate generate: controller-gen ## Generate code containing DeepCopy, DeepCopyInto, and DeepCopyObject method implementations. $(CONTROLLER_GEN) object:headerFile="hack/boilerplate.go.txt" paths="./..." +.PHONY: fmt fmt: ## Run go fmt against code. go fmt ./... +.PHONY: vet vet: ## Run go vet against code. go vet ./... -ENVTEST_ASSETS_DIR=$(shell pwd)/testbin -test: manifests generate fmt vet ## Run tests. - mkdir -p ${ENVTEST_ASSETS_DIR} - test -f ${ENVTEST_ASSETS_DIR}/setup-envtest.sh || curl -sSLo ${ENVTEST_ASSETS_DIR}/setup-envtest.sh https://raw.githubusercontent.com/kubernetes-sigs/controller-runtime/v0.8.3/hack/setup-envtest.sh - source ${ENVTEST_ASSETS_DIR}/setup-envtest.sh; fetch_envtest_tools $(ENVTEST_ASSETS_DIR); setup_envtest_env $(ENVTEST_ASSETS_DIR); go test ./... -coverprofile cover.out +.PHONY: test +test: manifests generate fmt vet envtest ## Run tests. + KUBEBUILDER_ASSETS="$(shell $(ENVTEST) use $(ENVTEST_K8S_VERSION) --bin-dir $(LOCALBIN) -p path)" go test ./... -coverprofile cover.out ##@ Build -build: generate fmt vet ## Build manager binary. +.PHONY: build +build: manifests generate fmt vet ## Build manager binary. go build -o bin/manager main.go +.PHONY: run run: manifests generate fmt vet ## Run a controller from your host. 
go run ./main.go +# If you wish built the manager image targeting other platforms you can use the --platform flag. +# (i.e. docker build --platform linux/arm64 ). However, you must enable docker buildKit for it. +# More info: https://docs.docker.com/develop/develop-images/build_enhancements/ +.PHONY: docker-build docker-build: test ## Build docker image with the manager. - podman build --disable-compression --format=docker . -t ${IMG} + docker build -t ${IMG} . +.PHONY: docker-push docker-push: ## Push docker image with the manager. - podman push ${IMG} + docker push ${IMG} -docker: docker-build docker-push +# PLATFORMS defines the target platforms for the manager image be build to provide support to multiple +# architectures. (i.e. make docker-buildx IMG=myregistry/mypoperator:0.0.1). To use this option you need to: +# - able to use docker buildx . More info: https://docs.docker.com/build/buildx/ +# - have enable BuildKit, More info: https://docs.docker.com/develop/develop-images/build_enhancements/ +# - be able to push the image for your registry (i.e. if you do not inform a valid value via IMG=> then the export will fail) +# To properly provided solutions that supports more than one platform you should use this option. +PLATFORMS ?= linux/arm64,linux/amd64,linux/s390x,linux/ppc64le +.PHONY: docker-buildx +docker-buildx: test ## Build and push docker image for the manager for cross-platform support + # copy existing Dockerfile and insert --platform=${BUILDPLATFORM} into Dockerfile.cross, and preserve the original Dockerfile + sed -e '1 s/\(^FROM\)/FROM --platform=\$$\{BUILDPLATFORM\}/; t' -e ' 1,// s//FROM --platform=\$$\{BUILDPLATFORM\}/' Dockerfile > Dockerfile.cross + - docker buildx create --name project-v3-builder + docker buildx use project-v3-builder + - docker buildx build --push --platform=$(PLATFORMS) --tag ${IMG} -f Dockerfile.cross . 
+ - docker buildx rm project-v3-builder + rm Dockerfile.cross ##@ Deployment +ifndef ignore-not-found + ignore-not-found = false +endif + +.PHONY: install install: manifests kustomize ## Install CRDs into the K8s cluster specified in ~/.kube/config. $(KUSTOMIZE) build config/crd | kubectl apply -f - -uninstall: manifests kustomize ## Uninstall CRDs from the K8s cluster specified in ~/.kube/config. - $(KUSTOMIZE) build config/crd | kubectl delete -f - +.PHONY: uninstall +uninstall: manifests kustomize ## Uninstall CRDs from the K8s cluster specified in ~/.kube/config. Call with ignore-not-found=true to ignore resource not found errors during deletion. + $(KUSTOMIZE) build config/crd | kubectl delete --ignore-not-found=$(ignore-not-found) -f - +.PHONY: deploy deploy: manifests kustomize ## Deploy controller to the K8s cluster specified in ~/.kube/config. cd config/manager && $(KUSTOMIZE) edit set image controller=${IMG} $(KUSTOMIZE) build config/default | kubectl apply -f - -undeploy: ## Undeploy controller from the K8s cluster specified in ~/.kube/config. - $(KUSTOMIZE) build config/default | kubectl delete -f - +.PHONY: undeploy +undeploy: ## Undeploy controller from the K8s cluster specified in ~/.kube/config. Call with ignore-not-found=true to ignore resource not found errors during deletion. + $(KUSTOMIZE) build config/default | kubectl delete --ignore-not-found=$(ignore-not-found) -f - +##@ Build Dependencies -CONTROLLER_GEN = $(shell pwd)/bin/controller-gen -controller-gen: ## Download controller-gen locally if necessary. - $(call go-get-tool,$(CONTROLLER_GEN),sigs.k8s.io/controller-tools/cmd/controller-gen@v0.4.1) +## Location to install dependencies to +LOCALBIN ?= $(shell pwd)/bin +$(LOCALBIN): + mkdir -p $(LOCALBIN) -KUSTOMIZE = $(shell pwd)/bin/kustomize -kustomize: ## Download kustomize locally if necessary. 
- $(call go-get-tool,$(KUSTOMIZE),sigs.k8s.io/kustomize/kustomize/v3@v3.8.7) +## Tool Binaries +KUSTOMIZE ?= $(LOCALBIN)/kustomize +CONTROLLER_GEN ?= $(LOCALBIN)/controller-gen +ENVTEST ?= $(LOCALBIN)/setup-envtest -# go-get-tool will 'go get' any package $2 and install it to $1. -PROJECT_DIR := $(shell dirname $(abspath $(lastword $(MAKEFILE_LIST)))) -define go-get-tool -@[ -f $(1) ] || { \ -set -e ;\ -TMP_DIR=$$(mktemp -d) ;\ -cd $$TMP_DIR ;\ -go mod init tmp ;\ -echo "Downloading $(2)" ;\ -GOBIN=$(PROJECT_DIR)/bin go get $(2) ;\ -rm -rf $$TMP_DIR ;\ -} -endef +## Tool Versions +KUSTOMIZE_VERSION ?= v3.8.7 +CONTROLLER_TOOLS_VERSION ?= v0.10.0 + +KUSTOMIZE_INSTALL_SCRIPT ?= "https://raw.githubusercontent.com/kubernetes-sigs/kustomize/master/hack/install_kustomize.sh" +.PHONY: kustomize +kustomize: $(KUSTOMIZE) ## Download kustomize locally if necessary. If wrong version is installed, it will be removed before downloading. +$(KUSTOMIZE): $(LOCALBIN) + @if test -x $(LOCALBIN)/kustomize && ! $(LOCALBIN)/kustomize version | grep -q $(KUSTOMIZE_VERSION); then \ + echo "$(LOCALBIN)/kustomize version is not expected $(KUSTOMIZE_VERSION). Removing it before installing."; \ + rm -rf $(LOCALBIN)/kustomize; \ + fi + test -s $(LOCALBIN)/kustomize || { curl -Ss $(KUSTOMIZE_INSTALL_SCRIPT) | bash -s -- $(subst v,,$(KUSTOMIZE_VERSION)) $(LOCALBIN); } + +.PHONY: controller-gen +controller-gen: $(CONTROLLER_GEN) ## Download controller-gen locally if necessary. If wrong version is installed, it will be overwritten. +$(CONTROLLER_GEN): $(LOCALBIN) + test -s $(LOCALBIN)/controller-gen && $(LOCALBIN)/controller-gen --version | grep -q $(CONTROLLER_TOOLS_VERSION) || \ + GOBIN=$(LOCALBIN) go install sigs.k8s.io/controller-tools/cmd/controller-gen@$(CONTROLLER_TOOLS_VERSION) + +.PHONY: envtest +envtest: $(ENVTEST) ## Download envtest-setup locally if necessary. 
+$(ENVTEST): $(LOCALBIN) + test -s $(LOCALBIN)/setup-envtest || GOBIN=$(LOCALBIN) go install sigs.k8s.io/controller-runtime/tools/setup-envtest@latest diff --git a/README.md b/README.md index aa6fa67..6864950 100644 --- a/README.md +++ b/README.md @@ -1,3 +1,94 @@ # formol +// TODO(user): Add simple overview of use/purpose + +## Description +// TODO(user): An in-depth paragraph about your project and overview of use + +## Getting Started +You’ll need a Kubernetes cluster to run against. You can use [KIND](https://sigs.k8s.io/kind) to get a local cluster for testing, or run against a remote cluster. +**Note:** Your controller will automatically use the current context in your kubeconfig file (i.e. whatever cluster `kubectl cluster-info` shows). + +### Running on the cluster +1. Install Instances of Custom Resources: + +```sh +kubectl apply -f config/samples/ +``` + +2. Build and push your image to the location specified by `IMG`: + +```sh +make docker-build docker-push IMG=/formol:tag +``` + +3. Deploy the controller to the cluster with the image specified by `IMG`: + +```sh +make deploy IMG=/formol:tag +``` + +### Uninstall CRDs +To delete the CRDs from the cluster: + +```sh +make uninstall +``` + +### Undeploy controller +UnDeploy the controller to the cluster: + +```sh +make undeploy +``` + +## Contributing +// TODO(user): Add detailed information on how you would like others to contribute to this project + +### How it works +This project aims to follow the Kubernetes [Operator pattern](https://kubernetes.io/docs/concepts/extend-kubernetes/operator/) + +It uses [Controllers](https://kubernetes.io/docs/concepts/architecture/controller/) +which provides a reconcile function responsible for synchronizing resources untile the desired state is reached on the cluster + +### Test It Out +1. Install the CRDs into the cluster: + +```sh +make install +``` + +2. 
Run your controller (this will run in the foreground, so switch to a new terminal if you want to leave it running): + +```sh +make run +``` + +**NOTE:** You can also run this in one step by running: `make install run` + +### Modifying the API definitions +If you are editing the API definitions, generate the manifests such as CRs or CRDs using: + +```sh +make manifests +``` + +**NOTE:** Run `make --help` for more information on all potential `make` targets + +More information can be found via the [Kubebuilder Documentation](https://book.kubebuilder.io/introduction.html) + +## License + +Copyright 2023. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
-My k8s backup solution diff --git a/api/v1alpha1/.backupconfiguration_types.go.un~ b/api/v1alpha1/.backupconfiguration_types.go.un~ new file mode 100644 index 0000000000000000000000000000000000000000..8e215c26710e52b56efa013a3dd498ef89a91286 GIT binary patch literal 6508 zcmeI$ze~eF6bJBYtUs!4aTK*$W3{dhF3y4#6hx@21(j04rZ#nObksq25p?#S@L%XC zxF{(72Xxl=JumeQ&0Po2=?-4=AZd8K4|lm_w|0D2uCE2}!n-NHW^zm0{%t4sbaDSy zI(QsU7q*tm#m$#Ow)jyMLWC5Hshz_{5Y}2j^DG~>nzed0UpWec`l{dTp71MlWr<>t zB0-U*z`!vIvH#q@PU5U5_S!g0!}~eNU!00=BED$ITd@mV`j;Ibj)vqA;&Xy-yiTz} z0d-LHDZg?qG4YQ9if91VVMg7sel%e&$Z2ev#mW+PNmepkoh56C-PB}Rkd}r8Sy6%x zi*hu;>L{abSU(cwiEfRZmz4TPtCED=5Gxp}PLegmZfcVBrKEO`q#yw|%!&o7gJcb{ zo0=f!B&2Sys~q7r!fJ&n9I=;j+gCKf>LACKZfVU+N6jmXVgy@?RSHta$QoidH8IY{ zG0MHHUr~xs8)cP3)G4xt*iB7}GjWR8#dZ;aHpU7Cs6%88v74F@b8(2=5N2ssCp?`Y zYlz*{WH=pX$fhC4GOSE+IzrYEyD1T#%8gB;_jkFeIKtnB6k^dQSQHCQCx}k`%OJr^ p-PUw6PO$qVk9UjC!#5~wWL^LO literal 0 HcmV?d00001 diff --git a/api/v1alpha1/.backupsession_types.go.un~ b/api/v1alpha1/.backupsession_types.go.un~ new file mode 100644 index 0000000000000000000000000000000000000000..a0433fe1ba5ebb94a6e220659ba125b7020a746f GIT binary patch literal 7478 zcmeI1&ubGw6vsDd)Q(yeY^(TlyI^g#&6E*?oJx9zDMnUuu4K z%5h_4>D!yN*SBvyem4E&?Ob{=UA(e(|G|q_7eAee=D#|QbAw#jCJM^ryJP~RawlBB z@Uc>&OvSnLPJUsCMxQQ53=kNd}{4KD-f=qKO}HC&iB1EH>BS~E_h~{ag{ms zNI(Om3b`@>#CHTl3r)#lY9wfG9<-YhL6t?k6QniQTREFl>rFps)uSN1SC4sn)o-ob zMI!O>B|ois5-K2~bd>?Hg{ssrKK6q;%QnCi3^!{_2Vcun6U1!TkLe66UZy~}MO!xT zTBMpFWzf zbFvrCqpZ+l3+QdfywX5+l-xdxDn$kv|ApqPPII@EA3q8N2Uyr_^WRTUA1Uq06Uatz zfi&Q(k&CBMTaZeY4UUpsxhKgLxk*uo zoz)gks`Ej_lU5M&B}92;2zY3_D+5?80nlw3LIk5mFy9N9L-{bh++8&fq+XF&GN9sI zws$yC!?@8Ks}BNX<<~0`$PWVo_wV^#~D+8r9=|{aA$b az$nf?aU$X#MP&fTr`ITMp$6@jmp=jh6MG5( literal 0 HcmV?d00001 diff --git a/api/v1alpha1/.common.go.un~ b/api/v1alpha1/.common.go.un~ new file mode 100644 index 0000000000000000000000000000000000000000..7bba67b2a3a39314247af8f6bf37603553553965 GIT binary patch literal 5425 
zcmeI0ze^lJ6vxNAON_s&AW0zLKnh7^B`TOMAf!vcl*--Ss7uaWxm^Vd69iGaR4GD= z^x_|r`bt5BbSaXQ=6&D3*?X75%IW68+{}D;cIWXv^WHA^KHdGkp1sbmO=EfK-P`fa z{gp3&e?ETx_4CQEXRW!}=987RkC&g`G_Q_~F*DS(jnQOU1oXa{WN`MXg{w`o`9l^o zq(_HKXl%D+v@lw*2qc4(4aQ;2@2CIm!Wb5h8B;}UxB$pTG1ar-qBy#*2oR>NJ25f5 z5d;DbpjyfeWK*4QoL?h%sg-ef4-p`62O3-*dU`eop22C=2snUxpgaJ?8e{~gA`@gt z;LaJ`SXCH34o1MxY8cpGph0`6`~g5{j5UNfB*mh literal 0 HcmV?d00001 diff --git a/api/v1alpha1/.function_types.go.un~ b/api/v1alpha1/.function_types.go.un~ new file mode 100644 index 0000000000000000000000000000000000000000..09f8d8013fce7f131c216e83502f45bc7a0fbe25 GIT binary patch literal 7007 zcmeI0&u`N(6vxwTFbV?>fIytydf)_|1{z2c2dXuJN>ww~4upi*LgRE{O-c@7jTC6GN+gyS}tTKTeAG(xVG~?{@Sm;;s-Al!z%x3 zjI5sh*qe?%oER-UdiL`A^YH`LjaN@zznOUd>GGGS#dj-)Va#AwQvn|i$_gseY_vab zJ{+UB1!HMbHW-m574+&*`uqb^Qj=Eb@;`e3arg-i!}xv-`zF^P!F&PpubRVY&CKHj z8bKPR7BUgQ)0nB=(9p;Wc_xImu0or}!Omc=(4YxMf-$U-&KI zLK~>iQbaT;At)D{J=O*Qs~@(7&qMjRd@JVRY$?WdOaz#8 zoFuKp)DwA?2=%@S^(YRMx}Tt)o0AGF*p%}h~4h{5p&A` zm2dzdTL;f)T{ngS?}OvI5cFdKE6y5Ie(CWD#0pEB3Jy0J#4Pfiu74LhlWT(3z_{|g zv_ABjP#lu4iECnGJH;Ck6Jgn-OofnXOo~JDHM20ypmcvbXkF&{A&V0Qx-2I7LM$A? zfP2H1k}a_;D)T4KnyuoHe9bJj$Hkz>hRl*MDi%?{E#RI6NWk_glO&h~P=~^bQ}Q#j z0G=c)cMj4$0Tdf}O@hgk`@n6+)&psW>EGGYiu^Y0}~xP+U$3?2Z%ky7K>s z8U>ZXbbSGyyr)>A<2EjL_?B`5zQ(5dJr~6*)k&BSD4#}4Kx0=NlCO!_A8hxJrBo#5 z7$MQlDsP)S&i`@QMBvg(eVQ{t)!Qt2Vn(7u(3ilP}U!aC4iU_h?#&G1VA(lGbGcRmzbNXscG5>j!BSmWB`g)SaKMRN#u~B d2^$!bLePu_j!8eDEdf9bi%C5B*0k~YDgfF$N@4&2 literal 0 HcmV?d00001 diff --git a/api/v1alpha1/backupconfiguration_types.go b/api/v1alpha1/backupconfiguration_types.go index b460607..701ac03 100644 --- a/api/v1alpha1/backupconfiguration_types.go +++ b/api/v1alpha1/backupconfiguration_types.go @@ -1,5 +1,5 @@ /* - +Copyright 2023. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -17,87 +17,82 @@ limitations under the License. 
package v1alpha1 import ( - corev1 "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" ) +// +kubebuilder:validation:Enum=Deployment;StatefulSet;Pod +type TargetKind string + const ( - SidecarKind string = "Sidecar" - JobKind string = "Job" - BackupVolumes string = "Volumes" + Deployment TargetKind = "Deployment" + StatefulSet TargetKind = "StatefulSet" + Pod TargetKind = "Pod" +) + +// +kubebuilder:validation:Enum=Online;Snapshot;Job +type BackupType string + +const ( + SnapshotKind BackupType = "Snapshot" + OnlineKind BackupType = "Online" + JobKind BackupType = "Job" ) type Step struct { Name string `json:"name"` // +optional - Finalize *bool `json:"finalize,omitempty"` + Finalize *bool `json:"finalize"` } -type Hook struct { - Cmd string `json:"cmd"` - // +optional - Args []string `json:"args,omitempty"` +type TargetContainer struct { + Name string `json:"name"` + Paths []string `json:"paths,omitempty"` + // +kubebuilder:default:=2 + Retry int `json:"retry"` + Steps []Step `json:"steps,omitempty"` } type Target struct { - // +kubebuilder:validation:Enum=Sidecar;Job - Kind string `json:"kind"` - Name string `json:"name"` - // +optional - ContainerName string `json:"containerName"` - // +optional - ApiVersion string `json:"apiVersion,omitempty"` - // +optional - VolumeMounts []corev1.VolumeMount `json:"volumeMounts,omitempty"` - // +optional - Paths []string `json:"paths,omitempty"` - // +optional - // +kubebuilder:validation:MinItems=1 - Steps []Step `json:"steps,omitempty"` - // +kubebuilder:default:=2 - Retry int `json:"retry,omitempty"` + BackupType `json:"backupType"` + TargetKind `json:"targetKind"` + TargetName string `json:"targetName"` + Containers []TargetContainer `json:"containers"` } type Keep struct { - Last int32 `json:"last,omitempty"` - Daily int32 `json:"daily,omitempty"` - Weekly int32 `json:"weekly,omitempty"` - Monthly int32 `json:"monthly,omitempty"` - Yearly int32 `json:"yearly,omitempty"` + Last int32 `json:"last"` + Daily 
int32 `json:"daily"` + Weekly int32 `json:"weekly"` + Monthly int32 `json:"monthly"` + Yearly int32 `json:"yearly"` } // BackupConfigurationSpec defines the desired state of BackupConfiguration type BackupConfigurationSpec struct { Repository string `json:"repository"` Image string `json:"image"` - - // +optional - Suspend *bool `json:"suspend,omitempty"` - - // +optional - Schedule string `json:"schedule,omitempty"` - // +kubebuilder:validation:MinItems=1 - Targets []Target `json:"targets"` - // +optional - Keep `json:"keep,omitempty"` + // +kubebuilder:default:=false + Suspend *bool `json:"suspend"` + Schedule string `json:"schedule"` + Keep `json:"keep"` + Targets []Target `json:"targets"` } // BackupConfigurationStatus defines the observed state of BackupConfiguration type BackupConfigurationStatus struct { - // INSERT ADDITIONAL STATUS FIELD - define observed state of cluster - // Important: Run "make" to regenerate code after modifying this file LastBackupTime *metav1.Time `json:"lastBackupTime,omitempty"` Suspended bool `json:"suspended"` ActiveCronJob bool `json:"activeCronJob"` ActiveSidecar bool `json:"activeSidecar"` } +//+kubebuilder:object:root=true +//+kubebuilder:subresource:status +//+kubebuilder:resource:shortName="bc" +//+kubebuilder:printcolumn:name="Suspended",type=boolean,JSONPath=`.spec.suspend` +//+kubebuilder:printcolumn:name="Schedule",type=string,JSONPath=`.spec.schedule` + // BackupConfiguration is the Schema for the backupconfigurations API -// +kubebuilder:object:root=true -// +kubebuilder:resource:shortName="bc" -// +kubebuilder:subresource:status -// +kubebuilder:printcolumn:name="Suspended",type=boolean,JSONPath=`.spec.suspend` -// +kubebuilder:printcolumn:name="Schedule",type=string,JSONPath=`.spec.schedule` type BackupConfiguration struct { metav1.TypeMeta `json:",inline"` metav1.ObjectMeta `json:"metadata,omitempty"` @@ -106,7 +101,7 @@ type BackupConfiguration struct { Status BackupConfigurationStatus `json:"status,omitempty"` } 
-// +kubebuilder:object:root=true +//+kubebuilder:object:root=true // BackupConfigurationList contains a list of BackupConfiguration type BackupConfigurationList struct { diff --git a/api/v1alpha1/backupconfiguration_types.go~ b/api/v1alpha1/backupconfiguration_types.go~ new file mode 100644 index 0000000..823e177 --- /dev/null +++ b/api/v1alpha1/backupconfiguration_types.go~ @@ -0,0 +1,115 @@ +/* +Copyright 2023. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1alpha1 + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" +) + +// +kubebuilder:validation:Enum=Deployment;StatefulSet;Pod +type TargetKind string + +const ( + Deployment TargetKind = "Deployment" + StatefulSet TargetKind = "StatefulSet" + Pod TargetKind = "Pod" +) + +// +kubebuilder:validation:Enum=Online;Snapshot;Job +type BackupType string + +const ( + SnapshotKind BackupType = "Snapshot" + OnlineKind BackupType = "Online" + JobKind BackupType = "Job" +) + +type Step struct { + Name string `json:"name"` + // +optional + Finalize *bool `json:"finalize"` +} + +type TargetContainer struct { + Name string `json:"name"` + Paths []string `json:"paths,omitempty"` + // +kubebuilder:default:=2 + Retry int `json:"retry"` + Steps []Step `json:"steps,omitempty"` +} + +type Target struct { + BackupType `json:"backupType"` + TargetKind `json:"targetKind"` + TargetName string `json:"targetName"` + Containers []TargetContainer `json:"containers"` +} + +type Keep struct { + Last int32 `json:"last"` + Daily int32 
`json:"daily"` + Weekly int32 `json:"weekly"` + Monthly int32 `json:"monthly"` + Yearly int32 `json:"yearly"` +} + +// BackupConfigurationSpec defines the desired state of BackupConfiguration +type BackupConfigurationSpec struct { + Repo string `json:"repo"` + Image string `json:"image"` + // +kubebuilder:default:=false + Suspend *bool `json:"suspend"` + Schedule string `json:"schedule"` + Keep `json:"keep"` + Targets []Target `json:"targets"` +} + +// BackupConfigurationStatus defines the observed state of BackupConfiguration +type BackupConfigurationStatus struct { + LastBackupTime *metav1.Time `json:"lastBackupTime,omitempty"` + Suspended bool `json:"suspended"` + ActiveCronJob bool `json:"activeCronJob"` + ActiveSidecar bool `json:"activeSidecar"` +} + +//+kubebuilder:object:root=true +//+kubebuilder:subresource:status +//+kubebuilder:resource:shortName="bc" +//+kubebuilder:printcolumn:name="Suspended",type=boolean,JSONPath=`.spec.suspend` +//+kubebuilder:printcolumn:name="Schedule",type=string,JSONPath=`.spec.schedule` + +// BackupConfiguration is the Schema for the backupconfigurations API +type BackupConfiguration struct { + metav1.TypeMeta `json:",inline"` + metav1.ObjectMeta `json:"metadata,omitempty"` + + Spec BackupConfigurationSpec `json:"spec,omitempty"` + Status BackupConfigurationStatus `json:"status,omitempty"` +} + +//+kubebuilder:object:root=true + +// BackupConfigurationList contains a list of BackupConfiguration +type BackupConfigurationList struct { + metav1.TypeMeta `json:",inline"` + metav1.ListMeta `json:"metadata,omitempty"` + Items []BackupConfiguration `json:"items"` +} + +func init() { + SchemeBuilder.Register(&BackupConfiguration{}, &BackupConfigurationList{}) +} diff --git a/api/v1alpha1/backupsession_types.go b/api/v1alpha1/backupsession_types.go index 8e6d2f1..0650e80 100644 --- a/api/v1alpha1/backupsession_types.go +++ b/api/v1alpha1/backupsession_types.go @@ -1,5 +1,5 @@ /* - +Copyright 2023. 
Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -21,8 +21,28 @@ import ( metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" ) -// EDIT THIS FILE! THIS IS SCAFFOLDING FOR YOU TO OWN! -// NOTE: json tags are required. Any new fields you add must have json tags for the fields to be serialized. +type SessionState string + +const ( + New SessionState = "New" + Init SessionState = "Initializing" + Running SessionState = "Running" + Waiting SessionState = "Waiting" + Finalize SessionState = "Finalizing" + Success SessionState = "Success" + Failure SessionState = "Failure" + Deleted SessionState = "Deleted" +) + +type TargetStatus struct { + Name string `json:"name"` + Kind string `json:"kind"` + SessionState `json:"state"` + SnapshotId string `json:"snapshotId"` + StartTime *metav1.Time `json:"startTime"` + Duration *metav1.Duration `json:"duration"` + Try int `json:"try"` +} // BackupSessionSpec defines the desired state of BackupSession type BackupSessionSpec struct { @@ -31,21 +51,15 @@ type BackupSessionSpec struct { // BackupSessionStatus defines the observed state of BackupSession type BackupSessionStatus struct { - // INSERT ADDITIONAL STATUS FIELD - define observed state of cluster - // Important: Run "make" to regenerate code after modifying this file - // +optional - SessionState `json:"state,omitempty"` - // +optional - StartTime *metav1.Time `json:"startTime,omitempty"` - // +optional - Targets []TargetStatus `json:"target,omitempty"` - // +optional - Keep string `json:"keep,omitempty"` + SessionState `json:"state"` + StartTime *metav1.Time `json:"startTime"` + Targets []TargetStatus `json:"target"` + Keep string `json:"keep"` } -// +kubebuilder:object:root=true +//+kubebuilder:object:root=true +//+kubebuilder:subresource:status // +kubebuilder:resource:shortName="bs" -// +kubebuilder:subresource:status // +kubebuilder:printcolumn:name="Ref",type=string,JSONPath=`.spec.ref.name` // 
+kubebuilder:printcolumn:name="State",type=string,JSONPath=`.status.state` // +kubebuilder:printcolumn:name="Started",type=string,format=date-time,JSONPath=`.status.startTime` @@ -60,7 +74,7 @@ type BackupSession struct { Status BackupSessionStatus `json:"status,omitempty"` } -// +kubebuilder:object:root=true +//+kubebuilder:object:root=true // BackupSessionList contains a list of BackupSession type BackupSessionList struct { diff --git a/api/v1alpha1/backupsession_types.go~ b/api/v1alpha1/backupsession_types.go~ new file mode 100644 index 0000000..06e0ca1 --- /dev/null +++ b/api/v1alpha1/backupsession_types.go~ @@ -0,0 +1,89 @@ +/* +Copyright 2023. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package v1alpha1 + +import ( + corev1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/types" +) + +type SessionState string + +const ( + New SessionState = "New" + Init SessionState = "Initializing" + Running SessionState = "Running" + Waiting SessionState = "Waiting" + Finalize SessionState = "Finalizing" + Success SessionState = "Success" + Failure SessionState = "Failure" + Deleted SessionState = "Deleted" +) + +type TargetStatus struct { + Name string `json:"name"` + Kind string `json:"kind"` + SessionState `json:"state"` + SnapshotId string `json:"snapshotId"` + StartTime *metav1.Time `json:"startTime"` + Duration *metav1.Duration `json:"duration"` + Try int `json:"try"` +} + +// BackupSessionSpec defines the desired state of BackupSession +type BackupSessionSpec struct { + Ref corev1.ObjectReference `json:"ref"` +} + +// BackupSessionStatus defines the observed state of BackupSession +type BackupSessionStatus struct { + SessionState `json:"state"` + StartTime *metav1.Time `json:"startTime"` + Targets []TargetStatus `json:"target"` + Keep string `json:"keep"` +} + +//+kubebuilder:object:root=true +//+kubebuilder:subresource:status +// +kubebuilder:resource:shortName="bs" +// +kubebuilder:printcolumn:name="Ref",type=string,JSONPath=`.spec.ref.name` +// +kubebuilder:printcolumn:name="State",type=string,JSONPath=`.status.state` +// +kubebuilder:printcolumn:name="Started",type=string,format=date-time,JSONPath=`.status.startTime` +// +kubebuilder:printcolumn:name="Keep",type=string,JSONPath=`.status.keep` + +// BackupSession is the Schema for the backupsessions API +type BackupSession struct { + metav1.TypeMeta `json:",inline"` + metav1.ObjectMeta `json:"metadata,omitempty"` + + Spec BackupSessionSpec `json:"spec,omitempty"` + Status BackupSessionStatus `json:"status,omitempty"` +} + +//+kubebuilder:object:root=true + +// BackupSessionList contains a list of BackupSession +type BackupSessionList struct { + 
metav1.TypeMeta `json:",inline"` + metav1.ListMeta `json:"metadata,omitempty"` + Items []BackupSession `json:"items"` +} + +func init() { + SchemeBuilder.Register(&BackupSession{}, &BackupSessionList{}) +} diff --git a/api/v1alpha1/common.go b/api/v1alpha1/common.go index d3b88d4..9c21ad6 100644 --- a/api/v1alpha1/common.go +++ b/api/v1alpha1/common.go @@ -1,47 +1,13 @@ package v1alpha1 -import ( - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" -) - -type SessionState string - const ( - New SessionState = "New" - Init SessionState = "Initializing" - Running SessionState = "Running" - Waiting SessionState = "Waiting" - Finalize SessionState = "Finalizing" - Success SessionState = "Success" - Failure SessionState = "Failure" - Deleted SessionState = "Deleted" - // Environment variables used by the sidecar container - RESTORE_ANNOTATION = "restore" // the name of the sidecar container SIDECARCONTAINER_NAME string = "formol" // the name of the container we backup when there are more than 1 container in the pod TARGETCONTAINER_TAG string = "FORMOL_TARGET" // Used by both the backupsession and restoresession controllers to identified the target deployment TARGET_NAME string = "TARGET_NAME" - // Used by restoresession controller - RESTORESESSION_NAMESPACE string = "RESTORESESSION_NAMESPACE" - RESTORESESSION_NAME string = "RESTORESESSION_NAME" // Used by the backupsession controller POD_NAME string = "POD_NAME" POD_NAMESPACE string = "POD_NAMESPACE" ) - -type TargetStatus struct { - Name string `json:"name"` - Kind string `json:"kind"` - // +optional - SessionState `json:"state,omitempty"` - // +optional - SnapshotId string `json:"snapshotId,omitempty"` - // +optional - StartTime *metav1.Time `json:"startTime,omitempty"` - // +optional - Duration *metav1.Duration `json:"duration,omitempty"` - // +optional - Try int `json:"try,omitemmpty"` -} diff --git a/api/v1alpha1/common.go~ b/api/v1alpha1/common.go~ new file mode 100644 index 0000000..ebcd0cd --- /dev/null +++ 
b/api/v1alpha1/common.go~ @@ -0,0 +1,6 @@ +package v1alpha1 + +const ( + SIDECARCONTAINER_NAME string = "formol" + TARGETCONTAINER_TAG string = "FORMOL_TARGET" +) diff --git a/api/v1alpha1/function_types.go b/api/v1alpha1/function_types.go index f79d058..0f281ad 100644 --- a/api/v1alpha1/function_types.go +++ b/api/v1alpha1/function_types.go @@ -1,5 +1,5 @@ /* - +Copyright 2023. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -21,20 +21,25 @@ import ( metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" ) -// EDIT THIS FILE! THIS IS SCAFFOLDING FOR YOU TO OWN! -// NOTE: json tags are required. Any new fields you add must have json tags for the fields to be serialized. +// FunctionStatus defines the observed state of Function +type FunctionStatus struct { + // INSERT ADDITIONAL STATUS FIELD - define observed state of cluster + // Important: Run "make" to regenerate code after modifying this file +} -// +kubebuilder:object:root=true +//+kubebuilder:object:root=true +//+kubebuilder:subresource:status // Function is the Schema for the functions API type Function struct { metav1.TypeMeta `json:",inline"` metav1.ObjectMeta `json:"metadata,omitempty"` - Spec corev1.Container `json:"spec"` + Spec corev1.Container `json:"spec,omitempty"` + Status FunctionStatus `json:"status,omitempty"` } -// +kubebuilder:object:root=true +//+kubebuilder:object:root=true // FunctionList contains a list of Function type FunctionList struct { diff --git a/api/v1alpha1/function_types.go~ b/api/v1alpha1/function_types.go~ new file mode 100644 index 0000000..32607dd --- /dev/null +++ b/api/v1alpha1/function_types.go~ @@ -0,0 +1,64 @@ +/* +Copyright 2023. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1alpha1 + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" +) + +// EDIT THIS FILE! THIS IS SCAFFOLDING FOR YOU TO OWN! +// NOTE: json tags are required. Any new fields you add must have json tags for the fields to be serialized. + +// FunctionSpec defines the desired state of Function +type FunctionSpec struct { + // INSERT ADDITIONAL SPEC FIELDS - desired state of cluster + // Important: Run "make" to regenerate code after modifying this file + + // Foo is an example field of Function. Edit function_types.go to remove/update + Foo string `json:"foo,omitempty"` +} + +// FunctionStatus defines the observed state of Function +type FunctionStatus struct { + // INSERT ADDITIONAL STATUS FIELD - define observed state of cluster + // Important: Run "make" to regenerate code after modifying this file +} + +//+kubebuilder:object:root=true +//+kubebuilder:subresource:status + +// Function is the Schema for the functions API +type Function struct { + metav1.TypeMeta `json:",inline"` + metav1.ObjectMeta `json:"metadata,omitempty"` + + Spec FunctionSpec `json:"spec,omitempty"` + Status FunctionStatus `json:"status,omitempty"` +} + +//+kubebuilder:object:root=true + +// FunctionList contains a list of Function +type FunctionList struct { + metav1.TypeMeta `json:",inline"` + metav1.ListMeta `json:"metadata,omitempty"` + Items []Function `json:"items"` +} + +func init() { + SchemeBuilder.Register(&Function{}, &FunctionList{}) +} diff --git a/api/v1alpha1/groupversion_info.go b/api/v1alpha1/groupversion_info.go index 029f41b..7d7aee0 100644 --- 
a/api/v1alpha1/groupversion_info.go +++ b/api/v1alpha1/groupversion_info.go @@ -1,5 +1,5 @@ /* - +Copyright 2023. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. diff --git a/api/v1alpha1/repo_types.go b/api/v1alpha1/repo_types.go index e66dea1..156d9e5 100644 --- a/api/v1alpha1/repo_types.go +++ b/api/v1alpha1/repo_types.go @@ -1,5 +1,5 @@ /* - +Copyright 2023. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -17,12 +17,14 @@ limitations under the License. package v1alpha1 import ( + "fmt" + corev1 "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "strings" ) // EDIT THIS FILE! THIS IS SCAFFOLDING FOR YOU TO OWN! // NOTE: json tags are required. Any new fields you add must have json tags for the fields to be serialized. - type S3 struct { Server string `json:"server"` Bucket string `json:"bucket"` @@ -31,26 +33,24 @@ type S3 struct { } type Backend struct { - S3 `json:"s3"` + // +optional + S3 *S3 `json:"s3,omitempty"` + // +optional + Nfs *string `json:"nfs,omitempty"` } // RepoSpec defines the desired state of Repo type RepoSpec struct { - // INSERT ADDITIONAL SPEC FIELDS - desired state of cluster - // Important: Run "make" to regenerate code after modifying this file - - // Foo is an example field of Repo. 
Edit Repo_types.go to remove/update Backend `json:"backend"` RepositorySecrets string `json:"repositorySecrets"` } // RepoStatus defines the observed state of Repo type RepoStatus struct { - // INSERT ADDITIONAL STATUS FIELD - define observed state of cluster - // Important: Run "make" to regenerate code after modifying this file } -// +kubebuilder:object:root=true +//+kubebuilder:object:root=true +//+kubebuilder:subresource:status // Repo is the Schema for the repoes API type Repo struct { @@ -61,7 +61,7 @@ type Repo struct { Status RepoStatus `json:"status,omitempty"` } -// +kubebuilder:object:root=true +//+kubebuilder:object:root=true // RepoList contains a list of Repo type RepoList struct { @@ -73,3 +73,37 @@ type RepoList struct { func init() { SchemeBuilder.Register(&Repo{}, &RepoList{}) } + +func (repo *Repo) GetResticEnv(backupConf BackupConfiguration) []corev1.EnvVar { + env := []corev1.EnvVar{} + if repo.Spec.Backend.S3 != nil { + url := fmt.Sprintf("s3:http://%s/%s/%s-%s", + repo.Spec.Backend.S3.Server, + repo.Spec.Backend.S3.Bucket, + strings.ToUpper(backupConf.Namespace), + strings.ToLower(backupConf.Name)) + env = append(env, corev1.EnvVar{ + Name: "RESTIC_REPOSITORY", + Value: url, + }) + for _, key := range []string{ + "AWS_ACCESS_KEY_ID", + "AWS_SECRET_ACCESS_KEY", + "RESTIC_PASSWORD", + } { + env = append(env, corev1.EnvVar{ + Name: key, + ValueFrom: &corev1.EnvVarSource{ + SecretKeyRef: &corev1.SecretKeySelector{ + LocalObjectReference: corev1.LocalObjectReference{ + Name: repo.Spec.RepositorySecrets, + }, + Key: key, + }, + }, + }) + } + } + + return env +} diff --git a/api/v1alpha1/repo_types.go~ b/api/v1alpha1/repo_types.go~ new file mode 100644 index 0000000..868a06c --- /dev/null +++ b/api/v1alpha1/repo_types.go~ @@ -0,0 +1,109 @@ +/* +Copyright 2023. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1alpha1 + +import ( + "fmt" + corev1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "strings" +) + +// EDIT THIS FILE! THIS IS SCAFFOLDING FOR YOU TO OWN! +// NOTE: json tags are required. Any new fields you add must have json tags for the fields to be serialized. +type S3 struct { + Server string `json:"server"` + Bucket string `json:"bucket"` + // +optional + Prefix string `json:"prefix,omitempty"` +} + +type Backend struct { + // +optional + S3 *S3 `json:"s3,omitempty"` + // +optional + Nfs *string `json:"nfs,omitempty"` +} + +// RepoSpec defines the desired state of Repo +type RepoSpec struct { + Backend `json:"backend"` + RepositorySecrets string `json:"repositorySecrets"` +} + +// RepoStatus defines the observed state of Repo +type RepoStatus struct { +} + +//+kubebuilder:object:root=true +//+kubebuilder:subresource:status + +// Repo is the Schema for the repoes API +type Repo struct { + metav1.TypeMeta `json:",inline"` + metav1.ObjectMeta `json:"metadata,omitempty"` + + Spec RepoSpec `json:"spec,omitempty"` + Status RepoStatus `json:"status,omitempty"` +} + +//+kubebuilder:object:root=true + +// RepoList contains a list of Repo +type RepoList struct { + metav1.TypeMeta `json:",inline"` + metav1.ListMeta `json:"metadata,omitempty"` + Items []Repo `json:"items"` +} + +func init() { + SchemeBuilder.Register(&Repo{}, &RepoList{}) +} + +func (repo *Repo) GetResticEnv(backupConf BackupConfiguration) []corev1.EnvVar { + env := []corev1.EnvVar{} + if repo.Spec.Backend.S3 { + url := fmt.Sprintf("s3:http://%s/%s/%s-%s", + 
repo.Spec.Backend.S3.Server, + repo.Spec.Backend.S3.Bucket, + strings.ToUpper(backupConf.Namespace), + stringsToLower(backupConf.Name)) + env = append(env, corev1.EnvVar{ + Name: "RESTIC_REPOSITORY", + Value: url, + }) + for _, key := range []string{ + "AWS_ACCESS_KEY_ID", + "AWS_SECRET_ACCESS_KEY", + "RESTIC_PASSWORD", + } { + env = append(env, corev1.EnvVar{ + Name: key, + ValueFrom: &corev1.EnvVarSource{ + SecretKeyRef: &corev1.SecretKeySelector{ + LocalObjectReference: corev1.LocalObjectReference{ + Name: repo.Spec.RepositorySecrets, + }, + Key: key, + }, + }, + }) + } + } + + return env +} diff --git a/api/v1alpha1/restoresession_types.go b/api/v1alpha1/restoresession_types.go index 00aa941..462bd3c 100644 --- a/api/v1alpha1/restoresession_types.go +++ b/api/v1alpha1/restoresession_types.go @@ -1,5 +1,5 @@ /* - +Copyright 2023. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -17,41 +17,29 @@ limitations under the License. package v1alpha1 import ( - corev1 "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - //"k8s.io/apimachinery/pkg/types" ) -type BackupSessionRef struct { - // +optional - Ref corev1.ObjectReference `json:"ref,omitempty"` - // +optional - Spec BackupSessionSpec `json:"spec,omitempty"` - // +optional - Status BackupSessionStatus `json:"status,omitempty"` -} +// EDIT THIS FILE! THIS IS SCAFFOLDING FOR YOU TO OWN! +// NOTE: json tags are required. Any new fields you add must have json tags for the fields to be serialized. 
// RestoreSessionSpec defines the desired state of RestoreSession type RestoreSessionSpec struct { - BackupSessionRef `json:"backupSession"` - //Ref string `json:"backupSessionRef"` - // +optional - //Targets []TargetStatus `json:"target,omitempty"` + // INSERT ADDITIONAL SPEC FIELDS - desired state of cluster + // Important: Run "make" to regenerate code after modifying this file + + // Foo is an example field of RestoreSession. Edit restoresession_types.go to remove/update + Foo string `json:"foo,omitempty"` } // RestoreSessionStatus defines the observed state of RestoreSession type RestoreSessionStatus struct { - // +optional - SessionState `json:"state,omitempty"` - // +optional - StartTime *metav1.Time `json:"startTime,omitempty"` - // +optional - Targets []TargetStatus `json:"target,omitempty"` + // INSERT ADDITIONAL STATUS FIELD - define observed state of cluster + // Important: Run "make" to regenerate code after modifying this file } -// +kubebuilder:object:root=true -// +kubebuilder:resource:shortName="rs" -// +kubebuilder:subresource:status +//+kubebuilder:object:root=true +//+kubebuilder:subresource:status // RestoreSession is the Schema for the restoresessions API type RestoreSession struct { @@ -62,7 +50,7 @@ type RestoreSession struct { Status RestoreSessionStatus `json:"status,omitempty"` } -// +kubebuilder:object:root=true +//+kubebuilder:object:root=true // RestoreSessionList contains a list of RestoreSession type RestoreSessionList struct { diff --git a/api/v1alpha1/zz_generated.deepcopy.go b/api/v1alpha1/zz_generated.deepcopy.go index c96ed6f..3cd2fac 100644 --- a/api/v1alpha1/zz_generated.deepcopy.go +++ b/api/v1alpha1/zz_generated.deepcopy.go @@ -1,7 +1,8 @@ +//go:build !ignore_autogenerated // +build !ignore_autogenerated /* - +Copyright 2023. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -21,15 +22,23 @@ limitations under the License. 
package v1alpha1 import ( - "k8s.io/api/core/v1" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/apis/meta/v1" runtime "k8s.io/apimachinery/pkg/runtime" ) // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *Backend) DeepCopyInto(out *Backend) { *out = *in - out.S3 = in.S3 + if in.S3 != nil { + in, out := &in.S3, &out.S3 + *out = new(S3) + **out = **in + } + if in.Nfs != nil { + in, out := &in.Nfs, &out.Nfs + *out = new(string) + **out = **in + } } // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Backend. @@ -109,6 +118,7 @@ func (in *BackupConfigurationSpec) DeepCopyInto(out *BackupConfigurationSpec) { *out = new(bool) **out = **in } + out.Keep = in.Keep if in.Targets != nil { in, out := &in.Targets, &out.Targets *out = make([]Target, len(*in)) @@ -116,7 +126,6 @@ func (in *BackupConfigurationSpec) DeepCopyInto(out *BackupConfigurationSpec) { (*in)[i].DeepCopyInto(&(*out)[i]) } } - out.Keep = in.Keep } // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new BackupConfigurationSpec. @@ -207,24 +216,6 @@ func (in *BackupSessionList) DeepCopyObject() runtime.Object { return nil } -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *BackupSessionRef) DeepCopyInto(out *BackupSessionRef) { - *out = *in - out.Ref = in.Ref - out.Spec = in.Spec - in.Status.DeepCopyInto(&out.Status) -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new BackupSessionRef. -func (in *BackupSessionRef) DeepCopy() *BackupSessionRef { - if in == nil { - return nil - } - out := new(BackupSessionRef) - in.DeepCopyInto(out) - return out -} - // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
func (in *BackupSessionSpec) DeepCopyInto(out *BackupSessionSpec) { *out = *in @@ -273,6 +264,7 @@ func (in *Function) DeepCopyInto(out *Function) { out.TypeMeta = in.TypeMeta in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) in.Spec.DeepCopyInto(&out.Spec) + out.Status = in.Status } // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Function. @@ -326,21 +318,16 @@ func (in *FunctionList) DeepCopyObject() runtime.Object { } // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *Hook) DeepCopyInto(out *Hook) { +func (in *FunctionStatus) DeepCopyInto(out *FunctionStatus) { *out = *in - if in.Args != nil { - in, out := &in.Args, &out.Args - *out = make([]string, len(*in)) - copy(*out, *in) - } } -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Hook. -func (in *Hook) DeepCopy() *Hook { +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new FunctionStatus. +func (in *FunctionStatus) DeepCopy() *FunctionStatus { if in == nil { return nil } - out := new(Hook) + out := new(FunctionStatus) in.DeepCopyInto(out) return out } @@ -365,7 +352,7 @@ func (in *Repo) DeepCopyInto(out *Repo) { *out = *in out.TypeMeta = in.TypeMeta in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) - out.Spec = in.Spec + in.Spec.DeepCopyInto(&out.Spec) out.Status = in.Status } @@ -422,7 +409,7 @@ func (in *RepoList) DeepCopyObject() runtime.Object { // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *RepoSpec) DeepCopyInto(out *RepoSpec) { *out = *in - out.Backend = in.Backend + in.Backend.DeepCopyInto(&out.Backend) } // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RepoSpec. 
@@ -455,8 +442,8 @@ func (in *RestoreSession) DeepCopyInto(out *RestoreSession) { *out = *in out.TypeMeta = in.TypeMeta in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) - in.Spec.DeepCopyInto(&out.Spec) - in.Status.DeepCopyInto(&out.Status) + out.Spec = in.Spec + out.Status = in.Status } // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RestoreSession. @@ -512,7 +499,6 @@ func (in *RestoreSessionList) DeepCopyObject() runtime.Object { // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *RestoreSessionSpec) DeepCopyInto(out *RestoreSessionSpec) { *out = *in - in.BackupSessionRef.DeepCopyInto(&out.BackupSessionRef) } // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RestoreSessionSpec. @@ -528,17 +514,6 @@ func (in *RestoreSessionSpec) DeepCopy() *RestoreSessionSpec { // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *RestoreSessionStatus) DeepCopyInto(out *RestoreSessionStatus) { *out = *in - if in.StartTime != nil { - in, out := &in.StartTime, &out.StartTime - *out = (*in).DeepCopy() - } - if in.Targets != nil { - in, out := &in.Targets, &out.Targets - *out = make([]TargetStatus, len(*in)) - for i := range *in { - (*in)[i].DeepCopyInto(&(*out)[i]) - } - } } // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RestoreSessionStatus. @@ -589,21 +564,9 @@ func (in *Step) DeepCopy() *Step { // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
func (in *Target) DeepCopyInto(out *Target) { *out = *in - if in.VolumeMounts != nil { - in, out := &in.VolumeMounts, &out.VolumeMounts - *out = make([]v1.VolumeMount, len(*in)) - for i := range *in { - (*in)[i].DeepCopyInto(&(*out)[i]) - } - } - if in.Paths != nil { - in, out := &in.Paths, &out.Paths - *out = make([]string, len(*in)) - copy(*out, *in) - } - if in.Steps != nil { - in, out := &in.Steps, &out.Steps - *out = make([]Step, len(*in)) + if in.Containers != nil { + in, out := &in.Containers, &out.Containers + *out = make([]TargetContainer, len(*in)) for i := range *in { (*in)[i].DeepCopyInto(&(*out)[i]) } @@ -620,6 +583,33 @@ func (in *Target) DeepCopy() *Target { return out } +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *TargetContainer) DeepCopyInto(out *TargetContainer) { + *out = *in + if in.Paths != nil { + in, out := &in.Paths, &out.Paths + *out = make([]string, len(*in)) + copy(*out, *in) + } + if in.Steps != nil { + in, out := &in.Steps, &out.Steps + *out = make([]Step, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TargetContainer. +func (in *TargetContainer) DeepCopy() *TargetContainer { + if in == nil { + return nil + } + out := new(TargetContainer) + in.DeepCopyInto(out) + return out +} + // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
func (in *TargetStatus) DeepCopyInto(out *TargetStatus) { *out = *in @@ -629,7 +619,7 @@ func (in *TargetStatus) DeepCopyInto(out *TargetStatus) { } if in.Duration != nil { in, out := &in.Duration, &out.Duration - *out = new(metav1.Duration) + *out = new(v1.Duration) **out = **in } } diff --git a/config/certmanager/certificate.yaml b/config/certmanager/certificate.yaml deleted file mode 100644 index 58db114..0000000 --- a/config/certmanager/certificate.yaml +++ /dev/null @@ -1,26 +0,0 @@ -# The following manifests contain a self-signed issuer CR and a certificate CR. -# More document can be found at https://docs.cert-manager.io -# WARNING: Targets CertManager 0.11 check https://docs.cert-manager.io/en/latest/tasks/upgrading/index.html for -# breaking changes -apiVersion: cert-manager.io/v1alpha2 -kind: Issuer -metadata: - name: selfsigned-issuer - namespace: system -spec: - selfSigned: {} ---- -apiVersion: cert-manager.io/v1alpha2 -kind: Certificate -metadata: - name: serving-cert # this name should match the one appeared in kustomizeconfig.yaml - namespace: system -spec: - # $(SERVICE_NAME) and $(SERVICE_NAMESPACE) will be substituted by kustomize - dnsNames: - - $(SERVICE_NAME).$(SERVICE_NAMESPACE).svc - - $(SERVICE_NAME).$(SERVICE_NAMESPACE).svc.cluster.local - issuerRef: - kind: Issuer - name: selfsigned-issuer - secretName: webhook-server-cert # this secret will not be prefixed, since it's not managed by kustomize diff --git a/config/certmanager/kustomization.yaml b/config/certmanager/kustomization.yaml deleted file mode 100644 index bebea5a..0000000 --- a/config/certmanager/kustomization.yaml +++ /dev/null @@ -1,5 +0,0 @@ -resources: -- certificate.yaml - -configurations: -- kustomizeconfig.yaml diff --git a/config/certmanager/kustomizeconfig.yaml b/config/certmanager/kustomizeconfig.yaml deleted file mode 100644 index 90d7c31..0000000 --- a/config/certmanager/kustomizeconfig.yaml +++ /dev/null @@ -1,16 +0,0 @@ -# This configuration is for teaching kustomize 
how to update name ref and var substitution -nameReference: -- kind: Issuer - group: cert-manager.io - fieldSpecs: - - kind: Certificate - group: cert-manager.io - path: spec/issuerRef/name - -varReference: -- kind: Certificate - group: cert-manager.io - path: spec/commonName -- kind: Certificate - group: cert-manager.io - path: spec/dnsNames diff --git a/config/crd/kustomization.yaml b/config/crd/kustomization.yaml index d08e34e..151eb0e 100644 --- a/config/crd/kustomization.yaml +++ b/config/crd/kustomization.yaml @@ -2,32 +2,31 @@ # since it depends on service name and namespace that are out of this kustomize package. # It should be run by config/default resources: -- bases/formol.desmojim.fr_functions.yaml - bases/formol.desmojim.fr_backupconfigurations.yaml -- bases/formol.desmojim.fr_backupsessions.yaml +- bases/formol.desmojim.fr_functions.yaml - bases/formol.desmojim.fr_repoes.yaml +- bases/formol.desmojim.fr_backupsessions.yaml - bases/formol.desmojim.fr_restoresessions.yaml -# +kubebuilder:scaffold:crdkustomizeresource +#+kubebuilder:scaffold:crdkustomizeresource patchesStrategicMerge: # [WEBHOOK] To enable webhook, uncomment all the sections with [WEBHOOK] prefix. # patches here are for enabling the conversion webhook for each CRD -#- patches/webhook_in_tasks.yaml -#- patches/webhook_in_functions.yaml #- patches/webhook_in_backupconfigurations.yaml -#- patches/webhook_in_backupsessions.yaml +#- patches/webhook_in_functions.yaml #- patches/webhook_in_repoes.yaml +#- patches/webhook_in_backupsessions.yaml #- patches/webhook_in_restoresessions.yaml -# +kubebuilder:scaffold:crdkustomizewebhookpatch +#+kubebuilder:scaffold:crdkustomizewebhookpatch -# [CERTMANAGER] To enable webhook, uncomment all the sections with [CERTMANAGER] prefix. +# [CERTMANAGER] To enable cert-manager, uncomment all the sections with [CERTMANAGER] prefix. 
# patches here are for enabling the CA injection for each CRD -#- patches/cainjection_in_functions.yaml #- patches/cainjection_in_backupconfigurations.yaml -#- patches/cainjection_in_backupsessions.yaml +#- patches/cainjection_in_functions.yaml #- patches/cainjection_in_repoes.yaml +#- patches/cainjection_in_backupsessions.yaml #- patches/cainjection_in_restoresessions.yaml -# +kubebuilder:scaffold:crdkustomizecainjectionpatch +#+kubebuilder:scaffold:crdkustomizecainjectionpatch # the following config is for teaching kustomize how to do kustomization for CRDs. configurations: diff --git a/config/crd/kustomizeconfig.yaml b/config/crd/kustomizeconfig.yaml index 6f83d9a..ec5c150 100644 --- a/config/crd/kustomizeconfig.yaml +++ b/config/crd/kustomizeconfig.yaml @@ -4,13 +4,15 @@ nameReference: version: v1 fieldSpecs: - kind: CustomResourceDefinition + version: v1 group: apiextensions.k8s.io - path: spec/conversion/webhookClientConfig/service/name + path: spec/conversion/webhook/clientConfig/service/name namespace: - kind: CustomResourceDefinition + version: v1 group: apiextensions.k8s.io - path: spec/conversion/webhookClientConfig/service/namespace + path: spec/conversion/webhook/clientConfig/service/namespace create: false varReference: diff --git a/config/crd/patches/cainjection_in_backupconfigurations.yaml b/config/crd/patches/cainjection_in_backupconfigurations.yaml index ba16473..30c2d80 100644 --- a/config/crd/patches/cainjection_in_backupconfigurations.yaml +++ b/config/crd/patches/cainjection_in_backupconfigurations.yaml @@ -1,5 +1,4 @@ # The following patch adds a directive for certmanager to inject CA into the CRD -# CRD conversion requires k8s 1.13 or later. 
apiVersion: apiextensions.k8s.io/v1 kind: CustomResourceDefinition metadata: diff --git a/config/crd/patches/cainjection_in_backupsessions.yaml b/config/crd/patches/cainjection_in_backupsessions.yaml index f395951..d89ee98 100644 --- a/config/crd/patches/cainjection_in_backupsessions.yaml +++ b/config/crd/patches/cainjection_in_backupsessions.yaml @@ -1,5 +1,4 @@ # The following patch adds a directive for certmanager to inject CA into the CRD -# CRD conversion requires k8s 1.13 or later. apiVersion: apiextensions.k8s.io/v1 kind: CustomResourceDefinition metadata: diff --git a/config/crd/patches/cainjection_in_functions.yaml b/config/crd/patches/cainjection_in_functions.yaml index c8c1091..faa8295 100644 --- a/config/crd/patches/cainjection_in_functions.yaml +++ b/config/crd/patches/cainjection_in_functions.yaml @@ -1,5 +1,4 @@ # The following patch adds a directive for certmanager to inject CA into the CRD -# CRD conversion requires k8s 1.13 or later. apiVersion: apiextensions.k8s.io/v1 kind: CustomResourceDefinition metadata: diff --git a/config/crd/patches/cainjection_in_repoes.yaml b/config/crd/patches/cainjection_in_repoes.yaml new file mode 100644 index 0000000..c8dd2d8 --- /dev/null +++ b/config/crd/patches/cainjection_in_repoes.yaml @@ -0,0 +1,7 @@ +# The following patch adds a directive for certmanager to inject CA into the CRD +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + annotations: + cert-manager.io/inject-ca-from: $(CERTIFICATE_NAMESPACE)/$(CERTIFICATE_NAME) + name: repoes.formol.desmojim.fr diff --git a/config/crd/patches/cainjection_in_restoresessions.yaml b/config/crd/patches/cainjection_in_restoresessions.yaml index cfed67d..d8747fa 100644 --- a/config/crd/patches/cainjection_in_restoresessions.yaml +++ b/config/crd/patches/cainjection_in_restoresessions.yaml @@ -1,8 +1,7 @@ # The following patch adds a directive for certmanager to inject CA into the CRD -# CRD conversion requires k8s 1.13 or later. 
apiVersion: apiextensions.k8s.io/v1 kind: CustomResourceDefinition metadata: annotations: cert-manager.io/inject-ca-from: $(CERTIFICATE_NAMESPACE)/$(CERTIFICATE_NAME) - name: restoresessions.formol.desmojim.fr.desmojim.fr + name: restoresessions.formol.desmojim.fr diff --git a/config/crd/patches/webhook_in_backupconfigurations.yaml b/config/crd/patches/webhook_in_backupconfigurations.yaml index e08ff07..c882396 100644 --- a/config/crd/patches/webhook_in_backupconfigurations.yaml +++ b/config/crd/patches/webhook_in_backupconfigurations.yaml @@ -1,20 +1,16 @@ -# The following patch enables conversion webhook for CRD -# CRD conversion requires k8s 1.13 or later. +# The following patch enables a conversion webhook for the CRD apiVersion: apiextensions.k8s.io/v1 kind: CustomResourceDefinition metadata: name: backupconfigurations.formol.desmojim.fr spec: - preserveUnknownFields: false conversion: strategy: Webhook webhook: - conversionReviewVersions: ["v1", "v1beta1", "v1alpha1"] clientConfig: - # this is "\n" used as a placeholder, otherwise it will be rejected by the apiserver for being blank, - # but we're going to set it later using the cert-manager (or potentially a patch if not using cert-manager) - caBundle: Cg== service: namespace: system name: webhook-service path: /convert + conversionReviewVersions: + - v1 diff --git a/config/crd/patches/webhook_in_backupsessions.yaml b/config/crd/patches/webhook_in_backupsessions.yaml index 7ae00b1..1b94114 100644 --- a/config/crd/patches/webhook_in_backupsessions.yaml +++ b/config/crd/patches/webhook_in_backupsessions.yaml @@ -1,18 +1,16 @@ -# The following patch enables conversion webhook for CRD -# CRD conversion requires k8s 1.13 or later. 
+# The following patch enables a conversion webhook for the CRD apiVersion: apiextensions.k8s.io/v1 kind: CustomResourceDefinition metadata: name: backupsessions.formol.desmojim.fr spec: - preserveUnknownFields: false conversion: strategy: Webhook - webhookClientConfig: - # this is "\n" used as a placeholder, otherwise it will be rejected by the apiserver for being blank, - # but we're going to set it later using the cert-manager (or potentially a patch if not using cert-manager) - caBundle: Cg== - service: - namespace: system - name: webhook-service - path: /convert + webhook: + clientConfig: + service: + namespace: system + name: webhook-service + path: /convert + conversionReviewVersions: + - v1 diff --git a/config/crd/patches/webhook_in_functions.yaml b/config/crd/patches/webhook_in_functions.yaml index e969e6f..0e4e73b 100644 --- a/config/crd/patches/webhook_in_functions.yaml +++ b/config/crd/patches/webhook_in_functions.yaml @@ -1,5 +1,4 @@ -# The following patch enables conversion webhook for CRD -# CRD conversion requires k8s 1.13 or later. 
+# The following patch enables a conversion webhook for the CRD apiVersion: apiextensions.k8s.io/v1 kind: CustomResourceDefinition metadata: @@ -7,11 +6,11 @@ metadata: spec: conversion: strategy: Webhook - webhookClientConfig: - # this is "\n" used as a placeholder, otherwise it will be rejected by the apiserver for being blank, - # but we're going to set it later using the cert-manager (or potentially a patch if not using cert-manager) - caBundle: Cg== - service: - namespace: system - name: webhook-service - path: /convert + webhook: + clientConfig: + service: + namespace: system + name: webhook-service + path: /convert + conversionReviewVersions: + - v1 diff --git a/config/crd/patches/webhook_in_repoes.yaml b/config/crd/patches/webhook_in_repoes.yaml new file mode 100644 index 0000000..898f6c1 --- /dev/null +++ b/config/crd/patches/webhook_in_repoes.yaml @@ -0,0 +1,16 @@ +# The following patch enables a conversion webhook for the CRD +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + name: repoes.formol.desmojim.fr +spec: + conversion: + strategy: Webhook + webhook: + clientConfig: + service: + namespace: system + name: webhook-service + path: /convert + conversionReviewVersions: + - v1 diff --git a/config/crd/patches/webhook_in_restoresessions.yaml b/config/crd/patches/webhook_in_restoresessions.yaml index 1dc3e58..fa17921 100644 --- a/config/crd/patches/webhook_in_restoresessions.yaml +++ b/config/crd/patches/webhook_in_restoresessions.yaml @@ -1,17 +1,16 @@ -# The following patch enables conversion webhook for CRD -# CRD conversion requires k8s 1.13 or later. 
+# The following patch enables a conversion webhook for the CRD apiVersion: apiextensions.k8s.io/v1 kind: CustomResourceDefinition metadata: - name: restoresessions.formol.desmojim.fr.desmojim.fr + name: restoresessions.formol.desmojim.fr spec: conversion: strategy: Webhook - webhookClientConfig: - # this is "\n" used as a placeholder, otherwise it will be rejected by the apiserver for being blank, - # but we're going to set it later using the cert-manager (or potentially a patch if not using cert-manager) - caBundle: Cg== - service: - namespace: system - name: webhook-service - path: /convert + webhook: + clientConfig: + service: + namespace: system + name: webhook-service + path: /convert + conversionReviewVersions: + - v1 diff --git a/config/default/kustomization.yaml b/config/default/kustomization.yaml index dbee156..961519a 100644 --- a/config/default/kustomization.yaml +++ b/config/default/kustomization.yaml @@ -16,21 +16,23 @@ bases: - ../crd - ../rbac - ../manager -# [WEBHOOK] To enable webhook, uncomment all the sections with [WEBHOOK] prefix including the one in +# [WEBHOOK] To enable webhook, uncomment all the sections with [WEBHOOK] prefix including the one in # crd/kustomization.yaml #- ../webhook # [CERTMANAGER] To enable cert-manager, uncomment all sections with 'CERTMANAGER'. 'WEBHOOK' components are required. #- ../certmanager -# [PROMETHEUS] To enable prometheus monitor, uncomment all sections with 'PROMETHEUS'. -- ../prometheus +# [PROMETHEUS] To enable prometheus monitor, uncomment all sections with 'PROMETHEUS'. +#- ../prometheus patchesStrategicMerge: - # Protect the /metrics endpoint by putting it behind auth. - # If you want your controller-manager to expose the /metrics - # endpoint w/o any authn/z, please comment the following line. +# Protect the /metrics endpoint by putting it behind auth. +# If you want your controller-manager to expose the /metrics +# endpoint w/o any authn/z, please comment the following line. 
- manager_auth_proxy_patch.yaml -# [WEBHOOK] To enable webhook, uncomment all the sections with [WEBHOOK] prefix including the one in + + +# [WEBHOOK] To enable webhook, uncomment all the sections with [WEBHOOK] prefix including the one in # crd/kustomization.yaml #- manager_webhook_patch.yaml @@ -46,7 +48,7 @@ vars: # objref: # kind: Certificate # group: cert-manager.io -# version: v1alpha2 +# version: v1 # name: serving-cert # this name should match the one in certificate.yaml # fieldref: # fieldpath: metadata.namespace @@ -54,7 +56,7 @@ vars: # objref: # kind: Certificate # group: cert-manager.io -# version: v1alpha2 +# version: v1 # name: serving-cert # this name should match the one in certificate.yaml #- name: SERVICE_NAMESPACE # namespace of the service # objref: diff --git a/config/default/manager_auth_proxy_patch.yaml b/config/default/manager_auth_proxy_patch.yaml index e44a8d4..b751266 100644 --- a/config/default/manager_auth_proxy_patch.yaml +++ b/config/default/manager_auth_proxy_patch.yaml @@ -1,4 +1,4 @@ -# This patch inject a sidecar container which is a HTTP proxy for the +# This patch inject a sidecar container which is a HTTP proxy for the # controller manager, it performs RBAC authorization against the Kubernetes API using SubjectAccessReviews. 
apiVersion: apps/v1 kind: Deployment @@ -8,18 +8,48 @@ metadata: spec: template: spec: + affinity: + nodeAffinity: + requiredDuringSchedulingIgnoredDuringExecution: + nodeSelectorTerms: + - matchExpressions: + - key: kubernetes.io/arch + operator: In + values: + - amd64 + - arm64 + - ppc64le + - s390x + - key: kubernetes.io/os + operator: In + values: + - linux containers: - name: kube-rbac-proxy - image: quay.io/brancz/kube-rbac-proxy:v0.8.0-arm + securityContext: + allowPrivilegeEscalation: false + capabilities: + drop: + - "ALL" + image: gcr.io/kubebuilder/kube-rbac-proxy:v0.13.1 args: - "--secure-listen-address=0.0.0.0:8443" - "--upstream=http://127.0.0.1:8080/" - "--logtostderr=true" - - "--v=10" + - "--v=0" ports: - containerPort: 8443 + protocol: TCP name: https + resources: + limits: + cpu: 500m + memory: 128Mi + requests: + cpu: 5m + memory: 64Mi - name: manager args: - - "--metrics-addr=127.0.0.1:8080" - - "--enable-leader-election" + - "--health-probe-bind-address=:8081" + - "--metrics-bind-address=127.0.0.1:8080" + - "--leader-elect" diff --git a/config/default/manager_config_patch.yaml b/config/default/manager_config_patch.yaml new file mode 100644 index 0000000..f6f5891 --- /dev/null +++ b/config/default/manager_config_patch.yaml @@ -0,0 +1,10 @@ +apiVersion: apps/v1 +kind: Deployment +metadata: + name: controller-manager + namespace: system +spec: + template: + spec: + containers: + - name: manager diff --git a/config/default/manager_webhook_patch.yaml b/config/default/manager_webhook_patch.yaml deleted file mode 100644 index 738de35..0000000 --- a/config/default/manager_webhook_patch.yaml +++ /dev/null @@ -1,23 +0,0 @@ -apiVersion: apps/v1 -kind: Deployment -metadata: - name: controller-manager - namespace: system -spec: - template: - spec: - containers: - - name: manager - ports: - - containerPort: 9443 - name: webhook-server - protocol: TCP - volumeMounts: - - mountPath: /tmp/k8s-webhook-server/serving-certs - name: cert - readOnly: true - 
volumes: - - name: cert - secret: - defaultMode: 420 - secretName: webhook-server-cert diff --git a/config/default/webhookcainjection_patch.yaml b/config/default/webhookcainjection_patch.yaml deleted file mode 100644 index 7e79bf9..0000000 --- a/config/default/webhookcainjection_patch.yaml +++ /dev/null @@ -1,15 +0,0 @@ -# This patch add annotation to admission webhook config and -# the variables $(CERTIFICATE_NAMESPACE) and $(CERTIFICATE_NAME) will be substituted by kustomize. -apiVersion: admissionregistration.k8s.io/v1beta1 -kind: MutatingWebhookConfiguration -metadata: - name: mutating-webhook-configuration - annotations: - cert-manager.io/inject-ca-from: $(CERTIFICATE_NAMESPACE)/$(CERTIFICATE_NAME) ---- -apiVersion: admissionregistration.k8s.io/v1beta1 -kind: ValidatingWebhookConfiguration -metadata: - name: validating-webhook-configuration - annotations: - cert-manager.io/inject-ca-from: $(CERTIFICATE_NAMESPACE)/$(CERTIFICATE_NAME) diff --git a/config/manager/kustomization.yaml b/config/manager/kustomization.yaml deleted file mode 100644 index 881467f..0000000 --- a/config/manager/kustomization.yaml +++ /dev/null @@ -1,8 +0,0 @@ -resources: -- manager.yaml -apiVersion: kustomize.config.k8s.io/v1beta1 -kind: Kustomization -images: -- name: controller - newName: desmo999r/formolcontroller - newTag: 0.3.0 diff --git a/config/manager/manager.yaml b/config/manager/manager.yaml deleted file mode 100644 index b6c85a5..0000000 --- a/config/manager/manager.yaml +++ /dev/null @@ -1,39 +0,0 @@ -apiVersion: v1 -kind: Namespace -metadata: - labels: - control-plane: controller-manager - name: system ---- -apiVersion: apps/v1 -kind: Deployment -metadata: - name: controller-manager - namespace: system - labels: - control-plane: controller-manager -spec: - selector: - matchLabels: - control-plane: controller-manager - replicas: 1 - template: - metadata: - labels: - control-plane: controller-manager - spec: - containers: - - command: - - /manager - args: - - 
--enable-leader-election - image: controller:latest - name: manager - resources: - limits: - cpu: 100m - memory: 30Mi - requests: - cpu: 100m - memory: 20Mi - terminationGracePeriodSeconds: 10 diff --git a/config/prometheus/kustomization.yaml b/config/prometheus/kustomization.yaml deleted file mode 100644 index ed13716..0000000 --- a/config/prometheus/kustomization.yaml +++ /dev/null @@ -1,2 +0,0 @@ -resources: -- monitor.yaml diff --git a/config/prometheus/monitor.yaml b/config/prometheus/monitor.yaml deleted file mode 100644 index 9b8047b..0000000 --- a/config/prometheus/monitor.yaml +++ /dev/null @@ -1,16 +0,0 @@ - -# Prometheus Monitor Service (Metrics) -apiVersion: monitoring.coreos.com/v1 -kind: ServiceMonitor -metadata: - labels: - control-plane: controller-manager - name: controller-manager-metrics-monitor - namespace: system -spec: - endpoints: - - path: /metrics - port: https - selector: - matchLabels: - control-plane: controller-manager diff --git a/config/rbac/auth_proxy_client_clusterrole.yaml b/config/rbac/auth_proxy_client_clusterrole.yaml deleted file mode 100644 index 7d62534..0000000 --- a/config/rbac/auth_proxy_client_clusterrole.yaml +++ /dev/null @@ -1,7 +0,0 @@ -apiVersion: rbac.authorization.k8s.io/v1beta1 -kind: ClusterRole -metadata: - name: metrics-reader -rules: -- nonResourceURLs: ["/metrics"] - verbs: ["get"] diff --git a/config/rbac/auth_proxy_role.yaml b/config/rbac/auth_proxy_role.yaml deleted file mode 100644 index 618f5e4..0000000 --- a/config/rbac/auth_proxy_role.yaml +++ /dev/null @@ -1,13 +0,0 @@ -apiVersion: rbac.authorization.k8s.io/v1 -kind: ClusterRole -metadata: - name: proxy-role -rules: -- apiGroups: ["authentication.k8s.io"] - resources: - - tokenreviews - verbs: ["create"] -- apiGroups: ["authorization.k8s.io"] - resources: - - subjectaccessreviews - verbs: ["create"] diff --git a/config/rbac/auth_proxy_role_binding.yaml b/config/rbac/auth_proxy_role_binding.yaml deleted file mode 100644 index 48ed1e4..0000000 --- 
a/config/rbac/auth_proxy_role_binding.yaml +++ /dev/null @@ -1,12 +0,0 @@ -apiVersion: rbac.authorization.k8s.io/v1 -kind: ClusterRoleBinding -metadata: - name: proxy-rolebinding -roleRef: - apiGroup: rbac.authorization.k8s.io - kind: ClusterRole - name: proxy-role -subjects: -- kind: ServiceAccount - name: default - namespace: system diff --git a/config/rbac/auth_proxy_service.yaml b/config/rbac/auth_proxy_service.yaml deleted file mode 100644 index 6cf656b..0000000 --- a/config/rbac/auth_proxy_service.yaml +++ /dev/null @@ -1,14 +0,0 @@ -apiVersion: v1 -kind: Service -metadata: - labels: - control-plane: controller-manager - name: controller-manager-metrics-service - namespace: system -spec: - ports: - - name: https - port: 8443 - targetPort: https - selector: - control-plane: controller-manager diff --git a/config/rbac/backupconfiguration_editor_role.yaml b/config/rbac/backupconfiguration_editor_role.yaml deleted file mode 100644 index 423efa0..0000000 --- a/config/rbac/backupconfiguration_editor_role.yaml +++ /dev/null @@ -1,24 +0,0 @@ -# permissions for end users to edit backupconfigurations. -apiVersion: rbac.authorization.k8s.io/v1 -kind: ClusterRole -metadata: - name: backupconfiguration-editor-role -rules: -- apiGroups: - - formol.desmojim.fr - resources: - - backupconfigurations - verbs: - - create - - delete - - get - - list - - patch - - update - - watch -- apiGroups: - - formol.desmojim.fr - resources: - - backupconfigurations/status - verbs: - - get diff --git a/config/rbac/backupconfiguration_viewer_role.yaml b/config/rbac/backupconfiguration_viewer_role.yaml deleted file mode 100644 index 60fef40..0000000 --- a/config/rbac/backupconfiguration_viewer_role.yaml +++ /dev/null @@ -1,20 +0,0 @@ -# permissions for end users to view backupconfigurations. 
-apiVersion: rbac.authorization.k8s.io/v1 -kind: ClusterRole -metadata: - name: backupconfiguration-viewer-role -rules: -- apiGroups: - - formol.desmojim.fr - resources: - - backupconfigurations - verbs: - - get - - list - - watch -- apiGroups: - - formol.desmojim.fr - resources: - - backupconfigurations/status - verbs: - - get diff --git a/config/rbac/backupsession_editor_role.yaml b/config/rbac/backupsession_editor_role.yaml deleted file mode 100644 index d884f01..0000000 --- a/config/rbac/backupsession_editor_role.yaml +++ /dev/null @@ -1,24 +0,0 @@ -# permissions for end users to edit backupsessions. -apiVersion: rbac.authorization.k8s.io/v1 -kind: ClusterRole -metadata: - name: backupsession-editor-role -rules: -- apiGroups: - - formol.desmojim.fr - resources: - - backupsessions - verbs: - - create - - delete - - get - - list - - patch - - update - - watch -- apiGroups: - - formol.desmojim.fr - resources: - - backupsessions/status - verbs: - - get diff --git a/config/rbac/backupsession_viewer_role.yaml b/config/rbac/backupsession_viewer_role.yaml deleted file mode 100644 index 8817113..0000000 --- a/config/rbac/backupsession_viewer_role.yaml +++ /dev/null @@ -1,20 +0,0 @@ -# permissions for end users to view backupsessions. -apiVersion: rbac.authorization.k8s.io/v1 -kind: ClusterRole -metadata: - name: backupsession-viewer-role -rules: -- apiGroups: - - formol.desmojim.fr - resources: - - backupsessions - verbs: - - get - - list - - watch -- apiGroups: - - formol.desmojim.fr - resources: - - backupsessions/status - verbs: - - get diff --git a/config/rbac/function_editor_role.yaml b/config/rbac/function_editor_role.yaml deleted file mode 100644 index 963b8c2..0000000 --- a/config/rbac/function_editor_role.yaml +++ /dev/null @@ -1,24 +0,0 @@ -# permissions for end users to edit functions. 
-apiVersion: rbac.authorization.k8s.io/v1 -kind: ClusterRole -metadata: - name: function-editor-role -rules: -- apiGroups: - - formol.desmojim.fr - resources: - - functions - verbs: - - create - - delete - - get - - list - - patch - - update - - watch -- apiGroups: - - formol.desmojim.fr - resources: - - functions/status - verbs: - - get diff --git a/config/rbac/function_viewer_role.yaml b/config/rbac/function_viewer_role.yaml deleted file mode 100644 index 27bcc02..0000000 --- a/config/rbac/function_viewer_role.yaml +++ /dev/null @@ -1,20 +0,0 @@ -# permissions for end users to view functions. -apiVersion: rbac.authorization.k8s.io/v1 -kind: ClusterRole -metadata: - name: function-viewer-role -rules: -- apiGroups: - - formol.desmojim.fr - resources: - - functions - verbs: - - get - - list - - watch -- apiGroups: - - formol.desmojim.fr - resources: - - functions/status - verbs: - - get diff --git a/config/rbac/kustomization.yaml b/config/rbac/kustomization.yaml deleted file mode 100644 index dbcbe1b..0000000 --- a/config/rbac/kustomization.yaml +++ /dev/null @@ -1,12 +0,0 @@ -resources: -- role.yaml -- role_binding.yaml -- leader_election_role.yaml -- leader_election_role_binding.yaml -# Comment the following 4 lines if you want to disable -# the auth proxy (https://github.com/brancz/kube-rbac-proxy) -# which protects your /metrics endpoint. -#- auth_proxy_service.yaml -#- auth_proxy_role.yaml -#- auth_proxy_role_binding.yaml -#- auth_proxy_client_clusterrole.yaml diff --git a/config/rbac/leader_election_role.yaml b/config/rbac/leader_election_role.yaml deleted file mode 100644 index eaa7915..0000000 --- a/config/rbac/leader_election_role.yaml +++ /dev/null @@ -1,32 +0,0 @@ -# permissions to do leader election. 
-apiVersion: rbac.authorization.k8s.io/v1 -kind: Role -metadata: - name: leader-election-role -rules: -- apiGroups: - - "" - resources: - - configmaps - verbs: - - get - - list - - watch - - create - - update - - patch - - delete -- apiGroups: - - "" - resources: - - configmaps/status - verbs: - - get - - update - - patch -- apiGroups: - - "" - resources: - - events - verbs: - - create diff --git a/config/rbac/leader_election_role_binding.yaml b/config/rbac/leader_election_role_binding.yaml deleted file mode 100644 index eed1690..0000000 --- a/config/rbac/leader_election_role_binding.yaml +++ /dev/null @@ -1,12 +0,0 @@ -apiVersion: rbac.authorization.k8s.io/v1 -kind: RoleBinding -metadata: - name: leader-election-rolebinding -roleRef: - apiGroup: rbac.authorization.k8s.io - kind: Role - name: leader-election-role -subjects: -- kind: ServiceAccount - name: default - namespace: system diff --git a/config/rbac/role_binding.yaml b/config/rbac/role_binding.yaml deleted file mode 100644 index 8f26587..0000000 --- a/config/rbac/role_binding.yaml +++ /dev/null @@ -1,12 +0,0 @@ -apiVersion: rbac.authorization.k8s.io/v1 -kind: ClusterRoleBinding -metadata: - name: manager-rolebinding -roleRef: - apiGroup: rbac.authorization.k8s.io - kind: ClusterRole - name: manager-role -subjects: -- kind: ServiceAccount - name: default - namespace: system diff --git a/config/samples/formol.desmojim.fr_v1alpha1_restoresession.yaml b/config/samples/formol.desmojim.fr_v1alpha1_restoresession.yaml deleted file mode 100644 index f6782a5..0000000 --- a/config/samples/formol.desmojim.fr_v1alpha1_restoresession.yaml +++ /dev/null @@ -1,7 +0,0 @@ -apiVersion: formol.desmojim.fr.desmojim.fr/v1alpha1 -kind: RestoreSession -metadata: - name: restoresession-sample -spec: - # Add fields here - foo: bar diff --git a/config/samples/formol_v1alpha1_backupconfiguration.yaml b/config/samples/formol_v1alpha1_backupconfiguration.yaml index cc33bb6..fe70b83 100644 --- 
a/config/samples/formol_v1alpha1_backupconfiguration.yaml +++ b/config/samples/formol_v1alpha1_backupconfiguration.yaml @@ -1,17 +1,12 @@ apiVersion: formol.desmojim.fr/v1alpha1 kind: BackupConfiguration metadata: - name: backupconf-nginx + labels: + app.kubernetes.io/name: backupconfiguration + app.kubernetes.io/instance: backupconfiguration-sample + app.kubernetes.io/part-of: formol + app.kubernetes.io/managed-by: kustomize + app.kubernetes.io/created-by: formol + name: backupconfiguration-sample spec: - repository: - name: repo-minio - schedule: "*/1 * * * *" - target: - apiVersion: v1 - kind: Deployment - name: nginx-deployment - volumeMounts: - - name: empty - mountPath: /data - paths: - - /data + # TODO(user): Add fields here diff --git a/config/samples/formol_v1alpha1_backupsession.yaml b/config/samples/formol_v1alpha1_backupsession.yaml index f953734..77128f4 100644 --- a/config/samples/formol_v1alpha1_backupsession.yaml +++ b/config/samples/formol_v1alpha1_backupsession.yaml @@ -1,8 +1,12 @@ apiVersion: formol.desmojim.fr/v1alpha1 kind: BackupSession metadata: - name: backupsession-nginx + labels: + app.kubernetes.io/name: backupsession + app.kubernetes.io/instance: backupsession-sample + app.kubernetes.io/part-of: formol + app.kubernetes.io/managed-by: kustomize + app.kubernetes.io/created-by: formol + name: backupsession-sample spec: - # Add fields here - ref: - name: backupconf-nginx + # TODO(user): Add fields here diff --git a/config/samples/formol_v1alpha1_function.yaml b/config/samples/formol_v1alpha1_function.yaml index 18c29a3..df74db6 100644 --- a/config/samples/formol_v1alpha1_function.yaml +++ b/config/samples/formol_v1alpha1_function.yaml @@ -1,11 +1,12 @@ apiVersion: formol.desmojim.fr/v1alpha1 kind: Function metadata: - name: function-backup-pvc - namespace: backup + labels: + app.kubernetes.io/name: function + app.kubernetes.io/instance: function-sample + app.kubernetes.io/part-of: formol + app.kubernetes.io/managed-by: kustomize + 
app.kubernetes.io/created-by: formol + name: function-sample spec: - name: function-backup-pvc - image: desmo999r/formolcli - args: - - backup - - volume + # TODO(user): Add fields here diff --git a/config/samples/formol_v1alpha1_repo.yaml b/config/samples/formol_v1alpha1_repo.yaml index f7942a0..e1eb2a3 100644 --- a/config/samples/formol_v1alpha1_repo.yaml +++ b/config/samples/formol_v1alpha1_repo.yaml @@ -1,11 +1,12 @@ apiVersion: formol.desmojim.fr/v1alpha1 kind: Repo metadata: - name: repo-minio - namespace: backup + labels: + app.kubernetes.io/name: repo + app.kubernetes.io/instance: repo-sample + app.kubernetes.io/part-of: formol + app.kubernetes.io/managed-by: kustomize + app.kubernetes.io/created-by: formol + name: repo-sample spec: - backend: - s3: - server: raid5.desmojim.fr:9000 - bucket: testbucket2 - repositorySecrets: secret-minio + # TODO(user): Add fields here diff --git a/config/samples/formol_v1alpha1_restoresession.yaml b/config/samples/formol_v1alpha1_restoresession.yaml new file mode 100644 index 0000000..24e7093 --- /dev/null +++ b/config/samples/formol_v1alpha1_restoresession.yaml @@ -0,0 +1,12 @@ +apiVersion: formol.desmojim.fr/v1alpha1 +kind: RestoreSession +metadata: + labels: + app.kubernetes.io/name: restoresession + app.kubernetes.io/instance: restoresession-sample + app.kubernetes.io/part-of: formol + app.kubernetes.io/managed-by: kustomize + app.kubernetes.io/created-by: formol + name: restoresession-sample +spec: + # TODO(user): Add fields here diff --git a/config/samples/test_deployment.yaml b/config/samples/test_deployment.yaml deleted file mode 100644 index 8b54688..0000000 --- a/config/samples/test_deployment.yaml +++ /dev/null @@ -1,28 +0,0 @@ -apiVersion: apps/v1 -kind: Deployment -metadata: - name: nginx-deployment - labels: - app: nginx -spec: - replicas: 1 - selector: - matchLabels: - app: nginx - template: - metadata: - labels: - app: nginx - spec: - containers: - - name: nginx - image: nginx:1.14.2 - ports: - - 
containerPort: 80 - volumeMounts: - - name: empty - mountPath: /data - volumes: - - name: empty - emptyDir: {} - diff --git a/config/webhook/kustomization.yaml b/config/webhook/kustomization.yaml deleted file mode 100644 index 9cf2613..0000000 --- a/config/webhook/kustomization.yaml +++ /dev/null @@ -1,6 +0,0 @@ -resources: -- manifests.yaml -- service.yaml - -configurations: -- kustomizeconfig.yaml diff --git a/config/webhook/kustomizeconfig.yaml b/config/webhook/kustomizeconfig.yaml deleted file mode 100644 index 25e21e3..0000000 --- a/config/webhook/kustomizeconfig.yaml +++ /dev/null @@ -1,25 +0,0 @@ -# the following config is for teaching kustomize where to look at when substituting vars. -# It requires kustomize v2.1.0 or newer to work properly. -nameReference: -- kind: Service - version: v1 - fieldSpecs: - - kind: MutatingWebhookConfiguration - group: admissionregistration.k8s.io - path: webhooks/clientConfig/service/name - - kind: ValidatingWebhookConfiguration - group: admissionregistration.k8s.io - path: webhooks/clientConfig/service/name - -namespace: -- kind: MutatingWebhookConfiguration - group: admissionregistration.k8s.io - path: webhooks/clientConfig/service/namespace - create: true -- kind: ValidatingWebhookConfiguration - group: admissionregistration.k8s.io - path: webhooks/clientConfig/service/namespace - create: true - -varReference: -- path: metadata/annotations diff --git a/config/webhook/service.yaml b/config/webhook/service.yaml deleted file mode 100644 index 31e0f82..0000000 --- a/config/webhook/service.yaml +++ /dev/null @@ -1,12 +0,0 @@ - -apiVersion: v1 -kind: Service -metadata: - name: webhook-service - namespace: system -spec: - ports: - - port: 443 - targetPort: 9443 - selector: - control-plane: controller-manager diff --git a/controllers/.backupconfiguration_controller.go.un~ b/controllers/.backupconfiguration_controller.go.un~ new file mode 100644 index 
0000000000000000000000000000000000000000..beb12d25232aea0bde1357d84862cb0ac41688a4 GIT binary patch literal 35950 zcmeI5+ix4k6^CW{qR>jyCcee-C2RRc$_iy!w&U7WV);@#jT;w{HLK!5^8fGP#jqUcMX3go3P?fK2lA&*FkyAnIj z%N$@hXK%B!=R32rJ3BMHUY`HaOTmTkAASA9uU()0%QJm{_|B!1`|H#9r~dQH-M{_x zuYdfc{;!|?c;KU7-|Fk@yF-CxJt_SZF6$MDIrMoSpWD7+9jzJcoBmMmfCI1!f&<@B z8vXp2q_Krp+Bmxf8kVF(-W7*kCw&0R-qMy26^_2XPamh{cPPAv4J1IaF*iDzyQ4O} zO^cicHXXsH+>GnptXI#6!EA9VcH?@jIO)aZ1%Iks@;x_lCY(5``znFA=pdvIK>`n} zv>m@v^PNSt$D6bkr-5LPz3LQ^AJ?P6@#3gbe9^DfEAir;5yy`rbqp>MDMN6~spOdA zm2j?jF^a+{PkW9yL-oL&srXJDI&P`t)O7Z|Fo@l9;73CRmD=l+0&)RCYR8dUc@}4L zPER;dak5m>mzHl{df|-PVc&!klq=5SiF?Ym;mRA3kQTT(a*OrWwPA!_1g@woNpZuf zq{FqpKmo4(XXV=K64wS=xb{ydY@%>1w`#C-)X7{f7p>m8=*ZnTawS)$?VEGCJGwB? zQi1kvI#{=|MLVyjL$%Yv$u?48fdk&70B5#N2O}62Z)w}Ka=QM+>F(0m?o;?F1t93p z<aOH{7w}xyI8=d9EJ0aXAbY@hCf@Ef2H_>JMY)3 zuIHDajW~yzx9Ea)2snVs8pRMe2QEX^Rn{S3k{%?BL+osI2zU-00-khSR;yV(LZb!B z90e9&M%orFTcnpb3))>0Ee^m9TA*EXsRhc-BKN=yw5XHqV-t+fi)2B&ryE*&R-aDZ zzgBxY2FO8?cR;pag+Y2!vw+>Fz^+dop?)QI`367^oh`vKC-OdAGM) zwusT#TiTJl%IK$GAcFZJK=*Zi5x$-uD-@p#X2blD7tUAR$alg@$#H{{6Zo$VQQJ%N zDT$Vv3$$grxkQ`95`sZ*;EpP~Uopl1Xh|!pwz?C;#@s}>%I7Jt zd;}?J0cXpWtzkSSwlj+DhUKrfI5F6cq0&X0=W6R*aQr+{(E`RJykK8}*b=5Zg78+f)wyQBa9lKGT;o4 zQec6j+@ql0hOKk3Sxm*`9DkS2ahFbXp8`9_2z@L_Zq@1Z`N0YZAEUqmyl4}}2LxN* zVFaV%E$teRNYOOSP0}du`w8hXrX9XI>N_{cZ4sFT)e{tuj3o&p7!@L!lO$pKrz!jf zXH_42u;wJM(a->CD+Lw+#6Gsci7jF@CTN>GLHmHty2|1Qa@#~^K@}wJjZ_%LoJu4I zyOA8qd>;k4?INRq3Xry`K^VoHng()T*7^|Kc8Gj}=_oJThgG)38e>>eJy}qUijDQ_ zq^s|q#4-6KyD_Q#@D{`7#}~A*9DRg!rbnFEjpqDVHS4>Yb+f?>Y1IRp0$DG|KuFY+DG6EPY4shPpoeUgY=|Ie~@}+--sWyZ-}Nd zxWX=R`EV2K8p0qwsnQ*!p4l~O57;$?XTTG7i>rs1SkDj!=}DE|Aa&^(d*zE`>(fT` zfIXwx+73t9BQ73JVjV*mq$gE6gVZxS#w!olF&bZC;0JrfwZlioD5zI3`+6cQvldnQ zg4Cs749OeDQEh&l3i%HiBamAkshJ89o1cijSt=xTU^-+qy2u)WI5 z_PK{GvBnsdL>B@`Es4}Gc%og4##rtCo#Hu=@O**RvH1^@zLmyEJD2@R)sGxJ8eVYOeb)L4l;yVYuq z&eq&hjS!nic9lcJRX0f*PwwA})#r)1D69vid||}#DrGuD@#@UWz8635-$tHxCkf;l zL&{D!FC4F;JH+#Cqpwn*KbqadWF+G(kQ_+@3bOO!c#$(I( 
z73u14G5&nD;>NyOA|T}oB{QItA@!hAJ6RaB0{FA_%9O86Tu6?o>9w`w1CX|{*!MFx za^f>N7+GYtH!B}&v9d@wo0<2lXl5!5@Zg=v8LD9kuG=gqpSd};yThE6x!%mkMMpa} z5?skIHz|AID_iAwGFJ$gUH&3D-k3o zYny+o+UB98wt?x(6uwOX2zUfNum2XUV{|};8i@rMuTij_s@Nh%V{hq`Z^+l}DLUIr z6c}Be9{g7G+y{JzM2>+LIMH{`mP8oAs1VMVyWymz?lr33d z5VL9;SsEqZR%p8q1!AxOIV>^{$QGoaERDo$1)Gg4I35vs2V=`s7{sia#`S1B z*RCV?xVQo%8iH7W*kWaC7?0DiQaeLyaqTu9lc|Ctx_PjGu%*h@FdnB-Ew)qbGRT&p z0wVe_vjDJ#%GNL*r$K$H9qP&hrVpdIKie4Qp?Gh`Fg=eHdi2gh* zz-y7RHH^n;NYSgK1?ft&8!%B2MCUjb(6vO_8ph)^qUhDpLUe`cDi|mL9uxTnTnkhf z#H^YIw9pQ;^YkOk6a2L(b;{?9rBFjK)aSDT&Rnu@D zZZDpl<_2P#poh`DEFf!XvNepyX*3VD)9f(O6T<|&lOoRmYhen5m{rqY9&Cr%K4}-T z1Ut+w!ve0BC0oOIoW}A%JIl8D*BB+xof0_)R*O;?#H^Zz5;JhMyrHztq{k#d?u#O~ zplV49gP2v*NMZ`07LqMfA~HyTLpMqmK(!#*8ph)^kTlCnE6C+jTQWy*J0r3Srk0~H zh*_05R=V{ra@sjAo1v630^65FR)N%F6b3P?reS=t9iyCQmMMbTgvcp~T8hFTW>umX zsKS^_I#k2-=xzNP1$3yUS+J7-B;yU}uFE~D!Ri?-YXL{>X9qB|MU2MY(%&XK-AI|; z-u4nGCVJ)q2x!lWd;%!?Wm|~CDCSgxcv(W^a@Gmr`6vv&63%ph2xwzgXwBF#Aww`25tTAp_l8%2@q=82B0W5n!kOPfDkMngX-Y^pAY8si9^RNm7H3+D^cw&G literal 0 HcmV?d00001 diff --git a/controllers/.backupconfiguration_controller_cronjob.go.un~ b/controllers/.backupconfiguration_controller_cronjob.go.un~ new file mode 100644 index 0000000000000000000000000000000000000000..e7f4016c07d2360a9728533c190df255a5a3b6b4 GIT binary patch literal 7322 zcmeI1?`s@I7{@n1+PIBBuxe{#)akY0-FYrXEY`LWYa-I5c#unjz6zV!xyz=T*<)@t zZ3!ZMBMANpg71CrTVE9a0Q*WX0Y#BQUx{GpJAJ;hJDW|7aP9Tp?E~M;&dlyi_W3;X zYi}jo{N{?BkN#gU0IsNIEUptP| zA-(Im?wZf9Cf&JMw!N12b<*=%Nv|tpAObea>c#4=;I@(;4oG995z^Bn7*Z>T@BZE# zN9}~O`Xg$|M$^RnMb(R>7m)jh%ru2eX+!V;|M}K}!7<|e4YwaBoggJ7pg88P5^&|s zi%4iOr((GfuT^VjZW?&F1GKFHWg#j%0 zB(;HSuz~^T>$o7GM~gsTrZP~sK+n2in+c`ZIgH7VgwA<>KofRH#fc0I)hHBFd(E{C z!S$lJv7L=q;5tQ@`Y!T{C} z-gk<4C3V&%S-kJ>8@+Q%_*(qm0M4f6^JcW4oMkeMm>tEyI#GBLAiqF5hO!bfyfA>} zo}@N#jg=RMN%|}dIjbsG$3`hV*@m4&`8tf{k|3BojD)JO^qlkIbuE(Z_<7jKfkGkvrjCVkbHB8&F%4cju!x54-M9NE~izq0g16~-WQFQ!OjE*Vl zY?@@FWAovn!=8c%iwVTQWZ|t5A63$OC@W(EUKplPOx!auF`iEGuTf_;l8uRteq-Wd 
z5ImlMKmcIgvWDOYN%+sofPfQ*YJ6wwHxm$t(tuc}-Zn@!AXIiMVA^EZ+3b%PJe9d1r^*y#t4^6vn?^>w& zdiGSd+gZNR6{h~A>vF#**aPp?lu;=A~l5;3MxW^7kB|7Uf>xCA@PKGK#0)tBZ5*>2?62(UO`Aa!1?CR**zPtcei_e z#mgLNcIM9gn|r={=KszvE-ioixysS%UvndqGspk&$tOpzojUTZUta(DV_$pezHh#g zzxKk*wXgnb*H6Fk!~f)Rx!1%U%;)p8P?)j@IPVed5N&?GSfJ+#c@}CO%hM|I@YY+a$0fckWFhL~a#F z6vBxQx{&<*SERK^BZEJ<21eyj)$F@Xcu~STxlC#O{x=%{|Y2<*MtmvjC+zs8mH>0~(4g_6d zhQLsm3Jplup$Y&w?gK;wdqWE;1Wn%J&wrl@lT%VLk(~1S)XmU9^b$K=6-A-B7+<^r z$ypUgi2xAPBWwg{Xd#85e}#rtHEf|Bw9q&RdwE%0NA*&7T$LSaa9ZdQg~0JW;;428 zb|@i@48ZQjWy_BC!#(0YFAjD)q)3vzd+V;hzN?cz#vvjr~?Sp7KD%NUo<-%A=n*DAU z(GyXrT8~#J2`?7wYd)8SE$@H|spDoeE&^+mkVXb!{VU5FGei9dtoJ(B`MKh9d^n0O zR_n{v^6F%Er!vam2Se{6Q)$jP~dnP=u^=g%$79Gmc1xiJ~83Wx(%)ka$1Vx>&% zX2LzOK9FEtES8(XwQZ+xR)m@}Yhu9(!cc~)M$-5e;rr!6-4MRf%QA0s2(M^o40On- zRVY)T04Tx)K+iYxS19QcB%1@#j?xRc%VhR3M46;Lt8^NlE%06HnX+2 zEj{v7*K!n}Qt-~!phlzKjC~QO)gnz9{HVD;OGL0Zw2(puVfJUsOq+b#KKz7?LoHWV z!tM%$Qux-r=_jur=~u) zh1A>7fvm+pN#z9y(9y(JNNVUOwh(%&REP0>=JDW9t=x?Uh1#6a%rLt69;-l|~_K|iOcj@r| z0txa}YG~chwiif3IdnCtM$-6}9-8e!Z+*9=r+rCZl!IyhTI17wuaY8u->$=XjZEF>8suC3OX5RiJqT`VmAS=Sh*CNK6-}O*5pX0+KMb)jN$_Q_R+L- za`YXbai=DI$N4LAmBhC7<=|y_NSq2U3jG}+BUX*1@$JiD!}jIq7&j*qrDE#JN>}#* z2Cs}QJ%Pl;TNMCuoReVDs)-@}*6T0l(=hC2(Vw#xtX{Y*F2i5uh2R3>~!4$X&Zx^y}TAy(R}! 
zAO3h!^7fhm(;iZn;`+?ta$M=04Kx14+X>CBF??<80_+jCy#SL2aHt}y zM$-5e;AZWoA6bBO9ie^%fDbu1D4)};%vV{)$| zQh;}99j3$TfHW>ng-cC}qe2?24ke_K0ieBIuvUla_leUsW$SBs-2qKCI9Tmr+Y20@ z635U$V22XY$RO;JKKmU>myIG)cKgNP+LH~(+KXHi+!rzn@p7#!3t_k|oocA_wn6#0 zNj;eNi=*c-phXFpN^4CK-y4TXPU#(eeFY>F8Ye!uIL-~kg(kbvOeR4=^lO`QlJQ|7 zTHJj^tWU9jvU070)R)KS1 ztJ=ui7q-9YRjZ;+Gb~zAQcF|O_EM-Yt41>Q1?_Kpq1`8FhpZ|qNlibLgVCxsGWYwD zeM-m<*9Ou?l|BMc16b8Y=6*l0ljO$3v~6M7>d%lc4d=i$bDnyGeq32zWf-hrJfJrb zAH)HxQAoBy!TP&iCG4=PAo~s}XdX0A0BMMa`jb&gc0r+g-O}yILp>sp95P7&S3qPG zl5J2B{h=QsYeA$2T%cscC5)_~giJ*c{m~)Xo(!644f9pGE7J_dDf$LlDc+kLsU6`* zgfVe0;y&*rA`HWzg)Bvud;g9#IX;MYj|RoSwtOQdUz|c7DQ^mg%x34A)`YHX#|ZsO^V;rs)QHC?G(rV zT72IkBBP7MeMa0p;$(7adxi&Hq`PIg(-m3R^l0nPP1g9bvXFe6siPnA7Yg>DP)g+` z`4^PJ$@9<0rN*=Ii+s`fuoQ`W$+=Q1mSkDR^TpB&&6@mo%0>5mr;BAUGxCSYf}BXQ zbX8%&tySd~aFk%bi==6RF4vL>nbL$F9w&OGA&bAtqa^FWx&^sfle1gVPHpmW4PU>WW4%~^ zxa)^@U%|H1)MaXoS&G(u<@zy6kbE86y3A%@$)0}Z=wQ8$&y=uKH@zNkgXewx8f>a99Xh7TAZ1y(I)y?kpvUhE5 z)6(=GRT~i6T2$JI6fuJIML}PDRY4Hn;+ujL^i_S;?{{`48G{X*TlQ^!;mgeYW_M@# zee#>xox2+*H9 z%MUV{%neA{I#gCbIq6`jlyQEZ{U1Ca0;Xeumem4G%Yph-jweHl6p2{hSk zR)Hx|;oVgRdIy8pRoI@dn2CSvWosH0<6U4bq_ z(b`8C1Svdnsw5<_&Y`3c;AO~6m5fGGiv#V74($>~WcX>!w;sG9^dG6XPMCM9fxj~t zDGs7l9CXIqQq6M<#lf+l=9TjOhibN$&vjc{qQ6|BAPJM`oxGkj(&iB2pUI@93QL1! 
z9Ku%@6TXVUC=zZ~?|?+le0>j6Qz}`lWLp|J@)%);or*+*CFeph&!J@WRh?6+e4RMC zUT4S9&p_`((Uhuy36D%TLb{7bNF%`OP>yW_jZsKoaiHxG(3&Dd*D%-&^fAPQD(KUMwAZ0oh)K8x zNtn%6-LO{mtx?;Xa3wc#x*JkPhCGpbG=>|4ki_C}dsgSh&<{i879|@!`;|vRBP6vi zhpGU`W4I4vpfLzZEDp5)?0PC_pJ7xcTbG_}*g^9a_Rflfp&iyHiqAP=x$gEwuNaal z)9@bc#z}Y>GO4HqrQvEEs(pGYGNFd$aHP6gn?XZ7ut;j8v{oL*f&|UNrv)TYH4e2G zbZUziz6NuV6z|pUoP_z=lXxyQ&!Adl8mY#idQ7Lv&>w~tmPLB%$|F;eknV$&5g?B< z6R3(vk}?joNgY}!!_PsNm_SKs9gmSlf)%KlZIfz@LIR6JZAzyWV6a1wdUJRj%O2m| zi-Vr)R`O46-HHuQ;n-F7yz+$W1b#{FeUjb%+TD`tSxD@M8kSmQ8mY!%`H9Z*3`RKz zDVDc+gdrud|0ZvW1bLdpwxPx-B(ONtzSgNRVa`K}S^*ijtPM2sdr-S$vgHNMjH(5t z;c6V7-|IY?N(?`Z>)`S~W8##HzlG13NOV1~CylfxAwyV2BuN>E@DGA;bF@cu9HX6p zK7~klS!*WQZGe=KD}B6>nSB|AR1MqRzw6nmQcJOH&*u2Flauclr1Ow=LnL<^GW*Z9hiq>uSxyk8a?Y_v6DJ$L=%`=V`nN+II-1+E3rPhcW7ps$y z;|FrTv$UHg**77RsajCdtAb{`PV=Y?KSxZQsbu*muO!VvY1pc0 zq+Wt;hhWE-Z;tjK%3Y$PeqJEnugzeek*9UM>Mx%upQ71h?)8% z=3N+nj$xO5w>q)i+OuyosE!9-z1#LGV|I6OzEk}eL(fiJL-6HA#U8t1%8;aKVET(- W+7U5jIz0m^rdGr>y9CBtv;P7hlF@De literal 0 HcmV?d00001 diff --git a/controllers/.backupsession_controller.go.un~ b/controllers/.backupsession_controller.go.un~ new file mode 100644 index 0000000000000000000000000000000000000000..fd13c2cd717ac1fb29facda0e4de87811002221f GIT binary patch literal 2760 zcmeH|KTE?<6vba_Rf>W*_-_zHQD{Z%V$=$4H5Lb#B32hI1w&{Hwt?6}9R&M19Nfjv zB7&2OlZv>wh_jya`d*12AX^R`ZgO(TgZtwqhxNwwL9^h#i|E}*`Jr6R<{rO}Z%4;Z zmOh_q=I-#(+hYFm;_CJLSqNdlS`l2(Ln>g^7WRu(iuF1w_o^YPDhnS?sw#dVnea)d z044w&vl538Kht>s1Uv=jc$Q2Eqo0^v35}2j*r7y#37B~ZjY9}jfR^w>d`n8eoMG2Vpa zIE5eua(jMqq?Un6uJ@xOjDDil&A)1FBtZ6%bqI|^2vmT!FQG;LBjTY%Fc(j!jZ(2t aOt#M)FSTKuyKRr3&$?~Y9M3VkA2)wBlWA@M literal 0 HcmV?d00001 diff --git a/controllers/.suite_test.go.un~ b/controllers/.suite_test.go.un~ new file mode 100644 index 0000000000000000000000000000000000000000..9a5f9f305922288e7f91e62f1fcc4057c0092bc9 GIT binary patch literal 7724 zcmeI1Pe>F|9LL9X`}55Hg5jZT6tdlr6$v3UYi+fg8mvlTuuS7nTf6Skj$)L7)hRu9 
zu%JV?ZV`0p-q}M1-nvv^=OBcz-}lY??LH-JaI(|;!gt@i-|XxgexLol`SZr}#TR2! z1Enuk?8xU_;_LgqU+cY%_UP*A<(H{<4JVq#(MwCo4{IM^|FkUY8M#Gtb~@#9Wv0t+ zyM4Bj%9p5{m!Zy?t~UBcJGoBzFtrd`w&kuYevjiDaVxisy`)&s$WQF*Bcrv}Nx>F+ zM*lto?#jX^iNmsfG}F%`^%ux$U+2#LHJZhh}3tJ z`C?cXye8u?$ntA4kdhN7VkA&}NXFYXLk>xZqFy=}b0JiHZA{zRtCqz#Iyt2NF&X57 zr*=dhaYQ{r0MbD+3jpz*76eSWAqx={2HIN*EmlJWXwS)@-75xX%k%`1phbvGAy02O zQ<*!S<9i9$pY5=x_(mp~Ol5DDrVEpa?6m88-ee+~RD?nF5Sazq*U0kEWC(LwcB-Xi zpcP5DJ|H}y(vR_gASUJ%D1hOWBYy02-JpUhLbefDKh3yfcndlpn|AZ f8XY8o8i>fme^C`;JgBzK5RZ$kmUX-5zrFem$?I1Y literal 0 HcmV?d00001 diff --git a/controllers/backupconfiguration_controller.go b/controllers/backupconfiguration_controller.go index 8aaedf4..37c88d3 100644 --- a/controllers/backupconfiguration_controller.go +++ b/controllers/backupconfiguration_controller.go @@ -1,5 +1,5 @@ /* - +Copyright 2023. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -18,360 +18,108 @@ package controllers import ( "context" - //"time" - formolrbac "github.com/desmo999r/formol/pkg/rbac" - formolutils "github.com/desmo999r/formol/pkg/utils" "github.com/go-logr/logr" - appsv1 "k8s.io/api/apps/v1" - batchv1 "k8s.io/api/batch/v1" - kbatch_beta1 "k8s.io/api/batch/v1beta1" - corev1 "k8s.io/api/core/v1" "k8s.io/apimachinery/pkg/api/errors" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/runtime" ctrl "sigs.k8s.io/controller-runtime" "sigs.k8s.io/controller-runtime/pkg/client" - "sigs.k8s.io/controller-runtime/pkg/controller" - "sigs.k8s.io/controller-runtime/pkg/reconcile" + "sigs.k8s.io/controller-runtime/pkg/log" formolv1alpha1 "github.com/desmo999r/formol/api/v1alpha1" + formolutils "github.com/desmo999r/formol/pkg/utils" ) // BackupConfigurationReconciler reconciles a BackupConfiguration object type BackupConfigurationReconciler struct { client.Client - Log logr.Logger Scheme *runtime.Scheme + Log logr.Logger + context.Context } -var _ 
reconcile.Reconciler = &BackupConfigurationReconciler{} +//+kubebuilder:rbac:groups=formol.desmojim.fr,resources=backupconfigurations,verbs=get;list;watch;create;update;patch;delete +//+kubebuilder:rbac:groups=formol.desmojim.fr,resources=backupconfigurations/status,verbs=get;update;patch +//+kubebuilder:rbac:groups=formol.desmojim.fr,resources=backupconfigurations/finalizers,verbs=update -// +kubebuilder:rbac:groups=formol.desmojim.fr,resources=*,verbs=* -// +kubebuilder:rbac:groups=apps,resources=deployments,verbs=get;list;watch;create;update;patch;delete -// +kubebuilder:rbac:groups=apps,resources=replicasets,verbs=get;list;watch;create;update;patch;delete -// +kubebuilder:rbac:groups=core,resources=pods,verbs=get;list;watch -// +kubebuilder:rbac:groups=core,resources=secrets,verbs=get;list;watch -// +kubebuilder:rbac:groups=core,resources=configmaps,verbs=get;list;watch -// +kubebuilder:rbac:groups=core,resources=serviceaccounts,verbs=get;list;watch;create;update;patch;delete -// +kubebuilder:rbac:groups=rbac.authorization.k8s.io,resources=roles,verbs=get;list;watch;create;update;patch;delete -// +kubebuilder:rbac:groups=rbac.authorization.k8s.io,resources=rolebindings,verbs=get;list;watch;create;update;patch;delete -// +kubebuilder:rbac:groups=rbac.authorization.k8s.io,resources=clusterroles,verbs=get;list;watch;create;update;patch;delete -// +kubebuilder:rbac:groups=rbac.authorization.k8s.io,resources=clusterrolebindings,verbs=get;list;watch;create;update;patch;delete -// +kubebuilder:rbac:groups=batch,resources=cronjobs,verbs=get;list;watch;create;update;patch;delete -// +kubebuilder:rbac:groups=batch,resources=cronjobs/status,verbs=get -// +kubebuilder:rbac:groups=coordination.k8s.io,resources=leases,verbs=get;list;create;update +// Reconcile is part of the main kubernetes reconciliation loop which aims to +// move the current state of the cluster closer to the desired state. 
+// TODO(user): Modify the Reconcile function to compare the state specified by +// the BackupConfiguration object against the actual cluster state, and then +// perform operations to make the cluster state reflect the state specified by +// the user. +// +// For more details, check Reconcile and its Result here: +// - https://pkg.go.dev/sigs.k8s.io/controller-runtime@v0.13.1/pkg/reconcile +func (r *BackupConfigurationReconciler) Reconcile(ctx context.Context, req ctrl.Request) (ctrl.Result, error) { + r.Context = ctx + r.Log = log.FromContext(ctx) -func (r *BackupConfigurationReconciler) Reconcile(ctx context.Context, req reconcile.Request) (reconcile.Result, error) { - var changed bool - log := r.Log.WithValues("backupconfiguration", req.NamespacedName) - //time.Sleep(300 * time.Millisecond) + r.Log.V(1).Info("Enter Reconcile with req", "req", req, "reconciler", r) - log.V(1).Info("Enter Reconcile with req", "req", req, "reconciler", r) - - backupConf := &formolv1alpha1.BackupConfiguration{} - if err := r.Get(ctx, req.NamespacedName, backupConf); err != nil { - return reconcile.Result{}, client.IgnoreNotFound(err) - } - - getDeployment := func(namespace string, name string) (*appsv1.Deployment, error) { - deployment := &appsv1.Deployment{} - err := r.Get(context.Background(), client.ObjectKey{ - Namespace: namespace, - Name: name, - }, deployment) - return deployment, err - } - - deleteCronJob := func() error { - _ = formolrbac.DeleteFormolRBAC(r.Client, "default", backupConf.Namespace) - _ = formolrbac.DeleteBackupSessionCreatorRBAC(r.Client, backupConf.Namespace) - cronjob := &kbatch_beta1.CronJob{} - if err := r.Get(context.Background(), client.ObjectKey{ - Namespace: backupConf.Namespace, - Name: "backup-" + backupConf.Name, - }, cronjob); err == nil { - log.V(0).Info("Deleting cronjob", "cronjob", cronjob.Name) - return r.Delete(context.TODO(), cronjob) - } else { - return err + backupConf := formolv1alpha1.BackupConfiguration{} + err := r.Get(ctx, 
req.NamespacedName, &backupConf) + if err != nil { + if errors.IsNotFound(err) { + return ctrl.Result{}, nil } - } - - addCronJob := func() error { - if err := formolrbac.CreateFormolRBAC(r.Client, "default", backupConf.Namespace); err != nil { - log.Error(err, "unable to create backupsessionlistener RBAC") - return nil - } - - if err := formolrbac.CreateBackupSessionCreatorRBAC(r.Client, backupConf.Namespace); err != nil { - log.Error(err, "unable to create backupsession-creator RBAC") - return nil - } - - cronjob := &kbatch_beta1.CronJob{} - if err := r.Get(context.Background(), client.ObjectKey{ - Namespace: backupConf.Namespace, - Name: "backup-" + backupConf.Name, - }, cronjob); err == nil { - log.V(0).Info("there is already a cronjob") - if backupConf.Spec.Schedule != cronjob.Spec.Schedule { - log.V(0).Info("cronjob schedule has changed", "old schedule", cronjob.Spec.Schedule, "new schedule", backupConf.Spec.Schedule) - cronjob.Spec.Schedule = backupConf.Spec.Schedule - changed = true - } - if backupConf.Spec.Suspend != nil && backupConf.Spec.Suspend != cronjob.Spec.Suspend { - log.V(0).Info("cronjob suspend has changed", "before", cronjob.Spec.Suspend, "new", backupConf.Spec.Suspend) - cronjob.Spec.Suspend = backupConf.Spec.Suspend - changed = true - } - if changed == true { - if err := r.Update(context.TODO(), cronjob); err != nil { - log.Error(err, "unable to update cronjob definition") - return err - } - } - return nil - } else if errors.IsNotFound(err) == false { - log.Error(err, "something went wrong") - return err - } - - cronjob = &kbatch_beta1.CronJob{ - ObjectMeta: metav1.ObjectMeta{ - Name: "backup-" + backupConf.Name, - Namespace: backupConf.Namespace, - }, - Spec: kbatch_beta1.CronJobSpec{ - Suspend: backupConf.Spec.Suspend, - Schedule: backupConf.Spec.Schedule, - JobTemplate: kbatch_beta1.JobTemplateSpec{ - Spec: batchv1.JobSpec{ - Template: corev1.PodTemplateSpec{ - Spec: corev1.PodSpec{ - RestartPolicy: corev1.RestartPolicyOnFailure, - 
ServiceAccountName: "backupsession-creator", - Containers: []corev1.Container{ - corev1.Container{ - Name: "job-createbackupsession-" + backupConf.Name, - Image: backupConf.Spec.Image, - Args: []string{ - "backupsession", - "create", - "--namespace", - backupConf.Namespace, - "--name", - backupConf.Name, - }, - }, - }, - }, - }, - }, - }, - }, - } - if err := ctrl.SetControllerReference(backupConf, cronjob, r.Scheme); err != nil { - log.Error(err, "unable to set controller on job", "cronjob", cronjob, "backupconf", backupConf) - return err - } - log.V(0).Info("creating the cronjob") - if err := r.Create(context.Background(), cronjob); err != nil { - log.Error(err, "unable to create the cronjob", "cronjob", cronjob) - return err - } else { - changed = true - return nil - } - } - - deleteSidecarContainer := func(target formolv1alpha1.Target) error { - deployment, err := getDeployment(backupConf.Namespace, target.Name) - if err != nil { - return err - } - restorecontainers := []corev1.Container{} - for _, container := range deployment.Spec.Template.Spec.Containers { - if container.Name == formolv1alpha1.SIDECARCONTAINER_NAME { - continue - } - restorecontainers = append(restorecontainers, container) - } - deployment.Spec.Template.Spec.Containers = restorecontainers - if err := r.Update(context.Background(), deployment); err != nil { - return err - } - if err := formolrbac.DeleteFormolRBAC(r.Client, deployment.Spec.Template.Spec.ServiceAccountName, deployment.Namespace); err != nil { - return err - } - return nil - } - - addSidecarContainer := func(target formolv1alpha1.Target) error { - deployment, err := getDeployment(backupConf.Namespace, target.Name) - if err != nil { - log.Error(err, "unable to get Deployment") - return err - } - log.V(1).Info("got deployment", "Deployment", deployment) - for i, container := range deployment.Spec.Template.Spec.Containers { - if container.Name == formolv1alpha1.SIDECARCONTAINER_NAME { - log.V(0).Info("There is already a backup 
sidecar container. Skipping", "container", container) - return nil - } - if target.ContainerName != "" && target.ContainerName == container.Name { - // Put a tag so we can find what container we are supposed to backup - // and what process we are supposed to chroot to run the init steps - deployment.Spec.Template.Spec.Containers[i].Env = append(container.Env, corev1.EnvVar{ - Name: formolv1alpha1.TARGETCONTAINER_TAG, - Value: "True", - }) - } - } - sidecar := corev1.Container{ - Name: formolv1alpha1.SIDECARCONTAINER_NAME, - // TODO: Put the image in the BackupConfiguration YAML file - Image: backupConf.Spec.Image, - Args: []string{"backupsession", "server"}, - //Image: "busybox", - //Command: []string{ - // "sh", - // "-c", - // "sleep 3600; echo done", - //}, - Env: []corev1.EnvVar{ - corev1.EnvVar{ - Name: formolv1alpha1.POD_NAME, - ValueFrom: &corev1.EnvVarSource{ - FieldRef: &corev1.ObjectFieldSelector{ - FieldPath: "metadata.name", - }, - }, - }, - corev1.EnvVar{ - Name: formolv1alpha1.POD_NAMESPACE, - ValueFrom: &corev1.EnvVarSource{ - FieldRef: &corev1.ObjectFieldSelector{ - FieldPath: "metadata.namespace", - }, - }, - }, - corev1.EnvVar{ - Name: formolv1alpha1.TARGET_NAME, - Value: target.Name, - }, - }, - VolumeMounts: []corev1.VolumeMount{}, - } - - // Gather information from the repo - repo := &formolv1alpha1.Repo{} - if err := r.Get(context.Background(), client.ObjectKey{ - Namespace: backupConf.Namespace, - Name: backupConf.Spec.Repository, - }, repo); err != nil { - log.Error(err, "unable to get Repo from BackupConfiguration") - return err - } - sidecar.Env = append(sidecar.Env, formolutils.ConfigureResticEnvVar(backupConf, repo)...) 
- - for _, volumemount := range target.VolumeMounts { - log.V(1).Info("mounts", "volumemount", volumemount) - volumemount.ReadOnly = true - sidecar.VolumeMounts = append(sidecar.VolumeMounts, *volumemount.DeepCopy()) - } - deployment.Spec.Template.Spec.Containers = append(deployment.Spec.Template.Spec.Containers, sidecar) - deployment.Spec.Template.Spec.ShareProcessNamespace = func() *bool { b := true; return &b }() - - if err := formolrbac.CreateFormolRBAC(r.Client, deployment.Spec.Template.Spec.ServiceAccountName, deployment.Namespace); err != nil { - log.Error(err, "unable to create backupsessionlistener RBAC") - return nil - } - - log.V(0).Info("Adding a sicar container") - if err := r.Update(context.Background(), deployment); err != nil { - log.Error(err, "unable to update the Deployment") - return err - } else { - changed = true - return nil - } - } - - deleteExternalResources := func() error { - for _, target := range backupConf.Spec.Targets { - switch target.Kind { - case formolv1alpha1.SidecarKind: - _ = deleteSidecarContainer(target) - } - } - // TODO: remove the hardcoded "default" - _ = deleteCronJob() - return nil + return ctrl.Result{}, err } finalizerName := "finalizer.backupconfiguration.formol.desmojim.fr" if !backupConf.ObjectMeta.DeletionTimestamp.IsZero() { - log.V(0).Info("backupconf being deleted", "backupconf", backupConf.Name) + r.Log.V(0).Info("backupconf being deleted", "backupconf", backupConf.ObjectMeta.Finalizers) if formolutils.ContainsString(backupConf.ObjectMeta.Finalizers, finalizerName) { - _ = deleteExternalResources() + _ = r.DeleteSidecar(backupConf) + _ = r.DeleteCronJob(backupConf) backupConf.ObjectMeta.Finalizers = formolutils.RemoveString(backupConf.ObjectMeta.Finalizers, finalizerName) - if err := r.Update(context.Background(), backupConf); err != nil { - log.Error(err, "unable to remove finalizer") - return reconcile.Result{}, err + if err := r.Update(ctx, &backupConf); err != nil { + r.Log.Error(err, "unable to remove 
finalizer") + return ctrl.Result{}, err } } // We have been deleted. Return here - log.V(0).Info("backupconf deleted", "backupconf", backupConf.Name) - return reconcile.Result{}, nil + r.Log.V(0).Info("backupconf deleted", "backupconf", backupConf.Name) + return ctrl.Result{}, nil } // Add finalizer if !formolutils.ContainsString(backupConf.ObjectMeta.Finalizers, finalizerName) { + r.Log.V(0).Info("adding finalizer", "backupconf", backupConf) backupConf.ObjectMeta.Finalizers = append(backupConf.ObjectMeta.Finalizers, finalizerName) - err := r.Update(context.Background(), backupConf) - if err != nil { - log.Error(err, "unable to append finalizer") + if err := r.Update(ctx, &backupConf); err != nil { + r.Log.Error(err, "unable to append finalizer") + return ctrl.Result{}, err } - return reconcile.Result{}, err + // backupConf has been updated. Exit here. The reconciler will be called again so we can finish the job. + return ctrl.Result{}, nil } - if err := addCronJob(); err != nil { - return reconcile.Result{}, nil + if err := r.AddCronJob(backupConf); err != nil { + return ctrl.Result{}, err } else { backupConf.Status.ActiveCronJob = true } - for _, target := range backupConf.Spec.Targets { - switch target.Kind { - case formolv1alpha1.SidecarKind: - if err := addSidecarContainer(target); err != nil { - return reconcile.Result{}, client.IgnoreNotFound(err) - } else { - backupConf.Status.ActiveSidecar = true - } - } + if err := r.AddSidecar(backupConf); err != nil { + r.Log.Error(err, "unable to add sidecar container") + return ctrl.Result{}, err + } else { + backupConf.Status.ActiveSidecar = true } - //backupConf.Status.Suspended = false - if changed == true { - log.V(1).Info("updating backupconf") - if err := r.Status().Update(ctx, backupConf); err != nil { - log.Error(err, "unable to update backupconf", "backupconf", backupConf) - return reconcile.Result{}, err - } + if err := r.Status().Update(ctx, &backupConf); err != nil { + r.Log.Error(err, "Unable to update 
BackupConfiguration status") + return ctrl.Result{}, err } - return reconcile.Result{}, nil + return ctrl.Result{}, nil } +// SetupWithManager sets up the controller with the Manager. func (r *BackupConfigurationReconciler) SetupWithManager(mgr ctrl.Manager) error { return ctrl.NewControllerManagedBy(mgr). For(&formolv1alpha1.BackupConfiguration{}). - WithOptions(controller.Options{MaxConcurrentReconciles: 3}). - //WithEventFilter(predicate.GenerationChangedPredicate{}). // Don't reconcile when status gets updated - //Owns(&formolv1alpha1.BackupSession{}). - Owns(&kbatch_beta1.CronJob{}). Complete(r) } diff --git a/controllers/backupconfiguration_controller.go~ b/controllers/backupconfiguration_controller.go~ new file mode 100644 index 0000000..b983514 --- /dev/null +++ b/controllers/backupconfiguration_controller.go~ @@ -0,0 +1,129 @@ +/* +Copyright 2023. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package controllers + +import ( + "context" + + "github.com/go-logr/logr" + //appsv1 "k8s.io/api/apps/v1" + //batchv1 "k8s.io/api/batch/v1" + //corev1 "k8s.io/api/core/v1" + "k8s.io/apimachinery/pkg/api/errors" + //metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" + ctrl "sigs.k8s.io/controller-runtime" + "sigs.k8s.io/controller-runtime/pkg/client" + "sigs.k8s.io/controller-runtime/pkg/log" + + formolv1alpha1 "github.com/desmo999r/formol/api/v1alpha1" + formolutils "github.com/desmo999r/formol/pkg/utils" +) + +// BackupConfigurationReconciler reconciles a BackupConfiguration object +type BackupConfigurationReconciler struct { + client.Client + Scheme *runtime.Scheme + Log logr.Logger + context.Context +} + +//+kubebuilder:rbac:groups=formol.desmojim.fr,resources=backupconfigurations,verbs=get;list;watch;create;update;patch;delete +//+kubebuilder:rbac:groups=formol.desmojim.fr,resources=backupconfigurations/status,verbs=get;update;patch +//+kubebuilder:rbac:groups=formol.desmojim.fr,resources=backupconfigurations/finalizers,verbs=update + +// Reconcile is part of the main kubernetes reconciliation loop which aims to +// move the current state of the cluster closer to the desired state. +// TODO(user): Modify the Reconcile function to compare the state specified by +// the BackupConfiguration object against the actual cluster state, and then +// perform operations to make the cluster state reflect the state specified by +// the user. 
+// +// For more details, check Reconcile and its Result here: +// - https://pkg.go.dev/sigs.k8s.io/controller-runtime@v0.13.1/pkg/reconcile +func (r *BackupConfigurationReconciler) Reconcile(ctx context.Context, req ctrl.Request) (ctrl.Result, error) { + r.Context = ctx + r.Log = log.FromContext(ctx) + + r.Log.V(1).Info("Enter Reconcile with req", "req", req, "reconciler", r) + + backupConf := formolv1alpha1.BackupConfiguration{} + err := r.Get(ctx, req.NamespacedName, &backupConf) + if err != nil { + if errors.IsNotFound(err) { + return ctrl.Result{}, nil + } + return ctrl.Result{}, err + } + + finalizerName := "finalizer.backupconfiguration.formol.desmojim.fr" + + if !backupConf.ObjectMeta.DeletionTimestamp.IsZero() { + r.Log.V(0).Info("backupconf being deleted", "backupconf", backupConf.ObjectMeta.Finalizers) + if formolutils.ContainsString(backupConf.ObjectMeta.Finalizers, finalizerName) { + _ = r.DeleteSidecar(backupConf) + _ = r.DeleteCronJob(backupConf) + backupConf.ObjectMeta.Finalizers = formolutils.RemoveString(backupConf.ObjectMeta.Finalizers, finalizerName) + if err := r.Update(ctx, &backupConf); err != nil { + r.Log.Error(err, "unable to remove finalizer") + return ctrl.Result{}, err + } + } + // We have been deleted. Return here + r.Log.V(0).Info("backupconf deleted", "backupconf", backupConf.Name) + return ctrl.Result{}, nil + } + + // Add finalizer + if !formolutils.ContainsString(backupConf.ObjectMeta.Finalizers, finalizerName) { + r.Log.V(0).Info("adding finalizer", "backupconf", backupConf) + backupConf.ObjectMeta.Finalizers = append(backupConf.ObjectMeta.Finalizers, finalizerName) + if err := r.Update(ctx, &backupConf); err != nil { + r.Log.Error(err, "unable to append finalizer") + return ctrl.Result{}, err + } + // backupConf has been updated. Exit here. The reconciler will be called again so we can finish the job. 
+ return ctrl.Result{}, nil + } + + if err := r.AddCronJob(backupConf); err != nil { + return ctrl.Result{}, err + } else { + backupConf.Status.ActiveCronJob = true + } + + if err := r.AddSidecar(backupConf); err != nil { + r.Log.Error(err, "unable to add sidecar container") + return ctrl.Result{}, err + } else { + backupConf.Status.ActiveSidecar = true + } + + if err := r.Status().Update(ctx, &backupConf); err != nil { + r.Log.Error(err, "Unable to update BackupConfiguration status") + return ctrl.Result{}, err + } + + return ctrl.Result{}, nil +} + +// SetupWithManager sets up the controller with the Manager. +func (r *BackupConfigurationReconciler) SetupWithManager(mgr ctrl.Manager) error { + return ctrl.NewControllerManagedBy(mgr). + For(&formolv1alpha1.BackupConfiguration{}). + Complete(r) +} diff --git a/controllers/backupconfiguration_controller_cronjob.go b/controllers/backupconfiguration_controller_cronjob.go new file mode 100644 index 0000000..3a424fb --- /dev/null +++ b/controllers/backupconfiguration_controller_cronjob.go @@ -0,0 +1,103 @@ +package controllers + +import ( + formolv1alpha1 "github.com/desmo999r/formol/api/v1alpha1" + batchv1 "k8s.io/api/batch/v1" + corev1 "k8s.io/api/core/v1" + "k8s.io/apimachinery/pkg/api/errors" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + ctrl "sigs.k8s.io/controller-runtime" + "sigs.k8s.io/controller-runtime/pkg/client" +) + +func (r *BackupConfigurationReconciler) DeleteCronJob(backupConf formolv1alpha1.BackupConfiguration) error { + cronjob := &batchv1.CronJob{} + if err := r.Get(r.Context, client.ObjectKey{ + Namespace: backupConf.Namespace, + Name: "backup-" + backupConf.Name, + }, cronjob); err == nil { + r.Log.V(0).Info("Deleting cronjob", "cronjob", cronjob.Name) + return r.Delete(r.Context, cronjob) + } else { + return err + } +} + +func (r *BackupConfigurationReconciler) AddCronJob(backupConf formolv1alpha1.BackupConfiguration) error { + cronjob := &batchv1.CronJob{} + if err := r.Get(r.Context, 
client.ObjectKey{ + Namespace: backupConf.Namespace, + Name: "backup-" + backupConf.Name, + }, cronjob); err == nil { + r.Log.V(0).Info("there is already a cronjob") + var changed bool + if backupConf.Spec.Schedule != cronjob.Spec.Schedule { + r.Log.V(0).Info("cronjob schedule has changed", "old schedule", cronjob.Spec.Schedule, "new schedule", backupConf.Spec.Schedule) + cronjob.Spec.Schedule = backupConf.Spec.Schedule + changed = true + } + if backupConf.Spec.Suspend != nil && backupConf.Spec.Suspend != cronjob.Spec.Suspend { + r.Log.V(0).Info("cronjob suspend has changed", "before", cronjob.Spec.Suspend, "new", backupConf.Spec.Suspend) + cronjob.Spec.Suspend = backupConf.Spec.Suspend + changed = true + } + if changed == true { + if err := r.Update(r.Context, cronjob); err != nil { + r.Log.Error(err, "unable to update cronjob definition") + return err + } + backupConf.Status.Suspended = *backupConf.Spec.Suspend + } + return nil + } else if errors.IsNotFound(err) == false { + r.Log.Error(err, "something went wrong") + return err + } + + cronjob = &batchv1.CronJob{ + ObjectMeta: metav1.ObjectMeta{ + Name: "backup-" + backupConf.Name, + Namespace: backupConf.Namespace, + }, + Spec: batchv1.CronJobSpec{ + Suspend: backupConf.Spec.Suspend, + Schedule: backupConf.Spec.Schedule, + JobTemplate: batchv1.JobTemplateSpec{ + Spec: batchv1.JobSpec{ + Template: corev1.PodTemplateSpec{ + Spec: corev1.PodSpec{ + RestartPolicy: corev1.RestartPolicyOnFailure, + ServiceAccountName: "backupsession-creator", + Containers: []corev1.Container{ + corev1.Container{ + Name: "job-createbackupsession-" + backupConf.Name, + Image: backupConf.Spec.Image, + Args: []string{ + "backupsession", + "create", + "--namespace", + backupConf.Namespace, + "--name", + backupConf.Name, + }, + }, + }, + }, + }, + }, + }, + }, + } + if err := ctrl.SetControllerReference(&backupConf, cronjob, r.Scheme); err != nil { + r.Log.Error(err, "unable to set controller on job", "cronjob", cronjob, "backupconf", 
backupConf) + return err + } + r.Log.V(0).Info("creating the cronjob") + if err := r.Create(r.Context, cronjob); err != nil { + r.Log.Error(err, "unable to create the cronjob", "cronjob", cronjob) + return err + } else { + backupConf.Status.Suspended = *backupConf.Spec.Suspend + return nil + } +} diff --git a/controllers/backupconfiguration_controller_cronjob.go~ b/controllers/backupconfiguration_controller_cronjob.go~ new file mode 100644 index 0000000..459d613 --- /dev/null +++ b/controllers/backupconfiguration_controller_cronjob.go~ @@ -0,0 +1,102 @@ +package controllers + +import ( + formolv1alpha1 "github.com/desmo999r/formol/api/v1alpha1" + batchv1 "k8s.io/api/batch/v1" + corev1 "k8s.io/api/core/v1" + "k8s.io/apimachinery/pkg/api/errors" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + ctrl "sigs.k8s.io/controller-runtime" + "sigs.k8s.io/controller-runtime/pkg/client" +) + +func (r *BackupConfigurationReconciler) DeleteCronJob(backupConf formolv1alpha1.BackupConfiguration) error { + cronjob := &batchv1.CronJob{} + if err := r.Get(r.Context, client.ObjectKey{ + Namespace: backupConf.Namespace, + Name: "backup-" + backupConf.Name, + }, cronjob); err == nil { + r.Log.V(0).Info("Deleting cronjob", "cronjob", cronjob.Name) + return r.Delete(r.Context, cronjob) + } else { + return err + } +} + +func (r *BackupConfigurationReconciler) AddCronJob(backupConf formolv1alpha1.BackupConfiguration) error { + cronjob := &batchv1.CronJob{} + if err := r.Get(r.Context, client.ObjectKey{ + Namespace: backupConf.Namespace, + Name: "backup-" + backupConf.Name, + }, cronjob); err == nil { + r.Log.V(0).Info("there is already a cronjob") + var changed bool + if backupConf.Spec.Schedule != cronjob.Spec.Schedule { + r.Log.V(0).Info("cronjob schedule has changed", "old schedule", cronjob.Spec.Schedule, "new schedule", backupConf.Spec.Schedule) + cronjob.Spec.Schedule = backupConf.Spec.Schedule + changed = true + } + if backupConf.Spec.Suspend != nil && backupConf.Spec.Suspend != 
cronjob.Spec.Suspend { + r.Log.V(0).Info("cronjob suspend has changed", "before", cronjob.Spec.Suspend, "new", backupConf.Spec.Suspend) + cronjob.Spec.Suspend = backupConf.Spec.Suspend + changed = true + } + if changed == true { + if err := r.Update(r.Context, cronjob); err != nil { + r.Log.Error(err, "unable to update cronjob definition") + return err + } + backupConf.Status.Suspended = *backupConf.Spec.Suspend + } + return nil + } else if errors.IsNotFound(err) == false { + r.Log.Error(err, "something went wrong") + return err + } + + cronjob = &batchv1.CronJob{ + ObjectMeta: metav1.ObjectMeta{ + Name: "backup-" + backupConf.Name, + Namespace: backupConf.Namespace, + }, + Spec: batchv1.CronJobSpec{ + Suspend: backupConf.Spec.Suspend, + Schedule: backupConf.Spec.Schedule, + JobTemplate: batchv1.JobTemplateSpec{ + Spec: batchv1.JobSpec{ + Template: corev1.PodTemplateSpec{ + Spec: corev1.PodSpec{ + RestartPolicy: corev1.RestartPolicyOnFailure, + ServiceAccountName: "backupsession-creator", + Containers: []corev1.Container{ + corev1.Container{ + Name: "job-createbackupsession-" + backupConf.Name, + Image: backupConf.Spec.Image, + Args: []string{ + "backupsession", + "create", + "--namespace", + backupConf.Namespace, + "--name", + backupConf.Name, + }, + }, + }, + }, + }, + }, + }, + }, + } + if err := ctrl.SetControllerReference(&backupConf, cronjob, r.Scheme); err != nil { + r.Log.Error(err, "unable to set controller on job", "cronjob", cronjob, "backupconf", backupConf) + return err + } + r.Log.V(0).Info("creating the cronjob") + if err := r.Create(r.Context, cronjob); err != nil { + r.Log.Error(err, "unable to create the cronjob", "cronjob", cronjob) + return err + } else { + return nil + } +} diff --git a/controllers/backupconfiguration_controller_sidecar.go b/controllers/backupconfiguration_controller_sidecar.go new file mode 100644 index 0000000..688e339 --- /dev/null +++ b/controllers/backupconfiguration_controller_sidecar.go @@ -0,0 +1,137 @@ +package 
controllers + +import ( + appsv1 "k8s.io/api/apps/v1" + corev1 "k8s.io/api/core/v1" + "sigs.k8s.io/controller-runtime/pkg/client" + + formolv1alpha1 "github.com/desmo999r/formol/api/v1alpha1" +) + +func (r *BackupConfigurationReconciler) DeleteSidecar(backupConf formolv1alpha1.BackupConfiguration) error { + removeTags := func(podSpec *corev1.PodSpec, target formolv1alpha1.Target) { + for i, container := range podSpec.Containers { + for _, targetContainer := range target.Containers { + if targetContainer.Name == container.Name { + if container.Env[len(container.Env)-1].Name == formolv1alpha1.TARGETCONTAINER_TAG { + podSpec.Containers[i].Env = container.Env[:len(container.Env)-1] + } else { + for j, e := range container.Env { + if e.Name == formolv1alpha1.TARGETCONTAINER_TAG { + container.Env[j] = container.Env[len(container.Env)-1] + podSpec.Containers[i].Env = container.Env[:len(container.Env)-1] + break + } + } + } + } + } + } + } + for _, target := range backupConf.Spec.Targets { + switch target.TargetKind { + case formolv1alpha1.Deployment: + deployment := &appsv1.Deployment{} + if err := r.Get(r.Context, client.ObjectKey{ + Namespace: backupConf.Namespace, + Name: target.TargetName, + }, deployment); err != nil { + r.Log.Error(err, "cannot get deployment", "Deployment", target.TargetName) + return err + } + restoreContainers := []corev1.Container{} + for _, container := range deployment.Spec.Template.Spec.Containers { + if container.Name == formolv1alpha1.SIDECARCONTAINER_NAME { + continue + } + restoreContainers = append(restoreContainers, container) + } + deployment.Spec.Template.Spec.Containers = restoreContainers + removeTags(&deployment.Spec.Template.Spec, target) + if err := r.Update(r.Context, deployment); err != nil { + r.Log.Error(err, "unable to update deployment", "deployment", deployment) + return err + } + } + } + + return nil +} + +func (r *BackupConfigurationReconciler) AddSidecar(backupConf formolv1alpha1.BackupConfiguration) error { + // Go 
through all the 'targets' + // the backupType: Online needs a sidecar container for every single listed 'container' + // if the backupType is something else than Online, the 'container' will still need a sidecar + // if it has 'steps' + addTags := func(podSpec *corev1.PodSpec, target formolv1alpha1.Target) bool { + for i, container := range podSpec.Containers { + if container.Name == formolv1alpha1.SIDECARCONTAINER_NAME { + return false + } + for _, targetContainer := range target.Containers { + if targetContainer.Name == container.Name { + podSpec.Containers[i].Env = append(container.Env, corev1.EnvVar{ + Name: formolv1alpha1.TARGETCONTAINER_TAG, + Value: container.Name, + }) + } + } + } + return true + } + + for _, target := range backupConf.Spec.Targets { + addSidecar := false + for _, targetContainer := range target.Containers { + if len(targetContainer.Steps) > 0 { + addSidecar = true + } + } + if target.BackupType == formolv1alpha1.OnlineKind { + addSidecar = true + } + if addSidecar { + repo := formolv1alpha1.Repo{} + if err := r.Get(r.Context, client.ObjectKey{ + Namespace: backupConf.Namespace, + Name: backupConf.Spec.Repository, + }, &repo); err != nil { + r.Log.Error(err, "unable to get Repo") + return err + } + r.Log.V(1).Info("Got Repository", "repo", repo) + env := repo.GetResticEnv(backupConf) + sideCar := corev1.Container{ + Name: formolv1alpha1.SIDECARCONTAINER_NAME, + Image: backupConf.Spec.Image, + Args: []string{"backupsession", "server"}, + Env: append(env, corev1.EnvVar{ + Name: formolv1alpha1.TARGET_NAME, + Value: target.TargetName, + }), + VolumeMounts: []corev1.VolumeMount{}, + } + switch target.TargetKind { + case formolv1alpha1.Deployment: + deployment := &appsv1.Deployment{} + if err := r.Get(r.Context, client.ObjectKey{ + Namespace: backupConf.Namespace, + Name: target.TargetName, + }, deployment); err != nil { + r.Log.Error(err, "cannot get deployment", "Deployment", target.TargetName) + return err + } + if 
addTags(&deployment.Spec.Template.Spec, target) { + deployment.Spec.Template.Spec.Containers = append(deployment.Spec.Template.Spec.Containers, sideCar) + r.Log.V(1).Info("Updating deployment", "deployment", deployment, "containers", deployment.Spec.Template.Spec.Containers) + if err := r.Update(r.Context, deployment); err != nil { + r.Log.Error(err, "cannot update deployment", "Deployment", deployment) + return err + } + } + } + } + } + + return nil +} diff --git a/controllers/backupconfiguration_controller_sidecar.go~ b/controllers/backupconfiguration_controller_sidecar.go~ new file mode 100644 index 0000000..817691b --- /dev/null +++ b/controllers/backupconfiguration_controller_sidecar.go~ @@ -0,0 +1,134 @@ +package controllers + +import ( + appsv1 "k8s.io/api/apps/v1" + corev1 "k8s.io/api/core/v1" + "sigs.k8s.io/controller-runtime/pkg/client" + + formolv1alpha1 "github.com/desmo999r/formol/api/v1alpha1" +) + +func (r *BackupConfigurationReconciler) DeleteSidecar(backupConf formolv1alpha1.BackupConfiguration) error { + removeTags := func(podSpec *corev1.PodSpec, target formolv1alpha1.Target) { + for i, container := range podSpec.Containers { + for _, targetContainer := range target.Containers { + if targetContainer.Name == container.Name { + if container.Env[len(container.Env)-1].Name == formolv1alpha1.TARGETCONTAINER_TAG { + podSpec.Containers[i].Env = container.Env[:len(container.Env)-1] + } else { + for j, e := range container.Env { + if e.Name == formolv1alpha1.TARGETCONTAINER_TAG { + container.Env[j] = container.Env[len(container.Env)-1] + podSpec.Containers[i].Env = container.Env[:len(container.Env)-1] + break + } + } + } + } + } + } + } + for _, target := range backupConf.Spec.Targets { + switch target.TargetKind { + case formolv1alpha1.Deployment: + deployment := &appsv1.Deployment{} + if err := r.Get(r.Context, client.ObjectKey{ + Namespace: backupConf.Namespace, + Name: target.TargetName, + }, deployment); err != nil { + r.Log.Error(err, "cannot get 
deployment", "Deployment", target.TargetName) + return err + } + restoreContainers := []corev1.Container{} + for _, container := range deployment.Spec.Template.Spec.Containers { + if container.Name == formolv1alpha1.SIDECARCONTAINER_NAME { + continue + } + restoreContainers = append(restoreContainers, container) + } + deployment.Spec.Template.Spec.Containers = restoreContainers + removeTags(&deployment.Spec.Template.Spec, target) + return r.Update(r.Context, deployment) + } + } + + return nil +} + +func (r *BackupConfigurationReconciler) AddSidecar(backupConf formolv1alpha1.BackupConfiguration) error { + // Go through all the 'targets' + // the backupType: Online needs a sidecar container for every single listed 'container' + // if the backupType is something else than Online, the 'container' will still need a sidecar + // if it has 'steps' + addTags := func(podSpec *corev1.PodSpec, target formolv1alpha1.Target) bool { + for i, container := range podSpec.Containers { + if container.Name == formolv1alpha1.SIDECARCONTAINER_NAME { + return false + } + for _, targetContainer := range target.Containers { + if targetContainer.Name == container.Name { + podSpec.Containers[i].Env = append(container.Env, corev1.EnvVar{ + Name: formolv1alpha1.TARGETCONTAINER_TAG, + Value: container.Name, + }) + } + } + } + return true + } + + for _, target := range backupConf.Spec.Targets { + addSidecar := false + for _, targetContainer := range target.Containers { + if len(targetContainer.Steps) > 0 { + addSidecar = true + } + } + if target.BackupType == formolv1alpha1.OnlineKind { + addSidecar = true + } + if addSidecar { + repo := formolv1alpha1.Repo{} + if err := r.Get(r.Context, client.ObjectKey{ + Namespace: backupConf.Namespace, + Name: backupConf.Spec.Repository, + }, &repo); err != nil { + r.Log.Error(err, "unable to get Repo") + return err + } + r.Log.V(1).Info("Got Repository", "repo", repo) + env := repo.GetResticEnv(backupConf) + sideCar := corev1.Container{ + Name: 
formolv1alpha1.SIDECARCONTAINER_NAME, + Image: backupConf.Spec.Image, + Args: []string{"backupsession", "server"}, + Env: append(env, corev1.EnvVar{ + Name: formolv1alpha1.TARGET_NAME, + Value: target.TargetName, + }), + VolumeMounts: []corev1.VolumeMount{}, + } + switch target.TargetKind { + case formolv1alpha1.Deployment: + deployment := &appsv1.Deployment{} + if err := r.Get(r.Context, client.ObjectKey{ + Namespace: backupConf.Namespace, + Name: target.TargetName, + }, deployment); err != nil { + r.Log.Error(err, "cannot get deployment", "Deployment", target.TargetName) + return err + } + if addTags(&deployment.Spec.Template.Spec, target) { + deployment.Spec.Template.Spec.Containers = append(deployment.Spec.Template.Spec.Containers, sideCar) + r.Log.V(1).Info("Updating deployment", "deployment", deployment, "containers", deployment.Spec.Template.Spec.Containers) + if err := r.Update(r.Context, deployment); err != nil { + r.Log.Error(err, "cannot update deployment", "Deployment", deployment) + return err + } + } + } + } + } + + return nil +} diff --git a/controllers/backupconfiguration_controller_test.go b/controllers/backupconfiguration_controller_test.go index 1bd6ac9..406027f 100644 --- a/controllers/backupconfiguration_controller_test.go +++ b/controllers/backupconfiguration_controller_test.go @@ -1,67 +1,64 @@ +/* +Copyright 2023. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + package controllers import ( "context" - //"k8s.io/apimachinery/pkg/types" - //"reflect" - //"fmt" - "time" - - . "github.com/onsi/ginkgo" - . "github.com/onsi/gomega" - //batchv1 "k8s.io/api/batch/v1" formolv1alpha1 "github.com/desmo999r/formol/api/v1alpha1" - appsv1 "k8s.io/api/apps/v1" - batchv1beta1 "k8s.io/api/batch/v1beta1" - corev1 "k8s.io/api/core/v1" - //"k8s.io/apimachinery/pkg/api/errors" + . "github.com/onsi/ginkgo/v2" + . "github.com/onsi/gomega" + batchv1 "k8s.io/api/batch/v1" + //"time" + //appsv1 "k8s.io/api/apps/v1" + //corev1 "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/types" ) -var _ = Describe("Testing BackupConf controller", func() { - const ( - BCBackupConfName = "test-backupconf-controller" - ) +var _ = Describe("BackupConfiguration controller", func() { + const BACKUPCONF_NAME = "test-backupconf-controller" + var ( - key = types.NamespacedName{ - Name: BCBackupConfName, - Namespace: TestNamespace, - } + backupConf *formolv1alpha1.BackupConfiguration ctx = context.Background() - backupConf = &formolv1alpha1.BackupConfiguration{} + key = types.NamespacedName{ + Name: BACKUPCONF_NAME, + Namespace: NAMESPACE_NAME, + } ) BeforeEach(func() { backupConf = &formolv1alpha1.BackupConfiguration{ ObjectMeta: metav1.ObjectMeta{ - Name: BCBackupConfName, - Namespace: TestNamespace, + Name: BACKUPCONF_NAME, + Namespace: NAMESPACE_NAME, }, Spec: formolv1alpha1.BackupConfigurationSpec{ - Repository: TestRepoName, + Repository: REPO_NAME, Schedule: "1 * * * *", - Image: "desmo999r/formolcli:latest", + Image: "desmo999r/formolcli:v0.3.2", Targets: []formolv1alpha1.Target{ formolv1alpha1.Target{ - Kind: formolv1alpha1.SidecarKind, - Name: TestDeploymentName, - VolumeMounts: []corev1.VolumeMount{ - corev1.VolumeMount{ - Name: TestDataVolume, - MountPath: TestDataMountPath, - }, - }, - Paths: []string{ - TestDataMountPath, - }, - }, - formolv1alpha1.Target{ - Kind: formolv1alpha1.JobKind, - Name: 
TestBackupFuncName, - Steps: []formolv1alpha1.Step{ - formolv1alpha1.Step{ - Name: TestBackupFuncName, + BackupType: formolv1alpha1.OnlineKind, + TargetKind: formolv1alpha1.Deployment, + TargetName: DEPLOYMENT_NAME, + Containers: []formolv1alpha1.TargetContainer{ + formolv1alpha1.TargetContainer{ + Name: CONTAINER_NAME, }, }, }, @@ -69,7 +66,8 @@ var _ = Describe("Testing BackupConf controller", func() { }, } }) - Context("Creating a backupconf", func() { + + Context("Creating a BackupConf", func() { JustBeforeEach(func() { Eventually(func() error { return k8sClient.Create(ctx, backupConf) @@ -81,97 +79,87 @@ var _ = Describe("Testing BackupConf controller", func() { It("Has a schedule", func() { realBackupConf := &formolv1alpha1.BackupConfiguration{} Eventually(func() bool { - err := k8sClient.Get(ctx, key, realBackupConf) - if err != nil { + if err := k8sClient.Get(ctx, key, realBackupConf); err != nil { return false } return true }, timeout, interval).Should(BeTrue()) Expect(realBackupConf.Spec.Schedule).Should(Equal("1 * * * *")) - Expect(realBackupConf.Spec.Targets[0].Retry).Should(Equal(2)) }) - It("Should also create a CronJob", func() { - cronJob := &batchv1beta1.CronJob{} - Eventually(func() bool { - err := k8sClient.Get(ctx, types.NamespacedName{ - Name: "backup-" + BCBackupConfName, - Namespace: TestNamespace, - }, cronJob) - return err == nil - }, timeout, interval).Should(BeTrue()) - Expect(cronJob.Spec.Schedule).Should(Equal("1 * * * *")) - }) - It("Should also create a sidecar container", func() { - realDeployment := &appsv1.Deployment{} - Eventually(func() (int, error) { - err := k8sClient.Get(ctx, types.NamespacedName{ - Name: TestDeploymentName, - Namespace: TestNamespace, - }, realDeployment) - if err != nil { - return 0, err - } - return len(realDeployment.Spec.Template.Spec.Containers), nil - }, timeout, interval).Should(Equal(2)) - }) - It("Should also update the CronJob", func() { + It("Should create a CronJob", func() { realBackupConf := 
&formolv1alpha1.BackupConfiguration{} - time.Sleep(300 * time.Millisecond) Eventually(func() bool { - err := k8sClient.Get(ctx, key, realBackupConf) - if err != nil { + if err := k8sClient.Get(ctx, key, realBackupConf); err != nil { + return false + } + return realBackupConf.Status.ActiveCronJob + }, timeout, interval).Should(BeTrue()) + cronJob := &batchv1.CronJob{} + Eventually(func() bool { + if err := k8sClient.Get(ctx, types.NamespacedName{ + Name: "backup-" + BACKUPCONF_NAME, + Namespace: NAMESPACE_NAME, + }, cronJob); err != nil { return false } return true }, timeout, interval).Should(BeTrue()) + Expect(cronJob.Spec.Schedule).Should(Equal("1 * * * *")) + }) + It("Should update the CronJob", func() { + realBackupConf := &formolv1alpha1.BackupConfiguration{} + Eventually(func() bool { + if err := k8sClient.Get(ctx, key, realBackupConf); err != nil { + return false + } + return realBackupConf.Status.ActiveCronJob + }, timeout, interval).Should(BeTrue()) realBackupConf.Spec.Schedule = "1 0 * * *" suspend := true realBackupConf.Spec.Suspend = &suspend Expect(k8sClient.Update(ctx, realBackupConf)).Should(Succeed()) - cronJob := &batchv1beta1.CronJob{} - Eventually(func() (string, error) { - err := k8sClient.Get(ctx, types.NamespacedName{ - Name: "backup-" + BCBackupConfName, - Namespace: TestNamespace, - }, cronJob) - if err != nil { - return "", err + cronJob := &batchv1.CronJob{} + Eventually(func() string { + if err := k8sClient.Get(ctx, types.NamespacedName{ + Name: "backup-" + BACKUPCONF_NAME, + Namespace: NAMESPACE_NAME, + }, cronJob); err != nil { + return "" } - return cronJob.Spec.Schedule, nil + return cronJob.Spec.Schedule }, timeout, interval).Should(Equal("1 0 * * *")) - Eventually(func() (bool, error) { - err := k8sClient.Get(ctx, types.NamespacedName{ - Name: "backup-" + BCBackupConfName, - Namespace: TestNamespace, - }, cronJob) - if err != nil { - return false, err - } - return *cronJob.Spec.Suspend == true, nil - }, timeout, 
interval).Should(BeTrue()) + Expect(*cronJob.Spec.Suspend).Should(BeTrue()) }) }) - Context("Deleting a backupconf", func() { + Context("Deleting a BackupConf", func() { JustBeforeEach(func() { Eventually(func() error { return k8sClient.Create(ctx, backupConf) }, timeout, interval).Should(Succeed()) }) - It("Should also delete the sidecar container", func() { - Expect(k8sClient.Delete(ctx, backupConf)).Should(Succeed()) - realDeployment := &appsv1.Deployment{} - Eventually(func() (int, error) { - err := k8sClient.Get(ctx, types.NamespacedName{ - Name: TestDeploymentName, - Namespace: TestNamespace, - }, realDeployment) - if err != nil { - return 0, err + It("Should delete the CronJob", func() { + cronJob := &batchv1.CronJob{} + Eventually(func() bool { + if err := k8sClient.Get(ctx, types.NamespacedName{ + Name: "backup-" + BACKUPCONF_NAME, + Namespace: NAMESPACE_NAME, + }, cronJob); err != nil { + return false } - return len(realDeployment.Spec.Template.Spec.Containers), nil - }, timeout, interval).Should(Equal(1)) + return true + }, timeout, interval).Should(BeTrue()) + By("The CronJob has been created. Now deleting the BackupConfiguration") + Expect(k8sClient.Delete(ctx, backupConf)).Should(Succeed()) + Eventually(func() bool { + if err := k8sClient.Get(ctx, types.NamespacedName{ + Name: "backup-" + BACKUPCONF_NAME, + Namespace: NAMESPACE_NAME, + }, cronJob); err != nil { + return false + } + return true + }, timeout, interval).Should(BeFalse()) }) }) - }) diff --git a/controllers/backupconfiguration_controller_test.go~ b/controllers/backupconfiguration_controller_test.go~ new file mode 100644 index 0000000..44eb982 --- /dev/null +++ b/controllers/backupconfiguration_controller_test.go~ @@ -0,0 +1,165 @@ +/* +Copyright 2023. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package controllers + +import ( + "context" + formolv1alpha1 "github.com/desmo999r/formol/api/v1alpha1" + . "github.com/onsi/ginkgo/v2" + . "github.com/onsi/gomega" + batchv1 "k8s.io/api/batch/v1" + //"time" + //appsv1 "k8s.io/api/apps/v1" + //corev1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/types" +) + +var _ = Describe("BackupConfiguration controller", func() { + const BACKUPCONF_NAME = "test-backupconf-controller" + + var ( + backupConf *formolv1alpha1.BackupConfiguration + ctx = context.Background() + key = types.NamespacedName{ + Name: BACKUPCONF_NAME, + Namespace: NAMESPACE_NAME, + } + ) + + BeforeEach(func() { + backupConf = &formolv1alpha1.BackupConfiguration{ + ObjectMeta: metav1.ObjectMeta{ + Name: BACKUPCONF_NAME, + Namespace: NAMESPACE_NAME, + }, + Spec: formolv1alpha1.BackupConfigurationSpec{ + Repository: REPO_NAME, + Schedule: "1 * * * *", + Image: "desmo999r/formolcli:v0.3.2", + Targets: []formolv1alpha1.Target{ + formolv1alpha1.Target{ + BackupType: formolv1alpha1.OnlineKind, + TargetKind: formolv1alpha1.Deployment, + TargetName: DEPLOYMENT_NAME, + Containers: []formolv1alpha1.TargetContainer{ + formolv1alpha1.Container{ + Name: CONTAINER_NAME, + }, + }, + }, + }, + }, + } + }) + + Context("Creating a BackupConf", func() { + JustBeforeEach(func() { + Eventually(func() error { + return k8sClient.Create(ctx, backupConf) + }, timeout, interval).Should(Succeed()) + }) + AfterEach(func() { + Expect(k8sClient.Delete(ctx, backupConf)).Should(Succeed()) + }) + It("Has a schedule", func() { + 
realBackupConf := &formolv1alpha1.BackupConfiguration{} + Eventually(func() bool { + if err := k8sClient.Get(ctx, key, realBackupConf); err != nil { + return false + } + return true + }, timeout, interval).Should(BeTrue()) + Expect(realBackupConf.Spec.Schedule).Should(Equal("1 * * * *")) + }) + It("Should create a CronJob", func() { + realBackupConf := &formolv1alpha1.BackupConfiguration{} + Eventually(func() bool { + if err := k8sClient.Get(ctx, key, realBackupConf); err != nil { + return false + } + return realBackupConf.Status.ActiveCronJob + }, timeout, interval).Should(BeTrue()) + cronJob := &batchv1.CronJob{} + Eventually(func() bool { + if err := k8sClient.Get(ctx, types.NamespacedName{ + Name: "backup-" + BACKUPCONF_NAME, + Namespace: NAMESPACE_NAME, + }, cronJob); err != nil { + return false + } + return true + }, timeout, interval).Should(BeTrue()) + Expect(cronJob.Spec.Schedule).Should(Equal("1 * * * *")) + }) + It("Should update the CronJob", func() { + realBackupConf := &formolv1alpha1.BackupConfiguration{} + Eventually(func() bool { + if err := k8sClient.Get(ctx, key, realBackupConf); err != nil { + return false + } + return realBackupConf.Status.ActiveCronJob + }, timeout, interval).Should(BeTrue()) + realBackupConf.Spec.Schedule = "1 0 * * *" + suspend := true + realBackupConf.Spec.Suspend = &suspend + Expect(k8sClient.Update(ctx, realBackupConf)).Should(Succeed()) + cronJob := &batchv1.CronJob{} + Eventually(func() string { + if err := k8sClient.Get(ctx, types.NamespacedName{ + Name: "backup-" + BACKUPCONF_NAME, + Namespace: NAMESPACE_NAME, + }, cronJob); err != nil { + return "" + } + return cronJob.Spec.Schedule + }, timeout, interval).Should(Equal("1 0 * * *")) + Expect(*cronJob.Spec.Suspend).Should(BeTrue()) + }) + }) + Context("Deleting a BackupConf", func() { + JustBeforeEach(func() { + Eventually(func() error { + return k8sClient.Create(ctx, backupConf) + }, timeout, interval).Should(Succeed()) + }) + It("Should delete the CronJob", func() { 
+ cronJob := &batchv1.CronJob{} + Eventually(func() bool { + if err := k8sClient.Get(ctx, types.NamespacedName{ + Name: "backup-" + BACKUPCONF_NAME, + Namespace: NAMESPACE_NAME, + }, cronJob); err != nil { + return false + } + return true + }, timeout, interval).Should(BeTrue()) + By("The CronJob has been created. Now deleting the BackupConfiguration") + Expect(k8sClient.Delete(ctx, backupConf)).Should(Succeed()) + Eventually(func() bool { + if err := k8sClient.Get(ctx, types.NamespacedName{ + Name: "backup-" + BACKUPCONF_NAME, + Namespace: NAMESPACE_NAME, + }, cronJob); err != nil { + return false + } + return true + }, timeout, interval).Should(BeFalse()) + + }) + }) +}) diff --git a/controllers/backupsession_controller.go b/controllers/backupsession_controller.go index 029a15c..79ba0c3 100644 --- a/controllers/backupsession_controller.go +++ b/controllers/backupsession_controller.go @@ -1,5 +1,5 @@ /* - +Copyright 2023. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. 
@@ -18,458 +18,49 @@ package controllers import ( "context" - "fmt" - "sort" - "strings" - "time" "github.com/go-logr/logr" - batchv1 "k8s.io/api/batch/v1" - corev1 "k8s.io/api/core/v1" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/apimachinery/pkg/fields" "k8s.io/apimachinery/pkg/runtime" ctrl "sigs.k8s.io/controller-runtime" "sigs.k8s.io/controller-runtime/pkg/client" - "sigs.k8s.io/controller-runtime/pkg/controller/controllerutil" - "sigs.k8s.io/controller-runtime/pkg/reconcile" + "sigs.k8s.io/controller-runtime/pkg/log" formolv1alpha1 "github.com/desmo999r/formol/api/v1alpha1" - formolutils "github.com/desmo999r/formol/pkg/utils" -) - -const ( - sessionState string = ".metadata.state" - finalizerName string = "finalizer.backupsession.formol.desmojim.fr" - JOBTTL int32 = 7200 ) // BackupSessionReconciler reconciles a BackupSession object type BackupSessionReconciler struct { client.Client - Log logr.Logger Scheme *runtime.Scheme + Log logr.Logger + context.Context } -var _ reconcile.Reconciler = &BackupSessionReconciler{} +//+kubebuilder:rbac:groups=formol.desmojim.fr,resources=backupsessions,verbs=get;list;watch;create;update;patch;delete +//+kubebuilder:rbac:groups=formol.desmojim.fr,resources=backupsessions/status,verbs=get;update;patch +//+kubebuilder:rbac:groups=formol.desmojim.fr,resources=backupsessions/finalizers,verbs=update -// +kubebuilder:rbac:groups=formol.desmojim.fr,resources=backupsessions,verbs=get;list;watch;create;update;patch;delete -// +kubebuilder:rbac:groups=formol.desmojim.fr,resources=backupsessions/status,verbs=get;update;patch;create;delete -// +kubebuilder:rbac:groups=formol.desmojim.fr,resources=functions,verbs=get;list;watch -// +kubebuilder:rbac:groups=batch,resources=jobs,verbs=get;list;create;update;patch;delete;watch -// +kubebuilder:rbac:groups=coordination.k8s.io,resources=leases,verbs=get;list;create;update +// Reconcile is part of the main kubernetes reconciliation loop which aims to +// move the current state of 
the cluster closer to the desired state. +// TODO(user): Modify the Reconcile function to compare the state specified by +// the BackupSession object against the actual cluster state, and then +// perform operations to make the cluster state reflect the state specified by +// the user. +// +// For more details, check Reconcile and its Result here: +// - https://pkg.go.dev/sigs.k8s.io/controller-runtime@v0.13.1/pkg/reconcile +func (r *BackupSessionReconciler) Reconcile(ctx context.Context, req ctrl.Request) (ctrl.Result, error) { + r.Log = log.FromContext(ctx) + r.Context = ctx -func (r *BackupSessionReconciler) Reconcile(ctx context.Context, req reconcile.Request) (reconcile.Result, error) { - log := r.Log.WithValues("backupsession", req.NamespacedName) + r.Log.V(1).Info("Enter Reconcile with req", "req", req, "reconciler", r) - backupSession := &formolv1alpha1.BackupSession{} - if err := r.Get(ctx, req.NamespacedName, backupSession); err != nil { - log.Error(err, "unable to get backupsession") - return reconcile.Result{}, client.IgnoreNotFound(err) - } - backupConf := &formolv1alpha1.BackupConfiguration{} - if err := r.Get(ctx, client.ObjectKey{ - Namespace: backupSession.Namespace, - Name: backupSession.Spec.Ref.Name, - }, backupConf); err != nil { - log.Error(err, "unable to get backupConfiguration") - return reconcile.Result{}, client.IgnoreNotFound(err) - } - - // helper functions - // is there a backup operation ongoing - isBackupOngoing := func() bool { - backupSessionList := &formolv1alpha1.BackupSessionList{} - if err := r.List(ctx, backupSessionList, client.InNamespace(backupConf.Namespace), client.MatchingFieldsSelector{Selector: fields.SelectorFromSet(fields.Set{sessionState: "Running"})}); err != nil { - log.Error(err, "unable to get backupsessionlist") - return true - } - return len(backupSessionList.Items) > 0 - } - - // delete session specific backup resources - deleteExternalResources := func() error { - log := 
r.Log.WithValues("deleteExternalResources", backupSession.Name) - // Gather information from the repo - repo := &formolv1alpha1.Repo{} - if err := r.Get(ctx, client.ObjectKey{ - Namespace: backupConf.Namespace, - Name: backupConf.Spec.Repository, - }, repo); err != nil { - log.Error(err, "unable to get Repo from BackupConfiguration") - return err - } - env := formolutils.ConfigureResticEnvVar(backupConf, repo) - // container that will delete the restic snapshot(s) matching the backupsession - deleteSnapshots := []corev1.Container{} - for _, target := range backupSession.Status.Targets { - if target.SessionState == formolv1alpha1.Success { - deleteSnapshots = append(deleteSnapshots, corev1.Container{ - Name: target.Name, - Image: backupConf.Spec.Image, - Args: []string{"snapshot", "delete", "--snapshot-id", target.SnapshotId}, - Env: env, - }) - } - } - // create a job to delete the restic snapshot(s) with the backupsession name tag - if len(deleteSnapshots) > 0 { - jobTtl := JOBTTL - job := &batchv1.Job{ - ObjectMeta: metav1.ObjectMeta{ - GenerateName: fmt.Sprintf("delete-%s-", backupSession.Name), - Namespace: backupSession.Namespace, - }, - Spec: batchv1.JobSpec{ - TTLSecondsAfterFinished: &jobTtl, - Template: corev1.PodTemplateSpec{ - Spec: corev1.PodSpec{ - InitContainers: deleteSnapshots[1:], - Containers: []corev1.Container{deleteSnapshots[0]}, - RestartPolicy: corev1.RestartPolicyOnFailure, - }, - }, - }, - } - log.V(0).Info("creating a job to delete restic snapshots") - if err := r.Create(ctx, job); err != nil { - log.Error(err, "unable to delete job", "job", job) - return err - } - } - return nil - } - - // create a backup job - createBackupJob := func(target formolv1alpha1.Target) error { - log := r.Log.WithValues("createbackupjob", target.Name) - ctx := context.Background() - backupSessionEnv := []corev1.EnvVar{ - corev1.EnvVar{ - Name: "TARGET_NAME", - Value: target.Name, - }, - corev1.EnvVar{ - Name: "BACKUPSESSION_NAME", - Value: backupSession.Name, - 
}, - corev1.EnvVar{ - Name: "BACKUPSESSION_NAMESPACE", - Value: backupSession.Namespace, - }, - } - - output := corev1.VolumeMount{ - Name: "output", - MountPath: "/output", - } - restic := corev1.Container{ - Name: "restic", - Image: backupConf.Spec.Image, - Args: []string{"volume", "backup", "--tag", backupSession.Name, "--path", "/output"}, - VolumeMounts: []corev1.VolumeMount{output}, - Env: backupSessionEnv, - } - log.V(1).Info("creating a tagged backup job", "container", restic) - // Gather information from the repo - repo := &formolv1alpha1.Repo{} - if err := r.Get(ctx, client.ObjectKey{ - Namespace: backupConf.Namespace, - Name: backupConf.Spec.Repository, - }, repo); err != nil { - log.Error(err, "unable to get Repo from BackupConfiguration") - return err - } - // S3 backing storage - restic.Env = append(restic.Env, formolutils.ConfigureResticEnvVar(backupConf, repo)...) - jobTtl := JOBTTL - job := &batchv1.Job{ - ObjectMeta: metav1.ObjectMeta{ - GenerateName: fmt.Sprintf("%s-%s-", backupSession.Name, target.Name), - Namespace: backupConf.Namespace, - }, - Spec: batchv1.JobSpec{ - TTLSecondsAfterFinished: &jobTtl, - Template: corev1.PodTemplateSpec{ - Spec: corev1.PodSpec{ - InitContainers: []corev1.Container{}, - Containers: []corev1.Container{restic}, - Volumes: []corev1.Volume{ - corev1.Volume{Name: "output"}, - }, - RestartPolicy: corev1.RestartPolicyOnFailure, - }, - }, - }, - } - for _, step := range target.Steps { - function := &formolv1alpha1.Function{} - if err := r.Get(ctx, client.ObjectKey{ - Namespace: backupConf.Namespace, - Name: step.Name, - }, function); err != nil { - log.Error(err, "unable to get function", "Function", step) - return err - } - function.Spec.Name = function.Name - function.Spec.Env = append(function.Spec.Env, backupSessionEnv...) 
- function.Spec.VolumeMounts = append(function.Spec.VolumeMounts, output) - job.Spec.Template.Spec.InitContainers = append(job.Spec.Template.Spec.InitContainers, function.Spec) - } - if err := ctrl.SetControllerReference(backupConf, job, r.Scheme); err != nil { - log.Error(err, "unable to set controller on job", "job", job, "backupconf", backupConf) - return err - } - log.V(0).Info("creating a backup job", "target", target) - if err := r.Create(ctx, job); err != nil { - log.Error(err, "unable to create job", "job", job) - return err - } - return nil - } - - // start the next task - startNextTask := func() (*formolv1alpha1.TargetStatus, error) { - nextTarget := len(backupSession.Status.Targets) - if nextTarget < len(backupConf.Spec.Targets) { - target := backupConf.Spec.Targets[nextTarget] - targetStatus := formolv1alpha1.TargetStatus{ - Name: target.Name, - Kind: target.Kind, - SessionState: formolv1alpha1.New, - StartTime: &metav1.Time{Time: time.Now()}, - Try: 1, - } - backupSession.Status.Targets = append(backupSession.Status.Targets, targetStatus) - switch target.Kind { - case formolv1alpha1.JobKind: - if err := createBackupJob(target); err != nil { - log.V(0).Info("unable to create task", "task", target) - targetStatus.SessionState = formolv1alpha1.Failure - return nil, err - } - } - return &targetStatus, nil - } else { - return nil, nil - } - } - - // cleanup existing backupsessions - cleanupSessions := func() { - backupSessionList := &formolv1alpha1.BackupSessionList{} - if err := r.List(ctx, backupSessionList, client.InNamespace(backupConf.Namespace), client.MatchingFieldsSelector{Selector: fields.SelectorFromSet(fields.Set{sessionState: string(formolv1alpha1.Success)})}); err != nil { - log.Error(err, "unable to get backupsessionlist") - return - } - if len(backupSessionList.Items) < 2 { - // Not enough backupSession to proceed - log.V(1).Info("Not enough successful backup jobs") - return - } - - sort.Slice(backupSessionList.Items, func(i, j int) bool { - 
return backupSessionList.Items[i].Status.StartTime.Time.Unix() > backupSessionList.Items[j].Status.StartTime.Time.Unix() - }) - - type KeepBackup struct { - Counter int32 - Last time.Time - } - - var lastBackups, dailyBackups, weeklyBackups, monthlyBackups, yearlyBackups KeepBackup - lastBackups.Counter = backupConf.Spec.Keep.Last - dailyBackups.Counter = backupConf.Spec.Keep.Daily - weeklyBackups.Counter = backupConf.Spec.Keep.Weekly - monthlyBackups.Counter = backupConf.Spec.Keep.Monthly - yearlyBackups.Counter = backupConf.Spec.Keep.Yearly - for _, session := range backupSessionList.Items { - if session.Spec.Ref.Name != backupConf.Name { - continue - } - deleteSession := true - keep := []string{} - if lastBackups.Counter > 0 { - log.V(1).Info("Keep backup", "last", session.Status.StartTime) - lastBackups.Counter-- - keep = append(keep, "last") - deleteSession = false - } - if dailyBackups.Counter > 0 { - if session.Status.StartTime.Time.YearDay() != dailyBackups.Last.YearDay() { - log.V(1).Info("Keep backup", "daily", session.Status.StartTime) - dailyBackups.Counter-- - dailyBackups.Last = session.Status.StartTime.Time - keep = append(keep, "daily") - deleteSession = false - } - } - if weeklyBackups.Counter > 0 { - if session.Status.StartTime.Time.Weekday().String() == "Sunday" && session.Status.StartTime.Time.YearDay() != weeklyBackups.Last.YearDay() { - log.V(1).Info("Keep backup", "weekly", session.Status.StartTime) - weeklyBackups.Counter-- - weeklyBackups.Last = session.Status.StartTime.Time - keep = append(keep, "weekly") - deleteSession = false - } - } - if monthlyBackups.Counter > 0 { - if session.Status.StartTime.Time.Day() == 1 && session.Status.StartTime.Time.Month() != monthlyBackups.Last.Month() { - log.V(1).Info("Keep backup", "monthly", session.Status.StartTime) - monthlyBackups.Counter-- - monthlyBackups.Last = session.Status.StartTime.Time - keep = append(keep, "monthly") - deleteSession = false - } - } - if yearlyBackups.Counter > 0 { - if 
session.Status.StartTime.Time.YearDay() == 1 && session.Status.StartTime.Time.Year() != yearlyBackups.Last.Year() { - log.V(1).Info("Keep backup", "yearly", session.Status.StartTime) - yearlyBackups.Counter-- - yearlyBackups.Last = session.Status.StartTime.Time - keep = append(keep, "yearly") - deleteSession = false - } - } - if deleteSession { - log.V(1).Info("Delete session", "delete", session.Status.StartTime) - if err := r.Delete(ctx, &session); err != nil { - log.Error(err, "unable to delete backupsession", "session", session.Name) - // we don't return anything, we keep going - } - } else { - session.Status.Keep = strings.Join(keep, ",") // + " " + time.Now().Format("2006 Jan 02 15:04:05 -0700 MST") - if err := r.Status().Update(ctx, &session); err != nil { - log.Error(err, "unable to update session status", "session", session) - } - } - } - } - // end helper functions - - log.V(0).Info("backupSession", "backupSession.ObjectMeta", backupSession.ObjectMeta, "backupSession.Status", backupSession.Status) - if backupSession.ObjectMeta.DeletionTimestamp.IsZero() { - switch backupSession.Status.SessionState { - case formolv1alpha1.New: - // Check if the finalizer has been registered - if !controllerutil.ContainsFinalizer(backupSession, finalizerName) { - controllerutil.AddFinalizer(backupSession, finalizerName) - // We update the BackupSession to add the finalizer - // Reconcile will be called again - // return now - err := r.Update(ctx, backupSession) - if err != nil { - log.Error(err, "unable to add finalizer") - } - return reconcile.Result{}, err - } - // Brand new backupsession - if isBackupOngoing() { - log.V(0).Info("There is an ongoing backup. Let's reschedule this operation") - return reconcile.Result{RequeueAfter: 30 * time.Second}, nil - } - // start the first task - backupSession.Status.SessionState = formolv1alpha1.Running - targetStatus, err := startNextTask() - if err != nil { - return reconcile.Result{}, err - } - log.V(0).Info("New backup. 
Start the first task", "task", targetStatus) - if err := r.Status().Update(ctx, backupSession); err != nil { - log.Error(err, "unable to update BackupSession status") - return reconcile.Result{}, err - } - case formolv1alpha1.Running: - // Backup ongoing. Check the status of the last task to decide what to do - currentTargetStatus := &backupSession.Status.Targets[len(backupSession.Status.Targets)-1] - switch currentTargetStatus.SessionState { - case formolv1alpha1.Running: - // The current task is still running. Nothing to do - log.V(0).Info("task is still running", "targetStatus", currentTargetStatus) - case formolv1alpha1.Success: - // The last task succeed. Let's try to start the next one - targetStatus, err := startNextTask() - log.V(0).Info("last task was a success. start a new one", "currentTargetStatus", currentTargetStatus, "targetStatus", targetStatus) - if err != nil { - return reconcile.Result{}, err - } - if targetStatus == nil { - // No more task to start. The backup is a success - backupSession.Status.SessionState = formolv1alpha1.Success - log.V(0).Info("Backup is successful. Let's try to do some cleanup") - cleanupSessions() - } - if err := r.Status().Update(ctx, backupSession); err != nil { - log.Error(err, "unable to update BackupSession status") - return reconcile.Result{}, err - } - case formolv1alpha1.Failure: - // last task failed. Try to run it again - currentTarget := backupConf.Spec.Targets[len(backupSession.Status.Targets)-1] - if currentTargetStatus.Try < currentTarget.Retry { - log.V(0).Info("last task was a failure. 
try again", "currentTargetStatus", currentTargetStatus) - currentTargetStatus.Try++ - currentTargetStatus.SessionState = formolv1alpha1.New - currentTargetStatus.StartTime = &metav1.Time{Time: time.Now()} - switch currentTarget.Kind { - case formolv1alpha1.JobKind: - if err := createBackupJob(currentTarget); err != nil { - log.V(0).Info("unable to create task", "task", currentTarget) - currentTargetStatus.SessionState = formolv1alpha1.Failure - return reconcile.Result{}, err - } - } - } else { - log.V(0).Info("task failed again and for the last time", "currentTargetStatus", currentTargetStatus) - backupSession.Status.SessionState = formolv1alpha1.Failure - } - if err := r.Status().Update(ctx, backupSession); err != nil { - log.Error(err, "unable to update BackupSession status") - return reconcile.Result{}, err - } - } - case formolv1alpha1.Success: - // Should never go there - case formolv1alpha1.Failure: - // The backup failed - case "": - // BackupSession has just been created - backupSession.Status.SessionState = formolv1alpha1.New - backupSession.Status.StartTime = &metav1.Time{Time: time.Now()} - if err := r.Status().Update(ctx, backupSession); err != nil { - log.Error(err, "unable to update backupSession") - return reconcile.Result{}, err - } - } - } else { - log.V(0).Info("backupsession being deleted", "backupsession", backupSession.Name) - if controllerutil.ContainsFinalizer(backupSession, finalizerName) { - if err := deleteExternalResources(); err != nil { - return reconcile.Result{}, err - } - } - controllerutil.RemoveFinalizer(backupSession, finalizerName) - if err := r.Update(ctx, backupSession); err != nil { - log.Error(err, "unable to remove finalizer") - return reconcile.Result{}, err - } - // We have been deleted. Return here - return reconcile.Result{}, nil - } - return reconcile.Result{}, nil + return ctrl.Result{}, nil } +// SetupWithManager sets up the controller with the Manager. 
func (r *BackupSessionReconciler) SetupWithManager(mgr ctrl.Manager) error { - if err := mgr.GetFieldIndexer().IndexField(context.TODO(), &formolv1alpha1.BackupSession{}, sessionState, func(rawObj client.Object) []string { - session := rawObj.(*formolv1alpha1.BackupSession) - return []string{string(session.Status.SessionState)} - }); err != nil { - return err - } - return ctrl.NewControllerManagedBy(mgr). For(&formolv1alpha1.BackupSession{}). - //WithEventFilter(predicate.GenerationChangedPredicate{}). // Don't reconcile when status gets updated - Owns(&batchv1.Job{}). Complete(r) } diff --git a/controllers/backupsession_controller.go~ b/controllers/backupsession_controller.go~ new file mode 100644 index 0000000..64a8b06 --- /dev/null +++ b/controllers/backupsession_controller.go~ @@ -0,0 +1,62 @@ +/* +Copyright 2023. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package controllers + +import ( + "context" + + "k8s.io/apimachinery/pkg/runtime" + ctrl "sigs.k8s.io/controller-runtime" + "sigs.k8s.io/controller-runtime/pkg/client" + "sigs.k8s.io/controller-runtime/pkg/log" + + formolv1alpha1 "github.com/desmo999r/formol/api/v1alpha1" +) + +// BackupSessionReconciler reconciles a BackupSession object +type BackupSessionReconciler struct { + client.Client + Scheme *runtime.Scheme +} + +//+kubebuilder:rbac:groups=formol.desmojim.fr,resources=backupsessions,verbs=get;list;watch;create;update;patch;delete +//+kubebuilder:rbac:groups=formol.desmojim.fr,resources=backupsessions/status,verbs=get;update;patch +//+kubebuilder:rbac:groups=formol.desmojim.fr,resources=backupsessions/finalizers,verbs=update + +// Reconcile is part of the main kubernetes reconciliation loop which aims to +// move the current state of the cluster closer to the desired state. +// TODO(user): Modify the Reconcile function to compare the state specified by +// the BackupSession object against the actual cluster state, and then +// perform operations to make the cluster state reflect the state specified by +// the user. +// +// For more details, check Reconcile and its Result here: +// - https://pkg.go.dev/sigs.k8s.io/controller-runtime@v0.13.1/pkg/reconcile +func (r *BackupSessionReconciler) Reconcile(ctx context.Context, req ctrl.Request) (ctrl.Result, error) { + _ = log.FromContext(ctx) + + // TODO(user): your logic here + + return ctrl.Result{}, nil +} + +// SetupWithManager sets up the controller with the Manager. +func (r *BackupSessionReconciler) SetupWithManager(mgr ctrl.Manager) error { + return ctrl.NewControllerManagedBy(mgr). + For(&formolv1alpha1.BackupSession{}). 
+ Complete(r) +} diff --git a/controllers/backupsession_controller_test.go b/controllers/backupsession_controller_test.go deleted file mode 100644 index b283522..0000000 --- a/controllers/backupsession_controller_test.go +++ /dev/null @@ -1,147 +0,0 @@ -package controllers - -import ( - "context" - formolv1alpha1 "github.com/desmo999r/formol/api/v1alpha1" - . "github.com/onsi/ginkgo" - . "github.com/onsi/gomega" - //corev1 "k8s.io/api/core/v1" - corev1 "k8s.io/api/core/v1" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/apimachinery/pkg/types" -) - -var _ = Describe("Testing BackupSession controller", func() { - const ( - BSBackupSessionName = "test-backupsession-controller" - ) - var ( - ctx = context.Background() - key = types.NamespacedName{ - Name: BSBackupSessionName, - Namespace: TestNamespace, - } - backupSession = &formolv1alpha1.BackupSession{} - ) - BeforeEach(func() { - backupSession = &formolv1alpha1.BackupSession{ - ObjectMeta: metav1.ObjectMeta{ - Name: BSBackupSessionName, - Namespace: TestNamespace, - }, - Spec: formolv1alpha1.BackupSessionSpec{ - Ref: corev1.ObjectReference{ - Name: TestBackupConfName, - }, - }, - } - }) - Context("Creating a backupsession", func() { - JustBeforeEach(func() { - Eventually(func() error { - return k8sClient.Create(ctx, backupSession) - }, timeout, interval).Should(Succeed()) - realBackupSession := &formolv1alpha1.BackupSession{} - Eventually(func() error { - err := k8sClient.Get(ctx, key, realBackupSession) - return err - }, timeout, interval).Should(Succeed()) - Eventually(func() formolv1alpha1.SessionState { - if err := k8sClient.Get(ctx, key, realBackupSession); err != nil { - return "" - } else { - return realBackupSession.Status.SessionState - } - }, timeout, interval).Should(Equal(formolv1alpha1.Running)) - }) - AfterEach(func() { - Expect(k8sClient.Delete(ctx, backupSession)).Should(Succeed()) - }) - - It("Should have a new task", func() { - realBackupSession := &formolv1alpha1.BackupSession{} - _ = 
k8sClient.Get(ctx, key, realBackupSession) - Expect(realBackupSession.Status.Targets[0].Name).Should(Equal(TestDeploymentName)) - Expect(realBackupSession.Status.Targets[0].SessionState).Should(Equal(formolv1alpha1.New)) - Expect(realBackupSession.Status.Targets[0].Kind).Should(Equal(formolv1alpha1.SidecarKind)) - Expect(realBackupSession.Status.Targets[0].Try).Should(Equal(1)) - }) - - It("Should move to the next task when the first one is a success", func() { - realBackupSession := &formolv1alpha1.BackupSession{} - Expect(k8sClient.Get(ctx, key, realBackupSession)).Should(Succeed()) - realBackupSession.Status.Targets[0].SessionState = formolv1alpha1.Success - Expect(k8sClient.Status().Update(ctx, realBackupSession)).Should(Succeed()) - Eventually(func() int { - _ = k8sClient.Get(ctx, key, realBackupSession) - return len(realBackupSession.Status.Targets) - }, timeout, interval).Should(Equal(2)) - Expect(k8sClient.Get(ctx, key, realBackupSession)).Should(Succeed()) - Expect(realBackupSession.Status.Targets[1].Name).Should(Equal(TestBackupFuncName)) - Expect(realBackupSession.Status.Targets[1].SessionState).Should(Equal(formolv1alpha1.New)) - Expect(realBackupSession.Status.Targets[1].Kind).Should(Equal(formolv1alpha1.JobKind)) - }) - - It("Should be a success when the last task is a success", func() { - realBackupSession := &formolv1alpha1.BackupSession{} - Expect(k8sClient.Get(ctx, key, realBackupSession)).Should(Succeed()) - realBackupSession.Status.Targets[0].SessionState = formolv1alpha1.Success - Expect(k8sClient.Status().Update(ctx, realBackupSession)).Should(Succeed()) - Eventually(func() int { - _ = k8sClient.Get(ctx, key, realBackupSession) - return len(realBackupSession.Status.Targets) - }, timeout, interval).Should(Equal(2)) - Expect(k8sClient.Get(ctx, key, realBackupSession)).Should(Succeed()) - realBackupSession.Status.Targets[1].SessionState = formolv1alpha1.Success - Expect(k8sClient.Status().Update(ctx, realBackupSession)).Should(Succeed()) - 
Expect(k8sClient.Get(ctx, key, realBackupSession)).Should(Succeed()) - Eventually(func() formolv1alpha1.SessionState { - _ = k8sClient.Get(ctx, key, realBackupSession) - return realBackupSession.Status.SessionState - }, timeout, interval).Should(Equal(formolv1alpha1.Success)) - }) - - It("Should retry when the task is a failure", func() { - realBackupSession := &formolv1alpha1.BackupSession{} - Expect(k8sClient.Get(ctx, key, realBackupSession)).Should(Succeed()) - realBackupSession.Status.Targets[0].SessionState = formolv1alpha1.Success - Expect(k8sClient.Status().Update(ctx, realBackupSession)).Should(Succeed()) - Eventually(func() int { - _ = k8sClient.Get(ctx, key, realBackupSession) - return len(realBackupSession.Status.Targets) - }, timeout, interval).Should(Equal(2)) - Expect(k8sClient.Get(ctx, key, realBackupSession)).Should(Succeed()) - realBackupSession.Status.Targets[1].SessionState = formolv1alpha1.Failure - Expect(k8sClient.Status().Update(ctx, realBackupSession)).Should(Succeed()) - Eventually(func() int { - _ = k8sClient.Get(ctx, key, realBackupSession) - return realBackupSession.Status.Targets[1].Try - }, timeout, interval).Should(Equal(2)) - Expect(k8sClient.Get(ctx, key, realBackupSession)).Should(Succeed()) - Expect(realBackupSession.Status.Targets[1].SessionState).Should(Equal(formolv1alpha1.New)) - realBackupSession.Status.Targets[1].SessionState = formolv1alpha1.Failure - Expect(k8sClient.Status().Update(ctx, realBackupSession)).Should(Succeed()) - Eventually(func() formolv1alpha1.SessionState { - _ = k8sClient.Get(ctx, key, realBackupSession) - return realBackupSession.Status.SessionState - }, timeout, interval).Should(Equal(formolv1alpha1.Failure)) - }) - - It("should create a backup job", func() { - }) - }) - Context("When other BackupSession exist", func() { - const ( - bs1Name = "test-backupsession-controller1" - bs2Name = "test-backupsession-controller2" - bs3Name = "test-backupsession-controller3" - ) - var () - BeforeEach(func() { - }) 
- JustBeforeEach(func() { - }) - It("Should clean up old sessions", func() { - }) - }) -}) diff --git a/controllers/restoresession_controller.go b/controllers/restoresession_controller.go index 7a125d0..b9d8da1 100644 --- a/controllers/restoresession_controller.go +++ b/controllers/restoresession_controller.go @@ -1,5 +1,5 @@ /* - +Copyright 2023. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -18,429 +18,45 @@ package controllers import ( "context" - "fmt" - "strings" - "time" - "github.com/go-logr/logr" - appsv1 "k8s.io/api/apps/v1" - batchv1 "k8s.io/api/batch/v1" - corev1 "k8s.io/api/core/v1" - "k8s.io/apimachinery/pkg/api/errors" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/runtime" ctrl "sigs.k8s.io/controller-runtime" "sigs.k8s.io/controller-runtime/pkg/client" "sigs.k8s.io/controller-runtime/pkg/log" - "sigs.k8s.io/controller-runtime/pkg/reconcile" formolv1alpha1 "github.com/desmo999r/formol/api/v1alpha1" - formolutils "github.com/desmo999r/formol/pkg/utils" -) - -const ( - RESTORESESSION string = "restoresession" - UPDATESTATUS string = "updatestatus" - jobOwnerKey string = ".metadata.controller" ) // RestoreSessionReconciler reconciles a RestoreSession object type RestoreSessionReconciler struct { client.Client - Log logr.Logger Scheme *runtime.Scheme } -var _ reconcile.Reconciler = &RestoreSessionReconciler{} +//+kubebuilder:rbac:groups=formol.desmojim.fr,resources=restoresessions,verbs=get;list;watch;create;update;patch;delete +//+kubebuilder:rbac:groups=formol.desmojim.fr,resources=restoresessions/status,verbs=get;update;patch +//+kubebuilder:rbac:groups=formol.desmojim.fr,resources=restoresessions/finalizers,verbs=update -// +kubebuilder:rbac:groups=formol.desmojim.fr,resources=restoresessions,verbs=get;list;watch;create;update;patch;delete -// 
+kubebuilder:rbac:groups=formol.desmojim.fr,resources=restoresessions/status,verbs=get;update;patch -// +kubebuilder:rbac:groups=coordination.k8s.io,resources=leases,verbs=get;list;create;update +// Reconcile is part of the main kubernetes reconciliation loop which aims to +// move the current state of the cluster closer to the desired state. +// TODO(user): Modify the Reconcile function to compare the state specified by +// the RestoreSession object against the actual cluster state, and then +// perform operations to make the cluster state reflect the state specified by +// the user. +// +// For more details, check Reconcile and its Result here: +// - https://pkg.go.dev/sigs.k8s.io/controller-runtime@v0.13.1/pkg/reconcile +func (r *RestoreSessionReconciler) Reconcile(ctx context.Context, req ctrl.Request) (ctrl.Result, error) { + _ = log.FromContext(ctx) -func (r *RestoreSessionReconciler) Reconcile(ctx context.Context, req reconcile.Request) (reconcile.Result, error) { - log := log.FromContext(ctx).WithValues("restoresession", req.NamespacedName) + // TODO(user): your logic here - // Get the RestoreSession - restoreSession := &formolv1alpha1.RestoreSession{} - if err := r.Get(ctx, req.NamespacedName, restoreSession); err != nil { - log.Error(err, "unable to get restoresession") - return reconcile.Result{}, client.IgnoreNotFound(err) - } - log = r.Log.WithValues("restoresession", req.NamespacedName, "version", restoreSession.ObjectMeta.ResourceVersion) - // Get the BackupSession the RestoreSession references - backupSession := &formolv1alpha1.BackupSession{} - if err := r.Get(ctx, client.ObjectKey{ - Namespace: restoreSession.Namespace, - Name: restoreSession.Spec.BackupSessionRef.Ref.Name, - }, backupSession); err != nil { - if errors.IsNotFound(err) { - backupSession = &formolv1alpha1.BackupSession{ - Spec: restoreSession.Spec.BackupSessionRef.Spec, - Status: restoreSession.Spec.BackupSessionRef.Status, - } - log.V(1).Info("generated backupsession", "spec", 
backupSession.Spec, "status", backupSession.Status) - } else { - log.Error(err, "unable to get backupsession", "restoresession", restoreSession.Spec) - return reconcile.Result{}, client.IgnoreNotFound(err) - } - } - // Get the BackupConfiguration linked to the BackupSession - backupConf := &formolv1alpha1.BackupConfiguration{} - if err := r.Get(ctx, client.ObjectKey{ - Namespace: backupSession.Spec.Ref.Namespace, - Name: backupSession.Spec.Ref.Name, - }, backupConf); err != nil { - log.Error(err, "unable to get backupConfiguration", "name", backupSession.Spec.Ref, "namespace", backupSession.Namespace) - return reconcile.Result{}, client.IgnoreNotFound(err) - } - - // Helper functions - createRestoreJob := func(target formolv1alpha1.Target, snapshotId string) error { - // TODO: Get the list of existing jobs and see if there is already one scheduled for the target - var jobList batchv1.JobList - if err := r.List(ctx, &jobList, client.InNamespace(restoreSession.Namespace), client.MatchingFields{jobOwnerKey: restoreSession.Name}); err != nil { - log.Error(err, "unable to get job list") - return err - } - log.V(1).Info("Found jobs", "jobs", jobList.Items) - for _, job := range jobList.Items { - if job.Annotations["targetName"] == target.Name && job.Annotations["snapshotId"] == snapshotId { - log.V(0).Info("there is already a cronjob to restore that target", "targetName", target.Name, "snapshotId", snapshotId) - return nil - } - } - restoreSessionEnv := []corev1.EnvVar{ - corev1.EnvVar{ - Name: "TARGET_NAME", - Value: target.Name, - }, - corev1.EnvVar{ - Name: "RESTORESESSION_NAME", - Value: restoreSession.Name, - }, - corev1.EnvVar{ - Name: "RESTORESESSION_NAMESPACE", - Value: restoreSession.Namespace, - }, - } - - output := corev1.VolumeMount{ - Name: "output", - MountPath: "/output", - } - restic := corev1.Container{ - Name: "restic", - Image: backupConf.Spec.Image, - Args: []string{"volume", "restore", "--snapshot-id", snapshotId}, - VolumeMounts: 
[]corev1.VolumeMount{output}, - Env: restoreSessionEnv, - } - finalizer := corev1.Container{ - Name: "finalizer", - Image: backupConf.Spec.Image, - Args: []string{"target", "finalize"}, - VolumeMounts: []corev1.VolumeMount{output}, - Env: restoreSessionEnv, - } - repo := &formolv1alpha1.Repo{} - if err := r.Get(ctx, client.ObjectKey{ - Namespace: backupConf.Namespace, - Name: backupConf.Spec.Repository, - }, repo); err != nil { - log.Error(err, "unable to get Repo from BackupConfiguration") - return err - } - // S3 backing storage - var ttl int32 = 300 - restic.Env = append(restic.Env, formolutils.ConfigureResticEnvVar(backupConf, repo)...) - job := &batchv1.Job{ - ObjectMeta: metav1.ObjectMeta{ - GenerateName: fmt.Sprintf("%s-%s-", restoreSession.Name, target.Name), - Namespace: restoreSession.Namespace, - Annotations: map[string]string{ - "targetName": target.Name, - "snapshotId": snapshotId, - }, - }, - Spec: batchv1.JobSpec{ - TTLSecondsAfterFinished: &ttl, - Template: corev1.PodTemplateSpec{ - Spec: corev1.PodSpec{ - InitContainers: []corev1.Container{restic}, - Containers: []corev1.Container{finalizer}, - Volumes: []corev1.Volume{ - corev1.Volume{Name: "output"}, - }, - RestartPolicy: corev1.RestartPolicyOnFailure, - }, - }, - }, - } - for _, step := range target.Steps { - function := &formolv1alpha1.Function{} - // get the backup function - if err := r.Get(ctx, client.ObjectKey{ - Namespace: restoreSession.Namespace, - Name: step.Name, - }, function); err != nil { - log.Error(err, "unable to get backup function", "name", step.Name) - return err - } - var restoreName string - if function.Annotations["restoreFunction"] != "" { - restoreName = function.Annotations["restoreFunction"] - } else { - restoreName = strings.Replace(step.Name, "backup", "restore", 1) - } - if err := r.Get(ctx, client.ObjectKey{ - Namespace: restoreSession.Namespace, - Name: restoreName, - }, function); err != nil { - log.Error(err, "unable to get function", "function", step) - return 
err - } - function.Spec.Name = function.Name - function.Spec.Env = append(function.Spec.Env, restoreSessionEnv...) - function.Spec.VolumeMounts = append(function.Spec.VolumeMounts, output) - job.Spec.Template.Spec.InitContainers = append(job.Spec.Template.Spec.InitContainers, function.Spec) - } - if err := ctrl.SetControllerReference(restoreSession, job, r.Scheme); err != nil { - log.Error(err, "unable to set controller on job", "job", job, "restoresession", restoreSession) - return err - } - log.V(0).Info("creating a restore job", "target", target.Name) - if err := r.Create(ctx, job); err != nil { - log.Error(err, "unable to create job", "job", job) - return err - } - return nil - } - - deleteRestoreInitContainer := func(target formolv1alpha1.Target) (err error) { - deployment := &appsv1.Deployment{} - if err = r.Get(context.Background(), client.ObjectKey{ - Namespace: backupConf.Namespace, - Name: target.Name, - }, deployment); err != nil { - log.Error(err, "unable to get deployment") - return err - } - log.V(1).Info("got deployment", "namespace", deployment.Namespace, "name", deployment.Name) - newInitContainers := []corev1.Container{} - for _, initContainer := range deployment.Spec.Template.Spec.InitContainers { - if initContainer.Name == RESTORESESSION { - log.V(0).Info("Found our restoresession container. 
Removing it from the list of init containers", "container", initContainer) - defer func() { - if err = r.Update(ctx, deployment); err != nil { - log.Error(err, "unable to update deployment") - } - }() - } else { - newInitContainers = append(newInitContainers, initContainer) - } - } - deployment.Spec.Template.Spec.InitContainers = newInitContainers - return nil - } - - createRestoreInitContainer := func(target formolv1alpha1.Target, snapshotId string) error { - deployment := &appsv1.Deployment{} - if err := r.Get(context.Background(), client.ObjectKey{ - Namespace: restoreSession.Namespace, - Name: target.Name, - }, deployment); err != nil { - log.Error(err, "unable to get deployment") - return err - } - log.V(1).Info("got deployment", "namespace", deployment.Namespace, "name", deployment.Name) - for _, initContainer := range deployment.Spec.Template.Spec.InitContainers { - if initContainer.Name == RESTORESESSION { - log.V(0).Info("there is already a restoresession initcontainer", "deployment", deployment.Spec.Template.Spec.InitContainers) - return nil - } - } - restoreSessionEnv := []corev1.EnvVar{ - corev1.EnvVar{ - Name: formolv1alpha1.TARGET_NAME, - Value: target.Name, - }, - corev1.EnvVar{ - Name: formolv1alpha1.RESTORESESSION_NAME, - Value: restoreSession.Name, - }, - corev1.EnvVar{ - Name: formolv1alpha1.RESTORESESSION_NAMESPACE, - Value: restoreSession.Namespace, - }, - } - initContainer := corev1.Container{ - Name: RESTORESESSION, - Image: backupConf.Spec.Image, - Args: []string{"volume", "restore", "--snapshot-id", snapshotId}, - VolumeMounts: target.VolumeMounts, - Env: restoreSessionEnv, - } - repo := &formolv1alpha1.Repo{} - if err := r.Get(ctx, client.ObjectKey{ - Namespace: backupConf.Namespace, - Name: backupConf.Spec.Repository, - }, repo); err != nil { - log.Error(err, "unable to get Repo from BackupConfiguration") - return err - } - // S3 backing storage - initContainer.Env = append(initContainer.Env, formolutils.ConfigureResticEnvVar(backupConf, 
repo)...) - deployment.Spec.Template.Spec.InitContainers = append([]corev1.Container{initContainer}, - deployment.Spec.Template.Spec.InitContainers...) - if err := r.Update(ctx, deployment); err != nil { - log.Error(err, "unable to update deployment") - return err - } - - return nil - } - - startNextTask := func() (*formolv1alpha1.TargetStatus, error) { - nextTarget := len(restoreSession.Status.Targets) - if nextTarget < len(backupConf.Spec.Targets) { - target := backupConf.Spec.Targets[nextTarget] - targetStatus := formolv1alpha1.TargetStatus{ - Name: target.Name, - Kind: target.Kind, - SessionState: formolv1alpha1.New, - StartTime: &metav1.Time{Time: time.Now()}, - } - restoreSession.Status.Targets = append(restoreSession.Status.Targets, targetStatus) - switch target.Kind { - case formolv1alpha1.SidecarKind: - log.V(0).Info("Next task is a Sidecard restore", "target", target) - if err := createRestoreInitContainer(target, backupSession.Status.Targets[nextTarget].SnapshotId); err != nil { - log.V(0).Info("unable to create restore init container", "task", target) - targetStatus.SessionState = formolv1alpha1.Failure - return nil, err - } - case formolv1alpha1.JobKind: - log.V(0).Info("Next task is a Job restore", "target", target) - if err := createRestoreJob(target, backupSession.Status.Targets[nextTarget].SnapshotId); err != nil { - log.V(0).Info("unable to create restore job", "task", target) - targetStatus.SessionState = formolv1alpha1.Failure - return nil, err - } - } - return &targetStatus, nil - } else { - return nil, nil - } - } - - endTask := func() error { - target := backupConf.Spec.Targets[len(restoreSession.Status.Targets)-1] - switch target.Kind { - case formolv1alpha1.SidecarKind: - if err := deleteRestoreInitContainer(target); err != nil { - log.Error(err, "unable to delete restore init container") - return err - } - } - return nil - } - - switch restoreSession.Status.SessionState { - case formolv1alpha1.New: - restoreSession.Status.SessionState = 
formolv1alpha1.Running - if targetStatus, err := startNextTask(); err != nil { - log.Error(err, "unable to start next restore task") - return reconcile.Result{}, err - } else { - log.V(0).Info("New restore. Start the first task", "task", targetStatus.Name) - if err := r.Status().Update(ctx, restoreSession); err != nil { - log.Error(err, "unable to update restoresession") - return reconcile.Result{}, err - } - } - case formolv1alpha1.Running: - currentTargetStatus := &restoreSession.Status.Targets[len(restoreSession.Status.Targets)-1] - switch currentTargetStatus.SessionState { - case formolv1alpha1.Failure: - log.V(0).Info("last restore task failed. Stop here", "target", currentTargetStatus.Name) - restoreSession.Status.SessionState = formolv1alpha1.Failure - if err := r.Status().Update(ctx, restoreSession); err != nil { - log.Error(err, "unable to update restoresession") - return reconcile.Result{}, err - } - case formolv1alpha1.Running: - log.V(0).Info("task is still running", "target", currentTargetStatus.Name) - return reconcile.Result{}, nil - case formolv1alpha1.Waiting: - target := backupConf.Spec.Targets[len(restoreSession.Status.Targets)-1] - if target.Kind == formolv1alpha1.SidecarKind { - deployment := &appsv1.Deployment{} - if err := r.Get(context.Background(), client.ObjectKey{ - Namespace: restoreSession.Namespace, - Name: target.Name, - }, deployment); err != nil { - log.Error(err, "unable to get deployment") - return reconcile.Result{}, err - } - - if deployment.Status.ReadyReplicas == *deployment.Spec.Replicas { - log.V(0).Info("The deployment is ready. 
We can resume the backup") - currentTargetStatus.SessionState = formolv1alpha1.Finalize - if err := r.Status().Update(ctx, restoreSession); err != nil { - log.Error(err, "unable to update restoresession") - return reconcile.Result{}, err - } - } else { - log.V(0).Info("Waiting for the sidecar to come back") - return reconcile.Result{RequeueAfter: 10 * time.Second}, nil - } - } else { - log.V(0).Info("not a SidecarKind. Ignoring Waiting") - } - case formolv1alpha1.Success: - _ = endTask() - log.V(0).Info("last task was a success. start a new one", "target", currentTargetStatus, "restoreSession version", restoreSession.ObjectMeta.ResourceVersion) - targetStatus, err := startNextTask() - if err != nil { - return reconcile.Result{}, err - } - if targetStatus == nil { - // No more task to start. The restore is over - restoreSession.Status.SessionState = formolv1alpha1.Success - } - if err := r.Status().Update(ctx, restoreSession); err != nil { - log.Error(err, "unable to update restoresession") - return reconcile.Result{RequeueAfter: 300 * time.Millisecond}, nil - } - } - case "": - // Restore session has just been created - restoreSession.Status.SessionState = formolv1alpha1.New - restoreSession.Status.StartTime = &metav1.Time{Time: time.Now()} - if err := r.Status().Update(ctx, restoreSession); err != nil { - log.Error(err, "unable to update restoreSession") - return reconcile.Result{}, err - } - } - return reconcile.Result{}, nil + return ctrl.Result{}, nil } +// SetupWithManager sets up the controller with the Manager. 
func (r *RestoreSessionReconciler) SetupWithManager(mgr ctrl.Manager) error { - if err := mgr.GetFieldIndexer().IndexField(context.Background(), &batchv1.Job{}, jobOwnerKey, func(rawObj client.Object) []string { - job := rawObj.(*batchv1.Job) - owner := metav1.GetControllerOf(job) - if owner == nil { - return nil - } - if owner.APIVersion != formolv1alpha1.GroupVersion.String() || owner.Kind != "RestoreSession" { - return nil - } - return []string{owner.Name} - }); err != nil { - return err - } return ctrl.NewControllerManagedBy(mgr). For(&formolv1alpha1.RestoreSession{}). - Owns(&batchv1.Job{}). Complete(r) } diff --git a/controllers/restoresession_controller_test.go b/controllers/restoresession_controller_test.go deleted file mode 100644 index 2a3750c..0000000 --- a/controllers/restoresession_controller_test.go +++ /dev/null @@ -1,95 +0,0 @@ -package controllers - -import ( - "context" - formolv1alpha1 "github.com/desmo999r/formol/api/v1alpha1" - . "github.com/onsi/ginkgo" - . "github.com/onsi/gomega" - corev1 "k8s.io/api/core/v1" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/apimachinery/pkg/types" -) - -var _ = Describe("Testing RestoreSession controller", func() { - const ( - RSRestoreSessionName = "test-restoresession-controller" - ) - var ( - ctx = context.Background() - key = types.NamespacedName{ - Name: RSRestoreSessionName, - Namespace: TestNamespace, - } - restoreSession = &formolv1alpha1.RestoreSession{} - ) - BeforeEach(func() { - restoreSession = &formolv1alpha1.RestoreSession{ - ObjectMeta: metav1.ObjectMeta{ - Name: RSRestoreSessionName, - Namespace: TestNamespace, - }, - Spec: formolv1alpha1.RestoreSessionSpec{ - BackupSessionRef: formolv1alpha1.BackupSessionRef{ - Ref: corev1.ObjectReference{ - Name: TestBackupSessionName, - }, - }, - }, - } - }) - Context("Creating a RestoreSession", func() { - JustBeforeEach(func() { - Eventually(func() error { - return k8sClient.Create(ctx, restoreSession) - }, timeout, interval).Should(Succeed()) 
- realRestoreSession := &formolv1alpha1.RestoreSession{} - Eventually(func() error { - return k8sClient.Get(ctx, key, realRestoreSession) - }, timeout, interval).Should(Succeed()) - Eventually(func() formolv1alpha1.SessionState { - _ = k8sClient.Get(ctx, key, realRestoreSession) - return realRestoreSession.Status.SessionState - }, timeout, interval).Should(Equal(formolv1alpha1.Running)) - }) - AfterEach(func() { - Expect(k8sClient.Delete(ctx, restoreSession)).Should(Succeed()) - }) - It("Should have a new task and should fail if the task fails", func() { - restoreSession := &formolv1alpha1.RestoreSession{} - Expect(k8sClient.Get(ctx, key, restoreSession)).Should(Succeed()) - Expect(len(restoreSession.Status.Targets)).Should(Equal(1)) - Expect(restoreSession.Status.Targets[0].SessionState).Should(Equal(formolv1alpha1.New)) - restoreSession.Status.Targets[0].SessionState = formolv1alpha1.Running - Expect(k8sClient.Status().Update(ctx, restoreSession)).Should(Succeed()) - Expect(k8sClient.Get(ctx, key, restoreSession)).Should(Succeed()) - Eventually(func() formolv1alpha1.SessionState { - _ = k8sClient.Get(ctx, key, restoreSession) - return restoreSession.Status.Targets[0].SessionState - }, timeout, interval).Should(Equal(formolv1alpha1.Running)) - restoreSession.Status.Targets[0].SessionState = formolv1alpha1.Failure - Expect(k8sClient.Status().Update(ctx, restoreSession)).Should(Succeed()) - Expect(k8sClient.Get(ctx, key, restoreSession)).Should(Succeed()) - Eventually(func() formolv1alpha1.SessionState { - _ = k8sClient.Get(ctx, key, restoreSession) - return restoreSession.Status.SessionState - }, timeout, interval).Should(Equal(formolv1alpha1.Failure)) - }) - It("Should move to the new task if the first one is a success and be a success if all the tasks succeed", func() { - restoreSession := &formolv1alpha1.RestoreSession{} - Expect(k8sClient.Get(ctx, key, restoreSession)).Should(Succeed()) - Expect(len(restoreSession.Status.Targets)).Should(Equal(1)) - 
restoreSession.Status.Targets[0].SessionState = formolv1alpha1.Success - Expect(k8sClient.Status().Update(ctx, restoreSession)).Should(Succeed()) - Eventually(func() int { - _ = k8sClient.Get(ctx, key, restoreSession) - return len(restoreSession.Status.Targets) - }, timeout, interval).Should(Equal(2)) - restoreSession.Status.Targets[1].SessionState = formolv1alpha1.Success - Expect(k8sClient.Status().Update(ctx, restoreSession)).Should(Succeed()) - Eventually(func() formolv1alpha1.SessionState { - _ = k8sClient.Get(ctx, key, restoreSession) - return restoreSession.Status.SessionState - }, timeout, interval).Should(Equal(formolv1alpha1.Success)) - }) - }) -}) diff --git a/controllers/suite_test.go b/controllers/suite_test.go index 0eb694a..88ebe55 100644 --- a/controllers/suite_test.go +++ b/controllers/suite_test.go @@ -1,5 +1,5 @@ /* - +Copyright 2023. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -22,55 +22,48 @@ import ( "testing" "time" - . "github.com/onsi/ginkgo" + . "github.com/onsi/ginkgo/v2" . "github.com/onsi/gomega" - appsv1 "k8s.io/api/apps/v1" - corev1 "k8s.io/api/core/v1" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/client-go/kubernetes/scheme" "k8s.io/client-go/rest" - ctrl "sigs.k8s.io/controller-runtime" "sigs.k8s.io/controller-runtime/pkg/client" "sigs.k8s.io/controller-runtime/pkg/envtest" - "sigs.k8s.io/controller-runtime/pkg/envtest/printer" logf "sigs.k8s.io/controller-runtime/pkg/log" "sigs.k8s.io/controller-runtime/pkg/log/zap" formolv1alpha1 "github.com/desmo999r/formol/api/v1alpha1" - // +kubebuilder:scaffold:imports + //+kubebuilder:scaffold:imports + + appsv1 "k8s.io/api/apps/v1" + corev1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + ctrl "sigs.k8s.io/controller-runtime" ) // These tests use Ginkgo (BDD-style Go testing framework). Refer to // http://onsi.github.io/ginkgo/ to learn more about Ginkgo. 
-const ( - TestBackupFuncName = "test-backup-func" - TestFunc = "test-norestore-func" - TestRestoreFuncName = "test-restore-func" - TestNamespace = "test-namespace" - TestRepoName = "test-repo" - TestDeploymentName = "test-deployment" - TestBackupConfName = "test-backupconf" - TestBackupSessionName = "test-backupsession" - TestDataVolume = "data" - TestDataMountPath = "/data" - timeout = time.Second * 10 - interval = time.Millisecond * 250 -) -var cfg *rest.Config -var k8sClient client.Client -var testEnv *envtest.Environment +const ( + NAMESPACE_NAME = "test-namespace" + REPO_NAME = "test-repo" + DEPLOYMENT_NAME = "test-deployment" + CONTAINER_NAME = "test-container" + DATAVOLUME_NAME = "data" + timeout = time.Second * 10 + interval = time.Millisecond * 250 +) var ( namespace = &corev1.Namespace{ ObjectMeta: metav1.ObjectMeta{ - Name: TestNamespace, + Name: NAMESPACE_NAME, }, } deployment = &appsv1.Deployment{ ObjectMeta: metav1.ObjectMeta{ - Name: TestDeploymentName, - Namespace: TestNamespace, + Namespace: NAMESPACE_NAME, + Name: DEPLOYMENT_NAME, }, Spec: appsv1.DeploymentSpec{ Selector: &metav1.LabelSelector{ @@ -89,239 +82,74 @@ var ( }, Volumes: []corev1.Volume{ corev1.Volume{ - Name: TestDataVolume, + Name: DATAVOLUME_NAME, }, }, }, }, }, } - sa = &corev1.ServiceAccount{ - ObjectMeta: metav1.ObjectMeta{ - Name: "default", - Namespace: TestNamespace, - }, - } - secret = &corev1.Secret{ - ObjectMeta: metav1.ObjectMeta{ - Name: "test-secret", - Namespace: TestNamespace, - }, - Data: map[string][]byte{ - "RESTIC_PASSWORD": []byte("toto"), - "AWS_ACCESS_KEY_ID": []byte("titi"), - "AWS_SECRET_ACCESS_KEY": []byte("tata"), - }, - } - repo = &formolv1alpha1.Repo{ - ObjectMeta: metav1.ObjectMeta{ - Name: TestRepoName, - Namespace: TestNamespace, - }, - Spec: formolv1alpha1.RepoSpec{ - Backend: formolv1alpha1.Backend{ - S3: formolv1alpha1.S3{ - Server: "raid5.desmojim.fr:9000", - Bucket: "testbucket2", - }, - }, - RepositorySecrets: "test-secret", - }, - } - function = 
&formolv1alpha1.Function{ - ObjectMeta: metav1.ObjectMeta{ - Name: TestFunc, - Namespace: TestNamespace, - }, - Spec: corev1.Container{ - Name: "norestore-func", - Image: "myimage", - Args: []string{"a", "set", "of", "args"}, - }, - } - backupFunc = &formolv1alpha1.Function{ - ObjectMeta: metav1.ObjectMeta{ - Name: TestRestoreFuncName, - Namespace: TestNamespace, - }, - Spec: corev1.Container{ - Name: "restore-func", - Image: "myimage", - Args: []string{"a", "set", "of", "args"}, - }, - } - restoreFunc = &formolv1alpha1.Function{ - ObjectMeta: metav1.ObjectMeta{ - Name: TestBackupFuncName, - Namespace: TestNamespace, - }, - Spec: corev1.Container{ - Name: "backup-func", - Image: "myimage", - Args: []string{"a", "set", "of", "args"}, - Env: []corev1.EnvVar{ - corev1.EnvVar{ - Name: "foo", - Value: "bar", - }, - }, - }, - } - testBackupConf = &formolv1alpha1.BackupConfiguration{ - ObjectMeta: metav1.ObjectMeta{ - Name: TestBackupConfName, - Namespace: TestNamespace, - }, - Spec: formolv1alpha1.BackupConfigurationSpec{ - Repository: TestRepoName, - Image: "desmo999r/formolcli:latest", - Schedule: "1 * * * *", - Keep: formolv1alpha1.Keep{ - Last: 2, - }, - Targets: []formolv1alpha1.Target{ - formolv1alpha1.Target{ - Kind: formolv1alpha1.SidecarKind, - Name: TestDeploymentName, - Steps: []formolv1alpha1.Step{ - formolv1alpha1.Step{ - Name: TestFunc, - }, - }, - Paths: []string{ - TestDataMountPath, - }, - VolumeMounts: []corev1.VolumeMount{ - corev1.VolumeMount{ - Name: TestDataVolume, - MountPath: TestDataMountPath, - }, - }, - }, - formolv1alpha1.Target{ - Kind: formolv1alpha1.JobKind, - Name: TestBackupFuncName, - Steps: []formolv1alpha1.Step{ - formolv1alpha1.Step{ - Name: TestFunc, - }, - formolv1alpha1.Step{ - Name: TestBackupFuncName, - }, - }, - }, - }, - }, - } - testBackupSession = &formolv1alpha1.BackupSession{ - ObjectMeta: metav1.ObjectMeta{ - Name: TestBackupSessionName, - Namespace: TestNamespace, - }, - Spec: formolv1alpha1.BackupSessionSpec{ - Ref: 
corev1.ObjectReference{ - Name: TestBackupConfName, - Namespace: TestNamespace, - }, - }, - } + cfg *rest.Config + k8sClient client.Client + testEnv *envtest.Environment + ctx context.Context + cancel context.CancelFunc ) func TestAPIs(t *testing.T) { RegisterFailHandler(Fail) - RunSpecsWithDefaultAndCustomReporters(t, - "Controller Suite", - []Reporter{printer.NewlineReporter{}}) + RunSpecs(t, "Controller Suite") } var _ = BeforeSuite(func() { logf.SetLogger(zap.New(zap.WriteTo(GinkgoWriter), zap.UseDevMode(true))) + ctx, cancel = context.WithCancel(context.TODO()) By("bootstrapping test environment") testEnv = &envtest.Environment{ - CRDDirectoryPaths: []string{filepath.Join("..", "config", "crd", "bases")}, + CRDDirectoryPaths: []string{filepath.Join("..", "config", "crd", "bases")}, + ErrorIfCRDPathMissing: true, } - cfg, err := testEnv.Start() - Expect(err).ToNot(HaveOccurred()) - Expect(cfg).ToNot(BeNil()) + var err error + // cfg is defined in this file globally. + cfg, err = testEnv.Start() + Expect(err).NotTo(HaveOccurred()) + Expect(cfg).NotTo(BeNil()) err = formolv1alpha1.AddToScheme(scheme.Scheme) Expect(err).NotTo(HaveOccurred()) - // +kubebuilder:scaffold:scheme + //+kubebuilder:scaffold:scheme + + k8sClient, err = client.New(cfg, client.Options{Scheme: scheme.Scheme}) + Expect(err).NotTo(HaveOccurred()) + Expect(k8sClient).NotTo(BeNil()) + Expect(k8sClient.Create(ctx, namespace)).Should(Succeed()) + Expect(k8sClient.Create(ctx, deployment)).Should(Succeed()) k8sManager, err := ctrl.NewManager(cfg, ctrl.Options{ Scheme: scheme.Scheme, }) - Expect(err).ToNot(HaveOccurred()) + Expect(err).NotTo(HaveOccurred()) err = (&BackupConfigurationReconciler{ Client: k8sManager.GetClient(), Scheme: k8sManager.GetScheme(), - Log: ctrl.Log.WithName("controllers").WithName("BackupConfiguration"), }).SetupWithManager(k8sManager) - Expect(err).ToNot(HaveOccurred()) - - err = (&BackupSessionReconciler{ - Client: k8sManager.GetClient(), - Scheme: k8sManager.GetScheme(), 
- Log: ctrl.Log.WithName("controllers").WithName("BackupSession"), - }).SetupWithManager(k8sManager) - Expect(err).ToNot(HaveOccurred()) - - err = (&RestoreSessionReconciler{ - Client: k8sManager.GetClient(), - Scheme: k8sManager.GetScheme(), - Log: ctrl.Log.WithName("controllers").WithName("RestoreSession"), - }).SetupWithManager(k8sManager) - Expect(err).ToNot(HaveOccurred()) + Expect(err).NotTo(HaveOccurred()) go func() { - err = k8sManager.Start(ctrl.SetupSignalHandler()) - Expect(err).ToNot(HaveOccurred()) + defer GinkgoRecover() + err = k8sManager.Start(ctx) + Expect(err).ToNot(HaveOccurred(), "failed to run manager") }() - - k8sClient = k8sManager.GetClient() - ctx := context.Background() - Expect(k8sClient).ToNot(BeNil()) - Expect(k8sClient.Create(ctx, namespace)).Should(Succeed()) - Expect(k8sClient.Create(ctx, sa)).Should(Succeed()) - Expect(k8sClient.Create(ctx, secret)).Should(Succeed()) - Expect(k8sClient.Create(ctx, repo)).Should(Succeed()) - Expect(k8sClient.Create(ctx, deployment)).Should(Succeed()) - Expect(k8sClient.Create(ctx, function)).Should(Succeed()) - Expect(k8sClient.Create(ctx, backupFunc)).Should(Succeed()) - Expect(k8sClient.Create(ctx, restoreFunc)).Should(Succeed()) - Expect(k8sClient.Create(ctx, testBackupConf)).Should(Succeed()) - Expect(k8sClient.Create(ctx, testBackupSession)).Should(Succeed()) - Eventually(func() error { - return k8sClient.Get(ctx, client.ObjectKey{ - Name: TestBackupSessionName, - Namespace: TestNamespace, - }, testBackupSession) - }, timeout, interval).Should(Succeed()) - testBackupSession.Status.SessionState = formolv1alpha1.Success - testBackupSession.Status.Targets = []formolv1alpha1.TargetStatus{ - formolv1alpha1.TargetStatus{ - Name: TestDeploymentName, - Kind: formolv1alpha1.SidecarKind, - SessionState: formolv1alpha1.Success, - SnapshotId: "12345abcdef", - }, - formolv1alpha1.TargetStatus{ - Name: TestBackupFuncName, - Kind: formolv1alpha1.JobKind, - SessionState: formolv1alpha1.Success, - SnapshotId: 
"67890ghijk", - }, - } - Expect(k8sClient.Status().Update(ctx, testBackupSession)).Should(Succeed()) -}, 60) +}) var _ = AfterSuite(func() { + cancel() By("tearing down the test environment") err := testEnv.Stop() - Expect(err).ToNot(HaveOccurred()) + Expect(err).NotTo(HaveOccurred()) }) diff --git a/controllers/suite_test.go~ b/controllers/suite_test.go~ new file mode 100644 index 0000000..762734a --- /dev/null +++ b/controllers/suite_test.go~ @@ -0,0 +1,155 @@ +/* +Copyright 2023. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package controllers + +import ( + "context" + "path/filepath" + "testing" + "time" + + . "github.com/onsi/ginkgo/v2" + . "github.com/onsi/gomega" + + "k8s.io/client-go/kubernetes/scheme" + "k8s.io/client-go/rest" + "sigs.k8s.io/controller-runtime/pkg/client" + "sigs.k8s.io/controller-runtime/pkg/envtest" + logf "sigs.k8s.io/controller-runtime/pkg/log" + "sigs.k8s.io/controller-runtime/pkg/log/zap" + + formolv1alpha1 "github.com/desmo999r/formol/api/v1alpha1" + //+kubebuilder:scaffold:imports + + //appsv1 "k8s.io/api/apps/v1" + corev1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + ctrl "sigs.k8s.io/controller-runtime" +) + +// These tests use Ginkgo (BDD-style Go testing framework). Refer to +// http://onsi.github.io/ginkgo/ to learn more about Ginkgo. 
+ +const ( + NAMESPACE_NAME = "test-namespace" + REPO_NAME = "test-repo" + DEPLOYMENT_NAME = "test-deployment" + CONTAINER_NAME = "test-container" + DATAVOLUME_NAME = "data" + timeout = time.Second * 10 + interval = time.Millisecond * 250 +) + +var ( + namespace = &corev1.Namespace{ + ObjectMeta: metav1.ObjectMeta{ + Name: NAMESPACE_NAME, + }, + } + deployment = &appsv1.Deployment{ + ObjectMeta: metav1.ObjectMeta{ + Namespace: NAMESPACE_NAME, + Name: DEPLOYMENT_NAME, + }, + Spec: appsv1.DeploymentSpec{ + Selector: &metav1.LabelSelector{ + MatchLabels: map[string]string{"app": "test-deployment"}, + }, + Template: corev1.PodTemplateSpec{ + ObjectMeta: metav1.ObjectMeta{ + Labels: map[string]string{"app": "test-deployment"}, + }, + Spec: corev1.PodSpec{ + Containers: []corev1.Container{ + corev1.Container{ + Name: "test-container", + Image: "test-image", + }, + }, + Volumes: []corev1.Volume{ + corev1.Volume{ + Name: DATAVOLUME_NAME, + }, + }, + }, + }, + }, + } + cfg *rest.Config + k8sClient client.Client + testEnv *envtest.Environment + ctx context.Context + cancel context.CancelFunc +) + +func TestAPIs(t *testing.T) { + RegisterFailHandler(Fail) + + RunSpecs(t, "Controller Suite") +} + +var _ = BeforeSuite(func() { + logf.SetLogger(zap.New(zap.WriteTo(GinkgoWriter), zap.UseDevMode(true))) + + ctx, cancel = context.WithCancel(context.TODO()) + By("bootstrapping test environment") + testEnv = &envtest.Environment{ + CRDDirectoryPaths: []string{filepath.Join("..", "config", "crd", "bases")}, + ErrorIfCRDPathMissing: true, + } + + var err error + // cfg is defined in this file globally. 
+ cfg, err = testEnv.Start() + Expect(err).NotTo(HaveOccurred()) + Expect(cfg).NotTo(BeNil()) + + err = formolv1alpha1.AddToScheme(scheme.Scheme) + Expect(err).NotTo(HaveOccurred()) + + //+kubebuilder:scaffold:scheme + + k8sClient, err = client.New(cfg, client.Options{Scheme: scheme.Scheme}) + Expect(err).NotTo(HaveOccurred()) + Expect(k8sClient).NotTo(BeNil()) + Expect(k8sClient.Create(ctx, namespace)).Should(Succeed()) + Expect(k8sClient.Create(ctx, deployment)).Should(Succeed()) + + k8sManager, err := ctrl.NewManager(cfg, ctrl.Options{ + Scheme: scheme.Scheme, + }) + Expect(err).NotTo(HaveOccurred()) + + err = (&BackupConfigurationReconciler{ + Client: k8sManager.GetClient(), + Scheme: k8sManager.GetScheme(), + }).SetupWithManager(k8sManager) + Expect(err).NotTo(HaveOccurred()) + + go func() { + defer GinkgoRecover() + err = k8sManager.Start(ctx) + Expect(err).ToNot(HaveOccurred(), "failed to run manager") + }() +}) + +var _ = AfterSuite(func() { + cancel() + By("tearing down the test environment") + err := testEnv.Stop() + Expect(err).NotTo(HaveOccurred()) +}) diff --git a/go.mod b/go.mod index 68f2be1..6eb9d3f 100644 --- a/go.mod +++ b/go.mod @@ -1,13 +1,81 @@ module github.com/desmo999r/formol -go 1.13 +go 1.19 require ( - github.com/go-logr/logr v0.3.0 - github.com/onsi/ginkgo v1.14.1 - github.com/onsi/gomega v1.10.2 - k8s.io/api v0.20.2 - k8s.io/apimachinery v0.20.2 - k8s.io/client-go v0.20.2 - sigs.k8s.io/controller-runtime v0.8.3 + github.com/go-logr/logr v1.2.3 + github.com/onsi/ginkgo/v2 v2.1.4 + github.com/onsi/gomega v1.19.0 + k8s.io/api v0.25.0 + k8s.io/apimachinery v0.25.0 + k8s.io/client-go v0.25.0 + sigs.k8s.io/controller-runtime v0.13.1 +) + +require ( + cloud.google.com/go v0.97.0 // indirect + github.com/Azure/go-autorest v14.2.0+incompatible // indirect + github.com/Azure/go-autorest/autorest v0.11.27 // indirect + github.com/Azure/go-autorest/autorest/adal v0.9.20 // indirect + github.com/Azure/go-autorest/autorest/date v0.3.0 // indirect + 
github.com/Azure/go-autorest/logger v0.2.1 // indirect + github.com/Azure/go-autorest/tracing v0.6.0 // indirect + github.com/PuerkitoBio/purell v1.1.1 // indirect + github.com/PuerkitoBio/urlesc v0.0.0-20170810143723-de5bf2ad4578 // indirect + github.com/beorn7/perks v1.0.1 // indirect + github.com/cespare/xxhash/v2 v2.1.2 // indirect + github.com/davecgh/go-spew v1.1.1 // indirect + github.com/emicklei/go-restful/v3 v3.8.0 // indirect + github.com/evanphx/json-patch/v5 v5.6.0 // indirect + github.com/fsnotify/fsnotify v1.5.4 // indirect + github.com/go-logr/zapr v1.2.3 // indirect + github.com/go-openapi/jsonpointer v0.19.5 // indirect + github.com/go-openapi/jsonreference v0.19.5 // indirect + github.com/go-openapi/swag v0.19.14 // indirect + github.com/gogo/protobuf v1.3.2 // indirect + github.com/golang-jwt/jwt/v4 v4.2.0 // indirect + github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da // indirect + github.com/golang/protobuf v1.5.2 // indirect + github.com/google/gnostic v0.5.7-v3refs // indirect + github.com/google/go-cmp v0.5.8 // indirect + github.com/google/gofuzz v1.1.0 // indirect + github.com/google/uuid v1.1.2 // indirect + github.com/imdario/mergo v0.3.12 // indirect + github.com/josharian/intern v1.0.0 // indirect + github.com/json-iterator/go v1.1.12 // indirect + github.com/mailru/easyjson v0.7.6 // indirect + github.com/matttproud/golang_protobuf_extensions v1.0.2-0.20181231171920-c182affec369 // indirect + github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd // indirect + github.com/modern-go/reflect2 v1.0.2 // indirect + github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 // indirect + github.com/pkg/errors v0.9.1 // indirect + github.com/prometheus/client_golang v1.12.2 // indirect + github.com/prometheus/client_model v0.2.0 // indirect + github.com/prometheus/common v0.32.1 // indirect + github.com/prometheus/procfs v0.7.3 // indirect + github.com/spf13/pflag v1.0.5 // indirect + go.uber.org/atomic v1.7.0 
// indirect + go.uber.org/multierr v1.6.0 // indirect + go.uber.org/zap v1.21.0 // indirect + golang.org/x/crypto v0.0.0-20220315160706-3147a52a75dd // indirect + golang.org/x/net v0.0.0-20220722155237-a158d28d115b // indirect + golang.org/x/oauth2 v0.0.0-20211104180415-d3ed0bb246c8 // indirect + golang.org/x/sys v0.0.0-20220722155257-8c9f86f7a55f // indirect + golang.org/x/term v0.0.0-20210927222741-03fcf44c2211 // indirect + golang.org/x/text v0.3.7 // indirect + golang.org/x/time v0.0.0-20220609170525-579cf78fd858 // indirect + gomodules.xyz/jsonpatch/v2 v2.2.0 // indirect + google.golang.org/appengine v1.6.7 // indirect + google.golang.org/protobuf v1.28.0 // indirect + gopkg.in/inf.v0 v0.9.1 // indirect + gopkg.in/yaml.v2 v2.4.0 // indirect + gopkg.in/yaml.v3 v3.0.1 // indirect + k8s.io/apiextensions-apiserver v0.25.0 // indirect + k8s.io/component-base v0.25.0 // indirect + k8s.io/klog/v2 v2.70.1 // indirect + k8s.io/kube-openapi v0.0.0-20220803162953-67bda5d908f1 // indirect + k8s.io/utils v0.0.0-20220728103510-ee6ede2d64ed // indirect + sigs.k8s.io/json v0.0.0-20220713155537-f223a00ba0e2 // indirect + sigs.k8s.io/structured-merge-diff/v4 v4.2.3 // indirect + sigs.k8s.io/yaml v1.3.0 // indirect ) diff --git a/hack/boilerplate.go.txt b/hack/boilerplate.go.txt index 767efde..65b8622 100644 --- a/hack/boilerplate.go.txt +++ b/hack/boilerplate.go.txt @@ -1,5 +1,5 @@ /* - +Copyright 2023. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. diff --git a/main.go b/main.go index 88c58f6..0ae8782 100644 --- a/main.go +++ b/main.go @@ -1,5 +1,5 @@ /* - +Copyright 2023. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -20,16 +20,20 @@ import ( "flag" "os" + // Import all Kubernetes client auth plugins (e.g. Azure, GCP, OIDC, etc.) + // to ensure that exec-entrypoint and run can make use of them. 
+ _ "k8s.io/client-go/plugin/pkg/client/auth" + "k8s.io/apimachinery/pkg/runtime" + utilruntime "k8s.io/apimachinery/pkg/util/runtime" clientgoscheme "k8s.io/client-go/kubernetes/scheme" - _ "k8s.io/client-go/plugin/pkg/client/auth/gcp" ctrl "sigs.k8s.io/controller-runtime" + "sigs.k8s.io/controller-runtime/pkg/healthz" "sigs.k8s.io/controller-runtime/pkg/log/zap" - formoldesmojimfrv1alpha1 "github.com/desmo999r/formol/api/v1alpha1" formolv1alpha1 "github.com/desmo999r/formol/api/v1alpha1" "github.com/desmo999r/formol/controllers" - // +kubebuilder:scaffold:imports + //+kubebuilder:scaffold:imports ) var ( @@ -38,30 +42,47 @@ var ( ) func init() { - _ = clientgoscheme.AddToScheme(scheme) + utilruntime.Must(clientgoscheme.AddToScheme(scheme)) - _ = formolv1alpha1.AddToScheme(scheme) - _ = formoldesmojimfrv1alpha1.AddToScheme(scheme) - // +kubebuilder:scaffold:scheme + utilruntime.Must(formolv1alpha1.AddToScheme(scheme)) + //+kubebuilder:scaffold:scheme } func main() { var metricsAddr string var enableLeaderElection bool - flag.StringVar(&metricsAddr, "metrics-addr", ":8080", "The address the metric endpoint binds to.") - flag.BoolVar(&enableLeaderElection, "enable-leader-election", false, + var probeAddr string + flag.StringVar(&metricsAddr, "metrics-bind-address", ":8080", "The address the metric endpoint binds to.") + flag.StringVar(&probeAddr, "health-probe-bind-address", ":8081", "The address the probe endpoint binds to.") + flag.BoolVar(&enableLeaderElection, "leader-elect", false, "Enable leader election for controller manager. 
"+ "Enabling this will ensure there is only one active controller manager.") + opts := zap.Options{ + Development: true, + } + opts.BindFlags(flag.CommandLine) flag.Parse() - ctrl.SetLogger(zap.New(zap.UseDevMode(true))) + ctrl.SetLogger(zap.New(zap.UseFlagOptions(&opts))) mgr, err := ctrl.NewManager(ctrl.GetConfigOrDie(), ctrl.Options{ - Scheme: scheme, - MetricsBindAddress: metricsAddr, - Port: 9443, - LeaderElection: enableLeaderElection, - LeaderElectionID: "6846258d.desmojim.fr", + Scheme: scheme, + MetricsBindAddress: metricsAddr, + Port: 9443, + HealthProbeBindAddress: probeAddr, + LeaderElection: enableLeaderElection, + LeaderElectionID: "6846258d.desmojim.fr", + // LeaderElectionReleaseOnCancel defines if the leader should step down voluntarily + // when the Manager ends. This requires the binary to immediately end when the + // Manager is stopped, otherwise, this setting is unsafe. Setting this significantly + // speeds up voluntary leader transitions as the new leader don't have to wait + // LeaseDuration time first. + // + // In the default scaffold provided, the program ends immediately after + // the manager stops, so would be fine to enable this option. However, + // if you are doing or is intended to do any operation such as perform cleanups + // after the manager stops then its usage might be unsafe. 
+ // LeaderElectionReleaseOnCancel: true, }) if err != nil { setupLog.Error(err, "unable to start manager") @@ -70,7 +91,6 @@ func main() { if err = (&controllers.BackupConfigurationReconciler{ Client: mgr.GetClient(), - Log: ctrl.Log.WithName("controllers").WithName("BackupConfiguration"), Scheme: mgr.GetScheme(), }).SetupWithManager(mgr); err != nil { setupLog.Error(err, "unable to create controller", "controller", "BackupConfiguration") @@ -78,7 +98,6 @@ func main() { } if err = (&controllers.BackupSessionReconciler{ Client: mgr.GetClient(), - Log: ctrl.Log.WithName("controllers").WithName("BackupSession"), Scheme: mgr.GetScheme(), }).SetupWithManager(mgr); err != nil { setupLog.Error(err, "unable to create controller", "controller", "BackupSession") @@ -86,27 +105,21 @@ func main() { } if err = (&controllers.RestoreSessionReconciler{ Client: mgr.GetClient(), - Log: ctrl.Log.WithName("controllers").WithName("RestoreSession"), Scheme: mgr.GetScheme(), }).SetupWithManager(mgr); err != nil { setupLog.Error(err, "unable to create controller", "controller", "RestoreSession") os.Exit(1) } - // if os.Getenv("ENABLE_WEBHOOKS") != "false" { - // if err = (&formolv1alpha1.BackupSession{}).SetupWebhookWithManager(mgr); err != nil { - // setupLog.Error(err, "unable to create webhook", "webhook", "BackupSession") - // os.Exit(1) - // } - // if err = (&formolv1alpha1.BackupConfiguration{}).SetupWebhookWithManager(mgr); err != nil { - // setupLog.Error(err, "unable to create webhook", "webhook", "BackupConfiguration") - // os.Exit(1) - // } - // if err = (&formoldesmojimfrv1alpha1.Function{}).SetupWebhookWithManager(mgr); err != nil { - // setupLog.Error(err, "unable to create webhook", "webhook", "Function") - // os.Exit(1) - // } - // } - // +kubebuilder:scaffold:builder + //+kubebuilder:scaffold:builder + + if err := mgr.AddHealthzCheck("healthz", healthz.Ping); err != nil { + setupLog.Error(err, "unable to set up health check") + os.Exit(1) + } + if err := 
mgr.AddReadyzCheck("readyz", healthz.Ping); err != nil { + setupLog.Error(err, "unable to set up ready check") + os.Exit(1) + } setupLog.Info("starting manager") if err := mgr.Start(ctrl.SetupSignalHandler()); err != nil { diff --git a/pkg/rbac/backupconfiguration.go b/pkg/rbac/backupconfiguration.go deleted file mode 100644 index a3d729d..0000000 --- a/pkg/rbac/backupconfiguration.go +++ /dev/null @@ -1,438 +0,0 @@ -package rbac - -import ( - "context" - corev1 "k8s.io/api/core/v1" - rbacv1 "k8s.io/api/rbac/v1" - "k8s.io/apimachinery/pkg/api/errors" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "sigs.k8s.io/controller-runtime/pkg/client" -) - -const ( - formolRole = "formol-sidecar-role" - backupListenerRole = "backup-listener-role" - backupListenerRoleBinding = "backup-listener-rolebinding" - backupSessionCreatorSA = "backupsession-creator" - backupSessionCreatorRole = "backupsession-creator-role" - backupSessionCreatorRoleBinding = "backupsession-creator-rolebinding" - backupSessionStatusUpdaterRole = "backupsession-statusupdater-role" - backupSessionStatusUpdaterRoleBinding = "backupsession-statusupdater-rolebinding" -) - -func DeleteBackupSessionCreatorRBAC(cl client.Client, namespace string) error { - serviceaccount := &corev1.ServiceAccount{} - if err := cl.Get(context.Background(), client.ObjectKey{ - Namespace: namespace, - Name: backupSessionCreatorSA, - }, serviceaccount); err == nil { - if err = cl.Delete(context.Background(), serviceaccount); err != nil { - return err - } - } - role := &rbacv1.Role{} - if err := cl.Get(context.Background(), client.ObjectKey{ - Namespace: namespace, - Name: backupSessionCreatorRole, - }, role); err == nil { - if err = cl.Delete(context.Background(), role); err != nil { - return err - } - } - - rolebinding := &rbacv1.RoleBinding{} - if err := cl.Get(context.Background(), client.ObjectKey{ - Namespace: namespace, - Name: backupSessionCreatorRoleBinding, - }, rolebinding); err == nil { - if err = 
cl.Delete(context.Background(), rolebinding); err != nil { - return err - } - } - - return nil -} - -func CreateBackupSessionCreatorRBAC(cl client.Client, namespace string) error { - serviceaccount := &corev1.ServiceAccount{ - ObjectMeta: metav1.ObjectMeta{ - Namespace: namespace, - Name: backupSessionCreatorSA, - }, - } - if err := cl.Get(context.Background(), client.ObjectKey{ - Namespace: namespace, - Name: backupSessionCreatorSA, - }, serviceaccount); err != nil && errors.IsNotFound(err) { - if err = cl.Create(context.Background(), serviceaccount); err != nil { - return err - } - } - role := &rbacv1.Role{ - ObjectMeta: metav1.ObjectMeta{ - Namespace: namespace, - Name: backupSessionCreatorRole, - }, - Rules: []rbacv1.PolicyRule{ - rbacv1.PolicyRule{ - Verbs: []string{"get", "list", "watch", "create", "update", "patch", "delete"}, - APIGroups: []string{"formol.desmojim.fr"}, - Resources: []string{"backupsessions"}, - }, - rbacv1.PolicyRule{ - Verbs: []string{"get", "list", "watch", "create", "update", "patch", "delete"}, - APIGroups: []string{"formol.desmojim.fr"}, - Resources: []string{"backupsessions/status"}, - }, - rbacv1.PolicyRule{ - Verbs: []string{"get", "list", "watch"}, - APIGroups: []string{"formol.desmojim.fr"}, - Resources: []string{"backupconfigurations"}, - }, - }, - } - if err := cl.Get(context.Background(), client.ObjectKey{ - Namespace: namespace, - Name: backupSessionCreatorRole, - }, role); err != nil && errors.IsNotFound(err) { - if err = cl.Create(context.Background(), role); err != nil { - return err - } - } - rolebinding := &rbacv1.RoleBinding{ - ObjectMeta: metav1.ObjectMeta{ - Namespace: namespace, - Name: backupSessionCreatorRoleBinding, - }, - Subjects: []rbacv1.Subject{ - rbacv1.Subject{ - Kind: "ServiceAccount", - Name: backupSessionCreatorSA, - }, - }, - RoleRef: rbacv1.RoleRef{ - APIGroup: "rbac.authorization.k8s.io", - Kind: "Role", - Name: backupSessionCreatorRole, - }, - } - if err := cl.Get(context.Background(), 
client.ObjectKey{ - Namespace: namespace, - Name: backupSessionCreatorRoleBinding, - }, rolebinding); err != nil && errors.IsNotFound(err) { - if err = cl.Create(context.Background(), rolebinding); err != nil { - return err - } - } - - return nil -} - -func DeleteBackupSessionListenerRBAC(cl client.Client, saName string, namespace string) error { - if saName == "" { - saName = "default" - } - sa := &corev1.ServiceAccount{} - if err := cl.Get(context.Background(), client.ObjectKey{ - Namespace: namespace, - Name: saName, - }, sa); err != nil { - return err - } - - role := &rbacv1.Role{} - if err := cl.Get(context.Background(), client.ObjectKey{ - Namespace: namespace, - Name: backupListenerRole, - }, role); err == nil { - if err = cl.Delete(context.Background(), role); err != nil { - return err - } - } - - rolebinding := &rbacv1.RoleBinding{} - if err := cl.Get(context.Background(), client.ObjectKey{ - Namespace: namespace, - Name: backupListenerRoleBinding, - }, rolebinding); err == nil { - if err = cl.Delete(context.Background(), rolebinding); err != nil { - return err - } - } - - return nil -} - -func DeleteFormolRBAC(cl client.Client, saName string, namespace string) error { - if saName == "" { - saName = "default" - } - formolRoleBinding := namespace + "-" + saName + "-formol-sidecar-rolebinding" - clusterRoleBinding := &rbacv1.ClusterRoleBinding{ - ObjectMeta: metav1.ObjectMeta{ - Name: formolRoleBinding, - }, - Subjects: []rbacv1.Subject{ - rbacv1.Subject{ - Kind: "ServiceAccount", - Namespace: namespace, - Name: saName, - }, - }, - RoleRef: rbacv1.RoleRef{ - APIGroup: "rbac.authorization.k8s.io", - Kind: "ClusterRole", - Name: formolRole, - }, - } - if err := cl.Delete(context.Background(), clusterRoleBinding); err != nil { - return client.IgnoreNotFound(err) - } - return nil -} - -func CreateFormolRBAC(cl client.Client, saName string, namespace string) error { - if saName == "" { - saName = "default" - } - sa := &corev1.ServiceAccount{} - if err := 
cl.Get(context.Background(), client.ObjectKey{ - Namespace: namespace, - Name: saName, - }, sa); err != nil { - return err - } - clusterRole := &rbacv1.ClusterRole{ - ObjectMeta: metav1.ObjectMeta{ - Name: formolRole, - }, - Rules: []rbacv1.PolicyRule{ - rbacv1.PolicyRule{ - Verbs: []string{"*"}, - APIGroups: []string{"formol.desmojim.fr"}, - Resources: []string{"*"}, - //APIGroups: []string{"formol.desmojim.fr"}, - //Resources: []string{"restoresessions", "backupsessions", "backupconfigurations"}, - }, - rbacv1.PolicyRule{ - Verbs: []string{"get", "list", "watch"}, - APIGroups: []string{""}, - Resources: []string{"pods", "secrets", "configmaps"}, - }, - rbacv1.PolicyRule{ - Verbs: []string{"get", "list", "watch"}, - APIGroups: []string{"apps"}, - Resources: []string{"deployments", "replicasets"}, - }, - }, - } - if err := cl.Get(context.Background(), client.ObjectKey{ - Name: formolRole, - }, clusterRole); err != nil && errors.IsNotFound(err) { - if err = cl.Create(context.Background(), clusterRole); err != nil { - return err - } - } - formolRoleBinding := namespace + "-" + saName + "-formol-rolebinding" - clusterRoleBinding := &rbacv1.ClusterRoleBinding{ - ObjectMeta: metav1.ObjectMeta{ - Name: formolRoleBinding, - }, - Subjects: []rbacv1.Subject{ - rbacv1.Subject{ - Kind: "ServiceAccount", - Namespace: namespace, - Name: saName, - }, - }, - RoleRef: rbacv1.RoleRef{ - APIGroup: "rbac.authorization.k8s.io", - Kind: "ClusterRole", - Name: formolRole, - }, - } - if err := cl.Get(context.Background(), client.ObjectKey{ - Name: formolRoleBinding, - }, clusterRoleBinding); err != nil && errors.IsNotFound(err) { - if err = cl.Create(context.Background(), clusterRoleBinding); err != nil { - return err - } - } - return nil -} - -func CreateBackupSessionListenerRBAC(cl client.Client, saName string, namespace string) error { - if saName == "" { - saName = "default" - } - sa := &corev1.ServiceAccount{} - if err := cl.Get(context.Background(), client.ObjectKey{ - Namespace: 
namespace, - Name: saName, - }, sa); err != nil { - return err - } - role := &rbacv1.Role{ - ObjectMeta: metav1.ObjectMeta{ - Namespace: namespace, - Name: backupListenerRole, - }, - Rules: []rbacv1.PolicyRule{ - rbacv1.PolicyRule{ - Verbs: []string{"get", "list", "watch"}, - APIGroups: []string{""}, - Resources: []string{"pods", "secrets", "configmaps"}, - }, - rbacv1.PolicyRule{ - Verbs: []string{"get", "list", "watch"}, - APIGroups: []string{"apps"}, - Resources: []string{"deployments", "replicasets"}, - }, - rbacv1.PolicyRule{ - Verbs: []string{"get", "list", "watch"}, - APIGroups: []string{"formol.desmojim.fr"}, - Resources: []string{"restoresessions", "backupsessions", "backupconfigurations"}, - }, - rbacv1.PolicyRule{ - Verbs: []string{"update", "delete"}, - APIGroups: []string{"formol.desmojim.fr"}, - Resources: []string{"restoresessions", "backupsessions"}, - }, - }, - } - if err := cl.Get(context.Background(), client.ObjectKey{ - Namespace: namespace, - Name: backupListenerRole, - }, role); err != nil && errors.IsNotFound(err) { - if err = cl.Create(context.Background(), role); err != nil { - return err - } - } - rolebinding := &rbacv1.RoleBinding{ - ObjectMeta: metav1.ObjectMeta{ - Namespace: namespace, - Name: backupListenerRoleBinding, - }, - Subjects: []rbacv1.Subject{ - rbacv1.Subject{ - Kind: "ServiceAccount", - Name: saName, - }, - }, - RoleRef: rbacv1.RoleRef{ - APIGroup: "rbac.authorization.k8s.io", - Kind: "Role", - Name: backupListenerRole, - }, - } - if err := cl.Get(context.Background(), client.ObjectKey{ - Namespace: namespace, - Name: backupListenerRoleBinding, - }, rolebinding); err != nil && errors.IsNotFound(err) { - if err = cl.Create(context.Background(), rolebinding); err != nil { - return err - } - } - - return nil -} - -func DeleteBackupSessionStatusUpdaterRBAC(cl client.Client, saName string, namespace string) error { - if saName == "" { - saName = "default" - } - sa := &corev1.ServiceAccount{} - if err := 
cl.Get(context.Background(), client.ObjectKey{ - Namespace: namespace, - Name: saName, - }, sa); err != nil { - return err - } - - role := &rbacv1.Role{} - if err := cl.Get(context.Background(), client.ObjectKey{ - Namespace: namespace, - Name: backupSessionStatusUpdaterRole, - }, role); err == nil { - if err = cl.Delete(context.Background(), role); err != nil { - return err - } - } - - rolebinding := &rbacv1.RoleBinding{} - if err := cl.Get(context.Background(), client.ObjectKey{ - Namespace: namespace, - Name: backupSessionStatusUpdaterRoleBinding, - }, rolebinding); err == nil { - if err = cl.Delete(context.Background(), rolebinding); err != nil { - return err - } - } - - return nil -} - -func CreateBackupSessionStatusUpdaterRBAC(cl client.Client, saName string, namespace string) error { - if saName == "" { - saName = "default" - } - sa := &corev1.ServiceAccount{} - if err := cl.Get(context.Background(), client.ObjectKey{ - Namespace: namespace, - Name: saName, - }, sa); err != nil { - return err - } - role := &rbacv1.Role{ - ObjectMeta: metav1.ObjectMeta{ - Namespace: namespace, - Name: backupSessionStatusUpdaterRole, - }, - Rules: []rbacv1.PolicyRule{ - rbacv1.PolicyRule{ - Verbs: []string{"get", "list", "watch", "patch", "update"}, - APIGroups: []string{"formol.desmojim.fr"}, - Resources: []string{"restoresessions/status", "backupsessions/status"}, - }, - rbacv1.PolicyRule{ - Verbs: []string{"get", "list", "watch"}, - APIGroups: []string{"formol.desmojim.fr"}, - Resources: []string{"restoresessions", "backupsessions"}, - }, - }, - } - if err := cl.Get(context.Background(), client.ObjectKey{ - Namespace: namespace, - Name: backupListenerRole, - }, role); err != nil && errors.IsNotFound(err) { - if err = cl.Create(context.Background(), role); err != nil { - return err - } - } - rolebinding := &rbacv1.RoleBinding{ - ObjectMeta: metav1.ObjectMeta{ - Namespace: namespace, - Name: backupListenerRoleBinding, - }, - Subjects: []rbacv1.Subject{ - rbacv1.Subject{ - 
Kind: "ServiceAccount", - Name: saName, - }, - }, - RoleRef: rbacv1.RoleRef{ - APIGroup: "rbac.authorization.k8s.io", - Kind: "Role", - Name: backupListenerRole, - }, - } - if err := cl.Get(context.Background(), client.ObjectKey{ - Namespace: namespace, - Name: backupListenerRoleBinding, - }, rolebinding); err != nil && errors.IsNotFound(err) { - if err = cl.Create(context.Background(), rolebinding); err != nil { - return err - } - } - return nil -} diff --git a/pkg/utils/.root.go.un~ b/pkg/utils/.root.go.un~ new file mode 100644 index 0000000000000000000000000000000000000000..d01deb2076c97073850e7880e7d159e40f367722 GIT binary patch literal 2166 zcmeHIPiqrF6yGMbc0A}w6oqzNtt60cDMgf&9MW`!(qh6+no>lWO&hV`v5AArR+A*RXTm~Wbk)}%A&ykKx z3$Bp9@Orbh7jE0L2AkU(6zR1Jl{^#%3%O$~*v>8wHbF)A!0b68UFp$sg}QzW2-yi@ zx1dLJXylxOSQITHtKAtS+~}8{41OXM9cQN0D&5yWR#ErMwlc7DoNWVy36}m9BBtzoZf$s zoC4in0%N_p#@f|d&19^-Y_7ML7M0(tLlrY?mf6f8l=>p7(~KHb#@6aqVs{3jtEOBB z#4%2Y`KL#OG%^1hC{ahcD}`UCH~**#6J6v78#^Ee8!ma^K=INbM=3E_F|kcEJ^T_a_$H$c1n4IrBRs))0v;fLA>kpx zGWG#QXO&24$BAeSbijI!8xL*V?j?Fmc3vQh0Pm92I=&YN$PNNZ)n*2^v+k8Ot$>kurZ>;H37}~ G^6VENZxDq5 literal 0 HcmV?d00001 diff --git a/pkg/utils/root.go b/pkg/utils/root.go index 565b692..027ca5b 100644 --- a/pkg/utils/root.go +++ b/pkg/utils/root.go @@ -1,10 +1,8 @@ package utils import ( - "fmt" formolv1alpha1 "github.com/desmo999r/formol/api/v1alpha1" corev1 "k8s.io/api/core/v1" - "strings" ) func ContainsString(slice []string, s string) bool { @@ -29,29 +27,5 @@ func RemoveString(slice []string, s string) (result []string) { func ConfigureResticEnvVar(backupConf *formolv1alpha1.BackupConfiguration, repo *formolv1alpha1.Repo) []corev1.EnvVar { env := []corev1.EnvVar{} // S3 backing storage - if (formolv1alpha1.S3{}) != repo.Spec.Backend.S3 { - url := fmt.Sprintf("s3:http://%s/%s/%s-%s", repo.Spec.Backend.S3.Server, repo.Spec.Backend.S3.Bucket, strings.ToUpper(backupConf.Namespace), strings.ToLower(backupConf.Name)) - env = append(env, corev1.EnvVar{ - Name: 
"RESTIC_REPOSITORY", - Value: url, - }) - for _, key := range []string{ - "AWS_ACCESS_KEY_ID", - "AWS_SECRET_ACCESS_KEY", - "RESTIC_PASSWORD", - } { - env = append(env, corev1.EnvVar{ - Name: key, - ValueFrom: &corev1.EnvVarSource{ - SecretKeyRef: &corev1.SecretKeySelector{ - LocalObjectReference: corev1.LocalObjectReference{ - Name: repo.Spec.RepositorySecrets, - }, - Key: key, - }, - }, - }) - } - } return env } diff --git a/pkg/utils/root.go~ b/pkg/utils/root.go~ new file mode 100644 index 0000000..dd17272 --- /dev/null +++ b/pkg/utils/root.go~ @@ -0,0 +1,33 @@ +package utils + +import ( + "fmt" + formolv1alpha1 "github.com/desmo999r/formol/api/v1alpha1" + corev1 "k8s.io/api/core/v1" + "strings" +) + +func ContainsString(slice []string, s string) bool { + for _, item := range slice { + if item == s { + return true + } + } + return false +} + +func RemoveString(slice []string, s string) (result []string) { + for _, item := range slice { + if item == s { + continue + } + result = append(result, item) + } + return +} + +func ConfigureResticEnvVar(backupConf *formolv1alpha1.BackupConfiguration, repo *formolv1alpha1.Repo) []corev1.EnvVar { + env := []corev1.EnvVar{} + // S3 backing storage + return env +} diff --git a/test/.00-setup.yaml.un~ b/test/.00-setup.yaml.un~ new file mode 100644 index 0000000000000000000000000000000000000000..343bbecbbf34a12beb19474c4bf80965c6dbe956 GIT binary patch literal 12972 zcmeI2%WoS+9LJqBp%0q$-BMDiD?mLU?AoS|Vsk(tPlv>5oOu1Kk4WBdGee|1| z-^{3%{Y9=m>HHDupW1mp_xvX>-+lG=OC$Y_?|=IJ?&aUgpZ=|lXKsIXa42Ev}imN1tNKpmBgHJGves)N@S9ql?|3?q# z+aKl`j!@{Ir|7+K{VCF~Ng0xE?GdW^iZqmXkAVXykmdr2C;1-*RosjuCW-n}KfURk2#$vtJ zPAL_w=9)e^ka|*R9Vkk2fMfGfs*H%KOWfsEv|-Ny?Wk0jj%9Nj4yMJ5dRayb-AA+aG_+!1Z^)@%hI$ z^i_a6--$c7q3INSt1V5}Y^TO&D}aU7X)XYn=6@tS?uo}NyPl-ySA=({+}7{?{FgY| z&C-P&Dd2T*;uUke#_uWt=`<-90L+jwkP;eq%VU=CKbN<-L8S=oUlG~@U0fvnkrWYk zYFESw%U|GghLj5!E|JQmXzr1Nl#jw~N914eEMV#mn`ydUUm=r}Lz8}P!W-SFBzJ0k42=UCwL|hvlqSH2?EQ`LC=Q= 
zSUASQ1t0;Bjf@f=cS}$u!F%>$p?B_qq35#$tBXR*z!B`&98|IsJQIl7i{04=vGdUZ z)zd<&fD!0e+9%Noo(aV4g^qUcu$bvDs&x8HpBzYy2#o?okVEn(kB7ktfl4oOr}vZd z!2uLMV2dUKB4#>6_#;COUI9}q{t4~L3VcN8BlNqNcrvoOfEPJMQjI(DfJa5X57ro2sp{*{qwiHW0C#tA;~M z1x?+lSFF{Wu2ZM`hdOsFivQIA>);|s)@xy+M1u3 z+bk9`TSh$DE={CI)3sb;iSActrW1Oun8?-UCX8q;z7(rQ)7GX{%E#8U{3hPBUdm6m z%K7Pbv7K1g^3hf?pQ!4#I$nyids@YK(!(>wf*RG>yBo7=oj`1@##Ulk*UTkdbw$nQ yrxLDOHD)sTjGA9j)$&whvNZXMyHK0DrEQH{l|n4Zp>X#oN;n**N{Y%qzxxliAamsa literal 0 HcmV?d00001 diff --git a/test/.01-deployment.yaml.un~ b/test/.01-deployment.yaml.un~ new file mode 100644 index 0000000000000000000000000000000000000000..9024c4b7a7103a605a5045a481e276da23514416 GIT binary patch literal 1928 zcmWH`%$*;a=aT=FfvKJUqHdPQH$^+&k8d~Li#cTfbH!JSGJC<#s|-EGxLk!97#L!J zSXu!JGIJBtQ>_&8(lhfatPJ%GP4tX_(vm>T55$Z>%mTz90HR@-A^Cc5Jqt*dnIZlH zNEQf~AQYGcNr}Mt|A7E(6ii^0rg8`*U+>WcM+YOrR|%lmpr98A;%FcSMFkrqw&8MO zK(QzwW-x68M;|06K>`o}ib{|g4iI-V`aq#TO8|_%tKjHkf<&Jv&@xc;fpRJ+YD8hV zH7_w2m|2Tb)02x*Q=oANiYr(i1u}s-5|MvL;|>%eGzGwtumT)+43M}3Wj`q(hQ%Ge zk`@|+pcq32px6YJz=|Nj(cA|L1zG}N4Aw#GGH?Nj9)qf|+y|{IQ}UCuQ;YO6^YxKy JP1DBbs{m=5U>yJe literal 0 HcmV?d00001 diff --git a/test/.02-backupconf.yaml.un~ b/test/.02-backupconf.yaml.un~ new file mode 100644 index 0000000000000000000000000000000000000000..9615446d689186f3bf6b31290ccdaddd340f89b0 GIT binary patch literal 9847 zcmeI2O^6&t6o4n2-Hj%je>E{NNtq}Ta12qS?A!#sDG2IbrR}NNZD#t%p5Be)O%OeL z@!&-S@g&B-ix)vbkAlJ?3VIU}JPBq`3hVo-t7dx!GYm8Itsc}=S66pU&->nc^{RT- zqWHZRk|)#O8w>wF`ugq@Z(rW|`K`{4?D0>&y7|#}PoKT_@;~4F_}ibo?o*#O8jVX* zPAiq`EbT`{nh#p$xEV)DluF-0DJP}ulX9mNgxZxx`0ImjF4DWyc<~2%MHb4+eA0DT z$|9{@5i=AGFEwZ&-2Qvl;Ak{kW1UbUK^Cp)ydR}WOKn}XZU?9(F0%qK56)?$_FBMQ90zw)zSV`xMZlRr3Q~HQ zFdG&gu$F0C0VLOWuX7rywH9~}RpH?`Oji!x8Q>*BEYUJqkCIoKZJqVfL9CO)0tDEB z$}<8JfMh*>(nATgiU7Pi2_XDb5#;N-n1~TRB_~ zFNN7}*>EhwD1FDJ;G-^#g6nqLRzS)9Mm#>Ji5Y7&>9os=;DjA#%^eHA_pA80;BF}o zNWrrXg0ElbtlyH>JtfSh4XkkxC529cB-G6`k^Z|z`>#jEMpGwS(qqCgX>94suBVfy 
zvGkrBy>DdPPG@gmr8-d(^rBZKsu%gDwvim0F;QD;gkQ`r=Lh%@fD-1BX>90&Nj9`n z(j8jS`Lexd4I{jTnRGKL`Yjs=;KZJX6Bu%%j{{C4^VD;5lWh#d z`E*;yX|o*^fu#p67Ftdq2|bhC`Aop6WyX3MdLK{nx?#|a@wsP%Vxy(5RPedYM)!Q6 zbYANxVW*W@Ycy%5AF0n&j!ERqc}e6x8^?E6I?1)!AT)_wCe%zKci6dNm7v?@kgJWb zr1s&{Dr4?y8iJosB52d7A;Y63Ljr1*qT2{@56q(Lf@}CN>NGK9jV4VxAvLb$b{_CU zb1Z)Nb(P|I>2T`j#sf-qwa!>g9Rgr4=oc+@O`3*pJBWJn8t!3f8y~%{b=RFZ!u4^Q z6dRm*%w00jLC%Rs>^$mL+0t)>U9n+Sz2&RA$ba*J~=WSfM%*St;j&`?R85^nFjW1uaRh)&6up?-uV$4OidA^MKyJ&?MC z>#RVL1>`x}o*Y`JQ^KF#{mYQsUy{QgSqJi}26C5_isE2hHjLW3AE&FUtNE2qn#XA` z>_x3!Q0TsUApkl4+CZv|K$W3P+ml2kbxQcl_g@;4izP|73*$)M@}DX;gjSu9=LDt4 zS0}NNvE{!&Zy0<_)8o@|Rtzk@>g)$fyw2krQE{CO@xCt%KAWUW<9#}d!S~09{{aog B-&z0w literal 0 HcmV?d00001 diff --git a/test/00-setup.yaml b/test/00-setup.yaml index f6d11b1..665603d 100644 --- a/test/00-setup.yaml +++ b/test/00-setup.yaml @@ -6,6 +6,15 @@ metadata: --- apiVersion: v1 kind: Secret +metadata: + name: regcred + namespace: demo +type: kubernetes.io/dockerconfigjson +data: + .dockerconfigjson: eyJhdXRocyI6eyJodHRwczovL2luZGV4LmRvY2tlci5pby92MS8iOnsidXNlcm5hbWUiOiJkZXNtbzk5OXIiLCJwYXNzd29yZCI6IlU5QXNlVGF5cUY5UlJCd0l2Q1k0IiwiZW1haWwiOiJqZWFubWFyYy5qaW0uYW5kcmVAZ21haWwuY29tIiwiYXV0aCI6IlpHVnpiVzg1T1RseU9sVTVRWE5sVkdGNWNVWTVVbEpDZDBsMlExazAifX19 +--- +apiVersion: v1 +kind: Secret metadata: namespace: demo name: demo-chap-secret @@ -35,16 +44,9 @@ spec: storage: 50Mi accessModes: - ReadWriteOnce - iscsi: - targetPortal: 192.168.1.159 - iqn: iqn.2020-08.raid5:demo - lun: 1 - fsType: ext4 - readOnly: false - chapAuthDiscovery: true - chapAuthSession: true - secretRef: - name: demo-chap-secret + hostPath: + path: /tmp/demo + type: DirectoryOrCreate --- apiVersion: v1 kind: PersistentVolumeClaim @@ -71,6 +73,16 @@ data: --- apiVersion: formol.desmojim.fr/v1alpha1 kind: Repo +metadata: + name: repo-empty + namespace: demo +spec: + backend: + nfs: "toto" + repositorySecrets: secret-minio +--- +apiVersion: formol.desmojim.fr/v1alpha1 +kind: Repo metadata: name: repo-minio 
namespace: demo diff --git a/test/00-setup.yaml~ b/test/00-setup.yaml~ new file mode 100644 index 0000000..b62e6b9 --- /dev/null +++ b/test/00-setup.yaml~ @@ -0,0 +1,173 @@ +--- +apiVersion: v1 +kind: Namespace +metadata: + name: demo +--- +apiVersion: v1 +kind: Secret +metadata: + name: regcred + namespace: demo +type: kubernetes.io/dockerconfigjson +data: + .dockerconfigjson: eyJhdXRocyI6eyJodHRwczovL2luZGV4LmRvY2tlci5pby92MS8iOnsidXNlcm5hbWUiOiJkZXNtbzk5OXIiLCJwYXNzd29yZCI6IlU5QXNlVGF5cUY5UlJCd0l2Q1k0IiwiZW1haWwiOiJqZWFubWFyYy5qaW0uYW5kcmVAZ21haWwuY29tIiwiYXV0aCI6IlpHVnpiVzg1T1RseU9sVTVRWE5sVkdGNWNVWTVVbEpDZDBsMlExazAifX19 +--- +apiVersion: v1 +kind: Secret +metadata: + namespace: demo + name: demo-chap-secret +type: "kubernetes.io/iscsi-chap" +data: + discovery.sendtargets.auth.username: ZGVtbw== + discovery.sendtargets.auth.password: VHJtK1lZaXZvMUNZSGszcGFGVWMrcTdCMmdJPQo= + node.session.auth.username: ZGVtbw== + node.session.auth.password: VHJtK1lZaXZvMUNZSGszcGFGVWMrcTdCMmdJPQo= +--- +apiVersion: v1 +kind: Secret +metadata: + namespace: demo + name: with-envfrom-secret +data: + title: dmVyeXNlY3JldA== +--- +apiVersion: v1 +kind: PersistentVolume +metadata: + name: demo-pv + namespace: demo +spec: + storageClassName: manual + capacity: + storage: 50Mi + accessModes: + - ReadWriteOnce + hostPath: + path: /tmp/demo + type: DirectoryOrCreate +--- +apiVersion: v1 +kind: PersistentVolumeClaim +metadata: + name: demo-pvc + namespace: demo +spec: + storageClassName: manual + accessModes: + - ReadWriteOnce + resources: + requests: + storage: 50Mi +--- +apiVersion: v1 +kind: Secret +metadata: + name: secret-minio + namespace: demo +data: + RESTIC_PASSWORD: bHIyOXhtOTU= + AWS_ACCESS_KEY_ID: OWFTSXZBSEVzWlNVMmkyTU9zVGxWSk1lL1NjPQ== + AWS_SECRET_ACCESS_KEY: WVN5ck9ncVllcjBWNFNLdlVOcmx2OGhjTllhZGZuN2xaNjBIaXRlL3djWT0= +--- +apiVersion: formol.desmojim.fr/v1alpha1 +kind: Repo +metadata: + name: repo-empty + namespace: demo +spec: + backend: + repositorySecrets: 
secret-minio +--- +apiVersion: formol.desmojim.fr/v1alpha1 +kind: Repo +metadata: + name: repo-minio + namespace: demo +spec: + backend: + s3: + server: raid5.desmojim.fr:9000 + bucket: testbucket2 + repositorySecrets: secret-minio +--- +apiVersion: formol.desmojim.fr/v1alpha1 +kind: Function +metadata: + name: restore-pg + namespace: demo +spec: + name: restore-pg + image: desmo999r/formolcli:latest + args: ["postgres", "restore", "--hostname", $(PGHOST), "--database", $(PGDATABASE), "--username", $(PGUSER), "--password", $(PGPASSWD), "--file", "/output/backup-pg.sql"] + env: + - name: PGHOST + value: postgres + - name: PGDATABASE + value: demopostgres + - name: PGUSER + value: demopostgres + - name: PGPASSWD + value: password123! +--- +apiVersion: formol.desmojim.fr/v1alpha1 +kind: Function +metadata: + name: with-envfrom + namespace: demo +spec: + name: with-envfrom + command: ["touch", $(title)] + envFrom: + - secretRef: + name: with-envfrom-secret +--- +apiVersion: formol.desmojim.fr/v1alpha1 +kind: Function +metadata: + name: with-env + namespace: demo +spec: + name: with-env + command: ["touch", $(TESTFILE)] + env: + - name: TESTFILE + value: /data/testfile +--- +apiVersion: formol.desmojim.fr/v1alpha1 +kind: Function +metadata: + name: backup-pg + namespace: demo +spec: + name: backup-pg + image: desmo999r/formolcli:latest + args: ["postgres", "backup", "--hostname", $(PGHOST), "--database", $(PGDATABASE), "--username", $(PGUSER), "--password", $(PGPASSWD), "--file", "/output/backup-pg.sql"] + env: + - name: PGHOST + value: postgres + - name: PGDATABASE + value: demopostgres + - name: PGUSER + value: demopostgres + - name: PGPASSWD + value: password123! 
+--- +apiVersion: formol.desmojim.fr/v1alpha1 +kind: Function +metadata: + name: maintenance-off + namespace: demo +spec: + name: maintenance-off + command: ["/bin/bash", "-c", "echo $(date +%Y/%m/%d-%H:%M:%S) maintenance-off >> /data/logs.txt"] +--- +apiVersion: formol.desmojim.fr/v1alpha1 +kind: Function +metadata: + name: maintenance-on + namespace: demo +spec: + name: maintenance-on + command: ["/bin/bash", "-c", "echo $(date +%Y/%m/%d-%H:%M:%S) maintenance-on >> /data/logs.txt"] diff --git a/test/01-deployment.yaml b/test/01-deployment.yaml index eff5dd1..a08040e 100644 --- a/test/01-deployment.yaml +++ b/test/01-deployment.yaml @@ -18,9 +18,11 @@ spec: labels: app: nginx spec: + imagePullSecrets: + - name: regcred containers: - name: nginx - image: nginx:1.14.2 + image: docker.io/nginx:1.23.3 ports: - containerPort: 80 volumeMounts: diff --git a/test/01-deployment.yaml~ b/test/01-deployment.yaml~ new file mode 100644 index 0000000..f6e9cc3 --- /dev/null +++ b/test/01-deployment.yaml~ @@ -0,0 +1,92 @@ +--- +apiVersion: apps/v1 +kind: Deployment +metadata: + name: nginx-deployment + namespace: demo + labels: + app: nginx +spec: + replicas: 1 + strategy: + type: Recreate + selector: + matchLabels: + app: nginx + template: + metadata: + labels: + app: nginx + spec: + imagePullSecrets: + - name: regcred + containers: + - name: nginx + image: nginx:1.14.2 + ports: + - containerPort: 80 + volumeMounts: + - name: demo-data + mountPath: /data + volumes: + - name: demo-data + persistentVolumeClaim: + claimName: demo-pvc +--- +apiVersion: v1 +kind: ConfigMap +metadata: + name: postgres-config-demo + namespace: demo + labels: + app: postgres +data: + POSTGRES_DB: demopostgres + POSTGRES_USER: demopostgres + POSTGRES_PASSWORD: password123! 
+--- +apiVersion: v1 +kind: Service +metadata: + name: postgres + namespace: demo + labels: + app: postgres +spec: + ports: + - port: 5432 + name: postgres + clusterIP: None + selector: + app: postgres +--- +apiVersion: apps/v1 +kind: StatefulSet +metadata: + name: postgres-demo + namespace: demo +spec: + serviceName: "postgres" + replicas: 1 + selector: + matchLabels: + app: postgres + template: + metadata: + labels: + app: postgres + spec: + containers: + - name: postgres + image: postgres:12 + envFrom: + - configMapRef: + name: postgres-config-demo + ports: + - containerPort: 5432 + name: postgredb + volumeMounts: + - name: postgredb + mountPath: /var/lib/postgresql/data + volumes: + - name: postgredb diff --git a/test/02-backupconf.yaml b/test/02-backupconf.yaml index f037c82..2ef6b15 100644 --- a/test/02-backupconf.yaml +++ b/test/02-backupconf.yaml @@ -6,31 +6,30 @@ metadata: namespace: demo spec: suspend: true - image: desmo999r/formolcli:latest + image: desmo999r/formolcli:0.3.2 repository: repo-minio schedule: "15 * * * *" - targets: - - kind: Sidecar - apiVersion: v1 - name: nginx-deployment - steps: - - name: maintenance-on - - name: with-env - - name: with-envfrom - - name: maintenance-off - finalize: true - volumeMounts: - - name: demo-data - mountPath: /data - paths: - - /data -# - kind: Job -# name: backup-pg -# steps: -# - name: backup-pg keep: last: 5 daily: 2 weekly: 2 monthly: 6 yearly: 3 + targets: + - backupType: Online + targetKind: Deployment + targetName: nginx-deployment + containers: + - name: nginx + steps: + - name: maintenance-on + - name: with-env + - name: with-envfrom + - name: maintenance-off + finalize: true + paths: + - /data +# - kind: Job +# name: backup-pg +# steps: +# - name: backup-pg diff --git a/test/02-backupconf.yaml~ b/test/02-backupconf.yaml~ new file mode 100644 index 0000000..d9d4d03 --- /dev/null +++ b/test/02-backupconf.yaml~ @@ -0,0 +1,35 @@ +--- +apiVersion: formol.desmojim.fr/v1alpha1 +kind: BackupConfiguration 
+metadata: + name: backup-demo + namespace: demo +spec: + suspend: true + image: desmo999r/formolcli:0.3.2 + repository: repo-empty + schedule: "15 * * * *" + keep: + last: 5 + daily: 2 + weekly: 2 + monthly: 6 + yearly: 3 + targets: + - backupType: Online + targetKind: Deployment + targetName: nginx-deployment + containers: + - name: nginx + steps: + - name: maintenance-on + - name: with-env + - name: with-envfrom + - name: maintenance-off + finalize: true + paths: + - /data +# - kind: Job +# name: backup-pg +# steps: +# - name: backup-pg From 7d9b4100fbfdeed789a9b9081da501703cf42510 Mon Sep 17 00:00:00 2001 From: Jean-Marc ANDRE Date: Fri, 3 Feb 2023 22:36:17 +0100 Subject: [PATCH 02/69] Use controllerutil Finalizers functions --- controllers/backupconfiguration_controller.go | 11 ++--- controllers/backupsession_controller.go | 42 +++++++++++++++++++ 2 files changed, 48 insertions(+), 5 deletions(-) diff --git a/controllers/backupconfiguration_controller.go b/controllers/backupconfiguration_controller.go index 37c88d3..0db5a17 100644 --- a/controllers/backupconfiguration_controller.go +++ b/controllers/backupconfiguration_controller.go @@ -24,10 +24,11 @@ import ( "k8s.io/apimachinery/pkg/runtime" ctrl "sigs.k8s.io/controller-runtime" "sigs.k8s.io/controller-runtime/pkg/client" + "sigs.k8s.io/controller-runtime/pkg/controller/controllerutil" "sigs.k8s.io/controller-runtime/pkg/log" formolv1alpha1 "github.com/desmo999r/formol/api/v1alpha1" - formolutils "github.com/desmo999r/formol/pkg/utils" + //formolutils "github.com/desmo999r/formol/pkg/utils" ) // BackupConfigurationReconciler reconciles a BackupConfiguration object @@ -70,10 +71,10 @@ func (r *BackupConfigurationReconciler) Reconcile(ctx context.Context, req ctrl. 
if !backupConf.ObjectMeta.DeletionTimestamp.IsZero() { r.Log.V(0).Info("backupconf being deleted", "backupconf", backupConf.ObjectMeta.Finalizers) - if formolutils.ContainsString(backupConf.ObjectMeta.Finalizers, finalizerName) { + if controllerutil.ContainsFinalizer(&backupConf, finalizerName) { _ = r.DeleteSidecar(backupConf) _ = r.DeleteCronJob(backupConf) - backupConf.ObjectMeta.Finalizers = formolutils.RemoveString(backupConf.ObjectMeta.Finalizers, finalizerName) + controllerutil.RemoveFinalizer(&backupConf, finalizerName) if err := r.Update(ctx, &backupConf); err != nil { r.Log.Error(err, "unable to remove finalizer") return ctrl.Result{}, err @@ -85,9 +86,9 @@ func (r *BackupConfigurationReconciler) Reconcile(ctx context.Context, req ctrl. } // Add finalizer - if !formolutils.ContainsString(backupConf.ObjectMeta.Finalizers, finalizerName) { + if !controllerutil.ContainsFinalizer(&backupConf, finalizerName) { r.Log.V(0).Info("adding finalizer", "backupconf", backupConf) - backupConf.ObjectMeta.Finalizers = append(backupConf.ObjectMeta.Finalizers, finalizerName) + controllerutil.AddFinalizer(&backupConf, finalizerName) if err := r.Update(ctx, &backupConf); err != nil { r.Log.Error(err, "unable to append finalizer") return ctrl.Result{}, err diff --git a/controllers/backupsession_controller.go b/controllers/backupsession_controller.go index 79ba0c3..3f744d7 100644 --- a/controllers/backupsession_controller.go +++ b/controllers/backupsession_controller.go @@ -20,14 +20,20 @@ import ( "context" "github.com/go-logr/logr" + "k8s.io/apimachinery/pkg/api/errors" "k8s.io/apimachinery/pkg/runtime" ctrl "sigs.k8s.io/controller-runtime" "sigs.k8s.io/controller-runtime/pkg/client" + "sigs.k8s.io/controller-runtime/pkg/controller/controllerutil" "sigs.k8s.io/controller-runtime/pkg/log" formolv1alpha1 "github.com/desmo999r/formol/api/v1alpha1" ) +const ( + finalizerName string = "finalizer.backupsession.formol.desmojim.fr" +) + // BackupSessionReconciler reconciles a 
BackupSession object type BackupSessionReconciler struct { client.Client @@ -54,6 +60,42 @@ func (r *BackupSessionReconciler) Reconcile(ctx context.Context, req ctrl.Reques r.Context = ctx r.Log.V(1).Info("Enter Reconcile with req", "req", req, "reconciler", r) + backupSession := formolv1alpha1.BackupSession{} + err := r.Get(ctx, req.NamespacedName, &backupSession) + if err != nil { + if errors.IsNotFound(err) { + return ctrl.Result{}, nil + } + return ctrl.Result{}, err + } + backupConf := formolv1alpha1.BackupConfiguration{} + if err := r.Get(ctx, client.ObjectKey{ + Namespace: backupSession.Spec.Ref.Namespace, + Name: backupSession.Spec.Ref.Name, + }, &backupConf); err != nil { + r.Log.Error(err, "unable to get BackupConfiguration") + return ctrl.Result{}, err + } + + if !backupSession.ObjectMeta.DeletionTimestamp.IsZero() { + r.Log.V(0).Info("BackupSession is being deleted") + if controllerutil.ContainsFinalizer(&backupSession, finalizerName) { + controllerutil.RemoveFinalizer(&backupSession, finalizerName) + err := r.Update(ctx, &backupSession) + if err != nil { + r.Log.Error(err, "unable to remove finalizer") + } + return ctrl.Result{}, err + } + } + if !controllerutil.ContainsFinalizer(&backupSession, finalizerName) { + controllerutil.AddFinalizer(&backupSession, finalizerName) + err := r.Update(ctx, &backupSession) + if err != nil { + r.Log.Error(err, "unable to add finalizer") + } + return ctrl.Result{}, err + } return ctrl.Result{}, nil } From 912f3bb06a1000b94c83e0f20c18b0601eadd012 Mon Sep 17 00:00:00 2001 From: Jean-Marc ANDRE Date: Fri, 3 Feb 2023 22:37:00 +0100 Subject: [PATCH 03/69] Removed ~ files --- .../backupconfiguration_controller.go~ | 129 -------------- ...backupconfiguration_controller_cronjob.go~ | 102 ----------- ...backupconfiguration_controller_sidecar.go~ | 134 -------------- .../backupconfiguration_controller_test.go~ | 165 ------------------ controllers/backupsession_controller.go~ | 62 ------- controllers/suite_test.go~ | 155 
---------------- 6 files changed, 747 deletions(-) delete mode 100644 controllers/backupconfiguration_controller.go~ delete mode 100644 controllers/backupconfiguration_controller_cronjob.go~ delete mode 100644 controllers/backupconfiguration_controller_sidecar.go~ delete mode 100644 controllers/backupconfiguration_controller_test.go~ delete mode 100644 controllers/backupsession_controller.go~ delete mode 100644 controllers/suite_test.go~ diff --git a/controllers/backupconfiguration_controller.go~ b/controllers/backupconfiguration_controller.go~ deleted file mode 100644 index b983514..0000000 --- a/controllers/backupconfiguration_controller.go~ +++ /dev/null @@ -1,129 +0,0 @@ -/* -Copyright 2023. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. 
-*/ - -package controllers - -import ( - "context" - - "github.com/go-logr/logr" - //appsv1 "k8s.io/api/apps/v1" - //batchv1 "k8s.io/api/batch/v1" - //corev1 "k8s.io/api/core/v1" - "k8s.io/apimachinery/pkg/api/errors" - //metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/apimachinery/pkg/runtime" - ctrl "sigs.k8s.io/controller-runtime" - "sigs.k8s.io/controller-runtime/pkg/client" - "sigs.k8s.io/controller-runtime/pkg/log" - - formolv1alpha1 "github.com/desmo999r/formol/api/v1alpha1" - formolutils "github.com/desmo999r/formol/pkg/utils" -) - -// BackupConfigurationReconciler reconciles a BackupConfiguration object -type BackupConfigurationReconciler struct { - client.Client - Scheme *runtime.Scheme - Log logr.Logger - context.Context -} - -//+kubebuilder:rbac:groups=formol.desmojim.fr,resources=backupconfigurations,verbs=get;list;watch;create;update;patch;delete -//+kubebuilder:rbac:groups=formol.desmojim.fr,resources=backupconfigurations/status,verbs=get;update;patch -//+kubebuilder:rbac:groups=formol.desmojim.fr,resources=backupconfigurations/finalizers,verbs=update - -// Reconcile is part of the main kubernetes reconciliation loop which aims to -// move the current state of the cluster closer to the desired state. -// TODO(user): Modify the Reconcile function to compare the state specified by -// the BackupConfiguration object against the actual cluster state, and then -// perform operations to make the cluster state reflect the state specified by -// the user. 
-// -// For more details, check Reconcile and its Result here: -// - https://pkg.go.dev/sigs.k8s.io/controller-runtime@v0.13.1/pkg/reconcile -func (r *BackupConfigurationReconciler) Reconcile(ctx context.Context, req ctrl.Request) (ctrl.Result, error) { - r.Context = ctx - r.Log = log.FromContext(ctx) - - r.Log.V(1).Info("Enter Reconcile with req", "req", req, "reconciler", r) - - backupConf := formolv1alpha1.BackupConfiguration{} - err := r.Get(ctx, req.NamespacedName, &backupConf) - if err != nil { - if errors.IsNotFound(err) { - return ctrl.Result{}, nil - } - return ctrl.Result{}, err - } - - finalizerName := "finalizer.backupconfiguration.formol.desmojim.fr" - - if !backupConf.ObjectMeta.DeletionTimestamp.IsZero() { - r.Log.V(0).Info("backupconf being deleted", "backupconf", backupConf.ObjectMeta.Finalizers) - if formolutils.ContainsString(backupConf.ObjectMeta.Finalizers, finalizerName) { - _ = r.DeleteSidecar(backupConf) - _ = r.DeleteCronJob(backupConf) - backupConf.ObjectMeta.Finalizers = formolutils.RemoveString(backupConf.ObjectMeta.Finalizers, finalizerName) - if err := r.Update(ctx, &backupConf); err != nil { - r.Log.Error(err, "unable to remove finalizer") - return ctrl.Result{}, err - } - } - // We have been deleted. Return here - r.Log.V(0).Info("backupconf deleted", "backupconf", backupConf.Name) - return ctrl.Result{}, nil - } - - // Add finalizer - if !formolutils.ContainsString(backupConf.ObjectMeta.Finalizers, finalizerName) { - r.Log.V(0).Info("adding finalizer", "backupconf", backupConf) - backupConf.ObjectMeta.Finalizers = append(backupConf.ObjectMeta.Finalizers, finalizerName) - if err := r.Update(ctx, &backupConf); err != nil { - r.Log.Error(err, "unable to append finalizer") - return ctrl.Result{}, err - } - // backupConf has been updated. Exit here. The reconciler will be called again so we can finish the job. 
- return ctrl.Result{}, nil - } - - if err := r.AddCronJob(backupConf); err != nil { - return ctrl.Result{}, err - } else { - backupConf.Status.ActiveCronJob = true - } - - if err := r.AddSidecar(backupConf); err != nil { - r.Log.Error(err, "unable to add sidecar container") - return ctrl.Result{}, err - } else { - backupConf.Status.ActiveSidecar = true - } - - if err := r.Status().Update(ctx, &backupConf); err != nil { - r.Log.Error(err, "Unable to update BackupConfiguration status") - return ctrl.Result{}, err - } - - return ctrl.Result{}, nil -} - -// SetupWithManager sets up the controller with the Manager. -func (r *BackupConfigurationReconciler) SetupWithManager(mgr ctrl.Manager) error { - return ctrl.NewControllerManagedBy(mgr). - For(&formolv1alpha1.BackupConfiguration{}). - Complete(r) -} diff --git a/controllers/backupconfiguration_controller_cronjob.go~ b/controllers/backupconfiguration_controller_cronjob.go~ deleted file mode 100644 index 459d613..0000000 --- a/controllers/backupconfiguration_controller_cronjob.go~ +++ /dev/null @@ -1,102 +0,0 @@ -package controllers - -import ( - formolv1alpha1 "github.com/desmo999r/formol/api/v1alpha1" - batchv1 "k8s.io/api/batch/v1" - corev1 "k8s.io/api/core/v1" - "k8s.io/apimachinery/pkg/api/errors" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - ctrl "sigs.k8s.io/controller-runtime" - "sigs.k8s.io/controller-runtime/pkg/client" -) - -func (r *BackupConfigurationReconciler) DeleteCronJob(backupConf formolv1alpha1.BackupConfiguration) error { - cronjob := &batchv1.CronJob{} - if err := r.Get(r.Context, client.ObjectKey{ - Namespace: backupConf.Namespace, - Name: "backup-" + backupConf.Name, - }, cronjob); err == nil { - r.Log.V(0).Info("Deleting cronjob", "cronjob", cronjob.Name) - return r.Delete(r.Context, cronjob) - } else { - return err - } -} - -func (r *BackupConfigurationReconciler) AddCronJob(backupConf formolv1alpha1.BackupConfiguration) error { - cronjob := &batchv1.CronJob{} - if err := r.Get(r.Context, 
client.ObjectKey{ - Namespace: backupConf.Namespace, - Name: "backup-" + backupConf.Name, - }, cronjob); err == nil { - r.Log.V(0).Info("there is already a cronjob") - var changed bool - if backupConf.Spec.Schedule != cronjob.Spec.Schedule { - r.Log.V(0).Info("cronjob schedule has changed", "old schedule", cronjob.Spec.Schedule, "new schedule", backupConf.Spec.Schedule) - cronjob.Spec.Schedule = backupConf.Spec.Schedule - changed = true - } - if backupConf.Spec.Suspend != nil && backupConf.Spec.Suspend != cronjob.Spec.Suspend { - r.Log.V(0).Info("cronjob suspend has changed", "before", cronjob.Spec.Suspend, "new", backupConf.Spec.Suspend) - cronjob.Spec.Suspend = backupConf.Spec.Suspend - changed = true - } - if changed == true { - if err := r.Update(r.Context, cronjob); err != nil { - r.Log.Error(err, "unable to update cronjob definition") - return err - } - backupConf.Status.Suspended = *backupConf.Spec.Suspend - } - return nil - } else if errors.IsNotFound(err) == false { - r.Log.Error(err, "something went wrong") - return err - } - - cronjob = &batchv1.CronJob{ - ObjectMeta: metav1.ObjectMeta{ - Name: "backup-" + backupConf.Name, - Namespace: backupConf.Namespace, - }, - Spec: batchv1.CronJobSpec{ - Suspend: backupConf.Spec.Suspend, - Schedule: backupConf.Spec.Schedule, - JobTemplate: batchv1.JobTemplateSpec{ - Spec: batchv1.JobSpec{ - Template: corev1.PodTemplateSpec{ - Spec: corev1.PodSpec{ - RestartPolicy: corev1.RestartPolicyOnFailure, - ServiceAccountName: "backupsession-creator", - Containers: []corev1.Container{ - corev1.Container{ - Name: "job-createbackupsession-" + backupConf.Name, - Image: backupConf.Spec.Image, - Args: []string{ - "backupsession", - "create", - "--namespace", - backupConf.Namespace, - "--name", - backupConf.Name, - }, - }, - }, - }, - }, - }, - }, - }, - } - if err := ctrl.SetControllerReference(&backupConf, cronjob, r.Scheme); err != nil { - r.Log.Error(err, "unable to set controller on job", "cronjob", cronjob, "backupconf", 
backupConf) - return err - } - r.Log.V(0).Info("creating the cronjob") - if err := r.Create(r.Context, cronjob); err != nil { - r.Log.Error(err, "unable to create the cronjob", "cronjob", cronjob) - return err - } else { - return nil - } -} diff --git a/controllers/backupconfiguration_controller_sidecar.go~ b/controllers/backupconfiguration_controller_sidecar.go~ deleted file mode 100644 index 817691b..0000000 --- a/controllers/backupconfiguration_controller_sidecar.go~ +++ /dev/null @@ -1,134 +0,0 @@ -package controllers - -import ( - appsv1 "k8s.io/api/apps/v1" - corev1 "k8s.io/api/core/v1" - "sigs.k8s.io/controller-runtime/pkg/client" - - formolv1alpha1 "github.com/desmo999r/formol/api/v1alpha1" -) - -func (r *BackupConfigurationReconciler) DeleteSidecar(backupConf formolv1alpha1.BackupConfiguration) error { - removeTags := func(podSpec *corev1.PodSpec, target formolv1alpha1.Target) { - for i, container := range podSpec.Containers { - for _, targetContainer := range target.Containers { - if targetContainer.Name == container.Name { - if container.Env[len(container.Env)-1].Name == formolv1alpha1.TARGETCONTAINER_TAG { - podSpec.Containers[i].Env = container.Env[:len(container.Env)-1] - } else { - for j, e := range container.Env { - if e.Name == formolv1alpha1.TARGETCONTAINER_TAG { - container.Env[j] = container.Env[len(container.Env)-1] - podSpec.Containers[i].Env = container.Env[:len(container.Env)-1] - break - } - } - } - } - } - } - } - for _, target := range backupConf.Spec.Targets { - switch target.TargetKind { - case formolv1alpha1.Deployment: - deployment := &appsv1.Deployment{} - if err := r.Get(r.Context, client.ObjectKey{ - Namespace: backupConf.Namespace, - Name: target.TargetName, - }, deployment); err != nil { - r.Log.Error(err, "cannot get deployment", "Deployment", target.TargetName) - return err - } - restoreContainers := []corev1.Container{} - for _, container := range deployment.Spec.Template.Spec.Containers { - if container.Name == 
formolv1alpha1.SIDECARCONTAINER_NAME { - continue - } - restoreContainers = append(restoreContainers, container) - } - deployment.Spec.Template.Spec.Containers = restoreContainers - removeTags(&deployment.Spec.Template.Spec, target) - return r.Update(r.Context, deployment) - } - } - - return nil -} - -func (r *BackupConfigurationReconciler) AddSidecar(backupConf formolv1alpha1.BackupConfiguration) error { - // Go through all the 'targets' - // the backupType: Online needs a sidecar container for every single listed 'container' - // if the backupType is something else than Online, the 'container' will still need a sidecar - // if it has 'steps' - addTags := func(podSpec *corev1.PodSpec, target formolv1alpha1.Target) bool { - for i, container := range podSpec.Containers { - if container.Name == formolv1alpha1.SIDECARCONTAINER_NAME { - return false - } - for _, targetContainer := range target.Containers { - if targetContainer.Name == container.Name { - podSpec.Containers[i].Env = append(container.Env, corev1.EnvVar{ - Name: formolv1alpha1.TARGETCONTAINER_TAG, - Value: container.Name, - }) - } - } - } - return true - } - - for _, target := range backupConf.Spec.Targets { - addSidecar := false - for _, targetContainer := range target.Containers { - if len(targetContainer.Steps) > 0 { - addSidecar = true - } - } - if target.BackupType == formolv1alpha1.OnlineKind { - addSidecar = true - } - if addSidecar { - repo := formolv1alpha1.Repo{} - if err := r.Get(r.Context, client.ObjectKey{ - Namespace: backupConf.Namespace, - Name: backupConf.Spec.Repository, - }, &repo); err != nil { - r.Log.Error(err, "unable to get Repo") - return err - } - r.Log.V(1).Info("Got Repository", "repo", repo) - env := repo.GetResticEnv(backupConf) - sideCar := corev1.Container{ - Name: formolv1alpha1.SIDECARCONTAINER_NAME, - Image: backupConf.Spec.Image, - Args: []string{"backupsession", "server"}, - Env: append(env, corev1.EnvVar{ - Name: formolv1alpha1.TARGET_NAME, - Value: target.TargetName, 
- }), - VolumeMounts: []corev1.VolumeMount{}, - } - switch target.TargetKind { - case formolv1alpha1.Deployment: - deployment := &appsv1.Deployment{} - if err := r.Get(r.Context, client.ObjectKey{ - Namespace: backupConf.Namespace, - Name: target.TargetName, - }, deployment); err != nil { - r.Log.Error(err, "cannot get deployment", "Deployment", target.TargetName) - return err - } - if addTags(&deployment.Spec.Template.Spec, target) { - deployment.Spec.Template.Spec.Containers = append(deployment.Spec.Template.Spec.Containers, sideCar) - r.Log.V(1).Info("Updating deployment", "deployment", deployment, "containers", deployment.Spec.Template.Spec.Containers) - if err := r.Update(r.Context, deployment); err != nil { - r.Log.Error(err, "cannot update deployment", "Deployment", deployment) - return err - } - } - } - } - } - - return nil -} diff --git a/controllers/backupconfiguration_controller_test.go~ b/controllers/backupconfiguration_controller_test.go~ deleted file mode 100644 index 44eb982..0000000 --- a/controllers/backupconfiguration_controller_test.go~ +++ /dev/null @@ -1,165 +0,0 @@ -/* -Copyright 2023. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package controllers - -import ( - "context" - formolv1alpha1 "github.com/desmo999r/formol/api/v1alpha1" - . "github.com/onsi/ginkgo/v2" - . 
"github.com/onsi/gomega" - batchv1 "k8s.io/api/batch/v1" - //"time" - //appsv1 "k8s.io/api/apps/v1" - //corev1 "k8s.io/api/core/v1" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/apimachinery/pkg/types" -) - -var _ = Describe("BackupConfiguration controller", func() { - const BACKUPCONF_NAME = "test-backupconf-controller" - - var ( - backupConf *formolv1alpha1.BackupConfiguration - ctx = context.Background() - key = types.NamespacedName{ - Name: BACKUPCONF_NAME, - Namespace: NAMESPACE_NAME, - } - ) - - BeforeEach(func() { - backupConf = &formolv1alpha1.BackupConfiguration{ - ObjectMeta: metav1.ObjectMeta{ - Name: BACKUPCONF_NAME, - Namespace: NAMESPACE_NAME, - }, - Spec: formolv1alpha1.BackupConfigurationSpec{ - Repository: REPO_NAME, - Schedule: "1 * * * *", - Image: "desmo999r/formolcli:v0.3.2", - Targets: []formolv1alpha1.Target{ - formolv1alpha1.Target{ - BackupType: formolv1alpha1.OnlineKind, - TargetKind: formolv1alpha1.Deployment, - TargetName: DEPLOYMENT_NAME, - Containers: []formolv1alpha1.TargetContainer{ - formolv1alpha1.Container{ - Name: CONTAINER_NAME, - }, - }, - }, - }, - }, - } - }) - - Context("Creating a BackupConf", func() { - JustBeforeEach(func() { - Eventually(func() error { - return k8sClient.Create(ctx, backupConf) - }, timeout, interval).Should(Succeed()) - }) - AfterEach(func() { - Expect(k8sClient.Delete(ctx, backupConf)).Should(Succeed()) - }) - It("Has a schedule", func() { - realBackupConf := &formolv1alpha1.BackupConfiguration{} - Eventually(func() bool { - if err := k8sClient.Get(ctx, key, realBackupConf); err != nil { - return false - } - return true - }, timeout, interval).Should(BeTrue()) - Expect(realBackupConf.Spec.Schedule).Should(Equal("1 * * * *")) - }) - It("Should create a CronJob", func() { - realBackupConf := &formolv1alpha1.BackupConfiguration{} - Eventually(func() bool { - if err := k8sClient.Get(ctx, key, realBackupConf); err != nil { - return false - } - return realBackupConf.Status.ActiveCronJob - }, 
timeout, interval).Should(BeTrue()) - cronJob := &batchv1.CronJob{} - Eventually(func() bool { - if err := k8sClient.Get(ctx, types.NamespacedName{ - Name: "backup-" + BACKUPCONF_NAME, - Namespace: NAMESPACE_NAME, - }, cronJob); err != nil { - return false - } - return true - }, timeout, interval).Should(BeTrue()) - Expect(cronJob.Spec.Schedule).Should(Equal("1 * * * *")) - }) - It("Should update the CronJob", func() { - realBackupConf := &formolv1alpha1.BackupConfiguration{} - Eventually(func() bool { - if err := k8sClient.Get(ctx, key, realBackupConf); err != nil { - return false - } - return realBackupConf.Status.ActiveCronJob - }, timeout, interval).Should(BeTrue()) - realBackupConf.Spec.Schedule = "1 0 * * *" - suspend := true - realBackupConf.Spec.Suspend = &suspend - Expect(k8sClient.Update(ctx, realBackupConf)).Should(Succeed()) - cronJob := &batchv1.CronJob{} - Eventually(func() string { - if err := k8sClient.Get(ctx, types.NamespacedName{ - Name: "backup-" + BACKUPCONF_NAME, - Namespace: NAMESPACE_NAME, - }, cronJob); err != nil { - return "" - } - return cronJob.Spec.Schedule - }, timeout, interval).Should(Equal("1 0 * * *")) - Expect(*cronJob.Spec.Suspend).Should(BeTrue()) - }) - }) - Context("Deleting a BackupConf", func() { - JustBeforeEach(func() { - Eventually(func() error { - return k8sClient.Create(ctx, backupConf) - }, timeout, interval).Should(Succeed()) - }) - It("Should delete the CronJob", func() { - cronJob := &batchv1.CronJob{} - Eventually(func() bool { - if err := k8sClient.Get(ctx, types.NamespacedName{ - Name: "backup-" + BACKUPCONF_NAME, - Namespace: NAMESPACE_NAME, - }, cronJob); err != nil { - return false - } - return true - }, timeout, interval).Should(BeTrue()) - By("The CronJob has been created. 
Now deleting the BackupConfiguration") - Expect(k8sClient.Delete(ctx, backupConf)).Should(Succeed()) - Eventually(func() bool { - if err := k8sClient.Get(ctx, types.NamespacedName{ - Name: "backup-" + BACKUPCONF_NAME, - Namespace: NAMESPACE_NAME, - }, cronJob); err != nil { - return false - } - return true - }, timeout, interval).Should(BeFalse()) - - }) - }) -}) diff --git a/controllers/backupsession_controller.go~ b/controllers/backupsession_controller.go~ deleted file mode 100644 index 64a8b06..0000000 --- a/controllers/backupsession_controller.go~ +++ /dev/null @@ -1,62 +0,0 @@ -/* -Copyright 2023. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. 
-*/ - -package controllers - -import ( - "context" - - "k8s.io/apimachinery/pkg/runtime" - ctrl "sigs.k8s.io/controller-runtime" - "sigs.k8s.io/controller-runtime/pkg/client" - "sigs.k8s.io/controller-runtime/pkg/log" - - formolv1alpha1 "github.com/desmo999r/formol/api/v1alpha1" -) - -// BackupSessionReconciler reconciles a BackupSession object -type BackupSessionReconciler struct { - client.Client - Scheme *runtime.Scheme -} - -//+kubebuilder:rbac:groups=formol.desmojim.fr,resources=backupsessions,verbs=get;list;watch;create;update;patch;delete -//+kubebuilder:rbac:groups=formol.desmojim.fr,resources=backupsessions/status,verbs=get;update;patch -//+kubebuilder:rbac:groups=formol.desmojim.fr,resources=backupsessions/finalizers,verbs=update - -// Reconcile is part of the main kubernetes reconciliation loop which aims to -// move the current state of the cluster closer to the desired state. -// TODO(user): Modify the Reconcile function to compare the state specified by -// the BackupSession object against the actual cluster state, and then -// perform operations to make the cluster state reflect the state specified by -// the user. -// -// For more details, check Reconcile and its Result here: -// - https://pkg.go.dev/sigs.k8s.io/controller-runtime@v0.13.1/pkg/reconcile -func (r *BackupSessionReconciler) Reconcile(ctx context.Context, req ctrl.Request) (ctrl.Result, error) { - _ = log.FromContext(ctx) - - // TODO(user): your logic here - - return ctrl.Result{}, nil -} - -// SetupWithManager sets up the controller with the Manager. -func (r *BackupSessionReconciler) SetupWithManager(mgr ctrl.Manager) error { - return ctrl.NewControllerManagedBy(mgr). - For(&formolv1alpha1.BackupSession{}). - Complete(r) -} diff --git a/controllers/suite_test.go~ b/controllers/suite_test.go~ deleted file mode 100644 index 762734a..0000000 --- a/controllers/suite_test.go~ +++ /dev/null @@ -1,155 +0,0 @@ -/* -Copyright 2023. 
- -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package controllers - -import ( - "context" - "path/filepath" - "testing" - "time" - - . "github.com/onsi/ginkgo/v2" - . "github.com/onsi/gomega" - - "k8s.io/client-go/kubernetes/scheme" - "k8s.io/client-go/rest" - "sigs.k8s.io/controller-runtime/pkg/client" - "sigs.k8s.io/controller-runtime/pkg/envtest" - logf "sigs.k8s.io/controller-runtime/pkg/log" - "sigs.k8s.io/controller-runtime/pkg/log/zap" - - formolv1alpha1 "github.com/desmo999r/formol/api/v1alpha1" - //+kubebuilder:scaffold:imports - - //appsv1 "k8s.io/api/apps/v1" - corev1 "k8s.io/api/core/v1" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - ctrl "sigs.k8s.io/controller-runtime" -) - -// These tests use Ginkgo (BDD-style Go testing framework). Refer to -// http://onsi.github.io/ginkgo/ to learn more about Ginkgo. 
- -const ( - NAMESPACE_NAME = "test-namespace" - REPO_NAME = "test-repo" - DEPLOYMENT_NAME = "test-deployment" - CONTAINER_NAME = "test-container" - DATAVOLUME_NAME = "data" - timeout = time.Second * 10 - interval = time.Millisecond * 250 -) - -var ( - namespace = &corev1.Namespace{ - ObjectMeta: metav1.ObjectMeta{ - Name: NAMESPACE_NAME, - }, - } - deployment = &appsv1.Deployment{ - ObjectMeta: metav1.ObjectMeta{ - Namespace: NAMESPACE_NAME, - Name: DEPLOYMENT_NAME, - }, - Spec: appsv1.DeploymentSpec{ - Selector: &metav1.LabelSelector{ - MatchLabels: map[string]string{"app": "test-deployment"}, - }, - Template: corev1.PodTemplateSpec{ - ObjectMeta: metav1.ObjectMeta{ - Labels: map[string]string{"app": "test-deployment"}, - }, - Spec: corev1.PodSpec{ - Containers: []corev1.Container{ - corev1.Container{ - Name: "test-container", - Image: "test-image", - }, - }, - Volumes: []corev1.Volume{ - corev1.Volume{ - Name: DATAVOLUME_NAME, - }, - }, - }, - }, - }, - } - cfg *rest.Config - k8sClient client.Client - testEnv *envtest.Environment - ctx context.Context - cancel context.CancelFunc -) - -func TestAPIs(t *testing.T) { - RegisterFailHandler(Fail) - - RunSpecs(t, "Controller Suite") -} - -var _ = BeforeSuite(func() { - logf.SetLogger(zap.New(zap.WriteTo(GinkgoWriter), zap.UseDevMode(true))) - - ctx, cancel = context.WithCancel(context.TODO()) - By("bootstrapping test environment") - testEnv = &envtest.Environment{ - CRDDirectoryPaths: []string{filepath.Join("..", "config", "crd", "bases")}, - ErrorIfCRDPathMissing: true, - } - - var err error - // cfg is defined in this file globally. 
- cfg, err = testEnv.Start() - Expect(err).NotTo(HaveOccurred()) - Expect(cfg).NotTo(BeNil()) - - err = formolv1alpha1.AddToScheme(scheme.Scheme) - Expect(err).NotTo(HaveOccurred()) - - //+kubebuilder:scaffold:scheme - - k8sClient, err = client.New(cfg, client.Options{Scheme: scheme.Scheme}) - Expect(err).NotTo(HaveOccurred()) - Expect(k8sClient).NotTo(BeNil()) - Expect(k8sClient.Create(ctx, namespace)).Should(Succeed()) - Expect(k8sClient.Create(ctx, deployment)).Should(Succeed()) - - k8sManager, err := ctrl.NewManager(cfg, ctrl.Options{ - Scheme: scheme.Scheme, - }) - Expect(err).NotTo(HaveOccurred()) - - err = (&BackupConfigurationReconciler{ - Client: k8sManager.GetClient(), - Scheme: k8sManager.GetScheme(), - }).SetupWithManager(k8sManager) - Expect(err).NotTo(HaveOccurred()) - - go func() { - defer GinkgoRecover() - err = k8sManager.Start(ctx) - Expect(err).ToNot(HaveOccurred(), "failed to run manager") - }() -}) - -var _ = AfterSuite(func() { - cancel() - By("tearing down the test environment") - err := testEnv.Stop() - Expect(err).NotTo(HaveOccurred()) -}) From e69b25bae01e029a0cacd802a48c77a1877ce0e3 Mon Sep 17 00:00:00 2001 From: Jean-Marc ANDRE Date: Sat, 4 Feb 2023 11:19:37 +0100 Subject: [PATCH 04/69] removed *~ --- .../.backupconfiguration_controller.go.un~ | Bin 35950 -> 0 bytes ...ackupconfiguration_controller_cronjob.go.un~ | Bin 7322 -> 0 bytes ...ackupconfiguration_controller_sidecar.go.un~ | Bin 26628 -> 0 bytes .../.backupconfiguration_controller_test.go.un~ | Bin 15460 -> 0 bytes controllers/.backupsession_controller.go.un~ | Bin 2760 -> 0 bytes controllers/.suite_test.go.un~ | Bin 7724 -> 0 bytes 6 files changed, 0 insertions(+), 0 deletions(-) delete mode 100644 controllers/.backupconfiguration_controller.go.un~ delete mode 100644 controllers/.backupconfiguration_controller_cronjob.go.un~ delete mode 100644 controllers/.backupconfiguration_controller_sidecar.go.un~ delete mode 100644 controllers/.backupconfiguration_controller_test.go.un~ 
delete mode 100644 controllers/.backupsession_controller.go.un~ delete mode 100644 controllers/.suite_test.go.un~ diff --git a/controllers/.backupconfiguration_controller.go.un~ b/controllers/.backupconfiguration_controller.go.un~ deleted file mode 100644 index beb12d25232aea0bde1357d84862cb0ac41688a4..0000000000000000000000000000000000000000 GIT binary patch literal 0 HcmV?d00001 literal 35950 zcmeI5+ix4k6^CW{qR>jyCcee-C2RRc$_iy!w&U7WV);@#jT;w{HLK!5^8fGP#jqUcMX3go3P?fK2lA&*FkyAnIj z%N$@hXK%B!=R32rJ3BMHUY`HaOTmTkAASA9uU()0%QJm{_|B!1`|H#9r~dQH-M{_x zuYdfc{;!|?c;KU7-|Fk@yF-CxJt_SZF6$MDIrMoSpWD7+9jzJcoBmMmfCI1!f&<@B z8vXp2q_Krp+Bmxf8kVF(-W7*kCw&0R-qMy26^_2XPamh{cPPAv4J1IaF*iDzyQ4O} zO^cicHXXsH+>GnptXI#6!EA9VcH?@jIO)aZ1%Iks@;x_lCY(5``znFA=pdvIK>`n} zv>m@v^PNSt$D6bkr-5LPz3LQ^AJ?P6@#3gbe9^DfEAir;5yy`rbqp>MDMN6~spOdA zm2j?jF^a+{PkW9yL-oL&srXJDI&P`t)O7Z|Fo@l9;73CRmD=l+0&)RCYR8dUc@}4L zPER;dak5m>mzHl{df|-PVc&!klq=5SiF?Ym;mRA3kQTT(a*OrWwPA!_1g@woNpZuf zq{FqpKmo4(XXV=K64wS=xb{ydY@%>1w`#C-)X7{f7p>m8=*ZnTawS)$?VEGCJGwB? 
zQi1kvI#{=|MLVyjL$%Yv$u?48fdk&70B5#N2O}62Z)w}Ka=QM+>F(0m?o;?F1t93p z<aOH{7w}xyI8=d9EJ0aXAbY@hCf@Ef2H_>JMY)3 zuIHDajW~yzx9Ea)2snVs8pRMe2QEX^Rn{S3k{%?BL+osI2zU-00-khSR;yV(LZb!B z90e9&M%orFTcnpb3))>0Ee^m9TA*EXsRhc-BKN=yw5XHqV-t+fi)2B&ryE*&R-aDZ zzgBxY2FO8?cR;pag+Y2!vw+>Fz^+dop?)QI`367^oh`vKC-OdAGM) zwusT#TiTJl%IK$GAcFZJK=*Zi5x$-uD-@p#X2blD7tUAR$alg@$#H{{6Zo$VQQJ%N zDT$Vv3$$grxkQ`95`sZ*;EpP~Uopl1Xh|!pwz?C;#@s}>%I7Jt zd;}?J0cXpWtzkSSwlj+DhUKrfI5F6cq0&X0=W6R*aQr+{(E`RJykK8}*b=5Zg78+f)wyQBa9lKGT;o4 zQec6j+@ql0hOKk3Sxm*`9DkS2ahFbXp8`9_2z@L_Zq@1Z`N0YZAEUqmyl4}}2LxN* zVFaV%E$teRNYOOSP0}du`w8hXrX9XI>N_{cZ4sFT)e{tuj3o&p7!@L!lO$pKrz!jf zXH_42u;wJM(a->CD+Lw+#6Gsci7jF@CTN>GLHmHty2|1Qa@#~^K@}wJjZ_%LoJu4I zyOA8qd>;k4?INRq3Xry`K^VoHng()T*7^|Kc8Gj}=_oJThgG)38e>>eJy}qUijDQ_ zq^s|q#4-6KyD_Q#@D{`7#}~A*9DRg!rbnFEjpqDVHS4>Yb+f?>Y1IRp0$DG|KuFY+DG6EPY4shPpoeUgY=|Ie~@}+--sWyZ-}Nd zxWX=R`EV2K8p0qwsnQ*!p4l~O57;$?XTTG7i>rs1SkDj!=}DE|Aa&^(d*zE`>(fT` zfIXwx+73t9BQ73JVjV*mq$gE6gVZxS#w!olF&bZC;0JrfwZlioD5zI3`+6cQvldnQ zg4Cs749OeDQEh&l3i%HiBamAkshJ89o1cijSt=xTU^-+qy2u)WI5 z_PK{GvBnsdL>B@`Es4}Gc%og4##rtCo#Hu=@O**RvH1^@zLmyEJD2@R)sGxJ8eVYOeb)L4l;yVYuq z&eq&hjS!nic9lcJRX0f*PwwA})#r)1D69vid||}#DrGuD@#@UWz8635-$tHxCkf;l zL&{D!FC4F;JH+#Cqpwn*KbqadWF+G(kQ_+@3bOO!c#$(I( z73u14G5&nD;>NyOA|T}oB{QItA@!hAJ6RaB0{FA_%9O86Tu6?o>9w`w1CX|{*!MFx za^f>N7+GYtH!B}&v9d@wo0<2lXl5!5@Zg=v8LD9kuG=gqpSd};yThE6x!%mkMMpa} z5?skIHz|AID_iAwGFJ$gUH&3D-k3o zYny+o+UB98wt?x(6uwOX2zUfNum2XUV{|};8i@rMuTij_s@Nh%V{hq`Z^+l}DLUIr z6c}Be9{g7G+y{JzM2>+LIMH{`mP8oAs1VMVyWymz?lr33d z5VL9;SsEqZR%p8q1!AxOIV>^{$QGoaERDo$1)Gg4I35vs2V=`s7{sia#`S1B z*RCV?xVQo%8iH7W*kWaC7?0DiQaeLyaqTu9lc|Ctx_PjGu%*h@FdnB-Ew)qbGRT&p z0wVe_vjDJ#%GNL*r$K$H9qP&hrVpdIKie4Qp?Gh`Fg=eHdi2gh* zz-y7RHH^n;NYSgK1?ft&8!%B2MCUjb(6vO_8ph)^qUhDpLUe`cDi|mL9uxTnTnkhf z#H^YIw9pQ;^YkOk6a2L(b;{?9rBFjK)aSDT&Rnu@D zZZDpl<_2P#poh`DEFf!XvNepyX*3VD)9f(O6T<|&lOoRmYhen5m{rqY9&Cr%K4}-T z1Ut+w!ve0BC0oOIoW}A%JIl8D*BB+xof0_)R*O;?#H^Zz5;JhMyrHztq{k#d?u#O~ 
zplV49gP2v*NMZ`07LqMfA~HyTLpMqmK(!#*8ph)^kTlCnE6C+jTQWy*J0r3Srk0~H zh*_05R=V{ra@sjAo1v630^65FR)N%F6b3P?reS=t9iyCQmMMbTgvcp~T8hFTW>umX zsKS^_I#k2-=xzNP1$3yUS+J7-B;yU}uFE~D!Ri?-YXL{>X9qB|MU2MY(%&XK-AI|; z-u4nGCVJ)q2x!lWd;%!?Wm|~CDCSgxcv(W^a@Gmr`6vv&63%ph2xwzgXwBF#Aww`25tTAp_l8%2@q=82B0W5n!kOPfDkMngX-Y^pAY8si9^RNm7H3+D^cw&G diff --git a/controllers/.backupconfiguration_controller_cronjob.go.un~ b/controllers/.backupconfiguration_controller_cronjob.go.un~ deleted file mode 100644 index e7f4016c07d2360a9728533c190df255a5a3b6b4..0000000000000000000000000000000000000000 GIT binary patch literal 0 HcmV?d00001 literal 7322 zcmeI1?`s@I7{@n1+PIBBuxe{#)akY0-FYrXEY`LWYa-I5c#unjz6zV!xyz=T*<)@t zZ3!ZMBMANpg71CrTVE9a0Q*WX0Y#BQUx{GpJAJ;hJDW|7aP9Tp?E~M;&dlyi_W3;X zYi}jo{N{?BkN#gU0IsNIEUptP| zA-(Im?wZf9Cf&JMw!N12b<*=%Nv|tpAObea>c#4=;I@(;4oG995z^Bn7*Z>T@BZE# zN9}~O`Xg$|M$^RnMb(R>7m)jh%ru2eX+!V;|M}K}!7<|e4YwaBoggJ7pg88P5^&|s zi%4iOr((GfuT^VjZW?&F1GKFHWg#j%0 zB(;HSuz~^T>$o7GM~gsTrZP~sK+n2in+c`ZIgH7VgwA<>KofRH#fc0I)hHBFd(E{C z!S$lJv7L=q;5tQ@`Y!T{C} z-gk<4C3V&%S-kJ>8@+Q%_*(qm0M4f6^JcW4oMkeMm>tEyI#GBLAiqF5hO!bfyfA>} zo}@N#jg=RMN%|}dIjbsG$3`hV*@m4&`8tf{k|3BojD)JO^qlkIbuE(Z_<7jKfkGkvrjCVkbHB8&F%4cju!x54-M9NE~izq0g16~-WQFQ!OjE*Vl zY?@@FWAovn!=8c%iwVTQWZ|t5A63$OC@W(EUKplPOx!auF`iEGuTf_;l8uRteq-Wd z5ImlMKmcIgvWDOYN%+sofPfQ*YJ6wwHxm$t(tuc}-Zn@!AXIiMVA^EZ+3b%PJe9d1r^*y#t4^6vn?^>w& zdiGSd+gZNR6{h~A>vF#**aPp?lu;=A~l5;3MxW^7kB|7Uf>xCA@PKGK#0)tBZ5*>2?62(UO`Aa!1?CR**zPtcei_e z#mgLNcIM9gn|r={=KszvE-ioixysS%UvndqGspk&$tOpzojUTZUta(DV_$pezHh#g zzxKk*wXgnb*H6Fk!~f)Rx!1%U%;)p8P?)j@IPVed5N&?GSfJ+#c@}CO%hM|I@YY+a$0fckWFhL~a#F z6vBxQx{&<*SERK^BZEJ<21eyj)$F@Xcu~STxlC#O{x=%{|Y2<*MtmvjC+zs8mH>0~(4g_6d zhQLsm3Jplup$Y&w?gK;wdqWE;1Wn%J&wrl@lT%VLk(~1S)XmU9^b$K=6-A-B7+<^r z$ypUgi2xAPBWwg{Xd#85e}#rtHEf|Bw9q&RdwE%0NA*&7T$LSaa9ZdQg~0JW;;428 zb|@i@48ZQjWy_BC!#(0YFAjD)q)3vzd+V;hzN?cz#vvjr~?Sp7KD%NUo<-%A=n*DAU z(GyXrT8~#J2`?7wYd)8SE$@H|spDoeE&^+mkVXb!{VU5FGei9dtoJ(B`MKh9d^n0O 
zR_n{v^6F%Er!vam2Se{6Q)$jP~dnP=u^=g%$79Gmc1xiJ~83Wx(%)ka$1Vx>&% zX2LzOK9FEtES8(XwQZ+xR)m@}Yhu9(!cc~)M$-5e;rr!6-4MRf%QA0s2(M^o40On- zRVY)T04Tx)K+iYxS19QcB%1@#j?xRc%VhR3M46;Lt8^NlE%06HnX+2 zEj{v7*K!n}Qt-~!phlzKjC~QO)gnz9{HVD;OGL0Zw2(puVfJUsOq+b#KKz7?LoHWV z!tM%$Qux-r=_jur=~u) zh1A>7fvm+pN#z9y(9y(JNNVUOwh(%&REP0>=JDW9t=x?Uh1#6a%rLt69;-l|~_K|iOcj@r| z0txa}YG~chwiif3IdnCtM$-6}9-8e!Z+*9=r+rCZl!IyhTI17wuaY8u->$=XjZEF>8suC3OX5RiJqT`VmAS=Sh*CNK6-}O*5pX0+KMb)jN$_Q_R+L- za`YXbai=DI$N4LAmBhC7<=|y_NSq2U3jG}+BUX*1@$JiD!}jIq7&j*qrDE#JN>}#* z2Cs}QJ%Pl;TNMCuoReVDs)-@}*6T0l(=hC2(Vw#xtX{Y*F2i5uh2R3>~!4$X&Zx^y}TAy(R}! zAO3h!^7fhm(;iZn;`+?ta$M=04Kx14+X>CBF??<80_+jCy#SL2aHt}y zM$-5e;AZWoA6bBO9ie^%fDbu1D4)};%vV{)$| zQh;}99j3$TfHW>ng-cC}qe2?24ke_K0ieBIuvUla_leUsW$SBs-2qKCI9Tmr+Y20@ z635U$V22XY$RO;JKKmU>myIG)cKgNP+LH~(+KXHi+!rzn@p7#!3t_k|oocA_wn6#0 zNj;eNi=*c-phXFpN^4CK-y4TXPU#(eeFY>F8Ye!uIL-~kg(kbvOeR4=^lO`QlJQ|7 zTHJj^tWU9jvU070)R)KS1 ztJ=ui7q-9YRjZ;+Gb~zAQcF|O_EM-Yt41>Q1?_Kpq1`8FhpZ|qNlibLgVCxsGWYwD zeM-m<*9Ou?l|BMc16b8Y=6*l0ljO$3v~6M7>d%lc4d=i$bDnyGeq32zWf-hrJfJrb zAH)HxQAoBy!TP&iCG4=PAo~s}XdX0A0BMMa`jb&gc0r+g-O}yILp>sp95P7&S3qPG zl5J2B{h=QsYeA$2T%cscC5)_~giJ*c{m~)Xo(!644f9pGE7J_dDf$LlDc+kLsU6`* zgfVe0;y&*rA`HWzg)Bvud;g9#IX;MYj|RoSwtOQdUz|c7DQ^mg%x34A)`YHX#|ZsO^V;rs)QHC?G(rV zT72IkBBP7MeMa0p;$(7adxi&Hq`PIg(-m3R^l0nPP1g9bvXFe6siPnA7Yg>DP)g+` z`4^PJ$@9<0rN*=Ii+s`fuoQ`W$+=Q1mSkDR^TpB&&6@mo%0>5mr;BAUGxCSYf}BXQ zbX8%&tySd~aFk%bi==6RF4vL>nbL$F9w&OGA&bAtqa^FWx&^sfle1gVPHpmW4PU>WW4%~^ zxa)^@U%|H1)MaXoS&G(u<@zy6kbE86y3A%@$)0}Z=wQ8$&y=uKH@zNkgXewx8f>a99Xh7TAZ1y(I)y?kpvUhE5 z)6(=GRT~i6T2$JI6fuJIML}PDRY4Hn;+ujL^i_S;?{{`48G{X*TlQ^!;mgeYW_M@# zee#>xox2+*H9 z%MUV{%neA{I#gCbIq6`jlyQEZ{U1Ca0;Xeumem4G%Yph-jweHl6p2{hSk zR)Hx|;oVgRdIy8pRoI@dn2CSvWosH0<6U4bq_ z(b`8C1Svdnsw5<_&Y`3c;AO~6m5fGGiv#V74($>~WcX>!w;sG9^dG6XPMCM9fxj~t zDGs7l9CXIqQq6M<#lf+l=9TjOhibN$&vjc{qQ6|BAPJM`oxGkj(&iB2pUI@93QL1! 
z9Ku%@6TXVUC=zZ~?|?+le0>j6Qz}`lWLp|J@)%);or*+*CFeph&!J@WRh?6+e4RMC zUT4S9&p_`((Uhuy36D%TLb{7bNF%`OP>yW_jZsKoaiHxG(3&Dd*D%-&^fAPQD(KUMwAZ0oh)K8x zNtn%6-LO{mtx?;Xa3wc#x*JkPhCGpbG=>|4ki_C}dsgSh&<{i879|@!`;|vRBP6vi zhpGU`W4I4vpfLzZEDp5)?0PC_pJ7xcTbG_}*g^9a_Rflfp&iyHiqAP=x$gEwuNaal z)9@bc#z}Y>GO4HqrQvEEs(pGYGNFd$aHP6gn?XZ7ut;j8v{oL*f&|UNrv)TYH4e2G zbZUziz6NuV6z|pUoP_z=lXxyQ&!Adl8mY#idQ7Lv&>w~tmPLB%$|F;eknV$&5g?B< z6R3(vk}?joNgY}!!_PsNm_SKs9gmSlf)%KlZIfz@LIR6JZAzyWV6a1wdUJRj%O2m| zi-Vr)R`O46-HHuQ;n-F7yz+$W1b#{FeUjb%+TD`tSxD@M8kSmQ8mY!%`H9Z*3`RKz zDVDc+gdrud|0ZvW1bLdpwxPx-B(ONtzSgNRVa`K}S^*ijtPM2sdr-S$vgHNMjH(5t z;c6V7-|IY?N(?`Z>)`S~W8##HzlG13NOV1~CylfxAwyV2BuN>E@DGA;bF@cu9HX6p zK7~klS!*WQZGe=KD}B6>nSB|AR1MqRzw6nmQcJOH&*u2Flauclr1Ow=LnL<^GW*Z9hiq>uSxyk8a?Y_v6DJ$L=%`=V`nN+II-1+E3rPhcW7ps$y z;|FrTv$UHg**77RsajCdtAb{`PV=Y?KSxZQsbu*muO!VvY1pc0 zq+Wt;hhWE-Z;tjK%3Y$PeqJEnugzeek*9UM>Mx%upQ71h?)8% z=3N+nj$xO5w>q)i+OuyosE!9-z1#LGV|I6OzEk}eL(fiJL-6HA#U8t1%8;aKVET(- W+7U5jIz0m^rdGr>y9CBtv;P7hlF@De diff --git a/controllers/.backupsession_controller.go.un~ b/controllers/.backupsession_controller.go.un~ deleted file mode 100644 index fd13c2cd717ac1fb29facda0e4de87811002221f..0000000000000000000000000000000000000000 GIT binary patch literal 0 HcmV?d00001 literal 2760 zcmeH|KTE?<6vba_Rf>W*_-_zHQD{Z%V$=$4H5Lb#B32hI1w&{Hwt?6}9R&M19Nfjv zB7&2OlZv>wh_jya`d*12AX^R`ZgO(TgZtwqhxNwwL9^h#i|E}*`Jr6R<{rO}Z%4;Z zmOh_q=I-#(+hYFm;_CJLSqNdlS`l2(Ln>g^7WRu(iuF1w_o^YPDhnS?sw#dVnea)d z044w&vl538Kht>s1Uv=jc$Q2Eqo0^v35}2j*r7y#37B~ZjY9}jfR^w>d`n8eoMG2Vpa zIE5eua(jMqq?Un6uJ@xOjDDil&A)1FBtZ6%bqI|^2vmT!FQG;LBjTY%Fc(j!jZ(2t aOt#M)FSTKuyKRr3&$?~Y9M3VkA2)wBlWA@M diff --git a/controllers/.suite_test.go.un~ b/controllers/.suite_test.go.un~ deleted file mode 100644 index 9a5f9f305922288e7f91e62f1fcc4057c0092bc9..0000000000000000000000000000000000000000 GIT binary patch literal 0 HcmV?d00001 literal 7724 zcmeI1Pe>F|9LL9X`}55Hg5jZT6tdlr6$v3UYi+fg8mvlTuuS7nTf6Skj$)L7)hRu9 
zu%JV?ZV`0p-q}M1-nvv^=OBcz-}lY??LH-JaI(|;!gt@i-|XxgexLol`SZr}#TR2! z1Enuk?8xU_;_LgqU+cY%_UP*A<(H{<4JVq#(MwCo4{IM^|FkUY8M#Gtb~@#9Wv0t+ zyM4Bj%9p5{m!Zy?t~UBcJGoBzFtrd`w&kuYevjiDaVxisy`)&s$WQF*Bcrv}Nx>F+ zM*lto?#jX^iNmsfG}F%`^%ux$U+2#LHJZhh}3tJ z`C?cXye8u?$ntA4kdhN7VkA&}NXFYXLk>xZqFy=}b0JiHZA{zRtCqz#Iyt2NF&X57 zr*=dhaYQ{r0MbD+3jpz*76eSWAqx={2HIN*EmlJWXwS)@-75xX%k%`1phbvGAy02O zQ<*!S<9i9$pY5=x_(mp~Ol5DDrVEpa?6m88-ee+~RD?nF5Sazq*U0kEWC(LwcB-Xi zpcP5DJ|H}y(vR_gASUJ%D1hOWBYy02-JpUhLbefDKh3yfcndlpn|AZ f8XY8o8i>fme^C`;JgBzK5RZ$kmUX-5zrFem$?I1Y From c75de6e609a474f27ea55e1588238253d9d9480c Mon Sep 17 00:00:00 2001 From: Jean-Marc ANDRE Date: Sat, 4 Feb 2023 17:24:42 +0100 Subject: [PATCH 05/69] should test []EnvVar length --- controllers/backupconfiguration_controller_sidecar.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/controllers/backupconfiguration_controller_sidecar.go b/controllers/backupconfiguration_controller_sidecar.go index 688e339..6953302 100644 --- a/controllers/backupconfiguration_controller_sidecar.go +++ b/controllers/backupconfiguration_controller_sidecar.go @@ -13,7 +13,7 @@ func (r *BackupConfigurationReconciler) DeleteSidecar(backupConf formolv1alpha1. 
for i, container := range podSpec.Containers { for _, targetContainer := range target.Containers { if targetContainer.Name == container.Name { - if container.Env[len(container.Env)-1].Name == formolv1alpha1.TARGETCONTAINER_TAG { + if len(container.Env) > 1 && container.Env[len(container.Env)-1].Name == formolv1alpha1.TARGETCONTAINER_TAG { podSpec.Containers[i].Env = container.Env[:len(container.Env)-1] } else { for j, e := range container.Env { From 517a6c732429f58178cf0a7e9e961ae2c7c99764 Mon Sep 17 00:00:00 2001 From: Jean-Marc ANDRE Date: Sat, 4 Feb 2023 17:25:00 +0100 Subject: [PATCH 06/69] Sidecar tests --- .../backupconfiguration_controller_test.go | 48 ++++++++++++++++++- controllers/suite_test.go | 29 +++++++++++ 2 files changed, 76 insertions(+), 1 deletion(-) diff --git a/controllers/backupconfiguration_controller_test.go b/controllers/backupconfiguration_controller_test.go index 406027f..e6719d4 100644 --- a/controllers/backupconfiguration_controller_test.go +++ b/controllers/backupconfiguration_controller_test.go @@ -23,7 +23,7 @@ import ( . 
"github.com/onsi/gomega" batchv1 "k8s.io/api/batch/v1" //"time" - //appsv1 "k8s.io/api/apps/v1" + appsv1 "k8s.io/api/apps/v1" //corev1 "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/types" @@ -130,6 +130,24 @@ var _ = Describe("BackupConfiguration controller", func() { }, timeout, interval).Should(Equal("1 0 * * *")) Expect(*cronJob.Spec.Suspend).Should(BeTrue()) }) + When("The BackupType is an OnlineKind", func() { + It("Should create a sidecar container", func() { + deployment := &appsv1.Deployment{} + Eventually(func() bool { + if err := k8sClient.Get(ctx, types.NamespacedName{ + Name: DEPLOYMENT_NAME, + Namespace: NAMESPACE_NAME, + }, deployment); err != nil { + return false + } + return len(deployment.Spec.Template.Spec.Containers) == 2 + }, timeout, interval).Should(BeTrue()) + + By("Should add Env labels") + Expect(deployment.Spec.Template.Spec.Containers[0].Env[0].Name).Should(Equal(formolv1alpha1.TARGETCONTAINER_TAG)) + Expect(deployment.Spec.Template.Spec.Containers[1].Name).Should(Equal(formolv1alpha1.SIDECARCONTAINER_NAME)) + }) + }) }) Context("Deleting a BackupConf", func() { JustBeforeEach(func() { @@ -161,5 +179,33 @@ var _ = Describe("BackupConfiguration controller", func() { }, timeout, interval).Should(BeFalse()) }) + When("The BackupType is an OnlineKind", func() { + It("Should delete the sidecar container", func() { + deployment := &appsv1.Deployment{} + Eventually(func() bool { + if err := k8sClient.Get(ctx, types.NamespacedName{ + Name: DEPLOYMENT_NAME, + Namespace: NAMESPACE_NAME, + }, deployment); err != nil { + return false + } + return len(deployment.Spec.Template.Spec.Containers) == 2 + }, timeout, interval).Should(BeTrue()) + Expect(deployment.Spec.Template.Spec.Containers[0].Env[0].Name).Should(Equal(formolv1alpha1.TARGETCONTAINER_TAG)) + Expect(deployment.Spec.Template.Spec.Containers[1].Name).Should(Equal(formolv1alpha1.SIDECARCONTAINER_NAME)) + By("The sidecar container has been created. 
Now deleting the BackupConfiguration") + Expect(k8sClient.Delete(ctx, backupConf)).Should(Succeed()) + Eventually(func() bool { + if err := k8sClient.Get(ctx, types.NamespacedName{ + Name: DEPLOYMENT_NAME, + Namespace: NAMESPACE_NAME, + }, deployment); err != nil { + return false + } + return len(deployment.Spec.Template.Spec.Containers) == 1 + }, timeout, interval).Should(BeTrue()) + Expect(len(deployment.Spec.Template.Spec.Containers[0].Env)).Should(Equal(0)) + }) + }) }) }) diff --git a/controllers/suite_test.go b/controllers/suite_test.go index 88ebe55..31c9f87 100644 --- a/controllers/suite_test.go +++ b/controllers/suite_test.go @@ -50,6 +50,7 @@ const ( DEPLOYMENT_NAME = "test-deployment" CONTAINER_NAME = "test-container" DATAVOLUME_NAME = "data" + SECRET_NAME = "test-secret" timeout = time.Second * 10 interval = time.Millisecond * 250 ) @@ -89,6 +90,32 @@ var ( }, }, } + secret = &corev1.Secret{ + ObjectMeta: metav1.ObjectMeta{ + Name: SECRET_NAME, + Namespace: NAMESPACE_NAME, + }, + Data: map[string][]byte{ + "RESTIC_PASSWORD": []byte("toto"), + "AWS_ACCESS_KEY_ID": []byte("titi"), + "AWS_SECRET_ACCESS_KEY": []byte("tata"), + }, + } + repo = &formolv1alpha1.Repo{ + ObjectMeta: metav1.ObjectMeta{ + Name: REPO_NAME, + Namespace: NAMESPACE_NAME, + }, + Spec: formolv1alpha1.RepoSpec{ + Backend: formolv1alpha1.Backend{ + S3: &formolv1alpha1.S3{ + Server: "raid5.desmojim.fr:9000", + Bucket: "testbucket2", + }, + }, + RepositorySecrets: "test-secret", + }, + } cfg *rest.Config k8sClient client.Client testEnv *envtest.Environment @@ -127,6 +154,8 @@ var _ = BeforeSuite(func() { Expect(err).NotTo(HaveOccurred()) Expect(k8sClient).NotTo(BeNil()) Expect(k8sClient.Create(ctx, namespace)).Should(Succeed()) + Expect(k8sClient.Create(ctx, secret)).Should(Succeed()) + Expect(k8sClient.Create(ctx, repo)).Should(Succeed()) Expect(k8sClient.Create(ctx, deployment)).Should(Succeed()) k8sManager, err := ctrl.NewManager(cfg, ctrl.Options{ From 
854f290a025a2e4f78d31d65edc2b29165c913cc Mon Sep 17 00:00:00 2001 From: Jean-Marc ANDRE Date: Sat, 4 Feb 2023 17:27:45 +0100 Subject: [PATCH 07/69] removed pkg/utils no longer needed --- pkg/utils/.root.go.un~ | Bin 2166 -> 0 bytes pkg/utils/root.go | 31 ------------------------------- pkg/utils/root.go~ | 33 --------------------------------- 3 files changed, 64 deletions(-) delete mode 100644 pkg/utils/.root.go.un~ delete mode 100644 pkg/utils/root.go delete mode 100644 pkg/utils/root.go~ diff --git a/pkg/utils/.root.go.un~ b/pkg/utils/.root.go.un~ deleted file mode 100644 index d01deb2076c97073850e7880e7d159e40f367722..0000000000000000000000000000000000000000 GIT binary patch literal 0 HcmV?d00001 literal 2166 zcmeHIPiqrF6yGMbc0A}w6oqzNtt60cDMgf&9MW`!(qh6+no>lWO&hV`v5AArR+A*RXTm~Wbk)}%A&ykKx z3$Bp9@Orbh7jE0L2AkU(6zR1Jl{^#%3%O$~*v>8wHbF)A!0b68UFp$sg}QzW2-yi@ zx1dLJXylxOSQITHtKAtS+~}8{41OXM9cQN0D&5yWR#ErMwlc7DoNWVy36}m9BBtzoZf$s zoC4in0%N_p#@f|d&19^-Y_7ML7M0(tLlrY?mf6f8l=>p7(~KHb#@6aqVs{3jtEOBB z#4%2Y`KL#OG%^1hC{ahcD}`UCH~**#6J6v78#^Ee8!ma^K=INbM=3E_F|kcEJ^T_a_$H$c1n4IrBRs))0v;fLA>kpx zGWG#QXO&24$BAeSbijI!8xL*V?j?Fmc3vQh0Pm92I=&YN$PNNZ)n*2^v+k8Ot$>kurZ>;H37}~ G^6VENZxDq5 diff --git a/pkg/utils/root.go b/pkg/utils/root.go deleted file mode 100644 index 027ca5b..0000000 --- a/pkg/utils/root.go +++ /dev/null @@ -1,31 +0,0 @@ -package utils - -import ( - formolv1alpha1 "github.com/desmo999r/formol/api/v1alpha1" - corev1 "k8s.io/api/core/v1" -) - -func ContainsString(slice []string, s string) bool { - for _, item := range slice { - if item == s { - return true - } - } - return false -} - -func RemoveString(slice []string, s string) (result []string) { - for _, item := range slice { - if item == s { - continue - } - result = append(result, item) - } - return -} - -func ConfigureResticEnvVar(backupConf *formolv1alpha1.BackupConfiguration, repo *formolv1alpha1.Repo) []corev1.EnvVar { - env := []corev1.EnvVar{} - // S3 backing storage - return env -} diff --git 
a/pkg/utils/root.go~ b/pkg/utils/root.go~ deleted file mode 100644 index dd17272..0000000 --- a/pkg/utils/root.go~ +++ /dev/null @@ -1,33 +0,0 @@ -package utils - -import ( - "fmt" - formolv1alpha1 "github.com/desmo999r/formol/api/v1alpha1" - corev1 "k8s.io/api/core/v1" - "strings" -) - -func ContainsString(slice []string, s string) bool { - for _, item := range slice { - if item == s { - return true - } - } - return false -} - -func RemoveString(slice []string, s string) (result []string) { - for _, item := range slice { - if item == s { - continue - } - result = append(result, item) - } - return -} - -func ConfigureResticEnvVar(backupConf *formolv1alpha1.BackupConfiguration, repo *formolv1alpha1.Repo) []corev1.EnvVar { - env := []corev1.EnvVar{} - // S3 backing storage - return env -} From 9ed45d852827ae65cf4ef4e55052714fc259c762 Mon Sep 17 00:00:00 2001 From: Jean-Marc ANDRE Date: Sat, 4 Feb 2023 18:11:30 +0100 Subject: [PATCH 08/69] Reorganized files one more time --- controllers/backupconfiguration_controller.go | 10 -- .../backupconfiguration_controller_cronjob.go | 103 ------------------ ...backupconfiguration_controller_helpers.go} | 96 ++++++++++++++++ 3 files changed, 96 insertions(+), 113 deletions(-) delete mode 100644 controllers/backupconfiguration_controller_cronjob.go rename controllers/{backupconfiguration_controller_sidecar.go => backupconfiguration_controller_helpers.go} (59%) diff --git a/controllers/backupconfiguration_controller.go b/controllers/backupconfiguration_controller.go index 0db5a17..7a1378c 100644 --- a/controllers/backupconfiguration_controller.go +++ b/controllers/backupconfiguration_controller.go @@ -28,7 +28,6 @@ import ( "sigs.k8s.io/controller-runtime/pkg/log" formolv1alpha1 "github.com/desmo999r/formol/api/v1alpha1" - //formolutils "github.com/desmo999r/formol/pkg/utils" ) // BackupConfigurationReconciler reconciles a BackupConfiguration object @@ -43,15 +42,6 @@ type BackupConfigurationReconciler struct { 
//+kubebuilder:rbac:groups=formol.desmojim.fr,resources=backupconfigurations/status,verbs=get;update;patch //+kubebuilder:rbac:groups=formol.desmojim.fr,resources=backupconfigurations/finalizers,verbs=update -// Reconcile is part of the main kubernetes reconciliation loop which aims to -// move the current state of the cluster closer to the desired state. -// TODO(user): Modify the Reconcile function to compare the state specified by -// the BackupConfiguration object against the actual cluster state, and then -// perform operations to make the cluster state reflect the state specified by -// the user. -// -// For more details, check Reconcile and its Result here: -// - https://pkg.go.dev/sigs.k8s.io/controller-runtime@v0.13.1/pkg/reconcile func (r *BackupConfigurationReconciler) Reconcile(ctx context.Context, req ctrl.Request) (ctrl.Result, error) { r.Context = ctx r.Log = log.FromContext(ctx) diff --git a/controllers/backupconfiguration_controller_cronjob.go b/controllers/backupconfiguration_controller_cronjob.go deleted file mode 100644 index 3a424fb..0000000 --- a/controllers/backupconfiguration_controller_cronjob.go +++ /dev/null @@ -1,103 +0,0 @@ -package controllers - -import ( - formolv1alpha1 "github.com/desmo999r/formol/api/v1alpha1" - batchv1 "k8s.io/api/batch/v1" - corev1 "k8s.io/api/core/v1" - "k8s.io/apimachinery/pkg/api/errors" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - ctrl "sigs.k8s.io/controller-runtime" - "sigs.k8s.io/controller-runtime/pkg/client" -) - -func (r *BackupConfigurationReconciler) DeleteCronJob(backupConf formolv1alpha1.BackupConfiguration) error { - cronjob := &batchv1.CronJob{} - if err := r.Get(r.Context, client.ObjectKey{ - Namespace: backupConf.Namespace, - Name: "backup-" + backupConf.Name, - }, cronjob); err == nil { - r.Log.V(0).Info("Deleting cronjob", "cronjob", cronjob.Name) - return r.Delete(r.Context, cronjob) - } else { - return err - } -} - -func (r *BackupConfigurationReconciler) AddCronJob(backupConf 
formolv1alpha1.BackupConfiguration) error { - cronjob := &batchv1.CronJob{} - if err := r.Get(r.Context, client.ObjectKey{ - Namespace: backupConf.Namespace, - Name: "backup-" + backupConf.Name, - }, cronjob); err == nil { - r.Log.V(0).Info("there is already a cronjob") - var changed bool - if backupConf.Spec.Schedule != cronjob.Spec.Schedule { - r.Log.V(0).Info("cronjob schedule has changed", "old schedule", cronjob.Spec.Schedule, "new schedule", backupConf.Spec.Schedule) - cronjob.Spec.Schedule = backupConf.Spec.Schedule - changed = true - } - if backupConf.Spec.Suspend != nil && backupConf.Spec.Suspend != cronjob.Spec.Suspend { - r.Log.V(0).Info("cronjob suspend has changed", "before", cronjob.Spec.Suspend, "new", backupConf.Spec.Suspend) - cronjob.Spec.Suspend = backupConf.Spec.Suspend - changed = true - } - if changed == true { - if err := r.Update(r.Context, cronjob); err != nil { - r.Log.Error(err, "unable to update cronjob definition") - return err - } - backupConf.Status.Suspended = *backupConf.Spec.Suspend - } - return nil - } else if errors.IsNotFound(err) == false { - r.Log.Error(err, "something went wrong") - return err - } - - cronjob = &batchv1.CronJob{ - ObjectMeta: metav1.ObjectMeta{ - Name: "backup-" + backupConf.Name, - Namespace: backupConf.Namespace, - }, - Spec: batchv1.CronJobSpec{ - Suspend: backupConf.Spec.Suspend, - Schedule: backupConf.Spec.Schedule, - JobTemplate: batchv1.JobTemplateSpec{ - Spec: batchv1.JobSpec{ - Template: corev1.PodTemplateSpec{ - Spec: corev1.PodSpec{ - RestartPolicy: corev1.RestartPolicyOnFailure, - ServiceAccountName: "backupsession-creator", - Containers: []corev1.Container{ - corev1.Container{ - Name: "job-createbackupsession-" + backupConf.Name, - Image: backupConf.Spec.Image, - Args: []string{ - "backupsession", - "create", - "--namespace", - backupConf.Namespace, - "--name", - backupConf.Name, - }, - }, - }, - }, - }, - }, - }, - }, - } - if err := ctrl.SetControllerReference(&backupConf, cronjob, r.Scheme); 
err != nil { - r.Log.Error(err, "unable to set controller on job", "cronjob", cronjob, "backupconf", backupConf) - return err - } - r.Log.V(0).Info("creating the cronjob") - if err := r.Create(r.Context, cronjob); err != nil { - r.Log.Error(err, "unable to create the cronjob", "cronjob", cronjob) - return err - } else { - backupConf.Status.Suspended = *backupConf.Spec.Suspend - return nil - } -} diff --git a/controllers/backupconfiguration_controller_sidecar.go b/controllers/backupconfiguration_controller_helpers.go similarity index 59% rename from controllers/backupconfiguration_controller_sidecar.go rename to controllers/backupconfiguration_controller_helpers.go index 6953302..ec13f9b 100644 --- a/controllers/backupconfiguration_controller_sidecar.go +++ b/controllers/backupconfiguration_controller_helpers.go @@ -2,12 +2,108 @@ package controllers import ( appsv1 "k8s.io/api/apps/v1" + batchv1 "k8s.io/api/batch/v1" corev1 "k8s.io/api/core/v1" + "k8s.io/apimachinery/pkg/api/errors" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + ctrl "sigs.k8s.io/controller-runtime" "sigs.k8s.io/controller-runtime/pkg/client" formolv1alpha1 "github.com/desmo999r/formol/api/v1alpha1" ) +func (r *BackupConfigurationReconciler) DeleteCronJob(backupConf formolv1alpha1.BackupConfiguration) error { + cronjob := &batchv1.CronJob{} + if err := r.Get(r.Context, client.ObjectKey{ + Namespace: backupConf.Namespace, + Name: "backup-" + backupConf.Name, + }, cronjob); err == nil { + r.Log.V(0).Info("Deleting cronjob", "cronjob", cronjob.Name) + return r.Delete(r.Context, cronjob) + } else { + return err + } +} + +func (r *BackupConfigurationReconciler) AddCronJob(backupConf formolv1alpha1.BackupConfiguration) error { + cronjob := &batchv1.CronJob{} + if err := r.Get(r.Context, client.ObjectKey{ + Namespace: backupConf.Namespace, + Name: "backup-" + backupConf.Name, + }, cronjob); err == nil { + r.Log.V(0).Info("there is already a cronjob") + var changed bool + if backupConf.Spec.Schedule != 
cronjob.Spec.Schedule { + r.Log.V(0).Info("cronjob schedule has changed", "old schedule", cronjob.Spec.Schedule, "new schedule", backupConf.Spec.Schedule) + cronjob.Spec.Schedule = backupConf.Spec.Schedule + changed = true + } + if backupConf.Spec.Suspend != nil && backupConf.Spec.Suspend != cronjob.Spec.Suspend { + r.Log.V(0).Info("cronjob suspend has changed", "before", cronjob.Spec.Suspend, "new", backupConf.Spec.Suspend) + cronjob.Spec.Suspend = backupConf.Spec.Suspend + changed = true + } + if changed == true { + if err := r.Update(r.Context, cronjob); err != nil { + r.Log.Error(err, "unable to update cronjob definition") + return err + } + backupConf.Status.Suspended = *backupConf.Spec.Suspend + } + return nil + } else if errors.IsNotFound(err) == false { + r.Log.Error(err, "something went wrong") + return err + } + + cronjob = &batchv1.CronJob{ + ObjectMeta: metav1.ObjectMeta{ + Name: "backup-" + backupConf.Name, + Namespace: backupConf.Namespace, + }, + Spec: batchv1.CronJobSpec{ + Suspend: backupConf.Spec.Suspend, + Schedule: backupConf.Spec.Schedule, + JobTemplate: batchv1.JobTemplateSpec{ + Spec: batchv1.JobSpec{ + Template: corev1.PodTemplateSpec{ + Spec: corev1.PodSpec{ + RestartPolicy: corev1.RestartPolicyOnFailure, + ServiceAccountName: "backupsession-creator", + Containers: []corev1.Container{ + corev1.Container{ + Name: "job-createbackupsession-" + backupConf.Name, + Image: backupConf.Spec.Image, + Args: []string{ + "backupsession", + "create", + "--namespace", + backupConf.Namespace, + "--name", + backupConf.Name, + }, + }, + }, + }, + }, + }, + }, + }, + } + if err := ctrl.SetControllerReference(&backupConf, cronjob, r.Scheme); err != nil { + r.Log.Error(err, "unable to set controller on job", "cronjob", cronjob, "backupconf", backupConf) + return err + } + r.Log.V(0).Info("creating the cronjob") + if err := r.Create(r.Context, cronjob); err != nil { + r.Log.Error(err, "unable to create the cronjob", "cronjob", cronjob) + return err + } else { + 
backupConf.Status.Suspended = *backupConf.Spec.Suspend + return nil + } +} + func (r *BackupConfigurationReconciler) DeleteSidecar(backupConf formolv1alpha1.BackupConfiguration) error { removeTags := func(podSpec *corev1.PodSpec, target formolv1alpha1.Target) { for i, container := range podSpec.Containers { From 67739c43091c7cd4a7ded3cedd64126d24dd4fae Mon Sep 17 00:00:00 2001 From: Jean-Marc ANDRE Date: Sun, 5 Feb 2023 23:19:23 +0100 Subject: [PATCH 09/69] started backupsession controller --- .../backupconfiguration_controller_helpers.go | 16 +++++++ controllers/backupsession_controller.go | 42 +++++++++++++---- .../backupsession_controller_helpers.go | 45 +++++++++++++++++++ 3 files changed, 94 insertions(+), 9 deletions(-) create mode 100644 controllers/backupsession_controller_helpers.go diff --git a/controllers/backupconfiguration_controller_helpers.go b/controllers/backupconfiguration_controller_helpers.go index ec13f9b..7b46a9f 100644 --- a/controllers/backupconfiguration_controller_helpers.go +++ b/controllers/backupconfiguration_controller_helpers.go @@ -1,3 +1,19 @@ +/* +Copyright 2023. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + package controllers import ( diff --git a/controllers/backupsession_controller.go b/controllers/backupsession_controller.go index 3f744d7..3f09074 100644 --- a/controllers/backupsession_controller.go +++ b/controllers/backupsession_controller.go @@ -18,9 +18,11 @@ package controllers import ( "context" + "time" "github.com/go-logr/logr" "k8s.io/apimachinery/pkg/api/errors" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/runtime" ctrl "sigs.k8s.io/controller-runtime" "sigs.k8s.io/controller-runtime/pkg/client" @@ -31,6 +33,7 @@ import ( ) const ( + sessionState string = ".metadata.state" finalizerName string = "finalizer.backupsession.formol.desmojim.fr" ) @@ -46,15 +49,6 @@ type BackupSessionReconciler struct { //+kubebuilder:rbac:groups=formol.desmojim.fr,resources=backupsessions/status,verbs=get;update;patch //+kubebuilder:rbac:groups=formol.desmojim.fr,resources=backupsessions/finalizers,verbs=update -// Reconcile is part of the main kubernetes reconciliation loop which aims to -// move the current state of the cluster closer to the desired state. -// TODO(user): Modify the Reconcile function to compare the state specified by -// the BackupSession object against the actual cluster state, and then -// perform operations to make the cluster state reflect the state specified by -// the user. -// -// For more details, check Reconcile and its Result here: -// - https://pkg.go.dev/sigs.k8s.io/controller-runtime@v0.13.1/pkg/reconcile func (r *BackupSessionReconciler) Reconcile(ctx context.Context, req ctrl.Request) (ctrl.Result, error) { r.Log = log.FromContext(ctx) r.Context = ctx @@ -97,11 +91,41 @@ func (r *BackupSessionReconciler) Reconcile(ctx context.Context, req ctrl.Reques return ctrl.Result{}, err } + switch backupSession.Status.SessionState { + case formolv1alpha1.New: + if r.isBackupOngoing(backupConf) { + r.Log.V(0).Info("there is an ongoing backup. 
Let's reschedule this operation") + return ctrl.Result{ + RequeueAfter: 30 * time.Second, + }, nil + } + default: + // BackupSession has just been created + backupSession.Status.SessionState = formolv1alpha1.New + backupSession.Status.StartTime = &metav1.Time{Time: time.Now()} + if err := r.Status().Update(ctx, &backupSession); err != nil { + r.Log.Error(err, "unable to update BackupSession.Status") + return ctrl.Result{}, err + } + } + return ctrl.Result{}, nil } // SetupWithManager sets up the controller with the Manager. func (r *BackupSessionReconciler) SetupWithManager(mgr ctrl.Manager) error { + if err := mgr.GetFieldIndexer().IndexField( + context.Background(), + &formolv1alpha1.BackupSession{}, + sessionState, + func(rawObj client.Object) []string { + session := rawObj.(*formolv1alpha1.BackupSession) + return []string{ + string(session.Status.SessionState), + } + }); err != nil { + return err + } return ctrl.NewControllerManagedBy(mgr). For(&formolv1alpha1.BackupSession{}). Complete(r) diff --git a/controllers/backupsession_controller_helpers.go b/controllers/backupsession_controller_helpers.go new file mode 100644 index 0000000..52341e4 --- /dev/null +++ b/controllers/backupsession_controller_helpers.go @@ -0,0 +1,45 @@ +/* +Copyright 2023. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package controllers + +import ( + formolv1alpha1 "github.com/desmo999r/formol/api/v1alpha1" + "k8s.io/apimachinery/pkg/fields" + "sigs.k8s.io/controller-runtime/pkg/client" +) + +func (r *BackupSessionReconciler) isBackupOngoing(backupConf formolv1alpha1.BackupConfiguration) bool { + backupSessionList := &formolv1alpha1.BackupSessionList{} + if err := r.List(r.Context, backupSessionList, + client.InNamespace(backupConf.Namespace), client.MatchingFieldsSelector{ + Selector: fields.SelectorFromSet(fields.Set{ + sessionState: "Running", + }), + }); err != nil { + r.Log.Error(err, "unable to get backupsessionlist") + return true + } + return len(backupSessionList.Items) > 0 +} + +func (r *BackupSessionReconciler) startNextTask(backupSession formolv1alpha1.BackupSession, backupConf formolv1alpha1.BackupConfiguration) (formolv1alpha1.TargetStatus, error) { + nextTargetIndex := len(backupSession.Status.Targets) + if nextTargetIndex < len(backupConf.Spec.Targets) { + nextTarget := backupConf.Spec.Targets[nextTargetIndex] + } + return nil, nil +} From e22c29f783966cce761cfe57ece3877a65cc95bc Mon Sep 17 00:00:00 2001 From: Jean-Marc ANDRE Date: Tue, 7 Feb 2023 17:11:16 +0100 Subject: [PATCH 10/69] Cleanup --- api/v1alpha1/.backupsession_types.go.un~ | Bin 7478 -> 0 bytes api/v1alpha1/backupsession_types.go~ | 89 ----------------------- 2 files changed, 89 deletions(-) delete mode 100644 api/v1alpha1/.backupsession_types.go.un~ delete mode 100644 api/v1alpha1/backupsession_types.go~ diff --git a/api/v1alpha1/.backupsession_types.go.un~ b/api/v1alpha1/.backupsession_types.go.un~ deleted file mode 100644 index a0433fe1ba5ebb94a6e220659ba125b7020a746f..0000000000000000000000000000000000000000 GIT binary patch literal 0 HcmV?d00001 literal 7478 zcmeI1&ubGw6vsDd)Q(yeY^(TlyI^g#&6E*?oJx9zDMnUuu4K z%5h_4>D!yN*SBvyem4E&?Ob{=UA(e(|G|q_7eAee=D#|QbAw#jCJM^ryJP~RawlBB z@Uc>&OvSnLPJUsCMxQQ53=kNd}{4KD-f=qKO}HC&iB1EH>BS~E_h~{ag{ms 
zNI(Om3b`@>#CHTl3r)#lY9wfG9<-YhL6t?k6QniQTREFl>rFps)uSN1SC4sn)o-ob zMI!O>B|ois5-K2~bd>?Hg{ssrKK6q;%QnCi3^!{_2Vcun6U1!TkLe66UZy~}MO!xT zTBMpFWzf zbFvrCqpZ+l3+QdfywX5+l-xdxDn$kv|ApqPPII@EA3q8N2Uyr_^WRTUA1Uq06Uatz zfi&Q(k&CBMTaZeY4UUpsxhKgLxk*uo zoz)gks`Ej_lU5M&B}92;2zY3_D+5?80nlw3LIk5mFy9N9L-{bh++8&fq+XF&GN9sI zws$yC!?@8Ks}BNX<<~0`$PWVo_wV^#~D+8r9=|{aA$b az$nf?aU$X#MP&fTr`ITMp$6@jmp=jh6MG5( diff --git a/api/v1alpha1/backupsession_types.go~ b/api/v1alpha1/backupsession_types.go~ deleted file mode 100644 index 06e0ca1..0000000 --- a/api/v1alpha1/backupsession_types.go~ +++ /dev/null @@ -1,89 +0,0 @@ -/* -Copyright 2023. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. 
-*/ - -package v1alpha1 - -import ( - corev1 "k8s.io/api/core/v1" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/apimachinery/pkg/types" -) - -type SessionState string - -const ( - New SessionState = "New" - Init SessionState = "Initializing" - Running SessionState = "Running" - Waiting SessionState = "Waiting" - Finalize SessionState = "Finalizing" - Success SessionState = "Success" - Failure SessionState = "Failure" - Deleted SessionState = "Deleted" -) - -type TargetStatus struct { - Name string `json:"name"` - Kind string `json:"kind"` - SessionState `json:"state"` - SnapshotId string `json:"snapshotId"` - StartTime *metav1.Time `json:"startTime"` - Duration *metav1.Duration `json:"duration"` - Try int `json:"try"` -} - -// BackupSessionSpec defines the desired state of BackupSession -type BackupSessionSpec struct { - Ref corev1.ObjectReference `json:"ref"` -} - -// BackupSessionStatus defines the observed state of BackupSession -type BackupSessionStatus struct { - SessionState `json:"state"` - StartTime *metav1.Time `json:"startTime"` - Targets []TargetStatus `json:"target"` - Keep string `json:"keep"` -} - -//+kubebuilder:object:root=true -//+kubebuilder:subresource:status -// +kubebuilder:resource:shortName="bs" -// +kubebuilder:printcolumn:name="Ref",type=string,JSONPath=`.spec.ref.name` -// +kubebuilder:printcolumn:name="State",type=string,JSONPath=`.status.state` -// +kubebuilder:printcolumn:name="Started",type=string,format=date-time,JSONPath=`.status.startTime` -// +kubebuilder:printcolumn:name="Keep",type=string,JSONPath=`.status.keep` - -// BackupSession is the Schema for the backupsessions API -type BackupSession struct { - metav1.TypeMeta `json:",inline"` - metav1.ObjectMeta `json:"metadata,omitempty"` - - Spec BackupSessionSpec `json:"spec,omitempty"` - Status BackupSessionStatus `json:"status,omitempty"` -} - -//+kubebuilder:object:root=true - -// BackupSessionList contains a list of BackupSession -type BackupSessionList struct { - 
metav1.TypeMeta `json:",inline"` - metav1.ListMeta `json:"metadata,omitempty"` - Items []BackupSession `json:"items"` -} - -func init() { - SchemeBuilder.Register(&BackupSession{}, &BackupSessionList{}) -} From aba4ae462061e18e739b1d09ad981f4de44e3752 Mon Sep 17 00:00:00 2001 From: Jean-Marc ANDRE Date: Tue, 7 Feb 2023 17:12:17 +0100 Subject: [PATCH 11/69] Added sidecar volume mounts --- .../backupconfiguration_controller_helpers.go | 23 +++++++++++++++++-- 1 file changed, 21 insertions(+), 2 deletions(-) diff --git a/controllers/backupconfiguration_controller_helpers.go b/controllers/backupconfiguration_controller_helpers.go index 7b46a9f..0e91ef5 100644 --- a/controllers/backupconfiguration_controller_helpers.go +++ b/controllers/backupconfiguration_controller_helpers.go @@ -17,6 +17,7 @@ limitations under the License. package controllers import ( + "fmt" appsv1 "k8s.io/api/apps/v1" batchv1 "k8s.io/api/batch/v1" corev1 "k8s.io/api/core/v1" @@ -24,6 +25,7 @@ import ( metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" ctrl "sigs.k8s.io/controller-runtime" "sigs.k8s.io/controller-runtime/pkg/client" + "strings" formolv1alpha1 "github.com/desmo999r/formol/api/v1alpha1" ) @@ -175,17 +177,34 @@ func (r *BackupConfigurationReconciler) AddSidecar(backupConf formolv1alpha1.Bac // the backupType: Online needs a sidecar container for every single listed 'container' // if the backupType is something else than Online, the 'container' will still need a sidecar // if it has 'steps' - addTags := func(podSpec *corev1.PodSpec, target formolv1alpha1.Target) bool { + addTags := func(sideCar *corev1.Container, podSpec *corev1.PodSpec, target formolv1alpha1.Target) bool { for i, container := range podSpec.Containers { if container.Name == formolv1alpha1.SIDECARCONTAINER_NAME { return false } for _, targetContainer := range target.Containers { if targetContainer.Name == container.Name { + // Found a target container. Tag it. 
podSpec.Containers[i].Env = append(container.Env, corev1.EnvVar{ Name: formolv1alpha1.TARGETCONTAINER_TAG, Value: container.Name, }) + // targetContainer.Paths are the paths to backup + // We have to find what volumes are mounted under those paths + // and mount them under a path that exists in the sidecar container + for i, path := range targetContainer.Paths { + vm := corev1.VolumeMount{ReadOnly: true} + for _, volumeMount := range container.VolumeMounts { + var longest int = 0 + if strings.HasPrefix(path, volumeMount.MountPath) && len(volumeMount.MountPath) > longest { + longest = len(volumeMount.MountPath) + vm.Name = volumeMount.Name + vm.MountPath = fmt.Sprintf("/backup%d", i) + vm.SubPath = volumeMount.SubPath + } + } + sideCar.VolumeMounts = append(sideCar.VolumeMounts, vm) + } } } } @@ -233,7 +252,7 @@ func (r *BackupConfigurationReconciler) AddSidecar(backupConf formolv1alpha1.Bac r.Log.Error(err, "cannot get deployment", "Deployment", target.TargetName) return err } - if addTags(&deployment.Spec.Template.Spec, target) { + if addTags(&sideCar, &deployment.Spec.Template.Spec, target) { deployment.Spec.Template.Spec.Containers = append(deployment.Spec.Template.Spec.Containers, sideCar) r.Log.V(1).Info("Updating deployment", "deployment", deployment, "containers", deployment.Spec.Template.Spec.Containers) if err := r.Update(r.Context, deployment); err != nil { From 888284017e869f45714a8039454af9d0184764b2 Mon Sep 17 00:00:00 2001 From: Jean-Marc ANDRE Date: Tue, 7 Feb 2023 17:12:43 +0100 Subject: [PATCH 12/69] Set backupsession state to NEW --- api/v1alpha1/backupsession_types.go | 5 +++-- .../backupsession_controller_helpers.go | 22 +++++++++++++++++-- 2 files changed, 23 insertions(+), 4 deletions(-) diff --git a/api/v1alpha1/backupsession_types.go b/api/v1alpha1/backupsession_types.go index 0650e80..836f866 100644 --- a/api/v1alpha1/backupsession_types.go +++ b/api/v1alpha1/backupsession_types.go @@ -35,8 +35,9 @@ const ( ) type TargetStatus struct { - 
Name string `json:"name"` - Kind string `json:"kind"` + BackupType `json:"backupType"` + TargetName string `json:"targetName"` + TargetKind `json:"targetKind"` SessionState `json:"state"` SnapshotId string `json:"snapshotId"` StartTime *metav1.Time `json:"startTime"` diff --git a/controllers/backupsession_controller_helpers.go b/controllers/backupsession_controller_helpers.go index 52341e4..ef4596e 100644 --- a/controllers/backupsession_controller_helpers.go +++ b/controllers/backupsession_controller_helpers.go @@ -18,8 +18,10 @@ package controllers import ( formolv1alpha1 "github.com/desmo999r/formol/api/v1alpha1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/fields" "sigs.k8s.io/controller-runtime/pkg/client" + "time" ) func (r *BackupSessionReconciler) isBackupOngoing(backupConf formolv1alpha1.BackupConfiguration) bool { @@ -36,10 +38,26 @@ func (r *BackupSessionReconciler) isBackupOngoing(backupConf formolv1alpha1.Back return len(backupSessionList.Items) > 0 } -func (r *BackupSessionReconciler) startNextTask(backupSession formolv1alpha1.BackupSession, backupConf formolv1alpha1.BackupConfiguration) (formolv1alpha1.TargetStatus, error) { +func (r *BackupSessionReconciler) startNextTask(backupSession formolv1alpha1.BackupSession, backupConf formolv1alpha1.BackupConfiguration) (*formolv1alpha1.TargetStatus, error) { nextTargetIndex := len(backupSession.Status.Targets) if nextTargetIndex < len(backupConf.Spec.Targets) { nextTarget := backupConf.Spec.Targets[nextTargetIndex] + nextTargetStatus := formolv1alpha1.TargetStatus{ + BackupType: nextTarget.BackupType, + TargetName: nextTarget.TargetName, + TargetKind: nextTarget.TargetKind, + SessionState: formolv1alpha1.New, + StartTime: &metav1.Time{Time: time.Now()}, + Try: 1, + } + switch nextTarget.BackupType { + case formolv1alpha1.JobKind: + r.Log.V(0).Info("Starts a new JobKind task", "target", nextTarget) + case formolv1alpha1.SnapshotKind: + r.Log.V(0).Info("Starts a new SnapshotKind 
task", "target", nextTarget) + } + return &nextTargetStatus, nil + } else { + return nil, nil } - return nil, nil } From 8f180f5f52f0c6bde2b890e00a409b533be2ae08 Mon Sep 17 00:00:00 2001 From: Jean-Marc ANDRE Date: Tue, 7 Feb 2023 17:53:58 +0100 Subject: [PATCH 13/69] Moved BackupSession to Running. Set the first task to New. --- api/v1alpha1/backupsession_types.go | 4 ++-- controllers/backupsession_controller.go | 19 +++++++++++++++++++ .../backupsession_controller_helpers.go | 9 ++++++--- 3 files changed, 27 insertions(+), 5 deletions(-) diff --git a/api/v1alpha1/backupsession_types.go b/api/v1alpha1/backupsession_types.go index 836f866..6ba8c27 100644 --- a/api/v1alpha1/backupsession_types.go +++ b/api/v1alpha1/backupsession_types.go @@ -41,7 +41,7 @@ type TargetStatus struct { SessionState `json:"state"` SnapshotId string `json:"snapshotId"` StartTime *metav1.Time `json:"startTime"` - Duration *metav1.Duration `json:"duration"` + Duration *metav1.Duration `json:"duration,omitempty"` Try int `json:"try"` } @@ -54,7 +54,7 @@ type BackupSessionSpec struct { type BackupSessionStatus struct { SessionState `json:"state"` StartTime *metav1.Time `json:"startTime"` - Targets []TargetStatus `json:"target"` + Targets []TargetStatus `json:"target,omitempty"` Keep string `json:"keep"` } diff --git a/controllers/backupsession_controller.go b/controllers/backupsession_controller.go index 3f09074..cf0fc62 100644 --- a/controllers/backupsession_controller.go +++ b/controllers/backupsession_controller.go @@ -99,6 +99,25 @@ func (r *BackupSessionReconciler) Reconcile(ctx context.Context, req ctrl.Reques RequeueAfter: 30 * time.Second, }, nil } + if nextTargetStatus := r.startNextTask(&backupSession, backupConf); nextTargetStatus != nil { + r.Log.V(0).Info("New backup. 
Start the first task", "task", nextTargetStatus) + backupSession.Status.SessionState = formolv1alpha1.Running + if err := r.Status().Update(ctx, &backupSession); err != nil { + r.Log.Error(err, "unable to update BackupSession status") + } + return ctrl.Result{}, err + } else { + r.Log.V(0).Info("No first target? That should not happen. Mark the backup has failed") + backupSession.Status.SessionState = formolv1alpha1.Failure + if err := r.Status().Update(ctx, &backupSession); err != nil { + r.Log.Error(err, "unable to update BackupSession status") + } + return ctrl.Result{}, err + } + case formolv1alpha1.Running: + // Backup ongoing. Check the status of the last backup task and decide what to do next. + case formolv1alpha1.Failure: + // Failed backup. Don't do anything anymore default: // BackupSession has just been created backupSession.Status.SessionState = formolv1alpha1.New diff --git a/controllers/backupsession_controller_helpers.go b/controllers/backupsession_controller_helpers.go index ef4596e..86d678c 100644 --- a/controllers/backupsession_controller_helpers.go +++ b/controllers/backupsession_controller_helpers.go @@ -38,7 +38,7 @@ func (r *BackupSessionReconciler) isBackupOngoing(backupConf formolv1alpha1.Back return len(backupSessionList.Items) > 0 } -func (r *BackupSessionReconciler) startNextTask(backupSession formolv1alpha1.BackupSession, backupConf formolv1alpha1.BackupConfiguration) (*formolv1alpha1.TargetStatus, error) { +func (r *BackupSessionReconciler) startNextTask(backupSession *formolv1alpha1.BackupSession, backupConf formolv1alpha1.BackupConfiguration) *formolv1alpha1.TargetStatus { nextTargetIndex := len(backupSession.Status.Targets) if nextTargetIndex < len(backupConf.Spec.Targets) { nextTarget := backupConf.Spec.Targets[nextTargetIndex] @@ -51,13 +51,16 @@ func (r *BackupSessionReconciler) startNextTask(backupSession formolv1alpha1.Bac Try: 1, } switch nextTarget.BackupType { + case formolv1alpha1.OnlineKind: + r.Log.V(0).Info("Starts a new 
OnlineKind task", "target", nextTarget) case formolv1alpha1.JobKind: r.Log.V(0).Info("Starts a new JobKind task", "target", nextTarget) case formolv1alpha1.SnapshotKind: r.Log.V(0).Info("Starts a new SnapshotKind task", "target", nextTarget) } - return &nextTargetStatus, nil + backupSession.Status.Targets = append(backupSession.Status.Targets, nextTargetStatus) + return &nextTargetStatus } else { - return nil, nil + return nil } } From 06999eb5537620aba484ea47875c3aef80389ca2 Mon Sep 17 00:00:00 2001 From: Jean-Marc ANDRE Date: Tue, 7 Feb 2023 23:15:13 +0100 Subject: [PATCH 14/69] Most of the state machine is implemented --- api/v1alpha1/backupconfiguration_types.go | 6 ++-- api/v1alpha1/backupsession_types.go | 7 +++-- controllers/backupsession_controller.go | 35 +++++++++++++++++++++++ 3 files changed, 42 insertions(+), 6 deletions(-) diff --git a/api/v1alpha1/backupconfiguration_types.go b/api/v1alpha1/backupconfiguration_types.go index 701ac03..3a7d9f1 100644 --- a/api/v1alpha1/backupconfiguration_types.go +++ b/api/v1alpha1/backupconfiguration_types.go @@ -47,9 +47,7 @@ type Step struct { type TargetContainer struct { Name string `json:"name"` Paths []string `json:"paths,omitempty"` - // +kubebuilder:default:=2 - Retry int `json:"retry"` - Steps []Step `json:"steps,omitempty"` + Steps []Step `json:"steps,omitempty"` } type Target struct { @@ -57,6 +55,8 @@ type Target struct { TargetKind `json:"targetKind"` TargetName string `json:"targetName"` Containers []TargetContainer `json:"containers"` + // +kubebuilder:default:=2 + Retry int `json:"retry"` } type Keep struct { diff --git a/api/v1alpha1/backupsession_types.go b/api/v1alpha1/backupsession_types.go index 6ba8c27..7bab396 100644 --- a/api/v1alpha1/backupsession_types.go +++ b/api/v1alpha1/backupsession_types.go @@ -53,9 +53,10 @@ type BackupSessionSpec struct { // BackupSessionStatus defines the observed state of BackupSession type BackupSessionStatus struct { SessionState `json:"state"` - StartTime 
*metav1.Time `json:"startTime"` - Targets []TargetStatus `json:"target,omitempty"` - Keep string `json:"keep"` + StartTime *metav1.Time `json:"startTime"` + // +optional + Targets []TargetStatus `json:"target,omitempty"` + Keep string `json:"keep"` } //+kubebuilder:object:root=true diff --git a/controllers/backupsession_controller.go b/controllers/backupsession_controller.go index cf0fc62..f48861b 100644 --- a/controllers/backupsession_controller.go +++ b/controllers/backupsession_controller.go @@ -116,8 +116,43 @@ func (r *BackupSessionReconciler) Reconcile(ctx context.Context, req ctrl.Reques } case formolv1alpha1.Running: // Backup ongoing. Check the status of the last backup task and decide what to do next. + currentTargetStatus := &(backupSession.Status.Targets[len(backupSession.Status.Targets)-1]) + switch currentTargetStatus.SessionState { + case formolv1alpha1.Running: + r.Log.V(0).Info("Current task is still running. Wait until it's finished") + case formolv1alpha1.Success: + r.Log.V(0).Info("Last backup task was a success. Start a new one") + if nextTargetStatus := r.startNextTask(&backupSession, backupConf); nextTargetStatus != nil { + r.Log.V(0).Info("Starting a new task", "task", nextTargetStatus) + } else { + r.Log.V(0).Info("No more tasks to start. The backup is a success. Let's do some cleanup") + backupSession.Status.SessionState = formolv1alpha1.Success + } + if err := r.Status().Update(ctx, &backupSession); err != nil { + r.Log.Error(err, "unable to update BackupSession") + } + return ctrl.Result{}, err + case formolv1alpha1.Failure: + // Last task failed. Try to run it again + if currentTargetStatus.Try < backupConf.Spec.Targets[len(backupSession.Status.Targets)-1].Retry { + r.Log.V(0).Info("Last task failed. 
Try to run it again") + currentTargetStatus.Try++ + currentTargetStatus.SessionState = formolv1alpha1.New + currentTargetStatus.StartTime = &metav1.Time{Time: time.Now()} + } else { + r.Log.V(0).Info("Task failed again and for the last time") + backupSession.Status.SessionState = formolv1alpha1.Failure + } + if err := r.Status().Update(ctx, &backupSession); err != nil { + r.Log.Error(err, "unable to update BackupSession") + } + return ctrl.Result{}, err + } + case formolv1alpha1.Failure: // Failed backup. Don't do anything anymore + case formolv1alpha1.Success: + // Backup was a success default: // BackupSession has just been created backupSession.Status.SessionState = formolv1alpha1.New From 6c4d1a749c2585b024e225bf42a741bbc97e7c26 Mon Sep 17 00:00:00 2001 From: Jean-Marc Andre Date: Wed, 8 Feb 2023 17:54:24 +0100 Subject: [PATCH 15/69] Added some RBAC to allow the sidecar container to get BackupSession informations --- .../backupconfiguration_controller_helpers.go | 113 +++++++++++++++++- 1 file changed, 109 insertions(+), 4 deletions(-) diff --git a/controllers/backupconfiguration_controller_helpers.go b/controllers/backupconfiguration_controller_helpers.go index 0e91ef5..04baf85 100644 --- a/controllers/backupconfiguration_controller_helpers.go +++ b/controllers/backupconfiguration_controller_helpers.go @@ -21,6 +21,7 @@ import ( appsv1 "k8s.io/api/apps/v1" batchv1 "k8s.io/api/batch/v1" corev1 "k8s.io/api/core/v1" + rbacv1 "k8s.io/api/rbac/v1" "k8s.io/apimachinery/pkg/api/errors" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" ctrl "sigs.k8s.io/controller-runtime" @@ -30,6 +31,11 @@ import ( formolv1alpha1 "github.com/desmo999r/formol/api/v1alpha1" ) +const ( + FORMOL_SA = "formol-controller" + FORMOL_SIDECAR_ROLE = "formol:sidecar-role" +) + func (r *BackupConfigurationReconciler) DeleteCronJob(backupConf formolv1alpha1.BackupConfiguration) error { cronjob := &batchv1.CronJob{} if err := r.Get(r.Context, client.ObjectKey{ @@ -236,10 +242,19 @@ func (r 
*BackupConfigurationReconciler) AddSidecar(backupConf formolv1alpha1.Bac Name: formolv1alpha1.SIDECARCONTAINER_NAME, Image: backupConf.Spec.Image, Args: []string{"backupsession", "server"}, - Env: append(env, corev1.EnvVar{ - Name: formolv1alpha1.TARGET_NAME, - Value: target.TargetName, - }), + Env: append(env, + corev1.EnvVar{ + Name: formolv1alpha1.TARGET_NAME, + Value: target.TargetName, + }, + corev1.EnvVar{ + Name: formolv1alpha1.POD_NAMESPACE, + ValueFrom: &corev1.EnvVarSource{ + FieldRef: &corev1.ObjectFieldSelector{ + FieldPath: "metadata.namespace", + }, + }, + }), VolumeMounts: []corev1.VolumeMount{}, } switch target.TargetKind { @@ -253,6 +268,15 @@ func (r *BackupConfigurationReconciler) AddSidecar(backupConf formolv1alpha1.Bac return err } if addTags(&sideCar, &deployment.Spec.Template.Spec, target) { + if err := r.createRBACSidecar(corev1.ServiceAccount{ + ObjectMeta: metav1.ObjectMeta{ + Namespace: deployment.Namespace, + Name: deployment.Spec.Template.Spec.ServiceAccountName, + }, + }); err != nil { + r.Log.Error(err, "unable to create RBAC for the sidecar container") + return err + } deployment.Spec.Template.Spec.Containers = append(deployment.Spec.Template.Spec.Containers, sideCar) r.Log.V(1).Info("Updating deployment", "deployment", deployment, "containers", deployment.Spec.Template.Spec.Containers) if err := r.Update(r.Context, deployment); err != nil { @@ -266,3 +290,84 @@ func (r *BackupConfigurationReconciler) AddSidecar(backupConf formolv1alpha1.Bac return nil } + +func (r *BackupConfigurationReconciler) createRBACSidecar(sa corev1.ServiceAccount) error { + // sa := corev1.ServiceAccount {} + // if err := r.Get(r.Context, client.ObjectKey { + // Namespace: backupConf.Namespace, + // Name: FORMOL_SA, + // }, &sa); err != nil && errors.IsNotFound(err) { + // sa = corev1.ServiceAccount { + // ObjectMeta: metav1.ObjectMeta { + // Namespace: backupConf.Namespace, + // Name: FORMOL_SA, + // }, + // } + // r.Log.V(0).Info("Creating formol service 
account", "sa", sa) + // if err = r.Create(r.Context, &sa); err != nil { + // r.Log.Error(err, "unable to create service account") + // return err + // } + // } + if sa.Name == "" { + sa.Name = "default" + } + role := rbacv1.Role{} + if err := r.Get(r.Context, client.ObjectKey{ + Namespace: sa.Namespace, + Name: FORMOL_SIDECAR_ROLE, + }, &role); err != nil && errors.IsNotFound(err) { + role = rbacv1.Role{ + ObjectMeta: metav1.ObjectMeta{ + Namespace: sa.Namespace, + Name: FORMOL_SIDECAR_ROLE, + }, + Rules: []rbacv1.PolicyRule{ + rbacv1.PolicyRule{ + Verbs: []string{"get", "list", "watch"}, + APIGroups: []string{"formol.desmojim.fr"}, + Resources: []string{"backupsessions", "backupconfigurations"}, + }, + rbacv1.PolicyRule{ + Verbs: []string{"get", "list", "watch", "create", "update", "patch", "delete"}, + APIGroups: []string{"formol.desmojim.fr"}, + Resources: []string{"backupsessions/status"}, + }, + }, + } + r.Log.V(0).Info("Creating formol sidecar role", "role", role) + if err = r.Create(r.Context, &role); err != nil { + r.Log.Error(err, "unable to create sidecar role") + return err + } + } + rolebinding := rbacv1.RoleBinding{} + if err := r.Get(r.Context, client.ObjectKey{ + Namespace: sa.Namespace, + Name: FORMOL_SIDECAR_ROLE, + }, &rolebinding); err != nil && errors.IsNotFound(err) { + rolebinding = rbacv1.RoleBinding{ + ObjectMeta: metav1.ObjectMeta{ + Namespace: sa.Namespace, + Name: FORMOL_SIDECAR_ROLE, + }, + Subjects: []rbacv1.Subject{ + rbacv1.Subject{ + Kind: "ServiceAccount", + Name: sa.Name, + }, + }, + RoleRef: rbacv1.RoleRef{ + APIGroup: "rbac.authorization.k8s.io", + Kind: "Role", + Name: FORMOL_SIDECAR_ROLE, + }, + } + r.Log.V(0).Info("Creating formol sidecar rolebinding", "rolebinding", rolebinding) + if err = r.Create(r.Context, &rolebinding); err != nil { + r.Log.Error(err, "unable to create sidecar rolebinding") + return err + } + } + return nil +} From e80871346e62e4a1d35acc5f8dec9dfc7d9f7595 Mon Sep 17 00:00:00 2001 From: Jean-Marc Andre 
Date: Thu, 16 Feb 2023 11:24:35 +0100 Subject: [PATCH 16/69] defined constants for restic ENV variables --- api/v1alpha1/repo_types.go | 15 +++++++++++---- 1 file changed, 11 insertions(+), 4 deletions(-) diff --git a/api/v1alpha1/repo_types.go b/api/v1alpha1/repo_types.go index 156d9e5..6c3b622 100644 --- a/api/v1alpha1/repo_types.go +++ b/api/v1alpha1/repo_types.go @@ -23,6 +23,13 @@ import ( "strings" ) +const ( + RESTIC_REPOSITORY = "RESTIC_REPOSITORY" + RESTIC_PASSWORD = "RESTIC_PASSWORD" + AWS_ACCESS_KEY_ID = "AWS_ACCESS_KEY_ID" + AWS_SECRET_ACCESS_KEY = "AWS_SECRET_ACCESS_KEY" +) + // EDIT THIS FILE! THIS IS SCAFFOLDING FOR YOU TO OWN! // NOTE: json tags are required. Any new fields you add must have json tags for the fields to be serialized. type S3 struct { @@ -83,13 +90,13 @@ func (repo *Repo) GetResticEnv(backupConf BackupConfiguration) []corev1.EnvVar { strings.ToUpper(backupConf.Namespace), strings.ToLower(backupConf.Name)) env = append(env, corev1.EnvVar{ - Name: "RESTIC_REPOSITORY", + Name: RESTIC_REPOSITORY, Value: url, }) for _, key := range []string{ - "AWS_ACCESS_KEY_ID", - "AWS_SECRET_ACCESS_KEY", - "RESTIC_PASSWORD", + AWS_ACCESS_KEY_ID, + AWS_SECRET_ACCESS_KEY, + RESTIC_PASSWORD, } { env = append(env, corev1.EnvVar{ Name: key, From 55f3dc8dff7dcca0986867517f77f0888315d317 Mon Sep 17 00:00:00 2001 From: Jean-Marc Andre Date: Thu, 16 Feb 2023 11:25:52 +0100 Subject: [PATCH 17/69] The sidecar configuration is different depending on the backup type. 
Need more AddSidecar functions --- controllers/backupconfiguration_controller.go | 19 ++- .../backupconfiguration_controller_helpers.go | 121 ++++++++---------- 2 files changed, 66 insertions(+), 74 deletions(-) diff --git a/controllers/backupconfiguration_controller.go b/controllers/backupconfiguration_controller.go index 7a1378c..600c213 100644 --- a/controllers/backupconfiguration_controller.go +++ b/controllers/backupconfiguration_controller.go @@ -93,11 +93,20 @@ func (r *BackupConfigurationReconciler) Reconcile(ctx context.Context, req ctrl. backupConf.Status.ActiveCronJob = true } - if err := r.AddSidecar(backupConf); err != nil { - r.Log.Error(err, "unable to add sidecar container") - return ctrl.Result{}, err - } else { - backupConf.Status.ActiveSidecar = true + for _, target := range backupConf.Spec.Targets { + switch target.BackupType { + case formolv1alpha1.OnlineKind: + // TODO: add a sidecar to the pod with the target.Containers[].Paths mounted + if err := r.AddOnlineSidecar(backupConf, target); err != nil { + r.Log.Error(err, "unable to add online sidecar") + return ctrl.Result{}, err + } + backupConf.Status.ActiveSidecar = true + case formolv1alpha1.JobKind: + // TODO: add a sidecar to the pod with a shared + case formolv1alpha1.SnapshotKind: + // TOD: add a sidecar to run the steps + } } if err := r.Status().Update(ctx, &backupConf); err != nil { diff --git a/controllers/backupconfiguration_controller_helpers.go b/controllers/backupconfiguration_controller_helpers.go index 04baf85..cd9c333 100644 --- a/controllers/backupconfiguration_controller_helpers.go +++ b/controllers/backupconfiguration_controller_helpers.go @@ -178,11 +178,7 @@ func (r *BackupConfigurationReconciler) DeleteSidecar(backupConf formolv1alpha1. 
return nil } -func (r *BackupConfigurationReconciler) AddSidecar(backupConf formolv1alpha1.BackupConfiguration) error { - // Go through all the 'targets' - // the backupType: Online needs a sidecar container for every single listed 'container' - // if the backupType is something else than Online, the 'container' will still need a sidecar - // if it has 'steps' +func (r *BackupConfigurationReconciler) AddOnlineSidecar(backupConf formolv1alpha1.BackupConfiguration, target formolv1alpha1.Target) error { addTags := func(sideCar *corev1.Container, podSpec *corev1.PodSpec, target formolv1alpha1.Target) bool { for i, container := range podSpec.Containers { if container.Name == formolv1alpha1.SIDECARCONTAINER_NAME { @@ -217,73 +213,60 @@ func (r *BackupConfigurationReconciler) AddSidecar(backupConf formolv1alpha1.Bac return true } - for _, target := range backupConf.Spec.Targets { - addSidecar := false - for _, targetContainer := range target.Containers { - if len(targetContainer.Steps) > 0 { - addSidecar = true - } + repo := formolv1alpha1.Repo{} + if err := r.Get(r.Context, client.ObjectKey{ + Namespace: backupConf.Namespace, + Name: backupConf.Spec.Repository, + }, &repo); err != nil { + r.Log.Error(err, "unable to get Repo") + return err + } + r.Log.V(1).Info("Got Repository", "repo", repo) + env := repo.GetResticEnv(backupConf) + sideCar := corev1.Container{ + Name: formolv1alpha1.SIDECARCONTAINER_NAME, + Image: backupConf.Spec.Image, + Args: []string{"backupsession", "server"}, + Env: append(env, + corev1.EnvVar{ + Name: formolv1alpha1.TARGET_NAME, + Value: target.TargetName, + }, + corev1.EnvVar{ + Name: formolv1alpha1.POD_NAMESPACE, + ValueFrom: &corev1.EnvVarSource{ + FieldRef: &corev1.ObjectFieldSelector{ + FieldPath: "metadata.namespace", + }, + }, + }), + VolumeMounts: []corev1.VolumeMount{}, + } + switch target.TargetKind { + case formolv1alpha1.Deployment: + deployment := &appsv1.Deployment{} + if err := r.Get(r.Context, client.ObjectKey{ + Namespace: 
backupConf.Namespace, + Name: target.TargetName, + }, deployment); err != nil { + r.Log.Error(err, "cannot get deployment", "Deployment", target.TargetName) + return err } - if target.BackupType == formolv1alpha1.OnlineKind { - addSidecar = true - } - if addSidecar { - repo := formolv1alpha1.Repo{} - if err := r.Get(r.Context, client.ObjectKey{ - Namespace: backupConf.Namespace, - Name: backupConf.Spec.Repository, - }, &repo); err != nil { - r.Log.Error(err, "unable to get Repo") + if addTags(&sideCar, &deployment.Spec.Template.Spec, target) { + if err := r.createRBACSidecar(corev1.ServiceAccount{ + ObjectMeta: metav1.ObjectMeta{ + Namespace: deployment.Namespace, + Name: deployment.Spec.Template.Spec.ServiceAccountName, + }, + }); err != nil { + r.Log.Error(err, "unable to create RBAC for the sidecar container") return err } - r.Log.V(1).Info("Got Repository", "repo", repo) - env := repo.GetResticEnv(backupConf) - sideCar := corev1.Container{ - Name: formolv1alpha1.SIDECARCONTAINER_NAME, - Image: backupConf.Spec.Image, - Args: []string{"backupsession", "server"}, - Env: append(env, - corev1.EnvVar{ - Name: formolv1alpha1.TARGET_NAME, - Value: target.TargetName, - }, - corev1.EnvVar{ - Name: formolv1alpha1.POD_NAMESPACE, - ValueFrom: &corev1.EnvVarSource{ - FieldRef: &corev1.ObjectFieldSelector{ - FieldPath: "metadata.namespace", - }, - }, - }), - VolumeMounts: []corev1.VolumeMount{}, - } - switch target.TargetKind { - case formolv1alpha1.Deployment: - deployment := &appsv1.Deployment{} - if err := r.Get(r.Context, client.ObjectKey{ - Namespace: backupConf.Namespace, - Name: target.TargetName, - }, deployment); err != nil { - r.Log.Error(err, "cannot get deployment", "Deployment", target.TargetName) - return err - } - if addTags(&sideCar, &deployment.Spec.Template.Spec, target) { - if err := r.createRBACSidecar(corev1.ServiceAccount{ - ObjectMeta: metav1.ObjectMeta{ - Namespace: deployment.Namespace, - Name: deployment.Spec.Template.Spec.ServiceAccountName, - }, - 
}); err != nil { - r.Log.Error(err, "unable to create RBAC for the sidecar container") - return err - } - deployment.Spec.Template.Spec.Containers = append(deployment.Spec.Template.Spec.Containers, sideCar) - r.Log.V(1).Info("Updating deployment", "deployment", deployment, "containers", deployment.Spec.Template.Spec.Containers) - if err := r.Update(r.Context, deployment); err != nil { - r.Log.Error(err, "cannot update deployment", "Deployment", deployment) - return err - } - } + deployment.Spec.Template.Spec.Containers = append(deployment.Spec.Template.Spec.Containers, sideCar) + r.Log.V(1).Info("Updating deployment", "deployment", deployment, "containers", deployment.Spec.Template.Spec.Containers) + if err := r.Update(r.Context, deployment); err != nil { + r.Log.Error(err, "cannot update deployment", "Deployment", deployment) + return err } } } From c89a5222625eb4851a04bc3b1d18c5b11bb1faad Mon Sep 17 00:00:00 2001 From: Jean-Marc ANDRE Date: Thu, 16 Feb 2023 21:40:22 +0100 Subject: [PATCH 18/69] added BackupConfiguration.Status.Targets struct to hold the paths to be backed up in the sidecar container --- api/v1alpha1/backupconfiguration_types.go | 9 ++++ api/v1alpha1/zz_generated.deepcopy.go | 27 ++++++++++++ controllers/backupconfiguration_controller.go | 8 +++- .../backupconfiguration_controller_helpers.go | 42 +++++++++++-------- 4 files changed, 67 insertions(+), 19 deletions(-) diff --git a/api/v1alpha1/backupconfiguration_types.go b/api/v1alpha1/backupconfiguration_types.go index 3a7d9f1..aff1486 100644 --- a/api/v1alpha1/backupconfiguration_types.go +++ b/api/v1alpha1/backupconfiguration_types.go @@ -38,6 +38,8 @@ const ( JobKind BackupType = "Job" ) +const BACKUP_PREFIX_PATH = `backup` + type Step struct { Name string `json:"name"` // +optional @@ -59,6 +61,11 @@ type Target struct { Retry int `json:"retry"` } +type TargetSidecarPath struct { + TargetName string `json:"targetName"` + SidecarPaths []string `json:"sidecarPaths"` +} + type Keep struct { Last 
int32 `json:"last"` Daily int32 `json:"daily"` @@ -84,6 +91,8 @@ type BackupConfigurationStatus struct { Suspended bool `json:"suspended"` ActiveCronJob bool `json:"activeCronJob"` ActiveSidecar bool `json:"activeSidecar"` + // +optional + Targets []TargetSidecarPath `json:"targets,omitempty"` } //+kubebuilder:object:root=true diff --git a/api/v1alpha1/zz_generated.deepcopy.go b/api/v1alpha1/zz_generated.deepcopy.go index 3cd2fac..1cef863 100644 --- a/api/v1alpha1/zz_generated.deepcopy.go +++ b/api/v1alpha1/zz_generated.deepcopy.go @@ -145,6 +145,13 @@ func (in *BackupConfigurationStatus) DeepCopyInto(out *BackupConfigurationStatus in, out := &in.LastBackupTime, &out.LastBackupTime *out = (*in).DeepCopy() } + if in.Targets != nil { + in, out := &in.Targets, &out.Targets + *out = make([]TargetSidecarPath, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } } // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new BackupConfigurationStatus. @@ -610,6 +617,26 @@ func (in *TargetContainer) DeepCopy() *TargetContainer { return out } +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *TargetSidecarPath) DeepCopyInto(out *TargetSidecarPath) { + *out = *in + if in.SidecarPaths != nil { + in, out := &in.SidecarPaths, &out.SidecarPaths + *out = make([]string, len(*in)) + copy(*out, *in) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TargetSidecarPath. +func (in *TargetSidecarPath) DeepCopy() *TargetSidecarPath { + if in == nil { + return nil + } + out := new(TargetSidecarPath) + in.DeepCopyInto(out) + return out +} + // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
func (in *TargetStatus) DeepCopyInto(out *TargetStatus) { *out = *in diff --git a/controllers/backupconfiguration_controller.go b/controllers/backupconfiguration_controller.go index 600c213..792d94c 100644 --- a/controllers/backupconfiguration_controller.go +++ b/controllers/backupconfiguration_controller.go @@ -97,9 +97,15 @@ func (r *BackupConfigurationReconciler) Reconcile(ctx context.Context, req ctrl. switch target.BackupType { case formolv1alpha1.OnlineKind: // TODO: add a sidecar to the pod with the target.Containers[].Paths mounted - if err := r.AddOnlineSidecar(backupConf, target); err != nil { + if sidecarPaths, err := r.addOnlineSidecar(backupConf, target); err != nil { r.Log.Error(err, "unable to add online sidecar") return ctrl.Result{}, err + } else if len(sidecarPaths) > 0 { + backupConf.Status.Targets = append(backupConf.Status.Targets, + formolv1alpha1.TargetSidecarPath{ + TargetName: target.TargetName, + SidecarPaths: sidecarPaths, + }) } backupConf.Status.ActiveSidecar = true case formolv1alpha1.JobKind: diff --git a/controllers/backupconfiguration_controller_helpers.go b/controllers/backupconfiguration_controller_helpers.go index cd9c333..26c0f9e 100644 --- a/controllers/backupconfiguration_controller_helpers.go +++ b/controllers/backupconfiguration_controller_helpers.go @@ -24,9 +24,9 @@ import ( rbacv1 "k8s.io/api/rbac/v1" "k8s.io/apimachinery/pkg/api/errors" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "path/filepath" ctrl "sigs.k8s.io/controller-runtime" "sigs.k8s.io/controller-runtime/pkg/client" - "strings" formolv1alpha1 "github.com/desmo999r/formol/api/v1alpha1" ) @@ -178,11 +178,12 @@ func (r *BackupConfigurationReconciler) DeleteSidecar(backupConf formolv1alpha1. 
return nil } -func (r *BackupConfigurationReconciler) AddOnlineSidecar(backupConf formolv1alpha1.BackupConfiguration, target formolv1alpha1.Target) error { - addTags := func(sideCar *corev1.Container, podSpec *corev1.PodSpec, target formolv1alpha1.Target) bool { +func (r *BackupConfigurationReconciler) addOnlineSidecar(backupConf formolv1alpha1.BackupConfiguration, target formolv1alpha1.Target) (sidecarPaths []string, err error) { + addTags := func(sideCar *corev1.Container, podSpec *corev1.PodSpec, target formolv1alpha1.Target) ([]string, bool) { + var sidecarPaths []string for i, container := range podSpec.Containers { if container.Name == formolv1alpha1.SIDECARCONTAINER_NAME { - return false + return sidecarPaths, false } for _, targetContainer := range target.Containers { if targetContainer.Name == container.Name { @@ -196,30 +197,34 @@ func (r *BackupConfigurationReconciler) AddOnlineSidecar(backupConf formolv1alph // and mount them under a path that exists in the sidecar container for i, path := range targetContainer.Paths { vm := corev1.VolumeMount{ReadOnly: true} + var longest int = 0 + var sidecarPath string for _, volumeMount := range container.VolumeMounts { - var longest int = 0 - if strings.HasPrefix(path, volumeMount.MountPath) && len(volumeMount.MountPath) > longest { + // if strings.HasPrefix(path, volumeMount.MountPath) && len(volumeMount.MountPath) > longest { + if rel, err := filepath.Rel(volumeMount.MountPath, path); err == nil && len(volumeMount.MountPath) > longest { longest = len(volumeMount.MountPath) vm.Name = volumeMount.Name - vm.MountPath = fmt.Sprintf("/backup%d", i) + vm.MountPath = fmt.Sprintf("/%s%d", formolv1alpha1.BACKUP_PREFIX_PATH, i) vm.SubPath = volumeMount.SubPath + sidecarPath = filepath.Join(vm.MountPath, rel) } } sideCar.VolumeMounts = append(sideCar.VolumeMounts, vm) + sidecarPaths = append(sidecarPaths, sidecarPath) } } } } - return true + return sidecarPaths, true } repo := formolv1alpha1.Repo{} - if err := 
r.Get(r.Context, client.ObjectKey{ + if err = r.Get(r.Context, client.ObjectKey{ Namespace: backupConf.Namespace, Name: backupConf.Spec.Repository, }, &repo); err != nil { r.Log.Error(err, "unable to get Repo") - return err + return } r.Log.V(1).Info("Got Repository", "repo", repo) env := repo.GetResticEnv(backupConf) @@ -245,33 +250,34 @@ func (r *BackupConfigurationReconciler) AddOnlineSidecar(backupConf formolv1alph switch target.TargetKind { case formolv1alpha1.Deployment: deployment := &appsv1.Deployment{} - if err := r.Get(r.Context, client.ObjectKey{ + if err = r.Get(r.Context, client.ObjectKey{ Namespace: backupConf.Namespace, Name: target.TargetName, }, deployment); err != nil { r.Log.Error(err, "cannot get deployment", "Deployment", target.TargetName) - return err + return } - if addTags(&sideCar, &deployment.Spec.Template.Spec, target) { - if err := r.createRBACSidecar(corev1.ServiceAccount{ + if paths, add := addTags(&sideCar, &deployment.Spec.Template.Spec, target); add == true { + sidecarPaths = paths + if err = r.createRBACSidecar(corev1.ServiceAccount{ ObjectMeta: metav1.ObjectMeta{ Namespace: deployment.Namespace, Name: deployment.Spec.Template.Spec.ServiceAccountName, }, }); err != nil { r.Log.Error(err, "unable to create RBAC for the sidecar container") - return err + return } deployment.Spec.Template.Spec.Containers = append(deployment.Spec.Template.Spec.Containers, sideCar) r.Log.V(1).Info("Updating deployment", "deployment", deployment, "containers", deployment.Spec.Template.Spec.Containers) - if err := r.Update(r.Context, deployment); err != nil { + if err = r.Update(r.Context, deployment); err != nil { r.Log.Error(err, "cannot update deployment", "Deployment", deployment) - return err + return } } } - return nil + return } func (r *BackupConfigurationReconciler) createRBACSidecar(sa corev1.ServiceAccount) error { From 1361f6204547d424cea7acf4733d482122f4c3eb Mon Sep 17 00:00:00 2001 From: Jean-Marc ANDRE Date: Sat, 18 Feb 2023 22:46:15 +0100 
Subject: [PATCH 19/69] A sidecar ENV keeps the paths to backup. That will do for now but the BackupSession controller in the sidecar should do the job of computing the paths to backup every time it reconciles. In case the paths have changed --- api/v1alpha1/backupconfiguration_types.go | 7 -- api/v1alpha1/common.go | 2 + api/v1alpha1/zz_generated.deepcopy.go | 27 ------- controllers/backupconfiguration_controller.go | 8 +- .../backupconfiguration_controller_helpers.go | 76 ++++++++++++------- 5 files changed, 50 insertions(+), 70 deletions(-) diff --git a/api/v1alpha1/backupconfiguration_types.go b/api/v1alpha1/backupconfiguration_types.go index aff1486..582d5bd 100644 --- a/api/v1alpha1/backupconfiguration_types.go +++ b/api/v1alpha1/backupconfiguration_types.go @@ -61,11 +61,6 @@ type Target struct { Retry int `json:"retry"` } -type TargetSidecarPath struct { - TargetName string `json:"targetName"` - SidecarPaths []string `json:"sidecarPaths"` -} - type Keep struct { Last int32 `json:"last"` Daily int32 `json:"daily"` @@ -91,8 +86,6 @@ type BackupConfigurationStatus struct { Suspended bool `json:"suspended"` ActiveCronJob bool `json:"activeCronJob"` ActiveSidecar bool `json:"activeSidecar"` - // +optional - Targets []TargetSidecarPath `json:"targets,omitempty"` } //+kubebuilder:object:root=true diff --git a/api/v1alpha1/common.go b/api/v1alpha1/common.go index 9c21ad6..3491639 100644 --- a/api/v1alpha1/common.go +++ b/api/v1alpha1/common.go @@ -10,4 +10,6 @@ const ( // Used by the backupsession controller POD_NAME string = "POD_NAME" POD_NAMESPACE string = "POD_NAMESPACE" + // Backup Paths list + BACKUP_PATHS = "BACKUP_PATHS" ) diff --git a/api/v1alpha1/zz_generated.deepcopy.go b/api/v1alpha1/zz_generated.deepcopy.go index 1cef863..3cd2fac 100644 --- a/api/v1alpha1/zz_generated.deepcopy.go +++ b/api/v1alpha1/zz_generated.deepcopy.go @@ -145,13 +145,6 @@ func (in *BackupConfigurationStatus) DeepCopyInto(out *BackupConfigurationStatus in, out := &in.LastBackupTime, 
&out.LastBackupTime *out = (*in).DeepCopy() } - if in.Targets != nil { - in, out := &in.Targets, &out.Targets - *out = make([]TargetSidecarPath, len(*in)) - for i := range *in { - (*in)[i].DeepCopyInto(&(*out)[i]) - } - } } // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new BackupConfigurationStatus. @@ -617,26 +610,6 @@ func (in *TargetContainer) DeepCopy() *TargetContainer { return out } -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *TargetSidecarPath) DeepCopyInto(out *TargetSidecarPath) { - *out = *in - if in.SidecarPaths != nil { - in, out := &in.SidecarPaths, &out.SidecarPaths - *out = make([]string, len(*in)) - copy(*out, *in) - } -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TargetSidecarPath. -func (in *TargetSidecarPath) DeepCopy() *TargetSidecarPath { - if in == nil { - return nil - } - out := new(TargetSidecarPath) - in.DeepCopyInto(out) - return out -} - // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *TargetStatus) DeepCopyInto(out *TargetStatus) { *out = *in diff --git a/controllers/backupconfiguration_controller.go b/controllers/backupconfiguration_controller.go index 792d94c..459d05d 100644 --- a/controllers/backupconfiguration_controller.go +++ b/controllers/backupconfiguration_controller.go @@ -97,15 +97,9 @@ func (r *BackupConfigurationReconciler) Reconcile(ctx context.Context, req ctrl. 
switch target.BackupType { case formolv1alpha1.OnlineKind: // TODO: add a sidecar to the pod with the target.Containers[].Paths mounted - if sidecarPaths, err := r.addOnlineSidecar(backupConf, target); err != nil { + if err := r.addOnlineSidecar(backupConf, target); err != nil { r.Log.Error(err, "unable to add online sidecar") return ctrl.Result{}, err - } else if len(sidecarPaths) > 0 { - backupConf.Status.Targets = append(backupConf.Status.Targets, - formolv1alpha1.TargetSidecarPath{ - TargetName: target.TargetName, - SidecarPaths: sidecarPaths, - }) } backupConf.Status.ActiveSidecar = true case formolv1alpha1.JobKind: diff --git a/controllers/backupconfiguration_controller_helpers.go b/controllers/backupconfiguration_controller_helpers.go index 26c0f9e..7de1a43 100644 --- a/controllers/backupconfiguration_controller_helpers.go +++ b/controllers/backupconfiguration_controller_helpers.go @@ -24,9 +24,11 @@ import ( rbacv1 "k8s.io/api/rbac/v1" "k8s.io/apimachinery/pkg/api/errors" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "os" "path/filepath" ctrl "sigs.k8s.io/controller-runtime" "sigs.k8s.io/controller-runtime/pkg/client" + "strings" formolv1alpha1 "github.com/desmo999r/formol/api/v1alpha1" ) @@ -178,13 +180,18 @@ func (r *BackupConfigurationReconciler) DeleteSidecar(backupConf formolv1alpha1. 
return nil } -func (r *BackupConfigurationReconciler) addOnlineSidecar(backupConf formolv1alpha1.BackupConfiguration, target formolv1alpha1.Target) (sidecarPaths []string, err error) { - addTags := func(sideCar *corev1.Container, podSpec *corev1.PodSpec, target formolv1alpha1.Target) ([]string, bool) { - var sidecarPaths []string +func hasSidecar(podSpec *corev1.PodSpec) bool { + for _, container := range podSpec.Containers { + if container.Name == formolv1alpha1.SIDECARCONTAINER_NAME { + return true + } + } + return false +} + +func (r *BackupConfigurationReconciler) addOnlineSidecar(backupConf formolv1alpha1.BackupConfiguration, target formolv1alpha1.Target) (err error) { + addTags := func(podSpec *corev1.PodSpec, target formolv1alpha1.Target) (sidecarPaths []string, vms []corev1.VolumeMount) { for i, container := range podSpec.Containers { - if container.Name == formolv1alpha1.SIDECARCONTAINER_NAME { - return sidecarPaths, false - } for _, targetContainer := range target.Containers { if targetContainer.Name == container.Name { // Found a target container. Tag it. 
@@ -209,13 +216,13 @@ func (r *BackupConfigurationReconciler) addOnlineSidecar(backupConf formolv1alph sidecarPath = filepath.Join(vm.MountPath, rel) } } - sideCar.VolumeMounts = append(sideCar.VolumeMounts, vm) + vms = append(vms, vm) sidecarPaths = append(sidecarPaths, sidecarPath) } } } } - return sidecarPaths, true + return } repo := formolv1alpha1.Repo{} @@ -224,11 +231,11 @@ func (r *BackupConfigurationReconciler) addOnlineSidecar(backupConf formolv1alph Name: backupConf.Spec.Repository, }, &repo); err != nil { r.Log.Error(err, "unable to get Repo") - return + return err } r.Log.V(1).Info("Got Repository", "repo", repo) env := repo.GetResticEnv(backupConf) - sideCar := corev1.Container{ + sidecar := corev1.Container{ Name: formolv1alpha1.SIDECARCONTAINER_NAME, Image: backupConf.Spec.Image, Args: []string{"backupsession", "server"}, @@ -247,33 +254,44 @@ func (r *BackupConfigurationReconciler) addOnlineSidecar(backupConf formolv1alph }), VolumeMounts: []corev1.VolumeMount{}, } + var targetObject client.Object + var targetPodSpec *corev1.PodSpec switch target.TargetKind { case formolv1alpha1.Deployment: - deployment := &appsv1.Deployment{} + deployment := appsv1.Deployment{} if err = r.Get(r.Context, client.ObjectKey{ Namespace: backupConf.Namespace, Name: target.TargetName, - }, deployment); err != nil { + }, &deployment); err != nil { r.Log.Error(err, "cannot get deployment", "Deployment", target.TargetName) return } - if paths, add := addTags(&sideCar, &deployment.Spec.Template.Spec, target); add == true { - sidecarPaths = paths - if err = r.createRBACSidecar(corev1.ServiceAccount{ - ObjectMeta: metav1.ObjectMeta{ - Namespace: deployment.Namespace, - Name: deployment.Spec.Template.Spec.ServiceAccountName, - }, - }); err != nil { - r.Log.Error(err, "unable to create RBAC for the sidecar container") - return - } - deployment.Spec.Template.Spec.Containers = append(deployment.Spec.Template.Spec.Containers, sideCar) - r.Log.V(1).Info("Updating deployment", 
"deployment", deployment, "containers", deployment.Spec.Template.Spec.Containers) - if err = r.Update(r.Context, deployment); err != nil { - r.Log.Error(err, "cannot update deployment", "Deployment", deployment) - return - } + targetObject = &deployment + targetPodSpec = &deployment.Spec.Template.Spec + } + if !hasSidecar(targetPodSpec) { + if err = r.createRBACSidecar(corev1.ServiceAccount{ + ObjectMeta: metav1.ObjectMeta{ + Namespace: backupConf.Namespace, + Name: targetPodSpec.ServiceAccountName, + }, + }); err != nil { + r.Log.Error(err, "unable to create RBAC for the sidecar container") + return + } + sidecarPaths, vms := addTags(targetPodSpec, target) + sidecar.VolumeMounts = vms + sidecar.Env = append(sidecar.Env, corev1.EnvVar{ + Name: formolv1alpha1.BACKUP_PATHS, + Value: strings.Join(sidecarPaths, string(os.PathListSeparator)), + }) + + // The sidecar definition is complete. Add it to the targetObject + targetPodSpec.Containers = append(targetPodSpec.Containers, sidecar) + r.Log.V(1).Info("Adding sidecar", "targetObject", targetObject, "sidecar", sidecar) + if err = r.Update(r.Context, targetObject); err != nil { + r.Log.Error(err, "unable to add sidecar", "targetObject", targetObject) + return } } From 6c2fa76111c21e87aaa1206ce831ba6cbd91b860 Mon Sep 17 00:00:00 2001 From: Jean-Marc Andre Date: Mon, 20 Feb 2023 09:42:06 +0100 Subject: [PATCH 20/69] back to the latest formolcli image --- test/02-backupconf.yaml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/test/02-backupconf.yaml b/test/02-backupconf.yaml index 2ef6b15..95982da 100644 --- a/test/02-backupconf.yaml +++ b/test/02-backupconf.yaml @@ -6,7 +6,7 @@ metadata: namespace: demo spec: suspend: true - image: desmo999r/formolcli:0.3.2 + image: desmo999r/formolcli:latest repository: repo-minio schedule: "15 * * * *" keep: From 9f91876dd2fb42a01831e94e41f12e88ed0d85c0 Mon Sep 17 00:00:00 2001 From: Jean-Marc ANDRE Date: Tue, 21 Feb 2023 01:08:53 +0100 Subject: [PATCH 21/69] updated 
the tests --- test/00-setup.yaml | 9 +++++---- test/01-deployment.yaml | 12 ++++++------ test/02-backupconf.yaml | 8 +++----- 3 files changed, 14 insertions(+), 15 deletions(-) diff --git a/test/00-setup.yaml b/test/00-setup.yaml index 665603d..7a9ef76 100644 --- a/test/00-setup.yaml +++ b/test/00-setup.yaml @@ -74,11 +74,12 @@ data: apiVersion: formol.desmojim.fr/v1alpha1 kind: Repo metadata: - name: repo-empty + name: repo-local namespace: demo spec: backend: - nfs: "toto" + local: + path: /repo repositorySecrets: secret-minio --- apiVersion: formol.desmojim.fr/v1alpha1 @@ -162,7 +163,7 @@ metadata: namespace: demo spec: name: maintenance-off - command: ["/bin/bash", "-c", "echo $(date +%Y/%m/%d-%H:%M:%S) maintenance-off >> /data/logs.txt"] + command: ["/bin/sh", "-c", "echo $(date +%Y/%m/%d-%H:%M:%S) maintenance-off >> /data/logs.txt"] --- apiVersion: formol.desmojim.fr/v1alpha1 kind: Function @@ -171,4 +172,4 @@ metadata: namespace: demo spec: name: maintenance-on - command: ["/bin/bash", "-c", "echo $(date +%Y/%m/%d-%H:%M:%S) maintenance-on >> /data/logs.txt"] + command: ["/bin/sh", "-c", "echo $(date +%Y/%m/%d-%H:%M:%S) maintenance-on >> /data/logs.txt"] diff --git a/test/01-deployment.yaml b/test/01-deployment.yaml index a08040e..84b08e6 100644 --- a/test/01-deployment.yaml +++ b/test/01-deployment.yaml @@ -2,27 +2,27 @@ apiVersion: apps/v1 kind: Deployment metadata: - name: nginx-deployment + name: apache-deployment namespace: demo labels: - app: nginx + app: apache spec: replicas: 1 strategy: type: Recreate selector: matchLabels: - app: nginx + app: apache template: metadata: labels: - app: nginx + app: apache spec: imagePullSecrets: - name: regcred containers: - - name: nginx - image: docker.io/nginx:1.23.3 + - name: apache + image: docker.io/httpd:alpine3.17 ports: - containerPort: 80 volumeMounts: diff --git a/test/02-backupconf.yaml b/test/02-backupconf.yaml index 95982da..952a6d2 100644 --- a/test/02-backupconf.yaml +++ b/test/02-backupconf.yaml @@ 
-7,7 +7,7 @@ metadata: spec: suspend: true image: desmo999r/formolcli:latest - repository: repo-minio + repository: repo-local schedule: "15 * * * *" keep: last: 5 @@ -18,13 +18,11 @@ spec: targets: - backupType: Online targetKind: Deployment - targetName: nginx-deployment + targetName: apache-deployment containers: - - name: nginx + - name: apache steps: - name: maintenance-on - - name: with-env - - name: with-envfrom - name: maintenance-off finalize: true paths: From cfea083594a82770b0632cb05824e66c9b1011ef Mon Sep 17 00:00:00 2001 From: Jean-Marc ANDRE Date: Tue, 21 Feb 2023 01:10:24 +0100 Subject: [PATCH 22/69] sharedprocess and local restic repostory --- api/v1alpha1/repo_types.go | 26 ++++++-- api/v1alpha1/zz_generated.deepcopy.go | 21 +++++- .../backupconfiguration_controller_helpers.go | 66 +++++++++++++++---- 3 files changed, 92 insertions(+), 21 deletions(-) diff --git a/api/v1alpha1/repo_types.go b/api/v1alpha1/repo_types.go index 6c3b622..b124b32 100644 --- a/api/v1alpha1/repo_types.go +++ b/api/v1alpha1/repo_types.go @@ -30,8 +30,6 @@ const ( AWS_SECRET_ACCESS_KEY = "AWS_SECRET_ACCESS_KEY" ) -// EDIT THIS FILE! THIS IS SCAFFOLDING FOR YOU TO OWN! -// NOTE: json tags are required. Any new fields you add must have json tags for the fields to be serialized. 
type S3 struct { Server string `json:"server"` Bucket string `json:"bucket"` @@ -39,11 +37,15 @@ type S3 struct { Prefix string `json:"prefix,omitempty"` } +type Local struct { + Path string `json:"path"` +} + type Backend struct { // +optional S3 *S3 `json:"s3,omitempty"` // +optional - Nfs *string `json:"nfs,omitempty"` + Local *Local `json:"local,omitempty"` } // RepoSpec defines the desired state of Repo @@ -96,7 +98,6 @@ func (repo *Repo) GetResticEnv(backupConf BackupConfiguration) []corev1.EnvVar { for _, key := range []string{ AWS_ACCESS_KEY_ID, AWS_SECRET_ACCESS_KEY, - RESTIC_PASSWORD, } { env = append(env, corev1.EnvVar{ Name: key, @@ -111,6 +112,23 @@ func (repo *Repo) GetResticEnv(backupConf BackupConfiguration) []corev1.EnvVar { }) } } + if repo.Spec.Backend.Local != nil { + env = append(env, corev1.EnvVar{ + Name: RESTIC_REPOSITORY, + Value: repo.Spec.Backend.Local.Path, + }) + } + env = append(env, corev1.EnvVar{ + Name: RESTIC_PASSWORD, + ValueFrom: &corev1.EnvVarSource{ + SecretKeyRef: &corev1.SecretKeySelector{ + LocalObjectReference: corev1.LocalObjectReference{ + Name: repo.Spec.RepositorySecrets, + }, + Key: RESTIC_PASSWORD, + }, + }, + }) return env } diff --git a/api/v1alpha1/zz_generated.deepcopy.go b/api/v1alpha1/zz_generated.deepcopy.go index 3cd2fac..e8cf666 100644 --- a/api/v1alpha1/zz_generated.deepcopy.go +++ b/api/v1alpha1/zz_generated.deepcopy.go @@ -34,9 +34,9 @@ func (in *Backend) DeepCopyInto(out *Backend) { *out = new(S3) **out = **in } - if in.Nfs != nil { - in, out := &in.Nfs, &out.Nfs - *out = new(string) + if in.Local != nil { + in, out := &in.Local, &out.Local + *out = new(Local) **out = **in } } @@ -347,6 +347,21 @@ func (in *Keep) DeepCopy() *Keep { return out } +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *Local) DeepCopyInto(out *Local) { + *out = *in +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Local. +func (in *Local) DeepCopy() *Local { + if in == nil { + return nil + } + out := new(Local) + in.DeepCopyInto(out) + return out +} + // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *Repo) DeepCopyInto(out *Repo) { *out = *in diff --git a/controllers/backupconfiguration_controller_helpers.go b/controllers/backupconfiguration_controller_helpers.go index 7de1a43..6770056 100644 --- a/controllers/backupconfiguration_controller_helpers.go +++ b/controllers/backupconfiguration_controller_helpers.go @@ -150,30 +150,54 @@ func (r *BackupConfigurationReconciler) DeleteSidecar(backupConf formolv1alpha1. } } } + repo := formolv1alpha1.Repo{} + if err := r.Get(r.Context, client.ObjectKey{ + Namespace: backupConf.Namespace, + Name: backupConf.Spec.Repository, + }, &repo); err != nil { + r.Log.Error(err, "unable to get Repo") + return err + } + r.Log.V(1).Info("Got Repository", "repo", repo) for _, target := range backupConf.Spec.Targets { + var targetObject client.Object + var targetPodSpec *corev1.PodSpec switch target.TargetKind { case formolv1alpha1.Deployment: - deployment := &appsv1.Deployment{} + deployment := appsv1.Deployment{} if err := r.Get(r.Context, client.ObjectKey{ Namespace: backupConf.Namespace, Name: target.TargetName, - }, deployment); err != nil { + }, &deployment); err != nil { r.Log.Error(err, "cannot get deployment", "Deployment", target.TargetName) return err } - restoreContainers := []corev1.Container{} - for _, container := range deployment.Spec.Template.Spec.Containers { - if container.Name == formolv1alpha1.SIDECARCONTAINER_NAME { + targetObject = &deployment + targetPodSpec = &deployment.Spec.Template.Spec + + } + restoreContainers := []corev1.Container{} + for _, container := range targetPodSpec.Containers { + if 
container.Name == formolv1alpha1.SIDECARCONTAINER_NAME { + continue + } + restoreContainers = append(restoreContainers, container) + } + targetPodSpec.Containers = restoreContainers + if repo.Spec.Backend.Local != nil { + restoreVolumes := []corev1.Volume{} + for _, volume := range targetPodSpec.Volumes { + if volume.Name == "restic-local-repo" { continue } - restoreContainers = append(restoreContainers, container) - } - deployment.Spec.Template.Spec.Containers = restoreContainers - removeTags(&deployment.Spec.Template.Spec, target) - if err := r.Update(r.Context, deployment); err != nil { - r.Log.Error(err, "unable to update deployment", "deployment", deployment) - return err + restoreVolumes = append(restoreVolumes, volume) } + targetPodSpec.Volumes = restoreVolumes + } + removeTags(targetPodSpec, target) + if err := r.Update(r.Context, targetObject); err != nil { + r.Log.Error(err, "unable to remove sidecar", "targetObject", targetObject) + return err } } @@ -280,14 +304,28 @@ func (r *BackupConfigurationReconciler) addOnlineSidecar(backupConf formolv1alph return } sidecarPaths, vms := addTags(targetPodSpec, target) - sidecar.VolumeMounts = vms sidecar.Env = append(sidecar.Env, corev1.EnvVar{ Name: formolv1alpha1.BACKUP_PATHS, Value: strings.Join(sidecarPaths, string(os.PathListSeparator)), }) + if repo.Spec.Backend.Local != nil { + sidecar.VolumeMounts = append(vms, corev1.VolumeMount{ + Name: "restic-local-repo", + MountPath: "/repo", + }) + targetPodSpec.Volumes = append(targetPodSpec.Volumes, corev1.Volume{ + Name: "restic-local-repo", + VolumeSource: corev1.VolumeSource{ + EmptyDir: &corev1.EmptyDirVolumeSource{}, + }, + }) + } else { + sidecar.VolumeMounts = vms + } // The sidecar definition is complete. 
Add it to the targetObject targetPodSpec.Containers = append(targetPodSpec.Containers, sidecar) + targetPodSpec.ShareProcessNamespace = func() *bool { b := true; return &b }() r.Log.V(1).Info("Adding sidecar", "targetObject", targetObject, "sidecar", sidecar) if err = r.Update(r.Context, targetObject); err != nil { r.Log.Error(err, "unable to add sidecar", "targetObject", targetObject) @@ -333,7 +371,7 @@ func (r *BackupConfigurationReconciler) createRBACSidecar(sa corev1.ServiceAccou rbacv1.PolicyRule{ Verbs: []string{"get", "list", "watch"}, APIGroups: []string{"formol.desmojim.fr"}, - Resources: []string{"backupsessions", "backupconfigurations"}, + Resources: []string{"backupsessions", "backupconfigurations", "functions", "repos"}, }, rbacv1.PolicyRule{ Verbs: []string{"get", "list", "watch", "create", "update", "patch", "delete"}, From 0c2d2d8f548436836ccaf1f24b1d4537896629b8 Mon Sep 17 00:00:00 2001 From: Jean-Marc ANDRE Date: Tue, 21 Feb 2023 20:48:59 +0100 Subject: [PATCH 23/69] Remove the RBAC once the last sidecar from the namespace is removed --- controllers/backupconfiguration_controller.go | 6 +-- .../backupconfiguration_controller_helpers.go | 49 ++++++++++++------- 2 files changed, 35 insertions(+), 20 deletions(-) diff --git a/controllers/backupconfiguration_controller.go b/controllers/backupconfiguration_controller.go index 459d05d..d0fe86f 100644 --- a/controllers/backupconfiguration_controller.go +++ b/controllers/backupconfiguration_controller.go @@ -64,6 +64,7 @@ func (r *BackupConfigurationReconciler) Reconcile(ctx context.Context, req ctrl. 
if controllerutil.ContainsFinalizer(&backupConf, finalizerName) { _ = r.DeleteSidecar(backupConf) _ = r.DeleteCronJob(backupConf) + _ = r.deleteRBACSidecar(backupConf.Namespace) controllerutil.RemoveFinalizer(&backupConf, finalizerName) if err := r.Update(ctx, &backupConf); err != nil { r.Log.Error(err, "unable to remove finalizer") @@ -96,16 +97,15 @@ func (r *BackupConfigurationReconciler) Reconcile(ctx context.Context, req ctrl. for _, target := range backupConf.Spec.Targets { switch target.BackupType { case formolv1alpha1.OnlineKind: - // TODO: add a sidecar to the pod with the target.Containers[].Paths mounted if err := r.addOnlineSidecar(backupConf, target); err != nil { r.Log.Error(err, "unable to add online sidecar") return ctrl.Result{}, err } backupConf.Status.ActiveSidecar = true case formolv1alpha1.JobKind: - // TODO: add a sidecar to the pod with a shared + // TODO: add a sidecar to the pod with a shared volume case formolv1alpha1.SnapshotKind: - // TOD: add a sidecar to run the steps + // TODO: add a sidecar to run the steps } } diff --git a/controllers/backupconfiguration_controller_helpers.go b/controllers/backupconfiguration_controller_helpers.go index 6770056..df04c68 100644 --- a/controllers/backupconfiguration_controller_helpers.go +++ b/controllers/backupconfiguration_controller_helpers.go @@ -336,24 +336,39 @@ func (r *BackupConfigurationReconciler) addOnlineSidecar(backupConf formolv1alph return } +func (r *BackupConfigurationReconciler) deleteRBACSidecar(namespace string) error { + podList := corev1.PodList{} + if err := r.List(r.Context, &podList, &client.ListOptions{ + Namespace: namespace, + }); err != nil { + r.Log.Error(err, "unable to get the list of pods", "namespace", namespace) + return err + } + for _, pod := range podList.Items { + for _, container := range pod.Spec.Containers { + for _, env := range container.Env { + if env.Name == formolv1alpha1.SIDECARCONTAINER_NAME { + // There is still a sidecar in the namespace. 
+ // cannot delete the sidecar role + return nil + } + } + } + } + role := rbacv1.Role{ + ObjectMeta: metav1.ObjectMeta{ + Namespace: namespace, + Name: FORMOL_SIDECAR_ROLE, + }, + } + if err := r.Delete(r.Context, &role); err != nil { + r.Log.Error(err, "unable to delete sidecar role") + return err + } + return nil +} + func (r *BackupConfigurationReconciler) createRBACSidecar(sa corev1.ServiceAccount) error { - // sa := corev1.ServiceAccount {} - // if err := r.Get(r.Context, client.ObjectKey { - // Namespace: backupConf.Namespace, - // Name: FORMOL_SA, - // }, &sa); err != nil && errors.IsNotFound(err) { - // sa = corev1.ServiceAccount { - // ObjectMeta: metav1.ObjectMeta { - // Namespace: backupConf.Namespace, - // Name: FORMOL_SA, - // }, - // } - // r.Log.V(0).Info("Creating formol service account", "sa", sa) - // if err = r.Create(r.Context, &sa); err != nil { - // r.Log.Error(err, "unable to create service account") - // return err - // } - // } if sa.Name == "" { sa.Name = "default" } From 5090b5fb1abc87cae16b209562786a1b86dc77e9 Mon Sep 17 00:00:00 2001 From: Jean-Marc Andre Date: Wed, 22 Feb 2023 14:33:44 +0100 Subject: [PATCH 24/69] added local repository for restic --- api/v1alpha1/repo_types.go | 6 ++++-- api/v1alpha1/zz_generated.deepcopy.go | 3 ++- .../backupconfiguration_controller_helpers.go | 12 +++++------- test/00-setup.yaml | 3 ++- 4 files changed, 13 insertions(+), 11 deletions(-) diff --git a/api/v1alpha1/repo_types.go b/api/v1alpha1/repo_types.go index b124b32..224befe 100644 --- a/api/v1alpha1/repo_types.go +++ b/api/v1alpha1/repo_types.go @@ -24,6 +24,8 @@ import ( ) const ( + RESTIC_REPO_VOLUME = "restic-volume" + RESTIC_REPO_PATH = "/restic-repo" RESTIC_REPOSITORY = "RESTIC_REPOSITORY" RESTIC_PASSWORD = "RESTIC_PASSWORD" AWS_ACCESS_KEY_ID = "AWS_ACCESS_KEY_ID" @@ -38,7 +40,7 @@ type S3 struct { } type Local struct { - Path string `json:"path"` + corev1.VolumeSource `json:"source"` } type Backend struct { @@ -115,7 +117,7 @@ func (repo 
*Repo) GetResticEnv(backupConf BackupConfiguration) []corev1.EnvVar { if repo.Spec.Backend.Local != nil { env = append(env, corev1.EnvVar{ Name: RESTIC_REPOSITORY, - Value: repo.Spec.Backend.Local.Path, + Value: RESTIC_REPO_PATH, }) } env = append(env, corev1.EnvVar{ diff --git a/api/v1alpha1/zz_generated.deepcopy.go b/api/v1alpha1/zz_generated.deepcopy.go index e8cf666..3c46448 100644 --- a/api/v1alpha1/zz_generated.deepcopy.go +++ b/api/v1alpha1/zz_generated.deepcopy.go @@ -37,7 +37,7 @@ func (in *Backend) DeepCopyInto(out *Backend) { if in.Local != nil { in, out := &in.Local, &out.Local *out = new(Local) - **out = **in + (*in).DeepCopyInto(*out) } } @@ -350,6 +350,7 @@ func (in *Keep) DeepCopy() *Keep { // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *Local) DeepCopyInto(out *Local) { *out = *in + in.VolumeSource.DeepCopyInto(&out.VolumeSource) } // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Local. diff --git a/controllers/backupconfiguration_controller_helpers.go b/controllers/backupconfiguration_controller_helpers.go index df04c68..c40bed7 100644 --- a/controllers/backupconfiguration_controller_helpers.go +++ b/controllers/backupconfiguration_controller_helpers.go @@ -187,7 +187,7 @@ func (r *BackupConfigurationReconciler) DeleteSidecar(backupConf formolv1alpha1. 
if repo.Spec.Backend.Local != nil { restoreVolumes := []corev1.Volume{} for _, volume := range targetPodSpec.Volumes { - if volume.Name == "restic-local-repo" { + if volume.Name == formolv1alpha1.RESTIC_REPO_VOLUME { continue } restoreVolumes = append(restoreVolumes, volume) @@ -310,14 +310,12 @@ func (r *BackupConfigurationReconciler) addOnlineSidecar(backupConf formolv1alph }) if repo.Spec.Backend.Local != nil { sidecar.VolumeMounts = append(vms, corev1.VolumeMount{ - Name: "restic-local-repo", - MountPath: "/repo", + Name: formolv1alpha1.RESTIC_REPO_VOLUME, + MountPath: formolv1alpha1.RESTIC_REPO_PATH, }) targetPodSpec.Volumes = append(targetPodSpec.Volumes, corev1.Volume{ - Name: "restic-local-repo", - VolumeSource: corev1.VolumeSource{ - EmptyDir: &corev1.EmptyDirVolumeSource{}, - }, + Name: formolv1alpha1.RESTIC_REPO_VOLUME, + VolumeSource: repo.Spec.Backend.Local.VolumeSource, }) } else { sidecar.VolumeMounts = vms diff --git a/test/00-setup.yaml b/test/00-setup.yaml index 7a9ef76..d186669 100644 --- a/test/00-setup.yaml +++ b/test/00-setup.yaml @@ -79,7 +79,8 @@ metadata: spec: backend: local: - path: /repo + source: + emptyDir: repositorySecrets: secret-minio --- apiVersion: formol.desmojim.fr/v1alpha1 From b330e2cfdde8e33488c5ed16fe6fab5cfe373942 Mon Sep 17 00:00:00 2001 From: Jean-Marc Andre Date: Wed, 22 Feb 2023 14:54:26 +0100 Subject: [PATCH 25/69] documentation --- controllers/backupconfiguration_controller_helpers.go | 3 +++ 1 file changed, 3 insertions(+) diff --git a/controllers/backupconfiguration_controller_helpers.go b/controllers/backupconfiguration_controller_helpers.go index c40bed7..701d6c0 100644 --- a/controllers/backupconfiguration_controller_helpers.go +++ b/controllers/backupconfiguration_controller_helpers.go @@ -334,6 +334,7 @@ func (r *BackupConfigurationReconciler) addOnlineSidecar(backupConf formolv1alph return } +// Delete the sidecar role if there is no more sidecar container in the namespace func (r
*BackupConfigurationReconciler) deleteRBACSidecar(namespace string) error { podList := corev1.PodList{} if err := r.List(r.Context, &podList, &client.ListOptions{ @@ -366,6 +367,8 @@ func (r *BackupConfigurationReconciler) deleteRBACSidecar(namespace string) erro return nil } +// Creates a role to allow the BackupSession controller in the sidecar to have access to resources +// like Repo, Functions, ... func (r *BackupConfigurationReconciler) createRBACSidecar(sa corev1.ServiceAccount) error { if sa.Name == "" { sa.Name = "default" From 6b0add3fdf30c4a3cbf5e361496dfe20417e6133 Mon Sep 17 00:00:00 2001 From: Jean-Marc ANDRE Date: Thu, 23 Feb 2023 22:44:46 +0100 Subject: [PATCH 26/69] fixed conflict --- api/v1alpha1/backupconfiguration_types.go | 6 +- controllers/backupconfiguration_controller.go | 15 +--- .../backupconfiguration_controller_helpers.go | 90 ++++++++++--------- 3 files changed, 54 insertions(+), 57 deletions(-) diff --git a/api/v1alpha1/backupconfiguration_types.go b/api/v1alpha1/backupconfiguration_types.go index 582d5bd..a5b0c72 100644 --- a/api/v1alpha1/backupconfiguration_types.go +++ b/api/v1alpha1/backupconfiguration_types.go @@ -47,9 +47,11 @@ type Step struct { } type TargetContainer struct { - Name string `json:"name"` + Name string `json:"name"` + // +optional Paths []string `json:"paths,omitempty"` - Steps []Step `json:"steps,omitempty"` + // +optional + Steps []Step `json:"steps,omitempty"` } type Target struct { diff --git a/controllers/backupconfiguration_controller.go b/controllers/backupconfiguration_controller.go index d0fe86f..71ffc45 100644 --- a/controllers/backupconfiguration_controller.go +++ b/controllers/backupconfiguration_controller.go @@ -95,18 +95,11 @@ func (r *BackupConfigurationReconciler) Reconcile(ctx context.Context, req ctrl. 
} for _, target := range backupConf.Spec.Targets { - switch target.BackupType { - case formolv1alpha1.OnlineKind: - if err := r.addOnlineSidecar(backupConf, target); err != nil { - r.Log.Error(err, "unable to add online sidecar") - return ctrl.Result{}, err - } - backupConf.Status.ActiveSidecar = true - case formolv1alpha1.JobKind: - // TODO: add a sidecar to the pod with a shared volume - case formolv1alpha1.SnapshotKind: - // TODO: add a sidecar to run the steps + if err := r.addSidecar(backupConf, target); err != nil { + r.Log.Error(err, "unable to add online sidecar") + return ctrl.Result{}, err } + backupConf.Status.ActiveSidecar = true } if err := r.Status().Update(ctx, &backupConf); err != nil { diff --git a/controllers/backupconfiguration_controller_helpers.go b/controllers/backupconfiguration_controller_helpers.go index 701d6c0..0ee8e8f 100644 --- a/controllers/backupconfiguration_controller_helpers.go +++ b/controllers/backupconfiguration_controller_helpers.go @@ -213,42 +213,7 @@ func hasSidecar(podSpec *corev1.PodSpec) bool { return false } -func (r *BackupConfigurationReconciler) addOnlineSidecar(backupConf formolv1alpha1.BackupConfiguration, target formolv1alpha1.Target) (err error) { - addTags := func(podSpec *corev1.PodSpec, target formolv1alpha1.Target) (sidecarPaths []string, vms []corev1.VolumeMount) { - for i, container := range podSpec.Containers { - for _, targetContainer := range target.Containers { - if targetContainer.Name == container.Name { - // Found a target container. Tag it. 
- podSpec.Containers[i].Env = append(container.Env, corev1.EnvVar{ - Name: formolv1alpha1.TARGETCONTAINER_TAG, - Value: container.Name, - }) - // targetContainer.Paths are the paths to backup - // We have to find what volumes are mounted under those paths - // and mount them under a path that exists in the sidecar container - for i, path := range targetContainer.Paths { - vm := corev1.VolumeMount{ReadOnly: true} - var longest int = 0 - var sidecarPath string - for _, volumeMount := range container.VolumeMounts { - // if strings.HasPrefix(path, volumeMount.MountPath) && len(volumeMount.MountPath) > longest { - if rel, err := filepath.Rel(volumeMount.MountPath, path); err == nil && len(volumeMount.MountPath) > longest { - longest = len(volumeMount.MountPath) - vm.Name = volumeMount.Name - vm.MountPath = fmt.Sprintf("/%s%d", formolv1alpha1.BACKUP_PREFIX_PATH, i) - vm.SubPath = volumeMount.SubPath - sidecarPath = filepath.Join(vm.MountPath, rel) - } - } - vms = append(vms, vm) - sidecarPaths = append(sidecarPaths, sidecarPath) - } - } - } - } - return - } - +func (r *BackupConfigurationReconciler) addSidecar(backupConf formolv1alpha1.BackupConfiguration, target formolv1alpha1.Target) (err error) { repo := formolv1alpha1.Repo{} if err = r.Get(r.Context, client.ObjectKey{ Namespace: backupConf.Namespace, @@ -303,13 +268,17 @@ func (r *BackupConfigurationReconciler) addOnlineSidecar(backupConf formolv1alph r.Log.Error(err, "unable to create RBAC for the sidecar container") return } - sidecarPaths, vms := addTags(targetPodSpec, target) - sidecar.Env = append(sidecar.Env, corev1.EnvVar{ - Name: formolv1alpha1.BACKUP_PATHS, - Value: strings.Join(sidecarPaths, string(os.PathListSeparator)), - }) + switch target.BackupType { + case formolv1alpha1.OnlineKind: + sidecarPaths, vms := addOnlineSidecarTags(targetPodSpec, target) + sidecar.Env = append(sidecar.Env, corev1.EnvVar{ + Name: formolv1alpha1.BACKUP_PATHS, + Value: strings.Join(sidecarPaths, string(os.PathListSeparator)), 
+ }) + sidecar.VolumeMounts = vms + } if repo.Spec.Backend.Local != nil { - sidecar.VolumeMounts = append(vms, corev1.VolumeMount{ + sidecar.VolumeMounts = append(sidecar.VolumeMounts, corev1.VolumeMount{ Name: formolv1alpha1.RESTIC_REPO_VOLUME, MountPath: formolv1alpha1.RESTIC_REPO_PATH, }) @@ -317,8 +286,6 @@ func (r *BackupConfigurationReconciler) addOnlineSidecar(backupConf formolv1alph Name: formolv1alpha1.RESTIC_REPO_VOLUME, VolumeSource: repo.Spec.Backend.Local.VolumeSource, }) - } else { - sidecar.VolumeMounts = vms } // The sidecar definition is complete. Add it to the targetObject @@ -432,3 +399,38 @@ func (r *BackupConfigurationReconciler) createRBACSidecar(sa corev1.ServiceAccou } return nil } + +func addOnlineSidecarTags(podSpec *corev1.PodSpec, target formolv1alpha1.Target) (sidecarPaths []string, vms []corev1.VolumeMount) { + for i, container := range podSpec.Containers { + for _, targetContainer := range target.Containers { + if targetContainer.Name == container.Name { + // Found a target container. Tag it. 
+ podSpec.Containers[i].Env = append(container.Env, corev1.EnvVar{ + Name: formolv1alpha1.TARGETCONTAINER_TAG, + Value: container.Name, + }) + // targetContainer.Paths are the paths to backup + // We have to find what volumes are mounted under those paths + // and mount them under a path that exists in the sidecar container + for i, path := range targetContainer.Paths { + vm := corev1.VolumeMount{ReadOnly: true} + var longest int = 0 + var sidecarPath string + for _, volumeMount := range container.VolumeMounts { + // if strings.HasPrefix(path, volumeMount.MountPath) && len(volumeMount.MountPath) > longest { + if rel, err := filepath.Rel(volumeMount.MountPath, path); err == nil && len(volumeMount.MountPath) > longest { + longest = len(volumeMount.MountPath) + vm.Name = volumeMount.Name + vm.MountPath = fmt.Sprintf("/%s%d", formolv1alpha1.BACKUP_PREFIX_PATH, i) + vm.SubPath = volumeMount.SubPath + sidecarPath = filepath.Join(vm.MountPath, rel) + } + } + vms = append(vms, vm) + sidecarPaths = append(sidecarPaths, sidecarPath) + } + } + } + } + return +} From 3790f30e29c43c585d6cf2aed9e86a533c967baa Mon Sep 17 00:00:00 2001 From: Jean-Marc ANDRE Date: Fri, 24 Feb 2023 21:49:02 +0100 Subject: [PATCH 27/69] Handle sidecar creation for the Job backup type with shared volume --- api/v1alpha1/backupconfiguration_types.go | 7 +- api/v1alpha1/repo_types.go | 3 +- .../backupconfiguration_controller_helpers.go | 91 +++++++++++++++++-- test/00-setup.yaml | 3 +- test/02-backupconf.yaml | 5 + 5 files changed, 97 insertions(+), 12 deletions(-) diff --git a/api/v1alpha1/backupconfiguration_types.go b/api/v1alpha1/backupconfiguration_types.go index a5b0c72..a73ba08 100644 --- a/api/v1alpha1/backupconfiguration_types.go +++ b/api/v1alpha1/backupconfiguration_types.go @@ -38,7 +38,10 @@ const ( JobKind BackupType = "Job" ) -const BACKUP_PREFIX_PATH = `backup` +const ( + BACKUP_PREFIX_PATH = `backup` + FORMOL_SHARED_VOLUME = `formol-shared` +) type Step struct { Name string 
`json:"name"` @@ -52,6 +55,8 @@ type TargetContainer struct { Paths []string `json:"paths,omitempty"` // +optional Steps []Step `json:"steps,omitempty"` + // +kubebuilder:default:=/formol-shared + SharePath string `json:"sharePath"` } type Target struct { diff --git a/api/v1alpha1/repo_types.go b/api/v1alpha1/repo_types.go index 224befe..b4de2d8 100644 --- a/api/v1alpha1/repo_types.go +++ b/api/v1alpha1/repo_types.go @@ -40,7 +40,8 @@ type S3 struct { } type Local struct { - corev1.VolumeSource `json:"source"` + //corev1.VolumeSource `json:"source"` + corev1.VolumeSource `json:",inline"` } type Backend struct { diff --git a/controllers/backupconfiguration_controller_helpers.go b/controllers/backupconfiguration_controller_helpers.go index 0ee8e8f..d6545bf 100644 --- a/controllers/backupconfiguration_controller_helpers.go +++ b/controllers/backupconfiguration_controller_helpers.go @@ -175,25 +175,48 @@ func (r *BackupConfigurationReconciler) DeleteSidecar(backupConf formolv1alpha1. targetObject = &deployment targetPodSpec = &deployment.Spec.Template.Spec + case formolv1alpha1.StatefulSet: + statefulSet := appsv1.StatefulSet{} + if err := r.Get(r.Context, client.ObjectKey{ + Namespace: backupConf.Namespace, + Name: target.TargetName, + }, &statefulSet); err != nil { + r.Log.Error(err, "cannot get deployment", "Deployment", target.TargetName) + return err + } + targetObject = &statefulSet + targetPodSpec = &statefulSet.Spec.Template.Spec + } restoreContainers := []corev1.Container{} for _, container := range targetPodSpec.Containers { if container.Name == formolv1alpha1.SIDECARCONTAINER_NAME { continue } + restoreVms := []corev1.VolumeMount{} + for _, vm := range container.VolumeMounts { + if vm.Name == formolv1alpha1.FORMOL_SHARED_VOLUME { + r.Log.V(0).Info("cleanup VolumeMounts", "container", container.Name, "VolumeMount", vm.Name) + continue + } + restoreVms = append(restoreVms, vm) + } + r.Log.V(0).Info("cleanup VolumeMounts", "container", container.Name, 
"restoreVms", restoreVms) + container.VolumeMounts = restoreVms restoreContainers = append(restoreContainers, container) } targetPodSpec.Containers = restoreContainers - if repo.Spec.Backend.Local != nil { - restoreVolumes := []corev1.Volume{} - for _, volume := range targetPodSpec.Volumes { - if volume.Name == formolv1alpha1.RESTIC_REPO_VOLUME { - continue - } - restoreVolumes = append(restoreVolumes, volume) + restoreVolumes := []corev1.Volume{} + for _, volume := range targetPodSpec.Volumes { + if volume.Name == formolv1alpha1.RESTIC_REPO_VOLUME { + continue } - targetPodSpec.Volumes = restoreVolumes + if volume.Name == formolv1alpha1.FORMOL_SHARED_VOLUME { + continue + } + restoreVolumes = append(restoreVolumes, volume) } + targetPodSpec.Volumes = restoreVolumes removeTags(targetPodSpec, target) if err := r.Update(r.Context, targetObject); err != nil { r.Log.Error(err, "unable to remove sidecar", "targetObject", targetObject) @@ -257,6 +280,17 @@ func (r *BackupConfigurationReconciler) addSidecar(backupConf formolv1alpha1.Bac } targetObject = &deployment targetPodSpec = &deployment.Spec.Template.Spec + case formolv1alpha1.StatefulSet: + statefulSet := appsv1.StatefulSet{} + if err = r.Get(r.Context, client.ObjectKey{ + Namespace: backupConf.Namespace, + Name: target.TargetName, + }, &statefulSet); err != nil { + r.Log.Error(err, "cannot get deployment", "Deployment", target.TargetName) + return + } + targetObject = &statefulSet + targetPodSpec = &statefulSet.Spec.Template.Spec } if !hasSidecar(targetPodSpec) { if err = r.createRBACSidecar(corev1.ServiceAccount{ @@ -276,6 +310,8 @@ func (r *BackupConfigurationReconciler) addSidecar(backupConf formolv1alpha1.Bac Value: strings.Join(sidecarPaths, string(os.PathListSeparator)), }) sidecar.VolumeMounts = vms + case formolv1alpha1.JobKind: + sidecar.VolumeMounts = addJobSidecarTags(targetPodSpec, target) } if repo.Spec.Backend.Local != nil { sidecar.VolumeMounts = append(sidecar.VolumeMounts, corev1.VolumeMount{ @@ 
-400,6 +436,45 @@ func (r *BackupConfigurationReconciler) createRBACSidecar(sa corev1.ServiceAccou return nil } +func addJobSidecarTags(podSpec *corev1.PodSpec, target formolv1alpha1.Target) (vms []corev1.VolumeMount) { + for i, container := range podSpec.Containers { + for _, targetContainer := range target.Containers { + if targetContainer.Name == container.Name { + // Found a target container. Tag it. + podSpec.Containers[i].Env = append(container.Env, corev1.EnvVar{ + Name: formolv1alpha1.TARGETCONTAINER_TAG, + Value: container.Name, + }) + // Create a shared mount between the target and sidecar container + // the output of the Job will be saved in the shared volume + // and restic will then backup the content of the volume + var addSharedVol bool = true + for _, vol := range podSpec.Volumes { + if vol.Name == formolv1alpha1.FORMOL_SHARED_VOLUME { + addSharedVol = false + } + } + if addSharedVol { + podSpec.Volumes = append(podSpec.Volumes, + corev1.Volume{ + Name: formolv1alpha1.FORMOL_SHARED_VOLUME, + VolumeSource: corev1.VolumeSource{EmptyDir: &corev1.EmptyDirVolumeSource{}}, + }) + } + podSpec.Containers[i].VolumeMounts = append(podSpec.Containers[i].VolumeMounts, corev1.VolumeMount{ + Name: formolv1alpha1.FORMOL_SHARED_VOLUME, + MountPath: targetContainer.SharePath, + }) + vms = append(vms, corev1.VolumeMount{ + Name: formolv1alpha1.FORMOL_SHARED_VOLUME, + MountPath: targetContainer.SharePath, + }) + } + } + } + return +} + func addOnlineSidecarTags(podSpec *corev1.PodSpec, target formolv1alpha1.Target) (sidecarPaths []string, vms []corev1.VolumeMount) { for i, container := range podSpec.Containers { for _, targetContainer := range target.Containers { diff --git a/test/00-setup.yaml b/test/00-setup.yaml index d186669..2fe5c66 100644 --- a/test/00-setup.yaml +++ b/test/00-setup.yaml @@ -79,8 +79,7 @@ metadata: spec: backend: local: - source: - emptyDir: + emptyDir: repositorySecrets: secret-minio --- apiVersion: formol.desmojim.fr/v1alpha1 diff --git 
a/test/02-backupconf.yaml b/test/02-backupconf.yaml index 952a6d2..a6a8816 100644 --- a/test/02-backupconf.yaml +++ b/test/02-backupconf.yaml @@ -27,6 +27,11 @@ spec: finalize: true paths: - /data + - backupType: Job + targetKind: StatefulSet + targetName: postgres-demo + containers: + - name: postgres # - kind: Job # name: backup-pg # steps: From da8b224cf09513f0198ed5b76ca65abd64372987 Mon Sep 17 00:00:00 2001 From: Jean-Marc ANDRE Date: Sun, 26 Feb 2023 00:43:50 +0100 Subject: [PATCH 28/69] need a Job to run --- api/v1alpha1/backupconfiguration_types.go | 2 ++ api/v1alpha1/zz_generated.deepcopy.go | 7 +++++++ 2 files changed, 9 insertions(+) diff --git a/api/v1alpha1/backupconfiguration_types.go b/api/v1alpha1/backupconfiguration_types.go index a73ba08..2ce1c14 100644 --- a/api/v1alpha1/backupconfiguration_types.go +++ b/api/v1alpha1/backupconfiguration_types.go @@ -57,6 +57,8 @@ type TargetContainer struct { Steps []Step `json:"steps,omitempty"` // +kubebuilder:default:=/formol-shared SharePath string `json:"sharePath"` + // +optional + Job []Step `json:"job,omitempty"` } type Target struct { diff --git a/api/v1alpha1/zz_generated.deepcopy.go b/api/v1alpha1/zz_generated.deepcopy.go index 3c46448..4f7eaf8 100644 --- a/api/v1alpha1/zz_generated.deepcopy.go +++ b/api/v1alpha1/zz_generated.deepcopy.go @@ -614,6 +614,13 @@ func (in *TargetContainer) DeepCopyInto(out *TargetContainer) { (*in)[i].DeepCopyInto(&(*out)[i]) } } + if in.Job != nil { + in, out := &in.Job, &out.Job + *out = make([]Step, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } } // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TargetContainer. 
From 46247d17ae8d51f474d92cfc1e90399a5dd2e488 Mon Sep 17 00:00:00 2001 From: Jean-Marc ANDRE Date: Sun, 26 Feb 2023 00:46:09 +0100 Subject: [PATCH 29/69] the sidecar needs privileges to run chroot commands --- controllers/backupconfiguration_controller_helpers.go | 3 +++ 1 file changed, 3 insertions(+) diff --git a/controllers/backupconfiguration_controller_helpers.go b/controllers/backupconfiguration_controller_helpers.go index d6545bf..68eab52 100644 --- a/controllers/backupconfiguration_controller_helpers.go +++ b/controllers/backupconfiguration_controller_helpers.go @@ -265,6 +265,9 @@ func (r *BackupConfigurationReconciler) addSidecar(backupConf formolv1alpha1.Bac }, }), VolumeMounts: []corev1.VolumeMount{}, + SecurityContext: &corev1.SecurityContext{ + Privileged: func() *bool { b := true; return &b }(), + }, } var targetObject client.Object var targetPodSpec *corev1.PodSpec From 2ac8bc2c715a97fe51a6daec01c022ff7e7bb1d7 Mon Sep 17 00:00:00 2001 From: Jean-Marc ANDRE Date: Sun, 26 Feb 2023 00:46:54 +0100 Subject: [PATCH 30/69] getTargetObjects function --- .../backupconfiguration_controller_helpers.go | 92 ++++++++----------- 1 file changed, 39 insertions(+), 53 deletions(-) diff --git a/controllers/backupconfiguration_controller_helpers.go b/controllers/backupconfiguration_controller_helpers.go index 68eab52..300d7a7 100644 --- a/controllers/backupconfiguration_controller_helpers.go +++ b/controllers/backupconfiguration_controller_helpers.go @@ -130,6 +130,36 @@ func (r *BackupConfigurationReconciler) AddCronJob(backupConf formolv1alpha1.Bac } } +func (r *BackupConfigurationReconciler) getTargetObjects(kind formolv1alpha1.TargetKind, namespace string, name string) (targetObject client.Object, targetPodSpec *corev1.PodSpec, err error) { + switch kind { + case formolv1alpha1.Deployment: + deployment := appsv1.Deployment{} + if err = r.Get(r.Context, client.ObjectKey{ + Namespace: namespace, + Name: name, + }, &deployment); err != nil { + r.Log.Error(err, 
"cannot get deployment", "Deployment", name) + return + } + targetObject = &deployment + targetPodSpec = &deployment.Spec.Template.Spec + + case formolv1alpha1.StatefulSet: + statefulSet := appsv1.StatefulSet{} + if err = r.Get(r.Context, client.ObjectKey{ + Namespace: namespace, + Name: name, + }, &statefulSet); err != nil { + r.Log.Error(err, "cannot get StatefulSet", "StatefulSet", name) + return + } + targetObject = &statefulSet + targetPodSpec = &statefulSet.Spec.Template.Spec + + } + return +} + func (r *BackupConfigurationReconciler) DeleteSidecar(backupConf formolv1alpha1.BackupConfiguration) error { removeTags := func(podSpec *corev1.PodSpec, target formolv1alpha1.Target) { for i, container := range podSpec.Containers { @@ -160,33 +190,10 @@ func (r *BackupConfigurationReconciler) DeleteSidecar(backupConf formolv1alpha1. } r.Log.V(1).Info("Got Repository", "repo", repo) for _, target := range backupConf.Spec.Targets { - var targetObject client.Object - var targetPodSpec *corev1.PodSpec - switch target.TargetKind { - case formolv1alpha1.Deployment: - deployment := appsv1.Deployment{} - if err := r.Get(r.Context, client.ObjectKey{ - Namespace: backupConf.Namespace, - Name: target.TargetName, - }, &deployment); err != nil { - r.Log.Error(err, "cannot get deployment", "Deployment", target.TargetName) - return err - } - targetObject = &deployment - targetPodSpec = &deployment.Spec.Template.Spec - - case formolv1alpha1.StatefulSet: - statefulSet := appsv1.StatefulSet{} - if err := r.Get(r.Context, client.ObjectKey{ - Namespace: backupConf.Namespace, - Name: target.TargetName, - }, &statefulSet); err != nil { - r.Log.Error(err, "cannot get deployment", "Deployment", target.TargetName) - return err - } - targetObject = &statefulSet - targetPodSpec = &statefulSet.Spec.Template.Spec - + targetObject, targetPodSpec, err := r.getTargetObjects(target.TargetKind, backupConf.Namespace, target.TargetName) + if err != nil { + r.Log.Error(err, "unable to get target 
objects") + return err } restoreContainers := []corev1.Container{} for _, container := range targetPodSpec.Containers { @@ -218,7 +225,7 @@ func (r *BackupConfigurationReconciler) DeleteSidecar(backupConf formolv1alpha1. } targetPodSpec.Volumes = restoreVolumes removeTags(targetPodSpec, target) - if err := r.Update(r.Context, targetObject); err != nil { + if err = r.Update(r.Context, targetObject); err != nil { r.Log.Error(err, "unable to remove sidecar", "targetObject", targetObject) return err } @@ -269,31 +276,10 @@ func (r *BackupConfigurationReconciler) addSidecar(backupConf formolv1alpha1.Bac Privileged: func() *bool { b := true; return &b }(), }, } - var targetObject client.Object - var targetPodSpec *corev1.PodSpec - switch target.TargetKind { - case formolv1alpha1.Deployment: - deployment := appsv1.Deployment{} - if err = r.Get(r.Context, client.ObjectKey{ - Namespace: backupConf.Namespace, - Name: target.TargetName, - }, &deployment); err != nil { - r.Log.Error(err, "cannot get deployment", "Deployment", target.TargetName) - return - } - targetObject = &deployment - targetPodSpec = &deployment.Spec.Template.Spec - case formolv1alpha1.StatefulSet: - statefulSet := appsv1.StatefulSet{} - if err = r.Get(r.Context, client.ObjectKey{ - Namespace: backupConf.Namespace, - Name: target.TargetName, - }, &statefulSet); err != nil { - r.Log.Error(err, "cannot get deployment", "Deployment", target.TargetName) - return - } - targetObject = &statefulSet - targetPodSpec = &statefulSet.Spec.Template.Spec + targetObject, targetPodSpec, err := r.getTargetObjects(target.TargetKind, backupConf.Namespace, target.TargetName) + if err != nil { + r.Log.Error(err, "unable to get target objects") + return err } if !hasSidecar(targetPodSpec) { if err = r.createRBACSidecar(corev1.ServiceAccount{ From b42bd46efe822ddad250448d7ed2578efe5460f7 Mon Sep 17 00:00:00 2001 From: Jean-Marc ANDRE Date: Sun, 26 Feb 2023 00:48:57 +0100 Subject: [PATCH 31/69] backup job type --- 
test/00-setup.yaml | 103 ---------------------------------------- test/01-deployment.yaml | 8 ++-- test/02-backupconf.yaml | 101 +++++++++++++++++++++++++++++++++++++++ 3 files changed, 106 insertions(+), 106 deletions(-) diff --git a/test/00-setup.yaml b/test/00-setup.yaml index 2fe5c66..9efa181 100644 --- a/test/00-setup.yaml +++ b/test/00-setup.yaml @@ -70,106 +70,3 @@ data: RESTIC_PASSWORD: bHIyOXhtOTU= AWS_ACCESS_KEY_ID: OWFTSXZBSEVzWlNVMmkyTU9zVGxWSk1lL1NjPQ== AWS_SECRET_ACCESS_KEY: WVN5ck9ncVllcjBWNFNLdlVOcmx2OGhjTllhZGZuN2xaNjBIaXRlL3djWT0= ---- -apiVersion: formol.desmojim.fr/v1alpha1 -kind: Repo -metadata: - name: repo-local - namespace: demo -spec: - backend: - local: - emptyDir: - repositorySecrets: secret-minio ---- -apiVersion: formol.desmojim.fr/v1alpha1 -kind: Repo -metadata: - name: repo-minio - namespace: demo -spec: - backend: - s3: - server: raid5.desmojim.fr:9000 - bucket: testbucket2 - repositorySecrets: secret-minio ---- -apiVersion: formol.desmojim.fr/v1alpha1 -kind: Function -metadata: - name: restore-pg - namespace: demo -spec: - name: restore-pg - image: desmo999r/formolcli:latest - args: ["postgres", "restore", "--hostname", $(PGHOST), "--database", $(PGDATABASE), "--username", $(PGUSER), "--password", $(PGPASSWD), "--file", "/output/backup-pg.sql"] - env: - - name: PGHOST - value: postgres - - name: PGDATABASE - value: demopostgres - - name: PGUSER - value: demopostgres - - name: PGPASSWD - value: password123! 
---- -apiVersion: formol.desmojim.fr/v1alpha1 -kind: Function -metadata: - name: with-envfrom - namespace: demo -spec: - name: with-envfrom - command: ["touch", $(title)] - envFrom: - - secretRef: - name: with-envfrom-secret ---- -apiVersion: formol.desmojim.fr/v1alpha1 -kind: Function -metadata: - name: with-env - namespace: demo -spec: - name: with-env - command: ["touch", $(TESTFILE)] - env: - - name: TESTFILE - value: /data/testfile ---- -apiVersion: formol.desmojim.fr/v1alpha1 -kind: Function -metadata: - name: backup-pg - namespace: demo -spec: - name: backup-pg - image: desmo999r/formolcli:latest - args: ["postgres", "backup", "--hostname", $(PGHOST), "--database", $(PGDATABASE), "--username", $(PGUSER), "--password", $(PGPASSWD), "--file", "/output/backup-pg.sql"] - env: - - name: PGHOST - value: postgres - - name: PGDATABASE - value: demopostgres - - name: PGUSER - value: demopostgres - - name: PGPASSWD - value: password123! ---- -apiVersion: formol.desmojim.fr/v1alpha1 -kind: Function -metadata: - name: maintenance-off - namespace: demo -spec: - name: maintenance-off - command: ["/bin/sh", "-c", "echo $(date +%Y/%m/%d-%H:%M:%S) maintenance-off >> /data/logs.txt"] ---- -apiVersion: formol.desmojim.fr/v1alpha1 -kind: Function -metadata: - name: maintenance-on - namespace: demo -spec: - name: maintenance-on - command: ["/bin/sh", "-c", "echo $(date +%Y/%m/%d-%H:%M:%S) maintenance-on >> /data/logs.txt"] diff --git a/test/01-deployment.yaml b/test/01-deployment.yaml index 84b08e6..041340c 100644 --- a/test/01-deployment.yaml +++ b/test/01-deployment.yaml @@ -84,9 +84,11 @@ spec: name: postgres-config-demo ports: - containerPort: 5432 - name: postgredb + name: postgresdb volumeMounts: - - name: postgredb + - name: postgres-volume mountPath: /var/lib/postgresql/data volumes: - - name: postgredb + - name: postgres-volume + hostPath: + path: /data/postgresdb diff --git a/test/02-backupconf.yaml b/test/02-backupconf.yaml index a6a8816..0890f87 100644 --- 
a/test/02-backupconf.yaml +++ b/test/02-backupconf.yaml @@ -1,5 +1,104 @@ --- apiVersion: formol.desmojim.fr/v1alpha1 +kind: Repo +metadata: + name: repo-local + namespace: demo +spec: + backend: + local: + emptyDir: + repositorySecrets: secret-minio +--- +apiVersion: formol.desmojim.fr/v1alpha1 +kind: Repo +metadata: + name: repo-minio + namespace: demo +spec: + backend: + s3: + server: raid5.desmojim.fr:9000 + bucket: testbucket2 + repositorySecrets: secret-minio +--- +apiVersion: formol.desmojim.fr/v1alpha1 +kind: Function +metadata: + name: restore-pg + namespace: demo +spec: + name: restore-pg + image: desmo999r/formolcli:latest + args: ["postgres", "restore", "--hostname", $(PGHOST), "--database", $(PGDATABASE), "--username", $(PGUSER), "--password", $(PGPASSWD), "--file", "/output/backup-pg.sql"] + env: + - name: PGHOST + value: postgres + - name: PGDATABASE + value: demopostgres + - name: PGUSER + value: demopostgres + - name: PGPASSWD + value: password123! +--- +apiVersion: formol.desmojim.fr/v1alpha1 +kind: Function +metadata: + name: with-envfrom + namespace: demo +spec: + name: with-envfrom + command: ["touch", $(title)] + envFrom: + - secretRef: + name: with-envfrom-secret +--- +apiVersion: formol.desmojim.fr/v1alpha1 +kind: Function +metadata: + name: with-env + namespace: demo +spec: + name: with-env + command: ["touch", $(TESTFILE)] + env: + - name: TESTFILE + value: /data/testfile +--- +apiVersion: formol.desmojim.fr/v1alpha1 +kind: Function +metadata: + name: backup-pg + namespace: demo +spec: + name: backup-pg + command: ["pg_dump"] + args: ["--username", $(PGUSER), "--clean", "--if-exists", "--inserts", "--file", "/formol-shared/backup-pg.sql"] + env: + - name: PGUSER + value: demopostgres +--- +apiVersion: formol.desmojim.fr/v1alpha1 +kind: Function +metadata: + name: maintenance-off + namespace: demo +spec: + name: maintenance-off + command: ["/bin/sh"] + args: ["-c", "echo $(date +%Y/%m/%d-%H:%M:%S) maintenance-off >> /data/logs.txt"] +--- 
+apiVersion: formol.desmojim.fr/v1alpha1 +kind: Function +metadata: + name: maintenance-on + namespace: demo +spec: + name: maintenance-on + command: ["/bin/sh"] + args: ["-c", "echo $(date +%Y/%m/%d-%H:%M:%S) maintenance-on >> /data/logs.txt"] +--- +apiVersion: formol.desmojim.fr/v1alpha1 kind: BackupConfiguration metadata: name: backup-demo @@ -32,6 +131,8 @@ spec: targetName: postgres-demo containers: - name: postgres + job: + - name: backup-pg # - kind: Job # name: backup-pg # steps: From b5a217bc3a536095084b48e19f935bc1508baedf Mon Sep 17 00:00:00 2001 From: Jean-Marc ANDRE Date: Mon, 27 Feb 2023 00:52:01 +0100 Subject: [PATCH 32/69] Reworked the scheduling of the tasks. We want the init/backup/finalize tasks to be run for all the targets one after the other. --- api/v1alpha1/backupsession_types.go | 17 ++-- controllers/backupsession_controller.go | 79 +++++++----------- .../backupsession_controller_helpers.go | 82 ++++++++++++++----- 3 files changed, 97 insertions(+), 81 deletions(-) diff --git a/api/v1alpha1/backupsession_types.go b/api/v1alpha1/backupsession_types.go index 7bab396..03f69cd 100644 --- a/api/v1alpha1/backupsession_types.go +++ b/api/v1alpha1/backupsession_types.go @@ -24,14 +24,15 @@ import ( type SessionState string const ( - New SessionState = "New" - Init SessionState = "Initializing" - Running SessionState = "Running" - Waiting SessionState = "Waiting" - Finalize SessionState = "Finalizing" - Success SessionState = "Success" - Failure SessionState = "Failure" - Deleted SessionState = "Deleted" + New SessionState = "New" + Initializing SessionState = "Initializing" + Initialized SessionState = "Initialized" + Running SessionState = "Running" + Waiting SessionState = "Waiting" + Finalize SessionState = "Finalize" + Success SessionState = "Success" + Failure SessionState = "Failure" + Deleted SessionState = "Deleted" ) type TargetStatus struct { diff --git a/controllers/backupsession_controller.go b/controllers/backupsession_controller.go 
index f48861b..6ebd11f 100644 --- a/controllers/backupsession_controller.go +++ b/controllers/backupsession_controller.go @@ -91,72 +91,49 @@ func (r *BackupSessionReconciler) Reconcile(ctx context.Context, req ctrl.Reques return ctrl.Result{}, err } + var newSessionState formolv1alpha1.SessionState switch backupSession.Status.SessionState { case formolv1alpha1.New: + // Go through the Targets and create the corresponding TargetStatus. Move to Initializing. if r.isBackupOngoing(backupConf) { r.Log.V(0).Info("there is an ongoing backup. Let's reschedule this operation") return ctrl.Result{ RequeueAfter: 30 * time.Second, }, nil } - if nextTargetStatus := r.startNextTask(&backupSession, backupConf); nextTargetStatus != nil { - r.Log.V(0).Info("New backup. Start the first task", "task", nextTargetStatus) - backupSession.Status.SessionState = formolv1alpha1.Running - if err := r.Status().Update(ctx, &backupSession); err != nil { - r.Log.Error(err, "unable to update BackupSession status") - } - return ctrl.Result{}, err - } else { - r.Log.V(0).Info("No first target? That should not happen. Mark the backup has failed") - backupSession.Status.SessionState = formolv1alpha1.Failure - if err := r.Status().Update(ctx, &backupSession); err != nil { - r.Log.Error(err, "unable to update BackupSession status") - } - return ctrl.Result{}, err - } + newSessionState = r.initBackup(&backupSession, backupConf) + case formolv1alpha1.Initializing: + // Wait for all the Targets to be in the Initialized state then move them to Running and move to Running myself. + // if one of the Target fails to initialize, move it back to New state and decrement Try. + // if try reaches 0, move all the Targets to Finalize and move myself to Failure. + newSessionState = r.checkInitialized(&backupSession, backupConf) case formolv1alpha1.Running: - // Backup ongoing. Check the status of the last backup task and decide what to do next. 
- currentTargetStatus := &(backupSession.Status.Targets[len(backupSession.Status.Targets)-1]) - switch currentTargetStatus.SessionState { - case formolv1alpha1.Running: - r.Log.V(0).Info("Current task is still running. Wait until it's finished") - case formolv1alpha1.Success: - r.Log.V(0).Info("Last backup task was a success. Start a new one") - if nextTargetStatus := r.startNextTask(&backupSession, backupConf); nextTargetStatus != nil { - r.Log.V(0).Info("Starting a new task", "task", nextTargetStatus) - } else { - r.Log.V(0).Info("No more tasks to start. The backup is a success. Let's do some cleanup") - backupSession.Status.SessionState = formolv1alpha1.Success - } - if err := r.Status().Update(ctx, &backupSession); err != nil { - r.Log.Error(err, "unable to update BackupSession") - } - return ctrl.Result{}, err - case formolv1alpha1.Failure: - // Last task failed. Try to run it again - if currentTargetStatus.Try < backupConf.Spec.Targets[len(backupSession.Status.Targets)-1].Retry { - r.Log.V(0).Info("Last task failed. Try to run it again") - currentTargetStatus.Try++ - currentTargetStatus.SessionState = formolv1alpha1.New - currentTargetStatus.StartTime = &metav1.Time{Time: time.Now()} - } else { - r.Log.V(0).Info("Task failed again and for the last time") - backupSession.Status.SessionState = formolv1alpha1.Failure - } - if err := r.Status().Update(ctx, &backupSession); err != nil { - r.Log.Error(err, "unable to update BackupSession") - } - return ctrl.Result{}, err + // Wait for all the target to be in Waiting state then move them to the Finalize state. Move myself to Finalize. + // if one of the Target fails the backup, move it back to Running state and decrement Try. + // if try reaches 0, move all the Targets to Finalize and move myself to Failure. + newSessionState = r.checkWaiting(&backupSession, backupConf) + case formolv1alpha1.Finalize: + // Check the TargetStatus of all the Targets. If they are all Success then move myself to Success. 
+ // if one of the Target fails to Finalize, move it back to Finalize state and decrement Try. + // if try reaches 0, move myself to Success because the backup was a Success even if the Finalize failed. + if newSessionState = r.checkSuccess(&backupSession, backupConf); newSessionState == formolv1alpha1.Failure { + r.Log.V(0).Info("One of the target did not manage to Finalize but the backup is still a Success") + newSessionState = formolv1alpha1.Success } + case formolv1alpha1.Success: + r.Log.V(0).Info("Backup was a success") case formolv1alpha1.Failure: - // Failed backup. Don't do anything anymore - case formolv1alpha1.Success: - // Backup was a success + r.Log.V(0).Info("Backup failed") + default: // BackupSession has just been created - backupSession.Status.SessionState = formolv1alpha1.New + newSessionState = formolv1alpha1.New backupSession.Status.StartTime = &metav1.Time{Time: time.Now()} + } + if newSessionState != "" { + r.Log.V(0).Info("BackupSession needs a status update", "newSessionState", newSessionState, "backupSession", backupSession) + backupSession.Status.SessionState = newSessionState if err := r.Status().Update(ctx, &backupSession); err != nil { r.Log.Error(err, "unable to update BackupSession.Status") return ctrl.Result{}, err diff --git a/controllers/backupsession_controller_helpers.go b/controllers/backupsession_controller_helpers.go index 86d678c..0ef20db 100644 --- a/controllers/backupsession_controller_helpers.go +++ b/controllers/backupsession_controller_helpers.go @@ -38,29 +38,67 @@ func (r *BackupSessionReconciler) isBackupOngoing(backupConf formolv1alpha1.Back return len(backupSessionList.Items) > 0 } -func (r *BackupSessionReconciler) startNextTask(backupSession *formolv1alpha1.BackupSession, backupConf formolv1alpha1.BackupConfiguration) *formolv1alpha1.TargetStatus { - nextTargetIndex := len(backupSession.Status.Targets) - if nextTargetIndex < len(backupConf.Spec.Targets) { - nextTarget := backupConf.Spec.Targets[nextTargetIndex] - 
nextTargetStatus := formolv1alpha1.TargetStatus{ - BackupType: nextTarget.BackupType, - TargetName: nextTarget.TargetName, - TargetKind: nextTarget.TargetKind, - SessionState: formolv1alpha1.New, +func (r *BackupSessionReconciler) initBackup(backupSession *formolv1alpha1.BackupSession, backupConf formolv1alpha1.BackupConfiguration) formolv1alpha1.SessionState { + for _, target := range backupConf.Spec.Targets { + r.Log.V(0).Info("Creating new target", "target", target.TargetName) + backupSession.Status.Targets = append(backupSession.Status.Targets, formolv1alpha1.TargetStatus{ + BackupType: target.BackupType, + TargetName: target.TargetName, + TargetKind: target.TargetKind, + SessionState: "", StartTime: &metav1.Time{Time: time.Now()}, Try: 1, - } - switch nextTarget.BackupType { - case formolv1alpha1.OnlineKind: - r.Log.V(0).Info("Starts a new OnlineKind task", "target", nextTarget) - case formolv1alpha1.JobKind: - r.Log.V(0).Info("Starts a new JobKind task", "target", nextTarget) - case formolv1alpha1.SnapshotKind: - r.Log.V(0).Info("Starts a new SnapshotKind task", "target", nextTarget) - } - backupSession.Status.Targets = append(backupSession.Status.Targets, nextTargetStatus) - return &nextTargetStatus - } else { - return nil + }) } + return formolv1alpha1.Initializing +} + +func (r *BackupSessionReconciler) checkSessionState( + backupSession *formolv1alpha1.BackupSession, + backupConf formolv1alpha1.BackupConfiguration, + currentState formolv1alpha1.SessionState, + waitState formolv1alpha1.SessionState, + nextState formolv1alpha1.SessionState) formolv1alpha1.SessionState { + for i, targetStatus := range backupSession.Status.Targets { + r.Log.V(0).Info("Target status", "target", targetStatus.TargetName, "session state", targetStatus.SessionState) + switch targetStatus.SessionState { + case currentState: + r.Log.V(0).Info("Move target to waitState", "target", targetStatus.TargetName, "waitState", waitState) + backupSession.Status.Targets[i].SessionState = 
waitState + return waitState + case formolv1alpha1.Failure: + if targetStatus.Try < backupConf.Spec.Targets[i].Retry { + r.Log.V(0).Info("Target failed. Try one more time", "target", targetStatus.TargetName, "waitState", waitState) + backupSession.Status.Targets[i].SessionState = waitState + backupSession.Status.Targets[i].Try++ + backupSession.Status.Targets[i].StartTime = &metav1.Time{Time: time.Now()} + return waitState + } else { + r.Log.V(0).Info("Target failed for the last time", "target", targetStatus.TargetName) + return formolv1alpha1.Failure + } + case waitState: + // target is still busy with its current state. Wait until it is done. + r.Log.V(0).Info("Waiting for one target to finish", "waitState", waitState) + return "" + default: + if i == len(backupSession.Status.Targets)-1 { + r.Log.V(0).Info("Moving to next state", "nextState", nextState) + return nextState + } + } + } + return "" +} + +func (r *BackupSessionReconciler) checkInitialized(backupSession *formolv1alpha1.BackupSession, backupConf formolv1alpha1.BackupConfiguration) formolv1alpha1.SessionState { + return r.checkSessionState(backupSession, backupConf, "", formolv1alpha1.Initializing, formolv1alpha1.Running) +} + +func (r *BackupSessionReconciler) checkWaiting(backupSession *formolv1alpha1.BackupSession, backupConf formolv1alpha1.BackupConfiguration) formolv1alpha1.SessionState { + return r.checkSessionState(backupSession, backupConf, formolv1alpha1.Initialized, formolv1alpha1.Running, formolv1alpha1.Finalize) +} + +func (r *BackupSessionReconciler) checkSuccess(backupSession *formolv1alpha1.BackupSession, backupConf formolv1alpha1.BackupConfiguration) formolv1alpha1.SessionState { + return r.checkSessionState(backupSession, backupConf, formolv1alpha1.Waiting, formolv1alpha1.Finalize, formolv1alpha1.Success) } From e025a07c9bf5762c2403930388825f98aa30a6ef Mon Sep 17 00:00:00 2001 From: Jean-Marc Andre Date: Mon, 27 Feb 2023 18:21:43 +0100 Subject: [PATCH 33/69] Should be able to delete bs 
even if the bc has already been deleted --- controllers/backupsession_controller.go | 17 ++++++++--------- 1 file changed, 8 insertions(+), 9 deletions(-) diff --git a/controllers/backupsession_controller.go b/controllers/backupsession_controller.go index 6ebd11f..e86b662 100644 --- a/controllers/backupsession_controller.go +++ b/controllers/backupsession_controller.go @@ -62,15 +62,6 @@ func (r *BackupSessionReconciler) Reconcile(ctx context.Context, req ctrl.Reques } return ctrl.Result{}, err } - backupConf := formolv1alpha1.BackupConfiguration{} - if err := r.Get(ctx, client.ObjectKey{ - Namespace: backupSession.Spec.Ref.Namespace, - Name: backupSession.Spec.Ref.Name, - }, &backupConf); err != nil { - r.Log.Error(err, "unable to get BackupConfiguration") - return ctrl.Result{}, err - } - if !backupSession.ObjectMeta.DeletionTimestamp.IsZero() { r.Log.V(0).Info("BackupSession is being deleted") if controllerutil.ContainsFinalizer(&backupSession, finalizerName) { @@ -90,6 +81,14 @@ func (r *BackupSessionReconciler) Reconcile(ctx context.Context, req ctrl.Reques } return ctrl.Result{}, err } + backupConf := formolv1alpha1.BackupConfiguration{} + if err := r.Get(ctx, client.ObjectKey{ + Namespace: backupSession.Spec.Ref.Namespace, + Name: backupSession.Spec.Ref.Name, + }, &backupConf); err != nil { + r.Log.Error(err, "unable to get BackupConfiguration") + return ctrl.Result{}, err + } var newSessionState formolv1alpha1.SessionState switch backupSession.Status.SessionState { From b10337aa66be4e4f7ed44dbdcd07493d124b9887 Mon Sep 17 00:00:00 2001 From: Jean-Marc ANDRE Date: Wed, 1 Mar 2023 21:53:16 +0100 Subject: [PATCH 34/69] pg_dumpall --- test/02-backupconf.yaml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/test/02-backupconf.yaml b/test/02-backupconf.yaml index 0890f87..2a780f4 100644 --- a/test/02-backupconf.yaml +++ b/test/02-backupconf.yaml @@ -72,7 +72,7 @@ metadata: namespace: demo spec: name: backup-pg - command: ["pg_dump"] + command: 
["pg_dumpall"] args: ["--username", $(PGUSER), "--clean", "--if-exists", "--inserts", "--file", "/formol-shared/backup-pg.sql"] env: - name: PGUSER From 613d0a63742f76d849837384446fd60aeedfe60a Mon Sep 17 00:00:00 2001 From: Jean-Marc ANDRE Date: Thu, 2 Mar 2023 21:30:55 +0100 Subject: [PATCH 35/69] Allow sidecar containers to access RestoreSessions --- controllers/backupconfiguration_controller_helpers.go | 7 ++++++- test/00-setup.yaml | 4 ++-- test/02-backupconf.yaml | 6 +++--- 3 files changed, 11 insertions(+), 6 deletions(-) diff --git a/controllers/backupconfiguration_controller_helpers.go b/controllers/backupconfiguration_controller_helpers.go index 300d7a7..a2d484a 100644 --- a/controllers/backupconfiguration_controller_helpers.go +++ b/controllers/backupconfiguration_controller_helpers.go @@ -379,13 +379,18 @@ func (r *BackupConfigurationReconciler) createRBACSidecar(sa corev1.ServiceAccou rbacv1.PolicyRule{ Verbs: []string{"get", "list", "watch"}, APIGroups: []string{"formol.desmojim.fr"}, - Resources: []string{"backupsessions", "backupconfigurations", "functions", "repos"}, + Resources: []string{"restoresessions", "backupsessions", "backupconfigurations", "functions", "repos"}, }, rbacv1.PolicyRule{ Verbs: []string{"get", "list", "watch", "create", "update", "patch", "delete"}, APIGroups: []string{"formol.desmojim.fr"}, Resources: []string{"backupsessions/status"}, }, + rbacv1.PolicyRule{ + Verbs: []string{"get", "list", "watch", "create", "update", "patch", "delete"}, + APIGroups: []string{"formol.desmojim.fr"}, + Resources: []string{"restoresessions/status"}, + }, }, } r.Log.V(0).Info("Creating formol sidecar role", "role", role) diff --git a/test/00-setup.yaml b/test/00-setup.yaml index 9efa181..7eaa8f8 100644 --- a/test/00-setup.yaml +++ b/test/00-setup.yaml @@ -68,5 +68,5 @@ metadata: namespace: demo data: RESTIC_PASSWORD: bHIyOXhtOTU= - AWS_ACCESS_KEY_ID: OWFTSXZBSEVzWlNVMmkyTU9zVGxWSk1lL1NjPQ== - AWS_SECRET_ACCESS_KEY: 
WVN5ck9ncVllcjBWNFNLdlVOcmx2OGhjTllhZGZuN2xaNjBIaXRlL3djWT0= + AWS_ACCESS_KEY_ID: SjV4V2NqQ2RzckxpZ2lEZA== + AWS_SECRET_ACCESS_KEY: OVdBMnN1djVtanRLRTdnMkRjNWl5WWtkbDNobGV5UU8= diff --git a/test/02-backupconf.yaml b/test/02-backupconf.yaml index 2a780f4..1e37d74 100644 --- a/test/02-backupconf.yaml +++ b/test/02-backupconf.yaml @@ -18,8 +18,8 @@ metadata: spec: backend: s3: - server: raid5.desmojim.fr:9000 - bucket: testbucket2 + server: minio-svc.minio:9000 + bucket: backups repositorySecrets: secret-minio --- apiVersion: formol.desmojim.fr/v1alpha1 @@ -106,7 +106,7 @@ metadata: spec: suspend: true image: desmo999r/formolcli:latest - repository: repo-local + repository: repo-minio schedule: "15 * * * *" keep: last: 5 From f0c17e061f8494739a238f409b72956330adc281 Mon Sep 17 00:00:00 2001 From: Jean-Marc ANDRE Date: Mon, 6 Mar 2023 23:04:39 +0100 Subject: [PATCH 36/69] The BackupSession controller in the sidecar should get the latest informtation about the repository everytime it reconciles because it might change --- api/v1alpha1/repo_types.go | 52 ------------------- .../backupconfiguration_controller_helpers.go | 12 +++-- 2 files changed, 8 insertions(+), 56 deletions(-) diff --git a/api/v1alpha1/repo_types.go b/api/v1alpha1/repo_types.go index b4de2d8..8959a8e 100644 --- a/api/v1alpha1/repo_types.go +++ b/api/v1alpha1/repo_types.go @@ -17,10 +17,8 @@ limitations under the License. 
package v1alpha1 import ( - "fmt" corev1 "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "strings" ) const ( @@ -85,53 +83,3 @@ type RepoList struct { func init() { SchemeBuilder.Register(&Repo{}, &RepoList{}) } - -func (repo *Repo) GetResticEnv(backupConf BackupConfiguration) []corev1.EnvVar { - env := []corev1.EnvVar{} - if repo.Spec.Backend.S3 != nil { - url := fmt.Sprintf("s3:http://%s/%s/%s-%s", - repo.Spec.Backend.S3.Server, - repo.Spec.Backend.S3.Bucket, - strings.ToUpper(backupConf.Namespace), - strings.ToLower(backupConf.Name)) - env = append(env, corev1.EnvVar{ - Name: RESTIC_REPOSITORY, - Value: url, - }) - for _, key := range []string{ - AWS_ACCESS_KEY_ID, - AWS_SECRET_ACCESS_KEY, - } { - env = append(env, corev1.EnvVar{ - Name: key, - ValueFrom: &corev1.EnvVarSource{ - SecretKeyRef: &corev1.SecretKeySelector{ - LocalObjectReference: corev1.LocalObjectReference{ - Name: repo.Spec.RepositorySecrets, - }, - Key: key, - }, - }, - }) - } - } - if repo.Spec.Backend.Local != nil { - env = append(env, corev1.EnvVar{ - Name: RESTIC_REPOSITORY, - Value: RESTIC_REPO_PATH, - }) - } - env = append(env, corev1.EnvVar{ - Name: RESTIC_PASSWORD, - ValueFrom: &corev1.EnvVarSource{ - SecretKeyRef: &corev1.SecretKeySelector{ - LocalObjectReference: corev1.LocalObjectReference{ - Name: repo.Spec.RepositorySecrets, - }, - Key: RESTIC_PASSWORD, - }, - }, - }) - - return env -} diff --git a/controllers/backupconfiguration_controller_helpers.go b/controllers/backupconfiguration_controller_helpers.go index a2d484a..a6828ad 100644 --- a/controllers/backupconfiguration_controller_helpers.go +++ b/controllers/backupconfiguration_controller_helpers.go @@ -253,12 +253,11 @@ func (r *BackupConfigurationReconciler) addSidecar(backupConf formolv1alpha1.Bac return err } r.Log.V(1).Info("Got Repository", "repo", repo) - env := repo.GetResticEnv(backupConf) sidecar := corev1.Container{ Name: formolv1alpha1.SIDECARCONTAINER_NAME, Image: backupConf.Spec.Image, Args: 
[]string{"backupsession", "server"}, - Env: append(env, + Env: []corev1.EnvVar{ corev1.EnvVar{ Name: formolv1alpha1.TARGET_NAME, Value: target.TargetName, @@ -270,7 +269,7 @@ func (r *BackupConfigurationReconciler) addSidecar(backupConf formolv1alpha1.Bac FieldPath: "metadata.namespace", }, }, - }), + }}, VolumeMounts: []corev1.VolumeMount{}, SecurityContext: &corev1.SecurityContext{ Privileged: func() *bool { b := true; return &b }(), @@ -379,7 +378,12 @@ func (r *BackupConfigurationReconciler) createRBACSidecar(sa corev1.ServiceAccou rbacv1.PolicyRule{ Verbs: []string{"get", "list", "watch"}, APIGroups: []string{"formol.desmojim.fr"}, - Resources: []string{"restoresessions", "backupsessions", "backupconfigurations", "functions", "repos"}, + Resources: []string{"restoresessions", "backupsessions", "backupconfigurations", "functions", "repoes"}, + }, + rbacv1.PolicyRule{ + Verbs: []string{"get", "list", "watch"}, + APIGroups: []string{""}, + Resources: []string{"secrets"}, }, rbacv1.PolicyRule{ Verbs: []string{"get", "list", "watch", "create", "update", "patch", "delete"}, From 19d74cda40b40eaea1d633f30400abd46d898f7a Mon Sep 17 00:00:00 2001 From: Jean-Marc ANDRE Date: Mon, 6 Mar 2023 23:05:27 +0100 Subject: [PATCH 37/69] minio repository with minikube --- test/minio.yaml | 106 ++++++++++++++++++++++++++++++++++++++++++++++++ 1 file changed, 106 insertions(+) create mode 100644 test/minio.yaml diff --git a/test/minio.yaml b/test/minio.yaml new file mode 100644 index 0000000..f8d26aa --- /dev/null +++ b/test/minio.yaml @@ -0,0 +1,106 @@ +--- +apiVersion: v1 +kind: Namespace +metadata: + name: minio +--- +apiVersion: v1 +kind: PersistentVolumeClaim +metadata: + name: demo-minio-pvc + namespace: minio +spec: + storageClassName: standard + resources: + requests: + storage: 1Gi + accessModes: + - ReadWriteOnce +--- +apiVersion: apps/v1 # for k8s versions before 1.9.0 use apps/v1beta2 and before 1.8.0 use extensions/v1beta1 +kind: Deployment +metadata: + # This name 
uniquely identifies the Deployment + name: minio-deployment + namespace: minio +spec: + selector: + matchLabels: + app: minio + strategy: + type: Recreate + template: + metadata: + labels: + # Label is used as selector in the service. + app: minio + spec: + # Refer to the PVC created earlier + volumes: + - name: storage + persistentVolumeClaim: + # Name of the PVC created earlier + claimName: demo-minio-pvc + containers: + - name: minio + # Pulls the default Minio image from Docker Hub + image: minio/minio:latest + args: + - server + - /storage + - --console-address + - ":9001" + env: + # Minio access key and secret key + - name: MINIO_ACCESS_KEY + value: "minio" + - name: MINIO_SECRET_KEY + value: "minio123" + ports: + - containerPort: 9000 + hostPort: 9000 + - containerPort: 9001 + hostPort: 9001 + # Mount the volume into the pod + volumeMounts: + - name: storage # must match the volume name, above + mountPath: "/storage" +--- +apiVersion: v1 +kind: Service +metadata: + name: minio-svc + namespace: minio + labels: + app: minio +spec: + ports: + - port: 9000 + name: minio + - port: 9001 + name: minio-console + selector: + app: minio +--- +# Source: nextcloud/templates/ingress.yaml +apiVersion: networking.k8s.io/v1 +kind: Ingress +metadata: + name: minio + namespace: "minio" + annotations: + # cert-manager.io/cluster-issuer: letsencrypt + kubernetes.io/ingress.class: nginx +spec: + rules: + - host: minio.minikube + http: + paths: + - path: "/" + pathType: Prefix + backend: + service: + name: minio-svc + port: + number: 9001 + From 3486ad2efe7bcf40990212299a87aaa70f88965f Mon Sep 17 00:00:00 2001 From: Jean-Marc ANDRE Date: Mon, 20 Mar 2023 22:13:44 +0100 Subject: [PATCH 38/69] prepared BackupSession and RestoreSession common code --- api/v1alpha1/restoresession_types.go | 17 ++- api/v1alpha1/zz_generated.deepcopy.go | 33 +++++- .../backupconfiguration_controller_helpers.go | 2 +- controllers/backupsession_controller.go | 16 +-- 
.../backupsession_controller_helpers.go | 104 ------------------ controllers/restoresession_controller.go | 57 +++++++--- .../restoresession_controller_helper.go | 9 ++ controllers/session.go | 100 +++++++++++++++++ main.go | 12 +- 9 files changed, 205 insertions(+), 145 deletions(-) delete mode 100644 controllers/backupsession_controller_helpers.go create mode 100644 controllers/restoresession_controller_helper.go create mode 100644 controllers/session.go diff --git a/api/v1alpha1/restoresession_types.go b/api/v1alpha1/restoresession_types.go index 462bd3c..289dacb 100644 --- a/api/v1alpha1/restoresession_types.go +++ b/api/v1alpha1/restoresession_types.go @@ -20,22 +20,21 @@ import ( metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" ) -// EDIT THIS FILE! THIS IS SCAFFOLDING FOR YOU TO OWN! -// NOTE: json tags are required. Any new fields you add must have json tags for the fields to be serialized. +type BackupSessionRef struct { + Spec BackupSessionSpec `json:"spec"` + Status BackupSessionStatus `json:"status"` +} // RestoreSessionSpec defines the desired state of RestoreSession type RestoreSessionSpec struct { - // INSERT ADDITIONAL SPEC FIELDS - desired state of cluster - // Important: Run "make" to regenerate code after modifying this file - - // Foo is an example field of RestoreSession. 
Edit restoresession_types.go to remove/update - Foo string `json:"foo,omitempty"` + BackupSessionRef `json:"backupSessionRef"` } // RestoreSessionStatus defines the observed state of RestoreSession type RestoreSessionStatus struct { - // INSERT ADDITIONAL STATUS FIELD - define observed state of cluster - // Important: Run "make" to regenerate code after modifying this file + SessionState `json:"state,omitempty"` + StartTime *metav1.Time `json:"startTime,omitempty"` + Targets []TargetStatus `json:"targets,omitempty"` } //+kubebuilder:object:root=true diff --git a/api/v1alpha1/zz_generated.deepcopy.go b/api/v1alpha1/zz_generated.deepcopy.go index 4f7eaf8..a4b093e 100644 --- a/api/v1alpha1/zz_generated.deepcopy.go +++ b/api/v1alpha1/zz_generated.deepcopy.go @@ -216,6 +216,23 @@ func (in *BackupSessionList) DeepCopyObject() runtime.Object { return nil } +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *BackupSessionRef) DeepCopyInto(out *BackupSessionRef) { + *out = *in + out.Spec = in.Spec + in.Status.DeepCopyInto(&out.Status) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new BackupSessionRef. +func (in *BackupSessionRef) DeepCopy() *BackupSessionRef { + if in == nil { + return nil + } + out := new(BackupSessionRef) + in.DeepCopyInto(out) + return out +} + // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *BackupSessionSpec) DeepCopyInto(out *BackupSessionSpec) { *out = *in @@ -458,8 +475,8 @@ func (in *RestoreSession) DeepCopyInto(out *RestoreSession) { *out = *in out.TypeMeta = in.TypeMeta in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) - out.Spec = in.Spec - out.Status = in.Status + in.Spec.DeepCopyInto(&out.Spec) + in.Status.DeepCopyInto(&out.Status) } // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RestoreSession. 
@@ -515,6 +532,7 @@ func (in *RestoreSessionList) DeepCopyObject() runtime.Object { // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *RestoreSessionSpec) DeepCopyInto(out *RestoreSessionSpec) { *out = *in + in.BackupSessionRef.DeepCopyInto(&out.BackupSessionRef) } // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RestoreSessionSpec. @@ -530,6 +548,17 @@ func (in *RestoreSessionSpec) DeepCopy() *RestoreSessionSpec { // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *RestoreSessionStatus) DeepCopyInto(out *RestoreSessionStatus) { *out = *in + if in.StartTime != nil { + in, out := &in.StartTime, &out.StartTime + *out = (*in).DeepCopy() + } + if in.Targets != nil { + in, out := &in.Targets, &out.Targets + *out = make([]TargetStatus, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } } // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RestoreSessionStatus. 
diff --git a/controllers/backupconfiguration_controller_helpers.go b/controllers/backupconfiguration_controller_helpers.go index a6828ad..c9dcb96 100644 --- a/controllers/backupconfiguration_controller_helpers.go +++ b/controllers/backupconfiguration_controller_helpers.go @@ -256,7 +256,7 @@ func (r *BackupConfigurationReconciler) addSidecar(backupConf formolv1alpha1.Bac sidecar := corev1.Container{ Name: formolv1alpha1.SIDECARCONTAINER_NAME, Image: backupConf.Spec.Image, - Args: []string{"backupsession", "server"}, + Args: []string{"server"}, Env: []corev1.EnvVar{ corev1.EnvVar{ Name: formolv1alpha1.TARGET_NAME, diff --git a/controllers/backupsession_controller.go b/controllers/backupsession_controller.go index e86b662..6bd3210 100644 --- a/controllers/backupsession_controller.go +++ b/controllers/backupsession_controller.go @@ -20,10 +20,8 @@ import ( "context" "time" - "github.com/go-logr/logr" "k8s.io/apimachinery/pkg/api/errors" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/apimachinery/pkg/runtime" ctrl "sigs.k8s.io/controller-runtime" "sigs.k8s.io/controller-runtime/pkg/client" "sigs.k8s.io/controller-runtime/pkg/controller/controllerutil" @@ -39,10 +37,7 @@ const ( // BackupSessionReconciler reconciles a BackupSession object type BackupSessionReconciler struct { - client.Client - Scheme *runtime.Scheme - Log logr.Logger - context.Context + Session } //+kubebuilder:rbac:groups=formol.desmojim.fr,resources=backupsessions,verbs=get;list;watch;create;update;patch;delete @@ -100,22 +95,23 @@ func (r *BackupSessionReconciler) Reconcile(ctx context.Context, req ctrl.Reques RequeueAfter: 30 * time.Second, }, nil } - newSessionState = r.initBackup(&backupSession, backupConf) + backupSession.Status.Targets = r.initSession(backupConf) + newSessionState = formolv1alpha1.Initializing case formolv1alpha1.Initializing: // Wait for all the Targets to be in the Initialized state then move them to Running and move to Running myself. 
// if one of the Target fails to initialize, move it back to New state and decrement Try. // if try reaches 0, move all the Targets to Finalize and move myself to Failure. - newSessionState = r.checkInitialized(&backupSession, backupConf) + newSessionState = r.checkInitialized(backupSession.Status.Targets, backupConf) case formolv1alpha1.Running: // Wait for all the target to be in Waiting state then move them to the Finalize state. Move myself to Finalize. // if one of the Target fails the backup, move it back to Running state and decrement Try. // if try reaches 0, move all the Targets to Finalize and move myself to Failure. - newSessionState = r.checkWaiting(&backupSession, backupConf) + newSessionState = r.checkWaiting(backupSession.Status.Targets, backupConf) case formolv1alpha1.Finalize: // Check the TargetStatus of all the Targets. If they are all Success then move myself to Success. // if one of the Target fails to Finalize, move it back to Finalize state and decrement Try. // if try reaches 0, move myself to Success because the backup was a Success even if the Finalize failed. - if newSessionState = r.checkSuccess(&backupSession, backupConf); newSessionState == formolv1alpha1.Failure { + if newSessionState = r.checkSuccess(backupSession.Status.Targets, backupConf); newSessionState == formolv1alpha1.Failure { r.Log.V(0).Info("One of the target did not manage to Finalize but the backup is still a Success") newSessionState = formolv1alpha1.Success } diff --git a/controllers/backupsession_controller_helpers.go b/controllers/backupsession_controller_helpers.go deleted file mode 100644 index 0ef20db..0000000 --- a/controllers/backupsession_controller_helpers.go +++ /dev/null @@ -1,104 +0,0 @@ -/* -Copyright 2023. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. 
-You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package controllers - -import ( - formolv1alpha1 "github.com/desmo999r/formol/api/v1alpha1" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/apimachinery/pkg/fields" - "sigs.k8s.io/controller-runtime/pkg/client" - "time" -) - -func (r *BackupSessionReconciler) isBackupOngoing(backupConf formolv1alpha1.BackupConfiguration) bool { - backupSessionList := &formolv1alpha1.BackupSessionList{} - if err := r.List(r.Context, backupSessionList, - client.InNamespace(backupConf.Namespace), client.MatchingFieldsSelector{ - Selector: fields.SelectorFromSet(fields.Set{ - sessionState: "Running", - }), - }); err != nil { - r.Log.Error(err, "unable to get backupsessionlist") - return true - } - return len(backupSessionList.Items) > 0 -} - -func (r *BackupSessionReconciler) initBackup(backupSession *formolv1alpha1.BackupSession, backupConf formolv1alpha1.BackupConfiguration) formolv1alpha1.SessionState { - for _, target := range backupConf.Spec.Targets { - r.Log.V(0).Info("Creating new target", "target", target.TargetName) - backupSession.Status.Targets = append(backupSession.Status.Targets, formolv1alpha1.TargetStatus{ - BackupType: target.BackupType, - TargetName: target.TargetName, - TargetKind: target.TargetKind, - SessionState: "", - StartTime: &metav1.Time{Time: time.Now()}, - Try: 1, - }) - } - return formolv1alpha1.Initializing -} - -func (r *BackupSessionReconciler) checkSessionState( - backupSession *formolv1alpha1.BackupSession, - backupConf formolv1alpha1.BackupConfiguration, - currentState formolv1alpha1.SessionState, - waitState formolv1alpha1.SessionState, - 
nextState formolv1alpha1.SessionState) formolv1alpha1.SessionState { - for i, targetStatus := range backupSession.Status.Targets { - r.Log.V(0).Info("Target status", "target", targetStatus.TargetName, "session state", targetStatus.SessionState) - switch targetStatus.SessionState { - case currentState: - r.Log.V(0).Info("Move target to waitState", "target", targetStatus.TargetName, "waitState", waitState) - backupSession.Status.Targets[i].SessionState = waitState - return waitState - case formolv1alpha1.Failure: - if targetStatus.Try < backupConf.Spec.Targets[i].Retry { - r.Log.V(0).Info("Target failed. Try one more time", "target", targetStatus.TargetName, "waitState", waitState) - backupSession.Status.Targets[i].SessionState = waitState - backupSession.Status.Targets[i].Try++ - backupSession.Status.Targets[i].StartTime = &metav1.Time{Time: time.Now()} - return waitState - } else { - r.Log.V(0).Info("Target failed for the last time", "target", targetStatus.TargetName) - return formolv1alpha1.Failure - } - case waitState: - // target is still busy with its current state. Wait until it is done. 
- r.Log.V(0).Info("Waiting for one target to finish", "waitState", waitState) - return "" - default: - if i == len(backupSession.Status.Targets)-1 { - r.Log.V(0).Info("Moving to next state", "nextState", nextState) - return nextState - } - } - } - return "" -} - -func (r *BackupSessionReconciler) checkInitialized(backupSession *formolv1alpha1.BackupSession, backupConf formolv1alpha1.BackupConfiguration) formolv1alpha1.SessionState { - return r.checkSessionState(backupSession, backupConf, "", formolv1alpha1.Initializing, formolv1alpha1.Running) -} - -func (r *BackupSessionReconciler) checkWaiting(backupSession *formolv1alpha1.BackupSession, backupConf formolv1alpha1.BackupConfiguration) formolv1alpha1.SessionState { - return r.checkSessionState(backupSession, backupConf, formolv1alpha1.Initialized, formolv1alpha1.Running, formolv1alpha1.Finalize) -} - -func (r *BackupSessionReconciler) checkSuccess(backupSession *formolv1alpha1.BackupSession, backupConf formolv1alpha1.BackupConfiguration) formolv1alpha1.SessionState { - return r.checkSessionState(backupSession, backupConf, formolv1alpha1.Waiting, formolv1alpha1.Finalize, formolv1alpha1.Success) -} diff --git a/controllers/restoresession_controller.go b/controllers/restoresession_controller.go index b9d8da1..fdf9b0b 100644 --- a/controllers/restoresession_controller.go +++ b/controllers/restoresession_controller.go @@ -18,8 +18,10 @@ package controllers import ( "context" + "time" - "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/api/errors" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" ctrl "sigs.k8s.io/controller-runtime" "sigs.k8s.io/controller-runtime/pkg/client" "sigs.k8s.io/controller-runtime/pkg/log" @@ -29,28 +31,53 @@ import ( // RestoreSessionReconciler reconciles a RestoreSession object type RestoreSessionReconciler struct { - client.Client - Scheme *runtime.Scheme + Session } 
//+kubebuilder:rbac:groups=formol.desmojim.fr,resources=restoresessions,verbs=get;list;watch;create;update;patch;delete //+kubebuilder:rbac:groups=formol.desmojim.fr,resources=restoresessions/status,verbs=get;update;patch //+kubebuilder:rbac:groups=formol.desmojim.fr,resources=restoresessions/finalizers,verbs=update -// Reconcile is part of the main kubernetes reconciliation loop which aims to -// move the current state of the cluster closer to the desired state. -// TODO(user): Modify the Reconcile function to compare the state specified by -// the RestoreSession object against the actual cluster state, and then -// perform operations to make the cluster state reflect the state specified by -// the user. -// -// For more details, check Reconcile and its Result here: -// - https://pkg.go.dev/sigs.k8s.io/controller-runtime@v0.13.1/pkg/reconcile func (r *RestoreSessionReconciler) Reconcile(ctx context.Context, req ctrl.Request) (ctrl.Result, error) { - _ = log.FromContext(ctx) - - // TODO(user): your logic here + r.Log = log.FromContext(ctx) + r.Context = ctx + restoreSession := formolv1alpha1.RestoreSession{} + err := r.Get(r.Context, req.NamespacedName, &restoreSession) + if err != nil { + if errors.IsNotFound(err) { + return ctrl.Result{}, nil + } + return ctrl.Result{}, err + } + backupSession := formolv1alpha1.BackupSession{ + Spec: restoreSession.Spec.BackupSessionRef.Spec, + Status: restoreSession.Spec.BackupSessionRef.Status, + } + backupConf := formolv1alpha1.BackupConfiguration{} + if err := r.Get(r.Context, client.ObjectKey{ + Namespace: backupSession.Spec.Ref.Namespace, + Name: backupSession.Spec.Ref.Name, + }, &restoreSession); err != nil { + r.Log.Error(err, "unable to get BackupConfiguration") + return ctrl.Result{}, err + } + var newSessionState formolv1alpha1.SessionState + switch restoreSession.Status.SessionState { + case formolv1alpha1.New: + newSessionState = r.initRestore(&restoreSession, backupConf) + case "": + newSessionState = 
formolv1alpha1.New + restoreSession.Status.StartTime = &metav1.Time{Time: time.Now()} + } + if newSessionState != "" { + r.Log.V(0).Info("Restore session needs a status update", "newSessionState", newSessionState, "RestoreSession", restoreSession) + restoreSession.Status.SessionState = newSessionState + if err := r.Status().Update(r.Context, &restoreSession); err != nil { + r.Log.Error(err, "unable to update RestoreSession.Status") + return ctrl.Result{}, err + } + } return ctrl.Result{}, nil } diff --git a/controllers/restoresession_controller_helper.go b/controllers/restoresession_controller_helper.go new file mode 100644 index 0000000..7013fb7 --- /dev/null +++ b/controllers/restoresession_controller_helper.go @@ -0,0 +1,9 @@ +package controllers + +import ( + formolv1alpha1 "github.com/desmo999r/formol/api/v1alpha1" +) + +func (r *RestoreSessionReconciler) initRestore(restoreSession *formolv1alpha1.RestoreSession, backupConf formolv1alpha1.BackupConfiguration) formolv1alpha1.SessionState { + return formolv1alpha1.Running +} diff --git a/controllers/session.go b/controllers/session.go new file mode 100644 index 0000000..afb5cbc --- /dev/null +++ b/controllers/session.go @@ -0,0 +1,100 @@ +package controllers + +import ( + "context" + formolv1alpha1 "github.com/desmo999r/formol/api/v1alpha1" + "github.com/go-logr/logr" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/fields" + "k8s.io/apimachinery/pkg/runtime" + "sigs.k8s.io/controller-runtime/pkg/client" + "time" +) + +type Session struct { + client.Client + Log logr.Logger + Scheme *runtime.Scheme + context.Context + Namespace string +} + +func (s Session) isBackupOngoing(backupConf formolv1alpha1.BackupConfiguration) bool { + backupSessionList := &formolv1alpha1.BackupSessionList{} + if err := s.List(s.Context, backupSessionList, + client.InNamespace(backupConf.Namespace), client.MatchingFieldsSelector{ + Selector: fields.SelectorFromSet(fields.Set{ + sessionState: "Running", + }), + 
}); err != nil { + s.Log.Error(err, "unable to get backupsessionlist") + return true + } + return len(backupSessionList.Items) > 0 +} + +func (s Session) initSession(backupConf formolv1alpha1.BackupConfiguration) []formolv1alpha1.TargetStatus { + tss := []formolv1alpha1.TargetStatus{} + for _, target := range backupConf.Spec.Targets { + s.Log.V(0).Info("Creating new target", "target", target.TargetName) + tss = append(tss, formolv1alpha1.TargetStatus{ + BackupType: target.BackupType, + TargetName: target.TargetName, + TargetKind: target.TargetKind, + SessionState: "", + StartTime: &metav1.Time{Time: time.Now()}, + Try: 1, + }) + } + return tss +} + +func (s Session) checkSessionState( + tss []formolv1alpha1.TargetStatus, + backupConf formolv1alpha1.BackupConfiguration, + currentState formolv1alpha1.SessionState, + waitState formolv1alpha1.SessionState, + nextState formolv1alpha1.SessionState) formolv1alpha1.SessionState { + for i, targetStatus := range tss { + s.Log.V(0).Info("Target status", "target", targetStatus.TargetName, "session state", targetStatus.SessionState) + switch targetStatus.SessionState { + case currentState: + s.Log.V(0).Info("Move target to waitState", "target", targetStatus.TargetName, "waitState", waitState) + tss[i].SessionState = waitState + return waitState + case formolv1alpha1.Failure: + if targetStatus.Try < backupConf.Spec.Targets[i].Retry { + s.Log.V(0).Info("Target failed. Try one more time", "target", targetStatus.TargetName, "waitState", waitState) + tss[i].SessionState = waitState + tss[i].Try++ + tss[i].StartTime = &metav1.Time{Time: time.Now()} + return waitState + } else { + s.Log.V(0).Info("Target failed for the last time", "target", targetStatus.TargetName) + return formolv1alpha1.Failure + } + case waitState: + // target is still busy with its current state. Wait until it is done. 
+ s.Log.V(0).Info("Waiting for one target to finish", "waitState", waitState) + return "" + default: + if i == len(tss)-1 { + s.Log.V(0).Info("Moving to next state", "nextState", nextState) + return nextState + } + } + } + return "" +} + +func (s Session) checkInitialized(tss []formolv1alpha1.TargetStatus, backupConf formolv1alpha1.BackupConfiguration) formolv1alpha1.SessionState { + return s.checkSessionState(tss, backupConf, "", formolv1alpha1.Initializing, formolv1alpha1.Running) +} + +func (s Session) checkWaiting(tss []formolv1alpha1.TargetStatus, backupConf formolv1alpha1.BackupConfiguration) formolv1alpha1.SessionState { + return s.checkSessionState(tss, backupConf, formolv1alpha1.Initialized, formolv1alpha1.Running, formolv1alpha1.Finalize) +} + +func (s Session) checkSuccess(tss []formolv1alpha1.TargetStatus, backupConf formolv1alpha1.BackupConfiguration) formolv1alpha1.SessionState { + return s.checkSessionState(tss, backupConf, formolv1alpha1.Waiting, formolv1alpha1.Finalize, formolv1alpha1.Success) +} diff --git a/main.go b/main.go index 0ae8782..6d7c5f7 100644 --- a/main.go +++ b/main.go @@ -97,15 +97,19 @@ func main() { os.Exit(1) } if err = (&controllers.BackupSessionReconciler{ - Client: mgr.GetClient(), - Scheme: mgr.GetScheme(), + Session: controllers.Session{ + Client: mgr.GetClient(), + Scheme: mgr.GetScheme(), + }, }).SetupWithManager(mgr); err != nil { setupLog.Error(err, "unable to create controller", "controller", "BackupSession") os.Exit(1) } if err = (&controllers.RestoreSessionReconciler{ - Client: mgr.GetClient(), - Scheme: mgr.GetScheme(), + Session: controllers.Session{ + Client: mgr.GetClient(), + Scheme: mgr.GetScheme(), + }, }).SetupWithManager(mgr); err != nil { setupLog.Error(err, "unable to create controller", "controller", "RestoreSession") os.Exit(1) From 7e007bfd44cc0041a74760c82b752aa2a93242fb Mon Sep 17 00:00:00 2001 From: Jean-Marc Andre Date: Tue, 21 Mar 2023 17:57:14 +0100 Subject: [PATCH 39/69] Prepared RestoreSession 
for OnlineKind with initContainer --- api/v1alpha1/backupconfiguration_types.go | 19 +++++++ .../backupconfiguration_controller_helpers.go | 51 +++++-------------- controllers/restoresession_controller.go | 28 +++++++++- 3 files changed, 59 insertions(+), 39 deletions(-) diff --git a/api/v1alpha1/backupconfiguration_types.go b/api/v1alpha1/backupconfiguration_types.go index 2ce1c14..c315ef2 100644 --- a/api/v1alpha1/backupconfiguration_types.go +++ b/api/v1alpha1/backupconfiguration_types.go @@ -17,7 +17,10 @@ limitations under the License. package v1alpha1 import ( + appsv1 "k8s.io/api/apps/v1" + corev1 "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "sigs.k8s.io/controller-runtime/pkg/client" ) // +kubebuilder:validation:Enum=Deployment;StatefulSet;Pod @@ -38,6 +41,22 @@ const ( JobKind BackupType = "Job" ) +func GetTargetObjects(kind TargetKind) (targetObject client.Object, targetPodSpec *corev1.PodSpec) { + switch kind { + case Deployment: + deployment := appsv1.Deployment{} + targetObject = &deployment + targetPodSpec = &deployment.Spec.Template.Spec + + case StatefulSet: + statefulSet := appsv1.StatefulSet{} + targetObject = &statefulSet + targetPodSpec = &statefulSet.Spec.Template.Spec + + } + return +} + const ( BACKUP_PREFIX_PATH = `backup` FORMOL_SHARED_VOLUME = `formol-shared` diff --git a/controllers/backupconfiguration_controller_helpers.go b/controllers/backupconfiguration_controller_helpers.go index c9dcb96..97dc74a 100644 --- a/controllers/backupconfiguration_controller_helpers.go +++ b/controllers/backupconfiguration_controller_helpers.go @@ -18,7 +18,6 @@ package controllers import ( "fmt" - appsv1 "k8s.io/api/apps/v1" batchv1 "k8s.io/api/batch/v1" corev1 "k8s.io/api/core/v1" rbacv1 "k8s.io/api/rbac/v1" @@ -130,36 +129,6 @@ func (r *BackupConfigurationReconciler) AddCronJob(backupConf formolv1alpha1.Bac } } -func (r *BackupConfigurationReconciler) getTargetObjects(kind formolv1alpha1.TargetKind, namespace string, name string) 
(targetObject client.Object, targetPodSpec *corev1.PodSpec, err error) { - switch kind { - case formolv1alpha1.Deployment: - deployment := appsv1.Deployment{} - if err = r.Get(r.Context, client.ObjectKey{ - Namespace: namespace, - Name: name, - }, &deployment); err != nil { - r.Log.Error(err, "cannot get deployment", "Deployment", name) - return - } - targetObject = &deployment - targetPodSpec = &deployment.Spec.Template.Spec - - case formolv1alpha1.StatefulSet: - statefulSet := appsv1.StatefulSet{} - if err = r.Get(r.Context, client.ObjectKey{ - Namespace: namespace, - Name: name, - }, &statefulSet); err != nil { - r.Log.Error(err, "cannot get StatefulSet", "StatefulSet", name) - return - } - targetObject = &statefulSet - targetPodSpec = &statefulSet.Spec.Template.Spec - - } - return -} - func (r *BackupConfigurationReconciler) DeleteSidecar(backupConf formolv1alpha1.BackupConfiguration) error { removeTags := func(podSpec *corev1.PodSpec, target formolv1alpha1.Target) { for i, container := range podSpec.Containers { @@ -190,9 +159,12 @@ func (r *BackupConfigurationReconciler) DeleteSidecar(backupConf formolv1alpha1. } r.Log.V(1).Info("Got Repository", "repo", repo) for _, target := range backupConf.Spec.Targets { - targetObject, targetPodSpec, err := r.getTargetObjects(target.TargetKind, backupConf.Namespace, target.TargetName) - if err != nil { - r.Log.Error(err, "unable to get target objects") + targetObject, targetPodSpec := formolv1alpha1.GetTargetObjects(target.TargetKind) + if err := r.Get(r.Context, client.ObjectKey{ + Namespace: backupConf.Namespace, + Name: target.TargetName, + }, targetObject); err != nil { + r.Log.Error(err, "cannot get target", "target", target.TargetName) return err } restoreContainers := []corev1.Container{} @@ -225,7 +197,7 @@ func (r *BackupConfigurationReconciler) DeleteSidecar(backupConf formolv1alpha1. 
} targetPodSpec.Volumes = restoreVolumes removeTags(targetPodSpec, target) - if err = r.Update(r.Context, targetObject); err != nil { + if err := r.Update(r.Context, targetObject); err != nil { r.Log.Error(err, "unable to remove sidecar", "targetObject", targetObject) return err } @@ -275,9 +247,12 @@ func (r *BackupConfigurationReconciler) addSidecar(backupConf formolv1alpha1.Bac Privileged: func() *bool { b := true; return &b }(), }, } - targetObject, targetPodSpec, err := r.getTargetObjects(target.TargetKind, backupConf.Namespace, target.TargetName) - if err != nil { - r.Log.Error(err, "unable to get target objects") + targetObject, targetPodSpec := formolv1alpha1.GetTargetObjects(target.TargetKind) + if err := r.Get(r.Context, client.ObjectKey{ + Namespace: backupConf.Namespace, + Name: target.TargetName, + }, targetObject); err != nil { + r.Log.Error(err, "cannot get target", "target", target.TargetName) return err } if !hasSidecar(targetPodSpec) { diff --git a/controllers/restoresession_controller.go b/controllers/restoresession_controller.go index fdf9b0b..ea2759a 100644 --- a/controllers/restoresession_controller.go +++ b/controllers/restoresession_controller.go @@ -65,7 +65,33 @@ func (r *RestoreSessionReconciler) Reconcile(ctx context.Context, req ctrl.Reque var newSessionState formolv1alpha1.SessionState switch restoreSession.Status.SessionState { case formolv1alpha1.New: - newSessionState = r.initRestore(&restoreSession, backupConf) + // Go through the Targets and create the corresponding TargetStatus. Move to Initializing. + if r.isBackupOngoing(backupConf) { + r.Log.V(0).Info("there is an ongoing backup. 
Let's reschedule this operation") + return ctrl.Result{ + RequeueAfter: 30 * time.Second, + }, nil + } + restoreSession.Status.Targets = r.initSession(backupConf) + newSessionState = formolv1alpha1.Initializing + case formolv1alpha1.Initializing: + // Wait for all the Targets to be in the Initialized state then move them to Running and move to Running myself. + // if one of the Target fails to initialize, move it back to New state and decrement Try. + // if try reaches 0, move all the Targets to Finalize and move myself to Failure. + newSessionState = r.checkInitialized(restoreSession.Status.Targets, backupConf) + case formolv1alpha1.Running: + // Wait for all the target to be in Waiting state then move them to the Finalize state. Move myself to Finalize. + // if one of the Target fails the backup, move it back to Running state and decrement Try. + // if try reaches 0, move all the Targets to Finalize and move myself to Failure. + newSessionState = r.checkWaiting(restoreSession.Status.Targets, backupConf) + case formolv1alpha1.Finalize: + // Check the TargetStatus of all the Targets. If they are all Success then move myself to Success. + // if one of the Target fails to Finalize, move it back to Finalize state and decrement Try. + // if try reaches 0, move myself to Success because the backup was a Success even if the Finalize failed. 
+ if newSessionState = r.checkSuccess(restoreSession.Status.Targets, backupConf); newSessionState == formolv1alpha1.Failure { + r.Log.V(0).Info("One of the target did not manage to Finalize but the backup is still a Success") + newSessionState = formolv1alpha1.Success + } case "": newSessionState = formolv1alpha1.New restoreSession.Status.StartTime = &metav1.Time{Time: time.Now()} From b2d80d66ae6bca9e1bc5d04737998806296f8a93 Mon Sep 17 00:00:00 2001 From: Jean-Marc ANDRE Date: Thu, 23 Mar 2023 22:19:13 +0100 Subject: [PATCH 40/69] Restore OnlineKind --- api/v1alpha1/backupsession_types.go | 9 ++++--- api/v1alpha1/common.go | 1 + api/v1alpha1/restoresession_types.go | 2 +- .../backupconfiguration_controller_helpers.go | 5 ++++ controllers/restoresession_controller.go | 2 +- test/03-restoresession.yaml | 27 ++++++++++--------- 6 files changed, 28 insertions(+), 18 deletions(-) diff --git a/api/v1alpha1/backupsession_types.go b/api/v1alpha1/backupsession_types.go index 03f69cd..03d7a1d 100644 --- a/api/v1alpha1/backupsession_types.go +++ b/api/v1alpha1/backupsession_types.go @@ -40,10 +40,11 @@ type TargetStatus struct { TargetName string `json:"targetName"` TargetKind `json:"targetKind"` SessionState `json:"state"` - SnapshotId string `json:"snapshotId"` - StartTime *metav1.Time `json:"startTime"` - Duration *metav1.Duration `json:"duration,omitempty"` - Try int `json:"try"` + // +optional + SnapshotId string `json:"snapshotId,omitempty"` + StartTime *metav1.Time `json:"startTime"` + Duration *metav1.Duration `json:"duration,omitempty"` + Try int `json:"try"` } // BackupSessionSpec defines the desired state of BackupSession diff --git a/api/v1alpha1/common.go b/api/v1alpha1/common.go index 3491639..ea61a01 100644 --- a/api/v1alpha1/common.go +++ b/api/v1alpha1/common.go @@ -1,6 +1,7 @@ package v1alpha1 const ( + RESTORECONTAINER_NAME string = "formol-restore" // the name of the sidecar container SIDECARCONTAINER_NAME string = "formol" // the name of the container we 
backup when there are more than 1 container in the pod diff --git a/api/v1alpha1/restoresession_types.go b/api/v1alpha1/restoresession_types.go index 289dacb..8011ddb 100644 --- a/api/v1alpha1/restoresession_types.go +++ b/api/v1alpha1/restoresession_types.go @@ -27,7 +27,7 @@ type BackupSessionRef struct { // RestoreSessionSpec defines the desired state of RestoreSession type RestoreSessionSpec struct { - BackupSessionRef `json:"backupSessionRef"` + BackupSessionRef `json:"backupSession"` } // RestoreSessionStatus defines the observed state of RestoreSession diff --git a/controllers/backupconfiguration_controller_helpers.go b/controllers/backupconfiguration_controller_helpers.go index 97dc74a..eeba15e 100644 --- a/controllers/backupconfiguration_controller_helpers.go +++ b/controllers/backupconfiguration_controller_helpers.go @@ -350,6 +350,11 @@ func (r *BackupConfigurationReconciler) createRBACSidecar(sa corev1.ServiceAccou Name: FORMOL_SIDECAR_ROLE, }, Rules: []rbacv1.PolicyRule{ + rbacv1.PolicyRule{ + Verbs: []string{"get", "list", "update"}, + APIGroups: []string{"apps"}, + Resources: []string{"deployments"}, + }, rbacv1.PolicyRule{ Verbs: []string{"get", "list", "watch"}, APIGroups: []string{"formol.desmojim.fr"}, diff --git a/controllers/restoresession_controller.go b/controllers/restoresession_controller.go index ea2759a..edbcf06 100644 --- a/controllers/restoresession_controller.go +++ b/controllers/restoresession_controller.go @@ -58,7 +58,7 @@ func (r *RestoreSessionReconciler) Reconcile(ctx context.Context, req ctrl.Reque if err := r.Get(r.Context, client.ObjectKey{ Namespace: backupSession.Spec.Ref.Namespace, Name: backupSession.Spec.Ref.Name, - }, &restoreSession); err != nil { + }, &backupConf); err != nil { r.Log.Error(err, "unable to get BackupConfiguration") return ctrl.Result{}, err } diff --git a/test/03-restoresession.yaml b/test/03-restoresession.yaml index 9cb2348..c5a9e78 100644 --- a/test/03-restoresession.yaml +++ 
b/test/03-restoresession.yaml @@ -10,21 +10,24 @@ spec: name: backup-demo namespace: demo status: - keep: monthly - startTime: "2021-05-01T22:15:28Z" + keep: "" + startTime: "2023-03-20T20:47:08Z" state: Success target: - - duration: 17.952754232s - kind: Sidecar - name: nginx-deployment - snapshotId: f411315c - startTime: "2021-05-01T22:15:34Z" + - backupType: Online + duration: 3.189468146s + snapshotId: 4730eaad + startTime: "2023-03-20T20:47:08Z" state: Success + targetKind: Deployment + targetName: apache-deployment try: 1 - - duration: 25.46747271s - kind: Job - name: backup-pg - snapshotId: ca673e5a - startTime: "2021-05-01T22:15:55Z" + - backupType: Job + duration: 7.509060051s + snapshotId: 4ddc5da1 + startTime: "2023-03-20T20:47:08Z" state: Success + targetKind: StatefulSet + targetName: postgres-demo try: 1 + From 9526cf404bdf8ef2826c32368f84acf52f4eaddd Mon Sep 17 00:00:00 2001 From: Jean-Marc Andre Date: Fri, 24 Mar 2023 11:31:16 +0100 Subject: [PATCH 41/69] code the final steps of the restore session --- controllers/restoresession_controller.go | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/controllers/restoresession_controller.go b/controllers/restoresession_controller.go index edbcf06..b4abb5f 100644 --- a/controllers/restoresession_controller.go +++ b/controllers/restoresession_controller.go @@ -92,6 +92,10 @@ func (r *RestoreSessionReconciler) Reconcile(ctx context.Context, req ctrl.Reque r.Log.V(0).Info("One of the target did not manage to Finalize but the backup is still a Success") newSessionState = formolv1alpha1.Success } + case formolv1alpha1.Success: + r.Log.V(0).Info("The restore was a success") + case formolv1alpha1.Failure: + r.Log.V(0).Info("The restore was a failure") case "": newSessionState = formolv1alpha1.New restoreSession.Status.StartTime = &metav1.Time{Time: time.Now()} From b7747b635d0253d39829d64ae96472da09c64297 Mon Sep 17 00:00:00 2001 From: Jean-Marc ANDRE Date: Fri, 24 Mar 2023 21:52:04 +0100 Subject: [PATCH 42/69] 
backup / restore of OnlineKind and JobKind work --- .../backupconfiguration_controller_helpers.go | 2 +- test/02-backupconf.yaml | 35 ++++++++----------- 2 files changed, 16 insertions(+), 21 deletions(-) diff --git a/controllers/backupconfiguration_controller_helpers.go b/controllers/backupconfiguration_controller_helpers.go index eeba15e..76107cb 100644 --- a/controllers/backupconfiguration_controller_helpers.go +++ b/controllers/backupconfiguration_controller_helpers.go @@ -154,7 +154,7 @@ func (r *BackupConfigurationReconciler) DeleteSidecar(backupConf formolv1alpha1. Namespace: backupConf.Namespace, Name: backupConf.Spec.Repository, }, &repo); err != nil { - r.Log.Error(err, "unable to get Repo") + r.Log.Error(err, "unable to get Repo", "repo", backupConf.Spec.Repository) return err } r.Log.V(1).Info("Got Repository", "repo", repo) diff --git a/test/02-backupconf.yaml b/test/02-backupconf.yaml index 1e37d74..da007bc 100644 --- a/test/02-backupconf.yaml +++ b/test/02-backupconf.yaml @@ -24,25 +24,6 @@ spec: --- apiVersion: formol.desmojim.fr/v1alpha1 kind: Function -metadata: - name: restore-pg - namespace: demo -spec: - name: restore-pg - image: desmo999r/formolcli:latest - args: ["postgres", "restore", "--hostname", $(PGHOST), "--database", $(PGDATABASE), "--username", $(PGUSER), "--password", $(PGPASSWD), "--file", "/output/backup-pg.sql"] - env: - - name: PGHOST - value: postgres - - name: PGDATABASE - value: demopostgres - - name: PGUSER - value: demopostgres - - name: PGPASSWD - value: password123! 
---- -apiVersion: formol.desmojim.fr/v1alpha1 -kind: Function metadata: name: with-envfrom namespace: demo @@ -80,6 +61,20 @@ spec: --- apiVersion: formol.desmojim.fr/v1alpha1 kind: Function +metadata: + name: restore-pg + namespace: demo +spec: + name: restore-pg + image: desmo999r/formolcli:latest + command: ["psql"] + args: ["--username", $(PGUSER), "--quiet", "--file", "/formol-shared/backup-pg.sql", "postgres"] + env: + - name: PGUSER + value: demopostgres +--- +apiVersion: formol.desmojim.fr/v1alpha1 +kind: Function metadata: name: maintenance-off namespace: demo @@ -132,7 +127,7 @@ spec: containers: - name: postgres job: - - name: backup-pg + - name: pg # - kind: Job # name: backup-pg # steps: From e73ef7c3f24ee612421b62963f443ff1a1e790dc Mon Sep 17 00:00:00 2001 From: Jean-Marc ANDRE Date: Sat, 25 Mar 2023 18:23:39 +0100 Subject: [PATCH 43/69] reworked Steps --- api/v1alpha1/backupconfiguration_types.go | 9 +++++++-- api/v1alpha1/zz_generated.deepcopy.go | 17 ++++++++++++++++- test/02-backupconf.yaml | 8 ++++---- 3 files changed, 27 insertions(+), 7 deletions(-) diff --git a/api/v1alpha1/backupconfiguration_types.go b/api/v1alpha1/backupconfiguration_types.go index c315ef2..7d8ffe1 100644 --- a/api/v1alpha1/backupconfiguration_types.go +++ b/api/v1alpha1/backupconfiguration_types.go @@ -63,9 +63,14 @@ const ( ) type Step struct { - Name string `json:"name"` // +optional - Finalize *bool `json:"finalize"` + Finalize *string `json:"finalize,omitempty"` + // +optional + Initialize *string `json:"initialize,omitempty"` + // +optional + Backup *string `json:"backup,omitempty"` + // +optional + Restore *string `json:"restore,omitempty"` } type TargetContainer struct { diff --git a/api/v1alpha1/zz_generated.deepcopy.go b/api/v1alpha1/zz_generated.deepcopy.go index a4b093e..b93466a 100644 --- a/api/v1alpha1/zz_generated.deepcopy.go +++ b/api/v1alpha1/zz_generated.deepcopy.go @@ -591,7 +591,22 @@ func (in *Step) DeepCopyInto(out *Step) { *out = *in if in.Finalize != 
nil { in, out := &in.Finalize, &out.Finalize - *out = new(bool) + *out = new(string) + **out = **in + } + if in.Initialize != nil { + in, out := &in.Initialize, &out.Initialize + *out = new(string) + **out = **in + } + if in.Backup != nil { + in, out := &in.Backup, &out.Backup + *out = new(string) + **out = **in + } + if in.Restore != nil { + in, out := &in.Restore, &out.Restore + *out = new(string) **out = **in } } diff --git a/test/02-backupconf.yaml b/test/02-backupconf.yaml index da007bc..8d5150c 100644 --- a/test/02-backupconf.yaml +++ b/test/02-backupconf.yaml @@ -116,9 +116,8 @@ spec: containers: - name: apache steps: - - name: maintenance-on - - name: maintenance-off - finalize: true + - initialize: maintenance-on + - finalize: maintenance-off paths: - /data - backupType: Job @@ -127,7 +126,8 @@ spec: containers: - name: postgres job: - - name: pg + - backup: backup-pg + restore: restore-pg # - kind: Job # name: backup-pg # steps: From f890962221bf05fb54c9d46d839957a59d644cc8 Mon Sep 17 00:00:00 2001 From: Jean-Marc ANDRE Date: Sat, 25 Mar 2023 21:22:28 +0100 Subject: [PATCH 44/69] backupsession housekeeping. 
delete the old backup and the corresponding restic snapshots --- controllers/backupsession_controller.go | 22 ++- .../backupsession_controller_helper.go | 146 ++++++++++++++++++ test/02-backupconf.yaml | 2 +- 3 files changed, 161 insertions(+), 9 deletions(-) create mode 100644 controllers/backupsession_controller_helper.go diff --git a/controllers/backupsession_controller.go b/controllers/backupsession_controller.go index 6bd3210..f2460cc 100644 --- a/controllers/backupsession_controller.go +++ b/controllers/backupsession_controller.go @@ -57,8 +57,21 @@ func (r *BackupSessionReconciler) Reconcile(ctx context.Context, req ctrl.Reques } return ctrl.Result{}, err } + // we might need the BackupConfiguration is the BackupSession + // is being deleted + backupConf := formolv1alpha1.BackupConfiguration{} + if err := r.Get(ctx, client.ObjectKey{ + Namespace: backupSession.Spec.Ref.Namespace, + Name: backupSession.Spec.Ref.Name, + }, &backupConf); err != nil { + r.Log.Error(err, "unable to get BackupConfiguration") + return ctrl.Result{}, err + } if !backupSession.ObjectMeta.DeletionTimestamp.IsZero() { r.Log.V(0).Info("BackupSession is being deleted") + if err := r.deleteSnapshots(backupSession, backupConf); err != nil { + r.Log.Error(err, "unable to delete the BackupSession snapshots") + } if controllerutil.ContainsFinalizer(&backupSession, finalizerName) { controllerutil.RemoveFinalizer(&backupSession, finalizerName) err := r.Update(ctx, &backupSession) @@ -76,14 +89,6 @@ func (r *BackupSessionReconciler) Reconcile(ctx context.Context, req ctrl.Reques } return ctrl.Result{}, err } - backupConf := formolv1alpha1.BackupConfiguration{} - if err := r.Get(ctx, client.ObjectKey{ - Namespace: backupSession.Spec.Ref.Namespace, - Name: backupSession.Spec.Ref.Name, - }, &backupConf); err != nil { - r.Log.Error(err, "unable to get BackupConfiguration") - return ctrl.Result{}, err - } var newSessionState formolv1alpha1.SessionState switch backupSession.Status.SessionState { @@ 
-116,6 +121,7 @@ func (r *BackupSessionReconciler) Reconcile(ctx context.Context, req ctrl.Reques newSessionState = formolv1alpha1.Success } case formolv1alpha1.Success: + r.cleanupSessions(backupConf) r.Log.V(0).Info("Backup was a success") case formolv1alpha1.Failure: diff --git a/controllers/backupsession_controller_helper.go b/controllers/backupsession_controller_helper.go new file mode 100644 index 0000000..052743c --- /dev/null +++ b/controllers/backupsession_controller_helper.go @@ -0,0 +1,146 @@ +package controllers + +import ( + formolv1alpha1 "github.com/desmo999r/formol/api/v1alpha1" + batchv1 "k8s.io/api/batch/v1" + corev1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/fields" + "sigs.k8s.io/controller-runtime/pkg/client" + "sort" + "strings" + "time" +) + +const ( + JOBTTL int32 = 7200 +) + +func (r *BackupSessionReconciler) cleanupSessions(backupConf formolv1alpha1.BackupConfiguration) { + backupSessionList := formolv1alpha1.BackupSessionList{} + if err := r.List(r.Context, &backupSessionList, client.InNamespace(backupConf.Namespace), client.MatchingFieldsSelector{Selector: fields.SelectorFromSet(fields.Set{sessionState: string(formolv1alpha1.Success)})}); err != nil { + r.Log.Error(err, "unable to get backupsessionlist") + return + } + if len(backupSessionList.Items) < 2 { + // Not enough backupSession to proceed + r.Log.V(1).Info("Not enough successful backup jobs") + return + } + + sort.Slice(backupSessionList.Items, func(i, j int) bool { + return backupSessionList.Items[i].Status.StartTime.Time.Unix() > backupSessionList.Items[j].Status.StartTime.Time.Unix() + }) + + type KeepBackup struct { + Counter int32 + Last time.Time + } + + var lastBackups, dailyBackups, weeklyBackups, monthlyBackups, yearlyBackups KeepBackup + lastBackups.Counter = backupConf.Spec.Keep.Last + dailyBackups.Counter = backupConf.Spec.Keep.Daily + weeklyBackups.Counter = backupConf.Spec.Keep.Weekly + monthlyBackups.Counter = 
backupConf.Spec.Keep.Monthly + yearlyBackups.Counter = backupConf.Spec.Keep.Yearly + for _, session := range backupSessionList.Items { + if session.Spec.Ref.Name != backupConf.Name { + continue + } + deleteSession := true + keep := []string{} + if lastBackups.Counter > 0 { + r.Log.V(1).Info("Keep backup", "last", session.Status.StartTime) + lastBackups.Counter-- + keep = append(keep, "last") + deleteSession = false + } + if dailyBackups.Counter > 0 { + if session.Status.StartTime.Time.YearDay() != dailyBackups.Last.YearDay() { + r.Log.V(1).Info("Keep backup", "daily", session.Status.StartTime) + dailyBackups.Counter-- + dailyBackups.Last = session.Status.StartTime.Time + keep = append(keep, "daily") + deleteSession = false + } + } + if weeklyBackups.Counter > 0 { + if session.Status.StartTime.Time.Weekday().String() == "Sunday" && session.Status.StartTime.Time.YearDay() != weeklyBackups.Last.YearDay() { + r.Log.V(1).Info("Keep backup", "weekly", session.Status.StartTime) + weeklyBackups.Counter-- + weeklyBackups.Last = session.Status.StartTime.Time + keep = append(keep, "weekly") + deleteSession = false + } + } + if monthlyBackups.Counter > 0 { + if session.Status.StartTime.Time.Day() == 1 && session.Status.StartTime.Time.Month() != monthlyBackups.Last.Month() { + r.Log.V(1).Info("Keep backup", "monthly", session.Status.StartTime) + monthlyBackups.Counter-- + monthlyBackups.Last = session.Status.StartTime.Time + keep = append(keep, "monthly") + deleteSession = false + } + } + if yearlyBackups.Counter > 0 { + if session.Status.StartTime.Time.YearDay() == 1 && session.Status.StartTime.Time.Year() != yearlyBackups.Last.Year() { + r.Log.V(1).Info("Keep backup", "yearly", session.Status.StartTime) + yearlyBackups.Counter-- + yearlyBackups.Last = session.Status.StartTime.Time + keep = append(keep, "yearly") + deleteSession = false + } + } + if deleteSession { + r.Log.V(1).Info("Delete session", "delete", session.Status.StartTime) + if err := r.Delete(r.Context, 
&session); err != nil { + r.Log.Error(err, "unable to delete backupsession", "session", session.Name) + // we don't return anything, we keep going + } + } else { + session.Status.Keep = strings.Join(keep, ",") // + " " + time.Now().Format("2006 Jan 02 15:04:05 -0700 MST") + if err := r.Status().Update(r.Context, &session); err != nil { + r.Log.Error(err, "unable to update session status", "session", session) + } + } + } +} + +func (r *BackupSessionReconciler) deleteSnapshots(backupSession formolv1alpha1.BackupSession, backupConf formolv1alpha1.BackupConfiguration) error { + snapshots := []corev1.Container{} + for _, target := range backupSession.Status.Targets { + if target.SnapshotId != "" { + snapshots = append(snapshots, corev1.Container{ + Name: target.TargetName, + Image: backupConf.Spec.Image, + Args: []string{"snapshot", "delete", "--namespace", backupConf.Namespace, "--name", backupConf.Name, "--snapshot-id", target.SnapshotId}, + }) + } + } + if len(snapshots) > 0 { + job := batchv1.Job{ + ObjectMeta: metav1.ObjectMeta{ + GenerateName: "delete-" + backupSession.Name + "-", + Namespace: backupSession.Namespace, + }, + Spec: batchv1.JobSpec{ + TTLSecondsAfterFinished: func() *int32 { ttl := JOBTTL; return &ttl }(), + Template: corev1.PodTemplateSpec{ + Spec: corev1.PodSpec{ + // The snapshot deletions have to be sequential + // otherwise the repository will be locked by restic and it won't work. 
+ InitContainers: snapshots[1:], + Containers: []corev1.Container{snapshots[0]}, + RestartPolicy: corev1.RestartPolicyOnFailure, + }, + }, + }, + } + r.Log.V(0).Info("creating a job to delete the BackupSession restic snapshots", "backupSession", backupSession) + if err := r.Create(r.Context, &job); err != nil { + r.Log.Error(err, "unable to create the job") + return err + } + } + return nil +} diff --git a/test/02-backupconf.yaml b/test/02-backupconf.yaml index 8d5150c..90965b1 100644 --- a/test/02-backupconf.yaml +++ b/test/02-backupconf.yaml @@ -104,7 +104,7 @@ spec: repository: repo-minio schedule: "15 * * * *" keep: - last: 5 + last: 2 daily: 2 weekly: 2 monthly: 6 From ab775cab585e773e0381c57f25894eb986f5be88 Mon Sep 17 00:00:00 2001 From: Jean-Marc Andre Date: Fri, 31 Mar 2023 16:28:01 +0200 Subject: [PATCH 45/69] rearranged the tests --- test/00-setup.yaml | 72 ------------------- .../backupconf.yaml} | 0 .../deployment.yaml} | 0 test/{ => common}/minio.yaml | 0 .../restoresession.yaml} | 16 ++--- test/common/setup.yaml | 32 +++++++++ test/minikube/003-minio.yaml | 1 + test/minikube/005-setup.yaml | 1 + test/minikube/010-pvc.yaml | 14 ++++ test/minikube/020-deployment.yaml | 1 + test/minikube/030-backupconf.yaml | 1 + test/minikube/040-restoresession.yaml | 1 + 12 files changed, 59 insertions(+), 80 deletions(-) delete mode 100644 test/00-setup.yaml rename test/{02-backupconf.yaml => common/backupconf.yaml} (100%) rename test/{01-deployment.yaml => common/deployment.yaml} (100%) rename test/{ => common}/minio.yaml (100%) rename test/{03-restoresession.yaml => common/restoresession.yaml} (66%) create mode 100644 test/common/setup.yaml create mode 120000 test/minikube/003-minio.yaml create mode 120000 test/minikube/005-setup.yaml create mode 100644 test/minikube/010-pvc.yaml create mode 120000 test/minikube/020-deployment.yaml create mode 120000 test/minikube/030-backupconf.yaml create mode 120000 test/minikube/040-restoresession.yaml diff --git 
a/test/00-setup.yaml b/test/00-setup.yaml deleted file mode 100644 index 7eaa8f8..0000000 --- a/test/00-setup.yaml +++ /dev/null @@ -1,72 +0,0 @@ ---- -apiVersion: v1 -kind: Namespace -metadata: - name: demo ---- -apiVersion: v1 -kind: Secret -metadata: - name: regcred - namespace: demo -type: kubernetes.io/dockerconfigjson -data: - .dockerconfigjson: eyJhdXRocyI6eyJodHRwczovL2luZGV4LmRvY2tlci5pby92MS8iOnsidXNlcm5hbWUiOiJkZXNtbzk5OXIiLCJwYXNzd29yZCI6IlU5QXNlVGF5cUY5UlJCd0l2Q1k0IiwiZW1haWwiOiJqZWFubWFyYy5qaW0uYW5kcmVAZ21haWwuY29tIiwiYXV0aCI6IlpHVnpiVzg1T1RseU9sVTVRWE5sVkdGNWNVWTVVbEpDZDBsMlExazAifX19 ---- -apiVersion: v1 -kind: Secret -metadata: - namespace: demo - name: demo-chap-secret -type: "kubernetes.io/iscsi-chap" -data: - discovery.sendtargets.auth.username: ZGVtbw== - discovery.sendtargets.auth.password: VHJtK1lZaXZvMUNZSGszcGFGVWMrcTdCMmdJPQo= - node.session.auth.username: ZGVtbw== - node.session.auth.password: VHJtK1lZaXZvMUNZSGszcGFGVWMrcTdCMmdJPQo= ---- -apiVersion: v1 -kind: Secret -metadata: - namespace: demo - name: with-envfrom-secret -data: - title: dmVyeXNlY3JldA== ---- -apiVersion: v1 -kind: PersistentVolume -metadata: - name: demo-pv - namespace: demo -spec: - storageClassName: manual - capacity: - storage: 50Mi - accessModes: - - ReadWriteOnce - hostPath: - path: /tmp/demo - type: DirectoryOrCreate ---- -apiVersion: v1 -kind: PersistentVolumeClaim -metadata: - name: demo-pvc - namespace: demo -spec: - storageClassName: manual - accessModes: - - ReadWriteOnce - resources: - requests: - storage: 50Mi ---- -apiVersion: v1 -kind: Secret -metadata: - name: secret-minio - namespace: demo -data: - RESTIC_PASSWORD: bHIyOXhtOTU= - AWS_ACCESS_KEY_ID: SjV4V2NqQ2RzckxpZ2lEZA== - AWS_SECRET_ACCESS_KEY: OVdBMnN1djVtanRLRTdnMkRjNWl5WWtkbDNobGV5UU8= diff --git a/test/02-backupconf.yaml b/test/common/backupconf.yaml similarity index 100% rename from test/02-backupconf.yaml rename to test/common/backupconf.yaml diff --git a/test/01-deployment.yaml 
b/test/common/deployment.yaml similarity index 100% rename from test/01-deployment.yaml rename to test/common/deployment.yaml diff --git a/test/minio.yaml b/test/common/minio.yaml similarity index 100% rename from test/minio.yaml rename to test/common/minio.yaml diff --git a/test/03-restoresession.yaml b/test/common/restoresession.yaml similarity index 66% rename from test/03-restoresession.yaml rename to test/common/restoresession.yaml index c5a9e78..3041560 100644 --- a/test/03-restoresession.yaml +++ b/test/common/restoresession.yaml @@ -10,22 +10,22 @@ spec: name: backup-demo namespace: demo status: - keep: "" - startTime: "2023-03-20T20:47:08Z" + keep: last + startTime: "2023-03-31T14:21:27Z" state: Success target: - backupType: Online - duration: 3.189468146s - snapshotId: 4730eaad - startTime: "2023-03-20T20:47:08Z" + duration: 1.734652641s + snapshotId: 1d2baf88 + startTime: "2023-03-31T14:21:27Z" state: Success targetKind: Deployment targetName: apache-deployment try: 1 - backupType: Job - duration: 7.509060051s - snapshotId: 4ddc5da1 - startTime: "2023-03-20T20:47:08Z" + duration: 3.072021762s + snapshotId: 748622a4 + startTime: "2023-03-31T14:21:27Z" state: Success targetKind: StatefulSet targetName: postgres-demo diff --git a/test/common/setup.yaml b/test/common/setup.yaml new file mode 100644 index 0000000..7c0f52c --- /dev/null +++ b/test/common/setup.yaml @@ -0,0 +1,32 @@ +--- +apiVersion: v1 +kind: Namespace +metadata: + name: demo +--- +apiVersion: v1 +kind: Secret +metadata: + name: regcred + namespace: demo +type: kubernetes.io/dockerconfigjson +data: + .dockerconfigjson: eyJhdXRocyI6eyJodHRwczovL2luZGV4LmRvY2tlci5pby92MS8iOnsidXNlcm5hbWUiOiJkZXNtbzk5OXIiLCJwYXNzd29yZCI6IlU5QXNlVGF5cUY5UlJCd0l2Q1k0IiwiZW1haWwiOiJqZWFubWFyYy5qaW0uYW5kcmVAZ21haWwuY29tIiwiYXV0aCI6IlpHVnpiVzg1T1RseU9sVTVRWE5sVkdGNWNVWTVVbEpDZDBsMlExazAifX19 +--- +apiVersion: v1 +kind: Secret +metadata: + namespace: demo + name: with-envfrom-secret +data: + title: dmVyeXNlY3JldA== 
+--- +apiVersion: v1 +kind: Secret +metadata: + name: secret-minio + namespace: demo +data: + RESTIC_PASSWORD: bHIyOXhtOTU= + AWS_ACCESS_KEY_ID: SjV4V2NqQ2RzckxpZ2lEZA== + AWS_SECRET_ACCESS_KEY: OVdBMnN1djVtanRLRTdnMkRjNWl5WWtkbDNobGV5UU8= diff --git a/test/minikube/003-minio.yaml b/test/minikube/003-minio.yaml new file mode 120000 index 0000000..52a9418 --- /dev/null +++ b/test/minikube/003-minio.yaml @@ -0,0 +1 @@ +../common/minio.yaml \ No newline at end of file diff --git a/test/minikube/005-setup.yaml b/test/minikube/005-setup.yaml new file mode 120000 index 0000000..2322cfc --- /dev/null +++ b/test/minikube/005-setup.yaml @@ -0,0 +1 @@ +../common/setup.yaml \ No newline at end of file diff --git a/test/minikube/010-pvc.yaml b/test/minikube/010-pvc.yaml new file mode 100644 index 0000000..3151967 --- /dev/null +++ b/test/minikube/010-pvc.yaml @@ -0,0 +1,13 @@ +--- +apiVersion: v1 +kind: PersistentVolumeClaim +metadata: + name: demo-pvc + namespace: demo +spec: + accessModes: + - ReadWriteOnce + resources: + requests: + storage: 50Mi + storageClassName: csi-hostpath-sc diff --git a/test/minikube/020-deployment.yaml b/test/minikube/020-deployment.yaml new file mode 120000 index 0000000..2eec0f2 --- /dev/null +++ b/test/minikube/020-deployment.yaml @@ -0,0 +1 @@ +../common/deployment.yaml \ No newline at end of file diff --git a/test/minikube/030-backupconf.yaml b/test/minikube/030-backupconf.yaml new file mode 120000 index 0000000..19a1daf --- /dev/null +++ b/test/minikube/030-backupconf.yaml @@ -0,0 +1 @@ +../common/backupconf.yaml \ No newline at end of file diff --git a/test/minikube/040-restoresession.yaml b/test/minikube/040-restoresession.yaml new file mode 120000 index 0000000..a66989b --- /dev/null +++ b/test/minikube/040-restoresession.yaml @@ -0,0 +1 @@ +common/restoresession.yaml \ No newline at end of file From 9aa127560516fd0fdf2bddd4b6aec760036b6ec8 Mon Sep 17 00:00:00 2001 From: Jean-Marc Andre Date: Fri, 31 Mar 2023 
16:29:14 +0200 Subject: [PATCH 46/69] removed backup files --- test/00-setup.yaml~ | 173 --------------------------------------- test/01-deployment.yaml~ | 92 --------------------- test/02-backupconf.yaml~ | 35 -------- 3 files changed, 300 deletions(-) delete mode 100644 test/00-setup.yaml~ delete mode 100644 test/01-deployment.yaml~ delete mode 100644 test/02-backupconf.yaml~ diff --git a/test/00-setup.yaml~ b/test/00-setup.yaml~ deleted file mode 100644 index b62e6b9..0000000 --- a/test/00-setup.yaml~ +++ /dev/null @@ -1,173 +0,0 @@ ---- -apiVersion: v1 -kind: Namespace -metadata: - name: demo ---- -apiVersion: v1 -kind: Secret -metadata: - name: regcred - namespace: demo -type: kubernetes.io/dockerconfigjson -data: - .dockerconfigjson: eyJhdXRocyI6eyJodHRwczovL2luZGV4LmRvY2tlci5pby92MS8iOnsidXNlcm5hbWUiOiJkZXNtbzk5OXIiLCJwYXNzd29yZCI6IlU5QXNlVGF5cUY5UlJCd0l2Q1k0IiwiZW1haWwiOiJqZWFubWFyYy5qaW0uYW5kcmVAZ21haWwuY29tIiwiYXV0aCI6IlpHVnpiVzg1T1RseU9sVTVRWE5sVkdGNWNVWTVVbEpDZDBsMlExazAifX19 ---- -apiVersion: v1 -kind: Secret -metadata: - namespace: demo - name: demo-chap-secret -type: "kubernetes.io/iscsi-chap" -data: - discovery.sendtargets.auth.username: ZGVtbw== - discovery.sendtargets.auth.password: VHJtK1lZaXZvMUNZSGszcGFGVWMrcTdCMmdJPQo= - node.session.auth.username: ZGVtbw== - node.session.auth.password: VHJtK1lZaXZvMUNZSGszcGFGVWMrcTdCMmdJPQo= ---- -apiVersion: v1 -kind: Secret -metadata: - namespace: demo - name: with-envfrom-secret -data: - title: dmVyeXNlY3JldA== ---- -apiVersion: v1 -kind: PersistentVolume -metadata: - name: demo-pv - namespace: demo -spec: - storageClassName: manual - capacity: - storage: 50Mi - accessModes: - - ReadWriteOnce - hostPath: - path: /tmp/demo - type: DirectoryOrCreate ---- -apiVersion: v1 -kind: PersistentVolumeClaim -metadata: - name: demo-pvc - namespace: demo -spec: - storageClassName: manual - accessModes: - - ReadWriteOnce - resources: - requests: - storage: 50Mi ---- -apiVersion: v1 -kind: Secret -metadata: - name: 
secret-minio - namespace: demo -data: - RESTIC_PASSWORD: bHIyOXhtOTU= - AWS_ACCESS_KEY_ID: OWFTSXZBSEVzWlNVMmkyTU9zVGxWSk1lL1NjPQ== - AWS_SECRET_ACCESS_KEY: WVN5ck9ncVllcjBWNFNLdlVOcmx2OGhjTllhZGZuN2xaNjBIaXRlL3djWT0= ---- -apiVersion: formol.desmojim.fr/v1alpha1 -kind: Repo -metadata: - name: repo-empty - namespace: demo -spec: - backend: - repositorySecrets: secret-minio ---- -apiVersion: formol.desmojim.fr/v1alpha1 -kind: Repo -metadata: - name: repo-minio - namespace: demo -spec: - backend: - s3: - server: raid5.desmojim.fr:9000 - bucket: testbucket2 - repositorySecrets: secret-minio ---- -apiVersion: formol.desmojim.fr/v1alpha1 -kind: Function -metadata: - name: restore-pg - namespace: demo -spec: - name: restore-pg - image: desmo999r/formolcli:latest - args: ["postgres", "restore", "--hostname", $(PGHOST), "--database", $(PGDATABASE), "--username", $(PGUSER), "--password", $(PGPASSWD), "--file", "/output/backup-pg.sql"] - env: - - name: PGHOST - value: postgres - - name: PGDATABASE - value: demopostgres - - name: PGUSER - value: demopostgres - - name: PGPASSWD - value: password123! 
---- -apiVersion: formol.desmojim.fr/v1alpha1 -kind: Function -metadata: - name: with-envfrom - namespace: demo -spec: - name: with-envfrom - command: ["touch", $(title)] - envFrom: - - secretRef: - name: with-envfrom-secret ---- -apiVersion: formol.desmojim.fr/v1alpha1 -kind: Function -metadata: - name: with-env - namespace: demo -spec: - name: with-env - command: ["touch", $(TESTFILE)] - env: - - name: TESTFILE - value: /data/testfile ---- -apiVersion: formol.desmojim.fr/v1alpha1 -kind: Function -metadata: - name: backup-pg - namespace: demo -spec: - name: backup-pg - image: desmo999r/formolcli:latest - args: ["postgres", "backup", "--hostname", $(PGHOST), "--database", $(PGDATABASE), "--username", $(PGUSER), "--password", $(PGPASSWD), "--file", "/output/backup-pg.sql"] - env: - - name: PGHOST - value: postgres - - name: PGDATABASE - value: demopostgres - - name: PGUSER - value: demopostgres - - name: PGPASSWD - value: password123! ---- -apiVersion: formol.desmojim.fr/v1alpha1 -kind: Function -metadata: - name: maintenance-off - namespace: demo -spec: - name: maintenance-off - command: ["/bin/bash", "-c", "echo $(date +%Y/%m/%d-%H:%M:%S) maintenance-off >> /data/logs.txt"] ---- -apiVersion: formol.desmojim.fr/v1alpha1 -kind: Function -metadata: - name: maintenance-on - namespace: demo -spec: - name: maintenance-on - command: ["/bin/bash", "-c", "echo $(date +%Y/%m/%d-%H:%M:%S) maintenance-on >> /data/logs.txt"] diff --git a/test/01-deployment.yaml~ b/test/01-deployment.yaml~ deleted file mode 100644 index f6e9cc3..0000000 --- a/test/01-deployment.yaml~ +++ /dev/null @@ -1,92 +0,0 @@ ---- -apiVersion: apps/v1 -kind: Deployment -metadata: - name: nginx-deployment - namespace: demo - labels: - app: nginx -spec: - replicas: 1 - strategy: - type: Recreate - selector: - matchLabels: - app: nginx - template: - metadata: - labels: - app: nginx - spec: - imagePullSecrets: - - name: regcred - containers: - - name: nginx - image: nginx:1.14.2 - ports: - - containerPort: 80 
- volumeMounts: - - name: demo-data - mountPath: /data - volumes: - - name: demo-data - persistentVolumeClaim: - claimName: demo-pvc ---- -apiVersion: v1 -kind: ConfigMap -metadata: - name: postgres-config-demo - namespace: demo - labels: - app: postgres -data: - POSTGRES_DB: demopostgres - POSTGRES_USER: demopostgres - POSTGRES_PASSWORD: password123! ---- -apiVersion: v1 -kind: Service -metadata: - name: postgres - namespace: demo - labels: - app: postgres -spec: - ports: - - port: 5432 - name: postgres - clusterIP: None - selector: - app: postgres ---- -apiVersion: apps/v1 -kind: StatefulSet -metadata: - name: postgres-demo - namespace: demo -spec: - serviceName: "postgres" - replicas: 1 - selector: - matchLabels: - app: postgres - template: - metadata: - labels: - app: postgres - spec: - containers: - - name: postgres - image: postgres:12 - envFrom: - - configMapRef: - name: postgres-config-demo - ports: - - containerPort: 5432 - name: postgredb - volumeMounts: - - name: postgredb - mountPath: /var/lib/postgresql/data - volumes: - - name: postgredb diff --git a/test/02-backupconf.yaml~ b/test/02-backupconf.yaml~ deleted file mode 100644 index d9d4d03..0000000 --- a/test/02-backupconf.yaml~ +++ /dev/null @@ -1,35 +0,0 @@ ---- -apiVersion: formol.desmojim.fr/v1alpha1 -kind: BackupConfiguration -metadata: - name: backup-demo - namespace: demo -spec: - suspend: true - image: desmo999r/formolcli:0.3.2 - repository: repo-empty - schedule: "15 * * * *" - keep: - last: 5 - daily: 2 - weekly: 2 - monthly: 6 - yearly: 3 - targets: - - backupType: Online - targetKind: Deployment - targetName: nginx-deployment - containers: - - name: nginx - steps: - - name: maintenance-on - - name: with-env - - name: with-envfrom - - name: maintenance-off - finalize: true - paths: - - /data -# - kind: Job -# name: backup-pg -# steps: -# - name: backup-pg From 6e0cec44059c82815895561b4d086feb725a4785 Mon Sep 17 00:00:00 2001 From: Jean-Marc ANDRE Date: Fri, 31 Mar 2023 20:27:21 +0200 
Subject: [PATCH 47/69] ignore 040-restoresession.yaml --- .gitignore | 1 + test/minikube/040-restoresession.yaml | 1 - 2 files changed, 1 insertion(+), 1 deletion(-) delete mode 120000 test/minikube/040-restoresession.yaml diff --git a/.gitignore b/.gitignore index d181f1b..2a862de 100644 --- a/.gitignore +++ b/.gitignore @@ -28,3 +28,4 @@ config/crd/bases test/restic test/password testbin/* +test/minikube/040-restoresession.yaml diff --git a/test/minikube/040-restoresession.yaml b/test/minikube/040-restoresession.yaml deleted file mode 120000 index a66989b..0000000 --- a/test/minikube/040-restoresession.yaml +++ /dev/null @@ -1 +0,0 @@ -common/restoresession.yaml \ No newline at end of file From e54d8abdb9a952cad043a5d603f37309e341a79d Mon Sep 17 00:00:00 2001 From: Jean-Marc ANDRE Date: Sun, 2 Apr 2023 22:04:05 +0200 Subject: [PATCH 48/69] backupconfig with snapshots --- test/minikube/030-backupconf-snapshot.yaml | 134 +++++++++++++++++++++ 1 file changed, 134 insertions(+) create mode 100644 test/minikube/030-backupconf-snapshot.yaml diff --git a/test/minikube/030-backupconf-snapshot.yaml b/test/minikube/030-backupconf-snapshot.yaml new file mode 100644 index 0000000..45e4c64 --- /dev/null +++ b/test/minikube/030-backupconf-snapshot.yaml @@ -0,0 +1,134 @@ +--- +apiVersion: formol.desmojim.fr/v1alpha1 +kind: Repo +metadata: + name: repo-local + namespace: demo +spec: + backend: + local: + emptyDir: + repositorySecrets: secret-minio +--- +apiVersion: formol.desmojim.fr/v1alpha1 +kind: Repo +metadata: + name: repo-minio + namespace: demo +spec: + backend: + s3: + server: minio-svc.minio:9000 + bucket: backups + repositorySecrets: secret-minio +--- +apiVersion: formol.desmojim.fr/v1alpha1 +kind: Function +metadata: + name: with-envfrom + namespace: demo +spec: + name: with-envfrom + command: ["touch", $(title)] + envFrom: + - secretRef: + name: with-envfrom-secret +--- +apiVersion: formol.desmojim.fr/v1alpha1 +kind: Function +metadata: + name: with-env + namespace: 
demo +spec: + name: with-env + command: ["touch", $(TESTFILE)] + env: + - name: TESTFILE + value: /data/testfile +--- +apiVersion: formol.desmojim.fr/v1alpha1 +kind: Function +metadata: + name: backup-pg + namespace: demo +spec: + name: backup-pg + command: ["pg_dumpall"] + args: ["--username", $(PGUSER), "--clean", "--if-exists", "--inserts", "--file", "/formol-shared/backup-pg.sql"] + env: + - name: PGUSER + value: demopostgres +--- +apiVersion: formol.desmojim.fr/v1alpha1 +kind: Function +metadata: + name: restore-pg + namespace: demo +spec: + name: restore-pg + image: desmo999r/formolcli:latest + command: ["psql"] + args: ["--username", $(PGUSER), "--quiet", "--file", "/formol-shared/backup-pg.sql", "postgres"] + env: + - name: PGUSER + value: demopostgres +--- +apiVersion: formol.desmojim.fr/v1alpha1 +kind: Function +metadata: + name: maintenance-off + namespace: demo +spec: + name: maintenance-off + command: ["/bin/sh"] + args: ["-c", "echo $(date +%Y/%m/%d-%H:%M:%S) maintenance-off >> /data/logs.txt"] +--- +apiVersion: formol.desmojim.fr/v1alpha1 +kind: Function +metadata: + name: maintenance-on + namespace: demo +spec: + name: maintenance-on + command: ["/bin/sh"] + args: ["-c", "echo $(date +%Y/%m/%d-%H:%M:%S) maintenance-on >> /data/logs.txt"] +--- +apiVersion: formol.desmojim.fr/v1alpha1 +kind: BackupConfiguration +metadata: + name: backup-demo + namespace: demo +spec: + suspend: true + image: desmo999r/formolcli:latest + repository: repo-minio + schedule: "15 * * * *" + keep: + last: 2 + daily: 2 + weekly: 2 + monthly: 6 + yearly: 3 + targets: + - backupType: Snapshot + targetKind: Deployment + targetName: apache-deployment + containers: + - name: apache + steps: + - initialize: maintenance-on + - finalize: maintenance-off + paths: + - /data + - backupType: Job + targetKind: StatefulSet + targetName: postgres-demo + containers: + - name: postgres + job: + - backup: backup-pg + restore: restore-pg +# - kind: Job +# name: backup-pg +# steps: +# - name: 
backup-pg From b67d300db2874a16dcec4bd3aea38cf41f49ecbe Mon Sep 17 00:00:00 2001 From: Jean-Marc ANDRE Date: Sun, 2 Apr 2023 22:04:43 +0200 Subject: [PATCH 49/69] rearranged a bit --- .../backupconfiguration_controller_helpers.go | 137 ++++++++---------- 1 file changed, 64 insertions(+), 73 deletions(-) diff --git a/controllers/backupconfiguration_controller_helpers.go b/controllers/backupconfiguration_controller_helpers.go index 76107cb..93e1b13 100644 --- a/controllers/backupconfiguration_controller_helpers.go +++ b/controllers/backupconfiguration_controller_helpers.go @@ -265,16 +265,29 @@ func (r *BackupConfigurationReconciler) addSidecar(backupConf formolv1alpha1.Bac r.Log.Error(err, "unable to create RBAC for the sidecar container") return } - switch target.BackupType { - case formolv1alpha1.OnlineKind: - sidecarPaths, vms := addOnlineSidecarTags(targetPodSpec, target) - sidecar.Env = append(sidecar.Env, corev1.EnvVar{ - Name: formolv1alpha1.BACKUP_PATHS, - Value: strings.Join(sidecarPaths, string(os.PathListSeparator)), - }) - sidecar.VolumeMounts = vms - case formolv1alpha1.JobKind: - sidecar.VolumeMounts = addJobSidecarTags(targetPodSpec, target) + for i, container := range targetPodSpec.Containers { + for _, targetContainer := range target.Containers { + if targetContainer.Name == container.Name { + // Found a target container. Tag it. 
+ targetPodSpec.Containers[i].Env = append(container.Env, corev1.EnvVar{ + Name: formolv1alpha1.TARGETCONTAINER_TAG, + Value: container.Name, + }) + switch target.BackupType { + case formolv1alpha1.OnlineKind: + sidecarPaths, vms := addOnlineSidecarTags(container, targetContainer) + sidecar.Env = append(sidecar.Env, corev1.EnvVar{ + Name: formolv1alpha1.BACKUP_PATHS, + Value: strings.Join(sidecarPaths, string(os.PathListSeparator)), + }) + sidecar.VolumeMounts = vms + case formolv1alpha1.JobKind: + sidecar.VolumeMounts = addJobSidecarTags(targetPodSpec, i, targetContainer) + case formolv1alpha1.SnapshotKind: + + } + } + } } if repo.Spec.Backend.Local != nil { sidecar.VolumeMounts = append(sidecar.VolumeMounts, corev1.VolumeMount{ @@ -414,76 +427,54 @@ func (r *BackupConfigurationReconciler) createRBACSidecar(sa corev1.ServiceAccou return nil } -func addJobSidecarTags(podSpec *corev1.PodSpec, target formolv1alpha1.Target) (vms []corev1.VolumeMount) { - for i, container := range podSpec.Containers { - for _, targetContainer := range target.Containers { - if targetContainer.Name == container.Name { - // Found a target container. Tag it. 
- podSpec.Containers[i].Env = append(container.Env, corev1.EnvVar{ - Name: formolv1alpha1.TARGETCONTAINER_TAG, - Value: container.Name, - }) - // Create a shared mount between the target and sidecar container - // the output of the Job will be saved in the shared volume - // and restic will then backup the content of the volume - var addSharedVol bool = true - for _, vol := range podSpec.Volumes { - if vol.Name == formolv1alpha1.FORMOL_SHARED_VOLUME { - addSharedVol = false - } - } - if addSharedVol { - podSpec.Volumes = append(podSpec.Volumes, - corev1.Volume{ - Name: formolv1alpha1.FORMOL_SHARED_VOLUME, - VolumeSource: corev1.VolumeSource{EmptyDir: &corev1.EmptyDirVolumeSource{}}, - }) - } - podSpec.Containers[i].VolumeMounts = append(podSpec.Containers[i].VolumeMounts, corev1.VolumeMount{ - Name: formolv1alpha1.FORMOL_SHARED_VOLUME, - MountPath: targetContainer.SharePath, - }) - vms = append(vms, corev1.VolumeMount{ - Name: formolv1alpha1.FORMOL_SHARED_VOLUME, - MountPath: targetContainer.SharePath, - }) - } +func addJobSidecarTags(podSpec *corev1.PodSpec, index int, targetContainer formolv1alpha1.TargetContainer) (vms []corev1.VolumeMount) { + // Create a shared mount between the target and sidecar container + // the output of the Job will be saved in the shared volume + // and restic will then backup the content of the volume + var addSharedVol bool = true + for _, vol := range podSpec.Volumes { + if vol.Name == formolv1alpha1.FORMOL_SHARED_VOLUME { + addSharedVol = false } } + if addSharedVol { + podSpec.Volumes = append(podSpec.Volumes, + corev1.Volume{ + Name: formolv1alpha1.FORMOL_SHARED_VOLUME, + VolumeSource: corev1.VolumeSource{EmptyDir: &corev1.EmptyDirVolumeSource{}}, + }) + } + podSpec.Containers[index].VolumeMounts = append(podSpec.Containers[index].VolumeMounts, corev1.VolumeMount{ + Name: formolv1alpha1.FORMOL_SHARED_VOLUME, + MountPath: targetContainer.SharePath, + }) + vms = append(vms, corev1.VolumeMount{ + Name: 
formolv1alpha1.FORMOL_SHARED_VOLUME, + MountPath: targetContainer.SharePath, + }) return } -func addOnlineSidecarTags(podSpec *corev1.PodSpec, target formolv1alpha1.Target) (sidecarPaths []string, vms []corev1.VolumeMount) { - for i, container := range podSpec.Containers { - for _, targetContainer := range target.Containers { - if targetContainer.Name == container.Name { - // Found a target container. Tag it. - podSpec.Containers[i].Env = append(container.Env, corev1.EnvVar{ - Name: formolv1alpha1.TARGETCONTAINER_TAG, - Value: container.Name, - }) - // targetContainer.Paths are the paths to backup - // We have to find what volumes are mounted under those paths - // and mount them under a path that exists in the sidecar container - for i, path := range targetContainer.Paths { - vm := corev1.VolumeMount{ReadOnly: true} - var longest int = 0 - var sidecarPath string - for _, volumeMount := range container.VolumeMounts { - // if strings.HasPrefix(path, volumeMount.MountPath) && len(volumeMount.MountPath) > longest { - if rel, err := filepath.Rel(volumeMount.MountPath, path); err == nil && len(volumeMount.MountPath) > longest { - longest = len(volumeMount.MountPath) - vm.Name = volumeMount.Name - vm.MountPath = fmt.Sprintf("/%s%d", formolv1alpha1.BACKUP_PREFIX_PATH, i) - vm.SubPath = volumeMount.SubPath - sidecarPath = filepath.Join(vm.MountPath, rel) - } - } - vms = append(vms, vm) - sidecarPaths = append(sidecarPaths, sidecarPath) - } +func addOnlineSidecarTags(container corev1.Container, targetContainer formolv1alpha1.TargetContainer) (sidecarPaths []string, vms []corev1.VolumeMount) { + // targetContainer.Paths are the paths to backup + // We have to find what volumes are mounted under those paths + // and mount them under a path that exists in the sidecar container + for i, path := range targetContainer.Paths { + vm := corev1.VolumeMount{ReadOnly: true} + var longest int = 0 + var sidecarPath string + for _, volumeMount := range container.VolumeMounts { + // if 
strings.HasPrefix(path, volumeMount.MountPath) && len(volumeMount.MountPath) > longest { + if rel, err := filepath.Rel(volumeMount.MountPath, path); err == nil && len(volumeMount.MountPath) > longest { + longest = len(volumeMount.MountPath) + vm.Name = volumeMount.Name + vm.MountPath = fmt.Sprintf("/%s%d", formolv1alpha1.BACKUP_PREFIX_PATH, i) + vm.SubPath = volumeMount.SubPath + sidecarPath = filepath.Join(vm.MountPath, rel) } } + vms = append(vms, vm) + sidecarPaths = append(sidecarPaths, sidecarPath) } return } From d8b685c1ab88f0edf34b17850e6359b1eb632d25 Mon Sep 17 00:00:00 2001 From: Jean-Marc ANDRE Date: Tue, 11 Apr 2023 10:47:33 +0200 Subject: [PATCH 50/69] First volume snapshot created --- api/v1alpha1/backupconfiguration_types.go | 2 + api/v1alpha1/common.go | 84 +++++++++ .../backupconfiguration_controller_helpers.go | 173 +++++++++--------- 3 files changed, 175 insertions(+), 84 deletions(-) diff --git a/api/v1alpha1/backupconfiguration_types.go b/api/v1alpha1/backupconfiguration_types.go index 7d8ffe1..02d777d 100644 --- a/api/v1alpha1/backupconfiguration_types.go +++ b/api/v1alpha1/backupconfiguration_types.go @@ -92,6 +92,8 @@ type Target struct { Containers []TargetContainer `json:"containers"` // +kubebuilder:default:=2 Retry int `json:"retry"` + // +optional + VolumeSnapshotClass string `json:"volumeSnapshotClass,omitempty"` } type Keep struct { diff --git a/api/v1alpha1/common.go b/api/v1alpha1/common.go index ea61a01..cc70251 100644 --- a/api/v1alpha1/common.go +++ b/api/v1alpha1/common.go @@ -1,5 +1,11 @@ package v1alpha1 +import ( + "fmt" + corev1 "k8s.io/api/core/v1" + "path/filepath" +) + const ( RESTORECONTAINER_NAME string = "formol-restore" // the name of the sidecar container @@ -14,3 +20,81 @@ const ( // Backup Paths list BACKUP_PATHS = "BACKUP_PATHS" ) + +func GetSharedPath(podSpec *corev1.PodSpec, index int, targetContainer TargetContainer) (vms []corev1.VolumeMount) { + // Create a shared mount between the target and sidecar 
container + // the output of the Job will be saved in the shared volume + // and restic will then backup the content of the volume + var addSharedVol bool = true + for _, vol := range podSpec.Volumes { + if vol.Name == FORMOL_SHARED_VOLUME { + addSharedVol = false + } + } + if addSharedVol { + podSpec.Volumes = append(podSpec.Volumes, + corev1.Volume{ + Name: FORMOL_SHARED_VOLUME, + VolumeSource: corev1.VolumeSource{EmptyDir: &corev1.EmptyDirVolumeSource{}}, + }) + } + podSpec.Containers[index].VolumeMounts = append(podSpec.Containers[index].VolumeMounts, corev1.VolumeMount{ + Name: FORMOL_SHARED_VOLUME, + MountPath: targetContainer.SharePath, + }) + vms = append(vms, corev1.VolumeMount{ + Name: FORMOL_SHARED_VOLUME, + MountPath: targetContainer.SharePath, + }) + return +} + +func GetVolumeMounts(container corev1.Container, targetContainer TargetContainer) (sidecarPaths []string, vms []corev1.VolumeMount) { + // targetContainer.Paths are the paths to backup + // We have to find what volumes are mounted under those paths + // and mount them under a path that exists in the sidecar container + for i, path := range targetContainer.Paths { + vm := corev1.VolumeMount{ReadOnly: true} + var longest int = 0 + var sidecarPath string + for _, volumeMount := range container.VolumeMounts { + // if strings.HasPrefix(path, volumeMount.MountPath) && len(volumeMount.MountPath) > longest { + if rel, err := filepath.Rel(volumeMount.MountPath, path); err == nil && len(volumeMount.MountPath) > longest { + longest = len(volumeMount.MountPath) + vm.Name = volumeMount.Name + vm.MountPath = fmt.Sprintf("/%s%d", BACKUP_PREFIX_PATH, i) + vm.SubPath = volumeMount.SubPath + sidecarPath = filepath.Join(vm.MountPath, rel) + } + } + vms = append(vms, vm) + sidecarPaths = append(sidecarPaths, sidecarPath) + } + return +} + +func GetSidecar(backupConf BackupConfiguration, target Target) corev1.Container { + sidecar := corev1.Container{ + Name: SIDECARCONTAINER_NAME, + Image: backupConf.Spec.Image, 
+ Args: []string{"server"}, + Env: []corev1.EnvVar{ + corev1.EnvVar{ + Name: TARGET_NAME, + Value: target.TargetName, + }, + corev1.EnvVar{ + Name: POD_NAMESPACE, + ValueFrom: &corev1.EnvVarSource{ + FieldRef: &corev1.ObjectFieldSelector{ + FieldPath: "metadata.namespace", + }, + }, + }}, + VolumeMounts: []corev1.VolumeMount{}, + SecurityContext: &corev1.SecurityContext{ + Privileged: func() *bool { b := true; return &b }(), + }, + } + return sidecar +} diff --git a/controllers/backupconfiguration_controller_helpers.go b/controllers/backupconfiguration_controller_helpers.go index 93e1b13..20094ba 100644 --- a/controllers/backupconfiguration_controller_helpers.go +++ b/controllers/backupconfiguration_controller_helpers.go @@ -17,14 +17,12 @@ limitations under the License. package controllers import ( - "fmt" batchv1 "k8s.io/api/batch/v1" corev1 "k8s.io/api/core/v1" rbacv1 "k8s.io/api/rbac/v1" "k8s.io/apimachinery/pkg/api/errors" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "os" - "path/filepath" ctrl "sigs.k8s.io/controller-runtime" "sigs.k8s.io/controller-runtime/pkg/client" "strings" @@ -33,8 +31,9 @@ import ( ) const ( - FORMOL_SA = "formol-controller" - FORMOL_SIDECAR_ROLE = "formol:sidecar-role" + FORMOL_SA = "formol-controller" + FORMOL_SIDECAR_ROLE = "formol:sidecar-role" + FORMOL_SIDECAR_CLUSTERROLE = "formol:sidecar-clusterrole" ) func (r *BackupConfigurationReconciler) DeleteCronJob(backupConf formolv1alpha1.BackupConfiguration) error { @@ -225,28 +224,7 @@ func (r *BackupConfigurationReconciler) addSidecar(backupConf formolv1alpha1.Bac return err } r.Log.V(1).Info("Got Repository", "repo", repo) - sidecar := corev1.Container{ - Name: formolv1alpha1.SIDECARCONTAINER_NAME, - Image: backupConf.Spec.Image, - Args: []string{"server"}, - Env: []corev1.EnvVar{ - corev1.EnvVar{ - Name: formolv1alpha1.TARGET_NAME, - Value: target.TargetName, - }, - corev1.EnvVar{ - Name: formolv1alpha1.POD_NAMESPACE, - ValueFrom: &corev1.EnvVarSource{ - FieldRef: 
&corev1.ObjectFieldSelector{ - FieldPath: "metadata.namespace", - }, - }, - }}, - VolumeMounts: []corev1.VolumeMount{}, - SecurityContext: &corev1.SecurityContext{ - Privileged: func() *bool { b := true; return &b }(), - }, - } + sidecar := formolv1alpha1.GetSidecar(backupConf, target) targetObject, targetPodSpec := formolv1alpha1.GetTargetObjects(target.TargetKind) if err := r.Get(r.Context, client.ObjectKey{ Namespace: backupConf.Namespace, @@ -275,16 +253,14 @@ func (r *BackupConfigurationReconciler) addSidecar(backupConf formolv1alpha1.Bac }) switch target.BackupType { case formolv1alpha1.OnlineKind: - sidecarPaths, vms := addOnlineSidecarTags(container, targetContainer) + sidecarPaths, vms := formolv1alpha1.GetVolumeMounts(container, targetContainer) sidecar.Env = append(sidecar.Env, corev1.EnvVar{ Name: formolv1alpha1.BACKUP_PATHS, Value: strings.Join(sidecarPaths, string(os.PathListSeparator)), }) sidecar.VolumeMounts = vms case formolv1alpha1.JobKind: - sidecar.VolumeMounts = addJobSidecarTags(targetPodSpec, i, targetContainer) - case formolv1alpha1.SnapshotKind: - + sidecar.VolumeMounts = formolv1alpha1.GetSharedPath(targetPodSpec, i, targetContainer) } } } @@ -333,6 +309,15 @@ func (r *BackupConfigurationReconciler) deleteRBACSidecar(namespace string) erro } } } + roleBinding := rbacv1.RoleBinding{ + ObjectMeta: metav1.ObjectMeta{ + Namespace: namespace, + Name: FORMOL_SIDECAR_ROLE, + }, + } + if err := r.Delete(r.Context, &roleBinding); err != nil { + r.Log.Error(err, "unable to delete sidecar role binding") + } role := rbacv1.Role{ ObjectMeta: metav1.ObjectMeta{ Namespace: namespace, @@ -341,7 +326,24 @@ func (r *BackupConfigurationReconciler) deleteRBACSidecar(namespace string) erro } if err := r.Delete(r.Context, &role); err != nil { r.Log.Error(err, "unable to delete sidecar role") - return err + } + clusterRoleBinding := rbacv1.ClusterRoleBinding{ + ObjectMeta: metav1.ObjectMeta{ + Namespace: namespace, + Name: FORMOL_SIDECAR_CLUSTERROLE, + }, + } + 
if err := r.Delete(r.Context, &clusterRoleBinding); err != nil { + r.Log.Error(err, "unable to delete sidecar clusterRole binding") + } + clusterRole := rbacv1.ClusterRole{ + ObjectMeta: metav1.ObjectMeta{ + Namespace: namespace, + Name: FORMOL_SIDECAR_CLUSTERROLE, + }, + } + if err := r.Delete(r.Context, &clusterRole); err != nil { + r.Log.Error(err, "unable to delete sidecar clusterRole") } return nil } @@ -376,7 +378,7 @@ func (r *BackupConfigurationReconciler) createRBACSidecar(sa corev1.ServiceAccou rbacv1.PolicyRule{ Verbs: []string{"get", "list", "watch"}, APIGroups: []string{""}, - Resources: []string{"secrets"}, + Resources: []string{"secrets", "persistentvolumeclaims"}, }, rbacv1.PolicyRule{ Verbs: []string{"get", "list", "watch", "create", "update", "patch", "delete"}, @@ -424,57 +426,60 @@ func (r *BackupConfigurationReconciler) createRBACSidecar(sa corev1.ServiceAccou return err } } + clusterRole := rbacv1.ClusterRole{} + if err := r.Get(r.Context, client.ObjectKey{ + Name: FORMOL_SIDECAR_CLUSTERROLE, + }, &clusterRole); err != nil && errors.IsNotFound(err) { + clusterRole = rbacv1.ClusterRole{ + ObjectMeta: metav1.ObjectMeta{ + Name: FORMOL_SIDECAR_CLUSTERROLE, + }, + Rules: []rbacv1.PolicyRule{ + rbacv1.PolicyRule{ + Verbs: []string{"get", "list", "watch"}, + APIGroups: []string{"", "snapshot.storage.k8s.io"}, + Resources: []string{"volumesnapshotclasses", "persistentvolumes"}, + }, + rbacv1.PolicyRule{ + Verbs: []string{"get", "list", "watch", "create", "update", "patch", "delete"}, + APIGroups: []string{"snapshot.storage.k8s.io"}, + Resources: []string{"volumesnapshots"}, + }, + }, + } + r.Log.V(0).Info("Creating formol sidecar cluster role", "clusterRole", clusterRole) + if err = r.Create(r.Context, &clusterRole); err != nil { + r.Log.Error(err, "unable to create sidecar cluster role") + return err + } + } + clusterRolebinding := rbacv1.ClusterRoleBinding{} + if err := r.Get(r.Context, client.ObjectKey{ + Namespace: sa.Namespace, + Name: 
FORMOL_SIDECAR_CLUSTERROLE, + }, &clusterRolebinding); err != nil && errors.IsNotFound(err) { + clusterRolebinding = rbacv1.ClusterRoleBinding{ + ObjectMeta: metav1.ObjectMeta{ + Name: FORMOL_SIDECAR_CLUSTERROLE, + }, + Subjects: []rbacv1.Subject{ + rbacv1.Subject{ + Kind: "ServiceAccount", + Name: sa.Name, + Namespace: sa.Namespace, + }, + }, + RoleRef: rbacv1.RoleRef{ + APIGroup: "rbac.authorization.k8s.io", + Kind: "ClusterRole", + Name: FORMOL_SIDECAR_CLUSTERROLE, + }, + } + r.Log.V(0).Info("Creating formol sidecar clusterrolebinding", "clusterrolebinding", clusterRolebinding) + if err = r.Create(r.Context, &clusterRolebinding); err != nil { + r.Log.Error(err, "unable to create sidecar cluster rolebinding") + return err + } + } return nil } - -func addJobSidecarTags(podSpec *corev1.PodSpec, index int, targetContainer formolv1alpha1.TargetContainer) (vms []corev1.VolumeMount) { - // Create a shared mount between the target and sidecar container - // the output of the Job will be saved in the shared volume - // and restic will then backup the content of the volume - var addSharedVol bool = true - for _, vol := range podSpec.Volumes { - if vol.Name == formolv1alpha1.FORMOL_SHARED_VOLUME { - addSharedVol = false - } - } - if addSharedVol { - podSpec.Volumes = append(podSpec.Volumes, - corev1.Volume{ - Name: formolv1alpha1.FORMOL_SHARED_VOLUME, - VolumeSource: corev1.VolumeSource{EmptyDir: &corev1.EmptyDirVolumeSource{}}, - }) - } - podSpec.Containers[index].VolumeMounts = append(podSpec.Containers[index].VolumeMounts, corev1.VolumeMount{ - Name: formolv1alpha1.FORMOL_SHARED_VOLUME, - MountPath: targetContainer.SharePath, - }) - vms = append(vms, corev1.VolumeMount{ - Name: formolv1alpha1.FORMOL_SHARED_VOLUME, - MountPath: targetContainer.SharePath, - }) - return -} - -func addOnlineSidecarTags(container corev1.Container, targetContainer formolv1alpha1.TargetContainer) (sidecarPaths []string, vms []corev1.VolumeMount) { - // targetContainer.Paths are the paths to 
backup - // We have to find what volumes are mounted under those paths - // and mount them under a path that exists in the sidecar container - for i, path := range targetContainer.Paths { - vm := corev1.VolumeMount{ReadOnly: true} - var longest int = 0 - var sidecarPath string - for _, volumeMount := range container.VolumeMounts { - // if strings.HasPrefix(path, volumeMount.MountPath) && len(volumeMount.MountPath) > longest { - if rel, err := filepath.Rel(volumeMount.MountPath, path); err == nil && len(volumeMount.MountPath) > longest { - longest = len(volumeMount.MountPath) - vm.Name = volumeMount.Name - vm.MountPath = fmt.Sprintf("/%s%d", formolv1alpha1.BACKUP_PREFIX_PATH, i) - vm.SubPath = volumeMount.SubPath - sidecarPath = filepath.Join(vm.MountPath, rel) - } - } - vms = append(vms, vm) - sidecarPaths = append(sidecarPaths, sidecarPath) - } - return -} From 61f45a79404e1f71d9f7661d295d6ac3cd07dd8c Mon Sep 17 00:00:00 2001 From: Jean-Marc Andre Date: Tue, 11 Apr 2023 18:18:46 +0200 Subject: [PATCH 51/69] Fixed RBAC --- controllers/backupconfiguration_controller_helpers.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/controllers/backupconfiguration_controller_helpers.go b/controllers/backupconfiguration_controller_helpers.go index 20094ba..18527a1 100644 --- a/controllers/backupconfiguration_controller_helpers.go +++ b/controllers/backupconfiguration_controller_helpers.go @@ -366,7 +366,7 @@ func (r *BackupConfigurationReconciler) createRBACSidecar(sa corev1.ServiceAccou }, Rules: []rbacv1.PolicyRule{ rbacv1.PolicyRule{ - Verbs: []string{"get", "list", "update"}, + Verbs: []string{"get", "list", "watch", "update"}, APIGroups: []string{"apps"}, Resources: []string{"deployments"}, }, From 8975f77e5858ee167508ef0359c3b9d6cbaba6ee Mon Sep 17 00:00:00 2001 From: Jean-Marc ANDRE Date: Fri, 14 Apr 2023 20:48:08 +0200 Subject: [PATCH 52/69] Need WaitingForJob special state for SnapshotKind --- api/v1alpha1/backupsession_types.go | 19 
++++++++++--------- controllers/session.go | 10 +++++++++- 2 files changed, 19 insertions(+), 10 deletions(-) diff --git a/api/v1alpha1/backupsession_types.go b/api/v1alpha1/backupsession_types.go index 03d7a1d..2bbd433 100644 --- a/api/v1alpha1/backupsession_types.go +++ b/api/v1alpha1/backupsession_types.go @@ -24,15 +24,16 @@ import ( type SessionState string const ( - New SessionState = "New" - Initializing SessionState = "Initializing" - Initialized SessionState = "Initialized" - Running SessionState = "Running" - Waiting SessionState = "Waiting" - Finalize SessionState = "Finalize" - Success SessionState = "Success" - Failure SessionState = "Failure" - Deleted SessionState = "Deleted" + New SessionState = "New" + Initializing SessionState = "Initializing" + Initialized SessionState = "Initialized" + Running SessionState = "Running" + Waiting SessionState = "Waiting" + WaitingForJob SessionState = "WaitingForJob" + Finalize SessionState = "Finalize" + Success SessionState = "Success" + Failure SessionState = "Failure" + //Deleted SessionState = "Deleted" ) type TargetStatus struct { diff --git a/controllers/session.go b/controllers/session.go index afb5cbc..bf49e1e 100644 --- a/controllers/session.go +++ b/controllers/session.go @@ -54,7 +54,7 @@ func (s Session) checkSessionState( backupConf formolv1alpha1.BackupConfiguration, currentState formolv1alpha1.SessionState, waitState formolv1alpha1.SessionState, - nextState formolv1alpha1.SessionState) formolv1alpha1.SessionState { + nextState formolv1alpha1.SessionState) (sessionState formolv1alpha1.SessionState) { for i, targetStatus := range tss { s.Log.V(0).Info("Target status", "target", targetStatus.TargetName, "session state", targetStatus.SessionState) switch targetStatus.SessionState { @@ -77,6 +77,14 @@ func (s Session) checkSessionState( // target is still busy with its current state. Wait until it is done. 
s.Log.V(0).Info("Waiting for one target to finish", "waitState", waitState) return "" + case formolv1alpha1.WaitingForJob: + // SnapshotKind special case + // A Job is scheduled to do the backup from a Volume Snapshot. It might take some time. + // We still want to run Finalize for all the targets (continue) + // but we also don't want to move the global BackupSession to Success (rewrite sessionState) + // When the Job is over, it will move the target state to Finalized and we'll be fine + defer func() { sessionState = "" }() + continue default: if i == len(tss)-1 { s.Log.V(0).Info("Moving to next state", "nextState", nextState) From 65d880180b3bfc9eb5a568a17aaf9be9ee3d5a01 Mon Sep 17 00:00:00 2001 From: Jean-Marc ANDRE Date: Mon, 17 Apr 2023 01:09:42 +0200 Subject: [PATCH 53/69] cannot set sessionState to empty string otherwise the BackupSession does not get updated and the other targets won't Finalize --- controllers/session.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/controllers/session.go b/controllers/session.go index bf49e1e..bddf4c2 100644 --- a/controllers/session.go +++ b/controllers/session.go @@ -83,7 +83,7 @@ func (s Session) checkSessionState( // We still want to run Finalize for all the targets (continue) // but we also don't want to move the global BackupSession to Success (rewrite sessionState) // When the Job is over, it will move the target state to Finalized and we'll be fine - defer func() { sessionState = "" }() + defer func() { sessionState = waitState }() continue default: if i == len(tss)-1 { From ea1c1bd2e31cc6f67621ed71659e738ca5f5d8c8 Mon Sep 17 00:00:00 2001 From: Jean-Marc ANDRE Date: Mon, 17 Apr 2023 01:10:01 +0200 Subject: [PATCH 54/69] Added RBAC needed --- controllers/backupconfiguration_controller_helpers.go | 7 ++++++- 1 file changed, 6 insertions(+), 1 deletion(-) diff --git a/controllers/backupconfiguration_controller_helpers.go b/controllers/backupconfiguration_controller_helpers.go index 
18527a1..353f0e0 100644 --- a/controllers/backupconfiguration_controller_helpers.go +++ b/controllers/backupconfiguration_controller_helpers.go @@ -376,10 +376,15 @@ func (r *BackupConfigurationReconciler) createRBACSidecar(sa corev1.ServiceAccou Resources: []string{"restoresessions", "backupsessions", "backupconfigurations", "functions", "repoes"}, }, rbacv1.PolicyRule{ - Verbs: []string{"get", "list", "watch"}, + Verbs: []string{"get", "list", "watch", "create", "update", "patch", "delete"}, APIGroups: []string{""}, Resources: []string{"secrets", "persistentvolumeclaims"}, }, + rbacv1.PolicyRule{ + Verbs: []string{"get", "list", "watch", "create", "update", "patch", "delete"}, + APIGroups: []string{"batch"}, + Resources: []string{"jobs"}, + }, rbacv1.PolicyRule{ Verbs: []string{"get", "list", "watch", "create", "update", "patch", "delete"}, APIGroups: []string{"formol.desmojim.fr"}, From c80b16e8db2685853356a4fe63c8d6b98d305358 Mon Sep 17 00:00:00 2001 From: Jean-Marc ANDRE Date: Mon, 17 Apr 2023 23:00:33 +0200 Subject: [PATCH 55/69] adjusted the BackupConfiguration and BackupSession RBAC --- controllers/backupconfiguration_controller.go | 24 +++++++++++++++++++++--- controllers/backupsession_controller.go | 4 +--- 2 files changed, 22 insertions(+), 6 deletions(-) diff --git a/controllers/backupconfiguration_controller.go b/controllers/backupconfiguration_controller.go index 71ffc45..89faab1 100644 --- a/controllers/backupconfiguration_controller.go +++ b/controllers/backupconfiguration_controller.go @@ -38,9 +38,27 @@ type BackupConfigurationReconciler struct { context.Context } -//+kubebuilder:rbac:groups=formol.desmojim.fr,resources=backupconfigurations,verbs=get;list;watch;create;update;patch;delete -//+kubebuilder:rbac:groups=formol.desmojim.fr,resources=backupconfigurations/status,verbs=get;update;patch -//+kubebuilder:rbac:groups=formol.desmojim.fr,resources=backupconfigurations/finalizers,verbs=update
+//+kubebuilder:rbac:groups=formol.desmojim.fr,resources=*,verbs=* +// +kubebuilder:rbac:groups=apps,resources=deployments,verbs=get;list;watch;create;update;patch;delete +// +kubebuilder:rbac:groups=apps,resources=replicasets,verbs=get;list;watch;create;update;patch;delete +// +kubebuilder:rbac:groups=apps,resources=statefulsets,verbs=get;list;watch;create;update;patch;delete +// +kubebuilder:rbac:groups=core,resources=pods,verbs=get;list;watch +// +kubebuilder:rbac:groups=core,resources=volumesnapshotclasses,verbs=get;list;watch +// +kubebuilder:rbac:groups=core,resources=persistentvolumes,verbs=get;list;watch +// +kubebuilder:rbac:groups=core,resources=configmaps,verbs=get;list;watch +// +kubebuilder:rbac:groups=core,resources=secrets,verbs=get;list;watch;create;update;patch;delete +// +kubebuilder:rbac:groups=core,resources=serviceaccounts,verbs=get;list;watch;create;update;patch;delete +// +kubebuilder:rbac:groups=core,resources=persistentvolumeclaims,verbs=get;list;watch;create;update;patch;delete +// +kubebuilder:rbac:groups=rbac.authorization.k8s.io,resources=roles,verbs=get;list;watch;create;update;patch;delete +// +kubebuilder:rbac:groups=rbac.authorization.k8s.io,resources=rolebindings,verbs=get;list;watch;create;update;patch;delete +// +kubebuilder:rbac:groups=rbac.authorization.k8s.io,resources=clusterroles,verbs=get;list;watch;create;update;patch;delete +// +kubebuilder:rbac:groups=rbac.authorization.k8s.io,resources=clusterrolebindings,verbs=get;list;watch;create;update;patch;delete +// +kubebuilder:rbac:groups=batch,resources=jobs,verbs=get;list;watch;create;update;patch;delete +// +kubebuilder:rbac:groups=batch,resources=cronjobs,verbs=get;list;watch;create;update;patch;delete +// +kubebuilder:rbac:groups=batch,resources=cronjobs/status,verbs=get +// +kubebuilder:rbac:groups=snapshot.storage.k8s.io,resources=persistentvolumes,verbs=get;list;watch +// 
+kubebuilder:rbac:groups=snapshot.storage.k8s.io,resources=volumesnapshotclasses,verbs=get;list;watch +// +kubebuilder:rbac:groups=snapshot.storage.k8s.io,resources=volumesnapshots,verbs=get;list;watch;create;update;patch;delete func (r *BackupConfigurationReconciler) Reconcile(ctx context.Context, req ctrl.Request) (ctrl.Result, error) { r.Context = ctx diff --git a/controllers/backupsession_controller.go b/controllers/backupsession_controller.go index f2460cc..7dc14cc 100644 --- a/controllers/backupsession_controller.go +++ b/controllers/backupsession_controller.go @@ -40,9 +40,7 @@ type BackupSessionReconciler struct { Session } -//+kubebuilder:rbac:groups=formol.desmojim.fr,resources=backupsessions,verbs=get;list;watch;create;update;patch;delete -//+kubebuilder:rbac:groups=formol.desmojim.fr,resources=backupsessions/status,verbs=get;update;patch -//+kubebuilder:rbac:groups=formol.desmojim.fr,resources=backupsessions/finalizers,verbs=update +//+kubebuilder:rbac:groups=formol.desmojim.fr,resources=*,verbs=* func (r *BackupSessionReconciler) Reconcile(ctx context.Context, req ctrl.Request) (ctrl.Result, error) { r.Log = log.FromContext(ctx) From 005d02e8916a25c757698acb77eebebadc9a6276 Mon Sep 17 00:00:00 2001 From: Jean-Marc ANDRE Date: Mon, 17 Apr 2023 23:00:59 +0200 Subject: [PATCH 56/69] Missing files --- .gitignore | 28 +- PROJECT | 25 + ...rmol.desmojim.fr_backupconfigurations.yaml | 183 ++ .../formol.desmojim.fr_backupsessions.yaml | 173 ++ .../bases/formol.desmojim.fr_functions.yaml | 1202 +++++++++++++ .../crd/bases/formol.desmojim.fr_repoes.yaml | 1597 +++++++++++++++++ .../formol.desmojim.fr_restoresessions.yaml | 220 +++ config/manager/kustomization.yaml | 8 + config/manager/manager.yaml | 102 ++ .../rbac/auth_proxy_client_clusterrole.yaml | 16 + config/rbac/auth_proxy_role.yaml | 24 + config/rbac/auth_proxy_role_binding.yaml | 19 + config/rbac/auth_proxy_service.yaml | 21 + .../rbac/backupconfiguration_editor_role.yaml | 31 + 
.../rbac/backupconfiguration_viewer_role.yaml | 27 + config/rbac/backupsession_editor_role.yaml | 31 + config/rbac/backupsession_viewer_role.yaml | 27 + config/rbac/function_editor_role.yaml | 31 + config/rbac/function_viewer_role.yaml | 27 + config/rbac/kustomization.yaml | 18 + config/rbac/leader_election_role.yaml | 44 + config/rbac/leader_election_role_binding.yaml | 19 + config/rbac/repo_editor_role.yaml | 31 + config/rbac/repo_viewer_role.yaml | 27 + config/rbac/restoresession_editor_role.yaml | 31 + config/rbac/restoresession_viewer_role.yaml | 27 + config/rbac/role.yaml | 249 +++ config/rbac/role_binding.yaml | 19 + config/rbac/service_account.yaml | 12 + go.sum | 796 ++++++++ 30 files changed, 5049 insertions(+), 16 deletions(-) create mode 100644 PROJECT create mode 100644 config/crd/bases/formol.desmojim.fr_backupconfigurations.yaml create mode 100644 config/crd/bases/formol.desmojim.fr_backupsessions.yaml create mode 100644 config/crd/bases/formol.desmojim.fr_functions.yaml create mode 100644 config/crd/bases/formol.desmojim.fr_repoes.yaml create mode 100644 config/crd/bases/formol.desmojim.fr_restoresessions.yaml create mode 100644 config/manager/kustomization.yaml create mode 100644 config/manager/manager.yaml create mode 100644 config/rbac/auth_proxy_client_clusterrole.yaml create mode 100644 config/rbac/auth_proxy_role.yaml create mode 100644 config/rbac/auth_proxy_role_binding.yaml create mode 100644 config/rbac/auth_proxy_service.yaml create mode 100644 config/rbac/backupconfiguration_editor_role.yaml create mode 100644 config/rbac/backupconfiguration_viewer_role.yaml create mode 100644 config/rbac/backupsession_editor_role.yaml create mode 100644 config/rbac/backupsession_viewer_role.yaml create mode 100644 config/rbac/function_editor_role.yaml create mode 100644 config/rbac/function_viewer_role.yaml create mode 100644 config/rbac/kustomization.yaml create mode 100644 config/rbac/leader_election_role.yaml create mode 100644 
config/rbac/leader_election_role_binding.yaml create mode 100644 config/rbac/repo_editor_role.yaml create mode 100644 config/rbac/repo_viewer_role.yaml create mode 100644 config/rbac/restoresession_editor_role.yaml create mode 100644 config/rbac/restoresession_viewer_role.yaml create mode 100644 config/rbac/role.yaml create mode 100644 config/rbac/role_binding.yaml create mode 100644 config/rbac/service_account.yaml create mode 100644 go.sum diff --git a/.gitignore b/.gitignore index 2a862de..54973e4 100644 --- a/.gitignore +++ b/.gitignore @@ -1,4 +1,3 @@ -*~ # Binaries for programs and plugins *.exe @@ -6,26 +5,23 @@ *.dll *.so *.dylib -*.swp +bin +testbin/* +Dockerfile.cross -# Test binary, built with `go test -c` +# Test binary, build with `go test -c` *.test # Output of the go coverage tool, specifically when used with LiteIDE *.out -go.sum -PROJECT +# Kubernetes Generated files - skip generated files, except for vendored files -# Dependency directories (remove the comment below to include it) -# vendor/ -bin/ -config/* -!config/crd -!config/default -config/crd/bases -!config/samples -test/restic -test/password -testbin/* +!vendor/**/zz_generated.* + +# editor and IDE paraphernalia +.idea +*.swp +*.swo +*~ test/minikube/040-restoresession.yaml diff --git a/PROJECT b/PROJECT new file mode 100644 index 0000000..dfc3d26 --- /dev/null +++ b/PROJECT @@ -0,0 +1,25 @@ +domain: desmojim.fr +layout: +- go.kubebuilder.io/v3 +projectName: formol +repo: github.com/desmo999r/formol +resources: +- api: + crdVersion: v1 + namespaced: true + controller: true + domain: desmojim.fr + group: formol + kind: BackupConfiguration + path: github.com/desmo999r/formol/api/v1alpha1 + version: v1alpha1 +- api: + crdVersion: v1 + namespaced: true + controller: true + domain: desmojim.fr + group: formol + kind: BackupSession + path: github.com/desmo999r/formol/api/v1alpha1 + version: v1alpha1 +version: "3" diff --git a/config/crd/bases/formol.desmojim.fr_backupconfigurations.yaml 
b/config/crd/bases/formol.desmojim.fr_backupconfigurations.yaml new file mode 100644 index 0000000..ea787fa --- /dev/null +++ b/config/crd/bases/formol.desmojim.fr_backupconfigurations.yaml @@ -0,0 +1,183 @@ +--- +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + annotations: + controller-gen.kubebuilder.io/version: v0.10.0 + creationTimestamp: null + name: backupconfigurations.formol.desmojim.fr +spec: + group: formol.desmojim.fr + names: + kind: BackupConfiguration + listKind: BackupConfigurationList + plural: backupconfigurations + shortNames: + - bc + singular: backupconfiguration + scope: Namespaced + versions: + - additionalPrinterColumns: + - jsonPath: .spec.suspend + name: Suspended + type: boolean + - jsonPath: .spec.schedule + name: Schedule + type: string + name: v1alpha1 + schema: + openAPIV3Schema: + description: BackupConfiguration is the Schema for the backupconfigurations + API + properties: + apiVersion: + description: 'APIVersion defines the versioned schema of this representation + of an object. Servers should convert recognized schemas to the latest + internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' + type: string + kind: + description: 'Kind is a string value representing the REST resource this + object represents. Servers may infer this from the endpoint the client + submits requests to. Cannot be updated. In CamelCase. 
More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + type: string + metadata: + type: object + spec: + description: BackupConfigurationSpec defines the desired state of BackupConfiguration + properties: + image: + type: string + keep: + properties: + daily: + format: int32 + type: integer + last: + format: int32 + type: integer + monthly: + format: int32 + type: integer + weekly: + format: int32 + type: integer + yearly: + format: int32 + type: integer + required: + - daily + - last + - monthly + - weekly + - yearly + type: object + repository: + type: string + schedule: + type: string + suspend: + default: false + type: boolean + targets: + items: + properties: + backupType: + enum: + - Online + - Snapshot + - Job + type: string + containers: + items: + properties: + job: + items: + properties: + backup: + type: string + finalize: + type: string + initialize: + type: string + restore: + type: string + type: object + type: array + name: + type: string + paths: + items: + type: string + type: array + sharePath: + default: /formol-shared + type: string + steps: + items: + properties: + backup: + type: string + finalize: + type: string + initialize: + type: string + restore: + type: string + type: object + type: array + required: + - name + - sharePath + type: object + type: array + retry: + default: 2 + type: integer + targetKind: + enum: + - Deployment + - StatefulSet + - Pod + type: string + targetName: + type: string + volumeSnapshotClass: + type: string + required: + - backupType + - containers + - retry + - targetKind + - targetName + type: object + type: array + required: + - image + - keep + - repository + - schedule + - suspend + - targets + type: object + status: + description: BackupConfigurationStatus defines the observed state of BackupConfiguration + properties: + activeCronJob: + type: boolean + activeSidecar: + type: boolean + lastBackupTime: + format: date-time + type: string + suspended: + 
type: boolean + required: + - activeCronJob + - activeSidecar + - suspended + type: object + type: object + served: true + storage: true + subresources: + status: {} diff --git a/config/crd/bases/formol.desmojim.fr_backupsessions.yaml b/config/crd/bases/formol.desmojim.fr_backupsessions.yaml new file mode 100644 index 0000000..51cf255 --- /dev/null +++ b/config/crd/bases/formol.desmojim.fr_backupsessions.yaml @@ -0,0 +1,173 @@ +--- +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + annotations: + controller-gen.kubebuilder.io/version: v0.10.0 + creationTimestamp: null + name: backupsessions.formol.desmojim.fr +spec: + group: formol.desmojim.fr + names: + kind: BackupSession + listKind: BackupSessionList + plural: backupsessions + shortNames: + - bs + singular: backupsession + scope: Namespaced + versions: + - additionalPrinterColumns: + - jsonPath: .spec.ref.name + name: Ref + type: string + - jsonPath: .status.state + name: State + type: string + - format: date-time + jsonPath: .status.startTime + name: Started + type: string + - jsonPath: .status.keep + name: Keep + type: string + name: v1alpha1 + schema: + openAPIV3Schema: + description: BackupSession is the Schema for the backupsessions API + properties: + apiVersion: + description: 'APIVersion defines the versioned schema of this representation + of an object. Servers should convert recognized schemas to the latest + internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' + type: string + kind: + description: 'Kind is a string value representing the REST resource this + object represents. Servers may infer this from the endpoint the client + submits requests to. Cannot be updated. In CamelCase. 
More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + type: string + metadata: + type: object + spec: + description: BackupSessionSpec defines the desired state of BackupSession + properties: + ref: + description: "ObjectReference contains enough information to let you + inspect or modify the referred object. --- New uses of this type + are discouraged because of difficulty describing its usage when + embedded in APIs. 1. Ignored fields. It includes many fields which + are not generally honored. For instance, ResourceVersion and FieldPath + are both very rarely valid in actual usage. 2. Invalid usage help. + \ It is impossible to add specific help for individual usage. In + most embedded usages, there are particular restrictions like, \"must + refer only to types A and B\" or \"UID not honored\" or \"name must + be restricted\". Those cannot be well described when embedded. 3. + Inconsistent validation. Because the usages are different, the + validation rules are different by usage, which makes it hard for + users to predict what will happen. 4. The fields are both imprecise + and overly precise. Kind is not a precise mapping to a URL. This + can produce ambiguity during interpretation and require a REST mapping. + \ In most cases, the dependency is on the group,resource tuple and + the version of the actual struct is irrelevant. 5. We cannot easily + change it. Because this type is embedded in many locations, updates + to this type will affect numerous schemas. Don't make new APIs + embed an underspecified API type they do not control. \n Instead + of using this type, create a locally provided and used type that + is well-focused on your reference. For example, ServiceReferences + for admission registration: https://github.com/kubernetes/api/blob/release-1.17/admissionregistration/v1/types.go#L533 + ." + properties: + apiVersion: + description: API version of the referent. 
+ type: string + fieldPath: + description: 'If referring to a piece of an object instead of + an entire object, this string should contain a valid JSON/Go + field access statement, such as desiredState.manifest.containers[2]. + For example, if the object reference is to a container within + a pod, this would take on a value like: "spec.containers{name}" + (where "name" refers to the name of the container that triggered + the event) or if no container name is specified "spec.containers[2]" + (container with index 2 in this pod). This syntax is chosen + only to have some well-defined way of referencing a part of + an object. TODO: this design is not final and this field is + subject to change in the future.' + type: string + kind: + description: 'Kind of the referent. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + type: string + name: + description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names' + type: string + namespace: + description: 'Namespace of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/namespaces/' + type: string + resourceVersion: + description: 'Specific resourceVersion to which this reference + is made, if any. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#concurrency-control-and-consistency' + type: string + uid: + description: 'UID of the referent. 
More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#uids' + type: string + type: object + x-kubernetes-map-type: atomic + required: + - ref + type: object + status: + description: BackupSessionStatus defines the observed state of BackupSession + properties: + keep: + type: string + startTime: + format: date-time + type: string + state: + type: string + target: + items: + properties: + backupType: + enum: + - Online + - Snapshot + - Job + type: string + duration: + type: string + snapshotId: + type: string + startTime: + format: date-time + type: string + state: + type: string + targetKind: + enum: + - Deployment + - StatefulSet + - Pod + type: string + targetName: + type: string + try: + type: integer + required: + - backupType + - startTime + - state + - targetKind + - targetName + - try + type: object + type: array + required: + - keep + - startTime + - state + type: object + type: object + served: true + storage: true + subresources: + status: {} diff --git a/config/crd/bases/formol.desmojim.fr_functions.yaml b/config/crd/bases/formol.desmojim.fr_functions.yaml new file mode 100644 index 0000000..7b3a4f2 --- /dev/null +++ b/config/crd/bases/formol.desmojim.fr_functions.yaml @@ -0,0 +1,1202 @@ +--- +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + annotations: + controller-gen.kubebuilder.io/version: v0.10.0 + creationTimestamp: null + name: functions.formol.desmojim.fr +spec: + group: formol.desmojim.fr + names: + kind: Function + listKind: FunctionList + plural: functions + singular: function + scope: Namespaced + versions: + - name: v1alpha1 + schema: + openAPIV3Schema: + description: Function is the Schema for the functions API + properties: + apiVersion: + description: 'APIVersion defines the versioned schema of this representation + of an object. Servers should convert recognized schemas to the latest + internal value, and may reject unrecognized values. 
More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' + type: string + kind: + description: 'Kind is a string value representing the REST resource this + object represents. Servers may infer this from the endpoint the client + submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + type: string + metadata: + type: object + spec: + description: A single application container that you want to run within + a pod. + properties: + args: + description: 'Arguments to the entrypoint. The container image''s + CMD is used if this is not provided. Variable references $(VAR_NAME) + are expanded using the container''s environment. If a variable cannot + be resolved, the reference in the input string will be unchanged. + Double $$ are reduced to a single $, which allows for escaping the + $(VAR_NAME) syntax: i.e. "$$(VAR_NAME)" will produce the string + literal "$(VAR_NAME)". Escaped references will never be expanded, + regardless of whether the variable exists or not. Cannot be updated. + More info: https://kubernetes.io/docs/tasks/inject-data-application/define-command-argument-container/#running-a-command-in-a-shell' + items: + type: string + type: array + command: + description: 'Entrypoint array. Not executed within a shell. The container + image''s ENTRYPOINT is used if this is not provided. Variable references + $(VAR_NAME) are expanded using the container''s environment. If + a variable cannot be resolved, the reference in the input string + will be unchanged. Double $$ are reduced to a single $, which allows + for escaping the $(VAR_NAME) syntax: i.e. "$$(VAR_NAME)" will produce + the string literal "$(VAR_NAME)". Escaped references will never + be expanded, regardless of whether the variable exists or not. Cannot + be updated. 
More info: https://kubernetes.io/docs/tasks/inject-data-application/define-command-argument-container/#running-a-command-in-a-shell' + items: + type: string + type: array + env: + description: List of environment variables to set in the container. + Cannot be updated. + items: + description: EnvVar represents an environment variable present in + a Container. + properties: + name: + description: Name of the environment variable. Must be a C_IDENTIFIER. + type: string + value: + description: 'Variable references $(VAR_NAME) are expanded using + the previously defined environment variables in the container + and any service environment variables. If a variable cannot + be resolved, the reference in the input string will be unchanged. + Double $$ are reduced to a single $, which allows for escaping + the $(VAR_NAME) syntax: i.e. "$$(VAR_NAME)" will produce the + string literal "$(VAR_NAME)". Escaped references will never + be expanded, regardless of whether the variable exists or + not. Defaults to "".' + type: string + valueFrom: + description: Source for the environment variable's value. Cannot + be used if value is not empty. + properties: + configMapKeyRef: + description: Selects a key of a ConfigMap. + properties: + key: + description: The key to select. + type: string + name: + description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, kind, uid?' + type: string + optional: + description: Specify whether the ConfigMap or its key + must be defined + type: boolean + required: + - key + type: object + x-kubernetes-map-type: atomic + fieldRef: + description: 'Selects a field of the pod: supports metadata.name, + metadata.namespace, `metadata.labels['''']`, `metadata.annotations['''']`, + spec.nodeName, spec.serviceAccountName, status.hostIP, + status.podIP, status.podIPs.' 
+ properties: + apiVersion: + description: Version of the schema the FieldPath is + written in terms of, defaults to "v1". + type: string + fieldPath: + description: Path of the field to select in the specified + API version. + type: string + required: + - fieldPath + type: object + x-kubernetes-map-type: atomic + resourceFieldRef: + description: 'Selects a resource of the container: only + resources limits and requests (limits.cpu, limits.memory, + limits.ephemeral-storage, requests.cpu, requests.memory + and requests.ephemeral-storage) are currently supported.' + properties: + containerName: + description: 'Container name: required for volumes, + optional for env vars' + type: string + divisor: + anyOf: + - type: integer + - type: string + description: Specifies the output format of the exposed + resources, defaults to "1" + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + resource: + description: 'Required: resource to select' + type: string + required: + - resource + type: object + x-kubernetes-map-type: atomic + secretKeyRef: + description: Selects a key of a secret in the pod's namespace + properties: + key: + description: The key of the secret to select from. Must + be a valid secret key. + type: string + name: + description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, kind, uid?' + type: string + optional: + description: Specify whether the Secret or its key must + be defined + type: boolean + required: + - key + type: object + x-kubernetes-map-type: atomic + type: object + required: + - name + type: object + type: array + envFrom: + description: List of sources to populate environment variables in + the container. The keys defined within a source must be a C_IDENTIFIER. 
+ All invalid keys will be reported as an event when the container + is starting. When a key exists in multiple sources, the value associated + with the last source will take precedence. Values defined by an + Env with a duplicate key will take precedence. Cannot be updated. + items: + description: EnvFromSource represents the source of a set of ConfigMaps + properties: + configMapRef: + description: The ConfigMap to select from + properties: + name: + description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, kind, uid?' + type: string + optional: + description: Specify whether the ConfigMap must be defined + type: boolean + type: object + x-kubernetes-map-type: atomic + prefix: + description: An optional identifier to prepend to each key in + the ConfigMap. Must be a C_IDENTIFIER. + type: string + secretRef: + description: The Secret to select from + properties: + name: + description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, kind, uid?' + type: string + optional: + description: Specify whether the Secret must be defined + type: boolean + type: object + x-kubernetes-map-type: atomic + type: object + type: array + image: + description: 'Container image name. More info: https://kubernetes.io/docs/concepts/containers/images + This field is optional to allow higher level config management to + default or override container images in workload controllers like + Deployments and StatefulSets.' + type: string + imagePullPolicy: + description: 'Image pull policy. One of Always, Never, IfNotPresent. + Defaults to Always if :latest tag is specified, or IfNotPresent + otherwise. Cannot be updated. 
More info: https://kubernetes.io/docs/concepts/containers/images#updating-images' + type: string + lifecycle: + description: Actions that the management system should take in response + to container lifecycle events. Cannot be updated. + properties: + postStart: + description: 'PostStart is called immediately after a container + is created. If the handler fails, the container is terminated + and restarted according to its restart policy. Other management + of the container blocks until the hook completes. More info: + https://kubernetes.io/docs/concepts/containers/container-lifecycle-hooks/#container-hooks' + properties: + exec: + description: Exec specifies the action to take. + properties: + command: + description: Command is the command line to execute inside + the container, the working directory for the command is + root ('/') in the container's filesystem. The command + is simply exec'd, it is not run inside a shell, so traditional + shell instructions ('|', etc) won't work. To use a shell, + you need to explicitly call out to that shell. Exit + status of 0 is treated as live/healthy and non-zero + is unhealthy. + items: + type: string + type: array + type: object + httpGet: + description: HTTPGet specifies the http request to perform. + properties: + host: + description: Host name to connect to, defaults to the + pod IP. You probably want to set "Host" in httpHeaders + instead. + type: string + httpHeaders: + description: Custom headers to set in the request. HTTP + allows repeated headers. + items: + description: HTTPHeader describes a custom header to + be used in HTTP probes + properties: + name: + description: The header field name + type: string + value: + description: The header field value + type: string + required: + - name + - value + type: object + type: array + path: + description: Path to access on the HTTP server. 
+ type: string + port: + anyOf: + - type: integer + - type: string + description: Name or number of the port to access on the + container. Number must be in the range 1 to 65535. Name + must be an IANA_SVC_NAME. + x-kubernetes-int-or-string: true + scheme: + description: Scheme to use for connecting to the host. + Defaults to HTTP. + type: string + required: + - port + type: object + tcpSocket: + description: Deprecated. TCPSocket is NOT supported as a LifecycleHandler + and kept for the backward compatibility. There are no validation + of this field and lifecycle hooks will fail in runtime when + tcp handler is specified. + properties: + host: + description: 'Optional: Host name to connect to, defaults + to the pod IP.' + type: string + port: + anyOf: + - type: integer + - type: string + description: Number or name of the port to access on the + container. Number must be in the range 1 to 65535. Name + must be an IANA_SVC_NAME. + x-kubernetes-int-or-string: true + required: + - port + type: object + type: object + preStop: + description: 'PreStop is called immediately before a container + is terminated due to an API request or management event such + as liveness/startup probe failure, preemption, resource contention, + etc. The handler is not called if the container crashes or exits. + The Pod''s termination grace period countdown begins before + the PreStop hook is executed. Regardless of the outcome of the + handler, the container will eventually terminate within the + Pod''s termination grace period (unless delayed by finalizers). + Other management of the container blocks until the hook completes + or until the termination grace period is reached. More info: + https://kubernetes.io/docs/concepts/containers/container-lifecycle-hooks/#container-hooks' + properties: + exec: + description: Exec specifies the action to take. 
+ properties: + command: + description: Command is the command line to execute inside + the container, the working directory for the command is + root ('/') in the container's filesystem. The command + is simply exec'd, it is not run inside a shell, so traditional + shell instructions ('|', etc) won't work. To use a shell, + you need to explicitly call out to that shell. Exit + status of 0 is treated as live/healthy and non-zero + is unhealthy. + items: + type: string + type: array + type: object + httpGet: + description: HTTPGet specifies the http request to perform. + properties: + host: + description: Host name to connect to, defaults to the + pod IP. You probably want to set "Host" in httpHeaders + instead. + type: string + httpHeaders: + description: Custom headers to set in the request. HTTP + allows repeated headers. + items: + description: HTTPHeader describes a custom header to + be used in HTTP probes + properties: + name: + description: The header field name + type: string + value: + description: The header field value + type: string + required: + - name + - value + type: object + type: array + path: + description: Path to access on the HTTP server. + type: string + port: + anyOf: + - type: integer + - type: string + description: Name or number of the port to access on the + container. Number must be in the range 1 to 65535. Name + must be an IANA_SVC_NAME. + x-kubernetes-int-or-string: true + scheme: + description: Scheme to use for connecting to the host. + Defaults to HTTP. + type: string + required: + - port + type: object + tcpSocket: + description: Deprecated. TCPSocket is NOT supported as a LifecycleHandler + and kept for the backward compatibility. There are no validation + of this field and lifecycle hooks will fail in runtime when + tcp handler is specified. + properties: + host: + description: 'Optional: Host name to connect to, defaults + to the pod IP.' 
+ type: string + port: + anyOf: + - type: integer + - type: string + description: Number or name of the port to access on the + container. Number must be in the range 1 to 65535. Name + must be an IANA_SVC_NAME. + x-kubernetes-int-or-string: true + required: + - port + type: object + type: object + type: object + livenessProbe: + description: 'Periodic probe of container liveness. Container will + be restarted if the probe fails. Cannot be updated. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes' + properties: + exec: + description: Exec specifies the action to take. + properties: + command: + description: Command is the command line to execute inside + the container, the working directory for the command is + root ('/') in the container's filesystem. The command is + simply exec'd, it is not run inside a shell, so traditional + shell instructions ('|', etc) won't work. To use a shell, + you need to explicitly call out to that shell. Exit status + of 0 is treated as live/healthy and non-zero is unhealthy. + items: + type: string + type: array + type: object + failureThreshold: + description: Minimum consecutive failures for the probe to be + considered failed after having succeeded. Defaults to 3. Minimum + value is 1. + format: int32 + type: integer + grpc: + description: GRPC specifies an action involving a GRPC port. This + is a beta field and requires enabling GRPCContainerProbe feature + gate. + properties: + port: + description: Port number of the gRPC service. Number must + be in the range 1 to 65535. + format: int32 + type: integer + service: + description: "Service is the name of the service to place + in the gRPC HealthCheckRequest (see https://github.com/grpc/grpc/blob/master/doc/health-checking.md). + \n If this is not specified, the default behavior is defined + by gRPC." + type: string + required: + - port + type: object + httpGet: + description: HTTPGet specifies the http request to perform. 
+ properties: + host: + description: Host name to connect to, defaults to the pod + IP. You probably want to set "Host" in httpHeaders instead. + type: string + httpHeaders: + description: Custom headers to set in the request. HTTP allows + repeated headers. + items: + description: HTTPHeader describes a custom header to be + used in HTTP probes + properties: + name: + description: The header field name + type: string + value: + description: The header field value + type: string + required: + - name + - value + type: object + type: array + path: + description: Path to access on the HTTP server. + type: string + port: + anyOf: + - type: integer + - type: string + description: Name or number of the port to access on the container. + Number must be in the range 1 to 65535. Name must be an + IANA_SVC_NAME. + x-kubernetes-int-or-string: true + scheme: + description: Scheme to use for connecting to the host. Defaults + to HTTP. + type: string + required: + - port + type: object + initialDelaySeconds: + description: 'Number of seconds after the container has started + before liveness probes are initiated. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes' + format: int32 + type: integer + periodSeconds: + description: How often (in seconds) to perform the probe. Default + to 10 seconds. Minimum value is 1. + format: int32 + type: integer + successThreshold: + description: Minimum consecutive successes for the probe to be + considered successful after having failed. Defaults to 1. Must + be 1 for liveness and startup. Minimum value is 1. + format: int32 + type: integer + tcpSocket: + description: TCPSocket specifies an action involving a TCP port. + properties: + host: + description: 'Optional: Host name to connect to, defaults + to the pod IP.' + type: string + port: + anyOf: + - type: integer + - type: string + description: Number or name of the port to access on the container. + Number must be in the range 1 to 65535. 
Name must be an + IANA_SVC_NAME. + x-kubernetes-int-or-string: true + required: + - port + type: object + terminationGracePeriodSeconds: + description: Optional duration in seconds the pod needs to terminate + gracefully upon probe failure. The grace period is the duration + in seconds after the processes running in the pod are sent a + termination signal and the time when the processes are forcibly + halted with a kill signal. Set this value longer than the expected + cleanup time for your process. If this value is nil, the pod's + terminationGracePeriodSeconds will be used. Otherwise, this + value overrides the value provided by the pod spec. Value must + be non-negative integer. The value zero indicates stop immediately + via the kill signal (no opportunity to shut down). This is a + beta field and requires enabling ProbeTerminationGracePeriod + feature gate. Minimum value is 1. spec.terminationGracePeriodSeconds + is used if unset. + format: int64 + type: integer + timeoutSeconds: + description: 'Number of seconds after which the probe times out. + Defaults to 1 second. Minimum value is 1. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes' + format: int32 + type: integer + type: object + name: + description: Name of the container specified as a DNS_LABEL. Each + container in a pod must have a unique name (DNS_LABEL). Cannot be + updated. + type: string + ports: + description: List of ports to expose from the container. Not specifying + a port here DOES NOT prevent that port from being exposed. Any port + which is listening on the default "0.0.0.0" address inside a container + will be accessible from the network. Modifying this array with strategic + merge patch may corrupt the data. For more information See https://github.com/kubernetes/kubernetes/issues/108255. + Cannot be updated. + items: + description: ContainerPort represents a network port in a single + container. 
+ properties: + containerPort: + description: Number of port to expose on the pod's IP address. + This must be a valid port number, 0 < x < 65536. + format: int32 + type: integer + hostIP: + description: What host IP to bind the external port to. + type: string + hostPort: + description: Number of port to expose on the host. If specified, + this must be a valid port number, 0 < x < 65536. If HostNetwork + is specified, this must match ContainerPort. Most containers + do not need this. + format: int32 + type: integer + name: + description: If specified, this must be an IANA_SVC_NAME and + unique within the pod. Each named port in a pod must have + a unique name. Name for the port that can be referred to by + services. + type: string + protocol: + default: TCP + description: Protocol for port. Must be UDP, TCP, or SCTP. Defaults + to "TCP". + type: string + required: + - containerPort + type: object + type: array + x-kubernetes-list-map-keys: + - containerPort + - protocol + x-kubernetes-list-type: map + readinessProbe: + description: 'Periodic probe of container service readiness. Container + will be removed from service endpoints if the probe fails. Cannot + be updated. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes' + properties: + exec: + description: Exec specifies the action to take. + properties: + command: + description: Command is the command line to execute inside + the container, the working directory for the command is + root ('/') in the container's filesystem. The command is + simply exec'd, it is not run inside a shell, so traditional + shell instructions ('|', etc) won't work. To use a shell, + you need to explicitly call out to that shell. Exit status + of 0 is treated as live/healthy and non-zero is unhealthy. + items: + type: string + type: array + type: object + failureThreshold: + description: Minimum consecutive failures for the probe to be + considered failed after having succeeded. Defaults to 3. 
Minimum + value is 1. + format: int32 + type: integer + grpc: + description: GRPC specifies an action involving a GRPC port. This + is a beta field and requires enabling GRPCContainerProbe feature + gate. + properties: + port: + description: Port number of the gRPC service. Number must + be in the range 1 to 65535. + format: int32 + type: integer + service: + description: "Service is the name of the service to place + in the gRPC HealthCheckRequest (see https://github.com/grpc/grpc/blob/master/doc/health-checking.md). + \n If this is not specified, the default behavior is defined + by gRPC." + type: string + required: + - port + type: object + httpGet: + description: HTTPGet specifies the http request to perform. + properties: + host: + description: Host name to connect to, defaults to the pod + IP. You probably want to set "Host" in httpHeaders instead. + type: string + httpHeaders: + description: Custom headers to set in the request. HTTP allows + repeated headers. + items: + description: HTTPHeader describes a custom header to be + used in HTTP probes + properties: + name: + description: The header field name + type: string + value: + description: The header field value + type: string + required: + - name + - value + type: object + type: array + path: + description: Path to access on the HTTP server. + type: string + port: + anyOf: + - type: integer + - type: string + description: Name or number of the port to access on the container. + Number must be in the range 1 to 65535. Name must be an + IANA_SVC_NAME. + x-kubernetes-int-or-string: true + scheme: + description: Scheme to use for connecting to the host. Defaults + to HTTP. + type: string + required: + - port + type: object + initialDelaySeconds: + description: 'Number of seconds after the container has started + before liveness probes are initiated. 
More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes' + format: int32 + type: integer + periodSeconds: + description: How often (in seconds) to perform the probe. Default + to 10 seconds. Minimum value is 1. + format: int32 + type: integer + successThreshold: + description: Minimum consecutive successes for the probe to be + considered successful after having failed. Defaults to 1. Must + be 1 for liveness and startup. Minimum value is 1. + format: int32 + type: integer + tcpSocket: + description: TCPSocket specifies an action involving a TCP port. + properties: + host: + description: 'Optional: Host name to connect to, defaults + to the pod IP.' + type: string + port: + anyOf: + - type: integer + - type: string + description: Number or name of the port to access on the container. + Number must be in the range 1 to 65535. Name must be an + IANA_SVC_NAME. + x-kubernetes-int-or-string: true + required: + - port + type: object + terminationGracePeriodSeconds: + description: Optional duration in seconds the pod needs to terminate + gracefully upon probe failure. The grace period is the duration + in seconds after the processes running in the pod are sent a + termination signal and the time when the processes are forcibly + halted with a kill signal. Set this value longer than the expected + cleanup time for your process. If this value is nil, the pod's + terminationGracePeriodSeconds will be used. Otherwise, this + value overrides the value provided by the pod spec. Value must + be non-negative integer. The value zero indicates stop immediately + via the kill signal (no opportunity to shut down). This is a + beta field and requires enabling ProbeTerminationGracePeriod + feature gate. Minimum value is 1. spec.terminationGracePeriodSeconds + is used if unset. + format: int64 + type: integer + timeoutSeconds: + description: 'Number of seconds after which the probe times out. + Defaults to 1 second. Minimum value is 1. 
More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes' + format: int32 + type: integer + type: object + resources: + description: 'Compute Resources required by this container. Cannot + be updated. More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/' + properties: + limits: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: 'Limits describes the maximum amount of compute resources + allowed. More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/' + type: object + requests: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: 'Requests describes the minimum amount of compute + resources required. If Requests is omitted for a container, + it defaults to Limits if that is explicitly specified, otherwise + to an implementation-defined value. More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/' + type: object + type: object + securityContext: + description: 'SecurityContext defines the security options the container + should be run with. If set, the fields of SecurityContext override + the equivalent fields of PodSecurityContext. More info: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/' + properties: + allowPrivilegeEscalation: + description: 'AllowPrivilegeEscalation controls whether a process + can gain more privileges than its parent process. This bool + directly controls if the no_new_privs flag will be set on the + container process. 
AllowPrivilegeEscalation is true always when + the container is: 1) run as Privileged 2) has CAP_SYS_ADMIN + Note that this field cannot be set when spec.os.name is windows.' + type: boolean + capabilities: + description: The capabilities to add/drop when running containers. + Defaults to the default set of capabilities granted by the container + runtime. Note that this field cannot be set when spec.os.name + is windows. + properties: + add: + description: Added capabilities + items: + description: Capability represent POSIX capabilities type + type: string + type: array + drop: + description: Removed capabilities + items: + description: Capability represent POSIX capabilities type + type: string + type: array + type: object + privileged: + description: Run container in privileged mode. Processes in privileged + containers are essentially equivalent to root on the host. Defaults + to false. Note that this field cannot be set when spec.os.name + is windows. + type: boolean + procMount: + description: procMount denotes the type of proc mount to use for + the containers. The default is DefaultProcMount which uses the + container runtime defaults for readonly paths and masked paths. + This requires the ProcMountType feature flag to be enabled. + Note that this field cannot be set when spec.os.name is windows. + type: string + readOnlyRootFilesystem: + description: Whether this container has a read-only root filesystem. + Default is false. Note that this field cannot be set when spec.os.name + is windows. + type: boolean + runAsGroup: + description: The GID to run the entrypoint of the container process. + Uses runtime default if unset. May also be set in PodSecurityContext. If + set in both SecurityContext and PodSecurityContext, the value + specified in SecurityContext takes precedence. Note that this + field cannot be set when spec.os.name is windows. 
+ format: int64 + type: integer + runAsNonRoot: + description: Indicates that the container must run as a non-root + user. If true, the Kubelet will validate the image at runtime + to ensure that it does not run as UID 0 (root) and fail to start + the container if it does. If unset or false, no such validation + will be performed. May also be set in PodSecurityContext. If + set in both SecurityContext and PodSecurityContext, the value + specified in SecurityContext takes precedence. + type: boolean + runAsUser: + description: The UID to run the entrypoint of the container process. + Defaults to user specified in image metadata if unspecified. + May also be set in PodSecurityContext. If set in both SecurityContext + and PodSecurityContext, the value specified in SecurityContext + takes precedence. Note that this field cannot be set when spec.os.name + is windows. + format: int64 + type: integer + seLinuxOptions: + description: The SELinux context to be applied to the container. + If unspecified, the container runtime will allocate a random + SELinux context for each container. May also be set in PodSecurityContext. If + set in both SecurityContext and PodSecurityContext, the value + specified in SecurityContext takes precedence. Note that this + field cannot be set when spec.os.name is windows. + properties: + level: + description: Level is SELinux level label that applies to + the container. + type: string + role: + description: Role is a SELinux role label that applies to + the container. + type: string + type: + description: Type is a SELinux type label that applies to + the container. + type: string + user: + description: User is a SELinux user label that applies to + the container. + type: string + type: object + seccompProfile: + description: The seccomp options to use by this container. If + seccomp options are provided at both the pod & container level, + the container options override the pod options. 
Note that this + field cannot be set when spec.os.name is windows. + properties: + localhostProfile: + description: localhostProfile indicates a profile defined + in a file on the node should be used. The profile must be + preconfigured on the node to work. Must be a descending + path, relative to the kubelet's configured seccomp profile + location. Must only be set if type is "Localhost". + type: string + type: + description: "type indicates which kind of seccomp profile + will be applied. Valid options are: \n Localhost - a profile + defined in a file on the node should be used. RuntimeDefault + - the container runtime default profile should be used. + Unconfined - no profile should be applied." + type: string + required: + - type + type: object + windowsOptions: + description: The Windows specific settings applied to all containers. + If unspecified, the options from the PodSecurityContext will + be used. If set in both SecurityContext and PodSecurityContext, + the value specified in SecurityContext takes precedence. Note + that this field cannot be set when spec.os.name is linux. + properties: + gmsaCredentialSpec: + description: GMSACredentialSpec is where the GMSA admission + webhook (https://github.com/kubernetes-sigs/windows-gmsa) + inlines the contents of the GMSA credential spec named by + the GMSACredentialSpecName field. + type: string + gmsaCredentialSpecName: + description: GMSACredentialSpecName is the name of the GMSA + credential spec to use. + type: string + hostProcess: + description: HostProcess determines if a container should + be run as a 'Host Process' container. This field is alpha-level + and will only be honored by components that enable the WindowsHostProcessContainers + feature flag. Setting this field without the feature flag + will result in errors when validating the Pod. 
All of a + Pod's containers must have the same effective HostProcess + value (it is not allowed to have a mix of HostProcess containers + and non-HostProcess containers). In addition, if HostProcess + is true then HostNetwork must also be set to true. + type: boolean + runAsUserName: + description: The UserName in Windows to run the entrypoint + of the container process. Defaults to the user specified + in image metadata if unspecified. May also be set in PodSecurityContext. + If set in both SecurityContext and PodSecurityContext, the + value specified in SecurityContext takes precedence. + type: string + type: object + type: object + startupProbe: + description: 'StartupProbe indicates that the Pod has successfully + initialized. If specified, no other probes are executed until this + completes successfully. If this probe fails, the Pod will be restarted, + just as if the livenessProbe failed. This can be used to provide + different probe parameters at the beginning of a Pod''s lifecycle, + when it might take a long time to load data or warm a cache, than + during steady-state operation. This cannot be updated. More info: + https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes' + properties: + exec: + description: Exec specifies the action to take. + properties: + command: + description: Command is the command line to execute inside + the container, the working directory for the command is + root ('/') in the container's filesystem. The command is + simply exec'd, it is not run inside a shell, so traditional + shell instructions ('|', etc) won't work. To use a shell, + you need to explicitly call out to that shell. Exit status + of 0 is treated as live/healthy and non-zero is unhealthy. + items: + type: string + type: array + type: object + failureThreshold: + description: Minimum consecutive failures for the probe to be + considered failed after having succeeded. Defaults to 3. Minimum + value is 1. 
+ format: int32 + type: integer + grpc: + description: GRPC specifies an action involving a GRPC port. This + is a beta field and requires enabling GRPCContainerProbe feature + gate. + properties: + port: + description: Port number of the gRPC service. Number must + be in the range 1 to 65535. + format: int32 + type: integer + service: + description: "Service is the name of the service to place + in the gRPC HealthCheckRequest (see https://github.com/grpc/grpc/blob/master/doc/health-checking.md). + \n If this is not specified, the default behavior is defined + by gRPC." + type: string + required: + - port + type: object + httpGet: + description: HTTPGet specifies the http request to perform. + properties: + host: + description: Host name to connect to, defaults to the pod + IP. You probably want to set "Host" in httpHeaders instead. + type: string + httpHeaders: + description: Custom headers to set in the request. HTTP allows + repeated headers. + items: + description: HTTPHeader describes a custom header to be + used in HTTP probes + properties: + name: + description: The header field name + type: string + value: + description: The header field value + type: string + required: + - name + - value + type: object + type: array + path: + description: Path to access on the HTTP server. + type: string + port: + anyOf: + - type: integer + - type: string + description: Name or number of the port to access on the container. + Number must be in the range 1 to 65535. Name must be an + IANA_SVC_NAME. + x-kubernetes-int-or-string: true + scheme: + description: Scheme to use for connecting to the host. Defaults + to HTTP. + type: string + required: + - port + type: object + initialDelaySeconds: + description: 'Number of seconds after the container has started + before liveness probes are initiated. 
More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes' + format: int32 + type: integer + periodSeconds: + description: How often (in seconds) to perform the probe. Default + to 10 seconds. Minimum value is 1. + format: int32 + type: integer + successThreshold: + description: Minimum consecutive successes for the probe to be + considered successful after having failed. Defaults to 1. Must + be 1 for liveness and startup. Minimum value is 1. + format: int32 + type: integer + tcpSocket: + description: TCPSocket specifies an action involving a TCP port. + properties: + host: + description: 'Optional: Host name to connect to, defaults + to the pod IP.' + type: string + port: + anyOf: + - type: integer + - type: string + description: Number or name of the port to access on the container. + Number must be in the range 1 to 65535. Name must be an + IANA_SVC_NAME. + x-kubernetes-int-or-string: true + required: + - port + type: object + terminationGracePeriodSeconds: + description: Optional duration in seconds the pod needs to terminate + gracefully upon probe failure. The grace period is the duration + in seconds after the processes running in the pod are sent a + termination signal and the time when the processes are forcibly + halted with a kill signal. Set this value longer than the expected + cleanup time for your process. If this value is nil, the pod's + terminationGracePeriodSeconds will be used. Otherwise, this + value overrides the value provided by the pod spec. Value must + be non-negative integer. The value zero indicates stop immediately + via the kill signal (no opportunity to shut down). This is a + beta field and requires enabling ProbeTerminationGracePeriod + feature gate. Minimum value is 1. spec.terminationGracePeriodSeconds + is used if unset. + format: int64 + type: integer + timeoutSeconds: + description: 'Number of seconds after which the probe times out. + Defaults to 1 second. Minimum value is 1. 
More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes' + format: int32 + type: integer + type: object + stdin: + description: Whether this container should allocate a buffer for stdin + in the container runtime. If this is not set, reads from stdin in + the container will always result in EOF. Default is false. + type: boolean + stdinOnce: + description: Whether the container runtime should close the stdin + channel after it has been opened by a single attach. When stdin + is true the stdin stream will remain open across multiple attach + sessions. If stdinOnce is set to true, stdin is opened on container + start, is empty until the first client attaches to stdin, and then + remains open and accepts data until the client disconnects, at which + time stdin is closed and remains closed until the container is restarted. + If this flag is false, a container processes that reads from stdin + will never receive an EOF. Default is false + type: boolean + terminationMessagePath: + description: 'Optional: Path at which the file to which the container''s + termination message will be written is mounted into the container''s + filesystem. Message written is intended to be brief final status, + such as an assertion failure message. Will be truncated by the node + if greater than 4096 bytes. The total message length across all + containers will be limited to 12kb. Defaults to /dev/termination-log. + Cannot be updated.' + type: string + terminationMessagePolicy: + description: Indicate how the termination message should be populated. + File will use the contents of terminationMessagePath to populate + the container status message on both success and failure. FallbackToLogsOnError + will use the last chunk of container log output if the termination + message file is empty and the container exited with an error. The + log output is limited to 2048 bytes or 80 lines, whichever is smaller. + Defaults to File. Cannot be updated. 
+ type: string + tty: + description: Whether this container should allocate a TTY for itself, + also requires 'stdin' to be true. Default is false. + type: boolean + volumeDevices: + description: volumeDevices is the list of block devices to be used + by the container. + items: + description: volumeDevice describes a mapping of a raw block device + within a container. + properties: + devicePath: + description: devicePath is the path inside of the container + that the device will be mapped to. + type: string + name: + description: name must match the name of a persistentVolumeClaim + in the pod + type: string + required: + - devicePath + - name + type: object + type: array + volumeMounts: + description: Pod volumes to mount into the container's filesystem. + Cannot be updated. + items: + description: VolumeMount describes a mounting of a Volume within + a container. + properties: + mountPath: + description: Path within the container at which the volume should + be mounted. Must not contain ':'. + type: string + mountPropagation: + description: mountPropagation determines how mounts are propagated + from the host to container and the other way around. When + not set, MountPropagationNone is used. This field is beta + in 1.10. + type: string + name: + description: This must match the Name of a Volume. + type: string + readOnly: + description: Mounted read-only if true, read-write otherwise + (false or unspecified). Defaults to false. + type: boolean + subPath: + description: Path within the volume from which the container's + volume should be mounted. Defaults to "" (volume's root). + type: string + subPathExpr: + description: Expanded path within the volume from which the + container's volume should be mounted. Behaves similarly to + SubPath but environment variable references $(VAR_NAME) are + expanded using the container's environment. Defaults to "" + (volume's root). SubPathExpr and SubPath are mutually exclusive. 
+ type: string + required: + - mountPath + - name + type: object + type: array + workingDir: + description: Container's working directory. If not specified, the + container runtime's default will be used, which might be configured + in the container image. Cannot be updated. + type: string + required: + - name + type: object + status: + description: FunctionStatus defines the observed state of Function + type: object + type: object + served: true + storage: true + subresources: + status: {} diff --git a/config/crd/bases/formol.desmojim.fr_repoes.yaml b/config/crd/bases/formol.desmojim.fr_repoes.yaml new file mode 100644 index 0000000..d156149 --- /dev/null +++ b/config/crd/bases/formol.desmojim.fr_repoes.yaml @@ -0,0 +1,1597 @@ +--- +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + annotations: + controller-gen.kubebuilder.io/version: v0.10.0 + creationTimestamp: null + name: repoes.formol.desmojim.fr +spec: + group: formol.desmojim.fr + names: + kind: Repo + listKind: RepoList + plural: repoes + singular: repo + scope: Namespaced + versions: + - name: v1alpha1 + schema: + openAPIV3Schema: + description: Repo is the Schema for the repoes API + properties: + apiVersion: + description: 'APIVersion defines the versioned schema of this representation + of an object. Servers should convert recognized schemas to the latest + internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' + type: string + kind: + description: 'Kind is a string value representing the REST resource this + object represents. Servers may infer this from the endpoint the client + submits requests to. Cannot be updated. In CamelCase. 
More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + type: string + metadata: + type: object + spec: + description: RepoSpec defines the desired state of Repo + properties: + backend: + properties: + local: + properties: + awsElasticBlockStore: + description: 'awsElasticBlockStore represents an AWS Disk + resource that is attached to a kubelet''s host machine and + then exposed to the pod. More info: https://kubernetes.io/docs/concepts/storage/volumes#awselasticblockstore' + properties: + fsType: + description: 'fsType is the filesystem type of the volume + that you want to mount. Tip: Ensure that the filesystem + type is supported by the host operating system. Examples: + "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" + if unspecified. More info: https://kubernetes.io/docs/concepts/storage/volumes#awselasticblockstore + TODO: how do we prevent errors in the filesystem from + compromising the machine' + type: string + partition: + description: 'partition is the partition in the volume + that you want to mount. If omitted, the default is to + mount by volume name. Examples: For volume /dev/sda1, + you specify the partition as "1". Similarly, the volume + partition for /dev/sda is "0" (or you can leave the + property empty).' + format: int32 + type: integer + readOnly: + description: 'readOnly value true will force the readOnly + setting in VolumeMounts. More info: https://kubernetes.io/docs/concepts/storage/volumes#awselasticblockstore' + type: boolean + volumeID: + description: 'volumeID is unique ID of the persistent + disk resource in AWS (Amazon EBS volume). More info: + https://kubernetes.io/docs/concepts/storage/volumes#awselasticblockstore' + type: string + required: + - volumeID + type: object + azureDisk: + description: azureDisk represents an Azure Data Disk mount + on the host and bind mount to the pod. 
+ properties: + cachingMode: + description: 'cachingMode is the Host Caching mode: None, + Read Only, Read Write.' + type: string + diskName: + description: diskName is the Name of the data disk in + the blob storage + type: string + diskURI: + description: diskURI is the URI of data disk in the blob + storage + type: string + fsType: + description: fsType is Filesystem type to mount. Must + be a filesystem type supported by the host operating + system. Ex. "ext4", "xfs", "ntfs". Implicitly inferred + to be "ext4" if unspecified. + type: string + kind: + description: 'kind expected values are Shared: multiple + blob disks per storage account Dedicated: single blob + disk per storage account Managed: azure managed data + disk (only in managed availability set). defaults to + shared' + type: string + readOnly: + description: readOnly Defaults to false (read/write). + ReadOnly here will force the ReadOnly setting in VolumeMounts. + type: boolean + required: + - diskName + - diskURI + type: object + azureFile: + description: azureFile represents an Azure File Service mount + on the host and bind mount to the pod. + properties: + readOnly: + description: readOnly defaults to false (read/write). + ReadOnly here will force the ReadOnly setting in VolumeMounts. 
+ type: boolean + secretName: + description: secretName is the name of secret that contains + Azure Storage Account Name and Key + type: string + shareName: + description: shareName is the azure share Name + type: string + required: + - secretName + - shareName + type: object + cephfs: + description: cephFS represents a Ceph FS mount on the host + that shares a pod's lifetime + properties: + monitors: + description: 'monitors is Required: Monitors is a collection + of Ceph monitors More info: https://examples.k8s.io/volumes/cephfs/README.md#how-to-use-it' + items: + type: string + type: array + path: + description: 'path is Optional: Used as the mounted root, + rather than the full Ceph tree, default is /' + type: string + readOnly: + description: 'readOnly is Optional: Defaults to false + (read/write). ReadOnly here will force the ReadOnly + setting in VolumeMounts. More info: https://examples.k8s.io/volumes/cephfs/README.md#how-to-use-it' + type: boolean + secretFile: + description: 'secretFile is Optional: SecretFile is the + path to key ring for User, default is /etc/ceph/user.secret + More info: https://examples.k8s.io/volumes/cephfs/README.md#how-to-use-it' + type: string + secretRef: + description: 'secretRef is Optional: SecretRef is reference + to the authentication secret for User, default is empty. + More info: https://examples.k8s.io/volumes/cephfs/README.md#how-to-use-it' + properties: + name: + description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, kind, + uid?' 
+ type: string + type: object + x-kubernetes-map-type: atomic + user: + description: 'user is optional: User is the rados user + name, default is admin More info: https://examples.k8s.io/volumes/cephfs/README.md#how-to-use-it' + type: string + required: + - monitors + type: object + cinder: + description: 'cinder represents a cinder volume attached and + mounted on kubelets host machine. More info: https://examples.k8s.io/mysql-cinder-pd/README.md' + properties: + fsType: + description: 'fsType is the filesystem type to mount. + Must be a filesystem type supported by the host operating + system. Examples: "ext4", "xfs", "ntfs". Implicitly + inferred to be "ext4" if unspecified. More info: https://examples.k8s.io/mysql-cinder-pd/README.md' + type: string + readOnly: + description: 'readOnly defaults to false (read/write). + ReadOnly here will force the ReadOnly setting in VolumeMounts. + More info: https://examples.k8s.io/mysql-cinder-pd/README.md' + type: boolean + secretRef: + description: 'secretRef is optional: points to a secret + object containing parameters used to connect to OpenStack.' + properties: + name: + description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, kind, + uid?' + type: string + type: object + x-kubernetes-map-type: atomic + volumeID: + description: 'volumeID used to identify the volume in + cinder. More info: https://examples.k8s.io/mysql-cinder-pd/README.md' + type: string + required: + - volumeID + type: object + configMap: + description: configMap represents a configMap that should + populate this volume + properties: + defaultMode: + description: 'defaultMode is optional: mode bits used + to set permissions on created files by default. Must + be an octal value between 0000 and 0777 or a decimal + value between 0 and 511. YAML accepts both octal and + decimal values, JSON requires decimal values for mode + bits. 
Defaults to 0644. Directories within the path + are not affected by this setting. This might be in conflict + with other options that affect the file mode, like fsGroup, + and the result can be other mode bits set.' + format: int32 + type: integer + items: + description: items if unspecified, each key-value pair + in the Data field of the referenced ConfigMap will be + projected into the volume as a file whose name is the + key and content is the value. If specified, the listed + keys will be projected into the specified paths, and + unlisted keys will not be present. If a key is specified + which is not present in the ConfigMap, the volume setup + will error unless it is marked optional. Paths must + be relative and may not contain the '..' path or start + with '..'. + items: + description: Maps a string key to a path within a volume. + properties: + key: + description: key is the key to project. + type: string + mode: + description: 'mode is Optional: mode bits used to + set permissions on this file. Must be an octal + value between 0000 and 0777 or a decimal value + between 0 and 511. YAML accepts both octal and + decimal values, JSON requires decimal values for + mode bits. If not specified, the volume defaultMode + will be used. This might be in conflict with other + options that affect the file mode, like fsGroup, + and the result can be other mode bits set.' + format: int32 + type: integer + path: + description: path is the relative path of the file + to map the key to. May not be an absolute path. + May not contain the path element '..'. May not + start with the string '..'. + type: string + required: + - key + - path + type: object + type: array + name: + description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, kind, uid?' 
+ type: string + optional: + description: optional specify whether the ConfigMap or + its keys must be defined + type: boolean + type: object + x-kubernetes-map-type: atomic + csi: + description: csi (Container Storage Interface) represents + ephemeral storage that is handled by certain external CSI + drivers (Beta feature). + properties: + driver: + description: driver is the name of the CSI driver that + handles this volume. Consult with your admin for the + correct name as registered in the cluster. + type: string + fsType: + description: fsType to mount. Ex. "ext4", "xfs", "ntfs". + If not provided, the empty value is passed to the associated + CSI driver which will determine the default filesystem + to apply. + type: string + nodePublishSecretRef: + description: nodePublishSecretRef is a reference to the + secret object containing sensitive information to pass + to the CSI driver to complete the CSI NodePublishVolume + and NodeUnpublishVolume calls. This field is optional, + and may be empty if no secret is required. If the secret + object contains more than one secret, all secret references + are passed. + properties: + name: + description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, kind, + uid?' + type: string + type: object + x-kubernetes-map-type: atomic + readOnly: + description: readOnly specifies a read-only configuration + for the volume. Defaults to false (read/write). + type: boolean + volumeAttributes: + additionalProperties: + type: string + description: volumeAttributes stores driver-specific properties + that are passed to the CSI driver. Consult your driver's + documentation for supported values. 
+ type: object + required: + - driver + type: object + downwardAPI: + description: downwardAPI represents downward API about the + pod that should populate this volume + properties: + defaultMode: + description: 'Optional: mode bits to use on created files + by default. Must be a Optional: mode bits used to set + permissions on created files by default. Must be an + octal value between 0000 and 0777 or a decimal value + between 0 and 511. YAML accepts both octal and decimal + values, JSON requires decimal values for mode bits. + Defaults to 0644. Directories within the path are not + affected by this setting. This might be in conflict + with other options that affect the file mode, like fsGroup, + and the result can be other mode bits set.' + format: int32 + type: integer + items: + description: Items is a list of downward API volume file + items: + description: DownwardAPIVolumeFile represents information + to create the file containing the pod field + properties: + fieldRef: + description: 'Required: Selects a field of the pod: + only annotations, labels, name and namespace are + supported.' + properties: + apiVersion: + description: Version of the schema the FieldPath + is written in terms of, defaults to "v1". + type: string + fieldPath: + description: Path of the field to select in + the specified API version. + type: string + required: + - fieldPath + type: object + x-kubernetes-map-type: atomic + mode: + description: 'Optional: mode bits used to set permissions + on this file, must be an octal value between 0000 + and 0777 or a decimal value between 0 and 511. + YAML accepts both octal and decimal values, JSON + requires decimal values for mode bits. If not + specified, the volume defaultMode will be used. + This might be in conflict with other options that + affect the file mode, like fsGroup, and the result + can be other mode bits set.' 
+ format: int32 + type: integer + path: + description: 'Required: Path is the relative path + name of the file to be created. Must not be absolute + or contain the ''..'' path. Must be utf-8 encoded. + The first item of the relative path must not start + with ''..''' + type: string + resourceFieldRef: + description: 'Selects a resource of the container: + only resources limits and requests (limits.cpu, + limits.memory, requests.cpu and requests.memory) + are currently supported.' + properties: + containerName: + description: 'Container name: required for volumes, + optional for env vars' + type: string + divisor: + anyOf: + - type: integer + - type: string + description: Specifies the output format of + the exposed resources, defaults to "1" + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + resource: + description: 'Required: resource to select' + type: string + required: + - resource + type: object + x-kubernetes-map-type: atomic + required: + - path + type: object + type: array + type: object + emptyDir: + description: 'emptyDir represents a temporary directory that + shares a pod''s lifetime. More info: https://kubernetes.io/docs/concepts/storage/volumes#emptydir' + properties: + medium: + description: 'medium represents what type of storage medium + should back this directory. The default is "" which + means to use the node''s default medium. Must be an + empty string (default) or Memory. More info: https://kubernetes.io/docs/concepts/storage/volumes#emptydir' + type: string + sizeLimit: + anyOf: + - type: integer + - type: string + description: 'sizeLimit is the total amount of local storage + required for this EmptyDir volume. The size limit is + also applicable for memory medium. The maximum usage + on memory medium EmptyDir would be the minimum value + between the SizeLimit specified here and the sum of + memory limits of all containers in a pod. 
The default + is nil which means that the limit is undefined. More + info: http://kubernetes.io/docs/user-guide/volumes#emptydir' + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + type: object + ephemeral: + description: "ephemeral represents a volume that is handled + by a cluster storage driver. The volume's lifecycle is tied + to the pod that defines it - it will be created before the + pod starts, and deleted when the pod is removed. \n Use + this if: a) the volume is only needed while the pod runs, + b) features of normal volumes like restoring from snapshot + or capacity tracking are needed, c) the storage driver is + specified through a storage class, and d) the storage driver + supports dynamic volume provisioning through a PersistentVolumeClaim + (see EphemeralVolumeSource for more information on the connection + between this volume type and PersistentVolumeClaim). \n + Use PersistentVolumeClaim or one of the vendor-specific + APIs for volumes that persist for longer than the lifecycle + of an individual pod. \n Use CSI for light-weight local + ephemeral volumes if the CSI driver is meant to be used + that way - see the documentation of the driver for more + information. \n A pod can use both types of ephemeral volumes + and persistent volumes at the same time." + properties: + volumeClaimTemplate: + description: "Will be used to create a stand-alone PVC + to provision the volume. The pod in which this EphemeralVolumeSource + is embedded will be the owner of the PVC, i.e. the PVC + will be deleted together with the pod. The name of + the PVC will be `-` where `` is the name from the `PodSpec.Volumes` array + entry. Pod validation will reject the pod if the concatenated + name is not valid for a PVC (for example, too long). 
+ \n An existing PVC with that name that is not owned + by the pod will *not* be used for the pod to avoid using + an unrelated volume by mistake. Starting the pod is + then blocked until the unrelated PVC is removed. If + such a pre-created PVC is meant to be used by the pod, + the PVC has to updated with an owner reference to the + pod once the pod exists. Normally this should not be + necessary, but it may be useful when manually reconstructing + a broken cluster. \n This field is read-only and no + changes will be made by Kubernetes to the PVC after + it has been created. \n Required, must not be nil." + properties: + metadata: + description: May contain labels and annotations that + will be copied into the PVC when creating it. No + other fields are allowed and will be rejected during + validation. + type: object + spec: + description: The specification for the PersistentVolumeClaim. + The entire content is copied unchanged into the + PVC that gets created from this template. The same + fields as in a PersistentVolumeClaim are also valid + here. + properties: + accessModes: + description: 'accessModes contains the desired + access modes the volume should have. More info: + https://kubernetes.io/docs/concepts/storage/persistent-volumes#access-modes-1' + items: + type: string + type: array + dataSource: + description: 'dataSource field can be used to + specify either: * An existing VolumeSnapshot + object (snapshot.storage.k8s.io/VolumeSnapshot) + * An existing PVC (PersistentVolumeClaim) If + the provisioner or an external controller can + support the specified data source, it will create + a new volume based on the contents of the specified + data source. If the AnyVolumeDataSource feature + gate is enabled, this field will always have + the same contents as the DataSourceRef field.' + properties: + apiGroup: + description: APIGroup is the group for the + resource being referenced. 
If APIGroup is + not specified, the specified Kind must be + in the core API group. For any other third-party + types, APIGroup is required. + type: string + kind: + description: Kind is the type of resource + being referenced + type: string + name: + description: Name is the name of resource + being referenced + type: string + required: + - kind + - name + type: object + x-kubernetes-map-type: atomic + dataSourceRef: + description: 'dataSourceRef specifies the object + from which to populate the volume with data, + if a non-empty volume is desired. This may be + any local object from a non-empty API group + (non core object) or a PersistentVolumeClaim + object. When this field is specified, volume + binding will only succeed if the type of the + specified object matches some installed volume + populator or dynamic provisioner. This field + will replace the functionality of the DataSource + field and as such if both fields are non-empty, + they must have the same value. For backwards + compatibility, both fields (DataSource and DataSourceRef) + will be set to the same value automatically + if one of them is empty and the other is non-empty. + There are two important differences between + DataSource and DataSourceRef: * While DataSource + only allows two specific types of objects, DataSourceRef + allows any non-core object, as well as PersistentVolumeClaim + objects. * While DataSource ignores disallowed + values (dropping them), DataSourceRef preserves + all values, and generates an error if a disallowed + value is specified. (Beta) Using this field + requires the AnyVolumeDataSource feature gate + to be enabled.' + properties: + apiGroup: + description: APIGroup is the group for the + resource being referenced. If APIGroup is + not specified, the specified Kind must be + in the core API group. For any other third-party + types, APIGroup is required. 
+ type: string + kind: + description: Kind is the type of resource + being referenced + type: string + name: + description: Name is the name of resource + being referenced + type: string + required: + - kind + - name + type: object + x-kubernetes-map-type: atomic + resources: + description: 'resources represents the minimum + resources the volume should have. If RecoverVolumeExpansionFailure + feature is enabled users are allowed to specify + resource requirements that are lower than previous + value but must still be higher than capacity + recorded in the status field of the claim. More + info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#resources' + properties: + limits: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: 'Limits describes the maximum + amount of compute resources allowed. More + info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/' + type: object + requests: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: 'Requests describes the minimum + amount of compute resources required. If + Requests is omitted for a container, it + defaults to Limits if that is explicitly + specified, otherwise to an implementation-defined + value. More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/' + type: object + type: object + selector: + description: selector is a label query over volumes + to consider for binding. + properties: + matchExpressions: + description: matchExpressions is a list of + label selector requirements. The requirements + are ANDed. 
+ items: + description: A label selector requirement + is a selector that contains values, a + key, and an operator that relates the + key and values. + properties: + key: + description: key is the label key that + the selector applies to. + type: string + operator: + description: operator represents a key's + relationship to a set of values. Valid + operators are In, NotIn, Exists and + DoesNotExist. + type: string + values: + description: values is an array of string + values. If the operator is In or NotIn, + the values array must be non-empty. + If the operator is Exists or DoesNotExist, + the values array must be empty. This + array is replaced during a strategic + merge patch. + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + matchLabels: + additionalProperties: + type: string + description: matchLabels is a map of {key,value} + pairs. A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, + whose key field is "key", the operator is + "In", and the values array contains only + "value". The requirements are ANDed. + type: object + type: object + x-kubernetes-map-type: atomic + storageClassName: + description: 'storageClassName is the name of + the StorageClass required by the claim. More + info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#class-1' + type: string + volumeMode: + description: volumeMode defines what type of volume + is required by the claim. Value of Filesystem + is implied when not included in claim spec. + type: string + volumeName: + description: volumeName is the binding reference + to the PersistentVolume backing this claim. + type: string + type: object + required: + - spec + type: object + type: object + fc: + description: fc represents a Fibre Channel resource that is + attached to a kubelet's host machine and then exposed to + the pod. + properties: + fsType: + description: 'fsType is the filesystem type to mount. 
+ Must be a filesystem type supported by the host operating + system. Ex. "ext4", "xfs", "ntfs". Implicitly inferred + to be "ext4" if unspecified. TODO: how do we prevent + errors in the filesystem from compromising the machine' + type: string + lun: + description: 'lun is Optional: FC target lun number' + format: int32 + type: integer + readOnly: + description: 'readOnly is Optional: Defaults to false + (read/write). ReadOnly here will force the ReadOnly + setting in VolumeMounts.' + type: boolean + targetWWNs: + description: 'targetWWNs is Optional: FC target worldwide + names (WWNs)' + items: + type: string + type: array + wwids: + description: 'wwids Optional: FC volume world wide identifiers + (wwids) Either wwids or combination of targetWWNs and + lun must be set, but not both simultaneously.' + items: + type: string + type: array + type: object + flexVolume: + description: flexVolume represents a generic volume resource + that is provisioned/attached using an exec based plugin. + properties: + driver: + description: driver is the name of the driver to use for + this volume. + type: string + fsType: + description: fsType is the filesystem type to mount. Must + be a filesystem type supported by the host operating + system. Ex. "ext4", "xfs", "ntfs". The default filesystem + depends on FlexVolume script. + type: string + options: + additionalProperties: + type: string + description: 'options is Optional: this field holds extra + command options if any.' + type: object + readOnly: + description: 'readOnly is Optional: defaults to false + (read/write). ReadOnly here will force the ReadOnly + setting in VolumeMounts.' + type: boolean + secretRef: + description: 'secretRef is Optional: secretRef is reference + to the secret object containing sensitive information + to pass to the plugin scripts. This may be empty if + no secret object is specified. If the secret object + contains more than one secret, all secrets are passed + to the plugin scripts.' 
+ properties: + name: + description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, kind, + uid?' + type: string + type: object + x-kubernetes-map-type: atomic + required: + - driver + type: object + flocker: + description: flocker represents a Flocker volume attached + to a kubelet's host machine. This depends on the Flocker + control service being running + properties: + datasetName: + description: datasetName is Name of the dataset stored + as metadata -> name on the dataset for Flocker should + be considered as deprecated + type: string + datasetUUID: + description: datasetUUID is the UUID of the dataset. This + is unique identifier of a Flocker dataset + type: string + type: object + gcePersistentDisk: + description: 'gcePersistentDisk represents a GCE Disk resource + that is attached to a kubelet''s host machine and then exposed + to the pod. More info: https://kubernetes.io/docs/concepts/storage/volumes#gcepersistentdisk' + properties: + fsType: + description: 'fsType is filesystem type of the volume + that you want to mount. Tip: Ensure that the filesystem + type is supported by the host operating system. Examples: + "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" + if unspecified. More info: https://kubernetes.io/docs/concepts/storage/volumes#gcepersistentdisk + TODO: how do we prevent errors in the filesystem from + compromising the machine' + type: string + partition: + description: 'partition is the partition in the volume + that you want to mount. If omitted, the default is to + mount by volume name. Examples: For volume /dev/sda1, + you specify the partition as "1". Similarly, the volume + partition for /dev/sda is "0" (or you can leave the + property empty). 
More info: https://kubernetes.io/docs/concepts/storage/volumes#gcepersistentdisk' + format: int32 + type: integer + pdName: + description: 'pdName is unique name of the PD resource + in GCE. Used to identify the disk in GCE. More info: + https://kubernetes.io/docs/concepts/storage/volumes#gcepersistentdisk' + type: string + readOnly: + description: 'readOnly here will force the ReadOnly setting + in VolumeMounts. Defaults to false. More info: https://kubernetes.io/docs/concepts/storage/volumes#gcepersistentdisk' + type: boolean + required: + - pdName + type: object + gitRepo: + description: 'gitRepo represents a git repository at a particular + revision. DEPRECATED: GitRepo is deprecated. To provision + a container with a git repo, mount an EmptyDir into an InitContainer + that clones the repo using git, then mount the EmptyDir + into the Pod''s container.' + properties: + directory: + description: directory is the target directory name. Must + not contain or start with '..'. If '.' is supplied, + the volume directory will be the git repository. Otherwise, + if specified, the volume will contain the git repository + in the subdirectory with the given name. + type: string + repository: + description: repository is the URL + type: string + revision: + description: revision is the commit hash for the specified + revision. + type: string + required: + - repository + type: object + glusterfs: + description: 'glusterfs represents a Glusterfs mount on the + host that shares a pod''s lifetime. More info: https://examples.k8s.io/volumes/glusterfs/README.md' + properties: + endpoints: + description: 'endpoints is the endpoint name that details + Glusterfs topology. More info: https://examples.k8s.io/volumes/glusterfs/README.md#create-a-pod' + type: string + path: + description: 'path is the Glusterfs volume path. 
More + info: https://examples.k8s.io/volumes/glusterfs/README.md#create-a-pod' + type: string + readOnly: + description: 'readOnly here will force the Glusterfs volume + to be mounted with read-only permissions. Defaults to + false. More info: https://examples.k8s.io/volumes/glusterfs/README.md#create-a-pod' + type: boolean + required: + - endpoints + - path + type: object + hostPath: + description: 'hostPath represents a pre-existing file or directory + on the host machine that is directly exposed to the container. + This is generally used for system agents or other privileged + things that are allowed to see the host machine. Most containers + will NOT need this. More info: https://kubernetes.io/docs/concepts/storage/volumes#hostpath + --- TODO(jonesdl) We need to restrict who can use host directory + mounts and who can/can not mount host directories as read/write.' + properties: + path: + description: 'path of the directory on the host. If the + path is a symlink, it will follow the link to the real + path. More info: https://kubernetes.io/docs/concepts/storage/volumes#hostpath' + type: string + type: + description: 'type for HostPath Volume Defaults to "" + More info: https://kubernetes.io/docs/concepts/storage/volumes#hostpath' + type: string + required: + - path + type: object + iscsi: + description: 'iscsi represents an ISCSI Disk resource that + is attached to a kubelet''s host machine and then exposed + to the pod. More info: https://examples.k8s.io/volumes/iscsi/README.md' + properties: + chapAuthDiscovery: + description: chapAuthDiscovery defines whether support + iSCSI Discovery CHAP authentication + type: boolean + chapAuthSession: + description: chapAuthSession defines whether support iSCSI + Session CHAP authentication + type: boolean + fsType: + description: 'fsType is the filesystem type of the volume + that you want to mount. Tip: Ensure that the filesystem + type is supported by the host operating system. Examples: + "ext4", "xfs", "ntfs". 
Implicitly inferred to be "ext4" + if unspecified. More info: https://kubernetes.io/docs/concepts/storage/volumes#iscsi + TODO: how do we prevent errors in the filesystem from + compromising the machine' + type: string + initiatorName: + description: initiatorName is the custom iSCSI Initiator + Name. If initiatorName is specified with iscsiInterface + simultaneously, new iSCSI interface : will be created for the connection. + type: string + iqn: + description: iqn is the target iSCSI Qualified Name. + type: string + iscsiInterface: + description: iscsiInterface is the interface Name that + uses an iSCSI transport. Defaults to 'default' (tcp). + type: string + lun: + description: lun represents iSCSI Target Lun number. + format: int32 + type: integer + portals: + description: portals is the iSCSI Target Portal List. + The portal is either an IP or ip_addr:port if the port + is other than default (typically TCP ports 860 and 3260). + items: + type: string + type: array + readOnly: + description: readOnly here will force the ReadOnly setting + in VolumeMounts. Defaults to false. + type: boolean + secretRef: + description: secretRef is the CHAP Secret for iSCSI target + and initiator authentication + properties: + name: + description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, kind, + uid?' + type: string + type: object + x-kubernetes-map-type: atomic + targetPortal: + description: targetPortal is iSCSI Target Portal. The + Portal is either an IP or ip_addr:port if the port is + other than default (typically TCP ports 860 and 3260). + type: string + required: + - iqn + - lun + - targetPortal + type: object + nfs: + description: 'nfs represents an NFS mount on the host that + shares a pod''s lifetime More info: https://kubernetes.io/docs/concepts/storage/volumes#nfs' + properties: + path: + description: 'path that is exported by the NFS server. 
+ More info: https://kubernetes.io/docs/concepts/storage/volumes#nfs' + type: string + readOnly: + description: 'readOnly here will force the NFS export + to be mounted with read-only permissions. Defaults to + false. More info: https://kubernetes.io/docs/concepts/storage/volumes#nfs' + type: boolean + server: + description: 'server is the hostname or IP address of + the NFS server. More info: https://kubernetes.io/docs/concepts/storage/volumes#nfs' + type: string + required: + - path + - server + type: object + persistentVolumeClaim: + description: 'persistentVolumeClaimVolumeSource represents + a reference to a PersistentVolumeClaim in the same namespace. + More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#persistentvolumeclaims' + properties: + claimName: + description: 'claimName is the name of a PersistentVolumeClaim + in the same namespace as the pod using this volume. + More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#persistentvolumeclaims' + type: string + readOnly: + description: readOnly Will force the ReadOnly setting + in VolumeMounts. Default false. + type: boolean + required: + - claimName + type: object + photonPersistentDisk: + description: photonPersistentDisk represents a PhotonController + persistent disk attached and mounted on kubelets host machine + properties: + fsType: + description: fsType is the filesystem type to mount. Must + be a filesystem type supported by the host operating + system. Ex. "ext4", "xfs", "ntfs". Implicitly inferred + to be "ext4" if unspecified. 
+ type: string + pdID: + description: pdID is the ID that identifies Photon Controller + persistent disk + type: string + required: + - pdID + type: object + portworxVolume: + description: portworxVolume represents a portworx volume attached + and mounted on kubelets host machine + properties: + fsType: + description: fSType represents the filesystem type to + mount Must be a filesystem type supported by the host + operating system. Ex. "ext4", "xfs". Implicitly inferred + to be "ext4" if unspecified. + type: string + readOnly: + description: readOnly defaults to false (read/write). + ReadOnly here will force the ReadOnly setting in VolumeMounts. + type: boolean + volumeID: + description: volumeID uniquely identifies a Portworx volume + type: string + required: + - volumeID + type: object + projected: + description: projected items for all in one resources secrets, + configmaps, and downward API + properties: + defaultMode: + description: defaultMode are the mode bits used to set + permissions on created files by default. Must be an + octal value between 0000 and 0777 or a decimal value + between 0 and 511. YAML accepts both octal and decimal + values, JSON requires decimal values for mode bits. + Directories within the path are not affected by this + setting. This might be in conflict with other options + that affect the file mode, like fsGroup, and the result + can be other mode bits set. + format: int32 + type: integer + sources: + description: sources is the list of volume projections + items: + description: Projection that may be projected along + with other supported volume types + properties: + configMap: + description: configMap information about the configMap + data to project + properties: + items: + description: items if unspecified, each key-value + pair in the Data field of the referenced ConfigMap + will be projected into the volume as a file + whose name is the key and content is the value. 
+ If specified, the listed keys will be projected + into the specified paths, and unlisted keys + will not be present. If a key is specified + which is not present in the ConfigMap, the + volume setup will error unless it is marked + optional. Paths must be relative and may not + contain the '..' path or start with '..'. + items: + description: Maps a string key to a path within + a volume. + properties: + key: + description: key is the key to project. + type: string + mode: + description: 'mode is Optional: mode bits + used to set permissions on this file. + Must be an octal value between 0000 + and 0777 or a decimal value between + 0 and 511. YAML accepts both octal and + decimal values, JSON requires decimal + values for mode bits. If not specified, + the volume defaultMode will be used. + This might be in conflict with other + options that affect the file mode, like + fsGroup, and the result can be other + mode bits set.' + format: int32 + type: integer + path: + description: path is the relative path + of the file to map the key to. May not + be an absolute path. May not contain + the path element '..'. May not start + with the string '..'. + type: string + required: + - key + - path + type: object + type: array + name: + description: 'Name of the referent. More info: + https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, + kind, uid?' 
+ type: string + optional: + description: optional specify whether the ConfigMap + or its keys must be defined + type: boolean + type: object + x-kubernetes-map-type: atomic + downwardAPI: + description: downwardAPI information about the downwardAPI + data to project + properties: + items: + description: Items is a list of DownwardAPIVolume + file + items: + description: DownwardAPIVolumeFile represents + information to create the file containing + the pod field + properties: + fieldRef: + description: 'Required: Selects a field + of the pod: only annotations, labels, + name and namespace are supported.' + properties: + apiVersion: + description: Version of the schema + the FieldPath is written in terms + of, defaults to "v1". + type: string + fieldPath: + description: Path of the field to + select in the specified API version. + type: string + required: + - fieldPath + type: object + x-kubernetes-map-type: atomic + mode: + description: 'Optional: mode bits used + to set permissions on this file, must + be an octal value between 0000 and 0777 + or a decimal value between 0 and 511. + YAML accepts both octal and decimal + values, JSON requires decimal values + for mode bits. If not specified, the + volume defaultMode will be used. This + might be in conflict with other options + that affect the file mode, like fsGroup, + and the result can be other mode bits + set.' + format: int32 + type: integer + path: + description: 'Required: Path is the relative + path name of the file to be created. + Must not be absolute or contain the + ''..'' path. Must be utf-8 encoded. + The first item of the relative path + must not start with ''..''' + type: string + resourceFieldRef: + description: 'Selects a resource of the + container: only resources limits and + requests (limits.cpu, limits.memory, + requests.cpu and requests.memory) are + currently supported.' 
+ properties: + containerName: + description: 'Container name: required + for volumes, optional for env vars' + type: string + divisor: + anyOf: + - type: integer + - type: string + description: Specifies the output + format of the exposed resources, + defaults to "1" + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + resource: + description: 'Required: resource to + select' + type: string + required: + - resource + type: object + x-kubernetes-map-type: atomic + required: + - path + type: object + type: array + type: object + secret: + description: secret information about the secret + data to project + properties: + items: + description: items if unspecified, each key-value + pair in the Data field of the referenced Secret + will be projected into the volume as a file + whose name is the key and content is the value. + If specified, the listed keys will be projected + into the specified paths, and unlisted keys + will not be present. If a key is specified + which is not present in the Secret, the volume + setup will error unless it is marked optional. + Paths must be relative and may not contain + the '..' path or start with '..'. + items: + description: Maps a string key to a path within + a volume. + properties: + key: + description: key is the key to project. + type: string + mode: + description: 'mode is Optional: mode bits + used to set permissions on this file. + Must be an octal value between 0000 + and 0777 or a decimal value between + 0 and 511. YAML accepts both octal and + decimal values, JSON requires decimal + values for mode bits. If not specified, + the volume defaultMode will be used. + This might be in conflict with other + options that affect the file mode, like + fsGroup, and the result can be other + mode bits set.' + format: int32 + type: integer + path: + description: path is the relative path + of the file to map the key to. 
May not + be an absolute path. May not contain + the path element '..'. May not start + with the string '..'. + type: string + required: + - key + - path + type: object + type: array + name: + description: 'Name of the referent. More info: + https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, + kind, uid?' + type: string + optional: + description: optional field specify whether + the Secret or its key must be defined + type: boolean + type: object + x-kubernetes-map-type: atomic + serviceAccountToken: + description: serviceAccountToken is information + about the serviceAccountToken data to project + properties: + audience: + description: audience is the intended audience + of the token. A recipient of a token must + identify itself with an identifier specified + in the audience of the token, and otherwise + should reject the token. The audience defaults + to the identifier of the apiserver. + type: string + expirationSeconds: + description: expirationSeconds is the requested + duration of validity of the service account + token. As the token approaches expiration, + the kubelet volume plugin will proactively + rotate the service account token. The kubelet + will start trying to rotate the token if the + token is older than 80 percent of its time + to live or if the token is older than 24 hours.Defaults + to 1 hour and must be at least 10 minutes. + format: int64 + type: integer + path: + description: path is the path relative to the + mount point of the file to project the token + into. 
+ type: string + required: + - path + type: object + type: object + type: array + type: object + quobyte: + description: quobyte represents a Quobyte mount on the host + that shares a pod's lifetime + properties: + group: + description: group to map volume access to Default is + no group + type: string + readOnly: + description: readOnly here will force the Quobyte volume + to be mounted with read-only permissions. Defaults to + false. + type: boolean + registry: + description: registry represents a single or multiple + Quobyte Registry services specified as a string as host:port + pair (multiple entries are separated with commas) which + acts as the central registry for volumes + type: string + tenant: + description: tenant owning the given Quobyte volume in + the Backend Used with dynamically provisioned Quobyte + volumes, value is set by the plugin + type: string + user: + description: user to map volume access to Defaults to + serivceaccount user + type: string + volume: + description: volume is a string that references an already + created Quobyte volume by name. + type: string + required: + - registry + - volume + type: object + rbd: + description: 'rbd represents a Rados Block Device mount on + the host that shares a pod''s lifetime. More info: https://examples.k8s.io/volumes/rbd/README.md' + properties: + fsType: + description: 'fsType is the filesystem type of the volume + that you want to mount. Tip: Ensure that the filesystem + type is supported by the host operating system. Examples: + "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" + if unspecified. More info: https://kubernetes.io/docs/concepts/storage/volumes#rbd + TODO: how do we prevent errors in the filesystem from + compromising the machine' + type: string + image: + description: 'image is the rados image name. More info: + https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it' + type: string + keyring: + description: 'keyring is the path to key ring for RBDUser. 
+ Default is /etc/ceph/keyring. More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it' + type: string + monitors: + description: 'monitors is a collection of Ceph monitors. + More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it' + items: + type: string + type: array + pool: + description: 'pool is the rados pool name. Default is + rbd. More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it' + type: string + readOnly: + description: 'readOnly here will force the ReadOnly setting + in VolumeMounts. Defaults to false. More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it' + type: boolean + secretRef: + description: 'secretRef is name of the authentication + secret for RBDUser. If provided overrides keyring. Default + is nil. More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it' + properties: + name: + description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, kind, + uid?' + type: string + type: object + x-kubernetes-map-type: atomic + user: + description: 'user is the rados user name. Default is + admin. More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it' + type: string + required: + - image + - monitors + type: object + scaleIO: + description: scaleIO represents a ScaleIO persistent volume + attached and mounted on Kubernetes nodes. + properties: + fsType: + description: fsType is the filesystem type to mount. Must + be a filesystem type supported by the host operating + system. Ex. "ext4", "xfs", "ntfs". Default is "xfs". + type: string + gateway: + description: gateway is the host address of the ScaleIO + API Gateway. + type: string + protectionDomain: + description: protectionDomain is the name of the ScaleIO + Protection Domain for the configured storage. + type: string + readOnly: + description: readOnly Defaults to false (read/write). 
+ ReadOnly here will force the ReadOnly setting in VolumeMounts. + type: boolean + secretRef: + description: secretRef references to the secret for ScaleIO + user and other sensitive information. If this is not + provided, Login operation will fail. + properties: + name: + description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, kind, + uid?' + type: string + type: object + x-kubernetes-map-type: atomic + sslEnabled: + description: sslEnabled Flag enable/disable SSL communication + with Gateway, default false + type: boolean + storageMode: + description: storageMode indicates whether the storage + for a volume should be ThickProvisioned or ThinProvisioned. + Default is ThinProvisioned. + type: string + storagePool: + description: storagePool is the ScaleIO Storage Pool associated + with the protection domain. + type: string + system: + description: system is the name of the storage system + as configured in ScaleIO. + type: string + volumeName: + description: volumeName is the name of a volume already + created in the ScaleIO system that is associated with + this volume source. + type: string + required: + - gateway + - secretRef + - system + type: object + secret: + description: 'secret represents a secret that should populate + this volume. More info: https://kubernetes.io/docs/concepts/storage/volumes#secret' + properties: + defaultMode: + description: 'defaultMode is Optional: mode bits used + to set permissions on created files by default. Must + be an octal value between 0000 and 0777 or a decimal + value between 0 and 511. YAML accepts both octal and + decimal values, JSON requires decimal values for mode + bits. Defaults to 0644. Directories within the path + are not affected by this setting. This might be in conflict + with other options that affect the file mode, like fsGroup, + and the result can be other mode bits set.' 
+ format: int32 + type: integer + items: + description: items If unspecified, each key-value pair + in the Data field of the referenced Secret will be projected + into the volume as a file whose name is the key and + content is the value. If specified, the listed keys + will be projected into the specified paths, and unlisted + keys will not be present. If a key is specified which + is not present in the Secret, the volume setup will + error unless it is marked optional. Paths must be relative + and may not contain the '..' path or start with '..'. + items: + description: Maps a string key to a path within a volume. + properties: + key: + description: key is the key to project. + type: string + mode: + description: 'mode is Optional: mode bits used to + set permissions on this file. Must be an octal + value between 0000 and 0777 or a decimal value + between 0 and 511. YAML accepts both octal and + decimal values, JSON requires decimal values for + mode bits. If not specified, the volume defaultMode + will be used. This might be in conflict with other + options that affect the file mode, like fsGroup, + and the result can be other mode bits set.' + format: int32 + type: integer + path: + description: path is the relative path of the file + to map the key to. May not be an absolute path. + May not contain the path element '..'. May not + start with the string '..'. + type: string + required: + - key + - path + type: object + type: array + optional: + description: optional field specify whether the Secret + or its keys must be defined + type: boolean + secretName: + description: 'secretName is the name of the secret in + the pod''s namespace to use. More info: https://kubernetes.io/docs/concepts/storage/volumes#secret' + type: string + type: object + storageos: + description: storageOS represents a StorageOS volume attached + and mounted on Kubernetes nodes. + properties: + fsType: + description: fsType is the filesystem type to mount. 
Must + be a filesystem type supported by the host operating + system. Ex. "ext4", "xfs", "ntfs". Implicitly inferred + to be "ext4" if unspecified. + type: string + readOnly: + description: readOnly defaults to false (read/write). + ReadOnly here will force the ReadOnly setting in VolumeMounts. + type: boolean + secretRef: + description: secretRef specifies the secret to use for + obtaining the StorageOS API credentials. If not specified, + default values will be attempted. + properties: + name: + description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, kind, + uid?' + type: string + type: object + x-kubernetes-map-type: atomic + volumeName: + description: volumeName is the human-readable name of + the StorageOS volume. Volume names are only unique + within a namespace. + type: string + volumeNamespace: + description: volumeNamespace specifies the scope of the + volume within StorageOS. If no namespace is specified + then the Pod's namespace will be used. This allows + the Kubernetes name scoping to be mirrored within StorageOS + for tighter integration. Set VolumeName to any name + to override the default behaviour. Set to "default" + if you are not using namespaces within StorageOS. Namespaces + that do not pre-exist within StorageOS will be created. + type: string + type: object + vsphereVolume: + description: vsphereVolume represents a vSphere volume attached + and mounted on kubelets host machine + properties: + fsType: + description: fsType is filesystem type to mount. Must + be a filesystem type supported by the host operating + system. Ex. "ext4", "xfs", "ntfs". Implicitly inferred + to be "ext4" if unspecified. + type: string + storagePolicyID: + description: storagePolicyID is the storage Policy Based + Management (SPBM) profile ID associated with the StoragePolicyName. 
+ type: string + storagePolicyName: + description: storagePolicyName is the storage Policy Based + Management (SPBM) profile name. + type: string + volumePath: + description: volumePath is the path that identifies vSphere + volume vmdk + type: string + required: + - volumePath + type: object + type: object + s3: + properties: + bucket: + type: string + prefix: + type: string + server: + type: string + required: + - bucket + - server + type: object + type: object + repositorySecrets: + type: string + required: + - backend + - repositorySecrets + type: object + status: + description: RepoStatus defines the observed state of Repo + type: object + type: object + served: true + storage: true + subresources: + status: {} diff --git a/config/crd/bases/formol.desmojim.fr_restoresessions.yaml b/config/crd/bases/formol.desmojim.fr_restoresessions.yaml new file mode 100644 index 0000000..55735bc --- /dev/null +++ b/config/crd/bases/formol.desmojim.fr_restoresessions.yaml @@ -0,0 +1,220 @@ +--- +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + annotations: + controller-gen.kubebuilder.io/version: v0.10.0 + creationTimestamp: null + name: restoresessions.formol.desmojim.fr +spec: + group: formol.desmojim.fr + names: + kind: RestoreSession + listKind: RestoreSessionList + plural: restoresessions + singular: restoresession + scope: Namespaced + versions: + - name: v1alpha1 + schema: + openAPIV3Schema: + description: RestoreSession is the Schema for the restoresessions API + properties: + apiVersion: + description: 'APIVersion defines the versioned schema of this representation + of an object. Servers should convert recognized schemas to the latest + internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' + type: string + kind: + description: 'Kind is a string value representing the REST resource this + object represents. 
Servers may infer this from the endpoint the client + submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + type: string + metadata: + type: object + spec: + description: RestoreSessionSpec defines the desired state of RestoreSession + properties: + backupSession: + properties: + spec: + description: BackupSessionSpec defines the desired state of BackupSession + properties: + ref: + description: "ObjectReference contains enough information + to let you inspect or modify the referred object. --- New + uses of this type are discouraged because of difficulty + describing its usage when embedded in APIs. 1. Ignored fields. + \ It includes many fields which are not generally honored. + \ For instance, ResourceVersion and FieldPath are both very + rarely valid in actual usage. 2. Invalid usage help. It + is impossible to add specific help for individual usage. + \ In most embedded usages, there are particular restrictions + like, \"must refer only to types A and B\" or \"UID not + honored\" or \"name must be restricted\". Those cannot be + well described when embedded. 3. Inconsistent validation. + \ Because the usages are different, the validation rules + are different by usage, which makes it hard for users to + predict what will happen. 4. The fields are both imprecise + and overly precise. Kind is not a precise mapping to a + URL. This can produce ambiguity during interpretation and + require a REST mapping. In most cases, the dependency is + on the group,resource tuple and the version of the actual + struct is irrelevant. 5. We cannot easily change it. Because + this type is embedded in many locations, updates to this + type will affect numerous schemas. Don't make new APIs + embed an underspecified API type they do not control. \n + Instead of using this type, create a locally provided and + used type that is well-focused on your reference. 
For example, + ServiceReferences for admission registration: https://github.com/kubernetes/api/blob/release-1.17/admissionregistration/v1/types.go#L533 + ." + properties: + apiVersion: + description: API version of the referent. + type: string + fieldPath: + description: 'If referring to a piece of an object instead + of an entire object, this string should contain a valid + JSON/Go field access statement, such as desiredState.manifest.containers[2]. + For example, if the object reference is to a container + within a pod, this would take on a value like: "spec.containers{name}" + (where "name" refers to the name of the container that + triggered the event) or if no container name is specified + "spec.containers[2]" (container with index 2 in this + pod). This syntax is chosen only to have some well-defined + way of referencing a part of an object. TODO: this design + is not final and this field is subject to change in + the future.' + type: string + kind: + description: 'Kind of the referent. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + type: string + name: + description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names' + type: string + namespace: + description: 'Namespace of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/namespaces/' + type: string + resourceVersion: + description: 'Specific resourceVersion to which this reference + is made, if any. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#concurrency-control-and-consistency' + type: string + uid: + description: 'UID of the referent. 
More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#uids' + type: string + type: object + x-kubernetes-map-type: atomic + required: + - ref + type: object + status: + description: BackupSessionStatus defines the observed state of + BackupSession + properties: + keep: + type: string + startTime: + format: date-time + type: string + state: + type: string + target: + items: + properties: + backupType: + enum: + - Online + - Snapshot + - Job + type: string + duration: + type: string + snapshotId: + type: string + startTime: + format: date-time + type: string + state: + type: string + targetKind: + enum: + - Deployment + - StatefulSet + - Pod + type: string + targetName: + type: string + try: + type: integer + required: + - backupType + - startTime + - state + - targetKind + - targetName + - try + type: object + type: array + required: + - keep + - startTime + - state + type: object + required: + - spec + - status + type: object + required: + - backupSession + type: object + status: + description: RestoreSessionStatus defines the observed state of RestoreSession + properties: + startTime: + format: date-time + type: string + state: + type: string + targets: + items: + properties: + backupType: + enum: + - Online + - Snapshot + - Job + type: string + duration: + type: string + snapshotId: + type: string + startTime: + format: date-time + type: string + state: + type: string + targetKind: + enum: + - Deployment + - StatefulSet + - Pod + type: string + targetName: + type: string + try: + type: integer + required: + - backupType + - startTime + - state + - targetKind + - targetName + - try + type: object + type: array + type: object + type: object + served: true + storage: true + subresources: + status: {} diff --git a/config/manager/kustomization.yaml b/config/manager/kustomization.yaml new file mode 100644 index 0000000..95875d9 --- /dev/null +++ b/config/manager/kustomization.yaml @@ -0,0 +1,8 @@ +resources: +- manager.yaml +apiVersion: 
kustomize.config.k8s.io/v1beta1 +kind: Kustomization +images: +- name: controller + newName: docker.io/desmo999r/formolcontroller + newTag: v0.5.1 diff --git a/config/manager/manager.yaml b/config/manager/manager.yaml new file mode 100644 index 0000000..dde6d5a --- /dev/null +++ b/config/manager/manager.yaml @@ -0,0 +1,102 @@ +apiVersion: v1 +kind: Namespace +metadata: + labels: + control-plane: controller-manager + app.kubernetes.io/name: namespace + app.kubernetes.io/instance: system + app.kubernetes.io/component: manager + app.kubernetes.io/created-by: formol + app.kubernetes.io/part-of: formol + app.kubernetes.io/managed-by: kustomize + name: system +--- +apiVersion: apps/v1 +kind: Deployment +metadata: + name: controller-manager + namespace: system + labels: + control-plane: controller-manager + app.kubernetes.io/name: deployment + app.kubernetes.io/instance: controller-manager + app.kubernetes.io/component: manager + app.kubernetes.io/created-by: formol + app.kubernetes.io/part-of: formol + app.kubernetes.io/managed-by: kustomize +spec: + selector: + matchLabels: + control-plane: controller-manager + replicas: 1 + template: + metadata: + annotations: + kubectl.kubernetes.io/default-container: manager + labels: + control-plane: controller-manager + spec: + # TODO(user): Uncomment the following code to configure the nodeAffinity expression + # according to the platforms which are supported by your solution. + # It is considered best practice to support multiple architectures. You can + # build your manager image using the makefile target docker-buildx. 
+ # affinity: + # nodeAffinity: + # requiredDuringSchedulingIgnoredDuringExecution: + # nodeSelectorTerms: + # - matchExpressions: + # - key: kubernetes.io/arch + # operator: In + # values: + # - amd64 + # - arm64 + # - ppc64le + # - s390x + # - key: kubernetes.io/os + # operator: In + # values: + # - linux + securityContext: + runAsNonRoot: true + # TODO(user): For common cases that do not require escalating privileges + # it is recommended to ensure that all your Pods/Containers are restrictive. + # More info: https://kubernetes.io/docs/concepts/security/pod-security-standards/#restricted + # Please uncomment the following code if your project does NOT have to work on old Kubernetes + # versions < 1.19 or on vendors versions which do NOT support this field by default (i.e. Openshift < 4.11 ). + # seccompProfile: + # type: RuntimeDefault + containers: + - command: + - /manager + args: + - --leader-elect + image: controller:latest + name: manager + securityContext: + allowPrivilegeEscalation: false + capabilities: + drop: + - "ALL" + livenessProbe: + httpGet: + path: /healthz + port: 8081 + initialDelaySeconds: 15 + periodSeconds: 20 + readinessProbe: + httpGet: + path: /readyz + port: 8081 + initialDelaySeconds: 5 + periodSeconds: 10 + # TODO(user): Configure the resources accordingly based on the project requirements. 
+ # More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ + resources: + limits: + cpu: 500m + memory: 128Mi + requests: + cpu: 10m + memory: 64Mi + serviceAccountName: controller-manager + terminationGracePeriodSeconds: 10 diff --git a/config/rbac/auth_proxy_client_clusterrole.yaml b/config/rbac/auth_proxy_client_clusterrole.yaml new file mode 100644 index 0000000..ff8eba4 --- /dev/null +++ b/config/rbac/auth_proxy_client_clusterrole.yaml @@ -0,0 +1,16 @@ +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + labels: + app.kubernetes.io/name: clusterrole + app.kubernetes.io/instance: metrics-reader + app.kubernetes.io/component: kube-rbac-proxy + app.kubernetes.io/created-by: formol + app.kubernetes.io/part-of: formol + app.kubernetes.io/managed-by: kustomize + name: metrics-reader +rules: +- nonResourceURLs: + - "/metrics" + verbs: + - get diff --git a/config/rbac/auth_proxy_role.yaml b/config/rbac/auth_proxy_role.yaml new file mode 100644 index 0000000..a2282c3 --- /dev/null +++ b/config/rbac/auth_proxy_role.yaml @@ -0,0 +1,24 @@ +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + labels: + app.kubernetes.io/name: clusterrole + app.kubernetes.io/instance: proxy-role + app.kubernetes.io/component: kube-rbac-proxy + app.kubernetes.io/created-by: formol + app.kubernetes.io/part-of: formol + app.kubernetes.io/managed-by: kustomize + name: proxy-role +rules: +- apiGroups: + - authentication.k8s.io + resources: + - tokenreviews + verbs: + - create +- apiGroups: + - authorization.k8s.io + resources: + - subjectaccessreviews + verbs: + - create diff --git a/config/rbac/auth_proxy_role_binding.yaml b/config/rbac/auth_proxy_role_binding.yaml new file mode 100644 index 0000000..5d6a1fa --- /dev/null +++ b/config/rbac/auth_proxy_role_binding.yaml @@ -0,0 +1,19 @@ +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRoleBinding +metadata: + labels: + app.kubernetes.io/name: clusterrolebinding + 
app.kubernetes.io/instance: proxy-rolebinding + app.kubernetes.io/component: kube-rbac-proxy + app.kubernetes.io/created-by: formol + app.kubernetes.io/part-of: formol + app.kubernetes.io/managed-by: kustomize + name: proxy-rolebinding +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: proxy-role +subjects: +- kind: ServiceAccount + name: controller-manager + namespace: system diff --git a/config/rbac/auth_proxy_service.yaml b/config/rbac/auth_proxy_service.yaml new file mode 100644 index 0000000..cbf41b3 --- /dev/null +++ b/config/rbac/auth_proxy_service.yaml @@ -0,0 +1,21 @@ +apiVersion: v1 +kind: Service +metadata: + labels: + control-plane: controller-manager + app.kubernetes.io/name: service + app.kubernetes.io/instance: controller-manager-metrics-service + app.kubernetes.io/component: kube-rbac-proxy + app.kubernetes.io/created-by: formol + app.kubernetes.io/part-of: formol + app.kubernetes.io/managed-by: kustomize + name: controller-manager-metrics-service + namespace: system +spec: + ports: + - name: https + port: 8443 + protocol: TCP + targetPort: https + selector: + control-plane: controller-manager diff --git a/config/rbac/backupconfiguration_editor_role.yaml b/config/rbac/backupconfiguration_editor_role.yaml new file mode 100644 index 0000000..4013ff0 --- /dev/null +++ b/config/rbac/backupconfiguration_editor_role.yaml @@ -0,0 +1,31 @@ +# permissions for end users to edit backupconfigurations. 
+apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + labels: + app.kubernetes.io/name: clusterrole + app.kubernetes.io/instance: backupconfiguration-editor-role + app.kubernetes.io/component: rbac + app.kubernetes.io/created-by: formol + app.kubernetes.io/part-of: formol + app.kubernetes.io/managed-by: kustomize + name: backupconfiguration-editor-role +rules: +- apiGroups: + - formol.desmojim.fr + resources: + - backupconfigurations + verbs: + - create + - delete + - get + - list + - patch + - update + - watch +- apiGroups: + - formol.desmojim.fr + resources: + - backupconfigurations/status + verbs: + - get diff --git a/config/rbac/backupconfiguration_viewer_role.yaml b/config/rbac/backupconfiguration_viewer_role.yaml new file mode 100644 index 0000000..94b05dd --- /dev/null +++ b/config/rbac/backupconfiguration_viewer_role.yaml @@ -0,0 +1,27 @@ +# permissions for end users to view backupconfigurations. +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + labels: + app.kubernetes.io/name: clusterrole + app.kubernetes.io/instance: backupconfiguration-viewer-role + app.kubernetes.io/component: rbac + app.kubernetes.io/created-by: formol + app.kubernetes.io/part-of: formol + app.kubernetes.io/managed-by: kustomize + name: backupconfiguration-viewer-role +rules: +- apiGroups: + - formol.desmojim.fr + resources: + - backupconfigurations + verbs: + - get + - list + - watch +- apiGroups: + - formol.desmojim.fr + resources: + - backupconfigurations/status + verbs: + - get diff --git a/config/rbac/backupsession_editor_role.yaml b/config/rbac/backupsession_editor_role.yaml new file mode 100644 index 0000000..931463c --- /dev/null +++ b/config/rbac/backupsession_editor_role.yaml @@ -0,0 +1,31 @@ +# permissions for end users to edit backupsessions. 
+apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + labels: + app.kubernetes.io/name: clusterrole + app.kubernetes.io/instance: backupsession-editor-role + app.kubernetes.io/component: rbac + app.kubernetes.io/created-by: formol + app.kubernetes.io/part-of: formol + app.kubernetes.io/managed-by: kustomize + name: backupsession-editor-role +rules: +- apiGroups: + - formol.desmojim.fr + resources: + - backupsessions + verbs: + - create + - delete + - get + - list + - patch + - update + - watch +- apiGroups: + - formol.desmojim.fr + resources: + - backupsessions/status + verbs: + - get diff --git a/config/rbac/backupsession_viewer_role.yaml b/config/rbac/backupsession_viewer_role.yaml new file mode 100644 index 0000000..74d6ea5 --- /dev/null +++ b/config/rbac/backupsession_viewer_role.yaml @@ -0,0 +1,27 @@ +# permissions for end users to view backupsessions. +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + labels: + app.kubernetes.io/name: clusterrole + app.kubernetes.io/instance: backupsession-viewer-role + app.kubernetes.io/component: rbac + app.kubernetes.io/created-by: formol + app.kubernetes.io/part-of: formol + app.kubernetes.io/managed-by: kustomize + name: backupsession-viewer-role +rules: +- apiGroups: + - formol.desmojim.fr + resources: + - backupsessions + verbs: + - get + - list + - watch +- apiGroups: + - formol.desmojim.fr + resources: + - backupsessions/status + verbs: + - get diff --git a/config/rbac/function_editor_role.yaml b/config/rbac/function_editor_role.yaml new file mode 100644 index 0000000..20b3950 --- /dev/null +++ b/config/rbac/function_editor_role.yaml @@ -0,0 +1,31 @@ +# permissions for end users to edit functions. 
+apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + labels: + app.kubernetes.io/name: clusterrole + app.kubernetes.io/instance: function-editor-role + app.kubernetes.io/component: rbac + app.kubernetes.io/created-by: formol + app.kubernetes.io/part-of: formol + app.kubernetes.io/managed-by: kustomize + name: function-editor-role +rules: +- apiGroups: + - formol.desmojim.fr + resources: + - functions + verbs: + - create + - delete + - get + - list + - patch + - update + - watch +- apiGroups: + - formol.desmojim.fr + resources: + - functions/status + verbs: + - get diff --git a/config/rbac/function_viewer_role.yaml b/config/rbac/function_viewer_role.yaml new file mode 100644 index 0000000..3f8f56d --- /dev/null +++ b/config/rbac/function_viewer_role.yaml @@ -0,0 +1,27 @@ +# permissions for end users to view functions. +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + labels: + app.kubernetes.io/name: clusterrole + app.kubernetes.io/instance: function-viewer-role + app.kubernetes.io/component: rbac + app.kubernetes.io/created-by: formol + app.kubernetes.io/part-of: formol + app.kubernetes.io/managed-by: kustomize + name: function-viewer-role +rules: +- apiGroups: + - formol.desmojim.fr + resources: + - functions + verbs: + - get + - list + - watch +- apiGroups: + - formol.desmojim.fr + resources: + - functions/status + verbs: + - get diff --git a/config/rbac/kustomization.yaml b/config/rbac/kustomization.yaml new file mode 100644 index 0000000..731832a --- /dev/null +++ b/config/rbac/kustomization.yaml @@ -0,0 +1,18 @@ +resources: +# All RBAC will be applied under this service account in +# the deployment namespace. You may comment out this resource +# if your manager will use a service account that exists at +# runtime. Be sure to update RoleBinding and ClusterRoleBinding +# subjects if changing service account names. 
+- service_account.yaml +- role.yaml +- role_binding.yaml +- leader_election_role.yaml +- leader_election_role_binding.yaml +# Comment the following 4 lines if you want to disable +# the auth proxy (https://github.com/brancz/kube-rbac-proxy) +# which protects your /metrics endpoint. +- auth_proxy_service.yaml +- auth_proxy_role.yaml +- auth_proxy_role_binding.yaml +- auth_proxy_client_clusterrole.yaml diff --git a/config/rbac/leader_election_role.yaml b/config/rbac/leader_election_role.yaml new file mode 100644 index 0000000..5a6c0b1 --- /dev/null +++ b/config/rbac/leader_election_role.yaml @@ -0,0 +1,44 @@ +# permissions to do leader election. +apiVersion: rbac.authorization.k8s.io/v1 +kind: Role +metadata: + labels: + app.kubernetes.io/name: role + app.kubernetes.io/instance: leader-election-role + app.kubernetes.io/component: rbac + app.kubernetes.io/created-by: formol + app.kubernetes.io/part-of: formol + app.kubernetes.io/managed-by: kustomize + name: leader-election-role +rules: +- apiGroups: + - "" + resources: + - configmaps + verbs: + - get + - list + - watch + - create + - update + - patch + - delete +- apiGroups: + - coordination.k8s.io + resources: + - leases + verbs: + - get + - list + - watch + - create + - update + - patch + - delete +- apiGroups: + - "" + resources: + - events + verbs: + - create + - patch diff --git a/config/rbac/leader_election_role_binding.yaml b/config/rbac/leader_election_role_binding.yaml new file mode 100644 index 0000000..70ec17e --- /dev/null +++ b/config/rbac/leader_election_role_binding.yaml @@ -0,0 +1,19 @@ +apiVersion: rbac.authorization.k8s.io/v1 +kind: RoleBinding +metadata: + labels: + app.kubernetes.io/name: rolebinding + app.kubernetes.io/instance: leader-election-rolebinding + app.kubernetes.io/component: rbac + app.kubernetes.io/created-by: formol + app.kubernetes.io/part-of: formol + app.kubernetes.io/managed-by: kustomize + name: leader-election-rolebinding +roleRef: + apiGroup: rbac.authorization.k8s.io + 
kind: Role + name: leader-election-role +subjects: +- kind: ServiceAccount + name: controller-manager + namespace: system diff --git a/config/rbac/repo_editor_role.yaml b/config/rbac/repo_editor_role.yaml new file mode 100644 index 0000000..2b11a78 --- /dev/null +++ b/config/rbac/repo_editor_role.yaml @@ -0,0 +1,31 @@ +# permissions for end users to edit repoes. +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + labels: + app.kubernetes.io/name: clusterrole + app.kubernetes.io/instance: repo-editor-role + app.kubernetes.io/component: rbac + app.kubernetes.io/created-by: formol + app.kubernetes.io/part-of: formol + app.kubernetes.io/managed-by: kustomize + name: repo-editor-role +rules: +- apiGroups: + - formol.desmojim.fr + resources: + - repoes + verbs: + - create + - delete + - get + - list + - patch + - update + - watch +- apiGroups: + - formol.desmojim.fr + resources: + - repoes/status + verbs: + - get diff --git a/config/rbac/repo_viewer_role.yaml b/config/rbac/repo_viewer_role.yaml new file mode 100644 index 0000000..a4cf063 --- /dev/null +++ b/config/rbac/repo_viewer_role.yaml @@ -0,0 +1,27 @@ +# permissions for end users to view repoes. 
+apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + labels: + app.kubernetes.io/name: clusterrole + app.kubernetes.io/instance: repo-viewer-role + app.kubernetes.io/component: rbac + app.kubernetes.io/created-by: formol + app.kubernetes.io/part-of: formol + app.kubernetes.io/managed-by: kustomize + name: repo-viewer-role +rules: +- apiGroups: + - formol.desmojim.fr + resources: + - repoes + verbs: + - get + - list + - watch +- apiGroups: + - formol.desmojim.fr + resources: + - repoes/status + verbs: + - get diff --git a/config/rbac/restoresession_editor_role.yaml b/config/rbac/restoresession_editor_role.yaml new file mode 100644 index 0000000..2a68f23 --- /dev/null +++ b/config/rbac/restoresession_editor_role.yaml @@ -0,0 +1,31 @@ +# permissions for end users to edit restoresessions. +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + labels: + app.kubernetes.io/name: clusterrole + app.kubernetes.io/instance: restoresession-editor-role + app.kubernetes.io/component: rbac + app.kubernetes.io/created-by: formol + app.kubernetes.io/part-of: formol + app.kubernetes.io/managed-by: kustomize + name: restoresession-editor-role +rules: +- apiGroups: + - formol.desmojim.fr + resources: + - restoresessions + verbs: + - create + - delete + - get + - list + - patch + - update + - watch +- apiGroups: + - formol.desmojim.fr + resources: + - restoresessions/status + verbs: + - get diff --git a/config/rbac/restoresession_viewer_role.yaml b/config/rbac/restoresession_viewer_role.yaml new file mode 100644 index 0000000..f045a90 --- /dev/null +++ b/config/rbac/restoresession_viewer_role.yaml @@ -0,0 +1,27 @@ +# permissions for end users to view restoresessions. 
+apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + labels: + app.kubernetes.io/name: clusterrole + app.kubernetes.io/instance: restoresession-viewer-role + app.kubernetes.io/component: rbac + app.kubernetes.io/created-by: formol + app.kubernetes.io/part-of: formol + app.kubernetes.io/managed-by: kustomize + name: restoresession-viewer-role +rules: +- apiGroups: + - formol.desmojim.fr + resources: + - restoresessions + verbs: + - get + - list + - watch +- apiGroups: + - formol.desmojim.fr + resources: + - restoresessions/status + verbs: + - get diff --git a/config/rbac/role.yaml b/config/rbac/role.yaml new file mode 100644 index 0000000..9bb5f79 --- /dev/null +++ b/config/rbac/role.yaml @@ -0,0 +1,249 @@ +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + creationTimestamp: null + name: manager-role +rules: +- apiGroups: + - apps + resources: + - deployments + verbs: + - create + - delete + - get + - list + - patch + - update + - watch +- apiGroups: + - apps + resources: + - replicasets + verbs: + - create + - delete + - get + - list + - patch + - update + - watch +- apiGroups: + - apps + resources: + - statefulsets + verbs: + - create + - delete + - get + - list + - patch + - update + - watch +- apiGroups: + - batch + resources: + - cronjobs + verbs: + - create + - delete + - get + - list + - patch + - update + - watch +- apiGroups: + - batch + resources: + - cronjobs/status + verbs: + - get +- apiGroups: + - batch + resources: + - jobs + verbs: + - create + - delete + - get + - list + - patch + - update + - watch +- apiGroups: + - "" + resources: + - configmaps + verbs: + - get + - list + - watch +- apiGroups: + - "" + resources: + - persistentvolumeclaims + verbs: + - create + - delete + - get + - list + - patch + - update + - watch +- apiGroups: + - "" + resources: + - persistentvolumes + verbs: + - get + - list + - watch +- apiGroups: + - "" + resources: + - pods + verbs: + - get + - list + - watch +- apiGroups: 
+ - "" + resources: + - secrets + verbs: + - create + - delete + - get + - list + - patch + - update + - watch +- apiGroups: + - "" + resources: + - serviceaccounts + verbs: + - create + - delete + - get + - list + - patch + - update + - watch +- apiGroups: + - "" + resources: + - volumesnapshotclasses + verbs: + - get + - list + - watch +- apiGroups: + - formol.desmojim.fr + resources: + - '*' + verbs: + - '*' +- apiGroups: + - formol.desmojim.fr + resources: + - restoresessions + verbs: + - create + - delete + - get + - list + - patch + - update + - watch +- apiGroups: + - formol.desmojim.fr + resources: + - restoresessions/finalizers + verbs: + - update +- apiGroups: + - formol.desmojim.fr + resources: + - restoresessions/status + verbs: + - get + - patch + - update +- apiGroups: + - rbac.authorization.k8s.io + resources: + - clusterrolebindings + verbs: + - create + - delete + - get + - list + - patch + - update + - watch +- apiGroups: + - rbac.authorization.k8s.io + resources: + - clusterroles + verbs: + - create + - delete + - get + - list + - patch + - update + - watch +- apiGroups: + - rbac.authorization.k8s.io + resources: + - rolebindings + verbs: + - create + - delete + - get + - list + - patch + - update + - watch +- apiGroups: + - rbac.authorization.k8s.io + resources: + - roles + verbs: + - create + - delete + - get + - list + - patch + - update + - watch +- apiGroups: + - snapshot.storage.k8s.io + resources: + - persistentvolumes + verbs: + - get + - list + - watch +- apiGroups: + - snapshot.storage.k8s.io + resources: + - volumesnapshotclasses + verbs: + - get + - list + - watch +- apiGroups: + - snapshot.storage.k8s.io + resources: + - volumesnapshots + verbs: + - create + - delete + - get + - list + - patch + - update + - watch diff --git a/config/rbac/role_binding.yaml b/config/rbac/role_binding.yaml new file mode 100644 index 0000000..bfa0703 --- /dev/null +++ b/config/rbac/role_binding.yaml @@ -0,0 +1,19 @@ +apiVersion: 
rbac.authorization.k8s.io/v1 +kind: ClusterRoleBinding +metadata: + labels: + app.kubernetes.io/name: clusterrolebinding + app.kubernetes.io/instance: manager-rolebinding + app.kubernetes.io/component: rbac + app.kubernetes.io/created-by: formol + app.kubernetes.io/part-of: formol + app.kubernetes.io/managed-by: kustomize + name: manager-rolebinding +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: manager-role +subjects: +- kind: ServiceAccount + name: controller-manager + namespace: system diff --git a/config/rbac/service_account.yaml b/config/rbac/service_account.yaml new file mode 100644 index 0000000..44dc9cd --- /dev/null +++ b/config/rbac/service_account.yaml @@ -0,0 +1,12 @@ +apiVersion: v1 +kind: ServiceAccount +metadata: + labels: + app.kubernetes.io/name: serviceaccount + app.kubernetes.io/instance: controller-manager + app.kubernetes.io/component: rbac + app.kubernetes.io/created-by: formol + app.kubernetes.io/part-of: formol + app.kubernetes.io/managed-by: kustomize + name: controller-manager + namespace: system diff --git a/go.sum b/go.sum new file mode 100644 index 0000000..3a4aa45 --- /dev/null +++ b/go.sum @@ -0,0 +1,796 @@ +cloud.google.com/go v0.26.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw= +cloud.google.com/go v0.34.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw= +cloud.google.com/go v0.38.0/go.mod h1:990N+gfupTy94rShfmMCWGDn0LpTmnzTp2qbd1dvSRU= +cloud.google.com/go v0.44.1/go.mod h1:iSa0KzasP4Uvy3f1mN/7PiObzGgflwredwwASm/v6AU= +cloud.google.com/go v0.44.2/go.mod h1:60680Gw3Yr4ikxnPRS/oxxkBccT6SA1yMk63TGekxKY= +cloud.google.com/go v0.45.1/go.mod h1:RpBamKRgapWJb87xiFSdk4g1CME7QZg3uwTez+TSTjc= +cloud.google.com/go v0.46.3/go.mod h1:a6bKKbmY7er1mI7TEI4lsAkts/mkhTSZK8w33B4RAg0= +cloud.google.com/go v0.50.0/go.mod h1:r9sluTvynVuxRIOHXQEHMFffphuXHOMZMycpNR5e6To= +cloud.google.com/go v0.52.0/go.mod h1:pXajvRH/6o3+F9jDHZWQ5PbGhn+o8w9qiu/CffaVdO4= +cloud.google.com/go v0.53.0/go.mod 
h1:fp/UouUEsRkN6ryDKNW/Upv/JBKnv6WDthjR6+vze6M= +cloud.google.com/go v0.54.0/go.mod h1:1rq2OEkV3YMf6n/9ZvGWI3GWw0VoqH/1x2nd8Is/bPc= +cloud.google.com/go v0.56.0/go.mod h1:jr7tqZxxKOVYizybht9+26Z/gUq7tiRzu+ACVAMbKVk= +cloud.google.com/go v0.57.0/go.mod h1:oXiQ6Rzq3RAkkY7N6t3TcE6jE+CIBBbA36lwQ1JyzZs= +cloud.google.com/go v0.62.0/go.mod h1:jmCYTdRCQuc1PHIIJ/maLInMho30T/Y0M4hTdTShOYc= +cloud.google.com/go v0.65.0/go.mod h1:O5N8zS7uWy9vkA9vayVHs65eM1ubvY4h553ofrNHObY= +cloud.google.com/go v0.72.0/go.mod h1:M+5Vjvlc2wnp6tjzE102Dw08nGShTscUx2nZMufOKPI= +cloud.google.com/go v0.74.0/go.mod h1:VV1xSbzvo+9QJOxLDaJfTjx5e+MePCpCWwvftOeQmWk= +cloud.google.com/go v0.78.0/go.mod h1:QjdrLG0uq+YwhjoVOLsS1t7TW8fs36kLs4XO5R5ECHg= +cloud.google.com/go v0.79.0/go.mod h1:3bzgcEeQlzbuEAYu4mrWhKqWjmpprinYgKJLgKHnbb8= +cloud.google.com/go v0.81.0/go.mod h1:mk/AM35KwGk/Nm2YSeZbxXdrNK3KZOYHmLkOqC2V6E0= +cloud.google.com/go v0.83.0/go.mod h1:Z7MJUsANfY0pYPdw0lbnivPx4/vhy/e2FEkSkF7vAVY= +cloud.google.com/go v0.84.0/go.mod h1:RazrYuxIK6Kb7YrzzhPoLmCVzl7Sup4NrbKPg8KHSUM= +cloud.google.com/go v0.87.0/go.mod h1:TpDYlFy7vuLzZMMZ+B6iRiELaY7z/gJPaqbMx6mlWcY= +cloud.google.com/go v0.90.0/go.mod h1:kRX0mNRHe0e2rC6oNakvwQqzyDmg57xJ+SZU1eT2aDQ= +cloud.google.com/go v0.93.3/go.mod h1:8utlLll2EF5XMAV15woO4lSbWQlk8rer9aLOfLh7+YI= +cloud.google.com/go v0.94.1/go.mod h1:qAlAugsXlC+JWO+Bke5vCtc9ONxjQT3drlTTnAplMW4= +cloud.google.com/go v0.97.0 h1:3DXvAyifywvq64LfkKaMOmkWPS1CikIQdMe2lY9vxU8= +cloud.google.com/go v0.97.0/go.mod h1:GF7l59pYBVlXQIBLx3a761cZ41F9bBH3JUlihCt2Udc= +cloud.google.com/go/bigquery v1.0.1/go.mod h1:i/xbL2UlR5RvWAURpBYZTtm/cXjCha9lbfbpx4poX+o= +cloud.google.com/go/bigquery v1.3.0/go.mod h1:PjpwJnslEMmckchkHFfq+HTD2DmtT67aNFKH1/VBDHE= +cloud.google.com/go/bigquery v1.4.0/go.mod h1:S8dzgnTigyfTmLBfrtrhyYhwRxG72rYxvftPBK2Dvzc= +cloud.google.com/go/bigquery v1.5.0/go.mod h1:snEHRnqQbz117VIFhE8bmtwIDY80NLUZUMb4Nv6dBIg= +cloud.google.com/go/bigquery v1.7.0/go.mod 
h1://okPTzCYNXSlb24MZs83e2Do+h+VXtc4gLoIoXIAPc= +cloud.google.com/go/bigquery v1.8.0/go.mod h1:J5hqkt3O0uAFnINi6JXValWIb1v0goeZM77hZzJN/fQ= +cloud.google.com/go/datastore v1.0.0/go.mod h1:LXYbyblFSglQ5pkeyhO+Qmw7ukd3C+pD7TKLgZqpHYE= +cloud.google.com/go/datastore v1.1.0/go.mod h1:umbIZjpQpHh4hmRpGhH4tLFup+FVzqBi1b3c64qFpCk= +cloud.google.com/go/pubsub v1.0.1/go.mod h1:R0Gpsv3s54REJCy4fxDixWD93lHJMoZTyQ2kNxGRt3I= +cloud.google.com/go/pubsub v1.1.0/go.mod h1:EwwdRX2sKPjnvnqCa270oGRyludottCI76h+R3AArQw= +cloud.google.com/go/pubsub v1.2.0/go.mod h1:jhfEVHT8odbXTkndysNHCcx0awwzvfOlguIAii9o8iA= +cloud.google.com/go/pubsub v1.3.1/go.mod h1:i+ucay31+CNRpDW4Lu78I4xXG+O1r/MAHgjpRVR+TSU= +cloud.google.com/go/storage v1.0.0/go.mod h1:IhtSnM/ZTZV8YYJWCY8RULGVqBDmpoyjwiyrjsg+URw= +cloud.google.com/go/storage v1.5.0/go.mod h1:tpKbwo567HUNpVclU5sGELwQWBDZ8gh0ZeosJ0Rtdos= +cloud.google.com/go/storage v1.6.0/go.mod h1:N7U0C8pVQ/+NIKOBQyamJIeKQKkZ+mxpohlUTyfDhBk= +cloud.google.com/go/storage v1.8.0/go.mod h1:Wv1Oy7z6Yz3DshWRJFhqM/UCfaWIRTdp0RXyy7KQOVs= +cloud.google.com/go/storage v1.10.0/go.mod h1:FLPqc6j+Ki4BU591ie1oL6qBQGu2Bl/tZ9ullr3+Kg0= +dmitri.shuralyov.com/gpu/mtl v0.0.0-20190408044501-666a987793e9/go.mod h1:H6x//7gZCb22OMCxBHrMx7a5I7Hp++hsVxbQ4BYO7hU= +github.com/Azure/go-autorest v14.2.0+incompatible h1:V5VMDjClD3GiElqLWO7mz2MxNAK/vTfRHdAubSIPRgs= +github.com/Azure/go-autorest v14.2.0+incompatible/go.mod h1:r+4oMnoxhatjLLJ6zxSWATqVooLgysK6ZNox3g/xq24= +github.com/Azure/go-autorest/autorest v0.11.27 h1:F3R3q42aWytozkV8ihzcgMO4OA4cuqr3bNlsEuF6//A= +github.com/Azure/go-autorest/autorest v0.11.27/go.mod h1:7l8ybrIdUmGqZMTD0sRtAr8NvbHjfofbf8RSP2q7w7U= +github.com/Azure/go-autorest/autorest/adal v0.9.18/go.mod h1:XVVeme+LZwABT8K5Lc3hA4nAe8LDBVle26gTrguhhPQ= +github.com/Azure/go-autorest/autorest/adal v0.9.20 h1:gJ3E98kMpFB1MFqQCvA1yFab8vthOeD4VlFRQULxahg= +github.com/Azure/go-autorest/autorest/adal v0.9.20/go.mod h1:XVVeme+LZwABT8K5Lc3hA4nAe8LDBVle26gTrguhhPQ= 
+github.com/Azure/go-autorest/autorest/date v0.3.0 h1:7gUk1U5M/CQbp9WoqinNzJar+8KY+LPI6wiWrP/myHw= +github.com/Azure/go-autorest/autorest/date v0.3.0/go.mod h1:BI0uouVdmngYNUzGWeSYnokU+TrmwEsOqdt8Y6sso74= +github.com/Azure/go-autorest/autorest/mocks v0.4.1/go.mod h1:LTp+uSrOhSkaKrUy935gNZuuIPPVsHlr9DSOxSayd+k= +github.com/Azure/go-autorest/autorest/mocks v0.4.2 h1:PGN4EDXnuQbojHbU0UWoNvmu9AGVwYHG9/fkDYhtAfw= +github.com/Azure/go-autorest/autorest/mocks v0.4.2/go.mod h1:Vy7OitM9Kei0i1Oj+LvyAWMXJHeKH1MVlzFugfVrmyU= +github.com/Azure/go-autorest/logger v0.2.1 h1:IG7i4p/mDa2Ce4TRyAO8IHnVhAVF3RFU+ZtXWSmf4Tg= +github.com/Azure/go-autorest/logger v0.2.1/go.mod h1:T9E3cAhj2VqvPOtCYAvby9aBXkZmbF5NWuPV8+WeEW8= +github.com/Azure/go-autorest/tracing v0.6.0 h1:TYi4+3m5t6K48TGI9AUdb+IzbnSxvnvUMfuitfgcfuo= +github.com/Azure/go-autorest/tracing v0.6.0/go.mod h1:+vhtPC754Xsa23ID7GlGsrdKBpUA79WCAKPPZVC2DeU= +github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU= +github.com/BurntSushi/xgb v0.0.0-20160522181843-27f122750802/go.mod h1:IVnqGOEym/WlBOVXweHU+Q+/VP0lqqI8lqeDx9IjBqo= +github.com/OneOfOne/xxhash v1.2.2/go.mod h1:HSdplMjZKSmBqAxg5vPj2TmRDmfkzw+cTzAElWljhcU= +github.com/PuerkitoBio/purell v1.1.1 h1:WEQqlqaGbrPkxLJWfBwQmfEAE1Z7ONdDLqrN38tNFfI= +github.com/PuerkitoBio/purell v1.1.1/go.mod h1:c11w/QuzBsJSee3cPx9rAFu61PvFxuPbtSwDGJws/X0= +github.com/PuerkitoBio/urlesc v0.0.0-20170810143723-de5bf2ad4578 h1:d+Bc7a5rLufV/sSk/8dngufqelfh6jnri85riMAaF/M= +github.com/PuerkitoBio/urlesc v0.0.0-20170810143723-de5bf2ad4578/go.mod h1:uGdkoq3SwY9Y+13GIhn11/XLaGBb4BfwItxLd5jeuXE= +github.com/alecthomas/template v0.0.0-20160405071501-a0175ee3bccc/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc= +github.com/alecthomas/template v0.0.0-20190718012654-fb15b899a751/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc= +github.com/alecthomas/units v0.0.0-20151022065526-2efee857e7cf/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0= 
+github.com/alecthomas/units v0.0.0-20190717042225-c3de453c63f4/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0= +github.com/alecthomas/units v0.0.0-20190924025748-f65c72e2690d/go.mod h1:rBZYJk541a8SKzHPHnH3zbiI+7dagKZ0cgpgrD7Fyho= +github.com/antihax/optional v1.0.0/go.mod h1:uupD/76wgC+ih3iEmQUL+0Ugr19nfwCT1kdvxnR2qWY= +github.com/benbjohnson/clock v1.1.0 h1:Q92kusRqC1XV2MjkWETPvjJVqKetz1OzxZB7mHJLju8= +github.com/benbjohnson/clock v1.1.0/go.mod h1:J11/hYXuz8f4ySSvYwY0FKfm+ezbsZBKZxNJlLklBHA= +github.com/beorn7/perks v0.0.0-20180321164747-3a771d992973/go.mod h1:Dwedo/Wpr24TaqPxmxbtue+5NUziq4I4S80YR8gNf3Q= +github.com/beorn7/perks v1.0.0/go.mod h1:KWe93zE9D1o94FZ5RNwFwVgaQK1VOXiVxmqh+CedLV8= +github.com/beorn7/perks v1.0.1 h1:VlbKKnNfV8bJzeqoa4cOKqO6bYr3WgKZxO8Z16+hsOM= +github.com/beorn7/perks v1.0.1/go.mod h1:G2ZrVWU2WbWT9wwq4/hrbKbnv/1ERSJQ0ibhJ6rlkpw= +github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU= +github.com/cespare/xxhash v1.1.0/go.mod h1:XrSqR1VqqWfGrhpAt58auRo0WTKS1nRRg3ghfAqPWnc= +github.com/cespare/xxhash/v2 v2.1.1/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= +github.com/cespare/xxhash/v2 v2.1.2 h1:YRXhKfTDauu4ajMg1TPgFO5jnlC2HCbmLXMcTG5cbYE= +github.com/cespare/xxhash/v2 v2.1.2/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= +github.com/chzyer/logex v1.1.10/go.mod h1:+Ywpsq7O8HXn0nuIou7OrIPyXbp3wmkHB+jjWRnGsAI= +github.com/chzyer/readline v0.0.0-20180603132655-2972be24d48e/go.mod h1:nSuG5e5PlCu98SY8svDHJxuZscDgtXS6KTTbou5AhLI= +github.com/chzyer/test v0.0.0-20180213035817-a1ea475d72b1/go.mod h1:Q3SI9o4m/ZMnBNeIyt5eFwwo7qiLfzFZmjNmxjkiQlU= +github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw= +github.com/cncf/udpa/go v0.0.0-20191209042840-269d4d468f6f/go.mod h1:M8M6+tZqaGXZJjfX53e64911xZQV5JYwmTeXPW+k8Sc= +github.com/cncf/udpa/go v0.0.0-20200629203442-efcf912fb354/go.mod h1:WmhPx2Nbnhtbo57+VJT5O0JRkEi1Wbu0z5j0R8u5Hbk= 
+github.com/cncf/udpa/go v0.0.0-20201120205902-5459f2c99403/go.mod h1:WmhPx2Nbnhtbo57+VJT5O0JRkEi1Wbu0z5j0R8u5Hbk= +github.com/cncf/xds/go v0.0.0-20210312221358-fbca930ec8ed/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= +github.com/creack/pty v1.1.9/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ33E= +github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= +github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= +github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= +github.com/docopt/docopt-go v0.0.0-20180111231733-ee0de3bc6815/go.mod h1:WwZ+bS3ebgob9U8Nd0kOddGdZWjyMGR8Wziv+TBNwSE= +github.com/emicklei/go-restful/v3 v3.8.0 h1:eCZ8ulSerjdAiaNpF7GxXIE7ZCMo1moN1qX+S609eVw= +github.com/emicklei/go-restful/v3 v3.8.0/go.mod h1:6n3XBCmQQb25CM2LCACGz8ukIrRry+4bhvbpWn3mrbc= +github.com/envoyproxy/go-control-plane v0.9.0/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4= +github.com/envoyproxy/go-control-plane v0.9.1-0.20191026205805-5f8ba28d4473/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4= +github.com/envoyproxy/go-control-plane v0.9.4/go.mod h1:6rpuAdCZL397s3pYoYcLgu1mIlRU8Am5FuJP05cCM98= +github.com/envoyproxy/go-control-plane v0.9.7/go.mod h1:cwu0lG7PUMfa9snN8LXBig5ynNVH9qI8YYLbd1fK2po= +github.com/envoyproxy/go-control-plane v0.9.9-0.20201210154907-fd9021fe5dad/go.mod h1:cXg6YxExXjJnVBQHBLXeUAgxn2UodCpnH306RInaBQk= +github.com/envoyproxy/go-control-plane v0.9.9-0.20210217033140-668b12f5399d/go.mod h1:cXg6YxExXjJnVBQHBLXeUAgxn2UodCpnH306RInaBQk= +github.com/envoyproxy/go-control-plane v0.9.9-0.20210512163311-63b5d3c536b0/go.mod h1:hliV/p42l8fGbc6Y9bQ70uLwIvmJyVE5k4iMKlh8wCQ= +github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c= +github.com/evanphx/json-patch v0.5.2/go.mod h1:ZWS5hhDbVDyob71nXKNL0+PWn6ToqBHMikGIFbs31qQ= +github.com/evanphx/json-patch v4.12.0+incompatible 
h1:4onqiflcdA9EOZ4RxV643DvftH5pOlLGNtQ5lPWQu84= +github.com/evanphx/json-patch/v5 v5.6.0 h1:b91NhWfaz02IuVxO9faSllyAtNXHMPkC5J8sJCLunww= +github.com/evanphx/json-patch/v5 v5.6.0/go.mod h1:G79N1coSVB93tBe7j6PhzjmR3/2VvlbKOFpnXhI9Bw4= +github.com/fsnotify/fsnotify v1.5.4 h1:jRbGcIw6P2Meqdwuo0H1p6JVLbL5DHKAKlYndzMwVZI= +github.com/fsnotify/fsnotify v1.5.4/go.mod h1:OVB6XrOHzAwXMpEM7uPOzcehqUV2UqJxmVXmkdnm1bU= +github.com/ghodss/yaml v1.0.0/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04= +github.com/go-gl/glfw v0.0.0-20190409004039-e6da0acd62b1/go.mod h1:vR7hzQXu2zJy9AVAgeJqvqgH9Q5CA+iKCZ2gyEVpxRU= +github.com/go-gl/glfw/v3.3/glfw v0.0.0-20191125211704-12ad95a8df72/go.mod h1:tQ2UAYgL5IevRw8kRxooKSPJfGvJ9fJQFa0TUsXzTg8= +github.com/go-gl/glfw/v3.3/glfw v0.0.0-20200222043503-6f7a984d4dc4/go.mod h1:tQ2UAYgL5IevRw8kRxooKSPJfGvJ9fJQFa0TUsXzTg8= +github.com/go-kit/kit v0.8.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as= +github.com/go-kit/kit v0.9.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as= +github.com/go-kit/log v0.1.0/go.mod h1:zbhenjAZHb184qTLMA9ZjW7ThYL0H2mk7Q6pNt4vbaY= +github.com/go-logfmt/logfmt v0.3.0/go.mod h1:Qt1PoO58o5twSAckw1HlFXLmHsOX5/0LbT9GBnD5lWE= +github.com/go-logfmt/logfmt v0.4.0/go.mod h1:3RMwSq7FuexP4Kalkev3ejPJsZTpXXBr9+V4qmtdjCk= +github.com/go-logfmt/logfmt v0.5.0/go.mod h1:wCYkCAKZfumFQihp8CzCvQ3paCTfi41vtzG1KdI/P7A= +github.com/go-logr/logr v0.1.0/go.mod h1:ixOQHD9gLJUVQQ2ZOR7zLEifBX6tGkNJF4QyIY7sIas= +github.com/go-logr/logr v1.2.0/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A= +github.com/go-logr/logr v1.2.2/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A= +github.com/go-logr/logr v1.2.3 h1:2DntVwHkVopvECVRSlL5PSo9eG+cAkDCuckLubN+rq0= +github.com/go-logr/logr v1.2.3/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A= +github.com/go-logr/zapr v1.2.3 h1:a9vnzlIBPQBBkeaR9IuMUfmVOrQlkoC4YfPoFkX3T7A= +github.com/go-logr/zapr v1.2.3/go.mod h1:eIauM6P8qSvTw5o2ez6UEAfGjQKrxQTl5EoK+Qa2oG4= 
+github.com/go-openapi/jsonpointer v0.19.3/go.mod h1:Pl9vOtqEWErmShwVjC8pYs9cog34VGT37dQOVbmoatg= +github.com/go-openapi/jsonpointer v0.19.5 h1:gZr+CIYByUqjcgeLXnQu2gHYQC9o73G2XUeOFYEICuY= +github.com/go-openapi/jsonpointer v0.19.5/go.mod h1:Pl9vOtqEWErmShwVjC8pYs9cog34VGT37dQOVbmoatg= +github.com/go-openapi/jsonreference v0.19.5 h1:1WJP/wi4OjB4iV8KVbH73rQaoialJrqv8gitZLxGLtM= +github.com/go-openapi/jsonreference v0.19.5/go.mod h1:RdybgQwPxbL4UEjuAruzK1x3nE69AqPYEJeo/TWfEeg= +github.com/go-openapi/swag v0.19.5/go.mod h1:POnQmlKehdgb5mhVOsnJFsivZCEZ/vjK9gh66Z9tfKk= +github.com/go-openapi/swag v0.19.14 h1:gm3vOOXfiuw5i9p5N9xJvfjvuofpyvLA9Wr6QfK5Fng= +github.com/go-openapi/swag v0.19.14/go.mod h1:QYRuS/SOXUCsnplDa677K7+DxSOj6IPNl/eQntq43wQ= +github.com/go-stack/stack v1.8.0/go.mod h1:v0f6uXyyMGvRgIKkXu+yp6POWl0qKG85gN/melR3HDY= +github.com/gogo/protobuf v1.1.1/go.mod h1:r8qH/GZQm5c6nD/R0oafs1akxWv10x8SbQlK7atdtwQ= +github.com/gogo/protobuf v1.3.2 h1:Ov1cvc58UF3b5XjBnZv7+opcTcQFZebYjWzi34vdm4Q= +github.com/gogo/protobuf v1.3.2/go.mod h1:P1XiOD3dCwIKUDQYPy72D8LYyHL2YPYrpS2s69NZV8Q= +github.com/golang-jwt/jwt/v4 v4.0.0/go.mod h1:/xlHOz8bRuivTWchD4jCa+NbatV+wEUSzwAxVc6locg= +github.com/golang-jwt/jwt/v4 v4.2.0 h1:besgBTC8w8HjP6NzQdxwKH9Z5oQMZ24ThTrHp3cZ8eU= +github.com/golang-jwt/jwt/v4 v4.2.0/go.mod h1:/xlHOz8bRuivTWchD4jCa+NbatV+wEUSzwAxVc6locg= +github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q= +github.com/golang/groupcache v0.0.0-20190702054246-869f871628b6/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= +github.com/golang/groupcache v0.0.0-20191227052852-215e87163ea7/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= +github.com/golang/groupcache v0.0.0-20200121045136-8c9f03a8e57e/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= +github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da h1:oI5xCqsCo564l8iNU+DwB5epxmsaqB+rhGL0m5jtYqE= +github.com/golang/groupcache 
v0.0.0-20210331224755-41bb18bfe9da/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= +github.com/golang/mock v1.1.1/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A= +github.com/golang/mock v1.2.0/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A= +github.com/golang/mock v1.3.1/go.mod h1:sBzyDLLjw3U8JLTeZvSv8jJB+tU5PVekmnlKIyFUx0Y= +github.com/golang/mock v1.4.0/go.mod h1:UOMv5ysSaYNkG+OFQykRIcU/QvvxJf3p21QfJ2Bt3cw= +github.com/golang/mock v1.4.1/go.mod h1:UOMv5ysSaYNkG+OFQykRIcU/QvvxJf3p21QfJ2Bt3cw= +github.com/golang/mock v1.4.3/go.mod h1:UOMv5ysSaYNkG+OFQykRIcU/QvvxJf3p21QfJ2Bt3cw= +github.com/golang/mock v1.4.4/go.mod h1:l3mdAwkq5BuhzHwde/uurv3sEJeZMXNpwsxVWU71h+4= +github.com/golang/mock v1.5.0/go.mod h1:CWnOUgYIOo4TcNZ0wHX3YZCqsaM1I1Jvs6v3mP3KVu8= +github.com/golang/mock v1.6.0/go.mod h1:p6yTPP+5HYm5mzsMV8JkE6ZKdX+/wYM6Hr+LicevLPs= +github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= +github.com/golang/protobuf v1.3.1/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= +github.com/golang/protobuf v1.3.2/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= +github.com/golang/protobuf v1.3.3/go.mod h1:vzj43D7+SQXF/4pzW/hwtAqwc6iTitCiVSaWz5lYuqw= +github.com/golang/protobuf v1.3.4/go.mod h1:vzj43D7+SQXF/4pzW/hwtAqwc6iTitCiVSaWz5lYuqw= +github.com/golang/protobuf v1.3.5/go.mod h1:6O5/vntMXwX2lRkT1hjjk0nAC1IDOTvTlVgjlRvqsdk= +github.com/golang/protobuf v1.4.0-rc.1/go.mod h1:ceaxUfeHdC40wWswd/P6IGgMaK3YpKi5j83Wpe3EHw8= +github.com/golang/protobuf v1.4.0-rc.1.0.20200221234624-67d41d38c208/go.mod h1:xKAWHe0F5eneWXFV3EuXVDTCmh+JuBKY0li0aMyXATA= +github.com/golang/protobuf v1.4.0-rc.2/go.mod h1:LlEzMj4AhA7rCAGe4KMBDvJI+AwstrUpVNzEA03Pprs= +github.com/golang/protobuf v1.4.0-rc.4.0.20200313231945-b860323f09d0/go.mod h1:WU3c8KckQ9AFe+yFwt9sWVRKCVIyN9cPHBJSNnbL67w= +github.com/golang/protobuf v1.4.0/go.mod h1:jodUvKwWbYaEsadDk5Fwe5c77LiNKVO9IDvqG2KuDX0= +github.com/golang/protobuf v1.4.1/go.mod 
h1:U8fpvMrcmy5pZrNK1lt4xCsGvpyWQ/VVv6QDs8UjoX8= +github.com/golang/protobuf v1.4.2/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI= +github.com/golang/protobuf v1.4.3/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI= +github.com/golang/protobuf v1.5.0/go.mod h1:FsONVRAS9T7sI+LIUmWTfcYkHO4aIWwzhcaSAoJOfIk= +github.com/golang/protobuf v1.5.1/go.mod h1:DopwsBzvsk0Fs44TXzsVbJyPhcCPeIwnvohx4u74HPM= +github.com/golang/protobuf v1.5.2 h1:ROPKBNFfQgOUMifHyP+KYbvpjbdoFNs+aK7DXlji0Tw= +github.com/golang/protobuf v1.5.2/go.mod h1:XVQd3VNwM+JqD3oG2Ue2ip4fOMUkwXdXDdiuN0vRsmY= +github.com/golang/snappy v0.0.3/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q= +github.com/google/btree v0.0.0-20180813153112-4030bb1f1f0c/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ= +github.com/google/btree v1.0.0/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ= +github.com/google/gnostic v0.5.7-v3refs h1:FhTMOKj2VhjpouxvWJAV1TL304uMlb9zcDqkl6cEI54= +github.com/google/gnostic v0.5.7-v3refs/go.mod h1:73MKFl6jIHelAJNaBGFzt3SPtZULs9dYrGFt8OiIsHQ= +github.com/google/go-cmp v0.2.0/go.mod h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5aqRK0M= +github.com/google/go-cmp v0.3.0/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= +github.com/google/go-cmp v0.3.1/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= +github.com/google/go-cmp v0.4.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +github.com/google/go-cmp v0.4.1/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +github.com/google/go-cmp v0.5.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +github.com/google/go-cmp v0.5.1/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +github.com/google/go-cmp v0.5.2/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +github.com/google/go-cmp v0.5.3/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +github.com/google/go-cmp v0.5.4/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +github.com/google/go-cmp v0.5.5/go.mod 
h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +github.com/google/go-cmp v0.5.6/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +github.com/google/go-cmp v0.5.8 h1:e6P7q2lk1O+qJJb4BtCQXlK8vWEO8V1ZeuEdJNOqZyg= +github.com/google/go-cmp v0.5.8/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY= +github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= +github.com/google/gofuzz v1.1.0 h1:Hsa8mG0dQ46ij8Sl2AYJDUv1oA9/d6Vk+3LG99Oe02g= +github.com/google/gofuzz v1.1.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= +github.com/google/martian v2.1.0+incompatible/go.mod h1:9I4somxYTbIHy5NJKHRl3wXiIaQGbYVAs8BPL6v8lEs= +github.com/google/martian/v3 v3.0.0/go.mod h1:y5Zk1BBys9G+gd6Jrk0W3cC1+ELVxBWuIGO+w/tUAp0= +github.com/google/martian/v3 v3.1.0/go.mod h1:y5Zk1BBys9G+gd6Jrk0W3cC1+ELVxBWuIGO+w/tUAp0= +github.com/google/martian/v3 v3.2.1/go.mod h1:oBOf6HBosgwRXnUGWUB05QECsc6uvmMiJ3+6W4l/CUk= +github.com/google/pprof v0.0.0-20181206194817-3ea8567a2e57/go.mod h1:zfwlbNMJ+OItoe0UupaVj+oy1omPYYDuagoSzA8v9mc= +github.com/google/pprof v0.0.0-20190515194954-54271f7e092f/go.mod h1:zfwlbNMJ+OItoe0UupaVj+oy1omPYYDuagoSzA8v9mc= +github.com/google/pprof v0.0.0-20191218002539-d4f498aebedc/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= +github.com/google/pprof v0.0.0-20200212024743-f11f1df84d12/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= +github.com/google/pprof v0.0.0-20200229191704-1ebb73c60ed3/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= +github.com/google/pprof v0.0.0-20200430221834-fc25d7d30c6d/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= +github.com/google/pprof v0.0.0-20200708004538-1a94d8640e99/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= +github.com/google/pprof v0.0.0-20201023163331-3e6fc7fc9c4c/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= +github.com/google/pprof v0.0.0-20201203190320-1bf35d6f28c2/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= +github.com/google/pprof 
v0.0.0-20210122040257-d980be63207e/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= +github.com/google/pprof v0.0.0-20210226084205-cbba55b83ad5/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= +github.com/google/pprof v0.0.0-20210601050228-01bbb1931b22/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= +github.com/google/pprof v0.0.0-20210609004039-a478d1d731e9/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= +github.com/google/pprof v0.0.0-20210720184732-4bb14d4b1be1/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= +github.com/google/renameio v0.1.0/go.mod h1:KWCgfxg9yswjAJkECMjeO8J8rahYeXnNhOm40UhjYkI= +github.com/google/uuid v1.1.2 h1:EVhdT+1Kseyi1/pUmXKaFxYsDNy9RQYkMWRH68J/W7Y= +github.com/google/uuid v1.1.2/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= +github.com/googleapis/gax-go/v2 v2.0.4/go.mod h1:0Wqv26UfaUD9n4G6kQubkQ+KchISgw+vpHVxEJEs9eg= +github.com/googleapis/gax-go/v2 v2.0.5/go.mod h1:DWXyrwAJ9X0FpwwEdw+IPEYBICEFu5mhpdKc/us6bOk= +github.com/googleapis/gax-go/v2 v2.1.0/go.mod h1:Q3nei7sK6ybPYH7twZdmQpAd1MKb7pfu6SK+H1/DsU0= +github.com/grpc-ecosystem/grpc-gateway v1.16.0/go.mod h1:BDjrQk3hbvj6Nolgz8mAMFbcEtjT1g+wF4CSlocrBnw= +github.com/hashicorp/golang-lru v0.5.0/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8= +github.com/hashicorp/golang-lru v0.5.1/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8= +github.com/ianlancetaylor/demangle v0.0.0-20181102032728-5e5cf60278f6/go.mod h1:aSSvb/t6k1mPoxDqO4vJh6VOCGPwU4O0C2/Eqndh1Sc= +github.com/ianlancetaylor/demangle v0.0.0-20200824232613-28f6c0f3b639/go.mod h1:aSSvb/t6k1mPoxDqO4vJh6VOCGPwU4O0C2/Eqndh1Sc= +github.com/imdario/mergo v0.3.12 h1:b6R2BslTbIEToALKP7LxUvijTsNI9TAe80pLWN2g/HU= +github.com/imdario/mergo v0.3.12/go.mod h1:jmQim1M+e3UYxmgPu/WyfjB3N3VflVyUjjjwH0dnCYA= +github.com/jessevdk/go-flags v1.4.0/go.mod h1:4FA24M0QyGHXBuZZK/XkWh8h0e1EYbRYJSGM75WSRxI= +github.com/josharian/intern v1.0.0 h1:vlS4z54oSdjm0bgjRigI+G1HpF+tI+9rE5LLzOg8HmY= 
+github.com/josharian/intern v1.0.0/go.mod h1:5DoeVV0s6jJacbCEi61lwdGj/aVlrQvzHFFd8Hwg//Y= +github.com/jpillora/backoff v1.0.0/go.mod h1:J/6gKK9jxlEcS3zixgDgUAsiuZ7yrSoa/FX5e0EB2j4= +github.com/json-iterator/go v1.1.6/go.mod h1:+SdeFBvtyEkXs7REEP0seUULqWtbJapLOCVDaaPEHmU= +github.com/json-iterator/go v1.1.10/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4= +github.com/json-iterator/go v1.1.11/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4= +github.com/json-iterator/go v1.1.12 h1:PV8peI4a0ysnczrg+LtxykD8LfKY9ML6u2jnxaEnrnM= +github.com/json-iterator/go v1.1.12/go.mod h1:e30LSqwooZae/UwlEbR2852Gd8hjQvJoHmT4TnhNGBo= +github.com/jstemmer/go-junit-report v0.0.0-20190106144839-af01ea7f8024/go.mod h1:6v2b51hI/fHJwM22ozAgKL4VKDeJcHhJFhtBdhmNjmU= +github.com/jstemmer/go-junit-report v0.9.1/go.mod h1:Brl9GWCQeLvo8nXZwPNNblvFj/XSXhF0NWZEnDohbsk= +github.com/julienschmidt/httprouter v1.2.0/go.mod h1:SYymIcj16QtmaHHD7aYtjjsJG7VTCxuUUipMqKk8s4w= +github.com/julienschmidt/httprouter v1.3.0/go.mod h1:JR6WtHb+2LUe8TCKY3cZOxFyyO8IZAc4RVcycCCAKdM= +github.com/kisielk/errcheck v1.5.0/go.mod h1:pFxgyoBC7bSaBwPgfKdkLd5X25qrDl4LWUI2bnpBCr8= +github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck= +github.com/konsorten/go-windows-terminal-sequences v1.0.1/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ= +github.com/konsorten/go-windows-terminal-sequences v1.0.3/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ= +github.com/kr/logfmt v0.0.0-20140226030751-b84e30acd515/go.mod h1:+0opPa2QZZtGFBFZlji/RkVcI2GknAs/DXo4wKdlNEc= +github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo= +github.com/kr/pretty v0.2.0/go.mod h1:ipq/a2n7PKx3OHsz4KJII5eveXtPO4qwEXGdVfWzfnI= +github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ= +github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI= +github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY= 
+github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE= +github.com/mailru/easyjson v0.0.0-20190614124828-94de47d64c63/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc= +github.com/mailru/easyjson v0.0.0-20190626092158-b2ccc519800e/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc= +github.com/mailru/easyjson v0.7.6 h1:8yTIVnZgCoiM1TgqoeTl+LfU5Jg6/xL3QhGQnimLYnA= +github.com/mailru/easyjson v0.7.6/go.mod h1:xzfreul335JAWq5oZzymOObrkdz5UnU4kGfJJLY9Nlc= +github.com/matttproud/golang_protobuf_extensions v1.0.1/go.mod h1:D8He9yQNgCq6Z5Ld7szi9bcBfOoFv/3dc6xSMkL2PC0= +github.com/matttproud/golang_protobuf_extensions v1.0.2-0.20181231171920-c182affec369 h1:I0XW9+e1XWDxdcEniV4rQAIOPUGDq67JSCiRCgGCZLI= +github.com/matttproud/golang_protobuf_extensions v1.0.2-0.20181231171920-c182affec369/go.mod h1:BSXmuO+STAnVfrANrmjBb36TMTDstsz7MSK+HVaYKv4= +github.com/modern-go/concurrent v0.0.0-20180228061459-e0a39a4cb421/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= +github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd h1:TRLaZ9cD/w8PVh93nsPXa1VrQ6jlwL5oN8l14QlcNfg= +github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= +github.com/modern-go/reflect2 v0.0.0-20180701023420-4b7aa43c6742/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0= +github.com/modern-go/reflect2 v1.0.1/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0= +github.com/modern-go/reflect2 v1.0.2 h1:xBagoLtFs94CBntxluKeaWgTMpvLxC4ur3nMaC9Gz0M= +github.com/modern-go/reflect2 v1.0.2/go.mod h1:yWuevngMOJpCy52FWWMvUC8ws7m/LJsjYzDa0/r8luk= +github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 h1:C3w9PqII01/Oq1c1nUAm88MOHcQC9l5mIlSMApZMrHA= +github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822/go.mod h1:+n7T8mK8HuQTcFwEeznm/DIxMOiR9yIdICNftLE1DvQ= +github.com/mwitkow/go-conntrack v0.0.0-20161129095857-cc309e4a2223/go.mod h1:qRWi+5nqEBWmkhHvq77mSJWrCKwh8bxhgT7d/eI7P4U= 
+github.com/mwitkow/go-conntrack v0.0.0-20190716064945-2f068394615f/go.mod h1:qRWi+5nqEBWmkhHvq77mSJWrCKwh8bxhgT7d/eI7P4U= +github.com/niemeyer/pretty v0.0.0-20200227124842-a10e7caefd8e h1:fD57ERR4JtEqsWbfPhv4DMiApHyliiK5xCTNVSPiaAs= +github.com/niemeyer/pretty v0.0.0-20200227124842-a10e7caefd8e/go.mod h1:zD1mROLANZcx1PVRCS0qkT7pwLkGfwJo4zjcN/Tysno= +github.com/nxadm/tail v1.4.8 h1:nPr65rt6Y5JFSKQO7qToXr7pePgD6Gwiw05lkbyAQTE= +github.com/onsi/ginkgo v1.16.5 h1:8xi0RTUf59SOSfEtZMvwTvXYMzG4gV23XVHOZiXNtnE= +github.com/onsi/ginkgo/v2 v2.1.4 h1:GNapqRSid3zijZ9H77KrgVG4/8KqiyRsxcSxe+7ApXY= +github.com/onsi/ginkgo/v2 v2.1.4/go.mod h1:um6tUpWM/cxCK3/FK8BXqEiUMUwRgSM4JXG47RKZmLU= +github.com/onsi/gomega v1.19.0 h1:4ieX6qQjPP/BfC3mpsAtIGGlxTWPeA3Inl/7DtXw1tw= +github.com/onsi/gomega v1.19.0/go.mod h1:LY+I3pBVzYsTBU1AnDwOSxaYi9WoWiqgwooUqq9yPro= +github.com/pkg/errors v0.8.0/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= +github.com/pkg/errors v0.8.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= +github.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4= +github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= +github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= +github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= +github.com/prometheus/client_golang v0.9.1/go.mod h1:7SWBe2y4D6OKWSNQJUaRYU/AaXPKyh/dDVn+NZz0KFw= +github.com/prometheus/client_golang v1.0.0/go.mod h1:db9x61etRT2tGnBNRi70OPL5FsnadC4Ky3P0J6CfImo= +github.com/prometheus/client_golang v1.7.1/go.mod h1:PY5Wy2awLA44sXw4AOSfFBetzPP4j5+D6mVACh+pe2M= +github.com/prometheus/client_golang v1.11.0/go.mod h1:Z6t4BnS23TR94PD6BsDNk8yVqroYurpAkEiz0P2BEV0= +github.com/prometheus/client_golang v1.12.2 h1:51L9cDoUHVrXx4zWYlcLQIZ+d+VXHgqnYKkIuq4g/34= +github.com/prometheus/client_golang v1.12.2/go.mod h1:3Z9XVyYiZYEO+YQWt3RD2R3jrbd179Rt297l4aS6nDY= +github.com/prometheus/client_model 
v0.0.0-20180712105110-5c3871d89910/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo= +github.com/prometheus/client_model v0.0.0-20190129233127-fd36f4220a90/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= +github.com/prometheus/client_model v0.0.0-20190812154241-14fe0d1b01d4/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= +github.com/prometheus/client_model v0.2.0 h1:uq5h0d+GuxiXLJLNABMgp2qUWDPiLvgCzz2dUR+/W/M= +github.com/prometheus/client_model v0.2.0/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= +github.com/prometheus/common v0.4.1/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y86RQel1bk4= +github.com/prometheus/common v0.10.0/go.mod h1:Tlit/dnDKsSWFlCLTWaA1cyBgKHSMdTB80sz/V91rCo= +github.com/prometheus/common v0.26.0/go.mod h1:M7rCNAaPfAosfx8veZJCuw84e35h3Cfd9VFqTh1DIvc= +github.com/prometheus/common v0.32.1 h1:hWIdL3N2HoUx3B8j3YN9mWor0qhY/NlEKZEaXxuIRh4= +github.com/prometheus/common v0.32.1/go.mod h1:vu+V0TpY+O6vW9J44gczi3Ap/oXXR10b+M/gUGO4Hls= +github.com/prometheus/procfs v0.0.0-20181005140218-185b4288413d/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk= +github.com/prometheus/procfs v0.0.2/go.mod h1:TjEm7ze935MbeOT/UhFTIMYKhuLP4wbCsTZCD3I8kEA= +github.com/prometheus/procfs v0.1.3/go.mod h1:lV6e/gmhEcM9IjHGsFOCxxuZ+z1YqCvr4OA4YeYWdaU= +github.com/prometheus/procfs v0.6.0/go.mod h1:cz+aTbrPOrUb4q7XlbU9ygM+/jj0fzG6c1xBZuNvfVA= +github.com/prometheus/procfs v0.7.3 h1:4jVXhlkAyzOScmCkXBTOLRLTz8EeU+eyjrwB/EPq0VU= +github.com/prometheus/procfs v0.7.3/go.mod h1:cz+aTbrPOrUb4q7XlbU9ygM+/jj0fzG6c1xBZuNvfVA= +github.com/rogpeppe/fastuuid v1.2.0/go.mod h1:jVj6XXZzXRy/MSR5jhDC/2q6DgLz+nrA6LYCDYWNEvQ= +github.com/rogpeppe/go-internal v1.3.0/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4= +github.com/sirupsen/logrus v1.2.0/go.mod h1:LxeOpSwHxABJmUn/MG1IvRgCAasNZTLOkJPxbbu5VWo= +github.com/sirupsen/logrus v1.4.2/go.mod h1:tLMulIdttU9McNUspp0xgXVQah82FyeX6MwdIuYE2rE= +github.com/sirupsen/logrus v1.6.0/go.mod 
h1:7uNnSEd1DgxDLC74fIahvMZmmYsHGZGEOFrfsX/uA88= +github.com/spaolacci/murmur3 v0.0.0-20180118202830-f09979ecbc72/go.mod h1:JwIasOWyU6f++ZhiEuf87xNszmSA2myDM2Kzu9HwQUA= +github.com/spf13/afero v1.2.2/go.mod h1:9ZxEEn6pIJ8Rxe320qSDBk6AsU0r9pR7Q4OcevTdifk= +github.com/spf13/pflag v1.0.5 h1:iy+VFUOCP1a+8yFto/drg2CJ5u0yRoB7fZw3DKv/JXA= +github.com/spf13/pflag v1.0.5/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg= +github.com/stoewer/go-strcase v1.2.0/go.mod h1:IBiWB2sKIp3wVVQ3Y035++gc+knqhUQag1KpM8ahLw8= +github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= +github.com/stretchr/objx v0.1.1/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= +github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs= +github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI= +github.com/stretchr/testify v1.4.0/go.mod h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81PSLYec5m4= +github.com/stretchr/testify v1.5.1/go.mod h1:5W2xD1RspED5o8YsWQXVCued0rvSQ+mT+I5cxcmMvtA= +github.com/stretchr/testify v1.6.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= +github.com/stretchr/testify v1.7.0 h1:nwc3DEeHmmLAfoZucVR881uASk0Mfjw8xYJ99tb5CcY= +github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= +github.com/yuin/goldmark v1.1.25/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= +github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= +github.com/yuin/goldmark v1.1.32/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= +github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= +github.com/yuin/goldmark v1.3.5/go.mod h1:mwnBkeHKe2W/ZEtQ+71ViKU8L12m81fl3OWwC1Zlc8k= +go.opencensus.io v0.21.0/go.mod h1:mSImk1erAIZhrmZN+AvHh14ztQfjbGwt4TtuofqLduU= +go.opencensus.io v0.22.0/go.mod h1:+kGneAE2xo2IficOXnaByMWTGM9T73dGwxeWcUqIpI8= +go.opencensus.io v0.22.2/go.mod 
h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw= +go.opencensus.io v0.22.3/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw= +go.opencensus.io v0.22.4/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw= +go.opencensus.io v0.22.5/go.mod h1:5pWMHQbX5EPX2/62yrJeAkowc+lfs/XD7Uxpq3pI6kk= +go.opencensus.io v0.23.0/go.mod h1:XItmlyltB5F7CS4xOC1DcqMoFqwtC6OG2xF7mCv7P7E= +go.opentelemetry.io/proto/otlp v0.7.0/go.mod h1:PqfVotwruBrMGOCsRd/89rSnXhoiJIqeYNgFYFoEGnI= +go.uber.org/atomic v1.7.0 h1:ADUqmZGgLDDfbSL9ZmPxKTybcoEYHgpYfELNoN+7hsw= +go.uber.org/atomic v1.7.0/go.mod h1:fEN4uk6kAWBTFdckzkM89CLk9XfWZrxpCo0nPH17wJc= +go.uber.org/goleak v1.1.10/go.mod h1:8a7PlsEVH3e/a/GLqe5IIrQx6GzcnRmZEufDUTk4A7A= +go.uber.org/goleak v1.1.11/go.mod h1:cwTWslyiVhfpKIDGSZEM2HlOvcqm+tG4zioyIeLoqMQ= +go.uber.org/goleak v1.1.12 h1:gZAh5/EyT/HQwlpkCy6wTpqfH9H8Lz8zbm3dZh+OyzA= +go.uber.org/multierr v1.6.0 h1:y6IPFStTAIT5Ytl7/XYmHvzXQ7S3g/IeZW9hyZ5thw4= +go.uber.org/multierr v1.6.0/go.mod h1:cdWPpRnG4AhwMwsgIHip0KRBQjJy5kYEpYjJxpXp9iU= +go.uber.org/zap v1.19.0/go.mod h1:xg/QME4nWcxGxrpdeYfq7UvYrLh66cuVKdrbD1XF/NI= +go.uber.org/zap v1.21.0 h1:WefMeulhovoZ2sYXz7st6K0sLj7bBhpiFaud4r4zST8= +go.uber.org/zap v1.21.0/go.mod h1:wjWOCqI0f2ZZrJF/UufIOkiC8ii6tm1iqIsLo76RfJw= +golang.org/x/crypto v0.0.0-20180904163835-0709b304e793/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= +golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= +golang.org/x/crypto v0.0.0-20190510104115-cbcb75029529/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= +golang.org/x/crypto v0.0.0-20190605123033-f99c8df09eb5/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= +golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= +golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= +golang.org/x/crypto v0.0.0-20210921155107-089bfa567519/go.mod 
h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc= +golang.org/x/crypto v0.0.0-20211215153901-e495a2d5b3d3/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4= +golang.org/x/crypto v0.0.0-20220315160706-3147a52a75dd h1:XcWmESyNjXJMLahc3mqVQJcgSTDxFxhETVlfk9uGc38= +golang.org/x/crypto v0.0.0-20220315160706-3147a52a75dd/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4= +golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= +golang.org/x/exp v0.0.0-20190306152737-a1d7652674e8/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= +golang.org/x/exp v0.0.0-20190510132918-efd6b22b2522/go.mod h1:ZjyILWgesfNpC6sMxTJOJm9Kp84zZh5NQWvqDGG3Qr8= +golang.org/x/exp v0.0.0-20190829153037-c13cbed26979/go.mod h1:86+5VVa7VpoJ4kLfm080zCjGlMRFzhUhsZKEZO7MGek= +golang.org/x/exp v0.0.0-20191030013958-a1ab85dbe136/go.mod h1:JXzH8nQsPlswgeRAPE3MuO9GYsAcnJvJ4vnMwN/5qkY= +golang.org/x/exp v0.0.0-20191129062945-2f5052295587/go.mod h1:2RIsYlXP63K8oxa1u096TMicItID8zy7Y6sNkU49FU4= +golang.org/x/exp v0.0.0-20191227195350-da58074b4299/go.mod h1:2RIsYlXP63K8oxa1u096TMicItID8zy7Y6sNkU49FU4= +golang.org/x/exp v0.0.0-20200119233911-0405dc783f0a/go.mod h1:2RIsYlXP63K8oxa1u096TMicItID8zy7Y6sNkU49FU4= +golang.org/x/exp v0.0.0-20200207192155-f17229e696bd/go.mod h1:J/WKrq2StrnmMY6+EHIKF9dgMWnmCNThgcyBT1FY9mM= +golang.org/x/exp v0.0.0-20200224162631-6cc2880d07d6/go.mod h1:3jZMyOhIsHpP37uCMkUooju7aAi5cS1Q23tOzKc+0MU= +golang.org/x/image v0.0.0-20190227222117-0694c2d4d067/go.mod h1:kZ7UVZpmo3dzQBMxlp+ypCbDeSB+sBbTgSJuh5dn5js= +golang.org/x/image v0.0.0-20190802002840-cff245a6509b/go.mod h1:FeLwcggjj3mMvU+oOTbSwawSJRM1uh48EjtB4UJZlP0= +golang.org/x/lint v0.0.0-20181026193005-c67002cb31c3/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE= +golang.org/x/lint v0.0.0-20190227174305-5b3e6a55c961/go.mod h1:wehouNa3lNwaWXcvxsM5YxQ5yQlVC4a0KAMCusXpPoU= +golang.org/x/lint v0.0.0-20190301231843-5614ed5bae6f/go.mod 
h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE= +golang.org/x/lint v0.0.0-20190313153728-d0100b6bd8b3/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= +golang.org/x/lint v0.0.0-20190409202823-959b441ac422/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= +golang.org/x/lint v0.0.0-20190909230951-414d861bb4ac/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= +golang.org/x/lint v0.0.0-20190930215403-16217165b5de/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= +golang.org/x/lint v0.0.0-20191125180803-fdd1cda4f05f/go.mod h1:5qLYkcX4OjUUV8bRuDixDT3tpyyb+LUpUlRWLxfhWrs= +golang.org/x/lint v0.0.0-20200130185559-910be7a94367/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY= +golang.org/x/lint v0.0.0-20200302205851-738671d3881b/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY= +golang.org/x/lint v0.0.0-20201208152925-83fdc39ff7b5/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY= +golang.org/x/lint v0.0.0-20210508222113-6edffad5e616/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY= +golang.org/x/mobile v0.0.0-20190312151609-d3739f865fa6/go.mod h1:z+o9i4GpDbdi3rU15maQ/Ox0txvL9dWGYEHz965HBQE= +golang.org/x/mobile v0.0.0-20190719004257-d2bd2a29d028/go.mod h1:E/iHnbuqvinMTCcRqshq8CkpyQDoeVncDDYHnLhea+o= +golang.org/x/mod v0.0.0-20190513183733-4bf6d317e70e/go.mod h1:mXi4GBBbnImb6dmsKGUJ2LatrhH/nqhxcFungHvyanc= +golang.org/x/mod v0.1.0/go.mod h1:0QHyrYULN0/3qlju5TqG8bIK38QM8yzMo5ekMj3DlcY= +golang.org/x/mod v0.1.1-0.20191105210325-c90efee705ee/go.mod h1:QqPTAvyqsEbceGzBzNggFXnrqF1CaUcvgkdR5Ot7KZg= +golang.org/x/mod v0.1.1-0.20191107180719-034126e5016b/go.mod h1:QqPTAvyqsEbceGzBzNggFXnrqF1CaUcvgkdR5Ot7KZg= +golang.org/x/mod v0.2.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= +golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= +golang.org/x/mod v0.4.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= +golang.org/x/mod v0.4.1/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= +golang.org/x/mod 
v0.4.2/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= +golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20181114220301-adae6a3d119a/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20190108225652-1e06a53dbb7e/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20190213061140-3a22650c66bd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20190311183353-d8887717615a/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= +golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= +golang.org/x/net v0.0.0-20190501004415-9ce7a6920f09/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= +golang.org/x/net v0.0.0-20190503192946-f4e77d36d62c/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= +golang.org/x/net v0.0.0-20190603091049-60506f45cf65/go.mod h1:HSz+uSET+XFnRR8LxR5pz3Of3rY3CfYBVs4xY44aLks= +golang.org/x/net v0.0.0-20190613194153-d28f0bde5980/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20190628185345-da137c7871d7/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20190724013045-ca1201d0de80/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20190827160401-ba9fcec4b297/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20191209160850-c0dbc17a3553/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20200114155413-6afb5195e5aa/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20200202094626-16171245cfb2/go.mod 
h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20200222125558-5a598a2470a0/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20200226121028-0de0cce0169b/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20200301022130-244492dfa37a/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20200324143707-d3edc9973b7e/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= +golang.org/x/net v0.0.0-20200501053045-e0ff5e5a1de5/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= +golang.org/x/net v0.0.0-20200506145744-7e3656a0809f/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= +golang.org/x/net v0.0.0-20200513185701-a91f0712d120/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= +golang.org/x/net v0.0.0-20200520182314-0ba52f642ac2/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= +golang.org/x/net v0.0.0-20200625001655-4c5254603344/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA= +golang.org/x/net v0.0.0-20200707034311-ab3426394381/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA= +golang.org/x/net v0.0.0-20200822124328-c89045814202/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA= +golang.org/x/net v0.0.0-20201021035429-f5854403a974/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= +golang.org/x/net v0.0.0-20201031054903-ff519b6c9102/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= +golang.org/x/net v0.0.0-20201110031124-69a78807bb2b/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= +golang.org/x/net v0.0.0-20201209123823-ac852fbbde11/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= +golang.org/x/net v0.0.0-20210119194325-5f4716e94777/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= +golang.org/x/net v0.0.0-20210226172049-e18ecbb05110/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= +golang.org/x/net v0.0.0-20210316092652-d523dce5a7f4/go.mod h1:RBQZq4jEuRlivfhVLdyRGr576XBO4/greRjx4P4O3yc= 
+golang.org/x/net v0.0.0-20210405180319-a5a99cb37ef4/go.mod h1:p54w0d4576C0XHj96bSt6lcn1PtDYWL6XObtHCRCNQM= +golang.org/x/net v0.0.0-20210503060351-7fd8e65b6420/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= +golang.org/x/net v0.0.0-20210525063256-abc453219eb5/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= +golang.org/x/net v0.0.0-20211112202133-69e39bad7dc2/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= +golang.org/x/net v0.0.0-20220722155237-a158d28d115b h1:PxfKdU9lEEDYjdIzOtC4qFWgkU2rGHdKlKowJSMN9h0= +golang.org/x/net v0.0.0-20220722155237-a158d28d115b/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c= +golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= +golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= +golang.org/x/oauth2 v0.0.0-20190604053449-0f29369cfe45/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= +golang.org/x/oauth2 v0.0.0-20191202225959-858c2ad4c8b6/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= +golang.org/x/oauth2 v0.0.0-20200107190931-bf48bf16ab8d/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= +golang.org/x/oauth2 v0.0.0-20200902213428-5d25da1a8d43/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= +golang.org/x/oauth2 v0.0.0-20201109201403-9fd604954f58/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= +golang.org/x/oauth2 v0.0.0-20201208152858-08078c50e5b5/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= +golang.org/x/oauth2 v0.0.0-20210218202405-ba52d332ba99/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= +golang.org/x/oauth2 v0.0.0-20210220000619-9bb904979d93/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= +golang.org/x/oauth2 v0.0.0-20210313182246-cd4f82c27b84/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= +golang.org/x/oauth2 v0.0.0-20210514164344-f6687ab2804c/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= +golang.org/x/oauth2 
v0.0.0-20210628180205-a41e5a781914/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= +golang.org/x/oauth2 v0.0.0-20210805134026-6f1e6394065a/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= +golang.org/x/oauth2 v0.0.0-20210819190943-2bc19b11175f/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= +golang.org/x/oauth2 v0.0.0-20211104180415-d3ed0bb246c8 h1:RerP+noqYHUQ8CMRcPlC2nvTa4dcBIjegkuWdcUDuqg= +golang.org/x/oauth2 v0.0.0-20211104180415-d3ed0bb246c8/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= +golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20190227155943-e225da77a7e6/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20200317015054-43a5402ce75a/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20200625203802-6e8e738ad208/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20201207232520-09787c993a3a/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20210220032951-036812b2e83c/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20180905080454-ebe1bf3edb33/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys 
v0.0.0-20181116152217-5ac8a444bdc5/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20190312061237-fead79001313/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190422165155-953cdadca894/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190502145724-3ef323f4f1fd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190507160741-ecd444e8653b/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190606165138-5da285871e9c/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190624142023-c5567b49c5d0/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190726091711-fc99dfbffb4e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20191001151750-bb3f8db39f24/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20191204072324-ce4227a45e2e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20191228213918-04cbcbbfeed8/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200106162015-b016eb3dc98e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200113162924-86b910548bc1/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200122134326-e047566fdf82/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200202164722-d101bd2416d5/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200212091648-12a6c2dcc1e4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200223170610-d5e6a3e2c0ae/go.mod 
h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200302150141-5c8b2ff67527/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200323222414-85ca7c5b95cd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200331124033-c3d80250170d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200501052902-10377860bb8e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200511232937-7e40ca221e25/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200515095857-1151b9dac4a9/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200523222454-059865788121/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200615200032-f1bc736245b1/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200625212154-ddb9806d33ae/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200803210538-64077c9b5642/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200905004654-be1d3432aa8f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20201119102817-f84b799fce68/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20201201145000-ef89a241ccb3/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210104204734-6f8348627aad/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210119212857-b64e53b001e4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210124154548-22da62e12c0c/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210220050731-9a76102bfb43/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= 
+golang.org/x/sys v0.0.0-20210305230114-8fe3ee5dd75b/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210315160823-c6e025ad8005/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210320140829-1e4c9ba3b0c4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210330210617-4fbd30eecc44/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210423082822-04245dca01da/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210510120138-977fb7262007/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20210514084401-e8d321eab015/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20210603081109-ebe580a85c40/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20210603125802-9665404d3644/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20210615035016-665e8c7367d1/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20210616094352-59db8d763f22/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20210630005230-0f9fa26af87c/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20210806184541-e5e7981a1069/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20210823070655-63515b42dcdf/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20210908233432-aa78b53d3365/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20220114195835-da31bd327af9/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20220412211240-33da011f77ad/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20220722155257-8c9f86f7a55f h1:v4INt8xihDGvnrfjMDVXGxw9wrfxYyCjk0KbXjhR55s= +golang.org/x/sys v0.0.0-20220722155257-8c9f86f7a55f/go.mod 
h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= +golang.org/x/term v0.0.0-20210927222741-03fcf44c2211 h1:JGgROgKl9N8DuW20oFS5gxc+lE67/N3FcwmBPMe7ArY= +golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8= +golang.org/x/text v0.0.0-20170915032832-14c0d48ead0c/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= +golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= +golang.org/x/text v0.3.1-0.20180807135948-17ff2d5776d2/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= +golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk= +golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= +golang.org/x/text v0.3.4/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= +golang.org/x/text v0.3.5/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= +golang.org/x/text v0.3.6/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= +golang.org/x/text v0.3.7 h1:olpwvP2KacW1ZWvsR7uQhoyTYvKAupfQrRGBFM352Gk= +golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ= +golang.org/x/time v0.0.0-20181108054448-85acf8d2951c/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= +golang.org/x/time v0.0.0-20190308202827-9d24e82272b4/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= +golang.org/x/time v0.0.0-20191024005414-555d28b269f0/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= +golang.org/x/time v0.0.0-20220609170525-579cf78fd858 h1:Dpdu/EMxGMFgq0CeYMh4fazTD2vtlZRYE7wyynxJb9U= +golang.org/x/time v0.0.0-20220609170525-579cf78fd858/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= +golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= +golang.org/x/tools v0.0.0-20190114222345-bf090417da8b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= 
+golang.org/x/tools v0.0.0-20190226205152-f727befe758c/go.mod h1:9Yl7xja0Znq3iFh3HoIrodX9oNMXvdceNzlUR8zjMvY= +golang.org/x/tools v0.0.0-20190311212946-11955173bddd/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= +golang.org/x/tools v0.0.0-20190312151545-0bb0c0a6e846/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= +golang.org/x/tools v0.0.0-20190312170243-e65039ee4138/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= +golang.org/x/tools v0.0.0-20190425150028-36563e24a262/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q= +golang.org/x/tools v0.0.0-20190506145303-2d16b83fe98c/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q= +golang.org/x/tools v0.0.0-20190524140312-2c0ae7006135/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q= +golang.org/x/tools v0.0.0-20190606124116-d0a3d012864b/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= +golang.org/x/tools v0.0.0-20190621195816-6e04913cbbac/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= +golang.org/x/tools v0.0.0-20190628153133-6cdbf07be9d0/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= +golang.org/x/tools v0.0.0-20190816200558-6889da9d5479/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20190911174233-4f2ddba30aff/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20191012152004-8de300cfc20a/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20191108193012-7d206e10da11/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20191113191852-77e3bb0ad9e7/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20191115202509-3a792d9c32b2/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20191125144606-a911d9008d1f/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools 
v0.0.0-20191130070609-6e064ea0cf2d/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20191216173652-a0e659d51361/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= +golang.org/x/tools v0.0.0-20191227053925-7b8e75db28f4/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= +golang.org/x/tools v0.0.0-20200117161641-43d50277825c/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= +golang.org/x/tools v0.0.0-20200122220014-bf1340f18c4a/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= +golang.org/x/tools v0.0.0-20200130002326-2f3ba24bd6e7/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= +golang.org/x/tools v0.0.0-20200204074204-1cc6d1ef6c74/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= +golang.org/x/tools v0.0.0-20200207183749-b753a1ba74fa/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= +golang.org/x/tools v0.0.0-20200212150539-ea181f53ac56/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= +golang.org/x/tools v0.0.0-20200224181240-023911ca70b2/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= +golang.org/x/tools v0.0.0-20200227222343-706bc42d1f0d/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= +golang.org/x/tools v0.0.0-20200304193943-95d2e580d8eb/go.mod h1:o4KQGtdN14AW+yjsvvwRTJJuXz8XRtIHtEnmAXLyFUw= +golang.org/x/tools v0.0.0-20200312045724-11d5b4c81c7d/go.mod h1:o4KQGtdN14AW+yjsvvwRTJJuXz8XRtIHtEnmAXLyFUw= +golang.org/x/tools v0.0.0-20200331025713-a30bf2db82d4/go.mod h1:Sl4aGygMT6LrqrWclx+PTx3U+LnKx/seiNR+3G19Ar8= +golang.org/x/tools v0.0.0-20200501065659-ab2804fb9c9d/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= +golang.org/x/tools v0.0.0-20200512131952-2bc93b1c0c88/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= +golang.org/x/tools v0.0.0-20200515010526-7d3b6ebf133d/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= +golang.org/x/tools v0.0.0-20200618134242-20370b0cb4b2/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= +golang.org/x/tools 
v0.0.0-20200619180055-7c47624df98f/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= +golang.org/x/tools v0.0.0-20200729194436-6467de6f59a7/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA= +golang.org/x/tools v0.0.0-20200804011535-6c149bb5ef0d/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA= +golang.org/x/tools v0.0.0-20200825202427-b303f430e36d/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA= +golang.org/x/tools v0.0.0-20200904185747-39188db58858/go.mod h1:Cj7w3i3Rnn0Xh82ur9kSqwfTHTeVxaDqrfMjpcNT6bE= +golang.org/x/tools v0.0.0-20201110124207-079ba7bd75cd/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= +golang.org/x/tools v0.0.0-20201201161351-ac6f37ff4c2a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= +golang.org/x/tools v0.0.0-20201208233053-a543418bbed2/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= +golang.org/x/tools v0.0.0-20210105154028-b0ab187a4818/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= +golang.org/x/tools v0.0.0-20210106214847-113979e3529a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= +golang.org/x/tools v0.1.0/go.mod h1:xkSsbof2nBLbhDlRMhhhyNLN/zl3eTqcnHD5viDpcZ0= +golang.org/x/tools v0.1.1/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk= +golang.org/x/tools v0.1.2/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk= +golang.org/x/tools v0.1.3/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk= +golang.org/x/tools v0.1.4/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk= +golang.org/x/tools v0.1.5/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk= +golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= +golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= +golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= +golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod 
h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= +gomodules.xyz/jsonpatch/v2 v2.2.0 h1:4pT439QV83L+G9FkcCriY6EkpcK6r6bK+A5FBUMI7qY= +gomodules.xyz/jsonpatch/v2 v2.2.0/go.mod h1:WXp+iVDkoLQqPudfQ9GBlwB2eZ5DKOnjQZCYdOS8GPY= +google.golang.org/api v0.4.0/go.mod h1:8k5glujaEP+g9n7WNsDg8QP6cUVNI86fCNMcbazEtwE= +google.golang.org/api v0.7.0/go.mod h1:WtwebWUNSVBH/HAw79HIFXZNqEvBhG+Ra+ax0hx3E3M= +google.golang.org/api v0.8.0/go.mod h1:o4eAsZoiT+ibD93RtjEohWalFOjRDx6CVaqeizhEnKg= +google.golang.org/api v0.9.0/go.mod h1:o4eAsZoiT+ibD93RtjEohWalFOjRDx6CVaqeizhEnKg= +google.golang.org/api v0.13.0/go.mod h1:iLdEw5Ide6rF15KTC1Kkl0iskquN2gFfn9o9XIsbkAI= +google.golang.org/api v0.14.0/go.mod h1:iLdEw5Ide6rF15KTC1Kkl0iskquN2gFfn9o9XIsbkAI= +google.golang.org/api v0.15.0/go.mod h1:iLdEw5Ide6rF15KTC1Kkl0iskquN2gFfn9o9XIsbkAI= +google.golang.org/api v0.17.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE= +google.golang.org/api v0.18.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE= +google.golang.org/api v0.19.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE= +google.golang.org/api v0.20.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE= +google.golang.org/api v0.22.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE= +google.golang.org/api v0.24.0/go.mod h1:lIXQywCXRcnZPGlsd8NbLnOjtAoL6em04bJ9+z0MncE= +google.golang.org/api v0.28.0/go.mod h1:lIXQywCXRcnZPGlsd8NbLnOjtAoL6em04bJ9+z0MncE= +google.golang.org/api v0.29.0/go.mod h1:Lcubydp8VUV7KeIHD9z2Bys/sm/vGKnG1UHuDBSrHWM= +google.golang.org/api v0.30.0/go.mod h1:QGmEvQ87FHZNiUVJkT14jQNYJ4ZJjdRF23ZXz5138Fc= +google.golang.org/api v0.35.0/go.mod h1:/XrVsuzM0rZmrsbjJutiuftIzeuTQcEeaYcSk/mQ1dg= +google.golang.org/api v0.36.0/go.mod h1:+z5ficQTmoYpPn8LCUNVpK5I7hwkpjbcgqA7I34qYtE= +google.golang.org/api v0.40.0/go.mod h1:fYKFpnQN0DsDSKRVRcQSDQNtqWPfM9i+zNPxepjRCQ8= +google.golang.org/api v0.41.0/go.mod h1:RkxM5lITDfTzmyKFPt+wGrCJbVfniCr2ool8kTBzRTU= +google.golang.org/api v0.43.0/go.mod 
h1:nQsDGjRXMo4lvh5hP0TKqF244gqhGcr/YSIykhUk/94= +google.golang.org/api v0.47.0/go.mod h1:Wbvgpq1HddcWVtzsVLyfLp8lDg6AA241LmgIL59tHXo= +google.golang.org/api v0.48.0/go.mod h1:71Pr1vy+TAZRPkPs/xlCf5SsU8WjuAWv1Pfjbtukyy4= +google.golang.org/api v0.50.0/go.mod h1:4bNT5pAuq5ji4SRZm+5QIkjny9JAyVD/3gaSihNefaw= +google.golang.org/api v0.51.0/go.mod h1:t4HdrdoNgyN5cbEfm7Lum0lcLDLiise1F8qDKX00sOU= +google.golang.org/api v0.54.0/go.mod h1:7C4bFFOvVDGXjfDTAsgGwDgAxRDeQ4X8NvUedIt6z3k= +google.golang.org/api v0.55.0/go.mod h1:38yMfeP1kfjsl8isn0tliTjIb1rJXcQi4UXlbqivdVE= +google.golang.org/api v0.57.0/go.mod h1:dVPlbZyBo2/OjBpmvNdpn2GRm6rPy75jyU7bmhdrMgI= +google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM= +google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= +google.golang.org/appengine v1.5.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= +google.golang.org/appengine v1.6.1/go.mod h1:i06prIuMbXzDqacNJfV5OdTW448YApPu5ww/cMBSeb0= +google.golang.org/appengine v1.6.5/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc= +google.golang.org/appengine v1.6.6/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc= +google.golang.org/appengine v1.6.7 h1:FZR1q0exgwxzPzp/aF+VccGrSfxfPpkBqjIIEq3ru6c= +google.golang.org/appengine v1.6.7/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc= +google.golang.org/genproto v0.0.0-20180817151627-c66870c02cf8/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc= +google.golang.org/genproto v0.0.0-20190307195333-5fe7a883aa19/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= +google.golang.org/genproto v0.0.0-20190418145605-e7d98fc518a7/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= +google.golang.org/genproto v0.0.0-20190425155659-357c62f0e4bb/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= +google.golang.org/genproto v0.0.0-20190502173448-54afdca5d873/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= +google.golang.org/genproto 
v0.0.0-20190801165951-fa694d86fc64/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc= +google.golang.org/genproto v0.0.0-20190819201941-24fa4b261c55/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc= +google.golang.org/genproto v0.0.0-20190911173649-1774047e7e51/go.mod h1:IbNlFCBrqXvoKpeg0TB2l7cyZUmoaFKYIwrEpbDKLA8= +google.golang.org/genproto v0.0.0-20191108220845-16a3f7862a1a/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= +google.golang.org/genproto v0.0.0-20191115194625-c23dd37a84c9/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= +google.golang.org/genproto v0.0.0-20191216164720-4f79533eabd1/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= +google.golang.org/genproto v0.0.0-20191230161307-f3c370f40bfb/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= +google.golang.org/genproto v0.0.0-20200115191322-ca5a22157cba/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= +google.golang.org/genproto v0.0.0-20200122232147-0452cf42e150/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= +google.golang.org/genproto v0.0.0-20200204135345-fa8e72b47b90/go.mod h1:GmwEX6Z4W5gMy59cAlVYjN9JhxgbQH6Gn+gFDQe2lzA= +google.golang.org/genproto v0.0.0-20200212174721-66ed5ce911ce/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= +google.golang.org/genproto v0.0.0-20200224152610-e50cd9704f63/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= +google.golang.org/genproto v0.0.0-20200228133532-8c2c7df3a383/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= +google.golang.org/genproto v0.0.0-20200305110556-506484158171/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= +google.golang.org/genproto v0.0.0-20200312145019-da6875a35672/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= +google.golang.org/genproto v0.0.0-20200331122359-1ee6d9798940/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= +google.golang.org/genproto v0.0.0-20200430143042-b979b6f78d84/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= 
+google.golang.org/genproto v0.0.0-20200511104702-f5ebc3bea380/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= +google.golang.org/genproto v0.0.0-20200513103714-09dca8ec2884/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= +google.golang.org/genproto v0.0.0-20200515170657-fc4c6c6a6587/go.mod h1:YsZOwe1myG/8QRHRsmBRE1LrgQY60beZKjly0O1fX9U= +google.golang.org/genproto v0.0.0-20200526211855-cb27e3aa2013/go.mod h1:NbSheEEYHJ7i3ixzK3sjbqSGDJWnxyFXZblF3eUsNvo= +google.golang.org/genproto v0.0.0-20200618031413-b414f8b61790/go.mod h1:jDfRM7FcilCzHH/e9qn6dsT145K34l5v+OpcnNgKAAA= +google.golang.org/genproto v0.0.0-20200729003335-053ba62fc06f/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= +google.golang.org/genproto v0.0.0-20200804131852-c06518451d9c/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= +google.golang.org/genproto v0.0.0-20200825200019-8632dd797987/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= +google.golang.org/genproto v0.0.0-20200904004341-0bd0a958aa1d/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= +google.golang.org/genproto v0.0.0-20201019141844-1ed22bb0c154/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= +google.golang.org/genproto v0.0.0-20201109203340-2640f1f9cdfb/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= +google.golang.org/genproto v0.0.0-20201201144952-b05cb90ed32e/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= +google.golang.org/genproto v0.0.0-20201210142538-e3217bee35cc/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= +google.golang.org/genproto v0.0.0-20201214200347-8c77b98c765d/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= +google.golang.org/genproto v0.0.0-20210222152913-aa3ee6e6a81c/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= +google.golang.org/genproto v0.0.0-20210303154014-9728d6b83eeb/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= +google.golang.org/genproto v0.0.0-20210310155132-4ce2db91004e/go.mod 
h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= +google.golang.org/genproto v0.0.0-20210319143718-93e7006c17a6/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= +google.golang.org/genproto v0.0.0-20210402141018-6c239bbf2bb1/go.mod h1:9lPAdzaEmUacj36I+k7YKbEc5CXzPIeORRgDAUOu28A= +google.golang.org/genproto v0.0.0-20210513213006-bf773b8c8384/go.mod h1:P3QM42oQyzQSnHPnZ/vqoCdDmzH28fzWByN9asMeM8A= +google.golang.org/genproto v0.0.0-20210602131652-f16073e35f0c/go.mod h1:UODoCrxHCcBojKKwX1terBiRUaqAsFqJiF615XL43r0= +google.golang.org/genproto v0.0.0-20210604141403-392c879c8b08/go.mod h1:UODoCrxHCcBojKKwX1terBiRUaqAsFqJiF615XL43r0= +google.golang.org/genproto v0.0.0-20210608205507-b6d2f5bf0d7d/go.mod h1:UODoCrxHCcBojKKwX1terBiRUaqAsFqJiF615XL43r0= +google.golang.org/genproto v0.0.0-20210624195500-8bfb893ecb84/go.mod h1:SzzZ/N+nwJDaO1kznhnlzqS8ocJICar6hYhVyhi++24= +google.golang.org/genproto v0.0.0-20210713002101-d411969a0d9a/go.mod h1:AxrInvYm1dci+enl5hChSFPOmmUF1+uAa/UsgNRWd7k= +google.golang.org/genproto v0.0.0-20210716133855-ce7ef5c701ea/go.mod h1:AxrInvYm1dci+enl5hChSFPOmmUF1+uAa/UsgNRWd7k= +google.golang.org/genproto v0.0.0-20210728212813-7823e685a01f/go.mod h1:ob2IJxKrgPT52GcgX759i1sleT07tiKowYBGbczaW48= +google.golang.org/genproto v0.0.0-20210805201207-89edb61ffb67/go.mod h1:ob2IJxKrgPT52GcgX759i1sleT07tiKowYBGbczaW48= +google.golang.org/genproto v0.0.0-20210813162853-db860fec028c/go.mod h1:cFeNkxwySK631ADgubI+/XFU/xp8FD5KIVV4rj8UC5w= +google.golang.org/genproto v0.0.0-20210821163610-241b8fcbd6c8/go.mod h1:eFjDcFEctNawg4eG61bRv87N7iHBWyVhJu7u1kqDUXY= +google.golang.org/genproto v0.0.0-20210828152312-66f60bf46e71/go.mod h1:eFjDcFEctNawg4eG61bRv87N7iHBWyVhJu7u1kqDUXY= +google.golang.org/genproto v0.0.0-20210831024726-fe130286e0e2/go.mod h1:eFjDcFEctNawg4eG61bRv87N7iHBWyVhJu7u1kqDUXY= +google.golang.org/genproto v0.0.0-20210903162649-d08c68adba83/go.mod h1:eFjDcFEctNawg4eG61bRv87N7iHBWyVhJu7u1kqDUXY= +google.golang.org/genproto 
v0.0.0-20210924002016-3dee208752a0/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc= +google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c= +google.golang.org/grpc v1.20.1/go.mod h1:10oTOabMzJvdu6/UiuZezV6QK5dSlG84ov/aaiqXj38= +google.golang.org/grpc v1.21.1/go.mod h1:oYelfM1adQP15Ek0mdvEgi9Df8B9CZIaU1084ijfRaM= +google.golang.org/grpc v1.23.0/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg= +google.golang.org/grpc v1.25.1/go.mod h1:c3i+UQWmh7LiEpx4sFZnkU36qjEYZ0imhYfXVyQciAY= +google.golang.org/grpc v1.26.0/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk= +google.golang.org/grpc v1.27.0/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk= +google.golang.org/grpc v1.27.1/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk= +google.golang.org/grpc v1.28.0/go.mod h1:rpkK4SK4GF4Ach/+MFLZUBavHOvF2JJB5uozKKal+60= +google.golang.org/grpc v1.29.1/go.mod h1:itym6AZVZYACWQqET3MqgPpjcuV5QH3BxFS3IjizoKk= +google.golang.org/grpc v1.30.0/go.mod h1:N36X2cJ7JwdamYAgDz+s+rVMFjt3numwzf/HckM8pak= +google.golang.org/grpc v1.31.0/go.mod h1:N36X2cJ7JwdamYAgDz+s+rVMFjt3numwzf/HckM8pak= +google.golang.org/grpc v1.31.1/go.mod h1:N36X2cJ7JwdamYAgDz+s+rVMFjt3numwzf/HckM8pak= +google.golang.org/grpc v1.33.1/go.mod h1:fr5YgcSWrqhRRxogOsw7RzIpsmvOZ6IcH4kBYTpR3n0= +google.golang.org/grpc v1.33.2/go.mod h1:JMHMWHQWaTccqQQlmk3MJZS+GWXOdAesneDmEnv2fbc= +google.golang.org/grpc v1.34.0/go.mod h1:WotjhfgOW/POjDeRt8vscBtXq+2VjORFy659qA51WJ8= +google.golang.org/grpc v1.35.0/go.mod h1:qjiiYl8FncCW8feJPdyg3v6XW24KsRHe+dy9BAGRRjU= +google.golang.org/grpc v1.36.0/go.mod h1:qjiiYl8FncCW8feJPdyg3v6XW24KsRHe+dy9BAGRRjU= +google.golang.org/grpc v1.36.1/go.mod h1:qjiiYl8FncCW8feJPdyg3v6XW24KsRHe+dy9BAGRRjU= +google.golang.org/grpc v1.37.0/go.mod h1:NREThFqKR1f3iQ6oBuvc5LadQuXVGo9rkm5ZGrQdJfM= +google.golang.org/grpc v1.37.1/go.mod h1:NREThFqKR1f3iQ6oBuvc5LadQuXVGo9rkm5ZGrQdJfM= +google.golang.org/grpc v1.38.0/go.mod 
h1:NREThFqKR1f3iQ6oBuvc5LadQuXVGo9rkm5ZGrQdJfM= +google.golang.org/grpc v1.39.0/go.mod h1:PImNr+rS9TWYb2O4/emRugxiyHZ5JyHW5F+RPnDzfrE= +google.golang.org/grpc v1.39.1/go.mod h1:PImNr+rS9TWYb2O4/emRugxiyHZ5JyHW5F+RPnDzfrE= +google.golang.org/grpc v1.40.0/go.mod h1:ogyxbiOoUXAkP+4+xa6PZSE9DZgIHtSpzjDTB9KAK34= +google.golang.org/grpc/cmd/protoc-gen-go-grpc v1.1.0/go.mod h1:6Kw0yEErY5E/yWrBtf03jp27GLLJujG4z/JK95pnjjw= +google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8= +google.golang.org/protobuf v0.0.0-20200221191635-4d8936d0db64/go.mod h1:kwYJMbMJ01Woi6D6+Kah6886xMZcty6N08ah7+eCXa0= +google.golang.org/protobuf v0.0.0-20200228230310-ab0ca4ff8a60/go.mod h1:cfTl7dwQJ+fmap5saPgwCLgHXTUD7jkjRqWcaiX5VyM= +google.golang.org/protobuf v1.20.1-0.20200309200217-e05f789c0967/go.mod h1:A+miEFZTKqfCUM6K7xSMQL9OKL/b6hQv+e19PK+JZNE= +google.golang.org/protobuf v1.21.0/go.mod h1:47Nbq4nVaFHyn7ilMalzfO3qCViNmqZ2kzikPIcrTAo= +google.golang.org/protobuf v1.22.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU= +google.golang.org/protobuf v1.23.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU= +google.golang.org/protobuf v1.23.1-0.20200526195155-81db48ad09cc/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU= +google.golang.org/protobuf v1.24.0/go.mod h1:r/3tXBNzIEhYS9I1OUVjXDlt8tc493IdKGjtUeSXeh4= +google.golang.org/protobuf v1.25.0/go.mod h1:9JNX74DMeImyA3h4bdi1ymwjUzf21/xIlbajtzgsN7c= +google.golang.org/protobuf v1.26.0-rc.1/go.mod h1:jlhhOSvTdKEhbULTjvd4ARK9grFBp09yW+WbY/TyQbw= +google.golang.org/protobuf v1.26.0/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc= +google.golang.org/protobuf v1.27.1/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc= +google.golang.org/protobuf v1.28.0 h1:w43yiav+6bVFTBQFZX0r7ipe9JQ1QsbMgHwbBziscLw= +google.golang.org/protobuf v1.28.0/go.mod h1:HV8QOd/L58Z+nl8r43ehVNZIU/HEI6OcFqwMG9pJV4I= +gopkg.in/alecthomas/kingpin.v2 v2.2.6/go.mod 
h1:FMv+mEhP44yOT+4EoQTLFTRgOQ1FBLkstjWtayDeSgw= +gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= +gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= +gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= +gopkg.in/check.v1 v1.0.0-20200227125254-8fa46927fb4f h1:BLraFXnmrev5lT+xlilqcH8XK9/i0At2xKjWk4p6zsU= +gopkg.in/check.v1 v1.0.0-20200227125254-8fa46927fb4f/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= +gopkg.in/errgo.v2 v2.1.0/go.mod h1:hNsd1EY+bozCKY1Ytp96fpM3vjJbqLJn88ws8XvfDNI= +gopkg.in/inf.v0 v0.9.1 h1:73M5CoZyi3ZLMOyDlQh031Cx6N9NDJ2Vvfl76EDAgDc= +gopkg.in/inf.v0 v0.9.1/go.mod h1:cWUDdTG/fYaXco+Dcufb5Vnc6Gp2YChqWtbxRZE0mXw= +gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7 h1:uRGJdciOHaEIrze2W8Q3AKkepLTh2hOroT7a+7czfdQ= +gopkg.in/yaml.v2 v2.2.1/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +gopkg.in/yaml.v2 v2.2.3/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +gopkg.in/yaml.v2 v2.2.4/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +gopkg.in/yaml.v2 v2.2.5/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +gopkg.in/yaml.v2 v2.2.8/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +gopkg.in/yaml.v2 v2.3.0/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +gopkg.in/yaml.v2 v2.4.0 h1:D8xgwECY7CYvx+Y2n4sBz93Jn9JRvxdiyyo8CTfuKaY= +gopkg.in/yaml.v2 v2.4.0/go.mod h1:RDklbk79AGWmwhnvt/jBztapEOGDOx6ZbXqjP6csGnQ= +gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= +gopkg.in/yaml.v3 v3.0.0-20200615113413-eeeca48fe776/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= +gopkg.in/yaml.v3 v3.0.0-20210107192922-496545a6307b/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= +gopkg.in/yaml.v3 v3.0.1 
h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA= +gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= +honnef.co/go/tools v0.0.0-20190102054323-c2f93a96b099/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= +honnef.co/go/tools v0.0.0-20190106161140-3f1c8253044a/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= +honnef.co/go/tools v0.0.0-20190418001031-e561f6794a2a/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= +honnef.co/go/tools v0.0.0-20190523083050-ea95bdfd59fc/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= +honnef.co/go/tools v0.0.1-2019.2.3/go.mod h1:a3bituU0lyd329TUQxRnasdCoJDkEUEAqEt0JzvZhAg= +honnef.co/go/tools v0.0.1-2020.1.3/go.mod h1:X/FiERA/W4tHapMX5mGpAtMSVEeEUOyHaw9vFzvIQ3k= +honnef.co/go/tools v0.0.1-2020.1.4/go.mod h1:X/FiERA/W4tHapMX5mGpAtMSVEeEUOyHaw9vFzvIQ3k= +k8s.io/api v0.25.0 h1:H+Q4ma2U/ww0iGB78ijZx6DRByPz6/733jIuFpX70e0= +k8s.io/api v0.25.0/go.mod h1:ttceV1GyV1i1rnmvzT3BST08N6nGt+dudGrquzVQWPk= +k8s.io/apiextensions-apiserver v0.25.0 h1:CJ9zlyXAbq0FIW8CD7HHyozCMBpDSiH7EdrSTCZcZFY= +k8s.io/apiextensions-apiserver v0.25.0/go.mod h1:3pAjZiN4zw7R8aZC5gR0y3/vCkGlAjCazcg1me8iB/E= +k8s.io/apimachinery v0.25.0 h1:MlP0r6+3XbkUG2itd6vp3oxbtdQLQI94fD5gCS+gnoU= +k8s.io/apimachinery v0.25.0/go.mod h1:qMx9eAk0sZQGsXGu86fab8tZdffHbwUfsvzqKn4mfB0= +k8s.io/client-go v0.25.0 h1:CVWIaCETLMBNiTUta3d5nzRbXvY5Hy9Dpl+VvREpu5E= +k8s.io/client-go v0.25.0/go.mod h1:lxykvypVfKilxhTklov0wz1FoaUZ8X4EwbhS6rpRfN8= +k8s.io/component-base v0.25.0 h1:haVKlLkPCFZhkcqB6WCvpVxftrg6+FK5x1ZuaIDaQ5Y= +k8s.io/component-base v0.25.0/go.mod h1:F2Sumv9CnbBlqrpdf7rKZTmmd2meJq0HizeyY/yAFxk= +k8s.io/klog/v2 v2.0.0/go.mod h1:PBfzABfn139FHAV07az/IF9Wp1bkk3vpT2XSJ76fSDE= +k8s.io/klog/v2 v2.70.1 h1:7aaoSdahviPmR+XkS7FyxlkkXs6tHISSG03RxleQAVQ= +k8s.io/klog/v2 v2.70.1/go.mod h1:y1WjHnz7Dj687irZUWR/WLkLc5N1YHtjLdmgWjndZn0= +k8s.io/kube-openapi v0.0.0-20220803162953-67bda5d908f1 h1:MQ8BAZPZlWk3S9K4a9NCkIFQtZShWqoha7snGixVgEA= 
+k8s.io/kube-openapi v0.0.0-20220803162953-67bda5d908f1/go.mod h1:C/N6wCaBHeBHkHUesQOQy2/MZqGgMAFPqGsGQLdbZBU= +k8s.io/utils v0.0.0-20220728103510-ee6ede2d64ed h1:jAne/RjBTyawwAy0utX5eqigAwz/lQhTmy+Hr/Cpue4= +k8s.io/utils v0.0.0-20220728103510-ee6ede2d64ed/go.mod h1:jPW/WVKK9YHAvNhRxK0md/EJ228hCsBRufyofKtW8HA= +rsc.io/binaryregexp v0.2.0/go.mod h1:qTv7/COck+e2FymRvadv62gMdZztPaShugOCi3I+8D8= +rsc.io/quote/v3 v3.1.0/go.mod h1:yEA65RcK8LyAZtP9Kv3t0HmxON59tX3rD+tICJqUlj0= +rsc.io/sampler v1.3.0/go.mod h1:T1hPZKmBbMNahiBKFy5HrXp6adAjACjK9JXDnKaTXpA= +sigs.k8s.io/controller-runtime v0.13.1 h1:tUsRCSJVM1QQOOeViGeX3GMT3dQF1eePPw6sEE3xSlg= +sigs.k8s.io/controller-runtime v0.13.1/go.mod h1:Zbz+el8Yg31jubvAEyglRZGdLAjplZl+PgtYNI6WNTI= +sigs.k8s.io/json v0.0.0-20220713155537-f223a00ba0e2 h1:iXTIw73aPyC+oRdyqqvVJuloN1p0AC/kzH07hu3NE+k= +sigs.k8s.io/json v0.0.0-20220713155537-f223a00ba0e2/go.mod h1:B8JuhiUyNFVKdsE8h686QcCxMaH6HrOAZj4vswFpcB0= +sigs.k8s.io/structured-merge-diff/v4 v4.2.3 h1:PRbqxJClWWYMNV1dhaG4NsibJbArud9kFxnAMREiWFE= +sigs.k8s.io/structured-merge-diff/v4 v4.2.3/go.mod h1:qjx8mGObPmV2aSZepjQjbmb2ihdVs8cGKBraizNC69E= +sigs.k8s.io/yaml v1.3.0 h1:a2VclLzOGrwOHDiV8EfBGhvjHvP46CtW5j6POvhYGGo= +sigs.k8s.io/yaml v1.3.0/go.mod h1:GeOyir5tyXNByN85N/dRIT9es5UQNerPYEKK56eTBm8= From 9fb54034c244273707eac7fd9a2ef7f0353f3da2 Mon Sep 17 00:00:00 2001 From: Jean-Marc ANDRE Date: Mon, 17 Apr 2023 23:01:35 +0200 Subject: [PATCH 57/69] Working Makefile --- Makefile | 13 ++++++++++--- 1 file changed, 10 insertions(+), 3 deletions(-) diff --git a/Makefile b/Makefile index 9116f85..b591441 100644 --- a/Makefile +++ b/Makefile @@ -1,8 +1,10 @@ # Image URL to use all building/pushing image targets -IMG ?= controller:latest +VERSION ?= latest +IMG ?= docker.io/desmo999r/formolcontroller:$(VERSION) # ENVTEST_K8S_VERSION refers to the version of kubebuilder assets to be downloaded by envtest binary. 
ENVTEST_K8S_VERSION = 1.25.0 +MANIFEST = formolcontroller-multiarch # Get the currently used golang install path (in GOPATH/bin, unless GOBIN is set) ifeq (,$(shell go env GOBIN)) @@ -68,6 +70,7 @@ build: manifests generate fmt vet ## Build manager binary. run: manifests generate fmt vet ## Run a controller from your host. go run ./main.go +PLATFORMS ?= linux/arm64,linux/amd64 # If you wish built the manager image targeting other platforms you can use the --platform flag. # (i.e. docker build --platform linux/arm64 ). However, you must enable docker buildKit for it. # More info: https://docs.docker.com/develop/develop-images/build_enhancements/ @@ -75,9 +78,14 @@ run: manifests generate fmt vet ## Run a controller from your host. docker-build: test ## Build docker image with the manager. docker build -t ${IMG} . +.PHONY: docker-build-multiarch +docker-build-multiarch: + buildah bud --manifest $(MANIFEST) --platform=$(PLATFORMS) Dockerfile.multi + .PHONY: docker-push docker-push: ## Push docker image with the manager. - docker push ${IMG} + buildah manifest psuh --all --rm $(MANIFEST) "docker://$(IMG)" + #docker push ${IMG} # PLATFORMS defines the target platforms for the manager image be build to provide support to multiple # architectures. (i.e. make docker-buildx IMG=myregistry/mypoperator:0.0.1). To use this option you need to: @@ -85,7 +93,6 @@ docker-push: ## Push docker image with the manager. # - have enable BuildKit, More info: https://docs.docker.com/develop/develop-images/build_enhancements/ # - be able to push the image for your registry (i.e. if you do not inform a valid value via IMG=> then the export will fail) # To properly provided solutions that supports more than one platform you should use this option. 
-PLATFORMS ?= linux/arm64,linux/amd64,linux/s390x,linux/ppc64le .PHONY: docker-buildx docker-buildx: test ## Build and push docker image for the manager for cross-platform support # copy existing Dockerfile and insert --platform=${BUILDPLATFORM} into Dockerfile.cross, and preserve the original Dockerfile From 276112e9c0d057f20c9496c3b3327eda134a5c89 Mon Sep 17 00:00:00 2001 From: Jean-Marc ANDRE Date: Mon, 17 Apr 2023 23:10:21 +0200 Subject: [PATCH 58/69] typo --- Makefile | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/Makefile b/Makefile index b591441..4602414 100644 --- a/Makefile +++ b/Makefile @@ -84,7 +84,7 @@ docker-build-multiarch: .PHONY: docker-push docker-push: ## Push docker image with the manager. - buildah manifest psuh --all --rm $(MANIFEST) "docker://$(IMG)" + buildah manifest push --all --rm $(MANIFEST) "docker://$(IMG)" #docker push ${IMG} # PLATFORMS defines the target platforms for the manager image be build to provide support to multiple From beb762708e66b22f5b3a5fab9a3d809c5a956454 Mon Sep 17 00:00:00 2001 From: Jean-Marc ANDRE Date: Tue, 18 Apr 2023 09:41:28 +0200 Subject: [PATCH 59/69] Updated PROJECT --- PROJECT | 25 +++++++++++++++++++++++++ 1 file changed, 25 insertions(+) diff --git a/PROJECT b/PROJECT index dfc3d26..e333bb0 100644 --- a/PROJECT +++ b/PROJECT @@ -22,4 +22,29 @@ resources: kind: BackupSession path: github.com/desmo999r/formol/api/v1alpha1 version: v1alpha1 +- api: + crdVersion: v1 + namespaced: true + controller: true + domain: desmojim.fr + group: formol + kind: RestoreSession + path: github.com/desmo999r/formol/api/v1alpha1 + version: v1alpha1 +- api: + crdVersion: v1 + namespaced: true + domain: desmojim.fr + group: formol + kind: Function + path: github.com/desmo999r/formol/api/v1alpha1 + version: v1alpha1 +- api: + crdVersion: v1 + namespaced: true + domain: desmojim.fr + group: formol + kind: Repo + path: github.com/desmo999r/formol/api/v1alpha1 + version: v1alpha1 version: "3" From 
ae6b682034a6908081bf8f679daf42d3a91338dc Mon Sep 17 00:00:00 2001 From: Jean-Marc ANDRE Date: Thu, 20 Apr 2023 09:19:36 +0200 Subject: [PATCH 60/69] build containers with cache enabled --- Makefile | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/Makefile b/Makefile index 4602414..af5b974 100644 --- a/Makefile +++ b/Makefile @@ -80,7 +80,7 @@ docker-build: test ## Build docker image with the manager. .PHONY: docker-build-multiarch docker-build-multiarch: - buildah bud --manifest $(MANIFEST) --platform=$(PLATFORMS) Dockerfile.multi + buildah bud --manifest $(MANIFEST) --platform=$(PLATFORMS) --layers Dockerfile.multi .PHONY: docker-push docker-push: ## Push docker image with the manager. From 1b31b497a078488a346cff4b547ce0bd9f50eb7c Mon Sep 17 00:00:00 2001 From: Jean-Marc ANDRE Date: Thu, 20 Apr 2023 09:21:16 +0200 Subject: [PATCH 61/69] GetVolumeMounts was not working --- api/v1alpha1/common.go | 21 ++++++++++++++------- 1 file changed, 14 insertions(+), 7 deletions(-) diff --git a/api/v1alpha1/common.go b/api/v1alpha1/common.go index cc70251..7d19eb4 100644 --- a/api/v1alpha1/common.go +++ b/api/v1alpha1/common.go @@ -3,7 +3,9 @@ package v1alpha1 import ( "fmt" corev1 "k8s.io/api/core/v1" + "os" "path/filepath" + "strings" ) const ( @@ -57,14 +59,19 @@ func GetVolumeMounts(container corev1.Container, targetContainer TargetContainer vm := corev1.VolumeMount{ReadOnly: true} var longest int = 0 var sidecarPath string + path = filepath.Clean(path) + splitPath := strings.Split(path, string(os.PathSeparator)) for _, volumeMount := range container.VolumeMounts { - // if strings.HasPrefix(path, volumeMount.MountPath) && len(volumeMount.MountPath) > longest { - if rel, err := filepath.Rel(volumeMount.MountPath, path); err == nil && len(volumeMount.MountPath) > longest { - longest = len(volumeMount.MountPath) - vm.Name = volumeMount.Name - vm.MountPath = fmt.Sprintf("/%s%d", BACKUP_PREFIX_PATH, i) - vm.SubPath = volumeMount.SubPath - sidecarPath = 
filepath.Join(vm.MountPath, rel) + splitMountPath := strings.Split(volumeMount.MountPath, string(os.PathSeparator)) + for j, pathItem := range splitMountPath { + if j < len(splitPath) && pathItem == splitPath[j] && j > longest { + longest = j + vm.Name = volumeMount.Name + vm.MountPath = fmt.Sprintf("/%s%d", BACKUP_PREFIX_PATH, i) + vm.SubPath = volumeMount.SubPath + rel, _ := filepath.Rel(volumeMount.MountPath, path) + sidecarPath = filepath.Join(vm.MountPath, rel) + } } } vms = append(vms, vm) From 121e8b11e73198d22ad70e864f4a01916ca8b23d Mon Sep 17 00:00:00 2001 From: Jean-Marc ANDRE Date: Thu, 20 Apr 2023 09:23:07 +0200 Subject: [PATCH 62/69] Reworked RBAC creation --- controllers/backupconfiguration_controller.go | 12 +- .../backupconfiguration_controller_helpers.go | 229 ++++++++++++------ 2 files changed, 166 insertions(+), 75 deletions(-) diff --git a/controllers/backupconfiguration_controller.go b/controllers/backupconfiguration_controller.go index 89faab1..74f3048 100644 --- a/controllers/backupconfiguration_controller.go +++ b/controllers/backupconfiguration_controller.go @@ -36,6 +36,9 @@ type BackupConfigurationReconciler struct { Scheme *runtime.Scheme Log logr.Logger context.Context + Name string + Namespace string + backupConf formolv1alpha1.BackupConfiguration } //+kubebuilder:rbac:groups=formol.desmojim.fr,resources=*,verbs=* @@ -63,6 +66,8 @@ type BackupConfigurationReconciler struct { func (r *BackupConfigurationReconciler) Reconcile(ctx context.Context, req ctrl.Request) (ctrl.Result, error) { r.Context = ctx r.Log = log.FromContext(ctx) + r.Name = req.NamespacedName.Name + r.Namespace = req.NamespacedName.Namespace r.Log.V(1).Info("Enter Reconcile with req", "req", req, "reconciler", r) @@ -74,6 +79,7 @@ func (r *BackupConfigurationReconciler) Reconcile(ctx context.Context, req ctrl. 
} return ctrl.Result{}, err } + r.backupConf = backupConf finalizerName := "finalizer.backupconfiguration.formol.desmojim.fr" @@ -82,7 +88,7 @@ func (r *BackupConfigurationReconciler) Reconcile(ctx context.Context, req ctrl. if controllerutil.ContainsFinalizer(&backupConf, finalizerName) { _ = r.DeleteSidecar(backupConf) _ = r.DeleteCronJob(backupConf) - _ = r.deleteRBACSidecar(backupConf.Namespace) + _ = r.deleteRBAC() controllerutil.RemoveFinalizer(&backupConf, finalizerName) if err := r.Update(ctx, &backupConf); err != nil { r.Log.Error(err, "unable to remove finalizer") @@ -111,6 +117,10 @@ func (r *BackupConfigurationReconciler) Reconcile(ctx context.Context, req ctrl. } else { backupConf.Status.ActiveCronJob = true } + if err = r.createBSCreatorRBAC(); err != nil { + r.Log.Error(err, "unable to create RBAC for the sidecar container") + return ctrl.Result{}, err + } for _, target := range backupConf.Spec.Targets { if err := r.addSidecar(backupConf, target); err != nil { diff --git a/controllers/backupconfiguration_controller_helpers.go b/controllers/backupconfiguration_controller_helpers.go index 353f0e0..276309d 100644 --- a/controllers/backupconfiguration_controller_helpers.go +++ b/controllers/backupconfiguration_controller_helpers.go @@ -31,9 +31,13 @@ import ( ) const ( - FORMOL_SA = "formol-controller" - FORMOL_SIDECAR_ROLE = "formol:sidecar-role" - FORMOL_SIDECAR_CLUSTERROLE = "formol:sidecar-clusterrole" + FORMOL_BS_CREATOR_SA = "bs-creator" + FORMOL_BS_CREATOR_ROLE = "formol:bs-creator-role" + FORMOL_BS_CREATOR_ROLEBINDING = "formol:bs-creator-rolebinding" + FORMOL_SIDECAR_ROLE = "formol:sidecar-role" + FORMOL_SIDECAR_ROLEBINDING = "formol:sidecar-rolebinding" + FORMOL_SIDECAR_CLUSTERROLE = "formol:sidecar-clusterrole" + FORMOL_SIDECAR_CLUSTERROLEBINDING = "formol:sidecar-clusterrolebinding" ) func (r *BackupConfigurationReconciler) DeleteCronJob(backupConf formolv1alpha1.BackupConfiguration) error { @@ -93,7 +97,7 @@ func (r 
*BackupConfigurationReconciler) AddCronJob(backupConf formolv1alpha1.Bac Template: corev1.PodTemplateSpec{ Spec: corev1.PodSpec{ RestartPolicy: corev1.RestartPolicyOnFailure, - ServiceAccountName: "backupsession-creator", + ServiceAccountName: FORMOL_BS_CREATOR_SA, Containers: []corev1.Container{ corev1.Container{ Name: "job-createbackupsession-" + backupConf.Name, @@ -234,12 +238,7 @@ func (r *BackupConfigurationReconciler) addSidecar(backupConf formolv1alpha1.Bac return err } if !hasSidecar(targetPodSpec) { - if err = r.createRBACSidecar(corev1.ServiceAccount{ - ObjectMeta: metav1.ObjectMeta{ - Namespace: backupConf.Namespace, - Name: targetPodSpec.ServiceAccountName, - }, - }); err != nil { + if err = r.createSidecarRBAC(targetPodSpec); err != nil { r.Log.Error(err, "unable to create RBAC for the sidecar container") return } @@ -290,78 +289,146 @@ func (r *BackupConfigurationReconciler) addSidecar(backupConf formolv1alpha1.Bac } // Delete the sidecar role is there is no more sidecar container in the namespace -func (r *BackupConfigurationReconciler) deleteRBACSidecar(namespace string) error { - podList := corev1.PodList{} - if err := r.List(r.Context, &podList, &client.ListOptions{ - Namespace: namespace, - }); err != nil { - r.Log.Error(err, "unable to get the list of pods", "namespace", namespace) - return err - } - for _, pod := range podList.Items { - for _, container := range pod.Spec.Containers { - for _, env := range container.Env { - if env.Name == formolv1alpha1.SIDECARCONTAINER_NAME { - // There is still a sidecar in the namespace. 
- // cannot delete the sidecar role - return nil - } +func (r *BackupConfigurationReconciler) deleteRBAC() error { + for _, roleBindingName := range []string{FORMOL_BS_CREATOR_ROLEBINDING, FORMOL_SIDECAR_CLUSTERROLEBINDING} { + roleBinding := rbacv1.RoleBinding{} + if err := r.Get(r.Context, client.ObjectKey{ + Namespace: r.Namespace, + Name: roleBindingName, + }, &roleBinding); err == nil { + if err = r.Delete(r.Context, &roleBinding); err != nil { + r.Log.Error(err, "unable to delete role binding", "role binding", roleBindingName) } } } - roleBinding := rbacv1.RoleBinding{ - ObjectMeta: metav1.ObjectMeta{ - Namespace: namespace, - Name: FORMOL_SIDECAR_ROLE, - }, + for _, roleName := range []string{FORMOL_BS_CREATOR_ROLE, FORMOL_SIDECAR_CLUSTERROLE} { + role := rbacv1.Role{} + if err := r.Get(r.Context, client.ObjectKey{ + Namespace: r.Namespace, + Name: roleName, + }, &role); err == nil { + if err = r.Delete(r.Context, &role); err != nil { + r.Log.Error(err, "unable to delete role", "role", roleName) + } + } } - if err := r.Delete(r.Context, &roleBinding); err != nil { - r.Log.Error(err, "unable to delete sidecar role binding") + sa := corev1.ServiceAccount{} + if err := r.Get(r.Context, client.ObjectKey{ + Namespace: r.Namespace, + Name: FORMOL_BS_CREATOR_SA, + }, &sa); err == nil { + if err = r.Delete(r.Context, &sa); err != nil { + r.Log.Error(err, "unable to delete bs service account role") + } } - role := rbacv1.Role{ - ObjectMeta: metav1.ObjectMeta{ - Namespace: namespace, - Name: FORMOL_SIDECAR_ROLE, - }, + clusterRoleBinding := rbacv1.ClusterRoleBinding{} + if err := r.Get(r.Context, client.ObjectKey{ + Namespace: r.Namespace, + Name: FORMOL_SIDECAR_CLUSTERROLEBINDING, + }, &clusterRoleBinding); err == nil { + if err = r.Delete(r.Context, &clusterRoleBinding); err != nil { + r.Log.Error(err, "unable to delete sidecar cluster role binding") + } } - if err := r.Delete(r.Context, &role); err != nil { - r.Log.Error(err, "unable to delete sidecar role") + 
clusterRole := rbacv1.ClusterRole{} + if err := r.Get(r.Context, client.ObjectKey{ + Namespace: r.Namespace, + Name: FORMOL_SIDECAR_CLUSTERROLE, + }, &clusterRole); err == nil { + if err = r.Delete(r.Context, &clusterRole); err != nil { + r.Log.Error(err, "unable to delete sidecar cluster role") + } } - clusterRoleBinding := rbacv1.ClusterRoleBinding{ - ObjectMeta: metav1.ObjectMeta{ - Namespace: namespace, - Name: FORMOL_SIDECAR_CLUSTERROLE, - }, + return nil +} + +func (r *BackupConfigurationReconciler) createBSCreatorRBAC() error { + sa := corev1.ServiceAccount{} + if err := r.Get(r.Context, client.ObjectKey{ + Namespace: r.Namespace, + Name: FORMOL_BS_CREATOR_SA, + }, &sa); errors.IsNotFound(err) { + sa = corev1.ServiceAccount{ + ObjectMeta: metav1.ObjectMeta{ + Namespace: r.Namespace, + Name: FORMOL_BS_CREATOR_SA, + }, + } + if err = r.Create(r.Context, &sa); err != nil { + r.Log.Error(err, "unable to create BS creator SA") + return err + } } - if err := r.Delete(r.Context, &clusterRoleBinding); err != nil { - r.Log.Error(err, "unable to delete sidecar clusterRole binding") + role := rbacv1.Role{} + if err := r.Get(r.Context, client.ObjectKey{ + Namespace: r.Namespace, + Name: FORMOL_BS_CREATOR_ROLE, + }, &role); errors.IsNotFound(err) { + role = rbacv1.Role{ + ObjectMeta: metav1.ObjectMeta{ + Namespace: r.Namespace, + Name: FORMOL_BS_CREATOR_ROLE, + }, + Rules: []rbacv1.PolicyRule{ + rbacv1.PolicyRule{ + Verbs: []string{"get", "list", "watch", "create", "update", "patch", "delete"}, + APIGroups: []string{"formol.desmojim.fr"}, + Resources: []string{"backupsessions"}, + }, + }, + } + r.Log.V(0).Info("Creating formol bs creator role", "role", role) + if err = r.Create(r.Context, &role); err != nil { + r.Log.Error(err, "unable to create bs creator role") + return err + } } - clusterRole := rbacv1.ClusterRole{ - ObjectMeta: metav1.ObjectMeta{ - Namespace: namespace, - Name: FORMOL_SIDECAR_CLUSTERROLE, - }, - } - if err := r.Delete(r.Context, &clusterRole); err != 
nil { - r.Log.Error(err, "unable to delete sidecar clusterRole") + rolebinding := rbacv1.RoleBinding{} + if err := r.Get(r.Context, client.ObjectKey{ + Namespace: r.Namespace, + Name: FORMOL_BS_CREATOR_ROLEBINDING, + }, &rolebinding); errors.IsNotFound(err) { + rolebinding = rbacv1.RoleBinding{ + ObjectMeta: metav1.ObjectMeta{ + Namespace: r.Namespace, + Name: FORMOL_BS_CREATOR_ROLEBINDING, + }, + Subjects: []rbacv1.Subject{ + rbacv1.Subject{ + Kind: "ServiceAccount", + Name: FORMOL_BS_CREATOR_SA, + }, + }, + RoleRef: rbacv1.RoleRef{ + APIGroup: "rbac.authorization.k8s.io", + Kind: "Role", + Name: FORMOL_BS_CREATOR_ROLE, + }, + } + r.Log.V(0).Info("Creating formol bs creator rolebinding", "rolebinding", rolebinding) + if err = r.Create(r.Context, &rolebinding); err != nil { + r.Log.Error(err, "unable to create bs creator rolebinding") + return err + } } return nil } // Creates a role to allow the BackupSession controller in the sidecar to have access to resources // like Repo, Functions, ... 
-func (r *BackupConfigurationReconciler) createRBACSidecar(sa corev1.ServiceAccount) error { - if sa.Name == "" { - sa.Name = "default" +func (r *BackupConfigurationReconciler) createSidecarRBAC(podSpec *corev1.PodSpec) error { + sa := podSpec.ServiceAccountName + if sa == "" { + sa = "default" } role := rbacv1.Role{} if err := r.Get(r.Context, client.ObjectKey{ - Namespace: sa.Namespace, + Namespace: r.Namespace, Name: FORMOL_SIDECAR_ROLE, - }, &role); err != nil && errors.IsNotFound(err) { + }, &role); errors.IsNotFound(err) { role = rbacv1.Role{ ObjectMeta: metav1.ObjectMeta{ - Namespace: sa.Namespace, + Namespace: r.Namespace, Name: FORMOL_SIDECAR_ROLE, }, Rules: []rbacv1.PolicyRule{ @@ -405,18 +472,18 @@ func (r *BackupConfigurationReconciler) createRBACSidecar(sa corev1.ServiceAccou } rolebinding := rbacv1.RoleBinding{} if err := r.Get(r.Context, client.ObjectKey{ - Namespace: sa.Namespace, - Name: FORMOL_SIDECAR_ROLE, - }, &rolebinding); err != nil && errors.IsNotFound(err) { + Namespace: r.Namespace, + Name: FORMOL_SIDECAR_ROLEBINDING, + }, &rolebinding); errors.IsNotFound(err) { rolebinding = rbacv1.RoleBinding{ ObjectMeta: metav1.ObjectMeta{ - Namespace: sa.Namespace, - Name: FORMOL_SIDECAR_ROLE, + Namespace: r.Namespace, + Name: FORMOL_SIDECAR_ROLEBINDING, }, Subjects: []rbacv1.Subject{ rbacv1.Subject{ Kind: "ServiceAccount", - Name: sa.Name, + Name: sa, }, }, RoleRef: rbacv1.RoleRef{ @@ -430,11 +497,25 @@ func (r *BackupConfigurationReconciler) createRBACSidecar(sa corev1.ServiceAccou r.Log.Error(err, "unable to create sidecar rolebinding") return err } + } else { + if err != nil { + r.Log.Error(err, "something went very wrong here") + return err + } + rolebinding.Subjects = append(rolebinding.Subjects, rbacv1.Subject{ + Kind: "ServiceAccount", + Name: sa, + }) + r.Log.V(0).Info("Updating formol sidecar rolebinding with the new SA", "rolebinding", rolebinding) + if err = r.Update(r.Context, &rolebinding); err != nil { + r.Log.Error(err, "unable to 
update sidecar rolebinding") + return err + } } clusterRole := rbacv1.ClusterRole{} if err := r.Get(r.Context, client.ObjectKey{ Name: FORMOL_SIDECAR_CLUSTERROLE, - }, &clusterRole); err != nil && errors.IsNotFound(err) { + }, &clusterRole); errors.IsNotFound(err) { clusterRole = rbacv1.ClusterRole{ ObjectMeta: metav1.ObjectMeta{ Name: FORMOL_SIDECAR_CLUSTERROLE, @@ -460,18 +541,18 @@ func (r *BackupConfigurationReconciler) createRBACSidecar(sa corev1.ServiceAccou } clusterRolebinding := rbacv1.ClusterRoleBinding{} if err := r.Get(r.Context, client.ObjectKey{ - Namespace: sa.Namespace, - Name: FORMOL_SIDECAR_CLUSTERROLE, - }, &clusterRolebinding); err != nil && errors.IsNotFound(err) { + Namespace: r.Namespace, + Name: FORMOL_SIDECAR_CLUSTERROLEBINDING, + }, &clusterRolebinding); errors.IsNotFound(err) { clusterRolebinding = rbacv1.ClusterRoleBinding{ ObjectMeta: metav1.ObjectMeta{ - Name: FORMOL_SIDECAR_CLUSTERROLE, + Name: FORMOL_SIDECAR_CLUSTERROLEBINDING, }, Subjects: []rbacv1.Subject{ rbacv1.Subject{ Kind: "ServiceAccount", - Name: sa.Name, - Namespace: sa.Namespace, + Name: sa, + Namespace: r.Namespace, }, }, RoleRef: rbacv1.RoleRef{ From d3d81a0e9d709d85d61ba08d6ce99fe3f64c76de Mon Sep 17 00:00:00 2001 From: Jean-Marc ANDRE Date: Thu, 20 Apr 2023 09:28:30 +0200 Subject: [PATCH 63/69] buildah instead of docker --- Dockerfile | 5 +++-- Makefile | 25 +------------------------ 2 files changed, 4 insertions(+), 26 deletions(-) diff --git a/Dockerfile b/Dockerfile index 8f9cca1..4f23fb7 100644 --- a/Dockerfile +++ b/Dockerfile @@ -1,7 +1,8 @@ # Build the manager binary -FROM golang:1.19 as builder +FROM --platform=${BUILDPLATFORM} golang:1.19 as builder ARG TARGETOS ARG TARGETARCH +ARG TARGETPLATFORM WORKDIR /workspace # Copy the Go Modules manifests @@ -25,7 +26,7 @@ RUN CGO_ENABLED=0 GOOS=${TARGETOS:-linux} GOARCH=${TARGETARCH} go build -a -o ma # Use distroless as minimal base image to package the manager binary # Refer to 
https://github.com/GoogleContainerTools/distroless for more details -FROM gcr.io/distroless/static:nonroot +FROM --platform=${TARGETPLATFORM} gcr.io/distroless/static:nonroot WORKDIR / COPY --from=builder /workspace/manager . USER 65532:65532 diff --git a/Makefile b/Makefile index af5b974..a3276e9 100644 --- a/Makefile +++ b/Makefile @@ -71,38 +71,15 @@ run: manifests generate fmt vet ## Run a controller from your host. go run ./main.go PLATFORMS ?= linux/arm64,linux/amd64 -# If you wish built the manager image targeting other platforms you can use the --platform flag. -# (i.e. docker build --platform linux/arm64 ). However, you must enable docker buildKit for it. -# More info: https://docs.docker.com/develop/develop-images/build_enhancements/ -.PHONY: docker-build -docker-build: test ## Build docker image with the manager. - docker build -t ${IMG} . - .PHONY: docker-build-multiarch docker-build-multiarch: - buildah bud --manifest $(MANIFEST) --platform=$(PLATFORMS) --layers Dockerfile.multi + buildah bud --manifest $(MANIFEST) --platform=$(PLATFORMS) --layers . .PHONY: docker-push docker-push: ## Push docker image with the manager. buildah manifest push --all --rm $(MANIFEST) "docker://$(IMG)" #docker push ${IMG} -# PLATFORMS defines the target platforms for the manager image be build to provide support to multiple -# architectures. (i.e. make docker-buildx IMG=myregistry/mypoperator:0.0.1). To use this option you need to: -# - able to use docker buildx . More info: https://docs.docker.com/build/buildx/ -# - have enable BuildKit, More info: https://docs.docker.com/develop/develop-images/build_enhancements/ -# - be able to push the image for your registry (i.e. if you do not inform a valid value via IMG=> then the export will fail) -# To properly provided solutions that supports more than one platform you should use this option. 
-.PHONY: docker-buildx -docker-buildx: test ## Build and push docker image for the manager for cross-platform support - # copy existing Dockerfile and insert --platform=${BUILDPLATFORM} into Dockerfile.cross, and preserve the original Dockerfile - sed -e '1 s/\(^FROM\)/FROM --platform=\$$\{BUILDPLATFORM\}/; t' -e ' 1,// s//FROM --platform=\$$\{BUILDPLATFORM\}/' Dockerfile > Dockerfile.cross - - docker buildx create --name project-v3-builder - docker buildx use project-v3-builder - - docker buildx build --push --platform=$(PLATFORMS) --tag ${IMG} -f Dockerfile.cross . - - docker buildx rm project-v3-builder - rm Dockerfile.cross - ##@ Deployment ifndef ignore-not-found From 0175496bb8b26c652b9224a389a75c3e456c4550 Mon Sep 17 00:00:00 2001 From: Jean-Marc ANDRE Date: Thu, 20 Apr 2023 10:39:20 +0200 Subject: [PATCH 64/69] delete rolebindings --- controllers/backupconfiguration_controller_helpers.go | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/controllers/backupconfiguration_controller_helpers.go b/controllers/backupconfiguration_controller_helpers.go index 276309d..a55224c 100644 --- a/controllers/backupconfiguration_controller_helpers.go +++ b/controllers/backupconfiguration_controller_helpers.go @@ -290,7 +290,7 @@ func (r *BackupConfigurationReconciler) addSidecar(backupConf formolv1alpha1.Bac // Delete the sidecar role is there is no more sidecar container in the namespace func (r *BackupConfigurationReconciler) deleteRBAC() error { - for _, roleBindingName := range []string{FORMOL_BS_CREATOR_ROLEBINDING, FORMOL_SIDECAR_CLUSTERROLEBINDING} { + for _, roleBindingName := range []string{FORMOL_BS_CREATOR_ROLEBINDING, FORMOL_SIDECAR_ROLEBINDING} { roleBinding := rbacv1.RoleBinding{} if err := r.Get(r.Context, client.ObjectKey{ Namespace: r.Namespace, @@ -301,7 +301,7 @@ func (r *BackupConfigurationReconciler) deleteRBAC() error { } } } - for _, roleName := range []string{FORMOL_BS_CREATOR_ROLE, FORMOL_SIDECAR_CLUSTERROLE} { + for _, roleName 
:= range []string{FORMOL_BS_CREATOR_ROLE, FORMOL_SIDECAR_ROLE} { role := rbacv1.Role{} if err := r.Get(r.Context, client.ObjectKey{ Namespace: r.Namespace, From 6678bc50e1f1b7bdd0745427a8d9f1ac897b4013 Mon Sep 17 00:00:00 2001 From: Jean-Marc ANDRE Date: Thu, 20 Apr 2023 10:40:04 +0200 Subject: [PATCH 65/69] don't need to log BackupConfiguration not found --- controllers/backupsession_controller.go | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/controllers/backupsession_controller.go b/controllers/backupsession_controller.go index 7dc14cc..04b41c6 100644 --- a/controllers/backupsession_controller.go +++ b/controllers/backupsession_controller.go @@ -62,7 +62,9 @@ func (r *BackupSessionReconciler) Reconcile(ctx context.Context, req ctrl.Reques Namespace: backupSession.Spec.Ref.Namespace, Name: backupSession.Spec.Ref.Name, }, &backupConf); err != nil { - r.Log.Error(err, "unable to get BackupConfiguration") + if !errors.IsNotFound(err) { + r.Log.Error(err, "unable to get BackupConfiguration") + } return ctrl.Result{}, err } if !backupSession.ObjectMeta.DeletionTimestamp.IsZero() { From 605e1d2f3048d8109ba13b4bcdead444a6115625 Mon Sep 17 00:00:00 2001 From: Jean-Marc ANDRE Date: Thu, 20 Apr 2023 11:30:33 +0200 Subject: [PATCH 66/69] added configmaps to the role --- controllers/backupconfiguration_controller_helpers.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/controllers/backupconfiguration_controller_helpers.go b/controllers/backupconfiguration_controller_helpers.go index a55224c..159dca9 100644 --- a/controllers/backupconfiguration_controller_helpers.go +++ b/controllers/backupconfiguration_controller_helpers.go @@ -445,7 +445,7 @@ func (r *BackupConfigurationReconciler) createSidecarRBAC(podSpec *corev1.PodSpe rbacv1.PolicyRule{ Verbs: []string{"get", "list", "watch", "create", "update", "patch", "delete"}, APIGroups: []string{""}, - Resources: []string{"secrets", "persistentvolumeclaims"}, + Resources: 
[]string{"secrets", "persistentvolumeclaims", "configmaps"}, }, rbacv1.PolicyRule{ Verbs: []string{"get", "list", "watch", "create", "update", "patch", "delete"}, From c8c53954d7fb60006e6f43e48a82a2f989c3d23d Mon Sep 17 00:00:00 2001 From: Jean-Marc ANDRE Date: Thu, 20 Apr 2023 12:00:31 +0200 Subject: [PATCH 67/69] API optional fields --- api/v1alpha1/backupconfiguration_types.go | 2 ++ 1 file changed, 2 insertions(+) diff --git a/api/v1alpha1/backupconfiguration_types.go b/api/v1alpha1/backupconfiguration_types.go index 02d777d..a00afa1 100644 --- a/api/v1alpha1/backupconfiguration_types.go +++ b/api/v1alpha1/backupconfiguration_types.go @@ -79,6 +79,7 @@ type TargetContainer struct { Paths []string `json:"paths,omitempty"` // +optional Steps []Step `json:"steps,omitempty"` + // +optional // +kubebuilder:default:=/formol-shared SharePath string `json:"sharePath"` // +optional @@ -90,6 +91,7 @@ type Target struct { TargetKind `json:"targetKind"` TargetName string `json:"targetName"` Containers []TargetContainer `json:"containers"` + // +optional // +kubebuilder:default:=2 Retry int `json:"retry"` // +optional From 4a5c5b34e12dabbd5300097355e5a6c97d9c68ff Mon Sep 17 00:00:00 2001 From: Jean-Marc ANDRE Date: Thu, 20 Apr 2023 12:37:50 +0200 Subject: [PATCH 68/69] More RBAC --- controllers/backupconfiguration_controller.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/controllers/backupconfiguration_controller.go b/controllers/backupconfiguration_controller.go index 74f3048..e5d3547 100644 --- a/controllers/backupconfiguration_controller.go +++ b/controllers/backupconfiguration_controller.go @@ -49,7 +49,7 @@ type BackupConfigurationReconciler struct { // +kubebuilder:rbac:groups=core,resources=volumesnapshotclasses,verbs=get;list;watch // +kubebuilder:rbac:groups=core,resources=persistentvolumes,verbs=get;list;watch // +kubebuilder:rbac:groups=core,resources=configmaps,verbs=get;list;watch -// 
+kubebuilder:rbac:groups=core,resources=secrets,verbs=get;list;watch;create;update;patch;delete +// +kubebuilder:rbac:groups=core,resources=secrets,verbs=get;list;watch // +kubebuilder:rbac:groups=core,resources=serviceaccounts,verbs=get;list;watch;create;update;patch;delete // +kubebuilder:rbac:groups=core,resources=persistentvolumeclaims,verbs=get;list;watch;create;update;patch;delete // +kubebuilder:rbac:groups=rbac.authorization.k8s.io,resources=roles,verbs=get;list;watch;create;update;patch;delete From 03571f174a751300658c9a26f23e7ba028684855 Mon Sep 17 00:00:00 2001 From: Jean-Marc ANDRE Date: Thu, 20 Apr 2023 12:37:50 +0200 Subject: [PATCH 69/69] More RBAC --- controllers/backupconfiguration_controller_helpers.go | 7 ++++++- 1 file changed, 6 insertions(+), 1 deletion(-) diff --git a/controllers/backupconfiguration_controller_helpers.go b/controllers/backupconfiguration_controller_helpers.go index 159dca9..9b365e4 100644 --- a/controllers/backupconfiguration_controller_helpers.go +++ b/controllers/backupconfiguration_controller_helpers.go @@ -445,7 +445,12 @@ func (r *BackupConfigurationReconciler) createSidecarRBAC(podSpec *corev1.PodSpe rbacv1.PolicyRule{ Verbs: []string{"get", "list", "watch", "create", "update", "patch", "delete"}, APIGroups: []string{""}, - Resources: []string{"secrets", "persistentvolumeclaims", "configmaps"}, + Resources: []string{"persistentvolumeclaims"}, + }, + rbacv1.PolicyRule{ + Verbs: []string{"get", "list", "watch"}, + APIGroups: []string{""}, + Resources: []string{"secrets", "configmaps"}, }, rbacv1.PolicyRule{ Verbs: []string{"get", "list", "watch", "create", "update", "patch", "delete"},