diff --git a/cmd/operator/app/main.go b/cmd/operator/app/main.go
index cce4c71fb..7948ee75e 100644
--- a/cmd/operator/app/main.go
+++ b/cmd/operator/app/main.go
@@ -18,12 +18,14 @@ import (
 	"context"
 	"flag"
 	"fmt"
-	log "github.com/altinity/clickhouse-operator/pkg/announcer"
-	"github.com/altinity/clickhouse-operator/pkg/version"
 	"os"
 	"os/signal"
 	"sync"
 	"syscall"
+
+	log "github.com/altinity/clickhouse-operator/pkg/announcer"
+	"github.com/altinity/clickhouse-operator/pkg/chop"
+	"github.com/altinity/clickhouse-operator/pkg/version"
 )
 
 // CLI parameter variables
@@ -42,6 +44,10 @@ var (
 
 	// masterURL defines URL of kubernetes master to be used
 	masterURL string
+
+	// configmapsSecretRequest defines request for creating required defaults in configmaps and a secret to be used by the operator.
+	// This is for OCP deployment only
+	configmapsSecretRequest bool
 )
 
 func init() {
@@ -49,6 +55,7 @@ func init() {
 	flag.BoolVar(&debugRequest, "debug", false, "Debug run")
 	flag.StringVar(&chopConfigFile, "config", "", "Path to clickhouse-operator config file.")
 	flag.StringVar(&masterURL, "master", "", "The address of custom Kubernetes API server. Makes sense if runs outside of the cluster and not being specified in kube config file only.")
+	flag.BoolVar(&configmapsSecretRequest, "configmaps-secret", false, "Add configmaps and secret to the namespace the operator resides")
 }
 
 // Run is an entry point of the application
@@ -60,4 +67,18 @@ func Run() {
 		os.Exit(0)
 	}
+	if configmapsSecretRequest {
+		// NOTE(review): GetClientset's error (if any) is silently discarded here — verify its signature and handle it.
+		kubeClient, _, _ := chop.GetClientset(kubeConfigFile, masterURL)
+		if err := chop.HandleConfigmapsCreation(kubeClient); err != nil {
+			fmt.Printf("Error creating ConfigMaps: %v\n", err)
+			os.Exit(1)
+		}
+		if err := chop.HandleSecretCreation(kubeClient); err != nil {
+			fmt.Printf("Error creating Secret: %v\n", err)
+			os.Exit(2)
+		}
+		os.Exit(0)
+	}
+
 	log.S().P()
 	defer log.E().P()
diff --git a/deploy/builder/templates-operatorhub/clickhouse-operator.vVERSION.clusterserviceversion-template.yaml b/deploy/builder/templates-operatorhub/clickhouse-operator.vVERSION.clusterserviceversion-template.yaml
index a3d350cf6..91046a841 100644
--- a/deploy/builder/templates-operatorhub/clickhouse-operator.vVERSION.clusterserviceversion-template.yaml
+++ b/deploy/builder/templates-operatorhub/clickhouse-operator.vVERSION.clusterserviceversion-template.yaml
@@ -1238,6 +1238,56 @@ spec:
               labels:
                 app: clickhouse-operator
             spec:
+              volumes:
+              - name: etc-clickhouse-operator-folder
+                configMap:
+                  name: etc-clickhouse-operator-files
+                  optional: true
+              - name: etc-clickhouse-operator-confd-folder
+                configMap:
+                  name: etc-clickhouse-operator-confd-files
+                  optional: true
+              - name: etc-clickhouse-operator-configd-folder
+                configMap:
+                  name: etc-clickhouse-operator-configd-files
+                  optional: true
+              - name: etc-clickhouse-operator-templatesd-folder
+                configMap:
+                  name: etc-clickhouse-operator-templatesd-files
+                  optional: true
+              - name: etc-clickhouse-operator-usersd-folder
+                configMap:
+                  name: etc-clickhouse-operator-usersd-files
+                  optional: true
+              - name: tmp-for-log
+                emptyDir: {}
+              initContainers:
+              - name: add-configmaps-secret
+                image: docker.io/altinity/clickhouse-operator:${OPERATOR_VERSION}
+                command:
+                - /clickhouse-operator
+                - --configmaps-secret
+                env:
+                - name: POD_NAMESPACE
+                  valueFrom:
+                    fieldRef:
+                      fieldPath: metadata.namespace
+                volumeMounts:
+                - name: tmp-for-log
+                  mountPath: /tmp
+                imagePullPolicy: Always
+                securityContext:
+                  privileged: false
+                  readOnlyRootFilesystem: true
+                resources:
+                  limits:
+                    cpu: 100m
+                    ephemeral-storage: 200Mi
+                    memory: 128Mi
+                  requests:
+                    cpu: 100m
+                    ephemeral-storage: 50Mi
+                    memory: 128Mi
               containers:
               - env:
                 - name: OPERATOR_POD_NODE_NAME
@@ -1287,6 +1337,17 @@ spec:
                 image: docker.io/altinity/clickhouse-operator:${OPERATOR_VERSION}
                 imagePullPolicy: Always
                 name: clickhouse-operator
+                volumeMounts:
+                - name: etc-clickhouse-operator-folder
+                  mountPath: /etc/clickhouse-operator
+                - name: etc-clickhouse-operator-confd-folder
+                  mountPath: /etc/clickhouse-operator/conf.d
+                - name: etc-clickhouse-operator-configd-folder
+                  mountPath: /etc/clickhouse-operator/config.d
+                - name: etc-clickhouse-operator-templatesd-folder
+                  mountPath: /etc/clickhouse-operator/templates.d
+                - name: etc-clickhouse-operator-usersd-folder
+                  mountPath: /etc/clickhouse-operator/users.d
               - image: docker.io/altinity/metrics-exporter:${OPERATOR_VERSION}
                 imagePullPolicy: Always
                 name: metrics-exporter
diff --git a/pkg/chop/configmaps_secret.go b/pkg/chop/configmaps_secret.go
new file mode 100644
index 000000000..de87a26e6
--- /dev/null
+++ b/pkg/chop/configmaps_secret.go
@@ -0,0 +1,130 @@
+package chop
+
+import (
+	"context"
+	"fmt"
+	"log"
+	"os"
+	"path/filepath"
+
+	corev1 "k8s.io/api/core/v1"
+	apiErrors "k8s.io/apimachinery/pkg/api/errors"
+	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+	kube "k8s.io/client-go/kubernetes"
+)
+
+// cmNamePaths maps the name of each ConfigMap the operator needs to the
+// directory whose files become that ConfigMap's data.
+var cmNamePaths = map[string]string{
+	"etc-clickhouse-operator-files":            "/etc/clickhouse-operator/",
+	"etc-clickhouse-operator-confd-files":      "/etc/clickhouse-operator/conf.d/",
+	"etc-clickhouse-operator-configd-files":    "/etc/clickhouse-operator/config.d/",
+	"etc-clickhouse-operator-templatesd-files": "/etc/clickhouse-operator/templates.d/",
+	"etc-clickhouse-operator-usersd-files":     "/etc/clickhouse-operator/users.d/",
+}
+
+// HandleConfigmapsCreation ensures that every ConfigMap listed in cmNamePaths
+// exists in the namespace given by POD_NAMESPACE, creating missing ones from
+// the files found at the associated path. Existing ConfigMaps are left as-is.
+func HandleConfigmapsCreation(kubeClient *kube.Clientset) error {
+	namespace := os.Getenv("POD_NAMESPACE")
+
+	for name, path := range cmNamePaths {
+		_, err := kubeClient.CoreV1().ConfigMaps(namespace).Get(context.TODO(), name, metav1.GetOptions{})
+		if err == nil {
+			log.Printf("ConfigMap %s/%s already exists", namespace, name)
+			continue
+		}
+		if !apiErrors.IsNotFound(err) {
+			log.Printf("Error probing ConfigMap %s/%s with error %v", namespace, name, err)
+			return err
+		}
+		cm, err := makeConfigmap(name, path)
+		if err != nil {
+			return err
+		}
+		if _, err := kubeClient.CoreV1().ConfigMaps(cm.Namespace).Create(context.TODO(), cm, metav1.CreateOptions{}); err != nil {
+			log.Printf("Creating ConfigMap %s/%s failed with error %v", cm.Namespace, cm.Name, err)
+			return err
+		}
+	}
+	return nil
+}
+
+// makeConfigmap builds a ConfigMap named name whose data holds the content of
+// the files under path. The namespace is taken from POD_NAMESPACE.
+func makeConfigmap(name string, path string) (*corev1.ConfigMap, error) {
+	configs, err := getConfigsFrom(path)
+	if err != nil {
+		return nil, err
+	}
+	return &corev1.ConfigMap{
+		ObjectMeta: metav1.ObjectMeta{
+			Name:      name,
+			Namespace: os.Getenv("POD_NAMESPACE"),
+		},
+		Data: configs,
+	}, nil
+}
+
+// getConfigsFrom reads every regular file under path (skipping the
+// .gitkeep.xml placeholder) into a file name -> file content map.
+func getConfigsFrom(path string) (map[string]string, error) {
+	configs := map[string]string{}
+
+	files, err := os.ReadDir(path)
+	if err != nil {
+		return nil, fmt.Errorf("reading config dir %q: %w", path, err)
+	}
+
+	for _, f := range files {
+		if f.IsDir() || f.Name() == ".gitkeep.xml" {
+			continue
+		}
+		dat, err := os.ReadFile(filepath.Join(path, f.Name()))
+		if err != nil {
+			return nil, fmt.Errorf("reading config file %q: %w", f.Name(), err)
+		}
+		configs[f.Name()] = string(dat)
+	}
+
+	return configs, nil
+}
+
+// HandleSecretCreation ensures the default "clickhouse-operator" credentials
+// Secret exists in the namespace given by POD_NAMESPACE. An existing Secret is
+// left untouched.
+func HandleSecretCreation(kubeClient *kube.Clientset) error {
+	const secretName = "clickhouse-operator"
+	namespace := os.Getenv("POD_NAMESPACE")
+
+	_, err := kubeClient.CoreV1().Secrets(namespace).Get(context.TODO(), secretName, metav1.GetOptions{})
+	if err == nil {
+		log.Printf("Secret %s/%s already exists", namespace, secretName)
+		return nil
+	}
+	if !apiErrors.IsNotFound(err) {
+		log.Printf("Error probing Secret %s/%s with error %v", namespace, secretName, err)
+		return err
+	}
+
+	secret := &corev1.Secret{
+		ObjectMeta: metav1.ObjectMeta{
+			Namespace: namespace,
+			Name:      secretName,
+		},
+		// NOTE(review): these are the operator's well-known defaults; users
+		// are expected to rotate them after installation.
+		StringData: map[string]string{
+			"username": "clickhouse_operator",
+			"password": "clickhouse_operator_password",
+		},
+		Type: corev1.SecretTypeOpaque,
+	}
+
+	if _, err := kubeClient.CoreV1().Secrets(namespace).Create(context.TODO(), secret, metav1.CreateOptions{}); err != nil {
+		log.Printf("Creating Secret %s/%s failed with error %v", namespace, secretName, err)
+		return err
+	}
+	return nil
+}