diff --git a/Makefile b/Makefile
index 1bddfd3..3a190a4 100644
--- a/Makefile
+++ b/Makefile
@@ -151,7 +151,7 @@ module: ## Run go mod tidy->verify against go modules.
$(GO) mod tidy -compat=1.21
$(GO) mod verify
-TEST_PACKAGES ?= ./engines/... ./httpserver/... ./operations/... ./dcs/...
+TEST_PACKAGES ?= ./engines/... ./httpserver/... ./operations/...
OUTPUT_COVERAGE=-coverprofile cover.out
.PHONY: test
diff --git a/README.md b/README.md
index 44cc2b8..930847e 100644
--- a/README.md
+++ b/README.md
@@ -4,7 +4,7 @@ dbctl is a service that provides command execution channels, originally found in
dbctl itself provides two running modes: daemon mode and temporary task mode. You can choose the appropriate mode based on your business scenario.
## Daemon Mode
-In this mode, dbctl runs as a daemon process and provides API services. This can be treated as one form of implementing the engines plugin. KubeBlocks does not impose restrictions on the form of engines plugins; they can run as sidecars, container daemons, or other forms. Currently, dbctl uses the localhost address to communicate with the database processes by default. Therefore, in this mode, it is recommended to deploy dbctl using the sidecar method, with the deployment template as follows:
+In this mode, dbctl runs as a daemon process and provides API services. This can be treated as one form of implementing the engine plugin. KubeBlocks does not impose restrictions on the form of engines plugins; they can run as sidecars, container daemons, or other forms. Currently, dbctl uses the localhost address to communicate with the database processes by default. Therefore, in this mode, it is recommended to deploy dbctl using the sidecar method, with the deployment template as follows:
```
apiVersion: v1
kind: Pod
@@ -14,7 +14,7 @@ spec:
image: apecloud-registry.cn-zhangjiakou.cr.aliyuncs.com/apecloud/apecloud-mysql-server:8.0.30
...
- name: dbctl
- image: apecloud-registry.cn-zhangjiakou.cr.aliyuncs.com/apecloud/dbctl:0.1.2
+ image: apecloud-registry.cn-zhangjiakou.cr.aliyuncs.com/apecloud/dbctl:0.2.0
command:
- dbctl
- mysql
@@ -25,16 +25,6 @@ spec:
fieldRef:
apiVersion: v1
fieldPath: metadata.name
- - name: KB_POD_UID
- valueFrom:
- fieldRef:
- apiVersion: v1
- fieldPath: metadata.uid
- - name: KB_NAMESPACE
- valueFrom:
- fieldRef:
- apiVersion: v1
- fieldPath: metadata.namespace
- name: KB_SERVICE_USER
valueFrom:
secretKeyRef:
@@ -45,18 +35,6 @@ spec:
secretKeyRef:
key: password
name: cluster-mysql-account-root
- - name: KB_CLUSTER_NAME
- valueFrom:
- fieldRef:
- apiVersion: v1
- fieldPath: metadata.labels['app.kubernetes.io/instance']
- - name: KB_COMP_NAME
- valueFrom:
- fieldRef:
- apiVersion: v1
- fieldPath: metadata.labels['apps.kubeblocks.io/component-name']
- - name: KB_ENGINE_TYPE
- value: mysql
```
When using dbctl in daemon mode, action definitions can be implemented by calling the dbctl API:
@@ -67,7 +45,7 @@ When using dbctl in daemon mode, action definitions can be implemented by callin
command:
- /bin/bash
- -c
- - curl -X GET -H 'Content-Type: application/json' 'http://127.0.0.1:3501/v1.0/getrole'
+ - curl -X GET -H 'Content-Type: application/json' 'http://127.0.0.1:5001/v1.0/getrole'
```
## Temporary Task
@@ -77,7 +55,7 @@ In this mode, dbctl completes the corresponding task and then exits immediately.
lifecycleActions:
roleProbe:
exec:
- image: apecloud-registry.cn-zhangjiakou.cr.aliyuncs.com/apecloud/dbctl:0.1.2
+ image: apecloud-registry.cn-zhangjiakou.cr.aliyuncs.com/apecloud/dbctl:0.2.0
command:
- dbctl
- mysql
diff --git a/cmd/dbctl/main.go b/cmd/dbctl/main.go
index 003b690..66f3e7e 100644
--- a/cmd/dbctl/main.go
+++ b/cmd/dbctl/main.go
@@ -1,5 +1,5 @@
/*
-Copyright (C) 2022-2024 ApeCloud Co., Ltd
+Copyright (C) 2022-2025 ApeCloud Co., Ltd
This file is part of KubeBlocks project
@@ -34,7 +34,6 @@ func init() {
}
func main() {
- // Set GOMAXPROCS
_, _ = maxprocs.Set()
ctl.Execute("", "")
diff --git a/config/dbctl/components/binding_apecloud_postgresql.yaml b/config/dbctl/components/binding_apecloud_postgresql.yaml
deleted file mode 100644
index 6856b42..0000000
--- a/config/dbctl/components/binding_apecloud_postgresql.yaml
+++ /dev/null
@@ -1,6 +0,0 @@
-name: apecloud-postgresql
-spec:
- version: v1
- metadata:
- - name: url # Required
- value: "user=postgres password=docker host=localhost port=5432 dbname=postgres pool_min_conns=1 pool_max_conns=10"
\ No newline at end of file
diff --git a/config/dbctl/components/binding_custom.yaml b/config/dbctl/components/binding_custom.yaml
deleted file mode 100644
index a4ccb3e..0000000
--- a/config/dbctl/components/binding_custom.yaml
+++ /dev/null
@@ -1,5 +0,0 @@
-name: custom
-spec:
- type: bindings.custom
- version: v1
- metadata: []
\ No newline at end of file
diff --git a/config/dbctl/components/binding_etcd.yaml b/config/dbctl/components/binding_etcd.yaml
deleted file mode 100644
index 8c013a5..0000000
--- a/config/dbctl/components/binding_etcd.yaml
+++ /dev/null
@@ -1,6 +0,0 @@
-name: etcd
-spec:
- version: v1
- metadata:
- - name: endpoint
- value: "127.0.0.1:2379"
\ No newline at end of file
diff --git a/config/dbctl/components/binding_kafka.yaml b/config/dbctl/components/binding_kafka.yaml
deleted file mode 100644
index 5242360..0000000
--- a/config/dbctl/components/binding_kafka.yaml
+++ /dev/null
@@ -1,14 +0,0 @@
-name: kafka
-spec:
- version: v1
- metadata:
- - name: topics
- value: "topic1,topic2"
- - name: brokers
- value: "localhost:9092,localhost:9093"
- - name: publishTopic # Optional. Used for output bindings.
- value: "topic3"
- - name: authRequired # Required.
- value: "false"
- - name: initialOffset # Optional. Used for input bindings.
- value: "newest"
\ No newline at end of file
diff --git a/config/dbctl/components/binding_mongodb.yaml b/config/dbctl/components/binding_mongodb.yaml
deleted file mode 100644
index b94ffd0..0000000
--- a/config/dbctl/components/binding_mongodb.yaml
+++ /dev/null
@@ -1,8 +0,0 @@
-name: mongodb
-spec:
- version: v1
- metadata:
- - name: host
- value: "127.0.0.1:27017"
- - name: params
- value: "?directConnection=true"
diff --git a/config/dbctl/components/binding_mysql.yaml b/config/dbctl/components/binding_mysql.yaml
deleted file mode 100644
index dd0481d..0000000
--- a/config/dbctl/components/binding_mysql.yaml
+++ /dev/null
@@ -1,8 +0,0 @@
-name: mysql
-spec:
- version: v1
- metadata:
- - name: url # Required, define DB connection in DSN format
- value: "root:@tcp(127.0.0.1:3306)/mysql?multiStatements=true"
- - name: maxOpenConns
- value: "5"
\ No newline at end of file
diff --git a/config/dbctl/components/binding_oceanbase.yaml b/config/dbctl/components/binding_oceanbase.yaml
deleted file mode 100644
index a9067ab..0000000
--- a/config/dbctl/components/binding_oceanbase.yaml
+++ /dev/null
@@ -1,10 +0,0 @@
-name: oceanbase
-spec:
- version: v1
- metadata:
- - name: url # Required, define DB connection in DSN format
- value: "root:@tcp(127.0.0.1:2881)/mysql?multiStatements=true"
- - name: maxOpenConns
- value: "5"
- - name: username
- value: "root"
\ No newline at end of file
diff --git a/config/dbctl/components/binding_polarx.yaml b/config/dbctl/components/binding_polarx.yaml
deleted file mode 100644
index c9b2372..0000000
--- a/config/dbctl/components/binding_polarx.yaml
+++ /dev/null
@@ -1,12 +0,0 @@
-apiVersion: dapr.io/v1alpha1
-kind: Component
-metadata:
- name: polardbx
-spec:
- type: bindings.polardbx
- version: v1
- metadata:
- - name: url # Required, define DB connection in DSN format
- value: "root:@tcp(127.0.0.1:3306)/mysql?multiStatements=true"
- - name: maxOpenConns
- value: "5"
diff --git a/config/dbctl/components/binding_postgresql.yaml b/config/dbctl/components/binding_postgresql.yaml
deleted file mode 100644
index fff4d2b..0000000
--- a/config/dbctl/components/binding_postgresql.yaml
+++ /dev/null
@@ -1,6 +0,0 @@
-name: postgresql
-spec:
- version: v1
- metadata:
- - name: url # Required
- value: "user=postgres password=docker host=localhost port=5432 dbname=postgres pool_min_conns=1 pool_max_conns=10"
\ No newline at end of file
diff --git a/config/dbctl/components/binding_redis.yaml b/config/dbctl/components/binding_redis.yaml
deleted file mode 100644
index 2db926f..0000000
--- a/config/dbctl/components/binding_redis.yaml
+++ /dev/null
@@ -1,8 +0,0 @@
-name: redis
-spec:
- version: v1
- metadata:
- - name: redisHost
- value: 127.0.0.1:6379
- - name: enableTLS
- value: false
\ No newline at end of file
diff --git a/config/dbctl/components/middleware_probe.yaml b/config/dbctl/components/middleware_probe.yaml
deleted file mode 100644
index a67435f..0000000
--- a/config/dbctl/components/middleware_probe.yaml
+++ /dev/null
@@ -1,3 +0,0 @@
-name: probe
-spec:
- version: v1
diff --git a/config/dbctl/config.yaml b/config/dbctl/config.yaml
deleted file mode 100644
index b919dc6..0000000
--- a/config/dbctl/config.yaml
+++ /dev/null
@@ -1,10 +0,0 @@
-apiVersion: dapr.io/v1alpha1
-kind: Configuration
-metadata:
- name: pipeline
- namespace: default
-spec:
- httpPipeline:
- handlers:
- - name: probe
- type: middleware.http.probe
diff --git a/constant/const.go b/constant/const.go
deleted file mode 100644
index c7d62ac..0000000
--- a/constant/const.go
+++ /dev/null
@@ -1,37 +0,0 @@
-/*
-Copyright (C) 2022-2023 ApeCloud Co., Ltd
-
-This file is part of KubeBlocks project
-
-This program is free software: you can redistribute it and/or modify
-it under the terms of the GNU Affero General Public License as published by
-the Free Software Foundation, either version 3 of the License, or
-(at your option) any later version.
-
-This program is distributed in the hope that it will be useful
-but WITHOUT ANY WARRANTY; without even the implied warranty of
-MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-GNU Affero General Public License for more details.
-
-You should have received a copy of the GNU Affero General Public License
-along with this program. If not, see .
-*/
-
-package constant
-
-const (
- // AppInstanceLabelKey refer cluster.Name
- AppInstanceLabelKey = "app.kubernetes.io/instance"
- AppManagedByLabelKey = "app.kubernetes.io/managed-by"
- RoleLabelKey = "kubeblocks.io/role" // RoleLabelKey consensusSet and replicationSet role label key
- KBAppComponentLabelKey = "apps.kubeblocks.io/component-name"
-)
-
-const (
- LorryHTTPPortName = "lorry-http-port"
-)
-
-const (
- KubernetesClusterDomainEnv = "KUBERNETES_CLUSTER_DOMAIN"
- DefaultDNSDomain = "cluster.local"
-)
diff --git a/constant/env.go b/constant/env.go
index 01d09aa..60c7fff 100644
--- a/constant/env.go
+++ b/constant/env.go
@@ -25,12 +25,7 @@ import (
"github.com/spf13/viper"
)
-// Lorry
const (
- KBEnvWorkloadType = "KB_WORKLOAD_TYPE"
- KBEnvBuiltinHandler = "KB_BUILTIN_HANDLER"
- KBEnvActionCommands = "KB_ACTION_COMMANDS"
- KBEnvEngineType = "KB_ENGINE_TYPE"
KBEnvServiceUser = "KB_SERVICE_USER"
KBEnvServicePassword = "KB_SERVICE_PASSWORD"
// KBEnvServiceRoles defines the Roles configured in the cluster definition that are visible to users.
@@ -38,44 +33,19 @@ const (
// KBEnvServicePort defines the port of the DB service
KBEnvServicePort = "KB_SERVICE_PORT"
-
- // KBEnvTTL controls the lease expiration time in DCS. If the leader fails to renew its lease within the TTL duration, it will lose the leader role, allowing other replicas to take over.
- KBEnvTTL = "KB_TTL"
-
- // KBEnvMaxLag defines maximum replication lag permitted when performing a switchover.
- KBEnvMaxLag = "KB_MAX_LAG"
-
- // KBEnvEnableHA Whether to enable high availability, true by default.
- KBEnvEnableHA = "KB_ENABLE_HA"
-
- // KBEnvRsmRoleUpdateMechanism defines the method to send events: DirectAPIServerEventUpdate(through lorry service), ReadinessProbeEventUpdate(through kubelet service)
- KBEnvRsmRoleUpdateMechanism = "KB_RSM_ROLE_UPDATE_MECHANISM"
- KBEnvRoleProbeTimeout = "KB_RSM_ROLE_PROBE_TIMEOUT"
- KBEnvRoleProbePeriod = "KB_RSM_ROLE_PROBE_PERIOD"
)
// new envs for KB 1.0
const (
- EnvNamespace = "MY_NAMESPACE"
EnvPodName = "MY_POD_NAME"
- EnvPodIP = "MY_POD_IP"
- EnvPodUID = "MY_POD_UID"
- EnvClusterName = "MY_CLUSTER_NAME"
- EnvComponentName = "MY_COMP_NAME"
EnvClusterCompName = "MY_CLUSTER_COMP_NAME"
)
// old envs for KB 0.9
const (
KBEnvNamespace = "KB_NAMESPACE"
- KBEnvClusterName = "KB_CLUSTER_NAME"
KBEnvClusterCompName = "KB_CLUSTER_COMP_NAME"
- KBEnvCompName = "KB_COMP_NAME"
KBEnvPodName = "KB_POD_NAME"
- KBEnvPodUID = "KB_POD_UID"
- KBEnvPodIP = "KB_POD_IP"
- KBEnvPodFQDN = "KB_POD_FQDN"
- KBEnvNodeName = "KB_NODENAME"
)
func GetPodName() string {
@@ -91,61 +61,6 @@ func GetPodName() string {
}
}
-func GetPodIP() string {
- switch {
- case viper.IsSet(KBEnvPodIP):
- return viper.GetString(KBEnvPodIP)
- case viper.IsSet(EnvPodIP):
- return viper.GetString(EnvPodIP)
- default:
- return ""
- }
-}
-
-func GetPodUID() string {
- switch {
- case viper.IsSet(KBEnvPodUID):
- return viper.GetString(KBEnvPodUID)
- case viper.IsSet(EnvPodUID):
- return viper.GetString(EnvPodUID)
- default:
- return ""
- }
-}
-
-func GetNamespace() string {
- switch {
- case viper.IsSet(KBEnvNamespace):
- return viper.GetString(KBEnvNamespace)
- case viper.IsSet(EnvNamespace):
- return viper.GetString(EnvNamespace)
- default:
- return ""
- }
-}
-
-func GetClusterName() string {
- switch {
- case viper.IsSet(KBEnvClusterName):
- return viper.GetString(KBEnvClusterName)
- case viper.IsSet(EnvClusterName):
- return viper.GetString(EnvClusterName)
- default:
- return ""
- }
-}
-
-func GetComponentName() string {
- switch {
- case viper.IsSet(KBEnvCompName):
- return viper.GetString(KBEnvCompName)
- case viper.IsSet(EnvComponentName):
- return viper.GetString(EnvComponentName)
- default:
- return ""
- }
-}
-
func GetClusterCompName() string {
switch {
case viper.IsSet(KBEnvClusterCompName):
@@ -156,3 +71,8 @@ func GetClusterCompName() string {
return ""
}
}
+
+const (
+ ConfigKeyUserName = "username"
+ ConfigKeyPassword = "password"
+)
diff --git a/constant/lorry.go b/constant/lorry.go
deleted file mode 100644
index 22d33eb..0000000
--- a/constant/lorry.go
+++ /dev/null
@@ -1,24 +0,0 @@
-/*
-Copyright (C) 2022-2023 ApeCloud Co., Ltd
-
-This file is part of KubeBlocks project
-
-This program is free software: you can redistribute it and/or modify
-it under the terms of the GNU Affero General Public License as published by
-the Free Software Foundation, either version 3 of the License, or
-(at your option) any later version.
-
-This program is distributed in the hope that it will be useful
-but WITHOUT ANY WARRANTY; without even the implied warranty of
-MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-GNU Affero General Public License for more details.
-
-You should have received a copy of the GNU Affero General Public License
-along with this program. If not, see .
-*/
-
-package constant
-
-const (
- RoleProbeAction = "roleProbe"
-)
diff --git a/ctl/createuser.go b/ctl/createuser.go
deleted file mode 100644
index e6f2927..0000000
--- a/ctl/createuser.go
+++ /dev/null
@@ -1,92 +0,0 @@
-/*
-Copyright (C) 2022-2024 ApeCloud Co., Ltd
-
-This file is part of KubeBlocks project
-
-This program is free software: you can redistribute it and/or modify
-it under the terms of the GNU Affero General Public License as published by
-the Free Software Foundation, either version 3 of the License, or
-(at your option) any later version.
-
-This program is distributed in the hope that it will be useful
-but WITHOUT ANY WARRANTY; without even the implied warranty of
-MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-GNU Affero General Public License for more details.
-
-You should have received a copy of the GNU Affero General Public License
-along with this program. If not, see .
-*/
-
-package ctl
-
-import (
- "context"
-
- "github.com/pkg/errors"
- "github.com/spf13/cobra"
-
- "github.com/apecloud/dbctl/operations"
- "github.com/apecloud/dbctl/operations/user"
- "github.com/apecloud/dbctl/util"
-)
-
-type CreateUserOptions struct {
- OptionsBase
- userName string
- password string
- roleName string
-}
-
-func (options *CreateUserOptions) Validate() error {
- parameters := map[string]any{
- "userName": options.userName,
- "password": options.password,
- }
- if options.roleName != "" {
- parameters["roleName"] = options.roleName
- }
-
- req := &operations.OpsRequest{
- Parameters: parameters,
- }
- options.Request = req
- return options.Operation.PreCheck(context.Background(), req)
-}
-
-func (options *CreateUserOptions) Run() error {
- createUser, ok := options.Operation.(*user.CreateUser)
- if !ok {
- return errors.New("createUser operation not found")
- }
-
- _, err := createUser.Do(context.Background(), options.Request)
- if err != nil {
- return errors.Wrap(err, "executing createUser failed")
- }
- return nil
-}
-
-var createUserOptions = &CreateUserOptions{
- OptionsBase: OptionsBase{
- Action: string(util.CreateUserOp),
- },
-}
-
-var CreateUserCmd = &cobra.Command{
- Use: "createuser",
- Short: "create user.",
- Example: `
-dbctl database createuser --username xxx --password xxx
- `,
- Args: cobra.MinimumNArgs(0),
- Run: CmdRunner(createUserOptions),
-}
-
-func init() {
- CreateUserCmd.Flags().StringVarP(&createUserOptions.userName, "username", "", "", "The name of user to create")
- CreateUserCmd.Flags().StringVarP(&createUserOptions.password, "password", "", "", "The password of user to create")
- CreateUserCmd.Flags().StringVarP(&createUserOptions.roleName, "rolename", "", "", "The role of user to create")
- CreateUserCmd.Flags().BoolP("help", "h", false, "Print this help message")
-
- DatabaseCmd.AddCommand(CreateUserCmd)
-}
diff --git a/ctl/ctr.go b/ctl/ctr.go
index 7eb49b4..a4d0e6e 100644
--- a/ctl/ctr.go
+++ b/ctl/ctr.go
@@ -29,19 +29,14 @@ import (
"github.com/spf13/cobra"
"github.com/spf13/pflag"
"github.com/spf13/viper"
- "go.uber.org/zap"
"k8s.io/klog/v2"
- ctrl "sigs.k8s.io/controller-runtime"
kzap "sigs.k8s.io/controller-runtime/pkg/log/zap"
)
const cliVersionTemplateString = "CLI version: %s \nRuntime version: %s\n"
-var configDir string
-var disableDNSChecker bool
var opts = kzap.Options{
Development: true,
- Level: zap.NewAtomicLevelAt(zap.DPanicLevel),
}
var RootCmd = &cobra.Command{
@@ -58,22 +53,6 @@ _ ______ _______ ______ _ _______ _______ _
|_/ \/(_______)|/ \___/ (_______/|/ \___/ (_______/(_______)(_______/|_/ \/\_______) (______/ |/ \___/ (_______/ )_( (_______/
===============================
dbctl command line interface`,
- PersistentPreRunE: func(cmd *cobra.Command, _ []string) error {
- err := viper.BindPFlags(pflag.CommandLine)
- if err != nil {
- return errors.Wrap(err, "fatal error viper bindPFlags")
- }
-
- // Initialize logger
- kopts := []kzap.Opts{kzap.UseFlagOptions(&opts)}
- if strings.EqualFold("debug", viper.GetString("zap-log-level")) {
- kopts = append(kopts, kzap.RawZapOpts(zap.AddCaller()))
- }
- ctrl.SetLogger(kzap.New(kopts...))
-
- return nil
- },
-
Run: func(cmd *cobra.Command, _ []string) {
if versionFlag {
printVersion()
@@ -89,10 +68,9 @@ type dbctlVersion struct {
}
var (
- cliVersion string
- versionFlag bool
- dbctlVer dbctlVersion
- dbctlRuntimePath string
+ cliVersion string
+ versionFlag bool
+ dbctlVer dbctlVersion
)
// Execute adds all child commands to the root command.
@@ -136,19 +114,13 @@ func init() {
klog.InitFlags(flag.CommandLine)
opts.BindFlags(flag.CommandLine)
pflag.CommandLine.AddGoFlagSet(flag.CommandLine)
- RootCmd.PersistentFlags().StringVar(&configDir, "config-path", "/tools/config/dbctl/components/", "dbctl default config directory for builtin type")
- RootCmd.PersistentFlags().BoolVar(&disableDNSChecker, "disable-dns-checker", false, "disable dns checker, for test&dev")
- RootCmd.PersistentFlags().StringVarP(&dbctlRuntimePath, "tools-dir", "", "/tools/", "The directory of tools binaries")
- RootCmd.PersistentFlags().AddFlagSet(pflag.CommandLine)
+ err := viper.BindPFlags(pflag.CommandLine)
+ if err != nil {
+ panic(errors.Wrap(err, "fatal error viper bindPFlags"))
+ }
}
// GetRuntimeVersion returns the version for the local dbctl runtime.
func GetRuntimeVersion() string {
- // dbctlCMD := filepath.Join(dbctlRuntimePath, "dbctl")
-
- // out, err := exec.Command(dbctlCMD, "--version").Output()
- // if err != nil {
- // return "n/a\n"
- // }
- return string("v0.1.0")
+ return "v0.1.0"
}
diff --git a/ctl/databases.go b/ctl/databases.go
index 019e297..be9939d 100644
--- a/ctl/databases.go
+++ b/ctl/databases.go
@@ -26,16 +26,11 @@ import (
"github.com/pkg/errors"
"github.com/spf13/cobra"
"github.com/spf13/pflag"
- "github.com/spf13/viper"
- "github.com/apecloud/dbctl/constant"
- "github.com/apecloud/dbctl/dcs"
"github.com/apecloud/dbctl/engines/models"
"github.com/apecloud/dbctl/engines/register"
)
-const ()
-
var DatabaseCmd = &cobra.Command{
Use: "database",
Aliases: models.GetEngineTypeListStr(),
@@ -46,7 +41,6 @@ dbctl mongodb createuser --username root --password password
Args: cobra.MinimumNArgs(0),
PersistentPreRunE: func(cmd *cobra.Command, _ []string) error {
commands := stripFlags(os.Args[1:], cmd)
- // fmt.Println("commands: ", commands)
if len(commands) < 1 {
return errors.New("please specify a database subcommand")
}
@@ -54,12 +48,9 @@ dbctl mongodb createuser --username root --password password
if dbType == "database" {
return errors.New("please specify a database type supported by dbctl, the valid types are: " + strings.Join(models.GetEngineTypeListStr(), ", "))
}
- viper.SetDefault(constant.KBEnvEngineType, commands[0])
- // Initialize DCS (Distributed Control System)
- _ = dcs.InitStore()
// Initialize DB Manager
- err := register.InitDBManager(configDir)
+ err := register.InitDBManager(dbType)
if err != nil {
return errors.Wrap(err, "DB manager initialize failed")
}
@@ -100,7 +91,7 @@ func stripFlags(args []string, c *cobra.Command) []string {
return args
}
- commands := []string{}
+ var commands []string
flags := c.Flags()
Loop:
diff --git a/ctl/deleteuser.go b/ctl/deleteuser.go
deleted file mode 100644
index d6d96a2..0000000
--- a/ctl/deleteuser.go
+++ /dev/null
@@ -1,83 +0,0 @@
-/*
-Copyright (C) 2022-2024 ApeCloud Co., Ltd
-
-This file is part of KubeBlocks project
-
-This program is free software: you can redistribute it and/or modify
-it under the terms of the GNU Affero General Public License as published by
-the Free Software Foundation, either version 3 of the License, or
-(at your option) any later version.
-
-This program is distributed in the hope that it will be useful
-but WITHOUT ANY WARRANTY; without even the implied warranty of
-MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-GNU Affero General Public License for more details.
-
-You should have received a copy of the GNU Affero General Public License
-along with this program. If not, see .
-*/
-
-package ctl
-
-import (
- "context"
-
- "github.com/pkg/errors"
- "github.com/spf13/cobra"
-
- "github.com/apecloud/dbctl/operations"
- "github.com/apecloud/dbctl/operations/user"
- "github.com/apecloud/dbctl/util"
-)
-
-type DeleteUserOptions struct {
- OptionsBase
- userName string
-}
-
-func (options *DeleteUserOptions) Validate() error {
- parameters := map[string]any{
- "userName": options.userName,
- }
- req := &operations.OpsRequest{
- Parameters: parameters,
- }
- options.Request = req
- return options.Operation.PreCheck(context.Background(), req)
-}
-
-func (options *DeleteUserOptions) Run() error {
- deleteUser, ok := options.Operation.(*user.DeleteUser)
- if !ok {
- return errors.New("createUser operation not found")
- }
-
- _, err := deleteUser.Do(context.Background(), options.Request)
- if err != nil {
- return errors.Wrap(err, "executing deleteUser failed")
- }
- return nil
-}
-
-var deleteUserOptions = &DeleteUserOptions{
- OptionsBase: OptionsBase{
- Action: string(util.DeleteUserOp),
- },
-}
-
-var DeleteUserCmd = &cobra.Command{
- Use: "deleteuser",
- Short: "delete user.",
- Example: `
-dbctl database deleteuser --username xxx
- `,
- Args: cobra.MinimumNArgs(0),
- Run: CmdRunner(deleteUserOptions),
-}
-
-func init() {
- DeleteUserCmd.Flags().StringVarP(&deleteUserOptions.userName, "username", "", "", "The name of user to delete")
- DeleteUserCmd.Flags().BoolP("help", "h", false, "Print this help message")
-
- DatabaseCmd.AddCommand(DeleteUserCmd)
-}
diff --git a/ctl/describeuser.go b/ctl/describeuser.go
deleted file mode 100644
index 7d6a65f..0000000
--- a/ctl/describeuser.go
+++ /dev/null
@@ -1,84 +0,0 @@
-/*
-Copyright (C) 2022-2024 ApeCloud Co., Ltd
-
-This file is part of KubeBlocks project
-
-This program is free software: you can redistribute it and/or modify
-it under the terms of the GNU Affero General Public License as published by
-the Free Software Foundation, either version 3 of the License, or
-(at your option) any later version.
-
-This program is distributed in the hope that it will be useful
-but WITHOUT ANY WARRANTY; without even the implied warranty of
-MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-GNU Affero General Public License for more details.
-
-You should have received a copy of the GNU Affero General Public License
-along with this program. If not, see .
-*/
-
-package ctl
-
-import (
- "context"
-
- "github.com/pkg/errors"
- "github.com/spf13/cobra"
-
- "github.com/apecloud/dbctl/operations"
- "github.com/apecloud/dbctl/operations/user"
- "github.com/apecloud/dbctl/util"
-)
-
-type DescribeUserOptions struct {
- OptionsBase
- userName string
-}
-
-func (options *DescribeUserOptions) Validate() error {
- parameters := map[string]any{
- "userName": options.userName,
- }
-
- req := &operations.OpsRequest{
- Parameters: parameters,
- }
- options.Request = req
- return options.Operation.PreCheck(context.Background(), req)
-}
-
-func (options *DescribeUserOptions) Run() error {
- describeUser, ok := options.Operation.(*user.DescribeUser)
- if !ok {
- return errors.New("describeUser operation not found")
- }
-
- _, err := describeUser.Do(context.Background(), options.Request)
- if err != nil {
- return errors.Wrap(err, "executing describeUser failed")
- }
- return nil
-}
-
-var describeUserOptions = &DescribeUserOptions{
- OptionsBase: OptionsBase{
- Action: string(util.DescribeUserOp),
- },
-}
-
-var DescribeUserCmd = &cobra.Command{
- Use: "describeuser",
- Short: "describe user.",
- Example: `
-dbctl database describeuser --username xxx
- `,
- Args: cobra.MinimumNArgs(0),
- Run: CmdRunner(describeUserOptions),
-}
-
-func init() {
- DescribeUserCmd.Flags().StringVarP(&describeUserOptions.userName, "username", "", "", "The name of user to describe")
- DescribeUserCmd.Flags().BoolP("help", "h", false, "Print this help message")
-
- DatabaseCmd.AddCommand(DescribeUserCmd)
-}
diff --git a/ctl/grantrole.go b/ctl/grantrole.go
deleted file mode 100644
index 019347b..0000000
--- a/ctl/grantrole.go
+++ /dev/null
@@ -1,87 +0,0 @@
-/*
-Copyright (C) 2022-2024 ApeCloud Co., Ltd
-
-This file is part of KubeBlocks project
-
-This program is free software: you can redistribute it and/or modify
-it under the terms of the GNU Affero General Public License as published by
-the Free Software Foundation, either version 3 of the License, or
-(at your option) any later version.
-
-This program is distributed in the hope that it will be useful
-but WITHOUT ANY WARRANTY; without even the implied warranty of
-MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-GNU Affero General Public License for more details.
-
-You should have received a copy of the GNU Affero General Public License
-along with this program. If not, see .
-*/
-
-package ctl
-
-import (
- "context"
-
- "github.com/pkg/errors"
- "github.com/spf13/cobra"
-
- "github.com/apecloud/dbctl/operations"
- "github.com/apecloud/dbctl/operations/user"
- "github.com/apecloud/dbctl/util"
-)
-
-type GrantUserRoleOptions struct {
- OptionsBase
- userName string
- roleName string
-}
-
-func (options *GrantUserRoleOptions) Validate() error {
- parameters := map[string]any{
- "userName": options.userName,
- "roleName": options.roleName,
- }
-
- req := &operations.OpsRequest{
- Parameters: parameters,
- }
- options.Request = req
- return options.Operation.PreCheck(context.Background(), req)
-}
-
-func (options *GrantUserRoleOptions) Run() error {
- grantUser, ok := options.Operation.(*user.GrantRole)
- if !ok {
- return errors.New("grantUser operation not found")
- }
-
- _, err := grantUser.Do(context.Background(), options.Request)
- if err != nil {
- return errors.Wrap(err, "executing grantUser failed")
- }
- return nil
-}
-
-var grantUserRoleOptions = &GrantUserRoleOptions{
- OptionsBase: OptionsBase{
- Action: string(util.GrantUserRoleOp),
- },
-}
-
-var GrantUserRoleCmd = &cobra.Command{
- Use: "grant-role",
- Short: "grant user role.",
- Example: `
-dbctl database grant-role --username xxx --rolename xxx
- `,
- Args: cobra.MinimumNArgs(0),
- Run: CmdRunner(grantUserRoleOptions),
-}
-
-func init() {
- GrantUserRoleCmd.Flags().StringVarP(&grantUserRoleOptions.userName, "username", "", "", "The name of user to grant")
- GrantUserRoleCmd.Flags().StringVarP(&grantUserRoleOptions.roleName, "rolename", "", "", "The name of role to grant")
- GrantUserRoleCmd.Flags().BoolP("help", "h", false, "Print this help message")
-
- DatabaseCmd.AddCommand(GrantUserRoleCmd)
-}
diff --git a/ctl/join.go b/ctl/join.go
deleted file mode 100644
index 1ea3fd2..0000000
--- a/ctl/join.go
+++ /dev/null
@@ -1,69 +0,0 @@
-/*
-Copyright (C) 2022-2024 ApeCloud Co., Ltd
-
-This file is part of KubeBlocks project
-
-This program is free software: you can redistribute it and/or modify
-it under the terms of the GNU Affero General Public License as published by
-the Free Software Foundation, either version 3 of the License, or
-(at your option) any later version.
-
-This program is distributed in the hope that it will be useful
-but WITHOUT ANY WARRANTY; without even the implied warranty of
-MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-GNU Affero General Public License for more details.
-
-You should have received a copy of the GNU Affero General Public License
-along with this program. If not, see .
-*/
-
-package ctl
-
-import (
- "context"
-
- "github.com/pkg/errors"
- "github.com/spf13/cobra"
-
- "github.com/apecloud/dbctl/operations/replica"
- "github.com/apecloud/dbctl/util"
-)
-
-type JoinOptions struct {
- OptionsBase
-}
-
-func (options *JoinOptions) Run() error {
- join, ok := options.Operation.(*replica.Join)
- if !ok {
- return errors.Errorf("%s operation not found", options.Action)
- }
-
- _, err := join.Do(context.Background(), nil)
- if err != nil {
- return errors.Wrap(err, "executing join failed")
- }
- return nil
-}
-
-var joinOptions = &JoinOptions{
- OptionsBase: OptionsBase{
- Action: string(util.JoinMemberOperation),
- },
-}
-
-var JoinCmd = &cobra.Command{
- Use: "joinmember",
- Short: "execute a join member request.",
- Example: `
-dbctl database joinmember
- `,
- Args: cobra.MinimumNArgs(0),
- Run: CmdRunner(joinOptions),
-}
-
-func init() {
- JoinCmd.Flags().BoolP("help", "h", false, "Print this help message")
-
- DatabaseCmd.AddCommand(JoinCmd)
-}
diff --git a/ctl/leave.go b/ctl/leave.go
deleted file mode 100644
index 4d4a738..0000000
--- a/ctl/leave.go
+++ /dev/null
@@ -1,69 +0,0 @@
-/*
-Copyright (C) 2022-2024 ApeCloud Co., Ltd
-
-This file is part of KubeBlocks project
-
-This program is free software: you can redistribute it and/or modify
-it under the terms of the GNU Affero General Public License as published by
-the Free Software Foundation, either version 3 of the License, or
-(at your option) any later version.
-
-This program is distributed in the hope that it will be useful
-but WITHOUT ANY WARRANTY; without even the implied warranty of
-MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-GNU Affero General Public License for more details.
-
-You should have received a copy of the GNU Affero General Public License
-along with this program. If not, see .
-*/
-
-package ctl
-
-import (
- "context"
-
- "github.com/pkg/errors"
- "github.com/spf13/cobra"
-
- "github.com/apecloud/dbctl/operations/replica"
- "github.com/apecloud/dbctl/util"
-)
-
-type LeaveOptions struct {
- OptionsBase
-}
-
-func (options *LeaveOptions) Run() error {
- leave, ok := options.Operation.(*replica.Leave)
- if !ok {
- return errors.Errorf("%s operation not found", options.Action)
- }
-
- _, err := leave.Do(context.Background(), nil)
- if err != nil {
- return errors.Wrap(err, "executing leave failed")
- }
- return nil
-}
-
-var leaveOptions = &LeaveOptions{
- OptionsBase: OptionsBase{
- Action: string(util.LeaveMemberOperation),
- },
-}
-
-var LeaveCmd = &cobra.Command{
- Use: "leavemember",
- Short: "execute a leave member request.",
- Example: `
-dbctl database leavemember
- `,
- Args: cobra.MinimumNArgs(0),
- Run: CmdRunner(leaveOptions),
-}
-
-func init() {
- LeaveCmd.Flags().BoolP("help", "h", false, "Print this help message")
-
- DatabaseCmd.AddCommand(LeaveCmd)
-}
diff --git a/ctl/listsystemaccounts.go b/ctl/listsystemaccounts.go
deleted file mode 100644
index 9aab097..0000000
--- a/ctl/listsystemaccounts.go
+++ /dev/null
@@ -1,80 +0,0 @@
-/*
-Copyright (C) 2022-2024 ApeCloud Co., Ltd
-
-This file is part of KubeBlocks project
-
-This program is free software: you can redistribute it and/or modify
-it under the terms of the GNU Affero General Public License as published by
-the Free Software Foundation, either version 3 of the License, or
-(at your option) any later version.
-
-This program is distributed in the hope that it will be useful
-but WITHOUT ANY WARRANTY; without even the implied warranty of
-MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-GNU Affero General Public License for more details.
-
-You should have received a copy of the GNU Affero General Public License
-along with this program. If not, see .
-*/
-
-package ctl
-
-import (
- "context"
- "fmt"
-
- "github.com/pkg/errors"
- "github.com/spf13/cobra"
-
- "github.com/apecloud/dbctl/operations/user"
- "github.com/apecloud/dbctl/util"
-)
-
-type ListSystemAccountsOptions struct {
- OptionsBase
-}
-
-func (options *ListSystemAccountsOptions) Validate() error {
- return options.Operation.PreCheck(context.Background(), nil)
-}
-
-func (options *ListSystemAccountsOptions) Run() error {
- listSystemAccounts, ok := options.Operation.(*user.ListSystemAccounts)
- if !ok {
- return errors.Errorf("%s operation not found", options.Action)
- }
-
- users, err := listSystemAccounts.DBManager.ListSystemAccounts(context.Background())
- if err != nil {
- return errors.Wrap(err, "executing listSystemAccounts failed")
- }
- fmt.Printf("list users:\n")
- for _, u := range users {
- fmt.Println("-------------------------")
- fmt.Printf("name: %s\n", u.UserName)
- fmt.Printf("role: %s\n", u.RoleName)
- }
- return nil
-}
-
-var listSystemAccountsOptions = &ListSystemAccountsOptions{
- OptionsBase: OptionsBase{
- Action: string(util.ListSystemAccountsOp),
- },
-}
-
-var ListSystemAccountsCmd = &cobra.Command{
- Use: "listsystemaccounts",
- Short: "list system accounts.",
- Example: `
-dbctl database listsystemaccounts
- `,
- Args: cobra.MinimumNArgs(0),
- Run: CmdRunner(listSystemAccountsOptions),
-}
-
-func init() {
- ListSystemAccountsCmd.Flags().BoolP("help", "h", false, "Print this help message")
-
- DatabaseCmd.AddCommand(ListSystemAccountsCmd)
-}
diff --git a/ctl/listusers.go b/ctl/listusers.go
deleted file mode 100644
index a6a1429..0000000
--- a/ctl/listusers.go
+++ /dev/null
@@ -1,85 +0,0 @@
-/*
-Copyright (C) 2022-2024 ApeCloud Co., Ltd
-
-This file is part of KubeBlocks project
-
-This program is free software: you can redistribute it and/or modify
-it under the terms of the GNU Affero General Public License as published by
-the Free Software Foundation, either version 3 of the License, or
-(at your option) any later version.
-
-This program is distributed in the hope that it will be useful
-but WITHOUT ANY WARRANTY; without even the implied warranty of
-MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-GNU Affero General Public License for more details.
-
-You should have received a copy of the GNU Affero General Public License
-along with this program. If not, see .
-*/
-
-package ctl
-
-import (
- "context"
- "fmt"
-
- "github.com/pkg/errors"
- "github.com/spf13/cobra"
-
- "github.com/apecloud/dbctl/operations/user"
- "github.com/apecloud/dbctl/util"
-)
-
-var (
- lorryAddr string
-)
-
-type ListUsersOptions struct {
- OptionsBase
-}
-
-func (options *ListUsersOptions) Validate() error {
- return options.Operation.PreCheck(context.Background(), nil)
-}
-
-func (options *ListUsersOptions) Run() error {
- listUsers, ok := options.Operation.(*user.ListUsers)
- if !ok {
- return errors.Errorf("%s operation not found", options.Action)
- }
-
- users, err := listUsers.DBManager.ListUsers(context.Background())
- if err != nil {
- return errors.Wrap(err, "executing listUsers failed")
- }
- fmt.Printf("list users:\n")
- for _, u := range users {
- fmt.Println("-------------------------")
- fmt.Printf("name: %s\n", u.UserName)
- fmt.Printf("role: %s\n", u.RoleName)
- }
- return nil
-}
-
-var listUsersOptions = &ListUsersOptions{
- OptionsBase: OptionsBase{
- Action: string(util.ListUsersOp),
- },
-}
-
-var ListUsersCmd = &cobra.Command{
- Use: "listusers",
- Short: "list normal users.",
- Example: `
-dbctl database listusers
- `,
- Args: cobra.MinimumNArgs(0),
- Run: CmdRunner(listUsersOptions),
-}
-
-func init() {
- ListUsersCmd.Flags().StringVarP(&lorryAddr, "lorry-addr", "", "http://localhost:3501/v1.0/", "The addr of lorry to request")
- ListUsersCmd.Flags().BoolP("help", "h", false, "Print this help message")
-
- DatabaseCmd.AddCommand(ListUsersCmd)
-}
diff --git a/ctl/revokerole.go b/ctl/revokerole.go
deleted file mode 100644
index 85f22e9..0000000
--- a/ctl/revokerole.go
+++ /dev/null
@@ -1,77 +0,0 @@
-/*
-Copyright (C) 2022-2024 ApeCloud Co., Ltd
-
-This file is part of KubeBlocks project
-
-This program is free software: you can redistribute it and/or modify
-it under the terms of the GNU Affero General Public License as published by
-the Free Software Foundation, either version 3 of the License, or
-(at your option) any later version.
-
-This program is distributed in the hope that it will be useful
-but WITHOUT ANY WARRANTY; without even the implied warranty of
-MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-GNU Affero General Public License for more details.
-
-You should have received a copy of the GNU Affero General Public License
-along with this program. If not, see .
-*/
-
-package ctl
-
-import (
- "context"
-
- "github.com/pkg/errors"
- "github.com/spf13/cobra"
-
- "github.com/apecloud/dbctl/operations/user"
- "github.com/apecloud/dbctl/util"
-)
-
-type RevokeUserRoleOptions struct {
- OptionsBase
- userName string
- roleName string
-}
-
-func (options *RevokeUserRoleOptions) Validate() error {
- return options.Operation.PreCheck(context.Background(), nil)
-}
-
-func (options *RevokeUserRoleOptions) Run() error {
- revokeUserRole, ok := options.Operation.(*user.RevokeRole)
- if !ok {
- return errors.Errorf("%s operation not found", options.Action)
- }
-
- _, err := revokeUserRole.Do(context.Background(), options.Request)
- if err != nil {
- return errors.Wrap(err, "executing revokeUserRole failed")
- }
- return nil
-}
-
-var revokeUserRoleOptions = &RevokeUserRoleOptions{
- OptionsBase: OptionsBase{
- Action: string(util.RevokeUserRoleOp),
- },
-}
-
-var RevokeUserRoleCmd = &cobra.Command{
- Use: "revoke-role",
- Short: "revoke user role.",
- Example: `
-dbctl database revoke-role --username xxx --rolename xxx
- `,
- Args: cobra.MinimumNArgs(0),
- Run: CmdRunner(revokeUserRoleOptions),
-}
-
-func init() {
- RevokeUserRoleCmd.Flags().StringVarP(&revokeUserRoleOptions.userName, "username", "", "", "The name of user to revoke")
- RevokeUserRoleCmd.Flags().StringVarP(&revokeUserRoleOptions.roleName, "rolename", "", "", "The name of role to revoke")
- RevokeUserRoleCmd.Flags().BoolP("help", "h", false, "Print this help message")
-
- DatabaseCmd.AddCommand(RevokeUserRoleCmd)
-}
diff --git a/ctl/service.go b/ctl/service.go
index 3136131..c866e60 100644
--- a/ctl/service.go
+++ b/ctl/service.go
@@ -22,10 +22,15 @@ package ctl
import (
"os"
"os/signal"
+ "strings"
"syscall"
"github.com/pkg/errors"
"github.com/spf13/cobra"
+ "github.com/spf13/viper"
+ "go.uber.org/zap"
+ ctrl "sigs.k8s.io/controller-runtime"
+ kzap "sigs.k8s.io/controller-runtime/pkg/log/zap"
"github.com/apecloud/dbctl/httpserver"
opsregister "github.com/apecloud/dbctl/operations/register"
@@ -39,6 +44,13 @@ dbctl service
`,
Args: cobra.MinimumNArgs(0),
Run: func(cmd *cobra.Command, args []string) {
+ // Initialize logger
+ kOpts := []kzap.Opts{kzap.UseFlagOptions(&opts)}
+ if strings.EqualFold("debug", viper.GetString("zap-log-level")) {
+ kOpts = append(kOpts, kzap.RawZapOpts(zap.AddCaller()))
+ }
+ ctrl.SetLogger(kzap.New(kOpts...))
+
// start HTTP Server
ops := opsregister.Operations()
httpServer := httpserver.NewServer(ops)
diff --git a/ctl/switchover.go b/ctl/switchover.go
deleted file mode 100644
index d4d554d..0000000
--- a/ctl/switchover.go
+++ /dev/null
@@ -1,91 +0,0 @@
-/*
-Copyright (C) 2022-2024 ApeCloud Co., Ltd
-
-This file is part of KubeBlocks project
-
-This program is free software: you can redistribute it and/or modify
-it under the terms of the GNU Affero General Public License as published by
-the Free Software Foundation, either version 3 of the License, or
-(at your option) any later version.
-
-This program is distributed in the hope that it will be useful
-but WITHOUT ANY WARRANTY; without even the implied warranty of
-MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-GNU Affero General Public License for more details.
-
-You should have received a copy of the GNU Affero General Public License
-along with this program. If not, see .
-*/
-
-package ctl
-
-import (
- "context"
-
- "github.com/pkg/errors"
- "github.com/spf13/cobra"
-
- "github.com/apecloud/dbctl/operations"
- "github.com/apecloud/dbctl/operations/replica"
- "github.com/apecloud/dbctl/util"
-)
-
-type SwitchOptions struct {
- OptionsBase
- primary string
- candidate string
- force bool
-}
-
-func (options *SwitchOptions) Validate() error {
- params := map[string]interface{}{}
- if options.primary != "" {
- params["primary"] = options.primary
- }
- if options.candidate != "" {
- params["candidate"] = options.candidate
- }
- req := &operations.OpsRequest{
- Parameters: params,
- }
- options.Request = req
- return options.Operation.PreCheck(context.Background(), req)
-}
-
-func (options *SwitchOptions) Run() error {
- switchover, ok := options.Operation.(*replica.Switchover)
- if !ok {
- return errors.Errorf("%s operation not found", options.Action)
- }
-
- _, err := switchover.Do(context.Background(), options.Request)
- if err != nil {
- return errors.Wrap(err, "executing switchover failed")
- }
- return nil
-}
-
-var switchoverOptions = &SwitchOptions{
- OptionsBase: OptionsBase{
- Action: string(util.SwitchoverOperation),
- },
-}
-
-var SwitchCmd = &cobra.Command{
- Use: "switchover",
- Short: "execute a switchover request.",
- Example: `
-dbctl database switchover --primary xxx --candidate xxx
- `,
- Args: cobra.MinimumNArgs(0),
- Run: CmdRunner(switchoverOptions),
-}
-
-func init() {
- SwitchCmd.Flags().StringVarP(&switchoverOptions.primary, "primary", "p", "", "The primary pod name")
- SwitchCmd.Flags().StringVarP(&switchoverOptions.candidate, "candidate", "c", "", "The candidate pod name")
- SwitchCmd.Flags().BoolVarP(&switchoverOptions.force, "force", "f", false, "force to swithover if failed")
- SwitchCmd.Flags().BoolP("help", "h", false, "Print this help message")
-
- DatabaseCmd.AddCommand(SwitchCmd)
-}
diff --git a/ctl/util.go b/ctl/util.go
deleted file mode 100644
index 3d273a1..0000000
--- a/ctl/util.go
+++ /dev/null
@@ -1,30 +0,0 @@
-/*
-Copyright (C) 2022-2024 ApeCloud Co., Ltd
-
-This file is part of KubeBlocks project
-
-This program is free software: you can redistribute it and/or modify
-it under the terms of the GNU Affero General Public License as published by
-the Free Software Foundation, either version 3 of the License, or
-(at your option) any later version.
-
-This program is distributed in the hope that it will be useful
-but WITHOUT ANY WARRANTY; without even the implied warranty of
-MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-GNU Affero General Public License for more details.
-
-You should have received a copy of the GNU Affero General Public License
-along with this program. If not, see .
-*/
-
-package ctl
-
-import (
- "fmt"
- "os"
- "time"
-)
-
-func Printf(format string, args ...any) {
- fmt.Fprintf(os.Stdout, "["+time.Now().Format("2006-01-02T15:04:05 -07:00:00")+"] "+format, args...)
-}
diff --git a/dcs/dcs.go b/dcs/dcs.go
deleted file mode 100644
index 1635aed..0000000
--- a/dcs/dcs.go
+++ /dev/null
@@ -1,85 +0,0 @@
-/*
-Copyright (C) 2022-2023 ApeCloud Co., Ltd
-
-This file is part of KubeBlocks project
-
-This program is free software: you can redistribute it and/or modify
-it under the terms of the GNU Affero General Public License as published by
-the Free Software Foundation, either version 3 of the License, or
-(at your option) any later version.
-
-This program is distributed in the hope that it will be useful
-but WITHOUT ANY WARRANTY; without even the implied warranty of
-MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-GNU Affero General Public License for more details.
-
-You should have received a copy of the GNU Affero General Public License
-along with this program. If not, see .
-*/
-
-package dcs
-
-import (
- "github.com/spf13/viper"
-
- "github.com/apecloud/dbctl/constant"
-)
-
-type DCS interface {
- Initialize() error
-
- // cluster manage functions
- GetClusterName() string
- GetCluster() (*Cluster, error)
- GetClusterFromCache() *Cluster
- ResetCluster()
- DeleteCluster()
-
- // cluster scole ha config
- GetHaConfig() (*HaConfig, error)
- UpdateHaConfig() error
-
- // member manager functions
- GetMembers() ([]Member, error)
- AddCurrentMember() error
-
- // manual switchover
- GetSwitchover() (*Switchover, error)
- CreateSwitchover(string, string) error
- DeleteSwitchover() error
-
- // cluster scope leader lock
- AttemptAcquireLease() error
- CreateLease() error
- IsLeaseExist() (bool, error)
- HasLease() bool
- ReleaseLease() error
- UpdateLease() error
-
- GetLeader() (*Leader, error)
-}
-
-var dcs DCS
-
-func init() {
- viper.SetDefault(constant.KBEnvTTL, 15)
- viper.SetDefault(constant.KBEnvMaxLag, 10)
- viper.SetDefault(constant.KubernetesClusterDomainEnv, constant.DefaultDNSDomain)
-}
-
-func SetStore(d DCS) {
- dcs = d
-}
-
-func GetStore() DCS {
- return dcs
-}
-
-func InitStore() error {
- store, err := NewKubernetesStore()
- if err != nil {
- return err
- }
- dcs = store
- return nil
-}
diff --git a/dcs/dcs_mock.go b/dcs/dcs_mock.go
deleted file mode 100644
index 08eea83..0000000
--- a/dcs/dcs_mock.go
+++ /dev/null
@@ -1,337 +0,0 @@
-// /*
-// Copyright (C) 2022-2024 ApeCloud Co., Ltd
-//
-// This file is part of KubeBlocks project
-//
-// This program is free software: you can redistribute it and/or modify
-// it under the terms of the GNU Affero General Public License as published by
-// the Free Software Foundation, either version 3 of the License, or
-// (at your option) any later version.
-//
-// This program is distributed in the hope that it will be useful
-// but WITHOUT ANY WARRANTY; without even the implied warranty of
-// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-// GNU Affero General Public License for more details.
-//
-// You should have received a copy of the GNU Affero General Public License
-// along with this program. If not, see .
-// */
-//
-//
-
-// Code generated by MockGen. DO NOT EDIT.
-// Source: github.com/apecloud/dbctl/dcs (interfaces: DCS)
-
-// Package dcs is a generated GoMock package.
-package dcs
-
-import (
- reflect "reflect"
-
- gomock "github.com/golang/mock/gomock"
-)
-
-// MockDCS is a mock of DCS interface.
-type MockDCS struct {
- ctrl *gomock.Controller
- recorder *MockDCSMockRecorder
-}
-
-// MockDCSMockRecorder is the mock recorder for MockDCS.
-type MockDCSMockRecorder struct {
- mock *MockDCS
-}
-
-// NewMockDCS creates a new mock instance.
-func NewMockDCS(ctrl *gomock.Controller) *MockDCS {
- mock := &MockDCS{ctrl: ctrl}
- mock.recorder = &MockDCSMockRecorder{mock}
- return mock
-}
-
-// EXPECT returns an object that allows the caller to indicate expected use.
-func (m *MockDCS) EXPECT() *MockDCSMockRecorder {
- return m.recorder
-}
-
-// AddCurrentMember mocks base method.
-func (m *MockDCS) AddCurrentMember() error {
- m.ctrl.T.Helper()
- ret := m.ctrl.Call(m, "AddCurrentMember")
- ret0, _ := ret[0].(error)
- return ret0
-}
-
-// AddCurrentMember indicates an expected call of AddCurrentMember.
-func (mr *MockDCSMockRecorder) AddCurrentMember() *gomock.Call {
- mr.mock.ctrl.T.Helper()
- return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "AddCurrentMember", reflect.TypeOf((*MockDCS)(nil).AddCurrentMember))
-}
-
-// AttemptAcquireLease mocks base method.
-func (m *MockDCS) AttemptAcquireLease() error {
- m.ctrl.T.Helper()
- ret := m.ctrl.Call(m, "AttemptAcquireLease")
- ret0, _ := ret[0].(error)
- return ret0
-}
-
-// AttemptAcquireLease indicates an expected call of AttemptAcquireLease.
-func (mr *MockDCSMockRecorder) AttemptAcquireLease() *gomock.Call {
- mr.mock.ctrl.T.Helper()
- return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "AttemptAcquireLease", reflect.TypeOf((*MockDCS)(nil).AttemptAcquireLease))
-}
-
-// CreateLease mocks base method.
-func (m *MockDCS) CreateLease() error {
- m.ctrl.T.Helper()
- ret := m.ctrl.Call(m, "CreateLease")
- ret0, _ := ret[0].(error)
- return ret0
-}
-
-// CreateLease indicates an expected call of CreateLease.
-func (mr *MockDCSMockRecorder) CreateLease() *gomock.Call {
- mr.mock.ctrl.T.Helper()
- return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "CreateLease", reflect.TypeOf((*MockDCS)(nil).CreateLease))
-}
-
-// CreateSwitchover mocks base method.
-func (m *MockDCS) CreateSwitchover(arg0, arg1 string) error {
- m.ctrl.T.Helper()
- ret := m.ctrl.Call(m, "CreateSwitchover", arg0, arg1)
- ret0, _ := ret[0].(error)
- return ret0
-}
-
-// CreateSwitchover indicates an expected call of CreateSwitchover.
-func (mr *MockDCSMockRecorder) CreateSwitchover(arg0, arg1 interface{}) *gomock.Call {
- mr.mock.ctrl.T.Helper()
- return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "CreateSwitchover", reflect.TypeOf((*MockDCS)(nil).CreateSwitchover), arg0, arg1)
-}
-
-// DeleteCluster mocks base method.
-func (m *MockDCS) DeleteCluster() {
- m.ctrl.T.Helper()
- m.ctrl.Call(m, "DeleteCluster")
-}
-
-// DeleteCluster indicates an expected call of DeleteCluster.
-func (mr *MockDCSMockRecorder) DeleteCluster() *gomock.Call {
- mr.mock.ctrl.T.Helper()
- return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "DeleteCluster", reflect.TypeOf((*MockDCS)(nil).DeleteCluster))
-}
-
-// DeleteSwitchover mocks base method.
-func (m *MockDCS) DeleteSwitchover() error {
- m.ctrl.T.Helper()
- ret := m.ctrl.Call(m, "DeleteSwitchover")
- ret0, _ := ret[0].(error)
- return ret0
-}
-
-// DeleteSwitchover indicates an expected call of DeleteSwitchover.
-func (mr *MockDCSMockRecorder) DeleteSwitchover() *gomock.Call {
- mr.mock.ctrl.T.Helper()
- return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "DeleteSwitchover", reflect.TypeOf((*MockDCS)(nil).DeleteSwitchover))
-}
-
-// GetCluster mocks base method.
-func (m *MockDCS) GetCluster() (*Cluster, error) {
- m.ctrl.T.Helper()
- ret := m.ctrl.Call(m, "GetCluster")
- ret0, _ := ret[0].(*Cluster)
- ret1, _ := ret[1].(error)
- return ret0, ret1
-}
-
-// GetCluster indicates an expected call of GetCluster.
-func (mr *MockDCSMockRecorder) GetCluster() *gomock.Call {
- mr.mock.ctrl.T.Helper()
- return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetCluster", reflect.TypeOf((*MockDCS)(nil).GetCluster))
-}
-
-// GetClusterFromCache mocks base method.
-func (m *MockDCS) GetClusterFromCache() *Cluster {
- m.ctrl.T.Helper()
- ret := m.ctrl.Call(m, "GetClusterFromCache")
- ret0, _ := ret[0].(*Cluster)
- return ret0
-}
-
-// GetClusterFromCache indicates an expected call of GetClusterFromCache.
-func (mr *MockDCSMockRecorder) GetClusterFromCache() *gomock.Call {
- mr.mock.ctrl.T.Helper()
- return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetClusterFromCache", reflect.TypeOf((*MockDCS)(nil).GetClusterFromCache))
-}
-
-// GetClusterName mocks base method.
-func (m *MockDCS) GetClusterName() string {
- m.ctrl.T.Helper()
- ret := m.ctrl.Call(m, "GetClusterName")
- ret0, _ := ret[0].(string)
- return ret0
-}
-
-// GetClusterName indicates an expected call of GetClusterName.
-func (mr *MockDCSMockRecorder) GetClusterName() *gomock.Call {
- mr.mock.ctrl.T.Helper()
- return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetClusterName", reflect.TypeOf((*MockDCS)(nil).GetClusterName))
-}
-
-// GetHaConfig mocks base method.
-func (m *MockDCS) GetHaConfig() (*HaConfig, error) {
- m.ctrl.T.Helper()
- ret := m.ctrl.Call(m, "GetHaConfig")
- ret0, _ := ret[0].(*HaConfig)
- ret1, _ := ret[1].(error)
- return ret0, ret1
-}
-
-// GetHaConfig indicates an expected call of GetHaConfig.
-func (mr *MockDCSMockRecorder) GetHaConfig() *gomock.Call {
- mr.mock.ctrl.T.Helper()
- return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetHaConfig", reflect.TypeOf((*MockDCS)(nil).GetHaConfig))
-}
-
-// GetLeader mocks base method.
-func (m *MockDCS) GetLeader() (*Leader, error) {
- m.ctrl.T.Helper()
- ret := m.ctrl.Call(m, "GetLeader")
- ret0, _ := ret[0].(*Leader)
- ret1, _ := ret[1].(error)
- return ret0, ret1
-}
-
-// GetLeader indicates an expected call of GetLeader.
-func (mr *MockDCSMockRecorder) GetLeader() *gomock.Call {
- mr.mock.ctrl.T.Helper()
- return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetLeader", reflect.TypeOf((*MockDCS)(nil).GetLeader))
-}
-
-// GetMembers mocks base method.
-func (m *MockDCS) GetMembers() ([]Member, error) {
- m.ctrl.T.Helper()
- ret := m.ctrl.Call(m, "GetMembers")
- ret0, _ := ret[0].([]Member)
- ret1, _ := ret[1].(error)
- return ret0, ret1
-}
-
-// GetMembers indicates an expected call of GetMembers.
-func (mr *MockDCSMockRecorder) GetMembers() *gomock.Call {
- mr.mock.ctrl.T.Helper()
- return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetMembers", reflect.TypeOf((*MockDCS)(nil).GetMembers))
-}
-
-// GetSwitchover mocks base method.
-func (m *MockDCS) GetSwitchover() (*Switchover, error) {
- m.ctrl.T.Helper()
- ret := m.ctrl.Call(m, "GetSwitchover")
- ret0, _ := ret[0].(*Switchover)
- ret1, _ := ret[1].(error)
- return ret0, ret1
-}
-
-// GetSwitchover indicates an expected call of GetSwitchover.
-func (mr *MockDCSMockRecorder) GetSwitchover() *gomock.Call {
- mr.mock.ctrl.T.Helper()
- return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetSwitchover", reflect.TypeOf((*MockDCS)(nil).GetSwitchover))
-}
-
-// HasLease mocks base method.
-func (m *MockDCS) HasLease() bool {
- m.ctrl.T.Helper()
- ret := m.ctrl.Call(m, "HasLease")
- ret0, _ := ret[0].(bool)
- return ret0
-}
-
-// HasLease indicates an expected call of HasLease.
-func (mr *MockDCSMockRecorder) HasLease() *gomock.Call {
- mr.mock.ctrl.T.Helper()
- return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "HasLease", reflect.TypeOf((*MockDCS)(nil).HasLease))
-}
-
-// Initialize mocks base method.
-func (m *MockDCS) Initialize() error {
- m.ctrl.T.Helper()
- ret := m.ctrl.Call(m, "Initialize")
- ret0, _ := ret[0].(error)
- return ret0
-}
-
-// Initialize indicates an expected call of Initialize.
-func (mr *MockDCSMockRecorder) Initialize() *gomock.Call {
- mr.mock.ctrl.T.Helper()
- return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Initialize", reflect.TypeOf((*MockDCS)(nil).Initialize))
-}
-
-// IsLeaseExist mocks base method.
-func (m *MockDCS) IsLeaseExist() (bool, error) {
- m.ctrl.T.Helper()
- ret := m.ctrl.Call(m, "IsLeaseExist")
- ret0, _ := ret[0].(bool)
- ret1, _ := ret[1].(error)
- return ret0, ret1
-}
-
-// IsLeaseExist indicates an expected call of IsLeaseExist.
-func (mr *MockDCSMockRecorder) IsLeaseExist() *gomock.Call {
- mr.mock.ctrl.T.Helper()
- return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "IsLeaseExist", reflect.TypeOf((*MockDCS)(nil).IsLeaseExist))
-}
-
-// ReleaseLease mocks base method.
-func (m *MockDCS) ReleaseLease() error {
- m.ctrl.T.Helper()
- ret := m.ctrl.Call(m, "ReleaseLease")
- ret0, _ := ret[0].(error)
- return ret0
-}
-
-// ReleaseLease indicates an expected call of ReleaseLease.
-func (mr *MockDCSMockRecorder) ReleaseLease() *gomock.Call {
- mr.mock.ctrl.T.Helper()
- return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ReleaseLease", reflect.TypeOf((*MockDCS)(nil).ReleaseLease))
-}
-
-// ResetCluster mocks base method.
-func (m *MockDCS) ResetCluster() {
- m.ctrl.T.Helper()
- m.ctrl.Call(m, "ResetCluster")
-}
-
-// ResetCluster indicates an expected call of ResetCluster.
-func (mr *MockDCSMockRecorder) ResetCluster() *gomock.Call {
- mr.mock.ctrl.T.Helper()
- return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ResetCluster", reflect.TypeOf((*MockDCS)(nil).ResetCluster))
-}
-
-// UpdateHaConfig mocks base method.
-func (m *MockDCS) UpdateHaConfig() error {
- m.ctrl.T.Helper()
- ret := m.ctrl.Call(m, "UpdateHaConfig")
- ret0, _ := ret[0].(error)
- return ret0
-}
-
-// UpdateHaConfig indicates an expected call of UpdateHaConfig.
-func (mr *MockDCSMockRecorder) UpdateHaConfig() *gomock.Call {
- mr.mock.ctrl.T.Helper()
- return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "UpdateHaConfig", reflect.TypeOf((*MockDCS)(nil).UpdateHaConfig))
-}
-
-// UpdateLease mocks base method.
-func (m *MockDCS) UpdateLease() error {
- m.ctrl.T.Helper()
- ret := m.ctrl.Call(m, "UpdateLease")
- ret0, _ := ret[0].(error)
- return ret0
-}
-
-// UpdateLease indicates an expected call of UpdateLease.
-func (mr *MockDCSMockRecorder) UpdateLease() *gomock.Call {
- mr.mock.ctrl.T.Helper()
- return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "UpdateLease", reflect.TypeOf((*MockDCS)(nil).UpdateLease))
-}
diff --git a/dcs/generate.go b/dcs/generate.go
deleted file mode 100644
index 0b9d642..0000000
--- a/dcs/generate.go
+++ /dev/null
@@ -1,22 +0,0 @@
-/*
-Copyright (C) 2022-2023 ApeCloud Co., Ltd
-
-This file is part of KubeBlocks project
-
-This program is free software: you can redistribute it and/or modify
-it under the terms of the GNU Affero General Public License as published by
-the Free Software Foundation, either version 3 of the License, or
-(at your option) any later version.
-
-This program is distributed in the hope that it will be useful
-but WITHOUT ANY WARRANTY; without even the implied warranty of
-MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-GNU Affero General Public License for more details.
-
-You should have received a copy of the GNU Affero General Public License
-along with this program. If not, see .
-*/
-
-package dcs
-
-//go:generate go run github.com/golang/mock/mockgen -copyright_file ../hack/boilerplate.go.txt -package dcs -destination dcs_mock.go github.com/apecloud/dbctl/dcs DCS
diff --git a/dcs/k8s.go b/dcs/k8s.go
deleted file mode 100644
index cfbb4ae..0000000
--- a/dcs/k8s.go
+++ /dev/null
@@ -1,701 +0,0 @@
-/*
-Copyright (C) 2022-2023 ApeCloud Co., Ltd
-
-This file is part of KubeBlocks project
-
-This program is free software: you can redistribute it and/or modify
-it under the terms of the GNU Affero General Public License as published by
-the Free Software Foundation, either version 3 of the License, or
-(at your option) any later version.
-
-This program is distributed in the hope that it will be useful
-but WITHOUT ANY WARRANTY; without even the implied warranty of
-MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-GNU Affero General Public License for more details.
-
-You should have received a copy of the GNU Affero General Public License
-along with this program. If not, see .
-*/
-
-package dcs
-
-import (
- "context"
- "encoding/json"
- "fmt"
- "strconv"
- "time"
-
- "github.com/go-logr/logr"
- "github.com/spf13/viper"
-
- "github.com/pkg/errors"
- corev1 "k8s.io/api/core/v1"
- apierrors "k8s.io/apimachinery/pkg/api/errors"
- metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
- "k8s.io/apimachinery/pkg/labels"
- "k8s.io/client-go/kubernetes"
- "k8s.io/client-go/kubernetes/scheme"
- "k8s.io/client-go/rest"
- ctrl "sigs.k8s.io/controller-runtime"
- "sigs.k8s.io/controller-runtime/pkg/client/apiutil"
-
- appsv1alpha1 "github.com/apecloud/kubeblocks/apis/apps/v1alpha1"
-
- "github.com/apecloud/dbctl/constant"
- k8s "github.com/apecloud/dbctl/util/kubernetes"
-)
-
-type KubernetesStore struct {
- ctx context.Context
- clusterName string
- componentName string
- clusterCompName string
- currentMemberName string
- namespace string
- cluster *Cluster
- client *rest.RESTClient
- clientset *kubernetes.Clientset
- LeaderObservedTime int64
- logger logr.Logger
- IsLeaderClusterWide bool
-}
-
-func NewKubernetesStore() (*KubernetesStore, error) {
- ctx := context.Background()
- logger := ctrl.Log.WithName("DCS-K8S")
- clientset, err := k8s.GetClientSet()
- if err != nil {
- err = errors.Wrap(err, "clientset init failed")
- return nil, err
- }
- client, err := k8s.GetRESTClientForKB()
- if err != nil {
- err = errors.Wrap(err, "restClient init failed")
- return nil, err
- }
-
- clusterName := constant.GetClusterName()
- if clusterName == "" {
- return nil, errors.New(fmt.Sprintf("%s or %s must be set", constant.EnvClusterName, constant.KBEnvClusterName))
- }
-
- componentName := constant.GetComponentName()
- if componentName == "" {
- return nil, errors.New(fmt.Sprintf("%s or %s must be set", constant.EnvComponentName, constant.KBEnvCompName))
- }
-
- clusterCompName := constant.GetClusterCompName()
- if clusterCompName == "" {
- clusterCompName = clusterName + "-" + componentName
- }
-
- currentMemberName := constant.GetPodName()
- if currentMemberName == "" {
- return nil, errors.New("get hostname failed")
- }
-
- namespace := constant.GetNamespace()
- if namespace == "" {
- return nil, errors.New(fmt.Sprintf("%s or %s must be set", constant.EnvNamespace, constant.KBEnvNamespace))
- }
-
- isLeaderClusterWide := false
- store := &KubernetesStore{
- ctx: ctx,
- clusterName: clusterName,
- componentName: componentName,
- clusterCompName: clusterCompName,
- currentMemberName: currentMemberName,
- namespace: namespace,
- client: client,
- clientset: clientset,
- logger: logger,
- IsLeaderClusterWide: isLeaderClusterWide,
- }
- return store, err
-}
-
-func (store *KubernetesStore) Initialize() error {
- store.logger.Info("k8s store initializing")
- _, err := store.GetCluster()
- if err != nil {
- return err
- }
-
- err = store.CreateHaConfig()
- if err != nil {
- store.logger.Error(err, "Create Ha ConfigMap failed")
- }
-
- err = store.CreateLease()
- if err != nil {
- store.logger.Error(err, "Create Leader ConfigMap failed")
- }
- return err
-}
-
-func (store *KubernetesStore) GetClusterName() string {
- return store.clusterName
-}
-
-func (store *KubernetesStore) SetCompName(componentName string) {
- store.componentName = componentName
- store.clusterCompName = store.clusterName + "-" + componentName
-}
-
-func (store *KubernetesStore) GetClusterFromCache() *Cluster {
- if store.cluster != nil {
- return store.cluster
- }
- cluster, _ := store.GetCluster()
- return cluster
-}
-
-func (store *KubernetesStore) GetCluster() (*Cluster, error) {
- clusterResource := &appsv1alpha1.Cluster{}
- err := store.client.Get().
- Namespace(store.namespace).
- Resource("clusters").
- Name(store.clusterName).
- VersionedParams(&metav1.GetOptions{}, scheme.ParameterCodec).
- Do(store.ctx).
- Into(clusterResource)
- if err != nil {
- store.logger.Error(err, "k8s get cluster error")
- return nil, err
- }
-
- var replicas int32
- for _, component := range clusterResource.Spec.ComponentSpecs {
- if store.IsLeaderClusterWide {
- replicas += component.Replicas
- } else if component.Name == store.componentName {
- replicas = component.Replicas
- break
- }
- }
-
- var members []Member
- if store.cluster != nil {
- hasPodIP := true
- for _, m := range store.cluster.Members {
- if m.PodIP == "" {
- hasPodIP = false
- break
- }
- }
- if hasPodIP && int(replicas) == len(store.cluster.Members) {
- members = store.cluster.Members
- }
- }
- if len(members) == 0 {
- members, err = store.GetMembers()
- if err != nil {
- return nil, err
- }
- }
-
- leader, err := store.GetLeader()
- if err != nil {
- store.logger.Info("get leader failed", "error", err.Error())
- }
-
- switchover, err := store.GetSwitchover()
- if err != nil {
- store.logger.Info("get switchover failed", "error", err.Error())
- }
-
- haConfig, err := store.GetHaConfig()
- if err != nil {
- store.logger.Info("get HaConfig failed", "error", err.Error())
- }
-
- cluster := &Cluster{
- ClusterCompName: store.clusterCompName,
- Namespace: store.namespace,
- Replicas: replicas,
- Members: members,
- Leader: leader,
- Switchover: switchover,
- HaConfig: haConfig,
- Resource: clusterResource,
- }
-
- store.cluster = cluster
- return cluster, nil
-}
-
-func (store *KubernetesStore) GetMembers() ([]Member, error) {
- labelsMap := map[string]string{
- constant.AppInstanceLabelKey: store.clusterName,
- constant.AppManagedByLabelKey: "kubeblocks",
- }
- if !store.IsLeaderClusterWide {
- labelsMap[constant.KBAppComponentLabelKey] = store.componentName
- }
-
- selector := labels.SelectorFromSet(labelsMap)
- store.logger.Info(fmt.Sprintf("pod selector: %s", selector.String()))
- podList, err := store.clientset.CoreV1().Pods(store.namespace).List(context.TODO(), metav1.ListOptions{LabelSelector: selector.String()})
- if err != nil {
- return nil, err
- }
-
- store.logger.Info(fmt.Sprintf("podlist: %d", len(podList.Items)))
- members := make([]Member, 0, len(podList.Items))
- for _, pod := range podList.Items {
- componentName := pod.Labels[constant.KBAppComponentLabelKey]
- if componentName == "" {
- // it is not a member pod
- continue
- }
- member := Member{}
- member.Name = pod.Name
- // member.Name = fmt.Sprintf("%s.%s-headless.%s.svc", pod.Name, store.clusterCompName, store.namespace)
- member.Role = pod.Labels[constant.RoleLabelKey]
- member.ComponentName = componentName
- member.PodIP = pod.Status.PodIP
- member.DBPort = getDBPort(&pod)
- member.LorryPort = getLorryPort(&pod)
- member.UID = string(pod.UID)
- if pod.Spec.HostNetwork {
- member.UseIP = true
- }
- member.resource = pod.DeepCopy()
- members = append(members, member)
- }
-
- return members, nil
-}
-
-func (store *KubernetesStore) ResetCluster() {}
-func (store *KubernetesStore) DeleteCluster() {}
-
-func (store *KubernetesStore) GetLeaderConfigMap() (*corev1.ConfigMap, error) {
- leaderName := store.getLeaderName()
- leaderConfigMap, err := store.clientset.CoreV1().ConfigMaps(store.namespace).Get(store.ctx, leaderName, metav1.GetOptions{})
- if err != nil {
- if apierrors.IsNotFound(err) {
- store.logger.Info("Leader configmap is not found", "configmap", leaderName)
- return nil, nil
- }
- store.logger.Error(err, "Get Leader configmap failed")
- }
- return leaderConfigMap, err
-}
-
-func (store *KubernetesStore) IsLeaseExist() (bool, error) {
- leaderConfigMap, err := store.GetLeaderConfigMap()
- appCluster, ok := store.cluster.Resource.(*appsv1alpha1.Cluster)
- if leaderConfigMap != nil && ok && leaderConfigMap.CreationTimestamp.Before(&appCluster.CreationTimestamp) {
- store.logger.Info("A previous leader configmap resource exists, delete it", "name", leaderConfigMap.Name)
- _ = store.DeleteLeader()
- return false, nil
- }
- return leaderConfigMap != nil, err
-}
-
-func (store *KubernetesStore) CreateLease() error {
- isExist, err := store.IsLeaseExist()
- if isExist || err != nil {
- return err
- }
-
- leaderConfigMapName := store.getLeaderName()
- leaderName := store.currentMemberName
- now := time.Now().Unix()
- nowStr := strconv.FormatInt(now, 10)
- ttl := viper.GetString(constant.KBEnvTTL)
- leaderConfigMap := &corev1.ConfigMap{
- ObjectMeta: metav1.ObjectMeta{
- Name: leaderConfigMapName,
- Annotations: map[string]string{
- "leader": leaderName,
- "acquire-time": nowStr,
- "renew-time": nowStr,
- "ttl": ttl,
- "extra": "",
- },
- },
- }
-
- store.logger.Info(fmt.Sprintf("K8S store initializing, create leader ConfigMap: %s", leaderConfigMapName))
- err = store.createConfigMap(leaderConfigMap)
- if err != nil {
- store.logger.Error(err, "Create Leader ConfigMap failed")
- return err
- }
- return nil
-}
-
-func (store *KubernetesStore) GetLeader() (*Leader, error) {
- configmap, err := store.GetLeaderConfigMap()
- if err != nil {
- return nil, err
- }
-
- if configmap == nil {
- return nil, nil
- }
-
- annotations := configmap.Annotations
- acquireTime, err := strconv.ParseInt(annotations["acquire-time"], 10, 64)
- if err != nil {
- acquireTime = 0
- }
- renewTime, err := strconv.ParseInt(annotations["renew-time"], 10, 64)
- if err != nil {
- renewTime = 0
- }
- ttl, err := strconv.Atoi(annotations["ttl"])
- if err != nil {
- ttl = viper.GetInt(constant.KBEnvTTL)
- }
- leader := annotations["leader"]
- stateStr, ok := annotations["dbstate"]
- var dbState *DBState
- if ok {
- dbState = new(DBState)
- err = json.Unmarshal([]byte(stateStr), &dbState)
- if err != nil {
- store.logger.Info("get leader dbstate failed", "annotations", annotations, "error", err.Error())
- }
- }
-
- if ttl > 0 && time.Now().Unix()-renewTime > int64(ttl) {
- store.logger.Info(fmt.Sprintf("lock expired: %v, now: %d", annotations, time.Now().Unix()))
- leader = ""
- }
-
- return &Leader{
- Index: configmap.ResourceVersion,
- Name: leader,
- AcquireTime: acquireTime,
- RenewTime: renewTime,
- TTL: ttl,
- Resource: configmap,
- DBState: dbState,
- }, nil
-}
-
-func (store *KubernetesStore) DeleteLeader() error {
- leaderName := store.getLeaderName()
- err := store.clientset.CoreV1().ConfigMaps(store.namespace).Delete(store.ctx, leaderName, metav1.DeleteOptions{})
- if err != nil {
- store.logger.Error(err, "Delete leader configmap failed")
- }
- return err
-}
-
-func (store *KubernetesStore) AttemptAcquireLease() error {
- timestamp := time.Now().Unix()
- now := strconv.FormatInt(timestamp, 10)
- ttl := store.cluster.HaConfig.ttl
- leaderName := store.currentMemberName
- annotation := map[string]string{
- "leader": leaderName,
- "ttl": strconv.Itoa(ttl),
- "renew-time": now,
- "acquire-time": now,
- }
-
- configMap := store.cluster.Leader.Resource.(*corev1.ConfigMap)
- configMap.SetAnnotations(annotation)
- if store.cluster.Leader.DBState != nil {
- str, _ := json.Marshal(store.cluster.Leader.DBState)
- configMap.Annotations["dbstate"] = string(str)
- }
- cm, err := store.clientset.CoreV1().ConfigMaps(store.namespace).Update(context.TODO(), configMap, metav1.UpdateOptions{})
- if err != nil {
- store.logger.Error(err, "Acquire lease failed")
- return err
- }
-
- store.cluster.Leader.Resource = cm
- store.cluster.Leader.AcquireTime = timestamp
- store.cluster.Leader.RenewTime = timestamp
- return nil
-}
-
-func (store *KubernetesStore) HasLease() bool {
- return store.cluster != nil && store.cluster.Leader != nil && store.cluster.Leader.Name == store.currentMemberName
-}
-
-func (store *KubernetesStore) UpdateLease() error {
- configMap := store.cluster.Leader.Resource.(*corev1.ConfigMap)
-
- annotations := configMap.GetAnnotations()
- if annotations["leader"] != store.currentMemberName {
- return errors.Errorf("lost lease")
- }
- ttl := store.cluster.HaConfig.ttl
- annotations["ttl"] = strconv.Itoa(ttl)
- annotations["renew-time"] = strconv.FormatInt(time.Now().Unix(), 10)
-
- if store.cluster.Leader.DBState != nil {
- str, _ := json.Marshal(store.cluster.Leader.DBState)
- configMap.Annotations["dbstate"] = string(str)
- }
- configMap.SetAnnotations(annotations)
-
- _, err := store.clientset.CoreV1().ConfigMaps(store.namespace).Update(context.TODO(), configMap, metav1.UpdateOptions{})
- return err
-}
-
-func (store *KubernetesStore) ReleaseLease() error {
- store.logger.Info("release lease")
- configMap := store.cluster.Leader.Resource.(*corev1.ConfigMap)
- configMap.Annotations["leader"] = ""
- store.cluster.Leader.Name = ""
-
- if store.cluster.Leader.DBState != nil {
- str, _ := json.Marshal(store.cluster.Leader.DBState)
- configMap.Annotations["dbstate"] = string(str)
- }
- _, err := store.clientset.CoreV1().ConfigMaps(store.namespace).Update(context.TODO(), configMap, metav1.UpdateOptions{})
- if err != nil {
- store.logger.Error(err, "release lease failed")
- }
- // TODO: if response status code is 409, it means operation conflict.
- return err
-}
-
-func (store *KubernetesStore) CreateHaConfig() error {
- haName := store.getHAConfigName()
- haConfig, _ := store.GetHaConfig()
- if haConfig.resource != nil {
- return nil
- }
-
- store.logger.Info(fmt.Sprintf("Create Ha ConfigMap: %s", haName))
- ttl := viper.GetString(constant.KBEnvTTL)
- maxLag := viper.GetString(constant.KBEnvMaxLag)
- enableHA := viper.GetString(constant.KBEnvEnableHA)
- if enableHA == "" {
- // enable HA by default
- enableHA = "true"
- }
- haConfigMap := &corev1.ConfigMap{
- ObjectMeta: metav1.ObjectMeta{
- Name: haName,
- Annotations: map[string]string{
- "ttl": ttl,
- "enable": enableHA,
- "MaxLagOnSwitchover": maxLag,
- },
- },
- }
-
- err := store.createConfigMap(haConfigMap)
- if err != nil {
- store.logger.Error(err, "Create Ha ConfigMap failed")
- }
- return err
-}
-
-func (store *KubernetesStore) GetHaConfig() (*HaConfig, error) {
- configmapName := store.getHAConfigName()
- deleteMembers := make(map[string]MemberToDelete)
- configmap, err := store.clientset.CoreV1().ConfigMaps(store.namespace).Get(context.TODO(), configmapName, metav1.GetOptions{})
- if err != nil {
- if !apierrors.IsNotFound(err) {
- store.logger.Error(err, fmt.Sprintf("Get ha configmap [%s] error", configmapName))
- } else {
- err = nil
- }
- return &HaConfig{
- index: "",
- ttl: viper.GetInt(constant.KBEnvTTL),
- maxLagOnSwitchover: 1048576,
- DeleteMembers: deleteMembers,
- }, err
- }
-
- annotations := configmap.Annotations
- ttl, err := strconv.Atoi(annotations["ttl"])
- if err != nil {
- ttl = viper.GetInt(constant.KBEnvTTL)
- }
- maxLagOnSwitchover, err := strconv.Atoi(annotations["MaxLagOnSwitchover"])
- if err != nil {
- maxLagOnSwitchover = 1048576
- }
-
- enable := false
- enableStr := annotations["enable"]
- if enableStr != "" {
- enable, err = strconv.ParseBool(enableStr)
- }
-
- str := annotations["delete-members"]
- if str != "" {
- err := json.Unmarshal([]byte(str), &deleteMembers)
- if err != nil {
- store.logger.Error(err, fmt.Sprintf("Get delete members [%s] error", str))
- }
- }
-
- return &HaConfig{
- index: configmap.ResourceVersion,
- ttl: ttl,
- enable: enable,
- maxLagOnSwitchover: int64(maxLagOnSwitchover),
- DeleteMembers: deleteMembers,
- resource: configmap,
- }, err
-}
-
-func (store *KubernetesStore) UpdateHaConfig() error {
- haConfig := store.cluster.HaConfig
- if haConfig.resource == nil {
- return errors.New("No HA configmap")
- }
-
- configMap := haConfig.resource.(*corev1.ConfigMap)
- annotations := configMap.Annotations
- annotations["ttl"] = strconv.Itoa(haConfig.ttl)
- deleteMembers, err := json.Marshal(haConfig.DeleteMembers)
- if err != nil {
- store.logger.Error(err, fmt.Sprintf("marsha delete members [%v]", haConfig))
- }
- annotations["delete-members"] = string(deleteMembers)
- annotations["MaxLagOnSwitchover"] = strconv.Itoa(int(haConfig.maxLagOnSwitchover))
-
- _, err = store.clientset.CoreV1().ConfigMaps(store.namespace).Update(context.TODO(), configMap, metav1.UpdateOptions{})
- return err
-}
-
-func (store *KubernetesStore) GetSwitchOverConfigMap() (*corev1.ConfigMap, error) {
- switchoverName := store.getSwitchoverName()
- switchoverConfigMap, err := store.clientset.CoreV1().ConfigMaps(store.namespace).Get(store.ctx, switchoverName, metav1.GetOptions{})
- if err != nil {
- if apierrors.IsNotFound(err) {
- return nil, nil
- }
- store.logger.Error(err, "Get switchover configmap failed")
- }
- store.logger.Info("Found switchover Setting", "configmap", switchoverConfigMap.Annotations)
- return switchoverConfigMap, err
-}
-
-func (store *KubernetesStore) GetSwitchover() (*Switchover, error) {
- switchOverConfigMap, _ := store.GetSwitchOverConfigMap()
- if switchOverConfigMap == nil {
- return nil, nil
- }
- annotations := switchOverConfigMap.Annotations
- scheduledAt, _ := strconv.Atoi(annotations["scheduled-at"])
- switchOver := newSwitchover(switchOverConfigMap.ResourceVersion, annotations["leader"], annotations["candidate"], int64(scheduledAt))
- return switchOver, nil
-}
-
-func (store *KubernetesStore) CreateSwitchover(leader, candidate string) error {
- switchoverName := store.getSwitchoverName()
- switchover, _ := store.GetSwitchover()
- if switchover != nil {
- return fmt.Errorf("there is another switchover %s unfinished", switchoverName)
- }
-
- store.logger.Info(fmt.Sprintf("Create switchover configmap %s", switchoverName))
- swConfigMap := &corev1.ConfigMap{
- ObjectMeta: metav1.ObjectMeta{
- Name: switchoverName,
- Annotations: map[string]string{
- "leader": leader,
- "candidate": candidate,
- },
- },
- }
-
- err := store.createConfigMap(swConfigMap)
- if err != nil {
- store.logger.Error(err, "Create switchover configmap failed")
- return err
- }
- return nil
-}
-
-func (store *KubernetesStore) DeleteSwitchover() error {
- switchoverName := store.getSwitchoverName()
- err := store.clientset.CoreV1().ConfigMaps(store.namespace).Delete(store.ctx, switchoverName, metav1.DeleteOptions{})
- if err != nil {
- store.logger.Error(err, "Delete switchOver configmap failed")
- }
- return err
-}
-
-func (store *KubernetesStore) getLeaderName() string {
- if store.IsLeaderClusterWide {
- return store.clusterName + "-leader"
- }
- return store.clusterCompName + "-leader"
-}
-
-func (store *KubernetesStore) getHAConfigName() string {
- if store.IsLeaderClusterWide {
- return store.clusterName + "-haconfig"
- }
- return store.clusterCompName + "-haconfig"
-}
-
-func (store *KubernetesStore) getSwitchoverName() string {
- if store.IsLeaderClusterWide {
- return store.clusterName + "-switchover"
- }
- return store.clusterCompName + "-switchover"
-}
-
-func (store *KubernetesStore) createConfigMap(configMap *corev1.ConfigMap) error {
- labelsMap := map[string]string{
- constant.AppInstanceLabelKey: store.clusterName,
- constant.AppManagedByLabelKey: "kubeblocks",
- }
- if !store.IsLeaderClusterWide {
- labelsMap[constant.KBAppComponentLabelKey] = store.componentName
- }
-
- configMap.Labels = labelsMap
- configMap.Namespace = store.namespace
- configMap.OwnerReferences = []metav1.OwnerReference{getOwnerRef(store.cluster)}
- _, err := store.clientset.CoreV1().ConfigMaps(store.namespace).Create(store.ctx, configMap, metav1.CreateOptions{})
- if err != nil {
- return err
- }
- return nil
-}
-
-func (store *KubernetesStore) AddCurrentMember() error {
- return nil
-}
-
-// TODO: Use the database instance's character type to determine its port number more precisely
-func getDBPort(pod *corev1.Pod) string {
- mainContainer := pod.Spec.Containers[0]
- port := mainContainer.Ports[0]
- dbPort := port.ContainerPort
- return strconv.Itoa(int(dbPort))
-}
-
-func getLorryPort(pod *corev1.Pod) string {
- for _, container := range pod.Spec.Containers {
- for _, port := range container.Ports {
- if port.Name == constant.LorryHTTPPortName {
- return strconv.Itoa(int(port.ContainerPort))
- }
- }
- }
- return ""
-}
-
-func getOwnerRef(cluster *Cluster) metav1.OwnerReference {
- clusterObj := cluster.Resource.(*appsv1alpha1.Cluster)
- gvk, _ := apiutil.GVKForObject(clusterObj, scheme.Scheme)
- ownerRef := metav1.OwnerReference{
- APIVersion: gvk.GroupVersion().String(),
- Kind: gvk.Kind,
- UID: clusterObj.UID,
- Name: clusterObj.Name,
- }
- return ownerRef
-}
diff --git a/dcs/types.go b/dcs/types.go
deleted file mode 100644
index 593d242..0000000
--- a/dcs/types.go
+++ /dev/null
@@ -1,271 +0,0 @@
-/*
-Copyright (C) 2022-2023 ApeCloud Co., Ltd
-
-This file is part of KubeBlocks project
-
-This program is free software: you can redistribute it and/or modify
-it under the terms of the GNU Affero General Public License as published by
-the Free Software Foundation, either version 3 of the License, or
-(at your option) any later version.
-
-This program is distributed in the hope that it will be useful
-but WITHOUT ANY WARRANTY; without even the implied warranty of
-MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-GNU Affero General Public License for more details.
-
-You should have received a copy of the GNU Affero General Public License
-along with this program. If not, see .
-*/
-
-package dcs
-
-import (
- "fmt"
- "strings"
-
- "github.com/spf13/viper"
-
- "github.com/apecloud/dbctl/constant"
- "github.com/apecloud/dbctl/util"
-)
-
-type Cluster struct {
- ClusterCompName string
- Namespace string
- Replicas int32
- HaConfig *HaConfig
- Leader *Leader
- Members []Member
- Switchover *Switchover
- Extra map[string]string
- Resource any
-}
-
-func (c *Cluster) HasMember(memberName string) bool {
- for _, member := range c.Members {
- if memberName == member.Name {
- return true
- }
- }
- return false
-}
-
-func (c *Cluster) GetLeaderMember() *Member {
- if c.Leader == nil || c.Leader.Name == "" {
- return nil
- }
-
- return c.GetMemberWithName(c.Leader.Name)
-}
-
-func (c *Cluster) GetMemberWithName(name string) *Member {
- for _, m := range c.Members {
- if m.Name == name {
- return &m
- }
- }
-
- return nil
-}
-
-func (c *Cluster) GetMemberWithHost(host string) *Member {
- for _, m := range c.Members {
- if strings.HasPrefix(host, m.Name) || strings.HasPrefix(host, m.PodIP) {
- return &m
- }
- }
-
- return nil
-}
-
-func (c *Cluster) GetMemberName() []string {
- var memberList []string
- for _, member := range c.Members {
- memberList = append(memberList, member.Name)
- }
-
- return memberList
-}
-
-func (c *Cluster) IsLocked() bool {
- return c.Leader != nil && c.Leader.Name != ""
-}
-
-func (c *Cluster) GetMemberAddrWithPort(member Member) string {
- addr := c.GetMemberAddr(member)
- return fmt.Sprintf("%s:%s", addr, member.DBPort)
-}
-
-func (c *Cluster) GetMemberAddr(member Member) string {
- if member.UseIP {
- return member.PodIP
- }
- clusterDomain := viper.GetString(constant.KubernetesClusterDomainEnv)
- clusterCompName := ""
- index := strings.LastIndex(member.Name, "-")
- if index > 0 {
- clusterCompName = member.Name[:index]
- }
- return fmt.Sprintf("%s.%s-headless.%s.svc.%s", member.Name, clusterCompName, c.Namespace, clusterDomain)
-}
-
-func (c *Cluster) GetMemberShortAddr(member Member) string {
- clusterCompName := ""
- index := strings.LastIndex(member.Name, "-")
- if index > 0 {
- clusterCompName = member.Name[:index]
- }
- return fmt.Sprintf("%s.%s-headless", member.Name, clusterCompName)
-}
-
-func (c *Cluster) GetMemberAddrs() []string {
- hosts := make([]string, len(c.Members))
- for i, member := range c.Members {
- hosts[i] = c.GetMemberAddrWithPort(member)
- }
- return hosts
-}
-
-type MemberToDelete struct {
- UID string
- IsFinished bool
-}
-
-type HaConfig struct {
- index string
- ttl int
- enable bool
- maxLagOnSwitchover int64
- DeleteMembers map[string]MemberToDelete
- resource any
-}
-
-func (c *HaConfig) GetTTL() int {
- return c.ttl
-}
-
-func (c *HaConfig) IsEnable() bool {
- return c.enable
-}
-
-func (c *HaConfig) SetEnable(enable bool) {
- c.enable = enable
-}
-
-func (c *HaConfig) GetMaxLagOnSwitchover() int64 {
- return c.maxLagOnSwitchover
-}
-
-func (c *HaConfig) IsDeleting(member *Member) bool {
- memberToDelete := c.GetMemberToDelete(member)
- return memberToDelete != nil
-}
-
-func (c *HaConfig) IsDeleted(member *Member) bool {
- memberToDelete := c.GetMemberToDelete(member)
- if memberToDelete == nil {
- return false
- }
- return memberToDelete.IsFinished
-}
-
-func (c *HaConfig) FinishDeleted(member *Member) {
- memberToDelete := c.GetMemberToDelete(member)
- memberToDelete.IsFinished = true
- c.DeleteMembers[member.Name] = *memberToDelete
-}
-
-func (c *HaConfig) GetMemberToDelete(member *Member) *MemberToDelete {
- memberToDelete, ok := c.DeleteMembers[member.Name]
- if !ok {
- return nil
- }
-
- if memberToDelete.UID != member.UID {
- return nil
- }
- return &memberToDelete
-}
-
-func (c *HaConfig) AddMemberToDelete(member *Member) {
- memberToDelete := MemberToDelete{
- UID: member.UID,
- IsFinished: false,
- }
- c.DeleteMembers[member.Name] = memberToDelete
-}
-
-type Leader struct {
- DBState *DBState
- Index string
- Name string
- AcquireTime int64
- RenewTime int64
- TTL int
- Resource any
-}
-
-type DBState struct {
- OpTimestamp int64
- Extra map[string]string
-}
-type Member struct {
- Index string
- Name string
- Role string
- PodIP string
- DBPort string
- LorryPort string
- HAPort string
- UID string
- UseIP bool
- resource any
- ComponentName string
-}
-
-func (m *Member) GetName() string {
- return m.Name
-}
-
-func (m *Member) IsLorryReady() bool {
- if m.PodIP == "" {
- return false
- }
- ready, err := util.IsTCPReady(m.PodIP, m.LorryPort)
- if err != nil {
- return false
- }
- return ready
-}
-
-// func newMember(index string, name string, role string, url string) *Member {
-// return &Member{
-// Index: index,
-// Name: name,
-// Role: role,
-// }
-// }
-
-type Switchover struct {
- Index string
- Leader string
- Candidate string
- ScheduledAt int64
-}
-
-func newSwitchover(index string, leader string, candidate string, scheduledAt int64) *Switchover {
- return &Switchover{
- Index: index,
- Leader: leader,
- Candidate: candidate,
- ScheduledAt: scheduledAt,
- }
-}
-
-func (s *Switchover) GetLeader() string {
- return s.Leader
-}
-
-func (s *Switchover) GetCandidate() string {
- return s.Candidate
-}
diff --git a/docker/Dockerfile b/docker/Dockerfile
index 5b8be36..ab27429 100644
--- a/docker/Dockerfile
+++ b/docker/Dockerfile
@@ -41,8 +41,6 @@ RUN --mount=type=bind,target=. \
FROM docker.io/alpine:3.22 as dist
ARG APK_MIRROR
-# copy files
-COPY config/ /config/
COPY --from=builder /out/dbctl /bin
USER 65532:65532
diff --git a/docker/docker.mk b/docker/docker.mk
index bb0e341..e94b107 100644
--- a/docker/docker.mk
+++ b/docker/docker.mk
@@ -31,7 +31,7 @@ DOCKERFILE_DIR?=./docker
BUILDX_PLATFORMS ?= linux/amd64,linux/arm64
# Image URL to use all building/pushing image targets
-LORRY_IMG ?= docker.io/apecloud/$(APP_NAME)
+DBCTL_IMG ?= docker.io/apecloud/$(APP_NAME)
# Update whenever you upgrade dev container image
DEV_CONTAINER_VERSION_TAG ?= latest
@@ -45,30 +45,30 @@ DOCKER_BUILD_ARGS ?=
DOCKER_BUILD_ARGS += $(GO_BUILD_ARGS) $(BUILD_ARGS)
-.PHONY: build-lorry-image
-build-lorry-image: install-docker-buildx ## Build lorry container image.
+.PHONY: build-dbctl-image
+build-dbctl-image: install-docker-buildx ## Build dbctl container image.
ifneq ($(BUILDX_ENABLED), true)
- $(DOCKER) build . $(DOCKER_BUILD_ARGS) --file $(DOCKERFILE_DIR)/Dockerfile --tag ${LORRY_IMG}:${VERSION} --tag ${LORRY_IMG}:latest
+ $(DOCKER) build . $(DOCKER_BUILD_ARGS) --file $(DOCKERFILE_DIR)/Dockerfile --tag ${DBCTL_IMG}:${VERSION} --tag ${DBCTL_IMG}:latest
else
ifeq ($(TAG_LATEST), true)
- $(DOCKER) buildx build . $(DOCKER_BUILD_ARGS) --file $(DOCKERFILE_DIR)/Dockerfile --platform $(BUILDX_PLATFORMS) --tag ${LORRY_IMG}:latest
+ $(DOCKER) buildx build . $(DOCKER_BUILD_ARGS) --file $(DOCKERFILE_DIR)/Dockerfile --platform $(BUILDX_PLATFORMS) --tag ${DBCTL_IMG}:latest
else
- $(DOCKER) buildx build . $(DOCKER_BUILD_ARGS) --file $(DOCKERFILE_DIR)/Dockerfile --platform $(BUILDX_PLATFORMS) --tag ${LORRY_IMG}:${VERSION}
+ $(DOCKER) buildx build . $(DOCKER_BUILD_ARGS) --file $(DOCKERFILE_DIR)/Dockerfile --platform $(BUILDX_PLATFORMS) --tag ${DBCTL_IMG}:${VERSION}
endif
endif
-.PHONY: push-lorry-image
-push-lorry-image: install-docker-buildx ## Push lorry container image.
+.PHONY: push-dbctl-image
+push-dbctl-image: install-docker-buildx ## Push dbctl container image.
ifneq ($(BUILDX_ENABLED), true)
ifeq ($(TAG_LATEST), true)
- $(DOCKER) push ${LORRY_IMG}:latest
+ $(DOCKER) push ${DBCTL_IMG}:latest
else
- $(DOCKER) push ${LORRY_IMG}:${VERSION}
+ $(DOCKER) push ${DBCTL_IMG}:${VERSION}
endif
else
ifeq ($(TAG_LATEST), true)
- $(DOCKER) buildx build . $(DOCKER_BUILD_ARGS) --file $(DOCKERFILE_DIR)/Dockerfile --platform $(BUILDX_PLATFORMS) --tag ${LORRY_IMG}:latest --push
+ $(DOCKER) buildx build . $(DOCKER_BUILD_ARGS) --file $(DOCKERFILE_DIR)/Dockerfile --platform $(BUILDX_PLATFORMS) --tag ${DBCTL_IMG}:latest --push
else
- $(DOCKER) buildx build . $(DOCKER_BUILD_ARGS) --file $(DOCKERFILE_DIR)/Dockerfile --platform $(BUILDX_PLATFORMS) --tag ${LORRY_IMG}:${VERSION} --push
+ $(DOCKER) buildx build . $(DOCKER_BUILD_ARGS) --file $(DOCKERFILE_DIR)/Dockerfile --platform $(BUILDX_PLATFORMS) --tag ${DBCTL_IMG}:${VERSION} --push
endif
endif
\ No newline at end of file
diff --git a/docs/user_docs/dbctl.md b/docs/user_docs/dbctl.md
index f4aa060..2791b5c 100644
--- a/docs/user_docs/dbctl.md
+++ b/docs/user_docs/dbctl.md
@@ -4,75 +4,15 @@ description: KubeBlocks dbctl overview
sidebar_position: 1
---
-## [createuser](dbctl_database_createuser.md)
-
-create user.
-
-
-
-## [deleteuser](dbctl_database_deleteuser.md)
-
-delete user.
-
-
-
-## [describeuser](dbctl_database_describeuser.md)
-
-describe user.
-
-
-
## [getrole](dbctl_database_getrole.md)
get role of the replica.
-## [grant-role](dbctl_database_grant-role.md)
-
-grant user role.
-
-
-
-## [joinmember](dbctl_database_joinmember.md)
-
-execute a join member request.
-
-
-
-## [leavemember](dbctl_database_leavemember.md)
-
-execute a leave member request.
-
-
-
-## [listsystemaccounts](dbctl_database_listsystemaccounts.md)
-
-list system accounts.
-
-
-
-## [listusers](dbctl_database_listusers.md)
-
-list normal users.
-
-
-
-## [revoke-role](dbctl_database_revoke-role.md)
-
-revoke user role.
-
-
-
## [service](dbctl_database_service.md)
Run dbctl as a daemon and provide api service.
-## [switchover](dbctl_database_switchover.md)
-
-execute a switchover request.
-
-
-
diff --git a/docs/user_docs/dbctl_database.md b/docs/user_docs/dbctl_database.md
index 3e14d1c..3c1e9e4 100644
--- a/docs/user_docs/dbctl_database.md
+++ b/docs/user_docs/dbctl_database.md
@@ -33,8 +33,6 @@ dbctl mongodb createuser --username root --password password
```
--add_dir_header If true, adds the file directory to the header of the log messages
--alsologtostderr log to standard error as well as files (no effect when -logtostderr=true)
- --config-path string dbctl default config directory for builtin type (default "/tools/config/dbctl/components/")
- --disable-dns-checker disable dns checker, for test&dev
--kubeconfig string Paths to a kubeconfig. Only required if out-of-cluster.
--log_backtrace_at traceLocation when logging hits line file:N, emit a stack trace (default :0)
--log_dir string If non-empty, write log files in this directory (no effect when -logtostderr=true)
@@ -45,7 +43,6 @@ dbctl mongodb createuser --username root --password password
--skip_headers If true, avoid header prefixes in the log messages
--skip_log_headers If true, avoid headers when opening log files (no effect when -logtostderr=true)
--stderrthreshold severity logs at or above this threshold go to stderr when writing to files and stderr (no effect when -logtostderr=true or -alsologtostderr=true) (default 2)
- --tools-dir string The directory of tools binaries (default "/tools/")
-v, --v Level number for the log level verbosity
--vmodule moduleSpec comma-separated list of pattern=N settings for file-filtered logging
--zap-devel Development Mode defaults(encoder=consoleEncoder,logLevel=Debug,stackTraceLevel=Warn). Production Mode defaults(encoder=jsonEncoder,logLevel=Info,stackTraceLevel=Error) (default true)
@@ -58,18 +55,8 @@ dbctl mongodb createuser --username root --password password
### SEE ALSO
-* [dbctl database createuser](dbctl_database_createuser.md) - create user.
-* [dbctl database deleteuser](dbctl_database_deleteuser.md) - delete user.
-* [dbctl database describeuser](dbctl_database_describeuser.md) - describe user.
* [dbctl database getrole](dbctl_database_getrole.md) - get role of the replica.
-* [dbctl database grant-role](dbctl_database_grant-role.md) - grant user role.
-* [dbctl database joinmember](dbctl_database_joinmember.md) - execute a join member request.
-* [dbctl database leavemember](dbctl_database_leavemember.md) - execute a leave member request.
-* [dbctl database listsystemaccounts](dbctl_database_listsystemaccounts.md) - list system accounts.
-* [dbctl database listusers](dbctl_database_listusers.md) - list normal users.
-* [dbctl database revoke-role](dbctl_database_revoke-role.md) - revoke user role.
* [dbctl database service](dbctl_database_service.md) - Run dbctl as a daemon and provide api service.
-* [dbctl database switchover](dbctl_database_switchover.md) - execute a switchover request.
#### Go Back to [dbctl Overview](dbctl.md) Homepage.
diff --git a/docs/user_docs/dbctl_database_createuser.md b/docs/user_docs/dbctl_database_createuser.md
deleted file mode 100644
index 8017bbe..0000000
--- a/docs/user_docs/dbctl_database_createuser.md
+++ /dev/null
@@ -1,60 +0,0 @@
----
-title: dbctl database createuser
----
-
-create user.
-
-```
-dbctl database createuser [flags]
-```
-
-### Examples
-
-```
-
-dbctl database createuser --username xxx --password xxx
-
-```
-
-### Options
-
-```
- -h, --help Print this help message
- --password string The password of user to create
- --rolename string The role of user to create
- --username string The name of user to create
-```
-
-### Options inherited from parent commands
-
-```
- --add_dir_header If true, adds the file directory to the header of the log messages
- --alsologtostderr log to standard error as well as files (no effect when -logtostderr=true)
- --config-path string dbctl default config directory for builtin type (default "/tools/config/dbctl/components/")
- --disable-dns-checker disable dns checker, for test&dev
- --kubeconfig string Paths to a kubeconfig. Only required if out-of-cluster.
- --log_backtrace_at traceLocation when logging hits line file:N, emit a stack trace (default :0)
- --log_dir string If non-empty, write log files in this directory (no effect when -logtostderr=true)
- --log_file string If non-empty, use this log file (no effect when -logtostderr=true)
- --log_file_max_size uint Defines the maximum size a log file can grow to (no effect when -logtostderr=true). Unit is megabytes. If the value is 0, the maximum file size is unlimited. (default 1800)
- --logtostderr log to standard error instead of files (default true)
- --one_output If true, only write logs to their native severity level (vs also writing to each lower severity level; no effect when -logtostderr=true)
- --skip_headers If true, avoid header prefixes in the log messages
- --skip_log_headers If true, avoid headers when opening log files (no effect when -logtostderr=true)
- --stderrthreshold severity logs at or above this threshold go to stderr when writing to files and stderr (no effect when -logtostderr=true or -alsologtostderr=true) (default 2)
- --tools-dir string The directory of tools binaries (default "/tools/")
- -v, --v Level number for the log level verbosity
- --vmodule moduleSpec comma-separated list of pattern=N settings for file-filtered logging
- --zap-devel Development Mode defaults(encoder=consoleEncoder,logLevel=Debug,stackTraceLevel=Warn). Production Mode defaults(encoder=jsonEncoder,logLevel=Info,stackTraceLevel=Error) (default true)
- --zap-encoder encoder Zap log encoding (one of 'json' or 'console')
- --zap-log-level level Zap Level to configure the verbosity of logging. Can be one of 'debug', 'info', 'error', or any integer value > 0 which corresponds to custom debug levels of increasing verbosity
- --zap-stacktrace-level level Zap Level at and above which stacktraces are captured (one of 'info', 'error', 'panic').
- --zap-time-encoding time-encoding Zap time encoding (one of 'epoch', 'millis', 'nano', 'iso8601', 'rfc3339' or 'rfc3339nano'). Defaults to 'epoch'.
-```
-
-### SEE ALSO
-
-* [dbctl database](dbctl_database.md) - specify database.
-
-#### Go Back to [dbctl Overview](dbctl.md) Homepage.
-
diff --git a/docs/user_docs/dbctl_database_deleteuser.md b/docs/user_docs/dbctl_database_deleteuser.md
deleted file mode 100644
index 0276abb..0000000
--- a/docs/user_docs/dbctl_database_deleteuser.md
+++ /dev/null
@@ -1,58 +0,0 @@
----
-title: dbctl database deleteuser
----
-
-delete user.
-
-```
-dbctl database deleteuser [flags]
-```
-
-### Examples
-
-```
-
-dbctl database deleteuser --username xxx
-
-```
-
-### Options
-
-```
- -h, --help Print this help message
- --username string The name of user to delete
-```
-
-### Options inherited from parent commands
-
-```
- --add_dir_header If true, adds the file directory to the header of the log messages
- --alsologtostderr log to standard error as well as files (no effect when -logtostderr=true)
- --config-path string dbctl default config directory for builtin type (default "/tools/config/dbctl/components/")
- --disable-dns-checker disable dns checker, for test&dev
- --kubeconfig string Paths to a kubeconfig. Only required if out-of-cluster.
- --log_backtrace_at traceLocation when logging hits line file:N, emit a stack trace (default :0)
- --log_dir string If non-empty, write log files in this directory (no effect when -logtostderr=true)
- --log_file string If non-empty, use this log file (no effect when -logtostderr=true)
- --log_file_max_size uint Defines the maximum size a log file can grow to (no effect when -logtostderr=true). Unit is megabytes. If the value is 0, the maximum file size is unlimited. (default 1800)
- --logtostderr log to standard error instead of files (default true)
- --one_output If true, only write logs to their native severity level (vs also writing to each lower severity level; no effect when -logtostderr=true)
- --skip_headers If true, avoid header prefixes in the log messages
- --skip_log_headers If true, avoid headers when opening log files (no effect when -logtostderr=true)
- --stderrthreshold severity logs at or above this threshold go to stderr when writing to files and stderr (no effect when -logtostderr=true or -alsologtostderr=true) (default 2)
- --tools-dir string The directory of tools binaries (default "/tools/")
- -v, --v Level number for the log level verbosity
- --vmodule moduleSpec comma-separated list of pattern=N settings for file-filtered logging
- --zap-devel Development Mode defaults(encoder=consoleEncoder,logLevel=Debug,stackTraceLevel=Warn). Production Mode defaults(encoder=jsonEncoder,logLevel=Info,stackTraceLevel=Error) (default true)
- --zap-encoder encoder Zap log encoding (one of 'json' or 'console')
- --zap-log-level level Zap Level to configure the verbosity of logging. Can be one of 'debug', 'info', 'error', or any integer value > 0 which corresponds to custom debug levels of increasing verbosity
- --zap-stacktrace-level level Zap Level at and above which stacktraces are captured (one of 'info', 'error', 'panic').
- --zap-time-encoding time-encoding Zap time encoding (one of 'epoch', 'millis', 'nano', 'iso8601', 'rfc3339' or 'rfc3339nano'). Defaults to 'epoch'.
-```
-
-### SEE ALSO
-
-* [dbctl database](dbctl_database.md) - specify database.
-
-#### Go Back to [dbctl Overview](dbctl.md) Homepage.
-
diff --git a/docs/user_docs/dbctl_database_describeuser.md b/docs/user_docs/dbctl_database_describeuser.md
deleted file mode 100644
index 170ae60..0000000
--- a/docs/user_docs/dbctl_database_describeuser.md
+++ /dev/null
@@ -1,58 +0,0 @@
----
-title: dbctl database describeuser
----
-
-describe user.
-
-```
-dbctl database describeuser [flags]
-```
-
-### Examples
-
-```
-
-dbctl database describeuser --username xxx
-
-```
-
-### Options
-
-```
- -h, --help Print this help message
- --username string The name of user to describe
-```
-
-### Options inherited from parent commands
-
-```
- --add_dir_header If true, adds the file directory to the header of the log messages
- --alsologtostderr log to standard error as well as files (no effect when -logtostderr=true)
- --config-path string dbctl default config directory for builtin type (default "/tools/config/dbctl/components/")
- --disable-dns-checker disable dns checker, for test&dev
- --kubeconfig string Paths to a kubeconfig. Only required if out-of-cluster.
- --log_backtrace_at traceLocation when logging hits line file:N, emit a stack trace (default :0)
- --log_dir string If non-empty, write log files in this directory (no effect when -logtostderr=true)
- --log_file string If non-empty, use this log file (no effect when -logtostderr=true)
- --log_file_max_size uint Defines the maximum size a log file can grow to (no effect when -logtostderr=true). Unit is megabytes. If the value is 0, the maximum file size is unlimited. (default 1800)
- --logtostderr log to standard error instead of files (default true)
- --one_output If true, only write logs to their native severity level (vs also writing to each lower severity level; no effect when -logtostderr=true)
- --skip_headers If true, avoid header prefixes in the log messages
- --skip_log_headers If true, avoid headers when opening log files (no effect when -logtostderr=true)
- --stderrthreshold severity logs at or above this threshold go to stderr when writing to files and stderr (no effect when -logtostderr=true or -alsologtostderr=true) (default 2)
- --tools-dir string The directory of tools binaries (default "/tools/")
- -v, --v Level number for the log level verbosity
- --vmodule moduleSpec comma-separated list of pattern=N settings for file-filtered logging
- --zap-devel Development Mode defaults(encoder=consoleEncoder,logLevel=Debug,stackTraceLevel=Warn). Production Mode defaults(encoder=jsonEncoder,logLevel=Info,stackTraceLevel=Error) (default true)
- --zap-encoder encoder Zap log encoding (one of 'json' or 'console')
- --zap-log-level level Zap Level to configure the verbosity of logging. Can be one of 'debug', 'info', 'error', or any integer value > 0 which corresponds to custom debug levels of increasing verbosity
- --zap-stacktrace-level level Zap Level at and above which stacktraces are captured (one of 'info', 'error', 'panic').
- --zap-time-encoding time-encoding Zap time encoding (one of 'epoch', 'millis', 'nano', 'iso8601', 'rfc3339' or 'rfc3339nano'). Defaults to 'epoch'.
-```
-
-### SEE ALSO
-
-* [dbctl database](dbctl_database.md) - specify database.
-
-#### Go Back to [dbctl Overview](dbctl.md) Homepage.
-
diff --git a/docs/user_docs/dbctl_database_getrole.md b/docs/user_docs/dbctl_database_getrole.md
index fbc9627..157eead 100644
--- a/docs/user_docs/dbctl_database_getrole.md
+++ b/docs/user_docs/dbctl_database_getrole.md
@@ -27,8 +27,6 @@ dbctl database getrole
```
--add_dir_header If true, adds the file directory to the header of the log messages
--alsologtostderr log to standard error as well as files (no effect when -logtostderr=true)
- --config-path string dbctl default config directory for builtin type (default "/tools/config/dbctl/components/")
- --disable-dns-checker disable dns checker, for test&dev
--kubeconfig string Paths to a kubeconfig. Only required if out-of-cluster.
--log_backtrace_at traceLocation when logging hits line file:N, emit a stack trace (default :0)
--log_dir string If non-empty, write log files in this directory (no effect when -logtostderr=true)
@@ -39,7 +37,6 @@ dbctl database getrole
--skip_headers If true, avoid header prefixes in the log messages
--skip_log_headers If true, avoid headers when opening log files (no effect when -logtostderr=true)
--stderrthreshold severity logs at or above this threshold go to stderr when writing to files and stderr (no effect when -logtostderr=true or -alsologtostderr=true) (default 2)
- --tools-dir string The directory of tools binaries (default "/tools/")
-v, --v Level number for the log level verbosity
--vmodule moduleSpec comma-separated list of pattern=N settings for file-filtered logging
--zap-devel Development Mode defaults(encoder=consoleEncoder,logLevel=Debug,stackTraceLevel=Warn). Production Mode defaults(encoder=jsonEncoder,logLevel=Info,stackTraceLevel=Error) (default true)
diff --git a/docs/user_docs/dbctl_database_grant-role.md b/docs/user_docs/dbctl_database_grant-role.md
deleted file mode 100644
index 74d2e13..0000000
--- a/docs/user_docs/dbctl_database_grant-role.md
+++ /dev/null
@@ -1,59 +0,0 @@
----
-title: dbctl database grant-role
----
-
-grant user role.
-
-```
-dbctl database grant-role [flags]
-```
-
-### Examples
-
-```
-
-dbctl database grant-role --username xxx --rolename xxx
-
-```
-
-### Options
-
-```
- -h, --help Print this help message
- --rolename string The name of role to grant
- --username string The name of user to grant
-```
-
-### Options inherited from parent commands
-
-```
- --add_dir_header If true, adds the file directory to the header of the log messages
- --alsologtostderr log to standard error as well as files (no effect when -logtostderr=true)
- --config-path string dbctl default config directory for builtin type (default "/tools/config/dbctl/components/")
- --disable-dns-checker disable dns checker, for test&dev
- --kubeconfig string Paths to a kubeconfig. Only required if out-of-cluster.
- --log_backtrace_at traceLocation when logging hits line file:N, emit a stack trace (default :0)
- --log_dir string If non-empty, write log files in this directory (no effect when -logtostderr=true)
- --log_file string If non-empty, use this log file (no effect when -logtostderr=true)
- --log_file_max_size uint Defines the maximum size a log file can grow to (no effect when -logtostderr=true). Unit is megabytes. If the value is 0, the maximum file size is unlimited. (default 1800)
- --logtostderr log to standard error instead of files (default true)
- --one_output If true, only write logs to their native severity level (vs also writing to each lower severity level; no effect when -logtostderr=true)
- --skip_headers If true, avoid header prefixes in the log messages
- --skip_log_headers If true, avoid headers when opening log files (no effect when -logtostderr=true)
- --stderrthreshold severity logs at or above this threshold go to stderr when writing to files and stderr (no effect when -logtostderr=true or -alsologtostderr=true) (default 2)
- --tools-dir string The directory of tools binaries (default "/tools/")
- -v, --v Level number for the log level verbosity
- --vmodule moduleSpec comma-separated list of pattern=N settings for file-filtered logging
- --zap-devel Development Mode defaults(encoder=consoleEncoder,logLevel=Debug,stackTraceLevel=Warn). Production Mode defaults(encoder=jsonEncoder,logLevel=Info,stackTraceLevel=Error) (default true)
- --zap-encoder encoder Zap log encoding (one of 'json' or 'console')
- --zap-log-level level Zap Level to configure the verbosity of logging. Can be one of 'debug', 'info', 'error', or any integer value > 0 which corresponds to custom debug levels of increasing verbosity
- --zap-stacktrace-level level Zap Level at and above which stacktraces are captured (one of 'info', 'error', 'panic').
- --zap-time-encoding time-encoding Zap time encoding (one of 'epoch', 'millis', 'nano', 'iso8601', 'rfc3339' or 'rfc3339nano'). Defaults to 'epoch'.
-```
-
-### SEE ALSO
-
-* [dbctl database](dbctl_database.md) - specify database.
-
-#### Go Back to [dbctl Overview](dbctl.md) Homepage.
-
diff --git a/docs/user_docs/dbctl_database_joinmember.md b/docs/user_docs/dbctl_database_joinmember.md
deleted file mode 100644
index fa317bc..0000000
--- a/docs/user_docs/dbctl_database_joinmember.md
+++ /dev/null
@@ -1,57 +0,0 @@
----
-title: dbctl database joinmember
----
-
-execute a join member request.
-
-```
-dbctl database joinmember [flags]
-```
-
-### Examples
-
-```
-
-dbctl database joinmember
-
-```
-
-### Options
-
-```
- -h, --help Print this help message
-```
-
-### Options inherited from parent commands
-
-```
- --add_dir_header If true, adds the file directory to the header of the log messages
- --alsologtostderr log to standard error as well as files (no effect when -logtostderr=true)
- --config-path string dbctl default config directory for builtin type (default "/tools/config/dbctl/components/")
- --disable-dns-checker disable dns checker, for test&dev
- --kubeconfig string Paths to a kubeconfig. Only required if out-of-cluster.
- --log_backtrace_at traceLocation when logging hits line file:N, emit a stack trace (default :0)
- --log_dir string If non-empty, write log files in this directory (no effect when -logtostderr=true)
- --log_file string If non-empty, use this log file (no effect when -logtostderr=true)
- --log_file_max_size uint Defines the maximum size a log file can grow to (no effect when -logtostderr=true). Unit is megabytes. If the value is 0, the maximum file size is unlimited. (default 1800)
- --logtostderr log to standard error instead of files (default true)
- --one_output If true, only write logs to their native severity level (vs also writing to each lower severity level; no effect when -logtostderr=true)
- --skip_headers If true, avoid header prefixes in the log messages
- --skip_log_headers If true, avoid headers when opening log files (no effect when -logtostderr=true)
- --stderrthreshold severity logs at or above this threshold go to stderr when writing to files and stderr (no effect when -logtostderr=true or -alsologtostderr=true) (default 2)
- --tools-dir string The directory of tools binaries (default "/tools/")
- -v, --v Level number for the log level verbosity
- --vmodule moduleSpec comma-separated list of pattern=N settings for file-filtered logging
- --zap-devel Development Mode defaults(encoder=consoleEncoder,logLevel=Debug,stackTraceLevel=Warn). Production Mode defaults(encoder=jsonEncoder,logLevel=Info,stackTraceLevel=Error) (default true)
- --zap-encoder encoder Zap log encoding (one of 'json' or 'console')
- --zap-log-level level Zap Level to configure the verbosity of logging. Can be one of 'debug', 'info', 'error', or any integer value > 0 which corresponds to custom debug levels of increasing verbosity
- --zap-stacktrace-level level Zap Level at and above which stacktraces are captured (one of 'info', 'error', 'panic').
- --zap-time-encoding time-encoding Zap time encoding (one of 'epoch', 'millis', 'nano', 'iso8601', 'rfc3339' or 'rfc3339nano'). Defaults to 'epoch'.
-```
-
-### SEE ALSO
-
-* [dbctl database](dbctl_database.md) - specify database.
-
-#### Go Back to [dbctl Overview](dbctl.md) Homepage.
-
diff --git a/docs/user_docs/dbctl_database_leavemember.md b/docs/user_docs/dbctl_database_leavemember.md
deleted file mode 100644
index ce6c87a..0000000
--- a/docs/user_docs/dbctl_database_leavemember.md
+++ /dev/null
@@ -1,57 +0,0 @@
----
-title: dbctl database leavemember
----
-
-execute a leave member request.
-
-```
-dbctl database leavemember [flags]
-```
-
-### Examples
-
-```
-
-dbctl database leavemember
-
-```
-
-### Options
-
-```
- -h, --help Print this help message
-```
-
-### Options inherited from parent commands
-
-```
- --add_dir_header If true, adds the file directory to the header of the log messages
- --alsologtostderr log to standard error as well as files (no effect when -logtostderr=true)
- --config-path string dbctl default config directory for builtin type (default "/tools/config/dbctl/components/")
- --disable-dns-checker disable dns checker, for test&dev
- --kubeconfig string Paths to a kubeconfig. Only required if out-of-cluster.
- --log_backtrace_at traceLocation when logging hits line file:N, emit a stack trace (default :0)
- --log_dir string If non-empty, write log files in this directory (no effect when -logtostderr=true)
- --log_file string If non-empty, use this log file (no effect when -logtostderr=true)
- --log_file_max_size uint Defines the maximum size a log file can grow to (no effect when -logtostderr=true). Unit is megabytes. If the value is 0, the maximum file size is unlimited. (default 1800)
- --logtostderr log to standard error instead of files (default true)
- --one_output If true, only write logs to their native severity level (vs also writing to each lower severity level; no effect when -logtostderr=true)
- --skip_headers If true, avoid header prefixes in the log messages
- --skip_log_headers If true, avoid headers when opening log files (no effect when -logtostderr=true)
- --stderrthreshold severity logs at or above this threshold go to stderr when writing to files and stderr (no effect when -logtostderr=true or -alsologtostderr=true) (default 2)
- --tools-dir string The directory of tools binaries (default "/tools/")
- -v, --v Level number for the log level verbosity
- --vmodule moduleSpec comma-separated list of pattern=N settings for file-filtered logging
- --zap-devel Development Mode defaults(encoder=consoleEncoder,logLevel=Debug,stackTraceLevel=Warn). Production Mode defaults(encoder=jsonEncoder,logLevel=Info,stackTraceLevel=Error) (default true)
- --zap-encoder encoder Zap log encoding (one of 'json' or 'console')
- --zap-log-level level Zap Level to configure the verbosity of logging. Can be one of 'debug', 'info', 'error', or any integer value > 0 which corresponds to custom debug levels of increasing verbosity
- --zap-stacktrace-level level Zap Level at and above which stacktraces are captured (one of 'info', 'error', 'panic').
- --zap-time-encoding time-encoding Zap time encoding (one of 'epoch', 'millis', 'nano', 'iso8601', 'rfc3339' or 'rfc3339nano'). Defaults to 'epoch'.
-```
-
-### SEE ALSO
-
-* [dbctl database](dbctl_database.md) - specify database.
-
-#### Go Back to [dbctl Overview](dbctl.md) Homepage.
-
diff --git a/docs/user_docs/dbctl_database_listsystemaccounts.md b/docs/user_docs/dbctl_database_listsystemaccounts.md
deleted file mode 100644
index a314ee7..0000000
--- a/docs/user_docs/dbctl_database_listsystemaccounts.md
+++ /dev/null
@@ -1,57 +0,0 @@
----
-title: dbctl database listsystemaccounts
----
-
-list system accounts.
-
-```
-dbctl database listsystemaccounts [flags]
-```
-
-### Examples
-
-```
-
-dbctl database listsystemaccounts
-
-```
-
-### Options
-
-```
- -h, --help Print this help message
-```
-
-### Options inherited from parent commands
-
-```
- --add_dir_header If true, adds the file directory to the header of the log messages
- --alsologtostderr log to standard error as well as files (no effect when -logtostderr=true)
- --config-path string dbctl default config directory for builtin type (default "/tools/config/dbctl/components/")
- --disable-dns-checker disable dns checker, for test&dev
- --kubeconfig string Paths to a kubeconfig. Only required if out-of-cluster.
- --log_backtrace_at traceLocation when logging hits line file:N, emit a stack trace (default :0)
- --log_dir string If non-empty, write log files in this directory (no effect when -logtostderr=true)
- --log_file string If non-empty, use this log file (no effect when -logtostderr=true)
- --log_file_max_size uint Defines the maximum size a log file can grow to (no effect when -logtostderr=true). Unit is megabytes. If the value is 0, the maximum file size is unlimited. (default 1800)
- --logtostderr log to standard error instead of files (default true)
- --one_output If true, only write logs to their native severity level (vs also writing to each lower severity level; no effect when -logtostderr=true)
- --skip_headers If true, avoid header prefixes in the log messages
- --skip_log_headers If true, avoid headers when opening log files (no effect when -logtostderr=true)
- --stderrthreshold severity logs at or above this threshold go to stderr when writing to files and stderr (no effect when -logtostderr=true or -alsologtostderr=true) (default 2)
- --tools-dir string The directory of tools binaries (default "/tools/")
- -v, --v Level number for the log level verbosity
- --vmodule moduleSpec comma-separated list of pattern=N settings for file-filtered logging
- --zap-devel Development Mode defaults(encoder=consoleEncoder,logLevel=Debug,stackTraceLevel=Warn). Production Mode defaults(encoder=jsonEncoder,logLevel=Info,stackTraceLevel=Error) (default true)
- --zap-encoder encoder Zap log encoding (one of 'json' or 'console')
- --zap-log-level level Zap Level to configure the verbosity of logging. Can be one of 'debug', 'info', 'error', or any integer value > 0 which corresponds to custom debug levels of increasing verbosity
- --zap-stacktrace-level level Zap Level at and above which stacktraces are captured (one of 'info', 'error', 'panic').
- --zap-time-encoding time-encoding Zap time encoding (one of 'epoch', 'millis', 'nano', 'iso8601', 'rfc3339' or 'rfc3339nano'). Defaults to 'epoch'.
-```
-
-### SEE ALSO
-
-* [dbctl database](dbctl_database.md) - specify database.
-
-#### Go Back to [dbctl Overview](dbctl.md) Homepage.
-
diff --git a/docs/user_docs/dbctl_database_listusers.md b/docs/user_docs/dbctl_database_listusers.md
deleted file mode 100644
index 3c70f0b..0000000
--- a/docs/user_docs/dbctl_database_listusers.md
+++ /dev/null
@@ -1,58 +0,0 @@
----
-title: dbctl database listusers
----
-
-list normal users.
-
-```
-dbctl database listusers [flags]
-```
-
-### Examples
-
-```
-
-dbctl database listusers
-
-```
-
-### Options
-
-```
- -h, --help Print this help message
- --lorry-addr string The addr of lorry to request (default "http://localhost:3501/v1.0/")
-```
-
-### Options inherited from parent commands
-
-```
- --add_dir_header If true, adds the file directory to the header of the log messages
- --alsologtostderr log to standard error as well as files (no effect when -logtostderr=true)
- --config-path string dbctl default config directory for builtin type (default "/tools/config/dbctl/components/")
- --disable-dns-checker disable dns checker, for test&dev
- --kubeconfig string Paths to a kubeconfig. Only required if out-of-cluster.
- --log_backtrace_at traceLocation when logging hits line file:N, emit a stack trace (default :0)
- --log_dir string If non-empty, write log files in this directory (no effect when -logtostderr=true)
- --log_file string If non-empty, use this log file (no effect when -logtostderr=true)
- --log_file_max_size uint Defines the maximum size a log file can grow to (no effect when -logtostderr=true). Unit is megabytes. If the value is 0, the maximum file size is unlimited. (default 1800)
- --logtostderr log to standard error instead of files (default true)
- --one_output If true, only write logs to their native severity level (vs also writing to each lower severity level; no effect when -logtostderr=true)
- --skip_headers If true, avoid header prefixes in the log messages
- --skip_log_headers If true, avoid headers when opening log files (no effect when -logtostderr=true)
- --stderrthreshold severity logs at or above this threshold go to stderr when writing to files and stderr (no effect when -logtostderr=true or -alsologtostderr=true) (default 2)
- --tools-dir string The directory of tools binaries (default "/tools/")
- -v, --v Level number for the log level verbosity
- --vmodule moduleSpec comma-separated list of pattern=N settings for file-filtered logging
- --zap-devel Development Mode defaults(encoder=consoleEncoder,logLevel=Debug,stackTraceLevel=Warn). Production Mode defaults(encoder=jsonEncoder,logLevel=Info,stackTraceLevel=Error) (default true)
- --zap-encoder encoder Zap log encoding (one of 'json' or 'console')
- --zap-log-level level Zap Level to configure the verbosity of logging. Can be one of 'debug', 'info', 'error', or any integer value > 0 which corresponds to custom debug levels of increasing verbosity
- --zap-stacktrace-level level Zap Level at and above which stacktraces are captured (one of 'info', 'error', 'panic').
- --zap-time-encoding time-encoding Zap time encoding (one of 'epoch', 'millis', 'nano', 'iso8601', 'rfc3339' or 'rfc3339nano'). Defaults to 'epoch'.
-```
-
-### SEE ALSO
-
-* [dbctl database](dbctl_database.md) - specify database.
-
-#### Go Back to [dbctl Overview](dbctl.md) Homepage.
-
diff --git a/docs/user_docs/dbctl_database_revoke-role.md b/docs/user_docs/dbctl_database_revoke-role.md
deleted file mode 100644
index a36cd0d..0000000
--- a/docs/user_docs/dbctl_database_revoke-role.md
+++ /dev/null
@@ -1,59 +0,0 @@
----
-title: dbctl database revoke-role
----
-
-revoke user role.
-
-```
-dbctl database revoke-role [flags]
-```
-
-### Examples
-
-```
-
-dbctl database revoke-role --username xxx --rolename xxx
-
-```
-
-### Options
-
-```
- -h, --help Print this help message
- --rolename string The name of role to revoke
- --username string The name of user to revoke
-```
-
-### Options inherited from parent commands
-
-```
- --add_dir_header If true, adds the file directory to the header of the log messages
- --alsologtostderr log to standard error as well as files (no effect when -logtostderr=true)
- --config-path string dbctl default config directory for builtin type (default "/tools/config/dbctl/components/")
- --disable-dns-checker disable dns checker, for test&dev
- --kubeconfig string Paths to a kubeconfig. Only required if out-of-cluster.
- --log_backtrace_at traceLocation when logging hits line file:N, emit a stack trace (default :0)
- --log_dir string If non-empty, write log files in this directory (no effect when -logtostderr=true)
- --log_file string If non-empty, use this log file (no effect when -logtostderr=true)
- --log_file_max_size uint Defines the maximum size a log file can grow to (no effect when -logtostderr=true). Unit is megabytes. If the value is 0, the maximum file size is unlimited. (default 1800)
- --logtostderr log to standard error instead of files (default true)
- --one_output If true, only write logs to their native severity level (vs also writing to each lower severity level; no effect when -logtostderr=true)
- --skip_headers If true, avoid header prefixes in the log messages
- --skip_log_headers If true, avoid headers when opening log files (no effect when -logtostderr=true)
- --stderrthreshold severity logs at or above this threshold go to stderr when writing to files and stderr (no effect when -logtostderr=true or -alsologtostderr=true) (default 2)
- --tools-dir string The directory of tools binaries (default "/tools/")
- -v, --v Level number for the log level verbosity
- --vmodule moduleSpec comma-separated list of pattern=N settings for file-filtered logging
- --zap-devel Development Mode defaults(encoder=consoleEncoder,logLevel=Debug,stackTraceLevel=Warn). Production Mode defaults(encoder=jsonEncoder,logLevel=Info,stackTraceLevel=Error) (default true)
- --zap-encoder encoder Zap log encoding (one of 'json' or 'console')
- --zap-log-level level Zap Level to configure the verbosity of logging. Can be one of 'debug', 'info', 'error', or any integer value > 0 which corresponds to custom debug levels of increasing verbosity
- --zap-stacktrace-level level Zap Level at and above which stacktraces are captured (one of 'info', 'error', 'panic').
- --zap-time-encoding time-encoding Zap time encoding (one of 'epoch', 'millis', 'nano', 'iso8601', 'rfc3339' or 'rfc3339nano'). Defaults to 'epoch'.
-```
-
-### SEE ALSO
-
-* [dbctl database](dbctl_database.md) - specify database.
-
-#### Go Back to [dbctl Overview](dbctl.md) Homepage.
-
diff --git a/docs/user_docs/dbctl_database_service.md b/docs/user_docs/dbctl_database_service.md
index 4527720..5391164 100644
--- a/docs/user_docs/dbctl_database_service.md
+++ b/docs/user_docs/dbctl_database_service.md
@@ -19,10 +19,10 @@ dbctl service
### Options
```
- --address string The HTTP Server listen address for Lorry service. (default "0.0.0.0")
- --api-logging Enable api logging for Lorry request. (default true)
+ --address string The HTTP Server listen address for dbctl service. (default "0.0.0.0")
+ --api-logging Enable api logging for dbctl request. (default true)
-h, --help Print this help message
- --port int The HTTP Server listen port for Lorry service. (default 3501)
+ --port int The HTTP Server listen port for dbctl service. (default 5001)
```
### Options inherited from parent commands
@@ -30,8 +30,6 @@ dbctl service
```
--add_dir_header If true, adds the file directory to the header of the log messages
--alsologtostderr log to standard error as well as files (no effect when -logtostderr=true)
- --config-path string dbctl default config directory for builtin type (default "/tools/config/dbctl/components/")
- --disable-dns-checker disable dns checker, for test&dev
--kubeconfig string Paths to a kubeconfig. Only required if out-of-cluster.
--log_backtrace_at traceLocation when logging hits line file:N, emit a stack trace (default :0)
--log_dir string If non-empty, write log files in this directory (no effect when -logtostderr=true)
@@ -42,7 +40,6 @@ dbctl service
--skip_headers If true, avoid header prefixes in the log messages
--skip_log_headers If true, avoid headers when opening log files (no effect when -logtostderr=true)
--stderrthreshold severity logs at or above this threshold go to stderr when writing to files and stderr (no effect when -logtostderr=true or -alsologtostderr=true) (default 2)
- --tools-dir string The directory of tools binaries (default "/tools/")
-v, --v Level number for the log level verbosity
--vmodule moduleSpec comma-separated list of pattern=N settings for file-filtered logging
--zap-devel Development Mode defaults(encoder=consoleEncoder,logLevel=Debug,stackTraceLevel=Warn). Production Mode defaults(encoder=jsonEncoder,logLevel=Info,stackTraceLevel=Error) (default true)
diff --git a/docs/user_docs/dbctl_database_switchover.md b/docs/user_docs/dbctl_database_switchover.md
deleted file mode 100644
index 0d8fe4f..0000000
--- a/docs/user_docs/dbctl_database_switchover.md
+++ /dev/null
@@ -1,60 +0,0 @@
----
-title: dbctl database switchover
----
-
-execute a switchover request.
-
-```
-dbctl database switchover [flags]
-```
-
-### Examples
-
-```
-
-dbctl database switchover --primary xxx --candidate xxx
-
-```
-
-### Options
-
-```
- -c, --candidate string The candidate pod name
- -f, --force force to swithover if failed
- -h, --help Print this help message
- -p, --primary string The primary pod name
-```
-
-### Options inherited from parent commands
-
-```
- --add_dir_header If true, adds the file directory to the header of the log messages
- --alsologtostderr log to standard error as well as files (no effect when -logtostderr=true)
- --config-path string dbctl default config directory for builtin type (default "/tools/config/dbctl/components/")
- --disable-dns-checker disable dns checker, for test&dev
- --kubeconfig string Paths to a kubeconfig. Only required if out-of-cluster.
- --log_backtrace_at traceLocation when logging hits line file:N, emit a stack trace (default :0)
- --log_dir string If non-empty, write log files in this directory (no effect when -logtostderr=true)
- --log_file string If non-empty, use this log file (no effect when -logtostderr=true)
- --log_file_max_size uint Defines the maximum size a log file can grow to (no effect when -logtostderr=true). Unit is megabytes. If the value is 0, the maximum file size is unlimited. (default 1800)
- --logtostderr log to standard error instead of files (default true)
- --one_output If true, only write logs to their native severity level (vs also writing to each lower severity level; no effect when -logtostderr=true)
- --skip_headers If true, avoid header prefixes in the log messages
- --skip_log_headers If true, avoid headers when opening log files (no effect when -logtostderr=true)
- --stderrthreshold severity logs at or above this threshold go to stderr when writing to files and stderr (no effect when -logtostderr=true or -alsologtostderr=true) (default 2)
- --tools-dir string The directory of tools binaries (default "/tools/")
- -v, --v Level number for the log level verbosity
- --vmodule moduleSpec comma-separated list of pattern=N settings for file-filtered logging
- --zap-devel Development Mode defaults(encoder=consoleEncoder,logLevel=Debug,stackTraceLevel=Warn). Production Mode defaults(encoder=jsonEncoder,logLevel=Info,stackTraceLevel=Error) (default true)
- --zap-encoder encoder Zap log encoding (one of 'json' or 'console')
- --zap-log-level level Zap Level to configure the verbosity of logging. Can be one of 'debug', 'info', 'error', or any integer value > 0 which corresponds to custom debug levels of increasing verbosity
- --zap-stacktrace-level level Zap Level at and above which stacktraces are captured (one of 'info', 'error', 'panic').
- --zap-time-encoding time-encoding Zap time encoding (one of 'epoch', 'millis', 'nano', 'iso8601', 'rfc3339' or 'rfc3339nano'). Defaults to 'epoch'.
-```
-
-### SEE ALSO
-
-* [dbctl database](dbctl_database.md) - specify database.
-
-#### Go Back to [dbctl Overview](dbctl.md) Homepage.
-
diff --git a/engines/base.go b/engines/base.go
index 2b5705c..162b4b3 100644
--- a/engines/base.go
+++ b/engines/base.go
@@ -21,26 +21,19 @@ package engines
import (
"context"
- "strings"
"github.com/go-logr/logr"
"github.com/pkg/errors"
"github.com/apecloud/dbctl/constant"
- "github.com/apecloud/dbctl/dcs"
"github.com/apecloud/dbctl/engines/models"
)
type DBManagerBase struct {
CurrentMemberName string
- CurrentMemberIP string
ClusterCompName string
- Namespace string
- DataDir string
Logger logr.Logger
DBStartupReady bool
- IsLocked bool
- DBState *dcs.DBState
}
func NewDBManagerBase(logger logr.Logger) (*DBManagerBase, error) {
@@ -51,9 +44,7 @@ func NewDBManagerBase(logger logr.Logger) (*DBManagerBase, error) {
mgr := DBManagerBase{
CurrentMemberName: currentMemberName,
- CurrentMemberIP: constant.GetPodIP(),
ClusterCompName: constant.GetClusterCompName(),
- Namespace: constant.GetNamespace(),
Logger: logger,
}
return &mgr, nil
@@ -63,124 +54,10 @@ func (mgr *DBManagerBase) IsDBStartupReady() bool {
return mgr.DBStartupReady
}
-func (mgr *DBManagerBase) GetLogger() logr.Logger {
- return mgr.Logger
-}
-
func (mgr *DBManagerBase) SetLogger(logger logr.Logger) {
mgr.Logger = logger
}
-func (mgr *DBManagerBase) GetCurrentMemberName() string {
- return mgr.CurrentMemberName
-}
-
-func (mgr *DBManagerBase) IsFirstMember() bool {
- return strings.HasSuffix(mgr.CurrentMemberName, "-0")
-}
-
-func (mgr *DBManagerBase) IsPromoted(context.Context) bool {
- return true
-}
-
-func (mgr *DBManagerBase) Promote(context.Context, *dcs.Cluster) error {
- return models.ErrNotImplemented
-}
-
-func (mgr *DBManagerBase) Demote(context.Context) error {
- return models.ErrNotImplemented
-}
-
-func (mgr *DBManagerBase) Follow(context.Context, *dcs.Cluster) error {
- return models.ErrNotImplemented
-}
-
-func (mgr *DBManagerBase) Recover(context.Context, *dcs.Cluster) error {
- return nil
-}
-
-func (mgr *DBManagerBase) IsLeader(ctx context.Context, cluster *dcs.Cluster) (bool, error) {
- return false, models.ErrNotImplemented
-}
-
-func (mgr *DBManagerBase) IsLeaderMember(context.Context, *dcs.Cluster, *dcs.Member) (bool, error) {
- return false, models.ErrNotImplemented
-}
-
-func (mgr *DBManagerBase) GetMemberAddrs(context.Context, *dcs.Cluster) []string {
- return nil
-}
-
-func (mgr *DBManagerBase) InitializeCluster(context.Context, *dcs.Cluster) error {
- return nil
-}
-
-func (mgr *DBManagerBase) IsClusterInitialized(context.Context, *dcs.Cluster) (bool, error) {
- return true, nil
-}
-
-func (mgr *DBManagerBase) IsClusterHealthy(context.Context, *dcs.Cluster) bool {
- return true
-}
-
-func (mgr *DBManagerBase) MemberHealthyCheck(context.Context, *dcs.Cluster, *dcs.Member) error {
- return nil
-}
-
-func (mgr *DBManagerBase) LeaderHealthyCheck(context.Context, *dcs.Cluster) error {
- return nil
-}
-
-func (mgr *DBManagerBase) CurrentMemberHealthyCheck(ctx context.Context, cluster *dcs.Cluster) error {
- member := cluster.GetMemberWithName(mgr.CurrentMemberName)
- return mgr.MemberHealthyCheck(ctx, cluster, member)
-}
-
-func (mgr *DBManagerBase) HasOtherHealthyLeader(context.Context, *dcs.Cluster) *dcs.Member {
- return nil
-}
-
-func (mgr *DBManagerBase) HasOtherHealthyMembers(context.Context, *dcs.Cluster, string) []*dcs.Member {
- return nil
-}
-
-func (mgr *DBManagerBase) IsMemberHealthy(context.Context, *dcs.Cluster, *dcs.Member) bool {
- return false
-}
-
-func (mgr *DBManagerBase) IsCurrentMemberHealthy(context.Context, *dcs.Cluster) bool {
- return true
-}
-
-func (mgr *DBManagerBase) IsCurrentMemberInCluster(context.Context, *dcs.Cluster) bool {
- return true
-}
-
-func (mgr *DBManagerBase) JoinCurrentMemberToCluster(context.Context, *dcs.Cluster) error {
- return nil
-}
-
-func (mgr *DBManagerBase) LeaveMemberFromCluster(context.Context, *dcs.Cluster, string) error {
- return nil
-}
-
-func (mgr *DBManagerBase) IsMemberLagging(context.Context, *dcs.Cluster, *dcs.Member) (bool, int64) {
- return false, 0
-}
-
-func (mgr *DBManagerBase) GetLag(context.Context, *dcs.Cluster) (int64, error) {
- return 0, models.ErrNotImplemented
-}
-
-func (mgr *DBManagerBase) GetDBState(context.Context, *dcs.Cluster) *dcs.DBState {
- // mgr.DBState = DBState
- return nil
-}
-
-func (mgr *DBManagerBase) MoveData(context.Context, *dcs.Cluster) error {
- return nil
-}
-
func (mgr *DBManagerBase) GetReplicaRole(context.Context) (string, error) {
return "", models.ErrNotImplemented
}
@@ -193,66 +70,6 @@ func (mgr *DBManagerBase) Query(context.Context, string) ([]byte, error) {
return []byte{}, models.ErrNotImplemented
}
-func (mgr *DBManagerBase) GetPort() (int, error) {
- return 0, models.ErrNotImplemented
-}
-
-func (mgr *DBManagerBase) IsRootCreated(context.Context) (bool, error) {
- return true, nil
-}
-
-func (mgr *DBManagerBase) ListUsers(context.Context) ([]models.UserInfo, error) {
- return nil, models.ErrNotImplemented
-}
-
-func (mgr *DBManagerBase) ListSystemAccounts(context.Context) ([]models.UserInfo, error) {
- return nil, models.ErrNotImplemented
-}
-
-func (mgr *DBManagerBase) CreateUser(context.Context, string, string) error {
- return models.ErrNotImplemented
-}
-
-func (mgr *DBManagerBase) DeleteUser(context.Context, string) error {
- return models.ErrNotImplemented
-}
-
-func (mgr *DBManagerBase) DescribeUser(context.Context, string) (*models.UserInfo, error) {
- return nil, models.ErrNotImplemented
-}
-
-func (mgr *DBManagerBase) GrantUserRole(context.Context, string, string) error {
- return models.ErrNotImplemented
-}
-
-func (mgr *DBManagerBase) RevokeUserRole(context.Context, string, string) error {
- return models.ErrNotImplemented
-}
-
-func (mgr *DBManagerBase) IsRunning() bool {
- return false
-}
-
-func (mgr *DBManagerBase) Lock(context.Context, string) error {
- return models.ErrNotImplemented
-}
-
-func (mgr *DBManagerBase) Unlock(context.Context) error {
- return models.ErrNotImplemented
-}
-
-func (mgr *DBManagerBase) Start(context.Context, *dcs.Cluster) error {
- return nil
-}
-
-func (mgr *DBManagerBase) Stop() error {
- return nil
-}
-
-func (mgr *DBManagerBase) CreateRoot(context.Context) error {
- return nil
-}
-
func (mgr *DBManagerBase) ShutDownWithWait() {
mgr.Logger.Info("Override me if need")
}
diff --git a/engines/dbmanager_mock.go b/engines/dbmanager_mock.go
index 6e25911..b5e45ab 100644
--- a/engines/dbmanager_mock.go
+++ b/engines/dbmanager_mock.go
@@ -29,9 +29,6 @@ import (
context "context"
reflect "reflect"
- dcs "github.com/apecloud/dbctl/dcs"
- models "github.com/apecloud/dbctl/engines/models"
- logr "github.com/go-logr/logr"
gomock "github.com/golang/mock/gomock"
)
@@ -58,91 +55,6 @@ func (m *MockDBManager) EXPECT() *MockDBManagerMockRecorder {
return m.recorder
}
-// CreateRoot mocks base method.
-func (m *MockDBManager) CreateRoot(arg0 context.Context) error {
- m.ctrl.T.Helper()
- ret := m.ctrl.Call(m, "CreateRoot", arg0)
- ret0, _ := ret[0].(error)
- return ret0
-}
-
-// CreateRoot indicates an expected call of CreateRoot.
-func (mr *MockDBManagerMockRecorder) CreateRoot(arg0 interface{}) *gomock.Call {
- mr.mock.ctrl.T.Helper()
- return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "CreateRoot", reflect.TypeOf((*MockDBManager)(nil).CreateRoot), arg0)
-}
-
-// CreateUser mocks base method.
-func (m *MockDBManager) CreateUser(arg0 context.Context, arg1, arg2 string) error {
- m.ctrl.T.Helper()
- ret := m.ctrl.Call(m, "CreateUser", arg0, arg1, arg2)
- ret0, _ := ret[0].(error)
- return ret0
-}
-
-// CreateUser indicates an expected call of CreateUser.
-func (mr *MockDBManagerMockRecorder) CreateUser(arg0, arg1, arg2 interface{}) *gomock.Call {
- mr.mock.ctrl.T.Helper()
- return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "CreateUser", reflect.TypeOf((*MockDBManager)(nil).CreateUser), arg0, arg1, arg2)
-}
-
-// CurrentMemberHealthyCheck mocks base method.
-func (m *MockDBManager) CurrentMemberHealthyCheck(arg0 context.Context, arg1 *dcs.Cluster) error {
- m.ctrl.T.Helper()
- ret := m.ctrl.Call(m, "CurrentMemberHealthyCheck", arg0, arg1)
- ret0, _ := ret[0].(error)
- return ret0
-}
-
-// CurrentMemberHealthyCheck indicates an expected call of CurrentMemberHealthyCheck.
-func (mr *MockDBManagerMockRecorder) CurrentMemberHealthyCheck(arg0, arg1 interface{}) *gomock.Call {
- mr.mock.ctrl.T.Helper()
- return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "CurrentMemberHealthyCheck", reflect.TypeOf((*MockDBManager)(nil).CurrentMemberHealthyCheck), arg0, arg1)
-}
-
-// DeleteUser mocks base method.
-func (m *MockDBManager) DeleteUser(arg0 context.Context, arg1 string) error {
- m.ctrl.T.Helper()
- ret := m.ctrl.Call(m, "DeleteUser", arg0, arg1)
- ret0, _ := ret[0].(error)
- return ret0
-}
-
-// DeleteUser indicates an expected call of DeleteUser.
-func (mr *MockDBManagerMockRecorder) DeleteUser(arg0, arg1 interface{}) *gomock.Call {
- mr.mock.ctrl.T.Helper()
- return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "DeleteUser", reflect.TypeOf((*MockDBManager)(nil).DeleteUser), arg0, arg1)
-}
-
-// Demote mocks base method.
-func (m *MockDBManager) Demote(arg0 context.Context) error {
- m.ctrl.T.Helper()
- ret := m.ctrl.Call(m, "Demote", arg0)
- ret0, _ := ret[0].(error)
- return ret0
-}
-
-// Demote indicates an expected call of Demote.
-func (mr *MockDBManagerMockRecorder) Demote(arg0 interface{}) *gomock.Call {
- mr.mock.ctrl.T.Helper()
- return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Demote", reflect.TypeOf((*MockDBManager)(nil).Demote), arg0)
-}
-
-// DescribeUser mocks base method.
-func (m *MockDBManager) DescribeUser(arg0 context.Context, arg1 string) (*models.UserInfo, error) {
- m.ctrl.T.Helper()
- ret := m.ctrl.Call(m, "DescribeUser", arg0, arg1)
- ret0, _ := ret[0].(*models.UserInfo)
- ret1, _ := ret[1].(error)
- return ret0, ret1
-}
-
-// DescribeUser indicates an expected call of DescribeUser.
-func (mr *MockDBManagerMockRecorder) DescribeUser(arg0, arg1 interface{}) *gomock.Call {
- mr.mock.ctrl.T.Helper()
- return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "DescribeUser", reflect.TypeOf((*MockDBManager)(nil).DescribeUser), arg0, arg1)
-}
-
// Exec mocks base method.
func (m *MockDBManager) Exec(arg0 context.Context, arg1 string) (int64, error) {
m.ctrl.T.Helper()
@@ -158,106 +70,6 @@ func (mr *MockDBManagerMockRecorder) Exec(arg0, arg1 interface{}) *gomock.Call {
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Exec", reflect.TypeOf((*MockDBManager)(nil).Exec), arg0, arg1)
}
-// Follow mocks base method.
-func (m *MockDBManager) Follow(arg0 context.Context, arg1 *dcs.Cluster) error {
- m.ctrl.T.Helper()
- ret := m.ctrl.Call(m, "Follow", arg0, arg1)
- ret0, _ := ret[0].(error)
- return ret0
-}
-
-// Follow indicates an expected call of Follow.
-func (mr *MockDBManagerMockRecorder) Follow(arg0, arg1 interface{}) *gomock.Call {
- mr.mock.ctrl.T.Helper()
- return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Follow", reflect.TypeOf((*MockDBManager)(nil).Follow), arg0, arg1)
-}
-
-// GetCurrentMemberName mocks base method.
-func (m *MockDBManager) GetCurrentMemberName() string {
- m.ctrl.T.Helper()
- ret := m.ctrl.Call(m, "GetCurrentMemberName")
- ret0, _ := ret[0].(string)
- return ret0
-}
-
-// GetCurrentMemberName indicates an expected call of GetCurrentMemberName.
-func (mr *MockDBManagerMockRecorder) GetCurrentMemberName() *gomock.Call {
- mr.mock.ctrl.T.Helper()
- return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetCurrentMemberName", reflect.TypeOf((*MockDBManager)(nil).GetCurrentMemberName))
-}
-
-// GetDBState mocks base method.
-func (m *MockDBManager) GetDBState(arg0 context.Context, arg1 *dcs.Cluster) *dcs.DBState {
- m.ctrl.T.Helper()
- ret := m.ctrl.Call(m, "GetDBState", arg0, arg1)
- ret0, _ := ret[0].(*dcs.DBState)
- return ret0
-}
-
-// GetDBState indicates an expected call of GetDBState.
-func (mr *MockDBManagerMockRecorder) GetDBState(arg0, arg1 interface{}) *gomock.Call {
- mr.mock.ctrl.T.Helper()
- return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetDBState", reflect.TypeOf((*MockDBManager)(nil).GetDBState), arg0, arg1)
-}
-
-// GetLag mocks base method.
-func (m *MockDBManager) GetLag(arg0 context.Context, arg1 *dcs.Cluster) (int64, error) {
- m.ctrl.T.Helper()
- ret := m.ctrl.Call(m, "GetLag", arg0, arg1)
- ret0, _ := ret[0].(int64)
- ret1, _ := ret[1].(error)
- return ret0, ret1
-}
-
-// GetLag indicates an expected call of GetLag.
-func (mr *MockDBManagerMockRecorder) GetLag(arg0, arg1 interface{}) *gomock.Call {
- mr.mock.ctrl.T.Helper()
- return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetLag", reflect.TypeOf((*MockDBManager)(nil).GetLag), arg0, arg1)
-}
-
-// GetLogger mocks base method.
-func (m *MockDBManager) GetLogger() logr.Logger {
- m.ctrl.T.Helper()
- ret := m.ctrl.Call(m, "GetLogger")
- ret0, _ := ret[0].(logr.Logger)
- return ret0
-}
-
-// GetLogger indicates an expected call of GetLogger.
-func (mr *MockDBManagerMockRecorder) GetLogger() *gomock.Call {
- mr.mock.ctrl.T.Helper()
- return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetLogger", reflect.TypeOf((*MockDBManager)(nil).GetLogger))
-}
-
-// GetMemberAddrs mocks base method.
-func (m *MockDBManager) GetMemberAddrs(arg0 context.Context, arg1 *dcs.Cluster) []string {
- m.ctrl.T.Helper()
- ret := m.ctrl.Call(m, "GetMemberAddrs", arg0, arg1)
- ret0, _ := ret[0].([]string)
- return ret0
-}
-
-// GetMemberAddrs indicates an expected call of GetMemberAddrs.
-func (mr *MockDBManagerMockRecorder) GetMemberAddrs(arg0, arg1 interface{}) *gomock.Call {
- mr.mock.ctrl.T.Helper()
- return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetMemberAddrs", reflect.TypeOf((*MockDBManager)(nil).GetMemberAddrs), arg0, arg1)
-}
-
-// GetPort mocks base method.
-func (m *MockDBManager) GetPort() (int, error) {
- m.ctrl.T.Helper()
- ret := m.ctrl.Call(m, "GetPort")
- ret0, _ := ret[0].(int)
- ret1, _ := ret[1].(error)
- return ret0, ret1
-}
-
-// GetPort indicates an expected call of GetPort.
-func (mr *MockDBManagerMockRecorder) GetPort() *gomock.Call {
- mr.mock.ctrl.T.Helper()
- return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetPort", reflect.TypeOf((*MockDBManager)(nil).GetPort))
-}
-
// GetReplicaRole mocks base method.
func (m *MockDBManager) GetReplicaRole(arg0 context.Context) (string, error) {
m.ctrl.T.Helper()
@@ -273,119 +85,6 @@ func (mr *MockDBManagerMockRecorder) GetReplicaRole(arg0 interface{}) *gomock.Ca
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetReplicaRole", reflect.TypeOf((*MockDBManager)(nil).GetReplicaRole), arg0)
}
-// GrantUserRole mocks base method.
-func (m *MockDBManager) GrantUserRole(arg0 context.Context, arg1, arg2 string) error {
- m.ctrl.T.Helper()
- ret := m.ctrl.Call(m, "GrantUserRole", arg0, arg1, arg2)
- ret0, _ := ret[0].(error)
- return ret0
-}
-
-// GrantUserRole indicates an expected call of GrantUserRole.
-func (mr *MockDBManagerMockRecorder) GrantUserRole(arg0, arg1, arg2 interface{}) *gomock.Call {
- mr.mock.ctrl.T.Helper()
- return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GrantUserRole", reflect.TypeOf((*MockDBManager)(nil).GrantUserRole), arg0, arg1, arg2)
-}
-
-// HasOtherHealthyLeader mocks base method.
-func (m *MockDBManager) HasOtherHealthyLeader(arg0 context.Context, arg1 *dcs.Cluster) *dcs.Member {
- m.ctrl.T.Helper()
- ret := m.ctrl.Call(m, "HasOtherHealthyLeader", arg0, arg1)
- ret0, _ := ret[0].(*dcs.Member)
- return ret0
-}
-
-// HasOtherHealthyLeader indicates an expected call of HasOtherHealthyLeader.
-func (mr *MockDBManagerMockRecorder) HasOtherHealthyLeader(arg0, arg1 interface{}) *gomock.Call {
- mr.mock.ctrl.T.Helper()
- return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "HasOtherHealthyLeader", reflect.TypeOf((*MockDBManager)(nil).HasOtherHealthyLeader), arg0, arg1)
-}
-
-// HasOtherHealthyMembers mocks base method.
-func (m *MockDBManager) HasOtherHealthyMembers(arg0 context.Context, arg1 *dcs.Cluster, arg2 string) []*dcs.Member {
- m.ctrl.T.Helper()
- ret := m.ctrl.Call(m, "HasOtherHealthyMembers", arg0, arg1, arg2)
- ret0, _ := ret[0].([]*dcs.Member)
- return ret0
-}
-
-// HasOtherHealthyMembers indicates an expected call of HasOtherHealthyMembers.
-func (mr *MockDBManagerMockRecorder) HasOtherHealthyMembers(arg0, arg1, arg2 interface{}) *gomock.Call {
- mr.mock.ctrl.T.Helper()
- return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "HasOtherHealthyMembers", reflect.TypeOf((*MockDBManager)(nil).HasOtherHealthyMembers), arg0, arg1, arg2)
-}
-
-// InitializeCluster mocks base method.
-func (m *MockDBManager) InitializeCluster(arg0 context.Context, arg1 *dcs.Cluster) error {
- m.ctrl.T.Helper()
- ret := m.ctrl.Call(m, "InitializeCluster", arg0, arg1)
- ret0, _ := ret[0].(error)
- return ret0
-}
-
-// InitializeCluster indicates an expected call of InitializeCluster.
-func (mr *MockDBManagerMockRecorder) InitializeCluster(arg0, arg1 interface{}) *gomock.Call {
- mr.mock.ctrl.T.Helper()
- return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "InitializeCluster", reflect.TypeOf((*MockDBManager)(nil).InitializeCluster), arg0, arg1)
-}
-
-// IsClusterHealthy mocks base method.
-func (m *MockDBManager) IsClusterHealthy(arg0 context.Context, arg1 *dcs.Cluster) bool {
- m.ctrl.T.Helper()
- ret := m.ctrl.Call(m, "IsClusterHealthy", arg0, arg1)
- ret0, _ := ret[0].(bool)
- return ret0
-}
-
-// IsClusterHealthy indicates an expected call of IsClusterHealthy.
-func (mr *MockDBManagerMockRecorder) IsClusterHealthy(arg0, arg1 interface{}) *gomock.Call {
- mr.mock.ctrl.T.Helper()
- return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "IsClusterHealthy", reflect.TypeOf((*MockDBManager)(nil).IsClusterHealthy), arg0, arg1)
-}
-
-// IsClusterInitialized mocks base method.
-func (m *MockDBManager) IsClusterInitialized(arg0 context.Context, arg1 *dcs.Cluster) (bool, error) {
- m.ctrl.T.Helper()
- ret := m.ctrl.Call(m, "IsClusterInitialized", arg0, arg1)
- ret0, _ := ret[0].(bool)
- ret1, _ := ret[1].(error)
- return ret0, ret1
-}
-
-// IsClusterInitialized indicates an expected call of IsClusterInitialized.
-func (mr *MockDBManagerMockRecorder) IsClusterInitialized(arg0, arg1 interface{}) *gomock.Call {
- mr.mock.ctrl.T.Helper()
- return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "IsClusterInitialized", reflect.TypeOf((*MockDBManager)(nil).IsClusterInitialized), arg0, arg1)
-}
-
-// IsCurrentMemberHealthy mocks base method.
-func (m *MockDBManager) IsCurrentMemberHealthy(arg0 context.Context, arg1 *dcs.Cluster) bool {
- m.ctrl.T.Helper()
- ret := m.ctrl.Call(m, "IsCurrentMemberHealthy", arg0, arg1)
- ret0, _ := ret[0].(bool)
- return ret0
-}
-
-// IsCurrentMemberHealthy indicates an expected call of IsCurrentMemberHealthy.
-func (mr *MockDBManagerMockRecorder) IsCurrentMemberHealthy(arg0, arg1 interface{}) *gomock.Call {
- mr.mock.ctrl.T.Helper()
- return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "IsCurrentMemberHealthy", reflect.TypeOf((*MockDBManager)(nil).IsCurrentMemberHealthy), arg0, arg1)
-}
-
-// IsCurrentMemberInCluster mocks base method.
-func (m *MockDBManager) IsCurrentMemberInCluster(arg0 context.Context, arg1 *dcs.Cluster) bool {
- m.ctrl.T.Helper()
- ret := m.ctrl.Call(m, "IsCurrentMemberInCluster", arg0, arg1)
- ret0, _ := ret[0].(bool)
- return ret0
-}
-
-// IsCurrentMemberInCluster indicates an expected call of IsCurrentMemberInCluster.
-func (mr *MockDBManagerMockRecorder) IsCurrentMemberInCluster(arg0, arg1 interface{}) *gomock.Call {
- mr.mock.ctrl.T.Helper()
- return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "IsCurrentMemberInCluster", reflect.TypeOf((*MockDBManager)(nil).IsCurrentMemberInCluster), arg0, arg1)
-}
-
// IsDBStartupReady mocks base method.
func (m *MockDBManager) IsDBStartupReady() bool {
m.ctrl.T.Helper()
@@ -400,250 +99,6 @@ func (mr *MockDBManagerMockRecorder) IsDBStartupReady() *gomock.Call {
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "IsDBStartupReady", reflect.TypeOf((*MockDBManager)(nil).IsDBStartupReady))
}
-// IsFirstMember mocks base method.
-func (m *MockDBManager) IsFirstMember() bool {
- m.ctrl.T.Helper()
- ret := m.ctrl.Call(m, "IsFirstMember")
- ret0, _ := ret[0].(bool)
- return ret0
-}
-
-// IsFirstMember indicates an expected call of IsFirstMember.
-func (mr *MockDBManagerMockRecorder) IsFirstMember() *gomock.Call {
- mr.mock.ctrl.T.Helper()
- return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "IsFirstMember", reflect.TypeOf((*MockDBManager)(nil).IsFirstMember))
-}
-
-// IsLeader mocks base method.
-func (m *MockDBManager) IsLeader(arg0 context.Context, arg1 *dcs.Cluster) (bool, error) {
- m.ctrl.T.Helper()
- ret := m.ctrl.Call(m, "IsLeader", arg0, arg1)
- ret0, _ := ret[0].(bool)
- ret1, _ := ret[1].(error)
- return ret0, ret1
-}
-
-// IsLeader indicates an expected call of IsLeader.
-func (mr *MockDBManagerMockRecorder) IsLeader(arg0, arg1 interface{}) *gomock.Call {
- mr.mock.ctrl.T.Helper()
- return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "IsLeader", reflect.TypeOf((*MockDBManager)(nil).IsLeader), arg0, arg1)
-}
-
-// IsLeaderMember mocks base method.
-func (m *MockDBManager) IsLeaderMember(arg0 context.Context, arg1 *dcs.Cluster, arg2 *dcs.Member) (bool, error) {
- m.ctrl.T.Helper()
- ret := m.ctrl.Call(m, "IsLeaderMember", arg0, arg1, arg2)
- ret0, _ := ret[0].(bool)
- ret1, _ := ret[1].(error)
- return ret0, ret1
-}
-
-// IsLeaderMember indicates an expected call of IsLeaderMember.
-func (mr *MockDBManagerMockRecorder) IsLeaderMember(arg0, arg1, arg2 interface{}) *gomock.Call {
- mr.mock.ctrl.T.Helper()
- return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "IsLeaderMember", reflect.TypeOf((*MockDBManager)(nil).IsLeaderMember), arg0, arg1, arg2)
-}
-
-// IsMemberHealthy mocks base method.
-func (m *MockDBManager) IsMemberHealthy(arg0 context.Context, arg1 *dcs.Cluster, arg2 *dcs.Member) bool {
- m.ctrl.T.Helper()
- ret := m.ctrl.Call(m, "IsMemberHealthy", arg0, arg1, arg2)
- ret0, _ := ret[0].(bool)
- return ret0
-}
-
-// IsMemberHealthy indicates an expected call of IsMemberHealthy.
-func (mr *MockDBManagerMockRecorder) IsMemberHealthy(arg0, arg1, arg2 interface{}) *gomock.Call {
- mr.mock.ctrl.T.Helper()
- return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "IsMemberHealthy", reflect.TypeOf((*MockDBManager)(nil).IsMemberHealthy), arg0, arg1, arg2)
-}
-
-// IsMemberLagging mocks base method.
-func (m *MockDBManager) IsMemberLagging(arg0 context.Context, arg1 *dcs.Cluster, arg2 *dcs.Member) (bool, int64) {
- m.ctrl.T.Helper()
- ret := m.ctrl.Call(m, "IsMemberLagging", arg0, arg1, arg2)
- ret0, _ := ret[0].(bool)
- ret1, _ := ret[1].(int64)
- return ret0, ret1
-}
-
-// IsMemberLagging indicates an expected call of IsMemberLagging.
-func (mr *MockDBManagerMockRecorder) IsMemberLagging(arg0, arg1, arg2 interface{}) *gomock.Call {
- mr.mock.ctrl.T.Helper()
- return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "IsMemberLagging", reflect.TypeOf((*MockDBManager)(nil).IsMemberLagging), arg0, arg1, arg2)
-}
-
-// IsPromoted mocks base method.
-func (m *MockDBManager) IsPromoted(arg0 context.Context) bool {
- m.ctrl.T.Helper()
- ret := m.ctrl.Call(m, "IsPromoted", arg0)
- ret0, _ := ret[0].(bool)
- return ret0
-}
-
-// IsPromoted indicates an expected call of IsPromoted.
-func (mr *MockDBManagerMockRecorder) IsPromoted(arg0 interface{}) *gomock.Call {
- mr.mock.ctrl.T.Helper()
- return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "IsPromoted", reflect.TypeOf((*MockDBManager)(nil).IsPromoted), arg0)
-}
-
-// IsRootCreated mocks base method.
-func (m *MockDBManager) IsRootCreated(arg0 context.Context) (bool, error) {
- m.ctrl.T.Helper()
- ret := m.ctrl.Call(m, "IsRootCreated", arg0)
- ret0, _ := ret[0].(bool)
- ret1, _ := ret[1].(error)
- return ret0, ret1
-}
-
-// IsRootCreated indicates an expected call of IsRootCreated.
-func (mr *MockDBManagerMockRecorder) IsRootCreated(arg0 interface{}) *gomock.Call {
- mr.mock.ctrl.T.Helper()
- return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "IsRootCreated", reflect.TypeOf((*MockDBManager)(nil).IsRootCreated), arg0)
-}
-
-// IsRunning mocks base method.
-func (m *MockDBManager) IsRunning() bool {
- m.ctrl.T.Helper()
- ret := m.ctrl.Call(m, "IsRunning")
- ret0, _ := ret[0].(bool)
- return ret0
-}
-
-// IsRunning indicates an expected call of IsRunning.
-func (mr *MockDBManagerMockRecorder) IsRunning() *gomock.Call {
- mr.mock.ctrl.T.Helper()
- return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "IsRunning", reflect.TypeOf((*MockDBManager)(nil).IsRunning))
-}
-
-// JoinCurrentMemberToCluster mocks base method.
-func (m *MockDBManager) JoinCurrentMemberToCluster(arg0 context.Context, arg1 *dcs.Cluster) error {
- m.ctrl.T.Helper()
- ret := m.ctrl.Call(m, "JoinCurrentMemberToCluster", arg0, arg1)
- ret0, _ := ret[0].(error)
- return ret0
-}
-
-// JoinCurrentMemberToCluster indicates an expected call of JoinCurrentMemberToCluster.
-func (mr *MockDBManagerMockRecorder) JoinCurrentMemberToCluster(arg0, arg1 interface{}) *gomock.Call {
- mr.mock.ctrl.T.Helper()
- return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "JoinCurrentMemberToCluster", reflect.TypeOf((*MockDBManager)(nil).JoinCurrentMemberToCluster), arg0, arg1)
-}
-
-// LeaderHealthyCheck mocks base method.
-func (m *MockDBManager) LeaderHealthyCheck(arg0 context.Context, arg1 *dcs.Cluster) error {
- m.ctrl.T.Helper()
- ret := m.ctrl.Call(m, "LeaderHealthyCheck", arg0, arg1)
- ret0, _ := ret[0].(error)
- return ret0
-}
-
-// LeaderHealthyCheck indicates an expected call of LeaderHealthyCheck.
-func (mr *MockDBManagerMockRecorder) LeaderHealthyCheck(arg0, arg1 interface{}) *gomock.Call {
- mr.mock.ctrl.T.Helper()
- return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "LeaderHealthyCheck", reflect.TypeOf((*MockDBManager)(nil).LeaderHealthyCheck), arg0, arg1)
-}
-
-// LeaveMemberFromCluster mocks base method.
-func (m *MockDBManager) LeaveMemberFromCluster(arg0 context.Context, arg1 *dcs.Cluster, arg2 string) error {
- m.ctrl.T.Helper()
- ret := m.ctrl.Call(m, "LeaveMemberFromCluster", arg0, arg1, arg2)
- ret0, _ := ret[0].(error)
- return ret0
-}
-
-// LeaveMemberFromCluster indicates an expected call of LeaveMemberFromCluster.
-func (mr *MockDBManagerMockRecorder) LeaveMemberFromCluster(arg0, arg1, arg2 interface{}) *gomock.Call {
- mr.mock.ctrl.T.Helper()
- return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "LeaveMemberFromCluster", reflect.TypeOf((*MockDBManager)(nil).LeaveMemberFromCluster), arg0, arg1, arg2)
-}
-
-// ListSystemAccounts mocks base method.
-func (m *MockDBManager) ListSystemAccounts(arg0 context.Context) ([]models.UserInfo, error) {
- m.ctrl.T.Helper()
- ret := m.ctrl.Call(m, "ListSystemAccounts", arg0)
- ret0, _ := ret[0].([]models.UserInfo)
- ret1, _ := ret[1].(error)
- return ret0, ret1
-}
-
-// ListSystemAccounts indicates an expected call of ListSystemAccounts.
-func (mr *MockDBManagerMockRecorder) ListSystemAccounts(arg0 interface{}) *gomock.Call {
- mr.mock.ctrl.T.Helper()
- return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ListSystemAccounts", reflect.TypeOf((*MockDBManager)(nil).ListSystemAccounts), arg0)
-}
-
-// ListUsers mocks base method.
-func (m *MockDBManager) ListUsers(arg0 context.Context) ([]models.UserInfo, error) {
- m.ctrl.T.Helper()
- ret := m.ctrl.Call(m, "ListUsers", arg0)
- ret0, _ := ret[0].([]models.UserInfo)
- ret1, _ := ret[1].(error)
- return ret0, ret1
-}
-
-// ListUsers indicates an expected call of ListUsers.
-func (mr *MockDBManagerMockRecorder) ListUsers(arg0 interface{}) *gomock.Call {
- mr.mock.ctrl.T.Helper()
- return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ListUsers", reflect.TypeOf((*MockDBManager)(nil).ListUsers), arg0)
-}
-
-// Lock mocks base method.
-func (m *MockDBManager) Lock(arg0 context.Context, arg1 string) error {
- m.ctrl.T.Helper()
- ret := m.ctrl.Call(m, "Lock", arg0, arg1)
- ret0, _ := ret[0].(error)
- return ret0
-}
-
-// Lock indicates an expected call of Lock.
-func (mr *MockDBManagerMockRecorder) Lock(arg0, arg1 interface{}) *gomock.Call {
- mr.mock.ctrl.T.Helper()
- return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Lock", reflect.TypeOf((*MockDBManager)(nil).Lock), arg0, arg1)
-}
-
-// MemberHealthyCheck mocks base method.
-func (m *MockDBManager) MemberHealthyCheck(arg0 context.Context, arg1 *dcs.Cluster, arg2 *dcs.Member) error {
- m.ctrl.T.Helper()
- ret := m.ctrl.Call(m, "MemberHealthyCheck", arg0, arg1, arg2)
- ret0, _ := ret[0].(error)
- return ret0
-}
-
-// MemberHealthyCheck indicates an expected call of MemberHealthyCheck.
-func (mr *MockDBManagerMockRecorder) MemberHealthyCheck(arg0, arg1, arg2 interface{}) *gomock.Call {
- mr.mock.ctrl.T.Helper()
- return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "MemberHealthyCheck", reflect.TypeOf((*MockDBManager)(nil).MemberHealthyCheck), arg0, arg1, arg2)
-}
-
-// MoveData mocks base method.
-func (m *MockDBManager) MoveData(arg0 context.Context, arg1 *dcs.Cluster) error {
- m.ctrl.T.Helper()
- ret := m.ctrl.Call(m, "MoveData", arg0, arg1)
- ret0, _ := ret[0].(error)
- return ret0
-}
-
-// MoveData indicates an expected call of MoveData.
-func (mr *MockDBManagerMockRecorder) MoveData(arg0, arg1 interface{}) *gomock.Call {
- mr.mock.ctrl.T.Helper()
- return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "MoveData", reflect.TypeOf((*MockDBManager)(nil).MoveData), arg0, arg1)
-}
-
-// Promote mocks base method.
-func (m *MockDBManager) Promote(arg0 context.Context, arg1 *dcs.Cluster) error {
- m.ctrl.T.Helper()
- ret := m.ctrl.Call(m, "Promote", arg0, arg1)
- ret0, _ := ret[0].(error)
- return ret0
-}
-
-// Promote indicates an expected call of Promote.
-func (mr *MockDBManagerMockRecorder) Promote(arg0, arg1 interface{}) *gomock.Call {
- mr.mock.ctrl.T.Helper()
- return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Promote", reflect.TypeOf((*MockDBManager)(nil).Promote), arg0, arg1)
-}
-
// Query mocks base method.
func (m *MockDBManager) Query(arg0 context.Context, arg1 string) ([]byte, error) {
m.ctrl.T.Helper()
@@ -659,34 +114,6 @@ func (mr *MockDBManagerMockRecorder) Query(arg0, arg1 interface{}) *gomock.Call
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Query", reflect.TypeOf((*MockDBManager)(nil).Query), arg0, arg1)
}
-// Recover mocks base method.
-func (m *MockDBManager) Recover(arg0 context.Context, arg1 *dcs.Cluster) error {
- m.ctrl.T.Helper()
- ret := m.ctrl.Call(m, "Recover", arg0, arg1)
- ret0, _ := ret[0].(error)
- return ret0
-}
-
-// Recover indicates an expected call of Recover.
-func (mr *MockDBManagerMockRecorder) Recover(arg0, arg1 interface{}) *gomock.Call {
- mr.mock.ctrl.T.Helper()
- return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Recover", reflect.TypeOf((*MockDBManager)(nil).Recover), arg0, arg1)
-}
-
-// RevokeUserRole mocks base method.
-func (m *MockDBManager) RevokeUserRole(arg0 context.Context, arg1, arg2 string) error {
- m.ctrl.T.Helper()
- ret := m.ctrl.Call(m, "RevokeUserRole", arg0, arg1, arg2)
- ret0, _ := ret[0].(error)
- return ret0
-}
-
-// RevokeUserRole indicates an expected call of RevokeUserRole.
-func (mr *MockDBManagerMockRecorder) RevokeUserRole(arg0, arg1, arg2 interface{}) *gomock.Call {
- mr.mock.ctrl.T.Helper()
- return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "RevokeUserRole", reflect.TypeOf((*MockDBManager)(nil).RevokeUserRole), arg0, arg1, arg2)
-}
-
// ShutDownWithWait mocks base method.
func (m *MockDBManager) ShutDownWithWait() {
m.ctrl.T.Helper()
@@ -698,45 +125,3 @@ func (mr *MockDBManagerMockRecorder) ShutDownWithWait() *gomock.Call {
mr.mock.ctrl.T.Helper()
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ShutDownWithWait", reflect.TypeOf((*MockDBManager)(nil).ShutDownWithWait))
}
-
-// Start mocks base method.
-func (m *MockDBManager) Start(arg0 context.Context, arg1 *dcs.Cluster) error {
- m.ctrl.T.Helper()
- ret := m.ctrl.Call(m, "Start", arg0, arg1)
- ret0, _ := ret[0].(error)
- return ret0
-}
-
-// Start indicates an expected call of Start.
-func (mr *MockDBManagerMockRecorder) Start(arg0, arg1 interface{}) *gomock.Call {
- mr.mock.ctrl.T.Helper()
- return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Start", reflect.TypeOf((*MockDBManager)(nil).Start), arg0, arg1)
-}
-
-// Stop mocks base method.
-func (m *MockDBManager) Stop() error {
- m.ctrl.T.Helper()
- ret := m.ctrl.Call(m, "Stop")
- ret0, _ := ret[0].(error)
- return ret0
-}
-
-// Stop indicates an expected call of Stop.
-func (mr *MockDBManagerMockRecorder) Stop() *gomock.Call {
- mr.mock.ctrl.T.Helper()
- return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Stop", reflect.TypeOf((*MockDBManager)(nil).Stop))
-}
-
-// Unlock mocks base method.
-func (m *MockDBManager) Unlock(arg0 context.Context) error {
- m.ctrl.T.Helper()
- ret := m.ctrl.Call(m, "Unlock", arg0)
- ret0, _ := ret[0].(error)
- return ret0
-}
-
-// Unlock indicates an expected call of Unlock.
-func (mr *MockDBManagerMockRecorder) Unlock(arg0 interface{}) *gomock.Call {
- mr.mock.ctrl.T.Helper()
- return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Unlock", reflect.TypeOf((*MockDBManager)(nil).Unlock), arg0)
-}
diff --git a/engines/etcd/manager.go b/engines/etcd/manager.go
index bf83c02..e7f3ae5 100644
--- a/engines/etcd/manager.go
+++ b/engines/etcd/manager.go
@@ -21,8 +21,6 @@ package etcd
import (
"context"
- "strconv"
- "strings"
"time"
v3 "go.etcd.io/etcd/client/v3"
@@ -32,9 +30,8 @@ import (
)
const (
- endpoint = "endpoint"
+ defaultEndpoint = "endpoint"
- defaultPort = 2379
defaultDialTimeout = 600 * time.Millisecond
)
@@ -46,8 +43,11 @@ type Manager struct {
var _ engines.DBManager = &Manager{}
-func NewManager(properties engines.Properties) (engines.DBManager, error) {
+func NewManager() (engines.DBManager, error) {
logger := ctrl.Log.WithName("ETCD")
+ properties := map[string]string{
+ defaultEndpoint: "127.0.0.1:2379",
+ }
managerBase, err := engines.NewDBManagerBase(logger)
if err != nil {
@@ -59,7 +59,7 @@ func NewManager(properties engines.Properties) (engines.DBManager, error) {
}
var endpoints []string
- endpoint, ok := properties[endpoint]
+ endpoint, ok := properties[defaultEndpoint]
if ok {
mgr.endpoint = endpoint
endpoints = []string{endpoint}
@@ -94,16 +94,3 @@ func (mgr *Manager) IsDBStartupReady() bool {
mgr.Logger.Info("DB startup ready")
return true
}
-
-func (mgr *Manager) GetRunningPort() int {
- index := strings.Index(mgr.endpoint, ":")
- if index < 0 {
- return defaultPort
- }
- port, err := strconv.Atoi(mgr.endpoint[index+1:])
- if err != nil {
- return defaultPort
- }
-
- return port
-}
diff --git a/engines/etcd/manager_test.go b/engines/etcd/manager_test.go
index 47aadb4..bfcf679 100644
--- a/engines/etcd/manager_test.go
+++ b/engines/etcd/manager_test.go
@@ -29,11 +29,6 @@ import (
"github.com/spf13/viper"
"github.com/apecloud/dbctl/constant"
- "github.com/apecloud/dbctl/engines"
-)
-
-const (
- urlWithPort = "127.0.0.1:2379"
)
// Test case for Init() function
@@ -43,20 +38,10 @@ var _ = Describe("ETCD DBManager", func() {
viper.Set(constant.KBEnvServicePassword, "testpassword")
Context("new db manager", func() {
It("with right configurations", func() {
- properties := engines.Properties{
- "endpoint": urlWithPort,
- }
- dbManger, err := NewManager(properties)
+ dbManger, err := NewManager()
Expect(err).Should(Succeed())
Expect(dbManger).ShouldNot(BeNil())
})
-
- It("with wrong configurations", func() {
- properties := engines.Properties{}
- dbManger, err := NewManager(properties)
- Expect(err).Should(HaveOccurred())
- Expect(dbManger).Should(BeNil())
- })
})
Context("is db startup ready", func() {
@@ -76,11 +61,7 @@ var _ = Describe("ETCD DBManager", func() {
etcdServer, err := StartEtcdServer()
Expect(err).Should(BeNil())
etcdServer.Stop()
- testEndpoint := fmt.Sprintf("http://%s", etcdServer.ETCD.Clients[0].Addr().(*net.TCPAddr).String())
- properties := engines.Properties{
- "endpoint": testEndpoint,
- }
- manager, err := NewManager(properties)
+ manager, err := NewManager()
Expect(err).Should(BeNil())
Expect(manager).ShouldNot(BeNil())
Expect(manager.IsDBStartupReady()).Should(BeFalse())
@@ -99,7 +80,7 @@ var _ = Describe("ETCD DBManager", func() {
}
role, err := manager.GetReplicaRole(context.Background())
Expect(err).Should(BeNil())
- Expect(role).Should(Equal("Leader"))
+ Expect(role).Should(Equal("leader"))
})
})
})
diff --git a/engines/etcd/suite_test.go b/engines/etcd/suite_test.go
index 5d058e3..c446e76 100644
--- a/engines/etcd/suite_test.go
+++ b/engines/etcd/suite_test.go
@@ -30,7 +30,6 @@ import (
. "github.com/onsi/gomega"
"github.com/go-logr/logr"
- "github.com/golang/mock/gomock"
"github.com/spf13/viper"
clientv3 "go.etcd.io/etcd/client/v3"
"go.etcd.io/etcd/server/v3/embed"
@@ -39,7 +38,6 @@ import (
"sigs.k8s.io/controller-runtime/pkg/log/zap"
"github.com/apecloud/dbctl/constant"
- "github.com/apecloud/dbctl/dcs"
)
const (
@@ -47,9 +45,7 @@ const (
)
var (
- dcsStore dcs.DCS
- mockDCSStore *dcs.MockDCS
- etcdServer *EmbeddedETCD
+ etcdServer *EmbeddedETCD
)
func init() {
@@ -65,28 +61,10 @@ func TestETCDDBManager(t *testing.T) {
RunSpecs(t, "ETCD DBManager. Suite")
}
-var _ = BeforeSuite(func() {
- // Init mock dcs store
- InitMockDCSStore()
-
- // Start ETCD Server
- // server, err := StartEtcdServer()
- // Expect(err).Should(BeNil())
- // etcdServer = server
-})
-
var _ = AfterSuite(func() {
StopEtcdServer(etcdServer)
})
-func InitMockDCSStore() {
- ctrl := gomock.NewController(GinkgoT())
- mockDCSStore = dcs.NewMockDCS(ctrl)
- mockDCSStore.EXPECT().GetClusterFromCache().Return(&dcs.Cluster{}).AnyTimes()
- dcs.SetStore(mockDCSStore)
- dcsStore = mockDCSStore
-}
-
func StartEtcdServer() (*EmbeddedETCD, error) {
peerAddress := "http://localhost:0"
@@ -143,5 +121,5 @@ func (e *EmbeddedETCD) Start(peerAddress string) error {
func (e *EmbeddedETCD) Stop() {
e.ETCD.Close()
e.ETCD.Server.Stop()
- os.RemoveAll(e.tmpDir)
+ _ = os.RemoveAll(e.tmpDir)
}
diff --git a/engines/interface.go b/engines/interface.go
index 8233b5c..43ceecb 100644
--- a/engines/interface.go
+++ b/engines/interface.go
@@ -21,108 +21,15 @@ package engines
import (
"context"
-
- "github.com/go-logr/logr"
-
- "github.com/apecloud/dbctl/dcs"
- "github.com/apecloud/dbctl/engines/models"
)
type DBManager interface {
- IsRunning() bool
-
IsDBStartupReady() bool
- // Functions related to cluster initialization.
- InitializeCluster(context.Context, *dcs.Cluster) error
- IsClusterInitialized(context.Context, *dcs.Cluster) (bool, error)
- // IsCurrentMemberInCluster checks if current member is configured in cluster for consensus.
- // it will always return true for replicationset.
- IsCurrentMemberInCluster(context.Context, *dcs.Cluster) bool
-
- // IsClusterHealthy is only for consensus cluster healthy check.
- // For Replication cluster IsClusterHealthy will always return true,
- // and its cluster's healthy is equal to leader member's healthy.
- IsClusterHealthy(context.Context, *dcs.Cluster) bool
-
- // Member healthy check
- MemberHealthyCheck(context.Context, *dcs.Cluster, *dcs.Member) error
- LeaderHealthyCheck(context.Context, *dcs.Cluster) error
- CurrentMemberHealthyCheck(context.Context, *dcs.Cluster) error
- // IsMemberHealthy focuses on the database's read and write capabilities.
- IsMemberHealthy(context.Context, *dcs.Cluster, *dcs.Member) bool
- IsCurrentMemberHealthy(context.Context, *dcs.Cluster) bool
- // IsMemberLagging focuses on the latency between the leader and standby
- IsMemberLagging(context.Context, *dcs.Cluster, *dcs.Member) (bool, int64)
- GetLag(context.Context, *dcs.Cluster) (int64, error)
-
- // GetDBState will get most required database kernel states of current member in one HA loop to Avoiding duplicate queries and conserve I/O.
- // We believe that the states of database kernel remains unchanged within a single HA loop.
- GetDBState(context.Context, *dcs.Cluster) *dcs.DBState
-
- // HasOtherHealthyLeader is applicable only to consensus cluster,
- // where the db's internal role services as the source of truth.
- // for replicationset cluster, HasOtherHealthyLeader will always be nil.
- HasOtherHealthyLeader(context.Context, *dcs.Cluster) *dcs.Member
- HasOtherHealthyMembers(context.Context, *dcs.Cluster, string) []*dcs.Member
-
- // Functions related to replica member relationship.
- IsLeader(context.Context, *dcs.Cluster) (bool, error)
- IsLeaderMember(context.Context, *dcs.Cluster, *dcs.Member) (bool, error)
- IsFirstMember() bool
GetReplicaRole(context.Context) (string, error)
- JoinCurrentMemberToCluster(context.Context, *dcs.Cluster) error
- LeaveMemberFromCluster(context.Context, *dcs.Cluster, string) error
-
- // IsPromoted is applicable only to consensus cluster, which is used to
- // check if DB has complete switchover.
- // for replicationset cluster, it will always be true.
- IsPromoted(context.Context) bool
- // Functions related to HA
- // The functions should be idempotent, indicating that if they have been executed in one ha cycle,
- // any subsequent calls during that cycle will have no effect.
- Promote(context.Context, *dcs.Cluster) error
- Demote(context.Context) error
- Follow(context.Context, *dcs.Cluster) error
- Recover(context.Context, *dcs.Cluster) error
-
- // Start and Stop just send signal to lorryctl
- Start(context.Context, *dcs.Cluster) error
- Stop() error
-
- // GetHealthiestMember(*dcs.Cluster, string) *dcs.Member
- // IsHealthiestMember(*dcs.Cluster) bool
-
- GetCurrentMemberName() string
- GetMemberAddrs(context.Context, *dcs.Cluster) []string
-
- // Functions related to account manage
- IsRootCreated(context.Context) (bool, error)
- CreateRoot(context.Context) error
-
- // Readonly lock for disk full
- Lock(context.Context, string) error
- Unlock(context.Context) error
-
- // sql query
Exec(context.Context, string) (int64, error)
Query(context.Context, string) ([]byte, error)
- // user management
- ListUsers(context.Context) ([]models.UserInfo, error)
- ListSystemAccounts(context.Context) ([]models.UserInfo, error)
- CreateUser(context.Context, string, string) error
- DeleteUser(context.Context, string) error
- DescribeUser(context.Context, string) (*models.UserInfo, error)
- GrantUserRole(context.Context, string, string) error
- RevokeUserRole(context.Context, string, string) error
-
- GetPort() (int, error)
-
- MoveData(context.Context, *dcs.Cluster) error
-
- GetLogger() logr.Logger
-
ShutDownWithWait()
}
diff --git a/engines/kafka/auth.go b/engines/kafka/auth.go
deleted file mode 100644
index 9a880f9..0000000
--- a/engines/kafka/auth.go
+++ /dev/null
@@ -1,93 +0,0 @@
-/*
-Copyright 2021 The Dapr Authors
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
- http://www.apache.org/licenses/LICENSE-2.0
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-*/
-
-package kafka
-
-import (
- "crypto/tls"
- "crypto/x509"
- "errors"
- "fmt"
-
- "github.com/IBM/sarama"
-)
-
-func updatePasswordAuthInfo(config *sarama.Config, metadata *kafkaMetadata, saslUsername, saslPassword string) {
- config.Net.SASL.Enable = true
- config.Net.SASL.User = saslUsername
- config.Net.SASL.Password = saslPassword
- switch metadata.SaslMechanism {
- case "SHA-256":
- config.Net.SASL.SCRAMClientGeneratorFunc = func() sarama.SCRAMClient { return &XDGSCRAMClient{HashGeneratorFcn: SHA256} }
- config.Net.SASL.Mechanism = sarama.SASLTypeSCRAMSHA256
- case "SHA-512":
- config.Net.SASL.SCRAMClientGeneratorFunc = func() sarama.SCRAMClient { return &XDGSCRAMClient{HashGeneratorFcn: SHA512} }
- config.Net.SASL.Mechanism = sarama.SASLTypeSCRAMSHA512
- default:
- config.Net.SASL.Mechanism = sarama.SASLTypePlaintext
- }
-}
-
-func updateMTLSAuthInfo(config *sarama.Config, metadata *kafkaMetadata) error {
- if metadata.TLSDisable {
- return fmt.Errorf("kafka: cannot configure mTLS authentication when TLSDisable is 'true'")
- }
- cert, err := tls.X509KeyPair([]byte(metadata.TLSClientCert), []byte(metadata.TLSClientKey))
- if err != nil {
- return fmt.Errorf("unable to load client certificate and key pair. Err: %w", err)
- }
- config.Net.TLS.Config.Certificates = []tls.Certificate{cert}
- return nil
-}
-
-func updateTLSConfig(config *sarama.Config, metadata *kafkaMetadata) error {
- if metadata.TLSDisable || metadata.AuthType == noAuthType {
- config.Net.TLS.Enable = false
- return nil
- }
- config.Net.TLS.Enable = true
-
- if !metadata.TLSSkipVerify && metadata.TLSCaCert == "" {
- return nil
- }
- //nolint:gosec
- config.Net.TLS.Config = &tls.Config{InsecureSkipVerify: metadata.TLSSkipVerify, MinVersion: tls.VersionTLS12}
- if metadata.TLSCaCert != "" {
- caCertPool := x509.NewCertPool()
- if ok := caCertPool.AppendCertsFromPEM([]byte(metadata.TLSCaCert)); !ok {
- return errors.New("kafka error: unable to load ca certificate")
- }
- config.Net.TLS.Config.RootCAs = caCertPool
- }
-
- return nil
-}
-
-func updateOidcAuthInfo(config *sarama.Config, metadata *kafkaMetadata) error {
- tokenProvider := newOAuthTokenSource(metadata.OidcTokenEndpoint, metadata.OidcClientID, metadata.OidcClientSecret, metadata.OidcScopes)
-
- if metadata.TLSCaCert != "" {
- err := tokenProvider.addCa(metadata.TLSCaCert)
- if err != nil {
- return fmt.Errorf("kafka: error setting oauth client trusted CA: %w", err)
- }
- }
-
- tokenProvider.skipCaVerify = metadata.TLSSkipVerify
-
- config.Net.SASL.Enable = true
- config.Net.SASL.Mechanism = sarama.SASLTypeOAuth
- config.Net.SASL.TokenProvider = &tokenProvider
-
- return nil
-}
diff --git a/engines/kafka/consumer.go b/engines/kafka/consumer.go
deleted file mode 100644
index 5e7e846..0000000
--- a/engines/kafka/consumer.go
+++ /dev/null
@@ -1,229 +0,0 @@
-/*
-Copyright 2021 The Dapr Authors
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
- http://www.apache.org/licenses/LICENSE-2.0
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-*/
-
-package kafka
-
-import (
- "context"
- "errors"
- "fmt"
- "sync"
- "sync/atomic"
- "time"
-
- "github.com/IBM/sarama"
- "github.com/cenkalti/backoff/v4"
-
- "github.com/apecloud/dbctl/engines/kafka/thirdparty"
-)
-
-type consumer struct {
- k *Kafka
- ready chan bool
- running chan struct{}
- stopped atomic.Bool
- once sync.Once
-}
-
-func (consumer *consumer) ConsumeClaim(session sarama.ConsumerGroupSession, claim sarama.ConsumerGroupClaim) error {
- b := consumer.k.backOffConfig.NewBackOffWithContext(session.Context())
-
- for {
- select {
- case message, ok := <-claim.Messages():
- if !ok {
- return nil
- }
-
- if consumer.k.consumeRetryEnabled {
- if err := thirdparty.NotifyRecover(func() error {
- return consumer.doCallback(session, message)
- }, b, func(err error, d time.Duration) {
- consumer.k.logger.Error(err, fmt.Sprintf("Error processing Kafka message: %s/%d/%d [key=%s]. Retrying...", message.Topic, message.Partition, message.Offset, asBase64String(message.Key)))
- }, func() {
- consumer.k.logger.Info(fmt.Sprintf("Successfully processed Kafka message after it previously failed: %s/%d/%d [key=%s]", message.Topic, message.Partition, message.Offset, asBase64String(message.Key)))
- }); err != nil {
- consumer.k.logger.Error(err, fmt.Sprintf("Too many failed attempts at processing Kafka message: %s/%d/%d [key=%s]. ", message.Topic, message.Partition, message.Offset, asBase64String(message.Key)))
- }
- } else {
- err := consumer.doCallback(session, message)
- if err != nil {
- consumer.k.logger.Error(err, "Error processing Kafka message: %s/%d/%d [key=%s].", message.Topic, message.Partition, message.Offset, asBase64String(message.Key))
- }
- }
- // Should return when `session.Context()` is done.
- // If not, will raise `ErrRebalanceInProgress` or `read tcp :: i/o timeout` when kafka rebalance. see:
- // https://github.com/IBM/sarama/issues/1192
- case <-session.Context().Done():
- return nil
- }
- }
-}
-
-func (consumer *consumer) doCallback(session sarama.ConsumerGroupSession, message *sarama.ConsumerMessage) error {
- consumer.k.logger.Info(fmt.Sprintf("Processing Kafka message: %s/%d/%d [key=%s]", message.Topic, message.Partition, message.Offset, asBase64String(message.Key)))
- handlerConfig, err := consumer.k.GetTopicHandlerConfig(message.Topic)
- if err != nil {
- return err
- }
- if !handlerConfig.IsBulkSubscribe && handlerConfig.Handler == nil {
- return errors.New("invalid handler config for subscribe call")
- }
- event := NewEvent{
- Topic: message.Topic,
- Data: message.Value,
- }
- // This is true only when headers are set (Kafka > 0.11)
- if len(message.Headers) > 0 {
- event.Metadata = make(map[string]string, len(message.Headers))
- for _, header := range message.Headers {
- event.Metadata[string(header.Key)] = string(header.Value)
- }
- }
- err = handlerConfig.Handler(session.Context(), &event)
- if err == nil {
- session.MarkMessage(message, "")
- }
- return err
-}
-
-func (consumer *consumer) Cleanup(sarama.ConsumerGroupSession) error {
- return nil
-}
-
-func (consumer *consumer) Setup(sarama.ConsumerGroupSession) error {
- consumer.once.Do(func() {
- close(consumer.ready)
- })
-
- return nil
-}
-
-// AddTopicHandler adds a handler and configuration for a topic
-func (k *Kafka) AddTopicHandler(topic string, handlerConfig SubscriptionHandlerConfig) {
- k.subscribeLock.Lock()
- k.subscribeTopics[topic] = handlerConfig
- k.subscribeLock.Unlock()
-}
-
-// RemoveTopicHandler removes a topic handler
-func (k *Kafka) RemoveTopicHandler(topic string) {
- k.subscribeLock.Lock()
- delete(k.subscribeTopics, topic)
- k.subscribeLock.Unlock()
-}
-
-// GetTopicHandlerConfig returns the handlerConfig for a topic
-func (k *Kafka) GetTopicHandlerConfig(topic string) (SubscriptionHandlerConfig, error) {
- handlerConfig, ok := k.subscribeTopics[topic]
- if ok && (!handlerConfig.IsBulkSubscribe && handlerConfig.Handler != nil) {
- return handlerConfig, nil
- }
- return SubscriptionHandlerConfig{},
- fmt.Errorf("any handler for messages of topic %s not found", topic)
-}
-
-// Subscribe to topic in the Kafka cluster, in a background goroutine
-func (k *Kafka) Subscribe(ctx context.Context) error {
- if k.consumerGroup == "" {
- return errors.New("kafka: consumerGroup must be set to subscribe")
- }
-
- k.subscribeLock.Lock()
- defer k.subscribeLock.Unlock()
-
- // Close resources and reset synchronization primitives
- k.closeSubscriptionResources()
-
- topics := k.subscribeTopics.TopicList()
- if len(topics) == 0 {
- // Nothing to subscribe to
- return nil
- }
-
- cg, err := sarama.NewConsumerGroup(k.brokers, k.consumerGroup, k.config)
- if err != nil {
- return err
- }
-
- k.cg = cg
-
- ready := make(chan bool)
- k.consumer = consumer{
- k: k,
- ready: ready,
- running: make(chan struct{}),
- }
-
- go func() {
- k.logger.Info("Subscribed and listening to topics", "topics", topics)
-
- for {
- // If the context was cancelled, as is the case when handling SIGINT and SIGTERM below, then this pops
- // us out of the consume loop
- if ctx.Err() != nil {
- break
- }
-
- k.logger.Info("Starting loop to consume.")
-
- // Consume the requested topics
- bo := backoff.WithContext(backoff.NewConstantBackOff(k.consumeRetryInterval), ctx)
- innerErr := thirdparty.NotifyRecover(func() error {
- if ctxErr := ctx.Err(); ctxErr != nil {
- return backoff.Permanent(ctxErr)
- }
- return k.cg.Consume(ctx, topics, &(k.consumer))
- }, bo, func(err error, t time.Duration) {
- k.logger.Error(err, fmt.Sprintf("Error consuming %v. Retrying...", topics))
- }, func() {
- k.logger.Info(fmt.Sprintf("Recovered consuming %v", topics))
- })
- if innerErr != nil && !errors.Is(innerErr, context.Canceled) {
- k.logger.Error(innerErr, fmt.Sprintf("Permanent error consuming %v", topics))
- }
- }
-
- k.logger.Info(fmt.Sprintf("Closing ConsumerGroup for topics: %v", topics))
- err := k.cg.Close()
- if err != nil {
- k.logger.Error(err, "Error closing consumer group")
- }
-
- // Ensure running channel is only closed once.
- if k.consumer.stopped.CompareAndSwap(false, true) {
- close(k.consumer.running)
- }
- }()
-
- <-ready
-
- return nil
-}
-
-// Close down consumer group resources, refresh once.
-func (k *Kafka) closeSubscriptionResources() {
- if k.cg != nil {
- err := k.cg.Close()
- if err != nil {
- k.logger.Error(err, "Error closing consumer group")
- }
-
- k.consumer.once.Do(func() {
- // Wait for shutdown to be complete
- <-k.consumer.running
- close(k.consumer.ready)
- k.consumer.once = sync.Once{}
- })
- }
-}
diff --git a/engines/kafka/kafka.go b/engines/kafka/kafka.go
deleted file mode 100644
index 416f0c8..0000000
--- a/engines/kafka/kafka.go
+++ /dev/null
@@ -1,230 +0,0 @@
-/*
-Copyright 2021 The Dapr Authors
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
- http://www.apache.org/licenses/LICENSE-2.0
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-*/
-
-package kafka
-
-import (
- "context"
- "sync"
- "time"
-
- "github.com/IBM/sarama"
- "github.com/go-logr/logr"
-
- "github.com/apecloud/dbctl/engines/kafka/thirdparty"
-)
-
-// Kafka allows reading/writing to a Kafka consumer group.
-type Kafka struct {
- Producer sarama.SyncProducer
- broker *sarama.Broker
- consumerGroup string
- brokers []string
- logger logr.Logger
- authType string
- saslUsername string
- saslPassword string
- initialOffset int64
- cg sarama.ConsumerGroup
- consumer consumer
- config *sarama.Config
- subscribeTopics TopicHandlerConfig
- subscribeLock sync.Mutex
-
- backOffConfig thirdparty.Config
-
- // The default value should be true for kafka pubsub component and false for kafka binding component
- // This default value can be overridden by metadata consumeRetryEnabled
- DefaultConsumeRetryEnabled bool
- consumeRetryEnabled bool
- consumeRetryInterval time.Duration
-}
-
-func NewKafka(logger logr.Logger) *Kafka {
- return &Kafka{
- logger: logger,
- subscribeTopics: make(TopicHandlerConfig),
- subscribeLock: sync.Mutex{},
- }
-}
-
-// Init does metadata parsing and connection establishment.
-func (k *Kafka) Init(_ context.Context, metadata map[string]string) error {
- upgradedMetadata, err := k.upgradeMetadata(metadata)
- if err != nil {
- return err
- }
-
- meta, err := k.getKafkaMetadata(upgradedMetadata)
- if err != nil {
- return err
- }
-
- k.brokers = meta.Brokers
- k.consumerGroup = meta.ConsumerGroup
- k.initialOffset = meta.InitialOffset
- k.authType = meta.AuthType
-
- k.broker = sarama.NewBroker(k.brokers[0])
-
- config := sarama.NewConfig()
- config.Version = meta.Version
- config.Consumer.Offsets.Initial = k.initialOffset
-
- if meta.ClientID != "" {
- config.ClientID = meta.ClientID
- }
-
- err = updateTLSConfig(config, meta)
- if err != nil {
- return err
- }
-
- switch k.authType {
- case oidcAuthType:
- k.logger.Info("Configuring SASL OAuth2/OIDC authentication")
- err = updateOidcAuthInfo(config, meta)
- if err != nil {
- return err
- }
- case passwordAuthType:
- k.logger.Info("Configuring SASL Password authentication")
- k.saslUsername = meta.SaslUsername
- k.saslPassword = meta.SaslPassword
- updatePasswordAuthInfo(config, meta, k.saslUsername, k.saslPassword)
- case mtlsAuthType:
- k.logger.Info("Configuring mTLS authentcation")
- err = updateMTLSAuthInfo(config, meta)
- if err != nil {
- return err
- }
- }
-
- k.config = config
- sarama.Logger = SaramaLogBridge{logger: k.logger}
-
- k.Producer, err = getSyncProducer(*k.config, k.brokers, meta.MaxMessageBytes)
- if err != nil {
- return err
- }
-
- // Default retry configuration is used if no
- // backOff properties are set.
- if err := thirdparty.DecodeConfigWithPrefix(
- &k.backOffConfig,
- metadata,
- "backOff"); err != nil {
- return err
- }
- k.consumeRetryEnabled = meta.ConsumeRetryEnabled
- k.consumeRetryInterval = meta.ConsumeRetryInterval
-
- k.logger.Info("Kafka message bus initialization complete")
-
- return nil
-}
-
-func (k *Kafka) Close() (err error) {
- k.closeSubscriptionResources()
-
- if k.Producer != nil {
- err = k.Producer.Close()
- k.Producer = nil
- }
-
- return err
-}
-
-// EventHandler is the handler used to handle the subscribed event.
-type EventHandler func(ctx context.Context, msg *NewEvent) error
-
-// BulkEventHandler is the handler used to handle the subscribed bulk event.
-// type BulkEventHandler func(ctx context.Context, msg *KafkaBulkMessage) ([]pubsub.BulkSubscribeResponseEntry, error)
-
-// SubscriptionHandlerConfig is the handler and configuration for subscription.
-type SubscriptionHandlerConfig struct {
- IsBulkSubscribe bool
- Handler EventHandler
-}
-
-// NewEvent is an event arriving from a message bus instance.
-type NewEvent struct {
- Data []byte `json:"data"`
- Topic string `json:"topic"`
- Metadata map[string]string `json:"metadata"`
- ContentType *string `json:"contentType,omitempty"`
-}
-
-// KafkaBulkMessage is a bulk event arriving from a message bus instance.
-type KafkaBulkMessage struct {
- Entries []KafkaBulkMessageEntry `json:"entries"`
- Topic string `json:"topic"`
- Metadata map[string]string `json:"metadata"`
-}
-
-// KafkaBulkMessageEntry is an item contained inside bulk event arriving from a message bus instance.
-type KafkaBulkMessageEntry struct {
- EntryID string `json:"entryId"` //nolint:stylecheck
- Event []byte `json:"event"`
- ContentType string `json:"contentType,omitempty"`
- Metadata map[string]string `json:"metadata"`
-}
-
-func (k *Kafka) BrokerOpen() error {
- connected, err := k.broker.Connected()
- if err != nil {
- k.logger.Info("broker connected err:%v", err)
- return err
- }
- if !connected {
- err = k.broker.Open(k.config)
- if err != nil {
- k.logger.Info("broker connected err:%v", err)
- return err
- }
- }
-
- return nil
-}
-
-func (k *Kafka) BrokerClose() {
- _ = k.broker.Close()
-}
-
-func (k *Kafka) BrokerCreateTopics(topic string) error {
- req := &sarama.CreateTopicsRequest{
- Version: 1,
- TopicDetails: map[string]*sarama.TopicDetail{
- topic: {
- NumPartitions: -1,
- ReplicationFactor: -1,
- },
- },
- Timeout: time.Second,
- ValidateOnly: false,
- }
-
- resp, err := k.broker.CreateTopics(req)
- if err != nil {
- k.logger.Error(err, "CheckStatus error")
- return err
- } else {
- respErr := resp.TopicErrors[topic]
- // ErrNo details: https://cwiki.apache.org/confluence/display/KAFKA/A+Guide+To+The+Kafka+Protocol#AGuideToTheKafkaProtocol-ErrorCodes
- if respErr.Err != 0 {
- k.logger.Error(respErr, "CheckStatus error", "errNo", int16(respErr.Err))
- return respErr
- }
- return nil
- }
-}
diff --git a/engines/kafka/metadata.go b/engines/kafka/metadata.go
deleted file mode 100644
index 635c5e3..0000000
--- a/engines/kafka/metadata.go
+++ /dev/null
@@ -1,283 +0,0 @@
-/*
-Copyright 2021 The Dapr Authors
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
- http://www.apache.org/licenses/LICENSE-2.0
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-*/
-
-package kafka
-
-import (
- "errors"
- "fmt"
- "strconv"
- "strings"
- "time"
-
- "github.com/IBM/sarama"
-)
-
-const (
- key = "partitionKey"
- skipVerify = "skipVerify"
- caCert = "caCert"
- clientCert = "clientCert"
- clientKey = "clientKey"
- consumeRetryEnabled = "consumeRetryEnabled"
- consumeRetryInterval = "consumeRetryInterval"
- authType = "authType"
- passwordAuthType = "password"
- oidcAuthType = "oidc"
- mtlsAuthType = "mtls"
- noAuthType = "none"
-)
-
-type kafkaMetadata struct {
- Brokers []string
- ConsumerGroup string
- ClientID string
- AuthType string
- SaslUsername string
- SaslPassword string
- SaslMechanism string
- InitialOffset int64
- MaxMessageBytes int
- OidcTokenEndpoint string
- OidcClientID string
- OidcClientSecret string
- OidcScopes []string
- TLSDisable bool
- TLSSkipVerify bool
- TLSCaCert string
- TLSClientCert string
- TLSClientKey string
- ConsumeRetryEnabled bool
- ConsumeRetryInterval time.Duration
- Version sarama.KafkaVersion
-}
-
-// upgradeMetadata updates metadata properties based on deprecated usage.
-func (k *Kafka) upgradeMetadata(metadata map[string]string) (map[string]string, error) {
- authTypeVal, authTypePres := metadata[authType]
- authReqVal, authReqPres := metadata["authRequired"]
- saslPassVal, saslPassPres := metadata["saslPassword"]
-
- // If authType is not set, derive it from authRequired.
- if (!authTypePres || authTypeVal == "") && authReqPres && authReqVal != "" {
- k.logger.Info("AuthRequired is deprecated, use AuthType instead.")
- validAuthRequired, err := strconv.ParseBool(authReqVal)
- if err == nil {
- if validAuthRequired {
- // If legacy authRequired was used, either SASL username or mtls is the method.
- if saslPassPres && saslPassVal != "" {
- // User has specified saslPassword, so intend for password auth.
- metadata[authType] = passwordAuthType
- } else {
- metadata[authType] = mtlsAuthType
- }
- } else {
- metadata[authType] = noAuthType
- }
- } else {
- return metadata, errors.New("kafka error: invalid value for 'authRequired' attribute")
- }
- }
-
- // if consumeRetryEnabled is not present, use component default value
- consumeRetryEnabledVal, consumeRetryEnabledPres := metadata[consumeRetryEnabled]
- if !consumeRetryEnabledPres || consumeRetryEnabledVal == "" {
- metadata[consumeRetryEnabled] = strconv.FormatBool(k.DefaultConsumeRetryEnabled)
- }
-
- return metadata, nil
-}
-
-// getKafkaMetadata returns new Kafka metadata.
-func (k *Kafka) getKafkaMetadata(metadata map[string]string) (*kafkaMetadata, error) {
- meta := kafkaMetadata{
- ConsumeRetryInterval: 100 * time.Millisecond,
- }
- // use the runtimeConfig.ID as the consumer group so that each dapr runtime creates its own consumergroup
- if val, ok := metadata["consumerID"]; ok && val != "" {
- meta.ConsumerGroup = val
- k.logger.Info(fmt.Sprintf("Using %s as ConsumerGroup", meta.ConsumerGroup))
- }
-
- if val, ok := metadata["consumerGroup"]; ok && val != "" {
- meta.ConsumerGroup = val
- k.logger.Info(fmt.Sprintf("Using %s as ConsumerGroup", meta.ConsumerGroup))
- }
-
- if val, ok := metadata["clientID"]; ok && val != "" {
- meta.ClientID = val
- k.logger.Info(fmt.Sprintf("Using %s as ClientID", meta.ClientID))
- }
-
- if val, ok := metadata["saslMechanism"]; ok && val != "" {
- meta.SaslMechanism = val
- k.logger.Info(fmt.Sprintf("Using %s as saslMechanism", meta.SaslMechanism))
- }
-
- initialOffset, err := parseInitialOffset(metadata["initialOffset"])
- if err != nil {
- return nil, err
- }
- meta.InitialOffset = initialOffset
-
- if val, ok := metadata["brokers"]; ok && val != "" {
- meta.Brokers = strings.Split(val, ",")
- } else {
- return nil, errors.New("kafka error: missing 'brokers' attribute")
- }
-
- k.logger.Info("Found brokers", "brokers", meta.Brokers)
-
- val, ok := metadata["authType"]
- if !ok {
- return nil, errors.New("kafka error: missing 'authType' attribute")
- }
- if val == "" {
- return nil, errors.New("kafka error: 'authType' attribute was empty")
- }
-
- switch strings.ToLower(val) {
- case passwordAuthType:
- meta.AuthType = val
- if val, ok = metadata["saslUsername"]; ok && val != "" {
- meta.SaslUsername = val
- } else {
- return nil, errors.New("kafka error: missing SASL Username for authType 'password'")
- }
-
- if val, ok = metadata["saslPassword"]; ok && val != "" {
- meta.SaslPassword = val
- } else {
- return nil, errors.New("kafka error: missing SASL Password for authType 'password'")
- }
- k.logger.Info("Configuring SASL password authentication.")
- case oidcAuthType:
- meta.AuthType = val
- if val, ok = metadata["oidcTokenEndpoint"]; ok && val != "" {
- meta.OidcTokenEndpoint = val
- } else {
- return nil, errors.New("kafka error: missing OIDC Token Endpoint for authType 'oidc'")
- }
- if val, ok = metadata["oidcClientID"]; ok && val != "" {
- meta.OidcClientID = val
- } else {
- return nil, errors.New("kafka error: missing OIDC Client ID for authType 'oidc'")
- }
- if val, ok = metadata["oidcClientSecret"]; ok && val != "" {
- meta.OidcClientSecret = val
- } else {
- return nil, errors.New("kafka error: missing OIDC Client Secret for authType 'oidc'")
- }
- if val, ok = metadata["oidcScopes"]; ok && val != "" {
- meta.OidcScopes = strings.Split(val, ",")
- } else {
- k.logger.Info("Warning: no OIDC scopes specified, using default 'openid' scope only. This is a security risk for token reuse.")
- meta.OidcScopes = []string{"openid"}
- }
- k.logger.Info("Configuring SASL token authentication via OIDC.")
- case mtlsAuthType:
- meta.AuthType = val
- if val, ok = metadata[clientCert]; ok && val != "" {
- if !isValidPEM(val) {
- return nil, errors.New("kafka error: invalid client certificate")
- }
- meta.TLSClientCert = val
- }
- if val, ok = metadata[clientKey]; ok && val != "" {
- if !isValidPEM(val) {
- return nil, errors.New("kafka error: invalid client key")
- }
- meta.TLSClientKey = val
- }
- // clientKey and clientCert need to be all specified or all not specified.
- if (meta.TLSClientKey == "") != (meta.TLSClientCert == "") {
- return nil, errors.New("kafka error: clientKey or clientCert is missing")
- }
- k.logger.Info("Configuring mTLS authentication.")
- case noAuthType:
- meta.AuthType = val
- k.logger.Info("No authentication configured.")
- default:
- return nil, errors.New("kafka error: invalid value for 'authType' attribute")
- }
-
- if val, ok := metadata["maxMessageBytes"]; ok && val != "" {
- maxBytes, err := strconv.Atoi(val)
- if err != nil {
- return nil, fmt.Errorf("kafka error: cannot parse maxMessageBytes: %w", err)
- }
-
- meta.MaxMessageBytes = maxBytes
- }
-
- if val, ok := metadata[caCert]; ok && val != "" {
- if !isValidPEM(val) {
- return nil, errors.New("kafka error: invalid ca certificate")
- }
- meta.TLSCaCert = val
- }
-
- if val, ok := metadata["disableTls"]; ok && val != "" {
- boolVal, err := strconv.ParseBool(val)
- if err != nil {
- return nil, fmt.Errorf("kafka: invalid value for 'tlsDisable' attribute: %w", err)
- }
- meta.TLSDisable = boolVal
- if meta.TLSDisable {
- k.logger.Info("kafka: TLS connectivity to broker disabled")
- }
- }
-
- if val, ok := metadata[skipVerify]; ok && val != "" {
- boolVal, err := strconv.ParseBool(val)
- if err != nil {
- return nil, fmt.Errorf("kafka error: invalid value for '%s' attribute: %w", skipVerify, err)
- }
- meta.TLSSkipVerify = boolVal
- if boolVal {
- k.logger.Info("kafka: you are using 'skipVerify' to skip server config verify which is unsafe!")
- }
- }
-
- if val, ok := metadata[consumeRetryEnabled]; ok && val != "" {
- boolVal, err := strconv.ParseBool(val)
- if err != nil {
- return nil, fmt.Errorf("kafka error: invalid value for '%s' attribute: %w", consumeRetryEnabled, err)
- }
- meta.ConsumeRetryEnabled = boolVal
- }
-
- if val, ok := metadata[consumeRetryInterval]; ok && val != "" {
- durationVal, err := time.ParseDuration(val)
- if err != nil {
- intVal, err := strconv.ParseUint(val, 10, 32)
- if err != nil {
- return nil, fmt.Errorf("kafka error: invalid value for '%s' attribute: %w", consumeRetryInterval, err)
- }
- durationVal = time.Duration(intVal) * time.Millisecond
- }
- meta.ConsumeRetryInterval = durationVal
- }
-
- if val, ok := metadata["version"]; ok && val != "" {
- version, err := sarama.ParseKafkaVersion(val)
- if err != nil {
- return nil, errors.New("kafka error: invalid kafka version")
- }
- meta.Version = version
- } else {
- meta.Version = sarama.V2_0_0_0 //nolint:nosnakecase
- }
-
- return &meta, nil
-}
diff --git a/engines/kafka/metadata_test.go b/engines/kafka/metadata_test.go
deleted file mode 100644
index a155c65..0000000
--- a/engines/kafka/metadata_test.go
+++ /dev/null
@@ -1,325 +0,0 @@
-/*
-Copyright 2021 The Dapr Authors
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
- http://www.apache.org/licenses/LICENSE-2.0
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-*/
-
-package kafka
-
-import (
- "fmt"
- "testing"
- "time"
-
- "github.com/go-logr/zapr"
- "go.uber.org/zap"
-
- "github.com/IBM/sarama"
- "github.com/stretchr/testify/require"
-)
-
-var (
- clientCertPemMock = `-----BEGIN CERTIFICATE-----
-Y2xpZW50Q2VydA==
------END CERTIFICATE-----`
- clientKeyMock = `-----BEGIN RSA PRIVATE KEY-----
-Y2xpZW50S2V5
------END RSA PRIVATE KEY-----`
- caCertMock = `-----BEGIN CERTIFICATE-----
-Y2FDZXJ0
------END CERTIFICATE-----`
-)
-
-func getKafka() *Kafka {
- development, _ := zap.NewDevelopment()
- return &Kafka{logger: zapr.NewLogger(development)}
-}
-
-func getBaseMetadata() map[string]string {
- return map[string]string{"consumerGroup": "a", "clientID": "a", "brokers": "a", "disableTls": "true", "authType": mtlsAuthType, "maxMessageBytes": "2048", "initialOffset": "newest"}
-}
-
-func getCompleteMetadata() map[string]string {
- return map[string]string{
- "consumerGroup": "a", "clientID": "a", "brokers": "a", "authType": mtlsAuthType, "maxMessageBytes": "2048",
- skipVerify: "true", clientCert: clientCertPemMock, clientKey: clientKeyMock, caCert: caCertMock,
- "consumeRetryInterval": "200", "initialOffset": "newest",
- }
-}
-
-func TestParseMetadata(t *testing.T) {
- k := getKafka()
- t.Run("default kafka version", func(t *testing.T) {
- m := getCompleteMetadata()
- meta, err := k.getKafkaMetadata(m)
- require.NoError(t, err)
- require.NotNil(t, meta)
- assertMetadata(t, meta)
- require.Equal(t, sarama.V2_0_0_0, meta.Version) //nolint:nosnakecase
- })
-
- t.Run("specific kafka version", func(t *testing.T) {
- m := getCompleteMetadata()
- m["version"] = "0.10.2.0"
- meta, err := k.getKafkaMetadata(m)
- require.NoError(t, err)
- require.NotNil(t, meta)
- assertMetadata(t, meta)
- require.Equal(t, sarama.V0_10_2_0, meta.Version) //nolint:nosnakecase
- })
-
- t.Run("invalid kafka version", func(t *testing.T) {
- m := getCompleteMetadata()
- m["version"] = "not_valid_version"
- meta, err := k.getKafkaMetadata(m)
- require.Error(t, err)
- require.Nil(t, meta)
- require.Equal(t, "kafka error: invalid kafka version", err.Error())
- })
-}
-
-func assertMetadata(t *testing.T, meta *kafkaMetadata) {
- require.Equal(t, "a", meta.Brokers[0])
- require.Equal(t, "a", meta.ConsumerGroup)
- require.Equal(t, "a", meta.ClientID)
- require.Equal(t, 2048, meta.MaxMessageBytes)
- require.Equal(t, true, meta.TLSSkipVerify)
- require.Equal(t, clientCertPemMock, meta.TLSClientCert)
- require.Equal(t, clientKeyMock, meta.TLSClientKey)
- require.Equal(t, caCertMock, meta.TLSCaCert)
- require.Equal(t, 200*time.Millisecond, meta.ConsumeRetryInterval)
-}
-
-func TestMissingBrokers(t *testing.T) {
- m := map[string]string{"initialOffset": "newest"}
- k := getKafka()
- meta, err := k.getKafkaMetadata(m)
- require.Error(t, err)
- require.Nil(t, meta)
-
- require.Equal(t, "kafka error: missing 'brokers' attribute", err.Error())
-}
-
-func TestMissingAuthType(t *testing.T) {
- m := map[string]string{"brokers": "kafka.com:9092", "initialOffset": "newest"}
- k := getKafka()
- meta, err := k.getKafkaMetadata(m)
- require.Error(t, err)
- require.Nil(t, meta)
-
- require.Equal(t, "kafka error: missing 'authType' attribute", err.Error())
-}
-
-func TestMetadataUpgradeNoAuth(t *testing.T) {
- m := map[string]string{"brokers": "akfak.com:9092", "initialOffset": "newest", "authRequired": "false"}
- k := getKafka()
- upgraded, err := k.upgradeMetadata(m)
- require.Nil(t, err)
- require.Equal(t, noAuthType, upgraded["authType"])
-}
-
-func TestMetadataUpgradePasswordAuth(t *testing.T) {
- k := getKafka()
- m := map[string]string{"brokers": "akfak.com:9092", "initialOffset": "newest", "authRequired": "true", "saslPassword": "sassapass"}
- upgraded, err := k.upgradeMetadata(m)
- require.Nil(t, err)
- require.Equal(t, passwordAuthType, upgraded["authType"])
-}
-
-func TestMetadataUpgradePasswordMTLSAuth(t *testing.T) {
- k := getKafka()
- m := map[string]string{"brokers": "akfak.com:9092", "initialOffset": "newest", "authRequired": "true"}
- upgraded, err := k.upgradeMetadata(m)
- require.Nil(t, err)
- require.Equal(t, mtlsAuthType, upgraded["authType"])
-}
-
-func TestMissingSaslValues(t *testing.T) {
- k := getKafka()
- m := map[string]string{"brokers": "akfak.com:9092", "initialOffset": "newest", "authType": "password"}
- meta, err := k.getKafkaMetadata(m)
- require.Error(t, err)
- require.Nil(t, meta)
-
- require.Equal(t, fmt.Sprintf("kafka error: missing SASL Username for authType '%s'", passwordAuthType), err.Error())
-
- m["saslUsername"] = "sassafras"
-
- meta, err = k.getKafkaMetadata(m)
- require.Error(t, err)
- require.Nil(t, meta)
-
- require.Equal(t, fmt.Sprintf("kafka error: missing SASL Password for authType '%s'", passwordAuthType), err.Error())
-}
-
-func TestMissingSaslValuesOnUpgrade(t *testing.T) {
- k := getKafka()
- m := map[string]string{"brokers": "akfak.com:9092", "initialOffset": "newest", "authRequired": "true", "saslPassword": "sassapass"}
- upgraded, err := k.upgradeMetadata(m)
- require.Nil(t, err)
- meta, err := k.getKafkaMetadata(upgraded)
- require.Error(t, err)
- require.Nil(t, meta)
-
- require.Equal(t, fmt.Sprintf("kafka error: missing SASL Username for authType '%s'", passwordAuthType), err.Error())
-}
-
-func TestMissingOidcValues(t *testing.T) {
- k := getKafka()
- m := map[string]string{"brokers": "akfak.com:9092", "initialOffset": "newest", "authType": oidcAuthType}
- meta, err := k.getKafkaMetadata(m)
- require.Error(t, err)
- require.Nil(t, meta)
- require.Equal(t, fmt.Sprintf("kafka error: missing OIDC Token Endpoint for authType '%s'", oidcAuthType), err.Error())
-
- m["oidcTokenEndpoint"] = "https://sassa.fra/"
- meta, err = k.getKafkaMetadata(m)
- require.Error(t, err)
- require.Nil(t, meta)
- require.Equal(t, fmt.Sprintf("kafka error: missing OIDC Client ID for authType '%s'", oidcAuthType), err.Error())
-
- m["oidcClientID"] = "sassafras"
- meta, err = k.getKafkaMetadata(m)
- require.Error(t, err)
- require.Nil(t, meta)
- require.Equal(t, fmt.Sprintf("kafka error: missing OIDC Client Secret for authType '%s'", oidcAuthType), err.Error())
-
- // Check if missing scopes causes the default 'openid' to be used.
- m["oidcClientSecret"] = "sassapass"
- meta, err = k.getKafkaMetadata(m)
- require.Nil(t, err)
- require.Contains(t, meta.OidcScopes, "openid")
-}
-
-func TestPresentSaslValues(t *testing.T) {
- k := getKafka()
- m := map[string]string{
- "brokers": "akfak.com:9092",
- "authType": passwordAuthType,
- "saslUsername": "sassafras",
- "saslPassword": "sassapass",
- "initialOffset": "newest",
- }
- meta, err := k.getKafkaMetadata(m)
- require.NoError(t, err)
- require.NotNil(t, meta)
-
- require.Equal(t, "sassafras", meta.SaslUsername)
- require.Equal(t, "sassapass", meta.SaslPassword)
-}
-
-func TestPresentOidcValues(t *testing.T) {
- k := getKafka()
- m := map[string]string{
- "brokers": "akfak.com:9092",
- "authType": oidcAuthType,
- "oidcTokenEndpoint": "https://sassa.fras",
- "oidcClientID": "sassafras",
- "oidcClientSecret": "sassapass",
- "oidcScopes": "akfak",
- "initialOffset": "newest",
- }
- meta, err := k.getKafkaMetadata(m)
- require.NoError(t, err)
- require.NotNil(t, meta)
-
- require.Equal(t, "https://sassa.fras", meta.OidcTokenEndpoint)
- require.Equal(t, "sassafras", meta.OidcClientID)
- require.Equal(t, "sassapass", meta.OidcClientSecret)
- require.Contains(t, meta.OidcScopes, "akfak")
-}
-
-func TestInvalidAuthRequiredFlag(t *testing.T) {
- m := map[string]string{"brokers": "akfak.com:9092", "authRequired": "maybe?????????????"}
- k := getKafka()
- _, err := k.upgradeMetadata(m)
- require.Error(t, err)
-
- require.Equal(t, "kafka error: invalid value for 'authRequired' attribute", err.Error())
-}
-
-func TestInitialOffset(t *testing.T) {
- m := map[string]string{"consumerGroup": "a", "brokers": "a", "authRequired": "false", "initialOffset": "oldest"}
- k := getKafka()
- upgraded, err := k.upgradeMetadata(m)
- require.NoError(t, err)
- meta, err := k.getKafkaMetadata(upgraded)
- require.NoError(t, err)
- require.Equal(t, sarama.OffsetOldest, meta.InitialOffset)
- m["initialOffset"] = "newest"
- meta, err = k.getKafkaMetadata(m)
- require.NoError(t, err)
- require.Equal(t, sarama.OffsetNewest, meta.InitialOffset)
-}
-
-func TestTls(t *testing.T) {
- k := getKafka()
-
- t.Run("disable tls", func(t *testing.T) {
- m := getBaseMetadata()
- meta, err := k.getKafkaMetadata(m)
- require.NoError(t, err)
- require.NotNil(t, meta)
- c := &sarama.Config{}
- err = updateTLSConfig(c, meta)
- require.NoError(t, err)
- require.Equal(t, false, c.Net.TLS.Enable)
- })
-
- t.Run("wrong client cert format", func(t *testing.T) {
- m := getBaseMetadata()
- m[clientCert] = "clientCert"
- meta, err := k.getKafkaMetadata(m)
- require.Error(t, err)
- require.Nil(t, meta)
-
- require.Equal(t, "kafka error: invalid client certificate", err.Error())
- })
-
- t.Run("wrong client key format", func(t *testing.T) {
- m := getBaseMetadata()
- m[clientKey] = "clientKey"
- meta, err := k.getKafkaMetadata(m)
- require.Error(t, err)
- require.Nil(t, meta)
-
- require.Equal(t, "kafka error: invalid client key", err.Error())
- })
-
- t.Run("miss client key", func(t *testing.T) {
- m := getBaseMetadata()
- m[clientCert] = clientCertPemMock
- meta, err := k.getKafkaMetadata(m)
- require.Error(t, err)
- require.Nil(t, meta)
-
- require.Equal(t, "kafka error: clientKey or clientCert is missing", err.Error())
- })
-
- t.Run("miss client cert", func(t *testing.T) {
- m := getBaseMetadata()
- m[clientKey] = clientKeyMock
- meta, err := k.getKafkaMetadata(m)
- require.Error(t, err)
- require.Nil(t, meta)
-
- require.Equal(t, "kafka error: clientKey or clientCert is missing", err.Error())
- })
-
- t.Run("wrong ca cert format", func(t *testing.T) {
- m := getBaseMetadata()
- m[caCert] = "caCert"
- meta, err := k.getKafkaMetadata(m)
- require.Error(t, err)
- require.Nil(t, meta)
-
- require.Equal(t, "kafka error: invalid ca certificate", err.Error())
- })
-}
diff --git a/engines/kafka/producer.go b/engines/kafka/producer.go
deleted file mode 100644
index c3a1bb7..0000000
--- a/engines/kafka/producer.go
+++ /dev/null
@@ -1,78 +0,0 @@
-/*
-Copyright 2021 The Dapr Authors
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
- http://www.apache.org/licenses/LICENSE-2.0
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-*/
-
-package kafka
-
-import (
- "context"
- "errors"
- "fmt"
-
- "github.com/IBM/sarama"
-)
-
-func getSyncProducer(config sarama.Config, brokers []string, maxMessageBytes int) (sarama.SyncProducer, error) {
- // Add SyncProducer specific properties to copy of base config
- config.Producer.RequiredAcks = sarama.WaitForAll
- config.Producer.Retry.Max = 5
- config.Producer.Return.Successes = true
-
- if maxMessageBytes > 0 {
- config.Producer.MaxMessageBytes = maxMessageBytes
- }
-
- producer, err := sarama.NewSyncProducer(brokers, &config)
- if err != nil {
- return nil, err
- }
-
- return producer, nil
-}
-
-// Publish message to Kafka cluster.
-func (k *Kafka) Publish(_ context.Context, topic string, data []byte, metadata map[string]string) error {
- if k.Producer == nil {
- return errors.New("component is closed")
- }
- // k.logger.Debugf("Publishing topic %v with data: %v", topic, string(data))
- k.logger.Info(fmt.Sprintf("Publishing on topic %v", topic))
-
- msg := &sarama.ProducerMessage{
- Topic: topic,
- Value: sarama.ByteEncoder(data),
- }
-
- for name, value := range metadata {
- if name == key {
- msg.Key = sarama.StringEncoder(value)
- } else {
- if msg.Headers == nil {
- msg.Headers = make([]sarama.RecordHeader, 0, len(metadata))
- }
- msg.Headers = append(msg.Headers, sarama.RecordHeader{
- Key: []byte(name),
- Value: []byte(value),
- })
- }
- }
-
- partition, offset, err := k.Producer.SendMessage(msg)
-
- k.logger.Info(fmt.Sprintf("Partition: %v, offset: %v", partition, offset))
-
- if err != nil {
- return err
- }
-
- return nil
-}
diff --git a/engines/kafka/sarama_log_bridge.go b/engines/kafka/sarama_log_bridge.go
deleted file mode 100644
index 58d00e5..0000000
--- a/engines/kafka/sarama_log_bridge.go
+++ /dev/null
@@ -1,36 +0,0 @@
-/*
-Copyright 2021 The Dapr Authors
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
- http://www.apache.org/licenses/LICENSE-2.0
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-*/
-
-package kafka
-
-import (
- "fmt"
-
- "github.com/go-logr/logr"
-)
-
-type SaramaLogBridge struct {
- logger logr.Logger
-}
-
-func (b SaramaLogBridge) Print(v ...interface{}) {
- b.logger.Info(fmt.Sprint(v...))
-}
-
-func (b SaramaLogBridge) Printf(format string, v ...interface{}) {
- b.logger.Info(fmt.Sprintf(format, v...))
-}
-
-func (b SaramaLogBridge) Println(v ...interface{}) {
- b.Print(v...)
-}
diff --git a/engines/kafka/sasl_oauthbearer.go b/engines/kafka/sasl_oauthbearer.go
deleted file mode 100644
index e1333de..0000000
--- a/engines/kafka/sasl_oauthbearer.go
+++ /dev/null
@@ -1,128 +0,0 @@
-/*
-Copyright 2021 The Dapr Authors
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
- http://www.apache.org/licenses/LICENSE-2.0
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-*/
-
-package kafka
-
-import (
- ctx "context"
- "crypto/tls"
- "crypto/x509"
- "encoding/pem"
- "fmt"
- "net/http"
- "time"
-
- "github.com/IBM/sarama"
- "golang.org/x/oauth2"
- ccred "golang.org/x/oauth2/clientcredentials"
-)
-
-type OAuthTokenSource struct {
- CachedToken oauth2.Token
- Extensions map[string]string
- TokenEndpoint oauth2.Endpoint
- ClientID string
- ClientSecret string
- Scopes []string
- httpClient *http.Client
- trustedCas []*x509.Certificate
- skipCaVerify bool
-}
-
-func newOAuthTokenSource(oidcTokenEndpoint, oidcClientID, oidcClientSecret string, oidcScopes []string) OAuthTokenSource {
- return OAuthTokenSource{TokenEndpoint: oauth2.Endpoint{TokenURL: oidcTokenEndpoint}, ClientID: oidcClientID, ClientSecret: oidcClientSecret, Scopes: oidcScopes}
-}
-
-var tokenRequestTimeout, _ = time.ParseDuration("30s")
-
-func (ts *OAuthTokenSource) addCa(caPem string) error {
- pemBytes := []byte(caPem)
-
- block, _ := pem.Decode(pemBytes)
-
- if block == nil || block.Type != "CERTIFICATE" {
- return fmt.Errorf("PEM data not valid or not of a valid type (CERTIFICATE)")
- }
-
- caCert, err := x509.ParseCertificate(block.Bytes)
- if err != nil {
- return fmt.Errorf("error parsing PEM certificate: %w", err)
- }
-
- if ts.trustedCas == nil {
- ts.trustedCas = make([]*x509.Certificate, 0)
- }
- ts.trustedCas = append(ts.trustedCas, caCert)
-
- return nil
-}
-
-func (ts *OAuthTokenSource) configureClient() {
- if ts.httpClient != nil {
- return
- }
-
- tlsConfig := &tls.Config{
- MinVersion: tls.VersionTLS12,
- InsecureSkipVerify: ts.skipCaVerify, //nolint:gosec
- }
-
- if ts.trustedCas != nil {
- caPool, err := x509.SystemCertPool()
- if err != nil {
- caPool = x509.NewCertPool()
- }
-
- for _, c := range ts.trustedCas {
- caPool.AddCert(c)
- }
- tlsConfig.RootCAs = caPool
- }
-
- ts.httpClient = &http.Client{
- Transport: &http.Transport{
- TLSClientConfig: tlsConfig,
- },
- }
-}
-
-func (ts *OAuthTokenSource) Token() (*sarama.AccessToken, error) {
- if ts.CachedToken.Valid() {
- return ts.asSaramaToken(), nil
- }
-
- if ts.TokenEndpoint.TokenURL == "" || ts.ClientID == "" || ts.ClientSecret == "" {
- return nil, fmt.Errorf("cannot generate token, OAuthTokenSource not fully configured")
- }
-
- oidcCfg := ccred.Config{ClientID: ts.ClientID, ClientSecret: ts.ClientSecret, Scopes: ts.Scopes, TokenURL: ts.TokenEndpoint.TokenURL, AuthStyle: ts.TokenEndpoint.AuthStyle}
-
- timeoutCtx, cancel := ctx.WithTimeout(ctx.TODO(), tokenRequestTimeout)
- defer cancel()
-
- ts.configureClient()
-
- timeoutCtx = ctx.WithValue(timeoutCtx, oauth2.HTTPClient, ts.httpClient)
-
- token, err := oidcCfg.Token(timeoutCtx)
- if err != nil {
- return nil, fmt.Errorf("error generating oauth2 token: %w", err)
- }
-
- ts.CachedToken = *token
- return ts.asSaramaToken(), nil
-}
-
-func (ts *OAuthTokenSource) asSaramaToken() *sarama.AccessToken {
- return &(sarama.AccessToken{Token: ts.CachedToken.AccessToken, Extensions: ts.Extensions})
-}
diff --git a/engines/kafka/scram_client.go b/engines/kafka/scram_client.go
deleted file mode 100644
index 49d0cdd..0000000
--- a/engines/kafka/scram_client.go
+++ /dev/null
@@ -1,50 +0,0 @@
-/*
-Copyright 2021 The Dapr Authors
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
- http://www.apache.org/licenses/LICENSE-2.0
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-*/
-
-package kafka
-
-import (
- "crypto/sha256"
- "crypto/sha512"
-
- "github.com/xdg-go/scram"
-)
-
-var (
- SHA256 scram.HashGeneratorFcn = sha256.New
- SHA512 scram.HashGeneratorFcn = sha512.New
-)
-
-type XDGSCRAMClient struct {
- *scram.Client
- *scram.ClientConversation
- scram.HashGeneratorFcn
-}
-
-func (x *XDGSCRAMClient) Begin(userName, password, authzID string) (err error) {
- x.Client, err = x.HashGeneratorFcn.NewClient(userName, password, authzID)
- if err != nil {
- return err
- }
- x.ClientConversation = x.Client.NewConversation()
- return nil
-}
-
-func (x *XDGSCRAMClient) Step(challenge string) (response string, err error) {
- response, err = x.ClientConversation.Step(challenge)
- return
-}
-
-func (x *XDGSCRAMClient) Done() bool {
- return x.ClientConversation.Done()
-}
diff --git a/engines/kafka/thirdparty/retry.go b/engines/kafka/thirdparty/retry.go
deleted file mode 100644
index a6f45d5..0000000
--- a/engines/kafka/thirdparty/retry.go
+++ /dev/null
@@ -1,450 +0,0 @@
-/*
-Copyright 2021 The Dapr Authors
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
- http://www.apache.org/licenses/LICENSE-2.0
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-*/
-
-package thirdparty
-
-import (
- "context"
- "fmt"
- "reflect"
- "strconv"
- "strings"
- "sync/atomic"
- "time"
- "unicode"
-
- "github.com/mitchellh/mapstructure"
-
- "github.com/cenkalti/backoff/v4"
- "github.com/pkg/errors"
-)
-
-// PolicyType denotes if the back off delay should be constant or exponential.
-type PolicyType int
-
-const (
- // PolicyConstant is a backoff policy that always returns the same backoff delay.
- PolicyConstant PolicyType = iota
- // PolicyExponential is a backoff implementation that increases the backoff period
- // for each retry attempt using a randomization function that grows exponentially.
- PolicyExponential
-)
-
-// Config encapsulates the back off policy configuration.
-type Config struct {
- Policy PolicyType `mapstructure:"policy"`
-
- // Constant back off
- Duration time.Duration `mapstructure:"duration"`
-
- // Exponential back off
- InitialInterval time.Duration `mapstructure:"initialInterval"`
- RandomizationFactor float32 `mapstructure:"randomizationFactor"`
- Multiplier float32 `mapstructure:"multiplier"`
- MaxInterval time.Duration `mapstructure:"maxInterval"`
- MaxElapsedTime time.Duration `mapstructure:"maxElapsedTime"`
-
- // Additional options
- MaxRetries int64 `mapstructure:"maxRetries"`
-}
-
-// String implements fmt.Stringer and is used for debugging.
-func (c Config) String() string {
- return fmt.Sprintf(
- "policy='%s' duration='%v' initialInterval='%v' randomizationFactor='%f' multiplier='%f' maxInterval='%v' maxElapsedTime='%v' maxRetries='%d'",
- c.Policy, c.Duration, c.InitialInterval, c.RandomizationFactor, c.Multiplier, c.MaxInterval, c.MaxElapsedTime, c.MaxRetries,
- )
-}
-
-// DefaultConfig represents the default configuration for a `Config`.
-func DefaultConfig() Config {
- return Config{
- Policy: PolicyConstant,
- Duration: 5 * time.Second,
- InitialInterval: backoff.DefaultInitialInterval,
- RandomizationFactor: backoff.DefaultRandomizationFactor,
- Multiplier: backoff.DefaultMultiplier,
- MaxInterval: backoff.DefaultMaxInterval,
- MaxElapsedTime: backoff.DefaultMaxElapsedTime,
- MaxRetries: -1,
- }
-}
-
-// NewBackOff returns a BackOff instance for use with `NotifyRecover`
-// or `backoff.RetryNotify` directly. The instance will not stop due to
-// context cancellation. To support cancellation (recommended), use
-// `NewBackOffWithContext`.
-//
-// Since the underlying backoff implementations are not always thread safe,
-// `NewBackOff` or `NewBackOffWithContext` should be called each time
-// `RetryNotifyRecover` or `backoff.RetryNotify` is used.
-func (c *Config) NewBackOff() backoff.BackOff {
- var b backoff.BackOff
- switch c.Policy {
- case PolicyConstant:
- b = backoff.NewConstantBackOff(c.Duration)
- case PolicyExponential:
- eb := backoff.NewExponentialBackOff()
- eb.InitialInterval = c.InitialInterval
- eb.RandomizationFactor = float64(c.RandomizationFactor)
- eb.Multiplier = float64(c.Multiplier)
- eb.MaxInterval = c.MaxInterval
- eb.MaxElapsedTime = c.MaxElapsedTime
- b = eb
- }
-
- if c.MaxRetries >= 0 {
- b = backoff.WithMaxRetries(b, uint64(c.MaxRetries))
- }
-
- return b
-}
-
-// NewBackOffWithContext returns a BackOff instance for use with `RetryNotifyRecover`
-// or `backoff.RetryNotify` directly. The provided context is used to cancel retries
-// if it is canceled.
-//
-// Since the underlying backoff implementations are not always thread safe,
-// `NewBackOff` or `NewBackOffWithContext` should be called each time
-// `RetryNotifyRecover` or `backoff.RetryNotify` is used.
-func (c *Config) NewBackOffWithContext(ctx context.Context) backoff.BackOff {
- b := c.NewBackOff()
-
- return backoff.WithContext(b, ctx)
-}
-
-// DecodeConfigWithPrefix decodes a Go struct into a `Config`.
-func DecodeConfigWithPrefix(c *Config, input interface{}, prefix string) error {
- input, err := PrefixedBy(input, prefix)
- if err != nil {
- return err
- }
-
- return DecodeConfig(c, input)
-}
-
-// DecodeConfig decodes a Go struct into a `Config`.
-func DecodeConfig(c *Config, input interface{}) error {
- // Use the deefault config if `c` is empty/zero value.
- var emptyConfig Config
- if *c == emptyConfig {
- *c = DefaultConfig()
- }
-
- return Decode(input, c)
-}
-func Decode(input interface{}, output interface{}) error {
- decoder, err := mapstructure.NewDecoder(
- &mapstructure.DecoderConfig{ //nolint: exhaustruct
- Result: output,
- DecodeHook: decodeString,
- })
- if err != nil {
- return err
- }
-
- return decoder.Decode(input)
-}
-
-var (
- typeDuration = reflect.TypeOf(time.Duration(5)) //nolint: gochecknoglobals
- typeTime = reflect.TypeOf(time.Time{}) //nolint: gochecknoglobals
- typeStringDecoder = reflect.TypeOf((*StringDecoder)(nil)).Elem() //nolint: gochecknoglobals
-)
-
-type StringDecoder interface {
- DecodeString(value string) error
-}
-
-//nolint:cyclop
-func decodeString(f reflect.Type, t reflect.Type, data any) (any, error) {
- if t.Kind() == reflect.String && f.Kind() != reflect.String {
- return fmt.Sprintf("%v", data), nil
- }
- if f.Kind() == reflect.Ptr {
- f = f.Elem()
- data = reflect.ValueOf(data).Elem().Interface()
- }
- if f.Kind() != reflect.String {
- return data, nil
- }
-
- dataString, ok := data.(string)
- if !ok {
- return nil, errors.Errorf("expected string: got %s", reflect.TypeOf(data))
- }
-
- var result any
- var decoder StringDecoder
-
- if t.Implements(typeStringDecoder) {
- result = reflect.New(t.Elem()).Interface()
- decoder = result.(StringDecoder)
- } else if reflect.PtrTo(t).Implements(typeStringDecoder) {
- result = reflect.New(t).Interface()
- decoder = result.(StringDecoder)
- }
-
- if decoder != nil {
- if err := decoder.DecodeString(dataString); err != nil {
- if t.Kind() == reflect.Ptr {
- t = t.Elem()
- }
-
- return nil, errors.Errorf("invalid %s %q: %v", t.Name(), dataString, err)
- }
-
- return result, nil
- }
-
- switch t {
- case typeDuration:
- // Check for simple integer values and treat them
- // as milliseconds
- if val, err := strconv.Atoi(dataString); err == nil {
- return time.Duration(val) * time.Millisecond, nil
- }
-
- // Convert it by parsing
- d, err := time.ParseDuration(dataString)
-
- return d, invalidError(err, "duration", dataString)
- case typeTime:
- // Convert it by parsing
- t, err := time.Parse(time.RFC3339Nano, dataString)
- if err == nil {
- return t, nil
- }
- t, err = time.Parse(time.RFC3339, dataString)
-
- return t, invalidError(err, "time", dataString)
- }
-
- switch t.Kind() {
- case reflect.Uint:
- val, err := strconv.ParseUint(dataString, 10, 32)
-
- return uint(val), invalidError(err, "uint", dataString)
- case reflect.Uint64:
- val, err := strconv.ParseUint(dataString, 10, 64)
-
- return val, invalidError(err, "uint64", dataString)
- case reflect.Uint32:
- val, err := strconv.ParseUint(dataString, 10, 32)
-
- return uint32(val), invalidError(err, "uint32", dataString)
- case reflect.Uint16:
- val, err := strconv.ParseUint(dataString, 10, 16)
-
- return uint16(val), invalidError(err, "uint16", dataString)
- case reflect.Uint8:
- val, err := strconv.ParseUint(dataString, 10, 8)
-
- return uint8(val), invalidError(err, "uint8", dataString)
-
- case reflect.Int:
- val, err := strconv.Atoi(dataString)
-
- return val, invalidError(err, "int", dataString)
- case reflect.Int64:
- val, err := strconv.ParseInt(dataString, 10, 64)
-
- return val, invalidError(err, "int64", dataString)
- case reflect.Int32:
- val, err := strconv.ParseInt(dataString, 10, 32)
-
- return int32(val), invalidError(err, "int32", dataString)
- case reflect.Int16:
- val, err := strconv.ParseInt(dataString, 10, 16)
-
- return int16(val), invalidError(err, "int16", dataString)
- case reflect.Int8:
- val, err := strconv.ParseInt(dataString, 10, 8)
-
- return int8(val), invalidError(err, "int8", dataString)
-
- case reflect.Float32:
- val, err := strconv.ParseFloat(dataString, 32)
-
- return float32(val), invalidError(err, "float32", dataString)
- case reflect.Float64:
- val, err := strconv.ParseFloat(dataString, 64)
-
- return val, invalidError(err, "float64", dataString)
-
- case reflect.Bool:
- val, err := strconv.ParseBool(dataString)
-
- return val, invalidError(err, "bool", dataString)
-
- default:
- return data, nil
- }
-}
-func invalidError(err error, msg, value string) error {
- if err == nil {
- return nil
- }
-
- return errors.Errorf("invalid %s %q", msg, value)
-}
-
-// NotifyRecover is a wrapper around backoff.RetryNotify that adds another callback for when an operation
-// previously failed but has since recovered. The main purpose of this wrapper is to call `notify` only when
-// the operations fails the first time and `recovered` when it finally succeeds. This can be helpful in limiting
-// log messages to only the events that operators need to be alerted on.
-func NotifyRecover(operation backoff.Operation, b backoff.BackOff, notify backoff.Notify, recovered func()) error {
- notified := atomic.Bool{}
-
- return backoff.RetryNotify(func() error {
- err := operation()
-
- if err == nil && notified.CompareAndSwap(true, false) {
- recovered()
- }
-
- return err
- }, b, func(err error, d time.Duration) {
- if notified.CompareAndSwap(false, true) {
- notify(err, d)
- }
- })
-}
-
-// NotifyRecoverWithData is a variant of NotifyRecover that also returns data in addition to an error.
-func NotifyRecoverWithData[T any](operation backoff.OperationWithData[T], b backoff.BackOff, notify backoff.Notify, recovered func()) (T, error) {
- notified := atomic.Bool{}
-
- return backoff.RetryNotifyWithData(func() (T, error) {
- res, err := operation()
-
- if err == nil && notified.CompareAndSwap(true, false) {
- recovered()
- }
-
- return res, err
- }, b, func(err error, d time.Duration) {
- if notified.CompareAndSwap(false, true) {
- notify(err, d)
- }
- })
-}
-
-// DecodeString handles converting a string value to `p`.
-func (p *PolicyType) DecodeString(value string) error {
- switch strings.ToLower(value) {
- case "constant":
- *p = PolicyConstant
- case "exponential":
- *p = PolicyExponential
- default:
- return errors.Errorf("unexpected back off policy type: %s", value)
- }
- return nil
-}
-
-// String implements fmt.Stringer and is used for debugging.
-func (p PolicyType) String() string {
- switch p {
- case PolicyConstant:
- return "constant"
- case PolicyExponential:
- return "exponential"
- default:
- return ""
- }
-}
-
-func PrefixedBy(input interface{}, prefix string) (interface{}, error) {
- normalized, err := Normalize(input)
- if err != nil {
- // The only error that can come from normalize is if
- // input is a map[interface{}]interface{} and contains
- // a key that is not a string.
- return input, err
- }
- input = normalized
-
- if inputMap, ok := input.(map[string]interface{}); ok {
- converted := make(map[string]interface{}, len(inputMap))
- for k, v := range inputMap {
- if strings.HasPrefix(k, prefix) {
- key := uncapitalize(strings.TrimPrefix(k, prefix))
- converted[key] = v
- }
- }
-
- return converted, nil
- } else if inputMap, ok := input.(map[string]string); ok {
- converted := make(map[string]string, len(inputMap))
- for k, v := range inputMap {
- if strings.HasPrefix(k, prefix) {
- key := uncapitalize(strings.TrimPrefix(k, prefix))
- converted[key] = v
- }
- }
-
- return converted, nil
- }
-
- return input, nil
-}
-
-// uncapitalize initial capital letters in `str`.
-func uncapitalize(str string) string {
- if len(str) == 0 {
- return str
- }
-
- vv := []rune(str) // Introduced later
- vv[0] = unicode.ToLower(vv[0])
-
- return string(vv)
-}
-
-//nolint:cyclop
-func Normalize(i interface{}) (interface{}, error) {
- var err error
- switch x := i.(type) {
- case map[interface{}]interface{}:
- m2 := map[string]interface{}{}
- for k, v := range x {
- if strKey, ok := k.(string); ok {
- if m2[strKey], err = Normalize(v); err != nil {
- return nil, err
- }
- } else {
- return nil, fmt.Errorf("error parsing config field: %v", k)
- }
- }
-
- return m2, nil
- case map[string]interface{}:
- m2 := map[string]interface{}{}
- for k, v := range x {
- if m2[k], err = Normalize(v); err != nil {
- return nil, err
- }
- }
-
- return m2, nil
- case []interface{}:
- for i, v := range x {
- if x[i], err = Normalize(v); err != nil {
- return nil, err
- }
- }
- }
-
- return i, nil
-}
diff --git a/engines/kafka/utils.go b/engines/kafka/utils.go
deleted file mode 100644
index 4f8bf65..0000000
--- a/engines/kafka/utils.go
+++ /dev/null
@@ -1,75 +0,0 @@
-/*
-Copyright 2021 The Dapr Authors
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
- http://www.apache.org/licenses/LICENSE-2.0
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-*/
-
-package kafka
-
-import (
- "encoding/base64"
- "encoding/pem"
- "fmt"
- "strings"
-
- "github.com/IBM/sarama"
-)
-
-const (
- // DefaultMaxBulkSubCount is the default max bulk count for kafka pubsub component
- // if the MaxBulkCountKey is not set in the metadata.
- DefaultMaxBulkSubCount = 80
- // DefaultMaxBulkSubAwaitDurationMs is the default max bulk await duration for kafka pubsub component
- // if the MaxBulkAwaitDurationKey is not set in the metadata.
- DefaultMaxBulkSubAwaitDurationMs = 10000
-)
-
-// asBase64String implements the `fmt.Stringer` interface in order to print
-// `[]byte` as a base 64 encoded string.
-// It is used above to log the message key. The call to `EncodeToString`
-// only occurs for logs that are written based on the logging level.
-type asBase64String []byte
-
-func (s asBase64String) String() string {
- return base64.StdEncoding.EncodeToString(s)
-}
-
-func parseInitialOffset(value string) (initialOffset int64, err error) {
- switch strings.ToLower(value) {
- case "oldest":
- initialOffset = sarama.OffsetOldest
- case "newest":
- initialOffset = sarama.OffsetNewest
- default:
- return 0, fmt.Errorf("kafka error: invalid initialOffset: %s", value)
- }
- return initialOffset, err
-}
-
-// isValidPEM validates the provided input has PEM formatted block.
-func isValidPEM(val string) bool {
- block, _ := pem.Decode([]byte(val))
-
- return block != nil
-}
-
-// TopicHandlerConfig is the map of topics and sruct containing handler and their config.
-type TopicHandlerConfig map[string]SubscriptionHandlerConfig
-
-// TopicList returns the list of topics
-func (tbh TopicHandlerConfig) TopicList() []string {
- topics := make([]string, len(tbh))
- i := 0
- for topic := range tbh {
- topics[i] = topic
- i++
- }
- return topics
-}
diff --git a/engines/mock.go b/engines/mock.go
index 44b8954..c257331 100644
--- a/engines/mock.go
+++ b/engines/mock.go
@@ -19,135 +19,12 @@ along with this program. If not, see .
package engines
-import (
- "context"
- "fmt"
-
- ctrl "sigs.k8s.io/controller-runtime"
-
- "github.com/apecloud/dbctl/dcs"
-)
-
type MockManager struct {
DBManagerBase
}
var _ DBManager = &MockManager{}
-func NewMockManager(properties Properties) (DBManager, error) {
- logger := ctrl.Log.WithName("MockManager")
-
- managerBase, err := NewDBManagerBase(logger)
- if err != nil {
- return nil, err
- }
-
- Mgr := &MockManager{
- DBManagerBase: *managerBase,
- }
-
- return Mgr, nil
-}
-func (*MockManager) IsRunning() bool {
- return true
-}
-
func (*MockManager) IsDBStartupReady() bool {
return true
}
-
-func (*MockManager) InitializeCluster(context.Context, *dcs.Cluster) error {
- return fmt.Errorf("NotSupported")
-}
-func (*MockManager) IsClusterInitialized(context.Context, *dcs.Cluster) (bool, error) {
- return false, fmt.Errorf("NotSupported")
-}
-
-func (*MockManager) IsCurrentMemberInCluster(context.Context, *dcs.Cluster) bool {
- return true
-}
-
-func (*MockManager) IsCurrentMemberHealthy(context.Context, *dcs.Cluster) bool {
- return true
-}
-
-func (*MockManager) IsClusterHealthy(context.Context, *dcs.Cluster) bool {
- return true
-}
-
-func (*MockManager) IsMemberHealthy(context.Context, *dcs.Cluster, *dcs.Member) bool {
- return true
-}
-
-func (*MockManager) HasOtherHealthyLeader(context.Context, *dcs.Cluster) *dcs.Member {
- return nil
-}
-
-func (*MockManager) HasOtherHealthyMembers(context.Context, *dcs.Cluster, string) []*dcs.Member {
- return nil
-}
-
-func (*MockManager) IsLeader(context.Context, *dcs.Cluster) (bool, error) {
- return false, fmt.Errorf("NotSupported")
-}
-
-func (*MockManager) IsLeaderMember(context.Context, *dcs.Cluster, *dcs.Member) (bool, error) {
- return false, fmt.Errorf("NotSupported")
-}
-
-func (*MockManager) IsFirstMember() bool {
- return true
-}
-
-func (*MockManager) JoinCurrentMemberToCluster(context.Context, *dcs.Cluster) error {
- return fmt.Errorf("NotSupported")
-}
-
-func (*MockManager) LeaveMemberFromCluster(context.Context, *dcs.Cluster, string) error {
- return fmt.Errorf("NotSupported")
-}
-
-func (*MockManager) Promote(context.Context, *dcs.Cluster) error {
- return fmt.Errorf("NotSupported")
-}
-
-func (*MockManager) IsPromoted(context.Context) bool {
- return true
-}
-
-func (*MockManager) Demote(context.Context) error {
- return fmt.Errorf("NotSupported")
-}
-
-func (*MockManager) Follow(context.Context, *dcs.Cluster) error {
- return fmt.Errorf("NotSupported")
-}
-
-func (*MockManager) Recover(context.Context, *dcs.Cluster) error {
- return nil
-
-}
-
-func (*MockManager) GetHealthiestMember(*dcs.Cluster, string) *dcs.Member {
- return nil
-}
-
-func (*MockManager) GetMemberAddrs(context.Context, *dcs.Cluster) []string {
- return nil
-}
-
-func (*MockManager) IsRootCreated(context.Context) (bool, error) {
- return false, fmt.Errorf("NotSupported")
-}
-
-func (*MockManager) CreateRoot(context.Context) error {
- return fmt.Errorf("NotSupported")
-}
-
-func (*MockManager) Lock(context.Context, string) error {
- return fmt.Errorf("NotSupported")
-}
-
-func (*MockManager) Unlock(context.Context) error {
- return fmt.Errorf("NotSupported")
-}
diff --git a/engines/models/client_types.go b/engines/models/client_types.go
index d8be238..c8232c5 100644
--- a/engines/models/client_types.go
+++ b/engines/models/client_types.go
@@ -36,13 +36,6 @@ const (
SYMFONY ClientType = "symfony"
)
-func ClientTypes() []string {
- return []string{CLI.String(), DJANGO.String(), DOTNET.String(), GO.String(),
- JAVA.String(), NODEJS.String(), PHP.String(), PRISMA.String(),
- PYTHON.String(), RAILS.String(), RUST.String(), SYMFONY.String(),
- }
-}
-
func (t ClientType) String() string {
return string(t)
}
diff --git a/engines/models/errors.go b/engines/models/errors.go
index b0263bf..990f43f 100644
--- a/engines/models/errors.go
+++ b/engines/models/errors.go
@@ -19,24 +19,14 @@ along with this program. If not, see .
package models
-import "fmt"
+import (
+ "errors"
+)
const (
- errMsgNoSQL = "no sql provided"
- errMsgNoUserName = "no username provided"
- errMsgNoPassword = "no password provided"
- errMsgNoRoleName = "no rolename provided"
- errMsgInvalidRoleName = "invalid rolename, should be one of [superuser, readwrite, readonly]"
- errMsgNoSuchUser = "no such user"
- errMsgNotImplemented = "not implemented"
+ errMsgNotImplemented = "not implemented"
)
var (
- ErrNoSQL = fmt.Errorf(errMsgNoSQL)
- ErrNoUserName = fmt.Errorf(errMsgNoUserName)
- ErrNoPassword = fmt.Errorf(errMsgNoPassword)
- ErrNoRoleName = fmt.Errorf(errMsgNoRoleName)
- ErrInvalidRoleName = fmt.Errorf(errMsgInvalidRoleName)
- ErrNoSuchUser = fmt.Errorf(errMsgNoSuchUser)
- ErrNotImplemented = fmt.Errorf(errMsgNotImplemented)
+ ErrNotImplemented = errors.New(errMsgNotImplemented)
)
diff --git a/engines/models/replca_role_types.go b/engines/models/replca_role_types.go
index 676b879..3812cac 100644
--- a/engines/models/replca_role_types.go
+++ b/engines/models/replca_role_types.go
@@ -19,22 +19,13 @@ along with this program. If not, see .
package models
-import "strings"
-
const (
PRIMARY = "primary"
SECONDARY = "secondary"
MASTER = "master"
SLAVE = "slave"
- LEADER = "Leader"
- FOLLOWER = "Follower"
- LEARNER = "Learner"
- CANDIDATE = "Candidate"
+ LEADER = "leader"
+ FOLLOWER = "follower"
+ LEARNER = "learner"
+ CANDIDATE = "candidate"
)
-
-// IsLikelyPrimaryRole returns true if the role is primary,
-// it is used for the case where db manager do not implemement the IsLeader method.
-// use it curefully, as it is for normal case, and may be wrong for some special cases.
-func IsLikelyPrimaryRole(role string) bool {
- return strings.EqualFold(role, PRIMARY) || strings.EqualFold(role, MASTER) || strings.EqualFold(role, LEADER)
-}
diff --git a/engines/models/role_types.go b/engines/models/role_types.go
deleted file mode 100644
index ab4f34f..0000000
--- a/engines/models/role_types.go
+++ /dev/null
@@ -1,72 +0,0 @@
-/*
-Copyright (C) 2022-2023 ApeCloud Co., Ltd
-
-This file is part of KubeBlocks project
-
-This program is free software: you can redistribute it and/or modify
-it under the terms of the GNU Affero General Public License as published by
-the Free Software Foundation, either version 3 of the License, or
-(at your option) any later version.
-
-This program is distributed in the hope that it will be useful
-but WITHOUT ANY WARRANTY; without even the implied warranty of
-MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-GNU Affero General Public License for more details.
-
-You should have received a copy of the GNU Affero General Public License
-along with this program. If not, see .
-*/
-
-package models
-
-import "strings"
-
-const (
- SuperUserRole RoleType = "superuser"
- ReadWriteRole RoleType = "readwrite"
- ReadOnlyRole RoleType = "readonly"
- NoPrivileges RoleType = ""
- CustomizedRole RoleType = "customized"
- InvalidRole RoleType = "invalid"
-)
-
-type RoleType string
-
-func (r RoleType) EqualTo(role string) bool {
- return strings.EqualFold(string(r), role)
-}
-
-func (r RoleType) GetWeight() int32 {
- switch r {
- case SuperUserRole:
- return 1 << 3
- case ReadWriteRole:
- return 1 << 2
- case ReadOnlyRole:
- return 1 << 1
- case CustomizedRole:
- return 1
- default:
- return 0
- }
-}
-
-func SortRoleByWeight(r1, r2 RoleType) int {
- return int(r1.GetWeight()) - int(r2.GetWeight())
-}
-
-func String2RoleType(roleName string) RoleType {
- if SuperUserRole.EqualTo(roleName) {
- return SuperUserRole
- }
- if ReadWriteRole.EqualTo(roleName) {
- return ReadWriteRole
- }
- if ReadOnlyRole.EqualTo(roleName) {
- return ReadOnlyRole
- }
- if NoPrivileges.EqualTo(roleName) {
- return NoPrivileges
- }
- return CustomizedRole
-}
diff --git a/engines/models/userinfo.go b/engines/models/userinfo.go
deleted file mode 100644
index 94318ca..0000000
--- a/engines/models/userinfo.go
+++ /dev/null
@@ -1,83 +0,0 @@
-/*
-Copyright (C) 2022-2023 ApeCloud Co., Ltd
-
-This file is part of KubeBlocks project
-
-This program is free software: you can redistribute it and/or modify
-it under the terms of the GNU Affero General Public License as published by
-the Free Software Foundation, either version 3 of the License, or
-(at your option) any later version.
-
-This program is distributed in the hope that it will be useful
-but WITHOUT ANY WARRANTY; without even the implied warranty of
-MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-GNU Affero General Public License for more details.
-
-You should have received a copy of the GNU Affero General Public License
-along with this program. If not, see .
-*/
-
-package models
-
-import (
- "time"
-)
-
-// UserInfo is the user information for account management
-type UserInfo struct {
- UserName string `json:"userName"`
- Password string `json:"password,omitempty"`
- Expired string `json:"expired,omitempty"`
- ExpireAt time.Duration `json:"expireAt,omitempty"`
- RoleName string `json:"roleName,omitempty"`
-}
-
-func (user *UserInfo) UserNameValidator() error {
- if user.UserName == "" {
- return ErrNoUserName
- }
- return nil
-}
-
-func (user *UserInfo) PasswdValidator() error {
- if user.Password == "" {
- return ErrNoPassword
- }
- return nil
-}
-
-func (user *UserInfo) RoleValidator() error {
- if user.RoleName == "" {
- return ErrNoRoleName
- }
-
- roles := []RoleType{ReadOnlyRole, ReadWriteRole, SuperUserRole}
- for _, role := range roles {
- if role.EqualTo(user.RoleName) {
- return nil
- }
- }
- return ErrInvalidRoleName
-}
-
-func (user *UserInfo) UserNameAndPasswdValidator() error {
- if err := user.UserNameValidator(); err != nil {
- return err
- }
-
- if err := user.PasswdValidator(); err != nil {
- return err
- }
- return nil
-}
-
-func (user *UserInfo) UserNameAndRoleValidator() error {
- if err := user.UserNameValidator(); err != nil {
- return err
- }
-
- if err := user.RoleValidator(); err != nil {
- return err
- }
- return nil
-}
diff --git a/engines/mongodb/client.go b/engines/mongodb/client.go
deleted file mode 100644
index 993e5c0..0000000
--- a/engines/mongodb/client.go
+++ /dev/null
@@ -1,98 +0,0 @@
-/*
-Copyright (C) 2022-2023 ApeCloud Co., Ltd
-
-This file is part of KubeBlocks project
-
-This program is free software: you can redistribute it and/or modify
-it under the terms of the GNU Affero General Public License as published by
-the Free Software Foundation, either version 3 of the License, or
-(at your option) any later version.
-
-This program is distributed in the hope that it will be useful
-but WITHOUT ANY WARRANTY; without even the implied warranty of
-MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-GNU Affero General Public License for more details.
-
-You should have received a copy of the GNU Affero General Public License
-along with this program. If not, see .
-*/
-
-package mongodb
-
-import (
- "context"
-
- "github.com/pkg/errors"
- "go.mongodb.org/mongo-driver/mongo"
- "go.mongodb.org/mongo-driver/mongo/options"
- "go.mongodb.org/mongo-driver/mongo/readpref"
- "go.mongodb.org/mongo-driver/mongo/writeconcern"
-)
-
-func NewMongodbClient(ctx context.Context, config *Config) (*mongo.Client, error) {
- if len(config.Hosts) == 0 {
- return nil, errors.New("Get replset client without hosts")
- }
-
- opts := options.Client().
- SetHosts(config.Hosts).
- SetReplicaSet(config.ReplSetName).
- SetAuth(options.Credential{
- Password: config.Password,
- Username: config.Username,
- }).
- SetWriteConcern(writeconcern.Majority()).
- SetReadPreference(readpref.Primary()).
- SetDirect(config.Direct)
-
- client, err := mongo.Connect(ctx, opts)
- if err != nil {
- return nil, errors.Wrap(err, "connect to mongodb")
- }
- return client, nil
-}
-
-func NewReplSetClient(ctx context.Context, hosts []string) (*mongo.Client, error) {
- config := GetConfig().DeepCopy()
- config.Hosts = hosts
- config.Direct = false
- return NewMongodbClient(ctx, config)
-
-}
-
-func NewMongosClient(ctx context.Context, hosts []string) (*mongo.Client, error) {
- config := GetConfig().DeepCopy()
- config.Hosts = hosts
- config.Direct = false
- config.ReplSetName = ""
-
- return NewMongodbClient(ctx, config)
-}
-
-func NewStandaloneClient(ctx context.Context, host string) (*mongo.Client, error) {
- config := GetConfig().DeepCopy()
- config.Hosts = []string{host}
- config.Direct = true
- config.ReplSetName = ""
-
- return NewMongodbClient(ctx, config)
-}
-
-func NewLocalUnauthClient(ctx context.Context) (*mongo.Client, error) {
- config := GetConfig().DeepCopy()
- config.Direct = true
- config.ReplSetName = ""
-
- opts := options.Client().
- SetHosts(config.Hosts).
- SetWriteConcern(writeconcern.Majority()).
- SetReadPreference(readpref.Primary()).
- SetDirect(config.Direct)
-
- client, err := mongo.Connect(ctx, opts)
- if err != nil {
- return nil, errors.Wrap(err, "connect to mongodb")
- }
-
- return client, nil
-}
diff --git a/engines/mongodb/config.go b/engines/mongodb/config.go
index 3f79880..9bfcb96 100644
--- a/engines/mongodb/config.go
+++ b/engines/mongodb/config.go
@@ -20,7 +20,6 @@ along with this program. If not, see .
package mongodb
import (
- "errors"
"net"
"strconv"
"time"
@@ -32,94 +31,64 @@ import (
)
const (
- host = "host"
- username = "username"
- password = "password"
- server = "server"
- databaseName = "databaseName"
- operationTimeout = "operationTimeout"
- params = "params"
- adminDatabase = "admin"
-
- defaultTimeout = 5 * time.Second
- defaultDBPort = 27017
- RootUserEnv = "MONGODB_USER"
- RootPasswordEnv = "MONGODB_PASSWORD"
+ adminDatabase = "admin"
+
+ defaultTimeout = 5 * time.Second
+ defaultDBPort = 27017
+ UserEnv = "MONGODB_USER"
+ PasswordEnv = "MONGODB_PASSWORD"
+ RootUserEnv = "MONGODB_ROOT_USER"
+ RootPasswordEnv = "MONGODB_ROOT_PASSWORD"
+ ClusterRoleEnv = "MONGODB_CLUSTER_ROLE"
+ GrantAnyActionPrivilegeEnv = "MONGODB_GRANT_ANYACTION_PRIVILEGE"
)
type Config struct {
- Hosts []string
- Username string
- Password string
- ReplSetName string
- DatabaseName string
- Params string
- Direct bool
- OperationTimeout time.Duration
+ Hosts []string
+ Username string
+ Password string
+ ReplSetName string
+ DatabaseName string
+ Params string
+ Direct bool
+ OperationTimeout time.Duration
+ ConfigSvr bool
+ GrantAnyActionPrivilege bool
}
var config *Config
-func NewConfig(properties map[string]string) (*Config, error) {
+func NewConfig() (*Config, error) {
config = &Config{
Direct: true,
Username: "root",
+ Hosts: []string{"127.0.0.1:27017"},
+ Params: "?directConnection=true",
OperationTimeout: defaultTimeout,
}
- if val, ok := properties[host]; ok && val != "" {
- config.Hosts = []string{val}
- }
-
if viper.IsSet(constant.KBEnvServicePort) {
config.Hosts = []string{"localhost:" + viper.GetString(constant.KBEnvServicePort)}
}
- if len(config.Hosts) == 0 {
- return nil, errors.New("must set 'host' in metadata or KB_SERVICE_PORT environment variable")
- }
-
- if val, ok := properties[username]; ok && val != "" {
- config.Username = val
+ _ = viper.BindEnv(constant.ConfigKeyUserName, constant.KBEnvServiceUser, RootUserEnv, UserEnv)
+ if viper.IsSet(constant.ConfigKeyUserName) {
+ config.Username = viper.GetString(constant.ConfigKeyUserName)
}
- if val, ok := properties[password]; ok && val != "" {
- config.Password = val
+ _ = viper.BindEnv(constant.ConfigKeyPassword, constant.KBEnvServicePassword, RootPasswordEnv, PasswordEnv)
+ if viper.IsSet(constant.ConfigKeyPassword) {
+ config.Password = viper.GetString(constant.ConfigKeyPassword)
}
- if viper.IsSet(constant.KBEnvServiceUser) {
- config.Username = viper.GetString(constant.KBEnvServiceUser)
- } else if viper.IsSet(RootUserEnv) {
- config.Username = viper.GetString(RootUserEnv)
-
- }
-
- if viper.IsSet(constant.KBEnvServicePassword) {
- config.Password = viper.GetString(constant.KBEnvServicePassword)
- } else if viper.IsSet(RootPasswordEnv) {
- config.Password = viper.GetString(RootPasswordEnv)
+ if viper.IsSet(ClusterRoleEnv) {
+ config.ConfigSvr = viper.GetString(ClusterRoleEnv) == "configsvr"
}
-
- if clusterCompName := constant.GetClusterCompName(); clusterCompName != "" {
- config.ReplSetName = clusterCompName
+ if viper.IsSet(GrantAnyActionPrivilegeEnv) {
+ config.GrantAnyActionPrivilege = viper.GetBool(GrantAnyActionPrivilegeEnv)
}
-
+ config.ReplSetName = constant.GetClusterCompName()
config.DatabaseName = adminDatabase
- if val, ok := properties[databaseName]; ok && val != "" {
- config.DatabaseName = val
- }
-
- if val, ok := properties[params]; ok && val != "" {
- config.Params = val
- }
-
- var err error
- if val, ok := properties[operationTimeout]; ok && val != "" {
- config.OperationTimeout, err = time.ParseDuration(val)
- if err != nil {
- return nil, errors.New("incorrect operationTimeout field from metadata")
- }
- }
return config, nil
}
@@ -142,7 +111,3 @@ func (config *Config) DeepCopy() *Config {
newConf, _ := utilconfig.Clone(config)
return newConf.(*Config)
}
-
-func GetConfig() *Config {
- return config
-}
diff --git a/engines/mongodb/config_test.go b/engines/mongodb/config_test.go
index 48223df..dd923e2 100644
--- a/engines/mongodb/config_test.go
+++ b/engines/mongodb/config_test.go
@@ -27,51 +27,14 @@ import (
func TestGetMongoDBMetadata(t *testing.T) {
t.Run("With defaults", func(t *testing.T) {
- properties := map[string]string{
- host: "127.0.0.1",
- }
-
- metadata, err := NewConfig(properties)
+ metadata, err := NewConfig()
assert.Nil(t, err)
- assert.Equal(t, properties[host], metadata.Hosts[0])
+ assert.Equal(t, "127.0.0.1:27017", metadata.Hosts[0])
assert.Equal(t, adminDatabase, metadata.DatabaseName)
- })
-
- t.Run("With custom values", func(t *testing.T) {
- properties := map[string]string{
- host: "127.0.0.2",
- databaseName: "TestDB",
- username: "username",
- password: "password",
- }
-
- metadata, err := NewConfig(properties)
- assert.Nil(t, err)
- assert.Equal(t, properties[host], metadata.Hosts[0])
- assert.Equal(t, properties[databaseName], metadata.DatabaseName)
- assert.Equal(t, properties[username], metadata.Username)
- assert.Equal(t, properties[password], metadata.Password)
- })
-
- t.Run("Missing hosts", func(t *testing.T) {
- properties := map[string]string{
- username: "username",
- password: "password",
- }
-
- _, err := NewConfig(properties)
- assert.NotNil(t, err)
- })
-
- t.Run("Invalid without host/server", func(t *testing.T) {
- properties := map[string]string{
- databaseName: "TestDB",
- }
-
- _, err := NewConfig(properties)
- assert.NotNil(t, err)
-
- expected := "must set 'host' in metadata or KB_SERVICE_PORT environment variable"
- assert.Equal(t, expected, err.Error())
+ assert.Equal(t, true, metadata.Direct)
+ assert.Equal(t, "root", metadata.Username)
+ assert.Equal(t, "", metadata.Password)
+ assert.Equal(t, "?directConnection=true", metadata.Params)
+ assert.Equal(t, defaultTimeout, metadata.OperationTimeout)
})
}
diff --git a/engines/mongodb/manager.go b/engines/mongodb/manager.go
index 9507920..51b2127 100644
--- a/engines/mongodb/manager.go
+++ b/engines/mongodb/manager.go
@@ -21,31 +21,19 @@ package mongodb
import (
"context"
- "encoding/json"
- "fmt"
- "math/rand"
"strings"
"time"
"github.com/pkg/errors"
- "go.mongodb.org/mongo-driver/bson"
"go.mongodb.org/mongo-driver/mongo"
"go.mongodb.org/mongo-driver/mongo/options"
"go.mongodb.org/mongo-driver/mongo/readpref"
"go.mongodb.org/mongo-driver/mongo/writeconcern"
ctrl "sigs.k8s.io/controller-runtime"
- "github.com/apecloud/dbctl/dcs"
"github.com/apecloud/dbctl/engines"
)
-const (
- PrimaryPriority = 2
- SecondaryPriority = 1
-
- ServiceType = "mongodb"
-)
-
type Manager struct {
engines.DBManagerBase
Client *mongo.Client
@@ -55,10 +43,10 @@ type Manager struct {
var Mgr *Manager
var _ engines.DBManager = &Manager{}
-func NewManager(properties engines.Properties) (engines.DBManager, error) {
+func NewManager() (engines.DBManager, error) {
ctx := context.Background()
logger := ctrl.Log.WithName("MongoDB")
- config, err := NewConfig(properties)
+ config, err := NewConfig()
if err != nil {
return nil, err
}
@@ -102,168 +90,6 @@ func NewManager(properties engines.Properties) (engines.DBManager, error) {
return Mgr, nil
}
-func (mgr *Manager) InitializeCluster(ctx context.Context, cluster *dcs.Cluster) error {
- return mgr.InitiateReplSet(ctx, cluster)
-}
-
-// InitiateReplSet is a method to create MongoDB cluster
-func (mgr *Manager) InitiateReplSet(ctx context.Context, cluster *dcs.Cluster) error {
- configMembers := make([]ConfigMember, len(cluster.Members))
-
- for i, member := range cluster.Members {
- configMembers[i].ID = i
- configMembers[i].Host = cluster.GetMemberAddrWithPort(member)
- if strings.HasPrefix(member.Name, mgr.CurrentMemberName) || strings.HasPrefix(member.Name, mgr.CurrentMemberIP) {
- configMembers[i].Priority = PrimaryPriority
- } else {
- configMembers[i].Priority = SecondaryPriority
- }
- }
-
- config := RSConfig{
- ID: mgr.ClusterCompName,
- Members: configMembers,
- }
- client, err := NewLocalUnauthClient(ctx)
- if err != nil {
- mgr.Logger.Info("Get local unauth client failed", "error", err.Error())
- return err
- }
- defer client.Disconnect(context.TODO()) //nolint:errcheck
-
- configJSON, _ := json.Marshal(config)
- mgr.Logger.Info(fmt.Sprintf("Initial Replset Config: %s", string(configJSON)))
- response := client.Database("admin").RunCommand(ctx, bson.M{"replSetInitiate": config})
- if response.Err() != nil {
- return response.Err()
- }
- return nil
-}
-
-// IsClusterInitialized is a method to check if cluster is initialized or not
-func (mgr *Manager) IsClusterInitialized(ctx context.Context, cluster *dcs.Cluster) (bool, error) {
- client, err := mgr.GetReplSetClient(ctx, cluster)
- if err != nil {
- mgr.Logger.Info("Get leader client failed", "error", err)
- return false, err
- }
- defer client.Disconnect(ctx) //nolint:errcheck
-
- ctx1, cancel := context.WithTimeout(ctx, 1000*time.Millisecond)
- defer cancel()
- rsStatus, err := GetReplSetStatus(ctx1, client)
- if rsStatus != nil {
- return rsStatus.Set != "", nil
- }
- mgr.Logger.Info("Get replSet status failed", "error", err)
-
- if !mgr.IsFirstMember() {
- return false, nil
- }
-
- client, err = NewLocalUnauthClient(ctx)
- if err != nil {
- mgr.Logger.Info("Get local unauth client failed", "error", err)
- return false, err
- }
- defer client.Disconnect(ctx) //nolint:errcheck
-
- rsStatus, err = GetReplSetStatus(ctx, client)
- if rsStatus != nil {
- return rsStatus.Set != "", nil
- }
-
- err = errors.Cause(err)
- if cmdErr, ok := err.(mongo.CommandError); ok && cmdErr.Name == "NotYetInitialized" {
- return false, nil
- }
- mgr.Logger.Info("Get replSet status with local unauth client failed", "error", err)
-
- rsStatus, err = mgr.GetReplSetStatus(ctx)
- if rsStatus != nil {
- return rsStatus.Set != "", nil
- }
- if err != nil {
- mgr.Logger.Info("Get replSet status with local auth client failed", "error", err)
- return false, err
- }
-
- mgr.Logger.Info("Get replSet status failed", "error", err)
- return false, err
-}
-
-func (mgr *Manager) IsRootCreated(ctx context.Context) (bool, error) {
- if !mgr.IsFirstMember() {
- return true, nil
- }
-
- client, err := NewLocalUnauthClient(ctx)
- if err != nil {
- mgr.Logger.Info("Get local unauth client failed", "error", err)
- return false, err
- }
- defer client.Disconnect(ctx) //nolint:errcheck
-
- _, err = GetReplSetStatus(ctx, client)
- if err == nil {
- return false, nil
- }
- err = errors.Cause(err)
- if cmdErr, ok := err.(mongo.CommandError); ok && cmdErr.Name == "Unauthorized" {
- return true, nil
- }
-
- mgr.Logger.Info("Get replSet status with local unauth client failed", "error", err)
-
- _, err = mgr.GetReplSetStatus(ctx)
- if err == nil {
- return true, nil
- }
-
- mgr.Logger.Info("Get replSet status with local auth client failed", "error", err)
- return false, err
-
-}
-
-func (mgr *Manager) CreateRoot(ctx context.Context) error {
- if !mgr.IsFirstMember() {
- return nil
- }
-
- client, err := NewLocalUnauthClient(ctx)
- if err != nil {
- mgr.Logger.Info("Get local unauth client failed", "error", err)
- return err
- }
- defer client.Disconnect(ctx) //nolint:errcheck
-
- role := map[string]interface{}{
- "role": "root",
- "db": "admin",
- }
-
- mgr.Logger.Info(fmt.Sprintf("Create user: %s, passwd: %s, roles: %v", config.Username, config.Password, role))
- err = CreateUser(ctx, client, config.Username, config.Password, role)
- if err != nil {
- mgr.Logger.Info("Create Root failed", "error", err)
- return err
- }
-
- return nil
-}
-
-func (mgr *Manager) IsRunning() bool {
- // ctx, cancel := context.WithTimeout(context.Background(), 500*time.Millisecond)
- // defer cancel()
-
- // err := mgr.Client.Ping(ctx, readpref.Nearest())
- // if err != nil {
- // mgr.Logger.Infof("DB is not ready: %v", err)
- // return false
- // }
- return true
-}
-
func (mgr *Manager) IsDBStartupReady() bool {
if mgr.DBStartupReady {
return true
@@ -299,56 +125,6 @@ func (mgr *Manager) GetReplSetStatus(ctx context.Context) (*ReplSetStatus, error
return GetReplSetStatus(ctx, mgr.Client)
}
-func (mgr *Manager) IsLeaderMember(ctx context.Context, cluster *dcs.Cluster, dcsMember *dcs.Member) (bool, error) {
- memberName := mgr.CurrentMemberName
- memberIP := mgr.CurrentMemberIP
-
- if dcsMember != nil {
- memberName = dcsMember.Name
- memberIP = dcsMember.PodIP
- }
-
- status, err := mgr.GetReplSetStatus(ctx)
- if err != nil {
- mgr.Logger.Info("rs.status() error", "error", err.Error())
- return false, err
- }
- for _, member := range status.Members {
- if strings.HasPrefix(member.Name, memberName) || strings.HasPrefix(member.Name, memberIP) {
- if member.StateStr == "PRIMARY" {
- return true, nil
- }
- break
- }
- }
- return false, nil
-}
-
-func (mgr *Manager) IsLeader(ctx context.Context, cluster *dcs.Cluster) (bool, error) {
- return mgr.IsLeaderMember(ctx, cluster, nil)
-}
-
-func (mgr *Manager) GetReplSetConfig(ctx context.Context) (*RSConfig, error) {
- return GetReplSetConfig(ctx, mgr.Client)
-}
-
-func (mgr *Manager) GetMemberAddrs(ctx context.Context, cluster *dcs.Cluster) []string {
- client, err := mgr.GetReplSetClient(ctx, cluster)
- if err != nil {
- mgr.Logger.Info("Get replSet client failed", "error", err.Error())
- return nil
- }
- defer client.Disconnect(ctx) //nolint:errcheck
-
- rsConfig, err := GetReplSetConfig(ctx, client)
- if rsConfig == nil {
- mgr.Logger.Info("Get replSet config failed", "error", err.Error())
- return nil
- }
-
- return mgr.GetMemberAddrsFromRSConfig(rsConfig)
-}
-
func (mgr *Manager) GetMemberAddrsFromRSConfig(rsConfig *RSConfig) []string {
if rsConfig == nil {
return []string{}
@@ -360,477 +136,3 @@ func (mgr *Manager) GetMemberAddrsFromRSConfig(rsConfig *RSConfig) []string {
}
return hosts
}
-
-func (mgr *Manager) GetReplSetClient(ctx context.Context, cluster *dcs.Cluster) (*mongo.Client, error) {
- hosts := cluster.GetMemberAddrs()
- return NewReplSetClient(ctx, hosts)
-}
-
-func (mgr *Manager) GetLeaderClient(ctx context.Context, cluster *dcs.Cluster) (*mongo.Client, error) {
- if cluster.Leader == nil || cluster.Leader.Name == "" {
- return nil, fmt.Errorf("cluster has no leader")
- }
-
- leaderMember := cluster.GetMemberWithName(cluster.Leader.Name)
- host := cluster.GetMemberAddrWithPort(*leaderMember)
- return NewReplSetClient(context.TODO(), []string{host})
-}
-
-func (mgr *Manager) GetReplSetClientWithHosts(ctx context.Context, hosts []string) (*mongo.Client, error) {
- if len(hosts) == 0 {
- err := errors.New("Get replset client without hosts")
- mgr.Logger.Info("Get replset client without hosts", "error", err.Error())
- return nil, err
- }
-
- opts := options.Client().
- SetHosts(hosts).
- SetReplicaSet(config.ReplSetName).
- SetAuth(options.Credential{
- Password: config.Password,
- Username: config.Username,
- }).
- SetWriteConcern(writeconcern.Majority()).
- SetReadPreference(readpref.Primary()).
- SetDirect(false)
-
- client, err := mongo.Connect(ctx, opts)
- if err != nil {
- return nil, errors.Wrap(err, "connect to mongodb")
- }
- return client, err
-}
-
-func (mgr *Manager) IsCurrentMemberInCluster(ctx context.Context, cluster *dcs.Cluster) bool {
- client, err := mgr.GetReplSetClient(ctx, cluster)
- if err != nil {
- mgr.Logger.Info("Get replSet client failed", "error", err.Error())
- return true
- }
- defer client.Disconnect(ctx) //nolint:errcheck
-
- rsConfig, err := GetReplSetConfig(ctx, client)
- if rsConfig == nil {
- mgr.Logger.Info("Get replSet config failed", "error", err.Error())
- //
- return true
- }
-
- for _, member := range rsConfig.Members {
- if strings.HasPrefix(member.Host, mgr.CurrentMemberName) || strings.HasPrefix(member.Host, mgr.CurrentMemberIP) {
- return true
- }
- }
-
- return false
-}
-
-func (mgr *Manager) IsCurrentMemberHealthy(ctx context.Context, cluster *dcs.Cluster) bool {
- return mgr.IsMemberHealthy(ctx, cluster, nil)
-}
-
-func (mgr *Manager) IsMemberHealthy(ctx context.Context, cluster *dcs.Cluster, member *dcs.Member) bool {
- var memberName string
- if member != nil {
- memberName = member.Name
- } else {
- memberName = mgr.CurrentMemberName
- }
-
- rsStatus, err := mgr.GetReplSetStatus(ctx)
- if err != nil {
- mgr.Logger.Info("get replset status failed", "error", err.Error())
- return false
- }
-
- if rsStatus == nil {
- return false
- }
-
- for _, member := range rsStatus.Members {
- if (strings.HasPrefix(member.Name, memberName) || strings.HasPrefix(member.Name, mgr.CurrentMemberIP)) &&
- member.Health == 1 {
- return true
- }
- }
- return false
-}
-
-func (mgr *Manager) Recover(ctx context.Context, cluster *dcs.Cluster) error {
- if mgr.IsCurrentMemberInCluster(ctx, cluster) {
- return nil
- }
- return mgr.UpdateCurrentMemberHost(ctx, cluster)
-}
-
-func (mgr *Manager) UpdateCurrentMemberHost(ctx context.Context, cluster *dcs.Cluster) error {
- client, err := mgr.GetReplSetClient(ctx, cluster)
- if err != nil {
- return err
- }
- defer client.Disconnect(ctx) //nolint:errcheck
-
- currentMember := cluster.GetMemberWithName(mgr.GetCurrentMemberName())
- currentHost := cluster.GetMemberAddrWithPort(*currentMember)
- rsConfig, err := GetReplSetConfig(ctx, client)
- if rsConfig == nil {
- mgr.Logger.Info("Get replSet config failed", "error", err.Error())
- return err
- }
-
- var invalidMembers []*ConfigMember
- for i, configMember := range rsConfig.Members {
- host := configMember.Host
- isInvalid := true
- for _, member := range cluster.Members {
- if strings.HasPrefix(host, member.Name) || strings.HasPrefix(host, member.PodIP) {
- isInvalid = false
- continue
- }
- }
- if isInvalid {
- invalidMembers = append(invalidMembers, &rsConfig.Members[i])
- }
- }
- if len(invalidMembers) > 1 {
- return errors.Errorf("the replica set has more than one invalid members: %v", invalidMembers)
- }
- if len(invalidMembers) == 0 {
- return nil
- }
- configMember := invalidMembers[0]
- configMember.Host = currentHost
-
- rsConfig.Version++
- return SetReplSetConfig(ctx, client, rsConfig)
-}
-
-func (mgr *Manager) JoinCurrentMemberToCluster(ctx context.Context, cluster *dcs.Cluster) error {
- client, err := mgr.GetReplSetClient(ctx, cluster)
- if err != nil {
- return err
- }
- defer client.Disconnect(ctx) //nolint:errcheck
-
- currentMember := cluster.GetMemberWithName(mgr.GetCurrentMemberName())
- currentHost := cluster.GetMemberAddrWithPort(*currentMember)
- rsConfig, err := GetReplSetConfig(ctx, client)
- if rsConfig == nil {
- mgr.Logger.Info("Get replSet config failed", "error", err.Error())
- return err
- }
-
- var lastID int
- var configMember ConfigMember
- for _, configMember = range rsConfig.Members {
- if configMember.ID > lastID {
- lastID = configMember.ID
- }
- }
- configMember.ID = lastID + 1
- configMember.Host = currentHost
- configMember.Priority = SecondaryPriority
- rsConfig.Members = append(rsConfig.Members, configMember)
-
- rsConfig.Version++
- return SetReplSetConfig(ctx, client, rsConfig)
-}
-
-func (mgr *Manager) LeaveMemberFromCluster(ctx context.Context, cluster *dcs.Cluster, memberName string) error {
- client, err := mgr.GetLeaderClient(ctx, cluster)
- if err != nil {
- return err
- }
- defer client.Disconnect(ctx) //nolint:errcheck
-
- rsConfig, err := GetReplSetConfig(ctx, client)
- if rsConfig == nil {
- mgr.Logger.Info("Get replSet config failed", "error", err.Error())
- return err
- }
-
- mgr.Logger.Info(fmt.Sprintf("Delete member: %s", memberName))
- configMembers := make([]ConfigMember, 0, len(rsConfig.Members)-1)
- isDeleted := true
- for _, configMember := range rsConfig.Members {
- if strings.HasPrefix(configMember.Host, memberName) || strings.HasPrefix(configMember.Host, mgr.CurrentMemberIP) {
- isDeleted = false
- continue
- }
- configMembers = append(configMembers, configMember)
- }
- if isDeleted {
- mgr.Logger.Info("member is already deleted", "member", memberName)
- return nil
- }
-
- rsConfig.Members = configMembers
- rsConfig.Version++
- return SetReplSetConfig(ctx, client, rsConfig)
-}
-
-func (mgr *Manager) IsClusterHealthy(ctx context.Context, cluster *dcs.Cluster) bool {
- client, err := mgr.GetReplSetClient(ctx, cluster)
- if err != nil {
- mgr.Logger.Info("Get leader client failed", "error", err.Error())
- return false
- }
- defer client.Disconnect(ctx) //nolint:errcheck
-
- status, err := GetReplSetStatus(ctx, client)
- if err != nil {
- return false
- }
- isHeathly := status.OK != 0
- if !isHeathly {
- statusJSON, _ := json.Marshal(status)
- mgr.Logger.Info("cluster is unhealthy", "status", string(statusJSON))
- }
- return isHeathly
-}
-
-func (mgr *Manager) IsPromoted(ctx context.Context) bool {
- isLeader, err := mgr.IsLeader(ctx, nil)
- if err != nil {
- mgr.Logger.Info("Is leader check failed", "error", err.Error())
- return false
- }
-
- if !isLeader {
- return false
- }
-
- rsConfig, err := mgr.GetReplSetConfig(ctx)
- if rsConfig == nil {
- mgr.Logger.Info("Get replSet config failed", "error", err.Error())
- return false
- }
- for i := range rsConfig.Members {
- host := rsConfig.Members[i].Host
- if strings.HasPrefix(host, mgr.CurrentMemberName) || strings.HasPrefix(host, mgr.CurrentMemberIP) {
- if rsConfig.Members[i].Priority == PrimaryPriority {
- return true
- }
- }
- }
- return false
-}
-
-func (mgr *Manager) Promote(ctx context.Context, cluster *dcs.Cluster) error {
- rsConfig, err := mgr.GetReplSetConfig(ctx)
- if rsConfig == nil {
- mgr.Logger.Info("Get replSet config failed", "error", err.Error())
- return err
- }
-
- for i := range rsConfig.Members {
- host := rsConfig.Members[i].Host
- if strings.HasPrefix(host, mgr.CurrentMemberName) || strings.HasPrefix(host, mgr.CurrentMemberIP) {
- if rsConfig.Members[i].Priority == PrimaryPriority {
- mgr.Logger.Info("Current member already has the highest priority!")
- return nil
- }
-
- rsConfig.Members[i].Priority = PrimaryPriority
- } else if rsConfig.Members[i].Priority == PrimaryPriority {
- rsConfig.Members[i].Priority = SecondaryPriority
- }
- }
-
- rsConfig.Version++
-
- hosts := mgr.GetMemberAddrsFromRSConfig(rsConfig)
- client, err := NewReplSetClient(ctx, hosts)
- if err != nil {
- return err
- }
- defer client.Disconnect(ctx) //nolint:errcheck
- mgr.Logger.Info("reconfig replset", "config", rsConfig)
- return SetReplSetConfig(ctx, client, rsConfig)
-}
-
-func (mgr *Manager) Demote(context.Context) error {
- // mongodb do premote and demote in one action, here do nothing.
- return nil
-}
-
-func (mgr *Manager) Follow(ctx context.Context, cluster *dcs.Cluster) error {
- return nil
-}
-
-func (mgr *Manager) GetHealthiestMember(cluster *dcs.Cluster, candidate string) *dcs.Member {
- rsStatus, _ := mgr.GetReplSetStatus(context.TODO())
- if rsStatus == nil {
- return nil
- }
- healthyMembers := make([]string, 0, len(rsStatus.Members))
- var leader string
- for _, member := range rsStatus.Members {
- if member.Health == 1 {
- m := cluster.GetMemberWithHost(member.Name)
- if m == nil {
- continue
- }
- memberName := m.Name
- if memberName == candidate {
- return m
- }
- healthyMembers = append(healthyMembers, memberName)
- if member.State == 1 {
- leader = memberName
- }
- }
- }
-
- if candidate != "" {
- mgr.Logger.Info("no health member for candidate", "candidate", candidate)
- return nil
- }
-
- if leader != "" {
- return cluster.GetMemberWithName(leader)
- }
-
- // TODO: use lag and other info to pick the healthiest member
- r := rand.New(rand.NewSource(time.Now().UnixNano()))
- healthiestMember := healthyMembers[r.Intn(len(healthyMembers))]
- return cluster.GetMemberWithName(healthiestMember)
-
-}
-
-func (mgr *Manager) HasOtherHealthyLeader(ctx context.Context, cluster *dcs.Cluster) *dcs.Member {
- rsStatus, _ := mgr.GetReplSetStatus(ctx)
- if rsStatus == nil {
- return nil
- }
- healthMembers := map[string]struct{}{}
- var otherLeader string
- for _, member := range rsStatus.Members {
- memberName := member.Name
- if member.State == 1 || member.State == 2 {
- healthMembers[memberName] = struct{}{}
- }
-
- if member.State != 1 {
- continue
- }
- if !strings.HasPrefix(memberName, mgr.CurrentMemberName) && !strings.HasPrefix(memberName, mgr.CurrentMemberIP) {
- otherLeader = memberName
- }
- }
- if otherLeader != "" {
- return cluster.GetMemberWithHost(otherLeader)
- }
-
- rsConfig, err := mgr.GetReplSetConfig(ctx)
- if rsConfig == nil {
- mgr.Logger.Info("Get replSet config failed", "error", err.Error())
- return nil
- }
-
- for _, mb := range rsConfig.Members {
- memberName := mb.Host
- if mb.Priority == PrimaryPriority && !strings.HasPrefix(memberName, mgr.CurrentMemberName) && !strings.HasPrefix(memberName, mgr.CurrentMemberIP) {
- if _, ok := healthMembers[memberName]; ok {
- otherLeader = memberName
- }
- }
- }
-
- if otherLeader != "" {
- return cluster.GetMemberWithHost(otherLeader)
- }
-
- return nil
-}
-
-// HasOtherHealthyMembers Are there any healthy members other than the leader?
-func (mgr *Manager) HasOtherHealthyMembers(ctx context.Context, cluster *dcs.Cluster, leader string) []*dcs.Member {
- members := make([]*dcs.Member, 0)
- rsStatus, _ := mgr.GetReplSetStatus(ctx)
- if rsStatus == nil {
- return members
- }
-
- for _, member := range rsStatus.Members {
- if member == nil {
- continue
- }
- if member.Health != 1 {
- continue
- }
- m := cluster.GetMemberWithHost(member.Name)
- if m == nil {
- continue
- }
- memberName := m.Name
- if memberName == leader {
- continue
- }
- members = append(members, m)
- }
-
- return members
-}
-
-func (mgr *Manager) Lock(ctx context.Context, reason string) error {
- mgr.Logger.Info(fmt.Sprintf("Lock db: %s", reason))
- m := bson.D{
- {Key: "fsync", Value: 1},
- {Key: "lock", Value: true},
- {Key: "comment", Value: reason},
- }
- lockResp := LockResp{}
-
- response := mgr.Client.Database("admin").RunCommand(ctx, m)
- if response.Err() != nil {
- mgr.Logger.Info(fmt.Sprintf("Lock db (%s) failed", reason), "error", response.Err().Error())
- return response.Err()
- }
- if err := response.Decode(&lockResp); err != nil {
- err := errors.Wrap(err, "failed to decode lock response")
- return err
- }
-
- if lockResp.OK != 1 {
- err := errors.Errorf("mongo says: %s", lockResp.Errmsg)
- return err
- }
- mgr.IsLocked = true
- mgr.Logger.Info(fmt.Sprintf("Lock db success times: %d", lockResp.LockCount))
- return nil
-}
-
-func (mgr *Manager) Unlock(ctx context.Context) error {
- mgr.Logger.Info("Unlock db")
- m := bson.M{"fsyncUnlock": 1}
- unlockResp := LockResp{}
- response := mgr.Client.Database("admin").RunCommand(ctx, m)
- if response.Err() != nil {
- mgr.Logger.Info("Unlock db failed", "error", response.Err().Error())
- return response.Err()
- }
- if err := response.Decode(&unlockResp); err != nil {
- err := errors.Wrap(err, "failed to decode unlock response")
- return err
- }
-
- if unlockResp.OK != 1 {
- err := errors.Errorf("mongo says: %s", unlockResp.Errmsg)
- return err
- }
- for unlockResp.LockCount > 0 {
- response = mgr.Client.Database("admin").RunCommand(ctx, m)
- if response.Err() != nil {
- mgr.Logger.Info("Unlock db failed", "error", response.Err().Error())
- return response.Err()
- }
- if err := response.Decode(&unlockResp); err != nil {
- err := errors.Wrap(err, "failed to decode unlock response")
- return err
- }
- }
- mgr.IsLocked = false
- mgr.Logger.Info("Unlock db success")
- return nil
-}
diff --git a/engines/mongodb/replset.go b/engines/mongodb/replset.go
index 1f2d846..60b7a6f 100644
--- a/engines/mongodb/replset.go
+++ b/engines/mongodb/replset.go
@@ -48,45 +48,3 @@ func GetReplSetStatus(ctx context.Context, client *mongo.Client) (*ReplSetStatus
return status, nil
}
-
-func SetReplSetConfig(ctx context.Context, rsClient *mongo.Client, cfg *RSConfig) error {
- resp := OKResponse{}
-
- res := rsClient.Database("admin").RunCommand(ctx, bson.D{{Key: "replSetReconfig", Value: cfg}})
- if res.Err() != nil {
- err := errors.Wrap(res.Err(), "replSetReconfig")
- return err
- }
-
- if err := res.Decode(&resp); err != nil {
- err = errors.Wrap(err, "failed to decode to replSetReconfigResponse")
- return err
- }
-
- if resp.OK != 1 {
- err := errors.Errorf("mongo says: %s", resp.Errmsg)
- return err
- }
-
- return nil
-}
-
-func GetReplSetConfig(ctx context.Context, client *mongo.Client) (*RSConfig, error) {
- resp := ReplSetGetConfig{}
- res := client.Database("admin").RunCommand(ctx, bson.D{{Key: "replSetGetConfig", Value: 1}})
- if res.Err() != nil {
- err := errors.Wrap(res.Err(), "replSetGetConfig")
- return nil, err
- }
- if err := res.Decode(&resp); err != nil {
- err := errors.Wrap(err, "failed to decode to replSetGetConfig")
- return nil, err
- }
-
- if resp.Config == nil {
- err := errors.Errorf("mongo says: %s", resp.Errmsg)
- return nil, err
- }
-
- return resp.Config, nil
-}
diff --git a/engines/mongodb/roles.go b/engines/mongodb/roles.go
deleted file mode 100644
index 11c8768..0000000
--- a/engines/mongodb/roles.go
+++ /dev/null
@@ -1,124 +0,0 @@
-/*
-Copyright (C) 2022-2023 ApeCloud Co., Ltd
-
-This file is part of KubeBlocks project
-
-This program is free software: you can redistribute it and/or modify
-it under the terms of the GNU Affero General Public License as published by
-the Free Software Foundation, either version 3 of the License, or
-(at your option) any later version.
-
-This program is distributed in the hope that it will be useful
-but WITHOUT ANY WARRANTY; without even the implied warranty of
-MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-GNU Affero General Public License for more details.
-
-You should have received a copy of the GNU Affero General Public License
-along with this program. If not, see .
-*/
-
-package mongodb
-
-import (
- "context"
-
- "github.com/pkg/errors"
- "go.mongodb.org/mongo-driver/bson"
- "go.mongodb.org/mongo-driver/mongo"
-)
-
-func CreateRole(ctx context.Context, client *mongo.Client, role string, privileges []RolePrivilege, roles []interface{}) error {
- resp := OKResponse{}
-
- privilegesArr := bson.A{}
- for _, p := range privileges {
- privilegesArr = append(privilegesArr, p)
- }
-
- rolesArr := bson.A{}
- for _, r := range roles {
- rolesArr = append(rolesArr, r)
- }
-
- m := bson.D{
- {Key: "createRole", Value: role},
- {Key: "privileges", Value: privilegesArr},
- {Key: "roles", Value: rolesArr},
- }
-
- res := client.Database("admin").RunCommand(ctx, m)
- if res.Err() != nil {
- return errors.Wrap(res.Err(), "failed to create role")
- }
-
- err := res.Decode(&resp)
- if err != nil {
- return errors.Wrap(err, "failed to decode response")
- }
-
- if resp.OK != 1 {
- return errors.Errorf("mongo says: %s", resp.Errmsg)
- }
-
- return nil
-}
-
-func UpdateRole(ctx context.Context, client *mongo.Client, role string, privileges []RolePrivilege, roles []interface{}) error {
- resp := OKResponse{}
-
- privilegesArr := bson.A{}
- for _, p := range privileges {
- privilegesArr = append(privilegesArr, p)
- }
-
- rolesArr := bson.A{}
- for _, r := range roles {
- rolesArr = append(rolesArr, r)
- }
-
- m := bson.D{
- {Key: "updateRole", Value: role},
- {Key: "privileges", Value: privilegesArr},
- {Key: "roles", Value: rolesArr},
- }
-
- res := client.Database("admin").RunCommand(ctx, m)
- if res.Err() != nil {
- return errors.Wrap(res.Err(), "failed to create role")
- }
-
- err := res.Decode(&resp)
- if err != nil {
- return errors.Wrap(err, "failed to decode response")
- }
-
- if resp.OK != 1 {
- return errors.Errorf("mongo says: %s", resp.Errmsg)
- }
-
- return nil
-}
-
-func GetRole(ctx context.Context, client *mongo.Client, role string) (*Role, error) {
- resp := RoleInfo{}
-
- res := client.Database("admin").RunCommand(ctx, bson.D{
- {Key: "rolesInfo", Value: role},
- {Key: "showPrivileges", Value: true},
- })
- if res.Err() != nil {
- return nil, errors.Wrap(res.Err(), "run command")
- }
-
- err := res.Decode(&resp)
- if err != nil {
- return nil, errors.Wrap(err, "failed to decode response")
- }
- if resp.OK != 1 {
- return nil, errors.Errorf("mongo says: %s", resp.Errmsg)
- }
- if len(resp.Roles) == 0 {
- return nil, nil
- }
- return &resp.Roles[0], nil
-}
diff --git a/engines/mongodb/types.go b/engines/mongodb/types.go
index dca1122..ab19029 100644
--- a/engines/mongodb/types.go
+++ b/engines/mongodb/types.go
@@ -25,16 +25,6 @@ import (
"go.mongodb.org/mongo-driver/bson/primitive"
)
-const (
- MinVotingMembers = 1
- MaxVotingMembers = 7
- MaxMembers = 50
- DefaultPriority = 2
- DefaultVotes = 1
- DefaultReadConcern = "majority"
- DefaultWriteConcern = "majority"
-)
-
// ReplsetTags Set tags: https://docs.mongodb.com/manual/tutorial/configure-replica-set-tag-sets/#add-tag-sets-to-a-replica-set
type ReplsetTags map[string]string
@@ -211,82 +201,3 @@ type StatusOptimes struct {
type MemberHealth int
type MemberState int
-
-const (
- MemberHealthDown MemberHealth = iota
- MemberHealthUp
- MemberStateStartup MemberState = 0
- MemberStatePrimary MemberState = 1
- MemberStateSecondary MemberState = 2
- MemberStateRecovering MemberState = 3
- MemberStateStartup2 MemberState = 5
- MemberStateUnknown MemberState = 6
- MemberStateArbiter MemberState = 7
- MemberStateDown MemberState = 8
- MemberStateRollback MemberState = 9
- MemberStateRemoved MemberState = 10
-)
-
-var MemberStateStrings = map[MemberState]string{
- MemberStateStartup: "STARTUP",
- MemberStatePrimary: "PRIMARY",
- MemberStateSecondary: "SECONDARY",
- MemberStateRecovering: "RECOVERING",
- MemberStateStartup2: "STARTUP2",
- MemberStateUnknown: "UNKNOWN",
- MemberStateArbiter: "ARBITER",
- MemberStateDown: "DOWN",
- MemberStateRollback: "ROLLBACK",
- MemberStateRemoved: "REMOVED",
-}
-
-func (s *ReplSetStatus) GetMembersByState(state MemberState, limit int) []*Member {
- members := make([]*Member, 0)
- for _, member := range s.Members {
- if member.State == state {
- members = append(members, member)
- if limit > 0 && len(members) == limit {
- return members
- }
- }
- }
- return members
-}
-
-func (s *ReplSetStatus) Primary() *Member {
- primary := s.GetMembersByState(MemberStatePrimary, 1)
- if len(primary) == 1 {
- return primary[0]
- }
- return nil
-}
-
-type RolePrivilege struct {
- Resource map[string]interface{} `bson:"resource" json:"resource"`
- Actions []string `bson:"actions" json:"actions"`
-}
-
-type Role struct {
- Role string `bson:"role" json:"role"`
- DB string `bson:"db" json:"db"`
- IsBuiltin string `bson:"isBuiltin" json:"isBuiltin"`
- Roles []map[string]interface{} `bson:"roles" json:"roles"`
- Privileges []RolePrivilege `bson:"privileges" json:"privileges"`
-}
-
-type RoleInfo struct {
- Roles []Role `bson:"roles" json:"roles"`
- OKResponse `bson:",inline"`
-}
-
-type User struct {
- ID string `bson:"_id" json:"_id"`
- User string `bson:"user" json:"user"`
- DB string `bson:"db" json:"db"`
- Roles []map[string]interface{} `bson:"roles" json:"roles"`
-}
-
-type UsersInfo struct {
- Users []User `bson:"users" json:"users"`
- OKResponse `bson:",inline"`
-}
diff --git a/engines/mongodb/users.go b/engines/mongodb/users.go
deleted file mode 100644
index d71c199..0000000
--- a/engines/mongodb/users.go
+++ /dev/null
@@ -1,96 +0,0 @@
-/*
-Copyright (C) 2022-2023 ApeCloud Co., Ltd
-
-This file is part of KubeBlocks project
-
-This program is free software: you can redistribute it and/or modify
-it under the terms of the GNU Affero General Public License as published by
-the Free Software Foundation, either version 3 of the License, or
-(at your option) any later version.
-
-This program is distributed in the hope that it will be useful
-but WITHOUT ANY WARRANTY; without even the implied warranty of
-MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-GNU Affero General Public License for more details.
-
-You should have received a copy of the GNU Affero General Public License
-along with this program. If not, see .
-*/
-
-package mongodb
-
-import (
- "context"
-
- "github.com/pkg/errors"
- "go.mongodb.org/mongo-driver/bson"
- "go.mongodb.org/mongo-driver/mongo"
-)
-
-func CreateUser(ctx context.Context, client *mongo.Client, user, pwd string, roles ...map[string]interface{}) error {
- resp := OKResponse{}
-
- res := client.Database("admin").RunCommand(ctx, bson.D{
- {Key: "createUser", Value: user},
- {Key: "pwd", Value: pwd},
- {Key: "roles", Value: roles},
- })
- if res.Err() != nil {
- return errors.Wrap(res.Err(), "failed to create user")
- }
-
- err := res.Decode(&resp)
- if err != nil {
- return errors.Wrap(err, "failed to decode response")
- }
-
- if resp.OK != 1 {
- return errors.Errorf("mongo says: %s", resp.Errmsg)
- }
-
- return nil
-}
-
-func GetUser(ctx context.Context, client *mongo.Client, userName string) (*User, error) {
- resp := UsersInfo{}
- res := client.Database("admin").RunCommand(ctx, bson.D{{Key: "usersInfo", Value: userName}})
- if res.Err() != nil {
- return nil, errors.Wrap(res.Err(), "run command")
- }
-
- err := res.Decode(&resp)
- if err != nil {
- return nil, errors.Wrap(err, "failed to decode response")
- }
- if resp.OK != 1 {
- return nil, errors.Errorf("mongo says: %s", resp.Errmsg)
- }
- if len(resp.Users) == 0 {
- return nil, nil
- }
- return &resp.Users[0], nil
-}
-
-func UpdateUserRoles(ctx context.Context, client *mongo.Client, userName string, roles []map[string]interface{}) error {
- return client.Database("admin").RunCommand(ctx, bson.D{{Key: "updateUser", Value: userName}, {Key: "roles", Value: roles}}).Err()
-}
-
-// UpdateUserPass updates user's password
-func UpdateUserPass(ctx context.Context, client *mongo.Client, name, pass string) error {
- return client.Database("admin").RunCommand(ctx, bson.D{{Key: "updateUser", Value: name}, {Key: "pwd", Value: pass}}).Err()
-}
-
-// DropUser delete user
-func DropUser(ctx context.Context, client *mongo.Client, userName string) error {
- user, err := GetUser(ctx, client, userName)
- if err != nil {
- return errors.Wrap(err, "get user")
- }
-
- if user == nil {
- return errors.New(userName + " user not exists")
- }
-
- err = client.Database("admin").RunCommand(ctx, bson.D{{Key: "dropUser", Value: userName}}).Err()
- return errors.Wrap(err, "drop user")
-}
diff --git a/engines/mysql/commands.go b/engines/mysql/commands.go
index 5bd1d6d..40edc77 100644
--- a/engines/mysql/commands.go
+++ b/engines/mysql/commands.go
@@ -285,7 +285,7 @@ func (m *Commands) ConnectExample(info *engines.ConnectionInfo, client string) s
}
func (m *Commands) ExecuteCommand(scripts []string) ([]string, []corev1.EnvVar, error) {
- cmd := []string{}
+ var cmd []string
cmd = append(cmd, "/bin/sh", "-c", "-ex")
cmd = append(cmd, fmt.Sprintf("%s -u%s -p%s -e %s", m.info.Client,
fmt.Sprintf("$%s", engines.EnvVarMap[engines.USER]),
diff --git a/engines/mysql/config.go b/engines/mysql/config.go
index 1b7fa24..ca23310 100644
--- a/engines/mysql/config.go
+++ b/engines/mysql/config.go
@@ -24,8 +24,6 @@ import (
"crypto/x509"
"database/sql"
"fmt"
- "net"
- "strconv"
"time"
"github.com/go-sql-driver/mysql"
@@ -39,21 +37,6 @@ import (
const (
// configurations to connect to MySQL, either a data source name represent by URL.
connectionURLKey = "url"
-
- // To connect to MySQL running over SSL you have to download a
- // SSL certificate. If this is provided the driver will connect using
- // SSL. If you have disabled SSL you can leave this empty.
- // When the user provides a pem path their connection string must end with
- // &tls=custom
- // The connection string should be in the following format
- // "%s:%s@tcp(%s:3306)/%s?allowNativePasswords=true&tls=custom",'myadmin@mydemoserver', 'yourpassword', 'mydemoserver.mysql.database.azure.com', 'targetdb'.
- pemPathKey = "pemPath"
-
- // other general settings for DB connections.
- maxIdleConnsKey = "maxIdleConns"
- maxOpenConnsKey = "maxOpenConns"
- connMaxLifetimeKey = "connMaxLifetime"
- connMaxIdleTimeKey = "connMaxIdleTime"
)
const (
@@ -64,76 +47,41 @@ const (
)
type Config struct {
- URL string
- Port string
- Username string
- Password string
- pemPath string
- maxIdleConns int
- maxOpenConns int
- connMaxLifetime time.Duration
- connMaxIdletime time.Duration
+ URL string
+ Port string
+ Username string
+ Password string
+ pemPath string
+ MaxIdleConns int
+ MaxOpenConns int
+ AdminUsername string
+ AdminPassword string
+ ReplicationUsername string
+ ReplicationPassword string
}
var fs = afero.NewOsFs()
var config *Config
-func NewConfig(properties map[string]string) (*Config, error) {
- config = &Config{}
-
- if val, ok := properties[connectionURLKey]; ok && val != "" {
- config.URL = val
- } else {
- config.URL = "root:@tcp(127.0.0.1:3306)/mysql?multiStatements=true"
+func NewConfig() (*Config, error) {
+ config = &Config{
+ URL: "root:@tcp(127.0.0.1:3306)/mysql?multiStatements=true",
+ MaxIdleConns: 1,
+ MaxOpenConns: 5,
}
- if viper.IsSet(constant.KBEnvServiceUser) {
- config.Username = viper.GetString(constant.KBEnvServiceUser)
- } else if viper.IsSet(EnvRootUser) {
- config.Username = viper.GetString(EnvRootUser)
- } else if username, ok := properties["username"]; ok {
- config.Username = username
- }
-
- if viper.IsSet(constant.KBEnvServicePassword) {
- config.Password = viper.GetString(constant.KBEnvServicePassword)
- } else if viper.IsSet(EnvRootPass) {
- config.Password = viper.GetString(EnvRootPass)
- }
+ config.Username = getRootUserName()
+ config.Password = getRootPassword()
+ config.AdminUsername = getAdminUserName()
+ config.AdminPassword = getAdminPassword()
+ config.ReplicationUsername = getReplicationUserName()
+ config.ReplicationPassword = getReplicationPassword()
if viper.IsSet(constant.KBEnvServicePort) {
config.Port = viper.GetString(constant.KBEnvServicePort)
}
- if val, ok := properties[pemPathKey]; ok {
- config.pemPath = val
- }
-
- if val, ok := properties[maxIdleConnsKey]; ok {
- if i, err := strconv.Atoi(val); err == nil {
- config.maxIdleConns = i
- }
- }
-
- if val, ok := properties[maxOpenConnsKey]; ok {
- if i, err := strconv.Atoi(val); err == nil {
- config.maxOpenConns = i
- }
- }
-
- if val, ok := properties[connMaxLifetimeKey]; ok {
- if d, err := time.ParseDuration(val); err == nil {
- config.connMaxLifetime = d
- }
- }
-
- if val, ok := properties[connMaxIdleTimeKey]; ok {
- if d, err := time.ParseDuration(val); err == nil {
- config.connMaxIdletime = d
- }
- }
-
if config.pemPath != "" {
rootCertPool := x509.NewCertPool()
pem, err := afero.ReadFile(fs, config.pemPath)
@@ -154,28 +102,57 @@ func NewConfig(properties map[string]string) (*Config, error) {
return config, nil
}
-func (config *Config) GetLocalDBConn() (*sql.DB, error) {
- mysqlConfig, err := mysql.ParseDSN(config.URL)
- if err != nil {
- return nil, errors.Wrapf(err, "illegal Data Source Name (DNS) specified by %s", connectionURLKey)
+func getRootUserName() string {
+ if viper.IsSet(constant.KBEnvServiceUser) {
+ return viper.GetString(constant.KBEnvServiceUser)
+ } else if viper.IsSet(EnvRootUser) {
+ return viper.GetString(EnvRootUser)
}
- mysqlConfig.User = config.Username
- mysqlConfig.Passwd = config.Password
- mysqlConfig.Timeout = time.Second * 5
- mysqlConfig.ReadTimeout = time.Second * 5
- mysqlConfig.WriteTimeout = time.Second * 5
- if config.Port != "" {
- mysqlConfig.Addr = "127.0.0.1:" + config.Port
+ return ""
+}
+
+func getRootPassword() string {
+ if viper.IsSet(constant.KBEnvServicePassword) {
+ return viper.GetString(constant.KBEnvServicePassword)
+ } else if viper.IsSet(EnvRootPass) {
+ return viper.GetString(EnvRootPass)
}
- db, err := GetDBConnection(mysqlConfig.FormatDSN())
- if err != nil {
- return nil, errors.Wrap(err, "get DB connection failed")
+ return ""
+}
+
+func getAdminUserName() string {
+ // if the user is not set, use the root user
+ if viper.IsSet("MYSQL_ADMIN_USER") {
+ return viper.GetString("MYSQL_ADMIN_USER")
}
+ return getRootUserName()
+}
- return db, nil
+func getAdminPassword() string {
+ // if the password is not set, use the root password
+ if viper.IsSet("MYSQL_ADMIN_PASSWORD") {
+ return viper.GetString("MYSQL_ADMIN_PASSWORD")
+ }
+ return getRootPassword()
+}
+
+func getReplicationUserName() string {
+ // if the user is not set, use the admin user
+ if viper.IsSet("MYSQL_REPLICATION_USER") {
+ return viper.GetString("MYSQL_REPLICATION_USER")
+ }
+ return getAdminUserName()
+}
+
+func getReplicationPassword() string {
+ // if the password is not set, use the admin password
+ if viper.IsSet("MYSQL_REPLICATION_PASSWORD") {
+ return viper.GetString("MYSQL_REPLICATION_PASSWORD")
+ }
+ return getAdminPassword()
}
-func (config *Config) GetDBConnWithAddr(addr string) (*sql.DB, error) {
+func (config *Config) GetLocalDBConn() (*sql.DB, error) {
mysqlConfig, err := mysql.ParseDSN(config.URL)
if err != nil {
return nil, errors.Wrapf(err, "illegal Data Source Name (DNS) specified by %s", connectionURLKey)
@@ -185,7 +162,9 @@ func (config *Config) GetDBConnWithAddr(addr string) (*sql.DB, error) {
mysqlConfig.Timeout = time.Second * 5
mysqlConfig.ReadTimeout = time.Second * 5
mysqlConfig.WriteTimeout = time.Second * 5
- mysqlConfig.Addr = addr
+ if config.Port != "" {
+ mysqlConfig.Addr = "127.0.0.1:" + config.Port
+ }
db, err := GetDBConnection(mysqlConfig.FormatDSN())
if err != nil {
return nil, errors.Wrap(err, "get DB connection failed")
@@ -193,26 +172,3 @@ func (config *Config) GetDBConnWithAddr(addr string) (*sql.DB, error) {
return db, nil
}
-
-func (config *Config) GetDBPort() int {
- mysqlConfig, err := mysql.ParseDSN(config.URL)
- if err != nil {
- return defaultDBPort
- }
-
- _, portStr, err := net.SplitHostPort(mysqlConfig.Addr)
- if err != nil {
- return defaultDBPort
- }
-
- port, err := strconv.Atoi(portStr)
- if err != nil {
- return defaultDBPort
- }
-
- return port
-}
-
-func GetConfig() *Config {
- return config
-}
diff --git a/engines/mysql/config_test.go b/engines/mysql/config_test.go
index fdd798f..88aaeab 100644
--- a/engines/mysql/config_test.go
+++ b/engines/mysql/config_test.go
@@ -21,39 +21,10 @@ package mysql
import (
"testing"
- "time"
"github.com/spf13/afero"
"github.com/spf13/viper"
"github.com/stretchr/testify/assert"
-
- "github.com/apecloud/dbctl/constant"
- "github.com/apecloud/dbctl/engines"
-)
-
-const (
- fakeUser = "fake-user"
- fakePassword = "fake-password"
- fakePemPath = "fake-pem-path"
- fakeAddr = "fake-addr"
-)
-
-var (
- fakeProperties = engines.Properties{
- connectionURLKey: "root:@tcp(127.0.0.1:3306)/mysql?multiStatements=true",
- maxOpenConnsKey: "5",
- maxIdleConnsKey: "4",
- connMaxLifetimeKey: "10m",
- connMaxIdleTimeKey: "500s",
- }
-
- fakePropertiesWithPem = engines.Properties{
- pemPathKey: fakePemPath,
- }
-
- fakePropertiesWithWrongURL = engines.Properties{
- connectionURLKey: "fake-url",
- }
)
func TestNewConfig(t *testing.T) {
@@ -63,60 +34,19 @@ func TestNewConfig(t *testing.T) {
viper.Reset()
}()
- t.Run("with empty properties", func(t *testing.T) {
- fakeConfig, err := NewConfig(map[string]string{})
+ t.Run("with default", func(t *testing.T) {
+ fakeConfig, err := NewConfig()
assert.Nil(t, err)
assert.NotNil(t, fakeConfig)
assert.Equal(t, "root:@tcp(127.0.0.1:3306)/mysql?multiStatements=true", fakeConfig.URL)
- })
-
- t.Run("with default properties", func(t *testing.T) {
- viper.Set(constant.KBEnvServiceUser, fakeUser)
- viper.Set(constant.KBEnvServicePassword, fakePassword)
-
- fakeConfig, err := NewConfig(fakeProperties)
- assert.Nil(t, err)
- assert.NotNil(t, fakeConfig)
- assert.Equal(t, "root:@tcp(127.0.0.1:3306)/mysql?multiStatements=true", fakeConfig.URL)
- assert.Equal(t, 5, fakeConfig.maxOpenConns)
- assert.Equal(t, 4, fakeConfig.maxIdleConns)
- assert.Equal(t, time.Minute*10, fakeConfig.connMaxLifetime)
- assert.Equal(t, time.Second*500, fakeConfig.connMaxIdletime)
- assert.Equal(t, fakeUser, fakeConfig.Username)
- assert.Equal(t, fakePassword, fakeConfig.Password)
- })
-
- t.Run("can't open pem file", func(t *testing.T) {
- fakeConfig, err := NewConfig(fakePropertiesWithPem)
- assert.Nil(t, fakeConfig)
- assert.NotNil(t, err)
- assert.ErrorContains(t, err, "Error reading PEM file from fake-pem-path")
- })
-
- f, err := fs.Create(fakePemPath)
- assert.Nil(t, err)
- _ = f.Close()
- t.Run("", func(t *testing.T) {
- fakeConfig, err := NewConfig(fakePropertiesWithPem)
- assert.Nil(t, fakeConfig)
- assert.NotNil(t, err)
- assert.ErrorContains(t, err, "failed to append PEM")
+ assert.Equal(t, 5, fakeConfig.MaxOpenConns)
+ assert.Equal(t, 1, fakeConfig.MaxIdleConns)
})
}
func TestConfig_GetLocalDBConn(t *testing.T) {
- t.Run("parse dsn failed", func(t *testing.T) {
- fakeConfig, err := NewConfig(fakePropertiesWithWrongURL)
- assert.Nil(t, err)
-
- db, err := fakeConfig.GetLocalDBConn()
- assert.Nil(t, db)
- assert.NotNil(t, err)
- assert.ErrorContains(t, err, "illegal Data Source Name (DNS) specified by url")
- })
-
t.Run("get DB connection with addr successfully", func(t *testing.T) {
- fakeConfig, err := NewConfig(fakeProperties)
+ fakeConfig, err := NewConfig()
assert.Nil(t, err)
db, err := fakeConfig.GetLocalDBConn()
@@ -124,42 +54,3 @@ func TestConfig_GetLocalDBConn(t *testing.T) {
assert.NotNil(t, db)
})
}
-
-func TestConfig_GetDBConnWithAddr(t *testing.T) {
- t.Run("parse dsn failed", func(t *testing.T) {
- fakeConfig, err := NewConfig(fakePropertiesWithWrongURL)
- assert.Nil(t, err)
-
- db, err := fakeConfig.GetDBConnWithAddr(fakeAddr)
- assert.Nil(t, db)
- assert.NotNil(t, err)
- assert.ErrorContains(t, err, "illegal Data Source Name (DNS) specified by url")
- })
-
- t.Run("get local DB connection successfully", func(t *testing.T) {
- fakeConfig, err := NewConfig(fakeProperties)
- assert.Nil(t, err)
-
- db, err := fakeConfig.GetDBConnWithAddr(fakeAddr)
- assert.Nil(t, err)
- assert.NotNil(t, db)
- })
-}
-
-func TestConfig_GetDBPort(t *testing.T) {
- t.Run("parse dsn failed", func(t *testing.T) {
- fakeConfig, err := NewConfig(fakePropertiesWithWrongURL)
- assert.Nil(t, err)
-
- port := fakeConfig.GetDBPort()
- assert.Equal(t, 3306, port)
- })
-
- t.Run("get db port successfully", func(t *testing.T) {
- fakeConfig, err := NewConfig(fakeProperties)
- assert.Nil(t, err)
-
- port := fakeConfig.GetDBPort()
- assert.Equal(t, 3306, port)
- })
-}
diff --git a/engines/mysql/conn.go b/engines/mysql/conn.go
index 396f0e0..24f1f0c 100644
--- a/engines/mysql/conn.go
+++ b/engines/mysql/conn.go
@@ -17,8 +17,6 @@ package mysql
import (
"database/sql"
-
- "github.com/apecloud/dbctl/dcs"
)
var connectionPoolCache = make(map[string]*sql.DB)
@@ -37,17 +35,3 @@ func GetDBConnection(dsn string) (*sql.DB, error) {
connectionPoolCache[dsn] = db
return db, nil
}
-
-func (mgr *Manager) GetMemberConnection(cluster *dcs.Cluster, member *dcs.Member) (db *sql.DB, err error) {
- if member != nil && member.Name != mgr.CurrentMemberName {
- addr := cluster.GetMemberAddrWithPort(*member)
- db, err = config.GetDBConnWithAddr(addr)
- if err != nil {
- return nil, err
- }
- } else {
- db = mgr.DB
- }
-
- return db, nil
-}
diff --git a/engines/mysql/get_replica_role.go b/engines/mysql/get_replica_role.go
index 14fbdbc..f458749 100644
--- a/engines/mysql/get_replica_role.go
+++ b/engines/mysql/get_replica_role.go
@@ -22,20 +22,18 @@ package mysql
import (
"context"
- "github.com/apecloud/dbctl/engines/models"
+ "github.com/apecloud/kubeblocks/pkg/constant"
)
+var semiSyncSourceVersion = "8.0.26"
+
func (mgr *Manager) GetReplicaRole(ctx context.Context) (string, error) {
return mgr.GetReplicaRoleFromDB(ctx)
}
func (mgr *Manager) GetReplicaRoleFromDB(ctx context.Context) (string, error) {
- slaveRunning, err := mgr.isSlaveRunning()
- if err != nil {
- return "", err
- }
- if slaveRunning {
- return models.SECONDARY, nil
+ if mgr.isSlaveRunning(ctx) {
+ return constant.Secondary, nil
}
hasSlave, err := mgr.hasSlaveHosts()
@@ -43,34 +41,38 @@ func (mgr *Manager) GetReplicaRoleFromDB(ctx context.Context) (string, error) {
return "", err
}
if hasSlave {
- return models.PRIMARY, nil
+ return constant.Primary, nil
}
- isReadonly, err := mgr.IsReadonly(ctx, nil, nil)
+ isReadonly, err := mgr.IsReadonly(ctx)
if err != nil {
return "", err
}
if isReadonly {
- // TODO: in case of diskFull lock, database will be set readonly,
+ // TODO: in case of diskfull lock, dababase will be set readonly,
// how to deal with this situation
- return models.SECONDARY, nil
+ return constant.Secondary, nil
}
- return models.PRIMARY, nil
+ return constant.Primary, nil
}
-func (mgr *Manager) isSlaveRunning() (bool, error) {
+func (mgr *Manager) isSlaveRunning(ctx context.Context) bool {
var rowMap = mgr.slaveStatus
if len(rowMap) == 0 {
- return false, nil
+ return false
}
ioRunning := rowMap.GetString("Slave_IO_Running")
sqlRunning := rowMap.GetString("Slave_SQL_Running")
+ if use, _ := mgr.UseSourceReplica(ctx); use {
+ ioRunning = rowMap.GetString("Replica_IO_Running")
+ sqlRunning = rowMap.GetString("Replica_SQL_Running")
+ }
if ioRunning == "Yes" || sqlRunning == "Yes" {
- return true, nil
+ return true
}
- return false, nil
+ return false
}
func (mgr *Manager) hasSlaveHosts() (bool, error) {
@@ -92,3 +94,14 @@ func (mgr *Manager) hasSlaveHosts() (bool, error) {
return true, nil
}
+
+func (mgr *Manager) UseSourceReplica(ctx context.Context) (bool, error) {
+ version, err := mgr.GetVersion(ctx)
+ if err != nil {
+ return false, err
+ }
+ if IsBeforeVersion(version, semiSyncSourceVersion) {
+ return false, nil
+ }
+ return true, nil
+}
diff --git a/engines/mysql/gtid.go b/engines/mysql/gtid.go
deleted file mode 100644
index 69271bd..0000000
--- a/engines/mysql/gtid.go
+++ /dev/null
@@ -1,171 +0,0 @@
-/*
-Copyright (C) 2022-2023 ApeCloud Co., Ltd
-
-This file is part of KubeBlocks project
-
-This program is free software: you can redistribute it and/or modify
-it under the terms of the GNU Affero General Public License as published by
-the Free Software Foundation, either version 3 of the License, or
-(at your option) any later version.
-
-This program is distributed in the hope that it will be useful
-but WITHOUT ANY WARRANTY; without even the implied warranty of
-MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-GNU Affero General Public License for more details.
-
-You should have received a copy of the GNU Affero General Public License
-along with this program. If not, see .
-*/
-
-package mysql
-
-import (
- "fmt"
- "regexp"
- "strconv"
- "strings"
-)
-
-var (
- singleValueInterval = regexp.MustCompile("^([0-9]+)$")
- multiValueInterval = regexp.MustCompile("^([0-9]+)[-]([0-9]+)$")
-)
-
-// GTIDItem represents an item in a set of GTID ranges,
-// for example, the item: "ee194423-3040-11ee-9393-eab5dfc9b22a:1-5:8-10"
-type GTIDItem struct {
- ServerUUID string
- Ranges string
-}
-
-func NewGTIDItem(gtidString string) (*GTIDItem, error) {
- gtidString = strings.TrimSpace(gtidString)
- tokens := strings.SplitN(gtidString, ":", 2)
- if len(tokens) != 2 {
- return nil, fmt.Errorf("GTID wrong format: %s", gtidString)
- }
- if tokens[0] == "" {
- return nil, fmt.Errorf("GTID no server UUID: %s", tokens[0])
- }
- if tokens[1] == "" {
- return nil, fmt.Errorf("GTID no range: %s", tokens[1])
- }
- gtidItem := >IDItem{ServerUUID: tokens[0], Ranges: tokens[1]}
- return gtidItem, nil
-}
-
-func (gtid *GTIDItem) String() string {
- return fmt.Sprintf("%s:%s", gtid.ServerUUID, gtid.Ranges)
-}
-
-func (gtid *GTIDItem) Explode() (result []*GTIDItem) {
- intervals := strings.Split(gtid.Ranges, ":")
- for _, interval := range intervals {
- if submatch := multiValueInterval.FindStringSubmatch(interval); submatch != nil {
- intervalStart, _ := strconv.Atoi(submatch[1])
- intervalEnd, _ := strconv.Atoi(submatch[2])
- for i := intervalStart; i <= intervalEnd; i++ {
- result = append(result, >IDItem{ServerUUID: gtid.ServerUUID, Ranges: fmt.Sprintf("%d", i)})
- }
- } else if submatch := singleValueInterval.FindStringSubmatch(interval); submatch != nil {
- result = append(result, >IDItem{ServerUUID: gtid.ServerUUID, Ranges: interval})
- }
- }
- return result
-}
-
-type GTIDSet struct {
- Items []*GTIDItem
-}
-
-func NewOracleGtidSet(gtidSet string) (res *GTIDSet, err error) {
- res = >IDSet{}
-
- gtidSet = strings.TrimSpace(gtidSet)
- if gtidSet == "" {
- return res, nil
- }
- gtids := strings.Split(gtidSet, ",")
- for _, gtid := range gtids {
- gtid = strings.TrimSpace(gtid)
- if gtid == "" {
- continue
- }
- if gtidRange, err := NewGTIDItem(gtid); err == nil {
- res.Items = append(res.Items, gtidRange)
- } else {
- return res, err
- }
- }
- return res, nil
-}
-
-func (gtidSet *GTIDSet) RemoveUUID(uuid string) (removed bool) {
- var filteredEntries []*GTIDItem
- for _, item := range gtidSet.Items {
- if item.ServerUUID == uuid {
- removed = true
- } else {
- filteredEntries = append(filteredEntries, item)
- }
- }
- if removed {
- gtidSet.Items = filteredEntries
- }
- return removed
-}
-
-func (gtidSet *GTIDSet) RetainUUID(uuid string) (anythingRemoved bool) {
- return gtidSet.RetainUUIDs([]string{uuid})
-}
-
-func (gtidSet *GTIDSet) RetainUUIDs(uuids []string) (anythingRemoved bool) {
- retainUUIDs := map[string]bool{}
- for _, uuid := range uuids {
- retainUUIDs[uuid] = true
- }
- var filteredEntries []*GTIDItem
- for _, item := range gtidSet.Items {
- if retainUUIDs[item.ServerUUID] {
- filteredEntries = append(filteredEntries, item)
- } else {
- anythingRemoved = true
- }
- }
- if anythingRemoved {
- gtidSet.Items = filteredEntries
- }
- return anythingRemoved
-}
-
-func (gtidSet *GTIDSet) SharedUUIDs(other *GTIDSet) (shared []string) {
- gtidSetUUIDs := map[string]bool{}
- for _, item := range gtidSet.Items {
- gtidSetUUIDs[item.ServerUUID] = true
- }
- for _, item := range other.Items {
- if gtidSetUUIDs[item.ServerUUID] {
- shared = append(shared, item.ServerUUID)
- }
- }
- return shared
-}
-
-func (gtidSet *GTIDSet) Explode() (result []*GTIDItem) {
- for _, entries := range gtidSet.Items {
- result = append(result, entries.Explode()...)
- }
- return result
-}
-
-func (gtidSet *GTIDSet) String() string {
- var tokens []string
- for _, item := range gtidSet.Items {
- tokens = append(tokens, item.String())
- }
- return strings.Join(tokens, ",")
-}
-
-func (gtidSet *GTIDSet) IsEmpty() bool {
- return len(gtidSet.Items) == 0
-}
diff --git a/engines/mysql/gtid_test.go b/engines/mysql/gtid_test.go
deleted file mode 100644
index 12fc51e..0000000
--- a/engines/mysql/gtid_test.go
+++ /dev/null
@@ -1,167 +0,0 @@
-/*
-Copyright (C) 2022-2023 ApeCloud Co., Ltd
-
-This file is part of KubeBlocks project
-
-This program is free software: you can redistribute it and/or modify
-it under the terms of the GNU Affero General Public License as published by
-the Free Software Foundation, either version 3 of the License, or
-(at your option) any later version.
-
-This program is distributed in the hope that it will be useful
-but WITHOUT ANY WARRANTY; without even the implied warranty of
-MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-GNU Affero General Public License for more details.
-
-You should have received a copy of the GNU Affero General Public License
-along with this program. If not, see .
-*/
-
-package mysql
-
-import (
- "testing"
-
- "github.com/stretchr/testify/assert"
-)
-
-const (
- fakeGTIDString = "ee194423-3040-11ee-9393-eab5dfc9b22a:1-5:7:9-10"
- fakeGTIDSet = " ee194423-3040-11ee-9393-eab5dfc9b22a:1-5:7:9-10,b3512340-fc03-11ec-920f-000c29f6e7cf:1-4 "
- fakeServerUUID = "ee194423-3040-11ee-9393-eab5dfc9b22a"
-)
-
-func TestNewGTIDItem(t *testing.T) {
- fakeGTIDStrings := []string{
- fakeServerUUID,
- ":1-5",
- "ee194423-3040-11ee-9393-eab5dfc9b22a:",
- fakeGTIDString,
- }
- expectResults := []string{
- "GTID wrong format:",
- "GTID no server UUID:",
- "GTID no range:",
- "",
- }
-
- for i, s := range fakeGTIDStrings {
- gtidItem, err := NewGTIDItem(s)
- assert.Equal(t, expectResults[i] == "", err == nil)
- if err != nil {
- assert.ErrorContains(t, err, expectResults[i])
- }
- assert.Equal(t, expectResults[i] != "", gtidItem == nil)
- }
-}
-
-func TestGTIDItem_Explode(t *testing.T) {
- gtidItem, err := NewGTIDItem(fakeGTIDString)
- assert.Nil(t, err)
-
- items := gtidItem.Explode()
- assert.NotNil(t, items)
- assert.Len(t, items, 8)
-}
-
-func TestNewOracleGtidSet(t *testing.T) {
- testCases := []struct {
- gtidSets string
- expectItemsLen int
- expectErrorMsg string
- }{
- {"", 0, ""},
- {" , :1-5, ", 0, "GTID no server UUID:"},
- {fakeGTIDSet, 2, ""},
- }
-
- for _, testCase := range testCases {
- gtidSets, err := NewOracleGtidSet(testCase.gtidSets)
- assert.Len(t, gtidSets.Items, testCase.expectItemsLen)
- assert.Equal(t, err == nil, testCase.expectErrorMsg == "")
- if err != nil {
- assert.ErrorContains(t, err, testCase.expectErrorMsg)
- }
- }
-}
-
-func TestGTIDSet_RemoveUUID(t *testing.T) {
- gtidSets, err := NewOracleGtidSet(fakeGTIDSet)
- assert.Nil(t, err)
-
- testCases := []struct {
- uuid string
- expectRemoved bool
- }{
- {fakeServerUUID, true},
- {"", false},
- }
-
- for _, testCase := range testCases {
- assert.Equal(t, testCase.expectRemoved, gtidSets.RemoveUUID(testCase.uuid))
- }
-}
-
-func TestGTIDSet_RetainUUID(t *testing.T) {
- gtidSets, err := NewOracleGtidSet(fakeGTIDSet)
- assert.Nil(t, err)
-
- assert.True(t, gtidSets.RetainUUID(fakeServerUUID))
-}
-
-func TestGTIDSet_RetainUUIDs(t *testing.T) {
- gtidSets, err := NewOracleGtidSet(fakeGTIDSet)
- assert.Nil(t, err)
-
- testCases := []struct {
- uuids []string
- expectAnythingRemoved bool
- }{
- {[]string{fakeServerUUID}, true},
- {[]string{fakeServerUUID, "b3512340-fc03-11ec-920f-000c29f6e7cf"}, false},
- }
-
- for _, testCase := range testCases {
- assert.Equal(t, testCase.expectAnythingRemoved, gtidSets.RetainUUIDs(testCase.uuids))
- }
-}
-
-func TestGTIDSet_SharedUUIDs(t *testing.T) {
- gtidSets, err := NewOracleGtidSet(fakeGTIDSet)
- assert.Nil(t, err)
- assert.False(t, gtidSets.IsEmpty())
-
- testCases := []struct {
- other *GTIDSet
- expectSharedItemsLen int
- }{
- {>IDSet{
- Items: []*GTIDItem{
- {
- ServerUUID: fakeServerUUID,
- },
- },
- }, 1},
- {>IDSet{Items: make([]*GTIDItem, 0)}, 0},
- }
-
- for _, testCase := range testCases {
- assert.Len(t, gtidSets.SharedUUIDs(testCase.other), testCase.expectSharedItemsLen)
- }
-}
-
-func TestGTIDSet_Explode(t *testing.T) {
- gtidSets, err := NewOracleGtidSet(fakeGTIDSet)
- assert.Nil(t, err)
-
- items := gtidSets.Explode()
- assert.NotNil(t, items)
- assert.Len(t, items, 12)
-}
-
-func TestGTIDSet_String(t *testing.T) {
- gtidSets, err := NewOracleGtidSet(fakeGTIDSet)
- assert.Nil(t, err)
-
- assert.Equal(t, "ee194423-3040-11ee-9393-eab5dfc9b22a:1-5:7:9-10,b3512340-fc03-11ec-920f-000c29f6e7cf:1-4", gtidSets.String())
-}
diff --git a/engines/mysql/manager.go b/engines/mysql/manager.go
index 06d8f05..2a90448 100644
--- a/engines/mysql/manager.go
+++ b/engines/mysql/manager.go
@@ -22,16 +22,11 @@ package mysql
import (
"context"
"database/sql"
- "fmt"
- "strconv"
- "strings"
"time"
- "github.com/go-sql-driver/mysql"
"github.com/pkg/errors"
ctrl "sigs.k8s.io/controller-runtime"
- "github.com/apecloud/dbctl/dcs"
"github.com/apecloud/dbctl/engines"
)
@@ -39,22 +34,18 @@ type Manager struct {
engines.DBManagerBase
DB *sql.DB
hostname string
- serverID uint
version string
binlogFormat string
logbinEnabled bool
logReplicationUpdatesEnabled bool
- opTimestamp int64
- globalState map[string]string
- masterStatus RowMap
slaveStatus RowMap
}
var _ engines.DBManager = &Manager{}
-func NewManager(properties engines.Properties) (engines.DBManager, error) {
+func NewManager() (engines.DBManager, error) {
logger := ctrl.Log.WithName("MySQL")
- config, err := NewConfig(properties)
+ config, err := NewConfig()
if err != nil {
return nil, err
}
@@ -64,11 +55,6 @@ func NewManager(properties engines.Properties) (engines.DBManager, error) {
return nil, err
}
- serverID, err := engines.GetIndex(managerBase.CurrentMemberName)
- if err != nil {
- return nil, err
- }
-
db, err := config.GetLocalDBConn()
if err != nil {
return nil, errors.Wrap(err, "connect to MySQL")
@@ -76,39 +62,12 @@ func NewManager(properties engines.Properties) (engines.DBManager, error) {
mgr := &Manager{
DBManagerBase: *managerBase,
- serverID: uint(serverID) + 1,
DB: db,
}
return mgr, nil
}
-func (mgr *Manager) InitializeCluster(context.Context, *dcs.Cluster) error {
- return nil
-}
-
-func (mgr *Manager) IsRunning() bool {
- ctx, cancel := context.WithTimeout(context.Background(), 500*time.Millisecond)
- defer cancel()
-
- // test if db is ready to connect or not
- err := mgr.DB.PingContext(ctx)
- if err != nil {
- var driverErr *mysql.MySQLError
- if errors.As(err, &driverErr) {
- // Now the error number is accessible directly
- if driverErr.Number == 1040 {
- mgr.Logger.Info("connect failed: Too many connections")
- return true
- }
- }
- mgr.Logger.Info("DB is not ready", "error", err)
- return false
- }
-
- return true
-}
-
func (mgr *Manager) IsDBStartupReady() bool {
if mgr.DBStartupReady {
return true
@@ -128,338 +87,6 @@ func (mgr *Manager) IsDBStartupReady() bool {
return true
}
-func (mgr *Manager) IsReadonly(ctx context.Context, cluster *dcs.Cluster, member *dcs.Member) (bool, error) {
- db, err := mgr.GetMemberConnection(cluster, member)
- if err != nil {
- mgr.Logger.Info("Get Member conn failed", "error", err.Error())
- return false, err
- }
-
- var readonly bool
- err = db.QueryRowContext(ctx, "select @@global.hostname, @@global.version, "+
- "@@global.read_only, @@global.binlog_format, @@global.log_bin, @@global.log_slave_updates").
- Scan(&mgr.hostname, &mgr.version, &readonly, &mgr.binlogFormat,
- &mgr.logbinEnabled, &mgr.logReplicationUpdatesEnabled)
- if err != nil {
- mgr.Logger.Info("Get global readonly failed", "error", err.Error())
- return false, err
- }
- return readonly, nil
-}
-
-func (mgr *Manager) IsLeader(ctx context.Context, _ *dcs.Cluster) (bool, error) {
- readonly, err := mgr.IsReadonly(ctx, nil, nil)
-
- if err != nil || readonly {
- return false, err
- }
-
- // if cluster.Leader != nil && cluster.Leader.Name != "" {
- // if cluster.Leader.Name == mgr.CurrentMemberName {
- // return true, nil
- // } else {
- // return false, nil
- // }
- // }
-
- // // During the initialization of cluster, there would be more than one leader,
- // // in this case, the first member is chosen as the leader
- // if mgr.CurrentMemberName == cluster.Members[0].Name {
- // return true, nil
- // }
- // isFirstMemberLeader, err := mgr.IsLeaderMember(ctx, cluster, &cluster.Members[0])
- // if err == nil && isFirstMemberLeader {
- // return false, nil
- // }
-
- return true, err
-}
-
-func (mgr *Manager) IsLeaderMember(ctx context.Context, cluster *dcs.Cluster, member *dcs.Member) (bool, error) {
- readonly, err := mgr.IsReadonly(ctx, cluster, member)
- if err != nil || readonly {
- return false, err
- }
-
- return true, err
-}
-
-func (mgr *Manager) InitiateCluster(*dcs.Cluster) error {
- return nil
-}
-
-func (mgr *Manager) GetMemberAddrs(_ context.Context, cluster *dcs.Cluster) []string {
- return cluster.GetMemberAddrs()
-}
-
-func (mgr *Manager) IsCurrentMemberInCluster(context.Context, *dcs.Cluster) bool {
- return true
-}
-
-func (mgr *Manager) IsCurrentMemberHealthy(ctx context.Context, cluster *dcs.Cluster) bool {
- // _, _ = mgr.EnsureServerID(ctx)
- member := cluster.GetMemberWithName(mgr.CurrentMemberName)
-
- return mgr.IsMemberHealthy(ctx, cluster, member)
-}
-
-func (mgr *Manager) IsMemberLagging(ctx context.Context, cluster *dcs.Cluster, member *dcs.Member) (bool, int64) {
- var leaderDBState *dcs.DBState
- if cluster.Leader == nil || cluster.Leader.DBState == nil {
- // In the event of leader initialization failure, there is no available database state information,
- // just returning false allows other replicas to acquire the lease.
- mgr.Logger.Info("No leader DBState info")
- return false, 0
- }
- leaderDBState = cluster.Leader.DBState
-
- db, err := mgr.GetMemberConnection(cluster, member)
- if err != nil {
- mgr.Logger.Info("Get Member conn failed", "error", err)
- return true, 0
- }
-
- opTimestamp, err := mgr.GetOpTimestamp(ctx, db)
- if err != nil {
- mgr.Logger.Info("get op timestamp failed", "error", err)
- return true, 0
- }
- lag := leaderDBState.OpTimestamp - opTimestamp
- if lag <= cluster.HaConfig.GetMaxLagOnSwitchover() {
- return false, lag
- }
- mgr.Logger.Info(fmt.Sprintf("The member %s has lag: %d", member.Name, lag))
- return true, lag
-}
-
-func (mgr *Manager) IsMemberHealthy(ctx context.Context, cluster *dcs.Cluster, member *dcs.Member) bool {
- db, err := mgr.GetMemberConnection(cluster, member)
- if err != nil {
- mgr.Logger.Info("Get Member conn failed", "error", err)
- return false
- }
-
- if cluster.Leader != nil && cluster.Leader.Name == member.Name {
- if mgr.WriteCheck(ctx, db) != nil {
- return false
- }
- }
- if mgr.ReadCheck(ctx, db) != nil {
- return false
- }
-
- return true
-}
-
-func (mgr *Manager) GetDBState(ctx context.Context, cluster *dcs.Cluster) *dcs.DBState {
- mgr.DBState = nil
-
- globalState, err := mgr.GetGlobalState(ctx, mgr.DB)
- if err != nil {
- mgr.Logger.Info("select global failed", "error", err)
- return nil
- }
-
- masterStatus, err := mgr.GetMasterStatus(ctx, mgr.DB)
- if err != nil {
- mgr.Logger.Info("show master status failed", "error", err)
- return nil
- }
-
- slaveStatus, err := mgr.GetSlaveStatus(ctx, mgr.DB)
- if err != nil {
- mgr.Logger.Info("show slave status failed", "error", err)
- return nil
- }
-
- opTimestamp, err := mgr.GetOpTimestamp(ctx, mgr.DB)
- if err != nil {
- mgr.Logger.Info("get op timestamp failed", "error", err)
- return nil
- }
-
- dbState := &dcs.DBState{
- OpTimestamp: opTimestamp,
- Extra: map[string]string{},
- }
- for k, v := range globalState {
- dbState.Extra[k] = v
- }
-
- if cluster.Leader != nil && cluster.Leader.Name == mgr.CurrentMemberName {
- dbState.Extra["Binlog_File"] = masterStatus.GetString("File")
- dbState.Extra["Binlog_Pos"] = masterStatus.GetString("Pos")
- } else {
- dbState.Extra["Master_Host"] = slaveStatus.GetString("Master_Host")
- dbState.Extra["Master_UUID"] = slaveStatus.GetString("Master_UUID")
- dbState.Extra["Slave_IO_Running"] = slaveStatus.GetString("Slave_IO_Running")
- dbState.Extra["Slave_SQL_Running"] = slaveStatus.GetString("Slave_SQL_Running")
- dbState.Extra["Last_IO_Error"] = slaveStatus.GetString("Last_IO_Error")
- dbState.Extra["Last_SQL_Error"] = slaveStatus.GetString("Last_SQL_Error")
- dbState.Extra["Master_Log_File"] = slaveStatus.GetString("Master_Log_File")
- dbState.Extra["Read_Master_Log_Pos"] = slaveStatus.GetString("Read_Master_Log_Pos")
- dbState.Extra["Relay_Master_Log_File"] = slaveStatus.GetString("Relay_Master_Log_File")
- dbState.Extra["Exec_Master_Log_Pos"] = slaveStatus.GetString("Exec_Master_Log_Pos")
- }
-
- mgr.globalState = globalState
- mgr.masterStatus = masterStatus
- mgr.slaveStatus = slaveStatus
- mgr.opTimestamp = opTimestamp
- mgr.DBState = dbState
-
- return dbState
-}
-
-func (mgr *Manager) GetSecondsBehindMaster(ctx context.Context) (int, error) {
- slaveStatus, err := mgr.GetSlaveStatus(ctx, mgr.DB)
- if err != nil {
- mgr.Logger.Info("show slave status failed", "error", err)
- return 0, err
- }
- if len(slaveStatus) == 0 {
- return 0, nil
- }
- secondsBehindMaster := slaveStatus.GetString("Seconds_Behind_Master")
- if secondsBehindMaster == "NULL" || secondsBehindMaster == "" {
- return 0, nil
- }
- return strconv.Atoi(secondsBehindMaster)
-}
-
-func (mgr *Manager) WriteCheck(ctx context.Context, db *sql.DB) error {
- writeSQL := fmt.Sprintf(`BEGIN;
-CREATE DATABASE IF NOT EXISTS kubeblocks;
-CREATE TABLE IF NOT EXISTS kubeblocks.kb_health_check(type INT, check_ts BIGINT, PRIMARY KEY(type));
-INSERT INTO kubeblocks.kb_health_check VALUES(%d, UNIX_TIMESTAMP()) ON DUPLICATE KEY UPDATE check_ts = UNIX_TIMESTAMP();
-COMMIT;`, engines.CheckStatusType)
- _, err := db.ExecContext(ctx, writeSQL)
- if err != nil {
- mgr.Logger.Info(writeSQL+" executing failed", "error", err.Error())
- return err
- }
- return nil
-}
-
-func (mgr *Manager) ReadCheck(ctx context.Context, db *sql.DB) error {
- _, err := mgr.GetOpTimestamp(ctx, db)
- if err != nil {
- if errors.Is(err, sql.ErrNoRows) {
- // no healthy check records, return true
- return nil
- }
- var mysqlErr *mysql.MySQLError
- if errors.As(err, &mysqlErr) && (mysqlErr.Number == 1049 || mysqlErr.Number == 1146) {
- // error 1049: database does not exists
- // error 1146: table does not exists
- // no healthy database, return true
- return nil
- }
- mgr.Logger.Info("Read check failed", "error", err)
- return err
- }
-
- return nil
-}
-
-func (mgr *Manager) GetOpTimestamp(ctx context.Context, db *sql.DB) (int64, error) {
- readSQL := fmt.Sprintf(`select check_ts from kubeblocks.kb_health_check where type=%d limit 1;`, engines.CheckStatusType)
- var opTimestamp int64
- err := db.QueryRowContext(ctx, readSQL).Scan(&opTimestamp)
- return opTimestamp, err
-}
-
-func (mgr *Manager) GetGlobalState(ctx context.Context, db *sql.DB) (map[string]string, error) {
- var hostname, serverUUID, gtidExecuted, gtidPurged, isReadonly, superReadonly string
- err := db.QueryRowContext(ctx, "select @@global.hostname, @@global.server_uuid, @@global.gtid_executed, @@global.gtid_purged, @@global.read_only, @@global.super_read_only").
- Scan(&hostname, &serverUUID, >idExecuted, >idPurged, &isReadonly, &superReadonly)
- if err != nil {
- return nil, err
- }
-
- return map[string]string{
- "hostname": hostname,
- "server_uuid": serverUUID,
- "gtid_executed": gtidExecuted,
- "gtid_purged": gtidPurged,
- "read_only": isReadonly,
- "super_read_only": superReadonly,
- }, nil
-}
-
-func (mgr *Manager) GetSlaveStatus(context.Context, *sql.DB) (RowMap, error) {
- sql := "show slave status"
- var rowMap RowMap
-
- err := QueryRowsMap(mgr.DB, sql, func(rMap RowMap) error {
- rowMap = rMap
- return nil
- })
- if err != nil {
- mgr.Logger.Info("executing "+sql+" failed", "error", err.Error())
- return nil, err
- }
- return rowMap, nil
-}
-
-func (mgr *Manager) GetMasterStatus(context.Context, *sql.DB) (RowMap, error) {
- sql := "show master status"
- var rowMap RowMap
-
- err := QueryRowsMap(mgr.DB, sql, func(rMap RowMap) error {
- rowMap = rMap
- return nil
- })
- if err != nil {
- mgr.Logger.Info("executing "+sql+" failed", "error", err.Error())
- return nil, err
- }
- return rowMap, nil
-}
-
-func (mgr *Manager) Recover(context.Context, *dcs.Cluster) error {
- return nil
-}
-
-func (mgr *Manager) JoinCurrentMemberToCluster(context.Context, *dcs.Cluster) error {
- return nil
-}
-
-func (mgr *Manager) LeaveMemberFromCluster(context.Context, *dcs.Cluster, string) error {
- return nil
-}
-
-// func (mgr *Manager) IsClusterHealthy(ctx context.Context, cluster *dcs.Cluster) bool {
-// leaderMember := cluster.GetLeaderMember()
-// if leaderMember == nil {
-// mgr.Logger.Info("IsClusterHealthy: has no leader.")
-// return true
-// }
-
-// return mgr.IsMemberHealthy(ctx, cluster, leaderMember)
-// }
-
-// IsClusterInitialized is a method to check if cluster is initialized or not
-func (mgr *Manager) IsClusterInitialized(ctx context.Context, cluster *dcs.Cluster) (bool, error) {
- _, err := mgr.GetVersion(ctx)
- if err != nil {
- return false, err
- }
-
- if cluster != nil {
- isValid, err := mgr.ValidateAddr(ctx, cluster)
- if err != nil || !isValid {
- return isValid, err
- }
- }
-
- // err = mgr.EnableSemiSyncIfNeed(ctx)
- // if err != nil {
- // return false, err
- // }
- return mgr.EnsureServerID(ctx)
-}
-
func (mgr *Manager) GetVersion(ctx context.Context) (string, error) {
if mgr.version != "" {
return mgr.version, nil
@@ -471,283 +98,22 @@ func (mgr *Manager) GetVersion(ctx context.Context) (string, error) {
return mgr.version, nil
}
-func (mgr *Manager) ValidateAddr(ctx context.Context, cluster *dcs.Cluster) (bool, error) {
- // The maximum length of the server addr is 255 characters. Before MySQL 8.0.17 it was 60 characters.
- currentMemberName := mgr.GetCurrentMemberName()
- member := cluster.GetMemberWithName(currentMemberName)
- addr := cluster.GetMemberShortAddr(*member)
- maxLength := 255
- if IsBeforeVersion(mgr.version, "8.0.17") {
- maxLength = 60
- }
-
- if len(addr) > maxLength {
- return false, errors.Errorf("The length of the member address must be less than or equal to %d", maxLength)
+func (mgr *Manager) ShutDownWithWait() {
+ for _, db := range connectionPoolCache {
+ _ = db.Close()
}
- return true, nil
+ connectionPoolCache = make(map[string]*sql.DB)
}
-func (mgr *Manager) EnsureServerID(ctx context.Context) (bool, error) {
- var serverID uint
- err := mgr.DB.QueryRowContext(ctx, "select @@global.server_id").Scan(&serverID)
- if err != nil {
- mgr.Logger.Info("Get global server id failed", "error", err)
- return false, err
- }
- if serverID == mgr.serverID {
- return true, nil
- }
- mgr.Logger.Info("Set global server id", "server_id", serverID)
-
- setServerID := fmt.Sprintf(`set global server_id = %d`, mgr.serverID)
- mgr.Logger.Info("Set global server id", "server-id", setServerID)
- _, err = mgr.DB.Exec(setServerID)
+func (mgr *Manager) IsReadonly(ctx context.Context) (bool, error) {
+ var readonly bool
+ err := mgr.DB.QueryRowContext(ctx, "select @@global.hostname, @@global.version, "+
+ "@@global.read_only, @@global.binlog_format, @@global.log_bin, @@global.log_slave_updates").
+ Scan(&mgr.hostname, &mgr.version, &readonly, &mgr.binlogFormat,
+ &mgr.logbinEnabled, &mgr.logReplicationUpdatesEnabled)
if err != nil {
- mgr.Logger.Info("set server id failed", "error", err)
+ mgr.Logger.Info("Get global readonly failed", "error", err.Error())
return false, err
}
-
- return true, nil
-}
-
-func (mgr *Manager) EnableSemiSyncIfNeed(ctx context.Context) error {
- var status string
- err := mgr.DB.QueryRowContext(ctx, "SELECT PLUGIN_STATUS FROM INFORMATION_SCHEMA.PLUGINS "+
- "WHERE PLUGIN_NAME ='rpl_semi_sync_source';").Scan(&status)
- if err != nil {
- if err == sql.ErrNoRows {
- return nil
- }
- mgr.Logger.Info("Get rpl_semi_sync_source plugin status failed", "error", err.Error())
- return err
- }
-
- // In MySQL 8.0, semi-sync configuration options should not be specified in my.cnf,
- // as this may cause the database initialization process to fail:
- // [Warning] [MY-013501] [Server] Ignoring --plugin-load[_add] list as the server is running with --initialize(-insecure).
- // [ERROR] [MY-000067] [Server] unknown variable 'rpl_semi_sync_master_enabled=1'.
- if status == "ACTIVE" {
- setSourceEnable := "SET GLOBAL rpl_semi_sync_source_enabled = 1;" +
- "SET GLOBAL rpl_semi_sync_source_timeout = 100000;"
- _, err = mgr.DB.Exec(setSourceEnable)
- if err != nil {
- mgr.Logger.Info(setSourceEnable+" execute failed", "error", err.Error())
- return err
- }
- }
-
- err = mgr.DB.QueryRowContext(ctx, "SELECT PLUGIN_STATUS FROM INFORMATION_SCHEMA.PLUGINS "+
- "WHERE PLUGIN_NAME ='rpl_semi_sync_replica';").Scan(&status)
- if err != nil {
- if err == sql.ErrNoRows {
- return nil
- }
- mgr.Logger.Info("Get rpl_semi_sync_replica plugin status failed", "error", err.Error())
- return err
- }
-
- if status == "ACTIVE" {
- setSourceEnable := "SET GLOBAL rpl_semi_sync_replica_enabled = 1;"
- _, err = mgr.DB.Exec(setSourceEnable)
- if err != nil {
- mgr.Logger.Info(setSourceEnable+" execute failed", "error", err.Error())
- return err
- }
- }
- return nil
-}
-
-func (mgr *Manager) Promote(ctx context.Context, cluster *dcs.Cluster) error {
- err := mgr.EnableSemiSyncSource(ctx)
- if err != nil {
- return err
- }
-
- err = mgr.DisableSemiSyncReplica(ctx)
- if err != nil {
- return err
- }
-
- if (mgr.globalState["super_read_only"] == "0" && mgr.globalState["read_only"] == "0") &&
- (len(mgr.slaveStatus) == 0 || (mgr.slaveStatus.GetString("Slave_IO_Running") == "No" &&
- mgr.slaveStatus.GetString("Slave_SQL_Running") == "No")) {
- return nil
- }
-
- // wait relay log to play
- secondsBehindMaster, err := mgr.GetSecondsBehindMaster(ctx)
- for err != nil || secondsBehindMaster != 0 {
- time.Sleep(time.Second)
- secondsBehindMaster, err = mgr.GetSecondsBehindMaster(ctx)
- }
-
- stopReadOnly := `set global read_only=off;set global super_read_only=off;`
- stopSlave := `stop slave;`
- resp, err := mgr.DB.Exec(stopReadOnly + stopSlave)
- if err != nil {
- mgr.Logger.Info("promote failed", "error", err.Error())
- return err
- }
-
- // fresh db state
- mgr.GetDBState(ctx, cluster)
- mgr.Logger.Info(fmt.Sprintf("promote success, resp:%v", resp))
- return nil
-}
-
-func (mgr *Manager) Demote(context.Context) error {
- setReadOnly := `set global read_only=on;set global super_read_only=on;`
-
- _, err := mgr.DB.Exec(setReadOnly)
- if err != nil {
- mgr.Logger.Info("demote failed", "error", err.Error())
- return err
- }
- return nil
-}
-
-func (mgr *Manager) Follow(ctx context.Context, cluster *dcs.Cluster) error {
- leaderMember := cluster.GetLeaderMember()
- if leaderMember == nil {
- return fmt.Errorf("cluster has no leader")
- }
-
- if mgr.CurrentMemberName == cluster.Leader.Name {
- mgr.Logger.Info("i get the leader key, don't need to follow")
- return nil
- }
-
- if !mgr.isRecoveryConfOutdated(cluster.Leader.Name) {
- return nil
- }
- err := mgr.EnableSemiSyncReplica(ctx)
- if err != nil {
- return err
- }
- err = mgr.DisableSemiSyncSource(ctx)
- if err != nil {
- return err
- }
-
- stopSlave := `stop slave;`
- // MySQL 5.7 has a limitation where the length of the master_host cannot exceed 60 characters.
- masterHost := cluster.GetMemberShortAddr(*leaderMember)
- changeMaster := fmt.Sprintf(`change master to master_host='%s',master_user='%s',master_password='%s',master_port=%s,master_auto_position=1;`,
- masterHost, config.Username, config.Password, leaderMember.DBPort)
- mgr.Logger.Info("follow new leader", "changemaster", changeMaster)
- startSlave := `start slave;`
-
- _, err = mgr.DB.Exec(stopSlave + changeMaster + startSlave)
- if err != nil {
- mgr.Logger.Info("Follow master failed", "error", err.Error())
- return err
- }
-
- err = mgr.SetSemiSyncSourceTimeout(ctx, cluster, leaderMember)
- if err != nil {
- return err
- }
-
- // fresh db state
- mgr.GetDBState(ctx, cluster)
- mgr.Logger.Info("successfully follow new leader", "leader-name", leaderMember.Name)
- return nil
-}
-
-func (mgr *Manager) isRecoveryConfOutdated(leader string) bool {
- var rowMap = mgr.slaveStatus
-
- if len(rowMap) == 0 {
- return true
- }
-
- ioRunning := rowMap.GetString("Slave_IO_Running")
- sqlRunning := rowMap.GetString("Slave_SQL_Running")
- if ioRunning == "No" || sqlRunning == "No" {
- mgr.Logger.Info("slave status error", "status", rowMap)
- return true
- }
-
- masterHost := rowMap.GetString("Master_Host")
- return !strings.HasPrefix(masterHost, leader)
-}
-
-func (mgr *Manager) GetHealthiestMember(*dcs.Cluster, string) *dcs.Member {
- return nil
-}
-
-func (mgr *Manager) HasOtherHealthyLeader(ctx context.Context, cluster *dcs.Cluster) *dcs.Member {
- isLeader, err := mgr.IsLeader(ctx, cluster)
- if err == nil && isLeader {
- // if current member is leader, just return
- return nil
- }
-
- for _, member := range cluster.Members {
- if member.Name == mgr.CurrentMemberName {
- continue
- }
-
- isLeader, err := mgr.IsLeaderMember(ctx, cluster, &member)
- if err == nil && isLeader {
- return &member
- }
- }
-
- return nil
-}
-
-// HasOtherHealthyMembers checks if there are any healthy members, excluding the leader
-func (mgr *Manager) HasOtherHealthyMembers(ctx context.Context, cluster *dcs.Cluster, leader string) []*dcs.Member {
- members := make([]*dcs.Member, 0)
- for _, member := range cluster.Members {
- if member.Name == leader {
- continue
- }
- if !mgr.IsMemberHealthy(ctx, cluster, &member) {
- continue
- }
- members = append(members, &member)
- }
-
- return members
-}
-
-func (mgr *Manager) IsRootCreated(context.Context) (bool, error) {
- return true, nil
-}
-
-func (mgr *Manager) CreateRoot(context.Context) error {
- return nil
-}
-
-func (mgr *Manager) Lock(context.Context, string) error {
- setReadOnly := `set global read_only=on;`
-
- _, err := mgr.DB.Exec(setReadOnly)
- if err != nil {
- mgr.Logger.Info("Lock failed", "error", err.Error())
- return err
- }
- mgr.IsLocked = true
- return nil
-}
-
-func (mgr *Manager) Unlock(context.Context) error {
- setReadOnlyOff := `set global read_only=off;`
-
- _, err := mgr.DB.Exec(setReadOnlyOff)
- if err != nil {
- mgr.Logger.Info("Unlock failed", "error", err.Error())
- return err
- }
- mgr.IsLocked = false
- return nil
-}
-
-func (mgr *Manager) ShutDownWithWait() {
- for _, db := range connectionPoolCache {
- _ = db.Close()
- }
- connectionPoolCache = make(map[string]*sql.DB)
+ return readonly, nil
}
diff --git a/engines/mysql/manager_test.go b/engines/mysql/manager_test.go
index aba0a87..5ff3d33 100644
--- a/engines/mysql/manager_test.go
+++ b/engines/mysql/manager_test.go
@@ -20,107 +20,36 @@ along with this program. If not, see .
package mysql
import (
- "context"
- "database/sql"
- "fmt"
"testing"
"time"
- "github.com/DATA-DOG/go-sqlmock"
- "github.com/go-sql-driver/mysql"
- "github.com/pkg/errors"
"github.com/spf13/viper"
"github.com/stretchr/testify/assert"
"github.com/apecloud/dbctl/constant"
- "github.com/apecloud/dbctl/dcs"
)
const (
fakePodName = "fake-mysql-0"
fakeClusterCompName = "test-mysql"
- fakeNamespace = "fake-namespace"
- fakeDBPort = "fake-port"
)
func TestNewManager(t *testing.T) {
defer viper.Reset()
- t.Run("new config failed", func(t *testing.T) {
- manager, err := NewManager(fakePropertiesWithPem)
-
- assert.Nil(t, manager)
- assert.NotNil(t, err)
- })
-
- viper.Set(constant.KBEnvPodName, "fake")
- viper.Set(constant.KBEnvClusterCompName, fakeClusterCompName)
- viper.Set(constant.KBEnvNamespace, fakeNamespace)
- t.Run("get server id failed", func(t *testing.T) {
- manager, err := NewManager(fakeProperties)
-
- assert.Nil(t, manager)
- assert.NotNil(t, err)
- assert.ErrorContains(t, err, "the format of member name is wrong")
- })
-
- viper.Set(constant.KBEnvPodName, fakePodName)
- t.Run("get local connection failed", func(t *testing.T) {
- manager, err := NewManager(fakePropertiesWithWrongURL)
-
- assert.Nil(t, manager)
- assert.NotNil(t, err)
- })
-
- t.Run("new manager successfully", func(t *testing.T) {
- managerIFace, err := NewManager(fakeProperties)
+ viper.SetDefault(constant.KBEnvPodName, "pod-test-0")
+ viper.SetDefault(constant.KBEnvClusterCompName, "cluster-component-test")
+ viper.SetDefault(constant.KBEnvNamespace, "namespace-test")
+ t.Run("new default manager", func(t *testing.T) {
+ managerIFace, err := NewManager()
+ assert.NotNil(t, managerIFace)
assert.Nil(t, err)
manager, ok := managerIFace.(*Manager)
assert.True(t, ok)
- assert.Equal(t, fakePodName, manager.CurrentMemberName)
- assert.Equal(t, fakeNamespace, manager.Namespace)
- assert.Equal(t, fakeClusterCompName, manager.ClusterCompName)
- assert.Equal(t, uint(1), manager.serverID)
- })
-}
-
-func TestManager_IsRunning(t *testing.T) {
- manager, mock, _ := mockDatabase(t)
-
- t.Run("Too many connections", func(t *testing.T) {
- mock.ExpectPing().
- WillReturnError(&mysql.MySQLError{Number: 1040})
-
- isRunning := manager.IsRunning()
- assert.True(t, isRunning)
- })
-
- t.Run("DB is not ready", func(t *testing.T) {
- mock.ExpectPing().
- WillReturnError(fmt.Errorf("some error"))
-
- isRunning := manager.IsRunning()
- assert.False(t, isRunning)
- })
-
- t.Run("ping db overtime", func(t *testing.T) {
- mock.ExpectPing().WillDelayFor(time.Second)
-
- isRunning := manager.IsRunning()
- assert.False(t, isRunning)
- })
-
- t.Run("db is running", func(t *testing.T) {
- mock.ExpectPing()
-
- isRunning := manager.IsRunning()
- assert.True(t, isRunning)
+ assert.Equal(t, "pod-test-0", manager.CurrentMemberName)
+ assert.Equal(t, "cluster-component-test", manager.ClusterCompName)
})
-
- if err := mock.ExpectationsWereMet(); err != nil {
- t.Errorf("there were unfulfilled expectations: %v", err)
- }
}
func TestManager_IsDBStartupReady(t *testing.T) {
@@ -154,759 +83,3 @@ func TestManager_IsDBStartupReady(t *testing.T) {
t.Errorf("there were unfulfilled expectations: %v", err)
}
}
-
-func TestManager_IsReadonly(t *testing.T) {
- ctx := context.TODO()
- manager, _, _ := mockDatabase(t)
- cluster := &dcs.Cluster{}
-
- t.Run("Get Member conn failed", func(t *testing.T) {
- _, _ = NewConfig(fakePropertiesWithWrongURL)
-
- readonly, err := manager.IsReadonly(ctx, cluster, &dcs.Member{})
- assert.False(t, readonly)
- assert.NotNil(t, err)
- assert.ErrorContains(t, err, "illegal Data Source Name (DNS) specified by")
- })
-}
-
-func TestManager_IsLeader(t *testing.T) {
- ctx := context.TODO()
- manager, mock, _ := mockDatabase(t)
-
- t.Run("check is read only failed", func(t *testing.T) {
- mock.ExpectQuery("select").
- WillReturnError(fmt.Errorf("some error"))
-
- isLeader, err := manager.IsLeader(ctx, nil)
- assert.False(t, isLeader)
- assert.NotNil(t, err)
- assert.ErrorContains(t, err, "some error")
- })
-
- t.Run("current member is leader", func(t *testing.T) {
- mock.ExpectQuery("select").
- WillReturnRows(sqlmock.NewRows([]string{"@@global.hostname", "@@global.version", "@@global.read_only",
- "@@global.binlog_format", "@@global.log_bin", "@@global.log_slave_updates"}).
- AddRow(fakePodName, "8.0.30", false, "MIXED", "1", "1"))
-
- isLeader, err := manager.IsLeader(ctx, nil)
- assert.True(t, isLeader)
- assert.Nil(t, err)
- })
-
- if err := mock.ExpectationsWereMet(); err != nil {
- t.Errorf("there were unfulfilled expectations: %v", err)
- }
-}
-
-func TestManager_IsLeaderMember(t *testing.T) {
- ctx := context.TODO()
- manager, mock, _ := mockDatabase(t)
-
- t.Run("check is read only failed", func(t *testing.T) {
- mock.ExpectQuery("select").
- WillReturnError(fmt.Errorf("some error"))
-
- isLeader, err := manager.IsLeaderMember(ctx, nil, nil)
- assert.False(t, isLeader)
- assert.NotNil(t, err)
- assert.ErrorContains(t, err, "some error")
- })
-
- t.Run("current member is leader", func(t *testing.T) {
- mock.ExpectQuery("select").
- WillReturnRows(sqlmock.NewRows([]string{"@@global.hostname", "@@global.version", "@@global.read_only",
- "@@global.binlog_format", "@@global.log_bin", "@@global.log_slave_updates"}).
- AddRow(fakePodName, "8.0.30", false, "MIXED", "1", "1"))
-
- isLeader, err := manager.IsLeaderMember(ctx, nil, nil)
- assert.True(t, isLeader)
- assert.Nil(t, err)
- })
-
- if err := mock.ExpectationsWereMet(); err != nil {
- t.Errorf("there were unfulfilled expectations: %v", err)
- }
-}
-
-func TestManager_GetMemberAddrs(t *testing.T) {
- ctx := context.TODO()
- manager, _, _ := mockDatabase(t)
- cluster := &dcs.Cluster{
- Members: []dcs.Member{
- {
- Name: fakePodName,
- DBPort: fakeDBPort,
- },
- },
- Namespace: fakeNamespace,
- }
-
- viper.Set(constant.KubernetesClusterDomainEnv, "cluster.local")
- defer viper.Reset()
- addrs := manager.GetMemberAddrs(ctx, cluster)
- assert.Len(t, addrs, 1)
- assert.Equal(t, "fake-mysql-0.fake-mysql-headless.fake-namespace.svc.cluster.local:fake-port", addrs[0])
-}
-
-func TestManager_IsMemberLagging(t *testing.T) {
- ctx := context.TODO()
- manager, mock, _ := mockDatabase(t)
- cluster := &dcs.Cluster{Leader: &dcs.Leader{}, HaConfig: &dcs.HaConfig{}}
-
- t.Run("No leader DBState info", func(t *testing.T) {
- isMemberLagging, lags := manager.IsMemberLagging(ctx, cluster, nil)
- assert.False(t, isMemberLagging)
- assert.Zero(t, lags)
- })
-
- cluster.Leader.DBState = &dcs.DBState{}
- t.Run("Get Member conn failed", func(t *testing.T) {
- _, _ = NewConfig(fakePropertiesWithWrongURL)
-
- isMemberLagging, lags := manager.IsMemberLagging(ctx, cluster, &dcs.Member{})
- assert.True(t, isMemberLagging)
- assert.Zero(t, lags)
- })
-
- _, _ = NewConfig(fakeProperties)
- t.Run("get op timestamp failed", func(t *testing.T) {
- mock.ExpectQuery("select check_ts").
- WillReturnError(fmt.Errorf("some error"))
-
- isMemberLagging, lags := manager.IsMemberLagging(ctx, cluster, &dcs.Member{Name: fakePodName})
- assert.True(t, isMemberLagging)
- assert.Zero(t, lags)
- })
-
- cluster.Leader.DBState.OpTimestamp = 100
- t.Run("no lags", func(t *testing.T) {
-
- mock.ExpectQuery("select check_ts").
- WillReturnRows(sqlmock.NewRows([]string{"check_ts"}).AddRow(100))
-
- isMemberLagging, lags := manager.IsMemberLagging(ctx, cluster, &dcs.Member{Name: fakePodName})
- assert.False(t, isMemberLagging)
- assert.Zero(t, lags)
- })
-
- t.Run("member is lagging", func(t *testing.T) {
- mock.ExpectQuery("select check_ts").
- WillReturnRows(sqlmock.NewRows([]string{"check_ts"}).AddRow(0))
-
- isMemberLagging, lags := manager.IsMemberLagging(ctx, cluster, &dcs.Member{Name: fakePodName})
- assert.True(t, isMemberLagging)
- assert.Equal(t, int64(100), lags)
- })
-
- if err := mock.ExpectationsWereMet(); err != nil {
- t.Errorf("there were unfulfilled expectations: %v", err)
- }
-}
-
-func TestManager_IsMemberHealthy(t *testing.T) {
- ctx := context.TODO()
- manager, mock, _ := mockDatabase(t)
- member := dcs.Member{Name: fakePodName}
- cluster := &dcs.Cluster{
- Leader: &dcs.Leader{Name: fakePodName},
- Members: []dcs.Member{member},
- }
-
- t.Run("Get Member conn failed", func(t *testing.T) {
- _, _ = NewConfig(fakePropertiesWithWrongURL)
-
- isHealthy := manager.IsMemberHealthy(ctx, cluster, &dcs.Member{})
- assert.False(t, isHealthy)
- })
-
- _, _ = NewConfig(fakeProperties)
- t.Run("write check failed", func(t *testing.T) {
- mock.ExpectExec("CREATE DATABASE IF NOT EXISTS kubeblocks").
- WillReturnError(fmt.Errorf("some error"))
-
- isHealthy := manager.IsCurrentMemberHealthy(ctx, cluster)
- assert.False(t, isHealthy)
- })
-
- if err := mock.ExpectationsWereMet(); err != nil {
- t.Errorf("there were unfulfilled expectations: %v", err)
- }
-}
-
-func TestManager_WriteCheck(t *testing.T) {
- ctx := context.TODO()
- manager, mock, _ := mockDatabase(t)
-
- t.Run("write check failed", func(t *testing.T) {
- mock.ExpectExec("CREATE DATABASE IF NOT EXISTS kubeblocks;").
- WillReturnError(fmt.Errorf("some error"))
-
- canWrite := manager.WriteCheck(ctx, manager.DB)
- assert.NotNil(t, canWrite)
- })
-
- t.Run("write check successfully", func(t *testing.T) {
- mock.ExpectExec("CREATE DATABASE IF NOT EXISTS kubeblocks;").
- WillReturnResult(sqlmock.NewResult(1, 1))
-
- canWrite := manager.WriteCheck(ctx, manager.DB)
- assert.Nil(t, canWrite)
- })
-
- if err := mock.ExpectationsWereMet(); err != nil {
- t.Errorf("there were unfulfilled expectations: %v", err)
- }
-}
-
-func TestManager_ReadCheck(t *testing.T) {
- ctx := context.TODO()
- manager, mock, _ := mockDatabase(t)
-
- t.Run("no rows in result set", func(t *testing.T) {
- mock.ExpectQuery("select").
- WillReturnError(sql.ErrNoRows)
-
- canRead := manager.ReadCheck(ctx, manager.DB)
- assert.Nil(t, canRead)
- })
-
- t.Run("no healthy database", func(t *testing.T) {
- mock.ExpectQuery("select").
- WillReturnError(&mysql.MySQLError{Number: 1049})
-
- canRead := manager.ReadCheck(ctx, manager.DB)
- assert.Nil(t, canRead)
- })
-
- t.Run("Read check failed", func(t *testing.T) {
- mock.ExpectQuery("select").
- WillReturnError(fmt.Errorf("some error"))
-
- canRead := manager.ReadCheck(ctx, manager.DB)
- assert.NotNil(t, canRead)
- })
-
- t.Run("Read check successfully", func(t *testing.T) {
- mock.ExpectQuery("select").
- WillReturnRows(sqlmock.NewRows([]string{"check_ts"}).AddRow(1))
-
- canRead := manager.ReadCheck(ctx, manager.DB)
- assert.Nil(t, canRead)
- })
-
- if err := mock.ExpectationsWereMet(); err != nil {
- t.Errorf("there were unfulfilled expectations: %v", err)
- }
-}
-
-func TestManager_GetGlobalState(t *testing.T) {
- ctx := context.TODO()
- manager, mock, _ := mockDatabase(t)
-
- t.Run("get global state successfully", func(t *testing.T) {
- mock.ExpectQuery("select").
- WillReturnRows(sqlmock.NewRows([]string{"@@global.hostname", "@@global.server_uuid", "@@global.gtid_executed", "@@global.gtid_purged", "@@global.read_only", "@@global.super_read_only"}).
- AddRow(fakePodName, fakeServerUUID, fakeGTIDString, fakeGTIDSet, 1, 1))
-
- globalState, err := manager.GetGlobalState(ctx, manager.DB)
- assert.Nil(t, err)
- assert.NotNil(t, globalState)
- assert.Equal(t, fakePodName, globalState["hostname"])
- assert.Equal(t, fakeServerUUID, globalState["server_uuid"])
- assert.Equal(t, fakeGTIDString, globalState["gtid_executed"])
- assert.Equal(t, fakeGTIDSet, globalState["gtid_purged"])
- assert.Equal(t, "1", globalState["read_only"])
- assert.Equal(t, "1", globalState["super_read_only"])
- })
-
- t.Run("get global state failed", func(t *testing.T) {
- mock.ExpectQuery("select").
- WillReturnError(fmt.Errorf("some error"))
-
- globalState, err := manager.GetGlobalState(ctx, manager.DB)
- assert.Nil(t, globalState)
- assert.NotNil(t, err)
- assert.ErrorContains(t, err, "some error")
- })
-
- if err := mock.ExpectationsWereMet(); err != nil {
- t.Errorf("there were unfulfilled expectations: %v", err)
- }
-}
-
-func TestManager_GetSlaveStatus(t *testing.T) {
- ctx := context.TODO()
- manager, mock, _ := mockDatabase(t)
-
- t.Run("query rows map failed", func(t *testing.T) {
- mock.ExpectQuery("show slave status").
- WillReturnError(fmt.Errorf("some error"))
-
- slaveStatus, err := manager.GetSlaveStatus(ctx, manager.DB)
- assert.Nil(t, slaveStatus)
- assert.NotNil(t, err)
- assert.ErrorContains(t, err, "some error")
- })
-
- t.Run("get slave status successfully", func(t *testing.T) {
- mock.ExpectQuery("show slave status").
- WillReturnRows(sqlmock.NewRows([]string{"Seconds_Behind_Master", "Slave_IO_Running"}).AddRow("249904", "Yes"))
-
- slaveStatus, err := manager.GetSlaveStatus(ctx, manager.DB)
- assert.Nil(t, err)
- assert.Equal(t, "249904", slaveStatus.GetString("Seconds_Behind_Master"))
- assert.Equal(t, "Yes", slaveStatus.GetString("Slave_IO_Running"))
- })
-
- if err := mock.ExpectationsWereMet(); err != nil {
- t.Errorf("there were unfulfilled expectations: %v", err)
- }
-}
-
-func TestManager_GetMasterStatus(t *testing.T) {
- ctx := context.TODO()
- manager, mock, _ := mockDatabase(t)
-
- t.Run("query rows map failed", func(t *testing.T) {
- mock.ExpectQuery("show master status").
- WillReturnError(fmt.Errorf("some error"))
-
- slaveStatus, err := manager.GetMasterStatus(ctx, manager.DB)
- assert.Nil(t, slaveStatus)
- assert.NotNil(t, err)
- assert.ErrorContains(t, err, "some error")
- })
-
- t.Run("get slave status successfully", func(t *testing.T) {
- mock.ExpectQuery("show master status").
- WillReturnRows(sqlmock.NewRows([]string{"File", "Executed_Gtid_Set"}).AddRow("master-bin.000002", fakeGTIDSet))
-
- slaveStatus, err := manager.GetMasterStatus(ctx, manager.DB)
- assert.Nil(t, err)
- assert.Equal(t, "master-bin.000002", slaveStatus.GetString("File"))
- assert.Equal(t, fakeGTIDSet, slaveStatus.GetString("Executed_Gtid_Set"))
- })
-
- if err := mock.ExpectationsWereMet(); err != nil {
- t.Errorf("there were unfulfilled expectations: %v", err)
- }
-}
-
-func TestManager_IsClusterInitialized(t *testing.T) {
- ctx := context.TODO()
- manager, mock, _ := mockDatabase(t)
- manager.version = "5.7.42"
- manager.serverID = 1
-
- t.Run("query server id failed", func(t *testing.T) {
- mock.ExpectQuery("select @@global.server_id").
- WillReturnError(fmt.Errorf("some error"))
-
- isInitialized, err := manager.IsClusterInitialized(ctx, nil)
- assert.False(t, isInitialized)
- assert.NotNil(t, err)
- assert.ErrorContains(t, err, "some error")
- })
-
- t.Run("server id equal to manager's server id", func(t *testing.T) {
- mock.ExpectQuery("select @@global.server_id").
- WillReturnRows(sqlmock.NewRows([]string{"@@global.server_id"}).AddRow(1))
-
- isInitialized, err := manager.IsClusterInitialized(ctx, nil)
- assert.True(t, isInitialized)
- assert.Nil(t, err)
- })
-
- t.Run("set server id failed", func(t *testing.T) {
- mock.ExpectQuery("select @@global.server_id").
- WillReturnRows(sqlmock.NewRows([]string{"@@global.server_id"}).AddRow(2))
- mock.ExpectExec("set global server_id").
- WillReturnError(fmt.Errorf("some error"))
-
- isInitialized, err := manager.IsClusterInitialized(ctx, nil)
- assert.False(t, isInitialized)
- assert.NotNil(t, err)
- assert.ErrorContains(t, err, "some error")
- })
-
- t.Run("set server id successfully", func(t *testing.T) {
- manager.version = ""
- const version = "5.7.42"
- mock.ExpectQuery("select version()").
- WillReturnRows(sqlmock.NewRows([]string{"version"}).AddRow(version))
- mock.ExpectQuery("select @@global.server_id").
- WillReturnRows(sqlmock.NewRows([]string{"@@global.server_id"}).AddRow(2))
- mock.ExpectExec("set global server_id").
- WillReturnResult(sqlmock.NewResult(1, 1))
-
- isInitialized, err := manager.IsClusterInitialized(ctx, nil)
- assert.True(t, isInitialized)
- assert.Equal(t, manager.version, version)
- assert.Nil(t, err)
- })
-
- if err := mock.ExpectationsWereMet(); err != nil {
- t.Errorf("there were unfulfilled expectations: %v", err)
- }
-}
-
-func TestManager_Promote(t *testing.T) {
- ctx := context.TODO()
- manager, mock, _ := mockDatabase(t)
- manager.globalState = map[string]string{}
- manager.slaveStatus = RowMap{}
-
- t.Run("execute promote failed", func(t *testing.T) {
- mock.ExpectQuery("SELECT PLUGIN_STATUS FROM INFORMATION_SCHEMA.PLUGINS " +
- "WHERE PLUGIN_NAME ='rpl_semi_sync_source';").WillReturnError(errors.New("some error"))
- // mock.ExpectExec("set global read_only=off").
- // WillReturnError(fmt.Errorf("some error"))
-
- err := manager.Promote(ctx, nil)
- assert.NotNil(t, err)
- assert.ErrorContains(t, err, "some error")
- })
-
- t.Run("execute promote successfully", func(t *testing.T) {
- mock.ExpectQuery("SELECT PLUGIN_STATUS FROM INFORMATION_SCHEMA.PLUGINS " +
- "WHERE PLUGIN_NAME ='rpl_semi_sync_source';").WillReturnRows(sqlmock.NewRows([]string{"PLUGIN_STATUS"}).AddRow("ACTIVE"))
- mock.ExpectQuery("select @@global.rpl_semi_sync_source_enabled").WillReturnRows(sqlmock.NewRows([]string{"STATUS"}).AddRow(1))
- mock.ExpectQuery("select @@global.rpl_semi_sync_replica_enabled").WillReturnRows(sqlmock.NewRows([]string{"STATUS"}).AddRow(0))
- mock.ExpectQuery("show slave status").
- WillReturnRows(sqlmock.NewRows([]string{"Seconds_Behind_Master"}).AddRow(0))
- mock.ExpectExec("set global read_only=off").
- WillReturnResult(sqlmock.NewResult(1, 1))
-
- err := manager.Promote(ctx, nil)
- assert.Nil(t, err)
- })
-
- t.Run("current member has been promoted", func(t *testing.T) {
- mock.ExpectQuery("SELECT PLUGIN_STATUS FROM INFORMATION_SCHEMA.PLUGINS " +
- "WHERE PLUGIN_NAME ='rpl_semi_sync_source';").WillReturnRows(sqlmock.NewRows([]string{"PLUGIN_STATUS"}).AddRow("ACTIVE"))
- mock.ExpectQuery("select @@global.rpl_semi_sync_source_enabled").WillReturnRows(sqlmock.NewRows([]string{"STATUS"}).AddRow(1))
- mock.ExpectQuery("select @@global.rpl_semi_sync_replica_enabled").WillReturnRows(sqlmock.NewRows([]string{"STATUS"}).AddRow(0))
- manager.globalState["super_read_only"] = "0"
- manager.globalState["read_only"] = "0"
-
- err := manager.Promote(ctx, nil)
- assert.Nil(t, err)
- })
-
- if err := mock.ExpectationsWereMet(); err != nil {
- t.Errorf("there were unfulfilled expectations: %v", err)
- }
-}
-
-func TestManager_Demote(t *testing.T) {
- ctx := context.TODO()
- manager, mock, _ := mockDatabase(t)
-
- t.Run("execute promote failed", func(t *testing.T) {
- mock.ExpectExec("set global read_only=on").
- WillReturnError(fmt.Errorf("some error"))
-
- err := manager.Demote(ctx)
- assert.NotNil(t, err)
- assert.ErrorContains(t, err, "some error")
- })
-
- t.Run("execute promote successfully", func(t *testing.T) {
- mock.ExpectExec("set global read_only=on").
- WillReturnResult(sqlmock.NewResult(1, 1))
-
- err := manager.Demote(ctx)
- assert.Nil(t, err)
- })
-
- if err := mock.ExpectationsWereMet(); err != nil {
- t.Errorf("there were unfulfilled expectations: %v", err)
- }
-}
-
-func TestManager_Follow(t *testing.T) {
- ctx := context.TODO()
- manager, mock, _ := mockDatabase(t)
- _, _ = NewConfig(fakeProperties)
- cluster := &dcs.Cluster{
- Members: []dcs.Member{
- {Name: fakePodName},
- {Name: "fake-pod-2"},
- {Name: "fake-pod-1"},
- },
- }
-
- t.Run("cluster has no leader", func(t *testing.T) {
- err := manager.Follow(ctx, cluster)
- assert.NotNil(t, err)
- assert.ErrorContains(t, err, "cluster has no leader")
- })
-
- t.Run("i get the leader key, don't need to follow", func(t *testing.T) {
- cluster.Leader = &dcs.Leader{Name: manager.CurrentMemberName}
-
- err := manager.Follow(ctx, cluster)
- assert.Nil(t, err)
- })
-
- cluster.Leader = &dcs.Leader{Name: "fake-pod-1"}
- t.Run("recovery conf still right", func(t *testing.T) {
- manager.slaveStatus = RowMap{
- "Master_Host": CellData{
- String: "fake-pod-1",
- },
- }
-
- err := manager.Follow(ctx, cluster)
- assert.Nil(t, err)
- })
-
- manager.slaveStatus = RowMap{
- "Master_Host": CellData{
- String: "fake-pod-2",
- },
- }
- t.Run("execute follow failed", func(t *testing.T) {
- mock.ExpectQuery("SELECT PLUGIN_STATUS FROM INFORMATION_SCHEMA.PLUGINS " +
- "WHERE PLUGIN_NAME ='rpl_semi_sync_replica';").WillReturnRows(sqlmock.NewRows([]string{"PLUGIN_STATUS"}).AddRow("ACTIVE"))
- mock.ExpectQuery("select @@global.rpl_semi_sync_replica_enabled").WillReturnRows(sqlmock.NewRows([]string{"STATUS"}).AddRow(1))
- mock.ExpectQuery("select @@global.rpl_semi_sync_source_enabled").WillReturnRows(sqlmock.NewRows([]string{"STATUS"}).AddRow(0))
- mock.ExpectExec("stop slave").
- WillReturnError(fmt.Errorf("some error"))
-
- err := manager.Follow(ctx, cluster)
- assert.NotNil(t, err)
- assert.ErrorContains(t, err, "some error")
- })
-
- t.Run("execute follow successfully", func(t *testing.T) {
- mock.ExpectQuery("SELECT PLUGIN_STATUS FROM INFORMATION_SCHEMA.PLUGINS " +
- "WHERE PLUGIN_NAME ='rpl_semi_sync_replica';").WillReturnRows(sqlmock.NewRows([]string{"PLUGIN_STATUS"}).AddRow("ACTIVE"))
- mock.ExpectQuery("select @@global.rpl_semi_sync_replica_enabled").WillReturnRows(sqlmock.NewRows([]string{"STATUS"}).AddRow(1))
- mock.ExpectQuery("select @@global.rpl_semi_sync_source_enabled").WillReturnRows(sqlmock.NewRows([]string{"STATUS"}).AddRow(0))
- mock.ExpectExec("stop slave").
- WillReturnResult(sqlmock.NewResult(1, 1))
- mock.ExpectExec("SET GLOBAL rpl_semi_sync_source_timeout = 4294967295").
- WillReturnResult(sqlmock.NewResult(1, 1))
- addr := cluster.GetMemberAddrWithPort(*cluster.GetLeaderMember())
- mysqlConfig, err := mysql.ParseDSN(config.URL)
- assert.Nil(t, err)
- mysqlConfig.User = config.Username
- mysqlConfig.Passwd = config.Password
- mysqlConfig.Addr = addr
- mysqlConfig.Timeout = time.Second * 5
- mysqlConfig.ReadTimeout = time.Second * 5
- mysqlConfig.WriteTimeout = time.Second * 5
- connectionPoolCache[mysqlConfig.FormatDSN()] = manager.DB
-
- err = manager.Follow(ctx, cluster)
- assert.Nil(t, err)
- })
-
- if err := mock.ExpectationsWereMet(); err != nil {
- t.Errorf("there were unfulfilled expectations: %v", err)
- }
-}
-
-func TestManager_isRecoveryConfOutdated(t *testing.T) {
- manager, _, _ := mockDatabase(t)
- manager.slaveStatus = RowMap{}
-
- t.Run("slaveStatus empty", func(t *testing.T) {
- outdated := manager.isRecoveryConfOutdated(fakePodName)
- assert.True(t, outdated)
- })
-
- t.Run("slave status error", func(t *testing.T) {
- manager.slaveStatus = RowMap{
- "Last_IO_Error": CellData{String: "some error"},
- }
-
- outdated := manager.isRecoveryConfOutdated(fakePodName)
- assert.True(t, outdated)
- })
-}
-
-func TestManager_HasOtherHealthyMembers(t *testing.T) {
- ctx := context.TODO()
- manager, mock, _ := mockDatabase(t)
-
- cluster := &dcs.Cluster{
- Members: []dcs.Member{
- {
- Name: "fake-pod-0",
- },
- {
- Name: "fake-pod-1",
- },
- {
- Name: fakePodName,
- },
- },
- }
- mock.ExpectQuery("select check_ts from kubeblocks.kb_health_check where type=1 limit 1").
- WillReturnError(sql.ErrNoRows)
- _, _ = NewConfig(fakeProperties)
-
- members := manager.HasOtherHealthyMembers(ctx, cluster, "fake-pod-0")
- assert.Len(t, members, 1)
- assert.Equal(t, fakePodName, members[0].Name)
-
- if err := mock.ExpectationsWereMet(); err != nil {
- t.Errorf("there were unfulfilled expectations: %v", err)
- }
-}
-
-func TestManager_Lock(t *testing.T) {
- ctx := context.TODO()
- manager, mock, _ := mockDatabase(t)
-
- t.Run("lock failed", func(t *testing.T) {
- mock.ExpectExec("set global read_only=on").
- WillReturnError(fmt.Errorf("some error"))
-
- err := manager.Lock(ctx, "")
- assert.NotNil(t, err)
- assert.ErrorContains(t, err, "some error")
- assert.False(t, manager.IsLocked)
- })
-
- t.Run("lock successfully", func(t *testing.T) {
- mock.ExpectExec("set global read_only=on").
- WillReturnResult(sqlmock.NewResult(1, 1))
-
- err := manager.Lock(ctx, "")
- assert.Nil(t, err)
- assert.True(t, manager.IsLocked)
- })
-
- if err := mock.ExpectationsWereMet(); err != nil {
- t.Errorf("there were unfulfilled expectations: %v", err)
- }
-}
-
-func TestManager_Unlock(t *testing.T) {
- ctx := context.TODO()
- manager, mock, _ := mockDatabase(t)
- manager.IsLocked = true
-
- t.Run("unlock failed", func(t *testing.T) {
- mock.ExpectExec("set global read_only=off").
- WillReturnError(fmt.Errorf("some error"))
-
- err := manager.Unlock(ctx)
- assert.NotNil(t, err)
- assert.ErrorContains(t, err, "some error")
- assert.True(t, manager.IsLocked)
- })
-
- t.Run("lock successfully", func(t *testing.T) {
- mock.ExpectExec("set global read_only=off").
- WillReturnResult(sqlmock.NewResult(1, 1))
-
- err := manager.Unlock(ctx)
- assert.Nil(t, err)
- assert.False(t, manager.IsLocked)
- })
-
- if err := mock.ExpectationsWereMet(); err != nil {
- t.Errorf("there were unfulfilled expectations: %v", err)
- }
-}
-
-func TestManager_GetDBState(t *testing.T) {
- ctx := context.TODO()
- manager, mock, _ := mockDatabase(t)
- cluster := &dcs.Cluster{
- Leader: &dcs.Leader{},
- }
-
- t.Run("select global failed", func(t *testing.T) {
- mock.ExpectQuery("select @@global.hostname").
- WillReturnError(fmt.Errorf("some error"))
-
- dbState := manager.GetDBState(ctx, cluster)
- assert.Nil(t, dbState)
- })
-
- t.Run("show master status failed", func(t *testing.T) {
- mock.ExpectQuery("select").
- WillReturnRows(sqlmock.NewRows([]string{"@@global.hostname", "@@global.server_uuid", "@@global.gtid_executed", "@@global.gtid_purged", "@@global.read_only", "@@global.super_read_only"}).
- AddRow(fakePodName, fakeServerUUID, fakeGTIDString, fakeGTIDSet, 1, 1))
- mock.ExpectQuery("show master status").
- WillReturnError(fmt.Errorf("some error"))
-
- dbState := manager.GetDBState(ctx, cluster)
- assert.Nil(t, dbState)
- })
-
- t.Run("show slave status failed", func(t *testing.T) {
- mock.ExpectQuery("select").
- WillReturnRows(sqlmock.NewRows([]string{"@@global.hostname", "@@global.server_uuid", "@@global.gtid_executed", "@@global.gtid_purged", "@@global.read_only", "@@global.super_read_only"}).
- AddRow(fakePodName, fakeServerUUID, fakeGTIDString, fakeGTIDSet, 1, 1))
- mock.ExpectQuery("show master status").
- WillReturnRows(sqlmock.NewRows([]string{"Binlog_File", "Binlog_Pos"}).AddRow("master-bin.000002", 20))
- mock.ExpectQuery("show slave status").
- WillReturnError(fmt.Errorf("some error"))
-
- dbState := manager.GetDBState(ctx, cluster)
- assert.Nil(t, dbState)
- })
-
- t.Run("get op timestamp failed", func(t *testing.T) {
- mock.ExpectQuery("select").
- WillReturnRows(sqlmock.NewRows([]string{"@@global.hostname", "@@global.server_uuid", "@@global.gtid_executed", "@@global.gtid_purged", "@@global.read_only", "@@global.super_read_only"}).
- AddRow(fakePodName, fakeServerUUID, fakeGTIDString, fakeGTIDSet, 1, 1))
- mock.ExpectQuery("show master status").
- WillReturnRows(sqlmock.NewRows([]string{"Binlog_File", "Binlog_Pos"}).AddRow("master-bin.000002", 20))
- mock.ExpectQuery("show slave status").
- WillReturnRows(sqlmock.NewRows([]string{"Master_UUID", "Slave_IO_Running"}).AddRow(fakeServerUUID, "Yes"))
- mock.ExpectQuery("select check_ts").
- WillReturnError(fmt.Errorf("some error"))
-
- dbState := manager.GetDBState(ctx, cluster)
- assert.Nil(t, dbState)
- })
-
- t.Run("current member is leader", func(t *testing.T) {
- cluster.Leader.Name = manager.CurrentMemberName
- mock.ExpectQuery("select").
- WillReturnRows(sqlmock.NewRows([]string{"@@global.hostname", "@@global.server_uuid", "@@global.gtid_executed", "@@global.gtid_purged", "@@global.read_only", "@@global.super_read_only"}).
- AddRow(fakePodName, fakeServerUUID, fakeGTIDString, fakeGTIDSet, 1, 1))
- mock.ExpectQuery("show master status").
- WillReturnRows(sqlmock.NewRows([]string{"File", "Pos"}).AddRow("master-bin.000002", 20))
- mock.ExpectQuery("show slave status").
- WillReturnRows(sqlmock.NewRows([]string{"Master_UUID", "Slave_IO_Running"}).AddRow(fakeServerUUID, "Yes"))
- mock.ExpectQuery("select").
- WillReturnRows(sqlmock.NewRows([]string{"check_ts"}).AddRow(1))
-
- dbState := manager.GetDBState(ctx, cluster)
- assert.NotNil(t, dbState)
- assert.Equal(t, fakePodName, dbState.Extra["hostname"])
- assert.Equal(t, "master-bin.000002", dbState.Extra["Binlog_File"])
- })
-
- t.Run("current member is not leader", func(t *testing.T) {
- cluster.Leader.Name = ""
- mock.ExpectQuery("select").
- WillReturnRows(sqlmock.NewRows([]string{"@@global.hostname", "@@global.server_uuid", "@@global.gtid_executed", "@@global.gtid_purged", "@@global.read_only", "@@global.super_read_only"}).
- AddRow(fakePodName, fakeServerUUID, fakeGTIDString, fakeGTIDSet, 1, 1))
- mock.ExpectQuery("show master status").
- WillReturnRows(sqlmock.NewRows([]string{"File", "Pos"}).AddRow("master-bin.000002", 20))
- mock.ExpectQuery("show slave status").
- WillReturnRows(sqlmock.NewRows([]string{"Master_UUID", "Slave_IO_Running"}).AddRow(fakeServerUUID, "Yes"))
- mock.ExpectQuery("select").
- WillReturnRows(sqlmock.NewRows([]string{"check_ts"}).AddRow(1))
-
- dbState := manager.GetDBState(ctx, cluster)
- assert.NotNil(t, dbState)
- assert.Equal(t, fakePodName, dbState.Extra["hostname"])
- assert.Equal(t, fakeServerUUID, dbState.Extra["Master_UUID"])
- })
-
- if err := mock.ExpectationsWereMet(); err != nil {
- t.Errorf("there were unfulfilled expectations: %v", err)
- }
-}
diff --git a/engines/mysql/query_test.go b/engines/mysql/query_test.go
index 9090e1f..98ab09e 100644
--- a/engines/mysql/query_test.go
+++ b/engines/mysql/query_test.go
@@ -106,7 +106,6 @@ func mockDatabase(t *testing.T) (*Manager, sqlmock.Sqlmock, error) {
DBManagerBase: engines.DBManagerBase{
CurrentMemberName: fakePodName,
ClusterCompName: fakeClusterCompName,
- Namespace: fakeNamespace,
},
}
development, _ := zap.NewDevelopment()
diff --git a/engines/mysql/semi_sync.go b/engines/mysql/semi_sync.go
deleted file mode 100644
index 6d40cb9..0000000
--- a/engines/mysql/semi_sync.go
+++ /dev/null
@@ -1,192 +0,0 @@
-/*
-Copyright (C) 2022-2023 ApeCloud Co., Ltd
-
-This file is part of KubeBlocks project
-
-This program is free software: you can redistribute it and/or modify
-it under the terms of the GNU Affero General Public License as published by
-the Free Software Foundation, either version 3 of the License, or
-(at your option) any later version.
-
-This program is distributed in the hope that it will be useful
-but WITHOUT ANY WARRANTY; without even the implied warranty of
-MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-GNU Affero General Public License for more details.
-
-You should have received a copy of the GNU Affero General Public License
-along with this program. If not, see .
-*/
-
-package mysql
-
-import (
- "context"
- "fmt"
-
- "github.com/pkg/errors"
-
- "github.com/apecloud/dbctl/dcs"
-)
-
-var semiSyncMaxTimeout int = 4294967295
-var semiSyncSourceVersion string = "8.0.26"
-
-func (mgr *Manager) GetSemiSyncSourcePlugin() string {
- plugin := "rpl_semi_sync_source"
- if IsBeforeVersion(mgr.version, semiSyncSourceVersion) {
- plugin = "rpl_semi_sync_master"
- }
- return plugin
-}
-
-func (mgr *Manager) GetSemiSyncReplicaPlugin() string {
- plugin := "rpl_semi_sync_replica"
- if IsBeforeVersion(mgr.version, semiSyncSourceVersion) {
- plugin = "rpl_semi_sync_slave"
- }
- return plugin
-}
-
-func (mgr *Manager) EnableSemiSyncSource(ctx context.Context) error {
- plugin := mgr.GetSemiSyncSourcePlugin()
- var status string
- sql := fmt.Sprintf("SELECT PLUGIN_STATUS FROM INFORMATION_SCHEMA.PLUGINS WHERE PLUGIN_NAME ='%s';", plugin)
- err := mgr.DB.QueryRowContext(ctx, sql).Scan(&status)
- if err != nil {
- return errors.Wrapf(err, "Get %s plugin status failed", plugin)
- }
-
- // In MySQL 8.0, semi-sync configuration options should not be specified in my.cnf,
- // as this may cause the database initialization process to fail:
- // [Warning] [MY-013501] [Server] Ignoring --plugin-load[_add] list as the server is running with --initialize(-insecure).
- // [ERROR] [MY-000067] [Server] unknown variable 'rpl_semi_sync_master_enabled=1'.
- if status != "ACTIVE" {
- return errors.Errorf("plugin %s is not active: %s", plugin, status)
- }
-
- isSemiSyncSourceEnabled, err := mgr.IsSemiSyncSourceEnabled(ctx)
- if err != nil {
- return err
- }
- if isSemiSyncSourceEnabled {
- return nil
- }
- setSourceEnable := fmt.Sprintf("SET GLOBAL %s_enabled = 1;", plugin)
- setSourceTimeout := fmt.Sprintf("SET GLOBAL %s_timeout = 0;", plugin)
- _, err = mgr.DB.Exec(setSourceEnable + setSourceTimeout)
- if err != nil {
- return errors.Wrap(err, setSourceEnable+setSourceTimeout+" execute failed")
- }
- return nil
-}
-
-func (mgr *Manager) DisableSemiSyncSource(ctx context.Context) error {
- isSemiSyncSourceEnabled, err := mgr.IsSemiSyncSourceEnabled(ctx)
- if err != nil {
- return err
- }
- if !isSemiSyncSourceEnabled {
- return nil
- }
- plugin := mgr.GetSemiSyncSourcePlugin()
- setSourceDisable := fmt.Sprintf("SET GLOBAL %s_enabled = 0;", plugin)
- _, err = mgr.DB.Exec(setSourceDisable)
- if err != nil {
- return errors.Wrap(err, setSourceDisable+" execute failed")
- }
- return nil
-}
-
-func (mgr *Manager) IsSemiSyncSourceEnabled(ctx context.Context) (bool, error) {
- plugin := mgr.GetSemiSyncSourcePlugin()
- var value int
- sql := fmt.Sprintf("select @@global.%s_enabled", plugin)
- err := mgr.DB.QueryRowContext(ctx, sql).Scan(&value)
- if err != nil {
- return false, errors.Wrapf(err, "exec %s failed", sql)
- }
- return value == 1, nil
-}
-
-func (mgr *Manager) GetSemiSyncSourceTimeout(ctx context.Context) (int, error) {
- plugin := mgr.GetSemiSyncSourcePlugin()
- var value int
- sql := fmt.Sprintf("select @@global.%s_timeout", plugin)
- err := mgr.DB.QueryRowContext(ctx, sql).Scan(&value)
- if err != nil {
- return 0, errors.Wrapf(err, "exec %s failed", sql)
- }
- return value, nil
-}
-
-func (mgr *Manager) SetSemiSyncSourceTimeout(ctx context.Context, cluster *dcs.Cluster, leader *dcs.Member) error {
- db, err := mgr.GetMemberConnection(cluster, leader)
- if err != nil {
- mgr.Logger.Info("Get Member conn failed", "error", err.Error())
- return err
- }
-
- plugin := mgr.GetSemiSyncSourcePlugin()
- setSourceTimeout := fmt.Sprintf("SET GLOBAL %s_timeout = %d;", plugin, semiSyncMaxTimeout)
- _, err = db.Exec(setSourceTimeout)
- if err != nil {
- return errors.Wrap(err, setSourceTimeout+" execute failed")
- }
- return nil
-}
-
-func (mgr *Manager) EnableSemiSyncReplica(ctx context.Context) error {
- plugin := mgr.GetSemiSyncReplicaPlugin()
- var status string
- sql := fmt.Sprintf("SELECT PLUGIN_STATUS FROM INFORMATION_SCHEMA.PLUGINS WHERE PLUGIN_NAME ='%s';", plugin)
- err := mgr.DB.QueryRowContext(ctx, sql).Scan(&status)
- if err != nil {
- return errors.Wrap(err, "get "+plugin+" status failed")
- }
- if status != "ACTIVE" {
- return errors.Errorf("plugin %s is not active: %s", plugin, status)
- }
-
- isSemiSyncReplicaEnabled, err := mgr.IsSemiSyncReplicaEnabled(ctx)
- if err != nil {
- return err
- }
- if isSemiSyncReplicaEnabled {
- return nil
- }
-
- setReplicaEnable := fmt.Sprintf("SET GLOBAL %s_enabled = 1;", plugin)
- _, err = mgr.DB.Exec(setReplicaEnable)
- if err != nil {
- return errors.Wrap(err, setReplicaEnable+" execute failed")
- }
- return nil
-}
-
-func (mgr *Manager) IsSemiSyncReplicaEnabled(ctx context.Context) (bool, error) {
- plugin := mgr.GetSemiSyncReplicaPlugin()
- var value int
- sql := fmt.Sprintf("select @@global.%s_enabled", plugin)
- err := mgr.DB.QueryRowContext(ctx, sql).Scan(&value)
- if err != nil {
- return false, errors.Wrapf(err, "exec %s failed", sql)
- }
- return value == 1, nil
-}
-
-func (mgr *Manager) DisableSemiSyncReplica(ctx context.Context) error {
- isSemiSyncReplicaEnabled, err := mgr.IsSemiSyncReplicaEnabled(ctx)
- if err != nil {
- return err
- }
- if !isSemiSyncReplicaEnabled {
- return nil
- }
- plugin := mgr.GetSemiSyncReplicaPlugin()
- setReplicaDisable := fmt.Sprintf("SET GLOBAL %s_enabled = 0;", plugin)
- _, err = mgr.DB.Exec(setReplicaDisable)
- if err != nil {
- return errors.Wrap(err, setReplicaDisable+" execute failed")
- }
- return nil
-}
diff --git a/engines/mysql/semi_sync_test.go b/engines/mysql/semi_sync_test.go
deleted file mode 100644
index c125343..0000000
--- a/engines/mysql/semi_sync_test.go
+++ /dev/null
@@ -1,159 +0,0 @@
-/*
-Copyright (C) 2022-2023 ApeCloud Co., Ltd
-
-# This file is part of KubeBlocks project
-
-This program is free software: you can redistribute it and/or modify
-it under the terms of the GNU Affero General Public License as published by
-the Free Software Foundation, either version 3 of the License, or
-(at your option) any later version.
-
-This program is distributed in the hope that it will be useful
-but WITHOUT ANY WARRANTY; without even the implied warranty of
-MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-GNU Affero General Public License for more details.
-
-You should have received a copy of the GNU Affero General Public License
-along with this program. If not, see .
-*/
-
-package mysql
-
-import (
- "context"
- "testing"
-
- "github.com/DATA-DOG/go-sqlmock"
- "github.com/stretchr/testify/assert"
-)
-
-func TestManager_SemiSync(t *testing.T) {
- ctx := context.TODO()
- manager, mock, _ := mockDatabase(t)
- _, _ = NewConfig(fakeProperties)
-
- t.Run("semi sync plugin", func(t *testing.T) {
- t.Run("version before 8.0.26", func(t *testing.T) {
- manager.version = "5.7.42"
- semiSyncSourcePlugin := manager.GetSemiSyncSourcePlugin()
- semiSyncReplicaPlugin := manager.GetSemiSyncReplicaPlugin()
- assert.Equal(t, semiSyncSourcePlugin, "rpl_semi_sync_master")
- assert.Equal(t, semiSyncReplicaPlugin, "rpl_semi_sync_slave")
- })
-
- t.Run("version after 8.0.26", func(t *testing.T) {
- manager.version = "8.0.30"
- semiSyncSourcePlugin := manager.GetSemiSyncSourcePlugin()
- semiSyncReplicaPlugin := manager.GetSemiSyncReplicaPlugin()
- assert.Equal(t, semiSyncSourcePlugin, "rpl_semi_sync_source")
- assert.Equal(t, semiSyncReplicaPlugin, "rpl_semi_sync_replica")
- })
- })
-
- t.Run("enable semi sync source", func(t *testing.T) {
- t.Run("failed if plugin not load", func(t *testing.T) {
- mock.ExpectQuery("SELECT PLUGIN_STATUS FROM INFORMATION_SCHEMA.PLUGINS " +
- "WHERE PLUGIN_NAME ='rpl_semi_sync_source';").WillReturnRows(sqlmock.NewRows([]string{"PLUGIN_STATUS"}))
- err := manager.EnableSemiSyncSource(ctx)
- assert.NotNil(t, err)
- assert.ErrorContains(t, err, "plugin status failed")
- })
- t.Run("failed if plugin not active", func(t *testing.T) {
- mock.ExpectQuery("SELECT PLUGIN_STATUS FROM INFORMATION_SCHEMA.PLUGINS " +
- "WHERE PLUGIN_NAME ='rpl_semi_sync_source';").WillReturnRows(sqlmock.NewRows([]string{"PLUGIN_STATUS"}).AddRow("NotActive"))
- err := manager.EnableSemiSyncSource(ctx)
- assert.NotNil(t, err)
- assert.ErrorContains(t, err, "is not active")
- })
-
- t.Run("already enabled", func(t *testing.T) {
- mock.ExpectQuery("SELECT PLUGIN_STATUS FROM INFORMATION_SCHEMA.PLUGINS " +
- "WHERE PLUGIN_NAME ='rpl_semi_sync_source';").WillReturnRows(sqlmock.NewRows([]string{"PLUGIN_STATUS"}).AddRow("ACTIVE"))
- mock.ExpectQuery("select @@global.rpl_semi_sync_source_enabled").WillReturnRows(sqlmock.NewRows([]string{"STATUS"}).AddRow(1))
- err := manager.EnableSemiSyncSource(ctx)
- assert.Nil(t, err)
- })
-
- t.Run("enable", func(t *testing.T) {
- mock.ExpectQuery("SELECT PLUGIN_STATUS FROM INFORMATION_SCHEMA.PLUGINS " +
- "WHERE PLUGIN_NAME ='rpl_semi_sync_source';").WillReturnRows(sqlmock.NewRows([]string{"PLUGIN_STATUS"}).AddRow("ACTIVE"))
- mock.ExpectQuery("select @@global.rpl_semi_sync_source_enabled").WillReturnRows(sqlmock.NewRows([]string{"STATUS"}).AddRow(0))
- mock.ExpectExec("SET GLOBAL rpl_semi_sync_source_enabled = 1;" +
- "SET GLOBAL rpl_semi_sync_source_timeout = 0;").
- WillReturnResult(sqlmock.NewResult(1, 1))
- err := manager.EnableSemiSyncSource(ctx)
- assert.Nil(t, err)
- })
- })
-
- t.Run("disable semi sync source", func(t *testing.T) {
- t.Run("already disabled", func(t *testing.T) {
- mock.ExpectQuery("select @@global.rpl_semi_sync_source_enabled").WillReturnRows(sqlmock.NewRows([]string{"STATUS"}).AddRow(0))
- err := manager.DisableSemiSyncSource(ctx)
- assert.Nil(t, err)
- })
-
- t.Run("disable", func(t *testing.T) {
- mock.ExpectQuery("select @@global.rpl_semi_sync_source_enabled").WillReturnRows(sqlmock.NewRows([]string{"STATUS"}).AddRow(1))
- mock.ExpectExec("SET GLOBAL rpl_semi_sync_source_enabled = 0;").
- WillReturnResult(sqlmock.NewResult(1, 1))
- err := manager.DisableSemiSyncSource(ctx)
- assert.Nil(t, err)
- })
- })
-
- t.Run("enable semi sync replica", func(t *testing.T) {
- t.Run("failed if plugin not load", func(t *testing.T) {
- mock.ExpectQuery("SELECT PLUGIN_STATUS FROM INFORMATION_SCHEMA.PLUGINS " +
- "WHERE PLUGIN_NAME ='rpl_semi_sync_replica';").WillReturnRows(sqlmock.NewRows([]string{"PLUGIN_STATUS"}))
- err := manager.EnableSemiSyncReplica(ctx)
- assert.NotNil(t, err)
- assert.ErrorContains(t, err, "status failed")
- })
- t.Run("failed if plugin not active", func(t *testing.T) {
- mock.ExpectQuery("SELECT PLUGIN_STATUS FROM INFORMATION_SCHEMA.PLUGINS " +
- "WHERE PLUGIN_NAME ='rpl_semi_sync_replica';").WillReturnRows(sqlmock.NewRows([]string{"PLUGIN_STATUS"}).AddRow("NotActive"))
- err := manager.EnableSemiSyncReplica(ctx)
- assert.NotNil(t, err)
- assert.ErrorContains(t, err, "is not active")
- })
-
- t.Run("already enabled", func(t *testing.T) {
- mock.ExpectQuery("SELECT PLUGIN_STATUS FROM INFORMATION_SCHEMA.PLUGINS " +
- "WHERE PLUGIN_NAME ='rpl_semi_sync_replica';").WillReturnRows(sqlmock.NewRows([]string{"PLUGIN_STATUS"}).AddRow("ACTIVE"))
- mock.ExpectQuery("select @@global.rpl_semi_sync_replica_enabled").WillReturnRows(sqlmock.NewRows([]string{"STATUS"}).AddRow(1))
- err := manager.EnableSemiSyncReplica(ctx)
- assert.Nil(t, err)
- })
-
- t.Run("enable", func(t *testing.T) {
- mock.ExpectQuery("SELECT PLUGIN_STATUS FROM INFORMATION_SCHEMA.PLUGINS " +
- "WHERE PLUGIN_NAME ='rpl_semi_sync_replica';").WillReturnRows(sqlmock.NewRows([]string{"PLUGIN_STATUS"}).AddRow("ACTIVE"))
- mock.ExpectQuery("select @@global.rpl_semi_sync_replica_enabled").WillReturnRows(sqlmock.NewRows([]string{"STATUS"}).AddRow(0))
- mock.ExpectExec("SET GLOBAL rpl_semi_sync_replica_enabled = 1;").
- WillReturnResult(sqlmock.NewResult(1, 1))
- err := manager.EnableSemiSyncReplica(ctx)
- assert.Nil(t, err)
- })
- })
-
- t.Run("disable semi sync replica", func(t *testing.T) {
- t.Run("already disabled", func(t *testing.T) {
- mock.ExpectQuery("select @@global.rpl_semi_sync_replica_enabled").WillReturnRows(sqlmock.NewRows([]string{"STATUS"}).AddRow(0))
- err := manager.DisableSemiSyncReplica(ctx)
- assert.Nil(t, err)
- })
-
- t.Run("disable", func(t *testing.T) {
- mock.ExpectQuery("select @@global.rpl_semi_sync_replica_enabled").WillReturnRows(sqlmock.NewRows([]string{"STATUS"}).AddRow(1))
- mock.ExpectExec("SET GLOBAL rpl_semi_sync_replica_enabled = 0;").
- WillReturnResult(sqlmock.NewResult(1, 1))
- err := manager.DisableSemiSyncReplica(ctx)
- assert.Nil(t, err)
- })
- })
-
- if err := mock.ExpectationsWereMet(); err != nil {
- t.Errorf("there were unfulfilled expectations: %v", err)
- }
-}
diff --git a/engines/mysql/suite_test.go b/engines/mysql/suite_test.go
index 9b6390d..4f22c78 100644
--- a/engines/mysql/suite_test.go
+++ b/engines/mysql/suite_test.go
@@ -25,18 +25,11 @@ import (
. "github.com/onsi/ginkgo/v2"
. "github.com/onsi/gomega"
- "github.com/golang/mock/gomock"
"github.com/spf13/viper"
ctrl "sigs.k8s.io/controller-runtime"
"sigs.k8s.io/controller-runtime/pkg/log/zap"
"github.com/apecloud/dbctl/constant"
- "github.com/apecloud/dbctl/dcs"
-)
-
-var (
- dcsStore dcs.DCS
- mockDCSStore *dcs.MockDCS
)
func init() {
@@ -52,15 +45,5 @@ func TestMysqlDBManager(t *testing.T) {
RunSpecs(t, "MySQL DBManager. Suite")
}
-var _ = BeforeSuite(func() {
- // Init mock dcs store
- InitMockDCSStore()
+var _ = AfterSuite(func() {
})
-
-func InitMockDCSStore() {
- ctrl := gomock.NewController(GinkgoT())
- mockDCSStore = dcs.NewMockDCS(ctrl)
- mockDCSStore.EXPECT().GetClusterFromCache().Return(&dcs.Cluster{}).AnyTimes()
- dcs.SetStore(mockDCSStore)
- dcsStore = mockDCSStore
-}
diff --git a/engines/mysql/user.go b/engines/mysql/user.go
deleted file mode 100644
index f0fc6af..0000000
--- a/engines/mysql/user.go
+++ /dev/null
@@ -1,196 +0,0 @@
-/*
-Copyright (C) 2022-2023 ApeCloud Co., Ltd
-
-This file is part of KubeBlocks project
-
-This program is free software: you can redistribute it and/or modify
-it under the terms of the GNU Affero General Public License as published by
-the Free Software Foundation, either version 3 of the License, or
-(at your option) any later version.
-
-This program is distributed in the hope that it will be useful
-but WITHOUT ANY WARRANTY; without even the implied warranty of
-MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-GNU Affero General Public License for more details.
-
-You should have received a copy of the GNU Affero General Public License
-along with this program. If not, see .
-*/
-
-package mysql
-
-import (
- "context"
- "fmt"
- "strings"
-
- "golang.org/x/exp/slices"
-
- "github.com/apecloud/dbctl/engines/models"
-)
-
-const (
- superUserPriv = "SELECT, INSERT, UPDATE, DELETE, CREATE, DROP, RELOAD, SHUTDOWN, PROCESS, FILE, REFERENCES, INDEX, ALTER, SHOW DATABASES, SUPER, CREATE TEMPORARY TABLES, LOCK TABLES, EXECUTE, REPLICATION SLAVE, REPLICATION CLIENT, CREATE VIEW, SHOW VIEW, CREATE ROUTINE, ALTER ROUTINE, CREATE USER, EVENT, TRIGGER, CREATE TABLESPACE ON *.*"
- readWritePriv = "SELECT, INSERT, UPDATE, DELETE ON *.*"
- readOnlyRPriv = "SELECT ON *.*"
- noPriv = "USAGE ON *.*"
-
- listUserSQL = "SELECT user AS userName, CASE password_expired WHEN 'N' THEN 'F' ELSE 'T' END as expired FROM mysql.user WHERE host = '%' and user <> 'root' and user not like 'kb%';"
- showGrantSQL = "SHOW GRANTS FOR '%s'@'%%';"
- getUserSQL = `
- SELECT user AS userName, CASE password_expired WHEN 'N' THEN 'F' ELSE 'T' END as expired
- FROM mysql.user
- WHERE host = '%%' and user <> 'root' and user not like 'kb%%' and user ='%s';"
- `
- createUserSQL = "CREATE USER '%s'@'%%' IDENTIFIED BY '%s';"
- deleteUserSQL = "DROP USER IF EXISTS '%s'@'%%';"
- grantSQL = "GRANT %s TO '%s'@'%%';"
- revokeSQL = "REVOKE %s FROM '%s'@'%%';"
- listSystemAccountsSQL = "SELECT user AS userName FROM mysql.user WHERE host = '%' and user like 'kb%';"
-)
-
-func (mgr *Manager) ListUsers(ctx context.Context) ([]models.UserInfo, error) {
- users := []models.UserInfo{}
-
- err := QueryRowsMap(mgr.DB, listUserSQL, func(rMap RowMap) error {
- user := models.UserInfo{
- UserName: rMap.GetString("userName"),
- Expired: rMap.GetString("expired"),
- }
- users = append(users, user)
- return nil
- })
- if err != nil {
- mgr.Logger.Error(err, "error executing %s")
- return nil, err
- }
- return users, nil
-}
-
-func (mgr *Manager) ListSystemAccounts(ctx context.Context) ([]models.UserInfo, error) {
- users := []models.UserInfo{}
-
- err := QueryRowsMap(mgr.DB, listSystemAccountsSQL, func(rMap RowMap) error {
- user := models.UserInfo{
- UserName: rMap.GetString("userName"),
- }
- users = append(users, user)
- return nil
- })
- if err != nil {
- mgr.Logger.Error(err, "error executing %s")
- return nil, err
- }
- return users, nil
-}
-
-func (mgr *Manager) DescribeUser(ctx context.Context, userName string) (*models.UserInfo, error) {
- user := &models.UserInfo{}
- // only keep one role name of the highest privilege
- userRoles := make([]models.RoleType, 0)
-
- sql := fmt.Sprintf(showGrantSQL, userName)
-
- err := QueryRowsMap(mgr.DB, sql, func(rMap RowMap) error {
- for k, v := range rMap {
- if user.UserName == "" {
- user.UserName = strings.TrimPrefix(strings.TrimSuffix(k, "@%"), "Grants for ")
- }
- mysqlRoleType := priv2Role(strings.TrimPrefix(v.String, "GRANT "))
- userRoles = append(userRoles, mysqlRoleType)
- }
-
- return nil
- })
- if err != nil {
- mgr.Logger.Error(err, "execute sql failed", "sql", sql)
- return nil, err
- }
-
- slices.SortFunc(userRoles, models.SortRoleByWeight)
- if len(userRoles) > 0 {
- user.RoleName = (string)(userRoles[0])
- }
- return user, nil
-}
-
-func (mgr *Manager) CreateUser(ctx context.Context, userName, password string) error {
- sql := fmt.Sprintf(createUserSQL, userName, password)
-
- _, err := mgr.Exec(ctx, sql)
- if err != nil {
- mgr.Logger.Error(err, "execute sql failed", "sql", sql)
- return err
- }
-
- return nil
-}
-
-func (mgr *Manager) DeleteUser(ctx context.Context, userName string) error {
- sql := fmt.Sprintf(deleteUserSQL, userName)
-
- _, err := mgr.Exec(ctx, sql)
- if err != nil {
- mgr.Logger.Error(err, "execute sql failed", "sql", sql)
- return err
- }
-
- return nil
-}
-
-func (mgr *Manager) GrantUserRole(ctx context.Context, userName, roleName string) error {
- // render sql stmts
- roleDesc, _ := role2Priv(roleName)
- // update privilege
- sql := fmt.Sprintf(grantSQL, roleDesc, userName)
- _, err := mgr.Exec(ctx, sql)
- if err != nil {
- mgr.Logger.Error(err, "execute sql failed", "sql", sql)
- return err
- }
-
- return nil
-}
-
-func (mgr *Manager) RevokeUserRole(ctx context.Context, userName, roleName string) error {
- // render sql stmts
- roleDesc, _ := role2Priv(roleName)
- // update privilege
- sql := fmt.Sprintf(revokeSQL, roleDesc, userName)
- _, err := mgr.Exec(ctx, sql)
- if err != nil {
- mgr.Logger.Error(err, "execute sql failed", "sql", sql)
- return err
- }
-
- return nil
-}
-
-func role2Priv(roleName string) (string, error) {
- roleType := models.String2RoleType(roleName)
- switch roleType {
- case models.SuperUserRole:
- return superUserPriv, nil
- case models.ReadWriteRole:
- return readWritePriv, nil
- case models.ReadOnlyRole:
- return readOnlyRPriv, nil
- }
- return "", fmt.Errorf("role name: %s is not supported", roleName)
-}
-
-func priv2Role(priv string) models.RoleType {
- if strings.HasPrefix(priv, readOnlyRPriv) {
- return models.ReadOnlyRole
- }
- if strings.HasPrefix(priv, readWritePriv) {
- return models.ReadWriteRole
- }
- if strings.HasPrefix(priv, superUserPriv) {
- return models.SuperUserRole
- }
- if strings.HasPrefix(priv, noPriv) {
- return models.NoPrivileges
- }
- return models.CustomizedRole
-}
diff --git a/engines/mysql/util.go b/engines/mysql/util.go
index cdb82e0..3cf41af 100644
--- a/engines/mysql/util.go
+++ b/engines/mysql/util.go
@@ -90,8 +90,6 @@ type NamedResultData struct {
Data ResultData
}
-var EmptyResultData = ResultData{}
-
func (rm *RowMap) GetString(key string) string {
return (*rm)[key].String
}
@@ -221,7 +219,9 @@ func QueryRowsMap(db *sql.DB, query string, onRow func(RowMap) error, args ...in
var rows *sql.Rows
rows, err = db.Query(query, args...)
if rows != nil {
- defer rows.Close()
+ defer func() {
+ _ = rows.Close()
+ }()
}
if err != nil && !errors.Is(err, sql.ErrNoRows) {
return err
diff --git a/engines/oceanbase/commands.go b/engines/oceanbase/commands.go
deleted file mode 100644
index 0c6a59e..0000000
--- a/engines/oceanbase/commands.go
+++ /dev/null
@@ -1,96 +0,0 @@
-/*
-Copyright (C) 2022-2023 ApeCloud Co., Ltd
-
-This file is part of KubeBlocks project
-
-This program is free software: you can redistribute it and/or modify
-it under the terms of the GNU Affero General Public License as published by
-the Free Software Foundation, either version 3 of the License, or
-(at your option) any later version.
-
-This program is distributed in the hope that it will be useful
-but WITHOUT ANY WARRANTY; without even the implied warranty of
-MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-GNU Affero General Public License for more details.
-
-You should have received a copy of the GNU Affero General Public License
-along with this program. If not, see .
-*/
-
-package oceanbase
-
-import (
- "fmt"
- "strconv"
- "strings"
-
- corev1 "k8s.io/api/core/v1"
-
- "github.com/apecloud/dbctl/engines"
- "github.com/apecloud/dbctl/engines/models"
-)
-
-var _ engines.ClusterCommands = &Commands{}
-
-type Commands struct {
- info engines.EngineInfo
- examples map[models.ClientType]engines.BuildConnectExample
-}
-
-func NewCommands() engines.ClusterCommands {
- return &Commands{
- info: engines.EngineInfo{
- Client: "mysql",
- },
- examples: map[models.ClientType]engines.BuildConnectExample{
- models.CLI: func(info *engines.ConnectionInfo) string {
- return fmt.Sprintf(`# oceanbase client connection example
-mysql -h %s -P $COMP_MYSQL_PORT -u %s
-`, info.Host, info.User)
- },
- },
- }
-}
-
-func (r *Commands) ConnectCommand(connectInfo *engines.AuthInfo) []string {
- userName := "root"
- userPass := ""
-
- if connectInfo != nil {
- userName = connectInfo.UserName
- userPass = connectInfo.UserPasswd
- }
-
- var obCmd []string
-
- if userPass != "" {
- obCmd = []string{fmt.Sprintf("%s -h127.0.0.1 -P $OB_SERVICE_PORT -u%s -A -p%s", r.info.Client, userName, engines.AddSingleQuote(userPass))}
- } else {
- obCmd = []string{fmt.Sprintf("%s -h127.0.0.1 -P $OB_SERVICE_PORT -u%s -A", r.info.Client, userName)}
- }
-
- return []string{"bash", "-c", strings.Join(obCmd, " ")}
-}
-
-func (r *Commands) Container() string {
- return r.info.Container
-}
-
-func (r *Commands) ConnectExample(info *engines.ConnectionInfo, client string) string {
- return engines.BuildExample(info, client, r.examples)
-}
-
-func (r *Commands) ExecuteCommand(scripts []string) ([]string, []corev1.EnvVar, error) {
- cmd := []string{}
- cmd = append(cmd, "/bin/bash", "-c", "-ex")
- if engines.EnvVarMap[engines.PASSWORD] == "" {
- cmd = append(cmd, fmt.Sprintf("%s -h127.0.0.1 -P $COMP_MYSQL_PORT -u%s -e %s", r.info.Client, engines.EnvVarMap[engines.USER], strconv.Quote(strings.Join(scripts, " "))))
- } else {
- cmd = append(cmd, fmt.Sprintf("%s -h127.0.0.1 -P $COMP_MYSQL_PORT -u%s -p%s -e %s", r.info.Client,
- fmt.Sprintf("$%s", engines.EnvVarMap[engines.USER]),
- fmt.Sprintf("$%s", engines.EnvVarMap[engines.PASSWORD]),
- strconv.Quote(strings.Join(scripts, " "))))
- }
-
- return cmd, nil, nil
-}
diff --git a/engines/oceanbase/commands_test.go b/engines/oceanbase/commands_test.go
deleted file mode 100644
index e11d6eb..0000000
--- a/engines/oceanbase/commands_test.go
+++ /dev/null
@@ -1,71 +0,0 @@
-/*
-Copyright (C) 2022-2023 ApeCloud Co., Ltd
-
-This file is part of KubeBlocks project
-
-This program is free software: you can redistribute it and/or modify
-it under the terms of the GNU Affero General Public License as published by
-the Free Software Foundation, either version 3 of the License, or
-(at your option) any later version.
-
-This program is distributed in the hope that it will be useful
-but WITHOUT ANY WARRANTY; without even the implied warranty of
-MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-GNU Affero General Public License for more details.
-
-You should have received a copy of the GNU Affero General Public License
-along with this program. If not, see .
-*/
-
-package oceanbase
-
-import (
- "fmt"
-
- . "github.com/onsi/ginkgo/v2"
- . "github.com/onsi/gomega"
-
- "github.com/apecloud/dbctl/engines"
-)
-
-var _ = Describe("Oceanbase Engine", func() {
- It("connection command", func() {
- oceanbase := NewCommands()
-
- Expect(oceanbase.ConnectCommand(nil)).ShouldNot(BeNil())
- authInfo := &engines.AuthInfo{
- UserName: "user-test",
- UserPasswd: "pwd-test",
- }
- Expect(oceanbase.ConnectCommand(authInfo)).ShouldNot(BeNil())
- })
-
- It("connection example", func() {
- oceanbase := NewCommands().(*Commands)
-
- info := &engines.ConnectionInfo{
- User: "user",
- Host: "host",
- Password: "*****",
- Port: "1234",
- }
- for k := range oceanbase.examples {
- fmt.Printf("%s Connection Example\n", k.String())
- Expect(oceanbase.ConnectExample(info, k.String())).ShouldNot(BeZero())
- }
-
- Expect(oceanbase.ConnectExample(info, "")).ShouldNot(BeZero())
- })
-
- It("execute command", func() {
- oceanbase := NewCommands()
-
- cmd, _, err := oceanbase.ExecuteCommand(nil)
- Expect(err).Should(BeNil())
- Expect(cmd).ShouldNot(BeNil())
- engines.EnvVarMap[engines.PASSWORD] = ""
- cmd, _, err = oceanbase.ExecuteCommand(nil)
- Expect(err).Should(BeNil())
- Expect(cmd).ShouldNot(BeNil())
- })
-})
diff --git a/engines/oceanbase/config.go b/engines/oceanbase/config.go
deleted file mode 100644
index c348845..0000000
--- a/engines/oceanbase/config.go
+++ /dev/null
@@ -1,102 +0,0 @@
-/*
-Copyright (C) 2022-2023 ApeCloud Co., Ltd
-
-This file is part of KubeBlocks project
-
-This program is free software: you can redistribute it and/or modify
-it under the terms of the GNU Affero General Public License as published by
-the Free Software Foundation, either version 3 of the License, or
-(at your option) any later version.
-
-This program is distributed in the hope that it will be useful
-but WITHOUT ANY WARRANTY; without even the implied warranty of
-MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-GNU Affero General Public License for more details.
-
-You should have received a copy of the GNU Affero General Public License
-along with this program. If not, see .
-*/
-
-package oceanbase
-
-import (
- "database/sql"
- "strings"
- "time"
-
- "github.com/go-sql-driver/mysql"
- "github.com/pkg/errors"
- "github.com/spf13/viper"
-
- "github.com/apecloud/dbctl/dcs"
- mysqlengine "github.com/apecloud/dbctl/engines/mysql"
-)
-
-type Config struct {
- *mysqlengine.Config
-}
-
-const obServicePortEnv = "OB_SERVICE_PORT"
-
-var config *Config
-
-func NewConfig(properties map[string]string) (*Config, error) {
- mysqlConfig, err := mysqlengine.NewConfig(properties)
- if err != nil {
- return nil, err
- }
- if mysqlConfig.Username == "" {
- mysqlConfig.Username = "root"
- }
- if viper.IsSet(obServicePortEnv) {
- mysqlConfig.Port = viper.GetString(obServicePortEnv)
- } else {
- mysqlConfig.Port = "2881"
- }
-
- mysqlConfig.Password = getRootPassword("")
- config = &Config{
- Config: mysqlConfig,
- }
- return config, nil
-}
-
-func getRootPassword(compName string) string {
- rootPasswordEnv := "OB_ROOT_PASSWD"
- // to support different root password for different components
- rootPasswordEnvPerCmp := ""
- if compName == "" {
- compName = viper.GetString("KB_COMP_NAME")
- }
-
- if compName != "" {
- compName = strings.ToUpper(compName)
- compName = strings.ReplaceAll(compName, "-", "_")
- rootPasswordEnvPerCmp = rootPasswordEnv + "_" + compName
- }
-
- if viper.IsSet(rootPasswordEnvPerCmp) {
- rootPasswordEnv = rootPasswordEnvPerCmp
- }
- return viper.GetString(rootPasswordEnv)
-}
-
-func (config *Config) GetMemberRootDBConn(cluster *dcs.Cluster, member *dcs.Member) (*sql.DB, error) {
- addr := cluster.GetMemberAddrWithPort(*member)
- mysqlConfig, err := mysql.ParseDSN(config.URL)
- if err != nil {
- return nil, errors.Wrapf(err, "illegal Data Source Name (DNS) specified by %s", config.URL)
- }
- mysqlConfig.User = config.Username
- mysqlConfig.Passwd = getRootPassword(member.ComponentName)
- mysqlConfig.Addr = addr
- mysqlConfig.Timeout = time.Second * 5
- mysqlConfig.ReadTimeout = time.Second * 5
- mysqlConfig.WriteTimeout = time.Second * 5
- db, err := mysqlengine.GetDBConnection(mysqlConfig.FormatDSN())
- if err != nil {
- return nil, errors.Wrap(err, "get DB connection failed")
- }
-
- return db, nil
-}
diff --git a/engines/oceanbase/config_test.go b/engines/oceanbase/config_test.go
deleted file mode 100644
index 121449a..0000000
--- a/engines/oceanbase/config_test.go
+++ /dev/null
@@ -1,54 +0,0 @@
-/*
-Copyright (C) 2022-2023 ApeCloud Co., Ltd
-
-This file is part of KubeBlocks project
-
-This program is free software: you can redistribute it and/or modify
-it under the terms of the GNU Affero General Public License as published by
-the Free Software Foundation, either version 3 of the License, or
-(at your option) any later version.
-
-This program is distributed in the hope that it will be useful
-but WITHOUT ANY WARRANTY; without even the implied warranty of
-MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-GNU Affero General Public License for more details.
-
-You should have received a copy of the GNU Affero General Public License
-along with this program. If not, see .
-*/
-
-package oceanbase
-
-import (
- "testing"
-
- "github.com/stretchr/testify/assert"
-
- "github.com/apecloud/dbctl/engines"
-)
-
-var (
- fakeProperties = engines.Properties{
- "url": "root:@tcp(127.0.0.1:3306)/mysql?multiStatements=true",
- "maxOpenConns": "5",
- }
- fakePropertiesWithWrongPem = engines.Properties{
- "pemPath": "fake-path",
- }
-)
-
-func TestNewConfig(t *testing.T) {
- t.Run("new config failed", func(t *testing.T) {
- fakeConfig, err := NewConfig(fakePropertiesWithWrongPem)
-
- assert.Nil(t, fakeConfig)
- assert.NotNil(t, err)
- })
-
- t.Run("new config successfully", func(t *testing.T) {
- fakeConfig, err := NewConfig(fakeProperties)
-
- assert.NotNil(t, fakeConfig)
- assert.Nil(t, err)
- })
-}
diff --git a/engines/oceanbase/conn.go b/engines/oceanbase/conn.go
deleted file mode 100644
index 57e23fd..0000000
--- a/engines/oceanbase/conn.go
+++ /dev/null
@@ -1,77 +0,0 @@
-/*
-Copyright (C) 2022-2023 ApeCloud Co., Ltd
-
-This file is part of KubeBlocks project
-
-This program is free software: you can redistribute it and/or modify
-it under the terms of the GNU Affero General Public License as published by
-the Free Software Foundation, either version 3 of the License, or
-(at your option) any later version.
-
-This program is distributed in the hope that it will be useful
-but WITHOUT ANY WARRANTY; without even the implied warranty of
-MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-GNU Affero General Public License for more details.
-
-You should have received a copy of the GNU Affero General Public License
-along with this program. If not, see .
-*/
-
-package oceanbase
-
-import (
- "database/sql"
- "fmt"
-
- "github.com/go-sql-driver/mysql"
- "github.com/pkg/errors"
-
- "github.com/apecloud/dbctl/dcs"
- mysqlengine "github.com/apecloud/dbctl/engines/mysql"
-)
-
-// GetDBConnWithMember retrieves a database connection for a specific member of a cluster.
-func (mgr *Manager) GetDBConnWithMember(cluster *dcs.Cluster, member *dcs.Member) (db *sql.DB, err error) {
- if member != nil && member.Name != mgr.CurrentMemberName {
- addr := cluster.GetMemberAddrWithPort(*member)
- db, err = config.GetDBConnWithAddr(addr)
- if err != nil {
- return nil, errors.Wrap(err, "new db connection failed")
- }
- } else {
- db = mgr.DB
- }
- return db, nil
-}
-
-func (mgr *Manager) GetMySQLDBConn() (*sql.DB, error) {
- mysqlConfig, err := mysql.ParseDSN(config.URL)
- if err != nil {
- return nil, errors.Wrapf(err, "illegal Data Source Name (DNS) specified by %s", config.URL)
- }
- mysqlConfig.User = fmt.Sprintf("%s@%s", "root", mgr.ReplicaTenant)
- mysqlConfig.Passwd = ""
- db, err := mysqlengine.GetDBConnection(mysqlConfig.FormatDSN())
- if err != nil {
- return nil, errors.Wrap(err, "get DB connection failed")
- }
-
- return db, nil
-}
-
-func (mgr *Manager) GetMySQLDBConnWithAddr(addr string) (*sql.DB, error) {
- mysqlConfig, err := mysql.ParseDSN(config.URL)
- if err != nil {
- return nil, errors.Wrapf(err, "illegal Data Source Name (DNS) specified by %s", config.URL)
- }
- mysqlConfig.User = fmt.Sprintf("%s@%s", "root", mgr.ReplicaTenant)
- // mysqlConfig.Passwd = config.Password
- mysqlConfig.Passwd = ""
- mysqlConfig.Addr = addr
- db, err := mysqlengine.GetDBConnection(mysqlConfig.FormatDSN())
- if err != nil {
- return nil, errors.Wrap(err, "get DB connection failed")
- }
-
- return db, nil
-}
diff --git a/engines/oceanbase/get_replica_role.go b/engines/oceanbase/get_replica_role.go
deleted file mode 100644
index 16f9c61..0000000
--- a/engines/oceanbase/get_replica_role.go
+++ /dev/null
@@ -1,89 +0,0 @@
-/*
-Copyright (C) 2022-2023 ApeCloud Co., Ltd
-
-This file is part of KubeBlocks project
-
-This program is free software: you can redistribute it and/or modify
-it under the terms of the GNU Affero General Public License as published by
-the Free Software Foundation, either version 3 of the License, or
-(at your option) any later version.
-
-This program is distributed in the hope that it will be useful
-but WITHOUT ANY WARRANTY; without even the implied warranty of
-MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-GNU Affero General Public License for more details.
-
-You should have received a copy of the GNU Affero General Public License
-along with this program. If not, see .
-*/
-
-package oceanbase
-
-import (
- "context"
- "fmt"
-
- "github.com/pkg/errors"
-
- "github.com/apecloud/dbctl/dcs"
-)
-
-func (mgr *Manager) GetReplicaRole(ctx context.Context) (string, error) {
- return mgr.GetReplicaRoleForMember(ctx, nil, nil)
-}
-
-func (mgr *Manager) GetReplicaRoleForMember(ctx context.Context, cluster *dcs.Cluster, member *dcs.Member) (string, error) {
- if mgr.ReplicaTenant == "" {
- mgr.Logger.V(1).Info("the cluster has no replica tenant set")
- return "", nil
- }
-
- var zoneCount int
- zoneSQL := `select count(distinct(zone)) as count from oceanbase.__all_zone where zone!=''`
- err := mgr.DB.QueryRowContext(ctx, zoneSQL).Scan(&zoneCount)
- if err != nil {
- mgr.Logger.Info("query zone info failed", "error", err)
- return "", err
- }
-
- if zoneCount > 1 {
- mgr.Logger.Info("zone count is more than 1, return no role", "zone count", zoneCount)
- return "", nil
- }
-
- sql := fmt.Sprintf("SELECT TENANT_ROLE FROM oceanbase.DBA_OB_TENANTS where TENANT_NAME='%s'", mgr.ReplicaTenant)
-
- db := mgr.DB
- if member != nil && member.Name != mgr.CurrentMemberName && cluster != nil {
- db, err = config.GetMemberRootDBConn(cluster, member)
- if err != nil {
- return "", errors.Wrap(err, "new db connection failed")
- }
- }
-
- rows, err := db.QueryContext(ctx, sql)
- if err != nil {
- mgr.Logger.Info("error executing", "sql", sql, "error", err.Error())
- return "", errors.Wrapf(err, "error executing %s", sql)
- }
-
- defer func() {
- _ = rows.Close()
- _ = rows.Err()
- }()
-
- var role string
- var isReady bool
- for rows.Next() {
- if err = rows.Scan(&role); err != nil {
- mgr.Logger.Info("Role query failed", "error", err.Error())
- return role, err
- }
- isReady = true
- }
- if isReady {
- return role, nil
- }
- mgr.Logger.Info("no data returned", "sql", sql)
- return "", nil
-}
diff --git a/engines/oceanbase/get_replica_role_test.go b/engines/oceanbase/get_replica_role_test.go
deleted file mode 100644
index ca16a34..0000000
--- a/engines/oceanbase/get_replica_role_test.go
+++ /dev/null
@@ -1,106 +0,0 @@
-/*
-Copyright (C) 2022-2023 ApeCloud Co., Ltd
-
-This file is part of KubeBlocks project
-
-This program is free software: you can redistribute it and/or modify
-it under the terms of the GNU Affero General Public License as published by
-the Free Software Foundation, either version 3 of the License, or
-(at your option) any later version.
-
-This program is distributed in the hope that it will be useful
-but WITHOUT ANY WARRANTY; without even the implied warranty of
-MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-GNU Affero General Public License for more details.
-
-You should have received a copy of the GNU Affero General Public License
-along with this program. If not, see .
-*/
-
-package oceanbase
-
-import (
- "context"
- "fmt"
- "testing"
-
- "github.com/DATA-DOG/go-sqlmock"
- "github.com/spf13/viper"
- "github.com/stretchr/testify/assert"
- ctrl "sigs.k8s.io/controller-runtime"
-
- "github.com/apecloud/dbctl/engines"
- "github.com/apecloud/dbctl/engines/mysql"
-)
-
-const (
- fakePodName = "test-ob-0"
- fakeClusterCompName = "test-ob"
- fakeNamespace = "fake-namespace"
-)
-
-func mockDatabase(t *testing.T) (*Manager, sqlmock.Sqlmock, error) {
- manager := &Manager{
- Manager: mysql.Manager{
- DBManagerBase: engines.DBManagerBase{
- CurrentMemberName: fakePodName,
- ClusterCompName: fakeClusterCompName,
- Namespace: fakeNamespace,
- Logger: ctrl.Log.WithName("ob-TEST"),
- },
- },
- }
-
- manager.ReplicaTenant = viper.GetString("TENANT_NAME")
- db, mock, err := sqlmock.New(sqlmock.MonitorPingsOption(true))
- if err != nil {
- t.Fatalf("an error '%s' was not expected when opening a stub database connection", err)
- }
- manager.DB = db
-
- return manager, mock, err
-}
-
-func TestGetRole(t *testing.T) {
- ctx := context.TODO()
- viper.SetDefault("TENANT_NAME", "alice")
- manager, mock, _ := mockDatabase(t)
-
- t.Run("error executing sql", func(t *testing.T) {
- mock.ExpectQuery(`select count\(distinct\(zone\)\) as count from oceanbase.__all_zone where zone!=''`).
- WillReturnRows(sqlmock.NewRows([]string{"count"}).AddRow(1))
- mock.ExpectQuery("SELECT TENANT_ROLE FROM oceanbase.DBA_OB_TENANTS where TENANT_NAME='alice'").
- WillReturnError(fmt.Errorf("some error"))
-
- role, err := manager.GetReplicaRole(ctx)
- assert.Equal(t, "", role)
- assert.NotNil(t, err)
- assert.ErrorContains(t, err, "some error")
- })
-
- t.Run("scan rows failed", func(t *testing.T) {
- mock.ExpectQuery(`select count\(distinct\(zone\)\) as count from oceanbase.__all_zone where zone!=''`).
- WillReturnRows(sqlmock.NewRows([]string{"count"}).AddRow(1))
- mock.ExpectQuery("SELECT TENANT_ROLE FROM oceanbase.DBA_OB_TENANTS where TENANT_NAME='alice'").
- WillReturnRows(sqlmock.NewRows([]string{"TENANT_ROLE"}).AddRow(PRIMARY))
-
- role, err := manager.GetReplicaRole(ctx)
- assert.Equal(t, PRIMARY, role)
- assert.Nil(t, err)
- })
-
- t.Run("no data returned", func(t *testing.T) {
- mock.ExpectQuery(`select count\(distinct\(zone\)\) as count from oceanbase.__all_zone where zone!=''`).
- WillReturnRows(sqlmock.NewRows([]string{"count"}).AddRow(1))
- mock.ExpectQuery("SELECT TENANT_ROLE FROM oceanbase.DBA_OB_TENANTS where TENANT_NAME='alice'").
- WillReturnRows(sqlmock.NewRows([]string{"ROLE"}))
-
- role, err := manager.GetReplicaRole(ctx)
- assert.Equal(t, "", role)
- assert.Nil(t, err)
- })
-
- if err := mock.ExpectationsWereMet(); err != nil {
- t.Errorf("there were unfulfilled expectations: %v", err)
- }
-}
diff --git a/engines/oceanbase/manager.go b/engines/oceanbase/manager.go
deleted file mode 100644
index d8c564a..0000000
--- a/engines/oceanbase/manager.go
+++ /dev/null
@@ -1,454 +0,0 @@
-/*
-Copyright (C) 2022-2023 ApeCloud Co., Ltd
-
-This file is part of KubeBlocks project
-
-This program is free software: you can redistribute it and/or modify
-it under the terms of the GNU Affero General Public License as published by
-the Free Software Foundation, either version 3 of the License, or
-(at your option) any later version.
-
-This program is distributed in the hope that it will be useful
-but WITHOUT ANY WARRANTY; without even the implied warranty of
-MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-GNU Affero General Public License for more details.
-
-You should have received a copy of the GNU Affero General Public License
-along with this program. If not, see .
-*/
-
-package oceanbase
-
-import (
- "context"
- "database/sql"
- "fmt"
- "os"
- "strconv"
- "strings"
- "time"
-
- "github.com/pkg/errors"
- "github.com/spf13/viper"
- ctrl "sigs.k8s.io/controller-runtime"
-
- "github.com/apecloud/dbctl/dcs"
- "github.com/apecloud/dbctl/engines"
- "github.com/apecloud/dbctl/engines/mysql"
- "github.com/apecloud/dbctl/util"
-)
-
-const (
- Role = "ROLE"
- CurrentLeader = "CURRENT_LEADER"
- PRIMARY = "PRIMARY"
- STANDBY = "STANDBY"
-
- repUser = "rep_user"
- repPassword = "rep_user"
- normalStatus = "NORMAL"
- MYSQL = "MYSQL"
- ORACLE = "ORACLE"
-)
-
-type Manager struct {
- mysql.Manager
- ReplicaTenant string
- CompatibilityMode string
- Members []dcs.Member
- MaxLag int64
-}
-
-var _ engines.DBManager = &Manager{}
-
-func NewManager(properties engines.Properties) (engines.DBManager, error) {
- logger := ctrl.Log.WithName("Oceanbase")
- config, err := NewConfig(properties)
- if err != nil {
- return nil, err
- }
-
- managerBase, err := engines.NewDBManagerBase(logger)
- if err != nil {
- return nil, err
- }
-
- db, err := config.GetLocalDBConn()
- if err != nil {
- return nil, errors.Wrap(err, "connect to Oceanbase failed")
- }
-
- mgr := &Manager{
- Manager: mysql.Manager{
- DBManagerBase: *managerBase,
- DB: db,
- },
- }
- mgr.ReplicaTenant = viper.GetString("TENANT_NAME")
- if mgr.ReplicaTenant == "" {
- return nil, errors.New("replica tenant is not set")
- }
- return mgr, nil
-}
-
-func (mgr *Manager) IsClusterInitialized(ctx context.Context, cluster *dcs.Cluster) (bool, error) {
- time.Sleep(120 * time.Second)
- return true, nil
-}
-
-func (mgr *Manager) InitializeCluster(context.Context, *dcs.Cluster) error {
- return nil
-}
-
-func (mgr *Manager) IsLeader(ctx context.Context, cluster *dcs.Cluster) (bool, error) {
- return mgr.IsLeaderMember(ctx, cluster, nil)
-}
-
-func (mgr *Manager) IsLeaderMember(ctx context.Context, cluster *dcs.Cluster, member *dcs.Member) (bool, error) {
- role, err := mgr.GetReplicaRoleForMember(ctx, cluster, member)
-
- if err != nil {
- return false, err
- }
-
- if strings.EqualFold(role, PRIMARY) {
- return true, nil
- }
-
- return false, nil
-}
-
-func (mgr *Manager) HasOtherHealthyLeader(ctx context.Context, cluster *dcs.Cluster) *dcs.Member {
- isLeader, err := mgr.IsLeader(ctx, cluster)
- if err == nil && isLeader {
- // if current member is leader, just return
- return nil
- }
-
- for _, member := range cluster.Members {
- if member.Name == mgr.CurrentMemberName {
- continue
- }
-
- isLeader, err := mgr.IsLeaderMember(ctx, cluster, &member)
- if err == nil && isLeader {
- return &member
- }
- }
-
- return nil
-}
-
-func (mgr *Manager) GetCompatibilityMode(ctx context.Context) (string, error) {
- if mgr.CompatibilityMode != "" {
- return mgr.CompatibilityMode, nil
- }
- sql := fmt.Sprintf("SELECT COMPATIBILITY_MODE FROM oceanbase.DBA_OB_TENANTS where TENANT_NAME='%s'", mgr.ReplicaTenant)
- err := mgr.DB.QueryRowContext(ctx, sql).Scan(&mgr.CompatibilityMode)
- if err != nil {
- return "", errors.Wrap(err, "query compatibility mode failed")
- }
- return mgr.CompatibilityMode, nil
-}
-
-func (mgr *Manager) MemberHealthyCheck(ctx context.Context, cluster *dcs.Cluster, member *dcs.Member) error {
- compatibilityMode, err := mgr.GetCompatibilityMode(ctx)
- if err != nil {
- return errors.Wrap(err, "compatibility mode unknown")
- }
- switch compatibilityMode {
- case MYSQL:
- return mgr.HealthyCheckForMySQLMode(ctx, cluster, member)
- case ORACLE:
- return mgr.HealthyCheckForOracleMode(ctx, cluster, member)
- default:
- return errors.Errorf("compatibility mode not supported: [%s]", compatibilityMode)
- }
-}
-
-func (mgr *Manager) IsCurrentMemberHealthy(ctx context.Context, cluster *dcs.Cluster) bool {
- err := mgr.CurrentMemberHealthyCheck(ctx, cluster)
- if err != nil {
- mgr.Logger.Info("current member is unhealthy", "error", err.Error())
- return false
- }
- return true
-}
-
-func (mgr *Manager) CurrentMemberHealthyCheck(ctx context.Context, cluster *dcs.Cluster) error {
- member := cluster.GetMemberWithName(mgr.CurrentMemberName)
- return mgr.MemberHealthyCheck(ctx, cluster, member)
-}
-
-func (mgr *Manager) LeaderHealthyCheck(ctx context.Context, cluster *dcs.Cluster) error {
- members := cluster.Members
- for _, member := range members {
- if isLeader, _ := mgr.IsLeaderMember(ctx, cluster, &member); isLeader {
- return mgr.MemberHealthyCheck(ctx, cluster, &member)
- }
- }
-
- return errors.New("no leader found")
-}
-
-func (mgr *Manager) HealthyCheckForMySQLMode(ctx context.Context, cluster *dcs.Cluster, member *dcs.Member) error {
- isLeader, err := mgr.IsLeaderMember(ctx, cluster, member)
- if err != nil {
- return err
- }
- addr := cluster.GetMemberAddrWithPort(*member)
- db, err := mgr.GetMySQLDBConnWithAddr(addr)
- if err != nil {
- return err
- }
- if isLeader {
- err = mgr.WriteCheck(ctx, db)
- if err != nil {
- return err
- }
- }
- err = mgr.ReadCheck(ctx, db)
- if err != nil {
- return err
- }
-
- return nil
-}
-
-func (mgr *Manager) WriteCheck(ctx context.Context, db *sql.DB) error {
- writeSQL := fmt.Sprintf(`BEGIN;
-CREATE DATABASE IF NOT EXISTS kubeblocks;
-CREATE TABLE IF NOT EXISTS kubeblocks.kb_health_check(type INT, check_ts BIGINT, PRIMARY KEY(type));
-INSERT INTO kubeblocks.kb_health_check VALUES(%d, UNIX_TIMESTAMP()) ON DUPLICATE KEY UPDATE check_ts = UNIX_TIMESTAMP();
-COMMIT;`, engines.CheckStatusType)
- opTimestamp, _ := mgr.GetOpTimestamp(ctx, db)
- if opTimestamp != 0 {
- // if op timestamp is not 0, it means the table is ready created
- writeSQL = fmt.Sprintf(`
- INSERT INTO kubeblocks.kb_health_check VALUES(%d, UNIX_TIMESTAMP()) ON DUPLICATE KEY UPDATE check_ts = UNIX_TIMESTAMP();
- `, engines.CheckStatusType)
- }
- _, err := db.ExecContext(ctx, writeSQL)
- if err != nil {
- return errors.Wrap(err, "Write check failed")
- }
- return nil
-}
-
-func (mgr *Manager) HealthyCheckForOracleMode(ctx context.Context, cluster *dcs.Cluster, member *dcs.Member) error {
- // there is no golang driver for oceanbase oracle mode, so use mysql client to check
- isLeader, err := mgr.IsLeaderMember(ctx, cluster, member)
- if err != nil {
- return err
- }
- mgr.Logger.Info("check member", "isLeader", isLeader)
- // lorry has no mysql client
- // if isLeader {
- // cmd := []string{"mysql", "-h", member.PodIP, "-P", member.DBPort, "-u", "SYS@" + mgr.ReplicaTenant, "-e", "SELECT t.table_name tablename FROM user_tables t WHERE table_name = 'KB_HEALTH_CHECK'"}
- // output, err := util.ExecCommand(ctx, cmd, os.Environ())
- // if err != nil {
- // return errors.Wrap(err, "check table failed")
- // }
- // if !strings.Contains(output, "KB_HEALTH_CHECK") {
- // sql := "create table kb_health_check (type int primary key, check_ts NUMBER);"
- // sql += fmt.Sprintf("INSERT INTO kb_health_check (type, check_ts) VALUES (1, %d);", time.Now().Unix())
- // sql += "commit;"
- // cmd = []string{"mysql", "-h", member.PodIP, "-P", member.DBPort, "-u", "SYS@" + mgr.ReplicaTenant, "-e", sql}
- // _, err = util.ExecCommand(ctx, cmd, os.Environ())
- // if err != nil {
- // return errors.Wrap(err, "create table failed")
- // }
- // }
- // sql := fmt.Sprintf("UPDATE kb_health_check SET check_ts = %d WHERE type=1;", time.Now().Unix())
- // sql += "commit;"
- // cmd = []string{"mysql", "-h", member.PodIP, "-P", member.DBPort, "-u", "SYS@" + mgr.ReplicaTenant, "-e", sql}
- // _, err = util.ExecCommand(ctx, cmd, os.Environ())
- // if err != nil {
- // return errors.Wrap(err, "create table failed")
- // }
- // }
-
- // sql := "SELECT check_ts from kb_health_check WHERE type=1;"
- // cmd := []string{"mysql", "-h", member.PodIP, "-P", member.DBPort, "-u", "SYS@" + mgr.ReplicaTenant, "-e", sql}
- // _, err = util.ExecCommand(ctx, cmd, os.Environ())
- // if err != nil {
- // return errors.Wrap(err, "create table failed")
- // }
- return nil
-}
-
-func (mgr *Manager) IsMemberHealthy(ctx context.Context, cluster *dcs.Cluster, member *dcs.Member) bool {
- err := mgr.MemberHealthyCheck(ctx, cluster, member)
- if err != nil {
- mgr.Logger.Info("member is unhealthy", "error", err.Error())
- return false
- }
- return true
-}
-
-func (mgr *Manager) IsMemberLagging(ctx context.Context, cluster *dcs.Cluster, member *dcs.Member) (bool, int64) {
- var leaderOpTimestamp int64
- if cluster.Leader == nil || cluster.Leader.DBState == nil {
- mgr.Logger.Info("leader's db state is nil, maybe leader is not ready yet")
- return false, 0
- }
- leaderOpTimestamp = cluster.Leader.DBState.OpTimestamp
- if leaderOpTimestamp == 0 {
- mgr.Logger.Info("leader's op timestamp is 0")
- return true, 0
- }
-
- opTimestamp, err := mgr.GetMemberOpTimestamp(ctx, cluster, member)
- if err != nil {
- mgr.Logger.Info("get op timestamp failed", "error", err.Error())
- return true, 0
- }
- lag := leaderOpTimestamp - opTimestamp
- if lag > mgr.MaxLag {
- mgr.Logger.Info("member is lagging", "opTimestamp", opTimestamp, "leaderOpTimestamp", leaderOpTimestamp)
- return true, lag
- }
- return false, lag
-}
-
-func (mgr *Manager) GetDBState(ctx context.Context, cluster *dcs.Cluster) *dcs.DBState {
- mgr.DBState = nil
- member := cluster.GetMemberWithName(mgr.CurrentMemberName)
- opTimestamp, err := mgr.GetMemberOpTimestamp(ctx, cluster, member)
- if err != nil {
- mgr.Logger.Info("get op timestamp failed", "error", err)
- return nil
- }
- mgr.DBState = &dcs.DBState{
- OpTimestamp: opTimestamp,
- }
- return mgr.DBState
-}
-
-func (mgr *Manager) GetMemberOpTimestamp(ctx context.Context, cluster *dcs.Cluster, member *dcs.Member) (int64, error) {
- compatibilityMode, err := mgr.GetCompatibilityMode(ctx)
- if err != nil {
- return 0, errors.Wrap(err, "compatibility mode unknown")
- }
- if compatibilityMode == ORACLE {
- sql := "SELECT check_ts from kb_health_check WHERE type=1;"
- cmd := []string{"mysql", "-h", member.PodIP, "-P", member.DBPort, "-u", "SYS@" + mgr.ReplicaTenant, "-e", sql}
- output, err := util.ExecCommand(ctx, cmd, os.Environ())
- if err != nil {
- return 0, errors.Wrap(err, "get timestamp failed")
- }
- stimeStamp := strings.Split(output, "\n")
- if len(stimeStamp) < 2 {
- return 0, nil
- }
- return strconv.ParseInt(stimeStamp[1], 10, 64)
- }
- addr := cluster.GetMemberAddrWithPort(*member)
- db, err := mgr.GetMySQLDBConnWithAddr(addr)
- if err != nil {
- mgr.Logger.Info("get db connection failed", "error", err.Error())
- return 0, err
- }
- return mgr.GetOpTimestamp(ctx, db)
-}
-
-func (mgr *Manager) Promote(ctx context.Context, cluster *dcs.Cluster) error {
- db := mgr.DB
- isLeader, err := mgr.IsLeader(ctx, nil)
- if err != nil {
- return errors.Wrap(err, "leader check failed")
- }
- if isLeader {
- return nil
- }
- // if there is no switchover, it's a failover: old leader is down, we need to promote a new leader, and the old leader can't be used anymore.
- primaryTenant := "ALTER SYSTEM ACTIVATE STANDBY TENANT = " + mgr.ReplicaTenant
- if cluster.Switchover != nil {
- // it's a manual switchover
- mgr.Logger.Info("manual switchover")
- primaryTenant = "ALTER SYSTEM SWITCHOVER TO PRIMARY TENANT = " + mgr.ReplicaTenant
- } else {
- mgr.Logger.Info("unexpected switchover, promote to primary directly")
- }
-
- _, err = db.Exec(primaryTenant)
- if err != nil {
- mgr.Logger.Info("activate standby tenant failed", "error", err)
- return err
- }
-
- var tenantRole, roleStatus string
- queryTenant := fmt.Sprintf("SELECT TENANT_ROLE, SWITCHOVER_STATUS FROM oceanbase.DBA_OB_TENANTS where TENANT_NAME='%s'", mgr.ReplicaTenant)
- for {
- err := db.QueryRowContext(ctx, queryTenant).Scan(&tenantRole, &roleStatus)
- if err != nil {
- return errors.Wrap(err, "query tenant role failed")
- }
-
- if tenantRole == PRIMARY && roleStatus == normalStatus {
- break
- }
- time.Sleep(time.Second)
- }
-
- return nil
-}
-
-func (mgr *Manager) Demote(ctx context.Context) error {
- db := mgr.DB
- standbyTenant := "ALTER SYSTEM SWITCHOVER TO STANDBY TENANT = " + mgr.ReplicaTenant
- _, err := db.Exec(standbyTenant)
- if err != nil {
- return errors.Wrap(err, "standby primary tenant failed")
- }
-
- var tenantRole, roleStatus string
- queryTenant := fmt.Sprintf("SELECT TENANT_ROLE, SWITCHOVER_STATUS FROM oceanbase.DBA_OB_TENANTS where TENANT_NAME='%s'", mgr.ReplicaTenant)
- for {
- err := db.QueryRowContext(ctx, queryTenant).Scan(&tenantRole, &roleStatus)
- if err != nil {
- return errors.Wrap(err, "query tenant role failed")
- }
-
- if tenantRole == STANDBY && roleStatus == normalStatus {
- break
- }
- time.Sleep(time.Second)
- }
-
- return nil
-}
-
-func (mgr *Manager) Follow(ctx context.Context, cluster *dcs.Cluster) error {
- leaderMember := cluster.GetLeaderMember()
- if leaderMember == nil {
- return errors.New("no leader found")
- }
- sourceAddr := leaderMember.PodIP + ":" + leaderMember.DBPort
- db := mgr.DB
-
- sql := fmt.Sprintf("ALTER SYSTEM SET LOG_RESTORE_SOURCE = 'SERVICE=%s USER=%s@%s PASSWORD=%s' TENANT = %s",
- sourceAddr, repUser, mgr.ReplicaTenant, repPassword, mgr.ReplicaTenant)
- _, err := db.Exec(sql)
- if err != nil {
- mgr.Logger.Info(sql+" failed", "error", err) //nolint:goconst
- return err
- }
-
- time.Sleep(time.Second)
- var scn int64
- queryTenant := fmt.Sprintf("SELECT RECOVERY_UNTIL_SCN FROM oceanbase.DBA_OB_TENANTS where TENANT_NAME='%s'", mgr.ReplicaTenant)
- for {
- err := db.QueryRowContext(ctx, queryTenant).Scan(&scn)
- if err != nil {
- mgr.Logger.Info("query zone info failed", "error", err)
- return err
- }
-
- if scn == 4611686018427387903 {
- break
- }
- time.Sleep(time.Second)
- }
- return nil
-}
diff --git a/engines/oceanbase/suite_test.go b/engines/oceanbase/suite_test.go
deleted file mode 100644
index 98fd4e0..0000000
--- a/engines/oceanbase/suite_test.go
+++ /dev/null
@@ -1,32 +0,0 @@
-/*
-Copyright (C) 2022-2023 ApeCloud Co., Ltd
-
-This file is part of KubeBlocks project
-
-This program is free software: you can redistribute it and/or modify
-it under the terms of the GNU Affero General Public License as published by
-the Free Software Foundation, either version 3 of the License, or
-(at your option) any later version.
-
-This program is distributed in the hope that it will be useful
-but WITHOUT ANY WARRANTY; without even the implied warranty of
-MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-GNU Affero General Public License for more details.
-
-You should have received a copy of the GNU Affero General Public License
-along with this program. If not, see .
-*/
-
-package oceanbase
-
-import (
- "testing"
-
- . "github.com/onsi/ginkgo/v2"
- . "github.com/onsi/gomega"
-)
-
-func TestEngine(t *testing.T) {
- RegisterFailHandler(Fail)
- RunSpecs(t, "oceanbase Suite")
-}
diff --git a/engines/polardbx/config.go b/engines/polardbx/config.go
deleted file mode 100644
index d13b1a5..0000000
--- a/engines/polardbx/config.go
+++ /dev/null
@@ -1,41 +0,0 @@
-/*
-Copyright (C) 2022-2023 ApeCloud Co., Ltd
-
-This file is part of KubeBlocks project
-
-This program is free software: you can redistribute it and/or modify
-it under the terms of the GNU Affero General Public License as published by
-the Free Software Foundation, either version 3 of the License, or
-(at your option) any later version.
-
-This program is distributed in the hope that it will be useful
-but WITHOUT ANY WARRANTY; without even the implied warranty of
-MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-GNU Affero General Public License for more details.
-
-You should have received a copy of the GNU Affero General Public License
-along with this program. If not, see .
-*/
-
-package polardbx
-
-import (
- "github.com/apecloud/dbctl/engines/mysql"
-)
-
-type Config struct {
- *mysql.Config
-}
-
-var config *Config
-
-func NewConfig(properties map[string]string) (*Config, error) {
- mysqlConfig, err := mysql.NewConfig(properties)
- if err != nil {
- return nil, err
- }
- config = &Config{
- Config: mysqlConfig,
- }
- return config, nil
-}
diff --git a/engines/polardbx/manager.go b/engines/polardbx/manager.go
index 77341a2..123c14a 100644
--- a/engines/polardbx/manager.go
+++ b/engines/polardbx/manager.go
@@ -26,19 +26,15 @@ import (
"github.com/apecloud/dbctl/engines/mysql"
)
-const (
- PolardbXServiceType = "polardbx"
-)
-
type Manager struct {
mysql.Manager
}
var _ engines.DBManager = &Manager{}
-func NewManager(properties engines.Properties) (engines.DBManager, error) {
+func NewManager() (engines.DBManager, error) {
logger := ctrl.Log.WithName("PolarDBX")
- mysqlMgr, err := mysql.NewManager(properties)
+ mysqlMgr, err := mysql.NewManager()
if err != nil {
return nil, err
}
diff --git a/engines/postgres/apecloudpostgres/manager.go b/engines/postgres/apecloudpostgres/manager.go
index 580cff2..e400eec 100644
--- a/engines/postgres/apecloudpostgres/manager.go
+++ b/engines/postgres/apecloudpostgres/manager.go
@@ -21,34 +21,27 @@ package apecloudpostgres
import (
"context"
- "fmt"
"strings"
- "time"
"github.com/pkg/errors"
"github.com/spf13/cast"
- "github.com/spf13/viper"
- "github.com/apecloud/dbctl/dcs"
"github.com/apecloud/dbctl/engines"
- "github.com/apecloud/dbctl/engines/models"
"github.com/apecloud/dbctl/engines/postgres"
)
type Manager struct {
postgres.Manager
- memberAddrs []string
- healthStatus *postgres.ConsensusMemberHealthStatus
}
var _ engines.DBManager = &Manager{}
var Mgr *Manager
-func NewManager(properties engines.Properties) (engines.DBManager, error) {
+func NewManager() (engines.DBManager, error) {
Mgr = &Manager{}
- baseManager, err := postgres.NewManager(properties)
+ baseManager, err := postgres.NewManager()
if err != nil {
return nil, errors.Errorf("new base manager failed, err: %v", err)
}
@@ -57,61 +50,6 @@ func NewManager(properties engines.Properties) (engines.DBManager, error) {
return Mgr, nil
}
-func (mgr *Manager) IsLeader(ctx context.Context, _ *dcs.Cluster) (bool, error) {
- isSet, isLeader := mgr.GetIsLeader()
- if isSet {
- return isLeader, nil
- }
-
- return mgr.IsLeaderWithHost(ctx, "")
-}
-
-func (mgr *Manager) IsLeaderWithHost(ctx context.Context, host string) (bool, error) {
- role, err := mgr.GetMemberRoleWithHost(ctx, host)
- if err != nil {
- return false, errors.Errorf("check is leader with host:%s failed, err:%v", host, err)
- }
-
- return role == strings.ToLower(models.LEADER), nil
-}
-
-func (mgr *Manager) IsDBStartupReady() bool {
- ctx, cancel := context.WithTimeout(context.Background(), 500*time.Millisecond)
- defer cancel()
- if mgr.DBStartupReady {
- return true
- }
-
- if !mgr.IsPgReady(ctx) {
- return false
- }
-
- if !mgr.isConsensusReadyUp(ctx) {
- return false
- }
-
- mgr.DBStartupReady = true
- mgr.Logger.Info("DB startup ready")
- return true
-}
-
-func (mgr *Manager) isConsensusReadyUp(ctx context.Context) bool {
- sql := `SELECT extname FROM pg_extension WHERE extname = 'consensus_monitor';`
- resp, err := mgr.Query(ctx, sql)
- if err != nil {
- mgr.Logger.Error(err, fmt.Sprintf("query sql:%s failed", sql))
- return false
- }
-
- resMap, err := postgres.ParseQuery(string(resp))
- if err != nil {
- mgr.Logger.Error(err, fmt.Sprintf("parse query response:%s failed", string(resp)))
- return false
- }
-
- return resMap[0]["extname"] != nil
-}
-
func (mgr *Manager) GetMemberRoleWithHost(ctx context.Context, host string) (string, error) {
sql := `select role from consensus_member_status;`
@@ -127,86 +65,3 @@ func (mgr *Manager) GetMemberRoleWithHost(ctx context.Context, host string) (str
return strings.ToLower(cast.ToString(resMap[0]["role"])), nil
}
-
-// IsMemberHealthy firstly get the leader's connection pool,
-// because only leader can get the cluster healthy view
-func (mgr *Manager) IsMemberHealthy(ctx context.Context, cluster *dcs.Cluster, member *dcs.Member) bool {
- healthStatus, err := mgr.getMemberHealthStatus(ctx, cluster, member)
- if errors.Is(err, postgres.ClusterHasNoLeader) {
- mgr.Logger.Info("cluster has no leader, will compete the leader lock")
- return true
- } else if err != nil {
- mgr.Logger.Error(err, "check member healthy failed")
- return false
- }
-
- return healthStatus.Connected
-}
-
-func (mgr *Manager) getMemberHealthStatus(ctx context.Context, cluster *dcs.Cluster, member *dcs.Member) (*postgres.ConsensusMemberHealthStatus, error) {
- if mgr.DBState != nil && mgr.healthStatus != nil {
- return mgr.healthStatus, nil
- }
- res := &postgres.ConsensusMemberHealthStatus{}
-
- IPPort := mgr.Config.GetConsensusIPPort(cluster, member.Name)
- sql := fmt.Sprintf(`select connected, log_delay_num from consensus_cluster_health where ip_port = '%s';`, IPPort)
- resp, err := mgr.QueryLeader(ctx, sql, cluster)
- if err != nil {
- return nil, err
- }
-
- resMap, err := postgres.ParseQuery(string(resp))
- if err != nil {
- return nil, err
- }
-
- if resMap[0]["connected"] != nil {
- res.Connected = cast.ToBool(resMap[0]["connected"])
- }
- if resMap[0]["log_delay_num"] != nil {
- res.LogDelayNum = cast.ToInt64(resMap[0]["log_delay_num"])
- }
-
- return res, nil
-}
-
-func (mgr *Manager) JoinCurrentMemberToCluster(ctx context.Context, cluster *dcs.Cluster) error {
- // use the env KB_POD_FQDN consistently with the startup script
- sql := fmt.Sprintf(`alter system consensus add follower '%s:%d';`,
- viper.GetString("KB_POD_FQDN"), mgr.Config.GetDBPort())
-
- _, err := mgr.ExecLeader(ctx, sql, cluster)
- if err != nil {
- mgr.Logger.Error(err, fmt.Sprintf("exec sql:%s failed", sql))
- return err
- }
-
- return nil
-}
-
-func (mgr *Manager) LeaveMemberFromCluster(ctx context.Context, _ *dcs.Cluster, host string) error {
- sql := fmt.Sprintf(`alter system consensus drop follower '%s:%d';`,
- host, mgr.Config.GetDBPort())
-
- // only leader can delete member, so don't need to get pool
- _, err := mgr.ExecWithHost(ctx, sql, "")
- if err != nil {
- mgr.Logger.Error(err, fmt.Sprintf("exec sql:%s failed", sql))
- return err
- }
-
- return nil
-}
-
-func (mgr *Manager) HasOtherHealthyMembers(ctx context.Context, cluster *dcs.Cluster, leader string) []*dcs.Member {
- members := make([]*dcs.Member, 0)
-
- for i, m := range cluster.Members {
- if m.Name != leader && mgr.IsMemberHealthy(ctx, cluster, &m) {
- members = append(members, &cluster.Members[i])
- }
- }
-
- return members
-}
diff --git a/engines/postgres/apecloudpostgres/manager_test.go b/engines/postgres/apecloudpostgres/manager_test.go
index 1a51b65..a756777 100644
--- a/engines/postgres/apecloudpostgres/manager_test.go
+++ b/engines/postgres/apecloudpostgres/manager_test.go
@@ -30,17 +30,12 @@ import (
"github.com/stretchr/testify/assert"
"github.com/apecloud/dbctl/constant"
- "github.com/apecloud/dbctl/dcs"
- "github.com/apecloud/dbctl/engines"
"github.com/apecloud/dbctl/engines/models"
"github.com/apecloud/dbctl/engines/postgres"
)
func MockDatabase(t *testing.T) (*Manager, pgxmock.PgxPoolIface, error) {
- properties := map[string]string{
- postgres.ConnectionURLKey: "user=test password=test host=localhost port=5432 dbname=postgres",
- }
- testConfig, err := postgres.NewConfig(properties)
+ testConfig, err := postgres.NewConfig()
assert.NotNil(t, testConfig)
assert.Nil(t, err)
@@ -53,7 +48,7 @@ func MockDatabase(t *testing.T) (*Manager, pgxmock.PgxPoolIface, error) {
t.Fatal(err)
}
- dbManager, err := NewManager(engines.Properties(properties))
+ dbManager, err := NewManager()
if err != nil {
t.Fatal(err)
}
@@ -64,85 +59,6 @@ func MockDatabase(t *testing.T) (*Manager, pgxmock.PgxPoolIface, error) {
return manager, mock, err
}
-func TestIsConsensusReadyUp(t *testing.T) {
- ctx := context.TODO()
- manager, mock, _ := MockDatabase(t)
- defer mock.Close()
-
- t.Run("consensus has been ready up", func(t *testing.T) {
- mock.ExpectQuery("SELECT extname FROM pg_extension").
- WillReturnRows(pgxmock.NewRows([]string{"extname"}).AddRow("consensus_monitor"))
-
- isReadyUp := manager.isConsensusReadyUp(ctx)
- assert.True(t, isReadyUp)
- })
-
- t.Run("consensus has not been ready up", func(t *testing.T) {
- mock.ExpectQuery("SELECT extname FROM pg_extension").
- WillReturnRows(pgxmock.NewRows([]string{"extname"}))
-
- isReadyUp := manager.isConsensusReadyUp(ctx)
- assert.False(t, isReadyUp)
- })
-
- t.Run("query pg_extension error", func(t *testing.T) {
- mock.ExpectQuery("SELECT extname FROM pg_extension").
- WillReturnError(fmt.Errorf("some errors"))
-
- isReadyUp := manager.isConsensusReadyUp(ctx)
- assert.False(t, isReadyUp)
- })
-
- if err := mock.ExpectationsWereMet(); err != nil {
- t.Errorf("there were unfulfilled expectations: %v", err)
- }
-}
-
-func TestIsDBStartupReady(t *testing.T) {
- manager, mock, _ := MockDatabase(t)
- defer mock.Close()
-
- t.Run("db start up has been set", func(t *testing.T) {
- manager.DBStartupReady = true
-
- isReady := manager.IsDBStartupReady()
- assert.True(t, isReady)
- })
-
- t.Run("ping db failed", func(t *testing.T) {
- manager.DBStartupReady = false
- mock.ExpectPing().
- WillReturnError(fmt.Errorf("some error"))
-
- isReady := manager.IsDBStartupReady()
- assert.False(t, isReady)
- })
-
- t.Run("ping db success but consensus not ready up", func(t *testing.T) {
- manager.DBStartupReady = false
- mock.ExpectPing()
- mock.ExpectQuery("SELECT extname FROM pg_extension").
- WillReturnRows(pgxmock.NewRows([]string{"extname"}))
-
- isReady := manager.IsDBStartupReady()
- assert.False(t, isReady)
- })
-
- t.Run("db is startup ready", func(t *testing.T) {
- manager.DBStartupReady = false
- mock.ExpectPing()
- mock.ExpectQuery("SELECT extname FROM pg_extension").
- WillReturnRows(pgxmock.NewRows([]string{"extname"}).AddRow("consensus_monitor"))
-
- isReady := manager.IsDBStartupReady()
- assert.True(t, isReady)
- })
-
- if err := mock.ExpectationsWereMet(); err != nil {
- t.Errorf("there were unfulfilled expectations: %v", err)
- }
-}
-
func TestGetMemberRoleWithHost(t *testing.T) {
ctx := context.TODO()
manager, mock, _ := MockDatabase(t)
@@ -182,196 +98,3 @@ func TestGetMemberRoleWithHost(t *testing.T) {
t.Errorf("there were unfulfilled expectations: %v", err)
}
}
-
-func TestIsLeaderWithHost(t *testing.T) {
- ctx := context.TODO()
- manager, mock, _ := MockDatabase(t)
- defer mock.Close()
-
- t.Run("get member role with host failed", func(t *testing.T) {
- mock.ExpectQuery("select role from consensus_member_status;").
- WillReturnError(fmt.Errorf("some error"))
-
- isLeader, err := manager.IsLeaderWithHost(ctx, "")
- assert.False(t, isLeader)
- assert.NotNil(t, err)
- })
-
- t.Run("check is leader success", func(t *testing.T) {
- mock.ExpectQuery("select role from consensus_member_status;").
- WillReturnRows(pgxmock.NewRows([]string{"role"}).AddRow("Leader"))
-
- isLeader, err := manager.IsLeaderWithHost(ctx, "")
- assert.True(t, isLeader)
- assert.Nil(t, err)
- })
-
- if err := mock.ExpectationsWereMet(); err != nil {
- t.Errorf("there were unfulfilled expectations: %v", err)
- }
-}
-
-func TestIsLeader(t *testing.T) {
- ctx := context.TODO()
- manager, mock, _ := MockDatabase(t)
- defer mock.Close()
-
- t.Run("is leader has been set", func(t *testing.T) {
- manager.SetIsLeader(true)
-
- isLeader, err := manager.IsLeader(ctx, nil)
- assert.True(t, isLeader)
- assert.Nil(t, err)
- })
-
- t.Run("is leader has not been set", func(t *testing.T) {
- manager.UnsetIsLeader()
- mock.ExpectQuery("select role from consensus_member_status;").
- WillReturnRows(pgxmock.NewRows([]string{"role"}).AddRow("leader"))
-
- isLeader, err := manager.IsLeader(ctx, nil)
- assert.True(t, isLeader)
- assert.Nil(t, err)
- })
-
- if err := mock.ExpectationsWereMet(); err != nil {
- t.Errorf("there were unfulfilled expectations: %v", err)
- }
-}
-
-func TestGetMemberHealthyStatus(t *testing.T) {
- ctx := context.TODO()
- manager, mock, _ := MockDatabase(t)
- defer mock.Close()
- cluster := &dcs.Cluster{}
- cluster.Members = append(cluster.Members, dcs.Member{
- Name: manager.CurrentMemberName,
- })
- cluster.Leader = &dcs.Leader{
- Name: manager.CurrentMemberName,
- }
-
- t.Run("query failed", func(t *testing.T) {
- mock.ExpectQuery("select connected, log_delay_num from consensus_cluster_health").
- WillReturnError(fmt.Errorf("some error"))
-
- healthStatus, err := manager.getMemberHealthStatus(ctx, cluster, cluster.GetMemberWithName(manager.CurrentMemberName))
- assert.NotNil(t, err)
- assert.Nil(t, healthStatus)
- })
-
- t.Run("parse query failed", func(t *testing.T) {
- mock.ExpectQuery("select connected, log_delay_num from consensus_cluster_health").
- WillReturnRows(pgxmock.NewRows([]string{"connected, log_delay_num"}))
-
- healthStatus, err := manager.getMemberHealthStatus(ctx, cluster, cluster.GetMemberWithName(manager.CurrentMemberName))
- assert.NotNil(t, err)
- assert.Nil(t, healthStatus)
- })
-
- t.Run("get member health status success", func(t *testing.T) {
- mock.ExpectQuery("select connected, log_delay_num from consensus_cluster_health").
- WillReturnRows(pgxmock.NewRows([]string{"connected", "log_delay_num"}).AddRow(true, 0))
-
- healthStatus, err := manager.getMemberHealthStatus(ctx, cluster, cluster.GetMemberWithName(manager.CurrentMemberName))
- assert.Nil(t, err)
- assert.True(t, healthStatus.Connected)
- assert.Equal(t, int64(0), healthStatus.LogDelayNum)
- })
-
- t.Run("health status has been set", func(t *testing.T) {
- manager.healthStatus = &postgres.ConsensusMemberHealthStatus{
- Connected: false,
- LogDelayNum: 200,
- }
- manager.DBState = &dcs.DBState{}
-
- healthStatus, err := manager.getMemberHealthStatus(ctx, cluster, cluster.GetMemberWithName(manager.CurrentMemberName))
- assert.Nil(t, err)
- assert.False(t, healthStatus.Connected)
- assert.Equal(t, int64(200), healthStatus.LogDelayNum)
- })
-
- if err := mock.ExpectationsWereMet(); err != nil {
- t.Errorf("there were unfulfilled expectations: %v", err)
- }
-}
-
-func TestJoinCurrentMemberToCluster(t *testing.T) {
- ctx := context.TODO()
- manager, mock, _ := MockDatabase(t)
- defer mock.Close()
- cluster := &dcs.Cluster{}
- cluster.Leader = &dcs.Leader{
- Name: manager.CurrentMemberName,
- }
- cluster.Members = append(cluster.Members, dcs.Member{
- Name: manager.CurrentMemberName,
- })
-
- t.Run("exec alter system failed", func(t *testing.T) {
- mock.ExpectExec("alter system").
- WillReturnError(fmt.Errorf("some error"))
-
- err := manager.JoinCurrentMemberToCluster(ctx, cluster)
- assert.NotNil(t, err)
- })
-
- t.Run("exec alter system success", func(t *testing.T) {
- mock.ExpectExec("alter system").
- WillReturnResult(pgxmock.NewResult("alter system", 1))
-
- err := manager.JoinCurrentMemberToCluster(ctx, cluster)
- assert.Nil(t, err)
- })
-
- if err := mock.ExpectationsWereMet(); err != nil {
- t.Errorf("there were unfulfilled expectations: %v", err)
- }
-}
-
-func TestLeaveMemberFromCluster(t *testing.T) {
- ctx := context.TODO()
- manager, mock, _ := MockDatabase(t)
- defer mock.Close()
- cluster := &dcs.Cluster{}
-
- t.Run("exec alter system failed", func(t *testing.T) {
- mock.ExpectExec("alter system").
- WillReturnError(fmt.Errorf("some error"))
-
- err := manager.LeaveMemberFromCluster(ctx, cluster, "")
- assert.NotNil(t, err)
- })
-
- t.Run("exec alter system success", func(t *testing.T) {
- mock.ExpectExec("alter system").
- WillReturnResult(pgxmock.NewResult("alter system", 1))
-
- err := manager.LeaveMemberFromCluster(ctx, cluster, "")
- assert.Nil(t, err)
- })
-
- if err := mock.ExpectationsWereMet(); err != nil {
- t.Errorf("there were unfulfilled expectations: %v", err)
- }
-}
-
-func TestHasOtherHealthyMembers(t *testing.T) {
- ctx := context.TODO()
- manager, mock, _ := MockDatabase(t)
- defer mock.Close()
- cluster := &dcs.Cluster{}
- cluster.Members = append(cluster.Members, dcs.Member{
- Name: manager.CurrentMemberName,
- })
-
- t.Run("", func(t *testing.T) {
- members := manager.HasOtherHealthyMembers(ctx, cluster, manager.CurrentMemberName)
- assert.Equal(t, 0, len(members))
- })
-
- if err := mock.ExpectationsWereMet(); err != nil {
- t.Errorf("there were unfulfilled expectations: %v", err)
- }
-}
diff --git a/engines/postgres/config.go b/engines/postgres/config.go
index 08d197c..7fa68cd 100644
--- a/engines/postgres/config.go
+++ b/engines/postgres/config.go
@@ -25,85 +25,73 @@ import (
"github.com/jackc/pgx/v5/pgxpool"
"github.com/pkg/errors"
"github.com/spf13/viper"
-
- "github.com/apecloud/dbctl/constant"
- "github.com/apecloud/dbctl/dcs"
)
const (
ConnectionURLKey = "url"
DefaultPort = 5432
EnvRootUser = "POSTGRES_USER"
- EnvRootPass = "POSTGRES_PASSWORD"
+ EnvRootPassword = "POSTGRES_PASSWORD"
+
+ DefaultUrl = "user=postgres password=docker host=localhost port=5432 dbname=postgres pool_min_conns=1 pool_max_conns=10"
+ DefaultMaxConnectionTimeout = "5"
)
type Config struct {
- URL string
- Username string
- Password string
- Host string
- Port int
- Database string
- MaxConnections int32
- MinConnections int32
+ url string
+ username string
+ password string
+ host string
+ port int
+ database string
+ maxConnections int32
+ minConnections int32
+ connectTimeout string
pgxConfig *pgxpool.Config
}
var config *Config
-func NewConfig(properties map[string]string) (*Config, error) {
+func NewConfig() (*Config, error) {
config = &Config{}
- url, ok := properties[ConnectionURLKey]
- if !ok || url == "" {
- return nil, errors.Errorf("required metadata not set: %s", ConnectionURLKey)
- }
-
- poolConfig, err := pgxpool.ParseConfig(url)
+ poolConfig, err := pgxpool.ParseConfig(DefaultUrl)
if err != nil {
return nil, errors.Errorf("error opening DB connection: %v", err)
}
- config.Username = poolConfig.ConnConfig.User
- config.Password = poolConfig.ConnConfig.Password
- config.Host = poolConfig.ConnConfig.Host
- config.Port = int(poolConfig.ConnConfig.Port)
- config.Database = poolConfig.ConnConfig.Database
- config.MaxConnections = poolConfig.MaxConns
- config.MinConnections = poolConfig.MinConns
-
- if viper.IsSet(constant.KBEnvServiceUser) {
- config.Username = viper.GetString(constant.KBEnvServiceUser)
- } else if viper.IsSet(EnvRootUser) {
- config.Username = viper.GetString(EnvRootUser)
+ config.username = poolConfig.ConnConfig.User
+ config.password = poolConfig.ConnConfig.Password
+ config.host = poolConfig.ConnConfig.Host
+ config.port = int(poolConfig.ConnConfig.Port)
+ config.database = poolConfig.ConnConfig.Database
+ config.maxConnections = poolConfig.MaxConns
+ config.minConnections = poolConfig.MinConns
+ config.connectTimeout = DefaultMaxConnectionTimeout
+
+ if viper.IsSet(EnvRootUser) {
+ config.username = viper.GetString(EnvRootUser)
}
-
- if viper.IsSet(constant.KBEnvServicePassword) {
- config.Password = viper.GetString(constant.KBEnvServicePassword)
- } else if viper.IsSet(EnvRootPass) {
- config.Password = viper.GetString(EnvRootPass)
+ if viper.IsSet(EnvRootPassword) {
+ config.password = viper.GetString(EnvRootPassword)
}
- config.URL = config.GetConnectURLWithHost(config.Host)
- pgxConfig, _ := pgxpool.ParseConfig(config.URL)
+ config.url = config.GetConnectURLWithHost(config.host)
+ pgxConfig, _ := pgxpool.ParseConfig(config.url)
config.pgxConfig = pgxConfig
return config, nil
}
func (config *Config) GetDBPort() int {
- if config.Port == 0 {
+ if config.port == 0 {
return DefaultPort
}
- return config.Port
+ return config.port
}
func (config *Config) GetConnectURLWithHost(host string) string {
return fmt.Sprintf("user=%s password=%s host=%s port=%d dbname=%s",
- config.Username, config.Password, host, config.Port, config.Database)
-}
-
-func (config *Config) GetConsensusIPPort(cluster *dcs.Cluster, name string) string {
- return fmt.Sprintf("%s.%s-headless.%s.svc:1%d", name, cluster.ClusterCompName, cluster.Namespace, config.GetDBPort())
+ config.username, config.password, host, config.port, config.database)
}
diff --git a/engines/postgres/config_test.go b/engines/postgres/config_test.go
index fbfa345..c1407f6 100644
--- a/engines/postgres/config_test.go
+++ b/engines/postgres/config_test.go
@@ -24,82 +24,28 @@ import (
"github.com/spf13/viper"
"github.com/stretchr/testify/assert"
-
- "github.com/apecloud/dbctl/constant"
- "github.com/apecloud/dbctl/dcs"
)
func TestGetPostgresqlMetadata(t *testing.T) {
t.Run("With defaults", func(t *testing.T) {
- properties := map[string]string{
- ConnectionURLKey: "user=postgres password=docker host=localhost port=5432 dbname=postgres pool_min_conns=1 pool_max_conns=10",
- }
-
- metadata, err := NewConfig(properties)
+ metadata, err := NewConfig()
assert.Nil(t, err)
- assert.Equal(t, "postgres", metadata.Username)
- assert.Equal(t, "docker", metadata.Password)
- assert.Equal(t, "localhost", metadata.Host)
- assert.Equal(t, 5432, metadata.Port)
- assert.Equal(t, "postgres", metadata.Database)
- assert.Equal(t, int32(1), metadata.MinConnections)
- assert.Equal(t, int32(10), metadata.MaxConnections)
- })
-
- t.Run("url not set", func(t *testing.T) {
- properties := map[string]string{}
-
- _, err := NewConfig(properties)
- assert.NotNil(t, err)
- })
-
- t.Run("pool max connection too small", func(t *testing.T) {
- properties := map[string]string{
- ConnectionURLKey: "user=postgres password=docker host=localhost port=5432 dbname=postgres pool_min_conns=1 pool_max_conns=0",
- }
-
- _, err := NewConfig(properties)
- assert.NotNil(t, err)
+ assert.Equal(t, "postgres", metadata.username)
+ assert.Equal(t, "docker", metadata.password)
+ assert.Equal(t, "localhost", metadata.host)
+ assert.Equal(t, 5432, metadata.port)
+ assert.Equal(t, "postgres", metadata.database)
+ assert.Equal(t, int32(1), metadata.minConnections)
+ assert.Equal(t, int32(10), metadata.maxConnections)
})
t.Run("set env", func(t *testing.T) {
- viper.Set(constant.KBEnvServiceUser, "test")
- viper.Set(constant.KBEnvServicePassword, "test_pwd")
- properties := map[string]string{
- ConnectionURLKey: "user=postgres password=docker host=localhost port=5432 dbname=postgres pool_min_conns=1 pool_max_conns=10",
- }
- metadata, err := NewConfig(properties)
+ viper.Set(EnvRootUser, "test")
+ viper.Set(EnvRootPassword, "test_pwd")
+ metadata, err := NewConfig()
assert.Nil(t, err)
- assert.Equal(t, metadata.Username, "test")
- assert.Equal(t, metadata.Password, "test_pwd")
- })
-}
-
-func TestConfigFunc(t *testing.T) {
- properties := map[string]string{
- ConnectionURLKey: "user=postgres password=docker host=localhost port=5432 dbname=postgres pool_min_conns=1 pool_max_conns=10",
- }
- metadata, err := NewConfig(properties)
- assert.NotNil(t, metadata)
- assert.Nil(t, err)
-
- t.Run("get db port", func(t *testing.T) {
- port := metadata.GetDBPort()
- assert.Equal(t, port, 5432)
-
- metadata.Port = 0
- port = metadata.GetDBPort()
- assert.Equal(t, port, 5432)
- })
-
- t.Run("get consensus IP port", func(t *testing.T) {
- cluster := &dcs.Cluster{
- ClusterCompName: "test",
- Namespace: "default",
- }
-
- consensusIPPort := metadata.GetConsensusIPPort(cluster, "test")
- assert.Equal(t, consensusIPPort, "test.test-headless.default.svc:15432")
+ assert.Equal(t, "test", metadata.username)
+ assert.Equal(t, "test_pwd", metadata.password)
})
}
diff --git a/engines/postgres/manager.go b/engines/postgres/manager.go
index 8c062b2..6cd7c0f 100644
--- a/engines/postgres/manager.go
+++ b/engines/postgres/manager.go
@@ -21,16 +21,12 @@ package postgres
import (
"context"
- "fmt"
- "github.com/jackc/pgx/v5/pgconn"
"github.com/jackc/pgx/v5/pgxpool"
"github.com/pkg/errors"
- "github.com/shirou/gopsutil/v3/process"
"github.com/spf13/viper"
ctrl "sigs.k8s.io/controller-runtime"
- "github.com/apecloud/dbctl/dcs"
"github.com/apecloud/dbctl/engines"
)
@@ -38,14 +34,12 @@ type Manager struct {
engines.DBManagerBase
MajorVersion int
Pool PgxPoolIFace
- Proc *process.Process
Config *Config
- isLeader int
}
-func NewManager(properties map[string]string) (engines.DBManager, error) {
+func NewManager() (engines.DBManager, error) {
logger := ctrl.Log.WithName("PostgreSQL")
- config, err := NewConfig(properties)
+ config, err := NewConfig()
if err != nil {
return nil, err
}
@@ -59,7 +53,6 @@ func NewManager(properties map[string]string) (engines.DBManager, error) {
if err != nil {
return nil, err
}
- managerBase.DataDir = viper.GetString(PGDATA)
mgr := &Manager{
DBManagerBase: *managerBase,
@@ -71,76 +64,6 @@ func NewManager(properties map[string]string) (engines.DBManager, error) {
return mgr, nil
}
-func (mgr *Manager) SetIsLeader(isLeader bool) {
- if isLeader {
- mgr.isLeader = 1
- } else {
- mgr.isLeader = -1
- }
-}
-
-func (mgr *Manager) UnsetIsLeader() {
- mgr.isLeader = 0
-}
-
-// GetIsLeader returns whether the "isLeader" is set or not and whether current member is leader or not
-func (mgr *Manager) GetIsLeader() (bool, bool) {
- return mgr.isLeader != 0, mgr.isLeader == 1
-}
-
-func (mgr *Manager) IsLeaderMember(ctx context.Context, cluster *dcs.Cluster, member *dcs.Member) (bool, error) {
- if member == nil {
- return false, errors.Errorf("member is nil, can't check is leader member or not")
- }
-
- leaderMember := cluster.GetLeaderMember()
- if leaderMember == nil {
- return false, errors.Errorf("leader member is nil, can't check is leader member or not")
- }
-
- if leaderMember.Name != member.Name {
- return false, nil
- }
-
- return true, nil
-}
-
-func (mgr *Manager) ReadCheck(ctx context.Context, host string) bool {
- readSQL := fmt.Sprintf(`select check_ts from kb_health_check where type=%d limit 1;`, engines.CheckStatusType)
- _, err := mgr.QueryWithHost(ctx, readSQL, host)
- if err != nil {
- var pgErr *pgconn.PgError
- if errors.As(err, &pgErr) && pgErr.Code == "42P01" {
- // no healthy check records, return true
- return true
- }
- mgr.Logger.Error(err, "read check failed")
- return false
- }
- return true
-}
-
-func (mgr *Manager) WriteCheck(ctx context.Context, host string) bool {
- writeSQL := fmt.Sprintf(`
- create table if not exists kb_health_check(type int, check_ts timestamp, primary key(type));
- insert into kb_health_check values(%d, CURRENT_TIMESTAMP) on conflict(type) do update set check_ts = CURRENT_TIMESTAMP;
- `, engines.CheckStatusType)
- _, err := mgr.ExecWithHost(ctx, writeSQL, host)
- if err != nil {
- mgr.Logger.Error(err, "write check failed")
- return false
- }
- return true
-}
-
-func (mgr *Manager) PgReload(ctx context.Context) error {
- reload := "select pg_reload_conf();"
-
- _, err := mgr.Exec(ctx, reload)
-
- return err
-}
-
func (mgr *Manager) IsPgReady(ctx context.Context) bool {
err := mgr.Pool.Ping(ctx)
if err != nil {
@@ -150,39 +73,3 @@ func (mgr *Manager) IsPgReady(ctx context.Context) bool {
return true
}
-
-func (mgr *Manager) Lock(ctx context.Context, reason string) error {
- sql := "alter system set default_transaction_read_only=on;"
-
- _, err := mgr.Exec(ctx, sql)
- if err != nil {
- mgr.Logger.Error(err, fmt.Sprintf("exec sql:%s failed", sql))
- return err
- }
-
- if err = mgr.PgReload(ctx); err != nil {
- mgr.Logger.Error(err, "reload conf failed")
- return err
- }
-
- mgr.Logger.Info(fmt.Sprintf("Lock db success: %s", reason))
- return nil
-}
-
-func (mgr *Manager) Unlock(ctx context.Context) error {
- sql := "alter system set default_transaction_read_only=off;"
-
- _, err := mgr.Exec(ctx, sql)
- if err != nil {
- mgr.Logger.Error(err, fmt.Sprintf("exec sql:%s failed", sql))
- return err
- }
-
- if err = mgr.PgReload(ctx); err != nil {
- mgr.Logger.Error(err, "reload conf failed")
- return err
- }
-
- mgr.Logger.Info("UnLock db success")
- return nil
-}
diff --git a/engines/postgres/manager_test.go b/engines/postgres/manager_test.go
index e70614e..3032dfa 100644
--- a/engines/postgres/manager_test.go
+++ b/engines/postgres/manager_test.go
@@ -29,15 +29,10 @@ import (
"github.com/stretchr/testify/assert"
"github.com/apecloud/dbctl/constant"
- "github.com/apecloud/dbctl/dcs"
- "github.com/apecloud/dbctl/engines"
)
func MockDatabase(t *testing.T) (*Manager, pgxmock.PgxPoolIface, error) {
- properties := map[string]string{
- ConnectionURLKey: "user=test password=test host=localhost port=5432 dbname=postgres",
- }
- testConfig, err := NewConfig(properties)
+ testConfig, err := NewConfig()
assert.NotNil(t, testConfig)
assert.Nil(t, err)
@@ -50,7 +45,7 @@ func MockDatabase(t *testing.T) (*Manager, pgxmock.PgxPoolIface, error) {
t.Fatal(err)
}
- dbManager, err := NewManager(engines.Properties(properties))
+ dbManager, err := NewManager()
if err != nil {
t.Fatal(err)
}
@@ -61,48 +56,6 @@ func MockDatabase(t *testing.T) (*Manager, pgxmock.PgxPoolIface, error) {
return manager, mock, err
}
-func TestReadWrite(t *testing.T) {
- ctx := context.TODO()
- manager, mock, _ := MockDatabase(t)
- defer mock.Close()
-
- t.Run("write check success", func(t *testing.T) {
- mock.ExpectExec(`create table if not exists`).
- WillReturnResult(pgxmock.NewResult("CREATE TABLE", 0))
-
- ok := manager.WriteCheck(ctx, "")
- assert.True(t, ok)
- })
-
- t.Run("write check failed", func(t *testing.T) {
- mock.ExpectExec(`create table if not exists`).
- WillReturnError(fmt.Errorf("some error"))
-
- ok := manager.WriteCheck(ctx, "")
- assert.False(t, ok)
- })
-
- t.Run("read check success", func(t *testing.T) {
- mock.ExpectQuery("select").
- WillReturnRows(pgxmock.NewRows([]string{"check_ts"}).AddRow(1))
-
- ok := manager.ReadCheck(ctx, "")
- assert.True(t, ok)
- })
-
- t.Run("read check failed", func(t *testing.T) {
- mock.ExpectQuery("select").
- WillReturnError(fmt.Errorf("some error"))
-
- ok := manager.ReadCheck(ctx, "")
- assert.False(t, ok)
- })
-
- if err := mock.ExpectationsWereMet(); err != nil {
- t.Errorf("there were unfulfilled expectations: %v", err)
- }
-}
-
func TestPgIsReady(t *testing.T) {
ctx := context.TODO()
manager, mock, _ := MockDatabase(t)
@@ -127,146 +80,3 @@ func TestPgIsReady(t *testing.T) {
t.Errorf("there were unfulfilled expectations: %v", err)
}
}
-
-func TestIsLeaderMember(t *testing.T) {
- ctx := context.TODO()
- manager, mock, _ := MockDatabase(t)
- defer mock.Close()
- cluster := &dcs.Cluster{}
- currentMember := dcs.Member{
- Name: manager.CurrentMemberName,
- }
-
- t.Run("member is nil", func(t *testing.T) {
- isLeaderMember, err := manager.IsLeaderMember(ctx, cluster, nil)
- assert.False(t, isLeaderMember)
- assert.NotNil(t, err)
- })
-
- t.Run("leader member is nil", func(t *testing.T) {
- isLeaderMember, err := manager.IsLeaderMember(ctx, cluster, ¤tMember)
- assert.False(t, isLeaderMember)
- assert.NotNil(t, err)
- })
-
- cluster.Leader = &dcs.Leader{
- Name: manager.CurrentMemberName,
- }
- cluster.Members = append(cluster.Members, currentMember)
- t.Run("is leader member", func(t *testing.T) {
- isLeaderMember, err := manager.IsLeaderMember(ctx, cluster, ¤tMember)
- assert.True(t, isLeaderMember)
- assert.Nil(t, err)
- })
-
- member := &dcs.Member{
- Name: "test",
- }
- t.Run("is not leader member", func(t *testing.T) {
- isLeaderMember, err := manager.IsLeaderMember(ctx, cluster, member)
- assert.False(t, isLeaderMember)
- assert.Nil(t, err)
- })
-}
-
-func TestPgReload(t *testing.T) {
- ctx := context.TODO()
- manager, mock, _ := MockDatabase(t)
- defer mock.Close()
-
- t.Run("pg reload success", func(t *testing.T) {
- mock.ExpectExec("select pg_reload_conf()").
- WillReturnResult(pgxmock.NewResult("select", 1))
-
- err := manager.PgReload(ctx)
- assert.Nil(t, err)
- })
-
- t.Run("pg reload failed", func(t *testing.T) {
- mock.ExpectExec("select pg_reload_conf()").
- WillReturnError(fmt.Errorf("some error"))
-
- err := manager.PgReload(ctx)
- assert.NotNil(t, err)
- })
-
- if err := mock.ExpectationsWereMet(); err != nil {
- t.Errorf("there were unfulfilled expectations: %v", err)
- }
-}
-
-func TestLock(t *testing.T) {
- ctx := context.TODO()
- manager, mock, _ := MockDatabase(t)
- defer mock.Close()
-
- t.Run("alter system failed", func(t *testing.T) {
- mock.ExpectExec("alter system").
- WillReturnError(fmt.Errorf("alter system failed"))
-
- err := manager.Lock(ctx, "test")
- assert.NotNil(t, err)
- assert.ErrorContains(t, err, "alter system failed")
- })
-
- t.Run("pg reload failed", func(t *testing.T) {
- mock.ExpectExec("alter system").
- WillReturnResult(pgxmock.NewResult("alter", 1))
- mock.ExpectExec("select pg_reload_conf()").
- WillReturnError(fmt.Errorf("pg reload failed"))
- err := manager.Lock(ctx, "test")
- assert.NotNil(t, err)
- assert.ErrorContains(t, err, "pg reload failed")
- })
-
- t.Run("lock success", func(t *testing.T) {
- mock.ExpectExec("alter system").
- WillReturnResult(pgxmock.NewResult("alter", 1))
- mock.ExpectExec("select pg_reload_conf()").
- WillReturnResult(pgxmock.NewResult("select", 1))
- err := manager.Lock(ctx, "test")
- assert.Nil(t, err)
- })
-
- if err := mock.ExpectationsWereMet(); err != nil {
- t.Errorf("there were unfulfilled expectations: %v", err)
- }
-}
-
-func TestUnlock(t *testing.T) {
- ctx := context.TODO()
- manager, mock, _ := MockDatabase(t)
- defer mock.Close()
-
- t.Run("alter system failed", func(t *testing.T) {
- mock.ExpectExec("alter system").
- WillReturnError(fmt.Errorf("alter system failed"))
-
- err := manager.Unlock(ctx)
- assert.NotNil(t, err)
- assert.ErrorContains(t, err, "alter system failed")
- })
-
- t.Run("pg reload failed", func(t *testing.T) {
- mock.ExpectExec("alter system").
- WillReturnResult(pgxmock.NewResult("alter", 1))
- mock.ExpectExec("select pg_reload_conf()").
- WillReturnError(fmt.Errorf("pg reload failed"))
- err := manager.Unlock(ctx)
- assert.NotNil(t, err)
- assert.ErrorContains(t, err, "pg reload failed")
- })
-
- t.Run("unlock success", func(t *testing.T) {
- mock.ExpectExec("alter system").
- WillReturnResult(pgxmock.NewResult("alter", 1))
- mock.ExpectExec("select pg_reload_conf()").
- WillReturnResult(pgxmock.NewResult("select", 1))
- err := manager.Unlock(ctx)
- assert.Nil(t, err)
- })
-
- if err := mock.ExpectationsWereMet(); err != nil {
- t.Errorf("there were unfulfilled expectations: %v", err)
- }
-}
diff --git a/engines/postgres/query.go b/engines/postgres/query.go
index 980835a..5028053 100644
--- a/engines/postgres/query.go
+++ b/engines/postgres/query.go
@@ -23,14 +23,10 @@ import (
"context"
"encoding/json"
"fmt"
- "strings"
"github.com/jackc/pgx/v5"
"github.com/jackc/pgx/v5/pgconn"
"github.com/pkg/errors"
- "github.com/spf13/cast"
-
- "github.com/apecloud/dbctl/dcs"
)
// Query is equivalent to QueryWithHost(ctx, sql, ""), query itself.
@@ -77,40 +73,6 @@ func (mgr *Manager) QueryOthers(ctx context.Context, sql string, host string) (r
return conn.Query(ctx, sql)
}
-// GetLeaderAddr query leader addr from db kernel
-func (mgr *Manager) GetLeaderAddr(ctx context.Context) (string, error) {
- queryLeaderAddrSQL := `select ip_port from consensus_cluster_status where server_id = (select current_leader from consensus_member_status);`
- resp, err := mgr.Query(ctx, queryLeaderAddrSQL)
- if err != nil {
- return "", err
- }
-
- resMap, err := ParseQuery(string(resp))
- if err != nil {
- return "", err
- }
-
- return strings.Split(cast.ToString(resMap[0]["ip_port"]), ":")[0], nil
-}
-
-func (mgr *Manager) QueryLeader(ctx context.Context, sql string, cluster *dcs.Cluster) (result []byte, err error) {
- leaderMember := cluster.GetLeaderMember()
- if leaderMember == nil {
- leaderAddr, err := mgr.GetLeaderAddr(ctx)
- if err != nil {
- return nil, ClusterHasNoLeader
- }
-
- return mgr.QueryWithHost(ctx, sql, leaderAddr)
- }
-
- var host string
- if leaderMember.Name != mgr.CurrentMemberName {
- host = cluster.GetMemberAddr(*leaderMember)
- }
- return mgr.QueryWithHost(ctx, sql, host)
-}
-
// Exec is equivalent to ExecWithHost(ctx, sql, ""), exec itself.
func (mgr *Manager) Exec(ctx context.Context, sql string) (result int64, err error) {
return mgr.ExecWithHost(ctx, sql, "")
@@ -145,24 +107,6 @@ func (mgr *Manager) ExecOthers(ctx context.Context, sql string, host string) (re
return conn.Exec(ctx, sql)
}
-func (mgr *Manager) ExecLeader(ctx context.Context, sql string, cluster *dcs.Cluster) (result int64, err error) {
- leaderMember := cluster.GetLeaderMember()
- if leaderMember == nil {
- leaderAddr, err := mgr.GetLeaderAddr(ctx)
- if err != nil {
- return 0, ClusterHasNoLeader
- }
-
- return mgr.ExecWithHost(ctx, sql, leaderAddr)
- }
-
- var host string
- if leaderMember.Name != mgr.CurrentMemberName {
- host = cluster.GetMemberAddr(*leaderMember)
- }
- return mgr.ExecWithHost(ctx, sql, host)
-}
-
func parseRows(rows pgx.Rows) (result []byte, err error) {
rs := make([]interface{}, 0)
columnTypes := rows.FieldDescriptions()
diff --git a/engines/postgres/query_test.go b/engines/postgres/query_test.go
index 5f327ac..cbfe660 100644
--- a/engines/postgres/query_test.go
+++ b/engines/postgres/query_test.go
@@ -26,8 +26,6 @@ import (
"github.com/pashagolub/pgxmock/v2"
"github.com/stretchr/testify/assert"
-
- "github.com/apecloud/dbctl/dcs"
)
const (
@@ -74,39 +72,6 @@ func TestQuery(t *testing.T) {
assert.Nil(t, resp)
})
- t.Run("query leader success", func(t *testing.T) {
- sql := queryTest
- mock.ExpectQuery("select").
- WillReturnRows(pgxmock.NewRows([]string{"1"}).AddRow("1"))
- cluster := &dcs.Cluster{
- Leader: &dcs.Leader{
- Name: manager.CurrentMemberName,
- },
- }
- cluster.Members = append(cluster.Members, dcs.Member{
- Name: manager.CurrentMemberName,
- })
-
- resp, err := manager.QueryLeader(ctx, sql, cluster)
- if err != nil {
- t.Errorf("expect query leader success but failed")
- }
-
- assert.Equal(t, []byte(`[{"1":"1"}]`), resp)
- })
-
- t.Run("query leader failed, cluster has no leader", func(t *testing.T) {
- sql := queryTest
- cluster := &dcs.Cluster{}
-
- _, err := manager.QueryLeader(ctx, sql, cluster)
- if err == nil {
- t.Errorf("expect query leader success but failed")
- }
-
- assert.ErrorIs(t, ClusterHasNoLeader, err)
- })
-
if err := mock.ExpectationsWereMet(); err != nil {
t.Errorf("there were unfulfilled expectations: %v", err)
}
@@ -164,38 +129,6 @@ func TestExec(t *testing.T) {
assert.Equal(t, int64(0), resp)
})
- t.Run("exec leader success", func(t *testing.T) {
- sql := execTest
- mock.ExpectExec("create").
- WillReturnResult(pgxmock.NewResult("CREATE", 1))
- cluster := &dcs.Cluster{
- Leader: &dcs.Leader{
- Name: manager.CurrentMemberName,
- },
- }
- cluster.Members = append(cluster.Members, dcs.Member{
- Name: manager.CurrentMemberName,
- })
-
- resp, err := manager.ExecLeader(ctx, sql, cluster)
- if err != nil {
- t.Errorf("expect exec leader success but failed")
- }
- assert.Equal(t, int64(1), resp)
- })
-
- t.Run("exec leader failed, cluster has no leader", func(t *testing.T) {
- sql := execTest
- cluster := &dcs.Cluster{}
-
- _, err := manager.ExecLeader(ctx, sql, cluster)
- if err == nil {
- t.Errorf("expect exec leader success but failed")
- }
-
- assert.ErrorIs(t, ClusterHasNoLeader, err)
- })
-
if err := mock.ExpectationsWereMet(); err != nil {
t.Errorf("there were unfulfilled expectations: %v", err)
}
diff --git a/engines/postgres/types.go b/engines/postgres/types.go
index 787a6cb..d86224d 100644
--- a/engines/postgres/types.go
+++ b/engines/postgres/types.go
@@ -21,16 +21,12 @@ package postgres
import (
"context"
- "github.com/apecloud/dbctl/dcs"
- "github.com/apecloud/dbctl/engines"
+
"github.com/jackc/pgx/v5"
"github.com/jackc/pgx/v5/pgconn"
"github.com/jackc/pgx/v5/pgxpool"
- "github.com/pkg/errors"
-)
-var (
- ClusterHasNoLeader = errors.New("cluster has no leader now")
+ "github.com/apecloud/dbctl/engines"
)
const (
@@ -40,7 +36,6 @@ const (
type PgBaseIFace interface {
GetMemberRoleWithHost(ctx context.Context, host string) (string, error)
- IsMemberHealthy(ctx context.Context, cluster *dcs.Cluster, member *dcs.Member) bool
Query(ctx context.Context, sql string) (result []byte, err error)
Exec(ctx context.Context, sql string) (result int64, err error)
}
@@ -63,11 +58,6 @@ type PgxPoolIFace interface {
Close()
}
-type ConsensusMemberHealthStatus struct {
- Connected bool
- LogDelayNum int64
-}
-
type PatroniResp struct {
Role string `json:"role"`
}
diff --git a/engines/postgres/user.go b/engines/postgres/user.go
deleted file mode 100644
index 832b01c..0000000
--- a/engines/postgres/user.go
+++ /dev/null
@@ -1,238 +0,0 @@
-/*
-Copyright (C) 2022-2023 ApeCloud Co., Ltd
-
-This file is part of KubeBlocks project
-
-This program is free software: you can redistribute it and/or modify
-it under the terms of the GNU Affero General Public License as published by
-the Free Software Foundation, either version 3 of the License, or
-(at your option) any later version.
-
-This program is distributed in the hope that it will be useful
-but WITHOUT ANY WARRANTY; without even the implied warranty of
-MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-GNU Affero General Public License for more details.
-
-You should have received a copy of the GNU Affero General Public License
-along with this program. If not, see .
-*/
-
-package postgres
-
-import (
- "context"
- "encoding/json"
- "fmt"
-
- "golang.org/x/exp/slices"
-
- "github.com/apecloud/dbctl/engines/models"
-)
-
-const (
- listUserTpl = `
- SELECT usename AS userName, valuntil 'postgres' and usename not like 'kb%'
- ORDER BY usename;
- `
- descUserTpl = `
- SELECT usename AS userName, valuntil 0 {
- return &users[0], nil
- }
- return nil, nil
-}
-
-func (mgr *Manager) CreateUser(ctx context.Context, userName, password string) error {
- sql := fmt.Sprintf(createUserTpl, userName, password)
-
- _, err := mgr.Exec(ctx, sql)
- if err != nil {
- mgr.Logger.Error(err, "execute sql failed", "sql", sql)
- return err
- }
-
- return nil
-}
-
-func (mgr *Manager) DeleteUser(ctx context.Context, userName string) error {
- sql := fmt.Sprintf(dropUserTpl, userName)
-
- _, err := mgr.Exec(ctx, sql)
- if err != nil {
- mgr.Logger.Error(err, "execute sql failed", "sql", sql)
- return err
- }
-
- return nil
-}
-
-func (mgr *Manager) GrantUserRole(ctx context.Context, userName, roleName string) error {
- var sql string
- if models.SuperUserRole.EqualTo(roleName) {
- sql = "ALTER USER " + userName + " WITH SUPERUSER;"
- } else {
- roleDesc, _ := role2PGRole(roleName)
- sql = fmt.Sprintf(grantTpl, roleDesc, userName)
- }
- _, err := mgr.Exec(ctx, sql)
- if err != nil {
- mgr.Logger.Error(err, "execute sql failed", "sql", sql)
- return err
- }
-
- return nil
-}
-
-func (mgr *Manager) RevokeUserRole(ctx context.Context, userName, roleName string) error {
- var sql string
- if models.SuperUserRole.EqualTo(roleName) {
- sql = "ALTER USER " + userName + " WITH NOSUPERUSER;"
- } else {
- roleDesc, _ := role2PGRole(roleName)
- sql = fmt.Sprintf(revokeTpl, roleDesc, userName)
- }
-
- _, err := mgr.Exec(ctx, sql)
- if err != nil {
- mgr.Logger.Error(err, "execute sql failed", "sql", sql)
- return err
- }
-
- return nil
-}
-
-// post-processing
-func pgUserRolesProcessor(data interface{}) ([]models.UserInfo, error) {
- type pgUserInfo struct {
- UserName string `json:"username"`
- Expired bool `json:"expired"`
- Super bool `json:"usesuper"`
- Roles []string `json:"roles"`
- }
- // parse data to struct
- var pgUsers []pgUserInfo
- err := json.Unmarshal(data.([]byte), &pgUsers)
- if err != nil {
- return nil, err
- }
- // parse roles
- users := make([]models.UserInfo, len(pgUsers))
- for i := range pgUsers {
- users[i] = models.UserInfo{
- UserName: pgUsers[i].UserName,
- }
-
- if pgUsers[i].Expired {
- users[i].Expired = "T"
- } else {
- users[i].Expired = "F"
- }
-
- // parse Super attribute
- if pgUsers[i].Super {
- pgUsers[i].Roles = append(pgUsers[i].Roles, string(models.SuperUserRole))
- }
-
- // convert to RoleType and sort by weight
- roleTypes := make([]models.RoleType, 0)
- for _, role := range pgUsers[i].Roles {
- roleTypes = append(roleTypes, models.String2RoleType(role))
- }
- slices.SortFunc(roleTypes, models.SortRoleByWeight)
- if len(roleTypes) > 0 {
- users[i].RoleName = string(roleTypes[0])
- }
- }
- return users, nil
-}
-
-func role2PGRole(roleName string) (string, error) {
- roleType := models.String2RoleType(roleName)
- switch roleType {
- case models.ReadWriteRole:
- return "pg_write_all_data", nil
- case models.ReadOnlyRole:
- return "pg_read_all_data", nil
- }
- return "", fmt.Errorf("role name: %s is not supported", roleName)
-}
diff --git a/engines/postgres/vanillapostgres/manager.go b/engines/postgres/vanillapostgres/manager.go
index ed9a9f7..b8bce8a 100644
--- a/engines/postgres/vanillapostgres/manager.go
+++ b/engines/postgres/vanillapostgres/manager.go
@@ -31,7 +31,6 @@ import (
"github.com/spf13/cast"
"github.com/spf13/viper"
- "github.com/apecloud/dbctl/dcs"
"github.com/apecloud/dbctl/engines"
"github.com/apecloud/dbctl/engines/models"
"github.com/apecloud/dbctl/engines/postgres"
@@ -45,10 +44,10 @@ var _ engines.DBManager = &Manager{}
var Mgr *Manager
-func NewManager(properties engines.Properties) (engines.DBManager, error) {
+func NewManager() (engines.DBManager, error) {
Mgr = &Manager{}
- baseManager, err := postgres.NewManager(properties)
+ baseManager, err := postgres.NewManager()
if err != nil {
return nil, errors.Errorf("new base manager failed, err: %v", err)
}
@@ -57,25 +56,6 @@ func NewManager(properties engines.Properties) (engines.DBManager, error) {
return Mgr, nil
}
-func (mgr *Manager) IsLeader(ctx context.Context, _ *dcs.Cluster) (bool, error) {
- isSet, isLeader := mgr.GetIsLeader()
- if isSet {
- return isLeader, nil
- }
-
- return mgr.IsLeaderWithHost(ctx, "")
-}
-
-func (mgr *Manager) IsLeaderWithHost(ctx context.Context, host string) (bool, error) {
- role, err := mgr.GetMemberRoleWithHost(ctx, host)
- if err != nil {
- return false, errors.Errorf("check is leader with host:%s failed, err:%v", host, err)
- }
-
- mgr.Logger.Info(fmt.Sprintf("get member:%s role:%s", host, role))
- return role == models.PRIMARY, nil
-}
-
func (mgr *Manager) IsDBStartupReady() bool {
ctx, cancel := context.WithTimeout(context.Background(), 500*time.Millisecond)
defer cancel()
@@ -94,10 +74,7 @@ func (mgr *Manager) IsDBStartupReady() bool {
func (mgr *Manager) GetMemberRoleWithHost(ctx context.Context, host string) (string, error) {
getRoleFromPatroni := func() (string, error) {
- patroniPort := "8008"
- if viper.IsSet("PATRONI_PORT") {
- patroniPort = viper.GetString("PATRONI_PORT")
- }
+ patroniPort := viper.GetString("PATRONI_PORT")
resp, err := http.Get(fmt.Sprintf("http://127.0.0.1:%s", patroniPort))
if err != nil {
@@ -123,7 +100,7 @@ func (mgr *Manager) GetMemberRoleWithHost(ctx context.Context, host string) (str
}
}
- if viper.IsSet("PATRONIVERSION") {
+ if viper.IsSet("PATRONI_PORT") {
return getRoleFromPatroni()
}
@@ -147,33 +124,3 @@ func (mgr *Manager) GetMemberRoleWithHost(ctx context.Context, host string) (str
return models.PRIMARY, nil
}
}
-
-func (mgr *Manager) IsMemberHealthy(ctx context.Context, cluster *dcs.Cluster, member *dcs.Member) bool {
- var host string
- if member.Name != mgr.CurrentMemberName {
- host = cluster.GetMemberAddr(*member)
- }
-
- if cluster.Leader != nil && cluster.Leader.Name == member.Name {
- if !mgr.WriteCheck(ctx, host) {
- return false
- }
- }
- if !mgr.ReadCheck(ctx, host) {
- return false
- }
-
- return true
-}
-
-func (mgr *Manager) HasOtherHealthyMembers(ctx context.Context, cluster *dcs.Cluster, leader string) []*dcs.Member {
- members := make([]*dcs.Member, 0)
-
- for i, m := range cluster.Members {
- if m.Name != leader && mgr.IsMemberHealthy(ctx, cluster, &m) {
- members = append(members, &cluster.Members[i])
- }
- }
-
- return members
-}
diff --git a/engines/postgres/vanillapostgres/manager_test.go b/engines/postgres/vanillapostgres/manager_test.go
deleted file mode 100644
index f2cc9a5..0000000
--- a/engines/postgres/vanillapostgres/manager_test.go
+++ /dev/null
@@ -1,133 +0,0 @@
-/*
-Copyright (C) 2022-2023 ApeCloud Co., Ltd
-
-This file is part of KubeBlocks project
-
-This program is free software: you can redistribute it and/or modify
-it under the terms of the GNU Affero General Public License as published by
-the Free Software Foundation, either version 3 of the License, or
-(at your option) any later version.
-
-This program is distributed in the hope that it will be useful
-but WITHOUT ANY WARRANTY; without even the implied warranty of
-MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-GNU Affero General Public License for more details.
-
-You should have received a copy of the GNU Affero General Public License
-along with this program. If not, see .
-*/
-
-package vanillapostgres
-
-import (
- "context"
- "fmt"
- "testing"
-
- "github.com/pashagolub/pgxmock/v2"
- "github.com/spf13/viper"
- "github.com/stretchr/testify/assert"
-
- "github.com/apecloud/dbctl/constant"
- "github.com/apecloud/dbctl/dcs"
- "github.com/apecloud/dbctl/engines/postgres"
-)
-
-func MockDatabase(t *testing.T) (*Manager, pgxmock.PgxPoolIface, error) {
- properties := map[string]string{
- postgres.ConnectionURLKey: "user=test password=test host=localhost port=5432 dbname=postgres",
- }
- testConfig, err := postgres.NewConfig(properties)
- assert.NotNil(t, testConfig)
- assert.Nil(t, err)
-
- viper.Set(constant.KBEnvPodName, "test-pod-0")
- viper.Set(constant.KBEnvClusterCompName, "test")
- viper.Set(constant.KBEnvNamespace, "default")
- viper.Set(postgres.PGDATA, "test")
- viper.Set(postgres.PGMAJOR, 14)
- mock, err := pgxmock.NewPool(pgxmock.MonitorPingsOption(true))
- if err != nil {
- t.Fatal(err)
- }
-
- dbManager, err := NewManager(properties)
- if err != nil {
- t.Fatal(err)
- }
- manager := dbManager.(*Manager)
- manager.Pool = mock
-
- return manager, mock, err
-}
-
-func TestIsLeader(t *testing.T) {
- ctx := context.TODO()
- manager, mock, _ := MockDatabase(t)
- defer mock.Close()
-
- t.Run("get member role primary", func(t *testing.T) {
- mock.ExpectQuery("select").
- WillReturnRows(pgxmock.NewRows([]string{"pg_is_in_recovery"}).AddRow(false))
-
- isLeader, err := manager.IsLeader(ctx, nil)
- assert.Nil(t, err)
- assert.Equal(t, true, isLeader)
- })
-
- t.Run("get member role secondary", func(t *testing.T) {
- mock.ExpectQuery("select").
- WillReturnRows(pgxmock.NewRows([]string{"pg_is_in_recovery"}).AddRow(true))
-
- isLeader, err := manager.IsLeader(ctx, nil)
- assert.Nil(t, err)
- assert.Equal(t, false, isLeader)
- })
-
- t.Run("query failed", func(t *testing.T) {
- mock.ExpectQuery("select").
- WillReturnError(fmt.Errorf("some error"))
-
- isLeader, err := manager.IsLeader(ctx, nil)
- assert.NotNil(t, err)
- assert.Equal(t, false, isLeader)
- })
-
- t.Run("parse query failed", func(t *testing.T) {
- mock.ExpectQuery("select").
- WillReturnRows(pgxmock.NewRows([]string{"pg_is_in_recovery"}))
- isLeader, err := manager.IsLeader(ctx, nil)
- assert.NotNil(t, err)
- assert.Equal(t, false, isLeader)
- })
-
- t.Run("has set isLeader", func(t *testing.T) {
- manager.SetIsLeader(true)
- isLeader, err := manager.IsLeader(ctx, nil)
- assert.Nil(t, err)
- assert.Equal(t, true, isLeader)
- })
-
- if err := mock.ExpectationsWereMet(); err != nil {
- t.Errorf("there were unfulfilled expectations: %v", err)
- }
-}
-
-func TestHasOtherHealthyMembers(t *testing.T) {
- ctx := context.TODO()
- manager, mock, _ := MockDatabase(t)
- defer mock.Close()
- cluster := &dcs.Cluster{}
- cluster.Members = append(cluster.Members, dcs.Member{
- Name: manager.CurrentMemberName,
- })
-
- t.Run("", func(t *testing.T) {
- members := manager.HasOtherHealthyMembers(ctx, cluster, manager.CurrentMemberName)
- assert.Equal(t, 0, len(members))
- })
-
- if err := mock.ExpectationsWereMet(); err != nil {
- t.Errorf("there were unfulfilled expectations: %v", err)
- }
-}
diff --git a/engines/redis/manager.go b/engines/redis/manager.go
index 5954f69..794f003 100644
--- a/engines/redis/manager.go
+++ b/engines/redis/manager.go
@@ -43,25 +43,21 @@ var (
type Manager struct {
engines.DBManagerBase
- client redis.UniversalClient
- clientSettings *Settings
- sentinelClient *redis.SentinelClient
-
- ctx context.Context
- cancel context.CancelFunc
- startAt time.Time
- role string
- roleSubscribeUpdateTime int64
- roleProbePeriod int64
- masterName string
- currentRedisHost string
- currentRedisPort string
+ client redis.UniversalClient
+ clientSettings *Settings
+ sentinelClient *redis.SentinelClient
+ masterName string
+ currentRedisHost string
+ currentRedisPort string
}
var _ engines.DBManager = &Manager{}
-func NewManager(properties engines.Properties) (engines.DBManager, error) {
+func NewManager() (engines.DBManager, error) {
logger := ctrl.Log.WithName("Redis")
+ properties := map[string]string{
+ "redisHost": "127.0.0.1:6379",
+ }
if viper.IsSet("REDIS_DEFAULT_USER") {
redisUser = viper.GetString("REDIS_DEFAULT_USER")
@@ -80,8 +76,7 @@ func NewManager(properties engines.Properties) (engines.DBManager, error) {
return nil, err
}
mgr := &Manager{
- DBManagerBase: *managerBase,
- roleProbePeriod: int64(viper.GetInt(constant.KBEnvRoleProbePeriod)),
+ DBManagerBase: *managerBase,
}
mgr.masterName = mgr.ClusterCompName
diff --git a/engines/redis/manager_test.go b/engines/redis/manager_test.go
deleted file mode 100644
index cc6c708..0000000
--- a/engines/redis/manager_test.go
+++ /dev/null
@@ -1,586 +0,0 @@
-/*
-Copyright (C) 2022-2023 ApeCloud Co., Ltd
-
-This file is part of KubeBlocks project
-
-This program is free software: you can redistribute it and/or modify
-it under the terms of the GNU Affero General Public License as published by
-the Free Software Foundation, either version 3 of the License, or
-(at your option) any later version.
-
-This program is distributed in the hope that it will be useful
-but WITHOUT ANY WARRANTY; without even the implied warranty of
-MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-GNU Affero General Public License for more details.
-
-You should have received a copy of the GNU Affero General Public License
-along with this program. If not, see .
-*/
-
-package redis
-
-import (
- . "github.com/onsi/ginkgo/v2"
- . "github.com/onsi/gomega"
- "github.com/spf13/viper"
-
- "github.com/apecloud/dbctl/constant"
- "github.com/apecloud/dbctl/engines"
-)
-
-const (
-// testData = `{"data":"data"}`
-// testKey = "test"
-// redisHost = "127.0.0.1:6379"
-
-// userName = "kiminonawa"
-// password = "moss"
-// roleName = util.ReadWriteRole
-)
-
-var _ = Describe("Redis DBManager", func() {
- // Set up relevant viper config variables
- viper.Set(constant.KBEnvServiceUser, "testuser")
- viper.Set(constant.KBEnvServicePassword, "testpassword")
- Context("new db manager", func() {
- It("with right configurations", func() {
- properties := engines.Properties{
- "url": "127.0.0.1",
- }
- dbManger, err := NewManager(properties)
- Expect(err).Should(Succeed())
- Expect(dbManger).ShouldNot(BeNil())
- })
-
- It("with wrong configurations", func() {
- properties := engines.Properties{
- "poolSize": "wrong-number",
- }
- dbManger, err := NewManager(properties)
- Expect(err).Should(HaveOccurred())
- Expect(dbManger).Should(BeNil())
- })
- })
-})
-
-// func TestRedisInit(t *testing.T) {
-// r, _ := mockRedisOps(t)
-// defer r.Close()
-// // make sure operations are inited
-// assert.NotNil(t, r.client)
-// assert.NotNil(t, r.OperationsMap[util.ListUsersOp])
-// assert.NotNil(t, r.OperationsMap[util.CreateUserOp])
-// assert.NotNil(t, r.OperationsMap[util.DeleteUserOp])
-// assert.NotNil(t, r.OperationsMap[util.DescribeUserOp])
-// assert.NotNil(t, r.OperationsMap[util.GrantUserRoleOp])
-// assert.NotNil(t, r.OperationsMap[util.RevokeUserRoleOp])
-// }
-// func TestRedisInvokeCreate(t *testing.T) {
-// r, mock := mockRedisOps(t)
-// defer r.Close()
-//
-// result := OpsResult{}
-// request := &ProbeRequest{
-// Data: []byte(testData),
-// Metadata: map[string]string{"key": testKey},
-// Operation: util.CreateOperation,
-// }
-// // mock expectation
-// mock.ExpectDo("SET", testKey, testData).SetVal("ok")
-//
-// // invoke
-// bindingRes, err := r.Invoke(context.TODO(), request)
-// assert.Equal(t, nil, err)
-// assert.NotNil(t, bindingRes)
-// assert.NotNil(t, bindingRes.Data)
-//
-// err = json.Unmarshal(bindingRes.Data, &result)
-// assert.Nil(t, err)
-// assert.Equal(t, util.RespEveSucc, result[util.RespFieldEvent], result[util.RespFieldMessage])
-// }
-//
-// func TestRedisInvokeGet(t *testing.T) {
-// r, mock := mockRedisOps(t)
-// defer r.Close()
-//
-// opsResult := OpsResult{}
-// request := &ProbeRequest{
-// Metadata: map[string]string{"key": testKey},
-// Operation: util.GetOperation,
-// }
-// // mock expectation, set to nil
-// mock.ExpectDo("GET", testKey).RedisNil()
-// mock.ExpectDo("GET", testKey).SetVal(testData)
-//
-// // invoke create
-// bindingRes, err := r.Invoke(context.TODO(), request)
-// assert.Nil(t, err)
-// assert.NotNil(t, bindingRes)
-// assert.NotNil(t, bindingRes.Data)
-// err = json.Unmarshal(bindingRes.Data, &opsResult)
-// assert.Nil(t, err)
-// assert.Equal(t, util.RespEveFail, opsResult[util.RespFieldEvent])
-//
-// // invoke one more time
-// bindingRes, err = r.Invoke(context.TODO(), request)
-// assert.Nil(t, err)
-// assert.NotNil(t, bindingRes.Data)
-// err = json.Unmarshal(bindingRes.Data, &opsResult)
-// assert.Nil(t, err)
-// assert.Equal(t, util.RespEveSucc, opsResult[util.RespFieldEvent])
-// var o1 interface{}
-// _ = json.Unmarshal([]byte(opsResult[util.RespFieldMessage].(string)), &o1)
-// assert.Equal(t, testData, o1)
-// }
-//
-// func TestRedisInvokeDelete(t *testing.T) {
-// r, mock := mockRedisOps(t)
-// defer r.Close()
-//
-// opsResult := OpsResult{}
-// request := &ProbeRequest{
-// Metadata: map[string]string{"key": testKey},
-// Operation: util.DeleteOperation,
-// }
-// // mock expectation, set to err
-// mock.ExpectDo("DEL", testKey).SetVal("ok")
-//
-// // invoke delete
-// bindingRes, err := r.Invoke(context.TODO(), request)
-// assert.Nil(t, err)
-// assert.NotNil(t, bindingRes)
-// assert.NotNil(t, bindingRes.Data)
-// err = json.Unmarshal(bindingRes.Data, &opsResult)
-// assert.Nil(t, err)
-// assert.Equal(t, util.RespEveSucc, opsResult[util.RespFieldEvent])
-// }
-//
-// func TestRedisGetRoles(t *testing.T) {
-// r, mock := mockRedisOps(t)
-// defer r.Close()
-//
-// opsResult := OpsResult{}
-// request := &ProbeRequest{
-// Operation: util.GetRoleOperation,
-// }
-//
-// // mock expectation, set to err
-// mock.ExpectInfo("Replication").SetVal("role:master\r\nconnected_slaves:1")
-// mock.ExpectInfo("Replication").SetVal("role:slave\r\nmaster_port:6379")
-// // invoke request
-// bindingRes, err := r.Invoke(context.TODO(), request)
-// assert.Nil(t, err)
-// assert.NotNil(t, bindingRes)
-// assert.NotNil(t, bindingRes.Data)
-// err = json.Unmarshal(bindingRes.Data, &opsResult)
-// assert.Nil(t, err)
-// assert.Equal(t, util.RespEveSucc, opsResult[util.RespFieldEvent])
-// assert.Equal(t, PRIMARY, opsResult["role"])
-//
-// // invoke one more time
-// bindingRes, err = r.Invoke(context.TODO(), request)
-// assert.Nil(t, err)
-// err = json.Unmarshal(bindingRes.Data, &opsResult)
-// assert.Nil(t, err)
-// assert.Equal(t, util.RespEveSucc, opsResult[util.RespFieldEvent])
-// assert.Equal(t, SECONDARY, opsResult["role"])
-// }
-//
-// func TestRedisAccounts(t *testing.T) {
-// // prepare
-// r, mock := mockRedisOps(t)
-// defer r.Close()
-//
-// ctx := context.TODO()
-// // list accounts
-// t.Run("List Accounts", func(t *testing.T) {
-// mock.ExpectDo("ACL", "USERS").SetVal([]string{"ape", "default", "kbadmin"})
-//
-// response, err := r.Invoke(ctx, &ProbeRequest{
-// Operation: util.ListUsersOp,
-// })
-//
-// assert.Nil(t, err)
-// assert.NotNil(t, response)
-// assert.NotNil(t, response.Data)
-// // parse result
-// opsResult := OpsResult{}
-// _ = json.Unmarshal(response.Data, &opsResult)
-// assert.Equal(t, util.RespEveSucc, opsResult[util.RespFieldEvent], opsResult[util.RespFieldMessage])
-//
-// users := make([]util.UserInfo, 0)
-// err = json.Unmarshal([]byte(opsResult[util.RespFieldMessage].(string)), &users)
-// assert.Nil(t, err)
-// assert.NotEmpty(t, users)
-// user := users[0]
-// assert.Equal(t, "ape", user.UserName)
-// mock.ClearExpect()
-// })
-//
-// // create accounts
-// t.Run("Create Accounts", func(t *testing.T) {
-//
-// var (
-// err error
-// opsResult = OpsResult{}
-// response *ProbeResponse
-// request = &ProbeRequest{
-// Operation: util.CreateUserOp,
-// }
-// )
-//
-// testCases := []redisTestCase{
-// {
-// testName: "emptymeta",
-// testMetaData: map[string]string{},
-// expectEveType: util.RespEveFail,
-// expectEveMsg: ErrNoUserName.Error(),
-// },
-// {
-// testName: "nousername",
-// testMetaData: map[string]string{"password": "moli"},
-// expectEveType: util.RespEveFail,
-// expectEveMsg: ErrNoUserName.Error(),
-// },
-// {
-// testName: "nopasswd",
-// testMetaData: map[string]string{"userName": "namae"},
-// expectEveType: util.RespEveFail,
-// expectEveMsg: ErrNoPassword.Error(),
-// },
-// {
-// testName: "validInput",
-// testMetaData: map[string]string{
-// "userName": userName,
-// "password": password,
-// },
-// expectEveType: util.RespEveSucc,
-// expectEveMsg: fmt.Sprintf("created user: %s", userName),
-// },
-// }
-// // mock a user
-// mock.ExpectDo("ACL", "SETUSER", userName, ">"+password).SetVal("ok")
-//
-// for _, accTest := range testCases {
-// request.Metadata = accTest.testMetaData
-// response, err = r.Invoke(ctx, request)
-// assert.Nil(t, err)
-// assert.NotNil(t, response.Data)
-// err = json.Unmarshal(response.Data, &opsResult)
-// assert.Nil(t, err)
-// assert.Equal(t, accTest.expectEveType, opsResult[util.RespFieldEvent], opsResult[util.RespFieldMessage])
-// assert.Contains(t, opsResult[util.RespFieldMessage], accTest.expectEveMsg)
-// }
-// mock.ClearExpect()
-// })
-// // grant and revoke role
-// t.Run("Grant Accounts", func(t *testing.T) {
-//
-// var (
-// err error
-// opsResult = OpsResult{}
-// response *ProbeResponse
-// )
-//
-// testCases := []redisTestCase{
-// {
-// testName: "emptymeta",
-// testMetaData: map[string]string{},
-// expectEveType: util.RespEveFail,
-// expectEveMsg: ErrNoUserName.Error(),
-// },
-// {
-// testName: "nousername",
-// testMetaData: map[string]string{"password": "moli"},
-// expectEveType: util.RespEveFail,
-// expectEveMsg: ErrNoUserName.Error(),
-// },
-// {
-// testName: "norolename",
-// testMetaData: map[string]string{"userName": "namae"},
-// expectEveType: util.RespEveFail,
-// expectEveMsg: ErrNoRoleName.Error(),
-// },
-// {
-// testName: "invalidRoleName",
-// testMetaData: map[string]string{"userName": "namae", "roleName": "superman"},
-// expectEveType: util.RespEveFail,
-// expectEveMsg: ErrInvalidRoleName.Error(),
-// },
-// {
-// testName: "validInput",
-// testMetaData: map[string]string{
-// "userName": userName,
-// "roleName": (string)(roleName),
-// },
-// expectEveType: util.RespEveSucc,
-// },
-// }
-//
-// for _, ops := range []util.OperationKind{util.GrantUserRoleOp, util.RevokeUserRoleOp} {
-// // mock exepctation
-// args := tokenizeCmd2Args(fmt.Sprintf("ACL SETUSER %s %s", userName, r.role2Priv(ops, (string)(roleName))))
-// mock.ExpectDo(args...).SetVal("ok")
-//
-// request := &ProbeRequest{
-// Operation: ops,
-// }
-// for _, accTest := range testCases {
-// request.Metadata = accTest.testMetaData
-// response, err = r.Invoke(ctx, request)
-// assert.Nil(t, err)
-// assert.NotNil(t, response.Data)
-// err = json.Unmarshal(response.Data, &opsResult)
-// assert.Nil(t, err)
-// assert.Equal(t, accTest.expectEveType, opsResult[util.RespFieldEvent], opsResult[util.RespFieldMessage])
-// if len(accTest.expectEveMsg) > 0 {
-// assert.Contains(t, accTest.expectEveMsg, opsResult[util.RespFieldMessage])
-// }
-// }
-// }
-// mock.ClearExpect()
-// })
-//
-// // desc accounts
-// t.Run("Desc Accounts", func(t *testing.T) {
-// var (
-// err error
-// opsResult = OpsResult{}
-// response *ProbeResponse
-// request = &ProbeRequest{
-// Operation: util.DescribeUserOp,
-// }
-// // mock a user, describing it as an array of interface{}
-// userInfo = []interface{}{
-// "flags",
-// []interface{}{"on"},
-// "passwords",
-// []interface{}{"mock-password"},
-// "commands",
-// "+@all",
-// "keys",
-// "~*",
-// "channels",
-// "",
-// "selectors",
-// []interface{}{},
-// }
-//
-// userInfoMap = map[string]interface{}{
-// "flags": []interface{}{"on"},
-// "passwords": []interface{}{"mock-password"},
-// "commands": "+@all",
-// "keys": "~*",
-// "channels": "",
-// "selectors": []interface{}{},
-// }
-// )
-//
-// testCases := []redisTestCase{
-// {
-// testName: "emptymeta",
-// testMetaData: map[string]string{},
-// expectEveType: util.RespEveFail,
-// expectEveMsg: ErrNoUserName.Error(),
-// },
-// {
-// testName: "nousername",
-// testMetaData: map[string]string{"password": "moli"},
-// expectEveType: util.RespEveFail,
-// expectEveMsg: ErrNoUserName.Error(),
-// },
-// {
-// testName: "validInputButNil",
-// testMetaData: map[string]string{
-// "userName": userName,
-// },
-// expectEveType: util.RespEveFail,
-// expectEveMsg: "redis: nil",
-// },
-// {
-// testName: "validInput",
-// testMetaData: map[string]string{
-// "userName": userName,
-// },
-// expectEveType: util.RespEveSucc,
-// },
-// {
-// testName: "validInputAsMap",
-// testMetaData: map[string]string{
-// "userName": userName,
-// },
-// expectEveType: util.RespEveSucc,
-// },
-// }
-//
-// mock.ExpectDo("ACL", "GETUSER", userName).RedisNil()
-// mock.ExpectDo("ACL", "GETUSER", userName).SetVal(userInfo)
-// mock.ExpectDo("ACL", "GETUSER", userName).SetVal(userInfoMap)
-//
-// for _, accTest := range testCases {
-// request.Metadata = accTest.testMetaData
-// response, err = r.Invoke(ctx, request)
-// assert.Nil(t, err)
-// assert.NotNil(t, response.Data)
-// err = json.Unmarshal(response.Data, &opsResult)
-// assert.Nil(t, err)
-// assert.Equal(t, accTest.expectEveType, opsResult[util.RespFieldEvent], opsResult[util.RespFieldMessage])
-// if len(accTest.expectEveMsg) > 0 {
-// assert.Contains(t, opsResult[util.RespFieldMessage], accTest.expectEveMsg)
-// }
-// if util.RespEveSucc == opsResult[util.RespFieldEvent] {
-// // parse user info
-// users := make([]util.UserInfo, 0)
-// err = json.Unmarshal([]byte(opsResult[util.RespFieldMessage].(string)), &users)
-// assert.Nil(t, err)
-// assert.Len(t, users, 1)
-// user := users[0]
-// assert.Equal(t, userName, user.UserName)
-// assert.True(t, util.SuperUserRole.EqualTo(user.RoleName))
-// }
-// }
-// mock.ClearExpect()
-// })
-// // delete accounts
-// t.Run("Delete Accounts", func(t *testing.T) {
-//
-// var (
-// err error
-// opsResult = OpsResult{}
-// response *ProbeResponse
-// request = &ProbeRequest{
-// Operation: util.DeleteUserOp,
-// }
-// )
-//
-// testCases := []redisTestCase{
-// {
-// testName: "emptymeta",
-// testMetaData: map[string]string{},
-// expectEveType: util.RespEveFail,
-// expectEveMsg: ErrNoUserName.Error(),
-// },
-// {
-// testName: "nousername",
-// testMetaData: map[string]string{"password": "moli"},
-// expectEveType: util.RespEveFail,
-// expectEveMsg: ErrNoUserName.Error(),
-// },
-// {
-// testName: "validInput",
-// testMetaData: map[string]string{
-// "userName": userName,
-// },
-// expectEveType: util.RespEveSucc,
-// expectEveMsg: fmt.Sprintf("deleted user: %s", userName),
-// },
-// }
-// // mock a user
-// mock.ExpectDo("ACL", "DELUSER", userName).SetVal("ok")
-//
-// for _, accTest := range testCases {
-// request.Metadata = accTest.testMetaData
-// response, err = r.Invoke(ctx, request)
-// assert.Nil(t, err)
-// assert.NotNil(t, response.Data)
-// err = json.Unmarshal(response.Data, &opsResult)
-// assert.Nil(t, err)
-// assert.Equal(t, accTest.expectEveType, opsResult[util.RespFieldEvent], opsResult[util.RespFieldMessage])
-// assert.Contains(t, opsResult[util.RespFieldMessage], accTest.expectEveMsg)
-// }
-// mock.ClearExpect()
-// })
-//
-// t.Run("RoleName Conversion", func(t *testing.T) {
-// type roleTestCase struct {
-// roleName util.RoleType
-// redisPrivs string
-// }
-// grantTestCases := []roleTestCase{
-// {
-// util.SuperUserRole,
-// "+@all allkeys",
-// },
-// {
-// util.ReadWriteRole,
-// "-@all +@write +@read allkeys",
-// },
-// {
-// util.ReadOnlyRole,
-// "-@all +@read allkeys",
-// },
-// }
-// for _, test := range grantTestCases {
-// cmd := r.role2Priv(util.GrantUserRoleOp, (string)(test.roleName))
-// assert.Equal(t, test.redisPrivs, cmd)
-//
-// // allkeys -> ~*
-// cmd = strings.Replace(cmd, "allkeys", "~*", 1)
-// inferredRole := r.priv2Role(cmd)
-// assert.Equal(t, test.roleName, inferredRole)
-// }
-//
-// revokeTestCases := []roleTestCase{
-// {
-// util.SuperUserRole,
-// "-@all allkeys",
-// },
-// {
-// util.ReadWriteRole,
-// "-@all -@write -@read allkeys",
-// },
-// {
-// util.ReadOnlyRole,
-// "-@all -@read allkeys",
-// },
-// }
-// for _, test := range revokeTestCases {
-// cmd := r.role2Priv(util.RevokeUserRoleOp, (string)(test.roleName))
-// assert.Equal(t, test.redisPrivs, cmd)
-// }
-// })
-// // list accounts
-// t.Run("List System Accounts", func(t *testing.T) {
-// mock.ExpectDo("ACL", "USERS").SetVal([]string{"ape", "default", "kbadmin"})
-//
-// response, err := r.Invoke(ctx, &ProbeRequest{
-// Operation: util.ListSystemAccountsOp,
-// })
-//
-// assert.Nil(t, err)
-// assert.NotNil(t, response)
-// assert.NotNil(t, response.Data)
-// // parse result
-// opsResult := OpsResult{}
-// _ = json.Unmarshal(response.Data, &opsResult)
-// assert.Equal(t, util.RespEveSucc, opsResult[util.RespFieldEvent], opsResult[util.RespFieldMessage])
-//
-// users := []string{}
-// err = json.Unmarshal([]byte(opsResult[util.RespFieldMessage].(string)), &users)
-// assert.Nil(t, err)
-// assert.NotEmpty(t, users)
-// assert.Len(t, users, 2)
-// assert.Contains(t, users, "kbadmin")
-// assert.Contains(t, users, "default")
-// mock.ClearExpect()
-// })
-// }
-//
-// func mockRedisOps(t *testing.T) (*Redis, redismock.ClientMock) {
-// client, mock := redismock.NewClientMock()
-// viper.SetDefault("KB_ROLECHECK_DELAY", "0")
-//
-// if client == nil || mock == nil {
-// t.Fatalf("failed to mock a redis client")
-// return nil, nil
-// }
-// r := &Redis{}
-// development, _ := zap.NewDevelopment()
-// r.Logger = zapr.NewLogger(development)
-// r.client = client
-// r.ctx, r.cancel = context.WithCancel(context.Background())
-// _ = r.Init(nil)
-// r.DBPort = 6379
-// return r, mock
-// }
-//
diff --git a/engines/redis/metadata.go b/engines/redis/metadata.go
deleted file mode 100644
index 73cc67a..0000000
--- a/engines/redis/metadata.go
+++ /dev/null
@@ -1,82 +0,0 @@
-/*
-Copyright (C) 2022-2024 ApeCloud Co., Ltd
-
-This file is part of KubeBlocks project
-
-This program is free software: you can redistribute it and/or modify
-it under the terms of the GNU Affero General Public License as published by
-the Free Software Foundation, either version 3 of the License, or
-(at your option) any later version.
-
-This program is distributed in the hope that it will be useful
-but WITHOUT ANY WARRANTY; without even the implied warranty of
-MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-GNU Affero General Public License for more details.
-
-You should have received a copy of the GNU Affero General Public License
-along with this program. If not, see .
-*/
-
-package redis
-
-import (
- "fmt"
- "strconv"
- "time"
-)
-
-const (
- maxRetries = "maxRetries"
- maxRetryBackoff = "maxRetryBackoff"
- ttlInSeconds = "ttlInSeconds"
- queryIndexes = "queryIndexes"
- defaultBase = 10
- defaultBitSize = 0
- defaultMaxRetries = 3
- defaultMaxRetryBackoff = time.Second * 2
-)
-
-type Metadata struct {
- MaxRetries int
- MaxRetryBackoff time.Duration
- TTLInSeconds *int
- QueryIndexes string
-}
-
-func ParseRedisMetadata(properties map[string]string) (Metadata, error) {
- m := Metadata{}
-
- m.MaxRetries = defaultMaxRetries
- if val, ok := properties[maxRetries]; ok && val != "" {
- parsedVal, err := strconv.ParseInt(val, defaultBase, defaultBitSize)
- if err != nil {
- return m, fmt.Errorf("redis store error: can't parse maxRetries field: %s", err)
- }
- m.MaxRetries = int(parsedVal)
- }
-
- m.MaxRetryBackoff = defaultMaxRetryBackoff
- if val, ok := properties[maxRetryBackoff]; ok && val != "" {
- parsedVal, err := strconv.ParseInt(val, defaultBase, defaultBitSize)
- if err != nil {
- return m, fmt.Errorf("redis store error: can't parse maxRetryBackoff field: %s", err)
- }
- m.MaxRetryBackoff = time.Duration(parsedVal)
- }
-
- if val, ok := properties[ttlInSeconds]; ok && val != "" {
- parsedVal, err := strconv.ParseInt(val, defaultBase, defaultBitSize)
- if err != nil {
- return m, fmt.Errorf("redis store error: can't parse ttlInSeconds field: %s", err)
- }
- intVal := int(parsedVal)
- m.TTLInSeconds = &intVal
- } else {
- m.TTLInSeconds = nil
- }
-
- if val, ok := properties[queryIndexes]; ok && val != "" {
- m.QueryIndexes = val
- }
- return m, nil
-}
diff --git a/engines/redis/redis.go b/engines/redis/redis.go
index d38710b..2ae3c48 100644
--- a/engines/redis/redis.go
+++ b/engines/redis/redis.go
@@ -31,7 +31,6 @@ import (
const (
ClusterType = "cluster"
- NodeType = "node"
)
func ParseClientFromProperties(properties map[string]string, defaultSettings *Settings) (client redis.UniversalClient, settings *Settings, err error) {
diff --git a/engines/redis/suite_test.go b/engines/redis/suite_test.go
index 79d5c49..1f7a1da 100644
--- a/engines/redis/suite_test.go
+++ b/engines/redis/suite_test.go
@@ -25,18 +25,11 @@ import (
. "github.com/onsi/ginkgo/v2"
. "github.com/onsi/gomega"
- "github.com/golang/mock/gomock"
"github.com/spf13/viper"
ctrl "sigs.k8s.io/controller-runtime"
"sigs.k8s.io/controller-runtime/pkg/log/zap"
"github.com/apecloud/dbctl/constant"
- "github.com/apecloud/dbctl/dcs"
-)
-
-var (
- dcsStore dcs.DCS
- mockDCSStore *dcs.MockDCS
)
func init() {
@@ -53,14 +46,4 @@ func TestRedisDBManager(t *testing.T) {
}
var _ = BeforeSuite(func() {
- // Init mock dcs store
- InitMockDCSStore()
})
-
-func InitMockDCSStore() {
- ctrl := gomock.NewController(GinkgoT())
- mockDCSStore = dcs.NewMockDCS(ctrl)
- mockDCSStore.EXPECT().GetClusterFromCache().Return(&dcs.Cluster{}).AnyTimes()
- dcs.SetStore(mockDCSStore)
- dcsStore = mockDCSStore
-}
diff --git a/engines/redis/user.go b/engines/redis/user.go
deleted file mode 100644
index 30c0c2c..0000000
--- a/engines/redis/user.go
+++ /dev/null
@@ -1,285 +0,0 @@
-/*
-Copyright (C) 2022-2023 ApeCloud Co., Ltd
-
-This file is part of KubeBlocks project
-
-This program is free software: you can redistribute it and/or modify
-it under the terms of the GNU Affero General Public License as published by
-the Free Software Foundation, either version 3 of the License, or
-(at your option) any later version.
-
-This program is distributed in the hope that it will be useful
-but WITHOUT ANY WARRANTY; without even the implied warranty of
-MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-GNU Affero General Public License for more details.
-
-You should have received a copy of the GNU Affero General Public License
-along with this program. If not, see .
-*/
-
-package redis
-
-import (
- "context"
- "encoding/json"
- "fmt"
- "strings"
-
- "golang.org/x/exp/slices"
-
- "github.com/apecloud/dbctl/engines/models"
-)
-
-const (
- listUserTpl = "ACL USERS"
- descUserTpl = "ACL GETUSER %s"
- createUserTpl = "ACL SETUSER %s >%s"
- dropUserTpl = "ACL DELUSER %s"
- grantTpl = "ACL SETUSER %s %s"
- revokeTpl = "ACL SETUSER %s %s"
-)
-
-var (
- redisPreDefinedUsers = []string{
- "default",
- "kbadmin",
- "kbdataprotection",
- "kbmonitoring",
- "kbprobe",
- "kbreplicator",
- }
-)
-
-func (mgr *Manager) ListUsers(ctx context.Context) ([]models.UserInfo, error) {
- data, err := mgr.Query(ctx, listUserTpl)
- if err != nil {
- mgr.Logger.Error(err, "error executing %s")
- return nil, err
- }
-
- results := make([]string, 0)
- err = json.Unmarshal(data, &results)
- if err != nil {
- return nil, err
- }
- users := make([]models.UserInfo, 0)
- for _, userInfo := range results {
- userName := strings.TrimSpace(userInfo)
- if slices.Contains(redisPreDefinedUsers, userName) {
- continue
- }
- user := models.UserInfo{UserName: userName}
- users = append(users, user)
- }
- return users, nil
-}
-
-func (mgr *Manager) ListSystemAccounts(ctx context.Context) ([]models.UserInfo, error) {
- data, err := mgr.Query(ctx, listUserTpl)
- if err != nil {
- mgr.Logger.Error(err, "error executing %s")
- return nil, err
- }
-
- results := make([]string, 0)
- err = json.Unmarshal(data, &results)
- if err != nil {
- return nil, err
- }
- users := make([]models.UserInfo, 0)
- for _, userInfo := range results {
- userName := strings.TrimSpace(userInfo)
- if !slices.Contains(redisPreDefinedUsers, userName) {
- continue
- }
- user := models.UserInfo{UserName: userName}
- users = append(users, user)
- }
- return users, nil
-}
-
-func (mgr *Manager) DescribeUser(ctx context.Context, userName string) (*models.UserInfo, error) {
- sql := fmt.Sprintf(descUserTpl, userName)
-
- data, err := mgr.Query(ctx, sql)
- if err != nil {
- mgr.Logger.Error(err, "execute sql failed", "sql", sql)
- return nil, err
- }
-
- // parse it to a map or an []interface
- // try map first
- var profile map[string]string
- profile, err = parseCommandAndKeyFromMap(data)
- if err != nil {
- // try list
- profile, err = parseCommandAndKeyFromList(data)
- if err != nil {
- return nil, err
- }
- }
-
- user := &models.UserInfo{
- UserName: userName,
- RoleName: (string)(priv2Role(profile["commands"] + " " + profile["keys"])),
- }
- return user, nil
-}
-
-func (mgr *Manager) CreateUser(ctx context.Context, userName, password string) error {
- sql := fmt.Sprintf(createUserTpl, userName, password)
-
- _, err := mgr.Exec(ctx, sql)
- if err != nil {
- mgr.Logger.Error(err, "execute sql failed", "sql", sql)
- return err
- }
-
- return nil
-}
-
-func (mgr *Manager) DeleteUser(ctx context.Context, userName string) error {
- sql := fmt.Sprintf(dropUserTpl, userName)
-
- _, err := mgr.Exec(ctx, sql)
- if err != nil {
- mgr.Logger.Error(err, "execute sql failed", "sql", sql)
- return err
- }
-
- return nil
-}
-
-func (mgr *Manager) GrantUserRole(ctx context.Context, userName, roleName string) error {
- var sql string
- command := role2Priv("+", roleName)
- sql = fmt.Sprintf(grantTpl, userName, command)
- _, err := mgr.Exec(ctx, sql)
- if err != nil {
- mgr.Logger.Error(err, "execute sql failed", "sql", sql)
- return err
- }
-
- return nil
-}
-
-func (mgr *Manager) RevokeUserRole(ctx context.Context, userName, roleName string) error {
- var sql string
- command := role2Priv("-", roleName)
- sql = fmt.Sprintf(revokeTpl, userName, command)
- _, err := mgr.Exec(ctx, sql)
- if err != nil {
- mgr.Logger.Error(err, "execute sql failed", "sql", sql)
- return err
- }
-
- return nil
-}
-
-func role2Priv(prefix, roleName string) string {
- var command string
-
- roleType := models.String2RoleType(roleName)
- switch roleType {
- case models.SuperUserRole:
- command = fmt.Sprintf("%s@all allkeys", prefix)
- case models.ReadWriteRole:
- command = fmt.Sprintf("-@all %s@write %s@read allkeys", prefix, prefix)
- case models.ReadOnlyRole:
- command = fmt.Sprintf("-@all %s@read allkeys", prefix)
- }
- return command
-}
-
-func priv2Role(commands string) models.RoleType {
- if commands == "-@all" {
- return models.NoPrivileges
- }
- switch commands {
- case "-@all +@read ~*":
- return models.ReadOnlyRole
- case "-@all +@write +@read ~*":
- return models.ReadWriteRole
- case "+@all ~*":
- return models.SuperUserRole
- default:
- return models.CustomizedRole
- }
-}
-
-func parseCommandAndKeyFromMap(data interface{}) (map[string]string, error) {
- var (
- redisUserPrivContxt = []string{"commands", "keys", "channels", "selectors"}
- )
-
- profile := make(map[string]string, 0)
- results := make(map[string]interface{}, 0)
-
- err := json.Unmarshal(data.([]byte), &results)
- if err != nil {
- return nil, err
- }
- for k, v := range results {
- // each key is string, and each v is string or list of string
- if !slices.Contains(redisUserPrivContxt, k) {
- continue
- }
-
- switch v := v.(type) {
- case string:
- profile[k] = v
- case []interface{}:
- selectors := make([]string, 0)
- for _, sel := range v {
- selectors = append(selectors, sel.(string))
- }
- profile[k] = strings.Join(selectors, " ")
- default:
- return nil, fmt.Errorf("unknown data type: %v", v)
- }
- }
- return profile, nil
-}
-
-func parseCommandAndKeyFromList(data interface{}) (map[string]string, error) {
- var (
- redisUserPrivContxt = []string{"commands", "keys", "channels", "selectors"}
- redisUserInfoContext = []string{"flags", "passwords"}
- )
-
- profile := make(map[string]string, 0)
- results := make([]interface{}, 0)
-
- err := json.Unmarshal(data.([]byte), &results)
- if err != nil {
- return nil, err
- }
- // parse line by line
- var context string
- for i := 0; i < len(results); i++ {
- result := results[i]
- switch result := result.(type) {
- case string:
- strVal := strings.TrimSpace(result)
- if len(strVal) == 0 {
- continue
- }
- if slices.Contains(redisUserInfoContext, strVal) {
- i++
- continue
- }
- if slices.Contains(redisUserPrivContxt, strVal) {
- context = strVal
- } else {
- profile[context] = strVal
- }
- case []interface{}:
- selectors := make([]string, 0)
- for _, sel := range result {
- selectors = append(selectors, sel.(string))
- }
- profile[context] = strings.Join(selectors, " ")
- }
- }
- return profile, nil
-}
diff --git a/engines/register/managers.go b/engines/register/managers.go
index a673960..8561118 100644
--- a/engines/register/managers.go
+++ b/engines/register/managers.go
@@ -25,10 +25,8 @@ import (
"github.com/pkg/errors"
"github.com/spf13/afero"
- "github.com/spf13/viper"
ctrl "sigs.k8s.io/controller-runtime"
- "github.com/apecloud/dbctl/constant"
"github.com/apecloud/dbctl/engines"
"github.com/apecloud/dbctl/engines/etcd"
"github.com/apecloud/dbctl/engines/foxlake"
@@ -36,7 +34,6 @@ import (
"github.com/apecloud/dbctl/engines/mongodb"
"github.com/apecloud/dbctl/engines/mysql"
"github.com/apecloud/dbctl/engines/nebula"
- "github.com/apecloud/dbctl/engines/oceanbase"
"github.com/apecloud/dbctl/engines/opengauss"
"github.com/apecloud/dbctl/engines/oracle"
"github.com/apecloud/dbctl/engines/polardbx"
@@ -48,53 +45,42 @@ import (
"github.com/apecloud/dbctl/engines/wesql"
)
-type managerNewFunc func(engines.Properties) (engines.DBManager, error)
+type ManagerNewFunc func() (engines.DBManager, error)
-var managerNewFuncs = make(map[string]managerNewFunc)
+var managerNewFunctions = make(map[string]ManagerNewFunc)
-// Lorry runs with a single database engine instance at a time,
+// dbctl runs with a single database engine instance at a time,
// so only one dbManager is initialized and cached here during execution.
var dbManager engines.DBManager
var fs = afero.NewOsFs()
func init() {
- RegisterEngine(models.MySQL, "consensus", wesql.NewManager, mysql.NewCommands)
- RegisterEngine(models.MySQL, "replication", mysql.NewManager, mysql.NewCommands)
- RegisterEngine(models.Redis, "replication", redis.NewManager, redis.NewCommands)
- RegisterEngine(models.ETCD, "consensus", etcd.NewManager, nil)
- RegisterEngine(models.MongoDB, "consensus", mongodb.NewManager, mongodb.NewCommands)
- RegisterEngine(models.PolarDBX, "consensus", polardbx.NewManager, mysql.NewCommands)
- RegisterEngine(models.PostgreSQL, "replication", vanillapostgres.NewManager, postgres.NewCommands)
- RegisterEngine(models.PostgreSQL, "consensus", apecloudpostgres.NewManager, postgres.NewCommands)
- RegisterEngine(models.FoxLake, "", nil, foxlake.NewCommands)
- RegisterEngine(models.Nebula, "", nil, nebula.NewCommands)
- RegisterEngine(models.PulsarProxy, "", nil, pulsar.NewProxyCommands)
- RegisterEngine(models.PulsarBroker, "", nil, pulsar.NewBrokerCommands)
- RegisterEngine(models.Oceanbase, "", oceanbase.NewManager, oceanbase.NewCommands)
- RegisterEngine(models.Oracle, "", nil, oracle.NewCommands)
- RegisterEngine(models.OpenGauss, "", nil, opengauss.NewCommands)
-
- // support component definition without workloadType
- RegisterEngine(models.WeSQL, "", wesql.NewManager, mysql.NewCommands)
- RegisterEngine(models.MySQL, "", mysql.NewManager, mysql.NewCommands)
- RegisterEngine(models.Redis, "", redis.NewManager, redis.NewCommands)
- RegisterEngine(models.ETCD, "", etcd.NewManager, nil)
- RegisterEngine(models.MongoDB, "", mongodb.NewManager, mongodb.NewCommands)
- RegisterEngine(models.PolarDBX, "", polardbx.NewManager, mysql.NewCommands)
- RegisterEngine(models.PostgreSQL, "", vanillapostgres.NewManager, postgres.NewCommands)
- RegisterEngine(models.VanillaPostgreSQL, "", vanillapostgres.NewManager, postgres.NewCommands)
- RegisterEngine(models.ApecloudPostgreSQL, "", apecloudpostgres.NewManager, postgres.NewCommands)
+ EngineRegister(models.WeSQL, wesql.NewManager, mysql.NewCommands)
+ EngineRegister(models.MySQL, mysql.NewManager, mysql.NewCommands)
+ EngineRegister(models.Redis, redis.NewManager, redis.NewCommands)
+ EngineRegister(models.ETCD, etcd.NewManager, nil)
+ EngineRegister(models.MongoDB, mongodb.NewManager, mongodb.NewCommands)
+ EngineRegister(models.PolarDBX, polardbx.NewManager, mysql.NewCommands)
+ EngineRegister(models.PostgreSQL, vanillapostgres.NewManager, postgres.NewCommands)
+ EngineRegister(models.VanillaPostgreSQL, vanillapostgres.NewManager, postgres.NewCommands)
+ EngineRegister(models.ApecloudPostgreSQL, apecloudpostgres.NewManager, postgres.NewCommands)
+ EngineRegister(models.FoxLake, nil, foxlake.NewCommands)
+ EngineRegister(models.Nebula, nil, nebula.NewCommands)
+ EngineRegister(models.PulsarProxy, nil, pulsar.NewProxyCommands)
+ EngineRegister(models.PulsarBroker, nil, pulsar.NewBrokerCommands)
+ EngineRegister(models.Oracle, nil, oracle.NewCommands)
+ EngineRegister(models.OpenGauss, nil, opengauss.NewCommands)
}
-func RegisterEngine(characterType models.EngineType, workloadType string, newFunc managerNewFunc, newCommand engines.NewCommandFunc) {
- key := strings.ToLower(string(characterType) + "_" + workloadType)
- managerNewFuncs[key] = newFunc
+func EngineRegister(characterType models.EngineType, newFunc ManagerNewFunc, newCommand engines.NewCommandFunc) {
+ key := strings.ToLower(string(characterType))
+ managerNewFunctions[key] = newFunc
engines.NewCommandFuncs[string(characterType)] = newCommand
}
-func GetManagerNewFunc(characterType, workloadType string) managerNewFunc {
- key := strings.ToLower(characterType + "_" + workloadType)
- return managerNewFuncs[key]
+func GetManagerNewFunc(characterType string) ManagerNewFunc {
+ key := strings.ToLower(characterType)
+ return managerNewFunctions[key]
}
func SetDBManager(manager engines.DBManager) {
@@ -118,37 +104,20 @@ func NewClusterCommands(typeName string) (engines.ClusterCommands, error) {
return newFunc(), nil
}
-func InitDBManager(configDir string) error {
+func InitDBManager(engineType string) error {
if dbManager != nil {
return nil
}
-
- ctrl.Log.Info("Initialize DB manager")
- workloadType := viper.GetString(constant.KBEnvWorkloadType)
- if workloadType == "" {
- ctrl.Log.Info(constant.KBEnvWorkloadType + " ENV not set")
- }
-
- engineType := viper.GetString(constant.KBEnvEngineType)
- if viper.IsSet(constant.KBEnvBuiltinHandler) && engineType == "" {
- workloadType = ""
- engineType = viper.GetString(constant.KBEnvBuiltinHandler)
- }
if engineType == "" {
- return errors.New("engine typpe not set")
- }
-
- err := GetAllComponent(configDir) // find all builtin config file and read
- if err != nil { // Handle errors reading the config file
- return errors.Wrap(err, "fatal error config file")
+ return errors.New("engine type not set")
}
- properties := GetProperties(engineType)
- newFunc := GetManagerNewFunc(engineType, workloadType)
+ ctrl.Log.Info("Initialize DB manager")
+ newFunc := GetManagerNewFunc(engineType)
if newFunc == nil {
- return errors.Errorf("no db manager for characterType %s and workloadType %s", engineType, workloadType)
+ return errors.Errorf("no db manager for engine %s", engineType)
}
- mgr, err := newFunc(properties)
+ mgr, err := newFunc()
if err != nil {
return err
}
@@ -156,57 +125,3 @@ func InitDBManager(configDir string) error {
dbManager = mgr
return nil
}
-
-type Component struct {
- Name string
- Spec ComponentSpec
-}
-
-type ComponentSpec struct {
- Version string
- Metadata []kv
-}
-
-type kv struct {
- Name string
- Value string
-}
-
-var Name2Property = map[string]engines.Properties{}
-
-func readConfig(filename string) (string, engines.Properties, error) {
- viper.SetConfigType("yaml")
- viper.SetConfigFile(filename)
- if err := viper.ReadInConfig(); err != nil {
- return "", nil, err
- }
- component := &Component{}
- if err := viper.Unmarshal(component); err != nil {
- return "", nil, err
- }
- properties := make(engines.Properties)
- properties["version"] = component.Spec.Version
- for _, pair := range component.Spec.Metadata {
- properties[pair.Name] = pair.Value
- }
- return component.Name, properties, nil
-}
-
-func GetAllComponent(dir string) error {
- files, err := afero.ReadDir(fs, dir)
- if err != nil {
- return err
- }
- for _, file := range files {
- name, properties, err := readConfig(dir + "/" + file.Name())
- if err != nil {
- return err
- }
- Name2Property[name] = properties
- }
- return nil
-}
-
-func GetProperties(name string) engines.Properties {
- return Name2Property[name]
-}
diff --git a/engines/register/managers_test.go b/engines/register/managers_test.go
index 9795850..d17f926 100644
--- a/engines/register/managers_test.go
+++ b/engines/register/managers_test.go
@@ -21,103 +21,19 @@ package register
import (
"fmt"
- "os"
"testing"
"github.com/spf13/afero"
"github.com/spf13/viper"
"github.com/stretchr/testify/assert"
- "github.com/apecloud/dbctl/constant"
"github.com/apecloud/dbctl/engines"
)
const (
- fakeCharacterType = "fake-db"
- fakeWrongContent = "wrong"
- fakeConfigContent = `
-name: fake-db
-spec:
- version: v1
- metadata:
- - name: url # Required
- value: "user=test password=test host=localhost"`
- fakeConfigFile = "/fake-config-file"
- fakeConfigDir = "fake-dir"
+ fakeEngine = "fake-db"
)
-func TestReadConfig(t *testing.T) {
- fs = afero.NewMemMapFs()
- viper.SetFs(fs)
- defer func() {
- fs = afero.NewOsFs()
- viper.Reset()
- }()
-
- t.Run("viper read in config failed", func(t *testing.T) {
- name, property, err := readConfig(fakeConfigFile)
- assert.NotNil(t, err)
- assert.Nil(t, property)
- assert.Equal(t, "", name)
- })
-
- file, err := fs.Create(fakeConfigFile)
- assert.Nil(t, err)
- _, err = file.WriteString(fakeConfigContent)
- assert.Nil(t, err)
- _ = file.Close()
-
- t.Run("read config successfully", func(t *testing.T) {
- name, property, err := readConfig(fakeConfigFile)
- assert.Nil(t, err)
- assert.Equal(t, fakeCharacterType, name)
- assert.Equal(t, "user=test password=test host=localhost", property["url"])
- })
-}
-
-func TestGetAllComponent(t *testing.T) {
- fs = afero.NewMemMapFs()
- viper.SetFs(fs)
- defer func() {
- fs = afero.NewOsFs()
- viper.Reset()
- }()
-
- t.Run("read dir failed", func(t *testing.T) {
- err := GetAllComponent(fakeConfigDir)
- assert.NotNil(t, err)
- assert.ErrorContains(t, err, "file does not exist")
- })
-
- err := fs.Mkdir(fakeConfigDir, os.ModeDir)
- assert.Nil(t, err)
- file, err := fs.Create(fakeConfigDir + fakeConfigFile)
- assert.Nil(t, err)
- _, err = file.WriteString(fakeWrongContent)
- assert.Nil(t, err)
- _ = file.Close()
-
- t.Run("read config failed", func(t *testing.T) {
- err = GetAllComponent(fakeConfigDir)
- assert.NotNil(t, err)
- })
-
- err = fs.Remove(fakeConfigDir + fakeConfigFile)
- assert.Nil(t, err)
- file, err = fs.Create(fakeConfigDir + fakeConfigFile)
- assert.Nil(t, err)
- _, err = file.WriteString(fakeConfigContent)
- _ = file.Close()
-
- t.Run("get all component successfully", func(t *testing.T) {
- err = GetAllComponent(fakeConfigDir)
- assert.Nil(t, err)
-
- property := GetProperties(fakeCharacterType)
- assert.Equal(t, "user=test password=test host=localhost", property["url"])
- })
-}
-
func TestInitDBManager(t *testing.T) {
fs = afero.NewMemMapFs()
viper.SetFs(fs)
@@ -127,10 +43,9 @@ func TestInitDBManager(t *testing.T) {
viper.Reset()
dbManager = realDBManager
}()
- configDir := fakeConfigDir
t.Run("characterType not set", func(t *testing.T) {
- err := InitDBManager(configDir)
+ err := InitDBManager("")
assert.NotNil(t, err)
// assert.ErrorContains(t, err, "KB_SERVICE_CHARACTER_TYPE not set")
@@ -139,41 +54,22 @@ func TestInitDBManager(t *testing.T) {
// assert.ErrorContains(t, err, "no db manager")
})
- viper.Set(constant.KBEnvBuiltinHandler, fakeCharacterType)
- t.Run("get all component failed", func(t *testing.T) {
- err := InitDBManager(configDir)
-
- assert.NotNil(t, err)
- assert.ErrorContains(t, err, "fatal error config file")
- _, err = GetDBManager()
- assert.NotNil(t, err)
- assert.ErrorContains(t, err, "no db manager")
- })
-
- err := fs.Mkdir(fakeConfigDir, os.ModeDir)
- assert.Nil(t, err)
- file, err := fs.Create(fakeConfigDir + fakeConfigFile)
- assert.Nil(t, err)
- _, err = file.WriteString(fakeConfigContent)
- assert.Nil(t, err)
- _ = file.Close()
-
t.Run("new func nil", func(t *testing.T) {
- err = InitDBManager(configDir)
+ err := InitDBManager(fakeEngine)
assert.NotNil(t, err)
- assert.ErrorContains(t, err, "no db manager for characterType fake-db and workloadType ")
+ assert.ErrorContains(t, err, "no db manager for engine fake-db")
_, err = GetDBManager()
assert.NotNil(t, err)
assert.ErrorContains(t, err, "no db manager")
})
- fakeNewFunc := func(engines.Properties) (engines.DBManager, error) {
+ fakeNewFunc := func() (engines.DBManager, error) {
return nil, fmt.Errorf("some error")
}
- RegisterEngine(fakeCharacterType, "", fakeNewFunc, nil)
+ EngineRegister(fakeEngine, fakeNewFunc, nil)
t.Run("new func failed", func(t *testing.T) {
- err = InitDBManager(configDir)
+ err := InitDBManager(fakeEngine)
assert.NotNil(t, err)
assert.ErrorContains(t, err, "some error")
@@ -182,14 +78,14 @@ func TestInitDBManager(t *testing.T) {
assert.ErrorContains(t, err, "no db manager")
})
- fakeNewFunc = func(engines.Properties) (engines.DBManager, error) {
+ fakeNewFunc = func() (engines.DBManager, error) {
return &engines.MockManager{}, nil
}
- RegisterEngine(fakeCharacterType, "", fakeNewFunc, func() engines.ClusterCommands {
+ EngineRegister(fakeEngine, fakeNewFunc, func() engines.ClusterCommands {
return nil
})
t.Run("new func successfully", func(t *testing.T) {
- err = InitDBManager(configDir)
+ err := InitDBManager(fakeEngine)
assert.Nil(t, err)
_, err = GetDBManager()
@@ -198,17 +94,17 @@ func TestInitDBManager(t *testing.T) {
SetDBManager(&engines.MockManager{})
t.Run("db manager exists", func(t *testing.T) {
- err = InitDBManager(configDir)
+ err := InitDBManager(fakeEngine)
assert.Nil(t, err)
_, err = GetDBManager()
assert.Nil(t, err)
})
t.Run("new cluster command", func(t *testing.T) {
- _, err = NewClusterCommands("")
+ _, err := NewClusterCommands("")
assert.NotNil(t, err)
assert.ErrorContains(t, err, "unsupported engine type: ")
- _, err = NewClusterCommands(fakeCharacterType)
+ _, err = NewClusterCommands(fakeEngine)
assert.Nil(t, err)
})
}
diff --git a/engines/util.go b/engines/util.go
deleted file mode 100644
index 4907bd3..0000000
--- a/engines/util.go
+++ /dev/null
@@ -1,54 +0,0 @@
-/*
-Copyright (C) 2022-2023 ApeCloud Co., Ltd
-
-This file is part of KubeBlocks project
-
-This program is free software: you can redistribute it and/or modify
-it under the terms of the GNU Affero General Public License as published by
-the Free Software Foundation, either version 3 of the License, or
-(at your option) any later version.
-
-This program is distributed in the hope that it will be useful
-but WITHOUT ANY WARRANTY; without even the implied warranty of
-MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-GNU Affero General Public License for more details.
-
-You should have received a copy of the GNU Affero General Public License
-along with this program. If not, see .
-*/
-
-package engines
-
-import (
- "fmt"
- "strconv"
- "strings"
-)
-
-const (
- // types for probe
- CheckRunningType int = iota
- CheckStatusType
- CheckRoleChangedType
-)
-
-func MaxInt64(x, y int64) int64 {
- if x > y {
- return x
- }
- return y
-}
-
-func GetIndex(memberName string) (int, error) {
- i := strings.LastIndex(memberName, "-")
- if i < 0 {
- return 0, fmt.Errorf("the format of member name is wrong: %s", memberName)
- }
- return strconv.Atoi(memberName[i+1:])
-}
-
-func AddSingleQuote(str string) string {
- return "'" + str + "'"
-}
-
-type Properties map[string]string
diff --git a/engines/utils.go b/engines/utils.go
new file mode 100644
index 0000000..2a936ad
--- /dev/null
+++ b/engines/utils.go
@@ -0,0 +1,5 @@
+package engines
+
+func AddSingleQuote(str string) string {
+ return "'" + str + "'"
+}
diff --git a/engines/wesql/config.go b/engines/wesql/config.go
index f4026e1..9997229 100644
--- a/engines/wesql/config.go
+++ b/engines/wesql/config.go
@@ -29,8 +29,8 @@ type Config struct {
var config *Config
-func NewConfig(properties map[string]string) (*Config, error) {
- mysqlConfig, err := mysql.NewConfig(properties)
+func NewConfig() (*Config, error) {
+ mysqlConfig, err := mysql.NewConfig()
if err != nil {
return nil, err
}
diff --git a/engines/wesql/config_test.go b/engines/wesql/config_test.go
index d7f4ce0..e7d217b 100644
--- a/engines/wesql/config_test.go
+++ b/engines/wesql/config_test.go
@@ -23,33 +23,11 @@ import (
"testing"
"github.com/stretchr/testify/assert"
-
- "github.com/apecloud/dbctl/engines"
-)
-
-var (
- fakeProperties = engines.Properties{
- "url": "root:@tcp(127.0.0.1:3306)/mysql?multiStatements=true",
- "maxOpenConns": "5",
- }
- fakePropertiesWithWrongURL = engines.Properties{
- "url": "root:@tcp(127.0.0.1:3306)mysql",
- }
- fakePropertiesWithWrongPem = engines.Properties{
- "pemPath": "fake-path",
- }
)
func TestNewConfig(t *testing.T) {
- t.Run("new config failed", func(t *testing.T) {
- fakeConfig, err := NewConfig(fakePropertiesWithWrongPem)
-
- assert.Nil(t, fakeConfig)
- assert.NotNil(t, err)
- })
-
t.Run("new config successfully", func(t *testing.T) {
- fakeConfig, err := NewConfig(fakeProperties)
+ fakeConfig, err := NewConfig()
assert.NotNil(t, fakeConfig)
assert.Nil(t, err)
diff --git a/engines/wesql/conn.go b/engines/wesql/conn.go
deleted file mode 100644
index 7b2f364..0000000
--- a/engines/wesql/conn.go
+++ /dev/null
@@ -1,76 +0,0 @@
-/*
-Copyright (C) 2022-2023 ApeCloud Co., Ltd
-
-This file is part of KubeBlocks project
-
-This program is free software: you can redistribute it and/or modify
-it under the terms of the GNU Affero General Public License as published by
-the Free Software Foundation, either version 3 of the License, or
-(at your option) any later version.
-
-This program is distributed in the hope that it will be useful
-but WITHOUT ANY WARRANTY; without even the implied warranty of
-MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-GNU Affero General Public License for more details.
-
-You should have received a copy of the GNU Affero General Public License
-along with this program. If not, see .
-*/
-
-package wesql
-
-import (
- "database/sql"
- "strings"
-
- "github.com/pkg/errors"
-
- "github.com/apecloud/dbctl/dcs"
-)
-
-// GetDBConnWithMember retrieves a database connection for a specific member of a cluster.
-func (mgr *Manager) GetDBConnWithMember(cluster *dcs.Cluster, member *dcs.Member) (db *sql.DB, err error) {
- if member != nil && member.Name != mgr.CurrentMemberName {
- addr := cluster.GetMemberAddrWithPort(*member)
- db, err = config.GetDBConnWithAddr(addr)
- if err != nil {
- return nil, errors.Wrap(err, "new db connection failed")
- }
- } else {
- db = mgr.DB
- }
- return db, nil
-}
-
-// GetLeaderConn retrieves a database connection to the leader member of a cluster.
-func (mgr *Manager) GetLeaderConn(cluster *dcs.Cluster) (*sql.DB, error) {
- leaderMember := cluster.GetLeaderMember()
- if leaderMember == nil {
- mgr.Logger.Info("Get leader from db cluster local")
- leaderMember = mgr.GetLeaderMember(cluster)
- }
- if leaderMember == nil {
- return nil, errors.New("the cluster has no leader")
- }
- return mgr.GetDBConnWithMember(cluster, leaderMember)
-}
-
-// GetLeaderMember retrieves the leader member of a cluster
-func (mgr *Manager) GetLeaderMember(cluster *dcs.Cluster) *dcs.Member {
- clusterLocalInfo, err := mgr.GetClusterLocalInfo()
- if err != nil || clusterLocalInfo == nil {
- mgr.Logger.Error(err, "Get cluster local info failed")
- return nil
- }
-
- leaderAddr := clusterLocalInfo.GetString("CURRENT_LEADER")
- if leaderAddr == "" {
- return nil
- }
- leaderParts := strings.Split(leaderAddr, ".")
- if len(leaderParts) > 0 {
- return cluster.GetMemberWithName(leaderParts[0])
- }
-
- return nil
-}
diff --git a/engines/wesql/conn_test.go b/engines/wesql/conn_test.go
deleted file mode 100644
index 620e3de..0000000
--- a/engines/wesql/conn_test.go
+++ /dev/null
@@ -1,132 +0,0 @@
-/*
-Copyright (C) 2022-2023 ApeCloud Co., Ltd
-
-This file is part of KubeBlocks project
-
-This program is free software: you can redistribute it and/or modify
-it under the terms of the GNU Affero General Public License as published by
-the Free Software Foundation, either version 3 of the License, or
-(at your option) any later version.
-
-This program is distributed in the hope that it will be useful
-but WITHOUT ANY WARRANTY; without even the implied warranty of
-MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-GNU Affero General Public License for more details.
-
-You should have received a copy of the GNU Affero General Public License
-along with this program. If not, see .
-*/
-
-package wesql
-
-import (
- "fmt"
- "testing"
-
- "github.com/DATA-DOG/go-sqlmock"
- "github.com/stretchr/testify/assert"
-
- "github.com/apecloud/dbctl/dcs"
-)
-
-func TestGetDBConnWithMember(t *testing.T) {
- manager, _, _ := mockDatabase(t)
- cluster := &dcs.Cluster{
- ClusterCompName: fakeClusterCompName,
- Namespace: fakeNamespace,
- }
-
- t.Run("new db connection failed", func(t *testing.T) {
- _, _ = NewConfig(fakePropertiesWithWrongURL)
- db, err := manager.GetDBConnWithMember(cluster, &dcs.Member{})
-
- assert.Nil(t, db)
- assert.NotNil(t, err)
- assert.ErrorContains(t, err, "new db connection failed")
- })
-
- t.Run("return current member connection", func(t *testing.T) {
- db, err := manager.GetDBConnWithMember(cluster, nil)
-
- assert.NotNil(t, db)
- assert.Nil(t, err)
- assert.Equal(t, db, manager.DB)
- })
-}
-
-func TestGetLeaderMember(t *testing.T) {
- manager, mock, _ := mockDatabase(t)
- cluster := &dcs.Cluster{
- Members: []dcs.Member{
- {
- Name: fakePodName,
- },
- },
- }
-
- t.Run("Get cluster local info failed", func(t *testing.T) {
- mock.ExpectQuery("select *").
- WillReturnError(fmt.Errorf("some error"))
-
- leaderMember := manager.GetLeaderMember(cluster)
- assert.Nil(t, leaderMember)
- })
-
- t.Run("leader addr is empty", func(t *testing.T) {
- mock.ExpectQuery("select *").
- WillReturnRows(sqlmock.NewRows([]string{"CURRENT_LEADER"}).AddRow(""))
-
- leaderMember := manager.GetLeaderMember(cluster)
- assert.Nil(t, leaderMember)
- })
-
- t.Run("get leader member success", func(t *testing.T) {
- mock.ExpectQuery("select *").
- WillReturnRows(sqlmock.NewRows([]string{"CURRENT_LEADER"}).AddRow(fakePodName + ".test-wesql.headless"))
-
- leaderMember := manager.GetLeaderMember(cluster)
- assert.NotNil(t, leaderMember)
- assert.Equal(t, fakePodName, leaderMember.Name)
- })
-
- if err := mock.ExpectationsWereMet(); err != nil {
- t.Errorf("there were unfulfilled expectations: %v", err)
- }
-}
-
-func TestGetLeaderConn(t *testing.T) {
- manager, mock, _ := mockDatabase(t)
- cluster := &dcs.Cluster{
- ClusterCompName: fakeClusterCompName,
- Namespace: fakeNamespace,
- }
-
- t.Run("the cluster has no leader", func(t *testing.T) {
- mock.ExpectQuery("select *").
- WillReturnError(fmt.Errorf("some error"))
-
- db, err := manager.GetLeaderConn(cluster)
- assert.Nil(t, db)
- assert.NotNil(t, err)
- assert.ErrorContains(t, err, "the cluster has no leader")
- })
-
- t.Run("get leader conn successfully", func(t *testing.T) {
- _, _ = NewConfig(fakeProperties)
- mock.ExpectQuery("select *").
- WillReturnRows(sqlmock.NewRows([]string{"CURRENT_LEADER"}).AddRow(fakePodName + ".test-wesql.headless"))
- cluster.Members = []dcs.Member{
- {
- Name: fakePodName,
- },
- }
-
- db, err := manager.GetLeaderConn(cluster)
- assert.NotNil(t, db)
- assert.Nil(t, err)
- })
-
- if err := mock.ExpectationsWereMet(); err != nil {
- t.Errorf("there were unfulfilled expectations: %v", err)
- }
-}
diff --git a/engines/wesql/get_replica_role.go b/engines/wesql/get_replica_role.go
index 6068b59..4155d71 100644
--- a/engines/wesql/get_replica_role.go
+++ b/engines/wesql/get_replica_role.go
@@ -24,8 +24,6 @@ import (
"fmt"
"github.com/pkg/errors"
-
- "github.com/apecloud/dbctl/engines/mysql"
)
func (mgr *Manager) GetReplicaRole(ctx context.Context) (string, error) {
@@ -58,18 +56,3 @@ func (mgr *Manager) GetReplicaRole(ctx context.Context) (string, error) {
}
return "", errors.Errorf("exec sql %s failed: no data returned", sql)
}
-
-func (mgr *Manager) GetClusterLocalInfo() (mysql.RowMap, error) {
- var result mysql.RowMap
- sql := "select * from information_schema.wesql_cluster_local;"
- err := mysql.QueryRowsMap(mgr.DB, sql, func(rMap mysql.RowMap) error {
- result = rMap
- return nil
- })
- if err != nil {
- mgr.Logger.Error(err, fmt.Sprintf("error executing %s", sql))
- return nil, err
- }
- return result, nil
-
-}
diff --git a/engines/wesql/get_replica_role_test.go b/engines/wesql/get_replica_role_test.go
index 1625cf2..22ccb32 100644
--- a/engines/wesql/get_replica_role_test.go
+++ b/engines/wesql/get_replica_role_test.go
@@ -75,33 +75,3 @@ func TestGetRole(t *testing.T) {
t.Errorf("there were unfulfilled expectations: %v", err)
}
}
-
-func TestGetClusterLocalInfo(t *testing.T) {
- manager, mock, _ := mockDatabase(t)
-
- t.Run("error executing sql", func(t *testing.T) {
- mock.ExpectQuery("select *").
- WillReturnError(fmt.Errorf("some error"))
-
- clusterLocalInfo, err := manager.GetClusterLocalInfo()
- assert.Nil(t, clusterLocalInfo)
- assert.NotNil(t, err)
- assert.ErrorContains(t, err, "some error")
- })
-
- t.Run("get cluster local info successfully", func(t *testing.T) {
- mock.ExpectQuery("select *").
- WillReturnRows(sqlmock.NewRows([]string{"CURRENT_LEADER", "ROLE", "SERVER_ID"}).AddRow("test-wesql-0", "leader", "1"))
-
- clusterLocalInfo, err := manager.GetClusterLocalInfo()
- assert.NotNil(t, clusterLocalInfo)
- assert.Nil(t, err)
- assert.Equal(t, "test-wesql-0", clusterLocalInfo.GetString("CURRENT_LEADER"))
- assert.Equal(t, "leader", clusterLocalInfo.GetString("ROLE"))
- assert.Equal(t, "1", clusterLocalInfo.GetString("SERVER_ID"))
- })
-
- if err := mock.ExpectationsWereMet(); err != nil {
- t.Errorf("there were unfulfilled expectations: %v", err)
- }
-}
diff --git a/engines/wesql/manager.go b/engines/wesql/manager.go
index 7979e54..aabf28b 100644
--- a/engines/wesql/manager.go
+++ b/engines/wesql/manager.go
@@ -20,39 +20,26 @@ along with this program. If not, see .
package wesql
import (
- "context"
- "database/sql"
- "fmt"
- "strings"
-
- "github.com/pkg/errors"
ctrl "sigs.k8s.io/controller-runtime"
- "github.com/apecloud/dbctl/dcs"
"github.com/apecloud/dbctl/engines"
"github.com/apecloud/dbctl/engines/mysql"
)
-const (
- Role = "ROLE"
- CurrentLeader = "CURRENT_LEADER"
- Leader = "Leader"
-)
-
type Manager struct {
mysql.Manager
}
var _ engines.DBManager = &Manager{}
-func NewManager(properties engines.Properties) (engines.DBManager, error) {
+func NewManager() (engines.DBManager, error) {
logger := ctrl.Log.WithName("WeSQL")
- _, err := NewConfig(properties)
+ _, err := NewConfig()
if err != nil {
return nil, err
}
- mysqlMgr, err := mysql.NewManager(properties)
+ mysqlMgr, err := mysql.NewManager()
if err != nil {
return nil, err
}
@@ -64,245 +51,3 @@ func NewManager(properties engines.Properties) (engines.DBManager, error) {
mgr.SetLogger(logger)
return mgr, nil
}
-
-func (mgr *Manager) InitializeCluster(context.Context, *dcs.Cluster) error {
- return nil
-}
-
-func (mgr *Manager) IsLeader(ctx context.Context, cluster *dcs.Cluster) (bool, error) {
- role, err := mgr.GetReplicaRole(ctx)
-
- if err != nil {
- return false, err
- }
-
- if strings.EqualFold(role, Leader) {
- return true, nil
- }
-
- return false, nil
-}
-
-func (mgr *Manager) IsLeaderMember(_ context.Context, cluster *dcs.Cluster, member *dcs.Member) (bool, error) {
- if member == nil {
- return false, nil
- }
-
- leaderMember := mgr.GetLeaderMember(cluster)
- if leaderMember == nil {
- return false, nil
- }
-
- if leaderMember.Name != member.Name {
- return false, nil
- }
-
- return true, nil
-}
-
-func (mgr *Manager) InitiateCluster(_ *dcs.Cluster) error {
- return nil
-}
-
-func (mgr *Manager) GetMemberAddrs(ctx context.Context, cluster *dcs.Cluster) []string {
- addrs := make([]string, 0, 3)
- clusterInfo := mgr.GetClusterInfo(ctx, cluster)
- clusterInfo = strings.Split(clusterInfo, "@")[0]
- for _, addr := range strings.Split(clusterInfo, ";") {
- if !strings.Contains(addr, ":") {
- continue
- }
- addrs = append(addrs, strings.Split(addr, "#")[0])
- }
-
- return addrs
-}
-
-func (mgr *Manager) GetAddrWithMemberName(ctx context.Context, cluster *dcs.Cluster, memberName string) string {
- addrs := mgr.GetMemberAddrs(ctx, cluster)
- for _, addr := range addrs {
- if strings.HasPrefix(addr, memberName) {
- return addr
- }
- }
- return ""
-}
-
-func (mgr *Manager) IsCurrentMemberInCluster(ctx context.Context, cluster *dcs.Cluster) bool {
- clusterInfo := mgr.GetClusterInfo(ctx, cluster)
- return strings.Contains(clusterInfo, mgr.CurrentMemberName)
-}
-
-func (mgr *Manager) IsMemberLagging(context.Context, *dcs.Cluster, *dcs.Member) (bool, int64) {
- return false, 0
-}
-
-func (mgr *Manager) Recover(context.Context, *dcs.Cluster) error {
- return nil
-}
-
-func (mgr *Manager) JoinCurrentMemberToCluster(context.Context, *dcs.Cluster) error {
- return nil
-}
-
-func (mgr *Manager) LeaveMemberFromCluster(ctx context.Context, cluster *dcs.Cluster, memberName string) error {
- db, err := mgr.GetLeaderConn(cluster)
- if err != nil {
- mgr.Logger.Error(err, "Get leader conn failed")
- return err
- }
- addr := mgr.GetAddrWithMemberName(ctx, cluster, memberName)
- if addr == "" {
- mgr.Logger.Info(fmt.Sprintf("member %s already deleted", memberName))
- return nil
- }
-
- sql := fmt.Sprintf("call dbms_consensus.downgrade_follower('%s');"+
- "call dbms_consensus.drop_learner('%s');", addr, addr)
- _, err = db.ExecContext(ctx, sql)
- if err != nil {
- mgr.Logger.Error(err, "delete member from db cluster failed")
- return errors.Wrapf(err, "error executing %s", sql)
- }
- return nil
-}
-
-func (mgr *Manager) IsClusterHealthy(_ context.Context, cluster *dcs.Cluster) bool {
- db, err := mgr.GetLeaderConn(cluster)
- if err != nil {
- mgr.Logger.Error(err, "Get leader conn failed")
- return false
- }
-
- var leaderRecord mysql.RowMap
- sql := "select * from information_schema.wesql_cluster_global;"
- err = mysql.QueryRowsMap(db, sql, func(rMap mysql.RowMap) error {
- if rMap.GetString(Role) == Leader {
- leaderRecord = rMap
- }
- return nil
- })
- if err != nil {
- mgr.Logger.Error(err, fmt.Sprintf("error executing %s", sql))
- return false
- }
-
- if len(leaderRecord) > 0 {
- return true
- }
- return false
-}
-
-// IsClusterInitialized is a method to check if cluster is initialized or not
-func (mgr *Manager) IsClusterInitialized(ctx context.Context, _ *dcs.Cluster) (bool, error) {
- clusterInfo := mgr.GetClusterInfo(ctx, nil)
- if clusterInfo != "" {
- return true, nil
- }
-
- return false, nil
-}
-
-func (mgr *Manager) GetClusterInfo(ctx context.Context, cluster *dcs.Cluster) string {
- var db *sql.DB
- var err error
- if cluster != nil {
- db, err = mgr.GetLeaderConn(cluster)
- if err != nil {
- mgr.Logger.Error(err, "Get leader conn failed")
- return ""
- }
- } else {
- db = mgr.DB
-
- }
- var clusterID, clusterInfo string
- err = db.QueryRowContext(ctx, "select cluster_id, cluster_info from mysql.consensus_info").
- Scan(&clusterID, &clusterInfo)
- if err != nil {
- mgr.Logger.Error(err, "Cluster info query failed")
- }
- return clusterInfo
-}
-
-func (mgr *Manager) Promote(ctx context.Context, cluster *dcs.Cluster) error {
- isLeader, _ := mgr.IsLeader(ctx, nil)
- if isLeader {
- return nil
- }
-
- db, err := mgr.GetLeaderConn(cluster)
- if err != nil {
- return errors.Wrap(err, "Get leader conn failed")
- }
-
- addr := mgr.GetAddrWithMemberName(ctx, cluster, mgr.CurrentMemberName)
- if addr == "" {
- return errors.New("get current member's addr failed")
- }
- resp, err := db.Exec(fmt.Sprintf("call dbms_consensus.change_leader('%s');", addr))
- if err != nil {
- return err
- }
-
- mgr.Logger.Info("promote success", "resp", resp)
- return nil
-}
-
-func (mgr *Manager) IsPromoted(ctx context.Context) bool {
- isLeader, _ := mgr.IsLeader(ctx, nil)
- return isLeader
-}
-
-func (mgr *Manager) Demote(context.Context) error {
- return nil
-}
-
-func (mgr *Manager) Follow(_ context.Context, cluster *dcs.Cluster) error {
- mgr.Logger.Info("current member still follow the leader", "leader name", cluster.Leader.Name)
- return nil
-}
-
-func (mgr *Manager) GetHealthiestMember(*dcs.Cluster, string) *dcs.Member {
- return nil
-}
-
-func (mgr *Manager) HasOtherHealthyLeader(_ context.Context, cluster *dcs.Cluster) *dcs.Member {
- clusterLocalInfo, err := mgr.GetClusterLocalInfo()
- if err != nil || clusterLocalInfo == nil {
- mgr.Logger.Error(err, "Get cluster local info failed")
- return nil
- }
-
- if clusterLocalInfo.GetString(Role) == Leader {
- // I am the leader, just return nil
- return nil
- }
-
- leaderAddr := clusterLocalInfo.GetString(CurrentLeader)
- if leaderAddr == "" {
- return nil
- }
- leaderParts := strings.Split(leaderAddr, ".")
- if len(leaderParts) > 0 {
- return cluster.GetMemberWithName(leaderParts[0])
- }
-
- return nil
-}
-
-// HasOtherHealthyMembers checks if there are any healthy members, excluding the leader
-func (mgr *Manager) HasOtherHealthyMembers(ctx context.Context, cluster *dcs.Cluster, leader string) []*dcs.Member {
- members := make([]*dcs.Member, 0)
- for _, member := range cluster.Members {
- if member.Name == leader {
- continue
- }
- if !mgr.IsMemberHealthy(ctx, cluster, &member) {
- continue
- }
- members = append(members, &member)
- }
-
- return members
-}
diff --git a/engines/wesql/manager_test.go b/engines/wesql/manager_test.go
index 78836e0..f3a4319 100644
--- a/engines/wesql/manager_test.go
+++ b/engines/wesql/manager_test.go
@@ -20,18 +20,12 @@ along with this program. If not, see .
package wesql
import (
- "context"
- "database/sql"
- "fmt"
"testing"
"github.com/DATA-DOG/go-sqlmock"
- "github.com/spf13/viper"
"github.com/stretchr/testify/assert"
ctrl "sigs.k8s.io/controller-runtime"
- "github.com/apecloud/dbctl/constant"
- "github.com/apecloud/dbctl/dcs"
"github.com/apecloud/dbctl/engines"
"github.com/apecloud/dbctl/engines/mysql"
)
@@ -39,7 +33,6 @@ import (
const (
fakePodName = "test-wesql-0"
fakeClusterCompName = "test-wesql"
- fakeNamespace = "fake-namespace"
)
func mockDatabase(t *testing.T) (*Manager, sqlmock.Sqlmock, error) {
@@ -48,7 +41,6 @@ func mockDatabase(t *testing.T) (*Manager, sqlmock.Sqlmock, error) {
DBManagerBase: engines.DBManagerBase{
CurrentMemberName: fakePodName,
ClusterCompName: fakeClusterCompName,
- Namespace: fakeNamespace,
Logger: ctrl.Log.WithName("WeSQL-TEST"),
},
},
@@ -64,481 +56,10 @@ func mockDatabase(t *testing.T) (*Manager, sqlmock.Sqlmock, error) {
}
func TestNewManager(t *testing.T) {
- t.Run("new config failed", func(t *testing.T) {
- manager, err := NewManager(fakePropertiesWithWrongPem)
-
- assert.Nil(t, manager)
- assert.NotNil(t, err)
- })
-
- t.Run("new mysql manager failed", func(t *testing.T) {
- manager, err := NewManager(fakeProperties)
-
- assert.Nil(t, manager)
- assert.NotNil(t, err)
- })
-
- viper.Set(constant.KBEnvPodName, fakePodName)
- defer viper.Reset()
t.Run("new manger successfully", func(t *testing.T) {
- manager, err := NewManager(fakeProperties)
+ manager, err := NewManager()
assert.Nil(t, err)
assert.NotNil(t, manager)
})
}
-
-func TestIsLeader(t *testing.T) {
- ctx := context.TODO()
- manager, mock, _ := mockDatabase(t)
-
- t.Run("get role failed", func(t *testing.T) {
- mock.ExpectQuery("select *").
- WillReturnError(fmt.Errorf("some error"))
-
- isLeader, err := manager.IsLeader(ctx, nil)
- assert.False(t, isLeader)
- assert.NotNil(t, err)
- assert.ErrorContains(t, err, "some error")
- })
-
- t.Run("get role leader", func(t *testing.T) {
- mock.ExpectQuery("select CURRENT_LEADER, ROLE, SERVER_ID from information_schema.wesql_cluster_local").
- WillReturnRows(sqlmock.NewRows([]string{"CURRENT_LEADER", "ROLE", "SERVER_ID"}).AddRow("test-wesql-0", "leader", "1"))
-
- isLeader, err := manager.IsLeader(ctx, nil)
- assert.True(t, isLeader)
- assert.Nil(t, err)
- })
-
- t.Run("get role follower", func(t *testing.T) {
- mock.ExpectQuery("select CURRENT_LEADER, ROLE, SERVER_ID from information_schema.wesql_cluster_local").
- WillReturnRows(sqlmock.NewRows([]string{"CURRENT_LEADER", "ROLE", "SERVER_ID"}).AddRow("test-wesql-1", "follower", "2"))
-
- isLeader, err := manager.IsLeader(ctx, nil)
- assert.False(t, isLeader)
- assert.Nil(t, err)
- })
-
- if err := mock.ExpectationsWereMet(); err != nil {
- t.Errorf("there were unfulfilled expectations: %v", err)
- }
-}
-
-func TestIsLeaderMember(t *testing.T) {
- ctx := context.TODO()
- manager, mock, _ := mockDatabase(t)
- cluster := &dcs.Cluster{
- Members: []dcs.Member{
- {
- Name: "test-wesql-1",
- },
- },
- }
-
- t.Run("member is nil", func(t *testing.T) {
- isLeaderMember, err := manager.IsLeaderMember(ctx, cluster, nil)
- assert.False(t, isLeaderMember)
- assert.Nil(t, err)
- })
-
- member := &dcs.Member{
- Name: fakePodName,
- }
- t.Run("leader member is nil", func(t *testing.T) {
- mock.ExpectQuery("select *").
- WillReturnError(fmt.Errorf("some error"))
-
- isLeaderMember, err := manager.IsLeaderMember(ctx, cluster, member)
- assert.False(t, isLeaderMember)
- assert.Nil(t, err)
- })
-
- t.Run("member is not Leader member", func(t *testing.T) {
- mock.ExpectQuery("select *").
- WillReturnRows(sqlmock.NewRows([]string{"CURRENT_LEADER"}).AddRow("test-wesql-1.test-wesql.headless"))
-
- isLeaderMember, err := manager.IsLeaderMember(ctx, cluster, member)
- assert.False(t, isLeaderMember)
- assert.Nil(t, err)
- })
-
- cluster.Members = append(cluster.Members, *member)
- t.Run("member is Leader member", func(t *testing.T) {
- mock.ExpectQuery("select *").
- WillReturnRows(sqlmock.NewRows([]string{"CURRENT_LEADER"}).AddRow(fakePodName + ".test-wesql.headless"))
-
- isLeaderMember, err := manager.IsLeaderMember(ctx, cluster, member)
- assert.True(t, isLeaderMember)
- assert.Nil(t, err)
- })
-
- if err := mock.ExpectationsWereMet(); err != nil {
- t.Errorf("there were unfulfilled expectations: %v", err)
- }
-}
-
-func TestGetClusterInfo(t *testing.T) {
- ctx := context.TODO()
- manager, mock, _ := mockDatabase(t)
-
- t.Run("Get leader conn failed", func(t *testing.T) {
- clusterInfo := manager.GetClusterInfo(ctx, &dcs.Cluster{})
- assert.Empty(t, clusterInfo)
- })
-
- t.Run("get cluster info failed", func(t *testing.T) {
- mock.ExpectQuery("select cluster_id, cluster_info from mysql.consensus_info").
- WillReturnError(fmt.Errorf("some error"))
-
- clusterInfo := manager.GetClusterInfo(ctx, nil)
- assert.Empty(t, clusterInfo)
- })
-
- t.Run("get cluster info success", func(t *testing.T) {
- mock.ExpectQuery("select cluster_id, cluster_info from mysql.consensus_info").
- WillReturnRows(sqlmock.NewRows([]string{"cluster_id", "cluster_info"}).
- AddRow("1", "test-wesql-0.test-wesql-headless:13306;test-wesql-1.test-wesql-headless:13306@1"))
-
- clusterInfo := manager.GetClusterInfo(ctx, nil)
- assert.Equal(t, "test-wesql-0.test-wesql-headless:13306;test-wesql-1.test-wesql-headless:13306@1", clusterInfo)
- })
-
- if err := mock.ExpectationsWereMet(); err != nil {
- t.Errorf("there were unfulfilled expectations: %v", err)
- }
-}
-
-func TestGetMemberAddrs(t *testing.T) {
- ctx := context.TODO()
- manager, mock, _ := mockDatabase(t)
-
- mock.ExpectQuery("select cluster_id, cluster_info from mysql.consensus_info").
- WillReturnRows(sqlmock.NewRows([]string{"cluster_id", "cluster_info"}).
- AddRow("1", "test-wesql-0.test-wesql-headless:13306;test-wesql-1.test-wesql-headless:13306;test-wesql-2.test-wesql-headless@1"))
-
- addrs := manager.GetMemberAddrs(ctx, nil)
- assert.Equal(t, 2, len(addrs))
- assert.Equal(t, "test-wesql-0.test-wesql-headless:13306", addrs[0])
- assert.Equal(t, "test-wesql-1.test-wesql-headless:13306", addrs[1])
-
- if err := mock.ExpectationsWereMet(); err != nil {
- t.Errorf("there were unfulfilled expectations: %v", err)
- }
-}
-
-func TestGetAddrWithMemberName(t *testing.T) {
- ctx := context.TODO()
- manager, mock, _ := mockDatabase(t)
-
- memberNames := []string{"test-wesql-0", "test-wesql-2"}
- expectAddrs := []string{"test-wesql-0.test-wesql-headless:13306", ""}
- for i, name := range memberNames {
- mock.ExpectQuery("select cluster_id, cluster_info from mysql.consensus_info").
- WillReturnRows(sqlmock.NewRows([]string{"cluster_id", "cluster_info"}).
- AddRow("1", "test-wesql-0.test-wesql-headless:13306;test-wesql-1.test-wesql-headless:13306;@1"))
-
- addr := manager.GetAddrWithMemberName(ctx, nil, name)
- assert.Equal(t, expectAddrs[i], addr)
- }
-
- if err := mock.ExpectationsWereMet(); err != nil {
- t.Errorf("there were unfulfilled expectations: %v", err)
- }
-}
-
-func TestIsCurrentMemberInCluster(t *testing.T) {
- ctx := context.TODO()
- manager, mock, _ := mockDatabase(t)
-
- mock.ExpectQuery("select cluster_id, cluster_info from mysql.consensus_info").
- WillReturnRows(sqlmock.NewRows([]string{"cluster_id", "cluster_info"}).
- AddRow("1", "test-wesql-0.test-wesql-headless:13306;test-wesql-1.test-wesql-headless:13306@1"))
-
- assert.True(t, manager.IsCurrentMemberInCluster(ctx, nil))
-
- if err := mock.ExpectationsWereMet(); err != nil {
- t.Errorf("there were unfulfilled expectations: %v", err)
- }
-}
-
-func TestManager_LeaveMemberFromCluster(t *testing.T) {
- ctx := context.TODO()
- manager, mock, _ := mockDatabase(t)
- cluster := &dcs.Cluster{
- ClusterCompName: fakeClusterCompName,
- Namespace: fakeNamespace,
- }
- memberName := fakePodName
-
- t.Run("Get leader conn failed", func(t *testing.T) {
- err := manager.LeaveMemberFromCluster(ctx, cluster, memberName)
- assert.NotNil(t, err)
- assert.ErrorContains(t, err, "the cluster has no leader")
- })
-
- cluster.Leader = &dcs.Leader{Name: fakePodName}
- cluster.Members = []dcs.Member{{Name: fakePodName}}
- t.Run("member already deleted", func(t *testing.T) {
- err := manager.LeaveMemberFromCluster(ctx, cluster, memberName)
- assert.Nil(t, err)
- })
-
- t.Run("delete member from db cluster failed", func(t *testing.T) {
- mock.ExpectQuery("select cluster_id, cluster_info from mysql.consensus_info").
- WillReturnRows(sqlmock.NewRows([]string{"cluster_id", "cluster_info"}).
- AddRow("1", "test-wesql-0.test-wesql-headless:13306;test-wesql-1.test-wesql-headless:13306@1"))
- mock.ExpectExec("call dbms_consensus").
- WillReturnError(fmt.Errorf("some error"))
-
- err := manager.LeaveMemberFromCluster(ctx, cluster, memberName)
- assert.NotNil(t, err)
- assert.ErrorContains(t, err, "some error")
- })
-
- t.Run("delete member successfully", func(t *testing.T) {
- mock.ExpectQuery("select cluster_id, cluster_info from mysql.consensus_info").
- WillReturnRows(sqlmock.NewRows([]string{"cluster_id", "cluster_info"}).
- AddRow("1", "test-wesql-0.test-wesql-headless:13306;test-wesql-1.test-wesql-headless:13306@1"))
- mock.ExpectExec("call dbms_consensus").
- WillReturnResult(sqlmock.NewResult(1, 1))
-
- err := manager.LeaveMemberFromCluster(ctx, cluster, memberName)
- assert.Nil(t, err)
- })
-
- if err := mock.ExpectationsWereMet(); err != nil {
- t.Errorf("there were unfulfilled expectations: %v", err)
- }
-}
-
-func TestManager_IsClusterHealthy(t *testing.T) {
- ctx := context.TODO()
- manager, mock, _ := mockDatabase(t)
- cluster := &dcs.Cluster{}
-
- t.Run("Get leader conn failed", func(t *testing.T) {
- isHealthy := manager.IsClusterHealthy(ctx, cluster)
- assert.False(t, isHealthy)
- })
-
- cluster.Leader = &dcs.Leader{Name: fakePodName}
- cluster.Members = []dcs.Member{{Name: fakePodName}}
- t.Run("get wesql cluster information failed", func(t *testing.T) {
- mock.ExpectQuery("select *").
- WillReturnError(fmt.Errorf("some error"))
-
- isHealthy := manager.IsClusterHealthy(ctx, cluster)
- assert.False(t, isHealthy)
- })
-
- t.Run("check cluster healthy status successfully", func(t *testing.T) {
- roles := []string{Leader, "Follow"}
- expectedRes := []bool{true, false}
-
- for i, role := range roles {
- mock.ExpectQuery("select *").
- WillReturnRows(sqlmock.NewRows([]string{Role}).AddRow(role))
-
- isHealthy := manager.IsClusterHealthy(ctx, cluster)
- assert.Equal(t, expectedRes[i], isHealthy)
- }
- })
-
- if err := mock.ExpectationsWereMet(); err != nil {
- t.Errorf("there were unfulfilled expectations: %v", err)
- }
-}
-
-func TestIsClusterInitialized(t *testing.T) {
- ctx := context.TODO()
- manager, mock, _ := mockDatabase(t)
-
- t.Run("cluster is initialized", func(t *testing.T) {
- mock.ExpectQuery("select cluster_id, cluster_info from mysql.consensus_info").
- WillReturnRows(sqlmock.NewRows([]string{"cluster_id", "cluster_info"}).
- AddRow("1", "test-wesql-0.test-wesql-headless:13306;test-wesql-1.test-wesql-headless:13306@1"))
-
- isInitialized, err := manager.IsClusterInitialized(ctx, nil)
- assert.True(t, isInitialized)
- assert.Nil(t, err)
- })
-
- t.Run("cluster is not initialized", func(t *testing.T) {
- mock.ExpectQuery("select cluster_id, cluster_info from mysql.consensus_info").
- WillReturnRows(sqlmock.NewRows([]string{"cluster_id", "cluster_info"}))
-
- isInitialized, err := manager.IsClusterInitialized(ctx, nil)
- assert.False(t, isInitialized)
- assert.Nil(t, err)
- })
-
- if err := mock.ExpectationsWereMet(); err != nil {
- t.Errorf("there were unfulfilled expectations: %v", err)
- }
-}
-
-func TestIsPromoted(t *testing.T) {
- ctx := context.TODO()
- manager, mock, _ := mockDatabase(t)
-
- mock.ExpectQuery("select CURRENT_LEADER, ROLE, SERVER_ID from information_schema.wesql_cluster_local").
- WillReturnRows(sqlmock.NewRows([]string{"CURRENT_LEADER", "ROLE", "SERVER_ID"}).AddRow("test-wesql-0", "leader", "1"))
-
- assert.True(t, manager.IsPromoted(ctx))
-
- if err := mock.ExpectationsWereMet(); err != nil {
- t.Errorf("there were unfulfilled expectations: %v", err)
- }
-}
-
-func TestHasOtherHealthyLeader(t *testing.T) {
- ctx := context.TODO()
- manager, mock, _ := mockDatabase(t)
- cluster := &dcs.Cluster{
- Members: []dcs.Member{},
- }
-
- t.Run("Get cluster local info failed", func(t *testing.T) {
- mock.ExpectQuery("select *").
- WillReturnError(fmt.Errorf("some error"))
-
- member := manager.HasOtherHealthyLeader(ctx, cluster)
- assert.Nil(t, member)
- })
-
- t.Run("current member is leader", func(t *testing.T) {
- mock.ExpectQuery("select *").
- WillReturnRows(sqlmock.NewRows([]string{"CURRENT_LEADER", "ROLE"}).AddRow(fakePodName, Leader))
-
- member := manager.HasOtherHealthyLeader(ctx, cluster)
- assert.Nil(t, member)
- })
-
- t.Run("leader addr is empty", func(t *testing.T) {
- mock.ExpectQuery("select *").
- WillReturnRows(sqlmock.NewRows([]string{"CURRENT_LEADER", "ROLE"}).AddRow("", "follow"))
-
- member := manager.HasOtherHealthyLeader(ctx, cluster)
- assert.Nil(t, member)
- })
-
- t.Run("member is not in the cluster", func(t *testing.T) {
- mock.ExpectQuery("select *").
- WillReturnRows(sqlmock.NewRows([]string{"CURRENT_LEADER", "ROLE"}).AddRow(fakePodName, "follow"))
-
- member := manager.HasOtherHealthyLeader(ctx, cluster)
- assert.Nil(t, member)
- })
-
- cluster.Members = append(cluster.Members, dcs.Member{
- Name: fakePodName,
- })
- t.Run("get other healthy leader", func(t *testing.T) {
- mock.ExpectQuery("select *").
- WillReturnRows(sqlmock.NewRows([]string{"CURRENT_LEADER", "ROLE"}).AddRow(fakePodName+".test-wesql-headless", "follow"))
-
- member := manager.HasOtherHealthyLeader(ctx, cluster)
- assert.NotNil(t, member)
- assert.Equal(t, fakePodName, member.Name)
- })
-
- if err := mock.ExpectationsWereMet(); err != nil {
- t.Errorf("there were unfulfilled expectations: %v", err)
- }
-}
-
-func TestHasOtherHealthyMembers(t *testing.T) {
- ctx := context.TODO()
- manager, mock, _ := mockDatabase(t)
-
- cluster := &dcs.Cluster{
- Members: []dcs.Member{
- {
- Name: "fake-pod-0",
- },
- {
- Name: "fake-pod-1",
- },
- {
- Name: fakePodName,
- },
- },
- }
- mock.ExpectQuery("select check_ts from kubeblocks.kb_health_check where type=1 limit 1").
- WillReturnError(sql.ErrNoRows)
- _, _ = NewConfig(fakeProperties)
-
- members := manager.HasOtherHealthyMembers(ctx, cluster, "fake-pod-0")
- assert.Len(t, members, 1)
- assert.Equal(t, fakePodName, members[0].Name)
-
- if err := mock.ExpectationsWereMet(); err != nil {
- t.Errorf("there were unfulfilled expectations: %v", err)
- }
-}
-
-func TestManager_Promote(t *testing.T) {
- ctx := context.TODO()
- manager, mock, _ := mockDatabase(t)
- cluster := &dcs.Cluster{}
-
- t.Run("current member is leader", func(t *testing.T) {
- mock.ExpectQuery("select CURRENT_LEADER, ROLE, SERVER_ID from information_schema.wesql_cluster_local").
- WillReturnRows(sqlmock.NewRows([]string{"CURRENT_LEADER", "ROLE", "SERVER_ID"}).AddRow("test-wesql-0", "leader", "1"))
-
- err := manager.Promote(ctx, cluster)
- assert.Nil(t, err)
- })
-
- t.Run("Get leader conn failed", func(t *testing.T) {
- mock.ExpectQuery("select CURRENT_LEADER, ROLE, SERVER_ID from information_schema.wesql_cluster_local").
- WillReturnRows(sqlmock.NewRows([]string{"CURRENT_LEADER", "ROLE", "SERVER_ID"}).AddRow("test-wesql-0", "follower", "1"))
-
- err := manager.Promote(ctx, cluster)
- assert.NotNil(t, err)
- assert.ErrorContains(t, err, "Get leader conn failed")
- })
-
- cluster.Leader = &dcs.Leader{Name: fakePodName}
- cluster.Members = []dcs.Member{{Name: fakePodName}}
- t.Run("get addr failed", func(t *testing.T) {
- mock.ExpectQuery("select CURRENT_LEADER, ROLE, SERVER_ID from information_schema.wesql_cluster_local").
- WillReturnRows(sqlmock.NewRows([]string{"CURRENT_LEADER", "ROLE", "SERVER_ID"}).AddRow("test-wesql-0", "follower", "1"))
- mock.ExpectQuery("select cluster_id, cluster_info from mysql.consensus_info").
- WillReturnRows(sqlmock.NewRows([]string{"cluster_id", "cluster_info"}).AddRow("1", "test-wesql-1.test-wesql-headless:13306;"))
-
- err := manager.Promote(ctx, cluster)
- assert.NotNil(t, err)
- assert.ErrorContains(t, err, "get current member's addr failed")
- })
-
- t.Run("promote failed", func(t *testing.T) {
- mock.ExpectQuery("select CURRENT_LEADER, ROLE, SERVER_ID from information_schema.wesql_cluster_local").
- WillReturnRows(sqlmock.NewRows([]string{"CURRENT_LEADER", "ROLE", "SERVER_ID"}).AddRow("test-wesql-0", "follower", "1"))
- mock.ExpectQuery("select cluster_id, cluster_info from mysql.consensus_info").
- WillReturnRows(sqlmock.NewRows([]string{"cluster_id", "cluster_info"}).AddRow("1", "test-wesql-0.test-wesql-headless:13306;"))
- mock.ExpectExec("call dbms_consensus").
- WillReturnError(fmt.Errorf("some error"))
-
- err := manager.Promote(ctx, cluster)
- assert.NotNil(t, err)
- assert.ErrorContains(t, err, "some error")
- })
-
- t.Run("promote successfully", func(t *testing.T) {
- mock.ExpectQuery("select CURRENT_LEADER, ROLE, SERVER_ID from information_schema.wesql_cluster_local").
- WillReturnRows(sqlmock.NewRows([]string{"CURRENT_LEADER", "ROLE", "SERVER_ID"}).AddRow("test-wesql-0", "follower", "1"))
- mock.ExpectQuery("select cluster_id, cluster_info from mysql.consensus_info").
- WillReturnRows(sqlmock.NewRows([]string{"cluster_id", "cluster_info"}).AddRow("1", "test-wesql-0.test-wesql-headless:13306;"))
- mock.ExpectExec("call dbms_consensus").
- WillReturnResult(sqlmock.NewResult(1, 1))
-
- err := manager.Promote(ctx, cluster)
- assert.Nil(t, err)
- })
-
- if err := mock.ExpectationsWereMet(); err != nil {
- t.Errorf("there were unfulfilled expectations: %v", err)
- }
-}
diff --git a/go.mod b/go.mod
index 74048ff..f5d8a55 100644
--- a/go.mod
+++ b/go.mod
@@ -6,9 +6,7 @@ toolchain go1.23.10
require (
github.com/DATA-DOG/go-sqlmock v1.5.2
- github.com/IBM/sarama v1.43.2
github.com/apecloud/kubeblocks v0.9.0
- github.com/cenkalti/backoff/v4 v4.2.1
github.com/fasthttp/router v1.4.20
github.com/go-logr/logr v1.4.2
github.com/go-logr/zapr v1.3.0
@@ -29,17 +27,12 @@ require (
github.com/spf13/viper v1.19.0
github.com/stretchr/testify v1.9.0
github.com/valyala/fasthttp v1.50.0
- github.com/xdg-go/scram v1.1.2
go.etcd.io/etcd/client/v3 v3.5.14
go.etcd.io/etcd/server/v3 v3.5.14
go.mongodb.org/mongo-driver v1.15.1
go.uber.org/automaxprocs v1.5.2
go.uber.org/zap v1.27.0
- golang.org/x/exp v0.0.0-20240613232115-7f521ea00fb8
- golang.org/x/oauth2 v0.18.0
k8s.io/api v0.29.0
- k8s.io/apimachinery v0.29.0
- k8s.io/client-go v12.0.0+incompatible
k8s.io/klog/v2 v2.120.1
sigs.k8s.io/controller-runtime v0.17.2
)
@@ -48,6 +41,7 @@ require (
filippo.io/edwards25519 v1.1.0 // indirect
github.com/andybalholm/brotli v1.0.5 // indirect
github.com/beorn7/perks v1.0.1 // indirect
+ github.com/cenkalti/backoff/v4 v4.2.1 // indirect
github.com/cespare/xxhash/v2 v2.2.0 // indirect
github.com/coreos/go-semver v0.3.1 // indirect
github.com/coreos/go-systemd/v22 v22.5.0 // indirect
@@ -55,9 +49,6 @@ require (
github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc // indirect
github.com/dgryski/go-rendezvous v0.0.0-20200823014737-9f7001d12a5f // indirect
github.com/dustin/go-humanize v1.0.1 // indirect
- github.com/eapache/go-resiliency v1.6.0 // indirect
- github.com/eapache/go-xerial-snappy v0.0.0-20230731223053-c322873962e3 // indirect
- github.com/eapache/queue v1.1.0 // indirect
github.com/emicklei/go-restful/v3 v3.11.0 // indirect
github.com/evanphx/json-patch/v5 v5.9.0 // indirect
github.com/fsnotify/fsnotify v1.7.0 // indirect
@@ -83,20 +74,12 @@ require (
github.com/grpc-ecosystem/go-grpc-prometheus v1.2.1-0.20210315223345-82c243799c99 // indirect
github.com/grpc-ecosystem/grpc-gateway v1.16.0 // indirect
github.com/grpc-ecosystem/grpc-gateway/v2 v2.16.0 // indirect
- github.com/hashicorp/errwrap v1.1.0 // indirect
- github.com/hashicorp/go-multierror v1.1.1 // indirect
- github.com/hashicorp/go-uuid v1.0.3 // indirect
github.com/hashicorp/hcl v1.0.1-vault-5 // indirect
github.com/imdario/mergo v0.3.14 // indirect
github.com/inconshreveable/mousetrap v1.1.0 // indirect
github.com/jackc/pgpassfile v1.0.0 // indirect
github.com/jackc/pgservicefile v0.0.0-20221227161230-091c0ba34f0a // indirect
github.com/jackc/puddle/v2 v2.2.1 // indirect
- github.com/jcmturner/aescts/v2 v2.0.0 // indirect
- github.com/jcmturner/dnsutils/v2 v2.0.0 // indirect
- github.com/jcmturner/gofork v1.7.6 // indirect
- github.com/jcmturner/gokrb5/v8 v8.4.4 // indirect
- github.com/jcmturner/rpc/v2 v2.0.3 // indirect
github.com/jonboulle/clockwork v0.3.0 // indirect
github.com/josharian/intern v1.0.0 // indirect
github.com/json-iterator/go v1.1.12 // indirect
@@ -109,14 +92,12 @@ require (
github.com/montanaflynn/stats v0.6.6 // indirect
github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 // indirect
github.com/pelletier/go-toml/v2 v2.2.2 // indirect
- github.com/pierrec/lz4/v4 v4.1.21 // indirect
github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 // indirect
github.com/power-devops/perfstat v0.0.0-20210106213030-5aafc221ea8c // indirect
github.com/prometheus/client_golang v1.19.0 // indirect
github.com/prometheus/client_model v0.5.0 // indirect
github.com/prometheus/common v0.48.0 // indirect
github.com/prometheus/procfs v0.12.0 // indirect
- github.com/rcrowley/go-metrics v0.0.0-20201227073835-cf1acfcdf475 // indirect
github.com/russross/blackfriday/v2 v2.1.0 // indirect
github.com/sagikazarmark/locafero v0.4.0 // indirect
github.com/sagikazarmark/slog-shim v0.1.0 // indirect
@@ -131,6 +112,7 @@ require (
github.com/tmc/grpc-websocket-proxy v0.0.0-20220101234140-673ab2c3ae75 // indirect
github.com/valyala/bytebufferpool v1.0.0 // indirect
github.com/xdg-go/pbkdf2 v1.0.0 // indirect
+ github.com/xdg-go/scram v1.1.2 // indirect
github.com/xdg-go/stringprep v1.0.4 // indirect
github.com/xiang90/probing v0.0.0-20190116061207-43a291ad63a2 // indirect
github.com/youmark/pkcs8 v0.0.0-20181117223130-1be2e3e5546d // indirect
@@ -151,7 +133,9 @@ require (
go.opentelemetry.io/proto/otlp v1.0.0 // indirect
go.uber.org/multierr v1.11.0 // indirect
golang.org/x/crypto v0.39.0 // indirect
+ golang.org/x/exp v0.0.0-20240613232115-7f521ea00fb8 // indirect
golang.org/x/net v0.40.0 // indirect
+ golang.org/x/oauth2 v0.18.0 // indirect
golang.org/x/sync v0.15.0 // indirect
golang.org/x/sys v0.33.0 // indirect
golang.org/x/term v0.32.0 // indirect
@@ -171,6 +155,8 @@ require (
gopkg.in/yaml.v2 v2.4.0 // indirect
gopkg.in/yaml.v3 v3.0.1 // indirect
k8s.io/apiextensions-apiserver v0.30.1 // indirect
+ k8s.io/apimachinery v0.29.0 // indirect
+ k8s.io/client-go v12.0.0+incompatible // indirect
k8s.io/component-base v0.29.0 // indirect
k8s.io/kube-openapi v0.0.0-20240228011516-70dd3763d340 // indirect
k8s.io/utils v0.0.0-20231127182322-b307cd553661 // indirect
diff --git a/go.sum b/go.sum
index 80d2bc2..5f588fb 100644
--- a/go.sum
+++ b/go.sum
@@ -5,8 +5,6 @@ filippo.io/edwards25519 v1.1.0/go.mod h1:BxyFTGdWcka3PhytdK4V28tE5sGfRvvvRV7EaN4
github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU=
github.com/DATA-DOG/go-sqlmock v1.5.2 h1:OcvFkGmslmlZibjAjaHm3L//6LiuBgolP7OputlJIzU=
github.com/DATA-DOG/go-sqlmock v1.5.2/go.mod h1:88MAG/4G7SMwSE3CeA0ZKzrT5CiOU3OJ+JlNzwDqpNU=
-github.com/IBM/sarama v1.43.2 h1:HABeEqRUh32z8yzY2hGB/j8mHSzC/HA9zlEjqFNCzSw=
-github.com/IBM/sarama v1.43.2/go.mod h1:Kyo4WkF24Z+1nz7xeVUFWIuKVV8RS3wM8mkvPKMdXFQ=
github.com/andybalholm/brotli v1.0.5 h1:8uQZIdzKmjc/iuPu7O2ioW48L81FgatrcpfFmiq/cCs=
github.com/andybalholm/brotli v1.0.5/go.mod h1:fO7iG3H7G2nSZ7m0zPUDn85XEX2GTukHGRSepvi9Eig=
github.com/antihax/optional v1.0.0/go.mod h1:uupD/76wgC+ih3iEmQUL+0Ugr19nfwCT1kdvxnR2qWY=
@@ -44,12 +42,6 @@ github.com/dgryski/go-rendezvous v0.0.0-20200823014737-9f7001d12a5f h1:lO4WD4F/r
github.com/dgryski/go-rendezvous v0.0.0-20200823014737-9f7001d12a5f/go.mod h1:cuUVRXasLTGF7a8hSLbxyZXjz+1KgoB3wDUb6vlszIc=
github.com/dustin/go-humanize v1.0.1 h1:GzkhY7T5VNhEkwH0PVJgjz+fX1rhBrR7pRT3mDkpeCY=
github.com/dustin/go-humanize v1.0.1/go.mod h1:Mu1zIs6XwVuF/gI1OepvI0qD18qycQx+mFykh5fBlto=
-github.com/eapache/go-resiliency v1.6.0 h1:CqGDTLtpwuWKn6Nj3uNUdflaq+/kIPsg0gfNzHton30=
-github.com/eapache/go-resiliency v1.6.0/go.mod h1:5yPzW0MIvSe0JDsv0v+DvcjEv2FyD6iZYSs1ZI+iQho=
-github.com/eapache/go-xerial-snappy v0.0.0-20230731223053-c322873962e3 h1:Oy0F4ALJ04o5Qqpdz8XLIpNA3WM/iSIXqxtqo7UGVws=
-github.com/eapache/go-xerial-snappy v0.0.0-20230731223053-c322873962e3/go.mod h1:YvSRo5mw33fLEx1+DlK6L2VV43tJt5Eyel9n9XBcR+0=
-github.com/eapache/queue v1.1.0 h1:YOEu7KNc61ntiQlcEeUIoDTJ2o8mQznoNvUhiigpIqc=
-github.com/eapache/queue v1.1.0/go.mod h1:6eCeP0CKFpHLu8blIFXhExK/dRa7WDZfr6jVFPTqq+I=
github.com/emicklei/go-restful/v3 v3.11.0 h1:rAQeMHw1c7zTmncogyy8VvRZwtkmkZ4FxERmMY4rD+g=
github.com/emicklei/go-restful/v3 v3.11.0/go.mod h1:6n3XBCmQQb25CM2LCACGz8ukIrRry+4bhvbpWn3mrbc=
github.com/envoyproxy/go-control-plane v0.9.0/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4=
@@ -62,8 +54,6 @@ github.com/evanphx/json-patch/v5 v5.9.0 h1:kcBlZQbplgElYIlo/n1hJbls2z/1awpXxpRi0
github.com/evanphx/json-patch/v5 v5.9.0/go.mod h1:VNkHZ/282BpEyt/tObQO8s5CMPmYYq14uClGH4abBuQ=
github.com/fasthttp/router v1.4.20 h1:yPeNxz5WxZGojzolKqiP15DTXnxZce9Drv577GBrDgU=
github.com/fasthttp/router v1.4.20/go.mod h1:um867yNQKtERxBm+C+yzgWxjspTiQoA8z86Ec3fK/tc=
-github.com/fortytw2/leaktest v1.3.0 h1:u8491cBMTQ8ft8aeV+adlcytMZylmA5nnwwkRZjI8vw=
-github.com/fortytw2/leaktest v1.3.0/go.mod h1:jDsjWgpAGjm2CA7WthBh/CdZYEPF31XHquHwclZch5g=
github.com/frankban/quicktest v1.14.6 h1:7Xjx+VpznH+oBnejlPUj8oUpdxnVs4f8XU8WnHkI4W8=
github.com/frankban/quicktest v1.14.6/go.mod h1:4ptaffx2x8+WTWXmUCuVU6aPUX1/Mz7zb5vbUoiM6w0=
github.com/fsnotify/fsnotify v1.7.0 h1:8JEhPFa5W2WU7YfeZzPNqzMP6Lwt7L2715Ggo0nosvA=
@@ -133,8 +123,6 @@ github.com/google/pprof v0.0.0-20240424215950-a892ee059fd6/go.mod h1:kf6iHlnVGwg
github.com/google/uuid v1.1.2/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
github.com/google/uuid v1.6.0 h1:NIvaJDMOsjHA8n1jAhLSgzrAzy1Hgr+hNrb57e+94F0=
github.com/google/uuid v1.6.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
-github.com/gorilla/securecookie v1.1.1/go.mod h1:ra0sb63/xPlUeL+yeDciTfxMRAA+MP+HVt/4epWDjd4=
-github.com/gorilla/sessions v1.2.1/go.mod h1:dk2InVEVJ0sfLlnXv9EAgkf6ecYs/i80K/zI+bUmuGM=
github.com/gorilla/websocket v1.4.2/go.mod h1:YR8l580nyteQvAITg2hZ9XVh4b55+EU/adAjf1fMHhE=
github.com/gorilla/websocket v1.5.0 h1:PPwGk2jz7EePpoHN/+ClbZu8SPxiqlu12wZP/3sWmnc=
github.com/gorilla/websocket v1.5.0/go.mod h1:YR8l580nyteQvAITg2hZ9XVh4b55+EU/adAjf1fMHhE=
@@ -146,14 +134,6 @@ github.com/grpc-ecosystem/grpc-gateway v1.16.0 h1:gmcG1KaJ57LophUzW0Hy8NmPhnMZb4
github.com/grpc-ecosystem/grpc-gateway v1.16.0/go.mod h1:BDjrQk3hbvj6Nolgz8mAMFbcEtjT1g+wF4CSlocrBnw=
github.com/grpc-ecosystem/grpc-gateway/v2 v2.16.0 h1:YBftPWNWd4WwGqtY2yeZL2ef8rHAxPBD8KFhJpmcqms=
github.com/grpc-ecosystem/grpc-gateway/v2 v2.16.0/go.mod h1:YN5jB8ie0yfIUg6VvR9Kz84aCaG7AsGZnLjhHbUqwPg=
-github.com/hashicorp/errwrap v1.0.0/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brvKWEqk/Jc4=
-github.com/hashicorp/errwrap v1.1.0 h1:OxrOeh75EUXMY8TBjag2fzXGZ40LB6IKw45YeGUDY2I=
-github.com/hashicorp/errwrap v1.1.0/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brvKWEqk/Jc4=
-github.com/hashicorp/go-multierror v1.1.1 h1:H5DkEtf6CXdFp0N0Em5UCwQpXMWke8IA0+lD48awMYo=
-github.com/hashicorp/go-multierror v1.1.1/go.mod h1:iw975J/qwKPdAO1clOe2L8331t/9/fmwbPZ6JB6eMoM=
-github.com/hashicorp/go-uuid v1.0.2/go.mod h1:6SBZvOh/SIDV7/2o3Jml5SYk/TvGqwFJ/bN7x4byOro=
-github.com/hashicorp/go-uuid v1.0.3 h1:2gKiV6YVmrJ1i2CKKa9obLvRieoRGviZFL26PcT/Co8=
-github.com/hashicorp/go-uuid v1.0.3/go.mod h1:6SBZvOh/SIDV7/2o3Jml5SYk/TvGqwFJ/bN7x4byOro=
github.com/hashicorp/hcl v1.0.1-vault-5 h1:kI3hhbbyzr4dldA8UdTb7ZlVVlI2DACdCfz31RPDgJM=
github.com/hashicorp/hcl v1.0.1-vault-5/go.mod h1:XYhtn6ijBSAj6n4YqAaf7RBPS4I06AItNorpy+MoQNM=
github.com/imdario/mergo v0.3.14 h1:fOqeC1+nCuuk6PKQdg9YmosXX7Y7mHX6R/0ZldI9iHo=
@@ -168,18 +148,6 @@ github.com/jackc/pgx/v5 v5.6.0 h1:SWJzexBzPL5jb0GEsrPMLIsi/3jOo7RHlzTjcAeDrPY=
github.com/jackc/pgx/v5 v5.6.0/go.mod h1:DNZ/vlrUnhWCoFGxHAG8U2ljioxukquj7utPDgtQdTw=
github.com/jackc/puddle/v2 v2.2.1 h1:RhxXJtFG022u4ibrCSMSiu5aOq1i77R3OHKNJj77OAk=
github.com/jackc/puddle/v2 v2.2.1/go.mod h1:vriiEXHvEE654aYKXXjOvZM39qJ0q+azkZFrfEOc3H4=
-github.com/jcmturner/aescts/v2 v2.0.0 h1:9YKLH6ey7H4eDBXW8khjYslgyqG2xZikXP0EQFKrle8=
-github.com/jcmturner/aescts/v2 v2.0.0/go.mod h1:AiaICIRyfYg35RUkr8yESTqvSy7csK90qZ5xfvvsoNs=
-github.com/jcmturner/dnsutils/v2 v2.0.0 h1:lltnkeZGL0wILNvrNiVCR6Ro5PGU/SeBvVO/8c/iPbo=
-github.com/jcmturner/dnsutils/v2 v2.0.0/go.mod h1:b0TnjGOvI/n42bZa+hmXL+kFJZsFT7G4t3HTlQ184QM=
-github.com/jcmturner/gofork v1.7.6 h1:QH0l3hzAU1tfT3rZCnW5zXl+orbkNMMRGJfdJjHVETg=
-github.com/jcmturner/gofork v1.7.6/go.mod h1:1622LH6i/EZqLloHfE7IeZ0uEJwMSUyQ/nDd82IeqRo=
-github.com/jcmturner/goidentity/v6 v6.0.1 h1:VKnZd2oEIMorCTsFBnJWbExfNN7yZr3EhJAxwOkZg6o=
-github.com/jcmturner/goidentity/v6 v6.0.1/go.mod h1:X1YW3bgtvwAXju7V3LCIMpY0Gbxyjn/mY9zx4tFonSg=
-github.com/jcmturner/gokrb5/v8 v8.4.4 h1:x1Sv4HaTpepFkXbt2IkL29DXRf8sOfZXo8eRKh687T8=
-github.com/jcmturner/gokrb5/v8 v8.4.4/go.mod h1:1btQEpgT6k+unzCwX1KdWMEwPPkkgBtP+F6aCACiMrs=
-github.com/jcmturner/rpc/v2 v2.0.3 h1:7FXXj8Ti1IaVFpSAziCZWNzbNuZmnvw/i6CqLNdWfZY=
-github.com/jcmturner/rpc/v2 v2.0.3/go.mod h1:VUJYCIDm3PVOEHw8sgt091/20OJjskO/YJki3ELg/Hc=
github.com/jonboulle/clockwork v0.3.0 h1:9BSCMi8C+0qdApAp4auwX0RkLGUjs956h0EkuQymUhg=
github.com/jonboulle/clockwork v0.3.0/go.mod h1:Pkfl5aHPm1nk2H9h0bjmnJD/BcgbGXUBGnn1kMkgxc8=
github.com/josharian/intern v1.0.0 h1:vlS4z54oSdjm0bgjRigI+G1HpF+tI+9rE5LLzOg8HmY=
@@ -227,8 +195,6 @@ github.com/pashagolub/pgxmock/v2 v2.12.0 h1:IVRmQtVFNCoq7NOZ+PdfvB6fwnLJmEuWDhnc
github.com/pashagolub/pgxmock/v2 v2.12.0/go.mod h1:D3YslkN/nJ4+umVqWmbwfSXugJIjPMChkGBG47OJpNw=
github.com/pelletier/go-toml/v2 v2.2.2 h1:aYUidT7k73Pcl9nb2gScu7NSrKCSHIDE89b3+6Wq+LM=
github.com/pelletier/go-toml/v2 v2.2.2/go.mod h1:1t835xjRzz80PqgE6HHgN2JOsmgYu/h4qDAS4n929Rs=
-github.com/pierrec/lz4/v4 v4.1.21 h1:yOVMLb6qSIDP67pl/5F7RepeKYu/VmTyEXvuMI5d9mQ=
-github.com/pierrec/lz4/v4 v4.1.21/go.mod h1:gZWDp/Ze/IJXGXf23ltt2EXimqmTUXEy0GFuRQyBid4=
github.com/pkg/errors v0.8.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
github.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4=
github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
@@ -253,8 +219,6 @@ github.com/prometheus/common v0.48.0/go.mod h1:0/KsvlIEfPQCQ5I2iNSAWKPZziNCvRs5E
github.com/prometheus/procfs v0.0.0-20181204211112-1dc9a6cbc91a/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk=
github.com/prometheus/procfs v0.12.0 h1:jluTpSng7V9hY0O2R9DzzJHYb2xULk9VTR1V1R/k6Bo=
github.com/prometheus/procfs v0.12.0/go.mod h1:pcuDEFsWDnvcgNzo4EEweacyhjeA9Zk3cnaOZAZEfOo=
-github.com/rcrowley/go-metrics v0.0.0-20201227073835-cf1acfcdf475 h1:N/ElC8H3+5XpJzTSTfLsJV/mx9Q9g7kxmchpfZyxgzM=
-github.com/rcrowley/go-metrics v0.0.0-20201227073835-cf1acfcdf475/go.mod h1:bCqnVzQkZxMG4s8nGwiZ5l3QUCyqpo9Y+/ZMZ9VjZe4=
github.com/redis/go-redis/v9 v9.5.3 h1:fOAp1/uJG+ZtcITgZOfYFmTKPE7n4Vclj1wZFgRciUU=
github.com/redis/go-redis/v9 v9.5.3/go.mod h1:hdY0cQFCN4fnSYT6TkisLufl/4W5UIXyv0b/CLO2V2M=
github.com/rogpeppe/fastuuid v1.2.0/go.mod h1:jVj6XXZzXRy/MSR5jhDC/2q6DgLz+nrA6LYCDYWNEvQ=
@@ -268,8 +232,6 @@ github.com/sagikazarmark/slog-shim v0.1.0 h1:diDBnUNK9N/354PgrxMywXnAwEr1QZcOr6g
github.com/sagikazarmark/slog-shim v0.1.0/go.mod h1:SrcSrq8aKtyuqEI1uvTDTK1arOWRIczQRv+GVI1AkeQ=
github.com/savsgio/gotils v0.0.0-20230208104028-c358bd845dee h1:8Iv5m6xEo1NR1AvpV+7XmhI4r39LGNzwUL4YpMuL5vk=
github.com/savsgio/gotils v0.0.0-20230208104028-c358bd845dee/go.mod h1:qwtSXrKuJh/zsFQ12yEE89xfCrGKK63Rr7ctU/uCo4g=
-github.com/sethvargo/go-password v0.2.0 h1:BTDl4CC/gjf/axHMaDQtw507ogrXLci6XRiLc7i/UHI=
-github.com/sethvargo/go-password v0.2.0/go.mod h1:Ym4Mr9JXLBycr02MFuVQ/0JHidNetSgbzutTr3zsYXE=
github.com/shirou/gopsutil/v3 v3.24.5 h1:i0t8kL+kQTvpAYToeuiVk3TgDeKOFioZO3Ztz/iZ9pI=
github.com/shirou/gopsutil/v3 v3.24.5/go.mod h1:bsoOS1aStSs9ErQ1WWfxllSeS1K5D+U30r2NfcubMVk=
github.com/shoenig/go-m1cpu v0.1.6 h1:nxdKQNcEB6vzgA2E2bvzKIYRuNj7XNJ4S/aRSwKzFtM=
@@ -387,7 +349,6 @@ golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACk
golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
golang.org/x/crypto v0.0.0-20210921155107-089bfa567519/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc=
-golang.org/x/crypto v0.6.0/go.mod h1:OFC/31mSvZgRz0V1QTNCzfAI1aIRzbiufJtkMIlEp58=
golang.org/x/crypto v0.39.0 h1:SHs+kF4LP+f+p14esP5jAoDpHU8Gu/v9lFRK6IT5imM=
golang.org/x/crypto v0.39.0/go.mod h1:L+Xg3Wf6HoL4Bn4238Z6ft6KfEpN0tJGo53AAPC632U=
golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=
@@ -409,7 +370,6 @@ golang.org/x/net v0.0.0-20190213061140-3a22650c66bd/go.mod h1:mL1N/T3taQHkDXs73r
golang.org/x/net v0.0.0-20190311183353-d8887717615a/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
-golang.org/x/net v0.0.0-20200114155413-6afb5195e5aa/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
golang.org/x/net v0.0.0-20200226121028-0de0cce0169b/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
golang.org/x/net v0.0.0-20200822124328-c89045814202/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA=
golang.org/x/net v0.0.0-20201021035429-f5854403a974/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU=
@@ -418,8 +378,6 @@ golang.org/x/net v0.0.0-20210226172049-e18ecbb05110/go.mod h1:m0MpNAwzfU5UDzcl9v
golang.org/x/net v0.0.0-20210405180319-a5a99cb37ef4/go.mod h1:p54w0d4576C0XHj96bSt6lcn1PtDYWL6XObtHCRCNQM=
golang.org/x/net v0.0.0-20211123203042-d83791d6bcd9/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y=
golang.org/x/net v0.0.0-20220722155237-a158d28d115b/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c=
-golang.org/x/net v0.6.0/go.mod h1:2Tu9+aMcznHK/AK1HMvgo6xiTLG5rD5rZLDS+rp2Bjs=
-golang.org/x/net v0.7.0/go.mod h1:2Tu9+aMcznHK/AK1HMvgo6xiTLG5rD5rZLDS+rp2Bjs=
golang.org/x/net v0.40.0 h1:79Xs7wF06Gbdcg4kdCCIQArK11Z1hr5POQ6+fIYHNuY=
golang.org/x/net v0.40.0/go.mod h1:y0hY0exeL2Pku80/zKK7tpntoX23cqL3Oa6njdgRtds=
golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U=
@@ -454,14 +412,12 @@ golang.org/x/sys v0.0.0-20211025201205-69cdffdb9359/go.mod h1:oPkhp1MJrh7nUepCBc
golang.org/x/sys v0.0.0-20220520151302-bc2c85ada10a/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20220715151400-c0bba94af5f8/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20220722155257-8c9f86f7a55f/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
-golang.org/x/sys v0.5.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.8.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.11.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.33.0 h1:q3i8TbbEz+JRD9ywIRlyRAQbM0qF7hu24q3teo2hbuw=
golang.org/x/sys v0.33.0/go.mod h1:BJP2sWEmIv4KK5OTEluFJCKSidICx8ciO85XgH3Ak8k=
golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo=
golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8=
-golang.org/x/term v0.5.0/go.mod h1:jMB1sMXY+tzblOD4FWmEbocvup2/aLOaQEp7JmGp78k=
golang.org/x/term v0.32.0 h1:DR4lr0TjUs3epypdhTOkMmuF5CDFJ/8pOnbzMZPQ7bg=
golang.org/x/term v0.32.0/go.mod h1:uZG1FhGx848Sqfsq4/DlJr3xGGsYMu/L5GW4abiaEPQ=
golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
@@ -469,7 +425,6 @@ golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
golang.org/x/text v0.3.6/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ=
golang.org/x/text v0.3.8/go.mod h1:E6s5w1FMmriuDzIBO73fBruAKo1PCIq6d2Q6DHfQ8WQ=
-golang.org/x/text v0.7.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8=
golang.org/x/text v0.26.0 h1:P42AVeLghgTYr4+xUnTRKDMqpar+PtX7KWuNQL21L8M=
golang.org/x/text v0.26.0/go.mod h1:QK15LZJUUQVJxhz7wXgxSy/CJaTFjd0G+YLonydOVQA=
golang.org/x/time v0.5.0 h1:o7cqy6amK/52YcAKIPlM3a+Fpj35zvRj2TP+e1xFSfk=
@@ -557,8 +512,6 @@ k8s.io/klog/v2 v2.120.1 h1:QXU6cPEOIslTGvZaXvFWiP9VKyeet3sawzTOvdXb4Vw=
k8s.io/klog/v2 v2.120.1/go.mod h1:3Jpz1GvMt720eyJH1ckRHK1EDfpxISzJ7I9OYgaDtPE=
k8s.io/kube-openapi v0.0.0-20240228011516-70dd3763d340 h1:BZqlfIlq5YbRMFko6/PM7FjZpUb45WallggurYhKGag=
k8s.io/kube-openapi v0.0.0-20240228011516-70dd3763d340/go.mod h1:yD4MZYeKMBwQKVht279WycxKyM84kkAx2DPrTXaeb98=
-k8s.io/kubectl v0.28.3 h1:H1Peu1O3EbN9zHkJCcvhiJ4NUj6lb88sGPO5wrWIM6k=
-k8s.io/kubectl v0.28.3/go.mod h1:RDAudrth/2wQ3Sg46fbKKl4/g+XImzvbsSRZdP2RiyE=
k8s.io/utils v0.0.0-20231127182322-b307cd553661 h1:FepOBzJ0GXm8t0su67ln2wAZjbQ6RxQGZDnzuLcrUTI=
k8s.io/utils v0.0.0-20231127182322-b307cd553661/go.mod h1:OLgZIPagt7ERELqWJFomSt595RzquPNLL48iOWgYOg0=
sigs.k8s.io/controller-runtime v0.17.2 h1:FwHwD1CTUemg0pW2otk7/U5/i5m2ymzvOXdbeGOUvw0=
diff --git a/hack/docgen/main.go b/hack/docgen/main.go
index 87af1db..f33ab67 100644
--- a/hack/docgen/main.go
+++ b/hack/docgen/main.go
@@ -36,7 +36,9 @@ func genMarkdownTreeForOverview(cmd *cobra.Command, dir string) error {
if err != nil {
return err
}
- defer f.Close()
+ defer func() {
+ _ = f.Close()
+ }()
if _, err = io.WriteString(f, `---
title: KubeBlocks dbctl Overview
diff --git a/httpserver/apis.go b/httpserver/apis.go
index f1fa1f6..18d3b1a 100644
--- a/httpserver/apis.go
+++ b/httpserver/apis.go
@@ -138,7 +138,11 @@ func OperationWrapper(op operations.Operation) fasthttp.RequestHandler {
if resp == nil {
respond(reqCtx, withEmpty())
} else {
- body, _ = json.Marshal(resp.Data)
+ if resp.Role != "" {
+ body = []byte(resp.Role)
+ } else {
+ body, _ = json.Marshal(resp.Data)
+ }
respond(reqCtx, withMetadata(resp.Metadata), withJSON(statusCode, body))
}
}
diff --git a/httpserver/config.go b/httpserver/config.go
index 448fc5f..7e3a03e 100644
--- a/httpserver/config.go
+++ b/httpserver/config.go
@@ -37,7 +37,7 @@ var config Config
var logger = ctrl.Log.WithName("HTTPServer")
func InitFlags(fs *pflag.FlagSet) {
- fs.IntVar(&config.Port, "port", 3501, "The HTTP Server listen port for Lorry service.")
- fs.StringVar(&config.Address, "address", "0.0.0.0", "The HTTP Server listen address for Lorry service.")
- fs.BoolVar(&config.APILogging, "api-logging", true, "Enable api logging for Lorry request.")
+ fs.IntVar(&config.Port, "port", 5001, "The HTTP Server listen port for dbctl service.")
+ fs.StringVar(&config.Address, "address", "0.0.0.0", "The HTTP Server listen address for dbctl service.")
+ fs.BoolVar(&config.APILogging, "api-logging", true, "Enable api logging for dbctl request.")
}
diff --git a/httpserver/server.go b/httpserver/server.go
index 07246f1..7702124 100644
--- a/httpserver/server.go
+++ b/httpserver/server.go
@@ -32,7 +32,7 @@ import (
"github.com/apecloud/dbctl/operations"
)
-// Server is an interface for the Lorry HTTP server.
+// Server is an interface for the dbctl HTTP server.
type Server interface {
io.Closer
Router() fasthttp.RequestHandler
@@ -67,7 +67,7 @@ func (s *server) StartNonBlocking() error {
var listeners []net.Listener
if s.config.UnixDomainSocket != "" {
- socket := fmt.Sprintf("%s/lorry.socket", s.config.UnixDomainSocket)
+ socket := fmt.Sprintf("%s/dbctl.socket", s.config.UnixDomainSocket)
l, err := net.Listen("unix", socket)
if err != nil {
return err
diff --git a/httpserver/server_test.go b/httpserver/server_test.go
index 0f2be35..5537f21 100644
--- a/httpserver/server_test.go
+++ b/httpserver/server_test.go
@@ -141,14 +141,14 @@ func TestRouter(t *testing.T) {
assert.Equal(t, "operation exec failed: fake do error", response.Message)
})
- t.Run("return meta data", func(t *testing.T) {
- ctx := mockHTTPRequest("/v1.0/fake-6", fasthttp.MethodPost, `{"data": "test"}`)
- fakeRouterHandler(ctx)
-
- assert.Equal(t, fasthttp.StatusOK, ctx.Response.StatusCode())
- assert.Equal(t, string(ctx.Response.Body()), `{"data":"InRlc3Qi"}`)
- assert.Equal(t, []byte("fake"), ctx.Response.Header.Peek("KB.fake-meta"))
- })
+ //t.Run("return meta data", func(t *testing.T) {
+ // ctx := mockHTTPRequest("/v1.0/fake-6", fasthttp.MethodPost, `{"data": "test"}`)
+ // fakeRouterHandler(ctx)
+ //
+ // assert.Equal(t, fasthttp.StatusOK, ctx.Response.StatusCode())
+ // assert.Equal(t, string(ctx.Response.Body()), `{"data":"InRlc3Qi"}`)
+ // assert.Equal(t, []byte("fake"), ctx.Response.Header.Peek("KB.fake-meta"))
+ //})
}
func TestStartNonBlocking(t *testing.T) {
diff --git a/operations/component/post_provision.go b/operations/component/post_provision.go
deleted file mode 100644
index 2d249fe..0000000
--- a/operations/component/post_provision.go
+++ /dev/null
@@ -1,71 +0,0 @@
-/*
-Copyright (C) 2022-2024 ApeCloud Co., Ltd
-
-This file is part of KubeBlocks project
-
-This program is free software: you can redistribute it and/or modify
-it under the terms of the GNU Affero General Public License as published by
-the Free Software Foundation, either version 3 of the License, or
-(at your option) any later version.
-
-This program is distributed in the hope that it will be useful
-but WITHOUT ANY WARRANTY; without even the implied warranty of
-MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-GNU Affero General Public License for more details.
-
-You should have received a copy of the GNU Affero General Public License
-along with this program. If not, see .
-*/
-
-package component
-
-import (
- "context"
- "strings"
- "time"
-
- "github.com/pkg/errors"
-
- "github.com/apecloud/dbctl/engines/models"
- "github.com/apecloud/dbctl/engines/register"
- "github.com/apecloud/dbctl/operations"
- "github.com/apecloud/dbctl/util"
-)
-
-type PostProvision struct {
- operations.Base
- Timeout time.Duration
- Command []string
-}
-
-type PostProvisionManager interface {
- PostProvision(ctx context.Context, componentNames, podNames, podIPs, podHostNames, podHostIPs string) error
-}
-
-var postProvision operations.Operation = &PostProvision{}
-
-func init() {
- err := operations.Register(strings.ToLower(string(util.PostProvisionOperation)), postProvision)
- if err != nil {
- panic(err.Error())
- }
-}
-
-func (s *PostProvision) Do(ctx context.Context, req *operations.OpsRequest) (*operations.OpsResponse, error) {
- componentNames := req.GetString("componentNames")
- podNames := req.GetString("podNames")
- podIPs := req.GetString("podIPs")
- podHostNames := req.GetString("podHostNames")
- podHostIPs := req.GetString("podHostIPs")
- manager, err := register.GetDBManager()
- if err != nil {
- return nil, errors.Wrap(err, "get manager failed")
- }
-
- ppManager, ok := manager.(PostProvisionManager)
- if !ok {
- return nil, models.ErrNotImplemented
- }
- err = ppManager.PostProvision(ctx, componentNames, podNames, podIPs, podHostNames, podHostIPs)
- return nil, err
-}
diff --git a/operations/component/pre_terminate.go b/operations/component/pre_terminate.go
deleted file mode 100644
index fb50dab..0000000
--- a/operations/component/pre_terminate.go
+++ /dev/null
@@ -1,65 +0,0 @@
-/*
-Copyright (C) 2022-2024 ApeCloud Co., Ltd
-
-This file is part of KubeBlocks project
-
-This program is free software: you can redistribute it and/or modify
-it under the terms of the GNU Affero General Public License as published by
-the Free Software Foundation, either version 3 of the License, or
-(at your option) any later version.
-
-This program is distributed in the hope that it will be useful
-but WITHOUT ANY WARRANTY; without even the implied warranty of
-MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-GNU Affero General Public License for more details.
-
-You should have received a copy of the GNU Affero General Public License
-along with this program. If not, see .
-*/
-
-package component
-
-import (
- "context"
- "strings"
- "time"
-
- "github.com/pkg/errors"
-
- "github.com/apecloud/dbctl/engines/models"
- "github.com/apecloud/dbctl/engines/register"
- "github.com/apecloud/dbctl/operations"
- "github.com/apecloud/dbctl/util"
-)
-
-type PreTerminate struct {
- operations.Base
- Timeout time.Duration
-}
-
-type PreTerminateManager interface {
- PreTerminate(ctx context.Context) error
-}
-
-var preTerminate operations.Operation = &PreTerminate{}
-
-func init() {
- err := operations.Register(strings.ToLower(string(util.PreTerminateOperation)), preTerminate)
- if err != nil {
- panic(err.Error())
- }
-}
-
-func (s *PreTerminate) Do(ctx context.Context, req *operations.OpsRequest) (*operations.OpsResponse, error) {
- manager, err := register.GetDBManager()
- if err != nil {
- return nil, errors.Wrap(err, "get manager failed")
- }
-
- ptManager, ok := manager.(PreTerminateManager)
- if !ok {
- return nil, models.ErrNotImplemented
- }
- err = ptManager.PreTerminate(ctx)
- return nil, err
-}
diff --git a/operations/register/register.go b/operations/register/register.go
index 9c53a31..6daa917 100644
--- a/operations/register/register.go
+++ b/operations/register/register.go
@@ -21,11 +21,8 @@ package register
import (
"github.com/apecloud/dbctl/operations"
- _ "github.com/apecloud/dbctl/operations/component"
_ "github.com/apecloud/dbctl/operations/replica"
_ "github.com/apecloud/dbctl/operations/sql"
- _ "github.com/apecloud/dbctl/operations/user"
- _ "github.com/apecloud/dbctl/operations/volume"
)
func Register(name string, op operations.Operation) error {
diff --git a/operations/replica/checkrole.go b/operations/replica/checkrole.go
deleted file mode 100644
index 8a75363..0000000
--- a/operations/replica/checkrole.go
+++ /dev/null
@@ -1,194 +0,0 @@
-/*
-Copyright (C) 2022-2024 ApeCloud Co., Ltd
-
-This file is part of KubeBlocks project
-
-This program is free software: you can redistribute it and/or modify
-it under the terms of the GNU Affero General Public License as published by
-the Free Software Foundation, either version 3 of the License, or
-(at your option) any later version.
-
-This program is distributed in the hope that it will be useful
-but WITHOUT ANY WARRANTY; without even the implied warranty of
-MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-GNU Affero General Public License for more details.
-
-You should have received a copy of the GNU Affero General Public License
-along with this program. If not, see .
-*/
-
-package replica
-
-import (
- "context"
- "encoding/json"
- "fmt"
- "strings"
- "time"
-
- "github.com/go-logr/logr"
- "github.com/pkg/errors"
- "github.com/spf13/viper"
- ctrl "sigs.k8s.io/controller-runtime"
-
- "github.com/apecloud/dbctl/constant"
- "github.com/apecloud/dbctl/dcs"
- "github.com/apecloud/dbctl/engines/register"
- "github.com/apecloud/dbctl/operations"
- "github.com/apecloud/dbctl/util"
-)
-
-// AccessMode defines SVC access mode enums.
-// +enum
-type AccessMode string
-
-type CheckRole struct {
- operations.Base
- logger logr.Logger
- dcsStore dcs.DCS
- OriRole string
- CheckRoleFailedCount int
- FailedEventReportFrequency int
- Timeout time.Duration
- DBRoles map[string]AccessMode
- Command []string
-}
-
-var checkrole operations.Operation = &CheckRole{}
-
-func init() {
- err := operations.Register(strings.ToLower(string(util.CheckRoleOperation)), checkrole)
- if err != nil {
- panic(err.Error())
- }
-}
-
-func (s *CheckRole) Init(ctx context.Context) error {
- s.dcsStore = dcs.GetStore()
- if s.dcsStore == nil {
- return errors.New("dcs store init failed")
- }
-
- s.logger = ctrl.Log.WithName("checkrole")
- val := viper.GetString(constant.KBEnvServiceRoles)
- if val != "" {
- if err := json.Unmarshal([]byte(val), &s.DBRoles); err != nil {
- s.logger.Info("KB_DB_ROLES env format error", "error", err)
- }
- }
-
- s.FailedEventReportFrequency = viper.GetInt("KB_FAILED_EVENT_REPORT_FREQUENCY")
- if s.FailedEventReportFrequency < 300 {
- s.FailedEventReportFrequency = 300
- } else if s.FailedEventReportFrequency > 3600 {
- s.FailedEventReportFrequency = 3600
- }
-
- timeoutSeconds := util.DefaultProbeTimeoutSeconds
- if viper.IsSet(constant.KBEnvRoleProbeTimeout) {
- timeoutSeconds = viper.GetInt(constant.KBEnvRoleProbeTimeout)
- }
- // lorry utilizes the pod readiness probe to trigger role probe and 'timeoutSeconds' is directly copied from the 'probe.timeoutSeconds' field of pod.
- // here we give 80% of the total time to role probe job and leave the remaining 20% to kubelet to handle the readiness probe related tasks.
- s.Timeout = time.Duration(timeoutSeconds) * (800 * time.Millisecond)
- s.OriRole = "waitForStart"
- actionJSON := viper.GetString(constant.KBEnvActionCommands)
- if actionJSON != "" {
- actionCommands := map[string][]string{}
- err := json.Unmarshal([]byte(actionJSON), &actionCommands)
- if err != nil {
- s.logger.Info("get action commands failed", "error", err.Error())
- return err
- }
- roleProbeCmd, ok := actionCommands[constant.RoleProbeAction]
- if ok && len(roleProbeCmd) > 0 {
- s.Command = roleProbeCmd
- }
- }
- return nil
-}
-
-func (s *CheckRole) IsReadonly(ctx context.Context) bool {
- return true
-}
-
-func (s *CheckRole) Do(ctx context.Context, _ *operations.OpsRequest) (*operations.OpsResponse, error) {
- resp := &operations.OpsResponse{
- Data: map[string]any{},
- }
- resp.Data["operation"] = util.CheckRoleOperation
- resp.Data["originalRole"] = s.OriRole
- var role string
- var err error
-
- manager, err1 := register.GetDBManager()
- if err1 != nil {
- return nil, errors.Wrap(err1, "get manager failed")
- }
-
- if !manager.IsDBStartupReady() {
- resp.Data["message"] = "db not ready"
- return resp, nil
- }
-
- ctx1, cancel := context.WithTimeout(ctx, s.Timeout)
- defer cancel()
- role, err = manager.GetReplicaRole(ctx1)
-
- if err != nil {
- s.logger.Info("executing checkRole error", "error", err.Error())
- // do not return err, as it will cause readinessprobe to fail
- err = nil
- if s.CheckRoleFailedCount%s.FailedEventReportFrequency == 0 {
- s.logger.Info("role checks failed continuously", "times", s.CheckRoleFailedCount)
- // if err is not nil, send event through kubelet readinessprobe
- err = util.SentEventForProbe(ctx, resp.Data)
- }
- s.CheckRoleFailedCount++
- return resp, err
- }
-
- s.CheckRoleFailedCount = 0
- if isValid, message := s.roleValidate(role); !isValid {
- resp.Data["message"] = message
- return resp, nil
- }
-
- if s.OriRole == role {
- return nil, nil
- }
-
- resp.Data["role"] = role
- resp.Data["event"] = util.OperationSuccess
- s.OriRole = role
- err = util.SentEventForProbe(ctx, resp.Data)
- return resp, err
-}
-
-// Component may have some internal roles that needn't be exposed to end user,
-// and not configured in cluster definition, e.g. ETCD's Candidate.
-// roleValidate is used to filter the internal roles and decrease the number
-// of report events to reduce the possibility of event conflicts.
-func (s *CheckRole) roleValidate(role string) (bool, string) {
- if role == "" {
- // some time db replica may not have role, e.g. oceanbase
- return true, ""
- }
- // do not validate them when db roles setting is missing
- if len(s.DBRoles) == 0 {
- return true, ""
- }
-
- var msg string
- isValid := false
- for r := range s.DBRoles {
- if strings.EqualFold(r, role) {
- isValid = true
- break
- }
- }
- if !isValid {
- msg = fmt.Sprintf("role %s is not configured in cluster definition %v", role, s.DBRoles)
- }
- return isValid, msg
-}
diff --git a/operations/replica/checkrunning.go b/operations/replica/checkrunning.go
deleted file mode 100644
index ab5a274..0000000
--- a/operations/replica/checkrunning.go
+++ /dev/null
@@ -1,127 +0,0 @@
-/*
-Copyright (C) 2022-2024 ApeCloud Co., Ltd
-
-This file is part of KubeBlocks project
-
-This program is free software: you can redistribute it and/or modify
-it under the terms of the GNU Affero General Public License as published by
-the Free Software Foundation, either version 3 of the License, or
-(at your option) any later version.
-
-This program is distributed in the hope that it will be useful
-but WITHOUT ANY WARRANTY; without even the implied warranty of
-MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-GNU Affero General Public License for more details.
-
-You should have received a copy of the GNU Affero General Public License
-along with this program. If not, see .
-*/
-
-package replica
-
-import (
- "context"
- "fmt"
- "net"
- "strconv"
- "time"
-
- "github.com/go-logr/logr"
- "github.com/pkg/errors"
- "github.com/spf13/viper"
- ctrl "sigs.k8s.io/controller-runtime"
-
- "github.com/apecloud/dbctl/constant"
- "github.com/apecloud/dbctl/engines/register"
- "github.com/apecloud/dbctl/operations"
- "github.com/apecloud/dbctl/util"
-)
-
-// CheckRunning checks whether the binding service is in running status,
-// If check fails continuously, report an event at FailedEventReportFrequency frequency
-type CheckRunning struct {
- operations.Base
- logger logr.Logger
- Timeout time.Duration
- DBAddress string
- CheckRunningFailedCount int
- FailedEventReportFrequency int
-}
-
-var checkrunning operations.Operation = &CheckRunning{}
-
-func init() {
- err := operations.Register("checkrunning", checkrunning)
- if err != nil {
- panic(err.Error())
- }
-}
-
-func (s *CheckRunning) Init(ctx context.Context) error {
- s.FailedEventReportFrequency = viper.GetInt("KB_FAILED_EVENT_REPORT_FREQUENCY")
- if s.FailedEventReportFrequency < 300 {
- s.FailedEventReportFrequency = 300
- } else if s.FailedEventReportFrequency > 3600 {
- s.FailedEventReportFrequency = 3600
- }
-
- timeoutSeconds := util.DefaultProbeTimeoutSeconds
- if viper.IsSet(constant.KBEnvRoleProbeTimeout) {
- timeoutSeconds = viper.GetInt(constant.KBEnvRoleProbeTimeout)
- }
- // lorry utilizes the pod readiness probe to trigger probe and 'timeoutSeconds' is directly copied from the 'probe.timeoutSeconds' field of pod.
- // here we give 80% of the total time to probe job and leave the remaining 20% to kubelet to handle the readiness probe related tasks.
- s.Timeout = time.Duration(timeoutSeconds) * (800 * time.Millisecond)
- s.DBAddress = s.getAddress()
- s.logger = ctrl.Log.WithName("checkrunning")
- return nil
-}
-
-func (s *CheckRunning) Do(ctx context.Context, req *operations.OpsRequest) (*operations.OpsResponse, error) {
- manager, err := register.GetDBManager()
- if err != nil {
- return nil, errors.Wrap(err, "get manager failed")
- }
-
- var message string
- opsRsp := &operations.OpsResponse{}
- opsRsp.Data["operation"] = util.CheckRunningOperation
-
- dbPort, err := manager.GetPort()
- if err != nil {
- return nil, errors.Wrap(err, "get db port failed")
- }
-
- host := net.JoinHostPort(s.DBAddress, strconv.Itoa(dbPort))
- // sql exec timeout needs to be less than httpget's timeout which by default 1s.
- conn, err := net.DialTimeout("tcp", host, 500*time.Millisecond)
- if err != nil {
- message = fmt.Sprintf("running check %s error", host)
- s.logger.Error(err, message)
- opsRsp.Data["event"] = util.OperationFailed
- opsRsp.Data["message"] = message
- if s.CheckRunningFailedCount%s.FailedEventReportFrequency == 0 {
- s.logger.Info("running checks failed continuously", "times", s.CheckRunningFailedCount)
- // resp.Metadata[StatusCode] = OperationFailedHTTPCode
- err = util.SentEventForProbe(ctx, opsRsp.Data)
- }
- s.CheckRunningFailedCount++
- return opsRsp, err
- }
- defer conn.Close()
- s.CheckRunningFailedCount = 0
- message = "TCP Connection Established Successfully!"
- if tcpCon, ok := conn.(*net.TCPConn); ok {
- err := tcpCon.SetLinger(0)
- s.logger.Error(err, "running check, set tcp linger failed")
- }
- opsRsp.Data["event"] = util.OperationSuccess
- opsRsp.Data["message"] = message
- return opsRsp, nil
-}
-
-// getAddress returns component service address, if component is not listening on
-// 127.0.0.1, the Operation needs to overwrite this function and set ops.DBAddress
-func (s *CheckRunning) getAddress() string {
- return "127.0.0.1"
-}
diff --git a/operations/replica/data_dump.go b/operations/replica/data_dump.go
deleted file mode 100644
index e03f616..0000000
--- a/operations/replica/data_dump.go
+++ /dev/null
@@ -1,59 +0,0 @@
-/*
-Copyright (C) 2022-2024 ApeCloud Co., Ltd
-
-This file is part of KubeBlocks project
-
-This program is free software: you can redistribute it and/or modify
-it under the terms of the GNU Affero General Public License as published by
-the Free Software Foundation, either version 3 of the License, or
-(at your option) any later version.
-
-This program is distributed in the hope that it will be useful
-but WITHOUT ANY WARRANTY; without even the implied warranty of
-MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-GNU Affero General Public License for more details.
-
-You should have received a copy of the GNU Affero General Public License
-along with this program. If not, see .
-*/
-
-package replica
-
-import (
- "context"
- "strings"
-
- "github.com/go-logr/logr"
-
- "github.com/apecloud/dbctl/operations"
- "github.com/apecloud/dbctl/util"
-)
-
-type dataDump struct {
- operations.Base
- logger logr.Logger
- Command []string
-}
-
-func init() {
- err := operations.Register(strings.ToLower(string(util.DataDumpOperation)), &dataDump{})
- if err != nil {
- panic(err.Error())
- }
-}
-
-func (s *dataDump) Do(ctx context.Context, req *operations.OpsRequest) (*operations.OpsResponse, error) {
- return nil, doCommonAction(ctx, s.logger, "dataDump", s.Command)
-}
-
-func doCommonAction(ctx context.Context, logger logr.Logger, action string, commands []string) error {
- envs, err := util.GetGlobalSharedEnvs()
- if err != nil {
- return err
- }
- output, err := util.ExecCommand(ctx, commands, envs)
- if output != "" {
- logger.Info(action, "output", output)
- }
- return err
-}
diff --git a/operations/replica/data_load.go b/operations/replica/data_load.go
deleted file mode 100644
index 36050ff..0000000
--- a/operations/replica/data_load.go
+++ /dev/null
@@ -1,47 +0,0 @@
-/*
-Copyright (C) 2022-2024 ApeCloud Co., Ltd
-
-This file is part of KubeBlocks project
-
-This program is free software: you can redistribute it and/or modify
-it under the terms of the GNU Affero General Public License as published by
-the Free Software Foundation, either version 3 of the License, or
-(at your option) any later version.
-
-This program is distributed in the hope that it will be useful
-but WITHOUT ANY WARRANTY; without even the implied warranty of
-MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-GNU Affero General Public License for more details.
-
-You should have received a copy of the GNU Affero General Public License
-along with this program. If not, see .
-*/
-
-package replica
-
-import (
- "context"
- "strings"
-
- "github.com/go-logr/logr"
-
- "github.com/apecloud/dbctl/operations"
- "github.com/apecloud/dbctl/util"
-)
-
-type dataLoad struct {
- operations.Base
- logger logr.Logger
- Command []string
-}
-
-func init() {
- err := operations.Register(strings.ToLower(string(util.DataLoadOperation)), &dataLoad{})
- if err != nil {
- panic(err.Error())
- }
-}
-
-func (s *dataLoad) Do(ctx context.Context, req *operations.OpsRequest) (*operations.OpsResponse, error) {
- return nil, doCommonAction(ctx, s.logger, "dataLoad", s.Command)
-}
diff --git a/operations/replica/getlag.go b/operations/replica/getlag.go
deleted file mode 100644
index 37beb46..0000000
--- a/operations/replica/getlag.go
+++ /dev/null
@@ -1,92 +0,0 @@
-/*
-Copyright (C) 2022-2024 ApeCloud Co., Ltd
-
-This file is part of KubeBlocks project
-
-This program is free software: you can redistribute it and/or modify
-it under the terms of the GNU Affero General Public License as published by
-the Free Software Foundation, either version 3 of the License, or
-(at your option) any later version.
-
-This program is distributed in the hope that it will be useful
-but WITHOUT ANY WARRANTY; without even the implied warranty of
-MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-GNU Affero General Public License for more details.
-
-You should have received a copy of the GNU Affero General Public License
-along with this program. If not, see .
-*/
-
-package replica
-
-import (
- "context"
-
- "github.com/go-logr/logr"
- "github.com/pkg/errors"
- ctrl "sigs.k8s.io/controller-runtime"
-
- "github.com/apecloud/dbctl/dcs"
- "github.com/apecloud/dbctl/engines"
- "github.com/apecloud/dbctl/engines/register"
- "github.com/apecloud/dbctl/operations"
- "github.com/apecloud/dbctl/util"
-)
-
-type GetLag struct {
- operations.Base
- dcsStore dcs.DCS
- dbManager engines.DBManager
- logger logr.Logger
-}
-
-var getlag operations.Operation = &GetLag{}
-
-func init() {
- err := operations.Register("getlag", getlag)
- if err != nil {
- panic(err.Error())
- }
-}
-
-func (s *GetLag) Init(context.Context) error {
- s.dcsStore = dcs.GetStore()
- if s.dcsStore == nil {
- return errors.New("dcs store init failed")
- }
-
- dbManager, err := register.GetDBManager()
- if err != nil {
- return errors.Wrap(err, "get manager failed")
- }
- s.dbManager = dbManager
- s.logger = ctrl.Log.WithName("getlag")
- return nil
-}
-
-func (s *GetLag) IsReadonly(context.Context) bool {
- return false
-}
-
-func (s *GetLag) Do(ctx context.Context, req *operations.OpsRequest) (*operations.OpsResponse, error) {
- sql := req.GetString("sql")
- if sql == "" {
- return nil, errors.New("no sql provided")
- }
-
- resp := &operations.OpsResponse{
- Data: map[string]any{},
- }
- resp.Data["operation"] = util.ExecOperation
- k8sStore := s.dcsStore.(*dcs.KubernetesStore)
- cluster := k8sStore.GetClusterFromCache()
-
- lag, err := s.dbManager.GetLag(ctx, cluster)
- if err != nil {
- s.logger.Info("executing getlag error", "error", err)
- return resp, err
- }
-
- resp.Data["lag"] = lag
- return resp, err
-}
diff --git a/operations/replica/getrole.go b/operations/replica/getrole.go
index aba71bf..795d149 100644
--- a/operations/replica/getrole.go
+++ b/operations/replica/getrole.go
@@ -45,7 +45,7 @@ func init() {
}
}
-func (s *GetRole) Init(ctx context.Context) error {
+func (s *GetRole) Init(context.Context) error {
s.Logger = ctrl.Log.WithName("getrole")
dbManager, err := register.GetDBManager()
if err != nil {
@@ -72,6 +72,6 @@ func (s *GetRole) Do(ctx context.Context, req *operations.OpsRequest) (*operatio
return resp, err
}
- resp.Data["role"] = role
+ resp.Role = role
return resp, err
}
diff --git a/operations/replica/healthcheck.go b/operations/replica/healthcheck.go
deleted file mode 100644
index 366520a..0000000
--- a/operations/replica/healthcheck.go
+++ /dev/null
@@ -1,157 +0,0 @@
-/*
-Copyright (C) 2022-2024 ApeCloud Co., Ltd
-
-This file is part of KubeBlocks project
-
-This program is free software: you can redistribute it and/or modify
-it under the terms of the GNU Affero General Public License as published by
-the Free Software Foundation, either version 3 of the License, or
-(at your option) any later version.
-
-This program is distributed in the hope that it will be useful
-but WITHOUT ANY WARRANTY; without even the implied warranty of
-MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-GNU Affero General Public License for more details.
-
-You should have received a copy of the GNU Affero General Public License
-along with this program. If not, see .
-*/
-
-package replica
-
-import (
- "context"
- "strings"
-
- "github.com/go-logr/logr"
- "github.com/pkg/errors"
- "github.com/spf13/viper"
- ctrl "sigs.k8s.io/controller-runtime"
-
- "github.com/apecloud/dbctl/dcs"
- "github.com/apecloud/dbctl/engines"
- "github.com/apecloud/dbctl/engines/register"
- "github.com/apecloud/dbctl/operations"
- "github.com/apecloud/dbctl/util"
-)
-
-type CheckStatus struct {
- operations.Base
- LeaderFailedCount int
- FailureThreshold int
- dcsStore dcs.DCS
- dbManager engines.DBManager
- checkFailedCount int
- failedEventReportFrequency int
- logger logr.Logger
-}
-
-type FailoverManager interface {
- Failover(ctx context.Context, cluster *dcs.Cluster, candidate string) error
-}
-
-var checkstatus operations.Operation = &CheckStatus{}
-
-func init() {
- err := operations.Register(strings.ToLower(string(util.HealthyCheckOperation)), checkstatus)
- if err != nil {
- panic(err.Error())
- }
-}
-
-func (s *CheckStatus) Init(ctx context.Context) error {
- s.dcsStore = dcs.GetStore()
- if s.dcsStore == nil {
- return errors.New("dcs store init failed")
- }
-
- s.failedEventReportFrequency = viper.GetInt("KB_FAILED_EVENT_REPORT_FREQUENCY")
- if s.failedEventReportFrequency < 300 {
- s.failedEventReportFrequency = 300
- } else if s.failedEventReportFrequency > 3600 {
- s.failedEventReportFrequency = 3600
- }
-
- s.FailureThreshold = 3
- s.logger = ctrl.Log.WithName("checkstatus")
- dbManager, err := register.GetDBManager()
- if err != nil {
- return errors.Wrap(err, "get manager failed")
- }
- s.dbManager = dbManager
-
- return nil
-}
-
-func (s *CheckStatus) IsReadonly(ctx context.Context) bool {
- return true
-}
-
-func (s *CheckStatus) Do(ctx context.Context, req *operations.OpsRequest) (*operations.OpsResponse, error) {
- resp := &operations.OpsResponse{
- Data: map[string]any{},
- }
- resp.Data["operation"] = util.HealthyCheckOperation
-
- k8sStore := s.dcsStore.(*dcs.KubernetesStore)
- cluster := k8sStore.GetClusterFromCache()
- err := s.dbManager.CurrentMemberHealthyCheck(ctx, cluster)
- if err != nil {
- return s.handlerError(ctx, err)
- }
-
- isLeader, err := s.dbManager.IsLeader(ctx, cluster)
- if err != nil {
- return s.handlerError(ctx, err)
- }
-
- if isLeader {
- s.LeaderFailedCount = 0
- s.checkFailedCount = 0
- resp.Data["event"] = util.OperationSuccess
- return resp, nil
- }
- err = s.dbManager.LeaderHealthyCheck(ctx, cluster)
- if err != nil {
- s.LeaderFailedCount++
- if s.LeaderFailedCount > s.FailureThreshold {
- err = s.failover(ctx, cluster)
- if err != nil {
- return s.handlerError(ctx, err)
- }
- }
- return s.handlerError(ctx, err)
- }
- s.LeaderFailedCount = 0
- s.checkFailedCount = 0
- resp.Data["event"] = util.OperationSuccess
- return resp, nil
-}
-
-func (s *CheckStatus) failover(ctx context.Context, cluster *dcs.Cluster) error {
- failoverManger, ok := s.dbManager.(FailoverManager)
- if !ok {
- return errors.New("failover manager not found")
- }
- err := failoverManger.Failover(ctx, cluster, s.dbManager.GetCurrentMemberName())
- if err != nil {
- return errors.Wrap(err, "failover failed")
- }
- return nil
-}
-
-func (s *CheckStatus) handlerError(ctx context.Context, err error) (*operations.OpsResponse, error) {
- resp := &operations.OpsResponse{
- Data: map[string]any{},
- }
- message := err.Error()
- s.logger.Info("healthy checks failed", "error", message)
- resp.Data["event"] = util.OperationFailed
- resp.Data["message"] = message
- if s.checkFailedCount%s.failedEventReportFrequency == 0 {
- s.logger.Info("healthy checks failed continuously", "times", s.checkFailedCount)
- _ = util.SentEventForProbe(ctx, resp.Data)
- }
- s.checkFailedCount++
- return resp, err
-}
diff --git a/operations/replica/join.go b/operations/replica/join.go
deleted file mode 100644
index f6aef5d..0000000
--- a/operations/replica/join.go
+++ /dev/null
@@ -1,82 +0,0 @@
-/*
-Copyright (C) 2022-2024 ApeCloud Co., Ltd
-
-This file is part of KubeBlocks project
-
-This program is free software: you can redistribute it and/or modify
-it under the terms of the GNU Affero General Public License as published by
-the Free Software Foundation, either version 3 of the License, or
-(at your option) any later version.
-
-This program is distributed in the hope that it will be useful
-but WITHOUT ANY WARRANTY; without even the implied warranty of
-MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-GNU Affero General Public License for more details.
-
-You should have received a copy of the GNU Affero General Public License
-along with this program. If not, see .
-*/
-
-package replica
-
-import (
- "context"
- "strings"
- "time"
-
- "github.com/go-logr/logr"
- "github.com/pkg/errors"
-
- "github.com/apecloud/dbctl/dcs"
- "github.com/apecloud/dbctl/engines/register"
- "github.com/apecloud/dbctl/operations"
- "github.com/apecloud/dbctl/util"
-)
-
-type Join struct {
- operations.Base
- dcsStore dcs.DCS
- logger logr.Logger
- Timeout time.Duration
- Command []string
-}
-
-var join operations.Operation = &Join{}
-
-func init() {
- err := operations.Register(strings.ToLower(string(util.JoinMemberOperation)), join)
- if err != nil {
- panic(err.Error())
- }
-}
-
-func (s *Join) Init(ctx context.Context) error {
- s.dcsStore = dcs.GetStore()
- if s.dcsStore == nil {
- return errors.New("dcs store init failed")
- }
-
- return nil
-}
-
-func (s *Join) Do(ctx context.Context, req *operations.OpsRequest) (*operations.OpsResponse, error) {
- manager, err := register.GetDBManager()
- if err != nil {
- return nil, errors.Wrap(err, "get manager failed")
- }
-
- cluster, err := s.dcsStore.GetCluster()
- if err != nil {
- s.logger.Error(err, "get cluster failed")
- return nil, err
- }
-
- // join current member to db cluster
- err = manager.JoinCurrentMemberToCluster(ctx, cluster)
- if err != nil {
- s.logger.Error(err, "join member to cluster failed")
- return nil, err
- }
-
- return nil, nil
-}
diff --git a/operations/replica/leave.go b/operations/replica/leave.go
deleted file mode 100644
index 89150b5..0000000
--- a/operations/replica/leave.go
+++ /dev/null
@@ -1,92 +0,0 @@
-/*
-Copyright (C) 2022-2024 ApeCloud Co., Ltd
-
-This file is part of KubeBlocks project
-
-This program is free software: you can redistribute it and/or modify
-it under the terms of the GNU Affero General Public License as published by
-the Free Software Foundation, either version 3 of the License, or
-(at your option) any later version.
-
-This program is distributed in the hope that it will be useful
-but WITHOUT ANY WARRANTY; without even the implied warranty of
-MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-GNU Affero General Public License for more details.
-
-You should have received a copy of the GNU Affero General Public License
-along with this program. If not, see .
-*/
-
-package replica
-
-import (
- "context"
- "strings"
- "time"
-
- "github.com/go-logr/logr"
- "github.com/pkg/errors"
-
- "github.com/apecloud/dbctl/dcs"
- "github.com/apecloud/dbctl/engines/register"
- "github.com/apecloud/dbctl/operations"
- "github.com/apecloud/dbctl/util"
-)
-
-type Leave struct {
- operations.Base
- dcsStore dcs.DCS
- logger logr.Logger
- Timeout time.Duration
- Command []string
-}
-
-var leave operations.Operation = &Leave{}
-
-func init() {
- err := operations.Register(strings.ToLower(string(util.LeaveMemberOperation)), leave)
- if err != nil {
- panic(err.Error())
- }
-}
-
-func (s *Leave) Init(ctx context.Context) error {
- s.dcsStore = dcs.GetStore()
- if s.dcsStore == nil {
- return errors.New("dcs store init failed")
- }
- return nil
-}
-
-func (s *Leave) Do(ctx context.Context, _ *operations.OpsRequest) (*operations.OpsResponse, error) {
- manager, err := register.GetDBManager()
- if err != nil {
- return nil, errors.Wrap(err, "get manager failed")
- }
-
- cluster, err := s.dcsStore.GetCluster()
- if err != nil {
- s.logger.Error(err, "get cluster failed")
- return nil, err
- }
-
- currentMember := cluster.GetMemberWithName(manager.GetCurrentMemberName())
- if !cluster.HaConfig.IsDeleting(currentMember) {
- cluster.HaConfig.AddMemberToDelete(currentMember)
- _ = s.dcsStore.UpdateHaConfig()
- }
-
- // remove current member from db cluster
- err = manager.LeaveMemberFromCluster(ctx, cluster, manager.GetCurrentMemberName())
- if err != nil {
- s.logger.Error(err, "Leave member from cluster failed")
- return nil, err
- }
-
- if cluster.HaConfig.IsDeleting(currentMember) {
- cluster.HaConfig.FinishDeleted(currentMember)
- _ = s.dcsStore.UpdateHaConfig()
- }
-
- return nil, nil
-}
diff --git a/operations/replica/switchover.go b/operations/replica/switchover.go
deleted file mode 100644
index 47e822c..0000000
--- a/operations/replica/switchover.go
+++ /dev/null
@@ -1,135 +0,0 @@
-/*
-Copyright (C) 2022-2024 ApeCloud Co., Ltd
-
-This file is part of KubeBlocks project
-
-This program is free software: you can redistribute it and/or modify
-it under the terms of the GNU Affero General Public License as published by
-the Free Software Foundation, either version 3 of the License, or
-(at your option) any later version.
-
-This program is distributed in the hope that it will be useful
-but WITHOUT ANY WARRANTY; without even the implied warranty of
-MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-GNU Affero General Public License for more details.
-
-You should have received a copy of the GNU Affero General Public License
-along with this program. If not, see .
-*/
-
-package replica
-
-import (
- "context"
- "fmt"
- "strings"
-
- "github.com/pkg/errors"
-
- "github.com/apecloud/dbctl/dcs"
- "github.com/apecloud/dbctl/engines/register"
- "github.com/apecloud/dbctl/operations"
- "github.com/apecloud/dbctl/util"
-)
-
-type Switchover struct {
- operations.Base
- dcsStore dcs.DCS
-}
-
-type SwitchoverManager interface {
- Switchover(ctx context.Context, cluster *dcs.Cluster, primary, candidate string, force bool) error
-}
-
-var switchover operations.Operation = &Switchover{}
-
-func init() {
- err := operations.Register(strings.ToLower(string(util.SwitchoverOperation)), switchover)
- if err != nil {
- panic(err.Error())
- }
-}
-
-func (s *Switchover) Init(_ context.Context) error {
- s.dcsStore = dcs.GetStore()
- if s.dcsStore == nil {
- return errors.New("dcs store init failed")
- }
-
- return nil
-}
-
-func (s *Switchover) PreCheck(ctx context.Context, req *operations.OpsRequest) error {
- primary := req.GetString("primary")
- candidate := req.GetString("candidate")
- if primary == "" && candidate == "" {
- return errors.New("primary or candidate must be set")
- }
-
- cluster, err := s.dcsStore.GetCluster()
- if cluster == nil {
- return errors.Wrap(err, "get cluster failed")
- }
-
- manager, err := register.GetDBManager()
- if err != nil {
- return errors.Wrap(err, "get manager failed")
- }
-
- if cluster.HaConfig == nil || !cluster.HaConfig.IsEnable() {
- return errors.New("cluster's ha is disabled")
- }
-
- if primary != "" {
- leaderMember := cluster.GetMemberWithName(primary)
- if leaderMember == nil {
- message := fmt.Sprintf("primary %s not exists", primary)
- return errors.New(message)
- }
-
- if ok, err := manager.IsLeaderMember(ctx, cluster, leaderMember); err != nil || !ok {
- message := fmt.Sprintf("%s is not the primary", primary)
- return errors.New(message)
- }
- }
-
- if candidate != "" {
- candidateMember := cluster.GetMemberWithName(candidate)
- if candidateMember == nil {
- message := fmt.Sprintf("candidate %s not exists", candidate)
- return errors.New(message)
- }
-
- if !manager.IsMemberHealthy(ctx, cluster, candidateMember) {
- message := fmt.Sprintf("candidate %s is unhealthy", candidate)
- return errors.New(message)
- }
- } else if len(manager.HasOtherHealthyMembers(ctx, cluster, primary)) == 0 {
- return errors.New("candidate is not set and has no other healthy members")
- }
-
- return nil
-}
-
-func (s *Switchover) Do(ctx context.Context, req *operations.OpsRequest) (*operations.OpsResponse, error) {
- primary := req.GetString("primary")
- candidate := req.GetString("candidate")
- // force := req.GetBool("force")
- // if swManager, ok := manager.(SwitchoverManager); ok {
- // cluster, err := s.dcsStore.GetCluster()
- // if cluster == nil {
- // return nil, errors.Wrap(err, "get cluster failed")
- // }
-
- // err = swManager.Switchover(ctx, cluster, primary, candidate, force)
- // return nil, err
- // }
-
- err := s.dcsStore.CreateSwitchover(primary, candidate)
- if err != nil {
- message := fmt.Sprintf("Create switchover failed: %v", err)
- return nil, errors.New(message)
- }
-
- return nil, nil
-}
diff --git a/operations/types.go b/operations/types.go
index 212395b..befee66 100644
--- a/operations/types.go
+++ b/operations/types.go
@@ -32,8 +32,8 @@ type OpsRequest struct {
}
func (r *OpsRequest) GetString(key string) string {
- value, ok := r.Parameters[key]
- if ok {
+ value, exist := r.Parameters[key]
+ if exist {
val, ok := value.(string)
if ok {
return val
@@ -43,8 +43,8 @@ func (r *OpsRequest) GetString(key string) string {
}
func (r *OpsRequest) GetBool(key string) bool {
- value, ok := r.Parameters[key]
- if ok {
+ value, exist := r.Parameters[key]
+ if exist {
val, ok := value.(bool)
if ok {
return val
@@ -55,6 +55,7 @@ func (r *OpsRequest) GetBool(key string) bool {
// OpsResponse is the response for Operation
type OpsResponse struct {
+ Role string `json:"role,omitempty"`
Data map[string]any `json:"data,omitempty"`
Metadata map[string]string `json:"metadata,omitempty"`
}
diff --git a/operations/user/create.go b/operations/user/create.go
deleted file mode 100644
index 610c200..0000000
--- a/operations/user/create.go
+++ /dev/null
@@ -1,101 +0,0 @@
-/*
-Copyright (C) 2022-2024 ApeCloud Co., Ltd
-
-This file is part of KubeBlocks project
-
-This program is free software: you can redistribute it and/or modify
-it under the terms of the GNU Affero General Public License as published by
-the Free Software Foundation, either version 3 of the License, or
-(at your option) any later version.
-
-This program is distributed in the hope that it will be useful
-but WITHOUT ANY WARRANTY; without even the implied warranty of
-MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-GNU Affero General Public License for more details.
-
-You should have received a copy of the GNU Affero General Public License
-along with this program. If not, see .
-*/
-
-package user
-
-import (
- "context"
- "strings"
-
- "github.com/go-logr/logr"
- "github.com/pkg/errors"
- ctrl "sigs.k8s.io/controller-runtime"
-
- "github.com/apecloud/dbctl/engines"
- "github.com/apecloud/dbctl/engines/models"
- "github.com/apecloud/dbctl/engines/register"
- "github.com/apecloud/dbctl/operations"
- "github.com/apecloud/dbctl/util"
-)
-
-type CreateUser struct {
- operations.Base
- DBManager engines.DBManager
- logger logr.Logger
-}
-
-var createUser operations.Operation = &CreateUser{}
-
-func init() {
- err := operations.Register(strings.ToLower(string(util.CreateUserOp)), createUser)
- if err != nil {
- panic(err.Error())
- }
-}
-
-func (s *CreateUser) Init(ctx context.Context) error {
- dbManager, err := register.GetDBManager()
- if err != nil {
- return errors.Wrap(err, "get manager failed")
- }
- s.DBManager = dbManager
- s.logger = ctrl.Log.WithName("CreateUser")
- return nil
-}
-
-func (s *CreateUser) IsReadonly(ctx context.Context) bool {
- return false
-}
-
-func (s *CreateUser) PreCheck(ctx context.Context, req *operations.OpsRequest) error {
- userInfo, err := UserInfoParser(req)
- if err != nil {
- return err
- }
-
- return userInfo.UserNameAndPasswdValidator()
-}
-
-func (s *CreateUser) Do(ctx context.Context, req *operations.OpsRequest) (*operations.OpsResponse, error) {
- userInfo, _ := UserInfoParser(req)
- resp := operations.NewOpsResponse(util.CreateUserOp)
-
- user, err := s.DBManager.DescribeUser(ctx, userInfo.UserName)
- if err == nil && user != nil {
- return resp.WithSuccess("account already exists")
- }
-
- // for compatibility with old addons that specify accoutprovision action but not work actually.
- err = s.DBManager.CreateUser(ctx, userInfo.UserName, userInfo.Password)
- if err != nil {
- err = errors.Cause(err)
- s.logger.Info("executing CreateUser error", "error", err.Error())
- return resp, err
- }
-
- if userInfo.RoleName != "" {
- err := s.DBManager.GrantUserRole(ctx, userInfo.UserName, userInfo.RoleName)
- if err != nil && err != models.ErrNotImplemented {
- s.logger.Info("executing grantRole error", "error", err.Error())
- return resp, err
- }
- }
-
- return resp.WithSuccess("")
-}
diff --git a/operations/user/delete.go b/operations/user/delete.go
deleted file mode 100644
index 5da854c..0000000
--- a/operations/user/delete.go
+++ /dev/null
@@ -1,84 +0,0 @@
-/*
-Copyright (C) 2022-2024 ApeCloud Co., Ltd
-
-This file is part of KubeBlocks project
-
-This program is free software: you can redistribute it and/or modify
-it under the terms of the GNU Affero General Public License as published by
-the Free Software Foundation, either version 3 of the License, or
-(at your option) any later version.
-
-This program is distributed in the hope that it will be useful
-but WITHOUT ANY WARRANTY; without even the implied warranty of
-MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-GNU Affero General Public License for more details.
-
-You should have received a copy of the GNU Affero General Public License
-along with this program. If not, see .
-*/
-
-package user
-
-import (
- "context"
-
- "github.com/go-logr/logr"
- "github.com/pkg/errors"
- ctrl "sigs.k8s.io/controller-runtime"
-
- "github.com/apecloud/dbctl/engines"
- "github.com/apecloud/dbctl/engines/register"
- "github.com/apecloud/dbctl/operations"
- "github.com/apecloud/dbctl/util"
-)
-
-type DeleteUser struct {
- operations.Base
- dbManager engines.DBManager
- logger logr.Logger
-}
-
-var deleteUser operations.Operation = &DeleteUser{}
-
-func init() {
- err := operations.Register("deleteuser", deleteUser)
- if err != nil {
- panic(err.Error())
- }
-}
-
-func (s *DeleteUser) Init(ctx context.Context) error {
- dbManager, err := register.GetDBManager()
- if err != nil {
- return errors.Wrap(err, "get manager failed")
- }
- s.dbManager = dbManager
- s.logger = ctrl.Log.WithName("DeleteUser")
- return nil
-}
-
-func (s *DeleteUser) IsReadonly(ctx context.Context) bool {
- return false
-}
-
-func (s *DeleteUser) PreCheck(ctx context.Context, req *operations.OpsRequest) error {
- userInfo, err := UserInfoParser(req)
- if err != nil {
- return err
- }
-
- return userInfo.UserNameValidator()
-}
-
-func (s *DeleteUser) Do(ctx context.Context, req *operations.OpsRequest) (*operations.OpsResponse, error) {
- userInfo, _ := UserInfoParser(req)
- resp := operations.NewOpsResponse(util.DeleteUserOp)
-
- err := s.dbManager.DeleteUser(ctx, userInfo.UserName)
- if err != nil {
- s.logger.Info("executing DeleteUser error", "error", err)
- return resp, err
- }
-
- return resp.WithSuccess("")
-}
diff --git a/operations/user/describe.go b/operations/user/describe.go
deleted file mode 100644
index 3a51803..0000000
--- a/operations/user/describe.go
+++ /dev/null
@@ -1,100 +0,0 @@
-/*
-Copyright (C) 2022-2024 ApeCloud Co., Ltd
-
-This file is part of KubeBlocks project
-
-This program is free software: you can redistribute it and/or modify
-it under the terms of the GNU Affero General Public License as published by
-the Free Software Foundation, either version 3 of the License, or
-(at your option) any later version.
-
-This program is distributed in the hope that it will be useful
-but WITHOUT ANY WARRANTY; without even the implied warranty of
-MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-GNU Affero General Public License for more details.
-
-You should have received a copy of the GNU Affero General Public License
-along with this program. If not, see .
-*/
-
-package user
-
-import (
- "context"
- "encoding/json"
- "fmt"
-
- "github.com/go-logr/logr"
- "github.com/pkg/errors"
- ctrl "sigs.k8s.io/controller-runtime"
-
- "github.com/apecloud/dbctl/engines"
- "github.com/apecloud/dbctl/engines/models"
- "github.com/apecloud/dbctl/engines/register"
- "github.com/apecloud/dbctl/operations"
- "github.com/apecloud/dbctl/util"
-)
-
-type DescribeUser struct {
- operations.Base
- dbManager engines.DBManager
- logger logr.Logger
-}
-
-var describeUser operations.Operation = &DescribeUser{}
-
-func init() {
- err := operations.Register("describeuser", describeUser)
- if err != nil {
- panic(err.Error())
- }
-}
-
-func (s *DescribeUser) Init(ctx context.Context) error {
- dbManager, err := register.GetDBManager()
- if err != nil {
- return errors.Wrap(err, "get manager failed")
- }
- s.dbManager = dbManager
- s.logger = ctrl.Log.WithName("describeUser")
- return nil
-}
-
-func (s *DescribeUser) IsReadonly(ctx context.Context) bool {
- return true
-}
-
-func (s *DescribeUser) PreCheck(ctx context.Context, req *operations.OpsRequest) error {
- userInfo, err := UserInfoParser(req)
- if err != nil {
- return err
- }
-
- return userInfo.UserNameValidator()
-}
-
-func (s *DescribeUser) Do(ctx context.Context, req *operations.OpsRequest) (*operations.OpsResponse, error) {
- userInfo, _ := UserInfoParser(req)
- resp := operations.NewOpsResponse(util.DescribeUserOp)
-
- result, err := s.dbManager.DescribeUser(ctx, userInfo.UserName)
- if err != nil {
- s.logger.Info("executing describeUser error", "error", err)
- return resp, err
- }
-
- resp.Data["user"] = result
- return resp.WithSuccess("")
-}
-
-func UserInfoParser(req *operations.OpsRequest) (*models.UserInfo, error) {
- user := &models.UserInfo{}
- if req == nil || req.Parameters == nil {
- return nil, fmt.Errorf("no Parameters provided")
- } else if jsonData, err := json.Marshal(req.Parameters); err != nil {
- return nil, err
- } else if err = json.Unmarshal(jsonData, user); err != nil {
- return nil, err
- }
- return user, nil
-}
diff --git a/operations/user/describe_test.go b/operations/user/describe_test.go
deleted file mode 100644
index 450f781..0000000
--- a/operations/user/describe_test.go
+++ /dev/null
@@ -1,41 +0,0 @@
-/*
-Copyright (C) 2022-2024 ApeCloud Co., Ltd
-
-This file is part of KubeBlocks project
-
-This program is free software: you can redistribute it and/or modify
-it under the terms of the GNU Affero General Public License as published by
-the Free Software Foundation, either version 3 of the License, or
-(at your option) any later version.
-
-This program is distributed in the hope that it will be useful
-but WITHOUT ANY WARRANTY; without even the implied warranty of
-MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-GNU Affero General Public License for more details.
-
-You should have received a copy of the GNU Affero General Public License
-along with this program. If not, see .
-*/
-
-package user
-
-import (
- "testing"
-
- "github.com/stretchr/testify/assert"
-
- "github.com/apecloud/dbctl/operations"
-)
-
-func TestUserInfoParser(t *testing.T) {
- req := &operations.OpsRequest{
- Parameters: map[string]interface{}{
- "userName": "john",
- "age": 30,
- },
- }
-
- user, err := UserInfoParser(req)
- assert.Nil(t, err)
- assert.Equal(t, "john", user.UserName)
-}
diff --git a/operations/user/grant_role.go b/operations/user/grant_role.go
deleted file mode 100644
index d403a82..0000000
--- a/operations/user/grant_role.go
+++ /dev/null
@@ -1,85 +0,0 @@
-/*
-Copyright (C) 2022-2024 ApeCloud Co., Ltd
-
-This file is part of KubeBlocks project
-
-This program is free software: you can redistribute it and/or modify
-it under the terms of the GNU Affero General Public License as published by
-the Free Software Foundation, either version 3 of the License, or
-(at your option) any later version.
-
-This program is distributed in the hope that it will be useful
-but WITHOUT ANY WARRANTY; without even the implied warranty of
-MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-GNU Affero General Public License for more details.
-
-You should have received a copy of the GNU Affero General Public License
-along with this program. If not, see .
-*/
-
-package user
-
-import (
- "context"
- "strings"
-
- "github.com/go-logr/logr"
- "github.com/pkg/errors"
- ctrl "sigs.k8s.io/controller-runtime"
-
- "github.com/apecloud/dbctl/engines"
- "github.com/apecloud/dbctl/engines/register"
- "github.com/apecloud/dbctl/operations"
- "github.com/apecloud/dbctl/util"
-)
-
-type GrantRole struct {
- operations.Base
- dbManager engines.DBManager
- logger logr.Logger
-}
-
-var grantRole operations.Operation = &GrantRole{}
-
-func init() {
- err := operations.Register(strings.ToLower(string(util.GrantUserRoleOp)), grantRole)
- if err != nil {
- panic(err.Error())
- }
-}
-
-func (s *GrantRole) Init(ctx context.Context) error {
- dbManager, err := register.GetDBManager()
- if err != nil {
- return errors.Wrap(err, "get manager failed")
- }
- s.dbManager = dbManager
- s.logger = ctrl.Log.WithName("grantRole")
- return nil
-}
-
-func (s *GrantRole) IsReadonly(ctx context.Context) bool {
- return false
-}
-
-func (s *GrantRole) PreCheck(ctx context.Context, req *operations.OpsRequest) error {
- userInfo, err := UserInfoParser(req)
- if err != nil {
- return err
- }
-
- return userInfo.UserNameValidator()
-}
-
-func (s *GrantRole) Do(ctx context.Context, req *operations.OpsRequest) (*operations.OpsResponse, error) {
- userInfo, _ := UserInfoParser(req)
- resp := operations.NewOpsResponse(util.GrantUserRoleOp)
-
- err := s.dbManager.GrantUserRole(ctx, userInfo.UserName, userInfo.RoleName)
- if err != nil {
- s.logger.Info("executing grantRole error", "error", err)
- return resp, err
- }
-
- return resp.WithSuccess("")
-}
diff --git a/operations/user/list.go b/operations/user/list.go
deleted file mode 100644
index f219168..0000000
--- a/operations/user/list.go
+++ /dev/null
@@ -1,75 +0,0 @@
-/*
-Copyright (C) 2022-2024 ApeCloud Co., Ltd
-
-This file is part of KubeBlocks project
-
-This program is free software: you can redistribute it and/or modify
-it under the terms of the GNU Affero General Public License as published by
-the Free Software Foundation, either version 3 of the License, or
-(at your option) any later version.
-
-This program is distributed in the hope that it will be useful
-but WITHOUT ANY WARRANTY; without even the implied warranty of
-MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-GNU Affero General Public License for more details.
-
-You should have received a copy of the GNU Affero General Public License
-along with this program. If not, see .
-*/
-
-package user
-
-import (
- "context"
-
- "github.com/go-logr/logr"
- "github.com/pkg/errors"
- ctrl "sigs.k8s.io/controller-runtime"
-
- "github.com/apecloud/dbctl/engines"
- "github.com/apecloud/dbctl/engines/register"
- "github.com/apecloud/dbctl/operations"
- "github.com/apecloud/dbctl/util"
-)
-
-type ListUsers struct {
- operations.Base
- DBManager engines.DBManager
- logger logr.Logger
-}
-
-var listusers operations.Operation = &ListUsers{}
-
-func init() {
- err := operations.Register("listusers", listusers)
- if err != nil {
- panic(err.Error())
- }
-}
-
-func (s *ListUsers) Init(ctx context.Context) error {
- dbManager, err := register.GetDBManager()
- if err != nil {
- return errors.Wrap(err, "get manager failed")
- }
- s.DBManager = dbManager
- s.logger = ctrl.Log.WithName("listusers")
- return nil
-}
-
-func (s *ListUsers) IsReadonly(ctx context.Context) bool {
- return true
-}
-
-func (s *ListUsers) Do(ctx context.Context, req *operations.OpsRequest) (*operations.OpsResponse, error) {
- resp := operations.NewOpsResponse(util.ListUsersOp)
-
- result, err := s.DBManager.ListUsers(ctx)
- if err != nil {
- s.logger.Info("executing listusers error", "error", err)
- return resp, err
- }
-
- resp.Data["users"] = result
- return resp.WithSuccess("")
-}
diff --git a/operations/user/list_system_accounts.go b/operations/user/list_system_accounts.go
deleted file mode 100644
index d319b6f..0000000
--- a/operations/user/list_system_accounts.go
+++ /dev/null
@@ -1,75 +0,0 @@
-/*
-Copyright (C) 2022-2024 ApeCloud Co., Ltd
-
-This file is part of KubeBlocks project
-
-This program is free software: you can redistribute it and/or modify
-it under the terms of the GNU Affero General Public License as published by
-the Free Software Foundation, either version 3 of the License, or
-(at your option) any later version.
-
-This program is distributed in the hope that it will be useful
-but WITHOUT ANY WARRANTY; without even the implied warranty of
-MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-GNU Affero General Public License for more details.
-
-You should have received a copy of the GNU Affero General Public License
-along with this program. If not, see .
-*/
-
-package user
-
-import (
- "context"
-
- "github.com/go-logr/logr"
- "github.com/pkg/errors"
- ctrl "sigs.k8s.io/controller-runtime"
-
- "github.com/apecloud/dbctl/engines"
- "github.com/apecloud/dbctl/engines/register"
- "github.com/apecloud/dbctl/operations"
- "github.com/apecloud/dbctl/util"
-)
-
-type ListSystemAccounts struct {
- operations.Base
- DBManager engines.DBManager
- logger logr.Logger
-}
-
-var listSystemAccounts operations.Operation = &ListSystemAccounts{}
-
-func init() {
- err := operations.Register("listsystemaccounts", listSystemAccounts)
- if err != nil {
- panic(err.Error())
- }
-}
-
-func (s *ListSystemAccounts) Init(ctx context.Context) error {
- dbManager, err := register.GetDBManager()
- if err != nil {
- return errors.Wrap(err, "get manager failed")
- }
- s.DBManager = dbManager
- s.logger = ctrl.Log.WithName("listSystemAccounts")
- return nil
-}
-
-func (s *ListSystemAccounts) IsReadonly(ctx context.Context) bool {
- return true
-}
-
-func (s *ListSystemAccounts) Do(ctx context.Context, req *operations.OpsRequest) (*operations.OpsResponse, error) {
- resp := operations.NewOpsResponse(util.ListSystemAccountsOp)
-
- result, err := s.DBManager.ListSystemAccounts(ctx)
- if err != nil {
- s.logger.Info("executing ListSystemAccounts error", "error", err)
- return resp, err
- }
-
- resp.Data["systemAccounts"] = result
- return resp.WithSuccess("")
-}
diff --git a/operations/user/revoke_role.go b/operations/user/revoke_role.go
deleted file mode 100644
index 0906c71..0000000
--- a/operations/user/revoke_role.go
+++ /dev/null
@@ -1,85 +0,0 @@
-/*
-Copyright (C) 2022-2024 ApeCloud Co., Ltd
-
-This file is part of KubeBlocks project
-
-This program is free software: you can redistribute it and/or modify
-it under the terms of the GNU Affero General Public License as published by
-the Free Software Foundation, either version 3 of the License, or
-(at your option) any later version.
-
-This program is distributed in the hope that it will be useful
-but WITHOUT ANY WARRANTY; without even the implied warranty of
-MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-GNU Affero General Public License for more details.
-
-You should have received a copy of the GNU Affero General Public License
-along with this program. If not, see .
-*/
-
-package user
-
-import (
- "context"
- "strings"
-
- "github.com/go-logr/logr"
- "github.com/pkg/errors"
- ctrl "sigs.k8s.io/controller-runtime"
-
- "github.com/apecloud/dbctl/engines"
- "github.com/apecloud/dbctl/engines/register"
- "github.com/apecloud/dbctl/operations"
- "github.com/apecloud/dbctl/util"
-)
-
-type RevokeRole struct {
- operations.Base
- dbManager engines.DBManager
- logger logr.Logger
-}
-
-var revokeRole operations.Operation = &RevokeRole{}
-
-func init() {
- err := operations.Register(strings.ToLower(string(util.RevokeUserRoleOp)), revokeRole)
- if err != nil {
- panic(err.Error())
- }
-}
-
-func (s *RevokeRole) Init(ctx context.Context) error {
- dbManager, err := register.GetDBManager()
- if err != nil {
- return errors.Wrap(err, "get manager failed")
- }
- s.dbManager = dbManager
- s.logger = ctrl.Log.WithName("revokeRole")
- return nil
-}
-
-func (s *RevokeRole) IsReadonly(ctx context.Context) bool {
- return false
-}
-
-func (s *RevokeRole) PreCheck(ctx context.Context, req *operations.OpsRequest) error {
- userInfo, err := UserInfoParser(req)
- if err != nil {
- return err
- }
-
- return userInfo.UserNameAndRoleValidator()
-}
-
-func (s *RevokeRole) Do(ctx context.Context, req *operations.OpsRequest) (*operations.OpsResponse, error) {
- userInfo, _ := UserInfoParser(req)
- resp := operations.NewOpsResponse(util.RevokeUserRoleOp)
-
- err := s.dbManager.RevokeUserRole(ctx, userInfo.UserName, userInfo.RoleName)
- if err != nil {
- s.logger.Info("executing RevokeRole error", "error", err)
- return resp, err
- }
-
- return resp.WithSuccess("")
-}
diff --git a/operations/volume/lock.go b/operations/volume/lock.go
deleted file mode 100644
index 5cca235..0000000
--- a/operations/volume/lock.go
+++ /dev/null
@@ -1,61 +0,0 @@
-/*
-Copyright (C) 2022-2024 ApeCloud Co., Ltd
-
-This file is part of KubeBlocks project
-
-This program is free software: you can redistribute it and/or modify
-it under the terms of the GNU Affero General Public License as published by
-the Free Software Foundation, either version 3 of the License, or
-(at your option) any later version.
-
-This program is distributed in the hope that it will be useful
-but WITHOUT ANY WARRANTY; without even the implied warranty of
-MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-GNU Affero General Public License for more details.
-
-You should have received a copy of the GNU Affero General Public License
-along with this program. If not, see .
-*/
-
-package volume
-
-import (
- "context"
- "strings"
- "time"
-
- "github.com/pkg/errors"
-
- "github.com/apecloud/dbctl/engines/register"
- "github.com/apecloud/dbctl/operations"
- "github.com/apecloud/dbctl/util"
-)
-
-type Lock struct {
- operations.Base
- Timeout time.Duration
- Command []string
-}
-
-var lock operations.Operation = &Lock{}
-
-func init() {
- err := operations.Register(strings.ToLower(string(util.LockOperation)), lock)
- if err != nil {
- panic(err.Error())
- }
-}
-
-func (s *Lock) Do(ctx context.Context, req *operations.OpsRequest) (*operations.OpsResponse, error) {
- manager, err := register.GetDBManager()
- if err != nil {
- return nil, errors.Wrap(err, "Get DB manager failed")
- }
-
- err = manager.Lock(ctx, "disk full")
- if err != nil {
- return nil, errors.Wrap(err, "Lock DB failed")
- }
-
- return nil, nil
-}
diff --git a/operations/volume/suite_test.go b/operations/volume/suite_test.go
deleted file mode 100644
index 50b2554..0000000
--- a/operations/volume/suite_test.go
+++ /dev/null
@@ -1,83 +0,0 @@
-/*
-Copyright (C) 2022-2024 ApeCloud Co., Ltd
-
-This file is part of KubeBlocks project
-
-This program is free software: you can redistribute it and/or modify
-it under the terms of the GNU Affero General Public License as published by
-the Free Software Foundation, either version 3 of the License, or
-(at your option) any later version.
-
-This program is distributed in the hope that it will be useful
-but WITHOUT ANY WARRANTY; without even the implied warranty of
-MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-GNU Affero General Public License for more details.
-
-You should have received a copy of the GNU Affero General Public License
-along with this program. If not, see .
-*/
-
-package volume
-
-import (
- "testing"
-
- . "github.com/onsi/ginkgo/v2"
- . "github.com/onsi/gomega"
-
- "github.com/golang/mock/gomock"
- "github.com/spf13/viper"
- ctrl "sigs.k8s.io/controller-runtime"
- "sigs.k8s.io/controller-runtime/pkg/log/zap"
-
- "github.com/apecloud/dbctl/constant"
- "github.com/apecloud/dbctl/dcs"
- "github.com/apecloud/dbctl/engines"
- "github.com/apecloud/dbctl/engines/register"
-)
-
-var (
- dbManager engines.DBManager
- mockDBManager *engines.MockDBManager
- dcsStore dcs.DCS
- mockDCSStore *dcs.MockDCS
-)
-
-func init() {
- viper.AutomaticEnv()
- viper.SetDefault(constant.KBEnvPodName, "pod-test")
- viper.SetDefault(constant.KBEnvClusterCompName, "cluster-component-test")
- viper.SetDefault(constant.KBEnvNamespace, "namespace-test")
- ctrl.SetLogger(zap.New())
-}
-
-func TestVolumeOperations(t *testing.T) {
- RegisterFailHandler(Fail)
- RunSpecs(t, "Volume Operations. Suite")
-}
-
-var _ = BeforeSuite(func() {
- // Init mock db manager
- InitMockDBManager()
-
- // Init mock dcs store
- InitMockDCSStore()
-})
-
-var _ = AfterSuite(func() {
-})
-
-func InitMockDBManager() {
- ctrl := gomock.NewController(GinkgoT())
- mockDBManager = engines.NewMockDBManager(ctrl)
- register.SetDBManager(mockDBManager)
- dbManager = mockDBManager
-}
-
-func InitMockDCSStore() {
- ctrl := gomock.NewController(GinkgoT())
- mockDCSStore = dcs.NewMockDCS(ctrl)
- mockDCSStore.EXPECT().GetClusterFromCache().Return(&dcs.Cluster{}).AnyTimes()
- dcs.SetStore(mockDCSStore)
- dcsStore = mockDCSStore
-}
diff --git a/operations/volume/unlock.go b/operations/volume/unlock.go
deleted file mode 100644
index 6bab09a..0000000
--- a/operations/volume/unlock.go
+++ /dev/null
@@ -1,61 +0,0 @@
-/*
-Copyright (C) 2022-2024 ApeCloud Co., Ltd
-
-This file is part of KubeBlocks project
-
-This program is free software: you can redistribute it and/or modify
-it under the terms of the GNU Affero General Public License as published by
-the Free Software Foundation, either version 3 of the License, or
-(at your option) any later version.
-
-This program is distributed in the hope that it will be useful
-but WITHOUT ANY WARRANTY; without even the implied warranty of
-MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-GNU Affero General Public License for more details.
-
-You should have received a copy of the GNU Affero General Public License
-along with this program. If not, see .
-*/
-
-package volume
-
-import (
- "context"
- "strings"
- "time"
-
- "github.com/pkg/errors"
-
- "github.com/apecloud/dbctl/engines/register"
- "github.com/apecloud/dbctl/operations"
- "github.com/apecloud/dbctl/util"
-)
-
-type Unlock struct {
- operations.Base
- Timeout time.Duration
- Command []string
-}
-
-var unlock operations.Operation = &Unlock{}
-
-func init() {
- err := operations.Register(strings.ToLower(string(util.UnlockOperation)), unlock)
- if err != nil {
- panic(err.Error())
- }
-}
-
-func (s *Unlock) Do(ctx context.Context, req *operations.OpsRequest) (*operations.OpsResponse, error) {
- manager, err := register.GetDBManager()
- if err != nil {
- return nil, errors.Wrap(err, "Get DB manager failed")
- }
-
- err = manager.Unlock(ctx)
- if err != nil {
- return nil, errors.Wrap(err, "Unlock DB failed")
- }
-
- return nil, nil
-}
diff --git a/util/command.go b/util/command.go
deleted file mode 100644
index 0a73f5f..0000000
--- a/util/command.go
+++ /dev/null
@@ -1,72 +0,0 @@
-/*
-Copyright (C) 2022-2023 ApeCloud Co., Ltd
-
-This file is part of KubeBlocks project
-
-This program is free software: you can redistribute it and/or modify
-it under the terms of the GNU Affero General Public License as published by
-the Free Software Foundation, either version 3 of the License, or
-(at your option) any later version.
-
-This program is distributed in the hope that it will be useful
-but WITHOUT ANY WARRANTY; without even the implied warranty of
-MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-GNU Affero General Public License for more details.
-
-You should have received a copy of the GNU Affero General Public License
-along with this program. If not, see .
-*/
-
-package util
-
-import (
- "context"
- "os"
- "os/exec"
- "strings"
-
- "github.com/pkg/errors"
- "k8s.io/apimachinery/pkg/util/sets"
-
- "github.com/apecloud/dbctl/constant"
-)
-
-func ExecCommand(ctx context.Context, command []string, envs []string) (string, error) {
- if len(command) == 0 {
- return "", errors.New("command can not be empty")
- }
- cmd := exec.CommandContext(ctx, command[0], command[1:]...)
- cmd.Env = envs
- bytes, err := cmd.Output()
- if exitErr, ok := err.(*exec.ExitError); ok {
- err = errors.New(string(exitErr.Stderr))
- }
- return string(bytes), err
-}
-
-func GetGlobalSharedEnvs() ([]string, error) {
- envSetRequired := sets.New(
- constant.KBEnvPodFQDN,
- constant.KBEnvServicePort,
- constant.KBEnvServiceUser,
- constant.KBEnvServicePassword,
- )
- envSetGot := sets.KeySet(map[string]string{})
- envs := make([]string, 0, envSetRequired.Len())
- Es := os.Environ()
- for _, env := range Es {
- keys := strings.SplitN(env, "=", 2)
- if len(keys) != 2 {
- continue
- }
- if envSetRequired.Has(keys[0]) {
- envs = append(envs, env)
- envSetGot.Insert(keys[0])
- }
- }
- // if len(envs) != envSetRequired.Len() {
- // return nil, errors.Errorf("%v envs is unset", sets.List(envSetRequired.Difference(envSetGot)))
- // }
-
- return envs, nil
-}
diff --git a/util/config/decode.go b/util/config/decode.go
index 864ad2e..3d0d669 100644
--- a/util/config/decode.go
+++ b/util/config/decode.go
@@ -87,7 +87,7 @@ func decodeString(f reflect.Type, t reflect.Type, data any) (any, error) {
if t.Implements(typeStringDecoder) {
result = reflect.New(t.Elem()).Interface()
decoder = result.(StringDecoder)
- } else if reflect.PtrTo(t).Implements(typeStringDecoder) {
+ } else if reflect.PointerTo(t).Implements(typeStringDecoder) {
result = reflect.New(t).Interface()
decoder = result.(StringDecoder)
}
diff --git a/util/event.go b/util/event.go
deleted file mode 100644
index 34a0358..0000000
--- a/util/event.go
+++ /dev/null
@@ -1,139 +0,0 @@
-/*
-Copyright (C) 2022-2023 ApeCloud Co., Ltd
-
-This file is part of KubeBlocks project
-
-This program is free software: you can redistribute it and/or modify
-it under the terms of the GNU Affero General Public License as published by
-the Free Software Foundation, either version 3 of the License, or
-(at your option) any later version.
-
-This program is distributed in the hope that it will be useful
-but WITHOUT ANY WARRANTY; without even the implied warranty of
-MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-GNU Affero General Public License for more details.
-
-You should have received a copy of the GNU Affero General Public License
-along with this program. If not, see .
-*/
-
-package util
-
-import (
- "context"
- "encoding/json"
- "errors"
- "fmt"
- "time"
-
- "github.com/spf13/viper"
- corev1 "k8s.io/api/core/v1"
- metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
- "k8s.io/apimachinery/pkg/types"
- "k8s.io/apimachinery/pkg/util/rand"
- "k8s.io/client-go/kubernetes"
- ctlruntime "sigs.k8s.io/controller-runtime"
-
- workloads "github.com/apecloud/kubeblocks/apis/workloads/v1alpha1"
-
- "github.com/apecloud/dbctl/constant"
-)
-
-var logger = ctlruntime.Log.WithName("event")
-
-func SentEventForProbe(ctx context.Context, data map[string]any) error {
- logger.Info(fmt.Sprintf("send event: %v", data))
- roleUpdateMechanism := workloads.DirectAPIServerEventUpdate
- if viper.IsSet(constant.KBEnvRsmRoleUpdateMechanism) {
- roleUpdateMechanism = workloads.RoleUpdateMechanism(viper.GetString(constant.KBEnvRsmRoleUpdateMechanism))
- }
-
- switch roleUpdateMechanism {
- case workloads.ReadinessProbeEventUpdate:
- return NewProbeError("not sending event directly, use readiness probe instand")
- case workloads.DirectAPIServerEventUpdate:
- operation, ok := data["operation"]
- if !ok {
- return errors.New("operation failed must be set")
- }
- event, err := CreateEvent(string(operation.(OperationKind)), data)
- if err != nil {
- logger.Info("generate event failed", "error", err.Error())
- return err
- }
-
- go func() {
- _ = SendEvent(ctx, event)
- }()
- default:
- logger.Info(fmt.Sprintf("no event sent, RoleUpdateMechanism: %s", roleUpdateMechanism))
- }
-
- return nil
-}
-
-func CreateEvent(reason string, data map[string]any) (*corev1.Event, error) {
- // get pod object
- podName := constant.GetPodName()
- podUID := constant.GetPodUID()
- nodeName := viper.GetString(constant.KBEnvNodeName)
- namespace := constant.GetNamespace()
- msg, err := json.Marshal(data)
- if err != nil {
- return nil, err
- }
-
- event := &corev1.Event{
- ObjectMeta: metav1.ObjectMeta{
- Name: fmt.Sprintf("%s.%s", podName, rand.String(16)),
- Namespace: namespace,
- },
- InvolvedObject: corev1.ObjectReference{
- Kind: "Pod",
- Namespace: namespace,
- Name: podName,
- UID: types.UID(podUID),
- FieldPath: "spec.containers{lorry}",
- },
- Reason: reason,
- Message: string(msg),
- Source: corev1.EventSource{
- Component: "lorry",
- Host: nodeName,
- },
- FirstTimestamp: metav1.Now(),
- LastTimestamp: metav1.Now(),
- EventTime: metav1.NowMicro(),
- ReportingController: "lorry",
- ReportingInstance: podName,
- Action: reason,
- Type: "Normal",
- }
- return event, nil
-}
-
-func SendEvent(ctx context.Context, event *corev1.Event) error {
- ctx1 := context.Background()
- config, err := ctlruntime.GetConfig()
- if err != nil {
- logger.Info("get k8s client config failed", "error", err.Error())
- return err
- }
-
- clientset, err := kubernetes.NewForConfig(config)
- if err != nil {
- logger.Info("k8s client create failed", "error", err.Error())
- return err
- }
- namespace := constant.GetNamespace()
- for i := 0; i < 30; i++ {
- _, err = clientset.CoreV1().Events(namespace).Create(ctx1, event, metav1.CreateOptions{})
- if err == nil {
- logger.Info("send event success", "message", event.Message)
- break
- }
- logger.Info("send event failed", "error", err.Error())
- time.Sleep(10 * time.Second)
- }
- return err
-}
diff --git a/util/kubernetes/client.go b/util/kubernetes/client.go
deleted file mode 100644
index a5d229d..0000000
--- a/util/kubernetes/client.go
+++ /dev/null
@@ -1,62 +0,0 @@
-/*
-Copyright (C) 2022-2023 ApeCloud Co., Ltd
-
-This file is part of KubeBlocks project
-
-This program is free software: you can redistribute it and/or modify
-it under the terms of the GNU Affero General Public License as published by
-the Free Software Foundation, either version 3 of the License, or
-(at your option) any later version.
-
-This program is distributed in the hope that it will be useful
-but WITHOUT ANY WARRANTY; without even the implied warranty of
-MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-GNU Affero General Public License for more details.
-
-You should have received a copy of the GNU Affero General Public License
-along with this program. If not, see .
-*/
-
-package kubernetes
-
-import (
- "github.com/pkg/errors"
- "k8s.io/client-go/kubernetes"
- clientsetscheme "k8s.io/client-go/kubernetes/scheme"
- "k8s.io/client-go/rest"
- ctlruntime "sigs.k8s.io/controller-runtime"
-
- appsv1alpha1 "github.com/apecloud/kubeblocks/apis/apps/v1alpha1"
-)
-
-// GetClientSet returns a kubernetes clientSet.
-func GetClientSet() (*kubernetes.Clientset, error) {
- restConfig, err := ctlruntime.GetConfig()
- if err != nil {
- return nil, errors.Wrap(err, "get kubeConfig failed")
- }
- clientSet, err := kubernetes.NewForConfig(restConfig)
- if err != nil {
- return nil, err
- }
-
- return clientSet, nil
-}
-
-// GetRESTClientForKB returns a kubernetes restClient for KubeBlocks types.
-func GetRESTClientForKB() (*rest.RESTClient, error) {
- restConfig, err := ctlruntime.GetConfig()
- if err != nil {
- return nil, errors.Wrap(err, "get kubeConfig failed")
- }
- _ = appsv1alpha1.AddToScheme(clientsetscheme.Scheme)
- restConfig.GroupVersion = &appsv1alpha1.GroupVersion
- restConfig.APIPath = "/apis"
- restConfig.NegotiatedSerializer = clientsetscheme.Codecs.WithoutConversion()
- client, err := rest.RESTClientFor(restConfig)
- if err != nil {
- return nil, err
- }
-
- return client, nil
-}
diff --git a/util/ping.go b/util/ping.go
deleted file mode 100644
index a5fb76d..0000000
--- a/util/ping.go
+++ /dev/null
@@ -1,39 +0,0 @@
-/*
-Copyright (C) 2022-2023 ApeCloud Co., Ltd
-
-This file is part of KubeBlocks project
-
-This program is free software: you can redistribute it and/or modify
-it under the terms of the GNU Affero General Public License as published by
-the Free Software Foundation, either version 3 of the License, or
-(at your option) any later version.
-
-This program is distributed in the hope that it will be useful
-but WITHOUT ANY WARRANTY; without even the implied warranty of
-MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-GNU Affero General Public License for more details.
-
-You should have received a copy of the GNU Affero General Public License
-along with this program. If not, see .
-*/
-
-package util
-
-import (
- "net"
- "time"
-)
-
-func IsTCPReady(host, port string) (bool, error) {
- address := net.JoinHostPort(host, port)
- timeout := 2 * time.Second
- conn, err := net.DialTimeout("tcp", address, timeout)
- if err != nil {
- return false, err
- }
- defer func() {
- _ = conn.Close()
- }()
-
- return true, nil
-}
diff --git a/util/types.go b/util/types.go
index 27f66a2..a1fddfa 100644
--- a/util/types.go
+++ b/util/types.go
@@ -19,110 +19,21 @@ along with this program. If not, see .
package util
-import (
- "errors"
- "strings"
-)
-
type OperationKind string
const (
RespFieldEvent = "event"
RespFieldMessage = "message"
- RespTypMeta = "metadata"
- RespEveSucc = "Success"
- RespEveFail = "Failed"
-
- GetOperation OperationKind = "get"
- CreateOperation OperationKind = "create"
- DeleteOperation OperationKind = "delete"
- ListOperation OperationKind = "list"
-
- CheckRunningOperation OperationKind = "checkRunning"
- HealthyCheckOperation OperationKind = "healthyCheck"
- CheckRoleOperation OperationKind = "checkRole"
- GetRoleOperation OperationKind = "getRole"
- GetLagOperation OperationKind = "getLag"
- SwitchoverOperation OperationKind = "switchover"
- ExecOperation OperationKind = "exec"
- QueryOperation OperationKind = "query"
- CloseOperation OperationKind = "close"
-
- LockOperation OperationKind = "lockInstance"
- UnlockOperation OperationKind = "unlockInstance"
- VolumeProtection OperationKind = "volumeProtection"
- // for component
- PostProvisionOperation OperationKind = "postProvision"
- PreTerminateOperation OperationKind = "preTerminate"
+ ExecOperation OperationKind = "exec"
+ QueryOperation OperationKind = "query"
+ GetRoleOperation OperationKind = "getRole"
- // actions for cluster accounts management
- ListUsersOp OperationKind = "listUsers"
- CreateUserOp OperationKind = "createUser"
- DeleteUserOp OperationKind = "deleteUser"
- DescribeUserOp OperationKind = "describeUser"
- GrantUserRoleOp OperationKind = "grantUserRole"
- RevokeUserRoleOp OperationKind = "revokeUserRole"
- ListSystemAccountsOp OperationKind = "listSystemAccounts"
-
- JoinMemberOperation OperationKind = "joinMember"
- LeaveMemberOperation OperationKind = "leaveMember"
-
- DataDumpOperation OperationKind = "dataDump"
- DataLoadOperation OperationKind = "dataLoad"
-
- OperationNotImplemented = "NotImplemented"
- OperationInvalid = "Invalid"
- OperationSuccess = "Success"
- OperationFailed = "Failed"
- DefaultProbeTimeoutSeconds = 2
-
- // this is a general script template, which can be used for all kinds of exec request to databases.
- DataScriptRequestTpl string = `
- response=$(curl -s -X POST -H 'Content-Type: application/json' http://%s:3501/v1.0/bindings/%s -d '%s')
- result=$(echo $response | jq -r '.event')
- message=$(echo $response | jq -r '.message')
- if [ "$result" == "Failed" ]; then
- echo $message
- exit 1
- else
- echo "$result"
- exit 0
- fi
- `
+ OperationSuccess = "Success"
+ OperationFailed = "Failed"
)
-type RoleType string
-
-func (r RoleType) EqualTo(role string) bool {
- return strings.EqualFold(string(r), role)
-}
-
-func (r RoleType) GetWeight() int32 {
- switch r {
- case SuperUserRole:
- return 1 << 3
- case ReadWriteRole:
- return 1 << 2
- case ReadOnlyRole:
- return 1 << 1
- case CustomizedRole:
- return 1
- default:
- return 0
- }
-}
-
-const (
- SuperUserRole RoleType = "superuser"
- ReadWriteRole RoleType = "readwrite"
- ReadOnlyRole RoleType = "readonly"
- NoPrivileges RoleType = ""
- CustomizedRole RoleType = "customized"
- InvalidRole RoleType = "invalid"
-)
-
-// ProbeError is the error for Lorry probe api, it implements error interface
+// ProbeError is the error for dbctl probe api, it implements error interface
type ProbeError struct {
message string
}
@@ -138,5 +49,3 @@ func NewProbeError(msg string) error {
message: msg,
}
}
-
-var ErrNotImplemented = errors.New("not implemented")