Operator Development

kubebuilder

kubebuilder 2.3.1
k3d v3.2.1
k3s v1.19.3-k3s2

# The EtcdCluster custom resource we want to support
apiVersion: etcd.ydzs.io/v1alpha1
kind: EtcdCluster
metadata:
  name: demo
spec:
  size: 3                    # replica count
  image: cnych/etcd:v3.4.13  # image

Initialize the project

kubebuilder init --domain ydzs.io --owner cnych --repo github.com/cnych/etcd-operator

Create the API

kubebuilder create api --group etcd --version v1alpha1 --kind EtcdCluster

Edit the Operator's types

Modify the EtcdClusterSpec struct in api/v1alpha1/etcdcluster_types.go:

type EtcdClusterSpec struct {
	// INSERT ADDITIONAL SPEC FIELDS - desired state of cluster
	// Important: Run "make" to regenerate code after modifying this file

	Size  *int32 `json:"size"`
	Image string `json:"image"`
}

Run make in the project root to regenerate the related code.
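
For reference, make drives controller-gen, which rewrites api/v1alpha1/zz_generated.deepcopy.go for the new fields. A sketch of the kind of method it emits for the pointer field (abridged, not the verbatim generated file):

// DeepCopyInto deep-copies the receiver into out, cloning the Size pointer
func (in *EtcdClusterSpec) DeepCopyInto(out *EtcdClusterSpec) {
	*out = *in
	if in.Size != nil {
		in, out := &in.Size, &out.Size
		*out = new(int32)
		**out = **in
	}
}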

Write the business logic

The resource file

Create a resource.go file under the controllers directory; it renders the corresponding StatefulSet and headless Service objects from the EtcdCluster object we defined:

// controllers/resource.go

package controllers

import (
	"strconv"

	"github.com/cnych/etcd-operator/api/v1alpha1"
	appsv1 "k8s.io/api/apps/v1"
	corev1 "k8s.io/api/core/v1"
	"k8s.io/apimachinery/pkg/api/resource"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

var (
	EtcdClusterLabelKey       = "etcd.ydzs.io/cluster"
	EtcdClusterCommonLabelKey = "app"
	EtcdDataDirName           = "datadir"
)

// MutateStatefulSet builds the desired StatefulSet for the given EtcdCluster
func MutateStatefulSet(cluster *v1alpha1.EtcdCluster, sts *appsv1.StatefulSet) {
	sts.Labels = map[string]string{
		EtcdClusterCommonLabelKey: "etcd",
	}
	sts.Spec = appsv1.StatefulSetSpec{
		Replicas:    cluster.Spec.Size,
		ServiceName: cluster.Name,
		Selector: &metav1.LabelSelector{MatchLabels: map[string]string{
			EtcdClusterLabelKey: cluster.Name,
		}},
		Template: corev1.PodTemplateSpec{
			ObjectMeta: metav1.ObjectMeta{
				Labels: map[string]string{
					EtcdClusterLabelKey:       cluster.Name,
					EtcdClusterCommonLabelKey: "etcd",
				},
			},
			Spec: corev1.PodSpec{
				Containers: newContainers(cluster),
			},
		},
		VolumeClaimTemplates: []corev1.PersistentVolumeClaim{
			{
				ObjectMeta: metav1.ObjectMeta{
					Name: EtcdDataDirName,
				},
				Spec: corev1.PersistentVolumeClaimSpec{
					AccessModes: []corev1.PersistentVolumeAccessMode{
						corev1.ReadWriteOnce,
					},
					Resources: corev1.ResourceRequirements{
						Requests: corev1.ResourceList{
							corev1.ResourceStorage: resource.MustParse("1Gi"),
						},
					},
				},
			},
		},
	}
}

// newContainers builds the etcd container for the StatefulSet pod template
func newContainers(cluster *v1alpha1.EtcdCluster) []corev1.Container {
	return []corev1.Container{
		{
			Name:  "etcd",
			Image: cluster.Spec.Image,
			Ports: []corev1.ContainerPort{
				{
					Name:          "peer",
					ContainerPort: 2380,
				},
				{
					Name:          "client",
					ContainerPort: 2379,
				},
			},
			Env: []corev1.EnvVar{
				{
					Name:  "INITIAL_CLUSTER_SIZE",
					Value: strconv.Itoa(int(*cluster.Spec.Size)),
				},
				{
					Name:  "SET_NAME",
					Value: cluster.Name,
				},
				{
					Name: "POD_IP",
					ValueFrom: &corev1.EnvVarSource{
						FieldRef: &corev1.ObjectFieldSelector{
							FieldPath: "status.podIP",
						},
					},
				},
				{
					Name: "MY_NAMESPACE",
					ValueFrom: &corev1.EnvVarSource{
						FieldRef: &corev1.ObjectFieldSelector{
							FieldPath: "metadata.namespace",
						},
					},
				},
			},
			VolumeMounts: []corev1.VolumeMount{
				{
					Name:      EtcdDataDirName,
					MountPath: "/var/run/etcd",
				},
			},
			Command: []string{
				"/bin/sh", "-ec",
				"HOSTNAME=$(hostname)\n\n ETCDCTL_API=3\n\n eps() {\n EPS=\"\"\n for i in $(seq 0 $((${INITIAL_CLUSTER_SIZE} - 1))); do\n EPS=\"${EPS}${EPS:+,}http://${SET_NAME}-${i}.${SET_NAME}.${MY_NAMESPACE}.svc.cluster.local:2379\"\n done\n echo ${EPS}\n }\n\n member_hash() {\n etcdctl member list | grep -w \"$HOSTNAME\" | awk '{ print $1}' | awk -F \",\" '{ print $1}'\n }\n\n initial_peers() {\n PEERS=\"\"\n for i in $(seq 0 $((${INITIAL_CLUSTER_SIZE} - 1))); do\n PEERS=\"${PEERS}${PEERS:+,}${SET_NAME}-${i}=http://${SET_NAME}-${i}.${SET_NAME}.${MY_NAMESPACE}.svc.cluster.local:2380\"\n done\n echo ${PEERS}\n }\n\n # etcd-SET_ID\n SET_ID=${HOSTNAME##*-}\n\n # adding a new member to existing cluster (assuming all initial pods are available)\n if [ \"${SET_ID}\" -ge ${INITIAL_CLUSTER_SIZE} ]; then\n # export ETCDCTL_ENDPOINTS=$(eps)\n # member already added?\n\n MEMBER_HASH=$(member_hash)\n if [ -n \"${MEMBER_HASH}\" ]; then\n # the member hash exists but for some reason etcd failed\n # as the datadir has not be created, we can remove the member\n # and retrieve new hash\n echo \"Remove member ${MEMBER_HASH}\"\n etcdctl --endpoints=$(eps) member remove ${MEMBER_HASH}\n fi\n\n echo \"Adding new member\"\n\n etcdctl member --endpoints=$(eps) add ${HOSTNAME} --peer-urls=http://${HOSTNAME}.${SET_NAME}.${MY_NAMESPACE}.svc.cluster.local:2380 | grep \"^ETCD_\" > /var/run/etcd/new_member_envs\n\n if [ $? -ne 0 ]; then\n echo \"member add ${HOSTNAME} error.\"\n rm -f /var/run/etcd/new_member_envs\n exit 1\n fi\n\n echo \"==> Loading env vars of existing cluster...\"\n sed -ie \"s/^/export /\" /var/run/etcd/new_member_envs\n cat /var/run/etcd/new_member_envs\n . /var/run/etcd/new_member_envs\n\n exec etcd --listen-peer-urls http://${POD_IP}:2380 \\\n --listen-client-urls http://${POD_IP}:2379,http://127.0.0.1:2379 \\\n --advertise-client-urls http://${HOSTNAME}.${SET_NAME}.${MY_NAMESPACE}.svc.cluster.local:2379 \\\n --data-dir /var/run/etcd/default.etcd\n fi\n\n for i in $(seq 0 $((${INITIAL_CLUSTER_SIZE} - 1))); do\n while true; do\n echo \"Waiting for ${SET_NAME}-${i}.${SET_NAME}.${MY_NAMESPACE}.svc.cluster.local to come up\"\n ping -W 1 -c 1 ${SET_NAME}-${i}.${SET_NAME}.${MY_NAMESPACE}.svc.cluster.local > /dev/null && break\n sleep 1s\n done\n done\n\n echo \"join member ${HOSTNAME}\"\n # join member\n exec etcd --name ${HOSTNAME} \\\n --initial-advertise-peer-urls http://${HOSTNAME}.${SET_NAME}.${MY_NAMESPACE}.svc.cluster.local:2380 \\\n --listen-peer-urls http://${POD_IP}:2380 \\\n --listen-client-urls http://${POD_IP}:2379,http://127.0.0.1:2379 \\\n --advertise-client-urls http://${HOSTNAME}.${SET_NAME}.${MY_NAMESPACE}.svc.cluster.local:2379 \\\n --initial-cluster-token etcd-cluster-1 \\\n --data-dir /var/run/etcd/default.etcd \\\n --initial-cluster $(initial_peers) \\\n --initial-cluster-state new",
			},
			Lifecycle: &corev1.Lifecycle{
				PreStop: &corev1.Handler{
					Exec: &corev1.ExecAction{
						Command: []string{
							"/bin/sh", "-ec",
							"HOSTNAME=$(hostname)\n\n member_hash() {\n etcdctl member list | grep -w \"$HOSTNAME\" | awk '{ print $1}' | awk -F \",\" '{ print $1}'\n }\n\n eps() {\n EPS=\"\"\n for i in $(seq 0 $((${INITIAL_CLUSTER_SIZE} - 1))); do\n EPS=\"${EPS}${EPS:+,}http://${SET_NAME}-${i}.${SET_NAME}.${MY_NAMESPACE}.svc.cluster.local:2379\"\n done\n echo ${EPS}\n }\n\n export ETCDCTL_ENDPOINTS=$(eps)\n SET_ID=${HOSTNAME##*-}\n\n # Removing member from cluster\n if [ \"${SET_ID}\" -ge ${INITIAL_CLUSTER_SIZE} ]; then\n echo \"Removing ${HOSTNAME} from etcd cluster\"\n etcdctl member remove $(member_hash)\n if [ $? -eq 0 ]; then\n # Remove everything otherwise the cluster will no longer scale-up\n rm -rf /var/run/etcd/*\n fi\n fi",
						},
					},
				},
			},
		},
	}
}

// MutateHeadlessSvc builds the headless Service that fronts the StatefulSet
func MutateHeadlessSvc(cluster *v1alpha1.EtcdCluster, svc *corev1.Service) {
	svc.Labels = map[string]string{
		EtcdClusterCommonLabelKey: "etcd",
	}
	svc.Spec = corev1.ServiceSpec{
		ClusterIP: corev1.ClusterIPNone,
		Selector: map[string]string{
			EtcdClusterLabelKey: cluster.Name,
		},
		Ports: []corev1.ServicePort{
			{
				Name: "peer",
				Port: 2380,
			},
			{
				Name: "client",
				Port: 2379,
			},
		},
	}
}

The Reconcile function

Modify the Reconcile function in controllers/etcdcluster_controller.go to implement the reconciliation logic:

// +kubebuilder:rbac:groups=etcd.ydzs.io,resources=etcdclusters,verbs=get;list;watch;create;update;patch;delete
// +kubebuilder:rbac:groups=etcd.ydzs.io,resources=etcdclusters/status,verbs=get;update;patch

func (r *EtcdClusterReconciler) Reconcile(req ctrl.Request) (ctrl.Result, error) {
	ctx := context.Background()
	log := r.Log.WithValues("etcdcluster", req.NamespacedName)

	// First, fetch the EtcdCluster instance
	var etcdCluster etcdv1alpha1.EtcdCluster
	if err := r.Client.Get(ctx, req.NamespacedName, &etcdCluster); err != nil {
		// EtcdCluster was deleted; ignore
		log.Info("etcdcluster not found!")
		return ctrl.Result{}, client.IgnoreNotFound(err)
	}

	// With the EtcdCluster in hand, CreateOrUpdate the corresponding StatefulSet and Service

	// CreateOrUpdate the Service
	var svc corev1.Service
	svc.Name = etcdCluster.Name
	svc.Namespace = etcdCluster.Namespace
	svcResult, err := ctrl.CreateOrUpdate(ctx, r, &svc, func() error {
		// The mutation of the object must happen inside this callback
		MutateHeadlessSvc(&etcdCluster, &svc)
		return controllerutil.SetControllerReference(&etcdCluster, &svc, r.Scheme)
	})
	if err != nil {
		log.Error(err, "CreateOrUpdate etcdcluster Service error")
		return ctrl.Result{}, err
	}
	log.Info("CreateOrUpdate", "Service", svcResult)

	// CreateOrUpdate the StatefulSet
	var sts appsv1.StatefulSet
	sts.Name = etcdCluster.Name
	sts.Namespace = etcdCluster.Namespace
	stsResult, err := ctrl.CreateOrUpdate(ctx, r, &sts, func() error {
		MutateStatefulSet(&etcdCluster, &sts)
		return controllerutil.SetControllerReference(&etcdCluster, &sts, r.Scheme)
	})
	if err != nil {
		log.Error(err, "CreateOrUpdate etcdcluster StatefulSet error")
		return ctrl.Result{}, err
	}
	log.Info("CreateOrUpdate", "StatefulSet", stsResult)

	return ctrl.Result{}, nil
}
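
The scaffolded SetupWithManager should also declare ownership of the objects the Reconcile loop creates, so that changes to the StatefulSet or Service requeue the owning EtcdCluster. A minimal sketch, following the same For/Owns pattern as the memcached controller shown later in this post:

func (r *EtcdClusterReconciler) SetupWithManager(mgr ctrl.Manager) error {
	return ctrl.NewControllerManagedBy(mgr).
		For(&etcdv1alpha1.EtcdCluster{}).
		// Owns makes create/update/delete events on the owned objects
		// trigger reconciliation of the owning EtcdCluster
		Owns(&appsv1.StatefulSet{}).
		Owns(&corev1.Service{}).
		Complete(r)
}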

Verify

Install the CRD definitions: make install
Run the controller locally: make run
Create a CR:
kubectl create -f etcd_v1alpha1_etcdcluster.yaml
Observe the StatefulSet and Service resources being created in the cluster, as shown below.
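
For example, a quick check with kubectl (the app=etcd label comes from resource.go above):

kubectl get etcdcluster demo
kubectl get statefulset,svc -l app=etcd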

operator-sdk

Based on version v1.0.0
Uses a new cluster created with Kind
kubectl
kustomize

Installation

Download the operator-sdk binaries:
$ curl -LO https://github.com/operator-framework/operator-sdk/releases/download/v1.0.0/operator-sdk-v1.0.0-x86_64-linux-gnu
$ curl -LO https://github.com/operator-framework/operator-sdk/releases/download/v1.0.0/ansible-operator-v1.0.0-x86_64-linux-gnu
$ curl -LO https://github.com/operator-framework/operator-sdk/releases/download/v1.0.0/helm-operator-v1.0.0-x86_64-linux-gnu

Move them into /usr/local/bin:
$ chmod +x operator-sdk-v1.0.0-x86_64-linux-gnu && sudo mkdir -p /usr/local/bin/ && sudo cp operator-sdk-v1.0.0-x86_64-linux-gnu /usr/local/bin/operator-sdk && rm operator-sdk-v1.0.0-x86_64-linux-gnu
$ chmod +x ansible-operator-v1.0.0-x86_64-linux-gnu && sudo mkdir -p /usr/local/bin/ && sudo cp ansible-operator-v1.0.0-x86_64-linux-gnu /usr/local/bin/ansible-operator && rm ansible-operator-v1.0.0-x86_64-linux-gnu
$ chmod +x helm-operator-v1.0.0-x86_64-linux-gnu && sudo mkdir -p /usr/local/bin/ && sudo cp helm-operator-v1.0.0-x86_64-linux-gnu /usr/local/bin/helm-operator && rm helm-operator-v1.0.0-x86_64-linux-gnu

Demo

Create the project directory:
$ mkdir -p $HOME/projects/memcached-operator
$ cd $HOME/projects/memcached-operator

Initialize the memcached-operator sample project in the directory:
$ operator-sdk init --domain=example.com --repo=github.com/example-inc/memcached-operator

Scaffold the project API:
$ operator-sdk create api --group=cache --version=v1alpha1 --kind=Memcached

Modify the API

Update the declared API in api/v1alpha1/memcached_types.go:

// MemcachedSpec defines the desired state of Memcached
type MemcachedSpec struct {
	// +kubebuilder:validation:Minimum=0
	// Size is the size of the memcached deployment
	Size int32 `json:"size"`
}

// MemcachedStatus defines the observed state of Memcached
type MemcachedStatus struct {
	// Nodes are the names of the memcached pods
	Nodes []string `json:"nodes"`
}
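
After editing the types, regenerate the deepcopy code via the scaffolded Makefile target (make manifests, run during the build step below, refreshes the CRD manifests):

$ make generate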

Replace the controller

Replace the contents of controllers/memcached_controller.go with the updated controller from:
https://github.com/operator-framework/operator-sdk/blob/master/example/memcached-operator/memcached_controller.go.tmpl

Walkthrough of the main methods

// Each controller has a Reconciler object; the Reconcile loop is executed repeatedly while the controller runs
func (r *MemcachedReconciler) Reconcile(req ctrl.Request) (ctrl.Result, error) {
	ctx := context.Background()
	log := r.Log.WithValues("memcached", req.NamespacedName)

	// Fetch the Memcached instance
	memcached := &cachev1alpha1.Memcached{}
	err := r.Get(ctx, req.NamespacedName, memcached)
	if err != nil {
		if errors.IsNotFound(err) {
			// The custom resource no longer exists in the cluster; return and end the loop
			log.Info("Memcached resource not found. Ignoring since object must be deleted")
			return ctrl.Result{}, nil
		}
		// Failed to read the resource; return and retry
		log.Error(err, "Failed to get Memcached")
		return ctrl.Result{}, err
	}

	// Check whether the Deployment declared by the CR exists; create it if not
	found := &appsv1.Deployment{}
	err = r.Get(ctx, types.NamespacedName{Name: memcached.Name, Namespace: memcached.Namespace}, found)
	if err != nil && errors.IsNotFound(err) {
		// Define a new Deployment
		dep := r.deploymentForMemcached(memcached)
		log.Info("Creating a new Deployment", "Deployment.Namespace", dep.Namespace, "Deployment.Name", dep.Name)
		err = r.Create(ctx, dep)
		if err != nil {
			log.Error(err, "Failed to create new Deployment", "Deployment.Namespace", dep.Namespace, "Deployment.Name", dep.Name)
			return ctrl.Result{}, err
		}
		// Deployment created successfully; requeue and continue
		return ctrl.Result{Requeue: true}, nil
	} else if err != nil {
		// Failed to read the Deployment; return and retry
		log.Error(err, "Failed to get Deployment")
		return ctrl.Result{}, err
	}

	// Compare the Deployment's replica count with the size defined in the CR; update it if they differ
	size := memcached.Spec.Size
	if *found.Spec.Replicas != size {
		found.Spec.Replicas = &size
		err = r.Update(ctx, found)
		if err != nil {
			// Failed to update the replica count; return and retry
			log.Error(err, "Failed to update Deployment", "Deployment.Namespace", found.Namespace, "Deployment.Name", found.Name)
			return ctrl.Result{}, err
		}
		// Replica count updated; requeue and continue
		return ctrl.Result{Requeue: true}, nil
	}

	// List all the pods belonging to this Deployment
	podList := &corev1.PodList{}
	listOpts := []client.ListOption{
		client.InNamespace(memcached.Namespace),
		client.MatchingLabels(labelsForMemcached(memcached.Name)),
	}
	if err = r.List(ctx, podList, listOpts...); err != nil {
		// Failed to list the pods; return and retry
		log.Error(err, "Failed to list pods", "Memcached.Namespace", memcached.Namespace, "Memcached.Name", memcached.Name)
		return ctrl.Result{}, err
	}
	podNames := getPodNames(podList.Items)

	// Update status.Nodes if it no longer matches the current pod names
	if !reflect.DeepEqual(podNames, memcached.Status.Nodes) {
		memcached.Status.Nodes = podNames
		err := r.Status().Update(ctx, memcached)
		if err != nil {
			// Failed to update the status; return and retry
			log.Error(err, "Failed to update Memcached status")
			return ctrl.Result{}, err
		}
	}

	// Reconciliation complete; end this iteration of the loop
	return ctrl.Result{}, nil
}

// deploymentForMemcached builds a Deployment for the given Memcached
func (r *MemcachedReconciler) deploymentForMemcached(m *cachev1alpha1.Memcached) *appsv1.Deployment {
	ls := labelsForMemcached(m.Name)
	replicas := m.Spec.Size

	dep := &appsv1.Deployment{
		ObjectMeta: metav1.ObjectMeta{
			Name:      m.Name,
			Namespace: m.Namespace,
		},
		Spec: appsv1.DeploymentSpec{
			Replicas: &replicas,
			Selector: &metav1.LabelSelector{
				MatchLabels: ls,
			},
			Template: corev1.PodTemplateSpec{
				ObjectMeta: metav1.ObjectMeta{
					Labels: ls,
				},
				Spec: corev1.PodSpec{
					Containers: []corev1.Container{{
						Image:   "memcached:1.4.36-alpine",
						Name:    "memcached",
						Command: []string{"memcached", "-m=64", "-o", "modern", "-v"},
						Ports: []corev1.ContainerPort{{
							ContainerPort: 11211,
							Name:          "memcached",
						}},
					}},
				},
			},
		},
	}
	// Make the Memcached own the Deployment
	ctrl.SetControllerReference(m, dep, r.Scheme)
	return dep
}

// labelsForMemcached builds the labels used to select the Memcached pods
func labelsForMemcached(name string) map[string]string {
	return map[string]string{"app": "memcached", "memcached_cr": name}
}

// getPodNames collects the pod names from the items of a PodList
func getPodNames(pods []corev1.Pod) []string {
	var podNames []string
	for _, pod := range pods {
		podNames = append(podNames, pod.Name)
	}
	return podNames
}

// SetupWithManager registers the Memcached controller with the manager
func (r *MemcachedReconciler) SetupWithManager(mgr ctrl.Manager) error {
	return ctrl.NewControllerManagedBy(mgr).
		For(&cachev1alpha1.Memcached{}).
		Owns(&appsv1.Deployment{}).
		Complete(r)
}

Build

$ make manifests
$ make install

Run

Run outside the cluster

A local Kubernetes cluster must be built and reachable beforehand; kind is recommended for quickly standing one up.
$ make run ENABLE_WEBHOOKS=false

go run ./main.go
2020-09-09T10:24:49.631+0800 INFO controller-runtime.metrics metrics server is starting to listen {"addr": ":8080"}
2020-09-09T10:24:49.631+0800 INFO setup starting manager
2020-09-09T10:24:49.632+0800 INFO controller-runtime.manager starting metrics server {"path": "/metrics"}
2020-09-09T10:24:49.632+0800 INFO controller Starting EventSource {"reconcilerGroup": "cache.example.com", "reconcilerKind": "Memcached", "controller": "memcached", "source": "kind source: /, Kind="}
2020-09-09T10:24:49.732+0800 INFO controller Starting EventSource {"reconcilerGroup": "cache.example.com", "reconcilerKind": "Memcached", "controller": "memcached", "source": "kind source: /, Kind="}
2020-09-09T10:24:49.832+0800 INFO controller Starting Controller {"reconcilerGroup": "cache.example.com", "reconcilerKind": "Memcached", "controller": "memcached"}
2020-09-09T10:24:49.832+0800 INFO controller Starting workers {"reconcilerGroup": "cache.example.com", "reconcilerKind": "Memcached", "controller": "memcached", "worker count": 1}
Run inside the cluster

First, build the application image:
$ make docker-build IMG=registry.cn-shenzhen.aliyuncs.com/mutoulazy/memcached:v1.2

During the build you can add a Go module proxy to the Dockerfile to speed up the image build:
ENV GOPROXY https://goproxy.cn,direct

Push the image to the registry (run docker login against the registry first):
$ make docker-push IMG=registry.cn-shenzhen.aliyuncs.com/mutoulazy/memcached:v1.2

Note that a kind cluster needs kind load docker-image <image-name> to load the local image into the kind nodes, as shown below.
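
For example, with the image built above:

$ kind load docker-image registry.cn-shenzhen.aliyuncs.com/mutoulazy/memcached:v1.2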

Test

Use kustomize to set the target namespace in the deployment manifests:
$ cd config/default/ && kustomize edit set namespace "default" && cd ../..

Deploy the operator image:
$ make deploy IMG=registry.cn-shenzhen.aliyuncs.com/mutoulazy/memcached:v1.2

/usr/local/bin/kustomize build config/default | kubectl apply -f -
namespace/system created
customresourcedefinition.apiextensions.k8s.io/memcacheds.cache.example.com configured
role.rbac.authorization.k8s.io/memcached-operator-leader-election-role created
clusterrole.rbac.authorization.k8s.io/memcached-operator-manager-role created
clusterrole.rbac.authorization.k8s.io/memcached-operator-proxy-role created
clusterrole.rbac.authorization.k8s.io/memcached-operator-metrics-reader created
rolebinding.rbac.authorization.k8s.io/memcached-operator-leader-election-rolebinding created
clusterrolebinding.rbac.authorization.k8s.io/memcached-operator-manager-rolebinding created
clusterrolebinding.rbac.authorization.k8s.io/memcached-operator-proxy-rolebinding created
service/memcached-operator-controller-manager-metrics-service created
deployment.apps/memcached-operator-controller-manager created

Create a Memcached resource

apiVersion: cache.example.com/v1alpha1
kind: Memcached
metadata:
  name: memcached-sample
spec:
  size: 3

$ kubectl apply -f config/samples/cache_v1alpha1_memcached.yaml

Watch the application pods

size is defined as 3:

NAME                                                      READY   STATUS    RESTARTS   AGE
memcached-operator-controller-manager-794bfd5b55-2svtf   2/2     Running   0          30m
memcached-sample-9b765dfc8-5jsff                          1/1     Running   0          57s
memcached-sample-9b765dfc8-vn29h                          1/1     Running   0          57s
memcached-sample-9b765dfc8-xrzvl                          1/1     Running   0          57s

The status field of the memcacheds resource memcached-sample:
status:
  nodes:
  - memcached-sample-9b765dfc8-5jsff
  - memcached-sample-9b765dfc8-vn29h
  - memcached-sample-9b765dfc8-xrzvl

Change size to 5
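
One way is to edit size in config/samples/cache_v1alpha1_memcached.yaml from 3 to 5 and re-apply it:

$ kubectl apply -f config/samples/cache_v1alpha1_memcached.yaml

The controller then reconciles the Deployment up to five replicas: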

NAME                                                      READY   STATUS    RESTARTS   AGE
memcached-operator-controller-manager-794bfd5b55-2svtf   2/2     Running   0          31m
memcached-sample-9b765dfc8-5jsff                          1/1     Running   0          2m5s
memcached-sample-9b765dfc8-fb5wg                          1/1     Running   0          3s
memcached-sample-9b765dfc8-vn29h                          1/1     Running   0          2m5s
memcached-sample-9b765dfc8-xrzvl                          1/1     Running   0          2m5s
memcached-sample-9b765dfc8-zqq8t                          1/1     Running   0          3s

Clean up the application

$ kubectl delete -f config/samples/cache_v1alpha1_memcached.yaml
$ kubectl delete deployments,service -l control-plane=controller-manager
$ kubectl delete role,rolebinding --all