Deploying Redis Cluster on OpenShift/K8s: A Practical Walkthrough

Environment: OpenShift 3.10, Redis 5.0.1. In theory the same steps apply to plain Kubernetes.

Reference: https://rancher.com/blog/2019/deploying-redis-cluster/

Note: Redis Cluster alone does not guarantee full high availability (HA).

---

Steps
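
The commands below assume a project (namespace) named cluster already exists; if not, create it first:

oc new-project cluster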

oc create -f pv.yaml -n cluster

pv.yaml content (six NFS-backed PersistentVolumes, one per Redis node):

---
apiVersion: v1
kind: PersistentVolume
metadata:
  name: cluster-pv-0
  labels:
    pvname: cluster-pv-0
spec:
  capacity:
    storage: 1Gi
  accessModes:
    - ReadWriteOnce
  persistentVolumeReclaimPolicy: Retain
  nfs:
    server: 192.168.64.108
    path: /data/nfs/cluster-0
---
apiVersion: v1
kind: PersistentVolume
metadata:
  name: cluster-pv-1
  labels:
    pvname: cluster-pv-1
spec:
  capacity:
    storage: 1Gi
  accessModes:
    - ReadWriteOnce
  persistentVolumeReclaimPolicy: Retain
  nfs:
    server: 192.168.64.108
    path: /data/nfs/cluster-1
---
apiVersion: v1
kind: PersistentVolume
metadata:
  name: cluster-pv-2
  labels:
    pvname: cluster-pv-2
spec:
  capacity:
    storage: 1Gi
  accessModes:
    - ReadWriteOnce
  persistentVolumeReclaimPolicy: Retain
  nfs:
    server: 192.168.64.108
    path: /data/nfs/cluster-2
---
apiVersion: v1
kind: PersistentVolume
metadata:
  name: cluster-pv-3
  labels:
    pvname: cluster-pv-3
spec:
  capacity:
    storage: 1Gi
  accessModes:
    - ReadWriteOnce
  persistentVolumeReclaimPolicy: Retain
  nfs:
    server: 192.168.64.108
    path: /data/nfs/cluster-3
---
apiVersion: v1
kind: PersistentVolume
metadata:
  name: cluster-pv-4
  labels:
    pvname: cluster-pv-4
spec:
  capacity:
    storage: 1Gi
  accessModes:
    - ReadWriteOnce
  persistentVolumeReclaimPolicy: Retain
  nfs:
    server: 192.168.64.108
    path: /data/nfs/cluster-4
---
apiVersion: v1
kind: PersistentVolume
metadata:
  name: cluster-pv-5
  labels:
    pvname: cluster-pv-5
spec:
  capacity:
    storage: 1Gi
  accessModes:
    - ReadWriteOnce
  persistentVolumeReclaimPolicy: Retain
  nfs:
    server: 192.168.64.108
    path: /data/nfs/cluster-5
---

On the NFS server, create an empty nodes.conf in each of /data/nfs/cluster-0 through /data/nfs/cluster-5 ahead of time, so that update-node.sh has a file to edit on first start.
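
A minimal sketch of preparing those directories on the NFS server itself (assuming shell access to 192.168.64.108 and /data/nfs as the export root):

# On the NFS server: create each export directory and seed an empty nodes.conf
for i in 0 1 2 3 4 5; do
  mkdir -p /data/nfs/cluster-$i
  touch /data/nfs/cluster-$i/nodes.conf
done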

oc create -f redis-sts.yaml -n cluster

redis-sts.yaml content:

---
apiVersion: v1
kind: ConfigMap
metadata:
  name: redis-cluster
data:
  update-node.sh: |
    #!/bin/sh
    # Patch this pod's current IP into the "myself" line of nodes.conf
    # (pod IPs change across restarts), then exec the real command.
    REDIS_NODES="/data/nodes.conf"
    sed -i -e "/myself/ s/[0-9]\{1,3\}\.[0-9]\{1,3\}\.[0-9]\{1,3\}\.[0-9]\{1,3\}/${POD_IP}/" ${REDIS_NODES}
    exec "$@"
  redis.conf: |+
    cluster-enabled yes
    cluster-require-full-coverage no
    cluster-node-timeout 15000
    cluster-config-file /data/nodes.conf
    cluster-migration-barrier 1
    appendonly yes
    protected-mode no
---
apiVersion: apps/v1
kind: StatefulSet
metadata:
  name: redis-cluster
spec:
  serviceName: redis-cluster
  replicas: 6
  selector:
    matchLabels:
      app: redis-cluster
  template:
    metadata:
      labels:
        app: redis-cluster
    spec:
      containers:
      - name: redis
        image: redis:5.0.1-alpine
        ports:
        - containerPort: 6379
          name: client
        - containerPort: 16379
          name: gossip
        command: ["/conf/update-node.sh", "redis-server", "/conf/redis.conf"]
        env:
        - name: POD_IP
          valueFrom:
            fieldRef:
              fieldPath: status.podIP
        volumeMounts:
        - name: conf
          mountPath: /conf
          readOnly: false
        - name: data
          mountPath: /data
          readOnly: false
      volumes:
      - name: conf
        configMap:
          name: redis-cluster
          defaultMode: 0755
  volumeClaimTemplates:
  - metadata:
      name: data
    spec:
      accessModes: [ "ReadWriteOnce" ]
      resources:
        requests:
          storage: 1Gi
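
The StatefulSet brings the six pods up one at a time; the rollout can be watched with the label selector, e.g.:

oc get pods -l app=redis-cluster -n cluster -w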

oc create -f redis-svc.yaml -n cluster

redis-svc.yaml content:

---
apiVersion: v1
kind: Service
metadata:
  name: redis-cluster
spec:
  type: ClusterIP
  ports:
  - port: 6379
    targetPort: 6379
    name: client
  - port: 16379
    targetPort: 16379
    name: gossip
  selector:
    app: redis-cluster

Verify the deployment

oc get pods -n cluster

oc get pv

kubectl get pvc -n cluster
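
At this point the pods run as six standalone nodes; they are not clustered yet. A quick connectivity check through the service (PING works even before initialization):

oc exec -it redis-cluster-0 -n cluster -- redis-cli -h redis-cluster ping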

Initialize the cluster

oc exec -it redis-cluster-0 -n cluster -- redis-cli --cluster create --cluster-replicas 1 $(oc get pods -l app=redis-cluster -n cluster -o jsonpath='{range .items[*]}{.status.podIP}:6379 {end}')
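
If initialization succeeded, cluster info should report cluster_state:ok and cluster_known_nodes:6:

oc exec -it redis-cluster-0 -n cluster -- redis-cli cluster info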

Inspect roles

for x in $(seq 0 5); do echo "redis-cluster-$x"; oc exec redis-cluster-$x -n cluster -- redis-cli role; echo; done
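
For the full slot layout and master/replica pairing, cluster nodes shows one line per node:

oc exec redis-cluster-0 -n cluster -- redis-cli cluster nodes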

Test

kubectl apply -f app-deployment-service.yaml -n cluster

app-deployment-service.yaml content:

---
apiVersion: v1
kind: Service
metadata:
  name: hit-counter-lb
spec:
  type: LoadBalancer
  ports:
  - port: 80
    protocol: TCP
    targetPort: 5000
  selector:
    app: myapp
---
apiVersion: apps/v1
kind: Deployment
metadata:
  name: hit-counter-app
spec:
  replicas: 1
  selector:
    matchLabels:
      app: myapp
  template:
    metadata:
      labels:
        app: myapp
    spec:
      containers:
      - name: myapp
        image: harbor.adb.com/zed/zed:v1
        ports:
        - containerPort: 5000
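
On OpenShift 3.10 the LoadBalancer external IP may stay pending unless a cloud load balancer is available; exposing the service as a Route is a common alternative (a sketch, assuming the default router):

oc expose svc hit-counter-lb -n cluster
oc get route hit-counter-lb -n cluster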

zed source code (main/cmd.go):

package main

import (
	"fmt"
	"github.com/gin-gonic/gin"
	"github.com/go-redis/redis"
)

func main() {
	// Connect through the redis-cluster Service; the cluster client
	// discovers the individual node (pod) IPs from whichever node answers.
	rdb := redis.NewClusterClient(&redis.ClusterOptions{
		Addrs: []string{"redis-cluster:6379"},
	})

	// Sanity-check connectivity at startup.
	pong, err := rdb.Ping().Result()
	fmt.Println(pong, err)

	r := gin.Default()
	r.GET("/", func(c *gin.Context) {
		// INCR is atomic, so concurrent requests are counted correctly.
		value, err := rdb.Incr("hits3").Result()
		if err != nil {
			fmt.Println(err)
		}
		c.String(200, fmt.Sprintf("I have been hit %d times since deployment", value))
	})
	r.Run("0.0.0.0:5000")
}

zed Dockerfile:

FROM golang:1.13

ENV GOPROXY https://goproxy.cn
ENV GO111MODULE on

WORKDIR /zed/
COPY . /zed/
RUN go build -o ./zed ./main/cmd.go

EXPOSE 5000
ENTRYPOINT ["./zed"]
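
A sketch of building and pushing the image referenced by the Deployment above (assuming push access to harbor.adb.com):

docker build -t harbor.adb.com/zed/zed:v1 .
docker push harbor.adb.com/zed/zed:v1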
