k8s in practice -- 20221023

Install a k8s cluster

Configure the yum repository:

cat /etc/yum.repos.d/kubernetes.repo
[kubernetes]
name=Kubernetes
baseurl=http://mirrors.aliyun.com/kubernetes/yum/repos/kubernetes-el7-x86_64
enabled=1
gpgcheck=0
repo_gpgcheck=0
gpgkey=http://mirrors.aliyun.com/kubernetes/yum/doc/yum-key.gpg
       http://mirrors.aliyun.com/kubernetes/yum/doc/rpm-package-key.gpg
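
After writing the repo file, refreshing the yum metadata cache is a quick optional check that the mirror is reachable:

yum clean all && yum makecache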

Install the tools

yum install -y kubelet-1.17.17-0 kubectl-1.17.17-0 kubeadm-1.17.17-0
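
Before running kubeadm, the kubelet service should be enabled and swap turned off on the node; a typical preparation (adjust to your environment) is:

systemctl enable --now kubelet
swapoff -a                                # kubeadm refuses to run with swap enabled
sed -ri 's/.*swap.*/#&/' /etc/fstab       # keep swap disabled across reboots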

Initialize the cluster

kubeadm init --image-repository registry.aliyuncs.com/google_containers --kubernetes-version v1.17.17 --pod-network-cidr=10.244.0.0/16
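
When the init completes, kubeadm prints the commands for setting up kubectl for the local user on the master:

mkdir -p $HOME/.kube
sudo cp -i /etc/kubernetes/admin.conf $HOME/.kube/config
sudo chown $(id -u):$(id -g) $HOME/.kube/config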

Configure kubectl

Add a record to the /etc/hosts file: 47.93.32.161 kubernetes
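
Equivalently, from a shell on the workstation:

echo "47.93.32.161 kubernetes" >> /etc/hosts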

Contents of ~/.kube/config:

apiVersion: v1
clusters:
- cluster:
    certificate-authority-data: LS0tLS1CRUdJTiBDRVJUSUZJQ0FURS0tLS0tCk1JSUN5RENDQWJDZ0F3SUJBZ0lCQURBTkJna3Foa2lHOXcwQkFRc0ZBREFWTVJNd0VRWURWUVFERXdwcmRXSmwKY201bGRHVnpNQjRYRFRJeU1UQXlNekEyTWprMU5Wb1hEVE15TVRBeU1EQTJNamsxTlZvd0ZURVRNQkVHQTFVRQpBeE1LYTNWaVpYSnVaWFJsY3pDQ0FTSXdEUVlKS29aSWh2Y05BUUVCQlFBRGdnRVBBRENDQVFvQ2dnRUJBS0Q1CmFNdGJ4TDZ3TDFLUW8yM0JVdEgzb1djR3ZMQ0NsdHEveHd5Y09CMHVwVXFKcnhoUVZ5ZGFCSm0rOVBocFh4Y1MKeFBDRng5MjZ2VDFUSU9XOEgxSURSaFp5d3RRa01hQnByTzR4Uml4WUxoMEVqbnN0aFhwMW0vOXdWRGtHZUt1QQorRUZ1Nis0T010UzdEbjlBMmc5VFd5SXk0UVdaVkdtVHNsMjZyZ2JBL1RSTCtHd0w2Yk9hdHFYR1YyWWI0ZUhXCkdiZTR5V0txaTRCbzhnbDNRNXU3ZEJ3QSszQVQyNGZielZiK3JHM3krd0FBczl2SUt5N0t5VVI1clc5Q3A2L3EKclRha0VENlFFbG1ib21zQXJ0Y21zSjI0WUplZUdOTmNsMGRNSFgwcEFuaHovRmkzR2dNUkFYSXgvWm9WVjd3bAp3aVROZDhiYlR3SityUjROOU9jQ0F3RUFBYU1qTUNFd0RnWURWUjBQQVFIL0JBUURBZ0trTUE4R0ExVWRFd0VCCi93UUZNQU1CQWY4d0RRWUpLb1pJaHZjTkFRRUxCUUFEZ2dFQkFIMmJWSmZKbi9qN3FtbXlBc1d1ckYvb3ZLRC8KYlc0VWZhWHQ5akliSWh0TlhUK0Z3T3JCZUpnamJWT1NPOTB4ckNvTDhZYWVHR1FPb0FVRkZzSkJXYXdHWmZYSgo3MHlYMmNHK2VCVmxKQXgyUXRPTFhDSDdwMlJRZ3BQamlOZllTZjJpL0lJSTc1aE9OQ0s1bVVkYkNWMFhma3MzCmRoTUFiRW5KeUdwNjV4VmozbmNFSzdaWnRHaEo2RjVYTzhxckIrZFBOSSs2T1NpL1VUZTBGdzRVTXRaUWZUWlkKSCs3NXlZT045bHh5STNVaWZ3Q2JmUjdUejBnRHZrNWdieVh1M0dScXVUVGdQUlY3bHJvcWZBWXZmRURlcXJaOQprempVQ3J5Wk1aUGpGY29kS3cwNC8yRVBRUEd1UVh2dUVSQ1BnUmRKcUUvZzdUUUhiNHkzbkFsYlVqdz0KLS0tLS1FTkQgQ0VSVElGSUNBVEUtLS0tLQo=
    server: https://kubernetes:6443
  name: kubernetes
contexts:
- context:
    cluster: kubernetes
    user: kubernetes-admin
  name: kubernetes-admin@kubernetes
current-context: kubernetes-admin@kubernetes
kind: Config
preferences: {}
users:
- name: kubernetes-admin
  user:
    client-certificate-data: LS0tLS1CRUdJTiBDRVJUSUZJQ0FURS0tLS0tCk1JSUM4akNDQWRxZ0F3SUJBZ0lJSElZWGZNN2paQ3d3RFFZSktvWklodmNOQVFFTEJRQXdGVEVUTUJFR0ExVUUKQXhNS2EzVmlaWEp1WlhSbGN6QWVGdzB5TWpFd01qTXdOakk1TlRWYUZ3MHlNekV3TWpNd05qSTVOVGRhTURReApGekFWQmdOVkJBb1REbk41YzNSbGJUcHRZWE4wWlhKek1Sa3dGd1lEVlFRREV4QnJkV0psY201bGRHVnpMV0ZrCmJXbHVNSUlCSWpBTkJna3Foa2lHOXcwQkFRRUZBQU9DQVE4QU1JSUJDZ0tDQVFFQTNBOHZMQklmdm83ZHhpTWQKSXUxQllwdVowcm5oSGF6aHBTd3RnbnJOWlpKdk51Q0x6VjJnVnE2M0Y5UEE5Z29pZzUxY1BPUFR1M01PUG1VcQpPb2dSU0I0ZWdMMVN2SjBQSWh3Q0dNUndFczNueGx2N09KVjhyZTR1QVpXWHY1dUF4S1JGTlBxL2c4bVlqSWZBCkJ4cW1KdGNzWm51ZFNaVm5xUzJLNVhmVkFWMXMyajFza2tROVJ6UU41cDQzZEs1SmtHbzgwZkVwNVNucmxVOG4KRCtWNVczZ25Dd1RVRkwzT0RUdVA1UXZBSUJtNHp3cUkxbXpPNkdpbDNVYUtncFV6T2haTFdtaWo4RjFPV082dApSd0oyQ1IybktZTUpBRXczQ0ZsY094QlFvTVB5S0tBNmsvckZ2SkovYlBYVHNsb0J0T0tUYnU3OXpxN05rM2tGCnN5M1lhUUlEQVFBQm95Y3dKVEFPQmdOVkhROEJBZjhFQkFNQ0JhQXdFd1lEVlIwbEJBd3dDZ1lJS3dZQkJRVUgKQXdJd0RRWUpLb1pJaHZjTkFRRUxCUUFEZ2dFQkFFalczVkpmaStLSmxoS0tLc29FdnpFU3Z3Q1VkcXNGYjFJagpjaXUvZXEwUVdsbzIyd0p3UEZHOVdhVUZvMGhKcTdUdXZHcXlIMnpsWUtpUy9PL3NyWDZQa0I0U3QvRTBTb253CnlZZ2xWbUlHQ0Y2OTZlTGxJSlBhaGNlRW5SR2RHK2l5dCtLWkZYd1V5V1dWSUk2NnJQVXZib3BMNDQzdUNsVDQKSzhJOW5keFU5Mk52WmQzUkxJdWZFYVJzQkJuQWpyTVRUaFBWNk9rWmZSaXRJTEZiUGxORXVLSForbE9ZbUJCZgpkZDk4QWhkT1cwbFdYMmxMQ2JrSWUyMFdRVjdEMmxaVkJIdzg5YnRRdFA2cjNBOWtpQlVNMEd2WHYwNnZqallOCm5OZFlQeWZCODlYSmI1dVFybWlzd3pqTGEwY0pFb2ZOTzNCWG9KdDczdEhSbEEyQkc1TT0KLS0tLS1FTkQgQ0VSVElGSUNBVEUtLS0tLQo=
    client-key-data: LS0tLS1CRUdJTiBSU0EgUFJJVkFURSBLRVktLS0tLQpNSUlFcEFJQkFBS0NBUUVBM0E4dkxCSWZ2bzdkeGlNZEl1MUJZcHVaMHJuaEhhemhwU3d0Z25yTlpaSnZOdUNMCnpWMmdWcTYzRjlQQTlnb2lnNTFjUE9QVHUzTU9QbVVxT29nUlNCNGVnTDFTdkowUElod0NHTVJ3RXMzbnhsdjcKT0pWOHJlNHVBWldYdjV1QXhLUkZOUHEvZzhtWWpJZkFCeHFtSnRjc1pudWRTWlZucVMySzVYZlZBVjFzMmoxcwpra1E5UnpRTjVwNDNkSzVKa0dvODBmRXA1U25ybFU4bkQrVjVXM2duQ3dUVUZMM09EVHVQNVF2QUlCbTR6d3FJCjFtek82R2lsM1VhS2dwVXpPaFpMV21pajhGMU9XTzZ0UndKMkNSMm5LWU1KQUV3M0NGbGNPeEJRb01QeUtLQTYKay9yRnZKSi9iUFhUc2xvQnRPS1RidTc5enE3Tmsza0ZzeTNZYVFJREFRQUJBb0lCQUVlVEdNQzQxR0ppalhlWQpqQzJFQmJSUTcrTStXaDRRMFFPc0x0RTFxQURWZEI3aFFoZDEwR1RoUnVRVkY3bnU2Zkx0QjhjMlF2UEJKR3plCjhyRSsrSUFBYStOcnNMRndWQysvOUY2aDVlSDMzdDhCbytCdm5ySGp1a3NCb08zTllrQ1RQWDlSMzJDNS9VZ1kKRHpsQVRiSnNZaUNqTERGMnl0U1gzNGxyUkhqQWsvUTFLZ2VSL1lDbFZrMEd6NTdvUkxlZUhJM2JNeVoxczJ3Ywpway92QUJjYVZvZGtNRkVPTjJvTVBOUlZUWVd2S0d6VS82dVAxNGVGUmlva29SQ1dtRmVPeHk3QlptaW5ZWVdXCnhISWNVYzN4RjFPR3k5N29ON1A4SHJHMGpaV1o0UU5TRGZUOGNCVUZtR1FlWXVlcTU1eTdhYTMrN2hxdzFtVjEKa1MrSTBsRUNnWUVBN3JPWVN6R095M0tjWk5qeEdnMHBxYVVWNGczVzFYTGtqNFY5d1BpZGZLWVc1RlhPVWtJNwpTVVAxK1pMaHpNRTYvSDU5OTlzSnRIS2JDRHdsRWduNUF3c1AyNmk2NHdVekNkeFBreERsQ3Fia0craWtkOWxTCmI5aHVLUGJ5bFVWZDlYaGZYcTA5YTRTZWVJZHUyWGxNTWFqY0FiQTJzTFZLTjVMK1ZqVEdDZ2NDZ1lFQTdBRzgKMWJVYzlMU0VLMDZRU0o1dGtnbVBaSGhsYzJsZm5VYnErWGU2VWR0QTZTUndUVFJpQ2VZeURIMTRMeGhqZlhyTwphL3cxcTFxQjR0QUlCUGgrZlNvV2NzeFZXUWlYYnlKQnNLaTZFMldtR05Fbk5aSEQ4V2s3UDhoSDBwVUE3ZmxoClJSNUt4RGtpVWRFZ0Z1endPckt2VFMwbmdxU3J0L3hsZC9LS0xnOENnWUVBNlNTN1RENVVWWHhWS3Q4RjVzY1MKZVdNSUU2b211b2FrblZYcU5PL2ZpcDhDM3ZTOE4zWC9TQms4Q0E4aHdvYTlZVjcvRkhaNWJTay9LWFR0VkVndgpVbnljZVpTOUxQMm9FeXAvQW9yTzIyK1VmM25rSWpjK29JV0pXek9RVHFIWkd1Zitaby9ZM01wRXFqN0YwMTM1ClFNdklUMUhZcVJINU1ueTlOR0FKU2tNQ2dZQXJxZUtpODBBcE1lQ21Gb0h4UFladUYyQzFLT1UwSmhHdmluaGMKc1Q4Q2Qvc1paMGx6cnpaWU9JT1g5ZnF2VStiTVIxVTdlMHorcDk4UjlJVVhUbFl3bUFIUk9XYXM4ejc1SzdCVgptOUhaVnR3VkNWVWt2eFF1Yk5hdVpVS01mazdPdUcyN21QQWlQNlBPMlU4RGp0Q2ZPNEhkV0haUDRHRjR4N1NLCkxaQk5Gd0tCZ1FDeVlVRlVraFZqVzdWRU9mTXVBWTEzQTdRTlhUM2FqeURrWTVDVVZhUnJPd0RSUVNNa0QxUEIKNDVhQTZpUWdYUVVlNzdjWFBKNnRNcXh0WDlGQis2WVE4d1daVjlMRGhiRWtIN1NodGxQbFkwaTRTdFNwN3JWMgpQdGN1WmRZV0UzV0Zlc0NhRGdzb3V1TEhJNnlSaytubjd6dWlaeVZEME93cTI5bi9LdzkveUE9PQotLS0tLUVORCBSU0EgUFJJVkFURSBLRVktLS0tLQo=
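
With the hosts entry and kubeconfig in place, connectivity from the workstation can be verified:

kubectl cluster-info
kubectl get nodes -o wide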

Install the network add-on (flannel)

---
kind: Namespace
apiVersion: v1
metadata:
  name: kube-flannel
  labels:
    pod-security.kubernetes.io/enforce: privileged
---
kind: ClusterRole
apiVersion: rbac.authorization.k8s.io/v1
metadata:
  name: flannel
rules:
- apiGroups:
  - ""
  resources:
  - pods
  verbs:
  - get
- apiGroups:
  - ""
  resources:
  - nodes
  verbs:
  - list
  - watch
- apiGroups:
  - ""
  resources:
  - nodes/status
  verbs:
  - patch
---
kind: ClusterRoleBinding
apiVersion: rbac.authorization.k8s.io/v1
metadata:
  name: flannel
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: ClusterRole
  name: flannel
subjects:
- kind: ServiceAccount
  name: flannel
  namespace: kube-flannel
---
apiVersion: v1
kind: ServiceAccount
metadata:
  name: flannel
  namespace: kube-flannel
---
kind: ConfigMap
apiVersion: v1
metadata:
  name: kube-flannel-cfg
  namespace: kube-flannel
  labels:
    tier: node
    app: flannel
data:
  cni-conf.json: |
    {
      "name": "cbr0",
      "cniVersion": "0.3.1",
      "plugins": [
        {
          "type": "flannel",
          "delegate": {
            "hairpinMode": true,
            "isDefaultGateway": true
          }
        },
        {
          "type": "portmap",
          "capabilities": {
            "portMappings": true
          }
        }
      ]
    }
  net-conf.json: |
    {
      "Network": "10.244.0.0/16",
      "Backend": {
        "Type": "vxlan"
      }
    }
---
apiVersion: apps/v1
kind: DaemonSet
metadata:
  name: kube-flannel-ds
  namespace: kube-flannel
  labels:
    tier: node
    app: flannel
spec:
  selector:
    matchLabels:
      app: flannel
  template:
    metadata:
      labels:
        tier: node
        app: flannel
    spec:
      affinity:
        nodeAffinity:
          requiredDuringSchedulingIgnoredDuringExecution:
            nodeSelectorTerms:
            - matchExpressions:
              - key: kubernetes.io/os
                operator: In
                values:
                - linux
      hostNetwork: true
      priorityClassName: system-node-critical
      tolerations:
      - operator: Exists
        effect: NoSchedule
      serviceAccountName: flannel
      initContainers:
      - name: install-cni-plugin
       #image: flannelcni/flannel-cni-plugin:v1.1.0 for ppc64le and mips64le (dockerhub limitations may apply)
        image: docker.io/rancher/mirrored-flannelcni-flannel-cni-plugin:v1.1.0
        command:
        - cp
        args:
        - -f
        - /flannel
        - /opt/cni/bin/flannel
        volumeMounts:
        - name: cni-plugin
          mountPath: /opt/cni/bin
      - name: install-cni
       #image: flannelcni/flannel:v0.19.2 for ppc64le and mips64le (dockerhub limitations may apply)
        image: docker.io/rancher/mirrored-flannelcni-flannel:v0.19.2
        command:
        - cp
        args:
        - -f
        - /etc/kube-flannel/cni-conf.json
        - /etc/cni/net.d/10-flannel.conflist
        volumeMounts:
        - name: cni
          mountPath: /etc/cni/net.d
        - name: flannel-cfg
          mountPath: /etc/kube-flannel/
      containers:
      - name: kube-flannel
       #image: flannelcni/flannel:v0.19.2 for ppc64le and mips64le (dockerhub limitations may apply)
        image: docker.io/rancher/mirrored-flannelcni-flannel:v0.19.2
        command:
        - /opt/bin/flanneld
        args:
        - --ip-masq
        - --kube-subnet-mgr
        resources:
          requests:
            cpu: "100m"
            memory: "50Mi"
          limits:
            cpu: "100m"
            memory: "50Mi"
        securityContext:
          privileged: false
          capabilities:
            add: ["NET_ADMIN", "NET_RAW"]
        env:
        - name: POD_NAME
          valueFrom:
            fieldRef:
              fieldPath: metadata.name
        - name: POD_NAMESPACE
          valueFrom:
            fieldRef:
              fieldPath: metadata.namespace
        - name: EVENT_QUEUE_DEPTH
          value: "5000"
        volumeMounts:
        - name: run
          mountPath: /run/flannel
        - name: flannel-cfg
          mountPath: /etc/kube-flannel/
        - name: xtables-lock
          mountPath: /run/xtables.lock
      volumes:
      - name: run
        hostPath:
          path: /run/flannel
      - name: cni-plugin
        hostPath:
          path: /opt/cni/bin
      - name: cni
        hostPath:
          path: /etc/cni/net.d
      - name: flannel-cfg
        configMap:
          name: kube-flannel-cfg
      - name: xtables-lock
        hostPath:
          path: /run/xtables.lock
          type: FileOrCreate
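
Assuming the manifest above is saved as kube-flannel.yml (the filename is arbitrary), apply it and wait for the DaemonSet pods to become Ready:

kubectl apply -f kube-flannel.yml
kubectl -n kube-flannel get pods -w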

Remove the master taint

kubectl get nodes                                                           # list the nodes
kubectl taint nodes <node-name> node-role.kubernetes.io/master:NoSchedule-
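
For example, on a single-node cluster whose master is named k8s-master (a hypothetical name -- use whatever kubectl get nodes reports), removing the taint and confirming it is gone:

kubectl taint nodes k8s-master node-role.kubernetes.io/master:NoSchedule-
kubectl describe node k8s-master | grep Taints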


Install chaos-mesh

helm install chaos-mesh chaos-mesh/chaos-mesh --namespace=chaos-testing --set dashboard.securityMode=false --set controllerManager.chaosdSecurityMode=false --set chaosDaemon.env.DOCKER_API_VERSION=1.39
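
The command assumes the chaos-mesh chart repository has been added and the chaos-testing namespace exists; if not, a typical preparation is:

helm repo add chaos-mesh https://charts.chaos-mesh.org
helm repo update
kubectl create namespace chaos-testing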

Deploy a Selenium Grid browser cluster in k8s

hub.yaml

apiVersion: apps/v1
kind: Deployment
metadata:
  name: selenium-hub
  labels:
    name: selenium-hub
spec:
  replicas: 1
  selector:
    matchLabels:
      name: selenium-hub
  template:
    metadata:
      labels:
        name: selenium-hub
    spec:
      containers:
        - name: selenium-hub
          image: selenium/hub:4.0.0-rc-2-prerelease-20210923
          imagePullPolicy: IfNotPresent
          ports:
            - containerPort: 4444
            - containerPort: 4442
            - containerPort: 4443
            - containerPort: 5557
          env:
            - name: TZ
              value: "Asia/Shanghai"
          volumeMounts:
            - mountPath: "/etc/localtime"
              name: "host-time"
          livenessProbe:
            httpGet:
              path: /wd/hub/status   # Grid 4 status endpoint; /grid/console only existed in Grid 3
              port: 4444
            initialDelaySeconds: 30
            timeoutSeconds: 1
            periodSeconds: 5
            failureThreshold: 3
          readinessProbe:
            httpGet:
              path: /wd/hub/status   # Grid 4 status endpoint; /grid/console only existed in Grid 3
              port: 4444
            initialDelaySeconds: 30
            timeoutSeconds: 1
            periodSeconds: 5
            failureThreshold: 3
      volumes:
        - name: "host-time"
          hostPath:
            path: "/etc/localtime"
---
apiVersion: v1
kind: Service
metadata:
  name: selenium-hub
  labels:
    name: selenium-hub
spec:
  type: NodePort
  ports:
    - name: port1
      protocol: TCP
      port: 4442
      targetPort: 4442
    - name: port2
      protocol: TCP
      port: 4443
      targetPort: 4443
    - name: port3
      protocol: TCP
      port: 5557
      targetPort: 5557
    - port: 4444
      targetPort: 4444
      name: port0
      nodePort: 32757
  selector:
    name: selenium-hub
  sessionAffinity: None
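
A minimal way to roll out the hub and check that it is reachable (32757 is the NodePort defined above; <node-ip> is a placeholder for any node's address):

kubectl apply -f hub.yaml
kubectl get pods -l name=selenium-hub
curl http://<node-ip>:32757/wd/hub/status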

node.yaml

apiVersion: apps/v1
kind: Deployment
metadata:
  name: selenium-node-chrome
  labels:
    name: selenium-node-chrome
spec:
  replicas: 2
  selector:
    matchLabels:
      name: selenium-node-chrome
  template:
    metadata:
      labels:
        name: selenium-node-chrome
    spec:
      affinity:
        podAntiAffinity:
          requiredDuringSchedulingIgnoredDuringExecution:
            - topologyKey: kubernetes.io/hostname
              labelSelector:
                matchLabels:
                  name: selenium-node-chrome
      containers:
        - name: selenium-node-chrome
          image: selenium/node-chrome:4.0.0-rc-2-prerelease-20210923
          imagePullPolicy: IfNotPresent
          ports:
            - containerPort: 5900
            - containerPort: 5555   # default listening port of a Grid 4 node
          env:
            - name: SE_EVENT_BUS_HOST
              value: "selenium-hub"
            - name: SE_EVENT_BUS_PUBLISH_PORT
              value: "4442"
            - name: SE_EVENT_BUS_SUBSCRIBE_PORT
              value: "4443"
            - name: SE_NODE_MAX_SESSIONS
              value: "20"
            - name: SE_NODE_OVERRIDE_MAX_SESSIONS
              value: "true"
            - name: TZ
              value: "Asia/Shanghai"
          resources:
            requests:
              memory: "500Mi"
          volumeMounts:
            - mountPath: "/dev/shm"
              name: "dshm"
            - mountPath: "/etc/localtime"
              name: "host-time"
      volumes:
        - name: "dshm"
          hostPath:
            path: "/dev/shm"
        - name: "host-time"
          hostPath:
            path: "/etc/localtime"
---
apiVersion: v1
kind: Service
metadata:
  name: selenium-node-chrome
  labels:
    name: selenium-node-chrome
spec:
  type: NodePort
  ports:
    - port: 5900
      targetPort: 5900
      name: port0
      nodePort: 31002
  selector:
    name: selenium-node-chrome
  sessionAffinity: None
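
After applying node.yaml the Chrome nodes should register with the hub; a quick check (pod names and the <node-ip> placeholder depend on your cluster):

kubectl apply -f node.yaml
kubectl get pods -l name=selenium-node-chrome
curl http://<node-ip>:32757/status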