ICode9

精准搜索请尝试: 精确搜索
首页 > 其他分享> 文章详细

a30.ansible 生产实战案例 --基于kubeadm-v1.20安装kubernetes--Kubeadm升级

2022-03-19 09:02:54  阅读:140  来源: 互联网

标签:a30 name kubernetes -- server ansible k8s root yml


19.k8s升级

19.1 升级master

19.1.1 升级master

[root@ansible-server ansible]# mkdir -p roles/kubeadm-update-master/{tasks,vars}
[root@ansible-server ansible]# cd roles/kubeadm-update-master/
[root@ansible-server kubeadm-update-master]# ls
tasks  vars

[root@ansible-server kubeadm-update-master]# vim vars/main.yml 
# Target kubeadm/kubelet/kubectl version to upgrade the cluster to.
KUBEADM_VERSION: 1.20.15
# Local Harbor registry that mirrors the control-plane images.
HARBOR_DOMAIN: harbor.raymonds.cc
# Control-plane node IPs as they are named in the HAProxy backend "kubernetes-6443".
MASTER01: 172.31.3.101
MASTER02: 172.31.3.102
MASTER03: 172.31.3.103

[root@ansible-server kubeadm-update-master]# vim tasks/upgrade_master01.yml 
# Upgrade the first control-plane node: remove it from the HAProxy backend,
# upgrade the kubeadm/kubelet/kubectl packages, mirror the new control-plane
# images into Harbor, run "kubeadm upgrade apply", then re-enable the node.
- name: install CentOS or Rocky socat
  yum:
    name: socat
  when:
    - (ansible_distribution=="CentOS" or ansible_distribution=="Rocky")
    - inventory_hostname in groups.ha
- name: install Ubuntu socat
  apt:
    name: socat
    force: yes
  when:
    - ansible_distribution=="Ubuntu"
    - inventory_hostname in groups.ha
# FIX: the inner command must be single-quoted. The original nested double
# quotes ended the outer quoting early, so haproxy received a truncated
# "echo disable" command instead of the full directive.
- name: down master01
  shell:
    cmd: ssh -o StrictHostKeyChecking=no root@k8s-lb "echo 'disable server kubernetes-6443/{{ MASTER01 }}' | socat stdio /var/lib/haproxy/haproxy.sock"
  when:
    - ansible_hostname=="k8s-master01"
- name: install CentOS or Rocky kubeadm for master
  yum:
    name: kubelet-{{ KUBEADM_VERSION }},kubeadm-{{ KUBEADM_VERSION }},kubectl-{{ KUBEADM_VERSION }}
  when:
    - (ansible_distribution=="CentOS" or ansible_distribution=="Rocky")
    - ansible_hostname=="k8s-master01"
- name: install Ubuntu kubeadm for master
  apt:
    name: kubelet={{ KUBEADM_VERSION }}-00,kubeadm={{ KUBEADM_VERSION }}-00,kubectl={{ KUBEADM_VERSION }}-00
    force: yes
  when:
    - ansible_distribution=="Ubuntu"
    - ansible_hostname=="k8s-master01"
- name: restart kubelet
  systemd:
    name: kubelet
    state: restarted
    daemon_reload: yes
  when:
    - ansible_hostname=="k8s-master01"
# List the image names (registry prefix stripped) required by the target version.
- name: get kubeadm version
  shell:
    cmd: kubeadm config images list --kubernetes-version=v{{ KUBEADM_VERSION }} | awk -F "/"  '{print $NF}'
  register: KUBEADM_IMAGES_VERSION
  when:
    - ansible_hostname=="k8s-master01"
# Pull each image from the Aliyun mirror, retag it for Harbor and push it,
# so every node can later pull from the local registry.
- name: download kubeadm image for master01
  shell: |
    {% for i in KUBEADM_IMAGES_VERSION.stdout_lines %}
      docker pull registry.aliyuncs.com/google_containers/{{ i }}
      docker tag registry.aliyuncs.com/google_containers/{{ i }} {{ HARBOR_DOMAIN }}/google_containers/{{ i }}
      docker rmi registry.aliyuncs.com/google_containers/{{ i }}
      docker push {{ HARBOR_DOMAIN }}/google_containers/{{ i }}
    {% endfor %}
  when:
    - ansible_hostname=="k8s-master01"
# FIX: the here-doc terminator must not carry trailing whitespace; with
# "EOF " the delimiter never matches, the shell warns "here-document
# delimited by end-of-file" and "sleep 240s" is consumed as stdin instead
# of being executed.
- name: kubeadm upgrade
  shell:
    cmd: |
      kubeadm upgrade apply v{{ KUBEADM_VERSION }} <<EOF
      y
      EOF
      sleep 240s
  when:
    - ansible_hostname=="k8s-master01"
# Re-enable master01 in HAProxy (same single-quote fix as "down master01").
- name: up master01
  shell:
    cmd: ssh -o StrictHostKeyChecking=no root@k8s-lb "echo 'enable server kubernetes-6443/{{ MASTER01 }}' | socat stdio /var/lib/haproxy/haproxy.sock"
  when:
    - ansible_hostname=="k8s-master01"

[root@ansible-server kubeadm-update-master]# vim tasks/upgrade_master02.yml 
# Upgrade the second control-plane node: drain it from HAProxy, upgrade the
# packages, pull the (already mirrored) images from Harbor, apply the upgrade,
# then re-enable the node.
# FIX: the inner command must be single-quoted; the original nested double
# quotes truncated the command sent to the haproxy admin socket.
- name: down master02
  shell:
    cmd: ssh -o StrictHostKeyChecking=no root@k8s-lb "echo 'disable server kubernetes-6443/{{ MASTER02 }}' | socat stdio /var/lib/haproxy/haproxy.sock"
  when:
    - ansible_hostname=="k8s-master02"
- name: install CentOS or Rocky kubeadm for master
  yum:
    name: kubelet-{{ KUBEADM_VERSION }},kubeadm-{{ KUBEADM_VERSION }},kubectl-{{ KUBEADM_VERSION }}
  when:
    - (ansible_distribution=="CentOS" or ansible_distribution=="Rocky")
    - ansible_hostname=="k8s-master02"
- name: install Ubuntu kubeadm for master
  apt:
    name: kubelet={{ KUBEADM_VERSION }}-00,kubeadm={{ KUBEADM_VERSION }}-00,kubectl={{ KUBEADM_VERSION }}-00
    force: yes
  when:
    - ansible_distribution=="Ubuntu"
    - ansible_hostname=="k8s-master02"
- name: restart kubelet
  systemd:
    name: kubelet
    state: restarted
    daemon_reload: yes
  when:
    - ansible_hostname=="k8s-master02"
- name: get kubeadm version
  shell:
    cmd: kubeadm config images list --kubernetes-version=v{{ KUBEADM_VERSION }} | awk -F "/"  '{print $NF}'
  register: KUBEADM_IMAGES_VERSION
  when:
    - ansible_hostname=="k8s-master02"
# master01 already pushed the images to Harbor; only a pull is needed here.
- name: download kubeadm image for master02
  shell: |
    {% for i in KUBEADM_IMAGES_VERSION.stdout_lines %}
      docker pull {{ HARBOR_DOMAIN }}/google_containers/{{ i }}
    {% endfor %}
  when:
    - ansible_hostname=="k8s-master02"
# FIX: no trailing whitespace after the here-doc terminator "EOF", otherwise
# the delimiter never matches and "sleep 240s" is read as stdin, not executed.
- name: kubeadm upgrade
  shell:
    cmd: |
      kubeadm upgrade apply v{{ KUBEADM_VERSION }} <<EOF
      y
      EOF
      sleep 240s
  when:
    - ansible_hostname=="k8s-master02"
- name: up master02
  shell:
    cmd: ssh -o StrictHostKeyChecking=no root@k8s-lb "echo 'enable server kubernetes-6443/{{ MASTER02 }}' | socat stdio /var/lib/haproxy/haproxy.sock"
  when:
    - ansible_hostname=="k8s-master02"

[root@ansible-server kubeadm-update-master]# vim tasks/upgrade_master03.yml 
# Upgrade the third control-plane node: drain it from HAProxy, upgrade the
# packages, pull the mirrored images from Harbor, apply the upgrade, then
# re-enable the node.
# FIX: the inner command must be single-quoted; the original nested double
# quotes truncated the command sent to the haproxy admin socket.
- name: down master03
  shell:
    cmd: ssh -o StrictHostKeyChecking=no root@k8s-lb "echo 'disable server kubernetes-6443/{{ MASTER03 }}' | socat stdio /var/lib/haproxy/haproxy.sock"
  when:
    - ansible_hostname=="k8s-master03"
- name: install CentOS or Rocky kubeadm for master
  yum:
    name: kubelet-{{ KUBEADM_VERSION }},kubeadm-{{ KUBEADM_VERSION }},kubectl-{{ KUBEADM_VERSION }}
  when:
    - (ansible_distribution=="CentOS" or ansible_distribution=="Rocky")
    - ansible_hostname=="k8s-master03"
- name: install Ubuntu kubeadm for master
  apt:
    name: kubelet={{ KUBEADM_VERSION }}-00,kubeadm={{ KUBEADM_VERSION }}-00,kubectl={{ KUBEADM_VERSION }}-00
    force: yes
  when:
    - ansible_distribution=="Ubuntu"
    - ansible_hostname=="k8s-master03"
- name: restart kubelet
  systemd:
    name: kubelet
    state: restarted
    daemon_reload: yes
  when:
    - ansible_hostname=="k8s-master03"
- name: get kubeadm version
  shell:
    cmd: kubeadm config images list --kubernetes-version=v{{ KUBEADM_VERSION }} | awk -F "/"  '{print $NF}'
  register: KUBEADM_IMAGES_VERSION
  when:
    - ansible_hostname=="k8s-master03"
# master01 already pushed the images to Harbor; only a pull is needed here.
- name: download kubeadm image for master03
  shell: |
    {% for i in KUBEADM_IMAGES_VERSION.stdout_lines %}
      docker pull {{ HARBOR_DOMAIN }}/google_containers/{{ i }}
    {% endfor %}
  when:
    - ansible_hostname=="k8s-master03"
# FIX: no trailing whitespace after the here-doc terminator "EOF", otherwise
# the delimiter never matches and "sleep 240s" is read as stdin, not executed.
- name: kubeadm upgrade
  shell:
    cmd: |
      kubeadm upgrade apply v{{ KUBEADM_VERSION }} <<EOF
      y
      EOF
      sleep 240s
  when:
    - ansible_hostname=="k8s-master03"
- name: up master03
  shell:
    cmd: ssh -o StrictHostKeyChecking=no root@k8s-lb "echo 'enable server kubernetes-6443/{{ MASTER03 }}' | socat stdio /var/lib/haproxy/haproxy.sock"
  when:
    - ansible_hostname=="k8s-master03"

[root@ansible-server kubeadm-update-master]# vim tasks/main.yml 
# Upgrade the control-plane nodes one at a time, in order.
# NOTE(review): bare "include" is deprecated in newer Ansible releases;
# "include_tasks" is the modern equivalent -- confirm the target Ansible version.
- include: upgrade_master01.yml
- include: upgrade_master02.yml
- include: upgrade_master03.yml

[root@ansible-server kubeadm-update-master]# cd ../../
[root@ansible-server ansible]# tree roles/kubeadm-update-master/
roles/kubeadm-update-master/
├── tasks
│   ├── main.yml
│   ├── upgrade_master01.yml
│   ├── upgrade_master02.yml
│   └── upgrade_master03.yml
└── vars
    └── main.yml

2 directories, 5 files

[root@ansible-server ansible]# vim kubeadm_update_master_role.yml
---
- hosts: k8s_cluster:ha

  roles:
    - role: kubeadm-update-master

[root@ansible-server ansible]# ansible-playbook kubeadm_update_master_role.yml 

19.1.2 验证master

[root@k8s-master01 ~]# kubectl get nodes
NAME                         STATUS   ROLES                  AGE   VERSION
k8s-master01                 Ready    control-plane,master   18h   v1.20.15
k8s-master02.example.local   Ready    control-plane,master   18h   v1.20.15
k8s-master03.example.local   Ready    control-plane,master   18h   v1.20.15
k8s-node01.example.local     Ready    <none>                 18h   v1.20.14
k8s-node02.example.local     Ready    <none>                 18h   v1.20.14
k8s-node03.example.local     Ready    <none>                 18h   v1.20.14

19.2 升级calico

[root@ansible-server ansible]# mkdir -p roles/calico-update/{tasks,vars,templates}
[root@ansible-server ansible]# cd roles/calico-update
[root@ansible-server calico-update]# ls
tasks  templates  vars

[root@ansible-server calico-update]# vim vars/main.yml
# Local Harbor registry that mirrors the calico images.
HARBOR_DOMAIN: harbor.raymonds.cc
# Control-plane node IPs as named in the HAProxy backend "kubernetes-6443".
MASTER01: 172.31.3.101
MASTER02: 172.31.3.102
MASTER03: 172.31.3.103

[root@ansible-server calico-update]# wget https://docs.projectcalico.org/manifests/calico-etcd.yaml -O templates/calico-etcd.yaml.j2

[root@ansible-server calico-update]# vim templates/calico-etcd.yaml.j2
...
spec:
  selector:
    matchLabels:
      k8s-app: calico-node
  updateStrategy:
    type: OnDelete #修改这里,calico不会滚动更新,只有重启了kubelet,才会更新
  template:
    metadata:
      labels:
        k8s-app: calico-node
...

#修改下面内容
[root@ansible-server calico-update]# grep "etcd_endpoints:.*" templates/calico-etcd.yaml.j2 
  etcd_endpoints: "http://<ETCD_IP>:<ETCD_PORT>"

[root@ansible-server calico-update]# sed -i 's#etcd_endpoints: "http://<ETCD_IP>:<ETCD_PORT>"#etcd_endpoints: "{% for i in groups.master %}https://{{ hostvars[i].ansible_default_ipv4.address }}:2379{% if not loop.last %},{% endif %}{% endfor %}"#g' templates/calico-etcd.yaml.j2  

[root@ansible-server calico-update]# grep "etcd_endpoints:.*" templates/calico-etcd.yaml.j2
  etcd_endpoints: "{% for i in groups.master %}https://{{ hostvars[i].ansible_default_ipv4.address }}:2379{% if not loop.last %},{% endif %}{% endfor %}"	

[root@ansible-server calico-update]# vim tasks/calico_file.yml
# Render the calico manifest template onto master01 only (the etcd endpoint
# list in the template is filled in from the "master" group's facts).
- name: copy calico-etcd.yaml file
  template:
    src: calico-etcd.yaml.j2
    dest: /root/calico-etcd.yaml
  when:
    - ansible_hostname=="k8s-master01"

[root@ansible-server calico-update]# vim tasks/config.yml
# Patch /root/calico-etcd.yaml in place on master01:
#   1. embed the base64-encoded etcd client key/cert/CA into the Secret,
#   2. point the calico config at the mounted /calico-secrets paths,
#   3. uncomment CALICO_IPV4POOL_CIDR and fill in the cluster's pod CIDR,
#   4. re-point all image references at the local Harbor registry.
# All tasks run only on k8s-master01, where the rendered manifest lives.
- name: get ETCD_KEY key
  shell:
    cmd: cat /etc/kubernetes/pki/etcd/server.key | base64 | tr -d '\n'
  register: ETCD_KEY
  when:
    - ansible_hostname=="k8s-master01"
# The stock manifest ships "# etcd-key: null"; uncomment and fill it in.
- name: Modify the ".*etcd-key:.*" line
  replace:
    path: /root/calico-etcd.yaml
    regexp: '# (etcd-key:) null'
    replace: '\1 {{ ETCD_KEY.stdout }}'
  when:
    - ansible_hostname=="k8s-master01"
- name: get ETCD_CERT key
  shell:
    cmd: cat /etc/kubernetes/pki/etcd/server.crt | base64 | tr -d '\n'
  register: ETCD_CERT
  when:
    - ansible_hostname=="k8s-master01"
- name: Modify the ".*etcd-cert:.*" line
  replace:
    path: /root/calico-etcd.yaml
    regexp: '# (etcd-cert:) null'
    replace: '\1 {{ ETCD_CERT.stdout }}'
  when:
    - ansible_hostname=="k8s-master01"
- name: get ETCD_CA key
  shell:
    cmd: cat /etc/kubernetes/pki/etcd/ca.crt | base64 | tr -d '\n'
  when:
    - ansible_hostname=="k8s-master01"
  register: ETCD_CA
- name: Modify the ".*etcd-ca:.*" line
  replace:
    path: /root/calico-etcd.yaml
    regexp: '# (etcd-ca:) null'
    replace: '\1 {{ ETCD_CA.stdout }}'
  when:
    - ansible_hostname=="k8s-master01"
# Point the ConfigMap entries at the files mounted from the Secret above.
- name: Modify the ".*etcd_ca:.*" line
  replace:
    path: /root/calico-etcd.yaml
    regexp: '(etcd_ca:) ""'
    replace: '\1 "/calico-secrets/etcd-ca"'
  when:
    - ansible_hostname=="k8s-master01"
- name: Modify the ".*etcd_cert:.*" line
  replace:
    path: /root/calico-etcd.yaml
    regexp: '(etcd_cert:) ""'
    replace: '\1 "/calico-secrets/etcd-cert"'
  when:
    - ansible_hostname=="k8s-master01"
- name: Modify the ".*etcd_key:.*" line
  replace:
    path: /root/calico-etcd.yaml
    regexp: '(etcd_key:) ""'
    replace: '\1 "/calico-secrets/etcd-key"'
  when:
    - ansible_hostname=="k8s-master01"
- name: Modify the ".*CALICO_IPV4POOL_CIDR.*" line
  replace:
    path: /root/calico-etcd.yaml
    regexp: '# (- name: CALICO_IPV4POOL_CIDR)'
    replace: '\1'
  when:
    - ansible_hostname=="k8s-master01"
# Read the pod CIDR from the kube-controller-manager static pod arguments.
- name: get POD_SUBNET
  shell:
    cmd: cat /etc/kubernetes/manifests/kube-controller-manager.yaml | grep cluster-cidr= | awk -F= '{print $NF}'
  register: POD_SUBNET
  when:
    - ansible_hostname=="k8s-master01"
- name: Modify the ".*192.168.0.0.*" line
  replace:
    path: /root/calico-etcd.yaml
    regexp: '#   (value:) "192.168.0.0/16"'
    replace: '  \1 "{{ POD_SUBNET.stdout }}"'
  when:
    - ansible_hostname=="k8s-master01"
- name: Modify the "image:" line
  replace:
    path: /root/calico-etcd.yaml
    regexp: '(.*image:) docker.io/calico(/.*)'
    replace: '\1 {{ HARBOR_DOMAIN }}/google_containers\2'
  when:
    - ansible_hostname=="k8s-master01"

[root@ansible-server calico-update]# vim tasks/download_images.yml
# Extract the calico image names from the patched manifest, pull them from a
# public mirror, retag them for Harbor and push, so the cluster nodes can
# pull from the local registry.
- name: get calico version
  shell:
    chdir: /root
    cmd: awk -F "/"  '/image:/{print $NF}' calico-etcd.yaml
  register: CALICO_VERSION
  when:
    - ansible_hostname=="k8s-master01"
- name: download calico image
  shell: |
    {% for i in CALICO_VERSION.stdout_lines %}
      docker pull registry.cn-beijing.aliyuncs.com/raymond9/{{ i }}
      docker tag registry.cn-beijing.aliyuncs.com/raymond9/{{ i }} {{ HARBOR_DOMAIN }}/google_containers/{{ i }}
      docker rmi registry.cn-beijing.aliyuncs.com/raymond9/{{ i }}
      docker push {{ HARBOR_DOMAIN }}/google_containers/{{ i }}
    {% endfor %}
  when:
    - ansible_hostname=="k8s-master01"

[root@ansible-server calico-update]# vim tasks/install_calico.yml
# Apply the patched calico manifest from master01 using the admin kubeconfig.
- name: install calico
  shell:
    chdir: /root
    cmd: "kubectl --kubeconfig=/etc/kubernetes/admin.conf apply -f calico-etcd.yaml"
  when:
    - ansible_hostname=="k8s-master01"

[root@ansible-server calico-update]# vim tasks/delete_master01_calico_container.yml 
# Restart master01's calico-node pod. The DaemonSet updateStrategy was set to
# OnDelete, so the pod must be deleted manually to pick up the new image; the
# node is taken out of HAProxy while its pod restarts.
# FIX: the inner command must be single-quoted -- the original nested double
# quotes truncated the command sent to the haproxy admin socket.
- name: down master01
  shell:
    cmd: ssh -o StrictHostKeyChecking=no root@k8s-lb "echo 'disable server kubernetes-6443/{{ MASTER01 }}' | socat stdio /var/lib/haproxy/haproxy.sock"
  when:
    - ansible_hostname=="k8s-master01"
- name: get calico container
  shell:
    cmd: kubectl --kubeconfig=/etc/kubernetes/admin.conf get pod -n kube-system -o wide|grep calico |grep master01 |awk -F " " '{print $1}'
  register: CALICO_CONTAINER
  when:
    - ansible_hostname=="k8s-master01"
- name: delete calico container
  shell: |
    kubectl --kubeconfig=/etc/kubernetes/admin.conf delete pod {{ CALICO_CONTAINER.stdout }} -n kube-system
    sleep 30s
  when:
    - ansible_hostname=="k8s-master01"
- name: up master01
  shell:
    cmd: ssh -o StrictHostKeyChecking=no root@k8s-lb "echo 'enable server kubernetes-6443/{{ MASTER01 }}' | socat stdio /var/lib/haproxy/haproxy.sock"
  when:
    - ansible_hostname=="k8s-master01"

[root@ansible-server calico-update]# vim tasks/delete_master02_calico_container.yml 
# Restart master02's calico-node pod (updateStrategy OnDelete); all tasks
# still execute from master01, which drives HAProxy and kubectl.
# FIX: the inner command must be single-quoted -- the original nested double
# quotes truncated the command sent to the haproxy admin socket.
- name: down master02
  shell:
    cmd: ssh -o StrictHostKeyChecking=no root@k8s-lb "echo 'disable server kubernetes-6443/{{ MASTER02 }}' | socat stdio /var/lib/haproxy/haproxy.sock"
  when:
    - ansible_hostname=="k8s-master01"
- name: get calico container
  shell:
    cmd: kubectl --kubeconfig=/etc/kubernetes/admin.conf get pod -n kube-system -o wide|grep calico |grep master02 |awk -F " " '{print $1}'
  register: CALICO_CONTAINER
  when:
    - ansible_hostname=="k8s-master01"
- name: delete calico container
  shell: |
    kubectl --kubeconfig=/etc/kubernetes/admin.conf delete pod {{ CALICO_CONTAINER.stdout }} -n kube-system
    sleep 30s
  when:
    - ansible_hostname=="k8s-master01"
- name: up master02
  shell:
    cmd: ssh -o StrictHostKeyChecking=no root@k8s-lb "echo 'enable server kubernetes-6443/{{ MASTER02 }}' | socat stdio /var/lib/haproxy/haproxy.sock"
  when:
    - ansible_hostname=="k8s-master01"

[root@ansible-server calico-update]# vim tasks/delete_master03_calico_container.yml 
# Restart master03's calico-node pod (updateStrategy OnDelete); all tasks
# still execute from master01, which drives HAProxy and kubectl.
# FIX: the inner command must be single-quoted -- the original nested double
# quotes truncated the command sent to the haproxy admin socket.
- name: down master03
  shell:
    cmd: ssh -o StrictHostKeyChecking=no root@k8s-lb "echo 'disable server kubernetes-6443/{{ MASTER03 }}' | socat stdio /var/lib/haproxy/haproxy.sock"
  when:
    - ansible_hostname=="k8s-master01"
# Argument order normalized to match the master01/master02 task files.
- name: get calico container
  shell:
    cmd: kubectl --kubeconfig=/etc/kubernetes/admin.conf get pod -n kube-system -o wide|grep calico |grep master03 |awk -F " " '{print $1}'
  register: CALICO_CONTAINER
  when:
    - ansible_hostname=="k8s-master01"
- name: delete calico container
  shell: |
    kubectl --kubeconfig=/etc/kubernetes/admin.conf delete pod {{ CALICO_CONTAINER.stdout }} -n kube-system
    sleep 30s
  when:
    - ansible_hostname=="k8s-master01"
- name: up master03
  shell:
    cmd: ssh -o StrictHostKeyChecking=no root@k8s-lb "echo 'enable server kubernetes-6443/{{ MASTER03 }}' | socat stdio /var/lib/haproxy/haproxy.sock"
  when:
    - ansible_hostname=="k8s-master01"

[root@ansible-server calico-update]# vim tasks/main.yml
# Pipeline: render manifest -> patch secrets/CIDR/images -> mirror images ->
# apply manifest -> restart each master's calico-node pod one at a time.
- include: calico_file.yml
- include: config.yml
- include: download_images.yml
- include: install_calico.yml
- include: delete_master01_calico_container.yml
- include: delete_master02_calico_container.yml
- include: delete_master03_calico_container.yml

[root@ansible-server calico-update]# cd ../../
[root@ansible-server ansible]# tree roles/calico-update/
roles/calico-update/
├── tasks
│   ├── calico_file.yml
│   ├── config.yml
│   ├── delete_master01_calico_container.yml
│   ├── delete_master02_calico_container.yml
│   ├── delete_master03_calico_container.yml
│   ├── download_images.yml
│   ├── install_calico.yml
│   └── main.yml
├── templates
│   └── calico-etcd.yaml.j2
└── vars
    └── main.yml

3 directories, 10 files

[root@ansible-server ansible]# vim calico_update_role.yml 
---
- hosts: master

  roles:
    - role: calico-update

[root@ansible-server ansible]# ansible-playbook calico_update_role.yml

19.3 node

19.3.1 升级node

[root@ansible-server ansible]# mkdir -p roles/kubeadm-update-node/{tasks,vars}
[root@ansible-server ansible]# cd roles/kubeadm-update-node/
[root@ansible-server kubeadm-update-node]# ls
tasks  vars

[root@ansible-server kubeadm-update-node]# vim vars/main.yml
KUBEADM_VERSION: 1.20.15

[root@ansible-server kubeadm-update-node]# vim tasks/upgrade_node01.yml 
# Upgrade node01: drain it (driven from master01), upgrade kubelet/kubeadm on
# the node itself, restart kubelet, recreate its calico-node pod (the
# DaemonSet updateStrategy is OnDelete), then uncordon.
- name: drain node01
  shell:
    cmd: kubectl --kubeconfig=/etc/kubernetes/admin.conf drain k8s-node01.example.local --delete-emptydir-data --force --ignore-daemonsets
  when:
    - ansible_hostname=="k8s-master01"
- name: install CentOS or Rocky kubeadm for node
  yum:
    name: kubelet-{{ KUBEADM_VERSION }},kubeadm-{{ KUBEADM_VERSION }}
  when:
    - (ansible_distribution=="CentOS" or ansible_distribution=="Rocky")
    - ansible_hostname=="k8s-node01"
- name: install Ubuntu kubeadm for node
  apt:
    name: kubelet={{ KUBEADM_VERSION }}-00,kubeadm={{ KUBEADM_VERSION }}-00
    force: yes
  when:
    - ansible_distribution=="Ubuntu"
    - ansible_hostname=="k8s-node01"
- name: restart kubelet
  systemd:
    name: kubelet
    state: restarted
    daemon_reload: yes
  when:
    - ansible_hostname=="k8s-node01"
# tail -n1 keeps only the last match in case several calico pods list node01.
- name: get calico container
  shell:
    cmd: kubectl --kubeconfig=/etc/kubernetes/admin.conf get pod -n kube-system -o wide|grep calico |grep node01 |tail -n1|awk -F " " '{print $1}'
  register: CALICO_CONTAINER
  when:
    - ansible_hostname=="k8s-master01"
- name: delete calico container
  shell: |
    kubectl --kubeconfig=/etc/kubernetes/admin.conf delete pod {{ CALICO_CONTAINER.stdout }} -n kube-system
    sleep 60s
  when:
    - ansible_hostname=="k8s-master01"
- name: uncordon node01
  shell:
    cmd: kubectl --kubeconfig=/etc/kubernetes/admin.conf uncordon k8s-node01.example.local
  when:
    - ansible_hostname=="k8s-master01"

[root@ansible-server kubeadm-update-node]# vim tasks/upgrade_node02.yml 
# Upgrade node02: same sequence as node01 -- drain (from master01), upgrade
# packages on the node, restart kubelet, recreate its calico-node pod
# (OnDelete strategy), then uncordon.
- name: drain node02
  shell:
    cmd: kubectl --kubeconfig=/etc/kubernetes/admin.conf drain k8s-node02.example.local --delete-emptydir-data --force --ignore-daemonsets
  when:
    - ansible_hostname=="k8s-master01"
- name: install CentOS or Rocky kubeadm for node
  yum:
    name: kubelet-{{ KUBEADM_VERSION }},kubeadm-{{ KUBEADM_VERSION }}
  when:
    - (ansible_distribution=="CentOS" or ansible_distribution=="Rocky")
    - ansible_hostname=="k8s-node02"
- name: install Ubuntu kubeadm for node
  apt:
    name: kubelet={{ KUBEADM_VERSION }}-00,kubeadm={{ KUBEADM_VERSION }}-00
    force: yes
  when:
    - ansible_distribution=="Ubuntu"
    - ansible_hostname=="k8s-node02"
- name: restart kubelet
  systemd:
    name: kubelet
    state: restarted
    daemon_reload: yes
  when:
    - ansible_hostname=="k8s-node02"
- name: get calico container
  shell:
    cmd: kubectl --kubeconfig=/etc/kubernetes/admin.conf get pod -n kube-system -o wide|grep calico |grep node02 |tail -n1|awk -F " " '{print $1}'
  register: CALICO_CONTAINER
  when:
    - ansible_hostname=="k8s-master01"
- name: delete calico container
  shell: |
    kubectl --kubeconfig=/etc/kubernetes/admin.conf delete pod {{ CALICO_CONTAINER.stdout }} -n kube-system
    sleep 60s
  when:
    - ansible_hostname=="k8s-master01"
- name: uncordon node02
  shell:
    cmd: kubectl --kubeconfig=/etc/kubernetes/admin.conf uncordon k8s-node02.example.local
  when:
    - ansible_hostname=="k8s-master01"

[root@ansible-server kubeadm-update-node]# vim tasks/upgrade_node03.yml 
# Upgrade node03: same sequence as node01/node02 -- drain (from master01),
# upgrade packages on the node, restart kubelet, recreate its calico-node pod
# (OnDelete strategy), then uncordon.
- name: drain node03
  shell:
    cmd: kubectl --kubeconfig=/etc/kubernetes/admin.conf drain k8s-node03.example.local --delete-emptydir-data --force --ignore-daemonsets
  when:
    - ansible_hostname=="k8s-master01"
- name: install CentOS or Rocky kubeadm for node
  yum:
    name: kubelet-{{ KUBEADM_VERSION }},kubeadm-{{ KUBEADM_VERSION }}
  when:
    - (ansible_distribution=="CentOS" or ansible_distribution=="Rocky")
    - ansible_hostname=="k8s-node03"
- name: install Ubuntu kubeadm for node
  apt:
    name: kubelet={{ KUBEADM_VERSION }}-00,kubeadm={{ KUBEADM_VERSION }}-00
    force: yes
  when:
    - ansible_distribution=="Ubuntu"
    - ansible_hostname=="k8s-node03"
- name: restart kubelet
  systemd:
    name: kubelet
    state: restarted
    daemon_reload: yes
  when:
    - ansible_hostname=="k8s-node03"
- name: get calico container
  shell:
    cmd: kubectl --kubeconfig=/etc/kubernetes/admin.conf get pod -n kube-system -o wide|grep calico |grep node03 |tail -n1|awk -F " " '{print $1}'
  register: CALICO_CONTAINER
  when:
    - ansible_hostname=="k8s-master01"
- name: delete calico container
  shell: |
    kubectl --kubeconfig=/etc/kubernetes/admin.conf delete pod {{ CALICO_CONTAINER.stdout }} -n kube-system
    sleep 60s
  when:
    - ansible_hostname=="k8s-master01"
- name: uncordon node03
  shell:
    cmd: kubectl --kubeconfig=/etc/kubernetes/admin.conf uncordon k8s-node03.example.local
  when:
    - ansible_hostname=="k8s-master01"

[root@ansible-server kubeadm-update-node]# vim tasks/main.yml 
# Upgrade the worker nodes one at a time, in order.
- include: upgrade_node01.yml
- include: upgrade_node02.yml
- include: upgrade_node03.yml

[root@ansible-server kubeadm-update-node]# cd ../../
[root@ansible-server ansible]# tree roles/kubeadm-update-node/
roles/kubeadm-update-node/
├── tasks
│   ├── main.yml
│   ├── upgrade_node01.yml
│   ├── upgrade_node02.yml
│   └── upgrade_node03.yml
└── vars
    └── main.yml

2 directories, 5 files

[root@ansible-server ansible]# vim kubeadm_update_node_role.yml 
---
- hosts: k8s_cluster

  roles:
    - role: kubeadm-update-node

[root@ansible-server ansible]# ansible-playbook kubeadm_update_node_role.yml

19.3.2 验证node

[root@k8s-master01 ~]# kubectl get nodes
NAME                         STATUS   ROLES                  AGE   VERSION
k8s-master01                 Ready    control-plane,master   19h   v1.20.15
k8s-master02.example.local   Ready    control-plane,master   19h   v1.20.15
k8s-master03.example.local   Ready    control-plane,master   19h   v1.20.15
k8s-node01.example.local     Ready    <none>                 19h   v1.20.15
k8s-node02.example.local     Ready    <none>                 19h   v1.20.15
k8s-node03.example.local     Ready    <none>                 19h   v1.20.15

19.4 metrics

19.4.1 升级metrics

[root@ansible-server ansible]# mkdir -p roles/metrics-update/{files,vars,tasks}
[root@ansible-server ansible]# cd roles/metrics-update/
[root@ansible-server metrics-update]# ls
files  tasks  vars

[root@ansible-server metrics-update]# wget https://github.com/kubernetes-sigs/metrics-server/releases/latest/download/components.yaml -P files/

[root@ansible-server metrics-update]# vim vars/main.yml
HARBOR_DOMAIN: harbor.raymonds.cc

[root@ansible-server metrics-update]# wget https://github.com/kubernetes-sigs/metrics-server/releases/latest/download/components.yaml -P files/

[root@ansible-server metrics-update]# vim files/components.yaml
...
    spec:
      containers:
      - args:
        - --cert-dir=/tmp
        - --secure-port=4443
        - --kubelet-preferred-address-types=InternalIP,ExternalIP,Hostname
        - --kubelet-use-node-status-port
        - --metric-resolution=15s
        - --kubelet-insecure-tls #添加这行 
...

[root@ansible-server metrics-update]# vim tasks/metrics_file.yml
# Ship the locally patched metrics-server manifest to the target host.
- name: copy components.yaml file
  copy:
    src: components.yaml
    dest: /root/components.yaml

[root@ansible-server metrics-update]# vim tasks/config.yml
# Re-point the metrics-server image from k8s.gcr.io to the local Harbor mirror.
- name: Modify the "image:" line
  replace:
    path: /root/components.yaml
    regexp: '(.*image:) k8s.gcr.io/metrics-server(/.*)'
    replace: '\1 {{ HARBOR_DOMAIN }}/google_containers\2'

[root@ansible-server metrics-update]# vim tasks/download_images.yml
# Extract the image name from the manifest, pull it via the Aliyun mirror,
# retag for Harbor and push.
- name: get metrics version
  shell:
    chdir: /root
    cmd: awk -F "/"  '/image:/{print $NF}' components.yaml
  register: METRICS_VERSION
- name: download metrics image
  shell: |
    {% for i in METRICS_VERSION.stdout_lines %}
      docker pull registry.aliyuncs.com/google_containers/{{ i }}
      docker tag registry.aliyuncs.com/google_containers/{{ i }} {{ HARBOR_DOMAIN }}/google_containers/{{ i }}
      docker rmi registry.aliyuncs.com/google_containers/{{ i }}
      docker push {{ HARBOR_DOMAIN }}/google_containers/{{ i }}
    {% endfor %}

[root@ansible-server metrics-update]# vim tasks/install_metrics.yml
# Apply the patched metrics-server manifest with the admin kubeconfig.
- name: install metrics
  shell:
    chdir: /root
    cmd: "kubectl --kubeconfig=/etc/kubernetes/admin.conf apply -f components.yaml"

[root@ansible-server metrics-update]# vim tasks/main.yml
# Pipeline: copy manifest -> patch image reference -> mirror image -> apply.
- include: metrics_file.yml
- include: config.yml
- include: download_images.yml
- include: install_metrics.yml

[root@ansible-server metrics-update]# cd ../../
[root@ansible-server ansible]# tree roles/metrics-update/
roles/metrics-update/
├── files
│   └── components.yaml
├── tasks
│   ├── config.yml
│   ├── download_images.yml
│   ├── install_metrics.yml
│   ├── main.yml
│   └── metrics_file.yml
└── vars
    └── main.yml

3 directories, 7 files

[root@ansible-server ansible]# vim metrics_update_role.yml 
---
- hosts: master01

  roles:
    - role: metrics-update

[root@ansible-server ansible]# ansible-playbook metrics_update_role.yml

19.4.2 验证metrics

[root@k8s-master01 ~]# kubectl get pod -A|grep metrics
kube-system            metrics-server-5b7c76b46c-nmqs9                      1/1     Running   0          31s

[root@k8s-master01 ~]# kubectl top node
NAME                         CPU(cores)   CPU%   MEMORY(bytes)   MEMORY%   
k8s-master01                 179m         8%     1426Mi          37%       
k8s-master02.example.local   137m         6%     1240Mi          32%       
k8s-master03.example.local   147m         7%     1299Mi          34%       
k8s-node01.example.local     84m          4%     883Mi           23%       
k8s-node02.example.local     73m          3%     898Mi           23%       
k8s-node03.example.local     72m          3%     915Mi           23% 

19.5 dashboard

19.5.1 升级dashboard

[root@ansible-server ansible]# mkdir -p roles/dashboard-update/{files,templates,vars,tasks}
[root@ansible-server ansible]# cd roles/dashboard-update/
[root@ansible-server dashboard-update]# ls
files  tasks  templates  vars

[root@ansible-server dashboard-update]# vim files/admin.yaml 
# ServiceAccount "admin-user" bound to the built-in cluster-admin ClusterRole;
# its service-account token is used to log in to the dashboard.
apiVersion: v1
kind: ServiceAccount
metadata:
  name: admin-user
  namespace: kube-system
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding 
metadata: 
  name: admin-user
  annotations:
    rbac.authorization.kubernetes.io/autoupdate: "true"
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: ClusterRole
  name: cluster-admin
subjects:
- kind: ServiceAccount
  name: admin-user
  namespace: kube-system

[root@ansible-server dashboard-update]# wget https://raw.githubusercontent.com/kubernetes/dashboard/v2.4.0/aio/deploy/recommended.yaml -O templates/recommended.yaml.j2

[root@ansible-server dashboard-update]# vim templates/recommended.yaml.j2
...
kind: Service
apiVersion: v1
metadata:
  labels:
    k8s-app: kubernetes-dashboard
  name: kubernetes-dashboard
  namespace: kubernetes-dashboard
spec:
  type: NodePort #添加这行
  ports:
    - port: 443
      targetPort: 8443
      nodePort: {{ NODEPORT }} #添加这行
  selector:
    k8s-app: kubernetes-dashboard
...

[root@ansible-server dashboard-update]# vim vars/main.yml
HARBOR_DOMAIN: harbor.raymonds.cc
NODEPORT: 30005

[root@ansible-server dashboard-update]# vim tasks/dashboard_file.yml
# Render the dashboard manifest (NODEPORT substituted) and copy the RBAC file.
- name: copy recommended.yaml file
  template:
    src: recommended.yaml.j2
    dest: /root/recommended.yaml
- name: copy admin.yaml file
  copy:
    src: admin.yaml
    dest: /root/admin.yaml

[root@ansible-server dashboard-update]# vim tasks/config.yml
# Re-point the dashboard images from Docker Hub (kubernetesui) to Harbor.
- name: Modify the "image:" line
  replace:
    path: /root/recommended.yaml
    regexp: '(.*image:) kubernetesui(/.*)'
    replace: '\1 {{ HARBOR_DOMAIN }}/google_containers\2'

[root@ansible-server dashboard-update]# vim tasks/download_images.yml
# Extract the dashboard image names from the manifest, pull them from Docker
# Hub, retag for Harbor and push.
- name: get dashboard version
  shell:
    chdir: /root
    cmd: awk -F "/"  '/image:/{print $NF}' recommended.yaml
  register: DASHBOARD_VERSION
- name: download dashboard image
  shell: |
    {% for i in DASHBOARD_VERSION.stdout_lines %}
      docker pull kubernetesui/{{ i }}
      docker tag kubernetesui/{{ i }} {{ HARBOR_DOMAIN }}/google_containers/{{ i }}
      docker rmi kubernetesui/{{ i }}
      docker push {{ HARBOR_DOMAIN }}/google_containers/{{ i }}
    {% endfor %}

[root@ansible-server dashboard-update]# vim tasks/install_dashboard.yml
# Apply the dashboard manifest and the admin-user RBAC in one kubectl call.
- name: install dashboard
  shell:
    chdir: /root
    cmd: "kubectl --kubeconfig=/etc/kubernetes/admin.conf apply -f recommended.yaml -f admin.yaml"

[root@ansible-server dashboard-update]# vim tasks/main.yml
# Pipeline: render/copy manifests -> patch image references -> mirror images -> apply.
- include: dashboard_file.yml
- include: config.yml
- include: download_images.yml
- include: install_dashboard.yml

[root@ansible-server dashboard-update]# cd ../../
[root@ansible-server ansible]# tree roles/dashboard-update/
roles/dashboard-update/
├── files
│   └── admin.yaml
├── tasks
│   ├── config.yml
│   ├── dashboard_file.yml
│   ├── download_images.yml
│   ├── install_dashboard.yml
│   └── main.yml
├── templates
│   └── recommended.yaml.j2
└── vars
    └── main.yml

4 directories, 8 files

[root@ansible-server ansible]# vim dashboard_update_role.yml 
---
- hosts: master01

  roles:
    - role: dashboard-update

[root@ansible-server ansible]# ansible-playbook dashboard_update_role.yml 

19.5.2 登录dashboard

https://172.31.3.101:30005

[root@k8s-master01 ~]# kubectl -n kube-system describe secret $(kubectl -n kube-system get secret | grep admin-user | awk '{print $1}')
Name:         admin-user-token-mlzc8
Namespace:    kube-system
Labels:       <none>
Annotations:  kubernetes.io/service-account.name: admin-user
              kubernetes.io/service-account.uid: 8e8d6838-f344-4701-85d3-21e39205a77c

Type:  kubernetes.io/service-account-token

Data
====
ca.crt:     1066 bytes
namespace:  11 bytes
token:      eyJhbGciOiJSUzI1NiIsImtpZCI6IjZMdGRxbV9rX1hsQ0dtT2J1dHlDd1lwQVJORnpKY21Yc0JKYlVXaGlfaG8ifQ.eyJpc3MiOiJrdWJlcm5ldGVzL3NlcnZpY2VhY2NvdW50Iiwia3ViZXJuZXRlcy5pby9zZXJ2aWNlYWNjb3VudC9uYW1lc3BhY2UiOiJrdWJlLXN5c3RlbSIsImt1YmVybmV0ZXMuaW8vc2VydmljZWFjY291bnQvc2VjcmV0Lm5hbWUiOiJhZG1pbi11c2VyLXRva2VuLW1semM4Iiwia3ViZXJuZXRlcy5pby9zZXJ2aWNlYWNjb3VudC9zZXJ2aWNlLWFjY291bnQubmFtZSI6ImFkbWluLXVzZXIiLCJrdWJlcm5ldGVzLmlvL3NlcnZpY2VhY2NvdW50L3NlcnZpY2UtYWNjb3VudC51aWQiOiI4ZThkNjgzOC1mMzQ0LTQ3MDEtODVkMy0yMWUzOTIwNWE3N2MiLCJzdWIiOiJzeXN0ZW06c2VydmljZWFjY291bnQ6a3ViZS1zeXN0ZW06YWRtaW4tdXNlciJ9.dFe6Y-rRNEYWvVK-VNphz4N_tkCNHCG_uRt9iNhdCmtYcD5yy21iYcDjAWMVvmFuyn0QDnUlquPyl3WoASVc91BOKWNgdNkOrFEFKoP32YdgaurnRBkXMDgkAUJXQT-2vekO56UiQtoxK87DVSmFksTAFXlc7zw1VJRE1g10ZiNVTcl-omOiMPvdk5RIjs-Uk859p70_O1oC8Ep-JzBYWCilX2ymNUNNeh4lyt1Fo8Li4N0JLwzQLJgfHfjoQwpd4Irj2agMQ-BW4xT70HsJW4cUt1sJ29cnO1RfhxM8-w-6wBPnGwkJTSre4GfMrjnJoVFN2cbjQg4N0ud_MQMXcw

在这里插入图片描述

标签:a30,name,kubernetes,--,server,ansible,k8s,root,yml
来源: https://blog.csdn.net/qq_25599925/article/details/122628909

本站声明: 1. iCode9 技术分享网(下文简称本站)提供的所有内容,仅供技术学习、探讨和分享;
2. 关于本站的所有留言、评论、转载及引用,纯属内容发起人的个人观点,与本站观点和立场无关;
3. 关于本站的所有言论和文字,纯属内容发起人的个人观点,与本站观点和立场无关;
4. 本站文章均是网友提供,不完全保证技术分享内容的完整性、准确性、时效性、风险性和版权归属;如您发现该文章侵犯了您的权益,可联系我们第一时间进行删除;
5. 本站为非盈利性的个人网站,所有内容不会用来进行牟利,也不会利用任何形式的广告来间接获益,纯粹是为了广大技术爱好者提供技术内容和技术思想的分享性交流网站。

专注分享技术,共同学习,共同进步。侵权联系[81616952@qq.com]

Copyright (C)ICode9.com, All Rights Reserved.

ICode9版权所有