cnahub 2 years ago
Commit
1bfb9391dc

+ 40 - 0
0.vps-k8s-clear.sh

@@ -0,0 +1,40 @@
+#!/bin/sh
+
+systemctl disable --now kubelet
+systemctl disable --now kube-proxy
+systemctl disable --now kube-scheduler
+systemctl disable --now kube-controller-manager
+systemctl disable --now kube-apiserver
+systemctl disable --now etcd
+
+rm -rf /etc/etcd /etc/kubernetes
+rm -rf /etc/cni/net.d
+rm -rf /var/lib/cni/
+rm -rf /var/lib/etcd
+crictl ps -q | xargs -r crictl stop
+crictl ps -a -q | xargs -r crictl rm
+mount -l | grep '/var/lib/kubelet' | awk '{print $3}' | xargs -r umount
+rm -rf /var/lib/kubelet
+
+systemctl stop containerd
+rm -rf /var/lib/containerd
+systemctl start containerd
+
+export_addr=$(curl -s ipv4.icanhazip.com)
+sed -i "/$export_addr .*/d" /etc/hosts
+
+markS="##### ca-etcd-k8s environment #####"
+markE="##################################"
+remove_env_profile() {
+sed -i "/$markS/d"                   /etc/profile
+sed -i "/export expiry=.*/d"         /etc/profile
+sed -i "/export ca_config_path=.*/d" /etc/profile
+sed -i "/export etcd_pki_path=.*/d"  /etc/profile
+sed -i "/export etcd_ips_list=.*/d"  /etc/profile
+sed -i "/export k8s_pki_path=.*/d"   /etc/profile
+sed -i "/export k8s_ips_list=.*/d"   /etc/profile
+sed -i "/export export_addr=.*/d"    /etc/profile
+sed -i "/export export_port=.*/d"    /etc/profile
+sed -i "/$markE/d"                   /etc/profile
+}
+remove_env_profile
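+
+# Optional spot-check (suggested addition, not in the original script): the
+# marker block should now be gone from /etc/profile.
+grep "ca-etcd-k8s" /etc/profile || echo "profile env removed"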

+ 93 - 0
1.vps-k8s-sysconfig.sh

@@ -0,0 +1,93 @@
+#!/bin/sh
+
+systemctl disable --now firewalld 
+systemctl disable --now dnsmasq
+systemctl disable --now NetworkManager
+
+setenforce 0
+sed -i 's#SELINUX=enforcing#SELINUX=disabled#g' /etc/sysconfig/selinux
+sed -i 's#SELINUX=enforcing#SELINUX=disabled#g' /etc/selinux/config
+
+swapoff -a && sysctl -w vm.swappiness=0
+sed -ri 's/.*swap.*/#&/' /etc/fstab
+
+sed -i '/# End of file/i########################' /etc/security/limits.conf
+sed -i '/# End of file/i* soft nofile 655350'     /etc/security/limits.conf
+sed -i '/# End of file/i* hard nofile 655350'     /etc/security/limits.conf
+sed -i '/# End of file/i* soft nproc 655350'      /etc/security/limits.conf
+sed -i '/# End of file/i* hard nproc 655350'      /etc/security/limits.conf
+sed -i '/# End of file/i* soft memlock unlimited' /etc/security/limits.conf
+sed -i '/# End of file/i* hard memlock unlimited' /etc/security/limits.conf
+sed -i '/# End of file/i########################' /etc/security/limits.conf
+
+modprobe -- ip_vs
+modprobe -- ip_vs_rr
+modprobe -- ip_vs_wrr
+modprobe -- ip_vs_sh
+modprobe -- nf_conntrack
+modprobe -- br_netfilter   # needed by the net.bridge.* sysctls set below
+
+cat <<EOF > /etc/modules-load.d/ipvs.conf
+ip_vs
+ip_vs_lc
+ip_vs_wlc
+ip_vs_rr
+ip_vs_wrr
+ip_vs_lblc
+ip_vs_lblcr
+ip_vs_dh
+ip_vs_sh
+ip_vs_fo
+ip_vs_nq
+ip_vs_sed
+ip_vs_ftp
+br_netfilter
+nf_conntrack
+ip_tables
+ip_set
+xt_set
+ipt_set
+ipt_rpfilter
+ipt_REJECT
+ipip
+EOF
+
+# load the modules at boot as well
+systemctl enable --now systemd-modules-load.service  #--now = enable+start
+
+# check that the modules are loaded
+lsmod | grep -e ip_vs -e nf_conntrack
+
+
+## run on all nodes
+cat <<EOF > /etc/sysctl.d/k8s.conf
+net.ipv4.ip_forward = 1
+net.bridge.bridge-nf-call-iptables = 1
+net.bridge.bridge-nf-call-ip6tables = 1
+fs.may_detach_mounts = 1
+vm.overcommit_memory=1
+net.ipv4.conf.all.route_localnet = 1
+
+vm.panic_on_oom=0
+fs.inotify.max_user_watches=89100
+fs.file-max=52706963
+fs.nr_open=52706963
+net.netfilter.nf_conntrack_max=2310720
+
+net.ipv4.tcp_keepalive_time = 600
+net.ipv4.tcp_keepalive_probes = 3
+net.ipv4.tcp_keepalive_intvl = 15
+net.ipv4.tcp_max_tw_buckets = 36000
+net.ipv4.tcp_tw_reuse = 1
+net.ipv4.tcp_max_orphans = 327680
+net.ipv4.tcp_orphan_retries = 3
+net.ipv4.tcp_syncookies = 1
+net.ipv4.tcp_max_syn_backlog = 16768
+net.ipv4.ip_conntrack_max = 65536
+net.ipv4.tcp_timestamps = 0
+net.core.somaxconn = 16768
+EOF
+sysctl --system
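+
+# Optional spot-check (suggested addition): confirm a couple of the values took effect.
+sysctl net.ipv4.ip_forward net.bridge.bridge-nf-call-iptables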
+
+timedatectl set-timezone Asia/Shanghai

+ 64 - 0
2.vps-k8s-app-install.sh

@@ -0,0 +1,64 @@
+#!/bin/sh
+
+set -e
+
+yum install wget git jq psmisc net-tools yum-utils device-mapper-persistent-data lvm2 vim tar runc -y
+
+yum install ipvsadm ipset sysstat conntrack -y
+
+if [ ! -f /usr/local/bin/containerd ]; then
+wget https://github.com/containerd/containerd/releases/download/v1.6.18/containerd-1.6.18-linux-amd64.tar.gz
+tar Cxzvf /usr/local containerd-1.6.18-linux-amd64.tar.gz
+mkdir -p /etc/containerd
+containerd config default > /etc/containerd/config.toml
+
+# ensure containerd uses the systemd cgroup driver
+if grep -qF 'SystemdCgroup =' /etc/containerd/config.toml
+then
+  sed -i 's#SystemdCgroup.*=.*false#SystemdCgroup = true#g' /etc/containerd/config.toml
+else
+  sed -i '/\[plugins."io.containerd.grpc.v1.cri".containerd.runtimes.runc.options\]/a\            SystemdCgroup = true' /etc/containerd/config.toml
+fi
+
+mkdir -p /usr/local/lib/systemd/system/
+wget https://raw.githubusercontent.com/containerd/containerd/main/containerd.service -O /usr/local/lib/systemd/system/containerd.service
+
+systemctl daemon-reload
+systemctl enable --now containerd
+fi
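+
+# Optional spot-check (suggested addition): containerd should be installed and running.
+containerd --version
+systemctl is-active containerd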
+
+
+if [ ! -f /usr/local/bin/crictl ]; then
+VERSION="v1.26.0"
+wget https://github.com/kubernetes-sigs/cri-tools/releases/download/$VERSION/crictl-$VERSION-linux-amd64.tar.gz
+sudo tar zxvf crictl-$VERSION-linux-amd64.tar.gz -C /usr/local/bin
+#rm -f crictl-$VERSION-linux-amd64.tar.gz
+
+cat > /etc/crictl.yaml <<EOF
+runtime-endpoint: unix:///run/containerd/containerd.sock
+image-endpoint: unix:///run/containerd/containerd.sock
+timeout: 10
+debug: false
+EOF
+fi
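+
+# Optional spot-check (suggested addition): crictl should now reach containerd
+# through the endpoint configured above.
+crictl version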
+
+
+if [ ! -f /usr/local/bin/cfssl ]; then
+wget https://github.com/cloudflare/cfssl/releases/download/v1.6.3/cfssl_1.6.3_linux_amd64 -O /usr/local/bin/cfssl
+wget https://github.com/cloudflare/cfssl/releases/download/v1.6.3/cfssljson_1.6.3_linux_amd64 -O /usr/local/bin/cfssljson
+chmod +x /usr/local/bin/cfssl
+chmod +x /usr/local/bin/cfssljson
+fi
+
+
+if [ ! -f /usr/local/bin/etcd ]; then
+wget https://github.com/etcd-io/etcd/releases/download/v3.5.7/etcd-v3.5.7-linux-amd64.tar.gz
+tar -zxvf etcd-v3.5.7-linux-amd64.tar.gz --strip-components=1 -C /usr/local/bin etcd-v3.5.7-linux-amd64/etcd{,ctl}
+fi
+
+
+if [ ! -f /usr/local/bin/kubectl ]; then
+wget https://dl.k8s.io/v1.26.1/kubernetes-server-linux-amd64.tar.gz
+tar -xvf kubernetes-server-linux-amd64.tar.gz  --strip-components=3 -C /usr/local/bin kubernetes/server/bin/kube{let,ctl,-apiserver,-controller-manager,-scheduler,-proxy}
+fi
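+
+# Optional spot-check (suggested addition): confirm the binaries landed in /usr/local/bin.
+etcd --version
+etcdctl version
+kubectl version --client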
+

+ 1155 - 0
3.vps-k8s-install-v1.26.sh

@@ -0,0 +1,1155 @@
+#!/bin/sh
+
+set -e
+
+pod_cidr="196.16.0.0/16"
+svc_cidr="10.96.0.0/16"
+svc_frst=${svc_cidr%.*}.1
+svc_dns=${svc_cidr%.*}.10
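+# e.g. with svc_cidr=10.96.0.0/16: ${svc_cidr%.*} strips the trailing ".0/16",
+# leaving "10.96.0", so svc_frst=10.96.0.1 (the apiserver ClusterIP) and
+# svc_dns=10.96.0.10 (the cluster DNS address).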
+
+origin_dir=$(pwd)
+
+### Setting environment
+export_addr=$(curl -s ipv4.icanhazip.com)
+export_port=6443
+
+# CA organization
+# expiry 87600h = 10 years; adjust as needed, e.g. 438000h = 50 years, 876000h = 100 years
+expiry=876000h
+ca_config_path=/etc/ca/ca-config.json
+
+# etcd path
+etcd_pki_path=/etc/etcd/pki
+etcd_ips_list="$export_addr $(hostname)"
+
+# kubernetes
+k8s_pki_path=/etc/kubernetes/pki
+k8s_ips_list="$export_addr $(hostname)"
+
+
+markS="##### ca-etcd-k8s environment #####"
+markE="##################################"
+
+write_env_profile() {
+cat <<EOF >> /etc/profile
+$markS
+export expiry=$expiry
+export ca_config_path=$ca_config_path
+export etcd_pki_path=$etcd_pki_path
+export etcd_ips_list="${etcd_ips_list}"
+export k8s_pki_path=$k8s_pki_path
+export k8s_ips_list="${k8s_ips_list}"
+export export_addr=$export_addr
+export export_port=$export_port
+$markE
+EOF
+}
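+# Note: the block appended above only takes effect in future login shells; this
+# run keeps using the plain shell variables defined earlier.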
+
+if grep "$markS" /etc/profile > /dev/null
+then
+  echo "profile env already written"
+  echo "clear the environment and run again (see 0.vps-k8s-clear.sh)"
+  exit
+else
+  write_env_profile
+  echo "profile env written successfully"
+  printf "\n$export_addr $(hostname)\n" >> /etc/hosts
+fi
+
+### CA organization
+#ca-config.json
+echo "creating CA config $ca_config_path"
+mkdir -p $(dirname $ca_config_path)
+cat << EOF > $ca_config_path
+{
+    "signing": {
+        "default": {
+            "expiry": "$expiry"
+        },
+        "profiles": {
+            "server": {
+                "expiry": "$expiry",
+                "usages": [
+                    "signing",
+                    "key encipherment",
+                    "server auth"
+                ]
+            },
+            "client": {
+                "expiry": "$expiry",
+                "usages": [
+                    "signing",
+                    "key encipherment",
+                    "client auth"
+                ]
+            },
+            "peer": {
+                "expiry": "$expiry",
+                "usages": [
+                    "signing",
+                    "key encipherment",
+                    "server auth",
+                    "client auth"
+                ]
+            },
+            "kubernetes": {
+                "expiry": "$expiry",
+                "usages": [
+                    "signing",
+                    "key encipherment",
+                    "server auth",
+                    "client auth"
+                ]
+            },
+            "etcd": {
+                "expiry": "$expiry",
+                "usages": [
+                    "signing",
+                    "key encipherment",
+                    "server auth",
+                    "client auth"
+                ]
+            }
+        }
+    }
+}
+EOF
+
+## Install etcd
+# create the etcd cert directory and certificates
+### 1. generate the etcd root CA certificate
+
+echo "generating etcd root CA certificate"
+mkdir -p $etcd_pki_path
+cd $etcd_pki_path
+
+# etcd-ca-csr signing request
+cat <<EOF > etcd-ca-csr.json
+{
+  "CN": "etcd",
+  "key": {
+    "algo": "rsa",
+    "size": 2048
+  },
+  "names": [
+    {
+      "C": "CN",
+      "ST": "Beijing",
+      "L": "Beijing",
+      "O": "etcd",
+      "OU": "etcd"
+    }
+  ],
+  "ca": {
+    "expiry": "$expiry"
+  }
+}
+EOF
+
+# generate the etcd root CA certificate and key
+cfssl gencert -initca etcd-ca-csr.json | cfssljson -bare etcd-ca
+
+# issue the etcd server certificate
+echo "issuing etcd certificate"
+
+mkdir -p $etcd_pki_path
+cd $etcd_pki_path
+
+# create the etcd CSR etcd-csr.json
+cat <<EOF > etcd-csr.json
+{
+    "CN": "etcd",
+    "key": {
+        "algo": "rsa",
+        "size": 2048
+    },
+    "hosts": [
+        "127.0.0.1"
+    ],
+    "names": [
+        {
+            "C": "CN",
+            "ST": "Beijing",
+            "L": "Beijing",
+            "O": "etcd",
+            "OU": "System"
+        }
+    ]
+}
+EOF
+
+# append the etcd node hostname/IP entries to the CSR hosts list
+for addr in $etcd_ips_list
+do
+  echo "===>>> etcd hosts add $addr"
+  sed -i "\#\"127.0.0.1\"#i\        \"$addr\"," etcd-csr.json
+done
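+# The resulting "hosts" array then looks like (values illustrative):
+#   "hosts": [ "203.0.113.10", "myhost", "127.0.0.1" ]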
+
+# generate the etcd certificate
+cfssl gencert \
+  -ca=etcd-ca.pem \
+  -ca-key=etcd-ca-key.pem \
+  -config=${ca_config_path} \
+  -profile=etcd \
+  etcd-csr.json | cfssljson -bare etcd
+
+### Generate etcd.yaml
+
+mkdir -p /var/lib/etcd
+etcd_config_file=$(dirname ${etcd_pki_path})/etcd.yaml
+echo "writing $etcd_config_file"
+cat <<EOF > $etcd_config_file
+name: '{{host}}'                                                 # each node uses its own unique name
+data-dir: /var/lib/etcd
+wal-dir: /var/lib/etcd/wal
+snapshot-count: 5000
+heartbeat-interval: 100
+election-timeout: 1000
+quota-backend-bytes: 0
+listen-peer-urls: 'https://{{ipls}}:2380'    # this node's IP, port 2380, used for peer traffic
+listen-client-urls: 'https://{{ipls}}:2379,http://127.0.0.1:2379' # this node's IP
+max-snapshots: 3
+max-wals: 5
+cors:
+initial-advertise-peer-urls: 'https://{{ipls}}:2380'            # this node's IP
+advertise-client-urls: 'https://{{ipls}}:2379'                  # this node's IP
+discovery:
+discovery-fallback: 'proxy'
+discovery-proxy:
+discovery-srv:
+initial-cluster: '{{host0}}=https://{{ipls0}}:2380' # differs per cluster: list every member here
+initial-cluster-token: 'etcd-k8s-cluster'
+initial-cluster-state: 'new'
+strict-reconfig-check: false
+enable-v2: true
+enable-pprof: true
+proxy: 'off'
+proxy-failure-wait: 5000
+proxy-refresh-interval: 30000
+proxy-dial-timeout: 1000
+proxy-write-timeout: 5000
+proxy-read-timeout: 0
+client-transport-security:
+  cert-file: '${etcd_pki_path}/etcd.pem'
+  key-file: '${etcd_pki_path}/etcd-key.pem'
+  client-cert-auth: true
+  trusted-ca-file: '${etcd_pki_path}/etcd-ca.pem'
+  auto-tls: true
+peer-transport-security:
+  cert-file: '${etcd_pki_path}/etcd.pem'
+  key-file: '${etcd_pki_path}/etcd-key.pem'
+  peer-client-cert-auth: true
+  trusted-ca-file: '${etcd_pki_path}/etcd-ca.pem'
+  auto-tls: true
+debug: false
+log-package-levels:
+log-outputs: [default]
+force-new-cluster: false
+EOF
+
+# render the etcd.yaml template placeholders
+host=(
+$(hostname)
+)
+ipls=(
+$export_addr
+)
+
+for i in 0
+do  
+  sed -i "s/{{host}}/${host[$i]}/g" $etcd_config_file
+  sed -i "s/{{host0}}/${host[0]}/g" $etcd_config_file
+  
+  sed -i "s/{{ipls}}/${ipls[$i]}/g" $etcd_config_file
+  sed -i "s/{{ipls0}}/${ipls[0]}/g" $etcd_config_file
+done
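+# Rendered single-node result (values illustrative):
+#   name: 'myhost'
+#   listen-peer-urls: 'https://203.0.113.10:2380'
+#   initial-cluster: 'myhost=https://203.0.113.10:2380'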
+
+### etcd.service 开机启动
+
+echo "writing /usr/lib/systemd/system/etcd.service"
+cat << EOF > /usr/lib/systemd/system/etcd.service
+[Unit]
+Description=Etcd Service
+Documentation=https://etcd.io/docs/v3.5/op-guide/clustering/
+After=network.target
+
+[Service]
+Type=notify
+ExecStart=/usr/local/bin/etcd --config-file=$etcd_config_file
+Restart=on-failure
+RestartSec=10
+LimitNOFILE=65536
+
+[Install]
+WantedBy=multi-user.target
+Alias=etcd3.service
+EOF
+
+
+echo "starting etcd"
+# reload systemd and enable at boot
+systemctl daemon-reload
+systemctl enable --now etcd
+#systemctl status etcd
+
+echo "testing etcd"
+### verify etcd access (etcdctl defaults to 127.0.0.1:2379)
+etcdctl member list --write-out=table
+etcdctl endpoint status --write-out=table
+
+## Install k8s
+### k8s certificates
+#### k8s root CA certificate
+
+echo "creating k8s certificates"
+mkdir -p ${k8s_pki_path}
+cd ${k8s_pki_path}
+
+cat << EOF > ca-csr.json
+{
+  "CN": "kubernetes",
+  "key": {
+    "algo": "rsa",
+    "size": 2048
+  },
+  "names": [
+    {
+      "C": "CN",
+      "ST": "Beijing",
+      "L": "Beijing",
+      "O": "Kubernetes",
+      "OU": "Kubernetes"
+    }
+  ],
+  "ca": {
+    "expiry": "$expiry"
+  }
+}
+EOF
+
+## generate the k8s CA certificate
+
+echo "generating k8s CA certificate"
+cfssl gencert -initca ca-csr.json | cfssljson -bare ca
+
+#### apiserver certificate
+echo "generating apiserver certificate"
+cat <<EOF > apiserver-csr.json 
+{
+    "CN": "kube-apiserver",
+    "hosts": [
+        "127.0.0.1",
+        "${svc_frst}",
+        "kubernetes",
+        "kubernetes.default",
+        "kubernetes.default.svc",
+        "kubernetes.default.svc.cluster",
+        "kubernetes.default.svc.cluster.local"
+    ],
+    "key": {
+        "algo": "rsa",
+        "size": 2048
+    },
+    "names": [
+        {
+            "C": "CN",
+            "L": "BeiJing",
+            "ST": "BeiJing",
+            "O": "Kubernetes",
+            "OU": "Kubernetes"
+        }
+    ]
+}
+EOF
+
+# append the k8s node hostname/IP entries to the CSR hosts list
+for addr in $k8s_ips_list
+do
+  echo "===>>> kube-apiserver hosts add $addr"
+  sed -i "\#\"127.0.0.1\"#i\        \"$addr\"," apiserver-csr.json
+done
+
+cfssl gencert \
+  -ca=ca.pem \
+  -ca-key=ca-key.pem \
+  -config=${ca_config_path} \
+  -profile=kubernetes \
+  apiserver-csr.json | cfssljson -bare apiserver
+
+
+#### front-proxy certificates
+
+##### 1. front-proxy root CA
+
+echo "generating front-proxy CA certificate"
+cat << EOF > front-proxy-ca-csr.json
+{
+  "CN": "kubernetes",
+  "key": {
+    "algo": "rsa",
+    "size": 2048
+  }
+}
+EOF
+# generate the front-proxy root CA
+cfssl gencert -initca front-proxy-ca-csr.json | cfssljson -bare front-proxy-ca
+
+##### 2. front-proxy-client certificate
+cat << EOF > front-proxy-client-csr.json  
+{
+  "CN": "front-proxy-client",
+  "key": {
+     "algo": "rsa",
+     "size": 2048
+  }
+}
+EOF
+
+# generate the front-proxy-client certificate
+echo "generating front-proxy-client certificate"
+# the cfssl hosts/CN warning here can be ignored
+cfssl gencert \
+  -ca=front-proxy-ca.pem   \
+  -ca-key=front-proxy-ca-key.pem  \
+  -config=${ca_config_path}   \
+  -profile=kubernetes   \
+  front-proxy-client-csr.json | cfssljson -bare front-proxy-client
+
+
+#### controller-manager certificate
+
+echo "generating controller-manager certificate"
+# 1. generate the certificate
+cat <<EOF > controller-manager-csr.json
+{
+  "CN": "system:kube-controller-manager",
+  "key": {
+    "algo": "rsa",
+    "size": 2048
+  },
+  "names": [
+    {
+      "C": "CN",
+      "ST": "Beijing",
+      "L": "Beijing",
+      "O": "system:kube-controller-manager",
+      "OU": "Kubernetes"
+    }
+  ]
+}
+EOF
+
+cfssl gencert \
+  -ca=ca.pem \
+  -ca-key=ca-key.pem \
+  -config=${ca_config_path} \
+  -profile=kubernetes \
+  controller-manager-csr.json | cfssljson -bare controller-manager
+
+echo "writing controller-manager.conf"
+# 2. generate the kubeconfig
+# set-cluster: define a cluster entry
+kubectl config set-cluster kubernetes \
+     --certificate-authority=${k8s_pki_path}/ca.pem \
+     --embed-certs=true \
+     --server=https://$export_addr:$export_port \
+     --kubeconfig=$(dirname ${k8s_pki_path})/controller-manager.conf
+
+# set-context: define a context entry
+kubectl config set-context system:kube-controller-manager@kubernetes \
+    --cluster=kubernetes \
+    --user=system:kube-controller-manager \
+    --kubeconfig=$(dirname ${k8s_pki_path})/controller-manager.conf
+
+# set-credentials: define a user entry
+kubectl config set-credentials system:kube-controller-manager \
+     --client-certificate=${k8s_pki_path}/controller-manager.pem \
+     --client-key=${k8s_pki_path}/controller-manager-key.pem \
+     --embed-certs=true \
+     --kubeconfig=$(dirname ${k8s_pki_path})/controller-manager.conf
+
+# use this context as the default
+kubectl config use-context system:kube-controller-manager@kubernetes \
+     --kubeconfig=$(dirname ${k8s_pki_path})/controller-manager.conf
+# this kubeconfig is later also used to auto-approve kubelet certificates
+
+
+echo "generating scheduler certificate"
+#### scheduler certificate
+# 1. generate the certificate
+cat <<EOF > scheduler-csr.json
+{
+  "CN": "system:kube-scheduler",
+  "key": {
+    "algo": "rsa",
+    "size": 2048
+  },
+  "names": [
+    {
+      "C": "CN",
+      "ST": "Beijing",
+      "L": "Beijing",
+      "O": "system:kube-scheduler",
+      "OU": "Kubernetes"
+    }
+  ]
+}
+EOF
+
+cfssl gencert \
+   -ca=ca.pem \
+   -ca-key=ca-key.pem \
+   -config=${ca_config_path} \
+   -profile=kubernetes \
+   scheduler-csr.json | cfssljson -bare scheduler
+
+echo "writing scheduler.conf"
+# 2. generate the kubeconfig
+# used for secure access to the cluster
+kubectl config set-cluster kubernetes \
+     --certificate-authority=${k8s_pki_path}/ca.pem \
+     --embed-certs=true \
+     --server=https://$export_addr:$export_port \
+     --kubeconfig=$(dirname ${k8s_pki_path})/scheduler.conf
+
+kubectl config set-credentials system:kube-scheduler \
+     --client-certificate=${k8s_pki_path}/scheduler.pem \
+     --client-key=${k8s_pki_path}/scheduler-key.pem \
+     --embed-certs=true \
+     --kubeconfig=$(dirname ${k8s_pki_path})/scheduler.conf
+
+kubectl config set-context system:kube-scheduler@kubernetes \
+     --cluster=kubernetes \
+     --user=system:kube-scheduler \
+     --kubeconfig=$(dirname ${k8s_pki_path})/scheduler.conf
+
+kubectl config use-context system:kube-scheduler@kubernetes \
+     --kubeconfig=$(dirname ${k8s_pki_path})/scheduler.conf
+
+
+
+echo "generating admin certificate"
+#### admin certificate
+# 1. generate the certificate
+cat <<EOF > admin-csr.json
+{
+  "CN": "admin",
+  "key": {
+    "algo": "rsa",
+    "size": 2048
+  },
+  "names": [
+    {
+      "C": "CN",
+      "ST": "Beijing",
+      "L": "Beijing",
+      "O": "system:masters",
+      "OU": "Kubernetes"
+    }
+  ]
+}
+EOF
+
+cfssl gencert \
+   -ca=ca.pem \
+   -ca-key=ca-key.pem \
+   -config=${ca_config_path} \
+   -profile=kubernetes \
+   admin-csr.json | cfssljson -bare admin
+
+echo "writing admin.conf"
+# 2. generate the kubeconfig
+## admin.conf grants cluster-admin access ===>>> later copied to ~/.kube/config
+kubectl config set-cluster kubernetes \
+	--certificate-authority=${k8s_pki_path}/ca.pem \
+	--embed-certs=true \
+	--server=https://$export_addr:$export_port \
+	--kubeconfig=$(dirname ${k8s_pki_path})/admin.conf
+
+kubectl config set-credentials kubernetes-admin \
+	--client-certificate=${k8s_pki_path}/admin.pem \
+	--client-key=${k8s_pki_path}/admin-key.pem \
+	--embed-certs=true \
+	--kubeconfig=$(dirname ${k8s_pki_path})/admin.conf
+
+kubectl config set-context kubernetes-admin@kubernetes \
+	--cluster=kubernetes \
+	--user=kubernetes-admin \
+	--kubeconfig=$(dirname ${k8s_pki_path})/admin.conf
+
+kubectl config use-context kubernetes-admin@kubernetes \
+  --kubeconfig=$(dirname ${k8s_pki_path})/admin.conf
+
+#### generate the ServiceAccount key pair
+openssl genrsa -out ${k8s_pki_path}/sa.key 2048
+openssl rsa -in ${k8s_pki_path}/sa.key -pubout -out ${k8s_pki_path}/sa.pub
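+# sa.key signs ServiceAccount tokens (apiserver --service-account-signing-key-file,
+# controller-manager --service-account-private-key-file); sa.pub verifies them
+# (apiserver --service-account-key-file). Both flags are set below.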
+
+
+echo "preparing k8s component directories"
+### k8s components
+#### 1. directory layout
+mkdir -p $(dirname ${k8s_pki_path})/manifests /etc/systemd/system/kubelet.service.d /var/lib/kubelet /var/log/kubernetes
+
+echo "writing /usr/lib/systemd/system/kube-apiserver.service"
+#### 2. configure the apiserver service
+cat <<EOF > /usr/lib/systemd/system/kube-apiserver.service
+[Unit]
+Description=Kubernetes API Server
+Documentation=https://github.com/kubernetes/kubernetes
+After=network.target
+
+[Service]
+ExecStart=/usr/local/bin/kube-apiserver \\
+      --v=2  \\
+      --allow-privileged=true  \\
+      --bind-address=0.0.0.0  \\
+      --secure-port=${export_port}  \\
+      --advertise-address=${export_addr} \\
+      --service-cluster-ip-range=${svc_cidr}  \\
+      --service-node-port-range=30000-32767  \\
+      --etcd-servers=https://${export_addr}:2379 \\
+      --etcd-cafile=${etcd_pki_path}/etcd-ca.pem  \\
+      --etcd-certfile=${etcd_pki_path}/etcd.pem  \\
+      --etcd-keyfile=${etcd_pki_path}/etcd-key.pem  \\
+      --client-ca-file=${k8s_pki_path}/ca.pem  \\
+      --tls-cert-file=${k8s_pki_path}/apiserver.pem  \\
+      --tls-private-key-file=${k8s_pki_path}/apiserver-key.pem  \\
+      --kubelet-client-certificate=${k8s_pki_path}/apiserver.pem  \\
+      --kubelet-client-key=${k8s_pki_path}/apiserver-key.pem  \\
+      --service-account-key-file=${k8s_pki_path}/sa.pub  \\
+      --service-account-signing-key-file=${k8s_pki_path}/sa.key  \\
+      --service-account-issuer=https://kubernetes.default.svc.cluster.local \\
+      --kubelet-preferred-address-types=InternalIP,ExternalIP,Hostname  \\
+      --enable-admission-plugins=NamespaceLifecycle,LimitRanger,ServiceAccount,DefaultStorageClass,DefaultTolerationSeconds,NodeRestriction,ResourceQuota  \\
+      --feature-gates=LegacyServiceAccountTokenNoAutoGeneration=false \\
+      --authorization-mode=Node,RBAC  \\
+      --enable-bootstrap-token-auth=true  \\
+      --requestheader-client-ca-file=${k8s_pki_path}/front-proxy-ca.pem  \\
+      --proxy-client-cert-file=${k8s_pki_path}/front-proxy-client.pem  \\
+      --proxy-client-key-file=${k8s_pki_path}/front-proxy-client-key.pem  \\
+      --requestheader-allowed-names=aggregator,front-proxy-client  \\
+      --requestheader-group-headers=X-Remote-Group  \\
+      --requestheader-extra-headers-prefix=X-Remote-Extra-  \\
+      --requestheader-username-headers=X-Remote-User
+# --token-auth-file=/etc/kubernetes/token.csv
+
+Restart=on-failure
+RestartSec=10s
+LimitNOFILE=65535
+
+[Install]
+WantedBy=multi-user.target
+EOF
+
+echo "starting kube-apiserver"
+systemctl daemon-reload
+systemctl enable --now kube-apiserver
+#systemctl status kube-apiserver
+
+
+echo "writing /usr/lib/systemd/system/kube-controller-manager.service"
+#### 3. configure the controller-manager service
+# 196.16.0.0/16 is the pod CIDR; if you change it, make sure it does not conflict with the host, docker, or other networks
+
+cat <<EOF > /usr/lib/systemd/system/kube-controller-manager.service
+[Unit]
+Description=Kubernetes Controller Manager
+Documentation=https://github.com/kubernetes/kubernetes
+After=network.target
+
+[Service]
+ExecStart=/usr/local/bin/kube-controller-manager \\
+      --v=2 \\
+      --root-ca-file=${k8s_pki_path}/ca.pem \\
+      --cluster-signing-cert-file=${k8s_pki_path}/ca.pem \\
+      --cluster-signing-key-file=${k8s_pki_path}/ca-key.pem \\
+      --service-account-private-key-file=${k8s_pki_path}/sa.key \\
+      --kubeconfig=$(dirname ${k8s_pki_path})/controller-manager.conf \\
+      --feature-gates=LegacyServiceAccountTokenNoAutoGeneration=false \\
+      --leader-elect=true \\
+      --use-service-account-credentials=true \\
+      --node-monitor-grace-period=40s \\
+      --node-monitor-period=5s \\
+      --pod-eviction-timeout=2m0s \\
+      --controllers=*,bootstrapsigner,tokencleaner \\
+      --allocate-node-cidrs=true \\
+      --cluster-cidr=${pod_cidr} \\
+      --requestheader-client-ca-file=${k8s_pki_path}/front-proxy-ca.pem \\
+      --node-cidr-mask-size=24
+      
+Restart=always
+RestartSec=10s
+
+[Install]
+WantedBy=multi-user.target
+EOF
+
+echo "starting kube-controller-manager"
+systemctl daemon-reload
+systemctl enable --now kube-controller-manager
+#systemctl status kube-controller-manager
+
+
+echo "writing /usr/lib/systemd/system/kube-scheduler.service"
+#### 4. configure the scheduler
+cat <<EOF >/usr/lib/systemd/system/kube-scheduler.service
+[Unit]
+Description=Kubernetes Scheduler
+Documentation=https://github.com/kubernetes/kubernetes
+After=network.target
+
+[Service]
+ExecStart=/usr/local/bin/kube-scheduler \\
+      --v=2 \\
+      --leader-elect=true \\
+      --authentication-kubeconfig=$(dirname ${k8s_pki_path})/scheduler.conf \\
+      --authorization-kubeconfig=$(dirname ${k8s_pki_path})/scheduler.conf \\
+      --kubeconfig=$(dirname ${k8s_pki_path})/scheduler.conf
+
+Restart=always
+RestartSec=10s
+
+[Install]
+WantedBy=multi-user.target
+EOF
+
+echo "starting kube-scheduler"
+systemctl daemon-reload
+systemctl enable --now kube-scheduler
+#systemctl status kube-scheduler
+
+
+echo "configuring TLS bootstrap"
+#### 5. how TLS bootstrapping works
+# 1. master-side bootstrap configuration
+
+echo "preparing $(dirname ${k8s_pki_path})/bootstrap-kubelet.conf"
+# set the cluster entry
+kubectl config set-cluster kubernetes \
+	--certificate-authority=${k8s_pki_path}/ca.pem \
+	--embed-certs=true \
+	--server=https://${export_addr}:${export_port} \
+	--kubeconfig=$(dirname ${k8s_pki_path})/bootstrap-kubelet.conf
+
+# set the bootstrap token credentials
+kubectl config set-credentials tls-bootstrap-token-user \
+	--token=a2e4f9.781b15d024bb7876 \
+	--kubeconfig=$(dirname ${k8s_pki_path})/bootstrap-kubelet.conf 
+
+# set the context
+kubectl config set-context tls-bootstrap-token-user@kubernetes \
+	--cluster=kubernetes \
+	--user=tls-bootstrap-token-user \
+	--kubeconfig=$(dirname ${k8s_pki_path})/bootstrap-kubelet.conf
+
+# use the context
+kubectl config use-context tls-bootstrap-token-user@kubernetes \
+  --kubeconfig=$(dirname ${k8s_pki_path})/bootstrap-kubelet.conf
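+
+# The token a2e4f9.781b15d024bb7876 matches the bootstrap Secret created below
+# (token-id: a2e4f9, token-secret: 781b15d024bb7876).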
+
+# 2. give kubectl access to the cluster
+# kubectl uses /root/.kube/config, which is the admin.conf generated above and carries full privileges
+mkdir -p /root/.kube
+\cp -f $(dirname ${k8s_pki_path})/admin.conf /root/.kube/config
+
+# verify cluster status; if the query fails, troubleshoot the components configured above
+kubectl get cs
+
+
+# 3. create the cluster bootstrap RBAC objects
+# prepare bootstrap.secret.yaml
+
+echo "preparing $(dirname ${k8s_pki_path})/bootstrap.secret.yaml"
+cat <<EOF > $(dirname ${k8s_pki_path})/bootstrap.secret.yaml
+apiVersion: v1
+kind: Secret
+metadata:
+  name: bootstrap-token-a2e4f9
+  namespace: kube-system
+type: bootstrap.kubernetes.io/token
+stringData:
+  description: "The default bootstrap token generated by 'kubelet'."
+  token-id: a2e4f9
+  token-secret: 781b15d024bb7876
+  usage-bootstrap-authentication: "true"
+  usage-bootstrap-signing: "true"
+  auth-extra-groups:  system:bootstrappers:default-node-token,system:bootstrappers:worker,system:bootstrappers:ingress
+ 
+---
+apiVersion: rbac.authorization.k8s.io/v1
+kind: ClusterRoleBinding
+metadata:
+  name: kubelet-bootstrap
+roleRef:
+  apiGroup: rbac.authorization.k8s.io
+  kind: ClusterRole
+  name: system:node-bootstrapper
+subjects:
+- apiGroup: rbac.authorization.k8s.io
+  kind: Group
+  name: system:bootstrappers:default-node-token
+---
+apiVersion: rbac.authorization.k8s.io/v1
+kind: ClusterRoleBinding
+metadata:
+  name: node-autoapprove-bootstrap
+roleRef:
+  apiGroup: rbac.authorization.k8s.io
+  kind: ClusterRole
+  name: system:certificates.k8s.io:certificatesigningrequests:nodeclient
+subjects:
+- apiGroup: rbac.authorization.k8s.io
+  kind: Group
+  name: system:bootstrappers:default-node-token
+---
+apiVersion: rbac.authorization.k8s.io/v1
+kind: ClusterRoleBinding
+metadata:
+  name: node-autoapprove-certificate-rotation
+roleRef:
+  apiGroup: rbac.authorization.k8s.io
+  kind: ClusterRole
+  name: system:certificates.k8s.io:certificatesigningrequests:selfnodeclient
+subjects:
+- apiGroup: rbac.authorization.k8s.io
+  kind: Group
+  name: system:nodes
+---
+apiVersion: rbac.authorization.k8s.io/v1
+kind: ClusterRole
+metadata:
+  annotations:
+    rbac.authorization.kubernetes.io/autoupdate: "true"
+  labels:
+    kubernetes.io/bootstrapping: rbac-defaults
+  name: system:kube-apiserver-to-kubelet
+rules:
+  - apiGroups:
+      - ""
+    resources:
+      - nodes/proxy
+      - nodes/stats
+      - nodes/log
+      - nodes/spec
+      - nodes/metrics
+    verbs:
+      - "*"
+---
+apiVersion: rbac.authorization.k8s.io/v1
+kind: ClusterRoleBinding
+metadata:
+  name: system:kube-apiserver
+  namespace: ""
+roleRef:
+  apiGroup: rbac.authorization.k8s.io
+  kind: ClusterRole
+  name: system:kube-apiserver-to-kubelet
+subjects:
+  - apiGroup: rbac.authorization.k8s.io
+    kind: User
+    name: kube-apiserver
+EOF
+
+kubectl create -f $(dirname ${k8s_pki_path})/bootstrap.secret.yaml
+
+#### 6. bootstrap the k8s node
+##### 1. configure kubelet
+###### 1. kubelet.service
+# configure the kubelet service on all nodes
+echo "bootstrapping the k8s node"
+echo "writing /usr/lib/systemd/system/kubelet.service"
+cat << EOF > /usr/lib/systemd/system/kubelet.service
+[Unit]
+Description=Kubernetes Kubelet
+Documentation=https://github.com/kubernetes/kubernetes
+After=containerd.service
+Requires=containerd.service
+
+[Service]
+ExecStart=/usr/local/bin/kubelet
+
+Restart=always
+StartLimitInterval=0
+RestartSec=10
+
+[Install]
+WantedBy=multi-user.target
+EOF
+
+echo "writing /etc/systemd/system/kubelet.service.d/10-kubelet.conf"
+cat << EOF > /etc/systemd/system/kubelet.service.d/10-kubelet.conf
+[Service]
+Environment="KUBELET_KUBECONFIG_ARGS=--bootstrap-kubeconfig=$(dirname ${k8s_pki_path})/bootstrap-kubelet.conf --kubeconfig=$(dirname ${k8s_pki_path})/kubelet.conf"
+Environment="KUBELET_SYSTEM_ARGS=--container-runtime=remote --runtime-request-timeout=15m --container-runtime-endpoint=unix:///run/containerd/containerd.sock"
+Environment="KUBELET_CONFIG_ARGS=--config=/etc/kubernetes/kubelet-conf.yml"
+Environment="KUBELET_EXTRA_ARGS=--node-labels=node.kubernetes.io/node='' "
+ExecStart=
+ExecStart=/usr/local/bin/kubelet \$KUBELET_KUBECONFIG_ARGS \$KUBELET_CONFIG_ARGS \$KUBELET_SYSTEM_ARGS \$KUBELET_EXTRA_ARGS
+EOF
+
+# /etc/kubernetes/kubelet.conf is generated automatically during TLS bootstrap
+
+###### 2. kubelet-conf.yml
+# clusterDNS is the 10th IP of the service network; change to your own, e.g. 10.96.0.10
+
+echo "generating $(dirname ${k8s_pki_path})/kubelet-conf.yml"
+cat << EOF > $(dirname ${k8s_pki_path})/kubelet-conf.yml
+apiVersion: kubelet.config.k8s.io/v1beta1
+kind: KubeletConfiguration
+address: 0.0.0.0
+port: 10250
+readOnlyPort: 10255
+authentication:
+  anonymous:
+    enabled: false
+  webhook:
+    cacheTTL: 2m0s
+    enabled: true
+  x509:
+    clientCAFile: ${k8s_pki_path}/ca.pem
+authorization:
+  mode: Webhook
+  webhook:
+    cacheAuthorizedTTL: 5m0s
+    cacheUnauthorizedTTL: 30s
+cgroupDriver: systemd
+cgroupsPerQOS: true
+clusterDNS:
+- ${svc_dns}
+clusterDomain: cluster.local
+containerLogMaxFiles: 5
+containerLogMaxSize: 10Mi
+contentType: application/vnd.kubernetes.protobuf
+cpuCFSQuota: true
+cpuManagerPolicy: none
+cpuManagerReconcilePeriod: 10s
+enableControllerAttachDetach: true
+enableDebuggingHandlers: true
+enforceNodeAllocatable:
+- pods
+eventBurst: 10
+eventRecordQPS: 5
+evictionHard:
+  imagefs.available: 15%
+  memory.available: 100Mi
+  nodefs.available: 10%
+  nodefs.inodesFree: 5%
+evictionPressureTransitionPeriod: 5m0s  # reduce these eviction settings as appropriate
+failSwapOn: true
+fileCheckFrequency: 20s
+hairpinMode: promiscuous-bridge
+healthzBindAddress: 127.0.0.1
+healthzPort: 10248
+httpCheckFrequency: 20s
+imageGCHighThresholdPercent: 85
+imageGCLowThresholdPercent: 80
+imageMinimumGCAge: 2m0s
+iptablesDropBit: 15
+iptablesMasqueradeBit: 14
+kubeAPIBurst: 10
+kubeAPIQPS: 5
+makeIPTablesUtilChains: true
+maxOpenFiles: 1000000
+maxPods: 110
+nodeStatusUpdateFrequency: 10s
+oomScoreAdj: -999
+podPidsLimit: -1
+registryBurst: 10
+registryPullQPS: 5
+resolvConf: /etc/resolv.conf
+rotateCertificates: true
+runtimeRequestTimeout: 2m0s
+serializeImagePulls: true
+staticPodPath: $(dirname ${k8s_pki_path})/manifests
+streamingConnectionIdleTimeout: 4h0m0s
+syncFrequency: 1m0s
+volumeStatsAggPeriod: 1m0s
+EOF
+
+
+###### 3. start kubelet
+echo "starting kubelet"
+systemctl daemon-reload
+systemctl enable --now kubelet
+#systemctl status kubelet
+
+echo "checking cluster nodes: Ready or NotReady are both fine at this stage"
+kubectl get node
+# Ready or NotReady is normal here; the node cannot run pods until a CNI such as calico is installed
+
+##### 2. configure kube-proxy
+###### 1. generate kube-proxy.conf
+# create the kube-proxy ServiceAccount
+echo "configuring kube-proxy RBAC"
+kubectl -n kube-system create serviceaccount kube-proxy
+
+# bind the ServiceAccount to the system:node-proxier cluster role
+kubectl create clusterrolebinding system:kube-proxy --clusterrole system:node-proxier --serviceaccount kube-system:kube-proxy
+
+echo "================================================================================"
+
+K8S_DIR=$(dirname ${k8s_pki_path})
+get_secret_token() {
+  sleep 1s
+  # export the variables for later use
+  SECRET=$(kubectl -n kube-system get sa/kube-proxy --output=jsonpath='{.secrets[0].name}')
+  JWT_TOKEN=$(kubectl -n kube-system get secret/$SECRET --output=jsonpath='{.data.token}' | base64 -d)
+}
+get_secret_token
+
+reget_count=0
+while [ -z "$JWT_TOKEN" ]
+do
+  if [ $reget_count -ge 5 ]
+  then
+    echo "reget_count $reget_count -ge 5, please check config"
+    exit 1
+  fi
+  sleep 1s
+  get_secret_token
+  ((reget_count+=1))
+  echo "reget_count $reget_count..."
+done
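+# Note: the token Secret is only auto-created here because the apiserver and
+# controller-manager above set --feature-gates=LegacyServiceAccountTokenNoAutoGeneration=false.
+# Where legacy auto-generation is unavailable (the default on k8s >= 1.24), a
+# token can be issued manually instead, e.g. "kubectl -n kube-system create token kube-proxy".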
+
+echo "================================================================================"
+echo $SECRET
+echo $JWT_TOKEN
+echo "================================================================================"
+
+# generate the kube-proxy kubeconfig
+# --server: your apiserver address or LB address
+echo "generating ${K8S_DIR}/kube-proxy.conf"
+kubectl config set-cluster kubernetes \
+	--certificate-authority=${k8s_pki_path}/ca.pem \
+	--embed-certs=true \
+	--server=https://${export_addr}:${export_port} \
+	--kubeconfig=${K8S_DIR}/kube-proxy.conf
+
+# kube-proxy credentials
+kubectl config set-credentials kubernetes \
+	--token=${JWT_TOKEN} \
+	--kubeconfig=${K8S_DIR}/kube-proxy.conf
+
+kubectl config set-context kubernetes \
+	--cluster=kubernetes \
+	--user=kubernetes \
+	--kubeconfig=${K8S_DIR}/kube-proxy.conf
+
+kubectl config use-context kubernetes \
+	--kubeconfig=${K8S_DIR}/kube-proxy.conf
+
+###### 2. configure kube-proxy.service
+echo "writing /usr/lib/systemd/system/kube-proxy.service"
+cat << EOF > /usr/lib/systemd/system/kube-proxy.service
+[Unit]
+Description=Kubernetes Kube Proxy
+Documentation=https://github.com/kubernetes/kubernetes
+After=network.target
+
+[Service]
+ExecStart=/usr/local/bin/kube-proxy \\
+  --config=$(dirname ${k8s_pki_path})/kube-proxy.yaml \\
+  --v=2
+
+Restart=always
+RestartSec=10s
+
+[Install]
+WantedBy=multi-user.target
+EOF
+
+###### 3. prepare kube-proxy.yaml
+# remember to set your own pod CIDR
+echo "preparing $(dirname ${k8s_pki_path})/kube-proxy.yaml"
+cat <<EOF > $(dirname ${k8s_pki_path})/kube-proxy.yaml
+apiVersion: kubeproxy.config.k8s.io/v1alpha1
+bindAddress: 0.0.0.0
+clientConnection:
+  acceptContentTypes: ""
+  burst: 10
+  contentType: application/vnd.kubernetes.protobuf
+  kubeconfig: $(dirname ${k8s_pki_path})/kube-proxy.conf   # kube-proxy kubeconfig generated above
+  qps: 5
+clusterCIDR: ${pod_cidr}  # set to your pod CIDR
+configSyncPeriod: 15m0s
+conntrack:
+  max: null
+  maxPerCore: 32768
+  min: 131072
+  tcpCloseWaitTimeout: 1h0m0s
+  tcpEstablishedTimeout: 24h0m0s
+enableProfiling: false
+healthzBindAddress: 0.0.0.0:10256
+hostnameOverride: ""
+iptables:
+  masqueradeAll: false
+  masqueradeBit: 14
+  minSyncPeriod: 0s
+  syncPeriod: 30s
+ipvs:
+  masqueradeAll: true
+  minSyncPeriod: 5s
+  scheduler: "rr"
+  syncPeriod: 30s
+kind: KubeProxyConfiguration
+metricsBindAddress: 127.0.0.1:10249
+mode: "ipvs"
+nodePortAddresses: null
+oomScoreAdj: -999
+portRange: ""
+udpIdleTimeout: 250ms
+EOF
+
+
+# start kube-proxy
+echo "starting kube-proxy"
+systemctl daemon-reload
+systemctl enable --now kube-proxy
+#systemctl status kube-proxy
+
+echo "================================================================================"
+sleep 1s
+
+cd $origin_dir
+#### 7. deploy calico
+
+echo "deploying calico"
+#kubectl create -f https://raw.githubusercontent.com/projectcalico/calico/v3.25.0/manifests/tigera-operator.yaml
+#kubectl create -f https://raw.githubusercontent.com/projectcalico/calico/v3.25.0/manifests/custom-resources.yaml
+
+mkdir -p k8s-components
+cd k8s-components
+calico_version=v3.25
+if [ ! -f calico-$calico_version.yaml ]; then 
+curl https://docs.tigera.io/archive/$calico_version/manifests/calico.yaml  -o calico-$calico_version.yaml
+fi
+cp calico-$calico_version.yaml calico.yaml
+# CIDR=$(grep -- "--cluster-cidr=" /usr/lib/systemd/system/kube-controller-manager.service | awk '{print $1}' | awk -F= '{print $2}')
+# echo $CIDR
+CIDR=$pod_cidr
+sed -i "s|# - name: CALICO_IPV4POOL_CIDR|- name: CALICO_IPV4POOL_CIDR|"  calico.yaml
+sed -i "s|#   value: \"192.168.0.0/16\"|  value: \"$CIDR\"|"             calico.yaml
+kubectl apply -f calico.yaml
+
+echo "================================================================================"
+sleep 1s
+
+#### 8. deploy coreDNS
+echo "deploying coreDNS"
+coredns=coredns-deployment
+if [ ! -d $coredns  ]; then
+  git clone https://github.com/coredns/deployment.git $coredns
+fi
+cd $coredns/kubernetes
+
+# use the 10th IP of the service network, e.g. 10.96.0.10
+#./deploy.sh -s -i 10.96.0.10 | kubectl apply -f -
+# derive the 10th IP of the service network
+# (or simply use $svc_dns)
+svc_ip=$(kubectl get svc | grep kubernetes | awk '{print $3}')
+svc_dns_ip=${svc_ip}0   # e.g. 10.96.0.1 -> 10.96.0.10; works because the kubernetes svc takes the .1 address
+cmd="./deploy.sh -s -i ${svc_dns_ip} | kubectl apply -f -"
+echo $cmd
+eval $cmd
+

+ 8 - 0
k8s-extensions/1.vps-k8s-extension-metrics-server.sh

@@ -0,0 +1,8 @@
+#!/bin/sh
+
+# fallback helper, normally unused; the pre-downloaded and patched metrics-server yaml is used instead
+version=v0.6.2
+save_file=metrics-server-extension-$version.yaml
+wget https://github.com/kubernetes-sigs/metrics-server/releases/download/$version/components.yaml -O $save_file
+sed -i '\|- --cert-dir=/tmp|i\        - --kubelet-insecure-tls' $save_file
+kubectl apply -f $save_file
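+
+# Optional spot-check (suggested addition): once the metrics-server pod is
+# ready, "kubectl top nodes" should return node metrics.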

+ 29 - 0
k8s-extensions/dashboard-ingress.yaml

@@ -0,0 +1,29 @@
+# https://kubernetes.io/docs/concepts/services-networking/ingress/#the-ingress-resource
+
+apiVersion: networking.k8s.io/v1
+kind: Ingress
+metadata:
+  name: dashboard
+  namespace: kubernetes-dashboard
+  annotations:
+    kubernetes.io/ingress.class: "nginx"
+    nginx.ingress.kubernetes.io/use-regex: "true" # enable regex path matching
+    nginx.ingress.kubernetes.io/rewrite-target: / # path rewrite
+    nginx.ingress.kubernetes.io/ssl-redirect: "true" # default true; with TLS enabled, HTTP requests are 308-redirected to HTTPS
+    nginx.ingress.kubernetes.io/backend-protocol: "HTTPS" # default HTTP; proxy to the backend with proxy_pass https://
+spec:
+  tls:
+  - hosts:
+    - dashboard.ushub.cn ## wildcard domains can be listed here sharing the same secretName
+    secretName: ushub.cn
+  rules:
+  - host: dashboard.ushub.cn
+    http:
+      paths:
+      - path: /
+        pathType: Prefix
+        backend:
+          service:
+            name: kubernetes-dashboard
+            port:
+              number: 443

+ 648 - 0
k8s-extensions/ingress-nginx-v1.6.4.yaml

@@ -0,0 +1,648 @@
+apiVersion: v1
+kind: Namespace
+metadata:
+  labels:
+    app.kubernetes.io/instance: ingress-nginx
+    app.kubernetes.io/name: ingress-nginx
+  name: ingress-nginx
+---
+apiVersion: v1
+automountServiceAccountToken: true
+kind: ServiceAccount
+metadata:
+  labels:
+    app.kubernetes.io/component: controller
+    app.kubernetes.io/instance: ingress-nginx
+    app.kubernetes.io/name: ingress-nginx
+    app.kubernetes.io/part-of: ingress-nginx
+    app.kubernetes.io/version: 1.6.4
+  name: ingress-nginx
+  namespace: ingress-nginx
+---
+apiVersion: v1
+kind: ServiceAccount
+metadata:
+  labels:
+    app.kubernetes.io/component: admission-webhook
+    app.kubernetes.io/instance: ingress-nginx
+    app.kubernetes.io/name: ingress-nginx
+    app.kubernetes.io/part-of: ingress-nginx
+    app.kubernetes.io/version: 1.6.4
+  name: ingress-nginx-admission
+  namespace: ingress-nginx
+---
+apiVersion: rbac.authorization.k8s.io/v1
+kind: Role
+metadata:
+  labels:
+    app.kubernetes.io/component: controller
+    app.kubernetes.io/instance: ingress-nginx
+    app.kubernetes.io/name: ingress-nginx
+    app.kubernetes.io/part-of: ingress-nginx
+    app.kubernetes.io/version: 1.6.4
+  name: ingress-nginx
+  namespace: ingress-nginx
+rules:
+- apiGroups:
+  - ""
+  resources:
+  - namespaces
+  verbs:
+  - get
+- apiGroups:
+  - ""
+  resources:
+  - configmaps
+  - pods
+  - secrets
+  - endpoints
+  verbs:
+  - get
+  - list
+  - watch
+- apiGroups:
+  - ""
+  resources:
+  - services
+  verbs:
+  - get
+  - list
+  - watch
+- apiGroups:
+  - networking.k8s.io
+  resources:
+  - ingresses
+  verbs:
+  - get
+  - list
+  - watch
+- apiGroups:
+  - networking.k8s.io
+  resources:
+  - ingresses/status
+  verbs:
+  - update
+- apiGroups:
+  - networking.k8s.io
+  resources:
+  - ingressclasses
+  verbs:
+  - get
+  - list
+  - watch
+- apiGroups:
+  - coordination.k8s.io
+  resourceNames:
+  - ingress-nginx-leader
+  resources:
+  - leases
+  verbs:
+  - get
+  - update
+- apiGroups:
+  - coordination.k8s.io
+  resources:
+  - leases
+  verbs:
+  - create
+- apiGroups:
+  - ""
+  resources:
+  - events
+  verbs:
+  - create
+  - patch
+- apiGroups:
+  - discovery.k8s.io
+  resources:
+  - endpointslices
+  verbs:
+  - list
+  - watch
+  - get
+---
+apiVersion: rbac.authorization.k8s.io/v1
+kind: Role
+metadata:
+  labels:
+    app.kubernetes.io/component: admission-webhook
+    app.kubernetes.io/instance: ingress-nginx
+    app.kubernetes.io/name: ingress-nginx
+    app.kubernetes.io/part-of: ingress-nginx
+    app.kubernetes.io/version: 1.6.4
+  name: ingress-nginx-admission
+  namespace: ingress-nginx
+rules:
+- apiGroups:
+  - ""
+  resources:
+  - secrets
+  verbs:
+  - get
+  - create
+---
+apiVersion: rbac.authorization.k8s.io/v1
+kind: ClusterRole
+metadata:
+  labels:
+    app.kubernetes.io/instance: ingress-nginx
+    app.kubernetes.io/name: ingress-nginx
+    app.kubernetes.io/part-of: ingress-nginx
+    app.kubernetes.io/version: 1.6.4
+  name: ingress-nginx
+rules:
+- apiGroups:
+  - ""
+  resources:
+  - configmaps
+  - endpoints
+  - nodes
+  - pods
+  - secrets
+  - namespaces
+  verbs:
+  - list
+  - watch
+- apiGroups:
+  - coordination.k8s.io
+  resources:
+  - leases
+  verbs:
+  - list
+  - watch
+- apiGroups:
+  - ""
+  resources:
+  - nodes
+  verbs:
+  - get
+- apiGroups:
+  - ""
+  resources:
+  - services
+  verbs:
+  - get
+  - list
+  - watch
+- apiGroups:
+  - networking.k8s.io
+  resources:
+  - ingresses
+  verbs:
+  - get
+  - list
+  - watch
+- apiGroups:
+  - ""
+  resources:
+  - events
+  verbs:
+  - create
+  - patch
+- apiGroups:
+  - networking.k8s.io
+  resources:
+  - ingresses/status
+  verbs:
+  - update
+- apiGroups:
+  - networking.k8s.io
+  resources:
+  - ingressclasses
+  verbs:
+  - get
+  - list
+  - watch
+- apiGroups:
+  - discovery.k8s.io
+  resources:
+  - endpointslices
+  verbs:
+  - list
+  - watch
+  - get
+---
+apiVersion: rbac.authorization.k8s.io/v1
+kind: ClusterRole
+metadata:
+  labels:
+    app.kubernetes.io/component: admission-webhook
+    app.kubernetes.io/instance: ingress-nginx
+    app.kubernetes.io/name: ingress-nginx
+    app.kubernetes.io/part-of: ingress-nginx
+    app.kubernetes.io/version: 1.6.4
+  name: ingress-nginx-admission
+rules:
+- apiGroups:
+  - admissionregistration.k8s.io
+  resources:
+  - validatingwebhookconfigurations
+  verbs:
+  - get
+  - update
+---
+apiVersion: rbac.authorization.k8s.io/v1
+kind: RoleBinding
+metadata:
+  labels:
+    app.kubernetes.io/component: controller
+    app.kubernetes.io/instance: ingress-nginx
+    app.kubernetes.io/name: ingress-nginx
+    app.kubernetes.io/part-of: ingress-nginx
+    app.kubernetes.io/version: 1.6.4
+  name: ingress-nginx
+  namespace: ingress-nginx
+roleRef:
+  apiGroup: rbac.authorization.k8s.io
+  kind: Role
+  name: ingress-nginx
+subjects:
+- kind: ServiceAccount
+  name: ingress-nginx
+  namespace: ingress-nginx
+---
+apiVersion: rbac.authorization.k8s.io/v1
+kind: RoleBinding
+metadata:
+  labels:
+    app.kubernetes.io/component: admission-webhook
+    app.kubernetes.io/instance: ingress-nginx
+    app.kubernetes.io/name: ingress-nginx
+    app.kubernetes.io/part-of: ingress-nginx
+    app.kubernetes.io/version: 1.6.4
+  name: ingress-nginx-admission
+  namespace: ingress-nginx
+roleRef:
+  apiGroup: rbac.authorization.k8s.io
+  kind: Role
+  name: ingress-nginx-admission
+subjects:
+- kind: ServiceAccount
+  name: ingress-nginx-admission
+  namespace: ingress-nginx
+---
+apiVersion: rbac.authorization.k8s.io/v1
+kind: ClusterRoleBinding
+metadata:
+  labels:
+    app.kubernetes.io/instance: ingress-nginx
+    app.kubernetes.io/name: ingress-nginx
+    app.kubernetes.io/part-of: ingress-nginx
+    app.kubernetes.io/version: 1.6.4
+  name: ingress-nginx
+roleRef:
+  apiGroup: rbac.authorization.k8s.io
+  kind: ClusterRole
+  name: ingress-nginx
+subjects:
+- kind: ServiceAccount
+  name: ingress-nginx
+  namespace: ingress-nginx
+---
+apiVersion: rbac.authorization.k8s.io/v1
+kind: ClusterRoleBinding
+metadata:
+  labels:
+    app.kubernetes.io/component: admission-webhook
+    app.kubernetes.io/instance: ingress-nginx
+    app.kubernetes.io/name: ingress-nginx
+    app.kubernetes.io/part-of: ingress-nginx
+    app.kubernetes.io/version: 1.6.4
+  name: ingress-nginx-admission
+roleRef:
+  apiGroup: rbac.authorization.k8s.io
+  kind: ClusterRole
+  name: ingress-nginx-admission
+subjects:
+- kind: ServiceAccount
+  name: ingress-nginx-admission
+  namespace: ingress-nginx
+---
+apiVersion: v1
+data:
+  allow-snippet-annotations: "true"
+kind: ConfigMap
+metadata:
+  labels:
+    app.kubernetes.io/component: controller
+    app.kubernetes.io/instance: ingress-nginx
+    app.kubernetes.io/name: ingress-nginx
+    app.kubernetes.io/part-of: ingress-nginx
+    app.kubernetes.io/version: 1.6.4
+  name: ingress-nginx-controller
+  namespace: ingress-nginx
+---
+apiVersion: v1
+kind: Service
+metadata:
+  labels:
+    app.kubernetes.io/component: controller
+    app.kubernetes.io/instance: ingress-nginx
+    app.kubernetes.io/name: ingress-nginx
+    app.kubernetes.io/part-of: ingress-nginx
+    app.kubernetes.io/version: 1.6.4
+  name: ingress-nginx-controller
+  namespace: ingress-nginx
+spec:
+  ipFamilies:
+  - IPv4
+  ipFamilyPolicy: SingleStack
+  ports:
+  - appProtocol: http
+    name: http
+    port: 80
+    protocol: TCP
+    targetPort: http
+  - appProtocol: https
+    name: https
+    port: 443
+    protocol: TCP
+    targetPort: https
+  selector:
+    app.kubernetes.io/component: controller
+    app.kubernetes.io/instance: ingress-nginx
+    app.kubernetes.io/name: ingress-nginx
+  type: NodePort
+---
+apiVersion: v1
+kind: Service
+metadata:
+  labels:
+    app.kubernetes.io/component: controller
+    app.kubernetes.io/instance: ingress-nginx
+    app.kubernetes.io/name: ingress-nginx
+    app.kubernetes.io/part-of: ingress-nginx
+    app.kubernetes.io/version: 1.6.4
+  name: ingress-nginx-controller-admission
+  namespace: ingress-nginx
+spec:
+  ports:
+  - appProtocol: https
+    name: https-webhook
+    port: 443
+    targetPort: webhook
+  selector:
+    app.kubernetes.io/component: controller
+    app.kubernetes.io/instance: ingress-nginx
+    app.kubernetes.io/name: ingress-nginx
+  type: ClusterIP
+---
+apiVersion: apps/v1
+#kind: Deployment
+kind: DaemonSet
+metadata:
+  labels:
+    app.kubernetes.io/component: controller
+    app.kubernetes.io/instance: ingress-nginx
+    app.kubernetes.io/name: ingress-nginx
+    app.kubernetes.io/part-of: ingress-nginx
+    app.kubernetes.io/version: 1.6.4
+  name: ingress-nginx-controller
+  namespace: ingress-nginx
+spec:
+  minReadySeconds: 0
+  revisionHistoryLimit: 10
+  selector:
+    matchLabels:
+      app.kubernetes.io/component: controller
+      app.kubernetes.io/instance: ingress-nginx
+      app.kubernetes.io/name: ingress-nginx
+  template:
+    metadata:
+      labels:
+        app.kubernetes.io/component: controller
+        app.kubernetes.io/instance: ingress-nginx
+        app.kubernetes.io/name: ingress-nginx
+    spec:
+      dnsPolicy: ClusterFirstWithHostNet   ## DNS policy required when hostNetwork is enabled
+      hostNetwork: true  ## nginx binds ports 80/443 directly on the host
+      containers:
+      - args:
+        - /nginx-ingress-controller
+        - --election-id=ingress-nginx-leader
+        - --controller-class=k8s.io/ingress-nginx
+        - --ingress-class=nginx
+        - --configmap=$(POD_NAMESPACE)/ingress-nginx-controller
+        - --validating-webhook=:8443
+        - --validating-webhook-certificate=/usr/local/certificates/cert
+        - --validating-webhook-key=/usr/local/certificates/key
+        env:
+        - name: POD_NAME
+          valueFrom:
+            fieldRef:
+              fieldPath: metadata.name
+        - name: POD_NAMESPACE
+          valueFrom:
+            fieldRef:
+              fieldPath: metadata.namespace
+        - name: LD_PRELOAD
+          value: /usr/local/lib/libmimalloc.so
+        image: registry.k8s.io/ingress-nginx/controller:v1.6.4@sha256:15be4666c53052484dd2992efacf2f50ea77a78ae8aa21ccd91af6baaa7ea22f
+        imagePullPolicy: IfNotPresent
+        lifecycle:
+          preStop:
+            exec:
+              command:
+              - /wait-shutdown
+        livenessProbe:
+          failureThreshold: 5
+          httpGet:
+            path: /healthz
+            port: 10254
+            scheme: HTTP
+          initialDelaySeconds: 10
+          periodSeconds: 10
+          successThreshold: 1
+          timeoutSeconds: 1
+        name: controller
+        ports:
+        - containerPort: 80
+          name: http
+          protocol: TCP
+        - containerPort: 443
+          name: https
+          protocol: TCP
+        - containerPort: 8443
+          name: webhook
+          protocol: TCP
+        readinessProbe:
+          failureThreshold: 3
+          httpGet:
+            path: /healthz
+            port: 10254
+            scheme: HTTP
+          initialDelaySeconds: 10
+          periodSeconds: 10
+          successThreshold: 1
+          timeoutSeconds: 1
+        resources:
+          requests:
+            cpu: 100m
+            memory: 90Mi
+          limits:
+            cpu: 500m
+            memory: 500Mi
+        securityContext:
+          allowPrivilegeEscalation: true
+          capabilities:
+            add:
+            - NET_BIND_SERVICE
+            drop:
+            - ALL
+          runAsUser: 101
+        volumeMounts:
+        - mountPath: /usr/local/certificates/
+          name: webhook-cert
+          readOnly: true
+      nodeSelector:
+        kubernetes.io/os: linux
+        node-role: ingress
+      serviceAccountName: ingress-nginx
+      terminationGracePeriodSeconds: 300
+      volumes:
+      - name: webhook-cert
+        secret:
+          secretName: ingress-nginx-admission
+---
+apiVersion: batch/v1
+kind: Job
+metadata:
+  labels:
+    app.kubernetes.io/component: admission-webhook
+    app.kubernetes.io/instance: ingress-nginx
+    app.kubernetes.io/name: ingress-nginx
+    app.kubernetes.io/part-of: ingress-nginx
+    app.kubernetes.io/version: 1.6.4
+  name: ingress-nginx-admission-create
+  namespace: ingress-nginx
+spec:
+  template:
+    metadata:
+      labels:
+        app.kubernetes.io/component: admission-webhook
+        app.kubernetes.io/instance: ingress-nginx
+        app.kubernetes.io/name: ingress-nginx
+        app.kubernetes.io/part-of: ingress-nginx
+        app.kubernetes.io/version: 1.6.4
+      name: ingress-nginx-admission-create
+    spec:
+      containers:
+      - args:
+        - create
+        - --host=ingress-nginx-controller-admission,ingress-nginx-controller-admission.$(POD_NAMESPACE).svc
+        - --namespace=$(POD_NAMESPACE)
+        - --secret-name=ingress-nginx-admission
+        env:
+        - name: POD_NAMESPACE
+          valueFrom:
+            fieldRef:
+              fieldPath: metadata.namespace
+        image: registry.k8s.io/ingress-nginx/kube-webhook-certgen:v20220916-gd32f8c343@sha256:39c5b2e3310dc4264d638ad28d9d1d96c4cbb2b2dcfb52368fe4e3c63f61e10f
+        imagePullPolicy: IfNotPresent
+        name: create
+        securityContext:
+          allowPrivilegeEscalation: false
+      nodeSelector:
+        kubernetes.io/os: linux
+      restartPolicy: OnFailure
+      securityContext:
+        fsGroup: 2000
+        runAsNonRoot: true
+        runAsUser: 2000
+      serviceAccountName: ingress-nginx-admission
+---
+apiVersion: batch/v1
+kind: Job
+metadata:
+  labels:
+    app.kubernetes.io/component: admission-webhook
+    app.kubernetes.io/instance: ingress-nginx
+    app.kubernetes.io/name: ingress-nginx
+    app.kubernetes.io/part-of: ingress-nginx
+    app.kubernetes.io/version: 1.6.4
+  name: ingress-nginx-admission-patch
+  namespace: ingress-nginx
+spec:
+  template:
+    metadata:
+      labels:
+        app.kubernetes.io/component: admission-webhook
+        app.kubernetes.io/instance: ingress-nginx
+        app.kubernetes.io/name: ingress-nginx
+        app.kubernetes.io/part-of: ingress-nginx
+        app.kubernetes.io/version: 1.6.4
+      name: ingress-nginx-admission-patch
+    spec:
+      containers:
+      - args:
+        - patch
+        - --webhook-name=ingress-nginx-admission
+        - --namespace=$(POD_NAMESPACE)
+        - --patch-mutating=false
+        - --secret-name=ingress-nginx-admission
+        - --patch-failure-policy=Fail
+        env:
+        - name: POD_NAMESPACE
+          valueFrom:
+            fieldRef:
+              fieldPath: metadata.namespace
+        image: registry.k8s.io/ingress-nginx/kube-webhook-certgen:v20220916-gd32f8c343@sha256:39c5b2e3310dc4264d638ad28d9d1d96c4cbb2b2dcfb52368fe4e3c63f61e10f
+        imagePullPolicy: IfNotPresent
+        name: patch
+        securityContext:
+          allowPrivilegeEscalation: false
+      nodeSelector:
+        kubernetes.io/os: linux
+      restartPolicy: OnFailure
+      securityContext:
+        fsGroup: 2000
+        runAsNonRoot: true
+        runAsUser: 2000
+      serviceAccountName: ingress-nginx-admission
+---
+apiVersion: networking.k8s.io/v1
+kind: IngressClass
+metadata:
+  labels:
+    app.kubernetes.io/component: controller
+    app.kubernetes.io/instance: ingress-nginx
+    app.kubernetes.io/name: ingress-nginx
+    app.kubernetes.io/part-of: ingress-nginx
+    app.kubernetes.io/version: 1.6.4
+  name: nginx
+spec:
+  controller: k8s.io/ingress-nginx
+---
+apiVersion: admissionregistration.k8s.io/v1
+kind: ValidatingWebhookConfiguration
+metadata:
+  labels:
+    app.kubernetes.io/component: admission-webhook
+    app.kubernetes.io/instance: ingress-nginx
+    app.kubernetes.io/name: ingress-nginx
+    app.kubernetes.io/part-of: ingress-nginx
+    app.kubernetes.io/version: 1.6.4
+  name: ingress-nginx-admission
+webhooks:
+- admissionReviewVersions:
+  - v1
+  clientConfig:
+    service:
+      name: ingress-nginx-controller-admission
+      namespace: ingress-nginx
+      path: /networking/v1/ingresses
+  failurePolicy: Fail
+  matchPolicy: Equivalent
+  name: validate.nginx.ingress.kubernetes.io
+  rules:
+  - apiGroups:
+    - networking.k8s.io
+    apiVersions:
+    - v1
+    operations:
+    - CREATE
+    - UPDATE
+    resources:
+    - ingresses
+  sideEffects: None

+ 18 - 0
k8s-extensions/kubernetes-dashboard-v2.7.0-admin.yaml

@@ -0,0 +1,18 @@
+apiVersion: v1
+kind: ServiceAccount
+metadata:
+  name: admin-user
+  namespace: kubernetes-dashboard
+---
+apiVersion: rbac.authorization.k8s.io/v1
+kind: ClusterRoleBinding
+metadata:
+  name: admin-user
+roleRef:
+  apiGroup: rbac.authorization.k8s.io
+  kind: ClusterRole
+  name: cluster-admin
+subjects:
+- kind: ServiceAccount
+  name: admin-user
+  namespace: kubernetes-dashboard

+ 309 - 0
k8s-extensions/kubernetes-dashboard-v2.7.0.yaml

@@ -0,0 +1,309 @@
+# Copyright 2017 The Kubernetes Authors.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+apiVersion: v1
+kind: Namespace
+metadata:
+  name: kubernetes-dashboard
+
+---
+
+apiVersion: v1
+kind: ServiceAccount
+metadata:
+  labels:
+    k8s-app: kubernetes-dashboard
+  name: kubernetes-dashboard
+  namespace: kubernetes-dashboard
+
+---
+
+kind: Service
+apiVersion: v1
+metadata:
+  labels:
+    k8s-app: kubernetes-dashboard
+  name: kubernetes-dashboard
+  namespace: kubernetes-dashboard
+spec:
+  ports:
+    - port: 443
+      targetPort: 8443
+      nodePort: 32333
+  type: NodePort
+  selector:
+    k8s-app: kubernetes-dashboard
+
+---
+
+apiVersion: v1
+kind: Secret
+metadata:
+  labels:
+    k8s-app: kubernetes-dashboard
+  name: kubernetes-dashboard-certs
+  namespace: kubernetes-dashboard
+type: Opaque
+
+---
+
+apiVersion: v1
+kind: Secret
+metadata:
+  labels:
+    k8s-app: kubernetes-dashboard
+  name: kubernetes-dashboard-csrf
+  namespace: kubernetes-dashboard
+type: Opaque
+data:
+  csrf: ""
+
+---
+
+apiVersion: v1
+kind: Secret
+metadata:
+  labels:
+    k8s-app: kubernetes-dashboard
+  name: kubernetes-dashboard-key-holder
+  namespace: kubernetes-dashboard
+type: Opaque
+
+---
+
+kind: ConfigMap
+apiVersion: v1
+metadata:
+  labels:
+    k8s-app: kubernetes-dashboard
+  name: kubernetes-dashboard-settings
+  namespace: kubernetes-dashboard
+
+---
+
+kind: Role
+apiVersion: rbac.authorization.k8s.io/v1
+metadata:
+  labels:
+    k8s-app: kubernetes-dashboard
+  name: kubernetes-dashboard
+  namespace: kubernetes-dashboard
+rules:
+  # Allow Dashboard to get, update and delete Dashboard exclusive secrets.
+  - apiGroups: [""]
+    resources: ["secrets"]
+    resourceNames: ["kubernetes-dashboard-key-holder", "kubernetes-dashboard-certs", "kubernetes-dashboard-csrf"]
+    verbs: ["get", "update", "delete"]
+  # Allow Dashboard to get and update 'kubernetes-dashboard-settings' config map.
+  - apiGroups: [""]
+    resources: ["configmaps"]
+    resourceNames: ["kubernetes-dashboard-settings"]
+    verbs: ["get", "update"]
+  # Allow Dashboard to get metrics.
+  - apiGroups: [""]
+    resources: ["services"]
+    resourceNames: ["heapster", "dashboard-metrics-scraper"]
+    verbs: ["proxy"]
+  - apiGroups: [""]
+    resources: ["services/proxy"]
+    resourceNames: ["heapster", "http:heapster:", "https:heapster:", "dashboard-metrics-scraper", "http:dashboard-metrics-scraper"]
+    verbs: ["get"]
+
+---
+
+kind: ClusterRole
+apiVersion: rbac.authorization.k8s.io/v1
+metadata:
+  labels:
+    k8s-app: kubernetes-dashboard
+  name: kubernetes-dashboard
+rules:
+  # Allow Metrics Scraper to get metrics from the Metrics server
+  - apiGroups: ["metrics.k8s.io"]
+    resources: ["pods", "nodes"]
+    verbs: ["get", "list", "watch"]
+
+---
+
+apiVersion: rbac.authorization.k8s.io/v1
+kind: RoleBinding
+metadata:
+  labels:
+    k8s-app: kubernetes-dashboard
+  name: kubernetes-dashboard
+  namespace: kubernetes-dashboard
+roleRef:
+  apiGroup: rbac.authorization.k8s.io
+  kind: Role
+  name: kubernetes-dashboard
+subjects:
+  - kind: ServiceAccount
+    name: kubernetes-dashboard
+    namespace: kubernetes-dashboard
+
+---
+
+apiVersion: rbac.authorization.k8s.io/v1
+kind: ClusterRoleBinding
+metadata:
+  name: kubernetes-dashboard
+roleRef:
+  apiGroup: rbac.authorization.k8s.io
+  kind: ClusterRole
+  name: kubernetes-dashboard
+subjects:
+  - kind: ServiceAccount
+    name: kubernetes-dashboard
+    namespace: kubernetes-dashboard
+
+---
+
+kind: Deployment
+apiVersion: apps/v1
+metadata:
+  labels:
+    k8s-app: kubernetes-dashboard
+  name: kubernetes-dashboard
+  namespace: kubernetes-dashboard
+spec:
+  replicas: 1
+  revisionHistoryLimit: 10
+  selector:
+    matchLabels:
+      k8s-app: kubernetes-dashboard
+  template:
+    metadata:
+      labels:
+        k8s-app: kubernetes-dashboard
+    spec:
+      securityContext:
+        seccompProfile:
+          type: RuntimeDefault
+      containers:
+        - name: kubernetes-dashboard
+          image: kubernetesui/dashboard:v2.7.0
+          imagePullPolicy: Always
+          ports:
+            - containerPort: 8443
+              protocol: TCP
+          args:
+            - --auto-generate-certificates
+            - --namespace=kubernetes-dashboard
+            # Uncomment the following line to manually specify Kubernetes API server Host
+            # If not specified, Dashboard will attempt to auto discover the API server and connect
+            # to it. Uncomment only if the default does not work.
+            # - --apiserver-host=http://my-address:port
+          volumeMounts:
+            - name: kubernetes-dashboard-certs
+              mountPath: /certs
+              # Create on-disk volume to store exec logs
+            - mountPath: /tmp
+              name: tmp-volume
+          livenessProbe:
+            httpGet:
+              scheme: HTTPS
+              path: /
+              port: 8443
+            initialDelaySeconds: 30
+            timeoutSeconds: 30
+          securityContext:
+            allowPrivilegeEscalation: false
+            readOnlyRootFilesystem: true
+            runAsUser: 1001
+            runAsGroup: 2001
+      volumes:
+        - name: kubernetes-dashboard-certs
+          secret:
+            secretName: kubernetes-dashboard-certs
+        - name: tmp-volume
+          emptyDir: {}
+      serviceAccountName: kubernetes-dashboard
+      nodeSelector:
+        "kubernetes.io/os": linux
+      # Comment out the following tolerations if Dashboard must not be deployed on master
+      tolerations:
+        - key: node-role.kubernetes.io/master
+          effect: NoSchedule
+
+---
+
+kind: Service
+apiVersion: v1
+metadata:
+  labels:
+    k8s-app: dashboard-metrics-scraper
+  name: dashboard-metrics-scraper
+  namespace: kubernetes-dashboard
+spec:
+  ports:
+    - port: 8000
+      targetPort: 8000
+  selector:
+    k8s-app: dashboard-metrics-scraper
+
+---
+
+kind: Deployment
+apiVersion: apps/v1
+metadata:
+  labels:
+    k8s-app: dashboard-metrics-scraper
+  name: dashboard-metrics-scraper
+  namespace: kubernetes-dashboard
+spec:
+  replicas: 1
+  revisionHistoryLimit: 10
+  selector:
+    matchLabels:
+      k8s-app: dashboard-metrics-scraper
+  template:
+    metadata:
+      labels:
+        k8s-app: dashboard-metrics-scraper
+    spec:
+      securityContext:
+        seccompProfile:
+          type: RuntimeDefault
+      containers:
+        - name: dashboard-metrics-scraper
+          image: kubernetesui/metrics-scraper:v1.0.8
+          ports:
+            - containerPort: 8000
+              protocol: TCP
+          livenessProbe:
+            httpGet:
+              scheme: HTTP
+              path: /
+              port: 8000
+            initialDelaySeconds: 30
+            timeoutSeconds: 30
+          volumeMounts:
+          - mountPath: /tmp
+            name: tmp-volume
+          securityContext:
+            allowPrivilegeEscalation: false
+            readOnlyRootFilesystem: true
+            runAsUser: 1001
+            runAsGroup: 2001
+      serviceAccountName: kubernetes-dashboard
+      nodeSelector:
+        "kubernetes.io/os": linux
+      # Comment out the following tolerations if Dashboard must not be deployed on master
+      tolerations:
+        - key: node-role.kubernetes.io/master
+          effect: NoSchedule
+      volumes:
+        - name: tmp-volume
+          emptyDir: {}
+
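Note: the Service above is patched from the upstream recommended.yaml to type NodePort, so the dashboard should be reachable at https://<node-ip>:32333 once the pods are Ready (<node-ip> is a placeholder for any node's address). A quick check, assuming kubectl access:

    kubectl -n kubernetes-dashboard get pods,svc
    curl -k https://<node-ip>:32333/   # -k because the dashboard's auto-generated certificate is self-signed

On clusters initialized with Kubernetes >= 1.24 the control-plane taint is node-role.kubernetes.io/control-plane rather than .../master, so the tolerations above may need an extra entry to schedule onto control-plane nodes.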

+ 197 - 0
k8s-extensions/metrics-server-v0.6.2.yaml

@@ -0,0 +1,197 @@
+apiVersion: v1
+kind: ServiceAccount
+metadata:
+  labels:
+    k8s-app: metrics-server
+  name: metrics-server
+  namespace: kube-system
+---
+apiVersion: rbac.authorization.k8s.io/v1
+kind: ClusterRole
+metadata:
+  labels:
+    k8s-app: metrics-server
+    rbac.authorization.k8s.io/aggregate-to-admin: "true"
+    rbac.authorization.k8s.io/aggregate-to-edit: "true"
+    rbac.authorization.k8s.io/aggregate-to-view: "true"
+  name: system:aggregated-metrics-reader
+rules:
+- apiGroups:
+  - metrics.k8s.io
+  resources:
+  - pods
+  - nodes
+  verbs:
+  - get
+  - list
+  - watch
+---
+apiVersion: rbac.authorization.k8s.io/v1
+kind: ClusterRole
+metadata:
+  labels:
+    k8s-app: metrics-server
+  name: system:metrics-server
+rules:
+- apiGroups:
+  - ""
+  resources:
+  - nodes/metrics
+  verbs:
+  - get
+- apiGroups:
+  - ""
+  resources:
+  - pods
+  - nodes
+  verbs:
+  - get
+  - list
+  - watch
+---
+apiVersion: rbac.authorization.k8s.io/v1
+kind: RoleBinding
+metadata:
+  labels:
+    k8s-app: metrics-server
+  name: metrics-server-auth-reader
+  namespace: kube-system
+roleRef:
+  apiGroup: rbac.authorization.k8s.io
+  kind: Role
+  name: extension-apiserver-authentication-reader
+subjects:
+- kind: ServiceAccount
+  name: metrics-server
+  namespace: kube-system
+---
+apiVersion: rbac.authorization.k8s.io/v1
+kind: ClusterRoleBinding
+metadata:
+  labels:
+    k8s-app: metrics-server
+  name: metrics-server:system:auth-delegator
+roleRef:
+  apiGroup: rbac.authorization.k8s.io
+  kind: ClusterRole
+  name: system:auth-delegator
+subjects:
+- kind: ServiceAccount
+  name: metrics-server
+  namespace: kube-system
+---
+apiVersion: rbac.authorization.k8s.io/v1
+kind: ClusterRoleBinding
+metadata:
+  labels:
+    k8s-app: metrics-server
+  name: system:metrics-server
+roleRef:
+  apiGroup: rbac.authorization.k8s.io
+  kind: ClusterRole
+  name: system:metrics-server
+subjects:
+- kind: ServiceAccount
+  name: metrics-server
+  namespace: kube-system
+---
+apiVersion: v1
+kind: Service
+metadata:
+  labels:
+    k8s-app: metrics-server
+  name: metrics-server
+  namespace: kube-system
+spec:
+  ports:
+  - name: https
+    port: 443
+    protocol: TCP
+    targetPort: https
+  selector:
+    k8s-app: metrics-server
+---
+apiVersion: apps/v1
+kind: Deployment
+metadata:
+  labels:
+    k8s-app: metrics-server
+  name: metrics-server
+  namespace: kube-system
+spec:
+  selector:
+    matchLabels:
+      k8s-app: metrics-server
+  strategy:
+    rollingUpdate:
+      maxUnavailable: 0
+  template:
+    metadata:
+      labels:
+        k8s-app: metrics-server
+    spec:
+      containers:
+      - args:
+        - --kubelet-insecure-tls
+        - --cert-dir=/tmp
+        - --secure-port=4443
+        - --kubelet-preferred-address-types=InternalIP,ExternalIP,Hostname
+        - --kubelet-use-node-status-port
+        - --metric-resolution=15s
+        image: registry.k8s.io/metrics-server/metrics-server:v0.6.2
+        imagePullPolicy: IfNotPresent
+        livenessProbe:
+          failureThreshold: 3
+          httpGet:
+            path: /livez
+            port: https
+            scheme: HTTPS
+          periodSeconds: 10
+        name: metrics-server
+        ports:
+        - containerPort: 4443
+          name: https
+          protocol: TCP
+        readinessProbe:
+          failureThreshold: 3
+          httpGet:
+            path: /readyz
+            port: https
+            scheme: HTTPS
+          initialDelaySeconds: 20
+          periodSeconds: 10
+        resources:
+          requests:
+            cpu: 100m
+            memory: 200Mi
+        securityContext:
+          allowPrivilegeEscalation: false
+          readOnlyRootFilesystem: true
+          runAsNonRoot: true
+          runAsUser: 1000
+        volumeMounts:
+        - mountPath: /tmp
+          name: tmp-dir
+      nodeSelector:
+        kubernetes.io/os: linux
+      priorityClassName: system-cluster-critical
+      serviceAccountName: metrics-server
+      volumes:
+      - emptyDir: {}
+        name: tmp-dir
+---
+apiVersion: apiregistration.k8s.io/v1
+kind: APIService
+metadata:
+  labels:
+    k8s-app: metrics-server
+  name: v1beta1.metrics.k8s.io
+spec:
+  group: metrics.k8s.io
+  groupPriorityMinimum: 100
+  insecureSkipTLSVerify: true
+  service:
+    name: metrics-server
+    namespace: kube-system
+  version: v1beta1
+  versionPriority: 100
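Note: this appears to be the upstream v0.6.2 components.yaml with --kubelet-insecure-tls added, which skips verification of the kubelets' serving certificates; acceptable for a lab cluster whose kubelet certs are not signed for their node IPs, but not for production. A minimal verification once the Deployment is up (a sketch):

    kubectl -n kube-system get pods -l k8s-app=metrics-server
    kubectl get apiservice v1beta1.metrics.k8s.io   # should report Available=True
    kubectl top nodes                               # resource metrics now served via metrics.k8s.io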