|
@@ -0,0 +1,1155 @@
|
|
|
|
#!/usr/bin/env bash
# Single-node etcd + Kubernetes control-plane bootstrap script.
# NOTE(review): later sections use bash-only constructs (arrays, (( ))),
# so the interpreter must be bash, not a minimal /bin/sh.

set -e

# Cluster network ranges; must not overlap the host or docker networks.
pod_cidr="196.16.0.0/16"
svc_cidr="10.96.0.0/16"
# Derived service IPs: strip the last ".<octet>/<mask>" component of the
# service CIDR, then append .1 (apiserver ClusterIP) and .10 (cluster DNS).
svc_frst=${svc_cidr%.*}.1
svc_dns=${svc_cidr%.*}.10

# Directory the script was launched from.
origin_dir=$(pwd)
|
|
|
|
+
|
|
|
|
### Setting environment
# Public IPv4 of this host, used as the advertise/SAN address everywhere
# below.  -fsS: fail on HTTP errors and stay silent instead of printing a
# progress meter (the original unflagged curl could leave export_addr
# empty on an HTTP error without failing).
export_addr=$(curl -fsS ipv4.icanhazip.com)
export_port=6443

# CA organization
# Certificate lifetime: 876000h ~= 100 years (87600h = 10y, 438000h = 50y).
expiry=876000h
ca_config_path=/etc/ca/ca-config.json

# etcd PKI directory and the hostnames/IPs to embed as certificate SANs.
etcd_pki_path=/etc/etcd/pki
etcd_ips_list="$export_addr $(hostname)"

# kubernetes PKI directory and certificate SANs.
k8s_pki_path=/etc/kubernetes/pki
k8s_ips_list="$export_addr $(hostname)"


# Markers delimiting the block this script appends to /etc/profile
# (the misspelling "envionment" is kept: it is the string grep'd for below).
markS="##### ca-etcd-k8s envionment #####"
markE="##################################"
|
|
|
|
+
|
|
|
|
# Append the exported environment variables to /etc/profile so later
# shells and follow-up scripts see the same paths and addresses.
# The original piped an echo-script through `sh` and appended its stdout;
# a single here-doc writes byte-identical lines without the extra shell.
write_env_profile() {
cat <<EOF >> /etc/profile
$markS
export expiry=$expiry
export ca_config_path=$ca_config_path
export etcd_pki_path=$etcd_pki_path
export etcd_ips_list="$etcd_ips_list"
export k8s_pki_path=$k8s_pki_path
export k8s_ips_list="$k8s_ips_list"
export export_addr=$export_addr
export export_port=$export_port
$markE
EOF
}
|
|
|
|
+
|
|
|
|
# Idempotency guard: if the marker block is already in /etc/profile a
# previous run wrote the environment -- bail out instead of duplicating.
# -q replaces the >/dev/null redirect; -F matches the marker literally.
if grep -qF "$markS" /etc/profile
then
    echo already write profile env
    echo "clear the environment and run again"
    exit
else
    write_env_profile
    echo write profile env success
    # Make the public address resolvable via the local hostname.
    # Variables go through %s, never through the printf format string.
    printf '\n%s %s\n' "$export_addr" "$(hostname)" >> /etc/hosts
fi
|
|
|
|
+
|
|
|
|
### CA organization
# Write the cfssl signing policy.  Each profile below fixes the key usages
# and expiry that `cfssl gencert -profile=<name>` stamps into issued certs:
# server/client/peer are generic, kubernetes and etcd are dual-purpose
# (server auth + client auth).
#ca-config.json
echo 创建机构配置 $ca_config_path
mkdir -p $(dirname $ca_config_path)
cat << EOF > $ca_config_path
{
    "signing": {
        "default": {
            "expiry": "$expiry"
        },
        "profiles": {
            "server": {
                "expiry": "$expiry",
                "usages": [
                    "signing",
                    "key encipherment",
                    "server auth"
                ]
            },
            "client": {
                "expiry": "$expiry",
                "usages": [
                    "signing",
                    "key encipherment",
                    "client auth"
                ]
            },
            "peer": {
                "expiry": "$expiry",
                "usages": [
                    "signing",
                    "key encipherment",
                    "server auth",
                    "client auth"
                ]
            },
            "kubernetes": {
                "expiry": "$expiry",
                "usages": [
                    "signing",
                    "key encipherment",
                    "server auth",
                    "client auth"
                ]
            },
            "etcd": {
                "expiry": "$expiry",
                "usages": [
                    "signing",
                    "key encipherment",
                    "server auth",
                    "client auth"
                ]
            }
        }
    }
}
EOF
|
|
|
|
+
|
|
|
|
## Install etcd
# Create the etcd certificate directory and certificates.
### 1. Generate the etcd root CA certificate

echo 生成 etcd 根 ca 证书
mkdir -p $etcd_pki_path
cd $etcd_pki_path

# etcd CA certificate signing request
cat <<EOF > etcd-ca-csr.json
{
  "CN": "etcd",
  "key": {
    "algo": "rsa",
    "size": 2048
  },
  "names": [
    {
      "C": "CN",
      "ST": "Beijing",
      "L": "Beijing",
      "O": "etcd",
      "OU": "etcd"
    }
  ],
  "ca": {
    "expiry": "$expiry"
  }
}
EOF

# Generate the etcd CA root certificate and key
# (produces etcd-ca.pem / etcd-ca-key.pem in the current directory).
cfssl gencert -initca etcd-ca-csr.json | cfssljson -bare etcd-ca
|
|
|
|
+
|
|
|
|
# Issue the etcd server/peer/client certificate.
echo etcd 颁证书

mkdir -p $etcd_pki_path
cd $etcd_pki_path

# etcd certificate signing request; the SAN list starts with loopback and
# node addresses are inserted just before it below.
cat <<EOF > etcd-csr.json
{
  "CN": "etcd",
  "key": {
    "algo": "rsa",
    "size": 2048
  },
  "hosts": [
    "127.0.0.1"
  ],
  "names": [
    {
      "C": "CN",
      "ST": "Beijing",
      "L": "Beijing",
      "O": "etcd",
      "OU": "System"
    }
  ]
}
EOF

# Add every etcd node hostname/IP to the certificate SANs by inserting
# each one before the "127.0.0.1" entry.  Plain word-splitting of the
# space-separated list replaces the bash-only ${var[@]} expansion the
# original used under a /bin/sh shebang.
for addr in $etcd_ips_list
do
  echo "===>>> etcd hosts add $addr"
  sed -i "\#\"127.0.0.1\"#i\ \"$addr\"," etcd-csr.json
done

# Generate the etcd certificate signed by the etcd CA (etcd profile).
cfssl gencert \
  -ca=etcd-ca.pem \
  -ca-key=etcd-ca-key.pem \
  -config=${ca_config_path} \
  -profile=etcd \
  etcd-csr.json | cfssljson -bare etcd
|
|
|
|
+
|
|
|
|
### Write the etcd.yaml configuration.  The {{host}}/{{ipls}}/{{host0}}/
### {{ipls0}} placeholders are substituted by the sed loop further below.
### (The heredoc content, including its inline comments, is the file that
### etcd reads at runtime and is left untouched.)

mkdir -p /var/lib/etcd
etcd_config_file=$(dirname ${etcd_pki_path})/etcd.yaml
echo 配置 $etcd_config_file
cat <<EOF > $etcd_config_file
name: '{{host}}' #每个机器可以写自己的域名,不能重复
data-dir: /var/lib/etcd
wal-dir: /var/lib/etcd/wal
snapshot-count: 5000
heartbeat-interval: 100
election-timeout: 1000
quota-backend-bytes: 0
listen-peer-urls: 'https://{{ipls}}:2380' #本机ip+2380端口,代表和集群通信22
listen-client-urls: 'https://{{ipls}}:2379,http://127.0.0.1:2379' #自己的ip
max-snapshots: 3
max-wals: 5
cors:
initial-advertise-peer-urls: 'https://{{ipls}}:2380' #自己的ip
advertise-client-urls: 'https://{{ipls}}:2379' #自己的ip
discovery:
discovery-fallback: 'proxy'
discovery-proxy:
discovery-srv:
initial-cluster: '{{host0}}=https://{{ipls0}}:2380' #这里不一样
initial-cluster-token: 'etcd-k8s-cluster'
initial-cluster-state: 'new'
strict-reconfig-check: false
enable-v2: true
enable-pprof: true
proxy: 'off'
proxy-failure-wait: 5000
proxy-refresh-interval: 30000
proxy-dial-timeout: 1000
proxy-write-timeout: 5000
proxy-read-timeout: 0
client-transport-security:
  cert-file: '${etcd_pki_path}/etcd.pem'
  key-file: '${etcd_pki_path}/etcd-key.pem'
  client-cert-auth: true
  trusted-ca-file: '${etcd_pki_path}/etcd-ca.pem'
  auto-tls: true
peer-transport-security:
  cert-file: '${etcd_pki_path}/etcd.pem'
  key-file: '${etcd_pki_path}/etcd-key.pem'
  peer-client-cert-auth: true
  trusted-ca-file: '${etcd_pki_path}/etcd-ca.pem'
  auto-tls: true
debug: false
log-package-levels:
log-outputs: [default]
force-new-cluster: false
EOF
|
|
|
|
+
|
|
|
|
# Substitute the etcd.yaml template placeholders.  This is a single-node
# setup, so the node's own hostname/IP also serve as the cluster-initial
# values ({{host0}}/{{ipls0}}).  Plain variables replace the bash-only
# arrays the original declared under a /bin/sh shebang; the resulting
# substitutions are identical.
host=$(hostname)
ipls=$export_addr

sed -i "s/{{host}}/${host}/g" $etcd_config_file
sed -i "s/{{host0}}/${host}/g" $etcd_config_file

sed -i "s/{{ipls}}/${ipls}/g" $etcd_config_file
sed -i "s/{{ipls0}}/${ipls}/g" $etcd_config_file
|
|
|
|
+
|
|
|
|
### etcd.service unit so etcd starts at boot

echo 配置 /usr/lib/systemd/system/etcd.service
cat << EOF > /usr/lib/systemd/system/etcd.service
[Unit]
Description=Etcd Service
Documentation=https://etcd.io/docs/v3.5/op-guide/clustering/
After=network.target

[Service]
Type=notify
ExecStart=/usr/local/bin/etcd --config-file=$etcd_config_file
Restart=on-failure
RestartSec=10
LimitNOFILE=65536

[Install]
WantedBy=multi-user.target
Alias=etcd3.service
EOF
|
|
|
|
+
|
|
|
|
+
|
|
|
|
echo 启动etcd
# Reload unit files, then enable and start etcd immediately.
systemctl daemon-reload
systemctl enable --now etcd
#systemctl status etcd

echo 测试etcd
### Smoke-test etcd access (works without client certs because
### listen-client-urls also exposes plain HTTP on 127.0.0.1:2379).
etcdctl member list --write-out=table
etcdctl endpoint status --write-out=table
|
|
|
|
+
|
|
|
|
## Install k8s
### k8s certificates
#### k8s root CA certificate

echo 创建 k8s 相关证书
mkdir -p ${k8s_pki_path}
cd ${k8s_pki_path}

# Kubernetes root CA signing request
cat << EOF > ca-csr.json
{
  "CN": "kubernetes",
  "key": {
    "algo": "rsa",
    "size": 2048
  },
  "names": [
    {
      "C": "CN",
      "ST": "Beijing",
      "L": "Beijing",
      "O": "Kubernetes",
      "OU": "Kubernetes"
    }
  ],
  "ca": {
    "expiry": "$expiry"
  }
}
EOF

## Generate the k8s CA certificate (ca.pem / ca-key.pem)

echo k8s CA 证书
cfssl gencert -initca ca-csr.json | cfssljson -bare ca
|
|
|
|
+
|
|
|
|
#### apiserver certificate
echo 生成apiserver证书
# SANs cover loopback, the first service IP and the in-cluster DNS names;
# node hostnames/IPs are inserted below.
cat <<EOF > apiserver-csr.json
{
  "CN": "kube-apiserver",
  "hosts": [
    "127.0.0.1",
    "${svc_frst}",
    "kubernetes",
    "kubernetes.default",
    "kubernetes.default.svc",
    "kubernetes.default.svc.cluster",
    "kubernetes.default.svc.cluster.local"
  ],
  "key": {
    "algo": "rsa",
    "size": 2048
  },
  "names": [
    {
      "C": "CN",
      "L": "BeiJing",
      "ST": "BeiJing",
      "O": "Kubernetes",
      "OU": "Kubernetes"
    }
  ]
}
EOF

# Add every control-plane hostname/IP to the SAN list by inserting it
# before the "127.0.0.1" entry.  Plain word-splitting replaces the
# bash-only ${var[@]} expansion the original used under /bin/sh.
for addr in $k8s_ips_list
do
  echo "===>>> kube-apiserver hosts add $addr"
  sed -i "\#\"127.0.0.1\"#i\ \"$addr\"," apiserver-csr.json
done

# Sign with the k8s root CA using the dual-purpose kubernetes profile.
cfssl gencert \
  -ca=ca.pem \
  -ca-key=ca-key.pem \
  -config=${ca_config_path} \
  -profile=kubernetes \
  apiserver-csr.json | cfssljson -bare apiserver
|
|
|
|
+
|
|
|
|
+
|
|
|
|
#### front-proxy certificates (apiserver aggregation layer)

##### 1. front-proxy root CA

echo 生成front-proxy证书
cat << EOF > front-proxy-ca-csr.json
{
  "CN": "kubernetes",
  "key": {
    "algo": "rsa",
    "size": 2048
  }
}
EOF
# Generate the front-proxy root CA
cfssl gencert -initca front-proxy-ca-csr.json | cfssljson -bare front-proxy-ca

##### 2. front-proxy-client certificate
cat << EOF > front-proxy-client-csr.json
{
  "CN": "front-proxy-client",
  "key": {
    "algo": "rsa",
    "size": 2048
  }
}
EOF

# Generate the front-proxy-client certificate
echo 生成front-proxy-client 证书
# cfssl warns about the missing hosts field; safe to ignore for a client cert
cfssl gencert \
  -ca=front-proxy-ca.pem \
  -ca-key=front-proxy-ca-key.pem \
  -config=${ca_config_path} \
  -profile=kubernetes \
  front-proxy-client-csr.json | cfssljson -bare front-proxy-client
|
|
|
|
+
|
|
|
|
+
|
|
|
|
#### controller-manager certificate

echo 生成 controller-manage证书
#1. Generate the certificate
cat <<EOF > controller-manager-csr.json
{
  "CN": "system:kube-controller-manager",
  "key": {
    "algo": "rsa",
    "size": 2048
  },
  "names": [
    {
      "C": "CN",
      "ST": "Beijing",
      "L": "Beijing",
      "O": "system:kube-controller-manager",
      "OU": "Kubernetes"
    }
  ]
}
EOF

cfssl gencert \
  -ca=ca.pem \
  -ca-key=ca-key.pem \
  -config=${ca_config_path} \
  -profile=kubernetes \
  controller-manager-csr.json | cfssljson -bare controller-manager

echo 配置 controller-manager.conf
#2. Generate the kubeconfig
# set-cluster: register the cluster entry
kubectl config set-cluster kubernetes \
  --certificate-authority=${k8s_pki_path}/ca.pem \
  --embed-certs=true \
  --server=https://$export_addr:$export_port \
  --kubeconfig=$(dirname ${k8s_pki_path})/controller-manager.conf

# register a context tying the cluster and user together
kubectl config set-context system:kube-controller-manager@kubernetes \
  --cluster=kubernetes \
  --user=system:kube-controller-manager \
  --kubeconfig=$(dirname ${k8s_pki_path})/controller-manager.conf

# set-credentials: register the user entry
kubectl config set-credentials system:kube-controller-manager \
  --client-certificate=${k8s_pki_path}/controller-manager.pem \
  --client-key=${k8s_pki_path}/controller-manager-key.pem \
  --embed-certs=true \
  --kubeconfig=$(dirname ${k8s_pki_path})/controller-manager.conf

# make this context the default
kubectl config use-context system:kube-controller-manager@kubernetes \
  --kubeconfig=$(dirname ${k8s_pki_path})/controller-manager.conf
# later also used to auto-approve kubelet certificates
|
|
|
|
+
|
|
|
|
+
|
|
|
|
echo 生成scheduler证书
#### scheduler certificate
#1. Generate the certificate
cat <<EOF > scheduler-csr.json
{
  "CN": "system:kube-scheduler",
  "key": {
    "algo": "rsa",
    "size": 2048
  },
  "names": [
    {
      "C": "CN",
      "ST": "Beijing",
      "L": "Beijing",
      "O": "system:kube-scheduler",
      "OU": "Kubernetes"
    }
  ]
}
EOF

cfssl gencert \
  -ca=ca.pem \
  -ca-key=ca-key.pem \
  -config=${ca_config_path} \
  -profile=kubernetes \
  scheduler-csr.json | cfssljson -bare scheduler

echo 配置 scheduler.conf
#2. Generate the kubeconfig (cluster, credentials, context, default)
kubectl config set-cluster kubernetes \
  --certificate-authority=${k8s_pki_path}/ca.pem \
  --embed-certs=true \
  --server=https://$export_addr:$export_port \
  --kubeconfig=$(dirname ${k8s_pki_path})/scheduler.conf

kubectl config set-credentials system:kube-scheduler \
  --client-certificate=${k8s_pki_path}/scheduler.pem \
  --client-key=${k8s_pki_path}/scheduler-key.pem \
  --embed-certs=true \
  --kubeconfig=$(dirname ${k8s_pki_path})/scheduler.conf

kubectl config set-context system:kube-scheduler@kubernetes \
  --cluster=kubernetes \
  --user=system:kube-scheduler \
  --kubeconfig=$(dirname ${k8s_pki_path})/scheduler.conf

kubectl config use-context system:kube-scheduler@kubernetes \
  --kubeconfig=$(dirname ${k8s_pki_path})/scheduler.conf
|
|
|
|
+
|
|
|
|
+
|
|
|
|
+
|
|
|
|
echo 生成 admin 证书
#### admin certificate (O=system:masters grants cluster-admin via RBAC)
#1. Generate the certificate
cat <<EOF > admin-csr.json
{
  "CN": "admin",
  "key": {
    "algo": "rsa",
    "size": 2048
  },
  "names": [
    {
      "C": "CN",
      "ST": "Beijing",
      "L": "Beijing",
      "O": "system:masters",
      "OU": "Kubernetes"
    }
  ]
}
EOF

cfssl gencert \
  -ca=ca.pem \
  -ca-key=ca-key.pem \
  -config=${ca_config_path} \
  -profile=kubernetes \
  admin-csr.json | cfssljson -bare admin

echo 配置 admin.conf
#2. Generate the kubeconfig
## admin.conf later becomes ~/.kube/config for kubectl access
kubectl config set-cluster kubernetes \
  --certificate-authority=${k8s_pki_path}/ca.pem \
  --embed-certs=true \
  --server=https://$export_addr:$export_port \
  --kubeconfig=$(dirname ${k8s_pki_path})/admin.conf

kubectl config set-credentials kubernetes-admin \
  --client-certificate=${k8s_pki_path}/admin.pem \
  --client-key=${k8s_pki_path}/admin-key.pem \
  --embed-certs=true \
  --kubeconfig=$(dirname ${k8s_pki_path})/admin.conf

kubectl config set-context kubernetes-admin@kubernetes \
  --cluster=kubernetes \
  --user=kubernetes-admin \
  --kubeconfig=$(dirname ${k8s_pki_path})/admin.conf

kubectl config use-context kubernetes-admin@kubernetes \
  --kubeconfig=$(dirname ${k8s_pki_path})/admin.conf
|
|
|
|
+
|
|
|
|
#### ServiceAccount key pair: sa.key signs service-account tokens
#### (apiserver/controller-manager), sa.pub verifies them.
openssl genrsa -out ${k8s_pki_path}/sa.key 2048
openssl rsa -in ${k8s_pki_path}/sa.key -pubout -out ${k8s_pki_path}/sa.pub
|
|
|
|
+
|
|
|
|
+
|
|
|
|
echo 准备k8s组件环境目录
### k8s components
#### 1. Prepare directories (static-pod manifests, kubelet drop-ins,
#### kubelet state, logs)
mkdir -p $(dirname ${k8s_pki_path})/manifests /etc/systemd/system/kubelet.service.d /var/lib/kubelet /var/log/kubernetes

echo 配置 /usr/lib/systemd/system/kube-apiserver.service
#### 2. Configure the apiserver service
cat <<EOF > /usr/lib/systemd/system/kube-apiserver.service
[Unit]
Description=Kubernetes API Server
Documentation=https://github.com/kubernetes/kubernetes
After=network.target

[Service]
ExecStart=/usr/local/bin/kube-apiserver \\
  --v=2 \\
  --allow-privileged=true \\
  --bind-address=0.0.0.0 \\
  --secure-port=${export_port} \\
  --advertise-address=${export_addr} \\
  --service-cluster-ip-range=${svc_cidr} \\
  --service-node-port-range=30000-32767 \\
  --etcd-servers=https://${export_addr}:2379 \\
  --etcd-cafile=${etcd_pki_path}/etcd-ca.pem \\
  --etcd-certfile=${etcd_pki_path}/etcd.pem \\
  --etcd-keyfile=${etcd_pki_path}/etcd-key.pem \\
  --client-ca-file=${k8s_pki_path}/ca.pem \\
  --tls-cert-file=${k8s_pki_path}/apiserver.pem \\
  --tls-private-key-file=${k8s_pki_path}/apiserver-key.pem \\
  --kubelet-client-certificate=${k8s_pki_path}/apiserver.pem \\
  --kubelet-client-key=${k8s_pki_path}/apiserver-key.pem \\
  --service-account-key-file=${k8s_pki_path}/sa.pub \\
  --service-account-signing-key-file=${k8s_pki_path}/sa.key \\
  --service-account-issuer=https://kubernetes.default.svc.cluster.local \\
  --kubelet-preferred-address-types=InternalIP,ExternalIP,Hostname \\
  --enable-admission-plugins=NamespaceLifecycle,LimitRanger,ServiceAccount,DefaultStorageClass,DefaultTolerationSeconds,NodeRestriction,ResourceQuota \\
  --feature-gates=LegacyServiceAccountTokenNoAutoGeneration=false \\
  --authorization-mode=Node,RBAC \\
  --enable-bootstrap-token-auth=true \\
  --requestheader-client-ca-file=${k8s_pki_path}/front-proxy-ca.pem \\
  --proxy-client-cert-file=${k8s_pki_path}/front-proxy-client.pem \\
  --proxy-client-key-file=${k8s_pki_path}/front-proxy-client-key.pem \\
  --requestheader-allowed-names=aggregator,front-proxy-client \\
  --requestheader-group-headers=X-Remote-Group \\
  --requestheader-extra-headers-prefix=X-Remote-Extra- \\
  --requestheader-username-headers=X-Remote-User
 # --token-auth-file=/etc/kubernetes/token.csv

Restart=on-failure
RestartSec=10s
LimitNOFILE=65535

[Install]
WantedBy=multi-user.target
EOF

echo 启动 kube-apiserver
# Reload units and enable + start the apiserver.
systemctl daemon-reload
systemctl enable --now kube-apiserver
#systemctl status kube-apiserver
|
|
|
|
+
|
|
|
|
+
|
|
|
|
echo 配置 /usr/lib/systemd/system/kube-controller-manager.service
#### 3. Configure the controller-manager service
# 196.16.0.0/16 is the pod CIDR; if changed, make sure it does not
# collide with the host or docker networks.

cat <<EOF > /usr/lib/systemd/system/kube-controller-manager.service
[Unit]
Description=Kubernetes Controller Manager
Documentation=https://github.com/kubernetes/kubernetes
After=network.target

[Service]
ExecStart=/usr/local/bin/kube-controller-manager \\
  --v=2 \\
  --root-ca-file=${k8s_pki_path}/ca.pem \\
  --cluster-signing-cert-file=${k8s_pki_path}/ca.pem \\
  --cluster-signing-key-file=${k8s_pki_path}/ca-key.pem \\
  --service-account-private-key-file=${k8s_pki_path}/sa.key \\
  --kubeconfig=$(dirname ${k8s_pki_path})/controller-manager.conf \\
  --feature-gates=LegacyServiceAccountTokenNoAutoGeneration=false \\
  --leader-elect=true \\
  --use-service-account-credentials=true \\
  --node-monitor-grace-period=40s \\
  --node-monitor-period=5s \\
  --pod-eviction-timeout=2m0s \\
  --controllers=*,bootstrapsigner,tokencleaner \\
  --allocate-node-cidrs=true \\
  --cluster-cidr=${pod_cidr} \\
  --requestheader-client-ca-file=${k8s_pki_path}/front-proxy-ca.pem \\
  --node-cidr-mask-size=24

Restart=always
RestartSec=10s

[Install]
WantedBy=multi-user.target
EOF

echo 启动 kube-controller-manager
systemctl daemon-reload
systemctl enable --now kube-controller-manager
#systemctl status kube-controller-manager
|
|
|
|
+
|
|
|
|
+
|
|
|
|
echo 配置 /usr/lib/systemd/system/kube-scheduler.service
#### 4. Configure the scheduler service
cat <<EOF >/usr/lib/systemd/system/kube-scheduler.service
[Unit]
Description=Kubernetes Scheduler
Documentation=https://github.com/kubernetes/kubernetes
After=network.target

[Service]
ExecStart=/usr/local/bin/kube-scheduler \\
  --v=2 \\
  --leader-elect=true \\
  --authentication-kubeconfig=$(dirname ${k8s_pki_path})/scheduler.conf \\
  --authorization-kubeconfig=$(dirname ${k8s_pki_path})/scheduler.conf \\
  --kubeconfig=$(dirname ${k8s_pki_path})/scheduler.conf

Restart=always
RestartSec=10s

[Install]
WantedBy=multi-user.target
EOF

echo 启动 kube-scheduler
systemctl daemon-reload
systemctl enable --now kube-scheduler
#systemctl status kube-scheduler
|
|
|
|
+
|
|
|
|
+
|
|
|
|
echo TLS与引导启动配置
#### 5. TLS bootstrap configuration
#1. master bootstrap config (the token here must match bootstrap.secret.yaml below)

echo 准备 $(dirname ${k8s_pki_path})/bootstrap-kubelet.conf
# set the cluster entry
kubectl config set-cluster kubernetes \
  --certificate-authority=${k8s_pki_path}/ca.pem \
  --embed-certs=true \
  --server=https://${export_addr}:${export_port} \
  --kubeconfig=$(dirname ${k8s_pki_path})/bootstrap-kubelet.conf

# set the bootstrap-token credential
kubectl config set-credentials tls-bootstrap-token-user \
  --token=a2e4f9.781b15d024bb7876 \
  --kubeconfig=$(dirname ${k8s_pki_path})/bootstrap-kubelet.conf

# set the context
kubectl config set-context tls-bootstrap-token-user@kubernetes \
  --cluster=kubernetes \
  --user=tls-bootstrap-token-user \
  --kubeconfig=$(dirname ${k8s_pki_path})/bootstrap-kubelet.conf

# use the context
kubectl config use-context tls-bootstrap-token-user@kubernetes \
  --kubeconfig=$(dirname ${k8s_pki_path})/bootstrap-kubelet.conf
|
|
|
|
+
|
|
|
|
#2. Grant kubectl access
# kubectl can operate on the cluster only if /root/.kube/config exists;
# it is a copy of the admin.conf generated above (cluster-admin rights).
mkdir -p /root/.kube
\cp -f $(dirname ${k8s_pki_path})/admin.conf /root/.kube/config

# Verify component status; if this fails, debug the k8s components above.
kubectl get cs
|
|
|
|
+
|
|
|
|
+
|
|
|
|
#3. Create the cluster bootstrap token and RBAC bindings
# prepare bootstrap.secret.yaml (token-id/token-secret must match the
# --token used in bootstrap-kubelet.conf above)

echo 准备 $(dirname ${k8s_pki_path})/bootstrap.secret.yaml
cat <<EOF > $(dirname ${k8s_pki_path})/bootstrap.secret.yaml
apiVersion: v1
kind: Secret
metadata:
  name: bootstrap-token-a2e4f9
  namespace: kube-system
type: bootstrap.kubernetes.io/token
stringData:
  description: "The default bootstrap token generated by 'kubelet '."
  token-id: a2e4f9
  token-secret: 781b15d024bb7876
  usage-bootstrap-authentication: "true"
  usage-bootstrap-signing: "true"
  auth-extra-groups: system:bootstrappers:default-node-token,system:bootstrappers:worker,system:bootstrappers:ingress

---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
  name: kubelet-bootstrap
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: ClusterRole
  name: system:node-bootstrapper
subjects:
- apiGroup: rbac.authorization.k8s.io
  kind: Group
  name: system:bootstrappers:default-node-token
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
  name: node-autoapprove-bootstrap
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: ClusterRole
  name: system:certificates.k8s.io:certificatesigningrequests:nodeclient
subjects:
- apiGroup: rbac.authorization.k8s.io
  kind: Group
  name: system:bootstrappers:default-node-token
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
  name: node-autoapprove-certificate-rotation
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: ClusterRole
  name: system:certificates.k8s.io:certificatesigningrequests:selfnodeclient
subjects:
- apiGroup: rbac.authorization.k8s.io
  kind: Group
  name: system:nodes
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
  annotations:
    rbac.authorization.kubernetes.io/autoupdate: "true"
  labels:
    kubernetes.io/bootstrapping: rbac-defaults
  name: system:kube-apiserver-to-kubelet
rules:
  - apiGroups:
      - ""
    resources:
      - nodes/proxy
      - nodes/stats
      - nodes/log
      - nodes/spec
      - nodes/metrics
    verbs:
      - "*"
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
  name: system:kube-apiserver
  namespace: ""
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: ClusterRole
  name: system:kube-apiserver-to-kubelet
subjects:
  - apiGroup: rbac.authorization.k8s.io
    kind: User
    name: kube-apiserver
EOF

kubectl create -f $(dirname ${k8s_pki_path})/bootstrap.secret.yaml
|
|
|
|
+
|
|
|
|
#### 6. Bootstrap the node
##### 1. Configure kubelet
###### 1. kubelet.service
# Configure the kubelet service (done on every node).
echo 引导k8s节点启动
echo 配置 /usr/lib/systemd/system/kubelet.service
cat << EOF > /usr/lib/systemd/system/kubelet.service
[Unit]
Description=Kubernetes Kubelet
Documentation=https://github.com/kubernetes/kubernetes
After=containerd.service
Requires=containerd.service

[Service]
ExecStart=/usr/local/bin/kubelet

Restart=always
StartLimitInterval=0
RestartSec=10

[Install]
WantedBy=multi-user.target
EOF

# Drop-in overriding ExecStart with the kubeconfig/runtime/config args.
echo 配置 /etc/systemd/system/kubelet.service.d/10-kubelet.conf
cat << EOF > /etc/systemd/system/kubelet.service.d/10-kubelet.conf
[Service]
Environment="KUBELET_KUBECONFIG_ARGS=--bootstrap-kubeconfig=$(dirname ${k8s_pki_path})/bootstrap-kubelet.conf --kubeconfig=$(dirname ${k8s_pki_path})/kubelet.conf"
Environment="KUBELET_SYSTEM_ARGS=--container-runtime=remote --runtime-request-timeout=15m --container-runtime-endpoint=unix:///run/containerd/containerd.sock"
Environment="KUBELET_CONFIG_ARGS=--config=/etc/kubernetes/kubelet-conf.yml"
Environment="KUBELET_EXTRA_ARGS=--node-labels=node.kubernetes.io/node='' "
ExecStart=
ExecStart=/usr/local/bin/kubelet \$KUBELET_KUBECONFIG_ARGS \$KUBELET_CONFIG_ARGS \$KUBELET_SYSTEM_ARGS \$KUBELET_EXTRA_ARGS
EOF
|
|
|
|
+
|
|
|
|
# /etc/kubernetes/kubelet.conf is generated automatically by TLS bootstrap

###### 2. kubelet-conf.yml
# clusterDNS is the 10th IP of the service network (e.g. 10.96.0.10),
# derived above as svc_dns.

echo 生成 $(dirname ${k8s_pki_path})/kubelet-conf.yml
cat << EOF > $(dirname ${k8s_pki_path})/kubelet-conf.yml
apiVersion: kubelet.config.k8s.io/v1beta1
kind: KubeletConfiguration
address: 0.0.0.0
port: 10250
readOnlyPort: 10255
authentication:
  anonymous:
    enabled: false
  webhook:
    cacheTTL: 2m0s
    enabled: true
  x509:
    clientCAFile: ${k8s_pki_path}/ca.pem
authorization:
  mode: Webhook
  webhook:
    cacheAuthorizedTTL: 5m0s
    cacheUnauthorizedTTL: 30s
cgroupDriver: systemd
cgroupsPerQOS: true
clusterDNS:
- ${svc_dns}
clusterDomain: cluster.local
containerLogMaxFiles: 5
containerLogMaxSize: 10Mi
contentType: application/vnd.kubernetes.protobuf
cpuCFSQuota: true
cpuManagerPolicy: none
cpuManagerReconcilePeriod: 10s
enableControllerAttachDetach: true
enableDebuggingHandlers: true
enforceNodeAllocatable:
- pods
eventBurst: 10
eventRecordQPS: 5
evictionHard:
  imagefs.available: 15%
  memory.available: 100Mi
  nodefs.available: 10%
  nodefs.inodesFree: 5%
evictionPressureTransitionPeriod: 5m0s #缩小相应的配置
failSwapOn: true
fileCheckFrequency: 20s
hairpinMode: promiscuous-bridge
healthzBindAddress: 127.0.0.1
healthzPort: 10248
httpCheckFrequency: 20s
imageGCHighThresholdPercent: 85
imageGCLowThresholdPercent: 80
imageMinimumGCAge: 2m0s
iptablesDropBit: 15
iptablesMasqueradeBit: 14
kubeAPIBurst: 10
kubeAPIQPS: 5
makeIPTablesUtilChains: true
maxOpenFiles: 1000000
maxPods: 110
nodeStatusUpdateFrequency: 10s
oomScoreAdj: -999
podPidsLimit: -1
registryBurst: 10
registryPullQPS: 5
resolvConf: /etc/resolv.conf
rotateCertificates: true
runtimeRequestTimeout: 2m0s
serializeImagePulls: true
staticPodPath: $(dirname ${k8s_pki_path})/manifests
streamingConnectionIdleTimeout: 4h0m0s
syncFrequency: 1m0s
volumeStatsAggPeriod: 1m0s
EOF
|
|
|
|
+
|
|
|
|
+
|
|
|
|
###### 3. Start kubelet
# enable and start kubelet
echo 启动 kubelet
systemctl daemon-reload
systemctl enable --now kubelet
#systemctl status kubelet

echo 检查集群node, Ready 或 NotReady 都算正常的
kubectl get node
# Ready or NotReady are both expected at this point; pods cannot run
# until a CNI plugin (e.g. calico) is installed.
|
|
|
|
+
|
|
|
|
##### 2. Configure kube-proxy
###### 1. Generate kube-proxy.conf
# create the kube-proxy ServiceAccount
echo 配置 kube-proxy 权限
kubectl -n kube-system create serviceaccount kube-proxy

# bind it to the built-in node-proxier cluster role
kubectl create clusterrolebinding system:kube-proxy --clusterrole system:node-proxier --serviceaccount kube-system:kube-proxy
|
|
|
|
+
|
|
|
|
+echo "================================================================================"
|
|
|
|
+
|
|
|
|
# Directory holding the kubeconfig files (parent of the pki directory).
K8S_DIR=$(dirname "${k8s_pki_path}")

# Look up the kube-proxy ServiceAccount token.
# Sets: SECRET    - name of the SA's token secret (may be empty)
#       JWT_TOKEN - decoded bearer token (empty until the secret exists)
# Leaves JWT_TOKEN empty when the secret is not yet available so the
# retry loop below can try again; the empty-SECRET guard avoids the
# spurious "kubectl get secret/" error the unguarded lookup produced.
# NOTE(review): on Kubernetes >= 1.24 SA token secrets are no longer
# auto-created, so this lookup would stay empty — confirm cluster version.
get_secret_token() {
    sleep 1
    SECRET=$(kubectl -n kube-system get sa/kube-proxy --output=jsonpath='{.secrets[0].name}')
    if [ -n "$SECRET" ]; then
        JWT_TOKEN=$(kubectl -n kube-system get secret/"$SECRET" --output=jsonpath='{.data.token}' | base64 -d)
    else
        JWT_TOKEN=
    fi
}
get_secret_token
|
|
|
|
+
|
|
|
|
# Retry fetching the token for up to 5 attempts (the secret can take a
# moment to be provisioned after the ServiceAccount is created).
reget_count=0
while [ -z "$JWT_TOKEN" ]
do
    if [ "$reget_count" -ge 5 ]
    then
        # Diagnostics go to stderr; abort so we never write an empty token
        # into the kubeconfig below.
        echo "reget_count $reget_count -ge 5, please check config" >&2
        exit 1
    fi
    sleep 1
    get_secret_token
    # POSIX arithmetic: the original '(( ))' is a bashism and this script
    # runs under #!/bin/sh.
    reget_count=$((reget_count + 1))
    echo "reget_count $reget_count..."
done
|
|
|
|
+
|
|
|
|
echo "================================================================================"
# NOTE(review): this prints the ServiceAccount bearer token to stdout/logs —
# consider removing once the setup is verified.
echo "$SECRET"
echo "$JWT_TOKEN"
echo "================================================================================"
|
|
|
|
+
|
|
|
|
# Generate the kube-proxy kubeconfig.
# --server: your own apiserver address or load-balancer address.
# NOTE(review): the port is hard-coded to 6443 here while the rest of the
# script defines ${export_port} — confirm they are meant to stay in sync.
echo 生成 ${K8S_DIR}/kube-proxy.conf
kubectl config set-cluster kubernetes \
  --certificate-authority=${k8s_pki_path}/ca.pem \
  --embed-certs=true \
  --server=https://${export_addr}:6443 \
  --kubeconfig=${K8S_DIR}/kube-proxy.conf

# Credentials: kube-proxy authenticates with the ServiceAccount JWT token
# obtained above (not a client certificate).
kubectl config set-credentials kubernetes \
  --token=${JWT_TOKEN} \
  --kubeconfig=${K8S_DIR}/kube-proxy.conf

kubectl config set-context kubernetes \
  --cluster=kubernetes \
  --user=kubernetes \
  --kubeconfig=${K8S_DIR}/kube-proxy.conf

kubectl config use-context kubernetes \
  --kubeconfig=${K8S_DIR}/kube-proxy.conf
|
|
|
|
+
|
|
|
|
###### 2. Configure kube-proxy.service
# systemd unit for kube-proxy; all settings come from kube-proxy.yaml
# (written below). The heredoc is unquoted, so $(dirname ${k8s_pki_path})
# expands now, at generation time.
echo 配置 /usr/lib/systemd/system/kube-proxy.service
cat << EOF > /usr/lib/systemd/system/kube-proxy.service
[Unit]
Description=Kubernetes Kube Proxy
Documentation=https://github.com/kubernetes/kubernetes
After=network.target

[Service]
ExecStart=/usr/local/bin/kube-proxy \\
  --config=$(dirname ${k8s_pki_path})/kube-proxy.yaml \\
  --v=2

Restart=always
RestartSec=10s

[Install]
WantedBy=multi-user.target
EOF
|
|
|
|
+
|
|
|
|
###### 3. Prepare kube-proxy.yaml
# Note: adjust the Pod CIDR below to match your own network.
# The heredoc is unquoted so ${pod_cidr} and $(dirname ...) expand at
# generation time; the '#' lines inside it are YAML comments written into
# the generated file and must stay as-is.
echo 准备 $(dirname ${k8s_pki_path})/kube-proxy.yaml
cat <<EOF > $(dirname ${k8s_pki_path})/kube-proxy.yaml
apiVersion: kubeproxy.config.k8s.io/v1alpha1
bindAddress: 0.0.0.0
clientConnection:
  acceptContentTypes: ""
  burst: 10
  contentType: application/vnd.kubernetes.protobuf
  kubeconfig: $(dirname ${k8s_pki_path})/kube-proxy.conf #kube-proxy引导文件
  qps: 5
clusterCIDR: ${pod_cidr} #修改为自己的Pod-CIDR
configSyncPeriod: 15m0s
conntrack:
  max: null
  maxPerCore: 32768
  min: 131072
  tcpCloseWaitTimeout: 1h0m0s
  tcpEstablishedTimeout: 24h0m0s
enableProfiling: false
healthzBindAddress: 0.0.0.0:10256
hostnameOverride: ""
iptables:
  masqueradeAll: false
  masqueradeBit: 14
  minSyncPeriod: 0s
  syncPeriod: 30s
ipvs:
  masqueradeAll: true
  minSyncPeriod: 5s
  scheduler: "rr"
  syncPeriod: 30s
kind: KubeProxyConfiguration
metricsBindAddress: 127.0.0.1:10249
mode: "ipvs"
nodePortAddresses: null
oomScoreAdj: -999
portRange: ""
udpIdleTimeout: 250ms
EOF
|
|
|
|
+
|
|
|
|
+
|
|
|
|
# Start kube-proxy now and enable it on boot.
echo 启动 kube-proxy
systemctl daemon-reload
systemctl enable --now kube-proxy
#systemctl status kube-proxy

echo "================================================================================"
sleep 1

# Return to the directory the script was launched from; quoted so a path
# containing spaces does not break the cd (the original was unquoted).
cd "$origin_dir"
|
|
|
|
#### 7. Deploy calico (CNI)

echo 部署calico
#kubectl create -f https://raw.githubusercontent.com/projectcalico/calico/v3.25.0/manifests/tigera-operator.yaml
#kubectl create -f https://raw.githubusercontent.com/projectcalico/calico/v3.25.0/manifests/custom-resources.yaml

mkdir -p k8s-components
cd k8s-components
calico_version=v3.25
# Download the manifest once and cache it. -f makes curl fail on HTTP
# errors so an error page is never cached as the manifest (the original
# would then skip re-download forever); -sS is quiet but keeps errors,
# -L follows redirects.
if [ ! -f "calico-$calico_version.yaml" ]; then
    curl -fsSL "https://docs.tigera.io/archive/$calico_version/manifests/calico.yaml" -o "calico-$calico_version.yaml"
fi
cp "calico-$calico_version.yaml" calico.yaml
# CIDR=$(grep -- "--cluster-cidr=" /usr/lib/systemd/system/kube-controller-manager.service | awk '{print $1}' | awk -F= '{print $2}')
# echo $CIDR
CIDR=$pod_cidr
# Uncomment CALICO_IPV4POOL_CIDR in the manifest and point it at our pod CIDR.
sed -i "s|# - name: CALICO_IPV4POOL_CIDR|- name: CALICO_IPV4POOL_CIDR|" calico.yaml
sed -i "s|# value: \"192.168.0.0/16\"| value: \"$CIDR\"|" calico.yaml
kubectl apply -f calico.yaml

echo "================================================================================"
sleep 1
|
|
|
|
+
|
|
|
|
#### 8. Deploy coreDNS
echo 部署coreDNS
coredns=coredns-deployment
if [ ! -d "$coredns" ]; then
    git clone https://github.com/coredns/deployment.git "$coredns"
fi
cd "$coredns"/kubernetes

# Cluster DNS IP = the 10th address of the service CIDR, e.g. 10.96.0.10.
#./deploy.sh -s -i 10.96.0.10 | kubectl apply -f -
# Read the kubernetes service clusterIP directly via jsonpath instead of
# `kubectl get svc | grep kubernetes`, which could also match services such
# as kubernetes-dashboard. The clusterIP ends in .1, so replacing the last
# octet with 10 yields the DNS IP (same result as $svc_dns from the header).
svc_ip=$(kubectl -n default get svc kubernetes --output=jsonpath='{.spec.clusterIP}')
svc_dns_ip=${svc_ip%.*}.10
# Log the command, then run it directly — no eval on a built-up string.
echo "./deploy.sh -s -i ${svc_dns_ip} | kubectl apply -f -"
./deploy.sh -s -i "${svc_dns_ip}" | kubectl apply -f -
|
|
|
|
+
|