# vps-k8s-install-v1.26.sh — single-node Kubernetes v1.26 installer (pasted copy; line-number gutter removed)
  1. #!/bin/sh
  2. set -e
  3. pod_cidr="196.16.0.0/16"
  4. svc_cidr="10.96.0.0/16"
  5. svc_frst=${svc_cidr%.*}.1
  6. svc_dns=${svc_cidr%.*}.10
  7. origin_dir=$(pwd)
  8. ### Setting environment
  9. export_addr=$(curl ipv4.icanhazip.com)
  10. export_port=6443
  11. # CA organization
  12. # expiry 87600h 过期时间10年,可根据自己需求来配置,比如 438000h 50年,876000h 100年
  13. expiry=876000h
  14. ca_config_path=/etc/ca/ca-config.json
  15. # etcd path
  16. etcd_pki_path=/etc/etcd/pki
  17. etcd_ips_list="$export_addr $(hostname)"
  18. # kubernetes
  19. k8s_pki_path=/etc/kubernetes/pki
  20. k8s_ips_list="$export_addr $(hostname)"
  21. markS="##### ca-etcd-k8s envionment #####"
  22. markE="##################################"
  23. write_env_profile() {
  24. sh <<EOF >> /etc/profile
  25. echo "$markS"
  26. echo export expiry=$expiry
  27. echo export ca_config_path=$ca_config_path
  28. echo export etcd_pki_path=$etcd_pki_path
  29. echo export etcd_ips_list=\"${etcd_ips_list}\"
  30. echo export k8s_pki_path=$k8s_pki_path
  31. echo export k8s_ips_list=\"${k8s_ips_list}\"
  32. echo export export_addr=$export_addr
  33. echo export export_port=$export_port
  34. echo "$markE"
  35. EOF
  36. }
  37. if grep "$markS" /etc/profile > /dev/null
  38. then
  39. echo already write profile env
  40. echo "clear the environment and run again"
  41. exit
  42. else
  43. write_env_profile
  44. echo write profile env success
  45. printf "\n$export_addr $(hostname)\n" >> /etc/hosts
  46. fi
  47. ### CA organization
  48. #ca-config.json
  49. echo 创建机构配置 $ca_config_path
  50. mkdir -p $(dirname $ca_config_path)
  51. cat << EOF > $ca_config_path
  52. {
  53. "signing": {
  54. "default": {
  55. "expiry": "$expiry"
  56. },
  57. "profiles": {
  58. "server": {
  59. "expiry": "$expiry",
  60. "usages": [
  61. "signing",
  62. "key encipherment",
  63. "server auth"
  64. ]
  65. },
  66. "client": {
  67. "expiry": "$expiry",
  68. "usages": [
  69. "signing",
  70. "key encipherment",
  71. "client auth"
  72. ]
  73. },
  74. "peer": {
  75. "expiry": "$expiry",
  76. "usages": [
  77. "signing",
  78. "key encipherment",
  79. "server auth",
  80. "client auth"
  81. ]
  82. },
  83. "kubernetes": {
  84. "expiry": "$expiry",
  85. "usages": [
  86. "signing",
  87. "key encipherment",
  88. "server auth",
  89. "client auth"
  90. ]
  91. },
  92. "etcd": {
  93. "expiry": "$expiry",
  94. "usages": [
  95. "signing",
  96. "key encipherment",
  97. "server auth",
  98. "client auth"
  99. ]
  100. }
  101. }
  102. }
  103. }
  104. EOF
  105. ## 安装etcd
  106. #创建etcd证书目录和证书
  107. ### 1、生成etcd根ca证书
  108. echo 生成 etcd 根 ca 证书
  109. mkdir -p $etcd_pki_path
  110. cd $etcd_pki_path
  111. # etcd-ca-csr 签名请求
  112. cat <<EOF > etcd-ca-csr.json
  113. {
  114. "CN": "etcd",
  115. "key": {
  116. "algo": "rsa",
  117. "size": 2048
  118. },
  119. "names": [
  120. {
  121. "C": "CN",
  122. "ST": "Beijing",
  123. "L": "Beijing",
  124. "O": "etcd",
  125. "OU": "etcd"
  126. }
  127. ],
  128. "ca": {
  129. "expiry": "$expiry"
  130. }
  131. }
  132. EOF
  133. # 生成etcd CA根证书和key
  134. cfssl gencert -initca etcd-ca-csr.json | cfssljson -bare etcd-ca
  135. #为 etcd 颁证书
  136. echo etcd 颁证书
  137. mkdir -p $etcd_pki_path
  138. cd $etcd_pki_path
  139. # 创建etcd证书签名 etcd-csr.json
  140. cat <<EOF > etcd-csr.json
  141. {
  142. "CN": "etcd",
  143. "key": {
  144. "algo": "rsa",
  145. "size": 2048
  146. },
  147. "hosts": [
  148. "127.0.0.1"
  149. ],
  150. "names": [
  151. {
  152. "C": "CN",
  153. "ST": "Beijing",
  154. "L": "Beijing",
  155. "O": "etcd",
  156. "OU": "System"
  157. }
  158. ]
  159. }
  160. EOF
  161. # hosts 加上 etcd 节点域名、IP
  162. for addr in ${etcd_ips_list[@]}
  163. do
  164. echo "===>>> etcd hosts add "$addr
  165. sed -i "\#\"127.0.0.1\"#i\ \"$addr\"," etcd-csr.json
  166. done
  167. # 生成etcd证书
  168. cfssl gencert \
  169. -ca=etcd-ca.pem \
  170. -ca-key=etcd-ca-key.pem \
  171. -config=${ca_config_path} \
  172. -profile=etcd \
  173. etcd-csr.json | cfssljson -bare etcd
  174. ### 配置 etcd.yaml 配置
  175. mkdir -p /var/lib/etcd
  176. etcd_config_file=$(dirname ${etcd_pki_path})/etcd.yaml
  177. echo 配置 $etcd_config_file
  178. cat <<EOF > $etcd_config_file
  179. name: '{{host}}' #每个机器可以写自己的域名,不能重复
  180. data-dir: /var/lib/etcd
  181. wal-dir: /var/lib/etcd/wal
  182. snapshot-count: 5000
  183. heartbeat-interval: 100
  184. election-timeout: 1000
  185. quota-backend-bytes: 0
  186. listen-peer-urls: 'https://{{ipls}}:2380' #本机ip+2380端口,代表和集群通信22
  187. listen-client-urls: 'https://{{ipls}}:2379,http://127.0.0.1:2379' #自己的ip
  188. max-snapshots: 3
  189. max-wals: 5
  190. cors:
  191. initial-advertise-peer-urls: 'https://{{ipls}}:2380' #自己的ip
  192. advertise-client-urls: 'https://{{ipls}}:2379' #自己的ip
  193. discovery:
  194. discovery-fallback: 'proxy'
  195. discovery-proxy:
  196. discovery-srv:
  197. initial-cluster: '{{host0}}=https://{{ipls0}}:2380' #这里不一样
  198. initial-cluster-token: 'etcd-k8s-cluster'
  199. initial-cluster-state: 'new'
  200. strict-reconfig-check: false
  201. enable-v2: true
  202. enable-pprof: true
  203. proxy: 'off'
  204. proxy-failure-wait: 5000
  205. proxy-refresh-interval: 30000
  206. proxy-dial-timeout: 1000
  207. proxy-write-timeout: 5000
  208. proxy-read-timeout: 0
  209. client-transport-security:
  210. cert-file: '${etcd_pki_path}/etcd.pem'
  211. key-file: '${etcd_pki_path}/etcd-key.pem'
  212. client-cert-auth: true
  213. trusted-ca-file: '${etcd_pki_path}/etcd-ca.pem'
  214. auto-tls: true
  215. peer-transport-security:
  216. cert-file: '${etcd_pki_path}/etcd.pem'
  217. key-file: '${etcd_pki_path}/etcd-key.pem'
  218. peer-client-cert-auth: true
  219. trusted-ca-file: '${etcd_pki_path}/etcd-ca.pem'
  220. auto-tls: true
  221. debug: false
  222. log-package-levels:
  223. log-outputs: [default]
  224. force-new-cluster: false
  225. EOF
  226. # 处理 etcd.yaml 模板文件进行替换
  227. host=(
  228. $(hostname)
  229. )
  230. ipls=(
  231. $export_addr
  232. )
  233. for i in 0
  234. do
  235. sed -i "s/{{host}}/${host[$i]}/g" $etcd_config_file
  236. sed -i "s/{{host0}}/${host[0]}/g" $etcd_config_file
  237. sed -i "s/{{ipls}}/${ipls[$i]}/g" $etcd_config_file
  238. sed -i "s/{{ipls0}}/${ipls[0]}/g" $etcd_config_file
  239. done
  240. ### etcd.service 开机启动
  241. echo 配置 /usr/lib/systemd/system/etcd.service
  242. cat << EOF > /usr/lib/systemd/system/etcd.service
  243. [Unit]
  244. Description=Etcd Service
  245. Documentation=https://etcd.io/docs/v3.5/op-guide/clustering/
  246. After=network.target
  247. [Service]
  248. Type=notify
  249. ExecStart=/usr/local/bin/etcd --config-file=$etcd_config_file
  250. Restart=on-failure
  251. RestartSec=10
  252. LimitNOFILE=65536
  253. [Install]
  254. WantedBy=multi-user.target
  255. Alias=etcd3.service
  256. EOF
  257. echo 启动etcd
  258. # 加载&开机启动
  259. systemctl daemon-reload
  260. systemctl enable --now etcd
  261. #systemctl status etcd
  262. echo 测试etcd
  263. ### 测试etcd访问
  264. etcdctl member list --write-out=table
  265. etcdctl endpoint status --write-out=table
  266. ## 安装k8s
  267. ### k8s证书
  268. #### k8s根ca证书
  269. echo 创建 k8s 相关证书
  270. mkdir -p ${k8s_pki_path}
  271. cd ${k8s_pki_path}
  272. cat << EOF > ca-csr.json
  273. {
  274. "CN": "kubernetes",
  275. "key": {
  276. "algo": "rsa",
  277. "size": 2048
  278. },
  279. "names": [
  280. {
  281. "C": "CN",
  282. "ST": "Beijing",
  283. "L": "Beijing",
  284. "O": "Kubernetes",
  285. "OU": "Kubernetes"
  286. }
  287. ],
  288. "ca": {
  289. "expiry": "$expiry"
  290. }
  291. }
  292. EOF
  293. ## 生成 k8s CA 证书
  294. echo k8s CA 证书
  295. cfssl gencert -initca ca-csr.json | cfssljson -bare ca
  296. #### apiserver证书
  297. echo 生成apiserver证书
  298. cat <<EOF > apiserver-csr.json
  299. {
  300. "CN": "kube-apiserver",
  301. "hosts": [
  302. "127.0.0.1",
  303. "${svc_frst}",
  304. "kubernetes",
  305. "kubernetes.default",
  306. "kubernetes.default.svc",
  307. "kubernetes.default.svc.cluster",
  308. "kubernetes.default.svc.cluster.local"
  309. ],
  310. "key": {
  311. "algo": "rsa",
  312. "size": 2048
  313. },
  314. "names": [
  315. {
  316. "C": "CN",
  317. "L": "BeiJing",
  318. "ST": "BeiJing",
  319. "O": "Kubernetes",
  320. "OU": "Kubernetes"
  321. }
  322. ]
  323. }
  324. EOF
  325. # hosts 加上 k8s 节点域名、IP
  326. for addr in ${k8s_ips_list[@]}
  327. do
  328. echo "===>>> kube-apiserver hosts add "$addr
  329. sed -i "\#\"127.0.0.1\"#i\ \"$addr\"," apiserver-csr.json
  330. done
  331. cfssl gencert \
  332. -ca=ca.pem \
  333. -ca-key=ca-key.pem \
  334. -config=${ca_config_path} \
  335. -profile=kubernetes \
  336. apiserver-csr.json | cfssljson -bare apiserver
  337. #### front-proxy证书
  338. ##### 1、front-proxy根ca
  339. echo 生成front-proxy证书
  340. cat << EOF > front-proxy-ca-csr.json
  341. {
  342. "CN": "kubernetes",
  343. "key": {
  344. "algo": "rsa",
  345. "size": 2048
  346. }
  347. }
  348. EOF
  349. #front-proxy 根ca生成
  350. cfssl gencert -initca front-proxy-ca-csr.json | cfssljson -bare front-proxy-ca
  351. ##### 2、front-proxy-client证书
  352. cat << EOF > front-proxy-client-csr.json
  353. {
  354. "CN": "front-proxy-client",
  355. "key": {
  356. "algo": "rsa",
  357. "size": 2048
  358. }
  359. }
  360. EOF
  361. #生成front-proxy-client 证书
  362. echo 生成front-proxy-client 证书
  363. #忽略警告
  364. cfssl gencert \
  365. -ca=front-proxy-ca.pem \
  366. -ca-key=front-proxy-ca-key.pem \
  367. -config=${ca_config_path} \
  368. -profile=kubernetes \
  369. front-proxy-client-csr.json | cfssljson -bare front-proxy-client
  370. #### controller-manage证书
  371. echo 生成 controller-manage证书
  372. #1、生成证书
  373. cat <<EOF > controller-manager-csr.json
  374. {
  375. "CN": "system:kube-controller-manager",
  376. "key": {
  377. "algo": "rsa",
  378. "size": 2048
  379. },
  380. "names": [
  381. {
  382. "C": "CN",
  383. "ST": "Beijing",
  384. "L": "Beijing",
  385. "O": "system:kube-controller-manager",
  386. "OU": "Kubernetes"
  387. }
  388. ]
  389. }
  390. EOF
  391. cfssl gencert \
  392. -ca=ca.pem \
  393. -ca-key=ca-key.pem \
  394. -config=${ca_config_path} \
  395. -profile=kubernetes \
  396. controller-manager-csr.json | cfssljson -bare controller-manager
  397. echo 配置 controller-manager.conf
  398. #2、生成配置
  399. # set-cluster:设置一个集群项
  400. kubectl config set-cluster kubernetes \
  401. --certificate-authority=${k8s_pki_path}/ca.pem \
  402. --embed-certs=true \
  403. --server=https://$export_addr:$export_port \
  404. --kubeconfig=$(dirname ${k8s_pki_path})/controller-manager.conf
  405. # 设置一个环境项,一个上下文
  406. kubectl config set-context system:kube-controller-manager@kubernetes \
  407. --cluster=kubernetes \
  408. --user=system:kube-controller-manager \
  409. --kubeconfig=$(dirname ${k8s_pki_path})/controller-manager.conf
  410. # set-credentials 设置一个用户项
  411. kubectl config set-credentials system:kube-controller-manager \
  412. --client-certificate=${k8s_pki_path}/controller-manager.pem \
  413. --client-key=${k8s_pki_path}/controller-manager-key.pem \
  414. --embed-certs=true \
  415. --kubeconfig=$(dirname ${k8s_pki_path})/controller-manager.conf
  416. # 使用某个环境当做默认环境
  417. kubectl config use-context system:kube-controller-manager@kubernetes \
  418. --kubeconfig=$(dirname ${k8s_pki_path})/controller-manager.conf
  419. # 后来也用来自动批复kubelet证书
  420. echo 生成scheduler证书
  421. #### scheduler证书
  422. #1、生成证书
  423. cat <<EOF > scheduler-csr.json
  424. {
  425. "CN": "system:kube-scheduler",
  426. "key": {
  427. "algo": "rsa",
  428. "size": 2048
  429. },
  430. "names": [
  431. {
  432. "C": "CN",
  433. "ST": "Beijing",
  434. "L": "Beijing",
  435. "O": "system:kube-scheduler",
  436. "OU": "Kubernetes"
  437. }
  438. ]
  439. }
  440. EOF
  441. cfssl gencert \
  442. -ca=ca.pem \
  443. -ca-key=ca-key.pem \
  444. -config=${ca_config_path} \
  445. -profile=kubernetes \
  446. scheduler-csr.json | cfssljson -bare scheduler
  447. echo 配置 scheduler.conf
  448. #2、生成配置
  449. #k8s集群安全操作相关
  450. kubectl config set-cluster kubernetes \
  451. --certificate-authority=${k8s_pki_path}/ca.pem \
  452. --embed-certs=true \
  453. --server=https://$export_addr:$export_port \
  454. --kubeconfig=$(dirname ${k8s_pki_path})/scheduler.conf
  455. kubectl config set-credentials system:kube-scheduler \
  456. --client-certificate=${k8s_pki_path}/scheduler.pem \
  457. --client-key=${k8s_pki_path}/scheduler-key.pem \
  458. --embed-certs=true \
  459. --kubeconfig=$(dirname ${k8s_pki_path})/scheduler.conf
  460. kubectl config set-context system:kube-scheduler@kubernetes \
  461. --cluster=kubernetes \
  462. --user=system:kube-scheduler \
  463. --kubeconfig=$(dirname ${k8s_pki_path})/scheduler.conf
  464. kubectl config use-context system:kube-scheduler@kubernetes \
  465. --kubeconfig=$(dirname ${k8s_pki_path})/scheduler.conf
  466. echo 生成 admin 证书
  467. #### admin证书
  468. #1、生成证书
  469. cat <<EOF > admin-csr.json
  470. {
  471. "CN": "admin",
  472. "key": {
  473. "algo": "rsa",
  474. "size": 2048
  475. },
  476. "names": [
  477. {
  478. "C": "CN",
  479. "ST": "Beijing",
  480. "L": "Beijing",
  481. "O": "system:masters",
  482. "OU": "Kubernetes"
  483. }
  484. ]
  485. }
  486. EOF
  487. cfssl gencert \
  488. -ca=ca.pem \
  489. -ca-key=ca-key.pem \
  490. -config=${ca_config_path} \
  491. -profile=kubernetes \
  492. admin-csr.json | cfssljson -bare admin
  493. echo 配置 admin.conf
  494. #2、生成配置
  495. ##k8s集群admin.conf授权操作相关 ===>>> ~/.kube/config
  496. kubectl config set-cluster kubernetes \
  497. --certificate-authority=${k8s_pki_path}/ca.pem \
  498. --embed-certs=true \
  499. --server=https://$export_addr:$export_port \
  500. --kubeconfig=$(dirname ${k8s_pki_path})/admin.conf
  501. kubectl config set-credentials kubernetes-admin \
  502. --client-certificate=${k8s_pki_path}/admin.pem \
  503. --client-key=${k8s_pki_path}/admin-key.pem \
  504. --embed-certs=true \
  505. --kubeconfig=$(dirname ${k8s_pki_path})/admin.conf
  506. kubectl config set-context kubernetes-admin@kubernetes \
  507. --cluster=kubernetes \
  508. --user=kubernetes-admin \
  509. --kubeconfig=$(dirname ${k8s_pki_path})/admin.conf
  510. kubectl config use-context kubernetes-admin@kubernetes \
  511. --kubeconfig=$(dirname ${k8s_pki_path})/admin.conf
  512. #### ServiceAccount Key生成
  513. openssl genrsa -out ${k8s_pki_path}/sa.key 2048
  514. openssl rsa -in ${k8s_pki_path}/sa.key -pubout -out ${k8s_pki_path}/sa.pub
  515. echo 准备k8s组件环境目录
  516. ### k8s组件
  517. #### 1、目录准备
  518. mkdir -p $(dirname ${k8s_pki_path})/manifests /etc/systemd/system/kubelet.service.d /var/lib/kubelet /var/log/kubernetes
  519. echo 配置 /usr/lib/systemd/system/kube-apiserver.service
  520. #### 2、配置apiserver服务
  521. cat <<EOF > /usr/lib/systemd/system/kube-apiserver.service
  522. [Unit]
  523. Description=Kubernetes API Server
  524. Documentation=https://github.com/kubernetes/kubernetes
  525. After=network.target
  526. [Service]
  527. ExecStart=/usr/local/bin/kube-apiserver \\
  528. --v=2 \\
  529. --allow-privileged=true \\
  530. --bind-address=0.0.0.0 \\
  531. --secure-port=${export_port} \\
  532. --advertise-address=${export_addr} \\
  533. --service-cluster-ip-range=${svc_cidr} \\
  534. --service-node-port-range=30000-32767 \\
  535. --etcd-servers=https://${export_addr}:2379 \\
  536. --etcd-cafile=${etcd_pki_path}/etcd-ca.pem \\
  537. --etcd-certfile=${etcd_pki_path}/etcd.pem \\
  538. --etcd-keyfile=${etcd_pki_path}/etcd-key.pem \\
  539. --client-ca-file=${k8s_pki_path}/ca.pem \\
  540. --tls-cert-file=${k8s_pki_path}/apiserver.pem \\
  541. --tls-private-key-file=${k8s_pki_path}/apiserver-key.pem \\
  542. --kubelet-client-certificate=${k8s_pki_path}/apiserver.pem \\
  543. --kubelet-client-key=${k8s_pki_path}/apiserver-key.pem \\
  544. --service-account-key-file=${k8s_pki_path}/sa.pub \\
  545. --service-account-signing-key-file=${k8s_pki_path}/sa.key \\
  546. --service-account-issuer=https://kubernetes.default.svc.cluster.local \\
  547. --kubelet-preferred-address-types=InternalIP,ExternalIP,Hostname \\
  548. --enable-admission-plugins=NamespaceLifecycle,LimitRanger,ServiceAccount,DefaultStorageClass,DefaultTolerationSeconds,NodeRestriction,ResourceQuota \\
  549. --feature-gates=LegacyServiceAccountTokenNoAutoGeneration=false \\
  550. --authorization-mode=Node,RBAC \\
  551. --enable-bootstrap-token-auth=true \\
  552. --requestheader-client-ca-file=${k8s_pki_path}/front-proxy-ca.pem \\
  553. --proxy-client-cert-file=${k8s_pki_path}/front-proxy-client.pem \\
  554. --proxy-client-key-file=${k8s_pki_path}/front-proxy-client-key.pem \\
  555. --requestheader-allowed-names=aggregator,front-proxy-client \\
  556. --requestheader-group-headers=X-Remote-Group \\
  557. --requestheader-extra-headers-prefix=X-Remote-Extra- \\
  558. --requestheader-username-headers=X-Remote-User
  559. # --token-auth-file=/etc/kubernetes/token.csv
  560. Restart=on-failure
  561. RestartSec=10s
  562. LimitNOFILE=65535
  563. [Install]
  564. WantedBy=multi-user.target
  565. EOF
  566. echo 启动 kube-apiserver
  567. #启动apiserver
  568. systemctl daemon-reload
  569. systemctl enable --now kube-apiserver
  570. #systemctl status kube-apiserver
  571. echo 配置 /usr/lib/systemd/system/kube-controller-manager.service
  572. #### 3、配置controller-manager服务
  573. #196.16.0.0/16 是 pod 的网段,如果修改要注意不要和宿主机,docker等的网段冲突
  574. cat <<EOF > /usr/lib/systemd/system/kube-controller-manager.service
  575. [Unit]
  576. Description=Kubernetes Controller Manager
  577. Documentation=https://github.com/kubernetes/kubernetes
  578. After=network.target
  579. [Service]
  580. ExecStart=/usr/local/bin/kube-controller-manager \\
  581. --v=2 \\
  582. --root-ca-file=${k8s_pki_path}/ca.pem \\
  583. --cluster-signing-cert-file=${k8s_pki_path}/ca.pem \\
  584. --cluster-signing-key-file=${k8s_pki_path}/ca-key.pem \\
  585. --service-account-private-key-file=${k8s_pki_path}/sa.key \\
  586. --kubeconfig=$(dirname ${k8s_pki_path})/controller-manager.conf \\
  587. --feature-gates=LegacyServiceAccountTokenNoAutoGeneration=false \\
  588. --leader-elect=true \\
  589. --use-service-account-credentials=true \\
  590. --node-monitor-grace-period=40s \\
  591. --node-monitor-period=5s \\
  592. --pod-eviction-timeout=2m0s \\
  593. --controllers=*,bootstrapsigner,tokencleaner \\
  594. --allocate-node-cidrs=true \\
  595. --cluster-cidr=${pod_cidr} \\
  596. --requestheader-client-ca-file=${k8s_pki_path}/front-proxy-ca.pem \\
  597. --node-cidr-mask-size=24
  598. Restart=always
  599. RestartSec=10s
  600. [Install]
  601. WantedBy=multi-user.target
  602. EOF
  603. echo 启动 kube-controller-manager
  604. systemctl daemon-reload
  605. systemctl enable --now kube-controller-manager
  606. #systemctl status kube-controller-manager
  607. echo 配置 /usr/lib/systemd/system/kube-scheduler.service
  608. #### 4、配置scheduler
  609. cat <<EOF >/usr/lib/systemd/system/kube-scheduler.service
  610. [Unit]
  611. Description=Kubernetes Scheduler
  612. Documentation=https://github.com/kubernetes/kubernetes
  613. After=network.target
  614. [Service]
  615. ExecStart=/usr/local/bin/kube-scheduler \\
  616. --v=2 \\
  617. --leader-elect=true \\
  618. --authentication-kubeconfig=$(dirname ${k8s_pki_path})/scheduler.conf \\
  619. --authorization-kubeconfig=$(dirname ${k8s_pki_path})/scheduler.conf \\
  620. --kubeconfig=$(dirname ${k8s_pki_path})/scheduler.conf
  621. Restart=always
  622. RestartSec=10s
  623. [Install]
  624. WantedBy=multi-user.target
  625. EOF
  626. echo 启动 kube-scheduler
  627. systemctl daemon-reload
  628. systemctl enable --now kube-scheduler
  629. #systemctl status kube-scheduler
  630. echo TLS与引导启动配置
  631. #### 5、TLS与引导启动原理
  632. #1、master配置bootstrap 说明,这部分不执行
  633. echo 准备 $(dirname ${k8s_pki_path})/bootstrap-kubelet.conf
  634. #设置集群
  635. kubectl config set-cluster kubernetes \
  636. --certificate-authority=${k8s_pki_path}/ca.pem \
  637. --embed-certs=true \
  638. --server=https://${export_addr}:${export_port} \
  639. --kubeconfig=$(dirname ${k8s_pki_path})/bootstrap-kubelet.conf
  640. #设置秘钥
  641. kubectl config set-credentials tls-bootstrap-token-user \
  642. --token=a2e4f9.781b15d024bb7876 \
  643. --kubeconfig=$(dirname ${k8s_pki_path})/bootstrap-kubelet.conf
  644. #设置上下文
  645. kubectl config set-context tls-bootstrap-token-user@kubernetes \
  646. --cluster=kubernetes \
  647. --user=tls-bootstrap-token-user \
  648. --kubeconfig=$(dirname ${k8s_pki_path})/bootstrap-kubelet.conf
  649. #使用设置
  650. kubectl config use-context tls-bootstrap-token-user@kubernetes \
  651. --kubeconfig=$(dirname ${k8s_pki_path})/bootstrap-kubelet.conf
  652. #2、设置kubectl执行权限
  653. #kubectl 能不能操作集群是看 /root/.kube 下有没有config文件,而config就是我们之前生成的admin.conf,具有操作权限的
  654. mkdir -p /root/.kube
  655. \cp -f $(dirname ${k8s_pki_path})/admin.conf /root/.kube/config
  656. #验证集群目前状态,如果不能正常查询集群状态,需要排查k8s前面的组件是否有故障
  657. kubectl get cs
  658. #3、创建集群引导权限文件
  659. #准备 bootstrap.secret.yaml
  660. echo 准备 $(dirname ${k8s_pki_path})/bootstrap.secret.yaml
  661. cat <<EOF > $(dirname ${k8s_pki_path})/bootstrap.secret.yaml
  662. apiVersion: v1
  663. kind: Secret
  664. metadata:
  665. name: bootstrap-token-a2e4f9
  666. namespace: kube-system
  667. type: bootstrap.kubernetes.io/token
  668. stringData:
  669. description: "The default bootstrap token generated by 'kubelet '."
  670. token-id: a2e4f9
  671. token-secret: 781b15d024bb7876
  672. usage-bootstrap-authentication: "true"
  673. usage-bootstrap-signing: "true"
  674. auth-extra-groups: system:bootstrappers:default-node-token,system:bootstrappers:worker,system:bootstrappers:ingress
  675. ---
  676. apiVersion: rbac.authorization.k8s.io/v1
  677. kind: ClusterRoleBinding
  678. metadata:
  679. name: kubelet-bootstrap
  680. roleRef:
  681. apiGroup: rbac.authorization.k8s.io
  682. kind: ClusterRole
  683. name: system:node-bootstrapper
  684. subjects:
  685. - apiGroup: rbac.authorization.k8s.io
  686. kind: Group
  687. name: system:bootstrappers:default-node-token
  688. ---
  689. apiVersion: rbac.authorization.k8s.io/v1
  690. kind: ClusterRoleBinding
  691. metadata:
  692. name: node-autoapprove-bootstrap
  693. roleRef:
  694. apiGroup: rbac.authorization.k8s.io
  695. kind: ClusterRole
  696. name: system:certificates.k8s.io:certificatesigningrequests:nodeclient
  697. subjects:
  698. - apiGroup: rbac.authorization.k8s.io
  699. kind: Group
  700. name: system:bootstrappers:default-node-token
  701. ---
  702. apiVersion: rbac.authorization.k8s.io/v1
  703. kind: ClusterRoleBinding
  704. metadata:
  705. name: node-autoapprove-certificate-rotation
  706. roleRef:
  707. apiGroup: rbac.authorization.k8s.io
  708. kind: ClusterRole
  709. name: system:certificates.k8s.io:certificatesigningrequests:selfnodeclient
  710. subjects:
  711. - apiGroup: rbac.authorization.k8s.io
  712. kind: Group
  713. name: system:nodes
  714. ---
  715. apiVersion: rbac.authorization.k8s.io/v1
  716. kind: ClusterRole
  717. metadata:
  718. annotations:
  719. rbac.authorization.kubernetes.io/autoupdate: "true"
  720. labels:
  721. kubernetes.io/bootstrapping: rbac-defaults
  722. name: system:kube-apiserver-to-kubelet
  723. rules:
  724. - apiGroups:
  725. - ""
  726. resources:
  727. - nodes/proxy
  728. - nodes/stats
  729. - nodes/log
  730. - nodes/spec
  731. - nodes/metrics
  732. verbs:
  733. - "*"
  734. ---
  735. apiVersion: rbac.authorization.k8s.io/v1
  736. kind: ClusterRoleBinding
  737. metadata:
  738. name: system:kube-apiserver
  739. namespace: ""
  740. roleRef:
  741. apiGroup: rbac.authorization.k8s.io
  742. kind: ClusterRole
  743. name: system:kube-apiserver-to-kubelet
  744. subjects:
  745. - apiGroup: rbac.authorization.k8s.io
  746. kind: User
  747. name: kube-apiserver
  748. EOF
  749. kubectl create -f $(dirname ${k8s_pki_path})/bootstrap.secret.yaml
  750. #### 6、引导k8s节点启动
  751. ##### 1、配置kubelet
  752. ###### 1、kubelet.service
  753. #所有节点,配置kubelet服务
  754. echo 引导k8s节点启动
  755. echo 配置 /usr/lib/systemd/system/kubelet.service
  756. cat << EOF > /usr/lib/systemd/system/kubelet.service
  757. [Unit]
  758. Description=Kubernetes Kubelet
  759. Documentation=https://github.com/kubernetes/kubernetes
  760. After=containerd.service
  761. Requires=containerd.service
  762. [Service]
  763. ExecStart=/usr/local/bin/kubelet
  764. Restart=always
  765. StartLimitInterval=0
  766. RestartSec=10
  767. [Install]
  768. WantedBy=multi-user.target
  769. EOF
  770. echo 配置 /etc/systemd/system/kubelet.service.d/10-kubelet.conf
  771. cat << EOF > /etc/systemd/system/kubelet.service.d/10-kubelet.conf
  772. [Service]
  773. Environment="KUBELET_KUBECONFIG_ARGS=--bootstrap-kubeconfig=$(dirname ${k8s_pki_path})/bootstrap-kubelet.conf --kubeconfig=$(dirname ${k8s_pki_path})/kubelet.conf"
  774. Environment="KUBELET_SYSTEM_ARGS=--container-runtime=remote --runtime-request-timeout=15m --container-runtime-endpoint=unix:///run/containerd/containerd.sock"
  775. Environment="KUBELET_CONFIG_ARGS=--config=/etc/kubernetes/kubelet-conf.yml"
  776. Environment="KUBELET_EXTRA_ARGS=--node-labels=node.kubernetes.io/node='' "
  777. ExecStart=
  778. ExecStart=/usr/local/bin/kubelet \$KUBELET_KUBECONFIG_ARGS \$KUBELET_CONFIG_ARGS \$KUBELET_SYSTEM_ARGS \$KUBELET_EXTRA_ARGS
  779. EOF
  780. #/etc/kubernetes/kubelet.conf 会自动生成
  781. ###### 2、kubelet-conf.yml
  782. #clusterDNS 为service网络的第10个ip值,改成自己的。如:10.96.0.10
  783. echo 生成 $(dirname ${k8s_pki_path})/kubelet-conf.yml
  784. cat << EOF > $(dirname ${k8s_pki_path})/kubelet-conf.yml
  785. apiVersion: kubelet.config.k8s.io/v1beta1
  786. kind: KubeletConfiguration
  787. address: 0.0.0.0
  788. port: 10250
  789. readOnlyPort: 10255
  790. authentication:
  791. anonymous:
  792. enabled: false
  793. webhook:
  794. cacheTTL: 2m0s
  795. enabled: true
  796. x509:
  797. clientCAFile: ${k8s_pki_path}/ca.pem
  798. authorization:
  799. mode: Webhook
  800. webhook:
  801. cacheAuthorizedTTL: 5m0s
  802. cacheUnauthorizedTTL: 30s
  803. cgroupDriver: systemd
  804. cgroupsPerQOS: true
  805. clusterDNS:
  806. - ${svc_dns}
  807. clusterDomain: cluster.local
  808. containerLogMaxFiles: 5
  809. containerLogMaxSize: 10Mi
  810. contentType: application/vnd.kubernetes.protobuf
  811. cpuCFSQuota: true
  812. cpuManagerPolicy: none
  813. cpuManagerReconcilePeriod: 10s
  814. enableControllerAttachDetach: true
  815. enableDebuggingHandlers: true
  816. enforceNodeAllocatable:
  817. - pods
  818. eventBurst: 10
  819. eventRecordQPS: 5
  820. evictionHard:
  821. imagefs.available: 15%
  822. memory.available: 100Mi
  823. nodefs.available: 10%
  824. nodefs.inodesFree: 5%
  825. evictionPressureTransitionPeriod: 5m0s #缩小相应的配置
  826. failSwapOn: true
  827. fileCheckFrequency: 20s
  828. hairpinMode: promiscuous-bridge
  829. healthzBindAddress: 127.0.0.1
  830. healthzPort: 10248
  831. httpCheckFrequency: 20s
  832. imageGCHighThresholdPercent: 85
  833. imageGCLowThresholdPercent: 80
  834. imageMinimumGCAge: 2m0s
  835. iptablesDropBit: 15
  836. iptablesMasqueradeBit: 14
  837. kubeAPIBurst: 10
  838. kubeAPIQPS: 5
  839. makeIPTablesUtilChains: true
  840. maxOpenFiles: 1000000
  841. maxPods: 110
  842. nodeStatusUpdateFrequency: 10s
  843. oomScoreAdj: -999
  844. podPidsLimit: -1
  845. registryBurst: 10
  846. registryPullQPS: 5
  847. resolvConf: /etc/resolv.conf
  848. rotateCertificates: true
  849. runtimeRequestTimeout: 2m0s
  850. serializeImagePulls: true
  851. staticPodPath: $(dirname ${k8s_pki_path})/manifests
  852. streamingConnectionIdleTimeout: 4h0m0s
  853. syncFrequency: 1m0s
  854. volumeStatsAggPeriod: 1m0s
  855. EOF
  856. ###### 3、启动 kubelet
  857. # 启动 kubelet
  858. echo 启动 kubelet
  859. systemctl daemon-reload
  860. systemctl enable --now kubelet
  861. #systemctl status kubelet
  862. echo 检查集群node, Ready 或 NotReady 都算正常的
  863. kubectl get node
  864. # 返回 Ready 或 NotReady 目前到这里都是正常的,只是还不能正常使用,需安装 cni,比如 calico
  865. ##### 2、配置kube-proxy
  866. ###### 1、生成 kube-proxy.conf
  867. #创建kube-proxy的sa
  868. echo 配置 kube-proxy 权限
  869. kubectl -n kube-system create serviceaccount kube-proxy
  870. #创建角色绑定
  871. kubectl create clusterrolebinding system:kube-proxy --clusterrole system:node-proxier --serviceaccount kube-system:kube-proxy
  872. echo "================================================================================"
  873. K8S_DIR=$(dirname ${k8s_pki_path})
  874. get_secret_token() {
  875. sleep 1s
  876. #导出变量,方便后面使用
  877. SECRET=$(kubectl -n kube-system get sa/kube-proxy --output=jsonpath='{.secrets[0].name}')
  878. JWT_TOKEN=$(kubectl -n kube-system get secret/$SECRET --output=jsonpath='{.data.token}' | base64 -d)
  879. }
  880. get_secret_token
  881. reget_count=0
  882. while [ -z $JWT_TOKEN ]
  883. do
  884. if [ $reget_count -ge 5 ]
  885. then
  886. echo "reget_count $reget_count -ge 5, please check config"
  887. exit 1
  888. fi
  889. sleep 1s
  890. get_secret_token
  891. ((reget_count+=1))
  892. echo "reget_count $reget_count..."
  893. done
  894. echo "================================================================================"
  895. echo $SECRET
  896. echo $JWT_TOKEN
  897. echo "================================================================================"
  898. # 生成kube-proxy配置
  899. # --server: 指定自己的apiserver地址或者lb地址
  900. echo 生成 ${K8S_DIR}/kube-proxy.conf
  901. kubectl config set-cluster kubernetes \
  902. --certificate-authority=${k8s_pki_path}/ca.pem \
  903. --embed-certs=true \
  904. --server=https://${export_addr}:6443 \
  905. --kubeconfig=${K8S_DIR}/kube-proxy.conf
  906. # kube-proxy秘钥设置
  907. kubectl config set-credentials kubernetes \
  908. --token=${JWT_TOKEN} \
  909. --kubeconfig=${K8S_DIR}/kube-proxy.conf
  910. kubectl config set-context kubernetes \
  911. --cluster=kubernetes \
  912. --user=kubernetes \
  913. --kubeconfig=${K8S_DIR}/kube-proxy.conf
  914. kubectl config use-context kubernetes \
  915. --kubeconfig=${K8S_DIR}/kube-proxy.conf
  916. ###### 2、配置 kube-proxy.service
  917. echo 配置 /usr/lib/systemd/system/kube-proxy.service
  918. cat << EOF > /usr/lib/systemd/system/kube-proxy.service
  919. [Unit]
  920. Description=Kubernetes Kube Proxy
  921. Documentation=https://github.com/kubernetes/kubernetes
  922. After=network.target
  923. [Service]
  924. ExecStart=/usr/local/bin/kube-proxy \\
  925. --config=$(dirname ${k8s_pki_path})/kube-proxy.yaml \\
  926. --v=2
  927. Restart=always
  928. RestartSec=10s
  929. [Install]
  930. WantedBy=multi-user.target
  931. EOF
  932. ###### 3、准备 kube-proxy.yaml
  933. #注意修改自己的Pod网段范围
  934. echo 准备 $(dirname ${k8s_pki_path})/kube-proxy.yaml
  935. cat <<EOF > $(dirname ${k8s_pki_path})/kube-proxy.yaml
  936. apiVersion: kubeproxy.config.k8s.io/v1alpha1
  937. bindAddress: 0.0.0.0
  938. clientConnection:
  939. acceptContentTypes: ""
  940. burst: 10
  941. contentType: application/vnd.kubernetes.protobuf
  942. kubeconfig: $(dirname ${k8s_pki_path})/kube-proxy.conf #kube-proxy引导文件
  943. qps: 5
  944. clusterCIDR: ${pod_cidr} #修改为自己的Pod-CIDR
  945. configSyncPeriod: 15m0s
  946. conntrack:
  947. max: null
  948. maxPerCore: 32768
  949. min: 131072
  950. tcpCloseWaitTimeout: 1h0m0s
  951. tcpEstablishedTimeout: 24h0m0s
  952. enableProfiling: false
  953. healthzBindAddress: 0.0.0.0:10256
  954. hostnameOverride: ""
  955. iptables:
  956. masqueradeAll: false
  957. masqueradeBit: 14
  958. minSyncPeriod: 0s
  959. syncPeriod: 30s
  960. ipvs:
  961. masqueradeAll: true
  962. minSyncPeriod: 5s
  963. scheduler: "rr"
  964. syncPeriod: 30s
  965. kind: KubeProxyConfiguration
  966. metricsBindAddress: 127.0.0.1:10249
  967. mode: "ipvs"
  968. nodePortAddresses: null
  969. oomScoreAdj: -999
  970. portRange: ""
  971. udpIdleTimeout: 250ms
  972. EOF
  973. #启动 kube-proxy
  974. echo 启动 kube-proxy
  975. systemctl daemon-reload
  976. systemctl enable --now kube-proxy
  977. #systemctl status kube-proxy
  978. echo "================================================================================"
  979. sleep 1s
  980. cd $origin_dir
  981. #### 7、部署calico
  982. echo 部署calico
  983. #kubectl create -f https://raw.githubusercontent.com/projectcalico/calico/v3.25.0/manifests/tigera-operator.yaml
  984. #kubectl create -f https://raw.githubusercontent.com/projectcalico/calico/v3.25.0/manifests/custom-resources.yaml
  985. mkdir -p k8s-components
  986. cd k8s-components
  987. calico_version=v3.25
  988. if [ ! -f calico-$calico_version.yaml ]; then
  989. curl https://docs.tigera.io/archive/$calico_version/manifests/calico.yaml -o calico-$calico_version.yaml
  990. fi
  991. cp calico-$calico_version.yaml calico.yaml
  992. # CIDR=$(grep -- "--cluster-cidr=" /usr/lib/systemd/system/kube-controller-manager.service | awk '{print $1}' | awk -F= '{print $2}')
  993. # echo $CIDR
  994. CIDR=$pod_cidr
  995. sed -i "s|# - name: CALICO_IPV4POOL_CIDR|- name: CALICO_IPV4POOL_CIDR|" calico.yaml
  996. sed -i "s|# value: \"192.168.0.0/16\"| value: \"$CIDR\"|" calico.yaml
  997. kubectl apply -f calico.yaml
  998. echo "================================================================================"
  999. sleep 1s
  1000. #### 8、部署coreDNS
  1001. echo 部署coreDNS
  1002. coredns=coredns-deployment
  1003. if [ ! -d $coredns ]; then
  1004. git clone https://github.com/coredns/deployment.git $coredns
  1005. fi
  1006. cd $coredns/kubernetes
  1007. #改为 service 网段的 第 10 个ip, 例如10.96.0.10
  1008. #./deploy.sh -s -i 10.96.0.10 | kubectl apply -f -
  1009. # 拼装 service 网段的 第 10 个ip
  1010. # 或直接用 $svc_dns
  1011. svc_ip=$(kubectl get svc | grep kubernetes | awk '{print $3}')
  1012. svc_dns_ip=${svc_ip}0
  1013. cmd="./deploy.sh -s -i ${svc_dns_ip} | kubectl apply -f -"
  1014. echo $cmd
  1015. eval $cmd