#!/usr/bin/env bash

# Copyright 2016 The Kubernetes Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

# This script is for configuring kubernetes master and node instances. It is
# uploaded in the manifests tarball.

# TODO: this script duplicates templating logic from cluster/saltbase/salt
# using sed. It should use an actual template parser on the manifest
# files.

set -o errexit
set -o nounset
set -o pipefail

function setup-os-params {
  # Reset core_pattern. On GCI, the default core_pattern pipes the core dumps to
  # /sbin/crash_reporter which is more restrictive in saving crash dumps. So for
  # now, set a generic core_pattern that users can work with.
  echo "core.%e.%p.%t" > /proc/sys/kernel/core_pattern
}

function config-ip-firewall {
  echo "Configuring IP firewall rules"
  # The GCI image has a host firewall which drops most inbound/forwarded packets.
  # We need to add rules to accept all TCP/UDP/ICMP packets.
  if iptables -L INPUT | grep "Chain INPUT (policy DROP)" > /dev/null; then
    echo "Add rules to accept all inbound TCP/UDP/ICMP packets"
    iptables -A INPUT -w -p TCP -j ACCEPT
    iptables -A INPUT -w -p UDP -j ACCEPT
    iptables -A INPUT -w -p ICMP -j ACCEPT
  fi
  if iptables -L FORWARD | grep "Chain FORWARD (policy DROP)" > /dev/null; then
    echo "Add rules to accept all forwarded TCP/UDP/ICMP packets"
    iptables -A FORWARD -w -p TCP -j ACCEPT
    iptables -A FORWARD -w -p UDP -j ACCEPT
    iptables -A FORWARD -w -p ICMP -j ACCEPT
  fi

  iptables -N KUBE-METADATA-SERVER
  iptables -I FORWARD -p tcp -d 169.254.169.254 --dport 80 -j KUBE-METADATA-SERVER

  if [[ -n "${KUBE_FIREWALL_METADATA_SERVER:-}" ]]; then
    iptables -A KUBE-METADATA-SERVER -j DROP
  fi
}

function create-dirs {
  echo "Creating required directories"
  mkdir -p /var/lib/kubelet
  mkdir -p /etc/kubernetes/manifests
  if [[ "${KUBERNETES_MASTER:-}" == "false" ]]; then
    mkdir -p /var/lib/kube-proxy
  fi
}

# Formats the given device ($1) if needed and mounts it at the given mount
# point ($2).
function safe-format-and-mount() {
  device=$1
  mountpoint=$2

  # Format only if the disk is not already formatted.
  if ! tune2fs -l "${device}" ; then
    echo "Formatting '${device}'"
    mkfs.ext4 -F "${device}"
  fi

  mkdir -p "${mountpoint}"
  echo "Mounting '${device}' at '${mountpoint}'"
  mount -o discard,defaults "${device}" "${mountpoint}"
}
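# Example (illustrative): safe-format-and-mount /dev/sdb /mnt/disks/ssd0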

# Local SSDs, if present, are mounted at /mnt/disks/ssdN.
function ensure-local-ssds() {
  for ssd in /dev/disk/by-id/google-local-ssd-*; do
    if [ -e "${ssd}" ]; then
      ssdnum=$(echo "${ssd}" | sed -e 's/\/dev\/disk\/by-id\/google-local-ssd-\([0-9]*\)/\1/')
      ssdmount="/mnt/disks/ssd${ssdnum}/"
      mkdir -p "${ssdmount}"
      safe-format-and-mount "${ssd}" "${ssdmount}"
      echo "Mounted local SSD $ssd at ${ssdmount}"
      chmod a+w "${ssdmount}"
    else
      echo "No local SSD disks found."
    fi
  done
}

# Installs logrotate configuration files
function setup-logrotate() {
  mkdir -p /etc/logrotate.d/
  # Configure log rotation for all logs in /var/log, which is where k8s services
  # are configured to write their log files. Whenever logrotate is run, this
  # config will:
  # * rotate the log file if its size is > 100MB OR if one day has elapsed
  # * save rotated logs into a gzipped timestamped backup
  # * log file timestamp (controlled by 'dateformat') includes seconds too. This
  #   ensures that logrotate can generate unique logfiles during each rotation
  #   (otherwise it skips rotation if 'maxsize' is reached multiple times in a
  #   day).
  # * keep only 5 old (rotated) logs, and will discard older logs.
  cat > /etc/logrotate.d/allvarlogs <<EOF
/var/log/*.log {
    rotate ${LOGROTATE_FILES_MAX_COUNT:-5}
    copytruncate
    missingok
    notifempty
    compress
    maxsize ${LOGROTATE_MAX_SIZE:-100M}
    daily
    dateext
    dateformat -%Y%m%d-%s
    create 0644 root root
}
EOF

}

# Finds the master PD device; returns it in MASTER_PD_DEVICE
function find-master-pd {
  MASTER_PD_DEVICE=""
  if [[ ! -e /dev/disk/by-id/google-master-pd ]]; then
    return
  fi
  device_info=$(ls -l /dev/disk/by-id/google-master-pd)
  relative_path=${device_info##* }
  MASTER_PD_DEVICE="/dev/disk/by-id/${relative_path}"
}

# Mounts a persistent disk (formatting if needed) to store the persistent data
# on the master -- etcd's data, a few settings, and security certs/keys/tokens.
# safe-format-and-mount only formats an unformatted disk, and mkdir -p will
# leave an existing directory in place.
function mount-master-pd {
  find-master-pd
  if [[ -z "${MASTER_PD_DEVICE:-}" ]]; then
    return
  fi

  echo "Mounting master-pd"
  local -r pd_path="/dev/disk/by-id/google-master-pd"
  local -r mount_point="/mnt/disks/master-pd"
  # Format and mount the disk, create directories on it for all of the master's
  # persistent data, and link them to where they're used.
  mkdir -p "${mount_point}"
  safe-format-and-mount "${pd_path}" "${mount_point}"
  echo "Mounted master-pd '${pd_path}' at '${mount_point}'"

  # NOTE: These locations on the PD store persistent data, so to maintain
  # upgradeability, these locations should not change. If they do, take care
  # to maintain a migration path from these locations to the new
  # locations.

  # Contains all the data stored in etcd.
  mkdir -m 700 -p "${mount_point}/var/etcd"
  ln -s -f "${mount_point}/var/etcd" /var/etcd
  mkdir -p /etc/srv
  # Contains the dynamically generated apiserver auth certs and keys.
  mkdir -p "${mount_point}/srv/kubernetes"
  ln -s -f "${mount_point}/srv/kubernetes" /etc/srv/kubernetes
  # Directory for kube-apiserver to store SSH key (if necessary).
  mkdir -p "${mount_point}/srv/sshproxy"
  ln -s -f "${mount_point}/srv/sshproxy" /etc/srv/sshproxy

  if ! id etcd &>/dev/null; then
    useradd -s /sbin/nologin -d /var/etcd etcd
  fi
  chown -R etcd "${mount_point}/var/etcd"
  chgrp -R etcd "${mount_point}/var/etcd"
}

# append_or_replace_prefixed_line ensures:
# 1. the specified file exists
# 2. existing lines with the specified ${prefix} are removed
# 3. a new line with the specified ${prefix}${suffix} is appended
function append_or_replace_prefixed_line {
  local -r file="${1:-}"
  local -r prefix="${2:-}"
  local -r suffix="${3:-}"
  local -r dirname="$(dirname "${file}")"
  local -r tmpfile="$(mktemp -t filtered.XXXX --tmpdir="${dirname}")"

  touch "${file}"
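  # Keep every line that does NOT start with ${prefix}; the temp file lives in
  # the same directory as ${file}, so the final mv below is an atomic rename.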
  awk "substr(\$0,0,length(\"${prefix}\")) != \"${prefix}\" { print }" "${file}" > "${tmpfile}"
  echo "${prefix}${suffix}" >> "${tmpfile}"
  mv "${tmpfile}" "${file}"
}
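
# Example (illustrative): append_or_replace_prefixed_line /tmp/tokens.csv "tok," "admin,admin"

# Decodes base64-encoded data ($1) and writes it to the given path ($2); the
# umask in the subshell makes the file readable by its owner only (mode 0600).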
function write-pki-data {
  local data="${1}"
  local path="${2}"
  (umask 077; echo "${data}" | base64 --decode > "${path}")
}

function create-node-pki {
  echo "Creating node pki files"

  local -r pki_dir="/etc/srv/kubernetes/pki"
  mkdir -p "${pki_dir}"

  if [[ -z "${CA_CERT_BUNDLE:-}" ]]; then
    CA_CERT_BUNDLE="${CA_CERT}"
  fi

  CA_CERT_BUNDLE_PATH="${pki_dir}/ca-certificates.crt"
  write-pki-data "${CA_CERT_BUNDLE}" "${CA_CERT_BUNDLE_PATH}"

  if [[ ! -z "${KUBELET_CERT:-}" && ! -z "${KUBELET_KEY:-}" ]]; then
    KUBELET_CERT_PATH="${pki_dir}/kubelet.crt"
    write-pki-data "${KUBELET_CERT}" "${KUBELET_CERT_PATH}"

    KUBELET_KEY_PATH="${pki_dir}/kubelet.key"
    write-pki-data "${KUBELET_KEY}" "${KUBELET_KEY_PATH}"
  fi

  # TODO(mikedanese): remove this when we don't support downgrading to versions
  # < 1.6.
  ln -sf "${CA_CERT_BUNDLE_PATH}" /etc/srv/kubernetes/ca.crt
}

function create-master-pki {
  echo "Creating master pki files"

  local -r pki_dir="/etc/srv/kubernetes/pki"
  mkdir -p "${pki_dir}"

  CA_CERT_PATH="${pki_dir}/ca.crt"
  write-pki-data "${CA_CERT}" "${CA_CERT_PATH}"

  # this is not true on GKE
  if [[ ! -z "${CA_KEY:-}" ]]; then
    CA_KEY_PATH="${pki_dir}/ca.key"
    write-pki-data "${CA_KEY}" "${CA_KEY_PATH}"
  fi

  if [[ -z "${APISERVER_SERVER_CERT:-}" || -z "${APISERVER_SERVER_KEY:-}" ]]; then
    APISERVER_SERVER_CERT="${MASTER_CERT}"
    APISERVER_SERVER_KEY="${MASTER_KEY}"
  fi

  APISERVER_SERVER_CERT_PATH="${pki_dir}/apiserver.crt"
  write-pki-data "${APISERVER_SERVER_CERT}" "${APISERVER_SERVER_CERT_PATH}"

  APISERVER_SERVER_KEY_PATH="${pki_dir}/apiserver.key"
  write-pki-data "${APISERVER_SERVER_KEY}" "${APISERVER_SERVER_KEY_PATH}"

  if [[ -z "${APISERVER_CLIENT_CERT:-}" || -z "${APISERVER_CLIENT_KEY:-}" ]]; then
    APISERVER_CLIENT_CERT="${KUBEAPISERVER_CERT}"
    APISERVER_CLIENT_KEY="${KUBEAPISERVER_KEY}"
  fi

  APISERVER_CLIENT_CERT_PATH="${pki_dir}/apiserver-client.crt"
  write-pki-data "${APISERVER_CLIENT_CERT}" "${APISERVER_CLIENT_CERT_PATH}"

  APISERVER_CLIENT_KEY_PATH="${pki_dir}/apiserver-client.key"
  write-pki-data "${APISERVER_CLIENT_KEY}" "${APISERVER_CLIENT_KEY_PATH}"

  if [[ -z "${SERVICEACCOUNT_CERT:-}" || -z "${SERVICEACCOUNT_KEY:-}" ]]; then
    SERVICEACCOUNT_CERT="${MASTER_CERT}"
    SERVICEACCOUNT_KEY="${MASTER_KEY}"
  fi

  SERVICEACCOUNT_CERT_PATH="${pki_dir}/serviceaccount.crt"
  write-pki-data "${SERVICEACCOUNT_CERT}" "${SERVICEACCOUNT_CERT_PATH}"

  SERVICEACCOUNT_KEY_PATH="${pki_dir}/serviceaccount.key"
  write-pki-data "${SERVICEACCOUNT_KEY}" "${SERVICEACCOUNT_KEY_PATH}"

  # TODO(mikedanese): remove this when we don't support downgrading to versions
  # < 1.6.
  ln -sf "${APISERVER_SERVER_KEY_PATH}" /etc/srv/kubernetes/server.key
  ln -sf "${APISERVER_SERVER_CERT_PATH}" /etc/srv/kubernetes/server.cert

  if [[ ! -z "${REQUESTHEADER_CA_CERT:-}" ]]; then
    AGGREGATOR_CA_KEY_PATH="${pki_dir}/aggr_ca.key"
    write-pki-data "${AGGREGATOR_CA_KEY}" "${AGGREGATOR_CA_KEY_PATH}"

    REQUESTHEADER_CA_CERT_PATH="${pki_dir}/aggr_ca.crt"
    write-pki-data "${REQUESTHEADER_CA_CERT}" "${REQUESTHEADER_CA_CERT_PATH}"

    PROXY_CLIENT_KEY_PATH="${pki_dir}/proxy_client.key"
    write-pki-data "${PROXY_CLIENT_KEY}" "${PROXY_CLIENT_KEY_PATH}"

    PROXY_CLIENT_CERT_PATH="${pki_dir}/proxy_client.crt"
    write-pki-data "${PROXY_CLIENT_CERT}" "${PROXY_CLIENT_CERT_PATH}"
  fi
}

# After the first boot and on upgrade, these files exist on the master-pd
# and should never be touched again (except perhaps an additional service
# account, see NB below.) One exception is if METADATA_CLOBBERS_CONFIG is
# enabled. In that case the basic_auth.csv file will be rewritten to make
# sure it matches the metadata source of truth.
function create-master-auth {
  echo "Creating master auth files"
  local -r auth_dir="/etc/srv/kubernetes"
  local -r basic_auth_csv="${auth_dir}/basic_auth.csv"
  if [[ -n "${KUBE_PASSWORD:-}" && -n "${KUBE_USER:-}" ]]; then
    if [[ -e "${basic_auth_csv}" && "${METADATA_CLOBBERS_CONFIG:-false}" == "true" ]]; then
      # If METADATA_CLOBBERS_CONFIG is true, we want to rewrite the file
      # completely, because if we're changing KUBE_USER and KUBE_PASSWORD, we
      # have nothing to match on. The file is replaced just below with
      # append_or_replace_prefixed_line.
      rm "${basic_auth_csv}"
    fi
    append_or_replace_prefixed_line "${basic_auth_csv}" "${KUBE_PASSWORD},${KUBE_USER}," "admin,system:masters"
  fi

  local -r known_tokens_csv="${auth_dir}/known_tokens.csv"
  if [[ -e "${known_tokens_csv}" && "${METADATA_CLOBBERS_CONFIG:-false}" == "true" ]]; then
    rm "${known_tokens_csv}"
  fi
  if [[ -n "${KUBE_BEARER_TOKEN:-}" ]]; then
    append_or_replace_prefixed_line "${known_tokens_csv}" "${KUBE_BEARER_TOKEN}," "admin,admin,system:masters"
  fi
  if [[ -n "${KUBE_CONTROLLER_MANAGER_TOKEN:-}" ]]; then
    append_or_replace_prefixed_line "${known_tokens_csv}" "${KUBE_CONTROLLER_MANAGER_TOKEN}," "system:kube-controller-manager,uid:system:kube-controller-manager"
  fi
  if [[ -n "${KUBE_SCHEDULER_TOKEN:-}" ]]; then
    append_or_replace_prefixed_line "${known_tokens_csv}" "${KUBE_SCHEDULER_TOKEN}," "system:kube-scheduler,uid:system:kube-scheduler"
  fi
  if [[ -n "${KUBE_PROXY_TOKEN:-}" ]]; then
    append_or_replace_prefixed_line "${known_tokens_csv}" "${KUBE_PROXY_TOKEN}," "system:kube-proxy,uid:kube_proxy"
  fi
  if [[ -n "${NODE_PROBLEM_DETECTOR_TOKEN:-}" ]]; then
    append_or_replace_prefixed_line "${known_tokens_csv}" "${NODE_PROBLEM_DETECTOR_TOKEN}," "system:node-problem-detector,uid:node-problem-detector"
  fi
  local use_cloud_config="false"
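  # Write the GCE cloud provider config to /etc/gce.conf; it is kept only if
  # at least one optional setting below is present (tracked by use_cloud_config).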
  cat <<EOF >/etc/gce.conf
[global]
EOF
  if [[ -n "${GCE_API_ENDPOINT:-}" ]]; then
    cat <<EOF >>/etc/gce.conf
api-endpoint = ${GCE_API_ENDPOINT}
EOF
  fi
  if [[ -n "${TOKEN_URL:-}" && -n "${TOKEN_BODY:-}" ]]; then
    use_cloud_config="true"
    cat <<EOF >>/etc/gce.conf
token-url = ${TOKEN_URL}
token-body = ${TOKEN_BODY}
EOF
  fi
  if [[ -n "${PROJECT_ID:-}" ]]; then
    use_cloud_config="true"
    cat <<EOF >>/etc/gce.conf
project-id = ${PROJECT_ID}
EOF
  fi
  if [[ -n "${NETWORK_PROJECT_ID:-}" ]]; then
    use_cloud_config="true"
    cat <<EOF >>/etc/gce.conf
network-project-id = ${NETWORK_PROJECT_ID}
EOF
  fi
  if [[ -n "${NODE_NETWORK:-}" ]]; then
    use_cloud_config="true"
    cat <<EOF >>/etc/gce.conf
network-name = ${NODE_NETWORK}
EOF
  fi
  if [[ -n "${NODE_SUBNETWORK:-}" ]]; then
    use_cloud_config="true"
    cat <<EOF >>/etc/gce.conf
subnetwork-name = ${NODE_SUBNETWORK}
EOF
  fi
  if [[ -n "${NODE_INSTANCE_PREFIX:-}" ]]; then
    use_cloud_config="true"
    if [[ -n "${NODE_TAGS:-}" ]]; then
      local -r node_tags="${NODE_TAGS}"
    else
      local -r node_tags="${NODE_INSTANCE_PREFIX}"
    fi
    cat <<EOF >>/etc/gce.conf
node-tags = ${node_tags}
node-instance-prefix = ${NODE_INSTANCE_PREFIX}
EOF
  fi
  if [[ -n "${MULTIZONE:-}" ]]; then
    use_cloud_config="true"
    cat <<EOF >>/etc/gce.conf
multizone = ${MULTIZONE}
EOF
  fi
  if [[ -n "${GCE_ALPHA_FEATURES:-}" ]]; then
    use_cloud_config="true"
    cat <<EOF >>/etc/gce.conf
alpha-features = ${GCE_ALPHA_FEATURES}
EOF
  fi
  if [[ -n "${SECONDARY_RANGE_NAME:-}" ]]; then
    use_cloud_config="true"
    cat <<EOF >> /etc/gce.conf
secondary-range-name = ${SECONDARY_RANGE_NAME}
EOF
  fi
  if [[ "${use_cloud_config}" != "true" ]]; then
    rm -f /etc/gce.conf
  fi

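  # The webhook configs below are kubeconfig-format files pointing
  # kube-apiserver at the GCP authentication/authorization endpoints.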
  if [[ -n "${GCP_AUTHN_URL:-}" ]]; then
    cat <<EOF >/etc/gcp_authn.config
clusters:
  - name: gcp-authentication-server
    cluster:
      server: ${GCP_AUTHN_URL}
users:
  - name: kube-apiserver
    user:
      auth-provider:
        name: gcp
current-context: webhook
contexts:
- context:
    cluster: gcp-authentication-server
    user: kube-apiserver
  name: webhook
EOF
  fi

  if [[ -n "${GCP_AUTHZ_URL:-}" ]]; then
    cat <<EOF >/etc/gcp_authz.config
clusters:
  - name: gcp-authorization-server
    cluster:
      server: ${GCP_AUTHZ_URL}
users:
  - name: kube-apiserver
    user:
      auth-provider:
        name: gcp
current-context: webhook
contexts:
- context:
    cluster: gcp-authorization-server
    user: kube-apiserver
  name: webhook
EOF
  fi

  if [[ -n "${GCP_IMAGE_VERIFICATION_URL:-}" ]]; then
    # This is the config file for the image review webhook.
    cat <<EOF >/etc/gcp_image_review.config
clusters:
  - name: gcp-image-review-server
    cluster:
      server: ${GCP_IMAGE_VERIFICATION_URL}
users:
  - name: kube-apiserver
    user:
      auth-provider:
        name: gcp
current-context: webhook
contexts:
- context:
    cluster: gcp-image-review-server
    user: kube-apiserver
  name: webhook
EOF
    # This is the config for the image review admission controller.
    cat <<EOF >/etc/admission_controller.config
imagePolicy:
  kubeConfigFile: /etc/gcp_image_review.config
  allowTTL: 30
  denyTTL: 30
  retryBackoff: 500
  defaultAllow: true
EOF
  fi
}

# Write the config for the audit policy.
function create-master-audit-policy {
  local -r path="${1}"
  local -r policy="${2:-}"

  if [[ -n "${policy}" ]]; then
    echo "${policy}" > "${path}"
    return
  fi

  # Known api groups
  local -r known_apis='
      - group: "" # core
      - group: "admissionregistration.k8s.io"
      - group: "apiextensions.k8s.io"
      - group: "apiregistration.k8s.io"
      - group: "apps"
      - group: "authentication.k8s.io"
      - group: "authorization.k8s.io"
      - group: "autoscaling"
      - group: "batch"
      - group: "certificates.k8s.io"
      - group: "extensions"
      - group: "metrics.k8s.io"
      - group: "networking.k8s.io"
      - group: "policy"
      - group: "rbac.authorization.k8s.io"
      - group: "settings.k8s.io"
      - group: "storage.k8s.io"'

  cat <<EOF >"${path}"
apiVersion: audit.k8s.io/v1beta1
kind: Policy
rules:
  # The following requests were manually identified as high-volume and low-risk,
  # so drop them.
  - level: None
    users: ["system:kube-proxy"]
    verbs: ["watch"]
    resources:
      - group: "" # core
        resources: ["endpoints", "services", "services/status"]
  - level: None
    # Ingress controller reads 'configmaps/ingress-uid' through the unsecured port.
    # TODO(#46983): Change this to the ingress controller service account.
    users: ["system:unsecured"]
    namespaces: ["kube-system"]
    verbs: ["get"]
    resources:
      - group: "" # core
        resources: ["configmaps"]
  - level: None
    users: ["kubelet"] # legacy kubelet identity
    verbs: ["get"]
    resources:
      - group: "" # core
        resources: ["nodes", "nodes/status"]
  - level: None
    userGroups: ["system:nodes"]
    verbs: ["get"]
    resources:
      - group: "" # core
        resources: ["nodes", "nodes/status"]
  - level: None
    users:
      - system:kube-controller-manager
      - system:kube-scheduler
      - system:serviceaccount:kube-system:endpoint-controller
    verbs: ["get", "update"]
    namespaces: ["kube-system"]
    resources:
      - group: "" # core
        resources: ["endpoints"]
  - level: None
    users: ["system:apiserver"]
    verbs: ["get"]
    resources:
      - group: "" # core
        resources: ["namespaces", "namespaces/status", "namespaces/finalize"]
  # Don't log HPA fetching metrics.
  - level: None
    users:
      - system:kube-controller-manager
    verbs: ["get", "list"]
    resources:
      - group: "metrics.k8s.io"

  # Don't log these read-only URLs.
  - level: None
    nonResourceURLs:
      - /healthz*
      - /version
      - /swagger*

  # Don't log events requests.
  - level: None
    resources:
      - group: "" # core
        resources: ["events"]

  # node and pod status calls from nodes are high-volume and can be large, don't log responses for expected updates from nodes
  - level: Request
    users: ["kubelet", "system:node-problem-detector", "system:serviceaccount:kube-system:node-problem-detector"]
    verbs: ["update","patch"]
    resources:
      - group: "" # core
        resources: ["nodes/status", "pods/status"]
    omitStages:
      - "RequestReceived"
  - level: Request
    userGroups: ["system:nodes"]
    verbs: ["update","patch"]
    resources:
      - group: "" # core
        resources: ["nodes/status", "pods/status"]
    omitStages:
      - "RequestReceived"

  # deletecollection calls can be large, don't log responses for expected namespace deletions
  - level: Request
    users: ["system:serviceaccount:kube-system:namespace-controller"]
    verbs: ["deletecollection"]
    omitStages:
      - "RequestReceived"

  # Secrets, ConfigMaps, and TokenReviews can contain sensitive & binary data,
  # so only log at the Metadata level.
  - level: Metadata
    resources:
      - group: "" # core
        resources: ["secrets", "configmaps"]
      - group: authentication.k8s.io
        resources: ["tokenreviews"]
    omitStages:
      - "RequestReceived"
  # Get responses can be large; skip them.
  - level: Request
    verbs: ["get", "list", "watch"]
    resources: ${known_apis}
    omitStages:
      - "RequestReceived"
  # Default level for known APIs
  - level: RequestResponse
    resources: ${known_apis}
    omitStages:
      - "RequestReceived"
  # Default level for all other requests.
  - level: Metadata
    omitStages:
      - "RequestReceived"
EOF
}

# Writes the configuration file used by the webhook advanced auditing backend.
function create-master-audit-webhook-config {
  local -r path="${1}"

  if [[ -n "${GCP_AUDIT_URL:-}" ]]; then
    # The webhook config file is a kubeconfig file describing the webhook endpoint.
    cat <<EOF >"${path}"
clusters:
  - name: gcp-audit-server
    cluster:
      server: ${GCP_AUDIT_URL}
users:
  - name: kube-apiserver
    user:
      auth-provider:
        name: gcp
current-context: webhook
contexts:
- context:
    cluster: gcp-audit-server
    user: kube-apiserver
  name: webhook
EOF
  fi
}

# Arg 1: the IP address of the API server
function create-kubelet-kubeconfig() {
  local apiserver_address="${1}"
  if [[ -z "${apiserver_address}" ]]; then
    echo "Must provide API server address to create Kubelet kubeconfig file!"
    exit 1
  fi
  echo "Creating kubelet kubeconfig file"
  cat <<EOF >/var/lib/kubelet/bootstrap-kubeconfig
apiVersion: v1
kind: Config
users:
- name: kubelet
  user:
    client-certificate: ${KUBELET_CERT_PATH}
    client-key: ${KUBELET_KEY_PATH}
clusters:
- name: local
  cluster:
    server: https://${apiserver_address}
    certificate-authority: ${CA_CERT_BUNDLE_PATH}
contexts:
- context:
    cluster: local
    user: kubelet
  name: service-account-context
current-context: service-account-context
EOF
}
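# Example (illustrative): create-kubelet-kubeconfig 10.0.0.1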

# Uses KUBELET_CA_CERT (falling back to CA_CERT), KUBELET_CERT, and KUBELET_KEY
# to generate a kubeconfig file for the kubelet to securely connect to the apiserver.
# Set REGISTER_MASTER_KUBELET to true if kubelet on the master node
# should register with the apiserver.
function create-master-kubelet-auth {
  # Only configure the kubelet on the master if the required variables are
  # set in the environment.
  if [[ -n "${KUBELET_APISERVER:-}" && -n "${KUBELET_CERT:-}" && -n "${KUBELET_KEY:-}" ]]; then
    REGISTER_MASTER_KUBELET="true"
    create-kubelet-kubeconfig "${KUBELET_APISERVER}"
  fi
}

function create-kubeproxy-user-kubeconfig {
  echo "Creating kube-proxy user kubeconfig file"
  cat <<EOF >/var/lib/kube-proxy/kubeconfig
apiVersion: v1
kind: Config
users:
- name: kube-proxy
  user:
    token: ${KUBE_PROXY_TOKEN}
clusters:
- name: local
  cluster:
    certificate-authority-data: ${CA_CERT_BUNDLE}
contexts:
- context:
    cluster: local
    user: kube-proxy
  name: service-account-context
current-context: service-account-context
EOF
}

function create-kubecontrollermanager-kubeconfig {
  echo "Creating kube-controller-manager kubeconfig file"
  mkdir -p /etc/srv/kubernetes/kube-controller-manager
  cat <<EOF >/etc/srv/kubernetes/kube-controller-manager/kubeconfig
apiVersion: v1
kind: Config
users:
- name: kube-controller-manager
  user:
    token: ${KUBE_CONTROLLER_MANAGER_TOKEN}
clusters:
- name: local
  cluster:
    insecure-skip-tls-verify: true
    server: https://localhost:443
contexts:
- context:
    cluster: local
    user: kube-controller-manager
  name: service-account-context
current-context: service-account-context
EOF
}

function create-kubescheduler-kubeconfig {
  echo "Creating kube-scheduler kubeconfig file"
  mkdir -p /etc/srv/kubernetes/kube-scheduler
  cat <<EOF >/etc/srv/kubernetes/kube-scheduler/kubeconfig
apiVersion: v1
kind: Config
users:
- name: kube-scheduler
  user:
    token: ${KUBE_SCHEDULER_TOKEN}
clusters:
- name: local
  cluster:
    insecure-skip-tls-verify: true
    server: https://localhost:443
contexts:
- context:
    cluster: local
    user: kube-scheduler
  name: kube-scheduler
current-context: kube-scheduler
EOF
}

function create-node-problem-detector-kubeconfig {
  echo "Creating node-problem-detector kubeconfig file"
  mkdir -p /var/lib/node-problem-detector
  cat <<EOF >/var/lib/node-problem-detector/kubeconfig
apiVersion: v1
kind: Config
users:
- name: node-problem-detector
  user:
    token: ${NODE_PROBLEM_DETECTOR_TOKEN}
clusters:
- name: local
  cluster:
    certificate-authority-data: ${CA_CERT}
contexts:
- context:
    cluster: local
    user: node-problem-detector
  name: service-account-context
current-context: service-account-context
EOF
}

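# Note: ETCD_CA_CERT and ETCD_PEER_CERT are expected to be gzipped and then
# base64-encoded (hence the gunzip in the pipeline), while ETCD_PEER_KEY is
# base64-encoded only.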
function create-master-etcd-auth {
  if [[ -n "${ETCD_CA_CERT:-}" && -n "${ETCD_PEER_KEY:-}" && -n "${ETCD_PEER_CERT:-}" ]]; then
    local -r auth_dir="/etc/srv/kubernetes"
    echo "${ETCD_CA_CERT}" | base64 --decode | gunzip > "${auth_dir}/etcd-ca.crt"
    echo "${ETCD_PEER_KEY}" | base64 --decode > "${auth_dir}/etcd-peer.key"
    echo "${ETCD_PEER_CERT}" | base64 --decode | gunzip > "${auth_dir}/etcd-peer.crt"
  fi
}

function assemble-docker-flags {
  echo "Assemble docker command line flags"
  local docker_opts="-p /var/run/docker.pid --iptables=false --ip-masq=false"
  if [[ "${TEST_CLUSTER:-}" == "true" ]]; then
    docker_opts+=" --log-level=debug"
  else
    docker_opts+=" --log-level=warn"
  fi
  local use_net_plugin="true"
  if [[ "${NETWORK_PROVIDER:-}" == "kubenet" || "${NETWORK_PROVIDER:-}" == "cni" ]]; then
    # set docker0 cidr to private ip address range to avoid conflict with cbr0 cidr range
    docker_opts+=" --bip=169.254.123.1/24"
  else
    use_net_plugin="false"
    docker_opts+=" --bridge=cbr0"
  fi

  # Decide whether to enable a docker registry mirror. This is taken from
  # the "kube-env" metadata value.
  if [[ -n "${DOCKER_REGISTRY_MIRROR_URL:-}" ]]; then
    echo "Enable docker registry mirror at: ${DOCKER_REGISTRY_MIRROR_URL}"
    docker_opts+=" --registry-mirror=${DOCKER_REGISTRY_MIRROR_URL}"
  fi

  # Configure docker logging
  docker_opts+=" --log-driver=${DOCKER_LOG_DRIVER:-json-file}"
  docker_opts+=" --log-opt=max-size=${DOCKER_LOG_MAX_SIZE:-10m}"
  docker_opts+=" --log-opt=max-file=${DOCKER_LOG_MAX_FILE:-5}"

  echo "DOCKER_OPTS=\"${docker_opts} ${EXTRA_DOCKER_OPTS:-}\"" > /etc/default/docker

  if [[ "${use_net_plugin}" == "true" ]]; then
    # If using a network plugin, extend the docker configuration to always remove
    # the network checkpoint to avoid corrupt checkpoints.
    # (https://github.com/docker/docker/issues/18283).
845 echo "Extend the docker.service configuration to remove the network checkpiont"
    mkdir -p /etc/systemd/system/docker.service.d
    cat <<EOF >/etc/systemd/system/docker.service.d/01network.conf
[Service]
ExecStartPre=/bin/sh -x -c "rm -rf /var/lib/docker/network"
EOF
  fi

  # Ensure TasksMax is sufficient for docker.
  # (https://github.com/kubernetes/kubernetes/issues/51977)
  echo "Extend the docker.service configuration to set a higher pids limit"
  mkdir -p /etc/systemd/system/docker.service.d
  cat <<EOF >/etc/systemd/system/docker.service.d/02tasksmax.conf
[Service]
TasksMax=infinity
EOF

  systemctl daemon-reload
  echo "Docker command line is updated. Restart docker to pick it up"
  systemctl restart docker
}

# This function assembles the kubelet systemd service file and starts it
# using systemctl.
function start-kubelet {
  echo "Start kubelet"

  local -r kubelet_cert_dir="/var/lib/kubelet/pki/"
  mkdir -p "${kubelet_cert_dir}"

  local kubelet_bin="${KUBE_HOME}/bin/kubelet"
  local -r version="$("${kubelet_bin}" --version=true | cut -f2 -d " ")"
  local -r builtin_kubelet="/usr/bin/kubelet"
  if [[ "${TEST_CLUSTER:-}" == "true" ]]; then
    # Determine which binary to use on test clusters. We use the built-in
    # version only if the downloaded version is the same as the built-in
    # version. This allows GCI to run some of the e2e tests to qualify the
    # built-in kubelet.
    if [[ -x "${builtin_kubelet}" ]]; then
      local -r builtin_version="$("${builtin_kubelet}" --version=true | cut -f2 -d " ")"
      if [[ "${builtin_version}" == "${version}" ]]; then
        kubelet_bin="${builtin_kubelet}"
      fi
    fi
  fi
  echo "Using kubelet binary at ${kubelet_bin}"
  local flags="${KUBELET_TEST_LOG_LEVEL:-"--v=2"} ${KUBELET_TEST_ARGS:-}"
  flags+=" --allow-privileged=true"
  flags+=" --cgroup-root=/"
  flags+=" --cloud-provider=gce"
  flags+=" --cluster-dns=${DNS_SERVER_IP}"
  flags+=" --cluster-domain=${DNS_DOMAIN}"
  flags+=" --pod-manifest-path=/etc/kubernetes/manifests"
  flags+=" --experimental-mounter-path=${CONTAINERIZED_MOUNTER_HOME}/mounter"
  flags+=" --experimental-check-node-capabilities-before-mount=true"
  flags+=" --cert-dir=${kubelet_cert_dir}"

  if [[ -n "${KUBELET_PORT:-}" ]]; then
    flags+=" --port=${KUBELET_PORT}"
  fi
  if [[ "${KUBERNETES_MASTER:-}" == "true" ]]; then
    flags+=" ${MASTER_KUBELET_TEST_ARGS:-}"
    flags+=" --enable-debugging-handlers=false"
    flags+=" --hairpin-mode=none"
    if [[ "${REGISTER_MASTER_KUBELET:-false}" == "true" ]]; then
      #TODO(mikedanese): allow static pods to start before creating a client
      #flags+=" --bootstrap-kubeconfig=/var/lib/kubelet/bootstrap-kubeconfig"
      #flags+=" --kubeconfig=/var/lib/kubelet/kubeconfig"
      flags+=" --kubeconfig=/var/lib/kubelet/bootstrap-kubeconfig"
      flags+=" --register-schedulable=false"
    else
      # Standalone mode (not widely used?)
      flags+=" --pod-cidr=${MASTER_IP_RANGE}"
    fi
  else # For nodes
    flags+=" ${NODE_KUBELET_TEST_ARGS:-}"
    flags+=" --enable-debugging-handlers=true"
    flags+=" --bootstrap-kubeconfig=/var/lib/kubelet/bootstrap-kubeconfig"
    flags+=" --kubeconfig=/var/lib/kubelet/kubeconfig"
    if [[ "${HAIRPIN_MODE:-}" == "promiscuous-bridge" ]] || \
       [[ "${HAIRPIN_MODE:-}" == "hairpin-veth" ]] || \
       [[ "${HAIRPIN_MODE:-}" == "none" ]]; then
      flags+=" --hairpin-mode=${HAIRPIN_MODE}"
    fi
    flags+=" --anonymous-auth=false --authorization-mode=Webhook --client-ca-file=${CA_CERT_BUNDLE_PATH}"
  fi
  # Network plugin
  if [[ -n "${NETWORK_PROVIDER:-}" || -n "${NETWORK_POLICY_PROVIDER:-}" ]]; then
    flags+=" --cni-bin-dir=/home/kubernetes/bin"
    if [[ "${NETWORK_POLICY_PROVIDER:-}" == "calico" ]]; then
      # Calico uses CNI always.
      if [[ "${KUBERNETES_PRIVATE_MASTER:-}" == "true" ]]; then
        flags+=" --network-plugin=${NETWORK_PROVIDER}"
      else
        flags+=" --network-plugin=cni"
      fi
    else
      # Otherwise use the configured value.
      flags+=" --network-plugin=${NETWORK_PROVIDER}"
    fi
  fi
  if [[ -n "${NON_MASQUERADE_CIDR:-}" ]]; then
    flags+=" --non-masquerade-cidr=${NON_MASQUERADE_CIDR}"
  fi
  # FlexVolume plugin
  if [[ -n "${VOLUME_PLUGIN_DIR:-}" ]]; then
    flags+=" --volume-plugin-dir=${VOLUME_PLUGIN_DIR}"
  fi
  if [[ "${ENABLE_MANIFEST_URL:-}" == "true" ]]; then
    flags+=" --manifest-url=${MANIFEST_URL}"
    flags+=" --manifest-url-header=${MANIFEST_URL_HEADER}"
  fi
  if [[ -n "${ENABLE_CUSTOM_METRICS:-}" ]]; then
    flags+=" --enable-custom-metrics=${ENABLE_CUSTOM_METRICS}"
  fi
  local node_labels=""
  if [[ "${KUBE_PROXY_DAEMONSET:-}" == "true" && "${KUBERNETES_MASTER:-}" != "true" ]]; then
    # Add kube-proxy daemonset label to node to avoid situation during cluster
    # upgrade/downgrade when there are two instances of kube-proxy running on a node.
    node_labels="beta.kubernetes.io/kube-proxy-ds-ready=true"
  fi
  if [[ -n "${NODE_LABELS:-}" ]]; then
    node_labels="${node_labels:+${node_labels},}${NODE_LABELS}"
  fi
  if [[ -n "${node_labels:-}" ]]; then
    flags+=" --node-labels=${node_labels}"
  fi
  if [[ -n "${NODE_TAINTS:-}" ]]; then
    flags+=" --register-with-taints=${NODE_TAINTS}"
  fi
  if [[ -n "${EVICTION_HARD:-}" ]]; then
    flags+=" --eviction-hard=${EVICTION_HARD}"
  fi
  if [[ -n "${FEATURE_GATES:-}" ]]; then
    flags+=" --feature-gates=${FEATURE_GATES}"
  fi
  if [[ -n "${ROTATE_CERTIFICATES:-}" ]]; then
    flags+=" --rotate-certificates=true"
  fi

  local -r kubelet_env_file="/etc/default/kubelet"
  echo "KUBELET_OPTS=\"${flags}\"" > "${kubelet_env_file}"

  # Write the systemd service file for kubelet.
  cat <<EOF >/etc/systemd/system/kubelet.service
[Unit]
Description=Kubernetes kubelet
Requires=network-online.target
After=network-online.target

[Service]
Restart=always
RestartSec=10
EnvironmentFile=${kubelet_env_file}
ExecStart=${kubelet_bin} \$KUBELET_OPTS

[Install]
WantedBy=multi-user.target
EOF

  # Flush iptables nat table
  iptables -t nat -F || true

  systemctl start kubelet.service
}

# This function assembles the node problem detector systemd service file and
# starts it using systemctl.
function start-node-problem-detector {
  echo "Start node problem detector"
  local -r npd_bin="${KUBE_HOME}/bin/node-problem-detector"
  local -r km_config="${KUBE_HOME}/node-problem-detector/config/kernel-monitor.json"
  local -r dm_config="${KUBE_HOME}/node-problem-detector/config/docker-monitor.json"
  echo "Using node problem detector binary at ${npd_bin}"
  local flags="${NPD_TEST_LOG_LEVEL:-"--v=2"} ${NPD_TEST_ARGS:-}"
  flags+=" --logtostderr"
  flags+=" --system-log-monitors=${km_config},${dm_config}"
  flags+=" --apiserver-override=https://${KUBERNETES_MASTER_NAME}?inClusterConfig=false&auth=/var/lib/node-problem-detector/kubeconfig"
  local -r npd_port=${NODE_PROBLEM_DETECTOR_PORT:-20256}
  flags+=" --port=${npd_port}"

  # Write the systemd service file for node problem detector.
  cat <<EOF >/etc/systemd/system/node-problem-detector.service
[Unit]
Description=Kubernetes node problem detector
Requires=network-online.target
After=network-online.target

[Service]
Restart=always
RestartSec=10
ExecStart=${npd_bin} ${flags}

[Install]
WantedBy=multi-user.target
EOF

  systemctl start node-problem-detector.service
}

# Create the log file and set its properties.
#
# $1 is the file to create.
function prepare-log-file {
  touch "$1"
  chmod 644 "$1"
  chown root:root "$1"
}

# Prepares parameters for kube-proxy manifest.
# $1 source path of kube-proxy manifest.
function prepare-kube-proxy-manifest-variables {
  local -r src_file=$1

  remove-salt-config-comments "${src_file}"

  local -r kubeconfig="--kubeconfig=/var/lib/kube-proxy/kubeconfig"
  local kube_docker_registry="gcr.io/google_containers"
  if [[ -n "${KUBE_DOCKER_REGISTRY:-}" ]]; then
    kube_docker_registry=${KUBE_DOCKER_REGISTRY}
  fi
  local -r kube_proxy_docker_tag=$(cat /home/kubernetes/kube-docker-files/kube-proxy.docker_tag)
  local api_servers="--master=https://${KUBERNETES_MASTER_NAME}"
  local params="${KUBEPROXY_TEST_LOG_LEVEL:-"--v=2"}"
  if [[ -n "${FEATURE_GATES:-}" ]]; then
    params+=" --feature-gates=${FEATURE_GATES}"
  fi
  params+=" --iptables-sync-period=1m --iptables-min-sync-period=10s --ipvs-sync-period=1m --ipvs-min-sync-period=10s"
  if [[ -n "${KUBEPROXY_TEST_ARGS:-}" ]]; then
    params+=" ${KUBEPROXY_TEST_ARGS}"
  fi
  local container_env=""
  local kube_cache_mutation_detector_env_name=""
  local kube_cache_mutation_detector_env_value=""
  if [[ -n "${ENABLE_CACHE_MUTATION_DETECTOR:-}" ]]; then
    container_env="env:"
    kube_cache_mutation_detector_env_name="- name: KUBE_CACHE_MUTATION_DETECTOR"
    kube_cache_mutation_detector_env_value="value: \"${ENABLE_CACHE_MUTATION_DETECTOR}\""
  fi
  local pod_priority=""
  if [[ "${ENABLE_POD_PRIORITY:-}" == "true" ]]; then
    pod_priority="priorityClassName: system-node-critical"
  fi
  sed -i -e "s@{{kubeconfig}}@${kubeconfig}@g" ${src_file}
  sed -i -e "s@{{pillar\['kube_docker_registry'\]}}@${kube_docker_registry}@g" ${src_file}
  sed -i -e "s@{{pillar\['kube-proxy_docker_tag'\]}}@${kube_proxy_docker_tag}@g" ${src_file}
  sed -i -e "s@{{params}}@${params}@g" ${src_file}
  sed -i -e "s@{{container_env}}@${container_env}@g" ${src_file}
  sed -i -e "s@{{kube_cache_mutation_detector_env_name}}@${kube_cache_mutation_detector_env_name}@g" ${src_file}
  sed -i -e "s@{{kube_cache_mutation_detector_env_value}}@${kube_cache_mutation_detector_env_value}@g" ${src_file}
  sed -i -e "s@{{pod_priority}}@${pod_priority}@g" ${src_file}
  sed -i -e "s@{{ cpurequest }}@100m@g" ${src_file}
  sed -i -e "s@{{api_servers_with_port}}@${api_servers}@g" ${src_file}
  sed -i -e "s@{{kubernetes_service_host_env_value}}@${KUBERNETES_MASTER_NAME}@g" ${src_file}
  if [[ -n "${CLUSTER_IP_RANGE:-}" ]]; then
    sed -i -e "s@{{cluster_cidr}}@--cluster-cidr=${CLUSTER_IP_RANGE}@g" ${src_file}
  fi
}

# Starts kube-proxy static pod.
function start-kube-proxy {
  echo "Start kube-proxy static pod"
  prepare-log-file /var/log/kube-proxy.log
  local -r src_file="${KUBE_HOME}/kube-manifests/kubernetes/kube-proxy.manifest"
  prepare-kube-proxy-manifest-variables "${src_file}"

  cp "${src_file}" /etc/kubernetes/manifests
}

# Replaces the variables in the etcd manifest file with the real values, and then
# copies the file to the manifest dir.
# $1: value for variable 'suffix'
# $2: value for variable 'port'
# $3: value for variable 'server_port'
# $4: value for variable 'cpulimit'
# $5: pod name, which should be either etcd or etcd-events
function prepare-etcd-manifest {
  local host_name=$(hostname)
  local etcd_cluster=""
  local cluster_state="new"
  local etcd_protocol="http"
  local etcd_creds=""

  if [[ -n "${INITIAL_ETCD_CLUSTER_STATE:-}" ]]; then
    cluster_state="${INITIAL_ETCD_CLUSTER_STATE}"
  fi
  if [[ -n "${ETCD_CA_KEY:-}" && -n "${ETCD_CA_CERT:-}" && -n "${ETCD_PEER_KEY:-}" && -n "${ETCD_PEER_CERT:-}" ]]; then
    etcd_creds=" --peer-trusted-ca-file /etc/srv/kubernetes/etcd-ca.crt --peer-cert-file /etc/srv/kubernetes/etcd-peer.crt --peer-key-file /etc/srv/kubernetes/etcd-peer.key -peer-client-cert-auth "
    etcd_protocol="https"
  fi

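  # Build the initial cluster string, e.g. (hostnames illustrative):
  #   etcd-host1=https://host1:2380,etcd-host2=https://host2:2380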
  for host in $(echo "${INITIAL_ETCD_CLUSTER:-${host_name}}" | tr "," "\n"); do
    etcd_host="etcd-${host}=${etcd_protocol}://${host}:$3"
    if [[ -n "${etcd_cluster}" ]]; then
      etcd_cluster+=","
    fi
    etcd_cluster+="${etcd_host}"
  done

  local -r temp_file="/tmp/$5"
  cp "${KUBE_HOME}/kube-manifests/kubernetes/gci-trusty/etcd.manifest" "${temp_file}"
  remove-salt-config-comments "${temp_file}"
  sed -i -e "s@{{ *suffix *}}@$1@g" "${temp_file}"
  sed -i -e "s@{{ *port *}}@$2@g" "${temp_file}"
  sed -i -e "s@{{ *server_port *}}@$3@g" "${temp_file}"
  sed -i -e "s@{{ *cpulimit *}}@\"$4\"@g" "${temp_file}"
  sed -i -e "s@{{ *hostname *}}@$host_name@g" "${temp_file}"
  sed -i -e "s@{{ *srv_kube_path *}}@/etc/srv/kubernetes@g" "${temp_file}"
  sed -i -e "s@{{ *etcd_cluster *}}@$etcd_cluster@g" "${temp_file}"
  # Get default storage backend from manifest file.
  local -r default_storage_backend=$(cat "${temp_file}" | \
    grep -o "{{ *pillar\.get('storage_backend', '\(.*\)') *}}" | \
    sed -e "s@{{ *pillar\.get('storage_backend', '\(.*\)') *}}@\1@g")
  if [[ -n "${STORAGE_BACKEND:-}" ]]; then
    sed -i -e "s@{{ *pillar\.get('storage_backend', '\(.*\)') *}}@${STORAGE_BACKEND}@g" "${temp_file}"
  else
    sed -i -e "s@{{ *pillar\.get('storage_backend', '\(.*\)') *}}@\1@g" "${temp_file}"
  fi
  if [[ "${STORAGE_BACKEND:-${default_storage_backend}}" == "etcd3" ]]; then
    sed -i -e "s@{{ *quota_bytes *}}@--quota-backend-bytes=4294967296@g" "${temp_file}"
  else
    sed -i -e "s@{{ *quota_bytes *}}@@g" "${temp_file}"
  fi
  sed -i -e "s@{{ *cluster_state *}}@$cluster_state@g" "${temp_file}"
  if [[ -n "${ETCD_IMAGE:-}" ]]; then
    sed -i -e "s@{{ *pillar\.get('etcd_docker_tag', '\(.*\)') *}}@${ETCD_IMAGE}@g" "${temp_file}"
  else
    sed -i -e "s@{{ *pillar\.get('etcd_docker_tag', '\(.*\)') *}}@\1@g" "${temp_file}"
  fi
  if [[ -n "${ETCD_DOCKER_REPOSITORY:-}" ]]; then
    sed -i -e "s@{{ *pillar\.get('etcd_docker_repository', '\(.*\)') *}}@${ETCD_DOCKER_REPOSITORY}@g" "${temp_file}"
  else
    sed -i -e "s@{{ *pillar\.get('etcd_docker_repository', '\(.*\)') *}}@\1@g" "${temp_file}"
  fi
  sed -i -e "s@{{ *etcd_protocol *}}@$etcd_protocol@g" "${temp_file}"
  sed -i -e "s@{{ *etcd_creds *}}@$etcd_creds@g" "${temp_file}"
  if [[ -n "${ETCD_VERSION:-}" ]]; then
    sed -i -e "s@{{ *pillar\.get('etcd_version', '\(.*\)') *}}@${ETCD_VERSION}@g" "${temp_file}"
  else
    sed -i -e "s@{{ *pillar\.get('etcd_version', '\(.*\)') *}}@\1@g" "${temp_file}"
  fi
  # Replace the volume host path.
  sed -i -e "s@/mnt/master-pd/var/etcd@/mnt/disks/master-pd/var/etcd@g" "${temp_file}"
  mv "${temp_file}" /etc/kubernetes/manifests
}

function start-etcd-empty-dir-cleanup-pod {
  cp "${KUBE_HOME}/kube-manifests/kubernetes/gci-trusty/etcd-empty-dir-cleanup/etcd-empty-dir-cleanup.yaml" "/etc/kubernetes/manifests"
}

# Starts etcd server pod (and etcd-events pod if needed).
# More specifically, it prepares dirs and files, sets the variable value
# in the manifests, and copies them to /etc/kubernetes/manifests.
function start-etcd-servers {
  echo "Start etcd pods"
  if [[ -d /etc/etcd ]]; then
    rm -rf /etc/etcd
  fi
  if [[ -e /etc/default/etcd ]]; then
    rm -f /etc/default/etcd
  fi
  if [[ -e /etc/systemd/system/etcd.service ]]; then
    rm -f /etc/systemd/system/etcd.service
  fi
  if [[ -e /etc/init.d/etcd ]]; then
    rm -f /etc/init.d/etcd
  fi
  prepare-log-file /var/log/etcd.log
  prepare-etcd-manifest "" "2379" "2380" "200m" "etcd.manifest"

  prepare-log-file /var/log/etcd-events.log
  prepare-etcd-manifest "-events" "4002" "2381" "100m" "etcd-events.manifest"
}

# Calculates the following variables based on env variables, which will be used
# by the manifests of several kube-master components.
#   CLOUD_CONFIG_OPT
#   CLOUD_CONFIG_VOLUME
#   CLOUD_CONFIG_MOUNT
#   DOCKER_REGISTRY
function compute-master-manifest-variables {
  CLOUD_CONFIG_OPT=""
  CLOUD_CONFIG_VOLUME=""
  CLOUD_CONFIG_MOUNT=""
  if [[ -f /etc/gce.conf ]]; then
    CLOUD_CONFIG_OPT="--cloud-config=/etc/gce.conf"
    CLOUD_CONFIG_VOLUME="{\"name\": \"cloudconfigmount\",\"hostPath\": {\"path\": \"/etc/gce.conf\", \"type\": \"FileOrCreate\"}},"
    CLOUD_CONFIG_MOUNT="{\"name\": \"cloudconfigmount\",\"mountPath\": \"/etc/gce.conf\", \"readOnly\": true},"
  fi
  DOCKER_REGISTRY="gcr.io/google_containers"
  if [[ -n "${KUBE_DOCKER_REGISTRY:-}" ]]; then
    DOCKER_REGISTRY="${KUBE_DOCKER_REGISTRY}"
  fi
}

# A helper function that bind mounts kubelet dirs for running mount in a chroot
function prepare-mounter-rootfs {
  echo "Prepare containerized mounter"
  mount --bind "${CONTAINERIZED_MOUNTER_HOME}" "${CONTAINERIZED_MOUNTER_HOME}"
  mount -o remount,exec "${CONTAINERIZED_MOUNTER_HOME}"
  CONTAINERIZED_MOUNTER_ROOTFS="${CONTAINERIZED_MOUNTER_HOME}/rootfs"
  mount --rbind /var/lib/kubelet/ "${CONTAINERIZED_MOUNTER_ROOTFS}/var/lib/kubelet"
  mount --make-rshared "${CONTAINERIZED_MOUNTER_ROOTFS}/var/lib/kubelet"
  mount --bind -o ro /proc "${CONTAINERIZED_MOUNTER_ROOTFS}/proc"
  mount --bind -o ro /dev "${CONTAINERIZED_MOUNTER_ROOTFS}/dev"
  cp /etc/resolv.conf "${CONTAINERIZED_MOUNTER_ROOTFS}/etc/"
}

# A helper function for removing salt configuration and comments from a file.
# This is mainly for preparing a manifest file.
#
# $1: Full path of the file to manipulate
function remove-salt-config-comments {
  # Remove salt configuration.
  sed -i "/^[ |\t]*{[#|%]/d" $1
  # Remove comments.
  sed -i "/^[ |\t]*#/d" $1
}
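# For example, template lines like "{% set x = 1 %}" and comment lines like
# "# example" are deleted; everything else is left untouched.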
1263
1264# Starts kubernetes apiserver.
1265# It prepares the log file, loads the docker image, calculates variables, sets them
1266# in the manifest file, and then copies the manifest file to /etc/kubernetes/manifests.
1267#
1268# Assumed vars (which are calculated in function compute-master-manifest-variables)
1269# CLOUD_CONFIG_OPT
1270# CLOUD_CONFIG_VOLUME
1271# CLOUD_CONFIG_MOUNT
1272# DOCKER_REGISTRY
1273function start-kube-apiserver {
1274 echo "Start kubernetes api-server"
1275 prepare-log-file /var/log/kube-apiserver.log
1276 prepare-log-file /var/log/kube-apiserver-audit.log
1277
1278 # Calculate variables and assemble the command line.
1279 local params="${API_SERVER_TEST_LOG_LEVEL:-"--v=2"} ${APISERVER_TEST_ARGS:-} ${CLOUD_CONFIG_OPT}"
1280 params+=" --address=127.0.0.1"
1281 params+=" --allow-privileged=true"
1282 params+=" --cloud-provider=gce"
1283 params+=" --client-ca-file=${CA_CERT_BUNDLE_PATH}"
1284 params+=" --etcd-servers=http://127.0.0.1:2379"
1285 params+=" --etcd-servers-overrides=/events#http://127.0.0.1:4002"
1286 params+=" --secure-port=443"
1287 params+=" --tls-cert-file=${APISERVER_SERVER_CERT_PATH}"
1288 params+=" --tls-private-key-file=${APISERVER_SERVER_KEY_PATH}"
1289 if [[ -s "${REQUESTHEADER_CA_CERT_PATH:-}" ]]; then
1290 params+=" --requestheader-client-ca-file=${REQUESTHEADER_CA_CERT_PATH}"
1291 params+=" --requestheader-allowed-names=aggregator"
1292 params+=" --requestheader-extra-headers-prefix=X-Remote-Extra-"
1293 params+=" --requestheader-group-headers=X-Remote-Group"
1294 params+=" --requestheader-username-headers=X-Remote-User"
1295 params+=" --proxy-client-cert-file=${PROXY_CLIENT_CERT_PATH}"
1296 params+=" --proxy-client-key-file=${PROXY_CLIENT_KEY_PATH}"
1297 fi
1298 params+=" --enable-aggregator-routing=true"
1299 if [[ -e "${APISERVER_CLIENT_CERT_PATH}" ]] && [[ -e "${APISERVER_CLIENT_KEY_PATH}" ]]; then
1300 params+=" --kubelet-client-certificate=${APISERVER_CLIENT_CERT_PATH}"
1301 params+=" --kubelet-client-key=${APISERVER_CLIENT_KEY_PATH}"
1302 fi
1303 if [[ -n "${SERVICEACCOUNT_CERT_PATH:-}" ]]; then
1304 params+=" --service-account-key-file=${SERVICEACCOUNT_CERT_PATH}"
1305 fi
1306 params+=" --token-auth-file=/etc/srv/kubernetes/known_tokens.csv"
1307 if [[ -n "${KUBE_PASSWORD:-}" && -n "${KUBE_USER:-}" ]]; then
1308 params+=" --basic-auth-file=/etc/srv/kubernetes/basic_auth.csv"
1309 fi
1310 if [[ -n "${STORAGE_BACKEND:-}" ]]; then
1311 params+=" --storage-backend=${STORAGE_BACKEND}"
1312 fi
1313 if [[ -n "${STORAGE_MEDIA_TYPE:-}" ]]; then
1314 params+=" --storage-media-type=${STORAGE_MEDIA_TYPE}"
1315 fi
1316 if [[ -n "${KUBE_APISERVER_REQUEST_TIMEOUT_SEC:-}" ]]; then
1317 params+=" --request-timeout=${KUBE_APISERVER_REQUEST_TIMEOUT_SEC}s"
1318 fi
1319 if [[ -n "${ENABLE_GARBAGE_COLLECTOR:-}" ]]; then
1320 params+=" --enable-garbage-collector=${ENABLE_GARBAGE_COLLECTOR}"
1321 fi
1322 if [[ -n "${NUM_NODES:-}" ]]; then
1323 # If the cluster is large, increase max-requests-inflight limit in apiserver.
1324 if [[ "${NUM_NODES}" -ge 1000 ]]; then
1325 params+=" --max-requests-inflight=1500 --max-mutating-requests-inflight=500"
1326 fi
1327 # Set amount of memory available for apiserver based on number of nodes.
1328 # TODO: Once we start setting proper requests and limits for apiserver
1329 # we should reuse the same logic here instead of current heuristic.
1330 params+=" --target-ram-mb=$((${NUM_NODES} * 60))"
1331 fi
1332 if [[ -n "${SERVICE_CLUSTER_IP_RANGE:-}" ]]; then
1333 params+=" --service-cluster-ip-range=${SERVICE_CLUSTER_IP_RANGE}"
1334 fi
1335 if [[ -n "${ETCD_QUORUM_READ:-}" ]]; then
1336 params+=" --etcd-quorum-read=${ETCD_QUORUM_READ}"
1337 fi
1338
1339 local audit_policy_config_mount=""
1340 local audit_policy_config_volume=""
1341 local audit_webhook_config_mount=""
1342 local audit_webhook_config_volume=""
1343 if [[ "${ENABLE_APISERVER_BASIC_AUDIT:-}" == "true" ]]; then
1344 # We currently only support enabling with a fixed path and with built-in log
1345 # rotation "disabled" (large value) so it behaves like kube-apiserver.log.
1346 # External log rotation should be set up the same as for kube-apiserver.log.
1347 params+=" --audit-log-path=/var/log/kube-apiserver-audit.log"
1348 params+=" --audit-log-maxage=0"
1349 params+=" --audit-log-maxbackup=0"
1350 # Lumberjack doesn't offer any way to disable size-based rotation. It also
1351 # has an in-memory counter that doesn't notice if you truncate the file.
1352 # 2000000000 (in MiB) is a large number that fits in 31 bits. If the log
1353 # grows at 10MiB/s (~30K QPS), it will rotate after ~6 years if apiserver
1354 # never restarts. Please manually restart apiserver before this time.
1355 params+=" --audit-log-maxsize=2000000000"
1356 # Disable AdvancedAuditing enabled by default
1357 if [[ -z "${FEATURE_GATES:-}" ]]; then
1358 FEATURE_GATES="AdvancedAuditing=false"
1359 else
1360 FEATURE_GATES="${FEATURE_GATES},AdvancedAuditing=false"
1361 fi
1362 elif [[ "${ENABLE_APISERVER_ADVANCED_AUDIT:-}" == "true" ]]; then
1363 local -r audit_policy_file="/etc/audit_policy.config"
1364 params+=" --audit-policy-file=${audit_policy_file}"
1365 # Create the audit policy file, and mount it into the apiserver pod.
1366 create-master-audit-policy "${audit_policy_file}" "${ADVANCED_AUDIT_POLICY:-}"
1367 audit_policy_config_mount="{\"name\": \"auditpolicyconfigmount\",\"mountPath\": \"${audit_policy_file}\", \"readOnly\": true},"
1368 audit_policy_config_volume="{\"name\": \"auditpolicyconfigmount\",\"hostPath\": {\"path\": \"${audit_policy_file}\", \"type\": \"FileOrCreate\"}},"
1369
1370 if [[ "${ADVANCED_AUDIT_BACKEND:-log}" == *"log"* ]]; then
1371 # The advanced audit log backend config matches the basic audit log config.
1372 params+=" --audit-log-path=/var/log/kube-apiserver-audit.log"
1373 params+=" --audit-log-maxage=0"
1374 params+=" --audit-log-maxbackup=0"
1375 # Lumberjack doesn't offer any way to disable size-based rotation. It also
1376 # has an in-memory counter that doesn't notice if you truncate the file.
1377 # 2000000000 (in MiB) is a large number that fits in 31 bits. If the log
1378 # grows at 10MiB/s (~30K QPS), it will rotate after ~6 years if apiserver
1379 # never restarts. Please manually restart apiserver before this time.
1380 params+=" --audit-log-maxsize=2000000000"
1381 fi
1382 if [[ "${ADVANCED_AUDIT_BACKEND:-}" == *"webhook"* ]]; then
1383 params+=" --audit-webhook-mode=batch"
1384
1385 # Create the audit webhook config file, and mount it into the apiserver pod.
1386 local -r audit_webhook_config_file="/etc/audit_webhook.config"
1387 params+=" --audit-webhook-config-file=${audit_webhook_config_file}"
1388 create-master-audit-webhook-config "${audit_webhook_config_file}"
1389 audit_webhook_config_mount="{\"name\": \"auditwebhookconfigmount\",\"mountPath\": \"${audit_webhook_config_file}\", \"readOnly\": true},"
1390 audit_webhook_config_volume="{\"name\": \"auditwebhookconfigmount\",\"hostPath\": {\"path\": \"${audit_webhook_config_file}\", \"type\": \"FileOrCreate\"}},"
1391 fi
1392 fi
1393
1394 if [[ "${ENABLE_APISERVER_LOGS_HANDLER:-}" == "false" ]]; then
1395 params+=" --enable-logs-handler=false"
1396 fi
1397
1398 local admission_controller_config_mount=""
1399 local admission_controller_config_volume=""
1400 local image_policy_webhook_config_mount=""
1401 local image_policy_webhook_config_volume=""
1402 if [[ -n "${ADMISSION_CONTROL:-}" ]]; then
1403 params+=" --admission-control=${ADMISSION_CONTROL}"
1404 if [[ ${ADMISSION_CONTROL} == *"ImagePolicyWebhook"* ]]; then
1405 params+=" --admission-control-config-file=/etc/admission_controller.config"
1406 # Mount the file to configure admission controllers if ImagePolicyWebhook is set.
1407 admission_controller_config_mount="{\"name\": \"admissioncontrollerconfigmount\",\"mountPath\": \"/etc/admission_controller.config\", \"readOnly\": false},"
1408 admission_controller_config_volume="{\"name\": \"admissioncontrollerconfigmount\",\"hostPath\": {\"path\": \"/etc/admission_controller.config\", \"type\": \"FileOrCreate\"}},"
1409 # Mount the file to configure the ImagePolicyWebhook's webhook.
1410 image_policy_webhook_config_mount="{\"name\": \"imagepolicywebhookconfigmount\",\"mountPath\": \"/etc/gcp_image_review.config\", \"readOnly\": false},"
1411 image_policy_webhook_config_volume="{\"name\": \"imagepolicywebhookconfigmount\",\"hostPath\": {\"path\": \"/etc/gcp_image_review.config\", \"type\": \"FileOrCreate\"}},"
1412 fi
1413 fi
1414
1415 if [[ -n "${KUBE_APISERVER_REQUEST_TIMEOUT:-}" ]]; then
1416 params+=" --min-request-timeout=${KUBE_APISERVER_REQUEST_TIMEOUT}"
1417 fi
1418 if [[ -n "${RUNTIME_CONFIG:-}" ]]; then
1419 params+=" --runtime-config=${RUNTIME_CONFIG}"
1420 fi
1421 if [[ -n "${FEATURE_GATES:-}" ]]; then
1422 params+=" --feature-gates=${FEATURE_GATES}"
1423 fi
1424 if [[ -n "${PROJECT_ID:-}" && -n "${TOKEN_URL:-}" && -n "${TOKEN_BODY:-}" && -n "${NODE_NETWORK:-}" ]]; then
1425 local -r vm_external_ip=$(curl --retry 5 --retry-delay 3 --fail --silent -H 'Metadata-Flavor: Google' "http://metadata/computeMetadata/v1/instance/network-interfaces/0/access-configs/0/external-ip")
1426 params+=" --advertise-address=${vm_external_ip}"
1427 params+=" --ssh-user=${PROXY_SSH_USER}"
1428 params+=" --ssh-keyfile=/etc/srv/sshproxy/.sshkeyfile"
1429 elif [ -n "${MASTER_ADVERTISE_ADDRESS:-}" ]; then
1430 params="${params} --advertise-address=${MASTER_ADVERTISE_ADDRESS}"
1431 fi

  local webhook_authn_config_mount=""
  local webhook_authn_config_volume=""
  if [[ -n "${GCP_AUTHN_URL:-}" ]]; then
    params+=" --authentication-token-webhook-config-file=/etc/gcp_authn.config"
    webhook_authn_config_mount="{\"name\": \"webhookauthnconfigmount\",\"mountPath\": \"/etc/gcp_authn.config\", \"readOnly\": false},"
    webhook_authn_config_volume="{\"name\": \"webhookauthnconfigmount\",\"hostPath\": {\"path\": \"/etc/gcp_authn.config\", \"type\": \"FileOrCreate\"}},"
  fi

  local authorization_mode="Node,RBAC"
  local -r src_dir="${KUBE_HOME}/kube-manifests/kubernetes/gci-trusty"

  # Enable ABAC mode unless the user explicitly opts out with ENABLE_LEGACY_ABAC=false.
  if [[ "${ENABLE_LEGACY_ABAC:-}" != "false" ]]; then
    echo "Warning: Enabling legacy ABAC policy. All service accounts will have superuser API access. Set ENABLE_LEGACY_ABAC=false to disable this."
    # Create the ABAC file if it doesn't exist yet, or if we have a KUBE_USER set (to ensure the right user is given permissions).
    if [[ -n "${KUBE_USER:-}" || ! -e /etc/srv/kubernetes/abac-authz-policy.jsonl ]]; then
      local -r abac_policy_json="${src_dir}/abac-authz-policy.jsonl"
      remove-salt-config-comments "${abac_policy_json}"
      if [[ -n "${KUBE_USER:-}" ]]; then
        sed -i -e "s/{{kube_user}}/${KUBE_USER}/g" "${abac_policy_json}"
      else
        sed -i -e "/{{kube_user}}/d" "${abac_policy_json}"
      fi
      cp "${abac_policy_json}" /etc/srv/kubernetes/
    fi

    params+=" --authorization-policy-file=/etc/srv/kubernetes/abac-authz-policy.jsonl"
    authorization_mode+=",ABAC"
  fi

  local webhook_config_mount=""
  local webhook_config_volume=""
  if [[ -n "${GCP_AUTHZ_URL:-}" ]]; then
    authorization_mode+=",Webhook"
    params+=" --authorization-webhook-config-file=/etc/gcp_authz.config"
    webhook_config_mount="{\"name\": \"webhookconfigmount\",\"mountPath\": \"/etc/gcp_authz.config\", \"readOnly\": false},"
    webhook_config_volume="{\"name\": \"webhookconfigmount\",\"hostPath\": {\"path\": \"/etc/gcp_authz.config\", \"type\": \"FileOrCreate\"}},"
  fi
  params+=" --authorization-mode=${authorization_mode}"

  local container_env=""
  if [[ -n "${ENABLE_CACHE_MUTATION_DETECTOR:-}" ]]; then
    container_env="{\"name\": \"KUBE_CACHE_MUTATION_DETECTOR\", \"value\": \"${ENABLE_CACHE_MUTATION_DETECTOR}\"}"
  fi
  if [[ -n "${ENABLE_PATCH_CONVERSION_DETECTOR:-}" ]]; then
    if [[ -n "${container_env}" ]]; then
      container_env="${container_env}, "
    fi
    # Append, so that an already-built KUBE_CACHE_MUTATION_DETECTOR entry is
    # kept; each entry carries its own braces so two entries form a valid list.
    container_env+="{\"name\": \"KUBE_PATCH_CONVERSION_DETECTOR\", \"value\": \"${ENABLE_PATCH_CONVERSION_DETECTOR}\"}"
  fi
  if [[ -n "${container_env}" ]]; then
    container_env="\"env\":[${container_env}],"
  fi
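  # Example (hypothetical values): with both detector flags set to "true",
  # the {{container_env}} placeholder below expands to
  #   "env":[{"name": "KUBE_CACHE_MUTATION_DETECTOR", "value": "true"}, {"name": "KUBE_PATCH_CONVERSION_DETECTOR", "value": "true"}],
  # i.e. a well-formed env list inside the apiserver container spec.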

  if [[ -n "${ENCRYPTION_PROVIDER_CONFIG:-}" ]]; then
    local encryption_provider_config_path="/etc/srv/kubernetes/encryption-provider-config.yml"
    if [[ -n "${GOOGLE_CLOUD_KMS_CONFIG_FILE_NAME:-}" && -n "${GOOGLE_CLOUD_KMS_CONFIG:-}" ]]; then
      echo "${GOOGLE_CLOUD_KMS_CONFIG}" | base64 --decode > "${GOOGLE_CLOUD_KMS_CONFIG_FILE_NAME}"
    fi

    echo "${ENCRYPTION_PROVIDER_CONFIG}" | base64 --decode > "${encryption_provider_config_path}"
    params+=" --experimental-encryption-provider-config=${encryption_provider_config_path}"
  fi
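  # For reference, ENCRYPTION_PROVIDER_CONFIG is expected to be the base64
  # encoding of an encryption configuration document. A minimal sketch of the
  # decoded file (assuming the EncryptionConfig format of this Kubernetes era;
  # the key material here is a placeholder, not a real secret):
  #   kind: EncryptionConfig
  #   apiVersion: v1
  #   resources:
  #   - resources:
  #     - secrets
  #     providers:
  #     - aescbc:
  #         keys:
  #         - name: key1
  #           secret: <32-byte key, base64-encoded>
  #     - identity: {}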

  src_file="${src_dir}/kube-apiserver.manifest"
  remove-salt-config-comments "${src_file}"
  # Evaluate variables.
  local -r kube_apiserver_docker_tag=$(cat /home/kubernetes/kube-docker-files/kube-apiserver.docker_tag)
  sed -i -e "s@{{params}}@${params}@g" "${src_file}"
  sed -i -e "s@{{container_env}}@${container_env}@g" "${src_file}"
  sed -i -e "s@{{srv_kube_path}}@/etc/srv/kubernetes@g" "${src_file}"
  sed -i -e "s@{{srv_sshproxy_path}}@/etc/srv/sshproxy@g" "${src_file}"
  sed -i -e "s@{{cloud_config_mount}}@${CLOUD_CONFIG_MOUNT}@g" "${src_file}"
  sed -i -e "s@{{cloud_config_volume}}@${CLOUD_CONFIG_VOLUME}@g" "${src_file}"
  sed -i -e "s@{{pillar\['kube_docker_registry'\]}}@${DOCKER_REGISTRY}@g" "${src_file}"
  sed -i -e "s@{{pillar\['kube-apiserver_docker_tag'\]}}@${kube_apiserver_docker_tag}@g" "${src_file}"
  sed -i -e "s@{{pillar\['allow_privileged'\]}}@true@g" "${src_file}"
  sed -i -e "s@{{secure_port}}@443@g" "${src_file}"
  # NOTE: by this point every {{secure_port}} has already been replaced with
  # 443, so the following substitution matches nothing.
  sed -i -e "s@{{secure_port}}@8080@g" "${src_file}"
  sed -i -e "s@{{additional_cloud_config_mount}}@@g" "${src_file}"
  sed -i -e "s@{{additional_cloud_config_volume}}@@g" "${src_file}"
  sed -i -e "s@{{webhook_authn_config_mount}}@${webhook_authn_config_mount}@g" "${src_file}"
  sed -i -e "s@{{webhook_authn_config_volume}}@${webhook_authn_config_volume}@g" "${src_file}"
  sed -i -e "s@{{webhook_config_mount}}@${webhook_config_mount}@g" "${src_file}"
  sed -i -e "s@{{webhook_config_volume}}@${webhook_config_volume}@g" "${src_file}"
  sed -i -e "s@{{audit_policy_config_mount}}@${audit_policy_config_mount}@g" "${src_file}"
  sed -i -e "s@{{audit_policy_config_volume}}@${audit_policy_config_volume}@g" "${src_file}"
  sed -i -e "s@{{audit_webhook_config_mount}}@${audit_webhook_config_mount}@g" "${src_file}"
  sed -i -e "s@{{audit_webhook_config_volume}}@${audit_webhook_config_volume}@g" "${src_file}"
  sed -i -e "s@{{admission_controller_config_mount}}@${admission_controller_config_mount}@g" "${src_file}"
  sed -i -e "s@{{admission_controller_config_volume}}@${admission_controller_config_volume}@g" "${src_file}"
  sed -i -e "s@{{image_policy_webhook_config_mount}}@${image_policy_webhook_config_mount}@g" "${src_file}"
  sed -i -e "s@{{image_policy_webhook_config_volume}}@${image_policy_webhook_config_volume}@g" "${src_file}"
  cp "${src_file}" /etc/kubernetes/manifests
}
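
# For illustration (hypothetical manifest fragment): a line in
# kube-apiserver.manifest such as
#   "command": ["/bin/sh", "-c", "kube-apiserver {{params}} 1>>/var/log/kube-apiserver.log 2>&1"]
# would, after the substitutions above, read something like (example values):
#   "command": ["/bin/sh", "-c", "kube-apiserver --v=2 --authorization-mode=Node,RBAC,ABAC ... 1>>/var/log/kube-apiserver.log 2>&1"]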

# Starts kubernetes controller manager.
# It prepares the log file, loads the docker image, calculates variables, sets them
# in the manifest file, and then copies the manifest file to /etc/kubernetes/manifests.
#
# Assumed vars (which are calculated in function compute-master-manifest-variables)
#   CLOUD_CONFIG_OPT
#   CLOUD_CONFIG_VOLUME
#   CLOUD_CONFIG_MOUNT
#   DOCKER_REGISTRY
function start-kube-controller-manager {
  echo "Start kubernetes controller-manager"
  create-kubecontrollermanager-kubeconfig
  prepare-log-file /var/log/kube-controller-manager.log
  # Calculate variables and assemble the command line.
  local params="${CONTROLLER_MANAGER_TEST_LOG_LEVEL:-"--v=2"} ${CONTROLLER_MANAGER_TEST_ARGS:-} ${CLOUD_CONFIG_OPT}"
  params+=" --use-service-account-credentials"
  params+=" --cloud-provider=gce"
  params+=" --kubeconfig=/etc/srv/kubernetes/kube-controller-manager/kubeconfig"
  params+=" --root-ca-file=${CA_CERT_BUNDLE_PATH}"
  params+=" --service-account-private-key-file=${SERVICEACCOUNT_KEY_PATH}"
  if [[ -n "${ENABLE_GARBAGE_COLLECTOR:-}" ]]; then
    params+=" --enable-garbage-collector=${ENABLE_GARBAGE_COLLECTOR}"
  fi
  if [[ -n "${INSTANCE_PREFIX:-}" ]]; then
    params+=" --cluster-name=${INSTANCE_PREFIX}"
  fi
  if [[ -n "${CLUSTER_IP_RANGE:-}" ]]; then
    params+=" --cluster-cidr=${CLUSTER_IP_RANGE}"
  fi
  if [[ -n "${CA_KEY:-}" ]]; then
    params+=" --cluster-signing-cert-file=${CA_CERT_PATH}"
    params+=" --cluster-signing-key-file=${CA_KEY_PATH}"
  fi
  if [[ -n "${SERVICE_CLUSTER_IP_RANGE:-}" ]]; then
    params+=" --service-cluster-ip-range=${SERVICE_CLUSTER_IP_RANGE}"
  fi
  if [[ -n "${CONCURRENT_SERVICE_SYNCS:-}" ]]; then
    params+=" --concurrent-service-syncs=${CONCURRENT_SERVICE_SYNCS}"
  fi
  if [[ "${NETWORK_PROVIDER:-}" == "kubenet" ]]; then
    params+=" --allocate-node-cidrs=true"
  elif [[ -n "${ALLOCATE_NODE_CIDRS:-}" ]]; then
    params+=" --allocate-node-cidrs=${ALLOCATE_NODE_CIDRS}"
  fi
  if [[ -n "${TERMINATED_POD_GC_THRESHOLD:-}" ]]; then
    params+=" --terminated-pod-gc-threshold=${TERMINATED_POD_GC_THRESHOLD}"
  fi
  if [[ "${ENABLE_IP_ALIASES:-}" == 'true' ]]; then
    params+=" --cidr-allocator-type=CloudAllocator"
    params+=" --configure-cloud-routes=false"
  fi
  if [[ -n "${FEATURE_GATES:-}" ]]; then
    params+=" --feature-gates=${FEATURE_GATES}"
  fi
  if [[ -n "${VOLUME_PLUGIN_DIR:-}" ]]; then
    params+=" --flex-volume-plugin-dir=${VOLUME_PLUGIN_DIR}"
  fi
  if [[ -n "${CLUSTER_SIGNING_DURATION:-}" ]]; then
    params+=" --experimental-cluster-signing-duration=${CLUSTER_SIGNING_DURATION}"
  fi
  # Disable using HPA metrics REST clients if metrics-server isn't enabled.
  if [[ "${ENABLE_METRICS_SERVER:-}" != "true" ]]; then
    params+=" --horizontal-pod-autoscaler-use-rest-clients=false"
  fi

  local -r kube_rc_docker_tag=$(cat /home/kubernetes/kube-docker-files/kube-controller-manager.docker_tag)
  local container_env=""
  if [[ -n "${ENABLE_CACHE_MUTATION_DETECTOR:-}" ]]; then
    container_env="\"env\":[{\"name\": \"KUBE_CACHE_MUTATION_DETECTOR\", \"value\": \"${ENABLE_CACHE_MUTATION_DETECTOR}\"}],"
  fi

  local -r src_file="${KUBE_HOME}/kube-manifests/kubernetes/gci-trusty/kube-controller-manager.manifest"
  remove-salt-config-comments "${src_file}"
  # Evaluate variables.
  sed -i -e "s@{{srv_kube_path}}@/etc/srv/kubernetes@g" "${src_file}"
  sed -i -e "s@{{pillar\['kube_docker_registry'\]}}@${DOCKER_REGISTRY}@g" "${src_file}"
  sed -i -e "s@{{pillar\['kube-controller-manager_docker_tag'\]}}@${kube_rc_docker_tag}@g" "${src_file}"
  sed -i -e "s@{{params}}@${params}@g" "${src_file}"
  sed -i -e "s@{{container_env}}@${container_env}@g" "${src_file}"
  sed -i -e "s@{{cloud_config_mount}}@${CLOUD_CONFIG_MOUNT}@g" "${src_file}"
  sed -i -e "s@{{cloud_config_volume}}@${CLOUD_CONFIG_VOLUME}@g" "${src_file}"
  sed -i -e "s@{{additional_cloud_config_mount}}@@g" "${src_file}"
  sed -i -e "s@{{additional_cloud_config_volume}}@@g" "${src_file}"
  cp "${src_file}" /etc/kubernetes/manifests
}
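
# For illustration (hypothetical values): with defaults and a 10.0.0.0/14
# cluster CIDR, the {{params}} placeholder for the controller manager might
# expand to something like
#   --v=2 --use-service-account-credentials --cloud-provider=gce
#   --kubeconfig=/etc/srv/kubernetes/kube-controller-manager/kubeconfig
#   --cluster-cidr=10.0.0.0/14 --allocate-node-cidrs=true
# (a single line in the manifest; wrapped here for readability).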

# Starts kubernetes scheduler.
# It prepares the log file, loads the docker image, calculates variables, sets them
# in the manifest file, and then copies the manifest file to /etc/kubernetes/manifests.
#
# Assumed vars (which are calculated in compute-master-manifest-variables)
#   DOCKER_REGISTRY
function start-kube-scheduler {
  echo "Start kubernetes scheduler"
  create-kubescheduler-kubeconfig
  prepare-log-file /var/log/kube-scheduler.log

  # Calculate variables and set them in the manifest.
  local params="${SCHEDULER_TEST_LOG_LEVEL:-"--v=2"} ${SCHEDULER_TEST_ARGS:-}"
  params+=" --kubeconfig=/etc/srv/kubernetes/kube-scheduler/kubeconfig"
  if [[ -n "${FEATURE_GATES:-}" ]]; then
    params+=" --feature-gates=${FEATURE_GATES}"
  fi
  if [[ -n "${SCHEDULING_ALGORITHM_PROVIDER:-}" ]]; then
    params+=" --algorithm-provider=${SCHEDULING_ALGORITHM_PROVIDER}"
  fi
  local -r kube_scheduler_docker_tag=$(cat "${KUBE_HOME}/kube-docker-files/kube-scheduler.docker_tag")

  # Remove salt comments and replace variables with values.
  local -r src_file="${KUBE_HOME}/kube-manifests/kubernetes/gci-trusty/kube-scheduler.manifest"
  remove-salt-config-comments "${src_file}"

  sed -i -e "s@{{srv_kube_path}}@/etc/srv/kubernetes@g" "${src_file}"
  sed -i -e "s@{{params}}@${params}@g" "${src_file}"
  sed -i -e "s@{{pillar\['kube_docker_registry'\]}}@${DOCKER_REGISTRY}@g" "${src_file}"
  sed -i -e "s@{{pillar\['kube-scheduler_docker_tag'\]}}@${kube_scheduler_docker_tag}@g" "${src_file}"
  cp "${src_file}" /etc/kubernetes/manifests
}

# Starts cluster autoscaler.
# Assumed vars (which are calculated in function compute-master-manifest-variables)
#   CLOUD_CONFIG_OPT
#   CLOUD_CONFIG_VOLUME
#   CLOUD_CONFIG_MOUNT
function start-cluster-autoscaler {
  if [[ "${ENABLE_CLUSTER_AUTOSCALER:-}" == "true" ]]; then
    echo "Start kubernetes cluster autoscaler"
    prepare-log-file /var/log/cluster-autoscaler.log

    # Remove salt comments and replace variables with values.
    local -r src_file="${KUBE_HOME}/kube-manifests/kubernetes/gci-trusty/cluster-autoscaler.manifest"
    remove-salt-config-comments "${src_file}"

    local params="${AUTOSCALER_MIG_CONFIG} ${CLOUD_CONFIG_OPT} ${AUTOSCALER_EXPANDER_CONFIG:---expander=price}"
    sed -i -e "s@{{params}}@${params}@g" "${src_file}"
    sed -i -e "s@{{cloud_config_mount}}@${CLOUD_CONFIG_MOUNT}@g" "${src_file}"
    sed -i -e "s@{{cloud_config_volume}}@${CLOUD_CONFIG_VOLUME}@g" "${src_file}"
    # Strip any leftover {% ... %} salt/jinja directives from the manifest.
    sed -i -e "s@{%.*%}@@g" "${src_file}"

    cp "${src_file}" /etc/kubernetes/manifests
  fi
}

# A helper function for copying addon manifests and setting dir/file
# permissions.
#
# $1: addon category under /etc/kubernetes
# $2: manifest source dir
function setup-addon-manifests {
  local -r src_dir="${KUBE_HOME}/kube-manifests/kubernetes/gci-trusty/$2"
  local -r dst_dir="/etc/kubernetes/$1/$2"
  if [[ ! -d "${dst_dir}" ]]; then
    mkdir -p "${dst_dir}"
  fi
  local files=$(find "${src_dir}" -maxdepth 1 -name "*.yaml")
  if [[ -n "${files}" ]]; then
    cp "${src_dir}/"*.yaml "${dst_dir}"
  fi
  files=$(find "${src_dir}" -maxdepth 1 -name "*.json")
  if [[ -n "${files}" ]]; then
    cp "${src_dir}/"*.json "${dst_dir}"
  fi
  files=$(find "${src_dir}" -maxdepth 1 -name "*.yaml.in")
  if [[ -n "${files}" ]]; then
    cp "${src_dir}/"*.yaml.in "${dst_dir}"
  fi
  chown -R root:root "${dst_dir}"
  chmod 755 "${dst_dir}"
  chmod 644 "${dst_dir}"/*
}
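
# Usage example: setup-addon-manifests "addons" "dns" copies any *.yaml,
# *.json, and *.yaml.in files from
#   ${KUBE_HOME}/kube-manifests/kubernetes/gci-trusty/dns
# into /etc/kubernetes/addons/dns, then makes them root-owned and
# world-readable (755 on the directory, 644 on the files).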

# Fluentd manifest is modified using kubectl, which may not be available at
# this point. Run this as a background process.
function wait-for-apiserver-and-update-fluentd {
  until kubectl get nodes; do
    sleep 10
  done
  kubectl set resources --dry-run --local -f "${fluentd_gcp_yaml}" \
    --limits=memory="${FLUENTD_GCP_MEMORY_LIMIT}" \
    --requests=cpu="${FLUENTD_GCP_CPU_REQUEST}",memory="${FLUENTD_GCP_MEMORY_REQUEST}" \
    --containers=fluentd-gcp -o yaml > "${fluentd_gcp_yaml}.tmp"
  mv "${fluentd_gcp_yaml}.tmp" "${fluentd_gcp_yaml}"
}
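
# Note: with --dry-run --local, kubectl rewrites the resource limits/requests
# entirely client-side and prints the updated manifest; the polling loop above
# only waits until the apiserver answers, as a proxy for "kubectl is usable".
# fluentd_gcp_yaml and the FLUENTD_GCP_* variables are inherited from the
# caller (start-kube-addons sets fluentd_gcp_yaml before backgrounding this).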

# Trigger background process that will ultimately update fluentd resource
# requirements.
function start-fluentd-resource-update {
  wait-for-apiserver-and-update-fluentd &
}

# Updates parameters in the yaml file for prometheus-to-sd configuration, or
# removes the component if it is disabled.
function update-prometheus-to-sd-parameters {
  if [[ "${ENABLE_PROMETHEUS_TO_SD:-}" == "true" ]]; then
    sed -i -e "s@{{ *prometheus_to_sd_prefix *}}@${PROMETHEUS_TO_SD_PREFIX}@g" "$1"
    sed -i -e "s@{{ *prometheus_to_sd_endpoint *}}@${PROMETHEUS_TO_SD_ENDPOINT}@g" "$1"
  else
    # Removes all lines between the two patterns (throws away prometheus-to-sd).
    sed -i -e "/# BEGIN_PROMETHEUS_TO_SD/,/# END_PROMETHEUS_TO_SD/d" "$1"
  fi
}
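
# For illustration (hypothetical manifest fragment): the deletion branch above
# relies on sentinel comments in the addon yaml, e.g.
#   # BEGIN_PROMETHEUS_TO_SD
#   - name: prometheus-to-sd-exporter
#     ...
#   # END_PROMETHEUS_TO_SD
# Everything between the sentinels, inclusive, is dropped when
# ENABLE_PROMETHEUS_TO_SD is not "true".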

# Prepares the manifests of k8s addons, and starts the addon manager.
# Vars assumed:
#   CLUSTER_NAME
function start-kube-addons {
  echo "Prepare kube-addons manifests and start kube addon manager"
  local -r src_dir="${KUBE_HOME}/kube-manifests/kubernetes/gci-trusty"
  local -r dst_dir="/etc/kubernetes/addons"

  # Prep additional kube-up specific rbac objects.
  setup-addon-manifests "addons" "rbac"

  # Set up manifests of other addons.
  if [[ "${KUBE_PROXY_DAEMONSET:-}" == "true" ]]; then
    prepare-kube-proxy-manifest-variables "${src_dir}/kube-proxy/kube-proxy-ds.yaml"
    setup-addon-manifests "addons" "kube-proxy"
  fi
  if [[ "${ENABLE_CLUSTER_MONITORING:-}" == "influxdb" ]] || \
     [[ "${ENABLE_CLUSTER_MONITORING:-}" == "google" ]] || \
     [[ "${ENABLE_CLUSTER_MONITORING:-}" == "stackdriver" ]] || \
     [[ "${ENABLE_CLUSTER_MONITORING:-}" == "standalone" ]] || \
     [[ "${ENABLE_CLUSTER_MONITORING:-}" == "googleinfluxdb" ]]; then
    local -r file_dir="cluster-monitoring/${ENABLE_CLUSTER_MONITORING}"
    setup-addon-manifests "addons" "cluster-monitoring"
    setup-addon-manifests "addons" "${file_dir}"
    # Replace the salt configurations with variable values.
    base_metrics_memory="${HEAPSTER_GCP_BASE_MEMORY:-140Mi}"
    base_eventer_memory="190Mi"
    base_metrics_cpu="${HEAPSTER_GCP_BASE_CPU:-80m}"
    nanny_memory="90Mi"
    local -r metrics_memory_per_node="${HEAPSTER_GCP_MEMORY_PER_NODE:-4}"
    local -r metrics_cpu_per_node="${HEAPSTER_GCP_CPU_PER_NODE:-0.5}"
    local -r eventer_memory_per_node="500"
    local -r nanny_memory_per_node="200"
    if [[ -n "${NUM_NODES:-}" && "${NUM_NODES}" -ge 1 ]]; then
      num_kube_nodes="$((${NUM_NODES}+1))"
      nanny_memory="$((${num_kube_nodes} * ${nanny_memory_per_node} + 90 * 1024))Ki"
    fi
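    # Worked example: with NUM_NODES=100, num_kube_nodes=101 and
    # nanny_memory=(101 * 200 + 90 * 1024)Ki = (20200 + 92160)Ki = 112360Ki,
    # i.e. roughly 110Mi for the heapster nanny.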
    controller_yaml="${dst_dir}/${file_dir}"
    if [[ "${ENABLE_CLUSTER_MONITORING:-}" == "googleinfluxdb" ]]; then
      controller_yaml="${controller_yaml}/heapster-controller-combined.yaml"
    else
      controller_yaml="${controller_yaml}/heapster-controller.yaml"
    fi
    remove-salt-config-comments "${controller_yaml}"
    sed -i -e "s@{{ cluster_name }}@${CLUSTER_NAME}@g" "${controller_yaml}"
    sed -i -e "s@{{ *base_metrics_memory *}}@${base_metrics_memory}@g" "${controller_yaml}"
    sed -i -e "s@{{ *base_metrics_cpu *}}@${base_metrics_cpu}@g" "${controller_yaml}"
    sed -i -e "s@{{ *base_eventer_memory *}}@${base_eventer_memory}@g" "${controller_yaml}"
    sed -i -e "s@{{ *metrics_memory_per_node *}}@${metrics_memory_per_node}@g" "${controller_yaml}"
    sed -i -e "s@{{ *eventer_memory_per_node *}}@${eventer_memory_per_node}@g" "${controller_yaml}"
    sed -i -e "s@{{ *nanny_memory *}}@${nanny_memory}@g" "${controller_yaml}"
    sed -i -e "s@{{ *metrics_cpu_per_node *}}@${metrics_cpu_per_node}@g" "${controller_yaml}"
    update-prometheus-to-sd-parameters "${controller_yaml}"
  fi
  if [[ "${ENABLE_METRICS_SERVER:-}" == "true" ]]; then
    setup-addon-manifests "addons" "metrics-server"
  fi
  if [[ "${ENABLE_CLUSTER_DNS:-}" == "true" ]]; then
    setup-addon-manifests "addons" "dns"
    local -r kubedns_file="${dst_dir}/dns/kube-dns.yaml"
    mv "${dst_dir}/dns/kube-dns.yaml.in" "${kubedns_file}"
    # Replace the salt configurations with variable values.
    sed -i -e "s@{{ *pillar\['dns_domain'\] *}}@${DNS_DOMAIN}@g" "${kubedns_file}"
    sed -i -e "s@{{ *pillar\['dns_server'\] *}}@${DNS_SERVER_IP}@g" "${kubedns_file}"

    if [[ "${ENABLE_DNS_HORIZONTAL_AUTOSCALER:-}" == "true" ]]; then
      setup-addon-manifests "addons" "dns-horizontal-autoscaler"
    fi
  fi
  if [[ "${ENABLE_CLUSTER_REGISTRY:-}" == "true" ]]; then
    setup-addon-manifests "addons" "registry"
    local -r registry_pv_file="${dst_dir}/registry/registry-pv.yaml"
    local -r registry_pvc_file="${dst_dir}/registry/registry-pvc.yaml"
    mv "${dst_dir}/registry/registry-pv.yaml.in" "${registry_pv_file}"
    mv "${dst_dir}/registry/registry-pvc.yaml.in" "${registry_pvc_file}"
    # Replace the salt configurations with variable values.
    remove-salt-config-comments "${registry_pv_file}"
    remove-salt-config-comments "${registry_pvc_file}"
    sed -i -e "s@{{ *pillar\['cluster_registry_disk_size'\] *}}@${CLUSTER_REGISTRY_DISK_SIZE}@g" "${registry_pv_file}"
    sed -i -e "s@{{ *pillar\['cluster_registry_disk_size'\] *}}@${CLUSTER_REGISTRY_DISK_SIZE}@g" "${registry_pvc_file}"
    sed -i -e "s@{{ *pillar\['cluster_registry_disk_name'\] *}}@${CLUSTER_REGISTRY_DISK}@g" "${registry_pvc_file}"
  fi
  if [[ "${ENABLE_NODE_LOGGING:-}" == "true" ]] && \
     [[ "${LOGGING_DESTINATION:-}" == "elasticsearch" ]] && \
     [[ "${ENABLE_CLUSTER_LOGGING:-}" == "true" ]]; then
    setup-addon-manifests "addons" "fluentd-elasticsearch"
  fi
  if [[ "${ENABLE_NODE_LOGGING:-}" == "true" ]] && \
     [[ "${LOGGING_DESTINATION:-}" == "gcp" ]]; then
    setup-addon-manifests "addons" "fluentd-gcp"
    local -r event_exporter_yaml="${dst_dir}/fluentd-gcp/event-exporter.yaml"
    local -r fluentd_gcp_yaml="${dst_dir}/fluentd-gcp/fluentd-gcp-ds.yaml"
    update-prometheus-to-sd-parameters "${event_exporter_yaml}"
    update-prometheus-to-sd-parameters "${fluentd_gcp_yaml}"
    start-fluentd-resource-update
  fi
  if [[ "${ENABLE_CLUSTER_UI:-}" == "true" ]]; then
    setup-addon-manifests "addons" "dashboard"
  fi
  if [[ "${ENABLE_NODE_PROBLEM_DETECTOR:-}" == "daemonset" ]]; then
    setup-addon-manifests "addons" "node-problem-detector"
  fi
  if [[ "${ENABLE_NODE_PROBLEM_DETECTOR:-}" == "standalone" ]]; then
    # Setup role binding for standalone node problem detector.
    setup-addon-manifests "addons" "node-problem-detector/standalone"
  fi
  if echo "${ADMISSION_CONTROL:-}" | grep -q "LimitRanger"; then
    setup-addon-manifests "admission-controls" "limit-range"
  fi
  if [[ "${NETWORK_POLICY_PROVIDER:-}" == "calico" ]]; then
    setup-addon-manifests "addons" "calico-policy-controller"

    # Configure Calico CNI directory.
    local -r ds_file="${dst_dir}/calico-policy-controller/calico-node-daemonset.yaml"
    sed -i -e "s@__CALICO_CNI_DIR__@/home/kubernetes/bin@g" "${ds_file}"
  fi
  if [[ "${ENABLE_DEFAULT_STORAGE_CLASS:-}" == "true" ]]; then
    setup-addon-manifests "addons" "storage-class/gce"
  fi
  if [[ "${ENABLE_IP_MASQ_AGENT:-}" == "true" ]]; then
    setup-addon-manifests "addons" "ip-masq-agent"
  fi
  if [[ "${ENABLE_METADATA_PROXY:-}" == "simple" ]]; then
    setup-addon-manifests "addons" "metadata-proxy/gce"
  fi

  # Place addon manager pod manifest.
  cp "${src_dir}/kube-addon-manager.yaml" /etc/kubernetes/manifests
}
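
# The addon manager started from kube-addon-manager.yaml is expected to
# reconcile everything staged under /etc/kubernetes/addons. A quick smoke
# check once the cluster is up (assuming kubectl is configured):
#   kubectl get pods -n kube-system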

# Starts an image-puller - used in test clusters.
function start-image-puller {
  echo "Start image-puller"
  cp "${KUBE_HOME}/kube-manifests/kubernetes/gci-trusty/e2e-image-puller.manifest" \
    /etc/kubernetes/manifests/
}

# Starts kube-registry proxy.
function start-kube-registry-proxy {
  echo "Start kube-registry-proxy"
  cp "${KUBE_HOME}/kube-manifests/kubernetes/kube-registry-proxy.yaml" /etc/kubernetes/manifests
}

# Starts an L7 loadbalancing controller for ingress.
function start-lb-controller {
  if [[ "${ENABLE_L7_LOADBALANCING:-}" == "glbc" ]]; then
    echo "Start GCE L7 pod"
    prepare-log-file /var/log/glbc.log
    setup-addon-manifests "addons" "cluster-loadbalancing/glbc"
    cp "${KUBE_HOME}/kube-manifests/kubernetes/gci-trusty/glbc.manifest" \
      /etc/kubernetes/manifests/
  fi
}

# Starts rescheduler.
function start-rescheduler {
  if [[ "${ENABLE_RESCHEDULER:-}" == "true" ]]; then
    echo "Start Rescheduler"
    prepare-log-file /var/log/rescheduler.log
    cp "${KUBE_HOME}/kube-manifests/kubernetes/gci-trusty/rescheduler.manifest" \
      /etc/kubernetes/manifests/
  fi
}

# Set up the working directory for the kubelet.
function setup-kubelet-dir {
  echo "Making /var/lib/kubelet executable for kubelet"
  # Bind-mount the directory onto itself, then remount the bind mount with
  # exec/suid/dev enabled, so the kubelet can run binaries from it even when
  # the parent filesystem is mounted with those options disabled.
  mount -B /var/lib/kubelet /var/lib/kubelet/
  mount -B -o remount,exec,suid,dev /var/lib/kubelet
}

function reset-motd {
  # kubelet is installed both on the master and nodes, and the version is easy to parse (unlike kubectl).
  local -r version="$("${KUBE_HOME}"/bin/kubelet --version=true | cut -f2 -d " ")"
  # This logic grabs either a release tag (v1.2.1 or v1.2.1-alpha.1),
  # or the git hash that's in the build info.
  local gitref="$(echo "${version}" | sed -r "s/(v[0-9]+\.[0-9]+\.[0-9]+)(-[a-z]+\.[0-9]+)?.*/\1\2/g")"
  local devel=""
  if [[ "${gitref}" != "${version}" ]]; then
    devel="
Note: This looks like a development version, which might not be present on GitHub.
If it isn't, the closest tag is at:
  https://github.com/kubernetes/kubernetes/tree/${gitref}
"
    gitref="${version//*+/}"
  fi
  cat > /etc/motd <<EOF

Welcome to Kubernetes ${version}!

You can find documentation for Kubernetes at:
  http://docs.kubernetes.io/

The source for this release can be found at:
  /home/kubernetes/kubernetes-src.tar.gz
Or you can download it at:
  https://storage.googleapis.com/kubernetes-release/release/${version}/kubernetes-src.tar.gz

It is based on the Kubernetes source at:
  https://github.com/kubernetes/kubernetes/tree/${gitref}
${devel}
For Kubernetes copyright and licensing information, see:
  /home/kubernetes/LICENSES

EOF
}
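
# Worked example of the version parsing above: for a development build such as
# version="v1.9.3-beta.0.25+abc123def456" (hypothetical), the sed expression
# yields gitref="v1.9.3-beta.0", which differs from ${version}, so the devel
# note is emitted and gitref is reset to "abc123def456" (everything after the
# last '+') via ${version//*+/}. For a clean release tag like "v1.9.3", gitref
# equals ${version} and the note is skipped.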

function override-kubectl {
  echo "overriding kubectl"
  echo "export PATH=${KUBE_HOME}/bin:\$PATH" > /etc/profile.d/kube_env.sh
}

########### Main Function ###########
echo "Starting to configure instance for Kubernetes"

KUBE_HOME="/home/kubernetes"
CONTAINERIZED_MOUNTER_HOME="${KUBE_HOME}/containerized_mounter"
if [[ ! -e "${KUBE_HOME}/kube-env" ]]; then
  echo "The ${KUBE_HOME}/kube-env file does not exist! Terminating cluster initialization."
  exit 1
fi

source "${KUBE_HOME}/kube-env"

if [[ -e "${KUBE_HOME}/kube-master-certs" ]]; then
  source "${KUBE_HOME}/kube-master-certs"
fi

if [[ -n "${KUBE_USER:-}" ]]; then
  if ! [[ "${KUBE_USER}" =~ ^[-._@a-zA-Z0-9]+$ ]]; then
    echo "Bad KUBE_USER format."
    exit 1
  fi
fi

# Generate the controller manager and scheduler tokens here, since they are
# only used on the master.
KUBE_CONTROLLER_MANAGER_TOKEN=$(dd if=/dev/urandom bs=128 count=1 2>/dev/null | base64 | tr -d "=+/" | dd bs=32 count=1 2>/dev/null)
KUBE_SCHEDULER_TOKEN=$(dd if=/dev/urandom bs=128 count=1 2>/dev/null | base64 | tr -d "=+/" | dd bs=32 count=1 2>/dev/null)
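
# How the token pipeline above works: read 128 random bytes, base64-encode
# them, strip the characters that are awkward in tokens (=, +, /), then keep
# the first 32 characters via `dd bs=32 count=1`. The result is a 32-character
# alphanumeric bearer token.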

setup-os-params
config-ip-firewall
create-dirs
setup-kubelet-dir
ensure-local-ssds
setup-logrotate
if [[ "${KUBERNETES_MASTER:-}" == "true" ]]; then
  mount-master-pd
  create-node-pki
  create-master-pki
  create-master-auth
  create-master-kubelet-auth
  create-master-etcd-auth
else
  create-node-pki
  create-kubelet-kubeconfig "${KUBERNETES_MASTER_NAME}"
  if [[ "${KUBE_PROXY_DAEMONSET:-}" != "true" ]]; then
    create-kubeproxy-user-kubeconfig
  fi
  if [[ "${ENABLE_NODE_PROBLEM_DETECTOR:-}" == "standalone" ]]; then
    create-node-problem-detector-kubeconfig
  fi
fi

override-kubectl
# Run the containerized mounter once to pre-cache the container image.
assemble-docker-flags
start-kubelet

if [[ "${KUBERNETES_MASTER:-}" == "true" ]]; then
  compute-master-manifest-variables
  start-etcd-servers
  start-etcd-empty-dir-cleanup-pod
  start-kube-apiserver
  start-kube-controller-manager
  start-kube-scheduler
  start-kube-addons
  start-cluster-autoscaler
  start-lb-controller
  start-rescheduler
else
  if [[ "${KUBE_PROXY_DAEMONSET:-}" != "true" ]]; then
    start-kube-proxy
  fi
  # Kube-registry-proxy.
  if [[ "${ENABLE_CLUSTER_REGISTRY:-}" == "true" ]]; then
    start-kube-registry-proxy
  fi
  if [[ "${PREPULL_E2E_IMAGES:-}" == "true" ]]; then
    start-image-puller
  fi
  if [[ "${ENABLE_NODE_PROBLEM_DETECTOR:-}" == "standalone" ]]; then
    start-node-problem-detector
  fi
fi
reset-motd
prepare-mounter-rootfs
# Load the 'configs' module so the running kernel's configuration is exposed
# at /proc/config.gz.
modprobe configs
echo "Done configuring the instance for Kubernetes"