commit 383d582b47
35 changed files with 450 additions and 105 deletions
@@ -62,6 +62,7 @@ before_script:
KUBELET_DEPLOYMENT: "docker"
VAULT_DEPLOYMENT: "docker"
WEAVE_CPU_LIMIT: "100m"
AUTHORIZATION_MODES: "{ 'authorization_modes': [] }"
MAGIC: "ci check this"

.gce: &gce

@@ -132,6 +133,7 @@ before_script:
-e local_release_dir=${PWD}/downloads
-e resolvconf_mode=${RESOLVCONF_MODE}
-e vault_deployment_type=${VAULT_DEPLOYMENT}
-e "${AUTHORIZATION_MODES}"
--limit "all:!fake_hosts"
cluster.yml

@@ -160,6 +162,7 @@ before_script:
-e resolvconf_mode=${RESOLVCONF_MODE}
-e weave_cpu_requests=${WEAVE_CPU_LIMIT}
-e weave_cpu_limit=${WEAVE_CPU_LIMIT}
-e "${AUTHORIZATION_MODES}"
--limit "all:!fake_hosts"
$PLAYBOOK;
fi

@@ -190,6 +193,7 @@ before_script:
-e etcd_deployment_type=${ETCD_DEPLOYMENT}
-e kubedns_min_replicas=1
-e kubelet_deployment_type=${KUBELET_DEPLOYMENT}
-e "${AUTHORIZATION_MODES}"
--limit "all:!fake_hosts"
cluster.yml;
fi

@@ -232,6 +236,7 @@ before_script:
-e etcd_deployment_type=${ETCD_DEPLOYMENT}
-e kubedns_min_replicas=1
-e kubelet_deployment_type=${KUBELET_DEPLOYMENT}
-e "${AUTHORIZATION_MODES}"
--limit "all:!fake_hosts"
cluster.yml;
fi

@@ -373,6 +378,15 @@ before_script:
CLUSTER_MODE: separate
STARTUP_SCRIPT: ""

.ubuntu_flannel_rbac_variables: &ubuntu_flannel_rbac_variables
# stage: deploy-gce-special
AUTHORIZATION_MODES: "{ 'authorization_modes': [ 'RBAC' ] }"
KUBE_NETWORK_PLUGIN: flannel
CLOUD_IMAGE: ubuntu-1604-xenial
CLOUD_REGION: europe-west1-b
CLUSTER_MODE: separate
STARTUP_SCRIPT: ""

# Builds for PRs only (premoderated by unit-tests step) and triggers (auto)
coreos-calico-sep:
stage: deploy-gce-part1

@@ -598,6 +612,17 @@ ubuntu-vault-sep:
except: ['triggers']
only: ['master', /^pr-.*$/]

ubuntu-flannel-rbac-sep:
stage: deploy-gce-special
<<: *job
<<: *gce
variables:
<<: *gce_variables
<<: *ubuntu_flannel_rbac_variables
when: manual
except: ['triggers']
only: ['master', /^pr-.*$/]

# Premoderated with manual actions
ci-authorized:
<<: *job
@@ -67,6 +67,11 @@ following default cluster paramters:
OpenStack (default is unset)
* *kube_hostpath_dynamic_provisioner* - Required for use of PetSets type in
Kubernetes
* *authorization_modes* - A list of [authorization mode](
https://kubernetes.io/docs/admin/authorization/#using-flags-for-your-authorization-module)
that the cluster should be configured for. Defaults to `[]` (i.e. no authorization).
Note: `RBAC` is currently in an experimental phase and does not support either calico or
vault. Upgrading from non-RBAC to RBAC is not tested.

Note, if cloud providers have any use of the ``10.233.0.0/16``, like instances'
private addresses, make sure to pick other values for ``kube_service_addresses``
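To put the new parameter to use, a minimal invocation might look like the following. This is an illustrative sketch, not part of the commit; the inventory path is an assumption, and the command-line extra vars mirror how the CI jobs pass AUTHORIZATION_MODES.

# Enable RBAC by passing authorization_modes as extra vars, the same way the
# CI jobs pass "${AUTHORIZATION_MODES}" (inventory path assumed):
ansible-playbook -b -i inventory/inventory.cfg \
  -e "{ 'authorization_modes': [ 'RBAC' ] }" \
  cluster.yml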
@@ -41,3 +41,7 @@ netchecker_server_memory_requests: 64M
etcd_cert_dir: "/etc/ssl/etcd/ssl"
canal_cert_dir: "/etc/canal/certs"

rbac_resources:
- sa
- clusterrole
- clusterrolebinding
@@ -13,11 +13,35 @@
src: "{{item.file}}"
dest: "{{kube_config_dir}}/{{item.file}}"
with_items:
- {name: kube-dns, file: kubedns-deploy.yml, type: deployment}
- {name: kube-dns, file: kubedns-svc.yml, type: svc}
- {name: kubedns, file: kubedns-sa.yml, type: sa}
- {name: kubedns, file: kubedns-deploy.yml, type: deployment}
- {name: kubedns, file: kubedns-svc.yml, type: svc}
- {name: kubedns-autoscaler, file: kubedns-autoscaler-sa.yml, type: sa}
- {name: kubedns-autoscaler, file: kubedns-autoscaler-clusterrole.yml, type: clusterrole}
- {name: kubedns-autoscaler, file: kubedns-autoscaler-clusterrolebinding.yml, type: clusterrolebinding}
- {name: kubedns-autoscaler, file: kubedns-autoscaler.yml, type: deployment}
register: manifests
when: dns_mode != 'none' and inventory_hostname == groups['kube-master'][0]
when:
- dns_mode != 'none' and inventory_hostname == groups['kube-master'][0]
- rbac_enabled or item.type not in rbac_resources
tags: dnsmasq

# see https://github.com/kubernetes/kubernetes/issues/45084, only needed for "old" kube-dns
- name: Kubernetes Apps | Patch system:kube-dns ClusterRole
command: >
{{bin_dir}}/kubectl patch clusterrole system:kube-dns
--patch='{
"rules": [
{
"apiGroups" : [""],
"resources" : ["endpoints", "services"],
"verbs": ["list", "watch", "get"]
}
]
}'
when:
- dns_mode != 'none' and inventory_hostname == groups['kube-master'][0]
- rbac_enabled and kubedns_version|version_compare("1.11.0", "<", strict=True)
tags: dnsmasq

- name: Kubernetes Apps | Start Resources

@@ -29,6 +53,7 @@
filename: "{{kube_config_dir}}/{{item.item.file}}"
state: "{{item.changed | ternary('latest','present') }}"
with_items: "{{ manifests.results }}"
failed_when: manifests|failed and "Error from server (AlreadyExists)" not in manifests.msg
when: dns_mode != 'none' and inventory_hostname == groups['kube-master'][0]
tags: dnsmasq
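For reference, the patch task above boils down to a single kubectl call; a hand-run equivalent might look like this (sketch only, with /usr/local/bin standing in for {{ bin_dir }}):

# Manual equivalent of the "Patch system:kube-dns ClusterRole" task, only
# relevant when RBAC is enabled and kubedns is older than 1.11.0:
/usr/local/bin/kubectl patch clusterrole system:kube-dns \
  --patch='{"rules":[{"apiGroups":[""],"resources":["endpoints","services"],"verbs":["list","watch","get"]}]}'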
@@ -5,10 +5,15 @@
with_items:
- {file: netchecker-agent-ds.yml.j2, type: ds, name: netchecker-agent}
- {file: netchecker-agent-hostnet-ds.yml.j2, type: ds, name: netchecker-agent-hostnet}
- {file: netchecker-server-sa.yml.j2, type: sa, name: netchecker-server}
- {file: netchecker-server-clusterrole.yml.j2, type: clusterrole, name: netchecker-server}
- {file: netchecker-server-clusterrolebinding.yml.j2, type: clusterrolebinding, name: netchecker-server}
- {file: netchecker-server-deployment.yml.j2, type: po, name: netchecker-server}
- {file: netchecker-server-svc.yml.j2, type: svc, name: netchecker-service}
register: manifests
when: inventory_hostname == groups['kube-master'][0]
when:
- inventory_hostname == groups['kube-master'][0]
- rbac_enabled or item.type not in rbac_resources

#FIXME: remove if kubernetes/features#124 is implemented
- name: Kubernetes Apps | Purge old Netchecker daemonsets

@@ -31,4 +36,5 @@
filename: "{{kube_config_dir}}/{{item.item.file}}"
state: "{{item.changed | ternary('latest','present') }}"
with_items: "{{ manifests.results }}"
failed_when: manifests|failed and "Error from server (AlreadyExists)" not in manifests.msg
when: inventory_hostname == groups['kube-master'][0]
@@ -0,0 +1,32 @@
# Copyright 2016 The Kubernetes Authors. All rights reserved
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

kind: ClusterRole
apiVersion: rbac.authorization.k8s.io/v1beta1
metadata:
  name: cluster-proportional-autoscaler
  namespace: {{ system_namespace }}
rules:
  - apiGroups: [""]
    resources: ["nodes"]
    verbs: ["list"]
  - apiGroups: [""]
    resources: ["replicationcontrollers/scale"]
    verbs: ["get", "update"]
  - apiGroups: ["extensions"]
    resources: ["deployments/scale", "replicasets/scale"]
    verbs: ["get", "update"]
  - apiGroups: [""]
    resources: ["configmaps"]
    verbs: ["get", "create"]
@@ -0,0 +1,27 @@
# Copyright 2016 The Kubernetes Authors. All rights reserved
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

kind: ClusterRoleBinding
apiVersion: rbac.authorization.k8s.io/v1beta1
metadata:
  name: cluster-proportional-autoscaler
  namespace: {{ system_namespace }}
subjects:
  - kind: ServiceAccount
    name: cluster-proportional-autoscaler
    namespace: {{ system_namespace }}
roleRef:
  kind: ClusterRole
  name: cluster-proportional-autoscaler
  apiGroup: rbac.authorization.k8s.io
@@ -0,0 +1,19 @@
# Copyright 2016 The Kubernetes Authors. All rights reserved
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

kind: ServiceAccount
apiVersion: v1
metadata:
  name: cluster-proportional-autoscaler
  namespace: {{ system_namespace }}
@@ -16,7 +16,7 @@ apiVersion: extensions/v1beta1
kind: Deployment
metadata:
name: kubedns-autoscaler
namespace: kube-system
namespace: {{ system_namespace }}
labels:
k8s-app: kubedns-autoscaler
kubernetes.io/cluster-service: "true"

@@ -39,11 +39,13 @@ spec:
memory: "10Mi"
command:
- /cluster-proportional-autoscaler
- --namespace=kube-system
- --namespace={{ system_namespace }}
- --configmap=kubedns-autoscaler
# Should keep target in sync with cluster/addons/dns/kubedns-controller.yaml.base
- --target=Deployment/kube-dns
- --default-params={"linear":{"nodesPerReplica":{{ kubedns_nodes_per_replica }},"min":{{ kubedns_min_replicas }}}}
- --logtostderr=true
- --v=2

{% if rbac_enabled %}
serviceAccountName: cluster-proportional-autoscaler
{% endif %}
@@ -151,4 +151,6 @@ spec:
memory: 20Mi
cpu: 10m
dnsPolicy: Default # Don't use cluster DNS.

{% if rbac_enabled %}
serviceAccountName: kube-dns
{% endif %}
roles/kubernetes-apps/ansible/templates/kubedns-sa.yml (new file, 7 lines)
@@ -0,0 +1,7 @@
apiVersion: v1
kind: ServiceAccount
metadata:
  name: kube-dns
  namespace: {{ system_namespace }}
  labels:
    kubernetes.io/cluster-service: "true"
@@ -1,43 +0,0 @@
apiVersion: extensions/v1beta1
kind: DaemonSet
metadata:
  labels:
    app: netchecker-agent-hostnet
  name: netchecker-agent-hostnet
  namespace: {{ netcheck_namespace }}
spec:
  template:
    metadata:
      name: netchecker-agent-hostnet
      labels:
        app: netchecker-agent-hostnet
    spec:
      hostNetwork: True
{% if kube_version | version_compare('v1.6', '>=') %}
      dnsPolicy: ClusterFirstWithHostNet
{% endif %}
      containers:
        - name: netchecker-agent
          image: "{{ agent_img }}"
          env:
            - name: MY_POD_NAME
              valueFrom:
                fieldRef:
                  fieldPath: metadata.name
            - name: MY_NODE_NAME
              valueFrom:
                fieldRef:
                  fieldPath: spec.nodeName
          args:
            - "-v=5"
            - "-alsologtostderr=true"
            - "-serverendpoint=netchecker-service:8081"
            - "-reportinterval={{ agent_report_interval }}"
          imagePullPolicy: {{ k8s_image_pull_policy }}
          resources:
            limits:
              cpu: {{ netchecker_agent_cpu_limit }}
              memory: {{ netchecker_agent_memory_limit }}
            requests:
              cpu: {{ netchecker_agent_cpu_requests }}
              memory: {{ netchecker_agent_memory_requests }}
@@ -0,0 +1,9 @@
kind: ClusterRole
apiVersion: rbac.authorization.k8s.io/v1beta1
metadata:
  name: netchecker-server
  namespace: {{ netcheck_namespace }}
rules:
  - apiGroups: [""]
    resources: ["pods"]
    verbs: ["list"]
@@ -0,0 +1,13 @@
kind: ClusterRoleBinding
apiVersion: rbac.authorization.k8s.io/v1beta1
metadata:
  name: netchecker-server
  namespace: {{ netcheck_namespace }}
subjects:
  - kind: ServiceAccount
    name: netchecker-server
    namespace: {{ netcheck_namespace }}
roleRef:
  kind: ClusterRole
  name: netchecker-server
  apiGroup: rbac.authorization.k8s.io
@@ -31,3 +31,6 @@ spec:
- "-logtostderr"
- "-kubeproxyinit"
- "-endpoint=0.0.0.0:8081"
{% if rbac_enabled %}
serviceAccountName: netchecker-server
{% endif %}
@@ -0,0 +1,7 @@
apiVersion: v1
kind: ServiceAccount
metadata:
  name: netchecker-server
  namespace: {{ netcheck_namespace }}
  labels:
    kubernetes.io/cluster-service: "true"
@@ -10,10 +10,36 @@
mode: 0755
register: helm_container

- name: Helm | Lay Down Helm Manifests (RBAC)
template:
src: "{{item.file}}"
dest: "{{kube_config_dir}}/{{item.file}}"
with_items:
- {name: tiller, file: tiller-sa.yml, type: sa}
- {name: tiller, file: tiller-clusterrolebinding.yml, type: clusterrolebinding}
register: manifests
when: dns_mode != 'none' and inventory_hostname == groups['kube-master'][0] and rbac_enabled

- name: Helm | Apply Helm Manifests (RBAC)
kube:
name: "{{item.item.name}}"
namespace: "{{ system_namespace }}"
kubectl: "{{bin_dir}}/kubectl"
resource: "{{item.item.type}}"
filename: "{{kube_config_dir}}/{{item.item.file}}"
state: "{{item.changed | ternary('latest','present') }}"
with_items: "{{ manifests.results }}"
failed_when: manifests|failed and "Error from server (AlreadyExists)" not in manifests.msg
when: dns_mode != 'none' and inventory_hostname == groups['kube-master'][0] and rbac_enabled

- name: Helm | Install/upgrade helm
command: "{{ bin_dir }}/helm init --upgrade --tiller-image={{ tiller_image_repo }}:{{ tiller_image_tag }}"
when: helm_container.changed

- name: Helm | Patch tiller deployment for RBAC
command: kubectl patch deployment tiller-deploy -p '{"spec":{"template":{"spec":{"serviceAccount":"tiller"}}}}' -n {{ system_namespace }}
when: rbac_enabled

- name: Helm | Set up bash completion
shell: "umask 022 && {{ bin_dir }}/helm completion bash >/etc/bash_completion.d/helm.sh"
when: ( helm_container.changed and not ansible_os_family in ["CoreOS", "Container Linux by CoreOS"] )
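A quick way to confirm the tiller patch above landed is to read back the field it sets; this is a hypothetical check, not part of the commit, and it assumes the default kube-system value of system_namespace:

# Should print "tiller" once the RBAC patch has been applied:
kubectl -n kube-system get deployment tiller-deploy \
  -o jsonpath='{.spec.template.spec.serviceAccount}'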
@@ -0,0 +1,13 @@
kind: ClusterRoleBinding
apiVersion: rbac.authorization.k8s.io/v1beta1
metadata:
  name: tiller
  namespace: {{ system_namespace }}
subjects:
  - kind: ServiceAccount
    name: tiller
    namespace: {{ system_namespace }}
roleRef:
  kind: ClusterRole
  name: cluster-admin
  apiGroup: rbac.authorization.k8s.io
roles/kubernetes-apps/helm/templates/tiller-sa.yml (new file, 7 lines)
@@ -0,0 +1,7 @@
apiVersion: v1
kind: ServiceAccount
metadata:
  name: tiller
  namespace: {{ system_namespace }}
  labels:
    kubernetes.io/cluster-service: "true"
@@ -64,4 +64,4 @@ apiserver_custom_flags: []

controller_mgr_custom_flags: []

scheduler_custom_flags: []
scheduler_custom_flags: []
@@ -60,12 +60,11 @@
when: kubesystem|failed and inventory_hostname == groups['kube-master'][0]
tags: apps

- name: Write kube-controller-manager manifest
- name: Write kube-scheduler kubeconfig
template:
src: manifests/kube-controller-manager.manifest.j2
dest: "{{ kube_manifest_dir }}/kube-controller-manager.manifest"
notify: Master | wait for kube-controller-manager
tags: kube-controller-manager
src: kube-scheduler-kubeconfig.yaml.j2
dest: "{{ kube_config_dir }}/kube-scheduler-kubeconfig.yaml"
tags: kube-scheduler

- name: Write kube-scheduler manifest
template:

@@ -74,6 +73,19 @@
notify: Master | wait for kube-scheduler
tags: kube-scheduler

- name: Write kube-controller-manager kubeconfig
template:
src: kube-controller-manager-kubeconfig.yaml.j2
dest: "{{ kube_config_dir }}/kube-controller-manager-kubeconfig.yaml"
tags: kube-controller-manager

- name: Write kube-controller-manager manifest
template:
src: manifests/kube-controller-manager.manifest.j2
dest: "{{ kube_manifest_dir }}/kube-controller-manager.manifest"
notify: Master | wait for kube-controller-manager
tags: kube-controller-manager

- include: post-upgrade.yml
tags: k8s-post-upgrade
@@ -0,0 +1,18 @@
apiVersion: v1
kind: Config
clusters:
- name: local
  cluster:
    certificate-authority: {{ kube_cert_dir }}/ca.pem
    server: {{ kube_apiserver_endpoint }}
users:
- name: kube-controller-manager
  user:
    client-certificate: {{ kube_cert_dir }}/kube-controller-manager.pem
    client-key: {{ kube_cert_dir }}/kube-controller-manager-key.pem
contexts:
- context:
    cluster: local
    user: kube-controller-manager
  name: kube-controller-manager-{{ cluster_name }}
current-context: kube-controller-manager-{{ cluster_name }}
@@ -0,0 +1,18 @@
apiVersion: v1
kind: Config
clusters:
- name: local
  cluster:
    certificate-authority: {{ kube_cert_dir }}/ca.pem
    server: {{ kube_apiserver_endpoint }}
users:
- name: kube-scheduler
  user:
    client-certificate: {{ kube_cert_dir }}/kube-scheduler.pem
    client-key: {{ kube_cert_dir }}/kube-scheduler-key.pem
contexts:
- context:
    cluster: local
    user: kube-scheduler
  name: kube-scheduler-{{ cluster_name }}
current-context: kube-scheduler-{{ cluster_name }}
@@ -81,6 +81,9 @@ spec:
{% if kube_api_anonymous_auth is defined and kube_version | version_compare('v1.5', '>=') %}
- --anonymous-auth={{ kube_api_anonymous_auth }}
{% endif %}
{% if authorization_modes %}
- --authorization-mode={{ authorization_modes|join(',') }}
{% endif %}
{% if apiserver_custom_flags is string %}
- {{ apiserver_custom_flags }}
{% else %}
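One way to sanity-check the template change above on a deployed master is to look for the rendered flag in the static pod manifest; a sketch, assuming the default kube_manifest_dir of /etc/kubernetes/manifests:

# With authorization_modes: ['RBAC'] the manifest should contain the joined
# flag, e.g. "- --authorization-mode=RBAC"; with the default [] it is omitted:
grep -- '--authorization-mode' /etc/kubernetes/manifests/kube-apiserver.manifest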
@@ -24,7 +24,7 @@ spec:
command:
- /hyperkube
- controller-manager
- --master={{ kube_apiserver_endpoint }}
- --kubeconfig={{ kube_config_dir }}/kube-controller-manager-kubeconfig.yaml
- --leader-elect=true
- --service-account-private-key-file={{ kube_cert_dir }}/apiserver-key.pem
- --root-ca-file={{ kube_cert_dir }}/ca.pem

@@ -35,6 +35,9 @@ spec:
- --node-monitor-period={{ kube_controller_node_monitor_period }}
- --pod-eviction-timeout={{ kube_controller_pod_eviction_timeout }}
- --v={{ kube_log_level }}
{% if rbac_enabled %}
- --use-service-account-credentials
{% endif %}
{% if cloud_provider is defined and cloud_provider in ["openstack", "azure", "vsphere"] %}
- --cloud-provider={{cloud_provider}}
- --cloud-config={{ kube_config_dir }}/cloud_config

@@ -61,20 +64,36 @@ spec:
initialDelaySeconds: 30
timeoutSeconds: 10
volumeMounts:
- mountPath: {{ kube_cert_dir }}
name: ssl-certs-kubernetes
- mountPath: /etc/ssl/certs
name: ssl-certs-host
readOnly: true
- mountPath: "{{kube_config_dir}}/ssl"
name: etc-kube-ssl
readOnly: true
- mountPath: "{{ kube_config_dir }}/kube-controller-manager-kubeconfig.yaml"
name: kubeconfig
readOnly: true
{% if cloud_provider is defined and cloud_provider in ["openstack", "azure", "vsphere" ] %}
- mountPath: {{ kube_config_dir }}/cloud_config
- mountPath: "{{ kube_config_dir }}/cloud_config"
name: cloudconfig
readOnly: true
{% endif %}
volumes:
- hostPath:
path: {{ kube_cert_dir }}
name: ssl-certs-kubernetes
- name: ssl-certs-host
hostPath:
{% if ansible_os_family == 'RedHat' %}
path: /etc/pki/tls
{% else %}
path: /usr/share/ca-certificates
{% endif %}
- name: etc-kube-ssl
hostPath:
path: "{{ kube_config_dir }}/ssl"
- name: kubeconfig
hostPath:
path: "{{ kube_config_dir }}/kube-controller-manager-kubeconfig.yaml"
{% if cloud_provider is defined and cloud_provider in ["openstack", "azure", "vsphere"] %}
- hostPath:
path: {{ kube_config_dir }}/cloud_config
path: "{{ kube_config_dir }}/cloud_config"
name: cloudconfig
{% endif %}
@@ -2,7 +2,7 @@ apiVersion: v1
kind: Pod
metadata:
name: kube-scheduler
namespace: kube-system
namespace: {{ system_namespace }}
labels:
k8s-app: kube-scheduler
spec:

@@ -25,7 +25,7 @@ spec:
- /hyperkube
- scheduler
- --leader-elect=true
- --master={{ kube_apiserver_endpoint }}
- --kubeconfig={{ kube_config_dir }}/kube-scheduler-kubeconfig.yaml
- --v={{ kube_log_level }}
{% if scheduler_custom_flags is string %}
- {{ scheduler_custom_flags }}

@@ -41,3 +41,27 @@ spec:
port: 10251
initialDelaySeconds: 30
timeoutSeconds: 10
volumeMounts:
- mountPath: /etc/ssl/certs
name: ssl-certs-host
readOnly: true
- mountPath: "{{ kube_config_dir }}/ssl"
name: etc-kube-ssl
readOnly: true
- mountPath: "{{ kube_config_dir }}/kube-scheduler-kubeconfig.yaml"
name: kubeconfig
readOnly: true
volumes:
- name: ssl-certs-host
hostPath:
{% if ansible_os_family == 'RedHat' %}
path: /etc/pki/tls
{% else %}
path: /usr/share/ca-certificates
{% endif %}
- name: etc-kube-ssl
hostPath:
path: "{{ kube_config_dir }}/ssl"
- name: kubeconfig
hostPath:
path: "{{ kube_config_dir }}/kube-scheduler-kubeconfig.yaml"
@@ -16,7 +16,7 @@
- include: "install_{{ kubelet_deployment_type }}.yml"

- name: install | Write kubelet systemd init file
template:
template:
src: "kubelet.{{ kubelet_deployment_type }}.service.j2"
dest: "/etc/systemd/system/kubelet.service"
backup: "yes"
@@ -30,9 +30,12 @@

- name: write the kubecfg (auth) file for kubelet
template:
src: node-kubeconfig.yaml.j2
dest: "{{ kube_config_dir }}/node-kubeconfig.yaml"
src: "{{ item }}-kubeconfig.yaml.j2"
dest: "{{ kube_config_dir }}/{{ item }}-kubeconfig.yaml"
backup: yes
with_items:
- node
- kube-proxy
notify: restart kubelet
tags: kubelet
@@ -0,0 +1,18 @@
apiVersion: v1
kind: Config
clusters:
- name: local
  cluster:
    certificate-authority: {{ kube_cert_dir }}/ca.pem
    server: {{ kube_apiserver_endpoint }}
users:
- name: kube-proxy
  user:
    client-certificate: {{ kube_cert_dir }}/kube-proxy-{{ inventory_hostname }}.pem
    client-key: {{ kube_cert_dir }}/kube-proxy-{{ inventory_hostname }}-key.pem
contexts:
- context:
    cluster: local
    user: kube-proxy
  name: kube-proxy-{{ cluster_name }}
current-context: kube-proxy-{{ cluster_name }}
@@ -25,10 +25,7 @@ spec:
- /hyperkube
- proxy
- --v={{ kube_log_level }}
- --master={{ kube_apiserver_endpoint }}
{% if not is_kube_master %}
- --kubeconfig={{kube_config_dir}}/node-kubeconfig.yaml
{% endif %}
- --kubeconfig={{kube_config_dir}}/kube-proxy-kubeconfig.yaml
- --bind-address={{ ip | default(ansible_default_ipv4.address) }}
- --cluster-cidr={{ kube_pods_subnet }}
- --proxy-mode={{ kube_proxy_mode }}

@@ -41,14 +38,14 @@ spec:
- mountPath: /etc/ssl/certs
name: ssl-certs-host
readOnly: true
- mountPath: {{kube_config_dir}}/node-kubeconfig.yaml
name: "kubeconfig"
- mountPath: "{{ kube_config_dir }}/ssl"
name: etc-kube-ssl
readOnly: true
- mountPath: {{kube_config_dir}}/ssl
name: "etc-kube-ssl"
- mountPath: "{{ kube_config_dir }}/kube-proxy-kubeconfig.yaml"
name: kubeconfig
readOnly: true
- mountPath: /var/run/dbus
name: "var-run-dbus"
name: var-run-dbus
readOnly: false
volumes:
- name: ssl-certs-host

@@ -58,12 +55,12 @@ spec:
{% else %}
path: /usr/share/ca-certificates
{% endif %}
- name: "kubeconfig"
- name: etc-kube-ssl
hostPath:
path: "{{kube_config_dir}}/node-kubeconfig.yaml"
- name: "etc-kube-ssl"
path: "{{ kube_config_dir }}/ssl"
- name: kubeconfig
hostPath:
path: "{{kube_config_dir}}/ssl"
- name: "var-run-dbus"
path: "{{ kube_config_dir }}/kube-proxy-kubeconfig.yaml"
- name: var-run-dbus
hostPath:
path: "/var/run/dbus"
path: /var/run/dbus
@@ -23,7 +23,7 @@
{% if not is_kube_master and loadbalancer_apiserver_localhost|default(true) -%}
https://localhost:{{ nginx_kube_apiserver_port|default(kube_apiserver_port) }}
{%- elif is_kube_master -%}
http://127.0.0.1:{{ kube_apiserver_insecure_port }}
https://127.0.0.1:{{ kube_apiserver_port }}
{%- else -%}
{%- if loadbalancer_apiserver is defined and loadbalancer_apiserver.port is defined -%}
https://{{ apiserver_loadbalancer_domain_name|default('lb-apiserver.kubernetes.local') }}:{{ loadbalancer_apiserver.port|default(kube_apiserver_port) }}
@@ -72,32 +72,47 @@ else
openssl req -x509 -new -nodes -key ca-key.pem -days 10000 -out ca.pem -subj "/CN=kube-ca" > /dev/null 2>&1
fi

gen_key_and_cert() {
local name=$1
local subject=$2
openssl genrsa -out ${name}-key.pem 2048 > /dev/null 2>&1
openssl req -new -key ${name}-key.pem -out ${name}.csr -subj "${subject}" -config ${CONFIG} > /dev/null 2>&1
openssl x509 -req -in ${name}.csr -CA ca.pem -CAkey ca-key.pem -CAcreateserial -out ${name}.pem -days 3650 -extensions v3_req -extfile ${CONFIG} > /dev/null 2>&1
}

if [ ! -e "$SSLDIR/ca-key.pem" ]; then
# kube-apiserver key
openssl genrsa -out apiserver-key.pem 2048 > /dev/null 2>&1
openssl req -new -key apiserver-key.pem -out apiserver.csr -subj "/CN=kube-apiserver" -config ${CONFIG} > /dev/null 2>&1
openssl x509 -req -in apiserver.csr -CA ca.pem -CAkey ca-key.pem -CAcreateserial -out apiserver.pem -days 3650 -extensions v3_req -extfile ${CONFIG} > /dev/null 2>&1
# kube-apiserver
gen_key_and_cert "apiserver" "/CN=kube-apiserver"
cat ca.pem >> apiserver.pem
# kube-scheduler
gen_key_and_cert "kube-scheduler" "/CN=system:kube-scheduler"
# kube-controller-manager
gen_key_and_cert "kube-controller-manager" "/CN=system:kube-controller-manager"
fi

# Admins
if [ -n "$MASTERS" ]; then
for host in $MASTERS; do
cn="${host%%.*}"
# admin key
openssl genrsa -out admin-${host}-key.pem 2048 > /dev/null 2>&1
openssl req -new -key admin-${host}-key.pem -out admin-${host}.csr -subj "/CN=kube-admin-${cn}/O=system:masters" > /dev/null 2>&1
openssl x509 -req -in admin-${host}.csr -CA ca.pem -CAkey ca-key.pem -CAcreateserial -out admin-${host}.pem -days 3650 > /dev/null 2>&1
# admin
gen_key_and_cert "admin-${host}" "/CN=kube-admin-${cn}/O=system:masters"
done
fi

# Nodes and Admin
# Nodes
if [ -n "$HOSTS" ]; then
for host in $HOSTS; do
cn="${host%%.*}"
# node key
openssl genrsa -out node-${host}-key.pem 2048 > /dev/null 2>&1
openssl req -new -key node-${host}-key.pem -out node-${host}.csr -subj "/CN=kube-node-${cn}" > /dev/null 2>&1
openssl x509 -req -in node-${host}.csr -CA ca.pem -CAkey ca-key.pem -CAcreateserial -out node-${host}.pem -days 3650 > /dev/null 2>&1
gen_key_and_cert "node-${host}" "/CN=system:node:${cn}/O=system:nodes"
done
fi

# system:kube-proxy
if [ -n "$HOSTS" ]; then
for host in $HOSTS; do
cn="${host%%.*}"
# kube-proxy
gen_key_and_cert "kube-proxy-${host}" "/CN=system:kube-proxy"
done
fi
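Because RBAC derives the user and group from the certificate subject, inspecting one of the certificates generated above shows the identity the apiserver will see; a sketch, run from the SSL directory the script writes to:

# The scheduler cert should carry CN=system:kube-scheduler, and a node cert
# CN=system:node:<host> with O=system:nodes:
openssl x509 -noout -subject -in kube-scheduler.pem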
@@ -56,24 +56,39 @@

- set_fact:
all_master_certs: "['ca-key.pem',
'apiserver.pem',
'apiserver-key.pem',
'kube-scheduler.pem',
'kube-scheduler-key.pem',
'kube-controller-manager.pem',
'kube-controller-manager-key.pem',
{% for node in groups['kube-master'] %}
'admin-{{ node }}.pem',
'admin-{{ node }}-key.pem',
'apiserver.pem',
'apiserver-key.pem',
{% endfor %}]"
my_master_certs: ['ca-key.pem',
'admin-{{ inventory_hostname }}.pem',
'admin-{{ inventory_hostname }}-key.pem',
'apiserver.pem',
'apiserver-key.pem'
'apiserver-key.pem',
'kube-scheduler.pem',
'kube-scheduler-key.pem',
'kube-controller-manager.pem',
'kube-controller-manager-key.pem',
]
all_node_certs: "['ca.pem',
{% for node in groups['k8s-cluster'] %}
'node-{{ node }}.pem',
'node-{{ node }}-key.pem',
'kube-proxy-{{ node }}.pem',
'kube-proxy-{{ node }}-key.pem',
{% endfor %}]"
my_node_certs: ['ca.pem', 'node-{{ inventory_hostname }}.pem', 'node-{{ inventory_hostname }}-key.pem']
my_node_certs: ['ca.pem',
'node-{{ inventory_hostname }}.pem',
'node-{{ inventory_hostname }}-key.pem',
'kube-proxy-{{ inventory_hostname }}.pem',
'kube-proxy-{{ inventory_hostname }}-key.pem',
]
tags: facts

- name: Gen_certs | Gather master certs
@@ -114,3 +114,9 @@ vault_deployment_type: docker
k8s_image_pull_policy: IfNotPresent
efk_enabled: false
enable_network_policy: false

## List of authorization modes that must be configured for
## the k8s cluster. Only 'AlwaysAllow','AlwaysDeny', and
## 'RBAC' modes are tested.
authorization_modes: []
rbac_enabled: "{{ 'RBAC' in authorization_modes }}"
@@ -79,11 +79,10 @@
- /etc/dnsmasq.d-available
- /etc/etcd.env
- /etc/calico
- /etc/weave.env
- /opt/cni
- /etc/dhcp/dhclient.d/zdnsupdate.sh
- /etc/dhcp/dhclient-exit-hooks.d/zdnsupdate
- "{{ bin_dir }}/kubelet"
- "{{ bin_dir }}/kubernetes-scripts"
- /run/flannel
- /etc/flannel
- /run/kubernetes

@@ -92,6 +91,15 @@
- /etc/ssl/certs/kube-ca.pem
- /etc/ssl/certs/etcd-ca.pem
- /var/log/pods/
- "{{ bin_dir }}/kubelet"
- "{{ bin_dir }}/etcd-scripts"
- "{{ bin_dir }}/etcd"
- "{{ bin_dir }}/etcdctl"
- "{{ bin_dir }}/kubernetes-scripts"
- "{{ bin_dir }}/kubectl"
- "{{ bin_dir }}/helm"
- "{{ bin_dir }}/calicoctl"
- "{{ bin_dir }}/weave"
tags: ['files']