Merge branch 'master' into local_volume_provisioner

This commit is contained in:
Antoine Legrand 2018-02-06 09:28:27 +01:00 committed by GitHub
commit a3248379db
32 changed files with 1092 additions and 38 deletions

View file

@ -13,7 +13,6 @@ If you have questions, join us on the [kubernetes slack](https://kubernetes.slac
To deploy the cluster you can use :
[**kubespray-cli**](https://github.com/kubespray/kubespray-cli) <br>
**Ansible** usual commands and [**inventory builder**](https://github.com/kubernetes-incubator/kubespray/blob/master/contrib/inventory_builder/inventory.py) <br>
**vagrant** by simply running `vagrant up` (for tests purposes) <br>
@ -112,7 +111,6 @@ See also [Network checker](docs/netcheck.md).
## Tools and projects on top of Kubespray
- [Digital Rebar Provision](https://github.com/digitalrebar/provision/blob/master/doc/integrations/ansible.rst)
- [Kubespray-cli](https://github.com/kubespray/kubespray-cli)
- [Fuel-ccp-installer](https://github.com/openstack/fuel-ccp-installer)
- [Terraform Contrib](https://github.com/kubernetes-incubator/kubespray/tree/master/contrib/terraform)

View file

@ -3,20 +3,11 @@ Cloud providers
#### Provisioning
You can use kubespray-cli to start new instances on cloud providers
You can deploy instances in your cloud environment in several different ways. Examples include Terraform, Ansible (ec2 and gce modules), and manual creation.
here's an example
```
kubespray [aws|gce] --nodes 2 --etcd 3 --cluster-name test-smana
```
#### Deploy kubernetes
With kubespray-cli
With ansible-playbook command
```
kubespray deploy [--aws|--gce] -u admin
```
Or ansible-playbook command
```
ansible-playbook -u smana -e ansible_ssh_user=admin -e cloud_provider=[aws|gce] -b --become-user=root -i inventory/single.cfg cluster.yml
```

View file

@ -1,13 +1,7 @@
CoreOS bootstrap
===============
Example with **kubespray-cli**:
Example with Ansible:
```
kubespray deploy --gce --coreos
```
Or with Ansible:
Before running the cluster playbook you must satisfy the following requirements:

View file

@ -1,23 +1,6 @@
Getting started
===============
The easiest way to run the deployement is to use the **kubespray-cli** tool.
A complete documentation can be found in its [github repository](https://github.com/kubespray/kubespray-cli).
Here is a simple example on AWS:
* Create instances and generate the inventory
```
kubespray aws --instances 3
```
* Run the deployment
```
kubespray deploy --aws -u centos -n calico
```
Building your own inventory
---------------------------

View file

@ -106,6 +106,10 @@ kube_apiserver_insecure_port: 8080 # (http)
# Set to 0 to disable insecure port - Requires RBAC in authorization_modes and kube_api_anonymous_auth: true
#kube_apiserver_insecure_port: 0 # (disabled)
# Kube-proxy proxyMode configuration.
# Can be ipvs, iptables
kube_proxy_mode: iptables
# DNS configuration.
# Kubernetes cluster name, also will be used as DNS domain
cluster_name: cluster.local
@ -158,6 +162,9 @@ helm_enabled: false
# Istio deployment
istio_enabled: false
# Registry deployment
registry_enabled: false
# Local volume provisioner deployment
local_volume_provisioner_enabled: false
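A hedged sketch of how an operator might opt into these new settings in their own inventory group vars (the file path and values below are illustrative, not defaults added by this commit):

```yaml
# e.g. inventory/group_vars/k8s-cluster.yml (illustrative path)
# Switch kube-proxy from the default iptables mode to ipvs
kube_proxy_mode: ipvs
# Deploy the in-cluster Docker registry addon
registry_enabled: true
# Local volume provisioner stays disabled unless explicitly enabled
local_volume_provisioner_enabled: false
```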

View file

@ -21,6 +21,12 @@ dependencies:
- apps
- helm
- role: kubernetes-apps/registry
when: registry_enabled
tags:
- apps
- registry
- role: kubernetes-apps/local_volume_provisioner
when: local_volume_provisioner_enabled
tags:

View file

@ -0,0 +1,274 @@
# Private Docker Registry in Kubernetes
Kubernetes offers an optional private Docker registry addon, which you can turn
on when you bring up a cluster or install later. This gives you a place to
store truly private Docker images for your cluster.
## How it works
The private registry runs as a `Pod` in your cluster. It does not currently
support SSL or authentication, which triggers Docker's "insecure registry"
logic. To work around this, we run a proxy on each node in the cluster,
exposing a port onto the node (via a hostPort), which Docker accepts as
"secure", since it is accessed by `localhost`.
## Turning it on
Some cluster installs (e.g. GCE) support this as a cluster-birth flag. The
`ENABLE_CLUSTER_REGISTRY` variable in `cluster/gce/config-default.sh` governs
whether the registry is run or not. To set this flag, you can specify
`KUBE_ENABLE_CLUSTER_REGISTRY=true` when running `kube-up.sh`. If your cluster
does not include this flag, the following steps should work. Note that some of
this is cloud-provider specific, so you may have to customize it a bit.
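For example, with the GCE provider scripts the flag can be passed directly when bringing the cluster up (a minimal sketch of the step described above):

```console
$ KUBE_ENABLE_CLUSTER_REGISTRY=true ./cluster/kube-up.sh
```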
### Make some storage
The primary job of the registry is to store data. To do that we have to decide
where to store it. For cloud environments that have networked storage, we can
use Kubernetes's `PersistentVolume` abstraction. The following template is
expanded by `salt` in the GCE cluster turnup, but can easily be adapted to
other situations:
<!-- BEGIN MUNGE: EXAMPLE registry-pv.yaml.in -->
```yaml
kind: PersistentVolume
apiVersion: v1
metadata:
name: kube-system-kube-registry-pv
labels:
kubernetes.io/cluster-service: "true"
spec:
{% if pillar.get('cluster_registry_disk_type', '') == 'gce' %}
capacity:
storage: {{ pillar['cluster_registry_disk_size'] }}
accessModes:
- ReadWriteOnce
gcePersistentDisk:
pdName: "{{ pillar['cluster_registry_disk_name'] }}"
fsType: "ext4"
{% endif %}
```
<!-- END MUNGE: EXAMPLE registry-pv.yaml.in -->
If, for example, you wanted to use NFS you would just need to change the
`gcePersistentDisk` block to `nfs`. See
[here](https://kubernetes.io/docs/user-guide/volumes.md) for more details on volumes.
Note that in any case, the storage (in this case the GCE PersistentDisk) must be
created independently - this is not something Kubernetes manages for you (yet).
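For illustration, a hedged sketch of the same `PersistentVolume` backed by NFS instead of a GCE disk (the server, export path, and size are placeholders you would replace with your own):

```yaml
kind: PersistentVolume
apiVersion: v1
metadata:
  name: kube-system-kube-registry-pv
  labels:
    kubernetes.io/cluster-service: "true"
spec:
  capacity:
    storage: 100Gi            # placeholder size
  accessModes:
    - ReadWriteOnce
  nfs:
    server: nfs.example.com   # placeholder NFS server
    path: /exports/registry   # placeholder export path
```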
### I don't want or don't have persistent storage
If you are running in a place that doesn't have networked storage, or if you
just want to kick the tires on this without committing to it, you can easily
adapt the `ReplicationController` specification below to use a simple
`emptyDir` volume instead of a `persistentVolumeClaim`.
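For example, a hedged sketch of the substitution: in the `volumes` section of the `ReplicationController` below, swap the claim for an `emptyDir` (note that stored images are then lost whenever the pod is rescheduled):

```yaml
      volumes:
      - name: image-store
        emptyDir: {}   # replaces the persistentVolumeClaim for test-only setups
```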
## Claim the storage
Now that the Kubernetes cluster knows that some storage exists, you can put a
claim on that storage. As with the `PersistentVolume` above, you can start
with the `salt` template:
<!-- BEGIN MUNGE: EXAMPLE registry-pvc.yaml.in -->
```yaml
kind: PersistentVolumeClaim
apiVersion: v1
metadata:
name: kube-registry-pvc
namespace: kube-system
labels:
kubernetes.io/cluster-service: "true"
spec:
accessModes:
- ReadWriteOnce
resources:
requests:
storage: {{ pillar['cluster_registry_disk_size'] }}
```
<!-- END MUNGE: EXAMPLE registry-pvc.yaml.in -->
This tells Kubernetes that you want to use storage, and the `PersistentVolume`
you created before will be bound to this claim (unless you have other
`PersistentVolumes` in which case those might get bound instead). This claim
gives you the right to use this storage until you release the claim.
## Run the registry
Now we can run a Docker registry:
<!-- BEGIN MUNGE: EXAMPLE registry-rc.yaml -->
```yaml
apiVersion: v1
kind: ReplicationController
metadata:
name: kube-registry-v0
namespace: kube-system
labels:
k8s-app: kube-registry-upstream
version: v0
kubernetes.io/cluster-service: "true"
spec:
replicas: 1
selector:
k8s-app: kube-registry-upstream
version: v0
template:
metadata:
labels:
k8s-app: kube-registry-upstream
version: v0
kubernetes.io/cluster-service: "true"
spec:
containers:
- name: registry
image: registry:2
resources:
limits:
cpu: 100m
memory: 100Mi
env:
- name: REGISTRY_HTTP_ADDR
value: :5000
- name: REGISTRY_STORAGE_FILESYSTEM_ROOTDIRECTORY
value: /var/lib/registry
volumeMounts:
- name: image-store
mountPath: /var/lib/registry
ports:
- containerPort: 5000
name: registry
protocol: TCP
volumes:
- name: image-store
persistentVolumeClaim:
claimName: kube-registry-pvc
```
<!-- END MUNGE: EXAMPLE registry-rc.yaml -->
## Expose the registry in the cluster
Now that we have a registry `Pod` running, we can expose it as a Service:
<!-- BEGIN MUNGE: EXAMPLE registry-svc.yaml -->
```yaml
apiVersion: v1
kind: Service
metadata:
name: kube-registry
namespace: kube-system
labels:
k8s-app: kube-registry-upstream
kubernetes.io/cluster-service: "true"
kubernetes.io/name: "KubeRegistry"
spec:
selector:
k8s-app: kube-registry-upstream
ports:
- name: registry
port: 5000
protocol: TCP
```
<!-- END MUNGE: EXAMPLE registry-svc.yaml -->
## Expose the registry on each node
Now that we have a running `Service`, we need to expose it onto each Kubernetes
`Node` so that Docker will see it as `localhost`. We can load a `Pod` on every
node by creating the following daemonset.
<!-- BEGIN MUNGE: EXAMPLE ../../saltbase/salt/kube-registry-proxy/kube-registry-proxy.yaml -->
```yaml
apiVersion: extensions/v1beta1
kind: DaemonSet
metadata:
name: kube-registry-proxy
namespace: kube-system
labels:
k8s-app: kube-registry-proxy
kubernetes.io/cluster-service: "true"
version: v0.4
spec:
template:
metadata:
labels:
k8s-app: kube-registry-proxy
kubernetes.io/name: "kube-registry-proxy"
kubernetes.io/cluster-service: "true"
version: v0.4
spec:
containers:
- name: kube-registry-proxy
image: gcr.io/google_containers/kube-registry-proxy:0.4
resources:
limits:
cpu: 100m
memory: 50Mi
env:
- name: REGISTRY_HOST
value: kube-registry.kube-system.svc.cluster.local
- name: REGISTRY_PORT
value: "5000"
ports:
- name: registry
containerPort: 80
hostPort: 5000
```
<!-- END MUNGE: EXAMPLE ../../saltbase/salt/kube-registry-proxy/kube-registry-proxy.yaml -->
When modifying replication-controller, service and daemon-set definitions, take
care to ensure _unique_ identifiers for the rc-svc couple and the daemon-set.
Failing to do so will register the localhost proxy daemon-sets to the
upstream service. As a result they will then try to proxy themselves, which
will, for obvious reasons, not work.
This ensures that port 5000 on each node is directed to the registry `Service`.
You should be able to verify that it is running by hitting port 5000 with a web
browser and getting a 404 error:
```console
$ curl localhost:5000
404 page not found
```
## Using the registry
To use an image hosted by this registry, simply say this in your `Pod`'s
`spec.containers[].image` field:
```yaml
image: localhost:5000/user/container
```
Before you can use the registry, you have to be able to get images into it,
though. If you are building an image on your Kubernetes `Node`, you can spell
out `localhost:5000` when you build and push. More likely, though, you are
building locally and want to push to your cluster.
You can use `kubectl` to set up a port-forward from your local node to a
running Pod:
```console
$ POD=$(kubectl get pods --namespace kube-system -l k8s-app=kube-registry-upstream \
-o template --template '{{range .items}}{{.metadata.name}} {{.status.phase}}{{"\n"}}{{end}}' \
| grep Running | head -1 | cut -f1 -d' ')
$ kubectl port-forward --namespace kube-system $POD 5000:5000 &
```
Now you can build and push images on your local computer as
`localhost:5000/yourname/container` and those images will be available inside
your Kubernetes cluster with the same name.
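For example, with the port-forward above in place, the workflow might look like this (the image name is illustrative):

```console
$ docker build -t localhost:5000/yourname/container .
$ docker push localhost:5000/yourname/container
```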
# More Extensions
- [Use GCS as storage backend](gcs/README.md)
- [Enable TLS/SSL](tls/README.md)
- [Enable Authentication](auth/README.md)
## Future improvements
* Allow port-forwarding to a Service rather than a pod (#15180)

View file

@ -0,0 +1,5 @@
---
registry_image_repo: registry
registry_image_tag: 2.6
registry_proxy_image_repo: gcr.io/google_containers/kube-registry-proxy
registry_proxy_image_tag: 0.4

View file

@ -0,0 +1,26 @@
# Copyright 2016 The Kubernetes Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
FROM nginx:1.12
RUN apt-get update \
&& apt-get install -y \
curl \
--no-install-recommends \
&& apt-get clean \
&& rm -rf /var/lib/apt/lists/* /tmp/* /var/tmp/* /usr/share/man /usr/share/doc
COPY rootfs /
CMD ["/bin/boot"]

View file

@ -0,0 +1,24 @@
# Copyright 2016 The Kubernetes Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
.PHONY: build push vet test clean
TAG = 0.4
REPO = gcr.io/google_containers/kube-registry-proxy
build:
docker build --pull -t $(REPO):$(TAG) .
push:
gcloud docker -- push $(REPO):$(TAG)

View file

@ -0,0 +1,23 @@
#!/usr/bin/env bash
# fail if no hostname is provided
REGISTRY_HOST=${REGISTRY_HOST:?no host}
REGISTRY_PORT=${REGISTRY_PORT:-5000}
# we are always listening on port 80
# https://github.com/nginxinc/docker-nginx/blob/43c112100750cbd1e9f2160324c64988e7920ac9/stable/jessie/Dockerfile#L25
PORT=80
sed -e "s/%HOST%/$REGISTRY_HOST/g" \
-e "s/%PORT%/$REGISTRY_PORT/g" \
-e "s/%BIND_PORT%/$PORT/g" \
</etc/nginx/conf.d/default.conf.in >/etc/nginx/conf.d/default.conf
# wait for registry to come online
while ! curl -sS "$REGISTRY_HOST:$REGISTRY_PORT" &>/dev/null; do
printf "waiting for the registry (%s:%s) to come online...\n" "$REGISTRY_HOST" "$REGISTRY_PORT"
sleep 1
done
printf "starting proxy...\n"
exec nginx -g "daemon off;" "$@"

View file

@ -0,0 +1,28 @@
# Docker registry proxy for api version 2
upstream docker-registry {
server %HOST%:%PORT%;
}
# No client auth or TLS
# TODO(bacongobbler): experiment with authenticating the registry if it's using TLS
server {
listen %BIND_PORT%;
server_name localhost;
# disable any limits to avoid HTTP 413 for large image uploads
client_max_body_size 0;
# required to avoid HTTP 411: see Issue #1486 (https://github.com/docker/docker/issues/1486)
chunked_transfer_encoding on;
location / {
# Do not allow connections from docker 1.5 and earlier
# docker pre-1.6.0 did not properly set the user agent on ping, catch "Go *" user agents
if ($http_user_agent ~ "^(docker\/1\.(3|4|5(?!\.[0-9]-dev))|Go ).*$" ) {
return 404;
}
include docker-registry.conf;
}
}

View file

@ -0,0 +1,6 @@
proxy_pass http://docker-registry;
proxy_set_header Host $http_host; # required for docker client's sake
proxy_set_header X-Real-IP $remote_addr; # pass on real client's IP
proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
proxy_set_header X-Forwarded-Proto $scheme;
proxy_read_timeout 900;

View file

@ -0,0 +1,26 @@
user nginx;
worker_processes auto;
error_log /var/log/nginx/error.log warn;
pid /var/run/nginx.pid;
events {
worker_connections 1024;
}
http {
include /etc/nginx/mime.types;
default_type application/octet-stream;
log_format main '$remote_addr - $remote_user [$time_local] "$request" '
'$status $body_bytes_sent "$http_referer" '
'"$http_user_agent" "$http_x_forwarded_for"';
access_log /var/log/nginx/access.log main;
sendfile on;
keepalive_timeout 65;
include /etc/nginx/conf.d/*.conf;
}

View file

@ -0,0 +1,31 @@
---
- name: Registry | Create addon dir
file:
path: "{{ kube_config_dir }}/addons/registry"
owner: root
group: root
mode: 0755
recurse: true
- name: Registry | Create manifests
template:
src: "{{ item.file }}.j2"
dest: "{{ kube_config_dir }}/addons/registry/{{ item.file }}"
with_items:
- { name: registry-svc, file: registry-svc.yml, type: service }
- { name: registry-rc, file: registry-rc.yml, type: replicationcontroller }
- { name: registry-ds, file: registry-ds.yml, type: daemonset }
register: registry_manifests
when: inventory_hostname == groups['kube-master'][0]
- name: Registry | Apply manifests
kube:
name: "{{ item.item.name }}"
namespace: "{{ system_namespace }}"
kubectl: "{{ bin_dir }}/kubectl"
resource: "{{ item.item.type }}"
filename: "{{ kube_config_dir }}/addons/registry/{{ item.item.file }}"
state: "latest"
with_items: "{{ registry_manifests.results }}"
when: inventory_hostname == groups['kube-master'][0]

View file

@ -0,0 +1,92 @@
# Enable Authentication with Htpasswd for Kube-Registry
The Docker registry supports a few authentication providers. A full list of supported providers can be found [here](https://docs.docker.com/registry/configuration/#auth). This document describes how to enable htpasswd authentication for kube-registry.
### Prepare Htpasswd Secret
Please generate your own htpasswd file; the steps below assume it is named `htpasswd`.
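If you need to create one, a bcrypt-hashed entry can be generated with the `htpasswd` tool (from the apache2-utils package); the username and password below are placeholders:

```console
$ htpasswd -Bbn myuser mypassword > htpasswd
```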
Create a secret to hold the htpasswd file:
```console
$ kubectl --namespace=kube-system create secret generic registry-auth-secret --from-file=htpasswd=htpasswd
```
### Run Registry
Please note that this sample rc uses emptyDir as the storage backend for simplicity.
<!-- BEGIN MUNGE: EXAMPLE registry-auth-rc.yaml -->
```yaml
apiVersion: v1
kind: ReplicationController
metadata:
name: kube-registry-v0
namespace: kube-system
labels:
k8s-app: kube-registry
version: v0
# kubernetes.io/cluster-service: "true"
spec:
replicas: 1
selector:
k8s-app: kube-registry
version: v0
template:
metadata:
labels:
k8s-app: kube-registry
version: v0
# kubernetes.io/cluster-service: "true"
spec:
containers:
- name: registry
image: registry:2
resources:
# keep request = limit to keep this container in guaranteed class
limits:
cpu: 100m
memory: 100Mi
requests:
cpu: 100m
memory: 100Mi
env:
- name: REGISTRY_HTTP_ADDR
value: :5000
- name: REGISTRY_STORAGE_FILESYSTEM_ROOTDIRECTORY
value: /var/lib/registry
- name: REGISTRY_AUTH_HTPASSWD_REALM
value: basic_realm
- name: REGISTRY_AUTH_HTPASSWD_PATH
value: /auth/htpasswd
volumeMounts:
- name: image-store
mountPath: /var/lib/registry
- name: auth-dir
mountPath: /auth
ports:
- containerPort: 5000
name: registry
protocol: TCP
volumes:
- name: image-store
emptyDir: {}
- name: auth-dir
secret:
secretName: registry-auth-secret
```
<!-- END MUNGE: EXAMPLE registry-auth-rc.yaml -->
No changes are needed for other components (kube-registry service and proxy).
### To Verify
Set up a proxy or port-forwarding to the kube-registry. Image push/pull should fail without authentication. Then use `docker login` to authenticate with kube-registry and verify that it works.
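For example, assuming a port-forward makes the registry reachable at `localhost:5000`, a quick check might look like this (credentials are whatever you put in the htpasswd file):

```console
$ docker login localhost:5000
$ docker tag busybox localhost:5000/busybox
$ docker push localhost:5000/busybox
```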
### Configure Nodes to Authenticate with Kube-Registry
By default, nodes assume no authentication is required by kube-registry. Without authentication, nodes cannot pull images from kube-registry. To solve this, see the documentation on [configuring nodes to authenticate to a private repository](https://github.com/kubernetes/kubernetes.github.io/blob/master/docs/concepts/containers/images.md#configuring-nodes-to-authenticate-to-a-private-repository).

View file

@ -0,0 +1,56 @@
apiVersion: v1
kind: ReplicationController
metadata:
name: kube-registry-v0
namespace: kube-system
labels:
k8s-app: kube-registry
version: v0
# kubernetes.io/cluster-service: "true"
spec:
replicas: 1
selector:
k8s-app: kube-registry
version: v0
template:
metadata:
labels:
k8s-app: kube-registry
version: v0
# kubernetes.io/cluster-service: "true"
spec:
containers:
- name: registry
image: registry:2
resources:
# keep request = limit to keep this container in guaranteed class
limits:
cpu: 100m
memory: 100Mi
requests:
cpu: 100m
memory: 100Mi
env:
- name: REGISTRY_HTTP_ADDR
value: :5000
- name: REGISTRY_STORAGE_FILESYSTEM_ROOTDIRECTORY
value: /var/lib/registry
- name: REGISTRY_AUTH_HTPASSWD_REALM
value: basic_realm
- name: REGISTRY_AUTH_HTPASSWD_PATH
value: /auth/htpasswd
volumeMounts:
- name: image-store
mountPath: /var/lib/registry
- name: auth-dir
mountPath: /auth
ports:
- containerPort: 5000
name: registry
protocol: TCP
volumes:
- name: image-store
emptyDir: {}
- name: auth-dir
secret:
secretName: registry-auth-secret

View file

@ -0,0 +1,81 @@
# Kube-Registry with GCS storage backend
Besides the local file system, the Docker registry also supports a number of cloud storage backends. A full list of supported backends can be found [here](https://docs.docker.com/registry/configuration/#storage). This document describes how to enable GCS as the storage backend for kube-registry.
A few preparation steps are needed.
1. Create a bucket named kube-registry in GCS (a `gsutil` sketch follows below).
1. Create a service account for GCS access and create a key file in JSON format. Detailed instructions can be found [here](https://cloud.google.com/storage/docs/authentication#service_accounts).
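A sketch of the bucket creation step with the `gsutil` CLI (bucket names are globally unique, so you may need a different name and would then adjust the rc below accordingly):

```console
$ gsutil mb gs://kube-registry
```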
### Pack Keyfile into a Secret
Assuming you have downloaded the keyfile as `keyfile.json`, create a secret from it:
```console
$ kubectl --namespace=kube-system create secret generic gcs-key-secret --from-file=keyfile=keyfile.json
```
### Run Registry
<!-- BEGIN MUNGE: EXAMPLE registry-gcs-rc.yaml -->
```yaml
apiVersion: v1
kind: ReplicationController
metadata:
name: kube-registry-v0
namespace: kube-system
labels:
k8s-app: kube-registry
version: v0
# kubernetes.io/cluster-service: "true"
spec:
replicas: 1
selector:
k8s-app: kube-registry
version: v0
template:
metadata:
labels:
k8s-app: kube-registry
version: v0
# kubernetes.io/cluster-service: "true"
spec:
containers:
- name: registry
image: registry:2
resources:
# keep request = limit to keep this container in guaranteed class
limits:
cpu: 100m
memory: 100Mi
requests:
cpu: 100m
memory: 100Mi
env:
- name: REGISTRY_HTTP_ADDR
value: :5000
- name: REGISTRY_STORAGE
value: gcs
- name: REGISTRY_STORAGE_GCS_BUCKET
value: kube-registry
- name: REGISTRY_STORAGE_GCS_KEYFILE
value: /gcs/keyfile
ports:
- containerPort: 5000
name: registry
protocol: TCP
volumeMounts:
- name: gcs-key
mountPath: /gcs
volumes:
- name: gcs-key
secret:
secretName: gcs-key-secret
```
<!-- END MUNGE: EXAMPLE registry-gcs-rc.yaml -->
No changes are needed for other components (kube-registry service and proxy).

View file

@ -0,0 +1,52 @@
apiVersion: v1
kind: ReplicationController
metadata:
name: kube-registry-v0
namespace: kube-system
labels:
k8s-app: kube-registry
version: v0
# kubernetes.io/cluster-service: "true"
spec:
replicas: 1
selector:
k8s-app: kube-registry
version: v0
template:
metadata:
labels:
k8s-app: kube-registry
version: v0
# kubernetes.io/cluster-service: "true"
spec:
containers:
- name: registry
image: registry:2
resources:
# keep request = limit to keep this container in guaranteed class
limits:
cpu: 100m
memory: 100Mi
requests:
cpu: 100m
memory: 100Mi
env:
- name: REGISTRY_HTTP_ADDR
value: :5000
- name: REGISTRY_STORAGE
value: gcs
- name: REGISTRY_STORAGE_GCS_BUCKET
value: kube-registry
- name: REGISTRY_STORAGE_GCS_KEYFILE
value: /gcs/keyfile
ports:
- containerPort: 5000
name: registry
protocol: TCP
volumeMounts:
- name: gcs-key
mountPath: /gcs
volumes:
- name: gcs-key
secret:
secretName: gcs-key-secret

View file

@ -0,0 +1,31 @@
---
apiVersion: extensions/v1beta1
kind: DaemonSet
metadata:
name: kube-registry-proxy
namespace: {{ system_namespace }}
labels:
k8s-app: kube-registry-proxy
kubernetes.io/cluster-service: "true"
version: v{{ registry_proxy_image_tag }}
spec:
template:
metadata:
labels:
k8s-app: kube-registry-proxy
kubernetes.io/name: "kube-registry-proxy"
kubernetes.io/cluster-service: "true"
version: v{{ registry_proxy_image_tag }}
spec:
containers:
- name: kube-registry-proxy
image: {{ registry_proxy_image_repo }}:{{ registry_proxy_image_tag }}
env:
- name: REGISTRY_HOST
value: kube-registry.kube-system.svc.cluster.local
- name: REGISTRY_PORT
value: "5000"
ports:
- name: registry
containerPort: 80
hostPort: 5000

View file

@ -0,0 +1,17 @@
kind: PersistentVolume
apiVersion: v1
metadata:
name: kube-system-kube-registry-pv
labels:
kubernetes.io/cluster-service: "true"
addonmanager.kubernetes.io/mode: Reconcile
spec:
{% if pillar.get('cluster_registry_disk_type', '') == 'gce' %}
capacity:
storage: {{ pillar['cluster_registry_disk_size'] }}
accessModes:
- ReadWriteOnce
gcePersistentDisk:
pdName: "{{ pillar['cluster_registry_disk_name'] }}"
fsType: "ext4"
{% endif %}

View file

@ -0,0 +1,14 @@
kind: PersistentVolumeClaim
apiVersion: v1
metadata:
name: kube-registry-pvc
namespace: kube-system
labels:
kubernetes.io/cluster-service: "true"
addonmanager.kubernetes.io/mode: Reconcile
spec:
accessModes:
- ReadWriteOnce
resources:
requests:
storage: {{ pillar['cluster_registry_disk_size'] }}

View file

@ -0,0 +1,41 @@
---
apiVersion: v1
kind: ReplicationController
metadata:
name: kube-registry-v{{ registry_image_tag }}
namespace: {{ system_namespace }}
labels:
k8s-app: kube-registry-upstream
version: v{{ registry_image_tag }}
kubernetes.io/cluster-service: "true"
addonmanager.kubernetes.io/mode: Reconcile
spec:
replicas: 1
selector:
k8s-app: kube-registry-upstream
version: v{{ registry_image_tag }}
template:
metadata:
labels:
k8s-app: kube-registry-upstream
version: v{{ registry_image_tag }}
kubernetes.io/cluster-service: "true"
spec:
containers:
- name: registry
image: {{ registry_image_repo }}:{{ registry_image_tag }}
env:
- name: REGISTRY_HTTP_ADDR
value: :5000
- name: REGISTRY_STORAGE_FILESYSTEM_ROOTDIRECTORY
value: /var/lib/registry
volumeMounts:
- name: image-store
mountPath: /var/lib/registry
ports:
- containerPort: 5000
name: registry
protocol: TCP
volumes:
- name: image-store
emptyDir: {}

View file

@ -0,0 +1,18 @@
---
apiVersion: v1
kind: Service
metadata:
name: kube-registry
namespace: {{ system_namespace }}
labels:
k8s-app: kube-registry-upstream
kubernetes.io/cluster-service: "true"
addonmanager.kubernetes.io/mode: Reconcile
kubernetes.io/name: "KubeRegistry"
spec:
selector:
k8s-app: kube-registry-upstream
ports:
- name: registry
port: 5000
protocol: TCP

View file

@ -0,0 +1,116 @@
# Enable TLS for Kube-Registry
This document describes how to enable TLS for kube-registry. Before you start, please check that you have all the prerequisites:
- A domain for kube-registry. Assuming it is `myregistrydomain.com`.
- A domain certificate and key. Assuming they are `domain.crt` and `domain.key` (a self-signed sketch follows below).
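For testing only, a self-signed pair can be generated with `openssl` (clients must then be configured to trust this certificate; for production use a CA-signed certificate):

```console
$ openssl req -x509 -newkey rsa:4096 -nodes -sha256 -days 365 \
    -subj "/CN=myregistrydomain.com" \
    -keyout domain.key -out domain.crt
```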
### Pack domain.crt and domain.key into a Secret
```console
$ kubectl --namespace=kube-system create secret generic registry-tls-secret --from-file=domain.crt=domain.crt --from-file=domain.key=domain.key
```
### Run Registry
Please note that this sample rc uses emptyDir as the storage backend for simplicity.
<!-- BEGIN MUNGE: EXAMPLE registry-tls-rc.yaml -->
```yaml
apiVersion: v1
kind: ReplicationController
metadata:
name: kube-registry-v0
namespace: kube-system
labels:
k8s-app: kube-registry
version: v0
# kubernetes.io/cluster-service: "true"
spec:
replicas: 1
selector:
k8s-app: kube-registry
version: v0
template:
metadata:
labels:
k8s-app: kube-registry
version: v0
# kubernetes.io/cluster-service: "true"
spec:
containers:
- name: registry
image: registry:2
resources:
# keep request = limit to keep this container in guaranteed class
limits:
cpu: 100m
memory: 100Mi
requests:
cpu: 100m
memory: 100Mi
env:
- name: REGISTRY_HTTP_ADDR
value: :5000
- name: REGISTRY_STORAGE_FILESYSTEM_ROOTDIRECTORY
value: /var/lib/registry
- name: REGISTRY_HTTP_TLS_CERTIFICATE
value: /certs/domain.crt
- name: REGISTRY_HTTP_TLS_KEY
value: /certs/domain.key
volumeMounts:
- name: image-store
mountPath: /var/lib/registry
- name: cert-dir
mountPath: /certs
ports:
- containerPort: 5000
name: registry
protocol: TCP
volumes:
- name: image-store
emptyDir: {}
- name: cert-dir
secret:
secretName: registry-tls-secret
```
<!-- END MUNGE: EXAMPLE registry-tls-rc.yaml -->
### Expose External IP for Kube-Registry
Modify the default kube-registry service to the `LoadBalancer` type and point the DNS record of `myregistrydomain.com` to the service's external IP.
<!-- BEGIN MUNGE: EXAMPLE registry-tls-svc.yaml -->
```yaml
apiVersion: v1
kind: Service
metadata:
name: kube-registry
namespace: kube-system
labels:
k8s-app: kube-registry
# kubernetes.io/cluster-service: "true"
kubernetes.io/name: "KubeRegistry"
spec:
selector:
k8s-app: kube-registry
type: LoadBalancer
ports:
- name: registry
port: 5000
protocol: TCP
```
<!-- END MUNGE: EXAMPLE registry-tls-svc.yaml -->
### To Verify
Now you should be able to access your kube-registry from another Docker host:
```console
docker pull busybox
docker tag busybox myregistrydomain.com:5000/busybox
docker push myregistrydomain.com:5000/busybox
docker pull myregistrydomain.com:5000/busybox
```

View file

@ -0,0 +1,57 @@
apiVersion: v1
kind: ReplicationController
metadata:
name: kube-registry-v0
namespace: kube-system
labels:
k8s-app: kube-registry
version: v0
# kubernetes.io/cluster-service: "true"
spec:
replicas: 1
selector:
k8s-app: kube-registry
version: v0
template:
metadata:
labels:
k8s-app: kube-registry
version: v0
# kubernetes.io/cluster-service: "true"
spec:
containers:
- name: registry
image: registry:2
resources:
# keep request = limit to keep this container in guaranteed class
limits:
cpu: 100m
memory: 100Mi
requests:
cpu: 100m
memory: 100Mi
env:
- name: REGISTRY_HTTP_ADDR
value: :5000
- name: REGISTRY_STORAGE_FILESYSTEM_ROOTDIRECTORY
value: /var/lib/registry
- name: REGISTRY_HTTP_TLS_CERTIFICATE
value: /certs/domain.crt
- name: REGISTRY_HTTP_TLS_KEY
value: /certs/domain.key
volumeMounts:
- name: image-store
mountPath: /var/lib/registry
- name: cert-dir
mountPath: /certs
ports:
- containerPort: 5000
name: registry
protocol: TCP
volumes:
- name: image-store
emptyDir: {}
- name: cert-dir
secret:
secretName: registry-tls-secret

View file

@ -0,0 +1,17 @@
apiVersion: v1
kind: Service
metadata:
name: kube-registry
namespace: kube-system
labels:
k8s-app: kube-registry
# kubernetes.io/cluster-service: "true"
kubernetes.io/name: "KubeRegistry"
spec:
selector:
k8s-app: kube-registry
type: LoadBalancer
ports:
- name: registry
port: 5000
protocol: TCP

View file

@ -83,8 +83,11 @@ controller_mgr_custom_flags: []
scheduler_custom_flags: []
# kubeadm settings
# Value of 0 means it never expires
## Value of 0 means it never expires
kubeadm_token_ttl: 0
## Extra args for k8s components passing by kubeadm
kube_kubeadm_controller_extra_args: {}
kube_kubeadm_scheduler_extra_args: {}
## Variable for influencing kube-scheduler behaviour
volume_cross_zone_attachment: false
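A hedged example of how these hooks might be used from inventory vars (the flags shown are illustrative kube-controller-manager and kube-scheduler options, not defaults introduced here):

```yaml
kube_kubeadm_controller_extra_args:
  node-monitor-grace-period: 40s
kube_kubeadm_scheduler_extra_args:
  address: 0.0.0.0
```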

View file

@ -19,6 +19,12 @@ kubernetesVersion: {{ kube_version }}
{% if cloud_provider is defined and cloud_provider != "gce" %}
cloudProvider: {{ cloud_provider }}
{% endif %}
{% if kube_proxy_mode == 'ipvs' %}
kubeProxy:
config:
featureGates: SupportIPVSProxyMode=true
mode: ipvs
{% endif %}
authorizationModes:
{% for mode in authorization_modes %}
- {{ mode }}
@ -61,6 +67,15 @@ controllerManagerExtraArgs:
{% if kube_feature_gates %}
feature-gates: {{ kube_feature_gates|join(',') }}
{% endif %}
{% for key in kube_kubeadm_controller_extra_args %}
{{ key }}: {{ kube_kubeadm_controller_extra_args[key] }}
{% endfor %}
{% if kube_kubeadm_scheduler_extra_args|length > 0 %}
schedulerExtraArgs:
{% for key in kube_kubeadm_scheduler_extra_args %}
{{ key }}: {{ kube_kubeadm_scheduler_extra_args[key] }}
{% endfor %}
{% endif %}
apiServerCertSANs:
{% for san in apiserver_sans.split(' ') | unique %}
- {{ san }}

View file

@ -14,6 +14,7 @@ kubelet_bind_address: "{{ ip | default('0.0.0.0') }}"
# resolv.conf to base dns config
kube_resolv_conf: "/etc/resolv.conf"
# Can be ipvs, iptables
kube_proxy_mode: iptables
# If using the pure iptables proxy, SNAT everything. Note that it breaks any

View file

@ -104,6 +104,20 @@
- net.bridge.bridge-nf-call-arptables
- net.bridge.bridge-nf-call-ip6tables
- name: Modprobe Kernel Modules for IPVS
modprobe:
name: "{{ item }}"
state: present
when: kube_proxy_mode == 'ipvs'
with_items:
- ip_vs
- ip_vs_rr
- ip_vs_wrr
- ip_vs_sh
- nf_conntrack_ipv4
tags:
- kube-proxy
- name: Write proxy manifest
template:
src: manifests/kube-proxy.manifest.j2

View file

@ -33,6 +33,13 @@ spec:
- --proxy-mode={{ kube_proxy_mode }}
{% if kube_proxy_masquerade_all and kube_proxy_mode == "iptables" %}
- --masquerade-all
{% elif kube_proxy_mode == 'ipvs' %}
- --masquerade-all
- --feature-gates=SupportIPVSProxyMode=true
- --proxy-mode=ipvs
- --ipvs-min-sync-period=5s
- --ipvs-sync-period=5s
- --ipvs-scheduler=rr
{% endif %}
securityContext:
privileged: true