Scheduling
ManualScheduling
bharathdasaraju@MacBook-Pro 2.Scheduling (master) $ kubectl edit pod nginx | grep -i nodename
Vim: Warning: Output is not to a terminal
29 nodeName: minikube
[1]+ Stopped kubectl edit pod nginx | grep --color=auto -i nodename
bharathdasaraju@MacBook-Pro 2.Scheduling (master) $
You cannot move an existing pod by editing nodeName; instead, create a Binding object and send it in a POST request to the pod's binding API.
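A minimal sketch of that Binding object (assuming the target node is node01, as in the labs below):
---
apiVersion: v1
kind: Binding
metadata:
  name: nginx
target:
  apiVersion: v1
  kind: Node
  name: node01
The same object sent as JSON in a POST to the pod's binding subresource (the API server address and unsecured port here are assumptions):
curl --header "Content-Type: application/json" --request POST --data '{"apiVersion":"v1","kind":"Binding","metadata":{"name":"nginx"},"target":{"apiVersion":"v1","kind":"Node","name":"node01"}}' http://localhost:8080/api/v1/namespaces/default/pods/nginx/binding/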
bharathdasaraju@MacBook-Pro 2.Scheduling (master) $ read | kubectl edit pod nginx | grep -i node
Vim: Warning: Output is not to a terminal
Vim: Warning: Input is not from a terminal
29 nodeName: minikube
40 key: node.kubernetes.io/not-ready
44 key: node.kubernetes.io/unreachable
^C
bharathdasaraju@MacBook-Pro 2.Scheduling (master) $
ManualSchedulingLabs
root@controlplane:~# cat nginx.yaml
---
apiVersion: v1
kind: Pod
metadata:
  name: nginx
spec:
  containers:
  - image: nginx
    name: nginx
root@controlplane:~#
root@controlplane:~# kubectl get nodes -o wide
NAME STATUS ROLES AGE VERSION INTERNAL-IP EXTERNAL-IP OS-IMAGE KERNEL-VERSION CONTAINER-RUNTIME
controlplane Ready control-plane,master 3m11s v1.20.0 10.60.136.3 <none> Ubuntu 18.04.5 LTS 5.4.0-1052-gcp docker://19.3.0
node01 Ready <none> 2m10s v1.20.0 10.60.136.6 <none> Ubuntu 18.04.5 LTS 5.4.0-1052-gcp docker://19.3.0
root@controlplane:~#
root@controlplane:~# kubectl apply -f nginx.yaml
pod/nginx created
root@controlplane:~# kubectl get pods -o wide
NAME READY STATUS RESTARTS AGE IP NODE NOMINATED NODE READINESS GATES
nginx 0/1 Pending 0 6s <none> <none> <none> <none>
root@controlplane:~#
root@controlplane:~# kubectl get cs
Warning: v1 ComponentStatus is deprecated in v1.19+
NAME STATUS MESSAGE ERROR
scheduler Unhealthy Get "http://127.0.0.1:10251/healthz": dial tcp 127.0.0.1:10251: connect: connection refused
controller-manager Unhealthy Get "http://127.0.0.1:10252/healthz": dial tcp 127.0.0.1:10252: connect: connection refused
etcd-0 Healthy {"health":"true"}
root@controlplane:~# kubectl get pods -n kube-system
NAME READY STATUS RESTARTS AGE
coredns-74ff55c5b-4rc9j 1/1 Running 0 23m
coredns-74ff55c5b-62vqs 1/1 Running 0 23m
etcd-controlplane 1/1 Running 0 23m
kube-apiserver-controlplane 1/1 Running 0 23m
kube-controller-manager-controlplane 1/1 Running 0 23m
kube-flannel-ds-7m6ng 1/1 Running 0 22m
kube-flannel-ds-rmh2l 1/1 Running 0 23m
kube-proxy-4fpwv 1/1 Running 0 23m
kube-proxy-wccgr 1/1 Running 0 22m
root@controlplane:~#
root@controlplane:~# kubectl get pods --all-namespaces | grep -i "scheduler"
root@controlplane:~# kubectl get pods --all-namespaces | grep -i "kube-"
kube-system coredns-74ff55c5b-4rc9j 1/1 Running 0 29m
kube-system coredns-74ff55c5b-62vqs 1/1 Running 0 29m
kube-system etcd-controlplane 1/1 Running 0 29m
kube-system kube-apiserver-controlplane 1/1 Running 0 29m
kube-system kube-controller-manager-controlplane 1/1 Running 0 29m
kube-system kube-flannel-ds-7m6ng 1/1 Running 0 29m
kube-system kube-flannel-ds-rmh2l 1/1 Running 0 29m
kube-system kube-proxy-4fpwv 1/1 Running 0 29m
kube-system kube-proxy-wccgr 1/1 Running 0 29m
root@controlplane:~#
"So there is no scheduler running"
root@controlplane:~# kubectl get nodes -o wide
NAME STATUS ROLES AGE VERSION INTERNAL-IP EXTERNAL-IP OS-IMAGE KERNEL-VERSION CONTAINER-RUNTIME
controlplane Ready control-plane,master 3m11s v1.20.0 10.60.136.3 <none> Ubuntu 18.04.5 LTS 5.4.0-1052-gcp docker://19.3.0
node01 Ready <none> 2m10s v1.20.0 10.60.136.6 <none> Ubuntu 18.04.5 LTS 5.4.0-1052-gcp docker://19.3.0
root@controlplane:~#
Manually schedule the pod on node01
root@controlplane:~# kubectl get nodes
NAME STATUS ROLES AGE VERSION
controlplane Ready control-plane,master 35m v1.20.0
node01 Ready <none> 34m v1.20.0
root@controlplane:~# vim nginx.yaml
root@controlplane:~# cat nginx.yaml
---
apiVersion: v1
kind: Pod
metadata:
  name: nginx
spec:
  nodeName: node01
  containers:
  - image: nginx
    name: nginx
root@controlplane:~#
root@controlplane:~# kubectl apply -f nginx.yaml
pod/nginx created
root@controlplane:~# kubectl get pods
NAME READY STATUS RESTARTS AGE
nginx 0/1 ContainerCreating 0 17s
root@controlplane:~# kubectl get pods
NAME READY STATUS RESTARTS AGE
nginx 1/1 Running 0 67s
root@controlplane:~#
root@controlplane:~# vim nginx.yaml
root@controlplane:~# cat nginx.yaml
---
apiVersion: v1
kind: Pod
metadata:
  name: nginx
spec:
  nodeName: controlplane
  containers:
  - image: nginx
    name: nginx
root@controlplane:~#
root@controlplane:~# kubectl apply -f nginx.yaml
pod/nginx created
root@controlplane:~# kubectl get pods -o wide
NAME READY STATUS RESTARTS AGE IP NODE NOMINATED NODE READINESS GATES
nginx 0/1 ContainerCreating 0 10s <none> controlplane <none> <none>
root@controlplane:~# kubectl get pods -o wide
NAME READY STATUS RESTARTS AGE IP NODE NOMINATED NODE READINESS GATES
nginx 1/1 Running 0 3m3s 10.244.0.4 controlplane <none> <none>
root@controlplane:~#
LabelsSelectors
Labels and selectors in Kubernetes:
app=App1
function=Front-end
app=App2
function=Back-end
app=App3
function=Database
bharathdasaraju@MacBook-Pro 2.Scheduling % kubectl run nginx --image=nginx --port=80 -l app=App1,function=Front-end --dry-run=client -o yaml
apiVersion: v1
kind: Pod
metadata:
  creationTimestamp: null
  labels:
    app: App1
    function: Front-end
  name: nginx
spec:
  containers:
  - image: nginx
    name: nginx
    ports:
    - containerPort: 80
    resources: {}
  dnsPolicy: ClusterFirst
  restartPolicy: Always
status: {}
bharathdasaraju@MacBook-Pro 2.Scheduling %
bharathdasaraju@MacBook-Pro 2.Scheduling % kubectl apply -f pod_with_labales.yaml
pod/nginx created
bharathdasaraju@MacBook-Pro 2.Scheduling % kubectl get pod nginx -o wide
NAME READY STATUS RESTARTS AGE IP NODE NOMINATED NODE READINESS GATES
nginx 1/1 Running 0 11s 172.17.0.2 minikube <none> <none>
bharathdasaraju@MacBook-Pro 2.Scheduling % kubectl describe pod nginx
Name: nginx
Namespace: default
Priority: 0
Node: minikube/192.168.49.2
Start Time: Tue, 14 Sep 2021 19:50:13 +0800
Labels: app=App1
function=Front-end
Annotations: <none>
Status: Running
IP: 172.17.0.2
IPs:
IP: 172.17.0.2
Containers:
nginx:
Container ID: docker://acd72109ba5fd062466487124e89cea38d84e94c48e5f21bc35015ddbcc811ef
Image: nginx
Image ID: docker-pullable://nginx@sha256:853b221d3341add7aaadf5f81dd088ea943ab9c918766e295321294b035f3f3e
Port: 80/TCP
Host Port: 0/TCP
State: Running
Started: Tue, 14 Sep 2021 19:50:19 +0800
Ready: True
Restart Count: 0
Environment: <none>
Mounts:
/var/run/secrets/kubernetes.io/serviceaccount from default-token-lxpp9 (ro)
Conditions:
Type Status
Initialized True
Ready True
ContainersReady True
PodScheduled True
Volumes:
default-token-lxpp9:
Type: Secret (a volume populated by a Secret)
SecretName: default-token-lxpp9
Optional: false
QoS Class: BestEffort
Node-Selectors: <none>
Tolerations: node.kubernetes.io/not-ready:NoExecute op=Exists for 300s
node.kubernetes.io/unreachable:NoExecute op=Exists for 300s
Events:
Type Reason Age From Message
---- ------ ---- ---- -------
Normal Scheduled 27s default-scheduler Successfully assigned default/nginx to minikube
Normal Pulling 26s kubelet Pulling image "nginx"
Normal Pulled 21s kubelet Successfully pulled image "nginx" in 5.4914337s
Normal Created 21s kubelet Created container nginx
Normal Started 21s kubelet Started container nginx
bharathdasaraju@MacBook-Pro 2.Scheduling %
How to select specific pods:
bharathdasaraju@MacBook-Pro 2.Scheduling % kubectl get pods --selector app=App1
NAME READY STATUS RESTARTS AGE
nginx 1/1 Running 0 2m19s
bharathdasaraju@MacBook-Pro 2.Scheduling %
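Beyond a single equality check, selectors support multiple terms and set-based matching (hypothetical queries against the labels used above):
kubectl get pods --selector app=App1,function=Front-end    # AND of two equality checks
kubectl get pods -l 'app in (App1,App2)'                   # set-based selector
kubectl get pods -l '!function'                            # pods that have no 'function' label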
A ReplicaSet definition carries two sets of labels:
1. labels on the ReplicaSet object itself (metadata.labels).
2. labels on the pod template (spec.template.metadata.labels) -- these are the ones the selector must match.
bharathdasaraju@MacBook-Pro 2.Scheduling % kubectl get rs
NAME DESIRED CURRENT READY AGE
sample-web 3 3 3 4m6s
bharathdasaraju@MacBook-Pro 2.Scheduling % kubectl describe rs sample-web
Name: sample-web
Namespace: default
Selector: app=App1
Labels: app=App1
function=Front-end
Annotations: <none>
Replicas: 3 current / 3 desired
Pods Status: 3 Running / 0 Waiting / 0 Succeeded / 0 Failed
Pod Template:
Labels: app=App1
function=Front-end
Containers:
nginx-web:
Image: nginx
Port: <none>
Host Port: <none>
Environment: <none>
Mounts: <none>
Volumes: <none>
Events:
Type Reason Age From Message
---- ------ ---- ---- -------
Normal SuccessfulCreate 4m20s replicaset-controller Created pod: sample-web-sdfsx
Normal SuccessfulCreate 4m20s replicaset-controller Created pod: sample-web-x648g
bharathdasaraju@MacBook-Pro 2.Scheduling % kubectl get rs sample-web -o wide
NAME DESIRED CURRENT READY AGE CONTAINERS IMAGES SELECTOR
sample-web 3 3 3 4m40s nginx-web nginx app=App1
bharathdasaraju@MacBook-Pro 2.Scheduling %
labelsselectorslabs
bharathdasaraju@MacBook-Pro 2.Scheduling % kubectl get pods --selector app=App1
NAME READY STATUS RESTARTS AGE
nginx 1/1 Running 0 108m
sample-web-sdfsx 1/1 Running 0 14m
sample-web-x648g 1/1 Running 0 14m
bharathdasaraju@MacBook-Pro 2.Scheduling %
We have deployed a number of PODs. They are labelled with tier, env and bu. How many PODs exist in the dev environment?
root@controlplane:~# kubectl get pods --selector env=dev
NAME READY STATUS RESTARTS AGE
app-1-7pdj8 1/1 Running 0 7m26s
app-1-848jw 1/1 Running 0 7m26s
app-1-9qzgq 1/1 Running 0 7m26s
db-1-4jl5f 1/1 Running 0 7m26s
db-1-j5xpx 1/1 Running 0 7m26s
db-1-ltqng 1/1 Running 0 7m26s
db-1-s2w87 1/1 Running 0 7m26s
root@controlplane:~#
root@controlplane:~# kubectl get pods --selector bu=finance
NAME READY STATUS RESTARTS AGE
app-1-7pdj8 1/1 Running 0 8m12s
app-1-848jw 1/1 Running 0 8m12s
app-1-9qzgq 1/1 Running 0 8m12s
app-1-zzxdf 1/1 Running 0 8m11s
auth 1/1 Running 0 8m12s
db-2-kwxn9 1/1 Running 0 8m12s
root@controlplane:~#
root@controlplane:~# kubectl get all --selector env=prod
NAME READY STATUS RESTARTS AGE
pod/app-1-zzxdf 1/1 Running 0 9m
pod/app-2-whmqr 1/1 Running 0 9m1s
pod/auth 1/1 Running 0 9m1s
pod/db-2-kwxn9 1/1 Running 0 9m1s
NAME TYPE CLUSTER-IP EXTERNAL-IP PORT(S) AGE
service/app-1 ClusterIP 10.109.116.5 <none> 3306/TCP 9m
NAME DESIRED CURRENT READY AGE
replicaset.apps/app-2 1 1 1 9m1s
replicaset.apps/db-2 1 1 1 9m1s
root@controlplane:~#
root@controlplane:~# kubectl get all --selector env=prod,bu=finance,tier=frontend
NAME READY STATUS RESTARTS AGE
pod/app-1-zzxdf 1/1 Running 0 12m
root@controlplane:~#
taintstolerations
Taints are set on nodes; tolerations are set on pods -- a pod is scheduled onto a tainted node only if it tolerates the taint.
Let's say we have three nodes: node1, node2, node3.
taint a node:
------------------>
kubectl taint nodes node1 key=value:taint-effect
kubectl taint nodes node1 app=blue:NoSchedule -- no pods get scheduled here unless they tolerate the taint
kubectl taint nodes node1 app=blue:PreferNoSchedule -- the scheduler tries to avoid the node, but it is not guaranteed
kubectl taint nodes node1 app=blue:NoExecute -- new pods will not be scheduled on the node, and existing pods that do not tolerate the taint are evicted
taint the node:
-------------------->
kubectl taint nodes node1 app=blue:NoSchedule
bharathdasaraju@MacBook-Pro 2.Scheduling % kubectl run nginx --image=nginx --dry-run=client -o yaml > 8.demo_taint_tolerations.yaml
bharathdasaraju@MacBook-Pro 2.Scheduling %
in the pod specification file:
---------------------------------->
apiVersion: v1
kind: Pod
metadata:
  labels:
    run: nginx
  name: nginx
spec:
  containers:
  - image: nginx
    name: nginx
  tolerations:
  - key: "app"
    operator: "Equal"
    value: "blue"
    effect: "NoSchedule"
taintstolerationslabs
By default, the Kubernetes master (control-plane) node carries a taint that prevents pods from being scheduled on it.
root@controlplane:~# kubectl describe node controlplane | grep -i "taint"
Taints: node-role.kubernetes.io/master:NoSchedule
root@controlplane:~#
root@controlplane:~# kubectl get nodes -o wide
NAME STATUS ROLES AGE VERSION INTERNAL-IP EXTERNAL-IP OS-IMAGE KERNEL-VERSION CONTAINER-RUNTIME
controlplane Ready control-plane,master 15m v1.20.0 10.63.25.9 <none> Ubuntu 18.04.5 LTS 5.4.0-1052-gcp docker://19.3.0
node01 Ready <none> 14m v1.20.0 10.63.25.12 <none> Ubuntu 18.04.5 LTS 5.4.0-1052-gcp docker://19.3.0
root@controlplane:~#
No taints on the worker node:
------------------------------------------------------->
root@controlplane:~# kubectl describe node node01 | grep -i taint
Taints: <none>
root@controlplane:~#
root@controlplane:~# kubectl get nodes
NAME STATUS ROLES AGE VERSION
controlplane Ready control-plane,master 17m v1.20.0
node01 Ready <none> 16m v1.20.0
root@controlplane:~#
root@controlplane:~# kubectl taint node node01 spray=mortein:NoSchedule
node/node01 tainted
root@controlplane:~# kubectl describe node node01 | grep -i "taint"
Taints: spray=mortein:NoSchedule
root@controlplane:~#
root@controlplane:~# kubectl run mosquito --image=nginx
pod/mosquito created
root@controlplane:~#
root@controlplane:~# kubectl get pod mosquito -o wide
NAME READY STATUS RESTARTS AGE IP NODE NOMINATED NODE READINESS GATES
mosquito 0/1 Pending 0 35s <none> <none> <none> <none>
root@controlplane:~#
root@controlplane:~# kubectl run bee --image=nginx --dry-run=client -o yaml > bee_pod_tolerations.yaml
root@controlplane:~#
root@controlplane:~# vim bee_pod_tolerations.yaml
root@controlplane:~# kubectl apply -f bee_pod_tolerations.yaml
pod/bee created
root@controlplane:~#
root@controlplane:~# kubectl get pods
NAME READY STATUS RESTARTS AGE
bee 1/1 Running 0 5m18s
mosquito 0/1 Pending 0 17m
root@controlplane:~#
Untaint the node
root@controlplane:~# kubectl taint node controlplane node-role.kubernetes.io/master:NoSchedule-
node/controlplane untainted
root@controlplane:~#
root@controlplane:~# kubectl get pods
NAME READY STATUS RESTARTS AGE
bee 1/1 Running 0 13m
mosquito 1/1 Running 0 25m
root@controlplane:~#
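In general, a taint is removed by restating it with a trailing "-", so the taint added earlier on node01 could be cleared the same way:
kubectl taint node node01 spray=mortein:NoSchedule-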
How to look up field documentation when there are errors in Pod, ReplicaSet, or Deployment definition files:
bharathdasaraju@MacBook-Pro 2.Scheduling % kubectl explain pods --recursive | grep -iwA5 tolerations
   tolerations  <[]Object>
      effect    <string>
      key       <string>
      operator  <string>
      tolerationSeconds    <integer>
      value     <string>
bharathdasaraju@MacBook-Pro 2.Scheduling %
NodeSelectors
bharathdasaraju@MacBook-Pro 2.Scheduling % kubectl run nginx --image=nginx --dry-run=client -o yaml > pod-definition-nodeSelector.yaml
bharathdasaraju@MacBook-Pro 2.Scheduling %
apiVersion: v1
kind: Pod
metadata:
  labels:
    run: nginx
  name: nginx
spec:
  containers:
  - image: nginx
    name: nginx
  nodeSelector:
    size: Large # This is the node label
"How to Label a node"
kubectl label nodes node01 size=Large
bharathdasaraju@MacBook-Pro 2.Scheduling % kubectl get nodes -o wide
NAME STATUS ROLES AGE VERSION INTERNAL-IP EXTERNAL-IP OS-IMAGE KERNEL-VERSION CONTAINER-RUNTIME
minikube Ready control-plane,master 11d v1.20.2 192.168.49.2 <none> Ubuntu 20.04.2 LTS 5.4.39-linuxkit docker://20.10.6
bharathdasaraju@MacBook-Pro 2.Scheduling % kubectl label nodes minikube size=Large
node/minikube labeled
bharathdasaraju@MacBook-Pro 2.Scheduling %
bharathdasaraju@MacBook-Pro 2.Scheduling % kubectl describe node minikube | grep -iB20 "size=Large"
Name: minikube
Roles: control-plane,master
Labels: beta.kubernetes.io/arch=amd64
beta.kubernetes.io/os=linux
kubernetes.io/arch=amd64
kubernetes.io/hostname=minikube
kubernetes.io/os=linux
minikube.k8s.io/commit=c61663e942ec43b20e8e70839dcca52e44cd85ae
minikube.k8s.io/name=minikube
minikube.k8s.io/updated_at=2021_09_04T14_52_27_0700
minikube.k8s.io/version=v1.20.0
node-role.kubernetes.io/control-plane=
node-role.kubernetes.io/master=
size=Large
bharathdasaraju@MacBook-Pro 2.Scheduling %
nodeaffinity
With nodeSelector we cannot express advanced conditions such as "Large or Medium" or "Not Small".
Node affinity gives us that advanced capability to place a pod on a particular node based on conditions.
To get the exact syntax, run kubectl explain pods --recursive as below.
bharathdasaraju@MacBook-Pro 2.Scheduling % kubectl explain pods --recursive | grep -iwA65 "affinity"
   affinity     <Object>
      nodeAffinity      <Object>
         preferredDuringSchedulingIgnoredDuringExecution   <[]Object>
            preference  <Object>
               matchExpressions    <[]Object>
                  key   <string>
                  operator      <string>
                  values        <[]string>
               matchFields         <[]Object>
                  key   <string>
                  operator      <string>
                  values        <[]string>
            weight      <integer>
         requiredDuringSchedulingIgnoredDuringExecution    <Object>
            nodeSelectorTerms     <[]Object>
               matchExpressions   <[]Object>
                  key   <string>
                  operator      <string>
                  values        <[]string>
               matchFields        <[]Object>
                  key   <string>
                  operator      <string>
                  values        <[]string>
      podAffinity       <Object>
         preferredDuringSchedulingIgnoredDuringExecution   <[]Object>
            podAffinityTerm       <Object>
               labelSelector      <Object>
                  matchExpressions        <[]Object>
                     key        <string>
                     operator   <string>
                     values     <[]string>
                  matchLabels   <map[string]string>
               namespaces         <[]string>
               topologyKey        <string>
            weight      <integer>
         requiredDuringSchedulingIgnoredDuringExecution    <[]Object>
            labelSelector         <Object>
               matchExpressions   <[]Object>
                  key   <string>
                  operator      <string>
                  values        <[]string>
               matchLabels       <map[string]string>
            namespaces  <[]string>
            topologyKey <string>
      podAntiAffinity   <Object>
         preferredDuringSchedulingIgnoredDuringExecution   <[]Object>
            podAffinityTerm       <Object>
               labelSelector      <Object>
                  matchExpressions        <[]Object>
                     key        <string>
                     operator   <string>
                     values     <[]string>
                  matchLabels   <map[string]string>
               namespaces         <[]string>
               topologyKey        <string>
            weight      <integer>
         requiredDuringSchedulingIgnoredDuringExecution    <[]Object>
            labelSelector         <Object>
               matchExpressions   <[]Object>
                  key   <string>
                  operator      <string>
                  values        <[]string>
               matchLabels       <map[string]string>
            namespaces  <[]string>
            topologyKey <string>
   automountServiceAccountToken <boolean>
bharathdasaraju@MacBook-Pro 2.Scheduling %
bharathdasaraju@MacBook-Pro 2.Scheduling % kubectl describe node minikube | grep -i large
size=Large
bharathdasaraju@MacBook-Pro 2.Scheduling %
bharathdasaraju@MacBook-Pro 2.Scheduling % kubectl get pods
No resources found in default namespace.
bharathdasaraju@MacBook-Pro 2.Scheduling % kubectl apply -f pod_with_node_affinity.yaml
pod/nginx created
bharathdasaraju@MacBook-Pro 2.Scheduling % kubectl get pods -o wide
NAME READY STATUS RESTARTS AGE IP NODE NOMINATED NODE READINESS GATES
nginx 1/1 Running 0 16s 172.17.0.2 minikube <none> <none>
bharathdasaraju@MacBook-Pro 2.Scheduling %
bharathdasaraju@MacBook-Pro 2.Scheduling % kubectl apply -f pod_with_node_affinity_NotIn.yaml
pod/nginx2 created
bharathdasaraju@MacBook-Pro 2.Scheduling %
bharathdasaraju@MacBook-Pro 2.Scheduling % kubectl apply -f pod_with_node_affinity_Exists.yaml
pod/nginx3 created
bharathdasaraju@MacBook-Pro 2.Scheduling %
nodeaffinitytypes
Available:
   requiredDuringSchedulingIgnoredDuringExecution
   preferredDuringSchedulingIgnoredDuringExecution
Planned:
   requiredDuringSchedulingRequiredDuringExecution
   preferredDuringSchedulingRequiredDuringExecution
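A minimal sketch of the soft (preferred) variant, reusing the size label from the NodeSelectors section -- the scheduler favors matching nodes but falls back to others if none match:
apiVersion: v1
kind: Pod
metadata:
  name: nginx-preferred   # hypothetical pod name
spec:
  containers:
  - image: nginx
    name: nginx
  affinity:
    nodeAffinity:
      preferredDuringSchedulingIgnoredDuringExecution:
      - weight: 1          # 1-100; higher weight means stronger preference
        preference:
          matchExpressions:
          - key: size
            operator: In
            values:
            - Large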
nodeaffinitylabs
root@controlplane:~# kubectl describe node node01 | grep -iwC10 "Labels:"
Name: node01
Roles: <none>
Labels: beta.kubernetes.io/arch=amd64
beta.kubernetes.io/os=linux
kubernetes.io/arch=amd64
kubernetes.io/hostname=node01
kubernetes.io/os=linux
Annotations: flannel.alpha.coreos.com/backend-data: {"VNI":1,"VtepMAC":"9e:a5:53:b7:a3:98"}
flannel.alpha.coreos.com/backend-type: vxlan
flannel.alpha.coreos.com/kube-subnet-manager: true
flannel.alpha.coreos.com/public-ip: 10.49.14.3
kubeadm.alpha.kubernetes.io/cri-socket: /var/run/dockershim.sock
node.alpha.kubernetes.io/ttl: 0
root@controlplane:~#
root@controlplane:~# kubectl get nodes
NAME STATUS ROLES AGE VERSION
controlplane Ready control-plane,master 21m v1.20.0
node01 Ready <none> 20m v1.20.0
root@controlplane:~#
root@controlplane:~# kubectl label node node01 color=blue
node/node01 labeled
root@controlplane:~# kubectl describe node node01 | grep -iwC10 "Labels:"
Name: node01
Roles: <none>
Labels: beta.kubernetes.io/arch=amd64
beta.kubernetes.io/os=linux
color=blue
kubernetes.io/arch=amd64
kubernetes.io/hostname=node01
kubernetes.io/os=linux
Annotations: flannel.alpha.coreos.com/backend-data: {"VNI":1,"VtepMAC":"9e:a5:53:b7:a3:98"}
flannel.alpha.coreos.com/backend-type: vxlan
flannel.alpha.coreos.com/kube-subnet-manager: true
flannel.alpha.coreos.com/public-ip: 10.49.14.3
kubeadm.alpha.kubernetes.io/cri-socket: /var/run/dockershim.sock
root@controlplane:~#
root@controlplane:~# kubectl create deploy blue --image=nginx --replicas=3 --dry-run=client -o yaml > blue_deployment.yaml
root@controlplane:~# kubectl apply -f blue_deployment.yaml
deployment.apps/blue created
root@controlplane:~#
root@controlplane:~# kubectl get pods -o wide
NAME READY STATUS RESTARTS AGE IP NODE NOMINATED NODE READINESS GATES
blue-7bb46df96d-7n2tq 1/1 Running 0 43s 10.244.1.4 node01 <none> <none>
blue-7bb46df96d-qd5sv 1/1 Running 0 43s 10.244.1.2 node01 <none> <none>
blue-7bb46df96d-wvvb4 1/1 Running 0 43s 10.244.1.3 node01 <none> <none>
root@controlplane:~#
root@controlplane:~# kubectl describe node node01 | grep -i "taint"
Taints: <none>
root@controlplane:~# kubectl describe node controlplane | grep -i "taint"
Taints: <none>
root@controlplane:~#
Set Node Affinity to the deployment to place the pods on node01 only.
Name: blue
Replicas: 3
Image: nginx
NodeAffinity: requiredDuringSchedulingIgnoredDuringExecution
Key: color
values: blue
root@controlplane:~# vim blue_deployment.yaml
root@controlplane:~# kubectl apply -f blue_deployment.yaml
deployment.apps/blue configured
root@controlplane:~#
Use the label - node-role.kubernetes.io/master - set on the controlplane node.
root@controlplane:~# kubectl describe node controlplane | grep -iwC10 "Labels:"
Name: controlplane
Roles: control-plane,master
Labels: beta.kubernetes.io/arch=amd64
beta.kubernetes.io/os=linux
kubernetes.io/arch=amd64
kubernetes.io/hostname=controlplane
kubernetes.io/os=linux
node-role.kubernetes.io/control-plane=
node-role.kubernetes.io/master=
Annotations: flannel.alpha.coreos.com/backend-data: {"VNI":1,"VtepMAC":"da:0c:0a:a9:ce:3f"}
flannel.alpha.coreos.com/backend-type: vxlan
flannel.alpha.coreos.com/kube-subnet-manager: true
flannel.alpha.coreos.com/public-ip: 10.49.14.12
root@controlplane:~# cp blue_deployment.yaml red_deployment.yaml
root@controlplane:~# vim red_deployment.yaml
root@controlplane:~# vim red_deployment.yaml
root@controlplane:~# kubectl apply -f red_deployment.yaml
deployment.apps/red created
root@controlplane:~# kubectl get pods -o wide | grep -i red
red-5cbd45ccb6-r2ph2 0/1 ContainerCreating 0 14s <none> controlplane <none> <none>
red-5cbd45ccb6-zmgkc 0/1 ContainerCreating 0 14s <none> controlplane <none> <none>
root@controlplane:~# kubectl get pods -o wide | grep -i red
red-5cbd45ccb6-r2ph2 1/1 Running 0 32s 10.244.0.4 controlplane <none> <none>
red-5cbd45ccb6-zmgkc 1/1 Running 0 32s 10.244.0.5 controlplane <none> <none>
root@controlplane:~#
resourcelimits
Resource Requests:
The minimum CPU and memory guaranteed to a container are set with:
requests:
  memory: "1Gi"
  cpu: 1
1G  --> 1 gigabyte = 1,000,000,000 bytes (1000 MB)
1Gi --> 1 gibibyte = 1,073,741,824 bytes (1024 MiB)
We can also cap the resources a container may use with:
limits:
  memory: "2Gi"
  cpu: 2
A container that exceeds its CPU limit is throttled; one that keeps exceeding its memory limit is terminated (OOMKilled).
bharathdasaraju@MacBook-Pro 2.Scheduling % kubectl apply -f pod_with_resource_limits.yaml
pod/nginx18 created
bharathdasaraju@MacBook-Pro 2.Scheduling %
bharathdasaraju@MacBook-Pro 2.Scheduling % kubectl apply -f pod_with_resource_limits.yaml
pod/nginx created
bharathdasaraju@MacBook-Pro 2.Scheduling %
We can also set default requests and limits for both CPU and memory at the namespace level by creating a LimitRange in that namespace:
apiVersion: v1
kind: LimitRange
metadata:
  name: mem-limit-range
spec:
  limits:
  - default:
      memory: 512Mi
    defaultRequest:
      memory: 256Mi
    type: Container
apiVersion: v1
kind: LimitRange
metadata:
  name: cpu-limit-range
spec:
  limits:
  - default:
      cpu: 1
    defaultRequest:
      cpu: 0.5
    type: Container
bharathdasaraju@MacBook-Pro 2.Scheduling % kubectl api-resources | grep -i "limit"
limitranges limits true LimitRange
bharathdasaraju@MacBook-Pro 2.Scheduling % kubectl get limits --all-namespaces
No resources found
bharathdasaraju@MacBook-Pro 2.Scheduling %
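A quick way to see those defaults being injected (a sketch -- the dev namespace and file name are assumptions):
kubectl create namespace dev
kubectl apply -f mem-limit-range.yaml -n dev     # the memory LimitRange above
kubectl run nginx --image=nginx -n dev           # no resources specified
kubectl get pod nginx -n dev -o jsonpath='{.spec.containers[0].resources}'
# expected: {"limits":{"memory":"512Mi"},"requests":{"memory":"256Mi"}}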
resourcelimitslabs
root@controlplane:~# kubectl get deploy
No resources found in default namespace.
root@controlplane:~# kubectl get pods
NAME READY STATUS RESTARTS AGE
rabbit 0/1 CrashLoopBackOff 2 56s
root@controlplane:~#
root@controlplane:~# kubectl get pod rabbit -o yaml | grep -iwA4 " resources:"
resources:
limits:
cpu: "2"
requests:
cpu: "1"
root@controlplane:~#
root@controlplane:~# kubectl describe pod elephant | grep -iA5 state
State: Waiting
Reason: CrashLoopBackOff
Last State: Terminated
Reason: OOMKilled
Exit Code: 1
Started: Sat, 18 Sep 2021 01:18:37 +0000
Finished: Sat, 18 Sep 2021 01:18:37 +0000
Ready: False
root@controlplane:~#
root@controlplane:~# kubectl edit pod elephant
error: pods "elephant" is invalid
A copy of your changes has been stored to "/tmp/kubectl-edit-ffi5t.yaml"
error: Edit cancelled, no valid changes were saved.
root@controlplane:~# kubectl delete pod elephant
pod "elephant" deleted
root@controlplane:~# kubectl apply -f /tmp/kubectl-edit-ffi5t.yaml
pod/elephant created
root@controlplane:~# kubectl get pods
NAME READY STATUS RESTARTS AGE
elephant 1/1 Running 0 31s
root@controlplane:~#
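An equivalent shortcut is kubectl replace --force, which deletes and recreates the pod from the edited temp file in a single step:
kubectl replace --force -f /tmp/kubectl-edit-ffi5t.yaml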
daemonsets
A DaemonSet runs one copy of a pod on each node in the cluster.
Whenever a new node joins the cluster, a replica of the pod is automatically added to it.
Typical examples:
1. deploying a monitoring agent on each node
2. running a log collector on each node
kube-proxy, a worker-node component, is itself deployed as a DaemonSet :)
bharathdasaraju@MacBook-Pro 2.Scheduling % kubectl get ds --all-namespaces -o wide
NAMESPACE NAME DESIRED CURRENT READY UP-TO-DATE AVAILABLE NODE SELECTOR AGE CONTAINERS IMAGES SELECTOR
kube-system kube-proxy 1 1 1 1 1 kubernetes.io/os=linux 13d kube-proxy k8s.gcr.io/kube-proxy:v1.20.2 k8s-app=kube-proxy
bharathdasaraju@MacBook-Pro 2.Scheduling %
bharathdasaraju@MacBook-Pro 2.Scheduling % kubectl get all --all-namespaces -o wide | grep -i "daemonset"
kube-system daemonset.apps/kube-proxy 1 1 1 1 1 kubernetes.io/os=linux 13d kube-proxy k8s.gcr.io/kube-proxy:v1.20.2 k8s-app=kube-proxy
bharathdasaraju@MacBook-Pro 2.Scheduling %
How to write a DaemonSet specification file:
first create a Deployment with --dry-run=client, dump the resulting ReplicaSet as YAML, and change kind to DaemonSet -- see the sketch of the required edits just before the apply step below.
bharathdasaraju@MacBook-Pro 2.Scheduling % kubectl create deployment webapp --image=nginx --dry-run=client -o yaml > web_app_deployment.yaml
bharathdasaraju@MacBook-Pro 2.Scheduling % kubectl apply -f web_app_deployment.yaml
deployment.apps/webapp created
bharathdasaraju@MacBook-Pro 2.Scheduling %
bharathdasaraju@MacBook-Pro 2.Scheduling % kubectl get all
NAME READY STATUS RESTARTS AGE
pod/webapp-5654c984c-qlz76 1/1 Running 0 63s
NAME TYPE CLUSTER-IP EXTERNAL-IP PORT(S) AGE
service/kubernetes ClusterIP 10.96.0.1 <none> 443/TCP 14d
NAME READY UP-TO-DATE AVAILABLE AGE
deployment.apps/webapp 1/1 1 1 63s
NAME DESIRED CURRENT READY AGE
replicaset.apps/webapp-5654c984c 1 1 1 63s
bharathdasaraju@MacBook-Pro 2.Scheduling %
bharathdasaraju@MacBook-Pro 2.Scheduling % kubectl get rs webapp-5654c984c -o yaml > web_app_replicaset.yaml
bharathdasaraju@MacBook-Pro 2.Scheduling % cp web_app_replicaset.yaml web_app_daemonset.yaml
bharathdasaraju@MacBook-Pro 2.Scheduling % kubectl delete deploy webapp
deployment.apps "webapp" deleted
bharathdasaraju@MacBook-Pro 2.Scheduling %
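The edits needed inside web_app_daemonset.yaml before applying it (a sketch -- a dump of a live ReplicaSet carries runtime fields that must be stripped):
# change: kind: ReplicaSet  ->  kind: DaemonSet
# remove: spec.replicas (a DaemonSet runs exactly one pod per eligible node)
# remove: status, metadata.ownerReferences, metadata.uid,
#         metadata.resourceVersion, metadata.creationTimestamp
# remove: the pod-template-hash labels the Deployment added to the selector and template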
bharathdasaraju@MacBook-Pro 2.Scheduling % kubectl apply -f web_app_daemonset.yaml
daemonset.apps/webapp-daemon created
bharathdasaraju@MacBook-Pro 2.Scheduling %
bharathdasaraju@MacBook-Pro 2.Scheduling % kubectl get ds webapp-daemon -o wide
NAME DESIRED CURRENT READY UP-TO-DATE AVAILABLE NODE SELECTOR AGE CONTAINERS IMAGES SELECTOR
webapp-daemon 1 1 1 1 1 <none> 3m3s nginx nginx app=webapp-agent
bharathdasaraju@MacBook-Pro 2.Scheduling %
daemonsetlabs
root@controlplane:~# kubectl get ds --all-namespaces
NAMESPACE NAME DESIRED CURRENT READY UP-TO-DATE AVAILABLE NODE SELECTOR AGE
kube-system kube-flannel-ds 1 1 1 1 1 <none> 15m
kube-system kube-proxy 1 1 1 1 1 kubernetes.io/os=linux 15m
root@controlplane:~#
Deploy a DaemonSet for FluentD Logging.
Name: elasticsearch
Namespace: kube-system
Image: k8s.gcr.io/fluentd-elasticsearch:1.20
root@controlplane:~# kubectl create deployment elasticsearch --image=k8s.gcr.io/fluentd-elasticsearch:1.20 -n kube-system --dry-run=client -o yaml > fluend_deploy.yaml
root@controlplane:~#
root@controlplane:~# vim fluend_deploy.yaml
root@controlplane:~# cat fluend_deploy.yaml
apiVersion: apps/v1
kind: DaemonSet
metadata:
  labels:
    app: elasticsearch
  name: elasticsearch
  namespace: kube-system
spec:
  selector:
    matchLabels:
      app: elasticsearch
  template:
    metadata:
      labels:
        app: elasticsearch
    spec:
      containers:
      - image: k8s.gcr.io/fluentd-elasticsearch:1.20
        name: fluentd-elasticsearch
root@controlplane:~#
root@controlplane:~# kubectl apply -f fluend_deploy.yaml
daemonset.apps/elasticsearch created
root@controlplane:~#
root@controlplane:~# kubectl get ds -o wide -n kube-system
NAME DESIRED CURRENT READY UP-TO-DATE AVAILABLE NODE SELECTOR AGE CONTAINERS IMAGES SELECTOR
elasticsearch 1 1 0 1 0 <none> 17s fluentd-elasticsearch k8s.gcr.io/fluentd-elasticsearch:1.20 app=elasticsearch
kube-flannel-ds 1 1 1 1 1 <none> 22m kube-flannel quay.io/coreos/flannel:v0.13.1-rc1 app=flannel
kube-proxy 1 1 1 1 1 kubernetes.io/os=linux 22m kube-proxy k8s.gcr.io/kube-proxy:v1.20.0 k8s-app=kube-proxy
root@controlplane:~#
staticpods
Static pods can be created without any Kubernetes master.
On a standalone node (without a master), the kubelet config can point at a static path that it watches for pod specification files.
Only pods can be created this way -- not ReplicaSets, Deployments, DaemonSets, etc., since those need controllers running on the master.
In the kubelet service unit (kubelet.service) the path can be set directly:
--pod-manifest-path=/etc/kubernetes/manifests
Alternatively, kubelet.service can point at a kubelet config file:
--config=kubeconfig.yaml
and inside that config file (a KubeletConfiguration) the static pod path is specified like below:
staticPodPath: /etc/kubernetes/manifests
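A minimal sketch of such a kubelet config file (the apiVersion/kind are the standard KubeletConfiguration values; the path matches the labs below):
apiVersion: kubelet.config.k8s.io/v1beta1
kind: KubeletConfiguration
staticPodPath: /etc/kubernetes/manifests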
staticpodlabs
Which pods are static pods? Static pod names normally end with the name of the node they run on (the kubelet creates them as mirror pods).
root@controlplane:~# kubectl get pods --all-namespaces
NAMESPACE NAME READY STATUS RESTARTS AGE
kube-system coredns-74ff55c5b-d74qv 1/1 Running 0 30m
kube-system coredns-74ff55c5b-gw2ht 1/1 Running 0 30m
kube-system etcd-controlplane 1/1 Running 0 30m
kube-system kube-apiserver-controlplane 1/1 Running 0 30m
kube-system kube-controller-manager-controlplane 1/1 Running 0 30m
kube-system kube-flannel-ds-572ns 1/1 Running 0 30m
kube-system kube-flannel-ds-lbq9j 1/1 Running 0 30m
kube-system kube-proxy-czxw2 1/1 Running 0 30m
kube-system kube-proxy-xghtc 1/1 Running 0 30m
kube-system kube-scheduler-controlplane 1/1 Running 0 30m
root@controlplane:~# kubectl get pods --all-namespaces | awk '{print $2}' | grep "controlplane$"
etcd-controlplane
kube-apiserver-controlplane
kube-controller-manager-controlplane
kube-scheduler-controlplane
root@controlplane:~#
On which nodes are the static pods created currently?
root@controlplane:~# kubectl get pods -o wide --all-namespaces | grep -i controlplane
kube-system etcd-controlplane 1/1 Running 0 33m 10.55.152.6 controlplane <none> <none>
kube-system kube-apiserver-controlplane 1/1 Running 0 33m 10.55.152.6 controlplane <none> <none>
kube-system kube-controller-manager-controlplane 1/1 Running 0 33m 10.55.152.6 controlplane <none> <none>
kube-system kube-scheduler-controlplane 1/1 Running 0 33m 10.55.152.6 controlplane <none> <none>
root@controlplane:~#
What is the path of the directory holding the static pod definition files?
root@controlplane:~# ps auxwww | grep -i "kubelet "
root 4938 0.0 0.0 3929104 106740 ? Ssl 00:10 2:11 /usr/bin/kubelet --bootstrap-kubeconfig=/etc/kubernetes/bootstrap-kubelet.conf --kubeconfig=/etc/kubernetes/kubelet.conf --config=/var/lib/kubelet/config.yaml --network-plugin=cni --pod-infra-container-image=k8s.gcr.io/pause:3.2
root 25099 0.0 0.0 11468 1120 pts/0 R+ 00:48 0:00 grep --color=auto -i kubelet
root@controlplane:~#
root@controlplane:~# cat /var/lib/kubelet/config.yaml | grep -i static
staticPodPath: /etc/kubernetes/manifests
root@controlplane:~#
root@controlplane:~# cd /etc/kubernetes/manifests
root@controlplane:/etc/kubernetes/manifests# ls -rtlh
total 16K
-rw------- 1 root root 3.8K Sep 20 00:09 kube-apiserver.yaml
-rw------- 1 root root 1.4K Sep 20 00:09 kube-scheduler.yaml
-rw------- 1 root root 3.3K Sep 20 00:09 kube-controller-manager.yaml
-rw------- 1 root root 2.2K Sep 20 00:09 etcd.yaml
root@controlplane:/etc/kubernetes/manifests#
root@controlplane:/etc/kubernetes/manifests# cat kube-apiserver.yaml | grep -i image
image: k8s.gcr.io/kube-apiserver:v1.20.0
imagePullPolicy: IfNotPresent
root@controlplane:/etc/kubernetes/manifests#
Create a static pod named static-busybox that uses the busybox image and the command sleep 1000
root@controlplane:/etc/kubernetes/manifests# kubectl run --restart=Never --image=busybox static-busybox --dry-run=client -o yaml --command -- sleep 1000 > static-busybox.yaml
root@controlplane:/etc/kubernetes/manifests#
root@controlplane:/etc/kubernetes/manifests# kubectl run --restart=Never --image=busybox:1.28.4 static-busybox --dry-run=client -o yaml --command -- sleep 1000 > static-busybox.yaml
root@controlplane:/etc/kubernetes/manifests#
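Once static-busybox.yaml sits in /etc/kubernetes/manifests, the kubelet creates a mirror pod named after the node (controlplane here), which can be checked with:
kubectl get pod static-busybox-controlplane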
root@controlplane:~# ssh node01
Last login: Mon Sep 20 00:59:11 2021 from 10.55.152.4
root@node01:~# ps auxwww | grep -i "kubelet "
root 23673 0.0 0.0 3632640 97320 ? Ssl 00:58 0:09 /usr/bin/kubelet --bootstrap-kubeconfig=/etc/kubernetes/bootstrap-kubelet.conf --kubeconfig=/etc/kubernetes/kubelet.conf --config=/var/lib/kubelet/config.yaml --network-plugin=cni --pod-infra-container-image=k8s.gcr.io/pause:3.2
root 26435 0.0 0.0 11468 1016 pts/0 S+ 01:01 0:00 grep --color=auto -i kubelet
root@node01:~# cat /var/lib/kubelet/config.yaml | grep -i static
staticPodPath: /etc/just-to-mess-with-you
root@node01:~# cd /etc/just-to-mess-with-you
root@node01:/etc/just-to-mess-with-you# ls -rtlh
total 4.0K
-rw-r--r-- 1 root root 301 Sep 20 00:58 greenbox.yaml
root@node01:/etc/just-to-mess-with-you#
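To delete that static pod, remove its manifest from the node's static pod path found above (run on node01):
rm /etc/just-to-mess-with-you/greenbox.yaml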
multipleschedulers
When you run multiple schedulers (for example on different master nodes), set --leader-elect=true so that only one instance of each scheduler is active at a time, and use the --lock-object-name option to give the custom scheduler its own lock object during leader election.
In the pod specification file, a field called schedulerName selects which scheduler should place the pod.
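To verify which scheduler actually placed a pod, check the source column of its scheduling events:
kubectl get events -o wide | grep -i scheduled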
multipleschedulerlabs
root@controlplane:~# kubectl describe pod kube-scheduler-controlplane -n kube-system | grep -i image
Image: k8s.gcr.io/kube-scheduler:v1.20.0
Image ID: docker-pullable://k8s.gcr.io/kube-scheduler@sha256:beaa710325047fa9c867eff4ab9af38d9c2acec05ac5b416c708c304f76bdbef
Normal Pulled 9m29s kubelet Container image "k8s.gcr.io/kube-scheduler:v1.20.0" already present on machine
root@controlplane:~#
Deploy an additional scheduler to the cluster following the given specification.
Use the manifest file used by the kubeadm tool, and a different port than the one used by the current scheduler.
Namespace: kube-system
Name: my-scheduler
Status: Running
Custom Scheduler Name
root@controlplane:/etc/kubernetes/manifests# ls -rtlh
total 16K
-rw------- 1 root root 3.8K Sep 20 01:13 kube-apiserver.yaml
-rw------- 1 root root 1.4K Sep 20 01:13 kube-scheduler.yaml
-rw------- 1 root root 3.3K Sep 20 01:13 kube-controller-manager.yaml
-rw------- 1 root root 2.2K Sep 20 01:13 etcd.yaml
root@controlplane:/etc/kubernetes/manifests# cp kube-scheduler.yaml kube-scheduler-custom.yaml
root@controlplane:/etc/kubernetes/manifests#
Copy kube-scheduler.yaml from the directory /etc/kubernetes/manifests/ to any other location and then change the name to my-scheduler.
Add or update the following command arguments in the YAML file:
- --leader-elect=false
- --port=10282
- --scheduler-name=my-scheduler
- --secure-port=0
Here, we are setting leader-elect to false for our new custom scheduler called my-scheduler.
We are also making use of a different port 10282 which is not currently in use in the controlplane.
The default scheduler uses secure-port on port 10259 to serve HTTPS with authentication and authorization.
This is not needed for our custom scheduler, so we can disable HTTPS by setting the value of secure-port to 0.
Finally, because we have set secure-port to 0, replace HTTPS with HTTP and use the correct ports under liveness and startup probes.
default file as below
---------------------------->
spec:
  containers:
  - command:
    - kube-scheduler
    - --authentication-kubeconfig=/etc/kubernetes/scheduler.conf
    - --authorization-kubeconfig=/etc/kubernetes/scheduler.conf
    - --bind-address=127.0.0.1
    - --kubeconfig=/etc/kubernetes/scheduler.conf
    - --leader-elect=true
    - --port=0
change as below:
---------------------------------->
spec:
  containers:
  - command:
    - kube-scheduler
    - --authentication-kubeconfig=/etc/kubernetes/scheduler.conf
    - --authorization-kubeconfig=/etc/kubernetes/scheduler.conf
    - --bind-address=127.0.0.1
    - --kubeconfig=/etc/kubernetes/scheduler.conf
    - --leader-elect=false
    - --port=10282
    - --scheduler-name=my-scheduler
    - --secure-port=0
    image: k8s.gcr.io/kube-scheduler:v1.20.0
    imagePullPolicy: IfNotPresent
    livenessProbe:
      failureThreshold: 8
      httpGet:
        host: 127.0.0.1
        path: /healthz
        port: 10282
        scheme: HTTP
      initialDelaySeconds: 10
      periodSeconds: 10
      timeoutSeconds: 15
    name: kube-scheduler
Custom-Scheduler static pod specification file:
-------------------------------------------------------->
apiVersion: v1
kind: Pod
metadata:
  labels:
    component: my-scheduler
    tier: control-plane
  name: my-scheduler
  namespace: kube-system
spec:
  containers:
  - command:
    - kube-scheduler
    - --authentication-kubeconfig=/etc/kubernetes/scheduler.conf
    - --authorization-kubeconfig=/etc/kubernetes/scheduler.conf
    - --bind-address=127.0.0.1
    - --kubeconfig=/etc/kubernetes/scheduler.conf
    - --leader-elect=false
    - --port=10282
    - --scheduler-name=my-scheduler
    - --secure-port=0
    image: k8s.gcr.io/kube-scheduler:v1.19.0
    imagePullPolicy: IfNotPresent
    livenessProbe:
      failureThreshold: 8
      httpGet:
        host: 127.0.0.1
        path: /healthz
        port: 10282
        scheme: HTTP
      initialDelaySeconds: 10
      periodSeconds: 10
      timeoutSeconds: 15
    name: kube-scheduler
    resources:
      requests:
        cpu: 100m
    startupProbe:
      failureThreshold: 24
      httpGet:
        host: 127.0.0.1
        path: /healthz
        port: 10282
        scheme: HTTP
      initialDelaySeconds: 10
      periodSeconds: 10
      timeoutSeconds: 15
    volumeMounts:
    - mountPath: /etc/kubernetes/scheduler.conf
      name: kubeconfig
      readOnly: true
  hostNetwork: true
  priorityClassName: system-node-critical
  volumes:
  - hostPath:
      path: /etc/kubernetes/scheduler.conf
      type: FileOrCreate
    name: kubeconfig
status: {}
root@controlplane:/etc/kubernetes/manifests# kubectl get pods -n kube-system | grep -i scheduler
kube-scheduler-controlplane 1/1 Running 2 2m26s
my-scheduler-controlplane 1/1 Running 0 2m39s
root@controlplane:/etc/kubernetes/manifests#
A POD definition file is given. Use it to create a POD with the new custom scheduler.
File is located at /root/nginx-pod.yaml
root@controlplane:~# cat nginx-pod.yaml
apiVersion: v1
kind: Pod
metadata:
  name: nginx
spec:
  schedulerName: my-scheduler
  containers:
  - image: nginx
    name: nginx
root@controlplane:~#
root@controlplane:~# kubectl get pods
NAME READY STATUS RESTARTS AGE
nginx 0/1 ContainerCreating 0 13s
root@controlplane:~# kubectl get pods
NAME READY STATUS RESTARTS AGE
nginx 0/1 ContainerCreating 0 17s
root@controlplane:~# kubectl get pods
NAME READY STATUS RESTARTS AGE
nginx 1/1 Running 0 29s
root@controlplane:~#
pods/deployments/daemonsets with labels, nodeSelectors, taints/tolerations, affinity/antiaffinity rules
podlabels
apiVersion: v1
kind: Pod
metadata:
  creationTimestamp: null
  labels:
    app: App1
    function: Front-end
  name: nginx
spec:
  containers:
  - image: nginx
    name: nginx
    ports:
    - containerPort: 80
    resources: {}
  dnsPolicy: ClusterFirst
  restartPolicy: Always
status: {}
replicasetlabels
apiVersion: apps/v1
kind: ReplicaSet
metadata:
  name: sample-web
  labels:               # ReplicaSet labels
    app: App1           # ReplicaSet labels
    function: Front-end # ReplicaSet labels
spec:
  replicas: 3
  selector:
    matchLabels:
      app: App1
  template:
    metadata:
      labels:               # Pod labels
        app: App1           # Pod labels
        function: Front-end # Pod labels
    spec:
      containers:
      - name: nginx-web
        image: nginx
replicasetlabels2
apiVersion: apps/v1
kind: ReplicaSet
metadata:
  name: replicaset-1
  labels:
    tier: nginx
spec:
  replicas: 2
  selector:
    matchLabels:
      tier: frontend
  template:
    metadata:
      labels:
        tier: frontend
    spec:
      containers:
      - name: nginx
        image: nginx
taintstolerations
# kubectl taint nodes node1 app=blue:NoSchedule
apiVersion: v1
kind: Pod
metadata:
  labels:
    run: nginx
  name: nginx
spec:
  containers:
  - image: nginx
    name: nginx
  tolerations:
  - key: "app"
    operator: "Equal"
    value: "blue"
    effect: "NoSchedule"
beepodtolerations
apiVersion: v1
kind: Pod
metadata:
  labels:
    run: bee
  name: bee
spec:
  containers:
  - image: nginx
    name: bee
  tolerations:
  - key: "spray"
    operator: "Equal"
    value: "mortein"
    effect: "NoSchedule"
podnodeselector
apiVersion: v1
kind: Pod
metadata:
  creationTimestamp: null
  labels:
    run: nginx
  name: nginx
spec:
  containers:
  - image: nginx
    name: nginx
  nodeSelector:
    size: Large # This is the node label
podnodeaffinity_In
apiVersion: v1
kind: Pod
metadata:
  labels:
    run: nginx1
  name: nginx1
spec:
  containers:
  - image: nginx
    name: nginx
  affinity:
    nodeAffinity:
      requiredDuringSchedulingIgnoredDuringExecution:
        nodeSelectorTerms:
        - matchExpressions:
          - key: size
            operator: In
            values:
            - Large
            - Medium
podnodeaffinity_NotIn
apiVersion: v1
kind: Pod
metadata:
  labels:
    run: nginx2
  name: nginx2
spec:
  containers:
  - image: nginx
    name: nginx
  affinity:
    nodeAffinity:
      requiredDuringSchedulingIgnoredDuringExecution:
        nodeSelectorTerms:
        - matchExpressions:
          - key: size
            operator: NotIn
            values:
            - Small
            - Big
podnodeaffinity_Exists
apiVersion: v1
kind: Pod
metadata:
  labels:
    run: nginx3
  name: nginx3
spec:
  containers:
  - image: nginx
    name: nginx
  affinity:
    nodeAffinity:
      requiredDuringSchedulingIgnoredDuringExecution:
        nodeSelectorTerms:
        - matchExpressions:
          - key: size
            operator: Exists
deploymentnodeaffinity
apiVersion: apps/v1
kind: Deployment
metadata:
  creationTimestamp: null
  labels:
    app: red
  name: red
spec:
  replicas: 2
  selector:
    matchLabels:
      app: red
  strategy: {}
  template:
    metadata:
      creationTimestamp: null
      labels:
        app: red
    spec:
      containers:
      - image: nginx
        name: nginx
        resources: {}
      affinity:
        nodeAffinity:
          requiredDuringSchedulingIgnoredDuringExecution:
            nodeSelectorTerms:
            - matchExpressions:
              - key: node-role.kubernetes.io/master
                operator: Exists
deploymentnodeaffinity2
apiVersion: apps/v1
kind: Deployment
metadata:
  creationTimestamp: null
  labels:
    app: blue
  name: blue
spec:
  replicas: 3
  selector:
    matchLabels:
      app: blue
  strategy: {}
  template:
    metadata:
      creationTimestamp: null
      labels:
        app: blue
    spec:
      containers:
      - image: nginx
        name: nginx
      affinity:
        nodeAffinity:
          requiredDuringSchedulingIgnoredDuringExecution:
            nodeSelectorTerms:
            - matchExpressions:
              - key: color
                operator: In
                values:
                - blue
status: {}
podresources
apiVersion: v1
kind: Pod
metadata:
  creationTimestamp: null
  labels:
    run: nginx
  name: nginx
spec:
  containers:
  - image: nginx
    name: nginx
    ports:
    - containerPort: 80
    resources:
      requests:
        memory: "1Gi"
        cpu: 1
      limits:
        memory: "2Gi"
        cpu: 2
webappdeployment
apiVersion: apps/v1
kind: Deployment
metadata:
  creationTimestamp: null
  labels:
    app: webapp
  name: webapp
spec:
  replicas: 1
  selector:
    matchLabels:
      app: webapp
  strategy: {}
  template:
    metadata:
      creationTimestamp: null
      labels:
        app: webapp
    spec:
      containers:
      - image: nginx
        name: nginx
        resources: {}
status: {}
webappreplicaset
apiVersion: apps/v1
kind: ReplicaSet
metadata:
  labels:
    app: webapp
  name: webapp
spec:
  replicas: 1
  selector:
    matchLabels:
      app: webapp
  template:
    metadata:
      labels:
        app: webapp
    spec:
      containers:
      - image: nginx
        name: nginx
webappdaemonset
apiVersion: apps/v1
kind: DaemonSet
metadata:
  name: webapp-daemon
spec:
  selector:
    matchLabels:
      app: webapp-agent
  template:
    metadata:
      labels:
        app: webapp-agent
    spec:
      containers:
      - image: nginx
        name: nginx
staticpod_busybox
apiVersion: v1
kind: Pod
metadata:
  creationTimestamp: null
  labels:
    run: static-busybox
  name: static-busybox
spec:
  containers:
  - command:
    - sleep
    - "1000"
    image: busybox
    name: static-busybox
    resources: {}
  dnsPolicy: ClusterFirst
  restartPolicy: Never
status: {}
custom_scheduler
apiVersion: v1
kind: Pod
metadata:
  name: nginx
spec:
  schedulerName: my-scheduler
  containers:
  - image: nginx
    name: nginx