KodeKloud labs
demolab1
# demo_lab1.sh
root@controlplane:~# kubectl get deploy -n admin2406
NAME      READY   UP-TO-DATE   AVAILABLE   AGE
deploy1   1/1     1            1           22m
deploy2   1/1     1            1           22m
deploy3   1/1     1            1           22m
deploy4   1/1     1            1           22m
deploy5   1/1     1            1           22m
root@controlplane:~#
kubectl get deploy -n admin2406 -o custom-columns=DEPLOYMENT:.metadata.name,CONTAINER_IMAGE:.spec.template.spec.containers[].image
root@controlplane:~# kubectl get deploy -n admin2406 -o custom-columns=DEPLOYMENT:.metadata.name,CONTAINER_IMAGE:.spec.template.spec.containers[].image,READY_REPLICAS:.status.readyReplicas,NAMESPACE:.metadata.namespace --sort-by=.metadata.name
DEPLOYMENT   CONTAINER_IMAGE   READY_REPLICAS   NAMESPACE
deploy1      nginx             1                admin2406
deploy2      nginx:alpine      1                admin2406
deploy3      nginx:1.16        1                admin2406
deploy4      nginx:1.17        1                admin2406
deploy5      nginx:latest      1                admin2406
root@controlplane:~#
root@controlplane:~# kubectl cluster-info --kubeconfig=/root/CKA/admin.kubeconfig
Kubernetes control plane is running at https://controlplane:6443
KubeDNS is running at https://controlplane:6443/api/v1/namespaces/kube-system/services/kube-dns:dns/proxy
To further debug and diagnose cluster problems, use 'kubectl cluster-info dump'.
root@controlplane:~#
root@controlplane:~# kubectl create deploy nginx-deploy --image=nginx:1.16 --replicas=1
deployment.apps/nginx-deploy created
root@controlplane:~#
root@controlplane:~# kubectl set image deployment/nginx-deploy nginx=nginx:1.17 --record
deployment.apps/nginx-deploy image updated
root@controlplane:~#
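A quick optional check that the rollout finished:
kubectl rollout status deployment/nginx-deploy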
A new deployment called alpha-mysql has been deployed in the alpha namespace. However, the pods are not running.
Troubleshoot and fix the issue.
The deployment should mount the persistent volume alpha-pv at /var/lib/mysql and should set the
environment variable MYSQL_ALLOW_EMPTY_PASSWORD=1 to allow an empty root password.
Important: Do not alter the persistent volume.
root@controlplane:~# cat pvc.yaml
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
  name: mysql-alpha-pvc
  namespace: alpha
spec:
  accessModes:
  - ReadWriteOnce
  resources:
    requests:
      storage: 1Gi
  storageClassName: slow
root@controlplane:~#
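For reference, a minimal sketch of the fixed alpha-mysql deployment (the image tag and label keys are illustrative assumptions; the PVC name matches pvc.yaml above):
apiVersion: apps/v1
kind: Deployment
metadata:
  name: alpha-mysql
  namespace: alpha
spec:
  replicas: 1
  selector:
    matchLabels:
      app: alpha-mysql
  template:
    metadata:
      labels:
        app: alpha-mysql
    spec:
      containers:
      - name: mysql
        image: mysql:5.6        # assumed tag
        env:
        - name: MYSQL_ALLOW_EMPTY_PASSWORD
          value: "1"
        volumeMounts:
        - name: mysql-data
          mountPath: /var/lib/mysql
      volumes:
      - name: mysql-data
        persistentVolumeClaim:
          claimName: mysql-alpha-pvc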
export ETCDCTL_API=3
etcdctl snapshot save --cacert=/etc/kubernetes/pki/etcd/ca.crt --cert=/etc/kubernetes/pki/etcd/server.crt --key=/etc/kubernetes/pki/etcd/server.key --endpoints=127.0.0.1:2379 /opt/etcd-backup.db
Create a pod called secret-1401 in the admin1401 namespace using the busybox image.
The container within the pod should be called secret-admin and should sleep for 4800 seconds.
The container should mount a read-only secret volume called secret-volume at the path /etc/secret-volume.
The secret being mounted has already been created for you and is called dotfile-secret.
root@controlplane:~# kubectl get secrets -n admin1401
NAME                  TYPE                                  DATA   AGE
default-token-wz8x2   kubernetes.io/service-account-token   3      49m
dotfile-secret        Opaque                                1      49m
root@controlplane:~#
root@controlplane:~# kubectl run secret-1401 --image=busybox --dry-run=client -o yaml --command -- sleep 4800
apiVersion: v1
kind: Pod
metadata:
  creationTimestamp: null
  labels:
    run: secret-1401
  name: secret-1401
spec:
  containers:
  - command:
    - sleep
    - "4800"
    image: busybox
    name: secret-1401
    resources: {}
  dnsPolicy: ClusterFirst
  restartPolicy: Always
status: {}
root@controlplane:~#
root@controlplane:~# kubectl apply -f pod.yaml
pod/secret-1401 created
root@controlplane:~# cat pod.yaml
apiVersion: v1
kind: Pod
metadata:
  creationTimestamp: null
  labels:
    run: secret-1401
  name: secret-1401
  namespace: admin1401
spec:
  containers:
  - command:
    - sleep
    - "4800"
    image: busybox
    name: secret-1401
    volumeMounts:
    - name: secret-volume
      readOnly: true
      mountPath: "/etc/secret-volume"
  volumes:
  - name: secret-volume
    secret:
      secretName: dotfile-secret
root@controlplane:~#
root@controlplane:~# kubectl get deploy -o custom-columns=DEPLOYMENT:'.metadata.name',CONTAINER_IMAGE:.spec.template.spec.containers[].image,READY_REPLICAS:'.status.readyReplicas',NAMESPACE:'.metadata.namespace' --sort-by '.metadata.name' -n admin2406 > /opt/admin2406_data
root@controlplane:~#
demolab2
root@controlplane:/etc/kubernetes/manifests# kubectl get nodes -o jsonpath='{.items[*].status.nodeInfo}' > /opt/outputs/nodes_os_x43kj56.txt
root@controlplane:/etc/kubernetes/manifests#
root@controlplane:/etc/kubernetes/manifests# kubectl apply -f pv.yaml
persistentvolume/pv-analytics created
root@controlplane:/etc/kubernetes/manifests# cat pv.yaml
apiVersion: v1
kind: PersistentVolume
metadata:
  name: pv-analytics
spec:
  capacity:
    storage: 100Mi
  accessModes:
  - ReadWriteMany
  hostPath:
    path: /pv/data-analytics
root@controlplane:/etc/kubernetes/manifests#
podsecrets
apiVersion: v1
kind: Pod
metadata:
  labels:
    run: secret-1401
  name: secret-1401
  namespace: admin1401
spec:
  containers:
  - command:
    - sleep
    args:
    - "4800"
    image: busybox
    name: secret-1401
    volumeMounts:
    - name: secret-volume
      readOnly: true
      mountPath: "/etc/secret-volume"
  volumes:
  - name: secret-volume
    secret:
      secretName: dotfile-secret
pvc
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
  name: my-pvc
spec:
  volumeName: pv-1
  accessModes:
  - ReadWriteOnce
  resources:
    requests:
      storage: 5Mi
csr
apiVersion: certificates.k8s.io/v1
kind: CertificateSigningRequest
metadata:
  name: john
spec:
  request: LS0tLS1CRUdJTiBDRVJUSUZJQ0FURSBSRVFVRVNULS0tLS0KTUlJQ1ZEQ0NBVHdDQVFBd0R6RU5NQXNHQTFVRUF3d0VhbTlvYmpDQ0FTSXdEUVlKS29aSWh2Y05BUUVCQlFBRApnZ0VQQURDQ0FRb0NnZ0VCQU1aV0t3d1EzQzF4UjhxSjFvMWZGWHIzSjlMaFozNEkxVHBUdWJIa1ozUnhrTExTCjQxaEVDZkM5WG5USTBraDlaWHhZRFMwWThTcVBkYWNsWlhXaC9YdEh2WCtCSTJISVlxRTBaekJWTVJYMFdRbmEKS2l1V2ZrbFYrN090OGpvbmZCUGJWcmJZbGNjQmdzeG43bmo5T1AxVXFkSk9zUUQrakU4Q1FVZE1nb1NGK1p5Kwp3NGQvaVJkRHRXeGwvUDVhQ0VPb2FVK1h0SnM2VWFNVDZ4cEdGejhWUHdBY0Rrc1lOS2NUaktqalFwdHBWSGZYCkJSbmlIbHRkNnp1cCtWOVViMHpHVTZHYkNoL01qbjVGUHd4WEdWTGRBYTVMOUpITGlEUk93RElmR1dNdVhjb0gKUUtpMVFWZ2M2R1dXQmhYTDhtK1dwWkNsVUxLS1RDM1R4ai94MUtjQ0F3RUFBYUFBTUEwR0NTcUdTSWIzRFFFQgpDd1VBQTRJQkFRQ0s3cGJSUC9SVThhR3lHb3p2WVhFTGxxTVd5NmQzb2pKUkdVcG9qV2tTNHJqcWUxdjR6MFRLCklzZ2J5ZHJscHZ2aDF1T1hkRFhKdkVES1VaZmVLT2hacnZkOHBtUlIzaTluY1FHRnJPdFZIUThZS08yNlRjZ0IKQ2gzbVB0cUFTZkErNWxZSDZId0NOdlZ1LzRvUmFDODJ6QVN6SDZJZUd0Uno4UVpMS09pQjU5UUNNd3ZKOUJNQQo1a1BkQkt3R1ZXQXU3WDR2MXRQSEtldFdXZzhGVXJpeXQ5aE82V1BEUWkvZzVxSVdlaS95VVMwb3BJR0xRTVY0CitUZUdscTVhemFZUDNsR3dRZUUyWFVCdG4yTDZBTTBZckUveXpwcDBHVWJDa20yb1N3TlN4bENLRW9oQVE0dWQKMitYZXZOTTg3QzRGU1J2bzFPQ2tiNTE2UXlXYXVQdEcKLS0tLS1FTkQgQ0VSVElGSUNBVEUgUkVRVUVTVC0tLS0tCg==
  signerName: kubernetes.io/kube-apiserver-client
  usages:
  - client auth
certsigningrequest
---
apiVersion: certificates.k8s.io/v1
kind: CertificateSigningRequest
metadata:
  name: john-developer
spec:
  signerName: kubernetes.io/kube-apiserver-client
  request: LS0tLS1CRUdJTiBDRVJUSUZJQ0FURSBSRVFVRVNULS0tLS0KTUlJQ1ZEQ0NBVHdDQVFBd0R6RU5NQXNHQTFVRUF3d0VhbTlvYmpDQ0FTSXdEUVlKS29aSWh2Y05BUUVCQlFBRApnZ0VQQURDQ0FRb0NnZ0VCQUt2Um1tQ0h2ZjBrTHNldlF3aWVKSzcrVVdRck04ZGtkdzkyYUJTdG1uUVNhMGFPCjV3c3cwbVZyNkNjcEJFRmVreHk5NUVydkgyTHhqQTNiSHVsTVVub2ZkUU9rbjYra1NNY2o3TzdWYlBld2k2OEIKa3JoM2prRFNuZGFvV1NPWXBKOFg1WUZ5c2ZvNUpxby82YU92czFGcEc3bm5SMG1JYWpySTlNVVFEdTVncGw4bgpjakY0TG4vQ3NEb3o3QXNadEgwcVpwc0dXYVpURTBKOWNrQmswZWhiV2tMeDJUK3pEYzlmaDVIMjZsSE4zbHM4CktiSlRuSnY3WDFsNndCeTN5WUFUSXRNclpUR28wZ2c1QS9uREZ4SXdHcXNlMTdLZDRaa1k3RDJIZ3R4UytkMEMKMTNBeHNVdzQyWVZ6ZzhkYXJzVGRMZzcxQ2NaanRxdS9YSmlyQmxVQ0F3RUFBYUFBTUEwR0NTcUdTSWIzRFFFQgpDd1VBQTRJQkFRQ1VKTnNMelBKczB2czlGTTVpUzJ0akMyaVYvdXptcmwxTGNUTStsbXpSODNsS09uL0NoMTZlClNLNHplRlFtbGF0c0hCOGZBU2ZhQnRaOUJ2UnVlMUZnbHk1b2VuTk5LaW9FMnc3TUx1a0oyODBWRWFxUjN2SSsKNzRiNnduNkhYclJsYVhaM25VMTFQVTlsT3RBSGxQeDNYVWpCVk5QaGhlUlBmR3p3TTRselZuQW5mNm96bEtxSgpvT3RORStlZ2FYWDdvc3BvZmdWZWVqc25Yd0RjZ05pSFFTbDgzSkljUCtjOVBHMDJtNyt0NmpJU3VoRllTVjZtCmlqblNucHBKZWhFUGxPMkFNcmJzU0VpaFB1N294Wm9iZDFtdWF4bWtVa0NoSzZLeGV0RjVEdWhRMi80NEMvSDIKOWk1bnpMMlRST3RndGRJZjAveUF5N05COHlOY3FPR0QKLS0tLS1FTkQgQ0VSVElGSUNBVEUgUkVRVUVTVC0tLS0tCg==
  usages:
  - digital signature
  - key encipherment
  - client auth
  groups:
  - system:authenticated
securitycontext
apiVersion: v1
kind: Pod
metadata:
  creationTimestamp: null
  labels:
    run: super-user-pod
  name: super-user-pod
spec:
  containers:
  - command:
    - sleep
    - "4800"
    image: busybox:1.28
    name: super-user-pod
    securityContext:
      capabilities:
        add: ["SYS_TIME"]
  dnsPolicy: ClusterFirst
  restartPolicy: Always
demolab3
Take a backup of the etcd cluster and save it to /opt/etcd-backup.db
root@controlplane:~# ETCDCTL_API=3 etcdctl --endpoints=https://127.0.0.1:2379 --cacert=/etc/kubernetes/pki/etcd/ca.crt --cert=/etc/kubernetes/pki/etcd/server.crt --key=/etc/kubernetes/pki/etcd/server.key snapshot save /opt/etcd-backup.db
Snapshot saved at /opt/etcd-backup.db
root@controlplane:~#
root@controlplane:~# ETCDCTL_API=3 etcdctl --endpoints=https://127.0.0.1:2379 --cacert=/etc/kubernetes/pki/etcd/ca.crt --cert=/etc/kubernetes/pki/etcd/server.crt --key=/etc/kubernetes/pki/etcd/server.key snapshot status /opt/etcd-backup.db
9939d35e, 1775, 861, 2.4 MB
root@controlplane:~#
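The four comma-separated fields are the snapshot hash, revision, total keys and total size. etcdctl can also print them with headers (a sketch using the standard --write-out flag):
ETCDCTL_API=3 etcdctl --write-out=table snapshot status /opt/etcd-backup.db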
Create a Pod called redis-storage with image redis:alpine, using a Volume of type emptyDir that lasts for the life of the Pod (a sketch follows the checklist).
Pod named 'redis-storage' created
Pod 'redis-storage' uses Volume type of emptyDir
Pod 'redis-storage' uses volumeMount with mountPath = /data/redis
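A minimal sketch that satisfies these checks (the volume name is an illustrative choice):
apiVersion: v1
kind: Pod
metadata:
  name: redis-storage
spec:
  containers:
  - name: redis-storage
    image: redis:alpine
    volumeMounts:
    - name: data
      mountPath: /data/redis
  volumes:
  - name: data
    emptyDir: {}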
A pod definition file is created at /root/CKA/use-pv.yaml.
Make use of this manifest file and mount the persistent volume called pv-1.
Ensure the pod is running and the PV is bound.
mountPath: /data
persistentVolumeClaim Name: my-pvc
root@controlplane:~# kubectl get pv
NAME   CAPACITY   ACCESS MODES   RECLAIM POLICY   STATUS   CLAIM            STORAGECLASS   REASON   AGE
pv-1   10Mi       RWO            Retain           Bound    default/my-pvc                           12m
root@controlplane:~#
# -------------------------------------------------------------------------------------------
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
  name: my-pvc
spec:
  volumeName: pv-1
  accessModes:
  - ReadWriteOnce
  resources:
    requests:
      storage: 5Mi
# ---------------------------------------------------------------------------------------------
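For the pod side, /root/CKA/use-pv.yaml needs a volume that references this claim. A sketch of the expected wiring (container name and image are assumptions; the actual file is provided by the lab):
apiVersion: v1
kind: Pod
metadata:
  name: use-pv
spec:
  containers:
  - name: use-pv          # assumed container name
    image: nginx          # assumed image
    volumeMounts:
    - name: data
      mountPath: /data
  volumes:
  - name: data
    persistentVolumeClaim:
      claimName: my-pvc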
root@controlplane:~# vim pvc.yaml
root@controlplane:~# kubectl apply -f pvc.yaml
persistentvolumeclaim/my-pvc created
root@controlplane:~# kubectl apply -f /root/CKA/use-pv.yaml
pod/use-pv created
root@controlplane:~# kubectl get pvc
NAME     STATUS   VOLUME   CAPACITY   ACCESS MODES   STORAGECLASS   AGE
my-pvc   Bound    pv-1     10Mi       RWO                           35s
root@controlplane:~# kubectl create deployment nginx-deploy --image=nginx:1.16 --replicas=1
deployment.apps/nginx-deploy created
root@controlplane:~# kubectl set image deploy/nginx-deploy nginx=nginx:1.17 --record
deployment.apps/nginx-deploy image updated
root@controlplane:~#
Create a new user called john. Grant him access to the cluster.
John should have permission to create, list, get, update and delete pods in the development namespace.
The private key exists in the location: /root/CKA/john.key and csr at /root/CKA/john.csr.
Important Note: As of kubernetes 1.19, the CertificateSigningRequest object expects a signerName.
root@controlplane:~/CKA# cat john.csr | base64 -w 0
LS0tLS1CRUdJTiBDRVJUSUZJQ0FURSBSRVFVRVNULS0tLS0KTUlJQ1ZEQ0NBVHdDQVFBd0R6RU5NQXNHQTFVRUF3d0VhbTlvYmpDQ0FTSXdEUVlKS29aSWh2Y05BUUVCQlFBRApnZ0VQQURDQ0FRb0NnZ0VCQU1aV0t3d1EzQzF4UjhxSjFvMWZGWHIzSjlMaFozNEkxVHBUdWJIa1ozUnhrTExTCjQxaEVDZkM5WG5USTBraDlaWHhZRFMwWThTcVBkYWNsWlhXaC9YdEh2WCtCSTJISVlxRTBaekJWTVJYMFdRbmEKS2l1V2ZrbFYrN090OGpvbmZCUGJWcmJZbGNjQmdzeG43bmo5T1AxVXFkSk9zUUQrakU4Q1FVZE1nb1NGK1p5Kwp3NGQvaVJkRHRXeGwvUDVhQ0VPb2FVK1h0SnM2VWFNVDZ4cEdGejhWUHdBY0Rrc1lOS2NUaktqalFwdHBWSGZYCkJSbmlIbHRkNnp1cCtWOVViMHpHVTZHYkNoL01qbjVGUHd4WEdWTGRBYTVMOUpITGlEUk93RElmR1dNdVhjb0gKUUtpMVFWZ2M2R1dXQmhYTDhtK1dwWkNsVUxLS1RDM1R4ai94MUtjQ0F3RUFBYUFBTUEwR0NTcUdTSWIzRFFFQgpDd1VBQTRJQkFRQ0s3cGJSUC9SVThhR3lHb3p2WVhFTGxxTVd5NmQzb2pKUkdVcG9qV2tTNHJqcWUxdjR6MFRLCklzZ2J5ZHJscHZ2aDF1T1hkRFhKdkVES1VaZmVLT2hacnZkOHBtUlIzaTluY1FHRnJPdFZIUThZS08yNlRjZ0IKQ2gzbVB0cUFTZkErNWxZSDZId0NOdlZ1LzRvUmFDODJ6QVN6SDZJZUd0Uno4UVpMS09pQjU5UUNNd3ZKOUJNQQo1a1BkQkt3R1ZXQXU3WDR2MXRQSEtldFdXZzhGVXJpeXQ5aE82V1BEUWkvZzVxSVdlaS95VVMwb3BJR0xRTVY0CitUZUdscTVhemFZUDNsR3dRZUUyWFVCdG4yTDZBTTBZckUveXpwcDBHVWJDa20yb1N3TlN4bENLRW9oQVE0dWQKMitYZXZOTTg3QzRGU1J2bzFPQ2tiNTE2UXlXYXVQdEcKLS0tLS1FTkQgQ0VSVElGSUNBVEUgUkVRVUVTVC0tLS0tCg==root@controlplane:~/CKA#
root@controlplane:~/CKA#
root@controlplane:~/CKA# cd -
/root
root@controlplane:~# vim csr.yaml
root@controlplane:~# kubectl apply -f csr.yaml
error: error validating "csr.yaml": error validating data: ValidationError(CertificateSigningRequest.spec): unknown field "expirationSeconds" in io.k8s.api.certificates.v1.CertificateSigningRequestSpec; if you choose to ignore these errors, turn validation off with --validate=false
root@controlplane:~# vim csr.yaml
root@controlplane:~# kubectl apply -f csr.yaml
certificatesigningrequest.certificates.k8s.io/john created
root@controlplane:~#
root@controlplane:~# kubectl get csr
NAME        AGE    SIGNERNAME                                    REQUESTOR                 CONDITION
csr-kmm4g   69m    kubernetes.io/kube-apiserver-client-kubelet   system:bootstrap:yfl3p0   Approved,Issued
john        115s   kubernetes.io/kube-apiserver-client           kubernetes-admin          Pending
root@controlplane:~# kubectl certificate approve john
certificatesigningrequest.certificates.k8s.io/john approved
root@controlplane:~#
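To let john actually authenticate, the issued certificate can be pulled from the CSR status and wired into a kubeconfig (a sketch; the output path and the cluster name "kubernetes" are assumptions):
kubectl get csr john -o jsonpath='{.status.certificate}' | base64 -d > /root/CKA/john.crt
kubectl config set-credentials john --client-certificate=/root/CKA/john.crt --client-key=/root/CKA/john.key
kubectl config set-context john --cluster=kubernetes --user=john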
root@controlplane:~# kubectl create deployment nginx-deploy --image=nginx:1.16 --dry-run=client -o yaml > deploy.yaml
root@controlplane:~# kubectl apply -f deploy.yaml --record
deployment.apps/nginx-deploy created
root@controlplane:~# kubectl rollout history deployment nginx-deploy
deployment.apps/nginx-deploy
REVISION  CHANGE-CAUSE
1         kubectl apply --filename=deploy.yaml --record=true
root@controlplane:~# kubectl set image deployment/nginx-deploy nginx=nginx:1.17 --record
deployment.apps/nginx-deploy image updated
root@controlplane:~# kubectl rollout history deployment nginx-deploy
deployment.apps/nginx-deploy
REVISION  CHANGE-CAUSE
1         kubectl apply --filename=deploy.yaml --record=true
2         kubectl set image deployment/nginx-deploy nginx=nginx:1.17 --record=true
root@controlplane:~#
root@controlplane:~# kubectl apply -f cert.yaml
certificatesigningrequest.certificates.k8s.io/john-developer created
root@controlplane:~# kubectl certificate approve john-developer
certificatesigningrequest.certificates.k8s.io/john-developer approved
root@controlplane:~# kubectl get csr
NAME             AGE   SIGNERNAME                                    REQUESTOR                  CONDITION
csr-flv2p        61m   kubernetes.io/kube-apiserver-client-kubelet   system:node:controlplane   Approved,Issued
csr-z97m2        60m   kubernetes.io/kube-apiserver-client-kubelet   system:bootstrap:ppb9qk    Approved,Issued
john             32m   kubernetes.io/kube-apiserver-client           kubernetes-admin           Approved,Issued
john-developer   40s   kubernetes.io/kube-apiserver-client           kubernetes-admin           Approved,Issued
root@controlplane:~#
root@controlplane:~# kubectl create role developer --resource=pods --verb=create,list,get,update,delete --namespace=development
role.rbac.authorization.k8s.io/developer created
root@controlplane:~# kubectl create rolebinding developer-role-binding --role=developer --user=john --namespace=development
rolebinding.rbac.authorization.k8s.io/developer-role-binding created
root@controlplane:~# kubectl auth can-i update pods --as=john --namespace=development
yes
root@controlplane:~# kubectl auth can-i delete pods --as=john -n development
yes
root@controlplane:~#
kubectl run nginx-resolver --image=nginx
kubectl expose pod nginx-resolver --name=nginx-resolver-service --port=80 --target-port=80 --type=ClusterIP
kubectl run test-nslookup --image=busybox:1.28 --rm -it --restart=Never -- nslookup nginx-resolver-service
kubectl run test-nslookup --image=busybox:1.28 --rm -it --restart=Never -- nslookup nginx-resolver-service > /root/CKA/nginx.svc
Get the IP of the nginx-resolver pod and replace the dots (.) with hyphens (-); that form is used below.
kubectl get pod nginx-resolver -o wide
kubectl run test-nslookup --image=busybox:1.28 --rm -it --restart=Never -- nslookup <P-O-D-I-P.default.pod> > /root/CKA/nginx.pod
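One way to produce the hyphenated form directly (a jsonpath/tr sketch):
kubectl get pod nginx-resolver -o jsonpath='{.status.podIP}' | tr . -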
Run a static pod on node01:
----------------------------->
Check whether the static pod directory /etc/kubernetes/manifests exists on the node; if it is not present, create it.
root@node01:~# mkdir -p /etc/kubernetes/manifests
Add that complete path to the staticPodPath field in the kubelet config.yaml file.
root@node01:~# vi /var/lib/kubelet/config.yaml
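The field to check or add in that file (on kubeadm clusters it is usually already set; if it had to be changed, restart the kubelet with "systemctl restart kubelet"):
staticPodPath: /etc/kubernetes/manifests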
Now move/copy static.yaml into /etc/kubernetes/manifests/.
root@node01:~# cp /root/static.yaml /etc/kubernetes/manifests/
Go back to the controlplane node and check the status of the static pod:
root@node01:~# exit
logout
root@controlplane:~# kubectl get pods
------------------------------------------------------------------------------------------>
root@controlplane:~# kubectl create deployment nginx-deploy --image=nginx:1.16 --replicas=1 --dry-run=client -o yaml > deploy.yaml
root@controlplane:~# kubectl apply -f deploy.yaml --record
deployment.apps/nginx-deploy created
root@controlplane:~# kubectl rollout history deploy nginx-deploy
deployment.apps/nginx-deploy
REVISION  CHANGE-CAUSE
1         kubectl apply --filename=deploy.yaml --record=true
root@controlplane:~#
root@controlplane:~# kubectl get csr
NAME        AGE   SIGNERNAME                                    REQUESTOR                  CONDITION
csr-99qwr   35m   kubernetes.io/kube-apiserver-client-kubelet   system:node:controlplane   Approved,Issued
csr-jdpqv   34m   kubernetes.io/kube-apiserver-client-kubelet   system:bootstrap:riyp9a    Approved,Issued
john        6s    kubernetes.io/kube-apiserver-client           kubernetes-admin           Pending
root@controlplane:~# kubectl certificate approve john
certificatesigningrequest.certificates.k8s.io/john approved
root@controlplane:~# kubectl get cstr
error: the server doesn't have a resource type "cstr"
root@controlplane:~# kubectl get csr
NAME        AGE   SIGNERNAME                                    REQUESTOR                  CONDITION
csr-99qwr   36m   kubernetes.io/kube-apiserver-client-kubelet   system:node:controlplane   Approved,Issued
csr-jdpqv   35m   kubernetes.io/kube-apiserver-client-kubelet   system:bootstrap:riyp9a    Approved,Issued
john        42s   kubernetes.io/kube-apiserver-client           kubernetes-admin           Approved,Issued
root@controlplane:~# kubectl create namespace --development
Error: unknown flag: --development
See 'kubectl create namespace --help' for usage.
root@controlplane:~# kubectl create namespace development
Error from server (AlreadyExists): namespaces "development" already exists
root@controlplane:~# kubectl create role dev_role --verb=list,create,get,update,delete --resource=pods -n development
role.rbac.authorization.k8s.io/dev_role created
root@controlplane:~# kubectl create rolebinding dev_binding --role=dev_role --user=john -n development
rolebinding.rbac.authorization.k8s.io/dev_binding created
root@controlplane:~# kubectl auth can-i delete pods --as=john -n development
yes
root@controlplane:~#
root@controlplane:~# kubectl run testdns --image=busybox:1.28 -it --rm --restart=Never -- nslookup nginx-resolver-service
Server: 10.96.0.10
Address 1: 10.96.0.10 kube-dns.kube-system.svc.cluster.local
Name: nginx-resolver-service
Address 1: 10.106.53.215 nginx-resolver-service.default.svc.cluster.local
pod "testdns" deleted
root@controlplane:~#
root@controlplane:~# kubectl run testdns --image=busybox:1.28 -it --rm --restart=Never -- nslookup nginx-resolver-service > /root/CKA/nginx.svc
root@controlplane:~#
root@controlplane:~# kubectl run testdns --image=busybox:1.28 -it --rm --restart=Never -- nslookup 10-50-192-4.default.pod.cluster.local
Server: 10.96.0.10
Address 1: 10.96.0.10 kube-dns.kube-system.svc.cluster.local
Name: 10-50-192-4.default.pod.cluster.local
Address 1: 10.50.192.4 10-50-192-4.nginx-resolver-service.default.svc.cluster.local
pod "testdns" deleted
root@controlplane:~#
root@controlplane:~# kubectl run testdns --image=busybox:1.28 -it --rm --restart=Never -- nslookup 10-50-192-4.default.pod.cluster.local > /root/CKA/nginx.pod
root@controlplane:~#
podsa
apiVersion: v1
kind: Pod
metadata:
  labels:
    run: pvviewer
  name: pvviewer
spec:
  containers:
  - image: redis
    name: pvviewer
  # Add service account name
  serviceAccountName: pvviewer
podnonroot
apiVersion: v1
kind: Pod
metadata:
  name: non-root-pod
spec:
  securityContext:
    runAsUser: 1000
    fsGroup: 2000
  containers:
  - name: non-root-pod
    image: redis:alpine
demolab4
root@controlplane:~# kubectl taint nodes node01 env_type=production:NoSchedule
node/node01 tainted
root@controlplane:~#
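The taint can be verified on the node:
kubectl describe node node01 | grep -i taint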
root@controlplane:~# kubectl create sa pvviewer
serviceaccount/pvviewer created
root@controlplane:~#
root@controlplane:~# kubectl create clusterrole pvviewer-role --resource=persistentvolumes --verb=list
clusterrole.rbac.authorization.k8s.io/pvviewer-role created
root@controlplane:~#
root@controlplane:~# kubectl create clusterrolebinding pvviewer-role-binding --clusterrole=pvviewer-role --serviceaccount=default:pvviewer
clusterrolebinding.rbac.authorization.k8s.io/pvviewer-role-binding created
root@controlplane:~#
root@controlplane:~# kubectl describe clusterrolebinding pvviewer-role-binding
Name:         pvviewer-role-binding
Labels:       <none>
Annotations:  <none>
Role:
  Kind:  ClusterRole
  Name:  pvviewer-role
Subjects:
  Kind            Name      Namespace
  ----            ----      ---------
  ServiceAccount  pvviewer  default
root@controlplane:~#
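A quick impersonation check that the binding works for the service account:
kubectl auth can-i list persistentvolumes --as=system:serviceaccount:default:pvviewer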
apiVersion: v1
kind: Pod
metadata:
  labels:
    run: pvviewer
  name: pvviewer
spec:
  containers:
  - image: redis
    name: pvviewer
  # Add service account name
  serviceAccountName: pvviewer
----------------------------------------------------------------------------->
List the InternalIP of all nodes of the cluster. Save the result to a file /root/CKA/node_ips.
Answer should be in the format: InternalIP of controlplane<space>InternalIP of node01 (in a single line)
root@controlplane:~# kubectl get nodes -o jsonpath='{.items[*].status.addresses[?(@.type=="InternalIP")].address}'
10.31.48.3 10.31.48.6
root@controlplane:~#
root@controlplane:~# kubectl get nodes -o jsonpath='{.items[*].status.addresses[?(@.type=="InternalIP")]}'
{"address":"10.31.48.3","type":"InternalIP"} {"address":"10.31.48.6","type":"InternalIP"}
root@controlplane:~#
root@controlplane:~# kubectl get nodes -o jsonpath='{.items[*].status.addresses[?(@.type=="InternalIP")].address}' > /root/CKA/node_ips
root@controlplane:~#
Create a pod called multi-pod with two containers.
Container 1: name: alpha, image: nginx
Container 2: name: beta, image: busybox, command: sleep 4800
Environment variables:
Container 1:
name: alpha
Container 2:
name: beta
apiVersion: v1
kind: Pod
metadata:
  name: multi-pod
spec:
  containers:
  - image: nginx
    name: alpha
    env:
    - name: name
      value: alpha
  - image: busybox
    name: beta
    command: ["sleep", "4800"]
    env:
    - name: name
      value: beta
Create a Pod called non-root-pod, image: redis:alpine
runAsUser: 1000
fsGroup: 2000
---
apiVersion: v1
kind: Pod
metadata:
  name: non-root-pod
spec:
  securityContext:
    runAsUser: 1000
    fsGroup: 2000
  containers:
  - name: non-root-pod
    image: redis:alpine
We have deployed a new pod called np-test-1 and a service called np-test-service.
Incoming connections to this service are not working. Troubleshoot and fix it.
Create NetworkPolicy, by the name ingress-to-nptest that allows incoming connections to the service over port 80.
Important: Don't delete any of the currently deployed objects.
Solution manifest to create the network policy ingress-to-nptest:
---
apiVersion: networking.k8s.io/v1
kind: NetworkPolicy
metadata:
  name: ingress-to-nptest
  namespace: default
spec:
  podSelector:
    matchLabels:
      run: np-test-1
  policyTypes:
  - Ingress
  ingress:
  - ports:
    - protocol: TCP
      port: 80
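Once applied, connectivity can be verified from a throwaway pod (a sketch; assumes the service is named np-test-service as stated above):
kubectl run test-np --image=busybox:1.28 --rm -it --restart=Never -- sh -c 'nc -z -v -w 2 np-test-service 80'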
--------------------------------------------------------->
multicontainer_pods
apiVersion: v1
kind: Pod
metadata:
  name: multi-pod
spec:
  containers:
  - image: nginx
    name: alpha
    env:
    - name: name
      value: alpha
  - image: busybox
    name: beta
    command: ["sleep", "4800"]
    env:
    - name: name
      value: beta
ingressnetworkpolicy
apiVersion: networking.k8s.io/v1
kind: NetworkPolicy
metadata:
  name: ingress-to-nptest
  namespace: default
spec:
  podSelector:
    matchLabels:
      run: np-test-1
  policyTypes:
  - Ingress
  ingress:
  - ports:
    - protocol: TCP
      port: 80