Docker - Images
docker pull image:tag
docker images / docker image ls
docker build -t name:tag .
docker build -f Dockerfile.prod -t app .
docker tag img:v1 registry/img:v1
docker push registry/img:v1
docker rmi image:tag
docker image prune -a
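# A typical build-and-publish flow (registry.example.com and the tag are placeholders):
docker build -t app:1.0 .
docker tag app:1.0 registry.example.com/app:1.0
docker push registry.example.com/app:1.0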
Docker - Containers
docker run -d --name web nginx
docker run -d -p 8080:80 nginx
docker run -d -v /host:/container nginx
docker run --rm -it ubuntu bash
docker ps / docker ps -a
docker stop/start/restart name
docker rm name / docker rm -f name
docker logs -f name
docker exec -it name bash
docker inspect name
docker stats
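# Example lifecycle for a throwaway nginx container named web (names and port are illustrative):
docker run -d --name web -p 8080:80 nginx
docker logs -f web         # follow logs (Ctrl+C to detach)
docker exec -it web sh     # shell inside the container
docker stop web && docker rm web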
Docker - Network & Volumes
# Networks
docker network ls
docker network create mynet
docker network connect mynet container
# Volumes
docker volume ls
docker volume create myvol
docker run -v myvol:/data nginx
# Cleanup
docker system prune -a
docker volume prune
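# Containers on the same user-defined network resolve each other by name; a minimal sketch (names mynet and web are placeholders):
docker network create mynet
docker run -d --name web --network mynet nginx
docker run --rm --network mynet busybox ping -c 1 web   # name resolution via the embedded DNS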
Dockerfile
FROM node:18-alpine
WORKDIR /app
COPY package*.json ./
RUN npm ci --omit=dev   # production dependencies only
COPY . .
ENV NODE_ENV=production
EXPOSE 3000
USER node
CMD ["node", "server.js"]
# Multi-stage
FROM node:18 AS builder
WORKDIR /app
COPY . .
RUN npm ci && npm run build
FROM node:18-alpine
WORKDIR /app
COPY --from=builder /app/dist ./
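# Building the image above; --target stops at an intermediate stage, useful for debugging multi-stage builds (tags are arbitrary):
docker build -t app:1.0 .
docker build --target builder -t app:build .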
Docker Compose
version: '3.8'   # obsolete with Compose v2, kept for older tooling
services:
  web:
    build: .
    ports: ["3000:3000"]
    environment:
      - NODE_ENV=production
    depends_on:
      db:
        condition: service_healthy
    networks: [app-net]
  db:
    image: postgres:15
    environment:
      POSTGRES_PASSWORD: example   # required by the postgres image
    volumes: ["pgdata:/var/lib/postgresql/data"]
    networks: [app-net]
    healthcheck:
      test: ["CMD", "pg_isready", "-U", "postgres"]
volumes:
  pgdata:
networks:
  app-net:
docker compose up -d
docker compose down -v
docker compose logs -f
docker compose exec web bash
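# Other frequently used Compose commands, assuming the file above is docker-compose.yml in the current directory:
docker compose up -d --build   # rebuild images before starting
docker compose ps              # services, ports and health status
docker compose config          # validate and print the resolved configuration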
Kubernetes - kubectl Basics
# Context
kubectl config get-contexts
kubectl config use-context ctx
# Info
kubectl get nodes
kubectl get pods [-n namespace]
kubectl get svc,deploy,rs
kubectl describe pod name
kubectl logs pod [-f]
kubectl exec -it pod -- bash
# Apply
kubectl apply -f file.yaml
kubectl delete -f file.yaml
kubectl delete pod name
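# Handy output options (resource names are illustrative):
kubectl get pods -o wide              # node, pod IP, ...
kubectl get deploy web -o yaml        # full manifest as stored in the cluster
kubectl get pods -l app=web           # filter by label
kubectl logs deploy/web --tail=100    # logs from one pod of the deployment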
K8s - Pod & Deployment
# deployment.yaml
apiVersion: apps/v1
kind: Deployment
metadata:
  name: web
spec:
  replicas: 3
  selector:
    matchLabels:
      app: web
  template:
    metadata:
      labels:
        app: web
    spec:
      containers:
        - name: web
          image: nginx:1.25
          ports:
            - containerPort: 80
          resources:
            requests:
              memory: "128Mi"
              cpu: "100m"
            limits:
              memory: "256Mi"
              cpu: "500m"
K8s - Service & Ingress
# service.yaml
apiVersion: v1
kind: Service
metadata:
  name: web-svc
spec:
  selector:
    app: web
  ports:
    - port: 80
      targetPort: 80
  type: ClusterIP   # NodePort, LoadBalancer
# ingress.yaml
apiVersion: networking.k8s.io/v1
kind: Ingress
metadata:
  name: web-ingress
spec:
  rules:
    - host: app.example.com
      http:
        paths:
          - path: /
            pathType: Prefix
            backend:
              service:
                name: web-svc
                port:
                  number: 80
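# Quick checks after applying; port-forward lets you test locally without an ingress controller:
kubectl apply -f service.yaml -f ingress.yaml
kubectl get svc web-svc
kubectl get ingress web-ingress
kubectl port-forward svc/web-svc 8080:80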
K8s - ConfigMap & Secret
# ConfigMap
kubectl create configmap cfg \
--from-literal=KEY=value
kubectl create configmap cfg \
--from-file=config.properties
# Secret
kubectl create secret generic sec \
--from-literal=password=xxx
# Usage in a Pod
envFrom:
  - configMapRef:
      name: cfg
  - secretRef:
      name: sec
# Or a single variable
env:
  - name: DB_PASS
    valueFrom:
      secretKeyRef:
        name: sec
        key: password
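# Inspecting what was created; Secret values are only base64-encoded, not encrypted:
kubectl get configmap cfg -o yaml
kubectl get secret sec -o jsonpath='{.data.password}' | base64 -d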
K8s - Scaling & Updates
# Manual scaling
kubectl scale deploy web --replicas=5
# Autoscaling
kubectl autoscale deploy web \
--min=2 --max=10 --cpu-percent=80
# Rolling update
kubectl set image deploy/web \
web=nginx:1.26
# Rollback
kubectl rollout undo deploy/web
kubectl rollout status deploy/web
kubectl rollout history deploy/web
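# Checking the autoscaler and rolling back to a specific revision (revision 2 is illustrative):
kubectl get hpa
kubectl rollout history deploy/web --revision=2
kubectl rollout undo deploy/web --to-revision=2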
Helm
# Repos
helm repo add bitnami \
https://charts.bitnami.com/bitnami
helm repo update
helm search repo nginx
# Install
helm install myrelease bitnami/nginx
helm install myrelease ./mychart \
-f values.yaml
helm upgrade myrelease ./mychart
helm uninstall myrelease
# Debug
helm list
helm status myrelease
helm template ./mychart
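# Overriding chart values and rolling back (service.type is a value of the bitnami/nginx chart; treat it as illustrative):
helm install myrelease bitnami/nginx --set service.type=LoadBalancer
helm upgrade --install myrelease ./mychart -f values.yaml   # install-or-upgrade, idempotent
helm get values myrelease    # values currently in use
helm rollback myrelease 1    # back to revision 1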
AWS CLI - EC2
# Configuration
aws configure
aws sts get-caller-identity
# EC2
aws ec2 describe-instances
aws ec2 run-instances \
--image-id ami-xxx \
--instance-type t3.micro \
--key-name mykey
aws ec2 start-instances --instance-ids i-xxx
aws ec2 stop-instances --instance-ids i-xxx
aws ec2 terminate-instances --instance-ids i-xxx
# Security Groups
aws ec2 describe-security-groups
aws ec2 authorize-security-group-ingress \
--group-id sg-xxx \
--protocol tcp --port 22 \
--cidr 0.0.0.0/0   # open to all; restrict the CIDR in production
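# Filtering and shaping output with --filters and --query (JMESPath; values are illustrative):
aws ec2 describe-instances \
--filters "Name=instance-state-name,Values=running" \
--query "Reservations[].Instances[].[InstanceId,PublicIpAddress]" \
--output table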
AWS CLI - S3
# Buckets
aws s3 ls
aws s3 mb s3://mybucket
aws s3 rb s3://mybucket --force
# Objects
aws s3 ls s3://bucket/prefix/
aws s3 cp file.txt s3://bucket/
aws s3 cp s3://bucket/file.txt ./
aws s3 sync ./folder s3://bucket/folder
aws s3 rm s3://bucket/file.txt
aws s3 rm s3://bucket/ --recursive
# Presigned URL
aws s3 presign s3://bucket/file \
--expires-in 3600
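# sync with --delete mirrors the local folder and removes remote objects missing locally; preview with --dryrun first:
aws s3 sync ./folder s3://bucket/folder --delete --dryrun
aws s3 sync ./folder s3://bucket/folder --delete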
AWS - Key Services
| Service | Usage |
| EC2 | VMs, compute |
| S3 | Object storage |
| RDS | Managed DB |
| VPC | Virtual network |
| IAM | Identities/access |
| Lambda | Serverless |
| ECS/EKS | Containers |
| CloudWatch | Monitoring |
| Route 53 | DNS |
| ALB/NLB | Load Balancer |
Azure CLI
# Login
az login
az account list
az account set -s "subscription"
# Resource Groups
az group list
az group create -n mygroup -l westeurope
# VMs
az vm list
az vm create -g mygroup -n myvm \
--image Ubuntu2204 --size Standard_B1s
az vm start/stop/delete -g mygroup -n myvm
# Storage
az storage account list
az storage blob upload -c container \
-f file.txt -n file.txt
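# Output formatting and JMESPath queries work much like the AWS CLI:
az vm list -d -o table
az vm list --query "[].{name:name, rg:resourceGroup}" -o table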
Terraform - Basics
# Commands
terraform init # Initialize
terraform plan # Preview
terraform apply # Create
terraform destroy # Delete
terraform fmt # Format
terraform validate # Check syntax
# State
terraform state list
terraform state show resource
terraform import aws_instance.web i-xxx
# Workspaces
terraform workspace list
terraform workspace new dev
terraform workspace select dev
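# A review-friendly workflow saves the plan and applies exactly that plan (the file name tfplan and the variable values are arbitrary):
terraform plan -out=tfplan
terraform apply tfplan
terraform plan -var="env=prod" -var-file="prod.tfvars"   # variable overrides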
Terraform - HCL Syntax
# Provider
provider "aws" {
  region = "eu-west-1"
}
# Resource
resource "aws_instance" "web" {
  ami           = var.ami_id
  instance_type = "t3.micro"
  tags = {
    Name = "web-${var.env}"
  }
}
# Variable
variable "env" {
  type    = string
  default = "dev"
}
# Output
output "ip" {
  value = aws_instance.web.public_ip
}
# Data source
data "aws_ami" "ubuntu" {
  most_recent = true
  owners      = ["099720109477"]   # Canonical
  filter {
    name   = "name"
    values = ["ubuntu/images/hvm-ssd/ubuntu-jammy-22.04-amd64-server-*"]
  }
}
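# Reading a declared output after apply:
terraform output ip
terraform output -raw ip    # raw value, convenient in scripts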
Terraform - Modules & Backend
# Module usage
module "vpc" {
  source = "./modules/vpc"
  cidr   = "10.0.0.0/16"
  env    = var.env
}
output "vpc_id" {
  value = module.vpc.vpc_id
}
# Remote backend (S3)
terraform {
  backend "s3" {
    bucket         = "tf-state-bucket"
    key            = "prod/terraform.tfstate"
    region         = "eu-west-1"
    dynamodb_table = "tf-locks"
  }
}
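# After adding or changing the backend block, re-run init (Terraform prompts to migrate existing state):
terraform init -migrate-state
terraform init -reconfigure    # ignore the previously configured backend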
Ansible - Basics
# Inventory (hosts.ini)
[webservers]
web1 ansible_host=10.0.1.10
web2 ansible_host=10.0.1.11
[dbservers]
db1 ansible_host=10.0.2.10
[all:vars]
ansible_user=ubuntu
ansible_ssh_private_key_file=~/.ssh/key.pem
# Ad-hoc commands
ansible all -i hosts.ini -m ping
ansible webservers -i hosts.ini -m shell -a "uptime"
ansible dbservers -i hosts.ini -m apt -a "name=mysql-server state=present" -b
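# More ad-hoc examples against the inventory above (module arguments are illustrative):
ansible all -i hosts.ini -m setup -a "filter=ansible_distribution*"   # selected facts
ansible webservers -i hosts.ini -m copy -a "src=index.html dest=/var/www/html/" -b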
Ansible - Playbook
# playbook.yml
---
- name: Configure web servers
  hosts: webservers
  become: yes
  vars:
    http_port: 80
  tasks:
    - name: Install nginx
      apt:
        name: nginx
        state: present
        update_cache: yes
    - name: Start nginx
      service:
        name: nginx
        state: started
        enabled: yes
    - name: Copy config
      template:
        src: nginx.conf.j2
        dest: /etc/nginx/nginx.conf
      notify: Restart nginx
  handlers:
    - name: Restart nginx
      service:
        name: nginx
        state: restarted
ansible-playbook -i hosts.ini playbook.yml
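# Useful ansible-playbook flags (dry run, diff, limiting hosts, verbosity):
ansible-playbook -i hosts.ini playbook.yml --check --diff
ansible-playbook -i hosts.ini playbook.yml --limit web1
ansible-playbook -i hosts.ini playbook.yml -vvv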
Target Certifications
| Cert | Focus |
| CKA | Kubernetes Admin |
| CKAD | K8s Developer |
| AWS SAA | Solutions Architect |
| AWS SAP | SA Professional |
| AZ-104 | Azure Admin |
| AZ-305 | Azure Architect |
| Terraform | HashiCorp Associate |
Priority: CKA + AWS SAA or Azure AZ-104
System Architect Training - Phase 3 Cloud & Containers | Printable version