🐳 What is Docker?#
Docker is a containerization platform that allows you to package applications and their dependencies into lightweight, portable containers. It solves the “it works on my machine” problem by ensuring consistency across different environments.
✨ Key Benefits#
- Consistency: Same environment across development, testing, and production
- Portability: Run anywhere Docker is supported
- Efficiency: Lightweight containers share the host OS kernel
- Scalability: Easy to scale applications horizontally
- Isolation: Applications run in isolated environments
🚀 Getting Started#
1. Installation#
# Ubuntu/Debian
# Download Docker's convenience install script, then run it as root.
# (Review the script before running it on production machines.)
curl -fsSL https://get.docker.com -o get-docker.sh
sudo sh get-docker.sh

# macOS (using Homebrew)
# Installs Docker Desktop as a cask (GUI app plus CLI tools).
brew install --cask docker

# Windows
# Download Docker Desktop from docker.com
2. Basic Commands#
# Check Docker version
docker --version

# Run a simple container (pulls the image on first use)
docker run hello-world

# List running containers
docker ps

# List all containers (including stopped)
docker ps -a

# Stop a container (SIGTERM, then SIGKILL after a grace period)
docker stop <container_id>

# Remove a stopped container
docker rm <container_id>
📦 Working with Images#
1. Image Management#
# Pull an image from Docker Hub
docker pull nginx:latest

# List local images
docker images

# Remove a local image
docker rmi <image_id>

# Search Docker Hub for images
docker search node

# Inspect an image (full JSON metadata: layers, env, entrypoint, ...)
docker inspect nginx:latest
2. Building Custom Images#
# Dockerfile
FROM node:18-alpine

# Set working directory
WORKDIR /app

# Copy package files first to leverage layer caching
COPY package*.json ./

# Install production dependencies only.
# --omit=dev replaces the deprecated --only=production flag (npm 8+).
RUN npm ci --omit=dev

# Copy application code
COPY . .

# Expose port
EXPOSE 3000

# Set environment variables
ENV NODE_ENV=production

# Create non-root user (single RUN keeps the layer count down)
RUN addgroup -g 1001 -S nodejs && \
    adduser -S nextjs -u 1001

# Change ownership so the app can read its files
RUN chown -R nextjs:nodejs /app
USER nextjs

# Start the application
CMD ["npm", "start"]
3. Multi-stage Builds#
# Multi-stage build for smaller production image
FROM node:18-alpine AS builder
WORKDIR /app
COPY package*.json ./
RUN npm ci
COPY . .
RUN npm run build

# Production stage
FROM node:18-alpine AS production
WORKDIR /app

# Install only production dependencies.
# --omit=dev replaces the deprecated --only=production flag (npm 8+).
COPY package*.json ./
RUN npm ci --omit=dev && npm cache clean --force

# Copy built application from builder stage
COPY --from=builder /app/dist ./dist
COPY --from=builder /app/public ./public

# Create non-root user and hand over ownership
RUN addgroup -g 1001 -S nodejs && \
    adduser -S nextjs -u 1001
RUN chown -R nextjs:nodejs /app
USER nextjs

EXPOSE 3000
CMD ["node", "dist/index.js"]
🏗️ Docker Compose#
1. Basic Compose File#
# docker-compose.yml
# (indentation restored — the flattened version was not valid YAML)
version: '3.8'

services:
  web:
    build: .
    ports:
      - "3000:3000"
    environment:
      - NODE_ENV=production
    depends_on:
      - database
      - redis

  database:
    image: postgres:15-alpine
    environment:
      POSTGRES_DB: myapp
      POSTGRES_USER: user
      POSTGRES_PASSWORD: password
    volumes:
      - postgres_data:/var/lib/postgresql/data
    ports:
      - "5432:5432"

  redis:
    image: redis:7-alpine
    ports:
      - "6379:6379"

  nginx:
    image: nginx:alpine
    ports:
      - "80:80"
    volumes:
      - ./nginx.conf:/etc/nginx/nginx.conf
    depends_on:
      - web

# Named volume keeps Postgres data across container restarts
volumes:
  postgres_data:
2. Advanced Compose Configuration#
# docker-compose.prod.yml
# (indentation restored — the flattened version was not valid YAML)
version: '3.8'

services:
  web:
    build:
      context: .
      dockerfile: Dockerfile.prod
    restart: unless-stopped
    environment:
      - NODE_ENV=production
      - DATABASE_URL=${DATABASE_URL}
      - REDIS_URL=${REDIS_URL}
    depends_on:
      - database
      - redis
    networks:
      - app-network
    # deploy: is honored by Docker Swarm (docker stack deploy)
    deploy:
      replicas: 3
      resources:
        limits:
          memory: 512M
        reservations:
          memory: 256M

  database:
    image: postgres:15-alpine
    restart: unless-stopped
    environment:
      POSTGRES_DB: ${POSTGRES_DB}
      POSTGRES_USER: ${POSTGRES_USER}
      POSTGRES_PASSWORD: ${POSTGRES_PASSWORD}
    volumes:
      - postgres_data:/var/lib/postgresql/data
      - ./backups:/backups
    networks:
      - app-network
    deploy:
      resources:
        limits:
          memory: 1G

  redis:
    image: redis:7-alpine
    restart: unless-stopped
    # appendonly gives Redis durable persistence in /data
    command: redis-server --appendonly yes
    volumes:
      - redis_data:/data
    networks:
      - app-network

  nginx:
    image: nginx:alpine
    restart: unless-stopped
    ports:
      - "80:80"
      - "443:443"
    volumes:
      - ./nginx.conf:/etc/nginx/nginx.conf
      - ./ssl:/etc/nginx/ssl
    depends_on:
      - web
    networks:
      - app-network

volumes:
  postgres_data:
  redis_data:

networks:
  app-network:
    driver: bridge
🔧 Best Practices#
1. Security Best Practices#
# Use specific base image tags
# (for fully reproducible builds, pin the digest as well:
#  node:18.16.0-alpine3.18@sha256:...)
FROM node:18.16.0-alpine3.18

# Run as non-root user
RUN addgroup -g 1001 -S nodejs
RUN adduser -S nextjs -u 1001

# Don't run as root
USER nextjs

# Use multi-stage builds to reduce attack surface
FROM node:18-alpine AS builder
# ... build steps

FROM node:18-alpine AS production

# Copy only necessary files
COPY --from=builder /app/dist ./dist
2. Performance Optimization#
# Use .dockerignore to exclude unnecessary files
# .dockerignore
node_modules
npm-debug.log
.git
.gitignore
README.md
.env
.nyc_output
coverage
.coverage

# Leverage Docker layer caching
FROM node:18-alpine

# Copy package files first (changes less frequently)
COPY package*.json ./
# --omit=dev replaces the deprecated --only=production flag (npm 8+)
RUN npm ci --omit=dev

# Copy source code last (changes more frequently)
COPY . .

# Use specific COPY commands instead of copying everything
COPY src/ ./src/
COPY public/ ./public/
3. Health Checks#
# Add health check to Dockerfile
# NOTE(review): curl is not included in alpine-based images — install it
# first (apk add --no-cache curl) or use busybox wget instead; confirm
# against the base image this snippet is meant for.
HEALTHCHECK --interval=30s --timeout=3s --start-period=5s --retries=3 \
CMD curl -f http://localhost:3000/health || exit 1
# Health check in docker-compose.yml
# (indentation restored — the flattened version was not valid YAML)
services:
  web:
    build: .
    healthcheck:
      test: ["CMD", "curl", "-f", "http://localhost:3000/health"]
      interval: 30s
      timeout: 10s
      retries: 3
      start_period: 40s
🚀 Production Deployment#
1. Container Orchestration with Docker Swarm#
# Initialize Docker Swarm (makes this node a manager)
docker swarm init

# Create a stack from a compose file
docker stack deploy -c docker-compose.prod.yml myapp

# Scale services
docker service scale myapp_web=5

# Update service (rolling update to a new image)
docker service update --image myapp:v2 myapp_web
2. Kubernetes Deployment#
# k8s-deployment.yaml
# (indentation restored — the flattened version was not valid YAML)
apiVersion: apps/v1
kind: Deployment
metadata:
  name: myapp
spec:
  replicas: 3
  selector:
    matchLabels:
      app: myapp
  template:
    metadata:
      labels:
        app: myapp
    spec:
      containers:
        - name: myapp
          # NOTE: prefer an immutable tag over :latest in real deployments
          image: myapp:latest
          ports:
            - containerPort: 3000
          env:
            - name: NODE_ENV
              value: "production"
          resources:
            requests:
              memory: "256Mi"
              cpu: "250m"
            limits:
              memory: "512Mi"
              cpu: "500m"
          livenessProbe:
            httpGet:
              path: /health
              port: 3000
            initialDelaySeconds: 30
            periodSeconds: 10
          readinessProbe:
            httpGet:
              path: /ready
              port: 3000
            initialDelaySeconds: 5
            periodSeconds: 5
---
apiVersion: v1
kind: Service
metadata:
  name: myapp-service
spec:
  selector:
    app: myapp
  ports:
    - protocol: TCP
      port: 80
      targetPort: 3000
  type: LoadBalancer
🔍 Monitoring and Logging#
1. Container Logging#
# docker-compose.yml with logging
# (indentation restored — the flattened version was not valid YAML)
services:
  web:
    build: .
    logging:
      driver: "json-file"
      options:
        # Rotate logs: at most 3 files of 10 MB each per container
        max-size: "10m"
        max-file: "3"
2. Health Monitoring#
// health-check.js
// (formatting restored — indentation was stripped by extraction)
const express = require('express');
const app = express();

// Liveness endpoint: reports 200 while the process is up.
app.get('/health', (req, res) => {
  res.status(200).json({
    status: 'healthy',
    timestamp: new Date().toISOString(),
    uptime: process.uptime()
  });
});

// Readiness endpoint: 200 only when dependencies are reachable,
// 503 otherwise so the orchestrator stops routing traffic here.
app.get('/ready', (req, res) => {
  // Check if application is ready to serve requests
  const isReady = checkDatabaseConnection() && checkRedisConnection();
  if (isReady) {
    res.status(200).json({ status: 'ready' });
  } else {
    res.status(503).json({ status: 'not ready' });
  }
});

function checkDatabaseConnection() {
  // Implement database health check
  return true;
}

function checkRedisConnection() {
  // Implement Redis health check
  return true;
}

app.listen(3000);
🛠️ Development Workflow#
1. Development Environment#
# docker-compose.dev.yml
# (indentation restored — the flattened version was not valid YAML)
version: '3.8'

services:
  web:
    build:
      context: .
      dockerfile: Dockerfile.dev
    ports:
      - "3000:3000"
    volumes:
      # Bind-mount source for live reload; anonymous volume keeps the
      # container's node_modules from being shadowed by the host's.
      - .:/app
      - /app/node_modules
    environment:
      - NODE_ENV=development
    command: npm run dev

  database:
    image: postgres:15-alpine
    environment:
      POSTGRES_DB: myapp_dev
      POSTGRES_USER: dev
      POSTGRES_PASSWORD: dev
    ports:
      - "5432:5432"
    volumes:
      - postgres_dev_data:/var/lib/postgresql/data

volumes:
  postgres_dev_data:
2. CI/CD Pipeline#
# .github/workflows/docker.yml
# (indentation restored — the flattened version was not valid YAML)
name: Docker Build and Push

on:
  push:
    branches: [main]
  pull_request:
    branches: [main]

jobs:
  build:
    runs-on: ubuntu-latest
    steps:
      - uses: actions/checkout@v3

      - name: Set up Docker Buildx
        uses: docker/setup-buildx-action@v2

      - name: Login to Docker Hub
        uses: docker/login-action@v2
        with:
          username: ${{ secrets.DOCKER_USERNAME }}
          password: ${{ secrets.DOCKER_PASSWORD }}

      - name: Build and push
        uses: docker/build-push-action@v4
        with:
          context: .
          push: true
          tags: |
            myapp:latest
            myapp:${{ github.sha }}
          # Reuse layers via the GitHub Actions cache backend
          cache-from: type=gha
          cache-to: type=gha,mode=max
📚 Common Patterns#
1. Database Migrations#
# Dockerfile with migration support
FROM node:18-alpine
WORKDIR /app
COPY package*.json ./
RUN npm ci
COPY . .

# Create migration script.
# printf is used instead of echo: POSIX sh does not guarantee that echo
# interprets \n escapes, so the script could end up on a single line.
RUN printf '#!/bin/sh\nnpm run migrate && npm start\n' > /app/start.sh
RUN chmod +x /app/start.sh

# Run migrations, then start the app
CMD ["/app/start.sh"]
2. Configuration Management#
# docker-compose.yml with config management
# (indentation restored — the flattened version was not valid YAML)
services:
  web:
    build: .
    environment:
      # Falls back to "development" when NODE_ENV is unset
      - NODE_ENV=${NODE_ENV:-development}
    env_file:
      - .env
    configs:
      - app_config
    secrets:
      - db_password

configs:
  app_config:
    file: ./config/app.json

secrets:
  db_password:
    file: ./secrets/db_password.txt
🎯 Troubleshooting#
1. Common Issues#
# Container won't start — check its logs first
docker logs <container_id>

# Debug container (interactive shell in a throwaway container; --rm
# deletes it on exit)
docker run -it --rm <image_name> /bin/sh

# Check container resources (live CPU/memory/network usage)
docker stats

# Inspect container configuration
docker inspect <container_id>

# Check container processes
docker exec -it <container_id> ps aux
2. Performance Issues#
# Monitor container performance (one snapshot instead of a live stream)
docker stats --no-stream

# Check container resource usage from inside the container
docker exec <container_id> top

# Analyze container layers (size and command per layer)
docker history <image_name>

# Check image size
docker images --format "table {{.Repository}}\t{{.Tag}}\t{{.Size}}"
🎉 Conclusion#
Docker containerization is a powerful tool for modern application development and deployment. By following these best practices and patterns, you can create reliable, scalable, and maintainable containerized applications.
Key takeaways:
- Use multi-stage builds for smaller images
- Implement proper health checks
- Follow security best practices
- Monitor and log your containers
- Use Docker Compose for local development
- Consider orchestration for production
Happy containerizing! 🐳