automated terminal push
.gitignore (vendored, new file, 328 lines)
@@ -0,0 +1,328 @@
# Enterprise CI/CD Repository .gitignore
# Comprehensive exclusion list for Terraform, Jenkins, and DevOps tools

# ===========================================
# Terraform
# ===========================================

# State files
*.tfstate
*.tfstate.*
*.tfstate.backup

# Plan files
*.tfplan
*.out

# Terraform directories
.terraform/
.terraform.lock.hcl

# Variable files (may contain sensitive data)
terraform.tfvars
*.auto.tfvars
*.auto.tfvars.json

# Override files (environment-specific)
override.tf
override.tf.json
*_override.tf
*_override.tf.json

# Terraform CLI configuration
.terraformrc
terraform.rc

# Backend configuration files (generated by bootstrap)
.backend-config

# ===========================================
# AWS & Cloud
# ===========================================

# AWS credentials and config
.aws/
*.pem
*.p12
*.key
*.crt

# Cloud provider configs
.azure/
.gcp/

# ===========================================
# Jenkins & CI/CD
# ===========================================

# Jenkins workspace
.jenkins/
jenkins/workspace/
jenkins/jobs/

# Pipeline artifacts
*.log
*.tmp
build/
dist/
target/

# SonarQube
.sonar/
.sonarqube/
sonar-project.properties

# Test results and coverage
coverage/
test-results/
*.coverage
junit.xml

# ===========================================
# Development & IDE
# ===========================================

# VSCode
.vscode/
*.code-workspace

# IntelliJ IDEA
.idea/
*.iws
*.iml
*.ipr

# Sublime Text
*.sublime-project
*.sublime-workspace

# Vim
*.swp
*.swo
*~

# Emacs
*~
\#*\#
/.emacs.desktop
/.emacs.desktop.lock
*.elc
auto-save-list
tramp
.\#*

# ===========================================
# Operating System
# ===========================================

# macOS
.DS_Store
.AppleDouble
.LSOverride
Icon?
._*
.DocumentRevisions-V100
.fseventsd
.Spotlight-V100
.TemporaryItems
.Trashes
.VolumeIcon.icns
.com.apple.timemachine.donotpresent

# Windows
Thumbs.db
ehthumbs.db
Desktop.ini
$RECYCLE.BIN/
*.cab
*.msi
*.msm
*.msp

# Linux
*~
.directory

# ===========================================
# Languages & Frameworks
# ===========================================

# Node.js
node_modules/
npm-debug.log*
yarn-debug.log*
yarn-error.log*
.npm
.yarn-integrity

# Python
__pycache__/
*.py[cod]
*$py.class
*.so
.Python
env/
venv/
ENV/
env.bak/
venv.bak/
.pytest_cache/
.coverage
htmlcov/

# Go
vendor/
*.exe
*.exe~
*.dll
*.so
*.dylib
*.test
*.out

# Java
*.class
*.jar
*.war
*.ear
*.nar
hs_err_pid*

# ===========================================
# Docker & Containers
# ===========================================

# Docker
.dockerignore
docker-compose.override.yml
.docker/

# Kubernetes
*.kubeconfig
kustomization.yaml

# ===========================================
# Documentation & Logs
# ===========================================

# Logs
logs/
*.log
npm-debug.log*
yarn-debug.log*
yarn-error.log*

# Runtime data
pids/
*.pid
*.seed
*.pid.lock

# Temporary files
*.tmp
*.temp
.tmp/
.temp/

# ===========================================
# Security & Secrets
# ===========================================

# Secrets and sensitive data
secrets/
.secrets/
*.secret
.env
.env.local
.env.development.local
.env.test.local
.env.production.local

# SSL certificates
*.crt
*.key
*.pem
*.p12
*.pfx

# GPG keys
*.gpg
*.asc

# SSH keys
id_rsa*
id_ed25519*
known_hosts

# ===========================================
# Package Managers
# ===========================================

# npm
package-lock.json
yarn.lock

# Composer (PHP)
composer.lock
vendor/

# Bundler (Ruby)
Gemfile.lock
vendor/bundle/

# ===========================================
# Database
# ===========================================

# Database files
*.db
*.sqlite
*.sqlite3

# Database dumps
*.sql
*.dump

# ===========================================
# Monitoring & Observability
# ===========================================

# Prometheus
prometheus.yml

# Grafana
grafana.ini

# ===========================================
# Custom Project Files
# ===========================================

# Project-specific temporary files
debug-info.txt
terraform-outputs.*
*.backup

# Local configuration
local.config
.local/

# Archive files
*.tar.gz
*.zip
*.rar
*.7z

# ===========================================
# Comments for Team
# ===========================================

# IMPORTANT: Never commit sensitive data!
# - AWS credentials, API keys, passwords
# - terraform.tfvars files with real values
# - SSL certificates and private keys
# - Database connection strings
# - Any file containing production secrets

# Use terraform.tfvars.example for templates
# Use environment variables for sensitive data in CI/CD
# Store secrets in Jenkins credential store or AWS Secrets Manager
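Because several of these patterns (terraform.tfvars, *.pem, .env) exist specifically to keep secrets out of version control, it is worth remembering that .gitignore never affects files that are already tracked. A minimal shell sketch for auditing that, assuming a checked-out repo (the sample paths are illustrative):

    # List already-tracked files that the current .gitignore would exclude;
    # anything printed here needs `git rm --cached <path>` to untrack it.
    git ls-files --cached --ignored --exclude-standard

    # Spot-check individual paths against the ignore rules (exit 0 = ignored).
    git check-ignore -v terraform.tfvars .env secrets/example.key || true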
Dockerfile (11 changes)
@@ -1,12 +1,21 @@
 FROM python:3.10-slim
 
+# Create a non-root user and group
+RUN adduser --disabled-password --gecos '' myuser
+
+# Set working directory
 WORKDIR /app
 
+# Copy requirements and install dependencies as root (needed for system-wide install)
 COPY requirements.txt .
 RUN pip install --no-cache-dir -r requirements.txt
 
+# Copy app code
 COPY app.py .
 
-EXPOSE 8080
-
+# Change to non-root user
+USER myuser
+
+# Expose port and start app
+EXPOSE 8080
 CMD ["gunicorn", "--bind", "0.0.0.0:8080", "app:app"]
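The point of this Dockerfile change is that gunicorn now runs as the unprivileged myuser rather than root. A quick local check, assuming Docker is available (the image tag is arbitrary):

    # Build the image, then override CMD to print the runtime user.
    docker build -t nonroot-check .
    docker run --rm nonroot-check id -un   # expected: myuser
    docker run --rm nonroot-check id -u    # expected: a non-zero UID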
Infrastructure/foundation/Jenkinsfile (vendored, new file, 747 lines)
@@ -0,0 +1,747 @@
pipeline {
    agent any

    parameters {
        choice(
            name: 'ACTION',
            choices: ['plan', 'apply', 'destroy', 'cleanup'],
            description: 'Action to perform: plan (review), apply (deploy), destroy (remove infra), cleanup (remove bootstrap)'
        )
        booleanParam(
            name: 'AUTO_APPROVE',
            defaultValue: false,
            description: 'Auto-approve terraform apply (use with caution)'
        )
        booleanParam(
            name: 'SKIP_SONAR',
            defaultValue: false,
            description: 'Skip SonarQube analysis (not recommended)'
        )
        booleanParam(
            name: 'SKIP_BOOTSTRAP',
            defaultValue: false,
            description: 'Skip bootstrap phase (S3/DynamoDB already exist)'
        )
        string(
            name: 'PROJECT_NAME',
            defaultValue: 'nvhi-atsila-microservice',
            description: 'Project name for resource naming'
        )
        string(
            name: 'AWS_CREDENTIALS_ID',
            defaultValue: 'aws-ci',
            description: 'AWS credentials stored in Jenkins'
        )
        string(
            name: 'AWS_REGION_ID',
            defaultValue: 'AWS_REGION',
            description: 'AWS region credential stored in Jenkins'
        )
        choice(
            name: 'ENVIRONMENT',
            choices: ['dev', 'staging', 'prod'],
            description: 'Environment to deploy'
        )
    }

    environment {
        // Terraform configuration
        TF_VERSION = '1.5.7'
        TF_IN_AUTOMATION = 'true'
        TF_INPUT = 'false'
        TF_CLI_ARGS = '-no-color'

        // Working directory
        TF_WORKING_DIR = 'infrastructure/foundation'

        // Project configuration (AWS_REGION will be injected from Jenkins credentials)
        PROJECT_NAME = "${params.PROJECT_NAME}"
        ENVIRONMENT = "${params.ENVIRONMENT}"

        // SonarQube configuration
        SONAR_PROJECT_KEY = "${params.PROJECT_NAME}-foundation"
        SONAR_PROJECT_NAME = "${params.PROJECT_NAME} Foundation Layer"
        SONAR_PROJECT_VERSION = "${BUILD_NUMBER}"
    }

    stages {
        stage('🔍 Checkout & Validation') {
            steps {
                echo "=== Enterprise CI/CD Foundation Layer Pipeline ==="
                echo "Action: ${params.ACTION}"
                echo "Environment: ${params.ENVIRONMENT}"
                echo "Project: ${params.PROJECT_NAME}"
                echo "AWS Credentials: ${params.AWS_CREDENTIALS_ID}"
                echo "AWS Region Credential: ${params.AWS_REGION_ID}"
                echo "Authentication: Jenkins Credential Store (Enterprise Standard)"
                echo "Build: #${BUILD_NUMBER}"
                echo "Working Directory: ${env.TF_WORKING_DIR}"

                // Clean workspace and checkout latest code
                deleteDir()
                checkout scm

                // Verify repository structure
                script {
                    sh '''
                        echo "Repository structure validation:"

                        # Check for required directories
                        if [ ! -d "${TF_WORKING_DIR}" ]; then
                            echo "❌ Missing foundation directory: ${TF_WORKING_DIR}"
                            exit 1
                        fi

                        # Check for required files
                        cd "${TF_WORKING_DIR}"
                        for file in main.tf variables.tf outputs.tf versions.tf bootstrap.sh cleanup.sh; do
                            if [ ! -f "$file" ]; then
                                echo "❌ Missing required file: $file"
                                exit 1
                            fi
                            echo "✅ Found: $file"
                        done

                        # Make scripts executable
                        chmod +x bootstrap.sh cleanup.sh

                        echo "✅ Repository structure validated"
                    '''
                }
            }
        }

        stage('🔧 Setup Tools') {
            steps {
                script {
                    // Install Terraform if not available
                    sh '''
                        if ! command -v terraform &> /dev/null; then
                            echo "Installing Terraform ${TF_VERSION}..."
                            wget -q https://releases.hashicorp.com/terraform/${TF_VERSION}/terraform_${TF_VERSION}_linux_amd64.zip
                            unzip -o terraform_${TF_VERSION}_linux_amd64.zip
                            chmod +x terraform
                            sudo mv terraform /usr/local/bin/ || mv terraform /tmp/
                            export PATH="/tmp:$PATH"
                        fi

                        echo "Terraform version:"
                        terraform version
                    '''

                    // Verify AWS credentials and permissions via Jenkins credential store
                    withCredentials([
                        aws(credentialsId: "${params.AWS_CREDENTIALS_ID}"),
                        string(credentialsId: "${params.AWS_REGION_ID}", variable: 'AWS_REGION')
                    ]) {
                        sh '''
                            echo "AWS CLI version:"
                            aws --version

                            echo "Verifying Jenkins stored AWS credentials..."
                            echo "AWS Region: ${AWS_REGION}"
                            aws sts get-caller-identity

                            echo "Testing AWS permissions..."
                            aws ec2 describe-vpcs --max-items 1 --region ${AWS_REGION} > /dev/null && echo "✅ EC2 permissions OK" || echo "⚠️ EC2 permissions limited"
                            aws s3 ls > /dev/null 2>&1 && echo "✅ S3 permissions OK" || echo "⚠️ S3 permissions limited"
                            aws dynamodb list-tables --region ${AWS_REGION} > /dev/null 2>&1 && echo "✅ DynamoDB permissions OK" || echo "⚠️ DynamoDB permissions limited"

                            echo "✅ Jenkins credential store authentication verified"
                        '''
                    }
                }
            }
        }

        stage('🔍 SonarQube Analysis') {
            when {
                allOf {
                    not { expression { params.SKIP_SONAR } }
                    expression { params.ACTION != 'cleanup' }
                }
            }
            steps {
                dir("${env.TF_WORKING_DIR}") {
                    script {
                        // Create comprehensive SonarQube configuration
                        writeFile file: 'sonar-project.properties', text: """
sonar.projectKey=${env.SONAR_PROJECT_KEY}
sonar.projectName=${env.SONAR_PROJECT_NAME}
sonar.projectVersion=${env.SONAR_PROJECT_VERSION}
sonar.sources=.
sonar.sourceEncoding=UTF-8

# Terraform-specific configuration
sonar.terraform.file.suffixes=.tf
sonar.exclusions=**/*.tfstate,**/*.tfstate.backup,**/.terraform/**,**/*.tfplan

# Include scripts in analysis
sonar.inclusions=**/*.tf,**/*.sh

# Quality gate settings
sonar.qualitygate.wait=true

# Coverage and duplications
sonar.cpd.exclusions=**/*.tf

# Custom properties for enterprise analysis
sonar.tags=terraform,infrastructure,enterprise-cicd
"""

                        // Run SonarQube analysis
                        withSonarQubeEnv('SonarQube') {
                            sh '''
                                echo "🔍 Running SonarQube analysis on Terraform infrastructure..."
                                sonar-scanner
                            '''
                        }
                    }
                }
            }
        }

        stage('🎯 Quality Gate') {
            when {
                allOf {
                    not { expression { params.SKIP_SONAR } }
                    expression { params.ACTION != 'cleanup' }
                }
            }
            steps {
                script {
                    timeout(time: 5, unit: 'MINUTES') {
                        def qg = waitForQualityGate()
                        if (qg.status != 'OK') {
                            echo "❌ SonarQube Quality Gate failed: ${qg.status}"
                            echo "Quality gate details: ${qg}"

                            if (params.ACTION == 'apply' && !params.AUTO_APPROVE) {
                                def proceed = input(
                                    message: 'SonarQube Quality Gate failed. How do you want to proceed?',
                                    parameters: [
                                        choice(
                                            name: 'DECISION',
                                            choices: ['Abort', 'Proceed anyway'],
                                            description: 'Quality gate failed - your decision'
                                        )
                                    ]
                                )
                                if (proceed == 'Abort') {
                                    error "Deployment aborted due to quality gate failure"
                                }
                            } else if (params.ACTION == 'apply' && params.AUTO_APPROVE) {
                                echo "⚠️ Quality gate failed but AUTO_APPROVE is enabled, proceeding..."
                            } else {
                                error "Quality gate failed and action is ${params.ACTION}"
                            }
                        } else {
                            echo "✅ SonarQube Quality Gate passed"
                        }
                    }
                }
            }
        }

        stage('🚀 Bootstrap Backend') {
            when {
                allOf {
                    expression { params.ACTION == 'apply' }
                    not { expression { params.SKIP_BOOTSTRAP } }
                }
            }
            steps {
                dir("${env.TF_WORKING_DIR}") {
                    withCredentials([
                        aws(credentialsId: "${params.AWS_CREDENTIALS_ID}"),
                        string(credentialsId: "${params.AWS_REGION_ID}", variable: 'AWS_REGION')
                    ]) {
                        script {
                            echo "=== Bootstrapping Terraform Backend ==="

                            sh '''
                                # Set environment variables for bootstrap script
                                export PROJECT_NAME="${PROJECT_NAME}"
                                export ENVIRONMENT="${ENVIRONMENT}"
                                export AWS_REGION="${AWS_REGION}"

                                # Run bootstrap script (uses Jenkins credentials)
                                ./bootstrap.sh

                                # Verify backend configuration was created
                                if [ ! -f backend.tf ]; then
                                    echo "❌ Bootstrap failed - backend.tf not created"
                                    exit 1
                                fi

                                echo "✅ Backend bootstrap completed"
                                echo "Generated backend.tf:"
                                cat backend.tf
                            '''
                        }
                    }
                }
            }
        }

        stage('🔄 Terraform Init & Validate') {
            when {
                expression { params.ACTION != 'cleanup' }
            }
            steps {
                dir("${env.TF_WORKING_DIR}") {
                    withCredentials([
                        aws(credentialsId: "${params.AWS_CREDENTIALS_ID}"),
                        string(credentialsId: "${params.AWS_REGION_ID}", variable: 'AWS_REGION')
                    ]) {
                        script {
                            sh '''
                                echo "=== Terraform Initialization ==="

                                # Create terraform.tfvars if not exists
                                if [ ! -f terraform.tfvars ]; then
                                    echo "Creating terraform.tfvars..."
                                    cat > terraform.tfvars << EOF
# Generated by Jenkins Pipeline Build #${BUILD_NUMBER}
project_name = "${PROJECT_NAME}"
environment = "${ENVIRONMENT}"
aws_region = "${AWS_REGION}"

# Free tier optimized settings
enable_private_subnets = false
enable_vpc_endpoints = false
enable_nat_gateway = false
single_nat_gateway = true
cost_optimization_mode = true

# Jenkins-managed tags
common_tags = {
  Terraform = "true"
  Project = "${PROJECT_NAME}"
  Environment = "${ENVIRONMENT}"
  ManagedBy = "jenkins"
  Pipeline = "foundation-layer"
  BuildNumber = "${BUILD_NUMBER}"
  GitCommit = "${GIT_COMMIT}"
}
EOF
                                fi

                                echo "Current terraform.tfvars:"
                                cat terraform.tfvars

                                # Initialize Terraform (uses Jenkins credentials)
                                terraform init -upgrade

                                # Validate configuration
                                terraform validate

                                # Format check
                                terraform fmt -check=true || {
                                    echo "⚠️ Terraform files need formatting"
                                    terraform fmt -diff=true
                                }

                                echo "✅ Terraform initialized and validated"
                            '''
                        }
                    }
                }
            }
        }

        stage('📊 Terraform Plan') {
            when {
                expression { params.ACTION in ['plan', 'apply'] }
            }
            steps {
                dir("${env.TF_WORKING_DIR}") {
                    withCredentials([
                        aws(credentialsId: "${params.AWS_CREDENTIALS_ID}"),
                        string(credentialsId: "${params.AWS_REGION_ID}", variable: 'AWS_REGION')
                    ]) {
                        script {
                            sh '''
                                echo "=== Terraform Plan ==="

                                terraform plan \
                                    -var="project_name=${PROJECT_NAME}" \
                                    -var="environment=${ENVIRONMENT}" \
                                    -var="aws_region=${AWS_REGION}" \
                                    -out=tfplan \
                                    -detailed-exitcode || PLAN_EXIT_CODE=$?

                                # Handle plan exit codes
                                case ${PLAN_EXIT_CODE:-0} in
                                    0)
                                        echo "✅ No changes needed - infrastructure is up to date"
                                        ;;
                                    1)
                                        echo "❌ Terraform plan failed"
                                        exit 1
                                        ;;
                                    2)
                                        echo "📝 Changes detected - plan saved to tfplan"

                                        # Show plan summary
                                        echo "=== Plan Summary ==="
                                        terraform show -no-color tfplan | grep -E "(Plan:|No changes|Error:)" || echo "Plan generated successfully"
                                        ;;
                                esac
                            '''

                            // Archive the plan for audit
                            archiveArtifacts artifacts: 'tfplan', allowEmptyArchive: true
                        }
                    }
                }
            }
        }

        stage('🚦 Deployment Approval') {
            when {
                allOf {
                    expression { params.ACTION == 'apply' }
                    not { expression { params.AUTO_APPROVE } }
                }
            }
            steps {
                script {
                    def planSummary = ""
                    dir("${env.TF_WORKING_DIR}") {
                        planSummary = sh(
                            script: 'terraform show -no-color tfplan | grep "Plan:" || echo "No plan summary available"',
                            returnStdout: true
                        ).trim()
                    }

                    echo "=== Manual Approval Required ==="
                    echo "Environment: ${params.ENVIRONMENT}"
                    echo "Region credential: ${params.AWS_REGION_ID}"
                    echo "Plan Summary: ${planSummary}"

                    def approvalData = input(
                        id: 'ProceedApply',
                        message: """
🔍 Review the Terraform plan output above carefully.

Environment: ${params.ENVIRONMENT}
Region credential: ${params.AWS_REGION_ID}
Plan: ${planSummary}

Proceed with deployment?
""",
                        parameters: [
                            choice(
                                name: 'PROCEED',
                                choices: ['No', 'Yes, deploy infrastructure'],
                                description: 'Deployment decision'
                            ),
                            string(
                                name: 'APPROVER',
                                defaultValue: env.BUILD_USER ?: 'jenkins-user',
                                description: 'Your name for audit trail'
                            )
                        ]
                    )

                    if (approvalData.PROCEED != 'Yes, deploy infrastructure') {
                        error "Deployment cancelled by ${approvalData.APPROVER}"
                    }

                    echo "✅ Deployment approved by: ${approvalData.APPROVER}"
                    env.DEPLOYMENT_APPROVER = approvalData.APPROVER
                }
            }
        }

        stage('🚀 Terraform Apply') {
            when {
                expression { params.ACTION == 'apply' }
            }
            steps {
                dir("${env.TF_WORKING_DIR}") {
                    withCredentials([
                        aws(credentialsId: "${params.AWS_CREDENTIALS_ID}"),
                        string(credentialsId: "${params.AWS_REGION_ID}", variable: 'AWS_REGION')
                    ]) {
                        script {
                            echo "=== Terraform Apply ==="
                            if (env.DEPLOYMENT_APPROVER) {
                                echo "✅ Approved by: ${env.DEPLOYMENT_APPROVER}"
                            }

                            sh '''
                                terraform apply -auto-approve tfplan

                                echo "=== Deployment Outputs ==="
                                terraform output

                                # Save outputs for other stages/jobs
                                terraform output -json > terraform-outputs.json
                                terraform output > terraform-outputs.txt
                            '''

                            // Archive outputs
                            archiveArtifacts artifacts: 'terraform-outputs.json,terraform-outputs.txt', allowEmptyArchive: true
                        }
                    }
                }
            }
        }

        stage('💥 Terraform Destroy') {
            when {
                expression { params.ACTION == 'destroy' }
            }
            steps {
                dir("${env.TF_WORKING_DIR}") {
                    withCredentials([
                        aws(credentialsId: "${params.AWS_CREDENTIALS_ID}"),
                        string(credentialsId: "${params.AWS_REGION_ID}", variable: 'AWS_REGION')
                    ]) {
                        script {
                            def destroyApproval = input(
                                id: 'ProceedDestroy',
                                message: """
⚠️ DESTRUCTIVE ACTION WARNING ⚠️

This will permanently delete ALL infrastructure in:
• Environment: ${params.ENVIRONMENT}
• Project: ${params.PROJECT_NAME}

This action CANNOT be undone!

Type 'DESTROY' exactly to confirm:
""",
                                parameters: [
                                    string(
                                        name: 'CONFIRMATION',
                                        defaultValue: '',
                                        description: 'Type DESTROY to confirm deletion'
                                    ),
                                    string(
                                        name: 'DESTROYER',
                                        defaultValue: env.BUILD_USER ?: 'jenkins-user',
                                        description: 'Your name for audit trail'
                                    )
                                ]
                            )

                            if (destroyApproval.CONFIRMATION != 'DESTROY') {
                                error "Destroy cancelled - confirmation text did not match 'DESTROY'"
                            }

                            echo "💀 DESTROY operation confirmed by: ${destroyApproval.DESTROYER}"
                            echo "💀 Destroying infrastructure in 10 seconds..."
                            echo "💀 Last chance to cancel with Ctrl+C..."
                            sleep(10)

                            sh '''
                                terraform destroy -auto-approve \
                                    -var="project_name=${PROJECT_NAME}" \
                                    -var="environment=${ENVIRONMENT}" \
                                    -var="aws_region=${AWS_REGION}"
                            '''

                            echo "💀 Infrastructure destroyed by: ${destroyApproval.DESTROYER}"
                            echo "💀 Next step: Run with ACTION=cleanup to remove bootstrap resources"
                        }
                    }
                }
            }
        }

        stage('🧹 Cleanup Bootstrap') {
            when {
                expression { params.ACTION == 'cleanup' }
            }
            steps {
                dir("${env.TF_WORKING_DIR}") {
                    withCredentials([
                        aws(credentialsId: "${params.AWS_CREDENTIALS_ID}"),
                        string(credentialsId: "${params.AWS_REGION_ID}", variable: 'AWS_REGION')
                    ]) {
                        script {
                            echo "=== Cleanup Bootstrap Resources ==="

                            sh '''
                                # Set environment variables for cleanup script
                                export PROJECT_NAME="${PROJECT_NAME}"
                                export ENVIRONMENT="${ENVIRONMENT}"
                                export AWS_REGION="${AWS_REGION}"

                                # Run cleanup script (uses Jenkins credentials)
                                ./cleanup.sh

                                echo "✅ Bootstrap cleanup completed"
                            '''
                        }
                    }
                }
            }
        }

        stage('📈 Post-Deployment Validation') {
            when {
                expression { params.ACTION == 'apply' }
            }
            steps {
                dir("${env.TF_WORKING_DIR}") {
                    withCredentials([
                        aws(credentialsId: "${params.AWS_CREDENTIALS_ID}"),
                        string(credentialsId: "${params.AWS_REGION_ID}", variable: 'AWS_REGION')
                    ]) {
                        script {
                            sh '''
                                echo "=== Post-Deployment Validation ==="

                                # Validate VPC
                                VPC_ID=$(terraform output -raw vpc_id 2>/dev/null)
                                if [ -n "$VPC_ID" ] && [ "$VPC_ID" != "null" ]; then
                                    echo "✅ VPC created successfully: $VPC_ID"

                                    # Get VPC details
                                    aws ec2 describe-vpcs --vpc-ids $VPC_ID --region ${AWS_REGION} \
                                        --query 'Vpcs[0].{VpcId:VpcId,State:State,CidrBlock:CidrBlock}' \
                                        --output table

                                    # Count resources
                                    SUBNET_COUNT=$(aws ec2 describe-subnets --filters "Name=vpc-id,Values=$VPC_ID" \
                                        --query 'length(Subnets)' --output text --region ${AWS_REGION})
                                    echo "✅ Subnets created: $SUBNET_COUNT"

                                    SG_COUNT=$(aws ec2 describe-security-groups --filters "Name=vpc-id,Values=$VPC_ID" \
                                        --query 'length(SecurityGroups)' --output text --region ${AWS_REGION})
                                    echo "✅ Security groups: $SG_COUNT"
                                else
                                    echo "❌ VPC validation failed"
                                    exit 1
                                fi

                                # Validate backend resources
                                BUCKET_NAME=$(terraform output -raw terraform_state_bucket_name 2>/dev/null)
                                TABLE_NAME=$(terraform output -raw terraform_locks_table_name 2>/dev/null)

                                if [ -n "$BUCKET_NAME" ] && [ "$BUCKET_NAME" != "null" ]; then
                                    echo "✅ S3 backend bucket: $BUCKET_NAME"
                                    aws s3 ls s3://$BUCKET_NAME --region ${AWS_REGION}
                                fi

                                if [ -n "$TABLE_NAME" ] && [ "$TABLE_NAME" != "null" ]; then
                                    echo "✅ DynamoDB locks table: $TABLE_NAME"
                                    aws dynamodb describe-table --table-name $TABLE_NAME --region ${AWS_REGION} \
                                        --query 'Table.{TableName:TableName,Status:TableStatus}' --output table
                                fi

                                # Cost analysis
                                echo "=== Cost Analysis ==="
                                echo "✅ Current configuration: ~$0/month (free tier optimized)"
                                echo "✅ No NAT Gateways (saves ~$32/month)"
                                echo "✅ No VPC Endpoints (saves ~$14/month)"
                                echo "✅ Using public subnets only for cost optimization"
                                echo "✅ Using Jenkins credential store (enterprise standard)"
                            '''
                        }
                    }
                }
            }
        }
    }

    post {
        always {
            script {
                echo "=== Pipeline Execution Summary ==="
                echo "🔹 Build: #${BUILD_NUMBER}"
                echo "🔹 Action: ${params.ACTION}"
                echo "🔹 Environment: ${params.ENVIRONMENT}"
                echo "🔹 Duration: ${currentBuild.durationString}"
                echo "🔹 Result: ${currentBuild.result ?: 'SUCCESS'}"

                // Archive all important artifacts
                dir("${env.TF_WORKING_DIR}") {
                    archiveArtifacts artifacts: '*.tf,terraform.tfvars,*.tfplan,terraform-outputs.*,sonar-project.properties,.backend-config', allowEmptyArchive: true
                }
            }
        }

        success {
            script {
                echo "✅ Foundation Layer pipeline completed successfully!"

                if (params.ACTION == 'apply') {
                    def message = """
🎉 Foundation Layer Deployment Complete!

📊 Deployment Details:
• Environment: ${params.ENVIRONMENT}
• Region credential: ${params.AWS_REGION_ID}
• Project: ${params.PROJECT_NAME}
• Build: #${BUILD_NUMBER}
• Duration: ${currentBuild.durationString}
${env.DEPLOYMENT_APPROVER ? "• Approved by: ${env.DEPLOYMENT_APPROVER}" : ""}

🏗️ Infrastructure Created:
• VPC with multi-AZ public subnets
• Security groups for ALB and ECS
• S3 bucket for Terraform state
• DynamoDB table for state locking
• Internet Gateway and routing

💰 Cost: ~$0/month (free tier optimized)

🚀 Next Steps:
• Phase 2: Deploy Shared Services (ECR, ALB, IAM)
• Phase 3: Deploy Application Layer (ECS Fargate)
• Phase 4: Setup application CI/CD pipeline

📋 Outputs: Check archived artifacts for resource details
"""
                    echo message
                }
            }
        }

        failure {
            script {
                echo "❌ Foundation Layer pipeline failed!"

                // Archive debug information
                dir("${env.TF_WORKING_DIR}") {
                    sh '''
                        echo "=== Debug Information ===" > debug-info.txt
                        echo "Build: ${BUILD_NUMBER}" >> debug-info.txt
                        echo "Action: ${ACTION}" >> debug-info.txt
                        echo "Environment: ${ENVIRONMENT}" >> debug-info.txt
                        echo "Region: ${AWS_REGION}" >> debug-info.txt
                        echo "" >> debug-info.txt
                        echo "Terraform version:" >> debug-info.txt
                        terraform version >> debug-info.txt 2>&1 || echo "Terraform not available" >> debug-info.txt
                        echo "" >> debug-info.txt
                        echo "AWS CLI version:" >> debug-info.txt
                        aws --version >> debug-info.txt 2>&1 || echo "AWS CLI not available" >> debug-info.txt
                        echo "" >> debug-info.txt
                        echo "Working directory:" >> debug-info.txt
                        pwd >> debug-info.txt
                        ls -la >> debug-info.txt 2>&1
                        echo "" >> debug-info.txt
                        echo "Terraform state:" >> debug-info.txt
                        terraform state list >> debug-info.txt 2>&1 || echo "No state available" >> debug-info.txt
                    '''
                    archiveArtifacts artifacts: 'debug-info.txt', allowEmptyArchive: true
                }
            }
        }

        cleanup {
            // Clean sensitive data but preserve artifacts
            dir("${env.TF_WORKING_DIR}") {
                sh '''
                    rm -f .terraform.lock.hcl 2>/dev/null || true
                    rm -rf .terraform/ 2>/dev/null || true
                '''
            }
        }
    }
}

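The plan stage above leans on terraform plan -detailed-exitcode, which exits 0 when there are no changes, 1 on error, and 2 when a plan was created with pending changes. The same pattern as a standalone sketch, runnable outside Jenkins (the directory follows this repo's layout; adjust as needed):

    #!/bin/bash
    # No `set -e` here: we want to inspect terraform's exit code ourselves.
    cd Infrastructure/foundation
    terraform plan -out=tfplan -detailed-exitcode
    code=$?
    case $code in
        0) echo "No changes needed" ;;
        1) echo "Plan failed" >&2; exit 1 ;;
        2) echo "Changes detected; apply with: terraform apply tfplan" ;;
    esac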
Infrastructure/foundation/backend.tf (new file, 25 lines)
@@ -0,0 +1,25 @@
# Terraform Backend Configuration
# This file will be auto-generated by the bootstrap script

# The bootstrap script creates this backend configuration automatically
# to avoid the chicken-and-egg problem with Terraform state management.
#
# During pipeline execution:
# 1. bootstrap.sh creates S3 bucket and DynamoDB table
# 2. bootstrap.sh generates this backend configuration
# 3. terraform init uses the remote backend from the start
#
# This approach eliminates the need for state migration and
# follows enterprise best practices.

# Backend configuration will be inserted here by bootstrap.sh
# Example structure:
# terraform {
#   backend "s3" {
#     bucket         = "project-terraform-state-xxxxxxxx"
#     key            = "foundation/terraform.tfstate"
#     region         = "us-east-1"
#     dynamodb_table = "project-terraform-locks"
#     encrypt        = true
#   }
# }
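An alternative to generating backend.tf wholesale is Terraform's partial backend configuration: commit an empty backend "s3" {} block and supply the values at init time. A hedged sketch (the bucket and table names are placeholders, mirroring the example above):

    # backend.tf would contain only:  terraform { backend "s3" {} }
    terraform init \
        -backend-config="bucket=project-terraform-state-xxxxxxxx" \
        -backend-config="key=foundation/terraform.tfstate" \
        -backend-config="region=us-east-1" \
        -backend-config="dynamodb_table=project-terraform-locks" \
        -backend-config="encrypt=true"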
Infrastructure/foundation/bootstrap.bash (new file, 144 lines)
@@ -0,0 +1,144 @@
#!/bin/bash

# Enterprise CI/CD Foundation Bootstrap Script
# This script creates the S3 bucket and DynamoDB table for the Terraform backend
# before running the main Terraform deployment

set -e

# Configuration
PROJECT_NAME="${PROJECT_NAME:-enterprise-cicd}"
ENVIRONMENT="${ENVIRONMENT:-dev}"
AWS_REGION="${AWS_REGION:-us-east-1}"

# Generate unique suffix for global resources
RANDOM_SUFFIX=$(openssl rand -hex 4)
BUCKET_NAME="${PROJECT_NAME}-terraform-state-${RANDOM_SUFFIX}"
TABLE_NAME="${PROJECT_NAME}-terraform-locks"

echo "🚀 Bootstrapping Terraform Backend Infrastructure"
echo "Project: ${PROJECT_NAME}"
echo "Environment: ${ENVIRONMENT}"
echo "Region: ${AWS_REGION}"
echo "Bucket: ${BUCKET_NAME}"
echo "Table: ${TABLE_NAME}"

# Verify AWS credentials
echo "🔐 Verifying AWS credentials..."
aws sts get-caller-identity || {
    echo "❌ AWS credentials not configured or invalid"
    exit 1
}

# Create S3 bucket for Terraform state
echo "📦 Creating S3 bucket for Terraform state..."
if aws s3api head-bucket --bucket "${BUCKET_NAME}" 2>/dev/null; then
    echo "✅ Bucket ${BUCKET_NAME} already exists"
else
    # Create bucket with appropriate settings based on region
    if [ "${AWS_REGION}" = "us-east-1" ]; then
        aws s3api create-bucket \
            --bucket "${BUCKET_NAME}" \
            --region "${AWS_REGION}"
    else
        aws s3api create-bucket \
            --bucket "${BUCKET_NAME}" \
            --region "${AWS_REGION}" \
            --create-bucket-configuration LocationConstraint="${AWS_REGION}"
    fi

    # Enable versioning
    aws s3api put-bucket-versioning \
        --bucket "${BUCKET_NAME}" \
        --versioning-configuration Status=Enabled

    # Enable encryption
    aws s3api put-bucket-encryption \
        --bucket "${BUCKET_NAME}" \
        --server-side-encryption-configuration '{
            "Rules": [{
                "ApplyServerSideEncryptionByDefault": {
                    "SSEAlgorithm": "AES256"
                }
            }]
        }'

    # Block public access
    aws s3api put-public-access-block \
        --bucket "${BUCKET_NAME}" \
        --public-access-block-configuration \
        BlockPublicAcls=true,IgnorePublicAcls=true,BlockPublicPolicy=true,RestrictPublicBuckets=true

    echo "✅ S3 bucket ${BUCKET_NAME} created successfully"
fi

# Create DynamoDB table for state locking
echo "🔒 Creating DynamoDB table for state locking..."
if aws dynamodb describe-table --table-name "${TABLE_NAME}" --region "${AWS_REGION}" >/dev/null 2>&1; then
    echo "✅ DynamoDB table ${TABLE_NAME} already exists"
else
    aws dynamodb create-table \
        --table-name "${TABLE_NAME}" \
        --attribute-definitions AttributeName=LockID,AttributeType=S \
        --key-schema AttributeName=LockID,KeyType=HASH \
        --billing-mode PAY_PER_REQUEST \
        --region "${AWS_REGION}" \
        --tags Key=Name,Value="${TABLE_NAME}" \
               Key=Project,Value="${PROJECT_NAME}" \
               Key=Environment,Value="${ENVIRONMENT}" \
               Key=ManagedBy,Value=terraform

    # Wait for table to be active
    echo "⏳ Waiting for DynamoDB table to be active..."
    aws dynamodb wait table-exists --table-name "${TABLE_NAME}" --region "${AWS_REGION}"
    echo "✅ DynamoDB table ${TABLE_NAME} created successfully"
fi

# Generate backend configuration
echo "📝 Generating backend configuration..."
cat > backend.tf << EOF
# Terraform Backend Configuration
# Auto-generated by bootstrap script

terraform {
  backend "s3" {
    bucket         = "${BUCKET_NAME}"
    key            = "foundation/terraform.tfstate"
    region         = "${AWS_REGION}"
    dynamodb_table = "${TABLE_NAME}"
    encrypt        = true
  }
}
EOF

echo "✅ Backend configuration written to backend.tf"

# Save configuration for later use
cat > .backend-config << EOF
BUCKET_NAME=${BUCKET_NAME}
TABLE_NAME=${TABLE_NAME}
AWS_REGION=${AWS_REGION}
PROJECT_NAME=${PROJECT_NAME}
ENVIRONMENT=${ENVIRONMENT}
EOF

echo ""
echo "🎉 Bootstrap completed successfully!"
echo ""
echo "📋 Resources Created:"
echo "  S3 Bucket: ${BUCKET_NAME}"
echo "  DynamoDB Table: ${TABLE_NAME}"
echo "  Region: ${AWS_REGION}"
echo ""
echo "📁 Files Generated:"
echo "  backend.tf - Terraform backend configuration"
echo "  .backend-config - Resource details for cleanup"
echo ""
echo "🚀 Ready to run Terraform:"
echo "  terraform init"
echo "  terraform plan"
echo "  terraform apply"
echo ""
echo "💡 To destroy everything later:"
echo "  terraform destroy"
echo "  ./cleanup.sh (to remove bootstrap resources)"
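Outside the pipeline, the script takes its configuration from the environment and falls back to the defaults above. A usage sketch with illustrative values:

    cd Infrastructure/foundation
    export PROJECT_NAME=nvhi-atsila-microservice ENVIRONMENT=dev AWS_REGION=us-east-1
    bash bootstrap.bash
    terraform init    # picks up the generated backend.tf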
Infrastructure/foundation/cleanup.bash (new file, 131 lines)
@@ -0,0 +1,131 @@
#!/bin/bash

# Enterprise CI/CD Foundation Cleanup Script
# This script removes the bootstrap S3 bucket and DynamoDB table
# Run this AFTER terraform destroy to completely clean up

set -e

echo "🧹 Foundation Layer Cleanup Script"

# Load configuration if available
if [ -f .backend-config ]; then
    echo "📋 Loading configuration from .backend-config..."
    source .backend-config
else
    echo "⚠️ No .backend-config found. Using environment variables..."
    BUCKET_NAME="${BUCKET_NAME:-}"
    TABLE_NAME="${TABLE_NAME:-}"
    AWS_REGION="${AWS_REGION:-us-east-1}"
    PROJECT_NAME="${PROJECT_NAME:-enterprise-cicd}"
    ENVIRONMENT="${ENVIRONMENT:-dev}"
fi

# Verify AWS credentials
echo "🔐 Verifying AWS credentials..."
aws sts get-caller-identity || {
    echo "❌ AWS credentials not configured or invalid"
    exit 1
}

# Interactive confirmation
echo ""
echo "⚠️ WARNING: This will permanently delete bootstrap resources!"
echo ""
echo "Resources to delete:"
echo "  S3 Bucket: ${BUCKET_NAME}"
echo "  DynamoDB Table: ${TABLE_NAME}"
echo "  Region: ${AWS_REGION}"
echo ""
read -p "Are you sure you want to proceed? (type 'DELETE' to confirm): " confirmation

if [ "$confirmation" != "DELETE" ]; then
    echo "❌ Cleanup cancelled"
    exit 1
fi

echo ""
echo "💀 Starting cleanup process..."

# Check if Terraform state still exists
if [ -f terraform.tfstate ] || [ -f .terraform/terraform.tfstate ]; then
    echo "❌ Error: Terraform state files still exist!"
    echo "Please run 'terraform destroy' first to destroy all infrastructure"
    echo "Then run this cleanup script to remove bootstrap resources"
    exit 1
fi

# Check if S3 bucket contains state files
if [ -n "${BUCKET_NAME}" ] && aws s3api head-bucket --bucket "${BUCKET_NAME}" 2>/dev/null; then
    STATE_FILES=$(aws s3 ls "s3://${BUCKET_NAME}/foundation/" --recursive 2>/dev/null || echo "")
    if [ -n "${STATE_FILES}" ]; then
        echo "❌ Error: S3 bucket contains Terraform state files!"
        echo "Found state files:"
        echo "${STATE_FILES}"
        echo ""
        echo "Please run 'terraform destroy' first to clean up all infrastructure"
        echo "This will remove the state files from S3"
        exit 1
    fi
fi

# Remove S3 bucket
if [ -n "${BUCKET_NAME}" ] && aws s3api head-bucket --bucket "${BUCKET_NAME}" 2>/dev/null; then
    echo "🗑️ Removing S3 bucket: ${BUCKET_NAME}"

    # Remove all objects and versions
    echo "  Removing all objects and versions..."
    aws s3api list-object-versions --bucket "${BUCKET_NAME}" \
        --query 'Versions[].[Key,VersionId]' --output text | \
    while read key version; do
        if [ -n "$key" ] && [ -n "$version" ]; then
            aws s3api delete-object --bucket "${BUCKET_NAME}" --key "$key" --version-id "$version"
        fi
    done

    # Remove delete markers
    aws s3api list-object-versions --bucket "${BUCKET_NAME}" \
        --query 'DeleteMarkers[].[Key,VersionId]' --output text | \
    while read key version; do
        if [ -n "$key" ] && [ -n "$version" ]; then
            aws s3api delete-object --bucket "${BUCKET_NAME}" --key "$key" --version-id "$version"
        fi
    done

    # Delete the bucket
    aws s3api delete-bucket --bucket "${BUCKET_NAME}" --region "${AWS_REGION}"
    echo "✅ S3 bucket ${BUCKET_NAME} deleted"
else
    echo "ℹ️ S3 bucket ${BUCKET_NAME} not found or already deleted"
fi

# Remove DynamoDB table
if [ -n "${TABLE_NAME}" ] && aws dynamodb describe-table --table-name "${TABLE_NAME}" --region "${AWS_REGION}" >/dev/null 2>&1; then
    echo "🗑️ Removing DynamoDB table: ${TABLE_NAME}"
    aws dynamodb delete-table --table-name "${TABLE_NAME}" --region "${AWS_REGION}"

    # Wait for deletion to complete
    echo "⏳ Waiting for table deletion to complete..."
    aws dynamodb wait table-not-exists --table-name "${TABLE_NAME}" --region "${AWS_REGION}"
    echo "✅ DynamoDB table ${TABLE_NAME} deleted"
else
    echo "ℹ️ DynamoDB table ${TABLE_NAME} not found or already deleted"
fi

# Clean up local files
echo "🧹 Cleaning up local files..."
rm -f backend.tf
rm -f .backend-config
rm -f terraform.tfstate.backup
rm -f .terraform.lock.hcl
rm -rf .terraform/

echo ""
echo "🎉 Cleanup completed successfully!"
echo ""
echo "📋 What was removed:"
echo "  ✅ S3 bucket: ${BUCKET_NAME}"
echo "  ✅ DynamoDB table: ${TABLE_NAME}"
echo "  ✅ Local backend configuration files"
echo ""
echo "💡 You can now run the bootstrap script again to create new resources"
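The per-version deletion loop is correct but makes one API call per object version. For buckets with many versions, delete-objects accepts up to 1,000 entries per call; a sketch of that variant (as written it assumes a non-empty listing of at most 1,000 versions):

    # Batch-delete all object versions in a single call.
    aws s3api delete-objects --bucket "${BUCKET_NAME}" \
        --delete "$(aws s3api list-object-versions --bucket "${BUCKET_NAME}" \
            --query '{Objects: Versions[].{Key: Key, VersionId: VersionId}}' \
            --output json)"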
Infrastructure/foundation/main.tf (new file, 331 lines)
@@ -0,0 +1,331 @@
|
|||||||
|
# Foundation Layer - VPC and Core Infrastructure (Free Tier Optimized)
|
||||||
|
# Creates base networking infrastructure with minimal cost for learning/development
|
||||||
|
|
||||||
|
# Data source for availability zones
|
||||||
|
data "aws_availability_zones" "available" {
|
||||||
|
state = "available"
|
||||||
|
}
|
||||||
|
|
||||||
|
# VPC
|
||||||
|
resource "aws_vpc" "main" {
|
||||||
|
cidr_block = var.vpc_cidr
|
||||||
|
enable_dns_hostnames = true
|
||||||
|
enable_dns_support = true
|
||||||
|
|
||||||
|
tags = {
|
||||||
|
Name = "${var.project_name}-vpc"
|
||||||
|
Environment = var.environment
|
||||||
|
Project = var.project_name
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
# Internet Gateway
|
||||||
|
resource "aws_internet_gateway" "main" {
|
||||||
|
vpc_id = aws_vpc.main.id
|
||||||
|
|
||||||
|
tags = {
|
||||||
|
Name = "${var.project_name}-igw"
|
||||||
|
Environment = var.environment
|
||||||
|
Project = var.project_name
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
# Public Subnets (using 2 AZs for cost optimization)
|
||||||
|
resource "aws_subnet" "public" {
|
||||||
|
count = 2
|
||||||
|
|
||||||
|
vpc_id = aws_vpc.main.id
|
||||||
|
cidr_block = cidrsubnet(var.vpc_cidr, 8, count.index)
|
||||||
|
availability_zone = data.aws_availability_zones.available.names[count.index]
|
||||||
|
map_public_ip_on_launch = true
|
||||||
|
|
||||||
|
tags = {
|
||||||
|
Name = "${var.project_name}-public-subnet-${count.index + 1}"
|
||||||
|
Environment = var.environment
|
||||||
|
Project = var.project_name
|
||||||
|
Type = "public"
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
# Private Subnets (created but will use public for now to avoid NAT Gateway costs)
|
||||||
|
# These can be activated later when you want to upgrade to production-ready setup
|
||||||
|
resource "aws_subnet" "private" {
|
||||||
|
count = var.enable_private_subnets ? 2 : 0
|
||||||
|
|
||||||
|
vpc_id = aws_vpc.main.id
|
||||||
|
cidr_block = cidrsubnet(var.vpc_cidr, 8, count.index + 10)
|
||||||
|
availability_zone = data.aws_availability_zones.available.names[count.index]
|
||||||
|
|
||||||
|
tags = {
|
||||||
|
Name = "${var.project_name}-private-subnet-${count.index + 1}"
|
||||||
|
Environment = var.environment
|
||||||
|
Project = var.project_name
|
||||||
|
Type = "private"
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
# Conditional NAT Gateway resources (only if private subnets are enabled)
|
||||||
|
resource "aws_eip" "nat" {
|
||||||
|
count = var.enable_private_subnets && var.enable_nat_gateway ? (var.single_nat_gateway ? 1 : 2) : 0
|
||||||
|
|
||||||
|
domain = "vpc"
|
||||||
|
depends_on = [aws_internet_gateway.main]
|
||||||
|
|
||||||
|
tags = {
|
||||||
|
Name = "${var.project_name}-nat-eip-${count.index + 1}"
|
||||||
|
Environment = var.environment
|
||||||
|
Project = var.project_name
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
resource "aws_nat_gateway" "main" {
|
||||||
|
count = var.enable_private_subnets && var.enable_nat_gateway ? (var.single_nat_gateway ? 1 : 2) : 0
|
||||||
|
|
||||||
|
allocation_id = aws_eip.nat[count.index].id
|
||||||
|
subnet_id = aws_subnet.public[count.index].id
|
||||||
|
depends_on = [aws_internet_gateway.main]
|
||||||
|
|
||||||
|
tags = {
|
||||||
|
Name = "${var.project_name}-nat-gw-${count.index + 1}"
|
||||||
|
Environment = var.environment
|
||||||
|
Project = var.project_name
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
# Route Table for Public Subnets
|
||||||
|
resource "aws_route_table" "public" {
|
||||||
|
vpc_id = aws_vpc.main.id
|
||||||
|
|
||||||
|
route {
|
||||||
|
cidr_block = "0.0.0.0/0"
|
||||||
|
gateway_id = aws_internet_gateway.main.id
|
||||||
|
}
|
||||||
|
|
||||||
|
tags = {
|
||||||
|
Name = "${var.project_name}-public-rt"
|
||||||
|
Environment = var.environment
|
||||||
|
Project = var.project_name
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
# Route Tables for Private Subnets (only if enabled)
|
||||||
|
resource "aws_route_table" "private" {
|
||||||
|
count = var.enable_private_subnets ? 2 : 0
|
||||||
|
|
||||||
|
vpc_id = aws_vpc.main.id
|
||||||
|
|
||||||
|
# Only add route to NAT Gateway if NAT Gateway is enabled
|
||||||
|
dynamic "route" {
|
||||||
|
for_each = var.enable_nat_gateway ? [1] : []
|
||||||
|
content {
|
||||||
|
cidr_block = "0.0.0.0/0"
|
||||||
|
# If single NAT gateway, all route tables use index 0, otherwise use the route table's index
|
||||||
|
nat_gateway_id = aws_nat_gateway.main[var.single_nat_gateway ? 0 : count.index].id
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
tags = {
|
||||||
|
Name = "${var.project_name}-private-rt-${count.index + 1}"
|
||||||
|
Environment = var.environment
|
||||||
|
Project = var.project_name
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
# Associate Public Subnets with Public Route Table
|
||||||
|
resource "aws_route_table_association" "public" {
|
||||||
|
count = 2
|
||||||
|
|
||||||
|
subnet_id = aws_subnet.public[count.index].id
|
||||||
|
route_table_id = aws_route_table.public.id
|
||||||
|
}
|
||||||
|
|
||||||
|
# Associate Private Subnets with Private Route Tables (only if enabled)
|
||||||
|
resource "aws_route_table_association" "private" {
|
||||||
|
count = var.enable_private_subnets ? 2 : 0
|
||||||
|
|
||||||
|
subnet_id = aws_subnet.private[count.index].id
|
||||||
|
route_table_id = aws_route_table.private[count.index].id
|
||||||
|
}
|
||||||
|
|
||||||
|
# Default Security Group
|
||||||
|
resource "aws_security_group" "default" {
|
||||||
|
name = "${var.project_name}-default-sg"
|
||||||
|
description = "Default security group for ${var.project_name}"
|
||||||
|
vpc_id = aws_vpc.main.id
|
||||||
|
|
||||||
|
ingress {
|
||||||
|
from_port = 0
|
||||||
|
to_port = 0
|
||||||
|
protocol = "-1"
|
||||||
|
self = true
|
||||||
|
}
|
||||||
|
|
||||||
|
egress {
|
||||||
|
from_port = 0
|
||||||
|
to_port = 0
|
||||||
|
protocol = "-1"
|
||||||
|
cidr_blocks = ["0.0.0.0/0"]
|
||||||
|
}
|
||||||
|
|
||||||
|
tags = {
|
||||||
|
Name = "${var.project_name}-default-sg"
|
||||||
|
Environment = var.environment
|
||||||
|
Project = var.project_name
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
# Security Group for ALB
|
||||||
|
resource "aws_security_group" "alb" {
|
||||||
|
name = "${var.project_name}-alb-sg"
|
||||||
|
description = "Security group for Application Load Balancer"
|
||||||
|
vpc_id = aws_vpc.main.id
|
||||||
|
|
||||||
|
ingress {
|
||||||
|
from_port = 80
|
||||||
|
to_port = 80
|
||||||
|
protocol = "tcp"
|
||||||
|
cidr_blocks = ["0.0.0.0/0"]
|
||||||
|
}
|
||||||
|
|
||||||
|
ingress {
|
||||||
|
from_port = 443
|
||||||
|
to_port = 443
|
||||||
|
protocol = "tcp"
|
||||||
|
cidr_blocks = ["0.0.0.0/0"]
|
||||||
|
}
|
||||||
|
|
||||||
|
egress {
|
||||||
|
from_port = 0
|
||||||
|
to_port = 0
|
||||||
|
protocol = "-1"
|
||||||
|
cidr_blocks = ["0.0.0.0/0"]
|
||||||
|
}
|
||||||
|
|
||||||
|
tags = {
|
||||||
|
Name = "${var.project_name}-alb-sg"
|
||||||
|
Environment = var.environment
|
||||||
|
Project = var.project_name
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
# Security Group for ECS Tasks
|
||||||
|
resource "aws_security_group" "ecs_tasks" {
|
||||||
|
name = "${var.project_name}-ecs-tasks-sg"
|
||||||
|
description = "Security group for ECS tasks"
|
||||||
|
vpc_id = aws_vpc.main.id
|
||||||
|
|
||||||
|
# Allow traffic from ALB
|
||||||
|
ingress {
|
||||||
|
from_port = 0
|
||||||
|
to_port = 65535
|
||||||
|
protocol = "tcp"
|
||||||
|
security_groups = [aws_security_group.alb.id]
|
||||||
|
}
|
||||||
|
|
||||||
|
# For development: allow direct access (remove in production)
|
||||||
|
ingress {
|
||||||
|
from_port = 80
|
||||||
|
to_port = 80
|
||||||
|
protocol = "tcp"
|
||||||
|
cidr_blocks = ["0.0.0.0/0"]
|
||||||
|
}
|
||||||
|
|
||||||
|
ingress {
|
||||||
|
from_port = 8080
|
||||||
|
to_port = 8080
|
||||||
|
protocol = "tcp"
|
||||||
|
cidr_blocks = ["0.0.0.0/0"]
|
||||||
|
}
|
||||||
|
|
||||||
|
egress {
|
||||||
|
from_port = 0
|
||||||
|
to_port = 0
|
||||||
|
protocol = "-1"
|
||||||
|
cidr_blocks = ["0.0.0.0/0"]
|
||||||
|
}
|
||||||
|
|
||||||
|
tags = {
|
||||||
|
Name = "${var.project_name}-ecs-tasks-sg"
|
||||||
|
Environment = var.environment
|
||||||
|
Project = var.project_name
|
||||||
|
}
|
||||||
|
}

# Conditional VPC Endpoints (only if enabled and cost-optimized)
resource "aws_vpc_endpoint" "s3" {
  count = var.enable_vpc_endpoints ? 1 : 0

  vpc_id       = aws_vpc.main.id
  service_name = "com.amazonaws.${var.aws_region}.s3"

  tags = {
    Name        = "${var.project_name}-s3-endpoint"
    Environment = var.environment
    Project     = var.project_name
  }
}
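
# Note: a Gateway-type endpoint only takes effect once it is associated with
# route tables. A minimal sketch, assuming the public route table should use
# the endpoint (extend with the private tables when those are enabled):
#
# resource "aws_vpc_endpoint_route_table_association" "s3_public" {
#   count           = var.enable_vpc_endpoints ? 1 : 0
#   vpc_endpoint_id = aws_vpc_endpoint.s3[0].id
#   route_table_id  = aws_route_table.public.id
# }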

# S3 Bucket for Terraform State
resource "aws_s3_bucket" "terraform_state" {
  bucket = "${var.project_name}-terraform-state-${random_string.bucket_suffix.result}"

  tags = {
    Name        = "${var.project_name}-terraform-state"
    Environment = var.environment
    Project     = var.project_name
  }
}

# Random string for bucket uniqueness
resource "random_string" "bucket_suffix" {
  length  = 8
  special = false
  upper   = false
}

# S3 Bucket Versioning
resource "aws_s3_bucket_versioning" "terraform_state" {
  bucket = aws_s3_bucket.terraform_state.id
  versioning_configuration {
    status = "Enabled"
  }
}

# S3 Bucket Server Side Encryption
resource "aws_s3_bucket_server_side_encryption_configuration" "terraform_state" {
  bucket = aws_s3_bucket.terraform_state.id

  rule {
    apply_server_side_encryption_by_default {
      sse_algorithm = "AES256"
    }
  }
}

# S3 Bucket Public Access Block
resource "aws_s3_bucket_public_access_block" "terraform_state" {
  bucket = aws_s3_bucket.terraform_state.id

  block_public_acls       = true
  block_public_policy     = true
  ignore_public_acls      = true
  restrict_public_buckets = true
}

# DynamoDB Table for Terraform State Locking
resource "aws_dynamodb_table" "terraform_locks" {
  name         = "${var.project_name}-terraform-locks"
  billing_mode = "PAY_PER_REQUEST"
  hash_key     = "LockID"

  attribute {
    name = "LockID"
    type = "S"
  }

  tags = {
    Name        = "${var.project_name}-terraform-locks"
    Environment = var.environment
    Project     = var.project_name
  }
}
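
# Once this bootstrap apply exists, later layers can point their backend at
# the bucket and lock table above. A minimal sketch (the bucket name is
# generated with a random suffix, so substitute the real output value):
#
# terraform {
#   backend "s3" {
#     bucket         = "<terraform_state_bucket_name output>"
#     key            = "foundation/terraform.tfstate"
#     region         = "us-east-1"
#     dynamodb_table = "<terraform_locks_table_name output>"
#     encrypt        = true
#   }
# }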
157 Infrastructure/foundation/outputs.tf Normal file
@@ -0,0 +1,157 @@
# Foundation Layer Outputs
# These outputs will be used by subsequent layers (Shared Services and Application)

# VPC Information
output "vpc_id" {
  description = "ID of the VPC"
  value       = aws_vpc.main.id
}

output "vpc_cidr_block" {
  description = "CIDR block of the VPC"
  value       = aws_vpc.main.cidr_block
}

output "vpc_arn" {
  description = "ARN of the VPC"
  value       = aws_vpc.main.arn
}

# Subnet Information
output "public_subnet_ids" {
  description = "List of IDs of the public subnets"
  value       = aws_subnet.public[*].id
}

output "private_subnet_ids" {
  description = "List of IDs of the private subnets"
  value       = var.enable_private_subnets ? aws_subnet.private[*].id : []
}

output "public_subnet_cidrs" {
  description = "List of CIDR blocks of the public subnets"
  value       = aws_subnet.public[*].cidr_block
}

output "private_subnet_cidrs" {
  description = "List of CIDR blocks of the private subnets"
  value       = var.enable_private_subnets ? aws_subnet.private[*].cidr_block : []
}

# For free tier: use public subnets as "app subnets" when private subnets are disabled
output "app_subnet_ids" {
  description = "List of subnet IDs to use for application deployment (private if available, public if cost-optimized)"
  value       = var.enable_private_subnets ? aws_subnet.private[*].id : aws_subnet.public[*].id
}

# Availability Zones
output "availability_zones" {
  description = "List of availability zones"
  value       = data.aws_availability_zones.available.names
}

# Gateway Information
output "internet_gateway_id" {
  description = "ID of the Internet Gateway"
  value       = aws_internet_gateway.main.id
}

output "nat_gateway_ids" {
  description = "List of IDs of the NAT Gateways"
  value       = var.enable_private_subnets && var.enable_nat_gateway ? aws_nat_gateway.main[*].id : []
}

output "nat_gateway_public_ips" {
  description = "List of public Elastic IPs of NAT Gateways"
  value       = var.enable_private_subnets && var.enable_nat_gateway ? aws_eip.nat[*].public_ip : []
}

# Security Group Information
output "default_security_group_id" {
  description = "ID of the default security group"
  value       = aws_security_group.default.id
}

output "alb_security_group_id" {
  description = "ID of the ALB security group"
  value       = aws_security_group.alb.id
}

output "ecs_tasks_security_group_id" {
  description = "ID of the ECS tasks security group"
  value       = aws_security_group.ecs_tasks.id
}

output "vpc_endpoints_security_group_id" {
  description = "ID of the VPC endpoints security group"
  value       = null # Not created in free tier version to avoid costs
}

# Route Table Information
output "public_route_table_id" {
  description = "ID of the public route table"
  value       = aws_route_table.public.id
}

output "private_route_table_ids" {
  description = "List of IDs of the private route tables"
  value       = var.enable_private_subnets ? aws_route_table.private[*].id : []
}

# VPC Endpoint Information
output "s3_vpc_endpoint_id" {
  description = "ID of the S3 VPC endpoint"
  value       = var.enable_vpc_endpoints ? aws_vpc_endpoint.s3[0].id : null
}

output "ecr_dkr_vpc_endpoint_id" {
  description = "ID of the ECR Docker VPC endpoint"
  value       = null # Disabled in free tier version
}

output "ecr_api_vpc_endpoint_id" {
  description = "ID of the ECR API VPC endpoint"
  value       = null # Disabled in free tier version
}

output "logs_vpc_endpoint_id" {
  description = "ID of the CloudWatch Logs VPC endpoint"
  value       = null # Disabled in free tier version
}

# Terraform Backend Information
output "terraform_state_bucket_name" {
  description = "Name of the S3 bucket for Terraform state"
  value       = aws_s3_bucket.terraform_state.bucket
}

output "terraform_state_bucket_arn" {
  description = "ARN of the S3 bucket for Terraform state"
  value       = aws_s3_bucket.terraform_state.arn
}

output "terraform_locks_table_name" {
  description = "Name of the DynamoDB table for Terraform locks"
  value       = aws_dynamodb_table.terraform_locks.name
}

output "terraform_locks_table_arn" {
  description = "ARN of the DynamoDB table for Terraform locks"
  value       = aws_dynamodb_table.terraform_locks.arn
}

# Project Information
output "project_name" {
  description = "Name of the project"
  value       = var.project_name
}

output "environment" {
  description = "Environment name"
  value       = var.environment
}

output "aws_region" {
  description = "AWS region"
  value       = var.aws_region
}
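
# Downstream layers can read these outputs with a terraform_remote_state data
# source. A minimal sketch, assuming the foundation state lives in the bucket
# created above under an illustrative "foundation/terraform.tfstate" key:
#
# data "terraform_remote_state" "foundation" {
#   backend = "s3"
#   config = {
#     bucket = "<terraform_state_bucket_name output>"
#     key    = "foundation/terraform.tfstate"
#     region = "us-east-1"
#   }
# }
#
# # e.g. data.terraform_remote_state.foundation.outputs.app_subnet_ids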
36 Infrastructure/foundation/terraform.tfvars.example Normal file
@@ -0,0 +1,36 @@
# Example Terraform Variables File - FREE TIER OPTIMIZED
# Copy this file to terraform.tfvars and customize as needed
# terraform.tfvars is gitignored for security

# Project Configuration
project_name = "nvhi-atsila-microservice"
environment  = "dev"
aws_region   = "us-east-1"

# Network Configuration
vpc_cidr = "10.0.0.0/16"

# Feature Toggles - FREE TIER SETTINGS
enable_dns_hostnames   = true
enable_dns_support     = true
enable_private_subnets = false # Set to false to avoid NAT Gateway costs (~$32/month)
enable_nat_gateway     = false # Only relevant if private subnets are enabled
single_nat_gateway     = true  # If you enable NAT later, use a single gateway for cost savings
enable_vpc_endpoints   = false # Set to false to avoid interface endpoint costs (~$14/month)
cost_optimization_mode = true  # Enables free tier optimizations

# Cost Estimates:
# enable_private_subnets = false, enable_vpc_endpoints = false: ~$0/month
# enable_private_subnets = true, single_nat_gateway = true: ~$32/month
# enable_private_subnets = true, single_nat_gateway = false: ~$64/month
# enable_vpc_endpoints = true: +$14/month

# Additional Tags
common_tags = {
  Terraform  = "true"
  Project    = "enterprise-cicd"
  Owner      = "devops-team"
  CostCenter = "engineering"
  Department = "technology"
  Tier       = "free"
}
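
# Example: moving off the free tier later only requires flipping the two
# toggles below (roughly ~$32/month with the single-gateway default above):
#
# enable_private_subnets = true
# enable_nat_gateway     = true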
79 Infrastructure/foundation/variables.tf Normal file
@@ -0,0 +1,79 @@
# Foundation Layer Variables - FREE TIER OPTIMIZED
# Configuration variables for the enterprise CI/CD pipeline infrastructure

variable "project_name" {
  description = "Name of the project - used for resource naming"
  type        = string
  default     = "nvhi-atsila-microservice"
}

variable "environment" {
  description = "Environment name (dev, staging, prod)"
  type        = string
  default     = "dev"
}

variable "aws_region" {
  description = "AWS region for infrastructure deployment"
  type        = string
  default     = "us-east-1"
}

variable "vpc_cidr" {
  description = "CIDR block for the VPC"
  type        = string
  default     = "10.0.0.0/16"
}

variable "enable_dns_hostnames" {
  description = "Enable DNS hostnames in the VPC"
  type        = bool
  default     = true
}

variable "enable_dns_support" {
  description = "Enable DNS support in the VPC"
  type        = bool
  default     = true
}

variable "enable_nat_gateway" {
  description = "Enable NAT gateways for private subnets (only relevant if private subnets enabled)"
  type        = bool
  default     = false # Default false for free tier
}

variable "single_nat_gateway" {
  description = "Use a single NAT gateway instead of one per AZ (cost optimization)"
  type        = bool
  default     = true # Default true for cost optimization when NAT is enabled
}

variable "enable_private_subnets" {
  description = "Enable private subnets (requires NAT Gateway for internet access)"
  type        = bool
  default     = false # Set to false for free tier to avoid NAT Gateway costs
}

variable "enable_vpc_endpoints" {
  description = "Enable VPC endpoints for AWS services (costs extra)"
  type        = bool
  default     = false # Set to false for free tier to avoid interface endpoint costs
}

variable "cost_optimization_mode" {
  description = "Enable cost optimization features for free tier usage"
  type        = bool
  default     = true
}

variable "common_tags" {
  description = "Common tags to apply to all resources"
  type        = map(string)
  default = {
    Terraform  = "true"
    Project    = "enterprise-cicd"
    Owner      = "devops-team"
    CostCenter = "engineering"
  }
}
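
# A possible tightening (sketch): constrain environment to its documented
# values with a validation block, e.g.
#
# variable "environment" {
#   description = "Environment name (dev, staging, prod)"
#   type        = string
#   default     = "dev"
#   validation {
#     condition     = contains(["dev", "staging", "prod"], var.environment)
#     error_message = "environment must be one of: dev, staging, prod."
#   }
# }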
33 Infrastructure/foundation/versions.tf Normal file
@@ -0,0 +1,33 @@
# Terraform and Provider Versions
# Defines the minimum required versions for consistency and reliability

terraform {
  required_version = ">= 1.5"

  required_providers {
    aws = {
      source  = "hashicorp/aws"
      version = "~> 5.0"
    }
    random = {
      source  = "hashicorp/random"
      version = "~> 3.1"
    }
  }
}

# AWS Provider Configuration
provider "aws" {
  region = var.aws_region

  default_tags {
    tags = merge(
      var.common_tags,
      {
        Environment = var.environment
        Project     = var.project_name
        ManagedBy   = "terraform"
      }
    )
  }
}
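
# Note: resource-level tags (the per-resource tag maps in main.tf) are merged
# on top of these default_tags, with the resource values winning on conflict;
# since Environment and Project carry the same values in both places here,
# the duplication is harmless.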
921 Jenkinsfile vendored
@@ -1,921 +0,0 @@
pipeline {
    agent any

    parameters {
        booleanParam(
            name: 'FORCE_INFRASTRUCTURE_DEPLOY',
            defaultValue: false,
            description: 'Force infrastructure deployment regardless of change detection'
        )
        booleanParam(
            name: 'SKIP_QUALITY_GATES',
            defaultValue: false,
            description: 'Skip SonarQube quality gates (use with caution)'
        )
        booleanParam(
            name: 'DESTROY_INFRASTRUCTURE',
            defaultValue: false,
            description: 'Destroy all infrastructure (use with extreme caution)'
        )
    }
    environment {
        // Core configuration
        GITEA_REPO  = 'https://code.jacquesingram.online/lenape/nvhi-atsila-microservice.git'
        GITEA_CREDS = '52ee0829-6e65-4951-925b-4186254c3f21'
        SONAR_HOST  = 'https://sonar.jacquesingram.online'
        SONAR_TOKEN = credentials('sonar-token')
        // AWS configuration with ECR
        AWS_CRED_ID    = 'aws-ci'
        AWS_ACCOUNT_ID = credentials('AWS_ACCOUNT_ID')
        AWS_REGION     = 'us-east-2'
        ECR_REPO       = 'nvhi-atsila-microservice'
        // Backend configuration
        TF_BACKEND_BUCKET = 'nvhi-atsila-tf-state'
        TF_BACKEND_PREFIX = 'ecs/terraform.tfstate'
        TF_DDB_TABLE      = 'nvhi-atsila-locks'
        // Application variables
        TF_VAR_cluster_name    = 'nvhi-atsila-cluster'
        TF_VAR_vpc_cidr        = '10.0.0.0/16'
        TF_VAR_public_subnets  = '10.0.1.0/24,10.0.2.0/24'
        TF_VAR_instance_type   = 't2.micro'
        TF_VAR_key_pair_name   = 'nvhi-atsila-deployer'
        TF_VAR_jenkins_ip_cidr = "0.0.0.0/0" // For demo; tighten in production
        TF_VAR_aws_region      = "${AWS_REGION}"
        // Enhanced deployment tracking
        IMAGE_TAG = "v1.0.${BUILD_NUMBER}"
        // Initialize deployment type - will be set properly in stages
        DEPLOYMENT_TYPE = "APPLICATION"
        // Enterprise settings
        TF_IN_AUTOMATION = 'true'
        TF_INPUT         = 'false'
        // Ansible configuration
        ANSIBLE_HOST_KEY_CHECKING = 'False'
        ANSIBLE_CONFIG            = './ansible/ansible.cfg'
        ECS_LOG_GROUP = "/ecs/nvhi-atsila-cluster"
    }

    stages {
        stage('Debug: Show File Structure') {
            steps {
                echo "📂 Current directory contents:"
                sh 'ls -la'
                echo "🔍 Full file tree:"
                sh 'find . -type f | sort'
            }
        }

        stage('Bootstrap Terraform Backend') {
            steps {
                script {
                    def tfBackendDir = "terraform-backend"

                    echo "🔐 Using Jenkins credentials to authenticate with AWS"
                    withCredentials([[$class: 'AmazonWebServicesCredentialsBinding', credentialsId: env.AWS_CRED_ID]]) {
                        echo "🔄 Checking/Bootstrapping Terraform backend..."
                        dir(tfBackendDir) {
                            // Note: terraform init takes no -var flags; variables are only passed to apply
                            def exitCode = sh(
                                script: """
                                    terraform init
                                    terraform apply -auto-approve \\
                                        -var="aws_region=${TF_VAR_aws_region}" \\
                                        -var="backend_bucket_name=${TF_BACKEND_BUCKET}" \\
                                        -var="lock_table_name=${TF_DDB_TABLE}"
                                """,
                                returnStatus: true
                            )

                            if (exitCode == 0) {
                                echo "✅ Terraform backend created successfully"
                            } else {
                                echo "⚠️ Terraform apply failed, checking if resources already exist..."
                                def bucketExists = sh(
                                    script: "aws s3api head-bucket --bucket ${TF_BACKEND_BUCKET} --region ${TF_VAR_aws_region} 2>/dev/null",
                                    returnStatus: true
                                ) == 0
                                def tableExists = sh(
                                    script: "aws dynamodb describe-table --table-name ${TF_DDB_TABLE} --region ${TF_VAR_aws_region} 2>/dev/null",
                                    returnStatus: true
                                ) == 0

                                if (bucketExists && tableExists) {
                                    echo "✅ Terraform backend already exists - continuing..."
                                } else {
                                    echo "❌ Backend bootstrap failed and resources don't exist:"
                                    echo "   S3 Bucket exists: ${bucketExists}"
                                    echo "   DynamoDB Table exists: ${tableExists}"
                                    error("Manual intervention required.")
                                }
                            }
                        }
                    }
                }
            }
        }
        stage('Security Assessment & Checkout') {
            steps {
                checkout scm
                script {
                    // Check for infrastructure destruction first
                    if (params.DESTROY_INFRASTRUCTURE) {
                        env.DEPLOYMENT_TYPE = "DESTROY"
                        currentBuild.displayName = "DESTROY-${BUILD_NUMBER}"
                        echo "🚨 DESTROY MODE: Infrastructure destruction requested"
                        return
                    }

                    def infrastructureFiles = sh(
                        script: '''
                            if git rev-parse HEAD~1 >/dev/null 2>&1; then
                                git diff --name-only HEAD~1 2>/dev/null | grep -E "^terraform/" || echo "none"
                            else
                                echo "initial"
                            fi
                        ''',
                        returnStdout: true
                    ).trim()

                    // Check force parameter first - this overrides everything
                    if (params.FORCE_INFRASTRUCTURE_DEPLOY) {
                        env.DEPLOYMENT_TYPE = "INFRASTRUCTURE"
                        currentBuild.displayName = "INFRASTRUCTURE-FORCED-${BUILD_NUMBER}"
                        echo "🚨 FORCED: Infrastructure deployment requested via parameter"
                        echo "✅ Deployment type set to: INFRASTRUCTURE (forced)"
                    } else if (infrastructureFiles == "initial") {
                        env.DEPLOYMENT_TYPE = "INFRASTRUCTURE"
                        currentBuild.displayName = "INFRASTRUCTURE-INITIAL-${BUILD_NUMBER}"
                        echo "✅ First run detected. Deploying infrastructure."
                    } else if (infrastructureFiles != "none") {
                        env.DEPLOYMENT_TYPE = "INFRASTRUCTURE"
                        currentBuild.displayName = "INFRASTRUCTURE-CHANGED-${BUILD_NUMBER}"
                        echo "🚨 SECURITY NOTICE: Infrastructure changes detected - elevated permissions required"
                        echo "   Changed files: ${infrastructureFiles}"
                    } else {
                        env.DEPLOYMENT_TYPE = "APPLICATION"
                        currentBuild.displayName = "APPLICATION-${BUILD_NUMBER}"
                        echo "✅ SECURITY: Application-only deployment - using restricted permissions"
                    }

                    def gitCommit = sh(script: 'git rev-parse HEAD', returnStdout: true).trim()
                    def gitAuthor = sh(script: 'git log -1 --pretty=format:"%an"', returnStdout: true).trim()
                    currentBuild.description = "${env.DEPLOYMENT_TYPE} | ${env.IMAGE_TAG} | ${gitCommit.take(8)}"
                    echo "📋 SECURITY AUDIT TRAIL:"
                    echo "   • Deployment Type: ${env.DEPLOYMENT_TYPE}"
                    echo "   • Version: ${env.IMAGE_TAG}"
                    echo "   • Commit: ${gitCommit.take(8)}"
                    echo "   • Author: ${gitAuthor}"
                    echo "   • Container Registry: ECR (AWS-native, secure)"
                    echo "   • Architecture: Ansible-based deployment (enterprise security)"
                    echo "   • Security Model: Principle of Least Privilege"
                    echo "   • Timestamp: ${new Date()}"
                    echo "🔄 DEPLOYMENT TYPE CONFIRMATION: ${env.DEPLOYMENT_TYPE}"

                    writeFile file: 'deployment-audit.json', text: """{
  "build_number": "${BUILD_NUMBER}",
  "deployment_type": "${env.DEPLOYMENT_TYPE}",
  "image_tag": "${env.IMAGE_TAG}",
  "git_commit": "${gitCommit}",
  "git_author": "${gitAuthor}",
  "infrastructure_files_changed": "${infrastructureFiles}",
  "container_registry": "ECR",
  "architecture": "ansible_based_deployment",
  "security_model": "principle_of_least_privilege",
  "timestamp": "${new Date()}"
}"""
                    archiveArtifacts artifacts: 'deployment-audit.json', fingerprint: true
                }
            }
        }
        stage('Security & Quality Checks') {
            parallel {
                stage('SonarQube Security Analysis') {
                    when {
                        expression { !params.SKIP_QUALITY_GATES }
                    }
                    steps {
                        script {
                            def scannerHome = tool 'SonarQubeScanner'
                            withSonarQubeEnv('SonarQube') {
                                sh """
                                    echo "🔒 SECURITY: Running SonarQube security analysis..."
                                    ${scannerHome}/bin/sonar-scanner \\
                                        -Dsonar.projectKey=nvhi-atsila-microservice \\
                                        -Dsonar.sources=. \\
                                        -Dsonar.projectVersion=${BUILD_NUMBER} \\
                                        -Dsonar.login=${SONAR_TOKEN}
                                """
                            }
                            echo "✅ SECURITY: Code quality and security scan completed"
                        }
                    }
                }
                stage('Terraform Security Validation') {
                    steps {
                        script {
                            echo "🔒 SECURITY: Running Terraform security and validation checks..."
                            sh '''
                                echo "Validating Terraform configuration..."
                                cd terraform && terraform init -backend=false
                                terraform validate
                                echo "✅ Terraform validation passed"
                                echo "🔒 SECURITY: Checking infrastructure security compliance..."
                                grep -r "encrypted.*true" . --include="*.tf" && echo "✅ Encryption policies found" || echo "⚠️ Review encryption settings"
                                echo "🔒 SECURITY: Checking for open security groups..."
                                if grep -r "0.0.0.0/0" . --include="*.tf" --exclude-dir=".terraform" | grep -v "# Approved:"; then
                                    echo "⚠️ Review open access rules found"
                                else
                                    echo "✅ No unauthorized open access rules"
                                fi
                            '''
                            echo "✅ SECURITY: Infrastructure validation and security checks passed"
                        }
                    }
                }
            }
        }
        stage('Secure Container Build & Registry') {
            when {
                not { expression { env.DEPLOYMENT_TYPE == "DESTROY" } }
            }
            steps {
                withCredentials([[$class: 'AmazonWebServicesCredentialsBinding', credentialsId: env.AWS_CRED_ID]]) {
                    script {
                        echo "🔐 SECURITY: Using ECR for secure, AWS-native container registry"

                        // Create ECR repository if it doesn't exist
                        echo "🔍 Checking/Creating ECR repository..."
                        sh """
                            if ! aws ecr describe-repositories --repository-names ${ECR_REPO} --region ${AWS_REGION} 2>/dev/null; then
                                echo "📦 Creating ECR repository: ${ECR_REPO}"
                                aws ecr create-repository --repository-name ${ECR_REPO} --region ${AWS_REGION}
                                echo "✅ ECR repository created successfully"
                            else
                                echo "✅ ECR repository already exists"
                            fi
                        """

                        sh """
                            echo "🔐 Authenticating with ECR using temporary credentials..."
                            aws ecr get-login-password --region ${AWS_REGION} | docker login --username AWS --password-stdin ${AWS_ACCOUNT_ID}.dkr.ecr.${AWS_REGION}.amazonaws.com
                        """
                        echo "🐳 Building secure container with metadata..."
                        sh """
                            docker build -t ${AWS_ACCOUNT_ID}.dkr.ecr.${AWS_REGION}.amazonaws.com/${ECR_REPO}:${IMAGE_TAG} .
                            docker push ${AWS_ACCOUNT_ID}.dkr.ecr.${AWS_REGION}.amazonaws.com/${ECR_REPO}:${IMAGE_TAG}
                            docker tag ${AWS_ACCOUNT_ID}.dkr.ecr.${AWS_REGION}.amazonaws.com/${ECR_REPO}:${IMAGE_TAG} ${AWS_ACCOUNT_ID}.dkr.ecr.${AWS_REGION}.amazonaws.com/${ECR_REPO}:latest
                            docker push ${AWS_ACCOUNT_ID}.dkr.ecr.${AWS_REGION}.amazonaws.com/${ECR_REPO}:latest
                        """
                        echo "✅ SECURITY: Container built and pushed to ECR successfully"
                        echo "   Image: ${AWS_ACCOUNT_ID}.dkr.ecr.${AWS_REGION}.amazonaws.com/${ECR_REPO}:${IMAGE_TAG}"
                        echo "   Registry: ECR (AWS-native, IAM-secured)"
                    }
                }
            }
        }
        stage('Infrastructure Readiness Check') {
            when {
                not { expression { env.DEPLOYMENT_TYPE == "DESTROY" } }
            }
            steps {
                withCredentials([[$class: 'AmazonWebServicesCredentialsBinding', credentialsId: env.AWS_CRED_ID]]) {
                    script {
                        echo "🔍 SECURITY: Checking if infrastructure is ready for deployment..."
                        echo "🔍 Current deployment type: ${env.DEPLOYMENT_TYPE}"

                        // Only check readiness if deployment type is APPLICATION
                        if (env.DEPLOYMENT_TYPE == "APPLICATION") {
                            def serviceExists = sh(
                                script: """
                                    aws ecs describe-services --cluster ${TF_VAR_cluster_name} --services ${TF_VAR_cluster_name}-service --region ${AWS_REGION} 2>/dev/null | grep -q 'ACTIVE' && echo 'true' || echo 'false'
                                """,
                                returnStdout: true
                            ).trim()
                            def instanceCount = sh(
                                script: """
                                    aws ecs list-container-instances --cluster ${TF_VAR_cluster_name} --region ${AWS_REGION} --query 'length(containerInstanceArns)' --output text 2>/dev/null || echo '0'
                                """,
                                returnStdout: true
                            ).trim()

                            echo "🔍 Service Exists: ${serviceExists}"
                            echo "🔍 Container Instances: ${instanceCount}"

                            if (serviceExists == "false" || instanceCount == "0" || instanceCount == "null") {
                                echo "🚨 SECURITY NOTICE: Infrastructure not ready - forcing infrastructure deployment"
                                env.DEPLOYMENT_TYPE = "INFRASTRUCTURE"
                                currentBuild.displayName = "INFRASTRUCTURE-AUTO-${BUILD_NUMBER}"
                                currentBuild.description = "INFRASTRUCTURE (auto-detected) | ${env.IMAGE_TAG}"
                                echo "✅ Changed deployment type to: INFRASTRUCTURE"
                            }
                        } else {
                            echo "✅ Infrastructure deployment already scheduled - skipping readiness check"
                        }

                        echo "📋 SECURITY: Infrastructure readiness assessment completed"
                        echo "   Final Deployment Type: ${env.DEPLOYMENT_TYPE}"
                    }
                }
            }
        }
        stage('Destroy Infrastructure') {
            when {
                expression { env.DEPLOYMENT_TYPE == "DESTROY" }
            }
            steps {
                withCredentials([[$class: 'AmazonWebServicesCredentialsBinding', credentialsId: env.AWS_CRED_ID]]) {
                    dir('terraform') {
                        script {
                            echo "🚨 DESTRUCTION: Destroying infrastructure..."
                            sh """
                                echo "🔄 Initializing Terraform with remote backend..."
                                terraform init \\
                                    -backend-config="bucket=${TF_BACKEND_BUCKET}" \\
                                    -backend-config="key=${TF_BACKEND_PREFIX}" \\
                                    -backend-config="region=${AWS_REGION}" \\
                                    -backend-config="dynamodb_table=${TF_DDB_TABLE}"

                                echo "🔄 Planning infrastructure destruction..."
                                terraform plan -destroy \\
                                    -var="cluster_name=${TF_VAR_cluster_name}" \\
                                    -var="vpc_cidr=${TF_VAR_vpc_cidr}" \\
                                    -var="public_subnets=${TF_VAR_public_subnets}" \\
                                    -var="instance_type=${TF_VAR_instance_type}" \\
                                    -var="key_pair_name=${TF_VAR_key_pair_name}" \\
                                    -var="jenkins_ip_cidr=${TF_VAR_jenkins_ip_cidr}" \\
                                    -var="aws_region=${TF_VAR_aws_region}"

                                echo "🔄 Destroying infrastructure..."
                                terraform destroy -auto-approve \\
                                    -var="cluster_name=${TF_VAR_cluster_name}" \\
                                    -var="vpc_cidr=${TF_VAR_vpc_cidr}" \\
                                    -var="public_subnets=${TF_VAR_public_subnets}" \\
                                    -var="instance_type=${TF_VAR_instance_type}" \\
                                    -var="key_pair_name=${TF_VAR_key_pair_name}" \\
                                    -var="jenkins_ip_cidr=${TF_VAR_jenkins_ip_cidr}" \\
                                    -var="aws_region=${TF_VAR_aws_region}"
                            """
                            echo "✅ Infrastructure destruction completed"
                        }
                    }
                }
            }
        }
        stage('Deploy Infrastructure') {
            when {
                anyOf {
                    expression { params.FORCE_INFRASTRUCTURE_DEPLOY == true }
                    expression { env.DEPLOYMENT_TYPE == "INFRASTRUCTURE" }
                }
            }
            steps {
                withCredentials([[$class: 'AmazonWebServicesCredentialsBinding', credentialsId: env.AWS_CRED_ID]]) {
                    dir('terraform') {
                        script {
                            echo "🔍 DEPLOYMENT: Force parameter = ${params.FORCE_INFRASTRUCTURE_DEPLOY}"
                            echo "🔍 DEPLOYMENT: Deployment type = ${env.DEPLOYMENT_TYPE}"
                            echo "🚨 SECURITY NOTICE: Infrastructure deployment requested"
                            echo "🏗️ ARCHITECTURE: Deploying ECS Cluster with Ansible-based deployment (enterprise security)"
                            echo "🔐 In production: This would require infrastructure-admin role"
                            echo "🚀 Attempting infrastructure deployment..."

                            // Add error handling for Terraform operations
                            try {
                                sh """
                                    echo "🔄 Initializing Terraform with remote backend..."
                                    terraform init \\
                                        -backend-config="bucket=${TF_BACKEND_BUCKET}" \\
                                        -backend-config="key=${TF_BACKEND_PREFIX}" \\
                                        -backend-config="region=${AWS_REGION}" \\
                                        -backend-config="dynamodb_table=${TF_DDB_TABLE}"

                                    echo "🔄 Planning infrastructure changes..."
                                    terraform plan \\
                                        -var="cluster_name=${TF_VAR_cluster_name}" \\
                                        -var="vpc_cidr=${TF_VAR_vpc_cidr}" \\
                                        -var="public_subnets=${TF_VAR_public_subnets}" \\
                                        -var="instance_type=${TF_VAR_instance_type}" \\
                                        -var="key_pair_name=${TF_VAR_key_pair_name}" \\
                                        -var="jenkins_ip_cidr=${TF_VAR_jenkins_ip_cidr}" \\
                                        -var="aws_region=${TF_VAR_aws_region}"

                                    echo "🔄 Applying infrastructure changes..."
                                    terraform apply -auto-approve \\
                                        -var="cluster_name=${TF_VAR_cluster_name}" \\
                                        -var="vpc_cidr=${TF_VAR_vpc_cidr}" \\
                                        -var="public_subnets=${TF_VAR_public_subnets}" \\
                                        -var="instance_type=${TF_VAR_instance_type}" \\
                                        -var="key_pair_name=${TF_VAR_key_pair_name}" \\
                                        -var="jenkins_ip_cidr=${TF_VAR_jenkins_ip_cidr}" \\
                                        -var="aws_region=${TF_VAR_aws_region}"
                                """
                                echo "✅ SECURITY: Infrastructure deployment completed successfully"
                            } catch (Exception e) {
                                echo "❌ Infrastructure deployment failed: ${e.getMessage()}"
                                echo "📋 Checking current Terraform state..."
                                sh "terraform show || echo 'No state found'"
                                throw e
                            }
                        }
                    }
                }
            }
        }
        stage('Wait for ECS Agents') {
            when {
                anyOf {
                    expression { params.FORCE_INFRASTRUCTURE_DEPLOY == true }
                    expression { env.DEPLOYMENT_TYPE == "INFRASTRUCTURE" }
                }
            }
            steps {
                withCredentials([[$class: 'AmazonWebServicesCredentialsBinding', credentialsId: env.AWS_CRED_ID]]) {
                    script {
                        echo "⏳ Waiting for ECS agents to register with cluster..."
                        timeout(time: 10, unit: 'MINUTES') {
                            waitUntil {
                                def count = sh(
                                    script: """
                                        aws ecs list-container-instances --cluster ${TF_VAR_cluster_name} --region ${AWS_REGION} --query 'length(containerInstanceArns)' --output text 2>/dev/null || echo '0'
                                    """,
                                    returnStdout: true
                                ).trim()
                                if (count != "0" && count != "null") {
                                    echo "✅ ECS agents registered: ${count} instance(s)"
                                    // Fixed: Simplified active count check to avoid backtick escaping issues
                                    def activeCount = sh(
                                        script: """
                                            aws ecs describe-container-instances \\
                                                --cluster ${TF_VAR_cluster_name} \\
                                                --container-instances \$(aws ecs list-container-instances --cluster ${TF_VAR_cluster_name} --region ${AWS_REGION} --query 'containerInstanceArns[*]' --output text) \\
                                                --region ${AWS_REGION} \\
                                                --output text | grep -c ACTIVE || echo '0'
                                        """,
                                        returnStdout: true
                                    ).trim()
                                    if (activeCount != "0" && activeCount != "null") {
                                        echo "✅ Active ECS instances: ${activeCount}"
                                        return true
                                    } else {
                                        echo "⏳ Waiting for instances to become ACTIVE..."
                                        sleep(20)
                                        return false
                                    }
                                } else {
                                    echo "⏳ No ECS agents registered yet..."
                                    sleep(20)
                                    return false
                                }
                            }
                        }
                    }
                }
            }
        }
        stage('Configure & Deploy Application with Ansible') {
            when {
                not { expression { env.DEPLOYMENT_TYPE == "DESTROY" } }
            }
            steps {
                script {
                    echo "🚀 ENTERPRISE: Deploying with Ansible (replacing SSM approach)"

                    // Get infrastructure details from Terraform
                    def instanceId = ""
                    def publicIp = ""
                    def executionRoleArn = ""

                    try {
                        instanceId = sh(
                            script: "cd terraform && terraform output -raw ecs_instance_id",
                            returnStdout: true
                        ).trim()

                        publicIp = sh(
                            script: "cd terraform && terraform output -raw ecs_instance_public_ip",
                            returnStdout: true
                        ).trim()

                        executionRoleArn = sh(
                            script: "cd terraform && terraform output -raw ecs_task_execution_role_arn",
                            returnStdout: true
                        ).trim()

                        echo "📍 Target Instance: ${instanceId} (${publicIp})"
                        echo "🔧 Execution Role: ${executionRoleArn}"
                    } catch (Exception e) {
                        echo "⚠️ Could not get all Terraform outputs: ${e.getMessage()}"
                        echo "⚠️ Some outputs may be missing, continuing with available data..."
                    }

                    // Create Ansible working directory and files
                    sh "mkdir -p ansible/group_vars"

                    // Create dynamic inventory file
                    def inventoryContent = """[inventory_hosts]
ec2-instance ansible_host=${publicIp} ansible_user=ec2-user

[inventory_hosts:vars]
ansible_ssh_private_key_file=~/.ssh/id_rsa
ansible_ssh_common_args='-o StrictHostKeyChecking=no -o UserKnownHostsFile=/dev/null -o ConnectTimeout=10 -o ServerAliveInterval=60'
ansible_python_interpreter=/usr/bin/python3
ansible_connection=ssh
ansible_ssh_retries=3
aws_region=${AWS_REGION}
"""
                    writeFile file: 'ansible/hosts', text: inventoryContent

                    // Create Ansible configuration
                    def ansibleConfig = """[defaults]
inventory = hosts
host_key_checking = False
retry_files_enabled = False
gathering = smart
stdout_callback = yaml
timeout = 30
log_path = ./ansible.log

[ssh_connection]
ssh_args = -o ControlMaster=auto -o ControlPersist=60s -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no -o ConnectTimeout=10
pipelining = True
"""
                    writeFile file: 'ansible/ansible.cfg', text: ansibleConfig

                    // Create group variables
                    def groupVarsContent = """---
ecs_cluster_name: ${TF_VAR_cluster_name}
service_name: ${TF_VAR_cluster_name}-service
task_family: ${TF_VAR_cluster_name}-task
container_name: ${ECR_REPO}
aws_region: ${AWS_REGION}
container_port: 8080
"""
                    writeFile file: 'ansible/group_vars/all.yml', text: groupVarsContent

                    // Test connectivity and execute deployment
                    withCredentials([
                        [$class: 'AmazonWebServicesCredentialsBinding',
                         credentialsId: env.AWS_CRED_ID,
                         accessKeyVariable: 'AWS_ACCESS_KEY_ID',
                         secretKeyVariable: 'AWS_SECRET_ACCESS_KEY']
                    ]) {
                        sh """
                            cd ansible

                            # Set environment variables
                            export AWS_DEFAULT_REGION="${AWS_REGION}"
                            export ANSIBLE_HOST_KEY_CHECKING=False
                            export ANSIBLE_CONFIG="./ansible.cfg"

                            # Wait for SSH connectivity
                            echo "🔍 Testing SSH connectivity to ${publicIp}..."
                            timeout 120 bash -c 'while ! nc -z ${publicIp} 22; do echo "Waiting for SSH..."; sleep 5; done'

                            # Install Python dependencies if needed
                            pip3 install --user boto3 botocore jq > /dev/null 2>&1 || true

                            # Test Ansible connectivity
                            echo "🔍 Testing Ansible connectivity..."
                            ansible inventory_hosts -m ping -i hosts -v

                            if [ \$? -ne 0 ]; then
                                echo "❌ Ansible connectivity failed"
                                echo "Debugging SSH connection..."
                                ssh -o StrictHostKeyChecking=no -o ConnectTimeout=10 ec2-user@${publicIp} 'echo "SSH test successful"' || {
                                    echo "SSH connection failed"
                                    exit 1
                                }
                                exit 1
                            fi

                            echo "✅ Connectivity test passed"

                            # Execute main deployment playbook
                            echo "🚀 Starting deployment..."
                            ansible-playbook configure_ecs.yml \\
                                -i hosts \\
                                -e "app_version=${IMAGE_TAG}" \\
                                -e "aws_account_id=${AWS_ACCOUNT_ID}" \\
                                -e "aws_region=${AWS_REGION}" \\
                                -e "task_execution_role_arn=${executionRoleArn}" \\
                                --timeout 600 \\
                                -v
                        """
                    }

                    // Final verification
                    echo "🔍 Running final verification..."
                    sh """
                        echo "Testing application endpoint..."
                        for i in {1..10}; do
                            if curl -f -s "http://${publicIp}:8080/health"; then
                                echo "✅ Application health check passed"
                                break
                            else
                                echo "⏳ Health check attempt \$i/10..."
                                sleep 10
                            fi
                        done
                    """
                }
            }

            post {
                success {
                    script {
                        def publicIp = sh(
                            script: "cd terraform && terraform output -raw ecs_instance_public_ip",
                            returnStdout: true
                        ).trim()

                        echo """
========================================
🎉 DEPLOYMENT SUCCESSFUL!
========================================
Application URL: http://${publicIp}:8080
Health Endpoint: http://${publicIp}:8080/health
Version: ${IMAGE_TAG}
Deployment Method: Ansible (Enterprise Security)
========================================
"""
                    }

                    // Archive deployment artifacts
                    archiveArtifacts artifacts: 'ansible/ansible.log', allowEmptyArchive: true
                }

                failure {
                    echo "❌ DEPLOYMENT FAILED - Gathering debug information..."

                    script {
                        sh """
                            echo "=== ANSIBLE DEBUG INFORMATION ==="
                            cat ansible/ansible.log 2>/dev/null || echo "No Ansible log available"

                            echo "=== ECS SERVICE STATUS ==="
                            aws ecs describe-services \\
                                --cluster "${TF_VAR_cluster_name}" \\
                                --services "${TF_VAR_cluster_name}-service" \\
                                --region "${AWS_REGION}" \\
                                --query 'services[0].{Status:status,Running:runningCount,Pending:pendingCount,Events:events[0:3]}' \\
                                --output json 2>/dev/null || echo "Could not get ECS service status"

                            echo "=== ECS CLUSTER STATUS ==="
                            aws ecs describe-clusters \\
                                --clusters "${TF_VAR_cluster_name}" \\
                                --region "${AWS_REGION}" \\
                                --query 'clusters[0].{Status:status,ActiveInstances:activeContainerInstancesCount,Tasks:runningTasksCount}' \\
                                --output json 2>/dev/null || echo "Could not get ECS cluster status"

                            echo "=== RECENT CONTAINER LOGS ==="
                            LATEST_STREAM=\$(aws logs describe-log-streams \\
                                --log-group-name "${ECS_LOG_GROUP}" \\
                                --region "${AWS_REGION}" \\
                                --order-by LastEventTime \\
                                --descending \\
                                --max-items 1 \\
                                --query 'logStreams[0].logStreamName' \\
                                --output text 2>/dev/null)

                            if [ "\$LATEST_STREAM" != "None" ] && [ "\$LATEST_STREAM" != "" ]; then
                                echo "Latest log stream: \$LATEST_STREAM"
                                aws logs get-log-events \\
                                    --log-group-name "${ECS_LOG_GROUP}" \\
                                    --log-stream-name "\$LATEST_STREAM" \\
                                    --region "${AWS_REGION}" \\
                                    --start-from-head \\
                                    --query 'events[-20:].[timestamp,message]' \\
                                    --output table 2>/dev/null || echo "Could not retrieve logs"
                            else
                                echo "No log streams found"
                            fi
                        """
                    }

                    // Offer rollback option
                    script {
                        try {
                            timeout(time: 5, unit: 'MINUTES') {
                                def rollbackChoice = input(
                                    message: 'Deployment failed. Would you like to rollback to the previous version?',
                                    parameters: [
                                        choice(choices: ['No', 'Yes'], description: 'Rollback?', name: 'ROLLBACK')
                                    ]
                                )

                                if (rollbackChoice == 'Yes') {
                                    echo "🔄 Initiating automatic rollback..."
                                    withCredentials([
                                        [$class: 'AmazonWebServicesCredentialsBinding',
                                         credentialsId: env.AWS_CRED_ID,
                                         accessKeyVariable: 'AWS_ACCESS_KEY_ID',
                                         secretKeyVariable: 'AWS_SECRET_ACCESS_KEY']
                                    ]) {
                                        sh """
                                            cd ansible
                                            ansible-playbook rollback.yml \\
                                                -e auto_rollback=true \\
                                                -v
                                        """
                                    }
                                }
                            }
                        } catch (Exception e) {
                            echo "Rollback prompt timed out or was cancelled"
                        }
                    }
                }

                always {
                    // Cleanup temporary files
                    sh """
                        rm -f ansible/hosts 2>/dev/null || true
                        rm -f ansible/ansible.cfg 2>/dev/null || true
                        rm -f ansible/group_vars/all.yml 2>/dev/null || true
                    """
                }
            }
        }
        stage('Verify Deployment') {
            when {
                not { expression { env.DEPLOYMENT_TYPE == "DESTROY" } }
            }
            steps {
                withCredentials([[$class: 'AmazonWebServicesCredentialsBinding', credentialsId: env.AWS_CRED_ID]]) {
                    script {
                        echo "🔍 VERIFICATION: Running comprehensive validation..."

                        def publicIp = sh(
                            script: "cd terraform && terraform output -raw ecs_instance_public_ip",
                            returnStdout: true
                        ).trim()

                        sh """
                            echo "=== APPLICATION HEALTH CHECK ==="
                            curl -f -v "http://${publicIp}:8080/health"

                            echo "=== ECS SERVICE VALIDATION ==="
                            aws ecs describe-services \\
                                --cluster "${TF_VAR_cluster_name}" \\
                                --services "${TF_VAR_cluster_name}-service" \\
                                --region "${AWS_REGION}" \\
                                --query 'services[0].{Status:status,TaskDefinition:taskDefinition,Running:runningCount,Desired:desiredCount}' \\
                                --output table

                            echo "=== CONTAINER HEALTH CHECK ==="
                            # Check if containers are healthy
                            RUNNING_TASKS=\$(aws ecs list-tasks \\
                                --cluster "${TF_VAR_cluster_name}" \\
                                --service-name "${TF_VAR_cluster_name}-service" \\
                                --desired-status RUNNING \\
                                --region "${AWS_REGION}" \\
                                --query 'taskArns' \\
                                --output text)

                            if [ -n "\$RUNNING_TASKS" ]; then
                                aws ecs describe-tasks \\
                                    --cluster "${TF_VAR_cluster_name}" \\
                                    --tasks \$RUNNING_TASKS \\
                                    --region "${AWS_REGION}" \\
                                    --query 'tasks[0].containers[0].{Name:name,Status:lastStatus,Health:healthStatus}' \\
                                    --output table
                            fi

                            echo "=== LOG VALIDATION ==="
                            # Fixed: Simplified log analysis to avoid complex escaping
                            LATEST_STREAM=\$(aws logs describe-log-streams \\
                                --log-group-name "${ECS_LOG_GROUP}" \\
                                --region "${AWS_REGION}" \\
                                --order-by LastEventTime \\
                                --descending \\
                                --max-items 1 \\
                                --query 'logStreams[0].logStreamName' \\
                                --output text 2>/dev/null)

                            if [ "\$LATEST_STREAM" != "None" ] && [ "\$LATEST_STREAM" != "" ]; then
                                echo "Checking logs for errors in stream: \$LATEST_STREAM"
                                # Simple approach: get recent log messages and check for errors with grep
                                aws logs get-log-events \\
                                    --log-group-name "${ECS_LOG_GROUP}" \\
                                    --log-stream-name "\$LATEST_STREAM" \\
                                    --region "${AWS_REGION}" \\
                                    --start-from-head \\
                                    --query 'events[-20:].message' \\
                                    --output text > /tmp/recent_logs.txt 2>/dev/null || echo "Could not get logs"

                                if [ -f /tmp/recent_logs.txt ]; then
                                    ERROR_COUNT=\$(grep -c -i "error\\|fatal\\|exception" /tmp/recent_logs.txt 2>/dev/null || echo "0")
                                    if [ "\$ERROR_COUNT" -gt 0 ]; then
                                        echo "⚠️ Found \$ERROR_COUNT potential errors in logs - please review"
                                        echo "Recent error lines:"
                                        grep -i "error\\|fatal\\|exception" /tmp/recent_logs.txt | head -5 || true
                                    else
                                        echo "✅ No errors found in recent application logs"
                                    fi
                                    rm -f /tmp/recent_logs.txt
                                fi
                            fi

                            echo "✅ All validation checks completed successfully"
                        """

                        // Update build description with URL
                        currentBuild.description = "${currentBuild.description} | URL: http://${publicIp}:8080"

                        echo "✅ VERIFICATION: Deployment verification completed"
                    }
                }
            }
        }
    }
    post {
        always {
            script {
                echo "🧹 CLEANUP: Performing post-build cleanup..."

                // Archive deployment artifacts
                try {
                    archiveArtifacts artifacts: 'deployment-audit.json,task-definition.json', allowEmptyArchive: true
                } catch (Exception e) {
                    echo "⚠️ Could not archive artifacts: ${e.getMessage()}"
                }

                // Clean up Docker images to save space
                sh '''
                    echo "🧹 Cleaning up Docker images..."
                    docker system prune -f || echo "Docker cleanup failed"
                '''

                echo "📊 SUMMARY: Build completed"
                echo "   Build Number: ${BUILD_NUMBER}"
                echo "   Image Tag: ${IMAGE_TAG}"
                echo "   Deployment Type: ${env.DEPLOYMENT_TYPE}"
                echo "   Status: ${currentBuild.currentResult}"
            }
        }

        success {
            script {
                if (env.DEPLOYMENT_TYPE == "DESTROY") {
                    echo "🎉 SUCCESS: Infrastructure destroyed successfully!"
                } else {
                    echo "🎉 SUCCESS: Deployment completed successfully!"
                    echo "   Version ${IMAGE_TAG} deployed to ECS cluster ${TF_VAR_cluster_name}"

                    // Get application URL for success message
                    def appUrl = ""
                    try {
                        appUrl = sh(
                            script: "cd terraform && terraform output -raw ecs_instance_public_ip 2>/dev/null || echo 'unknown'",
                            returnStdout: true
                        ).trim()
                        if (appUrl != "unknown" && appUrl != "") {
                            echo "🌐 Application available at: http://${appUrl}:8080"
                            echo "🏥 Health check: http://${appUrl}:8080/health"
                        }
                    } catch (Exception e) {
                        echo "⚠️ Could not determine application URL"
                    }
                }
            }
        }

        failure {
            script {
                echo "❌ FAILURE: Deployment failed"
                echo "   Check the logs above for error details"

                // Try to get some debug information
                try {
                    withCredentials([[$class: 'AmazonWebServicesCredentialsBinding', credentialsId: env.AWS_CRED_ID]]) {
                        echo "🔍 DEBUG: Checking ECS cluster status..."
                        sh """
                            aws ecs describe-clusters --clusters ${TF_VAR_cluster_name} --region ${AWS_REGION} || echo "Cluster check failed"
                            aws ecs list-container-instances --cluster ${TF_VAR_cluster_name} --region ${AWS_REGION} || echo "Instance list failed"
                        """
                    }
                } catch (Exception e) {
                    echo "⚠️ Could not get debug information: ${e.getMessage()}"
                }
            }
        }

        unstable {
            script {
                echo "⚠️ UNSTABLE: Build completed with warnings"
            }
        }
    }
}
66 README.md
@@ -1,66 +0,0 @@
# nvhi-atsila-microservice

# AWS ECS CI/CD Pipeline with Terraform, Ansible & Jenkins

A complete CI/CD pipeline for deploying microservices to AWS ECS using infrastructure as code and configuration management.

## 🚀 Overview

This project implements an end-to-end CI/CD pipeline that automates the build, test, and deployment of microservices to AWS ECS. The pipeline leverages DevOps best practices to create a reproducible, scalable deployment solution optimized for the AWS Free Tier.

## 🛠️ Technology Stack

- **Terraform** - Provisions all AWS infrastructure
- **Ansible** - Configures EC2 instances with Docker and the ECS agent
- **Jenkins** - Orchestrates the entire CI/CD workflow
- **Artifactory** - Hosts Docker images
- **SonarQube** - Enforces code quality gates
- **Gitea** - Git repository hosting
- **AWS ECS** - Container orchestration (EC2-backed)

## 📋 Pipeline Workflow

1. Developer pushes code to Gitea
2. Jenkins webhook triggers the pipeline
3. SonarQube scans code for quality compliance
4. Docker image is built from approved code
5. Image is pushed to the Artifactory registry
6. Terraform provisions/updates AWS infrastructure
7. Ansible configures EC2 instances for ECS
8. Microservice is deployed to the ECS cluster

## 🏗️ Infrastructure Components

### AWS Resources (Managed by Terraform)
- VPC with public/private subnets
- ECS cluster with EC2 container instances
- Application Load Balancer (ALB)
- Security groups and IAM roles
- Auto-scaling capabilities

### DevOps Tools (Self-hosted)
All DevOps tools run on a dedicated Linux server:
- Jenkins for CI/CD automation
- Gitea for version control
- SonarQube for code analysis
- Artifactory for artifact management

## 📁 Project Structure

```
├── terraform/      # Infrastructure as Code
├── ansible/        # Configuration management
├── jenkins/        # CI/CD pipeline definitions
├── microservice/   # Sample application
├── scripts/        # Setup and utility scripts
└── docs/           # Documentation
```

## 🔒 Key Features

- **Fully Automated** - Push code and deploy automatically
- **Quality Gates** - SonarQube ensures code standards
- **Infrastructure as Code** - All resources defined in Terraform
- **Configuration Management** - Ansible ensures consistent server setup
- **AWS Free Tier** - Optimized for minimal AWS costs
- **Modular Design** - Easy to extend and customize
@@ -1,19 +0,0 @@
[defaults]
inventory = hosts
host_key_checking = False
retry_files_enabled = False
gathering = smart
fact_caching = memory
stdout_callback = yaml
stderr_callback = yaml
timeout = 30
log_path = ./ansible.log
nocows = 1

[ssh_connection]
ssh_args = -o ControlMaster=auto -o ControlPersist=60s -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no -o ConnectTimeout=10
pipelining = True
control_path = /tmp/ansible-ssh-%%h-%%p-%%r

[inventory]
enable_plugins = host_list, script, auto, yaml, ini
@@ -1,493 +0,0 @@
---
- name: Configure and Deploy ECS Application (Enterprise Security)
  hosts: inventory_hosts
  # Do NOT use blanket root access; escalate per task instead
  become: no
  gather_facts: yes

  vars:
    ecs_cluster_name: "nvhi-atsila-cluster"
    service_name: "nvhi-atsila-cluster-service"
    task_family: "nvhi-atsila-cluster-task"
    container_name: "nvhi-atsila-microservice"
    app_version: "{{ app_version | default('latest') }}"
    aws_region: "{{ aws_region | default('us-east-2') }}"
    log_group: "/ecs/{{ ecs_cluster_name }}"
    # Security: use a dedicated service account
    ecs_user: "ecs-user"
    ecs_group: "ecs-group"

  pre_tasks:
    - name: Validate required variables
      assert:
        that:
          - ecs_cluster_name is defined
          - aws_region is defined
          - aws_account_id is defined
          - task_execution_role_arn is defined
        fail_msg: "Required variables missing. Check app_version, aws_account_id, task_execution_role_arn"
      tags: [validation]

    - name: Test connectivity
      ping:
      tags: [validation]

    # Security: create a dedicated service account
    - name: Create ECS service group
      group:
        name: "{{ ecs_group }}"
        state: present
      become: yes
      become_user: root
      tags: [security, users]

    - name: Create ECS service user
      user:
        name: "{{ ecs_user }}"
        group: "{{ ecs_group }}"
        system: yes
        shell: /bin/bash
        home: "/home/{{ ecs_user }}"
        create_home: yes
        state: present
      become: yes
      become_user: root
      tags: [security, users]

    - name: Add ECS user to docker group
      user:
        name: "{{ ecs_user }}"
        groups: docker
        append: yes
      become: yes
      become_user: root
      tags: [security, users]

  tasks:
    # Infrastructure setup - only escalate when necessary
    - name: Update system packages
      yum:
        name: '*'
        state: latest
        update_cache: yes
      become: yes
      become_user: root
      async: 300
      poll: 0
      register: yum_update
      tags: [infrastructure]

    - name: Wait for package update to complete
      async_status:
        jid: "{{ yum_update.ansible_job_id }}"
      register: update_result
      until: update_result.finished
      retries: 30
      delay: 10
      # The async job ran as root, so its status file lives under root's home
      become: yes
      become_user: root
      tags: [infrastructure]

    - name: Install required packages
      yum:
        name:
          - docker
          - ecs-init
          - curl
          - wget
          - jq
        state: present
      become: yes
      become_user: root
      register: pkg_install
      # retries only take effect with an until condition
      until: pkg_install is succeeded
      retries: 3
      delay: 5
      tags: [infrastructure]

    # Security: configure Docker securely
    - name: Create Docker configuration directory
      file:
        path: /etc/docker
        state: directory
        mode: '0755'
        owner: root
        group: root
      become: yes
      become_user: root
      tags: [infrastructure, security]

    - name: Configure Docker daemon securely
      copy:
        dest: /etc/docker/daemon.json
        content: |
          {
            "log-driver": "json-file",
            "log-opts": {
              "max-size": "100m",
              "max-file": "3"
            },
            "live-restore": true,
            "userland-proxy": false,
            "no-new-privileges": true
          }
        mode: '0644'
        owner: root
        group: root
      become: yes
      become_user: root
      notify: restart docker
      tags: [infrastructure, security]

    - name: Start and enable Docker
      systemd:
        name: docker
        state: started
        enabled: true
        daemon_reload: true
      become: yes
      become_user: root
      register: docker_service
      tags: [infrastructure]

    - name: Verify Docker is running
      command: docker info
      register: docker_check
      failed_when: docker_check.rc != 0
      until: docker_check.rc == 0
      retries: 3
      delay: 5
      changed_when: false
      # Security: run as the regular ECS user (member of the docker group)
      become: yes
      become_user: "{{ ecs_user }}"
      tags: [infrastructure, validation]

    # Security: create the ECS directory with proper permissions
    - name: Create ECS config directory
      file:
        path: /etc/ecs
        state: directory
        mode: '0755'
        owner: root
        group: "{{ ecs_group }}"
      become: yes
      become_user: root
      tags: [infrastructure, security]

    - name: Configure ECS agent
      copy:
        dest: /etc/ecs/ecs.config
        content: |
          ECS_CLUSTER={{ ecs_cluster_name }}
          ECS_AVAILABLE_LOGGING_DRIVERS=["json-file","awslogs"]
          ECS_ENABLE_TASK_IAM_ROLE=true
          ECS_ENABLE_CONTAINER_METADATA=true
          ECS_CONTAINER_STOP_TIMEOUT=30s
          # Security: disable privileged containers by default
          ECS_DISABLE_PRIVILEGED=true
          # Security: enable SELinux/AppArmor support
          ECS_SELINUX_CAPABLE=true
          ECS_APPARMOR_CAPABLE=true
        mode: '0640'              # Security: more restrictive permissions
        owner: root
        group: "{{ ecs_group }}"  # Security: group ownership for ECS
        backup: yes
      become: yes
      become_user: root
      notify: restart ecs
      tags: [infrastructure, security]

    # Security: harden the ECS agent service via a systemd override
    - name: Create ECS service override directory
      file:
        path: /etc/systemd/system/ecs.service.d
        state: directory
        mode: '0755'
        owner: root
        group: root
      become: yes
      become_user: root
      tags: [infrastructure, security]

    - name: Configure ECS service security settings
      copy:
        dest: /etc/systemd/system/ecs.service.d/security.conf
        content: |
          [Service]
          # Security: additional hardening
          NoNewPrivileges=true
          ProtectSystem=strict
          ProtectHome=true
          PrivateTmp=true
          # Allow access to ECS directories
          ReadWritePaths=/var/lib/ecs /var/log/ecs /etc/ecs
        mode: '0644'
        owner: root
        group: root
      become: yes
      become_user: root
      notify:
        - reload systemd
        - restart ecs
      tags: [infrastructure, security]

    - name: Start and enable ECS agent
      systemd:
        name: ecs
        state: started
        enabled: true
        daemon_reload: true
      become: yes
      become_user: root
      tags: [infrastructure]

    - name: Wait for ECS agent to register
      shell: |
        count=0
        while [ $count -lt 30 ]; do
          instances=$(aws ecs list-container-instances --cluster {{ ecs_cluster_name }} --region {{ aws_region }} --query 'length(containerInstanceArns)' --output text 2>/dev/null || echo "0")
          if [ "$instances" != "0" ] && [ "$instances" != "None" ]; then
            echo "ECS agent registered successfully"
            exit 0
          fi
          echo "Waiting for ECS agent registration (attempt $((count+1))/30)..."
          sleep 10
          count=$((count+1))
        done
        echo "ECS agent failed to register"
        exit 1
      environment:
        AWS_DEFAULT_REGION: "{{ aws_region }}"
      delegate_to: localhost
      run_once: true
      # Security: run the AWS CLI as a regular user with proper AWS credentials
      become: no
      tags: [infrastructure]

    # Application deployment - no root required
    - name: Create CloudWatch log group
      shell: |
        aws logs create-log-group --log-group-name "{{ log_group }}" --region {{ aws_region }} 2>/dev/null || echo "Log group exists"
        aws logs put-retention-policy --log-group-name "{{ log_group }}" --retention-in-days 7 --region {{ aws_region }} 2>/dev/null || echo "Retention policy exists"
      environment:
        AWS_DEFAULT_REGION: "{{ aws_region }}"
      delegate_to: localhost
      run_once: true
      # Security: no root required for AWS API calls
      become: no
      tags: [deployment]

    # Security: create the temp file on the controller, owned by the current user
    - name: Create task definition file
      copy:
        dest: "/tmp/task-definition-{{ ansible_date_time.epoch }}.json"
        content: |
          {
            "family": "{{ task_family }}",
            "executionRoleArn": "{{ task_execution_role_arn }}",
            "networkMode": "bridge",
            "requiresCompatibilities": ["EC2"],
            "cpu": "256",
            "memory": "512",
            "containerDefinitions": [
              {
                "name": "{{ container_name }}",
                "image": "{{ aws_account_id }}.dkr.ecr.{{ aws_region }}.amazonaws.com/{{ container_name }}:{{ app_version }}",
                "cpu": 256,
                "memory": 512,
                "essential": true,
                "user": "1000:1000",
                "readonlyRootFilesystem": true,
                "portMappings": [
                  {
                    "containerPort": 8080,
                    "hostPort": 8080,
                    "protocol": "tcp"
                  }
                ],
                "logConfiguration": {
                  "logDriver": "awslogs",
                  "options": {
                    "awslogs-group": "{{ log_group }}",
                    "awslogs-region": "{{ aws_region }}",
                    "awslogs-stream-prefix": "ecs"
                  }
                },
                "healthCheck": {
                  "command": [
                    "CMD-SHELL",
                    "curl -f http://localhost:8080/health || exit 1"
                  ],
                  "interval": 30,
                  "timeout": 5,
                  "retries": 3,
                  "startPeriod": 60
                },
                "linuxParameters": {
                  "tmpfs": [
                    {
                      "containerPath": "/tmp",
                      "size": 100
                    }
                  ]
                },
                "mountPoints": [],
                "volumesFrom": []
              }
            ]
          }
        mode: '0644'
        # Security: file owned by the current user, not root
        owner: "{{ ansible_user | default(ansible_ssh_user) }}"
        group: "{{ ansible_user | default(ansible_ssh_user) }}"
      delegate_to: localhost
      run_once: true
      # Security: no root required
      become: no
      register: task_def_file
      tags: [deployment, security]

    - name: Register task definition
      shell: |
        aws ecs register-task-definition \
          --cli-input-json file://{{ task_def_file.dest }} \
          --region {{ aws_region }} \
          --output json
      environment:
        AWS_DEFAULT_REGION: "{{ aws_region }}"
      delegate_to: localhost
      run_once: true
      # Security: no root required for AWS API calls
      become: no
      register: task_registration
      tags: [deployment]

    - name: Update ECS service
      shell: |
        aws ecs update-service \
          --cluster {{ ecs_cluster_name }} \
          --service {{ service_name }} \
          --task-definition {{ task_family }} \
          --desired-count 1 \
          --force-new-deployment \
          --region {{ aws_region }} \
          --output json
      environment:
        AWS_DEFAULT_REGION: "{{ aws_region }}"
      delegate_to: localhost
      run_once: true
      # Security: no root required
      become: no
      register: service_update
      tags: [deployment]

    - name: Wait for service deployment to complete
      shell: |
        echo "Waiting for service to stabilize..."
        count=0
        while [ $count -lt 30 ]; do
          service_status=$(aws ecs describe-services \
            --cluster {{ ecs_cluster_name }} \
            --services {{ service_name }} \
            --region {{ aws_region }} \
            --query 'services[0]' \
            --output json 2>/dev/null)

          if [ $? -eq 0 ]; then
            running=$(echo "$service_status" | jq -r '.runningCount // 0')
            pending=$(echo "$service_status" | jq -r '.pendingCount // 0')

            echo "Running: $running, Pending: $pending"

            if [ "$running" -ge "1" ] && [ "$pending" -eq "0" ]; then
              echo "Service deployment completed successfully"
              exit 0
            fi
          fi

          echo "Waiting for deployment completion (attempt $((count+1))/30)..."
          sleep 20
          count=$((count+1))
        done

        echo "Service deployment did not complete within expected time"
        exit 1
      environment:
        AWS_DEFAULT_REGION: "{{ aws_region }}"
      delegate_to: localhost
      run_once: true
      # Security: no root required
      become: no
      tags: [deployment]

    # Health verification - no root required
    - name: Wait for application health check
      uri:
        url: "http://{{ ansible_default_ipv4.address }}:8080/health"
        method: GET
        timeout: 10
        status_code: 200
      register: health_check
      until: health_check.status == 200
      retries: 10
      delay: 15
      # Security: no root required for HTTP requests
      become: no
      tags: [verification]

    - name: Display deployment summary
      debug:
        msg: |
          ========================================
          🎉 SECURE DEPLOYMENT COMPLETED
          ========================================
          Cluster: {{ ecs_cluster_name }}
          Service: {{ service_name }}
          Task Family: {{ task_family }}
          Image Version: {{ app_version }}
          Instance IP: {{ ansible_default_ipv4.address }}
          Health Status: HEALTHY
          Security: Non-root containers, least privilege
          Application URL: http://{{ ansible_default_ipv4.address }}:8080
          ========================================
      tags: [reporting]

  handlers:
    - name: reload systemd
      systemd:
        daemon_reload: yes
      become: yes
      become_user: root

    - name: restart docker
      systemd:
        name: docker
        state: restarted
      become: yes
      become_user: root

    - name: restart ecs
      systemd:
        name: ecs
        state: restarted
        daemon_reload: true
      become: yes
      become_user: root

  post_tasks:
    - name: Cleanup temporary files
      file:
        path: "{{ item }}"
        state: absent
      loop:
        - "/tmp/task-definition-{{ ansible_date_time.epoch }}.json"
      delegate_to: localhost
      # Security: no root required for cleanup
      become: no
      tags: [cleanup]

    # Security: audit log
    - name: Log deployment action
      lineinfile:
        path: /var/log/ecs-deployments.log
        line: "{{ ansible_date_time.iso8601 }} - Deployment v{{ app_version }} by {{ ansible_user | default('unknown') }} from {{ (ansible_env.SSH_CLIENT | default('unknown')).split()[0] }}"
        create: yes
        mode: '0644'
        owner: root
        group: "{{ ecs_group }}"
      become: yes
      become_user: root
      tags: [audit, security]
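The validation pre-task above expects `aws_account_id` and `task_execution_role_arn` to arrive as extra vars from the pipeline. A typical invocation might look like the following; the version, account ID, and role ARN values are placeholders.

```
#!/usr/bin/env bash
# Hypothetical invocation of configure_ecs.yml; all -e values are placeholders.
set -euo pipefail

ansible-playbook configure_ecs.yml -i hosts \
  -e "app_version=1.4.2" \
  -e "aws_account_id=123456789012" \
  -e "task_execution_role_arn=arn:aws:iam::123456789012:role/nvhi-atsila-cluster-task-execution-role"
```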
@@ -1,33 +0,0 @@
---
# Global variables for all environments
# These can be overridden by host-specific variables or the command line

# ECS Configuration
ecs_cluster_name: nvhi-atsila-cluster
service_name: nvhi-atsila-cluster-service
task_family: nvhi-atsila-cluster-task
container_name: nvhi-atsila-microservice

# AWS Configuration
aws_region: us-east-2
container_port: 8080
health_check_path: /health

# Connection Settings
ansible_ssh_common_args: '-o StrictHostKeyChecking=no -o UserKnownHostsFile=/dev/null -o ConnectTimeout=10 -o ServerAliveInterval=60'
ansible_ssh_retries: 3
ansible_timeout: 30

# Application Settings
app_port: 8080
health_check_timeout: 10
health_check_retries: 10
health_check_delay: 15

# Deployment Settings
deployment_timeout: 600
service_stabilization_retries: 30
service_stabilization_delay: 20

# Logging
log_retention_days: 7
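Because extra vars outrank group variables in Ansible's precedence order, any of these defaults can be overridden for a one-off run without editing the file. The values below are illustrative.

```
# One-off override of group defaults via extra vars (illustrative values).
ansible-playbook configure_ecs.yml -i hosts \
  -e "aws_region=us-east-1" \
  -e "log_retention_days=30"
```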
@@ -1,14 +0,0 @@
[inventory_hosts]
# This file will be dynamically generated by Jenkins
# Format: hostname ansible_host=IP_ADDRESS ansible_user=USERNAME

[inventory_hosts:vars]
# SSH connection settings
ansible_ssh_private_key_file=~/.ssh/id_rsa
ansible_ssh_common_args='-o StrictHostKeyChecking=no -o UserKnownHostsFile=/dev/null -o ConnectTimeout=10 -o ServerAliveInterval=60'
ansible_python_interpreter=/usr/bin/python3
ansible_connection=ssh
ansible_ssh_retries=3

# AWS configuration
aws_region=us-east-2
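Since Jenkins regenerates the host entries on each run, the pipeline step amounts to injecting one line per instance in the documented format. A hedged sketch of what that step might look like; the instance IP source and host alias are assumptions.

```
#!/usr/bin/env bash
# Illustrative Jenkins step: inject the freshly provisioned instance into the
# inventory in the documented format (GNU sed assumed for the 'a' command).
set -euo pipefail

EC2_IP="$(terraform -chdir=../terraform output -raw ecs_instance_public_ip)"

# Insert the host entry directly under the [inventory_hosts] header;
# the [inventory_hosts:vars] block stays untouched.
sed -i "/^\[inventory_hosts\]/a ecs-instance ansible_host=${EC2_IP} ansible_user=ec2-user" hosts
```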
@@ -1,147 +0,0 @@
---
- name: Rollback ECS Service
  hosts: localhost
  connection: local
  gather_facts: false

  vars:
    ecs_cluster_name: "nvhi-atsila-cluster"
    service_name: "nvhi-atsila-cluster-service"
    task_family: "nvhi-atsila-cluster-task"
    aws_region: "us-east-2"

  tasks:
    - name: Get current service task definition
      shell: |
        aws ecs describe-services \
          --cluster {{ ecs_cluster_name }} \
          --services {{ service_name }} \
          --region {{ aws_region }} \
          --query 'services[0].taskDefinition' \
          --output text
      register: current_task_def
      environment:
        AWS_DEFAULT_REGION: "{{ aws_region }}"

    - name: Extract current revision number
      set_fact:
        current_revision: "{{ current_task_def.stdout.split(':')[-1] | int }}"

    - name: Calculate rollback revision
      set_fact:
        rollback_revision: "{{ (current_revision | int) - 1 }}"
      when: rollback_revision is not defined

    - name: Validate rollback revision
      fail:
        msg: "Cannot rollback - target revision {{ rollback_revision }} is invalid (must be >= 1)"
      when: (rollback_revision | int) < 1

    - name: Display rollback information
      debug:
        msg: |
          =================================
          ROLLBACK INFORMATION
          =================================
          Service: {{ service_name }}
          Cluster: {{ ecs_cluster_name }}
          Current Revision: {{ current_revision }}
          Target Revision: {{ rollback_revision }}
          =================================

    - name: Confirm rollback (interactive)
      pause:
        prompt: |
          WARNING: You are about to rollback the ECS service!

          Service: {{ service_name }}
          From: {{ task_family }}:{{ current_revision }}
          To: {{ task_family }}:{{ rollback_revision }}

          Do you want to continue? (yes/no)
      register: rollback_confirm
      when: auto_rollback is not defined

    - name: Set automatic confirmation
      set_fact:
        rollback_confirm:
          user_input: "yes"
      when: auto_rollback is defined and auto_rollback

    - name: Execute rollback
      shell: |
        aws ecs update-service \
          --cluster {{ ecs_cluster_name }} \
          --service {{ service_name }} \
          --task-definition {{ task_family }}:{{ rollback_revision }} \
          --force-new-deployment \
          --region {{ aws_region }} \
          --output json
      environment:
        AWS_DEFAULT_REGION: "{{ aws_region }}"
      # default('no') guards against an undefined answer (e.g. auto_rollback=false)
      when: (rollback_confirm.user_input | default('no') | lower) == 'yes'
      register: rollback_result

    - name: Wait for rollback completion
      shell: |
        echo "Waiting for rollback to complete..."
        count=0
        while [ $count -lt 20 ]; do
          service_status=$(aws ecs describe-services \
            --cluster {{ ecs_cluster_name }} \
            --services {{ service_name }} \
            --region {{ aws_region }} \
            --query 'services[0]' \
            --output json 2>/dev/null)

          if [ $? -eq 0 ]; then
            running=$(echo "$service_status" | jq -r '.runningCount // 0')
            pending=$(echo "$service_status" | jq -r '.pendingCount // 0')

            echo "Running: $running, Pending: $pending"

            if [ "$running" -ge "1" ] && [ "$pending" -eq "0" ]; then
              echo "Rollback completed successfully"
              exit 0
            fi
          fi

          echo "Waiting for rollback completion (attempt $((count+1))/20)..."
          sleep 15
          count=$((count+1))
        done

        echo "WARNING: Rollback may not have completed within expected time"
        exit 1
      environment:
        AWS_DEFAULT_REGION: "{{ aws_region }}"
      when: (rollback_confirm.user_input | default('no') | lower) == 'yes'

    - name: Verify rollback status
      shell: |
        aws ecs describe-services \
          --cluster {{ ecs_cluster_name }} \
          --services {{ service_name }} \
          --region {{ aws_region }} \
          --query 'services[0].{TaskDefinition:taskDefinition,RunningCount:runningCount,Status:status}' \
          --output table
      environment:
        AWS_DEFAULT_REGION: "{{ aws_region }}"
      when: (rollback_confirm.user_input | default('no') | lower) == 'yes'
      register: final_status

    - name: Display rollback results
      debug:
        msg: |
          ========================================
          🔄 ROLLBACK COMPLETED
          ========================================
          Service: {{ service_name }}
          Rolled back to: {{ task_family }}:{{ rollback_revision }}
          Status: Check output above
          ========================================
      when: (rollback_confirm.user_input | default('no') | lower) == 'yes'

    - name: Rollback cancelled
      debug:
        msg: "Rollback operation was cancelled by user"
      when: (rollback_confirm.user_input | default('no') | lower) != 'yes'
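For unattended use the play accepts `auto_rollback`, and `rollback_revision` can be pinned explicitly instead of being computed as the current revision minus one. Assuming the playbook file is named `rollback.yml`:

```
# Interactive rollback to the previous revision (prompts for confirmation):
ansible-playbook rollback.yml

# Unattended rollback to an explicit revision:
ansible-playbook rollback.yml -e "auto_rollback=true" -e "rollback_revision=7"
```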
@@ -1,220 +0,0 @@
#!/bin/bash

# Enterprise Ansible Setup and Test Script
# This script sets up the Ansible environment and runs tests

set -e

# Colors for output
RED='\033[0;31m'
GREEN='\033[0;32m'
YELLOW='\033[1;33m'
BLUE='\033[0;34m'
NC='\033[0m' # No Color

# Functions for colored output
print_status() {
    echo -e "${BLUE}[INFO]${NC} $1"
}

print_success() {
    echo -e "${GREEN}[SUCCESS]${NC} $1"
}

print_warning() {
    echo -e "${YELLOW}[WARNING]${NC} $1"
}

print_error() {
    echo -e "${RED}[ERROR]${NC} $1"
}

# Check that we're in the right directory
if [ ! -d "ansible" ]; then
    print_error "ansible directory not found. Please run this script from your project root."
    exit 1
fi

cd ansible

print_status "Setting up Enterprise Ansible environment..."

# Create necessary directories
print_status "Creating directory structure..."
mkdir -p group_vars
mkdir -p templates
mkdir -p roles
mkdir -p inventories/production
mkdir -p inventories/staging

# Install Python dependencies
print_status "Installing Python dependencies..."
pip3 install --user boto3 botocore jmespath > /dev/null 2>&1 || {
    print_warning "Could not install Python dependencies. Install manually: pip3 install boto3 botocore jmespath"
}

# Check Ansible installation
if ! command -v ansible &> /dev/null; then
    print_error "Ansible not found. Please install Ansible first:"
    echo "  Ubuntu/Debian: sudo apt update && sudo apt install ansible"
    echo "  RHEL/CentOS:   sudo yum install ansible"
    echo "  macOS:         brew install ansible"
    exit 1
fi

ANSIBLE_VERSION=$(ansible --version | head -n1)
print_success "Found: $ANSIBLE_VERSION"

# Check AWS CLI
if ! command -v aws &> /dev/null; then
    print_error "AWS CLI not found. Please install AWS CLI first."
    exit 1
fi

AWS_VERSION=$(aws --version)
print_success "Found: $AWS_VERSION"

# Validate configuration files
print_status "Validating Ansible configuration files..."

# Check that the main playbook exists
if [ ! -f "configure_ecs.yml" ]; then
    print_error "configure_ecs.yml not found!"
    exit 1
fi

# Validate YAML syntax
if command -v yamllint &> /dev/null; then
    print_status "Checking YAML syntax..."
    yamllint configure_ecs.yml || print_warning "YAML syntax issues found (non-critical)"
else
    print_warning "yamllint not found. Install with: pip3 install yamllint"
fi

# Validate Ansible playbook syntax
print_status "Validating Ansible playbook syntax..."
ansible-playbook configure_ecs.yml --syntax-check || {
    print_error "Ansible syntax validation failed!"
    exit 1
}
print_success "Ansible syntax validation passed"

# Test functions
test_connectivity() {
    local ip=$1
    if [ -z "$ip" ]; then
        print_error "No IP address provided for connectivity test"
        return 1
    fi

    print_status "Testing connectivity to $ip..."

    # Test SSH connectivity
    if timeout 10 bash -c "nc -z $ip 22" &>/dev/null; then
        print_success "SSH port (22) is reachable"
    else
        print_error "SSH port (22) is not reachable"
        return 1
    fi

    # Test Ansible ping against the generated test inventory
    if ansible inventory_hosts -m ping -i hosts_test &>/dev/null; then
        print_success "Ansible connectivity test passed"
    else
        print_error "Ansible connectivity test failed"
        return 1
    fi

    return 0
}

# Create a test inventory for validation
create_test_inventory() {
    local ip=${1:-"127.0.0.1"}

    print_status "Creating test inventory with IP: $ip"

    cat > hosts_test << EOF
[inventory_hosts]
test-instance ansible_host=$ip ansible_user=ec2-user

[inventory_hosts:vars]
ansible_ssh_private_key_file=~/.ssh/id_rsa
ansible_ssh_common_args='-o StrictHostKeyChecking=no -o UserKnownHostsFile=/dev/null -o ConnectTimeout=10'
ansible_python_interpreter=/usr/bin/python3
ansible_connection=ssh
aws_region=us-east-2
EOF
}

# Main execution
print_status "Ansible Enterprise Setup Complete!"
echo
echo "Available operations:"
echo "  1. Test connectivity (requires EC2 IP)"
echo "  2. Run simple deployment test"
echo "  3. Validate all playbooks"
echo "  4. Show configuration summary"
echo

# Interactive mode
if [ "$1" == "--interactive" ]; then
    echo -n "Enter operation number (1-4): "
    read -r operation

    case $operation in
        1)
            echo -n "Enter EC2 instance IP: "
            read -r ec2_ip
            create_test_inventory "$ec2_ip"
            if test_connectivity "$ec2_ip"; then
                print_success "Connectivity test passed!"
            else
                print_error "Connectivity test failed!"
            fi
            ;;
        2)
            echo -n "Enter EC2 instance IP: "
            read -r ec2_ip
            create_test_inventory "$ec2_ip"
            print_status "Running simple deployment test..."
            ansible-playbook simple-deploy.yml -i hosts_test -v
            ;;
        3)
            print_status "Validating all playbooks..."
            for playbook in *.yml; do
                if [ -f "$playbook" ]; then
                    print_status "Validating $playbook..."
                    ansible-playbook "$playbook" --syntax-check
                fi
            done
            print_success "All playbooks validated!"
            ;;
        4)
            print_status "Configuration Summary:"
            echo "  - Working Directory: $(pwd)"
            echo "  - Ansible Version: $(ansible --version | head -n1)"
            echo "  - AWS CLI Version: $(aws --version 2>&1)"
            echo "  - Available Playbooks:"
            ls -la *.yml 2>/dev/null | awk '{print "    - " $9}' || echo "    - None found"
            echo "  - Python Dependencies:"
            python3 -c "import boto3, botocore; print('    - boto3: ' + boto3.__version__); print('    - botocore: ' + botocore.__version__)" 2>/dev/null || echo "    - Not installed"
            ;;
        *)
            print_error "Invalid operation number"
            ;;
    esac
fi

# Cleanup
if [ -f "hosts_test" ]; then
    rm -f hosts_test
fi

print_success "Setup script completed!"
echo
echo "Next steps:"
echo "  1. Update your Jenkins pipeline with the new Ansible integration"
echo "  2. Test with: ./setup-ansible.sh --interactive"
echo "  3. Run deployment: ansible-playbook configure_ecs.yml -i hosts -v"
echo
@@ -1,109 +0,0 @@
---
- name: Simple ECS Configuration Test
  hosts: inventory_hosts
  become: yes
  gather_facts: yes

  vars:
    ecs_cluster_name: "nvhi-atsila-cluster"

  tasks:
    - name: Test connectivity
      ping:
      tags: [test]

    - name: Check system information
      debug:
        msg: |
          System: {{ ansible_distribution }} {{ ansible_distribution_version }}
          Hostname: {{ ansible_hostname }}
          IP: {{ ansible_default_ipv4.address }}
      tags: [info]

    - name: Update system packages
      yum:
        name: '*'
        state: latest
        update_cache: yes
      async: 300
      poll: 0
      register: yum_update
      tags: [packages]

    - name: Wait for package update
      async_status:
        jid: "{{ yum_update.ansible_job_id }}"
      register: update_result
      until: update_result.finished
      retries: 30
      delay: 10
      tags: [packages]

    - name: Install Docker and ECS components
      yum:
        name:
          - docker
          - ecs-init
          - curl
          - jq
        state: present
      tags: [install]

    - name: Start Docker service
      systemd:
        name: docker
        state: started
        enabled: true
        daemon_reload: true
      tags: [services]

    - name: Verify Docker is working
      command: docker --version
      register: docker_version
      changed_when: false
      tags: [verify]

    - name: Create ECS configuration directory
      file:
        path: /etc/ecs
        state: directory
        mode: '0755'
      tags: [config]

    - name: Write ECS configuration
      copy:
        dest: /etc/ecs/ecs.config
        content: |
          ECS_CLUSTER={{ ecs_cluster_name }}
          ECS_AVAILABLE_LOGGING_DRIVERS=["json-file","awslogs"]
          ECS_ENABLE_TASK_IAM_ROLE=true
        mode: '0644'
        backup: yes
      notify: restart ecs
      tags: [config]

    - name: Start ECS agent
      systemd:
        name: ecs
        state: started
        enabled: true
        daemon_reload: true
      tags: [services]

    - name: Display configuration summary
      debug:
        msg: |
          ========================================
          ✅ SIMPLE CONFIGURATION COMPLETED
          ========================================
          Docker Version: {{ docker_version.stdout }}
          ECS Cluster: {{ ecs_cluster_name }}
          Instance IP: {{ ansible_default_ipv4.address }}
          ========================================
      tags: [summary]

  handlers:
    - name: restart ecs
      systemd:
        name: ecs
        state: restarted
        daemon_reload: true
@@ -1 +0,0 @@
ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAACAQDDFBAOogBj/GHKXQs6FLROGQfXkZe2uKbRron0We7ZOLgt6e1bI7U8IMe+DIH250CHSi4R5DBYFQF5Bk1TkS5cgMtPIAb87vRUGI3sLs29DQA/kllYiZlQi9ejxcEz2+TRWn10Q/Kltlb6ESNLnnnTsIUUxKUeY3MKFFd+V13FleSVLGYondwPWYwD/XJ6a3VwSTJ1wFKO+lpKknSjDl2ZOgYpWFALPH+EwMlRGVMrUXAB604zqR1XOzYXAAWnhmmC9IGgCzU/5JnEgFyhfZbR3kpEH8SmSXahvdFZERp+3j9d3ROjchqnf0Z0zZ7vzX+G+jvzT/jGOkzH9tx0/OqIO9f47OFF8iUfZgUtJU1QGbepdsmQqognhxfJQfMZbVtKUw7zt+mzJz3A0XcRp7IwVHaqJ2QW2dpXi4UbWtejtZqROg6byWq2FpvFGNIT3eiKTf+EpCoOec6YGSrRQlj73Ob0+FhmsyQ6e8KKncaRYx38PqtnWsI3UnLtdKmEJmDBPI0ipxJzmKJKtb0vtJPVYvFEpgiXSwnDX883rAUQrXR/EhOMmbMwk7JSes6/GXH9rWN10JHh1/i1LLpl+rg6VyktFgVBHzVw++y29QSfFixeTvFkkTS5kl//CpKd1GDQb9ZBH6SPgkgOjmASPUo+p5e/NiN/SIBSpYpMjOKs7Q== jacques@Xochiquetzal
@@ -1,99 +0,0 @@
# Backend Infrastructure - creates the S3 bucket and DynamoDB table for remote state
# This should be run FIRST with local state, then never changed

terraform {
  # No backend configuration - uses local state for bootstrap
  required_providers {
    aws = {
      source  = "hashicorp/aws"
      version = "~> 5.0"
    }
  }
}

provider "aws" {
  region = var.aws_region
}

# S3 bucket for Terraform state
resource "aws_s3_bucket" "tfstate" {
  bucket = var.backend_bucket_name

  tags = {
    Name        = var.backend_bucket_name
    Environment = "Production"
    Purpose     = "Terraform State Storage"
  }
}

# S3 bucket versioning
resource "aws_s3_bucket_versioning" "tfstate_versioning" {
  bucket = aws_s3_bucket.tfstate.id
  versioning_configuration {
    status = "Enabled"
  }
}

# S3 bucket encryption
resource "aws_s3_bucket_server_side_encryption_configuration" "tfstate_encryption" {
  bucket = aws_s3_bucket.tfstate.id

  rule {
    apply_server_side_encryption_by_default {
      sse_algorithm = "AES256"
    }
  }
}

# S3 bucket public access block
resource "aws_s3_bucket_public_access_block" "tfstate_block" {
  bucket = aws_s3_bucket.tfstate.id

  block_public_acls       = true
  block_public_policy     = true
  ignore_public_acls      = true
  restrict_public_buckets = true
}

# DynamoDB table for state locking
resource "aws_dynamodb_table" "locks" {
  name         = var.lock_table_name
  billing_mode = "PAY_PER_REQUEST"
  hash_key     = "LockID"

  attribute {
    name = "LockID"
    type = "S"
  }

  point_in_time_recovery {
    enabled = true
  }

  tags = {
    Name        = var.lock_table_name
    Environment = "Production"
    Purpose     = "Terraform State Locking"
  }
}

# Outputs for reference
output "s3_bucket_name" {
  description = "Name of the S3 bucket for Terraform state"
  value       = aws_s3_bucket.tfstate.bucket
}

output "dynamodb_table_name" {
  description = "Name of the DynamoDB table for state locking"
  value       = aws_dynamodb_table.locks.name
}

output "s3_bucket_arn" {
  description = "ARN of the S3 bucket"
  value       = aws_s3_bucket.tfstate.arn
}

output "dynamodb_table_arn" {
  description = "ARN of the DynamoDB table"
  value       = aws_dynamodb_table.locks.arn
}
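The header notes this bootstrap must run first with local state. A plausible one-time sequence, capturing the outputs that later feed `terraform init`; the `terraform/bootstrap` directory name is an assumption about the layout.

```
#!/usr/bin/env bash
# One-time bootstrap with local state; directory name is an assumption.
set -euo pipefail

cd terraform/bootstrap
terraform init          # local state only - this root has no backend block
terraform apply -auto-approve

# Capture outputs for the main stack's backend configuration.
terraform output -raw s3_bucket_name
terraform output -raw dynamodb_table_name
```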
@@ -1,17 +0,0 @@
variable "aws_region" {
  description = "AWS region for resources"
  type        = string
  default     = "us-east-2"
}

variable "backend_bucket_name" {
  description = "Name of the S3 bucket for Terraform state"
  type        = string
  default     = "nvhi-atsila-tf-state"
}

variable "lock_table_name" {
  description = "Name of the DynamoDB table for state locking"
  type        = string
  default     = "nvhi-atsila-locks"
}
@@ -1,6 +0,0 @@
terraform {
  backend "s3" {
    # Backend configuration values are provided on the command line during terraform init.
    # This allows environment-specific backends while keeping the code DRY.
  }
}
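With the backend block left empty, the values arrive at init time via `-backend-config` flags. The bucket and table names below match the bootstrap defaults; the state key path is an assumption.

```
# Partial backend configuration supplied at init time; bucket/table match the
# bootstrap defaults, the key path is illustrative.
terraform init \
  -backend-config="bucket=nvhi-atsila-tf-state" \
  -backend-config="key=ecs/terraform.tfstate" \
  -backend-config="region=us-east-2" \
  -backend-config="dynamodb_table=nvhi-atsila-locks" \
  -backend-config="encrypt=true"
```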
@@ -1,328 +0,0 @@
# Application Infrastructure
# Provider configuration is in versions.tf

data "aws_availability_zones" "azs" {}

# VPC
resource "aws_vpc" "main" {
  cidr_block           = var.vpc_cidr
  enable_dns_hostnames = true
  enable_dns_support   = true

  tags = {
    Name = "${var.cluster_name}-vpc"
  }
}

# Internet Gateway
resource "aws_internet_gateway" "main" {
  vpc_id = aws_vpc.main.id

  tags = {
    Name = "${var.cluster_name}-igw"
  }
}

# Public subnets
resource "aws_subnet" "public" {
  count                   = length(split(",", var.public_subnets))
  vpc_id                  = aws_vpc.main.id
  cidr_block              = element(split(",", var.public_subnets), count.index)
  availability_zone       = data.aws_availability_zones.azs.names[count.index]
  map_public_ip_on_launch = true

  tags = {
    Name = "${var.cluster_name}-public-${count.index}"
  }
}

# Route table for public subnets
resource "aws_route_table" "public" {
  vpc_id = aws_vpc.main.id

  route {
    cidr_block = "0.0.0.0/0"
    gateway_id = aws_internet_gateway.main.id
  }

  tags = {
    Name = "${var.cluster_name}-public-rt"
  }
}

# Route table associations
resource "aws_route_table_association" "public" {
  count          = length(aws_subnet.public)
  subnet_id      = aws_subnet.public[count.index].id
  route_table_id = aws_route_table.public.id
}

# Security group - updated for SSM (SSH removed, application access kept)
resource "aws_security_group" "ecs_sg" {
  name        = "${var.cluster_name}-sg"
  description = "Allow HTTP to ECS and HTTPS outbound for SSM/ECR"
  vpc_id      = aws_vpc.main.id

  # HTTP access for the application
  ingress {
    description = "HTTP from anywhere"
    from_port   = 8080
    to_port     = 8080
    protocol    = "tcp"
    cidr_blocks = ["0.0.0.0/0"]
  }

  # HTTPS outbound for SSM, ECR, and AWS services
  egress {
    description = "HTTPS outbound for AWS services"
    from_port   = 443
    to_port     = 443
    protocol    = "tcp"
    cidr_blocks = ["0.0.0.0/0"]
  }

  # HTTP outbound for package updates
  egress {
    description = "HTTP outbound for package updates"
    from_port   = 80
    to_port     = 80
    protocol    = "tcp"
    cidr_blocks = ["0.0.0.0/0"]
  }

  # DNS resolution
  egress {
    description = "DNS resolution"
    from_port   = 53
    to_port     = 53
    protocol    = "udp"
    cidr_blocks = ["0.0.0.0/0"]
  }

  tags = {
    Name = "${var.cluster_name}-sg"
  }
}

# Key pair (kept for compatibility, but not needed for SSM)
resource "aws_key_pair" "deployer" {
  key_name   = var.key_pair_name
  public_key = "ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAACAQDDFBAOogBj/GHKXQs6FLROGQfXkZe2uKbRron0We7ZOLgt6e1bI7U8IMe+DIH250CHSi4R5DBYFQF5Bk1TkS5cgMtPIAb87vRUGI3sLs29DQA/kllYiZlQi9ejxcEz2+TRWn10Q/Kltlb6ESNLnnnTsIUUxKUeY3MKFFd+V13FleSVLGYondwPWYwD/XJ6a3VwSTJ1wFKO+lpKknSjDl2ZOgYpWFALPH+EwMlRGVMrUXAB604zqR1XOzYXAAWnhmmC9IGgCzU/5JnEgFyhfZbR3kpEH8SmSXahvdFZERp+3j9d3ROjchqnf0Z0zZ7vzX+G+jvzT/jGOkzH9tx0/OqIO9f47OFF8iUfZgUtJU1QGbepdsmQqognhxfJQfMZbVtKUw7zt+mzJz3A0XcRp7IwVHaqJ2QW2dpXi4UbWtejtZqROg6byWq2FpvFGNIT3eiKTf+EpCoOec6YGSrRQlj73Ob0+FhmsyQ6e8KKncaRYx38PqtnWsI3UnLtdKmEJmDBPI0ipxJzmKJKtb0vtJPVYvFEpgiXSwnDX883rAUQrXR/EhOMmbMwk7JSes6/GXH9rWN10JHh1/i1LLpl+rg6VyktFgVBHzVw++y29QSfFixeTvFkkTS5kl//CpKd1GDQb9ZBH6SPgkgOjmASPUo+p5e/NiN/SIBSpYpMjOKs7Q== jacques@Xochiquetzal"

  tags = {
    Name = var.key_pair_name
  }
}

# Get the Amazon Linux 2 ECS-optimized AMI (better for ECS)
data "aws_ami" "amazon_linux" {
  most_recent = true
  owners      = ["amazon"]

  filter {
    name   = "name"
    values = ["amzn2-ami-ecs-hvm-*-x86_64-ebs"]
  }

  filter {
    name   = "virtualization-type"
    values = ["hvm"]
  }
}

# IAM role for the ECS instance
resource "aws_iam_role" "ecs_instance_role" {
  name = "${var.cluster_name}-ecs-instance-role"

  assume_role_policy = jsonencode({
    Version = "2012-10-17"
    Statement = [
      {
        Action = "sts:AssumeRole"
        Effect = "Allow"
        Principal = {
          Service = "ec2.amazonaws.com"
        }
      }
    ]
  })

  tags = {
    Name = "${var.cluster_name}-ecs-instance-role"
  }
}

# IAM role policy attachment for ECS
resource "aws_iam_role_policy_attachment" "ecs_instance_role_policy" {
  role       = aws_iam_role.ecs_instance_role.name
  policy_arn = "arn:aws:iam::aws:policy/service-role/AmazonEC2ContainerServiceforEC2Role"
}

# IAM role policy attachment for SSM
resource "aws_iam_role_policy_attachment" "ecs_instance_ssm_policy" {
  role       = aws_iam_role.ecs_instance_role.name
  policy_arn = "arn:aws:iam::aws:policy/AmazonSSMManagedInstanceCore"
}

# ECS task execution role
resource "aws_iam_role" "ecs_task_execution_role" {
  name = "${var.cluster_name}-task-execution-role"

  assume_role_policy = jsonencode({
    Version = "2012-10-17"
    Statement = [
      {
        Action = "sts:AssumeRole"
        Effect = "Allow"
        Principal = {
          Service = "ecs-tasks.amazonaws.com"
        }
      }
    ]
  })

  tags = {
    Name = "${var.cluster_name}-task-execution-role"
  }
}

# Attach the AWS managed policy for ECS task execution
resource "aws_iam_role_policy_attachment" "ecs_task_execution_role_policy" {
  role       = aws_iam_role.ecs_task_execution_role.name
  policy_arn = "arn:aws:iam::aws:policy/service-role/AmazonECSTaskExecutionRolePolicy"
}

# Additional policy for ECR access
resource "aws_iam_role_policy" "ecs_task_execution_ecr_policy" {
  name = "${var.cluster_name}-task-execution-ecr-policy"
  role = aws_iam_role.ecs_task_execution_role.id

  policy = jsonencode({
    Version = "2012-10-17"
    Statement = [
      {
        Effect = "Allow"
        Action = [
          "ecr:GetAuthorizationToken",
          "ecr:BatchCheckLayerAvailability",
          "ecr:GetDownloadUrlForLayer",
          "ecr:BatchGetImage"
        ]
        Resource = "*"
      }
    ]
  })
}

# IAM instance profile
resource "aws_iam_instance_profile" "ecs_instance_profile" {
  name = "${var.cluster_name}-ecs-instance-profile"
  role = aws_iam_role.ecs_instance_role.name

  tags = {
    Name = "${var.cluster_name}-ecs-instance-profile"
  }
}

# ECS cluster
resource "aws_ecs_cluster" "main" {
  name = var.cluster_name

  setting {
    name  = "containerInsights"
    value = "enabled"
  }

  tags = {
    Name = var.cluster_name
  }
}

# User data script for the ECS instance with SSM
locals {
  user_data = base64encode(templatefile("${path.module}/user_data.sh", {
    cluster_name = var.cluster_name
  }))
}

# EC2 instance for ECS
resource "aws_instance" "ecs_instance" {
  ami                    = data.aws_ami.amazon_linux.id
  instance_type          = var.instance_type
  subnet_id              = aws_subnet.public[0].id
  vpc_security_group_ids = [aws_security_group.ecs_sg.id]
  key_name               = aws_key_pair.deployer.key_name
  iam_instance_profile   = aws_iam_instance_profile.ecs_instance_profile.name
  user_data_base64       = local.user_data

  root_block_device {
    volume_type = "gp3"
    volume_size = 30
    encrypted   = true
  }

  tags = {
    Name = "${var.cluster_name}-instance"
  }
}

# ECS service - commented out because the Jenkins pipeline creates it
# resource "aws_ecs_service" "main" {
#   name            = "${var.cluster_name}-service"
#   cluster         = aws_ecs_cluster.main.id
#   desired_count   = 1
#   launch_type     = "EC2"
#   task_definition = "${var.cluster_name}-task:1"
#
#   depends_on = [aws_instance.ecs_instance]
#
#   lifecycle {
#     ignore_changes = [task_definition]
#   }
#
#   tags = {
#     Name = "${var.cluster_name}-service"
#   }
# }

# CloudWatch log group for ECS
resource "aws_cloudwatch_log_group" "ecs_logs" {
  name              = "/ecs/${var.cluster_name}"
  retention_in_days = 7

  tags = {
    Name = "${var.cluster_name}-logs"
  }
}

# Outputs
output "ecs_instance_public_ip" {
  description = "Public IP of the ECS instance"
  value       = aws_instance.ecs_instance.public_ip
}

output "ecs_instance_id" {
  description = "Instance ID for SSM access"
  value       = aws_instance.ecs_instance.id
}

output "ecs_cluster_name" {
  description = "Name of the ECS cluster"
  value       = aws_ecs_cluster.main.name
}

output "vpc_id" {
  description = "ID of the VPC"
  value       = aws_vpc.main.id
}

output "public_subnet_ids" {
  description = "IDs of the public subnets"
  value       = aws_subnet.public[*].id
}

output "ecs_task_execution_role_arn" {
  description = "ARN of the ECS task execution role"
  value       = aws_iam_role.ecs_task_execution_role.arn
}
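The outputs above are what the Ansible side consumes. A sketch of wiring them together; the relative paths and the STS call for the account ID are assumptions about the pipeline layout.

```
#!/usr/bin/env bash
# Feed Terraform outputs into the playbook's required extra vars.
# Paths and the STS lookup are assumptions, not confirmed pipeline steps.
set -euo pipefail

ROLE_ARN="$(terraform output -raw ecs_task_execution_role_arn)"
ACCOUNT_ID="$(aws sts get-caller-identity --query Account --output text)"

ansible-playbook ../ansible/configure_ecs.yml -i ../ansible/hosts \
  -e "task_execution_role_arn=${ROLE_ARN}" \
  -e "aws_account_id=${ACCOUNT_ID}" \
  -e "app_version=latest"
```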
@@ -1,73 +0,0 @@
#!/bin/bash
# Enhanced user data script with SSM and better logging
exec > >(tee /var/log/user-data.log | logger -t user-data -s 2>/dev/console) 2>&1

echo "=== Starting EC2 User Data Script ==="
echo "Timestamp: $(date)"
echo "Instance ID: $(curl -s http://169.254.169.254/latest/meta-data/instance-id)"
echo "Cluster Name: ${cluster_name}"

# Update system
echo "=== Updating system packages ==="
yum update -y

# Install and configure the SSM agent (already installed on Amazon Linux 2)
echo "=== Configuring SSM Agent ==="
yum install -y amazon-ssm-agent
systemctl enable amazon-ssm-agent
systemctl start amazon-ssm-agent

# Install the ECS agent
echo "=== Installing ECS Agent ==="
yum install -y ecs-init

# Configure the ECS cluster
echo "=== Configuring ECS Cluster ==="
cat > /etc/ecs/ecs.config << EOF
ECS_CLUSTER=${cluster_name}
ECS_ENABLE_LOGGING=true
ECS_LOGLEVEL=info
ECS_ENABLE_CONTAINER_METADATA=true
ECS_ENABLE_TASK_IAM_ROLE=true
ECS_AVAILABLE_LOGGING_DRIVERS=["json-file","awslogs"]
ECS_CONTAINER_STOP_TIMEOUT=30s
ECS_CONTAINER_START_TIMEOUT=3m
ECS_DISABLE_IMAGE_CLEANUP=false
EOF

# Start Docker and ECS
echo "=== Starting Docker and ECS services ==="
systemctl enable docker
systemctl start docker
systemctl enable ecs
systemctl start ecs

# Wait for services to be ready
echo "=== Waiting for services to initialize ==="
sleep 30

# Verify services
echo "=== Service Status Check ==="
echo "SSM Agent Status:"
systemctl status amazon-ssm-agent --no-pager || echo "SSM agent status check failed"

echo "Docker Status:"
systemctl status docker --no-pager || echo "Docker status check failed"

echo "ECS Status:"
systemctl status ecs --no-pager || echo "ECS status check failed"

# Check the ECS agent connection
echo "=== ECS Agent Status ==="
for i in {1..5}; do
  if curl -s http://localhost:51678/v1/metadata; then
    echo "ECS agent is responding"
    break
  else
    echo "ECS agent not ready yet, attempt $i/5"
    sleep 10
  fi
done

echo "=== User Data Script Completed ==="
echo "Timestamp: $(date)"
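Because the instance role attaches `AmazonSSMManagedInstanceCore`, boot problems can be debugged without any SSH access; for example:

```
# Open a shell on the instance via SSM (needs the Session Manager plugin locally;
# no SSH keys or security-group changes required).
INSTANCE_ID="$(terraform output -raw ecs_instance_id)"
aws ssm start-session --target "$INSTANCE_ID" --region us-east-2

# Inside the session, the script above logged everything it did:
sudo tail -n 50 /var/log/user-data.log
```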
@@ -1,35 +0,0 @@
variable "aws_region" {
  description = "AWS region for resources"
  type        = string
  default     = "us-east-2"
}

variable "jenkins_ip_cidr" {
  description = "CIDR block for SSH access from Jenkins (kept for compatibility; the security group now relies on SSM instead of SSH)"
  type        = string
}

variable "cluster_name" {
  description = "Name of the ECS cluster"
  type        = string
}

variable "vpc_cidr" {
  description = "VPC CIDR block"
  type        = string
}

variable "public_subnets" {
  description = "Comma-separated public subnet CIDRs"
  type        = string
}

variable "instance_type" {
  description = "EC2 instance type"
  type        = string
}

variable "key_pair_name" {
  description = "EC2 Key Pair name"
  type        = string
}
@@ -1,31 +0,0 @@
# versions.tf - enterprise-grade version management
# This file pins provider versions for consistency across environments

terraform {
  required_version = ">= 1.5.0"

  required_providers {
    aws = {
      source  = "hashicorp/aws"
      version = "~> 6.3.0" # Pin to a specific minor version for stability
    }
  }
}

# Provider configuration with default tags (enterprise best practice)
provider "aws" {
  region = var.aws_region

  # Default tags applied to all resources (enterprise requirement)
  default_tags {
    tags = {
      Environment        = "production"
      Project            = "nvhi-atsila"
      ManagedBy          = "terraform"
      Owner              = "devops-team"
      CostCenter         = "engineering"
      SecurityReview     = "2024-Q4"
      DataClassification = "internal"
    }
  }
}