pipeline {
    agent any

    parameters {
        booleanParam(
            name: 'FORCE_INFRASTRUCTURE_DEPLOY',
            defaultValue: false,
            description: 'Force infrastructure deployment regardless of change detection'
        )
        booleanParam(
            name: 'SKIP_QUALITY_GATES',
            defaultValue: false,
            description: 'Skip SonarQube quality gates (use with caution)'
        )
    }

    environment {
        // Core configuration
        GITEA_REPO = 'https://code.jacquesingram.online/lenape/nvhi-atsila-microservice.git'
        GITEA_CREDS = '52ee0829-6e65-4951-925b-4186254c3f21'
        SONAR_HOST = 'https://sonar.jacquesingram.online'
        SONAR_TOKEN = credentials('sonar-token')

        // AWS configuration with ECR
        AWS_CRED_ID = 'aws-ci'
        AWS_ACCOUNT_ID = credentials('AWS_ACCOUNT_ID')
        AWS_REGION = 'us-east-2'
        ECR_REPO = 'nvhi-atsila-microservice'

        // Backend configuration
        TF_BACKEND_BUCKET = 'nvhi-atsila-tf-state'
        TF_BACKEND_PREFIX = 'ecs/terraform.tfstate'
        TF_DDB_TABLE = 'nvhi-atsila-locks'

        // Application variables
        TF_VAR_cluster_name = 'nvhi-atsila-cluster'
        TF_VAR_vpc_cidr = '10.0.0.0/16'
        TF_VAR_public_subnets = '10.0.1.0/24,10.0.2.0/24'
        TF_VAR_instance_type = 't2.micro'
        TF_VAR_key_pair_name = 'nvhi-atsila-deployer'
        TF_VAR_jenkins_ip_cidr = "0.0.0.0/0" // For demo; tighten in production
        TF_VAR_aws_region = "${AWS_REGION}"

        // Enhanced deployment tracking
        IMAGE_TAG = "v1.0.${BUILD_NUMBER}"
        DEPLOYMENT_TYPE = "APPLICATION"

        // Enterprise settings
        TF_IN_AUTOMATION = 'true'
        TF_INPUT = 'false'
    }

    stages {
        stage('Debug: Show File Structure') {
            steps {
                echo "📂 Current directory contents:"
                sh 'ls -la'
                echo "🔍 Full file tree:"
                sh 'find . -type f | sort'
            }
        }

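        // Idempotent bootstrap: terraform apply is attempted first; if it fails, the stage
        // falls back to checking whether the S3 state bucket and DynamoDB lock table already
        // exist before deciding whether the build can continue.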
        stage('Bootstrap Terraform Backend') {
            steps {
                script {
                    def tfBackendDir = "terraform-backend"

                    echo "🔐 Using Jenkins credentials to authenticate with AWS"
                    withCredentials([[$class: 'AmazonWebServicesCredentialsBinding', credentialsId: env.AWS_CRED_ID]]) {
                        echo "🔄 Checking/Bootstrapping Terraform backend..."
                        dir(tfBackendDir) {
                            def exitCode = sh(
                                script: """
                                    terraform init \\
                                        -var="aws_region=${TF_VAR_aws_region}" \\
                                        -var="backend_bucket_name=${TF_BACKEND_BUCKET}" \\
                                        -var="lock_table_name=${TF_DDB_TABLE}"
                                    terraform apply -auto-approve \\
                                        -var="aws_region=${TF_VAR_aws_region}" \\
                                        -var="backend_bucket_name=${TF_BACKEND_BUCKET}" \\
                                        -var="lock_table_name=${TF_DDB_TABLE}"
                                """,
                                returnStatus: true
                            )

                            if (exitCode == 0) {
                                echo "✅ Terraform backend created successfully"
                            } else {
                                echo "⚠️ Terraform apply failed, checking if resources already exist..."
                                def bucketExists = sh(
                                    script: "aws s3api head-bucket --bucket ${TF_BACKEND_BUCKET} --region ${TF_VAR_aws_region} 2>/dev/null",
                                    returnStatus: true
                                ) == 0
                                def tableExists = sh(
                                    script: "aws dynamodb describe-table --table-name ${TF_DDB_TABLE} --region ${TF_VAR_aws_region} 2>/dev/null",
                                    returnStatus: true
                                ) == 0

                                if (bucketExists && tableExists) {
                                    echo "✅ Terraform backend already exists - continuing..."
                                } else {
                                    echo "❌ Backend bootstrap failed and resources don't exist:"
                                    echo " S3 Bucket exists: ${bucketExists}"
                                    echo " DynamoDB Table exists: ${tableExists}"
                                    error("Manual intervention required.")
                                }
                            }
                        }
                    }
                }
            }
        }

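        // Deployment-type decision order used below:
        //   1. FORCE_INFRASTRUCTURE_DEPLOY parameter overrides everything
        //   2. First commit (no HEAD~1) deploys infrastructure
        //   3. Any change under terraform/ deploys infrastructure
        //   4. Otherwise infrastructure is deployed only if the ECS cluster is not ACTIVE in AWS;
        //      when it is, the build stays an application-only deployment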
        stage('Security Assessment & Checkout') {
            steps {
                checkout scm
                script {
                    def infrastructureFiles = sh(
                        script: '''
                            if git rev-parse HEAD~1 >/dev/null 2>&1; then
                                git diff --name-only HEAD~1 2>/dev/null | grep -E "^terraform/" || echo "none"
                            else
                                echo "initial"
                            fi
                        ''',
                        returnStdout: true
                    ).trim()

                    // Check force parameter first - this overrides everything
                    if (params.FORCE_INFRASTRUCTURE_DEPLOY) {
                        env.DEPLOYMENT_TYPE = "INFRASTRUCTURE"
                        echo "🚨 FORCED: Infrastructure deployment requested via parameter"
                        echo "✅ Deployment type set to: INFRASTRUCTURE (forced)"
                    } else if (infrastructureFiles == "initial") {
                        env.DEPLOYMENT_TYPE = "INFRASTRUCTURE"
                        echo "✅ First run detected. Deploying infrastructure."
                    } else if (infrastructureFiles != "none") {
                        env.DEPLOYMENT_TYPE = "INFRASTRUCTURE"
                        echo "🚨 SECURITY NOTICE: Infrastructure changes detected - elevated permissions required"
                        echo " Changed files: ${infrastructureFiles}"
                    } else {
                        // Check if infrastructure actually exists in AWS
                        def clusterExists = false
                        try {
                            withCredentials([[$class: 'AmazonWebServicesCredentialsBinding', credentialsId: env.AWS_CRED_ID]]) {
                                def clusterCheck = sh(
                                    script: "aws ecs describe-clusters --clusters ${TF_VAR_cluster_name} --region ${AWS_REGION} --query 'clusters[0].status' --output text 2>/dev/null || echo 'NOTFOUND'",
                                    returnStdout: true
                                ).trim()
                                clusterExists = (clusterCheck == "ACTIVE")
                            }
                        } catch (Exception e) {
                            echo "⚠️ Could not check cluster status: ${e.getMessage()}"
                        }

                        if (!clusterExists) {
                            env.DEPLOYMENT_TYPE = "INFRASTRUCTURE"
                            echo "🚨 CLEAN AWS DETECTED: No existing infrastructure found - deploying from scratch"
                        } else {
                            env.DEPLOYMENT_TYPE = "APPLICATION"
                            echo "✅ SECURITY: Application-only deployment - using restricted permissions"
                        }
                    }

                    def gitCommit = sh(script: 'git rev-parse HEAD', returnStdout: true).trim()
                    def gitAuthor = sh(script: 'git log -1 --pretty=format:"%an"', returnStdout: true).trim()
                    currentBuild.description = "${env.DEPLOYMENT_TYPE} | ${env.IMAGE_TAG} | ${gitCommit.take(8)}"
                    echo "📋 SECURITY AUDIT TRAIL:"
                    echo " • Deployment Type: ${env.DEPLOYMENT_TYPE}"
                    echo " • Version: ${env.IMAGE_TAG}"
                    echo " • Commit: ${gitCommit.take(8)}"
                    echo " • Author: ${gitAuthor}"
                    echo " • Container Registry: ECR (AWS-native, secure)"
                    echo " • Architecture: SSM-based ECS access (secure, keyless)"
                    echo " • Security Model: Principle of Least Privilege"
                    echo " • Timestamp: ${new Date()}"

                    writeFile file: 'deployment-audit.json', text: """{
                        "build_number": "${BUILD_NUMBER}",
                        "deployment_type": "${env.DEPLOYMENT_TYPE}",
                        "image_tag": "${env.IMAGE_TAG}",
                        "git_commit": "${gitCommit}",
                        "git_author": "${gitAuthor}",
                        "infrastructure_files_changed": "${infrastructureFiles}",
                        "container_registry": "ECR",
                        "architecture": "ssm_based_ecs_access",
                        "security_model": "principle_of_least_privilege",
                        "timestamp": "${new Date()}"
                    }"""
                    archiveArtifacts artifacts: 'deployment-audit.json', fingerprint: true
                }
            }
        }

        stage('Security & Quality Checks') {
            parallel {
                stage('SonarQube Security Analysis') {
                    when {
                        expression { !params.SKIP_QUALITY_GATES }
                    }
                    steps {
                        script {
                            def scannerHome = tool 'SonarQubeScanner'
                            withSonarQubeEnv('SonarQube') {
                                sh """
                                    echo "🔒 SECURITY: Running SonarQube security analysis..."
                                    ${scannerHome}/bin/sonar-scanner \\
                                        -Dsonar.projectKey=nvhi-atsila-microservice \\
                                        -Dsonar.sources=. \\
                                        -Dsonar.projectVersion=${BUILD_NUMBER} \\
                                        -Dsonar.login=${SONAR_TOKEN}
                                """
                            }
                            echo "✅ SECURITY: Code quality and security scan completed"
                        }
                    }
                }
                stage('Terraform Security Validation') {
                    steps {
                        script {
                            echo "🔒 SECURITY: Running Terraform security and validation checks..."
                            sh '''
                                echo "Validating Terraform configuration..."
                                cd terraform && terraform init -backend=false
                                terraform validate
                                echo "✅ Terraform validation passed"
                                echo "🔒 SECURITY: Checking infrastructure security compliance..."
                                grep -r "encrypted.*true" . --include="*.tf" && echo "✅ Encryption policies found" || echo "⚠️ Review encryption settings"
                                echo "🔒 SECURITY: Checking for open security groups..."
                                if grep -r "0.0.0.0/0" . --include="*.tf" --exclude-dir=".terraform" | grep -v "# Approved:"; then
                                    echo "⚠️ Review open access rules found"
                                else
                                    echo "✅ No unauthorized open access rules"
                                fi
                            '''
                            echo "✅ SECURITY: Infrastructure validation and security checks passed"
                        }
                    }
                }
            }
        }

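        // ECR flow: create the repository if it is missing, authenticate with a short-lived
        // token from `aws ecr get-login-password`, then push the immutable version tag
        // (IMAGE_TAG) plus a moving `latest` tag.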
        stage('Secure Container Build & Registry') {
            steps {
                withCredentials([[$class: 'AmazonWebServicesCredentialsBinding', credentialsId: env.AWS_CRED_ID]]) {
                    script {
                        echo "🔐 SECURITY: Using ECR for secure, AWS-native container registry"

                        // Create ECR repository if it doesn't exist
                        echo "🔍 Checking/Creating ECR repository..."
                        sh """
                            if ! aws ecr describe-repositories --repository-names ${ECR_REPO} --region ${AWS_REGION} 2>/dev/null; then
                                echo "📦 Creating ECR repository: ${ECR_REPO}"
                                aws ecr create-repository --repository-name ${ECR_REPO} --region ${AWS_REGION}
                                echo "✅ ECR repository created successfully"
                            else
                                echo "✅ ECR repository already exists"
                            fi
                        """

                        sh """
                            echo "🔐 Authenticating with ECR using temporary credentials..."
                            aws ecr get-login-password --region ${AWS_REGION} | docker login --username AWS --password-stdin ${AWS_ACCOUNT_ID}.dkr.ecr.${AWS_REGION}.amazonaws.com
                        """
                        echo "🐳 Building secure container with metadata..."
                        sh """
                            docker build -t ${AWS_ACCOUNT_ID}.dkr.ecr.${AWS_REGION}.amazonaws.com/${ECR_REPO}:${IMAGE_TAG} .
                            docker push ${AWS_ACCOUNT_ID}.dkr.ecr.${AWS_REGION}.amazonaws.com/${ECR_REPO}:${IMAGE_TAG}
                            docker tag ${AWS_ACCOUNT_ID}.dkr.ecr.${AWS_REGION}.amazonaws.com/${ECR_REPO}:${IMAGE_TAG} ${AWS_ACCOUNT_ID}.dkr.ecr.${AWS_REGION}.amazonaws.com/${ECR_REPO}:latest
                            docker push ${AWS_ACCOUNT_ID}.dkr.ecr.${AWS_REGION}.amazonaws.com/${ECR_REPO}:latest
                        """
                        echo "✅ SECURITY: Container built and pushed to ECR successfully"
                        echo " Image: ${AWS_ACCOUNT_ID}.dkr.ecr.${AWS_REGION}.amazonaws.com/${ECR_REPO}:${IMAGE_TAG}"
                        echo " Registry: ECR (AWS-native, IAM-secured)"
                    }
                }
            }
        }

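        // Safety net: even when change detection selected an application-only deployment,
        // the build is promoted to INFRASTRUCTURE if the ECS service or its container
        // instances are missing, so the deploy never targets a half-provisioned cluster.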
        stage('Infrastructure Readiness Check') {
            steps {
                withCredentials([[$class: 'AmazonWebServicesCredentialsBinding', credentialsId: env.AWS_CRED_ID]]) {
                    script {
                        echo "🔍 SECURITY: Checking if infrastructure is ready for deployment..."
                        echo "🔍 Current deployment type: ${env.DEPLOYMENT_TYPE}"

                        // Only check readiness if deployment type is APPLICATION
                        if (env.DEPLOYMENT_TYPE == "APPLICATION") {
                            def serviceExists = sh(
                                script: """
                                    aws ecs describe-services --cluster ${TF_VAR_cluster_name} --services ${TF_VAR_cluster_name}-service --region ${AWS_REGION} 2>/dev/null | grep -q 'ACTIVE' && echo 'true' || echo 'false'
                                """,
                                returnStdout: true
                            ).trim()
                            def instanceCount = sh(
                                script: """
                                    aws ecs list-container-instances --cluster ${TF_VAR_cluster_name} --region ${AWS_REGION} --query 'length(containerInstanceArns)' --output text 2>/dev/null || echo '0'
                                """,
                                returnStdout: true
                            ).trim()
                            if (serviceExists == "false" || instanceCount == "0" || instanceCount == "null") {
                                echo "🚨 SECURITY NOTICE: Infrastructure not ready - forcing deployment"
                                echo " Service Exists: ${serviceExists}"
                                echo " Container Instances: ${instanceCount}"
                                env.DEPLOYMENT_TYPE = "INFRASTRUCTURE"
                                currentBuild.description = "INFRASTRUCTURE (auto-detected) | ${env.IMAGE_TAG}"
                            }
                        } else {
                            echo "✅ Infrastructure deployment already forced - skipping readiness check"
                        }

                        echo "📋 SECURITY: Infrastructure readiness assessment completed"
                        echo " Final Deployment Type: ${env.DEPLOYMENT_TYPE}"
                    }
                }
            }
        }

        stage('Deploy Infrastructure') {
            when {
                expression { env.DEPLOYMENT_TYPE == "INFRASTRUCTURE" }
            }
            steps {
                withCredentials([[$class: 'AmazonWebServicesCredentialsBinding', credentialsId: env.AWS_CRED_ID]]) {
                    dir('terraform') {
                        script {
                            echo "🔍 DEPLOYMENT: Deployment type is ${env.DEPLOYMENT_TYPE}"
                            echo "🚨 SECURITY NOTICE: Infrastructure deployment requested"
                            echo "🏗️ ARCHITECTURE: Deploying ECS Cluster with SSM access (secure, keyless)"
                            echo "🔐 In production: This would require infrastructure-admin role"
                            echo "🚀 Attempting infrastructure deployment..."
                            sh """
                                echo "🔄 Initializing Terraform with remote backend..."
                                terraform init \\
                                    -backend-config="bucket=${TF_BACKEND_BUCKET}" \\
                                    -backend-config="key=${TF_BACKEND_PREFIX}" \\
                                    -backend-config="region=${AWS_REGION}" \\
                                    -backend-config="dynamodb_table=${TF_DDB_TABLE}"

                                echo "🔄 Planning infrastructure changes..."
                                terraform plan \\
                                    -var="cluster_name=${TF_VAR_cluster_name}" \\
                                    -var="vpc_cidr=${TF_VAR_vpc_cidr}" \\
                                    -var="public_subnets=${TF_VAR_public_subnets}" \\
                                    -var="instance_type=${TF_VAR_instance_type}" \\
                                    -var="key_pair_name=${TF_VAR_key_pair_name}" \\
                                    -var="jenkins_ip_cidr=${TF_VAR_jenkins_ip_cidr}" \\
                                    -var="aws_region=${TF_VAR_aws_region}"

                                echo "🔄 Applying infrastructure changes..."
                                terraform apply -auto-approve \\
                                    -var="cluster_name=${TF_VAR_cluster_name}" \\
                                    -var="vpc_cidr=${TF_VAR_vpc_cidr}" \\
                                    -var="public_subnets=${TF_VAR_public_subnets}" \\
                                    -var="instance_type=${TF_VAR_instance_type}" \\
                                    -var="key_pair_name=${TF_VAR_key_pair_name}" \\
                                    -var="jenkins_ip_cidr=${TF_VAR_jenkins_ip_cidr}" \\
                                    -var="aws_region=${TF_VAR_aws_region}"
                            """
                            echo "✅ SECURITY: Infrastructure deployment completed with compliance verification"
                        }
                    }
                }
            }
        }

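        // Polls the cluster (10-minute timeout) until at least one container instance has
        // registered and reports ACTIVE, so the service deployment has capacity to place tasks.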
        stage('Wait for ECS Agents') {
            when {
                expression { env.DEPLOYMENT_TYPE == "INFRASTRUCTURE" }
            }
            steps {
                withCredentials([[$class: 'AmazonWebServicesCredentialsBinding', credentialsId: env.AWS_CRED_ID]]) {
                    script {
                        echo "⏳ Waiting for ECS agents to register with cluster..."
                        timeout(time: 10, unit: 'MINUTES') {
                            waitUntil {
                                def count = sh(
                                    script: """
                                        aws ecs list-container-instances --cluster ${TF_VAR_cluster_name} --region ${AWS_REGION} --query 'length(containerInstanceArns)' --output text 2>/dev/null || echo '0'
                                    """,
                                    returnStdout: true
                                ).trim()
                                if (count != "0" && count != "null") {
                                    echo "✅ ECS agents registered: ${count} instance(s)"
                                    def activeCount = sh(
                                        script: """
                                            aws ecs describe-container-instances --cluster ${TF_VAR_cluster_name} --container-instances \$(aws ecs list-container-instances --cluster ${TF_VAR_cluster_name} --region ${AWS_REGION} --query 'containerInstanceArns[*]' --output text) --region ${AWS_REGION} --query 'length(containerInstances[?status==`ACTIVE`])' --output text 2>/dev/null || echo '0'
                                        """,
                                        returnStdout: true
                                    ).trim()
                                    if (activeCount != "0" && activeCount != "null") {
                                        echo "✅ Active ECS instances: ${activeCount}"
                                        return true
                                    } else {
                                        echo "⏳ Waiting for instances to become ACTIVE..."
                                        sleep(20)
                                        return false
                                    }
                                } else {
                                    echo "⏳ No ECS agents registered yet..."
                                    sleep(20)
                                    return false
                                }
                            }
                        }
                    }
                }
            }
        }

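        // Runs instance configuration (via SSM Run Command, no SSH) in parallel with the
        // ECS task-definition registration and service create/update.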
        stage('Configure & Deploy Application') {
            parallel {
                stage('Configure EC2 Instance via SSM') {
                    when {
                        expression {
                            def hasInstances = false
                            try {
                                def instanceId = sh(
                                    script: """
                                        cd terraform && terraform output -raw ecs_instance_id 2>/dev/null || echo ''
                                    """,
                                    returnStdout: true
                                ).trim()
                                hasInstances = (instanceId != "" && instanceId != "null")
                            } catch (Exception e) {
                                echo "⚠️ No instances to configure: ${e.getMessage()}"
                            }
                            return hasInstances
                        }
                    }
                    steps {
                        withCredentials([[$class: 'AmazonWebServicesCredentialsBinding', credentialsId: env.AWS_CRED_ID]]) {
                            script {
                                echo "🔧 ENTERPRISE: Configuring EC2 instance via SSM (no SSH required)"
                                def instanceId = ""
                                def ec2_ip = ""
                                try {
                                    sh "test -d terraform || (echo 'Terraform directory not found' && exit 1)"
                                    instanceId = sh(
                                        script: """
                                            cd terraform && terraform output -raw ecs_instance_id
                                        """,
                                        returnStdout: true
                                    ).trim()
                                    ec2_ip = sh(
                                        script: """
                                            cd terraform && terraform output -raw ecs_instance_public_ip
                                        """,
                                        returnStdout: true
                                    ).trim()
                                } catch (Exception e) {
                                    echo "⚠️ Could not get instance details: ${e.getMessage()}"
                                    echo "⚠️ Skipping SSM configuration - no instances available"
                                    return
                                }
                                echo "📍 Target Instance: ${instanceId} (${ec2_ip})"
                                echo "⏳ Waiting for SSM agent to be ready..."
                                timeout(time: 10, unit: 'MINUTES') {
                                    waitUntil {
                                        def ssmStatus = sh(
                                            script: """
                                                aws ssm describe-instance-information --filters "Key=InstanceIds,Values=${instanceId}" --region ${AWS_REGION} --query 'InstanceInformationList[0].PingStatus' --output text 2>/dev/null || echo 'Offline'
                                            """,
                                            returnStdout: true
                                        ).trim()
                                        if (ssmStatus == "Online") {
                                            echo "✅ SSM agent is online"
                                            return true
                                        } else {
                                            echo "⏳ SSM agent status: ${ssmStatus}, waiting..."
                                            sleep(30)
                                            return false
                                        }
                                    }
                                }

                                echo "🔧 Running configuration commands via SSM..."
                                sh """
                                    # Install or update Docker if needed
                                    aws ssm send-command \\
                                        --instance-ids ${instanceId} \\
                                        --document-name "AWS-RunShellScript" \\
                                        --parameters 'commands=["sudo yum update -y && sudo yum install -y docker && sudo systemctl start docker && sudo systemctl enable docker"]' \\
                                        --region ${AWS_REGION} \\
                                        --comment "Installing Docker on ECS instance"

                                    # Wait for command to complete
                                    sleep 60

                                    # Configure ECS agent
                                    aws ssm send-command \\
                                        --instance-ids ${instanceId} \\
                                        --document-name "AWS-RunShellScript" \\
                                        --parameters 'commands=["echo ECS_CLUSTER=${TF_VAR_cluster_name} | sudo tee -a /etc/ecs/ecs.config","sudo systemctl restart ecs"]' \\
                                        --region ${AWS_REGION} \\
                                        --comment "Configuring ECS agent"
                                """
                                echo "✅ ENTERPRISE: EC2 instance configured via SSM"
                            }
                        }
                    }
                }

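                // Registers a bridge-mode EC2 task definition (ECR image, port 8080, container
                // health check, awslogs logging) and then either updates the existing service
                // with --force-new-deployment or creates it with a desired count of 1.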
                stage('Deploy ECS Service') {
                    steps {
                        withCredentials([[$class: 'AmazonWebServicesCredentialsBinding', credentialsId: env.AWS_CRED_ID]]) {
                            script {
                                echo "🚀 DEPLOYMENT: Deploying application to ECS cluster"

                                // Create task definition
                                def executionRoleArn = ""
                                try {
                                    executionRoleArn = sh(
                                        script: 'cd terraform && terraform output -raw ecs_task_execution_role_arn',
                                        returnStdout: true
                                    ).trim()
                                } catch (Exception e) {
                                    echo "⚠️ Could not get execution role ARN: ${e.getMessage()}"
                                    echo "⚠️ Task definition will be created without execution role"
                                }

                                def taskDefinition = """
                                {
                                    "family": "${TF_VAR_cluster_name}-task",
                                    "networkMode": "bridge",
                                    "requiresCompatibilities": ["EC2"],
                                    "memory": "512",
                                    "cpu": "256"${executionRoleArn ? ",\n \"executionRoleArn\": \"${executionRoleArn}\"" : ""},
                                    "containerDefinitions": [
                                        {
                                            "name": "${ECR_REPO}",
                                            "image": "${AWS_ACCOUNT_ID}.dkr.ecr.${AWS_REGION}.amazonaws.com/${ECR_REPO}:${IMAGE_TAG}",
                                            "memory": 512,
                                            "cpu": 256,
                                            "essential": true,
                                            "portMappings": [
                                                {
                                                    "containerPort": 8080,
                                                    "hostPort": 8080,
                                                    "protocol": "tcp"
                                                }
                                            ],
                                            "healthCheck": {
                                                "command": ["CMD-SHELL", "curl -f http://localhost:8080/health || exit 1"],
                                                "interval": 30,
                                                "timeout": 5,
                                                "retries": 3,
                                                "startPeriod": 60
                                            },
                                            "logConfiguration": {
                                                "logDriver": "awslogs",
                                                "options": {
                                                    "awslogs-group": "/ecs/${TF_VAR_cluster_name}",
                                                    "awslogs-region": "${AWS_REGION}",
                                                    "awslogs-stream-prefix": "ecs"
                                                }
                                            }
                                        }
                                    ]
                                }
                                """

                                writeFile file: 'task-definition.json', text: taskDefinition

                                sh """
                                    # Create CloudWatch log group if it doesn't exist
                                    aws logs create-log-group --log-group-name /ecs/${TF_VAR_cluster_name} --region ${AWS_REGION} || echo "Log group already exists"

                                    # Register task definition
                                    aws ecs register-task-definition \\
                                        --cli-input-json file://task-definition.json \\
                                        --region ${AWS_REGION}

                                    # Check if service exists
                                    if aws ecs describe-services --cluster ${TF_VAR_cluster_name} --services ${TF_VAR_cluster_name}-service --region ${AWS_REGION} --query 'services[0].status' --output text 2>/dev/null | grep -q 'ACTIVE'; then
                                        echo "✅ Service exists, updating..."
                                        aws ecs update-service \\
                                            --cluster ${TF_VAR_cluster_name} \\
                                            --service ${TF_VAR_cluster_name}-service \\
                                            --task-definition ${TF_VAR_cluster_name}-task \\
                                            --desired-count 1 \\
                                            --force-new-deployment \\
                                            --region ${AWS_REGION}
                                    else
                                        echo "✅ Creating new service..."
                                        aws ecs create-service \\
                                            --cluster ${TF_VAR_cluster_name} \\
                                            --service-name ${TF_VAR_cluster_name}-service \\
                                            --task-definition ${TF_VAR_cluster_name}-task \\
                                            --desired-count 1 \\
                                            --region ${AWS_REGION}
                                    fi
                                """

                                echo "✅ DEPLOYMENT: ECS service deployment initiated"
                            }
                        }
                    }
                }
            }
        }

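        // Verification polls the service (15-minute timeout) until the latest deployment has
        // rolled out and at least one task is running, then surfaces the public URL from the
        // Terraform outputs in the build description.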
        stage('Verify Deployment') {
            steps {
                withCredentials([[$class: 'AmazonWebServicesCredentialsBinding', credentialsId: env.AWS_CRED_ID]]) {
                    script {
                        echo "🔍 VERIFICATION: Checking deployment status..."

                        timeout(time: 15, unit: 'MINUTES') {
                            waitUntil {
                                def serviceStatus = sh(
                                    script: """
                                        aws ecs describe-services \\
                                            --cluster ${TF_VAR_cluster_name} \\
                                            --services ${TF_VAR_cluster_name}-service \\
                                            --region ${AWS_REGION} \\
                                            --query 'services[0].deployments[0].rolloutState' \\
                                            --output text 2>/dev/null || echo 'UNKNOWN'
                                    """,
                                    returnStdout: true
                                ).trim()

                                def runningCount = sh(
                                    script: """
                                        aws ecs describe-services \\
                                            --cluster ${TF_VAR_cluster_name} \\
                                            --services ${TF_VAR_cluster_name}-service \\
                                            --region ${AWS_REGION} \\
                                            --query 'services[0].runningCount' \\
                                            --output text 2>/dev/null || echo '0'
                                    """,
                                    returnStdout: true
                                ).trim()

                                echo "Service Status: ${serviceStatus}, Running Tasks: ${runningCount}"

                                if (serviceStatus == "COMPLETED" && runningCount.toInteger() > 0) {
                                    echo "✅ Service deployment completed successfully"
                                    return true
                                } else {
                                    echo "⏳ Waiting for service to stabilize..."
                                    sleep(30)
                                    return false
                                }
                            }
                        }

                        // Get application URL
                        def appUrl = ""
                        try {
                            appUrl = sh(
                                script: """
                                    cd terraform && terraform output -raw ecs_instance_public_ip 2>/dev/null || echo 'unavailable'
                                """,
                                returnStdout: true
                            ).trim()

                            if (appUrl != "unavailable" && appUrl != "") {
                                echo "🌐 APPLICATION URL: http://${appUrl}:8080"
                                currentBuild.description = "${currentBuild.description} | URL: http://${appUrl}:8080"
                            }
                        } catch (Exception e) {
                            echo "⚠️ Could not determine application URL: ${e.getMessage()}"
                        }

                        echo "✅ VERIFICATION: Deployment verification completed"
                    }
                }
            }
        }
    }

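    // Post-build: archive audit artifacts, prune local Docker images, and print a summary;
    // on failure, dump ECS cluster and container-instance state to aid debugging.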
    post {
        always {
            script {
                echo "🧹 CLEANUP: Performing post-build cleanup..."

                // Archive deployment artifacts
                try {
                    archiveArtifacts artifacts: 'deployment-audit.json,task-definition.json', allowEmptyArchive: true
                } catch (Exception e) {
                    echo "⚠️ Could not archive artifacts: ${e.getMessage()}"
                }

                // Clean up Docker images to save space
                sh '''
                    echo "🧹 Cleaning up Docker images..."
                    docker system prune -f || echo "Docker cleanup failed"
                '''

                echo "📊 SUMMARY: Build completed"
                echo " Build Number: ${BUILD_NUMBER}"
                echo " Image Tag: ${IMAGE_TAG}"
                echo " Deployment Type: ${env.DEPLOYMENT_TYPE}"
                echo " Status: ${currentBuild.currentResult}"
            }
        }

        success {
            script {
                echo "🎉 SUCCESS: Deployment completed successfully!"
                echo " Version ${IMAGE_TAG} deployed to ECS cluster ${TF_VAR_cluster_name}"

                // Get application URL for success message
                def appUrl = ""
                try {
                    appUrl = sh(
                        script: "cd terraform && terraform output -raw ecs_instance_public_ip 2>/dev/null || echo 'unknown'",
                        returnStdout: true
                    ).trim()
                    if (appUrl != "unknown" && appUrl != "") {
                        echo "🌐 Application available at: http://${appUrl}:8080"
                        echo "🏥 Health check: http://${appUrl}:8080/health"
                    }
                } catch (Exception e) {
                    echo "⚠️ Could not determine application URL"
                }
            }
        }

        failure {
            script {
                echo "❌ FAILURE: Deployment failed"
                echo " Check the logs above for error details"

                // Try to get some debug information
                try {
                    withCredentials([[$class: 'AmazonWebServicesCredentialsBinding', credentialsId: env.AWS_CRED_ID]]) {
                        echo "🔍 DEBUG: Checking ECS cluster status..."
                        sh """
                            aws ecs describe-clusters --clusters ${TF_VAR_cluster_name} --region ${AWS_REGION} || echo "Cluster check failed"
                            aws ecs list-container-instances --cluster ${TF_VAR_cluster_name} --region ${AWS_REGION} || echo "Instance list failed"
                        """
                    }
                } catch (Exception e) {
                    echo "⚠️ Could not get debug information: ${e.getMessage()}"
                }
            }
        }

        unstable {
            script {
                echo "⚠️ UNSTABLE: Build completed with warnings"
            }
        }
    }
}